/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <ext/__hash>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <sys/sysctl.h>

#include <mach-o/dyld_priv.h>

#include "ClosureWriter.h"
#include "ClosureBuilder.h"
#include "MachOAnalyzer.h"
#include "MachOAnalyzerSet.h"
#include "libdyldEntryVector.h"
#include "RootsChecker.h"
#include "Tracing.h"

#define CLOSURE_SELOPT_WRITE
#include "objc-shared-cache.h"

#if BUILDING_DYLD
namespace dyld { void log(const char*, ...); }
#endif

namespace dyld3 {
namespace closure {


const DlopenClosure* ClosureBuilder::sRetryDlopenClosure = (const DlopenClosure*)(-1);
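// Note: (-1) above appears to be an out-of-band sentinel, distinct from nullptr,
// presumably telling the caller that no closure could be returned as-is and the
// dlopen should be retried with a freshly built closure; this reading is inferred
// from the name, not stated in this file.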

ClosureBuilder::ClosureBuilder(uint32_t startImageNum, const FileSystem& fileSystem, const RootsChecker& rootsChecker,
                               const DyldSharedCache* dyldCache, bool dyldCacheIsLive,
                               const GradedArchs& archs, const PathOverrides& pathOverrides, AtPath atPathHandling, bool allowRelativePaths,
                               LaunchErrorInfo* errorInfo, Platform platform, DylibFixupHandler handler)
    : _fileSystem(fileSystem), _rootsChecker(rootsChecker), _dyldCache(dyldCache), _pathOverrides(pathOverrides), _archs(archs), _platform(platform), _startImageNum(startImageNum),
      _dylibFixupHandler(handler), _atPathHandling(atPathHandling), _launchErrorInfo(errorInfo), _dyldCacheIsLive(dyldCacheIsLive), _allowRelativePaths(allowRelativePaths)
{
    if ( dyldCache != nullptr ) {
        _dyldImageArray = dyldCache->cachedDylibsImageArray();
    }
}


ClosureBuilder::~ClosureBuilder() {
    if ( _tempPaths != nullptr )
        PathPool::deallocate(_tempPaths);
    if ( _mustBeMissingPaths != nullptr )
        PathPool::deallocate(_mustBeMissingPaths);
    if ( _objcDuplicateClassWarnings != nullptr )
        PathPool::deallocate(_objcDuplicateClassWarnings);
}

static bool iOSSupport(const char* path)
{
    return ( strncmp(path, "/System/iOSSupport/", 19) == 0 );
}
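
// Example for iOSSupport() (hypothetical path): "/System/iOSSupport/usr/lib/libFoo.dylib"
// matches. Callers that compute the macOS "unzippered twin" use &path[18], which points
// at the prefix's trailing '/', so the twin path keeps its leading slash: "/usr/lib/libFoo.dylib".

// findImage() resolves one load path to a BuilderLoadedImage. For each path variant
// (DYLD_LIBRARY_PATH, fallback paths, etc.) and each @-path expansion, it tries in order:
// images already loaded (by path, then by inode/mtime, then by realpath), the dyld
// shared cache (including pre-built dlopen closures and symlinks into the cache),
// and finally mmap()ing the file from disk.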

bool ClosureBuilder::findImage(const char* loadPath, const LoadedImageChain& forImageChain, BuilderLoadedImage*& foundImage, LinkageType linkageType,
                               uint32_t compatVersion, bool canUseSharedCacheClosure)
{
    // There shouldn't be an error here as the callers should stop trying to find more images if they get an error for an image
    _diag.assertNoError();

    __block bool result = false;

    // record if this is a non-overridable path
    bool pathIsInDyldCacheWhichCannotBeOverridden = false;
    bool dylibsExpectedOnDisk = true;
    if ( _dyldCache != nullptr ) {
        pathIsInDyldCacheWhichCannotBeOverridden = _dyldCache->hasNonOverridablePath(loadPath);
        dylibsExpectedOnDisk = _dyldCache->header.dylibsExpectedOnDisk;
    }

    // when building the dyld cache for macOS, if the requesting dylib is an iOSMac unzippered twin, tell the pathOverrides object to look in /System/iOSSupport first
    dyld3::Platform targetPlatform = _platform;
    if ( _makingDyldCacheImages && (_platform == dyld3::Platform::macOS) ) {
        if ( forImageChain.image.loadAddress()->builtForPlatform(Platform::iOSMac, true) )
            targetPlatform = Platform::iOSMac;
    }

    _pathOverrides.forEachPathVariant(loadPath, pathIsInDyldCacheWhichCannotBeOverridden, ^(const char* possibleVariantPath, bool isFallbackPath, bool& stopPathVariant) {

        // This check is within forEachPathVariant() to let DYLD_LIBRARY_PATH override LC_RPATH
        bool isRPath = (strncmp(possibleVariantPath, "@rpath/", 7) == 0);

        // passing a leaf name to dlopen() allows rpath searching for it
        // FIXME: Does this apply to DYLD_INSERT_LIBRARIES too?
        bool implicitRPath = (linkageType == LinkageType::kDynamic) && (loadPath[0] != '/') && (loadPath == possibleVariantPath) && (_atPathHandling != AtPath::none);

        // expand @ paths
        forEachResolvedPathVar(possibleVariantPath, forImageChain, implicitRPath, linkageType,
                               ^(const char* possiblePath, bool& stop) {
            if ( possibleVariantPath != possiblePath )
                _atPathUsed = true;

            // look at already loaded images
            const char* leafName = strrchr(possiblePath, '/');
            for (BuilderLoadedImage& li: _loadedImages) {
                if ( strcmp(li.path(), possiblePath) == 0 ) {
                    foundImage = &li;
                    result = true;
                    stop = true;
                    return;
                }
                else if ( isRPath ) {
                    // Special case @rpath/ because the name in li.fileInfo.path is a full path.
                    // Getting the installName is expensive, so first see if an already loaded image
                    // has the same leaf name, and if so see if its installName matches the requested @rpath
                    if (const char* aLeaf = strrchr(li.path(), '/')) {
                        if ( strcmp(aLeaf, leafName) == 0 ) {
                            if ( li.loadAddress()->isDylib() && (strcmp(loadPath, li.loadAddress()->installName()) == 0) ) {
                                foundImage = &li;
                                result = true;
                                stop = true;
                                return;
                            }
                        }
                    }
                }
            }

            // look to see if the image was already loaded via a different symlink
            bool fileFound = false;
            uint64_t fileFoundINode = 0;
            uint64_t fileFoundMTime = 0;
            bool inodesMatchRuntime = false;
            // Note, we only do this check if we even expect to find this on-disk
            // We can also use the pathIsInDyldCacheWhichCannotBeOverridden result if we are still trying the same path
            // it was computed from
            if ( dylibsExpectedOnDisk || !pathIsInDyldCacheWhichCannotBeOverridden || (loadPath != possiblePath) ) {
                if ( _fileSystem.fileExists(possiblePath, &fileFoundINode, &fileFoundMTime, nullptr, &inodesMatchRuntime) ) {
                    fileFound = true;
                    for (BuilderLoadedImage& li: _loadedImages) {
                        if ( (li.loadedFileInfo.inode == 0) && (li.loadedFileInfo.mtime == 0) ) {
                            // Some already loaded image does not have an inode/mtime recorded, fix that if we can
                            if ( dylibsExpectedOnDisk || !li.loadAddress()->inDyldCache() ) {
                                _fileSystem.fileExists(li.path(), &li.loadedFileInfo.inode, &li.loadedFileInfo.mtime, nullptr, nullptr);
                            }
                        }
                        if ( (li.loadedFileInfo.inode == fileFoundINode) && (li.loadedFileInfo.mtime == fileFoundMTime) ) {
                            foundImage = &li;
                            result = true;
                            stop = true;
                            return;
                        }
                    }
                }
            }

            // We record the realpath of the file in the loaded images, but we might be loading via a symlink path.
            // We need to search using the realpath just in case the dylib the symlink points to was overwritten while
            // the process is running
            if ( fileFound ) {
                char realPath[MAXPATHLEN];
                if ( _fileSystem.getRealPath(possiblePath, realPath) ) {
                    for (BuilderLoadedImage& li: _loadedImages) {
                        if ( strcmp(li.path(), realPath) == 0 ) {
                            foundImage = &li;
                            result = true;
                            stop = true;
                            return;
                        }
                    }
                }
            }

            bool unmapWhenDone = false;
            bool contentRebased = false;
            bool hasInits = false;
            bool markNeverUnload = false;
            bool mustBuildClosure = _dyldCacheInvalidFormatVersion;
            ImageNum overrideImageNum = 0;
            ImageNum foundImageNum = 0;
            const MachOAnalyzer* mh = nullptr;
            const char* filePath = nullptr;
            LoadedFileInfo loadedFileInfo;

            // look in dyld cache
            filePath = possiblePath;
            char realPath[MAXPATHLEN];
            if ( _dyldImageArray != nullptr ) {
                uint32_t dyldCacheImageIndex;
                bool foundInCache = _dyldCache->hasImagePath(possiblePath, dyldCacheImageIndex);
                if ( !foundInCache && fileFound ) {
                    // see if this is an OS dylib/bundle with a pre-built dlopen closure
                    // We can only use the pre-built closure if we are dynamic linkage (a dlopen) and
                    // there are no roots
                    if ( canUseSharedCacheClosure && (linkageType == LinkageType::kDynamic) ) {
                        if (const dyld3::closure::Image* otherImage = _dyldCache->findDlopenOtherImage(possiblePath) ) {
                            uint64_t expectedInode;
                            uint64_t expectedModTime;
                            if ( !otherImage->isInvalid() ) {
                                bool hasInodeInfo = otherImage->hasFileModTimeAndInode(expectedInode, expectedModTime);
                                // use the pre-built Image if it does not have mtime/inode, or it does and it matches the current file info
                                if ( !hasInodeInfo || ((expectedInode == fileFoundINode) && (expectedModTime == fileFoundMTime)) ) {
                                    loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, possiblePath, _archs, _platform, realPath);
                                    if ( _diag.noError() ) {
                                        mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
                                        foundImageNum = otherImage->imageNum();
                                        unmapWhenDone = true;
                                        contentRebased = false;
                                        hasInits = otherImage->hasInitializers() || otherImage->mayHavePlusLoads();
                                        // Use the realpath in the case where we loaded a symlink
                                        // The closure must have recorded an alias path
                                        if (realPath[0] != '\0')
                                            filePath = realPath;
                                    }
                                }
                            }
                        }
                    }
                }

                // If found in the cache, but not on-disk, this may be an already loaded image, but we are opening the alias.
                // For example, we are trying to open .../AppKit but we already have a loaded root of .../Versions/C/AppKit
                // This doesn't work with the calls to realpath when the symlinks themselves were removed from disk.
                if ( foundInCache && !fileFound ) {
                    ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
                    for (BuilderLoadedImage& li: _loadedImages) {
                        if ( (li.overrideImageNum == dyldCacheImageNum) || (li.imageNum == dyldCacheImageNum) ) {
                            foundImage = &li;
                            result = true;
                            stop = true;
                            return;
                        }
                    }
                }

                // if not found in cache, may be a symlink to something in cache
                // We have to do this check even if the symlink target is not on disk as we may
                // have symlinks in iOSMac dlopen paths which are resolved to a dylib removed from disk
                if ( !foundInCache && (mh == nullptr) ) {
                    if ( _fileSystem.getRealPath(possiblePath, realPath) ) {
                        foundInCache = _dyldCache->hasImagePath(realPath, dyldCacheImageIndex);
                        if ( foundInCache ) {
                            ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
                            const Image* image = _dyldImageArray->imageForNum(dyldCacheImageNum);
                            filePath = image->path();
#if BUILDING_LIBDYLD
                            // handle case where OS dylib was updated after this process launched
                            if ( foundInCache ) {
                                for (BuilderLoadedImage& li: _loadedImages) {
                                    if ( strcmp(li.path(), filePath) == 0 ) {
                                        foundImage = &li;
                                        result = true;
                                        stop = true;
                                        return;
                                    }
                                }
                            }
#endif
                        }
                    }
                }

                // if using a cached dylib, look to see if there is an override
                if ( foundInCache ) {
                    ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
                    bool useCache = true;
                    markNeverUnload = true; // dylibs in cache, or dylibs that override cache, should not be unloaded at runtime
                    bool ignoreCacheDylib = false;
                    const Image* image = _dyldImageArray->imageForNum(dyldCacheImageNum);
                    if ( image->overridableDylib() ) {
                        if ( fileFound ) {
                            if ( _makingClosuresInCache ) {
                                // during iOS cache build, don't look at files on disk, use ones in cache
                                useCache = true;
                            } else if ( !_rootsChecker.onDiskFileIsRoot(filePath, _dyldCache, image,
                                                                        &_fileSystem, fileFoundINode, fileFoundMTime) ) {
                                // file exists, but is not a root
                                useCache = true;
                            } else {
                                // iOS internal build. Any root on disk overrides the cache
                                useCache = false;
                            }
                        }
                        if ( useCache && ((targetPlatform == Platform::iOSMac) || (targetPlatform == Platform::macOS)) ) {
                            // check this cached dylib is suitable for a catalyst or mac program
                            mh = (MachOAnalyzer*)_dyldCache->getIndexedImageEntry(dyldCacheImageNum-1, loadedFileInfo.mtime, loadedFileInfo.inode);
                            if ( !mh->loadableIntoProcess(targetPlatform, possiblePath) ) {
                                useCache = false;
                                mh = nullptr;
                                ignoreCacheDylib = true;
                            }
                        }
                        if ( !useCache && !ignoreCacheDylib ) {
                            overrideImageNum = dyldCacheImageNum;
                            _foundDyldCacheRoots = true;
                        }
                    }
                    if ( useCache ) {
                        foundImageNum = dyldCacheImageNum;
                        mh = (MachOAnalyzer*)_dyldCache->getIndexedImageEntry(foundImageNum-1, loadedFileInfo.mtime, loadedFileInfo.inode);
                        unmapWhenDone = false;
                        // if we are building the ImageArray in the dyld cache, content is not rebased
                        contentRebased = !_makingDyldCacheImages && _dyldCacheIsLive;
                        hasInits = image->hasInitializers() || image->mayHavePlusLoads();
                        // If the cache format is different from dyld/libdyld then we can't use this closure.
                        if ( (_dyldCache->header.formatVersion != dyld3::closure::kFormatVersion) || !canUseSharedCacheClosure ) {
                            mustBuildClosure = true;
                            _foundDyldCacheRoots = true;
                        }
                    }
                }
            }

            // If we are building the cache, and don't find an image, then it might be weak so just return
            if (_makingDyldCacheImages) {
                addMustBeMissingPath(possiblePath);
                return;
            }

            // if not found yet, mmap file
            if ( mh == nullptr ) {
                loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, filePath, _archs, _platform, realPath);
                mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
                if ( mh == nullptr ) {
                    // Don't add must-be-missing paths for dlopen as we don't cache dlopen closures
                    if (_isLaunchClosure) {
                        // If we found the file then we want to skip it as it's not a valid mach-o for this platform/arch
                        // We can't record skipped file mtime/inode for caches built on a different machine than the one they run on.
                        // In that case, we expect the file to be mastered out, as otherwise we couldn't detect if it's
                        // changed or not on the device side
                        if (fileFound && inodesMatchRuntime) {
                            addSkippedFile(possiblePath, fileFoundINode, fileFoundMTime);
                        } else {
                            addMustBeMissingPath(possiblePath);
                        }
                    }
                    return;
                }
                if ( linkageType != LinkageType::kDynamic ) {
                    // LC_LOAD_DYLIB can only link with dylibs, and DYLD_INSERT_LIBRARIES can only be dylibs
                    if ( !mh->isDylib() ) {
                        _diag.error("found '%s' which is not a dylib. Needed by '%s'", filePath, forImageChain.image.path());
                        return;
                    }
                    // verify this dylib has a compatible version
                    const char* installName;
                    uint32_t foundCompatVers;
                    uint32_t foundCurrentVers;
                    mh->getDylibInstallName(&installName, &foundCompatVers, &foundCurrentVers);
                    if ( (foundCompatVers < compatVersion) && mh->enforceCompatVersion() ) {
                        char foundStr[32];
                        char requiredStr[32];
                        MachOFile::packedVersionToString(foundCompatVers, foundStr);
                        MachOFile::packedVersionToString(compatVersion, requiredStr);
                        _diag.error("found '%s' which has compat version (%s) which is less than required (%s). Needed by '%s'",
                                    filePath, foundStr, requiredStr, forImageChain.image.path());
                        return;
                    }
                }
                else if ( mh->isMainExecutable() ) {
                    // when dlopen()ing a main executable, it must be a dynamic Position Independent Executable
                    if ( !mh->isPIE() || !mh->isDynamicExecutable() ) {
                        _diag.error("not PIE");
                        return;
                    }
                }
                // Use the realpath in the case where we loaded a symlink
                // The closure must have recorded an alias path
                if (realPath[0] != '\0')
                    filePath = realPath;
                foundImageNum = _startImageNum + _nextIndex++;
                _foundNonCachedImage = true;
                mustBuildClosure = true;
                unmapWhenDone = true;
            } else {
                loadedFileInfo.fileContent = mh;
            }

            if ( mh->inDyldCache() ) {
                // We may be loading from a symlink, so use the path in the cache, which is the realpath
                filePath = _dyldImageArray->imageForNum(foundImageNum)->path();
            }

            // if path is not the original path, or it's an inserted path (as forEachInColonList uses a stack temporary)
            if ( (filePath != loadPath) || (linkageType == LinkageType::kInserted) ) {
                if ( !mh->inDyldCache() ) {
                    // possiblePath may be a temporary (stack) string; since we found a file at that path, make it permanent
                    filePath = strdup_temp(filePath);
                }
                // check if this overrides what would have been found in the cache
                // This is the case where we didn't find the image with the path in the shared cache, perhaps as it used library paths,
                // but the path we requested had pointed in to the cache
                // FIXME: What if load path is via an @rpath and we will override the cache?
                if ( overrideImageNum == 0 ) {
                    if ( _dyldImageArray != nullptr ) {
                        uint32_t dyldCacheImageIndex;
                        if ( _dyldCache->hasImagePath(loadPath, dyldCacheImageIndex) ) {
                            ImageNum possibleOverrideNum = dyldCacheImageIndex+1;
                            if ( possibleOverrideNum != foundImageNum )
                                overrideImageNum = possibleOverrideNum;
                        }
                    }
                }
            }

            // check if this is an iOSMac dylib that is overriding a macOS dylib in the dyld cache
            if ( mh->inDyldCache() && iOSSupport(filePath) ) {
                const char* twinPath = &filePath[18];
                uint32_t dyldCacheImageIndex;
                if ( (_dyldCache != nullptr) && _dyldCache->hasImagePath(twinPath, dyldCacheImageIndex) ) {
                    ImageNum possibleOverrideNum = dyldCacheImageIndex+1;
                    if ( possibleOverrideNum != foundImageNum )
                        overrideImageNum = possibleOverrideNum;
                }
            }

            if ( !markNeverUnload ) {
                switch (linkageType) {
                    case LinkageType::kStatic:
                        // Static linkages can only be unloaded if the image loading us can be unloaded
                        markNeverUnload = forImageChain.image.markNeverUnload;
                        break;
                    case LinkageType::kDynamic:
                        markNeverUnload = false;
                        break;
                    case LinkageType::kInserted:
                        // Inserted libraries must never be unloaded
                        markNeverUnload = true;
                        break;
                };
            }

            if ( !markNeverUnload ) {
                // If the parent didn't force us to be never-unload, other conditions still may
                markNeverUnload = mh->markNeverUnload(_diag);
            }

            // Set the path again just in case it was strdup'ed.
            loadedFileInfo.path = filePath;

            // add new entry
            BuilderLoadedImage entry;
            entry.loadedFileInfo = loadedFileInfo;
            entry.imageNum = foundImageNum;
            entry.unmapWhenDone = unmapWhenDone;
            entry.contentRebased = contentRebased;
            entry.hasInits = hasInits;
            entry.markNeverUnload = markNeverUnload;
            entry.rtldLocal = false;
            entry.isBadImage = false;
            entry.mustBuildClosure = mustBuildClosure;
            entry.hasMissingWeakImports = false;
            entry.hasInterposingTuples = !mh->inDyldCache() && mh->hasInterposingTuples();
            entry.overrideImageNum = overrideImageNum;
            entry.exportsTrieOffset = 0;
            entry.exportsTrieSize = 0;
            _loadedImages.push_back(entry);
            foundImage = &_loadedImages.back();
            if ( isFallbackPath )
                _fallbackPathUsed = true;
            stop = true;
            result = true;
        });
        if (result)
            stopPathVariant = true;
    }, targetPlatform);

    // If we found a file, but also had an error, then we must have logged a diagnostic for a file we couldn't use.
    // Clear that for now.
    // FIXME: Surface this to the user in case they wanted to see the error
    if (result && _diag.hasError())
        _diag.clearError();

    return result;
}

bool ClosureBuilder::expandAtLoaderPath(const char* loadPath, bool fromLCRPATH, const BuilderLoadedImage& loadedImage, char fixedPath[])
{
    switch ( _atPathHandling ) {
        case AtPath::none:
            return false;
        case AtPath::onlyInRPaths:
            if ( !fromLCRPATH ) {
                // <rdar://42360708> allow @loader_path in LC_LOAD_DYLIB during dlopen()
                if ( _isLaunchClosure )
                    return false;
            }
            break;
        case AtPath::all:
            break;
    }
    if ( strncmp(loadPath, "@loader_path/", 13) == 0 ) {
        strlcpy(fixedPath, loadedImage.path(), PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            strcpy(lastSlash+1, &loadPath[13]);
            return true;
        }
    }
    else if ( fromLCRPATH && (strcmp(loadPath, "@loader_path") == 0) ) {
        // <rdar://problem/52881387> in LC_RPATH allow "@loader_path" without trailing slash
        strlcpy(fixedPath, loadedImage.path(), PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            lastSlash[1] = '\0';
            return true;
        }
    }

    return false;
}

bool ClosureBuilder::expandAtExecutablePath(const char* loadPath, bool fromLCRPATH, bool fromLCRPATHinMain, char fixedPath[])
{
    switch ( _atPathHandling ) {
        case AtPath::none:
            return false;
        case AtPath::onlyInRPaths:
            if ( !fromLCRPATH )
                return false;
            // main executables can always have an LC_RPATH that uses @executable_path, other images cannot if restricted
            if ( !fromLCRPATHinMain )
                return false;
            break;
        case AtPath::all:
            break;
    }

    if ( strncmp(loadPath, "@executable_path/", 17) == 0 ) {
        strlcpy(fixedPath, _mainProgLoadPath, PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            strcpy(lastSlash+1, &loadPath[17]);
            return true;
        }
    }
    else if ( fromLCRPATH && (strcmp(loadPath, "@executable_path") == 0) ) {
        // <rdar://problem/52881387> in LC_RPATH allow "@executable_path" without trailing slash
        strlcpy(fixedPath, _mainProgLoadPath, PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            lastSlash[1] = '\0';
            return true;
        }
    }

    return false;
}
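
// Worked examples for the two expanders above (all paths hypothetical):
//   "@loader_path/../lib/libbar.dylib", loaded from /opt/app/bin/libfoo.dylib
//     -> "/opt/app/bin/../lib/libbar.dylib"
//   "@executable_path/Frameworks/Bar.framework/Bar", main program /Applications/Foo.app/Contents/MacOS/Foo
//     -> "/Applications/Foo.app/Contents/MacOS/Frameworks/Bar.framework/Bar"
// Note neither expander canonicalizes ".." components; the later getRealPath() calls do that.
//
// forEachResolvedPathVar() below drives these expansions. Resolution order is:
// @loader_path, then @executable_path, then @rpath (walking the LC_RPATH stack from
// the image up through its loaders to the main executable), and finally the literal
// path. A leaf-only dlopen() name (e.g. dlopen("libfoo.dylib")) is treated as an
// implicit "@rpath/libfoo.dylib".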

void ClosureBuilder::forEachResolvedPathVar(const char* loadPath, const LoadedImageChain& forImageChain,
                                            bool implicitRPath, LinkageType linkageType,
                                            void (^handler)(const char* possiblePath, bool& stop))
{
    // don't expand @loader_path or @executable_path if disallowed
    if ( (_atPathHandling == AtPath::none) && (loadPath[0] == '@') && (loadPath[1] != 'r') ) {
        bool stop = false;
        handler(loadPath, stop);
        return;
    }

    // quick out if not an @ path and not an implicit rpath
    if ( !implicitRPath && (loadPath[0] != '@') ) {
        bool stop = false;
        handler(loadPath, stop);
        return;
    }

    // expand @loader_path
    // Note this isn't supported for DYLD_INSERT_LIBRARIES
    BLOCK_ACCCESSIBLE_ARRAY(char, tempPath, PATH_MAX); // read as: char tempPath[PATH_MAX];
    if ( (linkageType != LinkageType::kInserted) && expandAtLoaderPath(loadPath, false, forImageChain.image, tempPath) ) {
        bool stop = false;
        handler(tempPath, stop);
        return;
    }

    // expand @executable_path
    // Note this is supported for DYLD_INSERT_LIBRARIES
    if ( expandAtExecutablePath(loadPath, false, false, tempPath) ) {
        bool stop = false;
        handler(tempPath, stop);
        return;
    }

    // expand @rpath
    // Note this isn't supported for DYLD_INSERT_LIBRARIES
    const char* rpathTail = nullptr;
    char implicitRpathBuffer[PATH_MAX];
    if ( linkageType != LinkageType::kInserted ) {
        if ( strncmp(loadPath, "@rpath/", 7) == 0 ) {
            // note: rpathTail starts with '/'
            rpathTail = &loadPath[6];
        }
        else if ( implicitRPath ) {
            // make rpathTail start with '/'
            strlcpy(implicitRpathBuffer, "/", PATH_MAX);
            strlcat(implicitRpathBuffer, loadPath, PATH_MAX);
            rpathTail = implicitRpathBuffer;
        }
    }
    if ( rpathTail != nullptr ) {
        // rpath expansion is technically a stack of rpath dirs, built starting with the main executable and pushing
        // LC_RPATHs from each dylib as they are recursively loaded. Our imageChain represents that stack.
        __block bool done = false;
        for (const LoadedImageChain* link = &forImageChain; (link != nullptr) && !done; link = link->previous) {
            bool mainExecutable = link->image.loadAddress()->isMainExecutable();
            link->image.loadAddress()->forEachRPath(^(const char* rPath, bool& stop) {
                // fprintf(stderr, "LC_RPATH %s from %s\n", rPath, link->image.loadedFileInfo.path);
                if ( expandAtLoaderPath(rPath, true, link->image, tempPath) || expandAtExecutablePath(rPath, true, mainExecutable, tempPath) ) {
                    // @loader_path allowed and expanded
                    strlcat(tempPath, rpathTail, PATH_MAX);
                    handler(tempPath, stop);
                }
                else if ( rPath[0] == '/' ) {
#if (TARGET_OS_OSX && TARGET_CPU_ARM64)
                    if ( (_platform == Platform::iOS) && (strncmp(rPath, "/usr/lib/swift", 14) == 0) ) {
                        // LC_RPATH is to /usr/lib/swift, but running on macOS that is /System/iOSSupport/usr/lib/swift
                        strlcpy(tempPath, "/System/iOSSupport", PATH_MAX);
                        strlcat(tempPath, rPath, PATH_MAX);
                        strlcat(tempPath, rpathTail, PATH_MAX);
                        handler(tempPath, stop);
                        if (stop) {
                            done = true;
                            return;
                        }
                    }
#endif
                    // LC_RPATH is an absolute path, not blocked by AtPath::none
                    strlcpy(tempPath, rPath, PATH_MAX);
                    strlcat(tempPath, rpathTail, PATH_MAX);
                    handler(tempPath, stop);
                }
                if (stop)
                    done = true;
#if 0
                if ( _fileSystem.fileExists(tempPath) ) {
                    stop = true;
                    result = strdup_temp(tempPath);
                }
                else {
                    // Don't add must-be-missing paths for dlopen as we don't cache dlopen closures
                    if (_isLaunchClosure) {
                        addMustBeMissingPath(tempPath);
                    }
                }
#endif
            });
        }
        if (done)
            return;
    }

    bool stop = false;
    handler(loadPath, stop);
}

const char* ClosureBuilder::strdup_temp(const char* path) const
{
    if ( _tempPaths == nullptr )
        _tempPaths = PathPool::allocate();
    return _tempPaths->add(path);
}

void ClosureBuilder::addMustBeMissingPath(const char* path)
{
    //fprintf(stderr, "must be missing: %s\n", path);
    if ( _mustBeMissingPaths == nullptr )
        _mustBeMissingPaths = PathPool::allocate();
    // don't add path if already in list
    if ( !_mustBeMissingPaths->contains(path) )
        _mustBeMissingPaths->add(path);
}

void ClosureBuilder::addSkippedFile(const char* path, uint64_t inode, uint64_t mtime)
{
    _skippedFiles.push_back({ strdup_temp(path), inode, mtime });
}

ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(ImageNum imageNum)
{
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.imageNum == imageNum ) {
            return li;
        }
    }
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.overrideImageNum == imageNum ) {
            return li;
        }
    }
    assert(0 && "LoadedImage not found by num");
}

const ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(ImageNum imageNum) const
{
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.imageNum == imageNum ) {
            return li;
        }
    }
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.overrideImageNum == imageNum ) {
            return li;
        }
    }
    assert(0 && "LoadedImage not found");
}

ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(const MachOAnalyzer* mh)
{
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            return li;
        }
    }
    assert(0 && "LoadedImage not found by mh");
}

const MachOAnalyzer* ClosureBuilder::machOForImageNum(ImageNum imageNum)
{
    return findLoadedImage(imageNum).loadAddress();
}

const MachOAnalyzer* ClosureBuilder::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            if (li.isBadImage) {
                // Bad image during building group 1 closures, so the dependents array
                // is potentially incomplete.
                return nullptr;
            }
            ImageNum childNum = li.dependents[depIndex].imageNum();
            // This is typically something like a missing weak-dylib we are re-exporting a weak-import symbol from
            if (childNum == kMissingWeakLinkedImage)
                return nullptr;
            return machOForImageNum(childNum);
        }
    }
    return nullptr;
}

ImageNum ClosureBuilder::imageNumForMachO(const MachOAnalyzer* mh)
{
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            return li.imageNum;
        }
    }
    assert(0 && "unknown mach-o");
    return 0;
}

void ClosureBuilder::recursiveLoadDependents(LoadedImageChain& forImageChain, bool canUseSharedCacheClosure)
{
    // if dependents is set, then we have already loaded this
    if ( forImageChain.image.dependents.begin() != nullptr )
        return;

    uintptr_t startDepIndex = _dependencies.count();
    // add dependents
    __block uint32_t depIndex = 0;
    forImageChain.image.loadAddress()->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
        Image::LinkKind kind = Image::LinkKind::regular;
        if ( isWeak )
            kind = Image::LinkKind::weak;
        else if ( isReExport )
            kind = Image::LinkKind::reExport;
        else if ( isUpward )
            kind = Image::LinkKind::upward;
        BuilderLoadedImage* foundImage;
        if ( findImage(loadPath, forImageChain, foundImage, LinkageType::kStatic, compatVersion, canUseSharedCacheClosure) ) {
            ImageNum foundImageNum = foundImage->imageNum;
            if ( _diag.noError() )
                _dependencies.push_back(Image::LinkedImage(kind, foundImageNum));
        }
        else if ( isWeak ) {
            _dependencies.push_back(Image::LinkedImage(Image::LinkKind::weak, kMissingWeakLinkedImage));
            // <rdar://problem/54387345> don't let an error loading a weak dylib cause everything to fail
            // _diag is checked after each dependent load, so if there is an error it was with loading the current dylib.
            // Since it is a weak load, it is ok to ignore it and go on.
            _diag.clearError();
        }
        else {
            BLOCK_ACCCESSIBLE_ARRAY(char, extra, 4096);
            extra[0] = '\0';
            const char* targetLeaf = strrchr(loadPath, '/');
            if ( targetLeaf == nullptr )
                targetLeaf = loadPath;
            if ( _mustBeMissingPaths != nullptr ) {
                strcpy(extra, ", tried but didn't find: ");
                _mustBeMissingPaths->forEachPath(^(const char* aPath) {
                    const char* aLeaf = strrchr(aPath, '/');
                    if ( aLeaf == nullptr )
                        aLeaf = aPath;
                    if ( strcmp(targetLeaf, aLeaf) == 0 ) {
                        strlcat(extra, "'", 4096);
                        strlcat(extra, aPath, 4096);
                        strlcat(extra, "' ", 4096);
                    }
                });
            }
            if ( !_skippedFiles.empty() ) {
                strcpy(extra, ", tried but invalid: ");
                for (const SkippedFile& skippedFile : _skippedFiles) {
                    const char* aPath = skippedFile.path;
                    const char* aLeaf = strrchr(aPath, '/');
                    if ( aLeaf == nullptr )
                        aLeaf = aPath;
                    if ( strcmp(targetLeaf, aLeaf) == 0 ) {
                        strlcat(extra, "'", 4096);
                        strlcat(extra, aPath, 4096);
                        strlcat(extra, "' ", 4096);
                    }
                }
            }
            if ( _diag.hasError() ) {
#if BUILDING_CACHE_BUILDER
                std::string errorMessageBuffer = _diag.errorMessage();
                const char* msg = errorMessageBuffer.c_str();
#else
                const char* msg = _diag.errorMessage();
#endif
                char msgCopy[strlen(msg)+4];
                strcpy(msgCopy, msg);
                _diag.error("dependent dylib '%s' not found for '%s'. %s", loadPath, forImageChain.image.path(), msgCopy);
            }
            else {
                _diag.error("dependent dylib '%s' not found for '%s'%s", loadPath, forImageChain.image.path(), extra);
            }
            if ( _launchErrorInfo != nullptr ) {
                _launchErrorInfo->kind = DYLD_EXIT_REASON_DYLIB_MISSING;
                _launchErrorInfo->clientOfDylibPath = strdup_temp(forImageChain.image.path());
                _launchErrorInfo->targetDylibPath = strdup_temp(loadPath);
                _launchErrorInfo->symbol = nullptr;
            }
        }
        ++depIndex;
        if ( _diag.hasError() )
            stop = true;
    });
    if ( _diag.hasError() )
        return;
    forImageChain.image.dependents = _dependencies.subArray(startDepIndex, depIndex);

    // breadth-first recurse
    for (Image::LinkedImage dep : forImageChain.image.dependents) {
        // don't recurse upwards
        if ( dep.kind() == Image::LinkKind::upward )
            continue;
        // don't recurse down missing weak links
        if ( (dep.kind() == Image::LinkKind::weak) && (dep.imageNum() == kMissingWeakLinkedImage) )
            continue;
        BuilderLoadedImage& depLoadedImage = findLoadedImage(dep.imageNum());
        LoadedImageChain chain = { &forImageChain, depLoadedImage };
        recursiveLoadDependents(chain, canUseSharedCacheClosure);
        if ( _diag.hasError() )
            break;
    }
}

void ClosureBuilder::loadDanglingUpwardLinks(bool canUseSharedCacheClosure)
{
    bool danglingFixed;
    do {
        danglingFixed = false;
        for (BuilderLoadedImage& li : _loadedImages) {
            if ( li.dependents.begin() == nullptr ) {
                // this image does not have dependents set (probably a dangling upward link, or referenced by an upward link)
                LoadedImageChain chain = { nullptr, li };
                recursiveLoadDependents(chain, canUseSharedCacheClosure);
                danglingFixed = true;
                break;
            }
        }
    } while (danglingFixed && _diag.noError());
}
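
// Note on the restart loop above: recursiveLoadDependents() can append to _loadedImages,
// which may reallocate its storage, so the loop breaks out and rescans from the start
// after each fix rather than continuing a possibly-invalidated iteration (this rationale
// is inferred from the code, not stated in the original).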

bool ClosureBuilder::overridableDylib(const BuilderLoadedImage& forImage)
{
    // on macOS, the cache can be customer/development in the basesystem/main OS
    // on embedded platforms with an Internal cache, allow overrides
    // on customer caches, only allow libdispatch.dylib to be overridden
    return _dyldCache->isOverridablePath(forImage.path());
}

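// buildImage() serializes one BuilderLoadedImage into the closure's Image record:
// flags (weak-defs, objc, bundle/dylib/executable, page size), file inode/mtime where
// the platform expects dylibs on disk, mapping/code-signature/FairPlay info for
// non-cached images, paths and aliases, UUID, dependents, segments, fixups (or a
// fixups-not-encoded marker in minimal-closure mode), initializers, terminators,
// and DOF section offsets.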
void ClosureBuilder::buildImage(ImageWriter& writer, BuilderLoadedImage& forImage)
{
    const MachOAnalyzer* macho = forImage.loadAddress();
    // set ImageNum
    writer.setImageNum(forImage.imageNum);

    // set flags
    writer.setHasWeakDefs(macho->hasWeakDefs());
    writer.setIsBundle(macho->isBundle());
    writer.setIsDylib(macho->isDylib());
    writer.setIs64(macho->is64());
    writer.setIsExecutable(macho->isMainExecutable());
    writer.setUses16KPages(macho->uses16KPages());
    if ( macho->inDyldCache() ) {
        // only set on dylibs in the dyld shared cache
        writer.setOverridableDylib(overridableDylib(forImage));
    }
    writer.setInDyldCache(macho->inDyldCache());
    if ( macho->hasObjC() ) {
        writer.setHasObjC(true);
        bool hasPlusLoads = macho->hasPlusLoadMethod(_diag);
        writer.setHasPlusLoads(hasPlusLoads);
        if ( hasPlusLoads )
            forImage.hasInits = true;
    }
    else {
        writer.setHasObjC(false);
        writer.setHasPlusLoads(false);
    }

    if ( forImage.markNeverUnload ) {
        writer.setNeverUnload(true);
    }

#if BUILDING_DYLD || BUILDING_LIBDYLD
    if ( _foundDyldCacheRoots ) {
        // If we had roots, then some images are potentially on-disk while others are
        // being rebuilt for a new initializer order, but do not exist on disk
        if ( macho->inDyldCache() && !_dyldCache->header.dylibsExpectedOnDisk ) {
            // don't add file info for shared cache files mastered out of the final file system
        }
        else {
            // file is either not in the cache, or is in the cache but not mastered out
            writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
        }
    } else {
        // shared cache not built by dyld or libdyld.dylib, so must be a real file
        writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
    }
#else
    // in cache builder code
    if ( !_dyldCache->header.dylibsExpectedOnDisk ) {
        // don't add file info for shared cache files mastered out of the final file system
        // This also covers executable and dlopen closures as we are not running on a live
        // file system, so we don't have access to accurate inode/mtime
    }
    else {
        // file is either not in the cache, or is in the cache but not mastered out
        writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
    }
#endif

    // add info on how to load the image
    if ( !macho->inDyldCache() ) {
        writer.setMappingInfo(forImage.loadedFileInfo.sliceOffset, macho->mappedSize());
        // add code signature, if signed
        uint32_t codeSigFileOffset;
        uint32_t codeSigSize;
        if ( macho->hasCodeSignature(codeSigFileOffset, codeSigSize) ) {
            writer.setCodeSignatureLocation(codeSigFileOffset, codeSigSize);
            macho->forEachCDHash(^(const uint8_t* cdHash) {
                writer.addCDHash(cdHash);
            });
        }
        // add FairPlay encryption range if encrypted
        uint32_t fairPlayFileOffset;
        uint32_t fairPlaySize;
        if ( macho->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
            writer.setFairPlayEncryptionRange(fairPlayFileOffset, fairPlaySize);
        }
    }

    // set path
    writer.addPath(forImage.path());
    if ( _aliases != nullptr ) {
        for (const CachedDylibAlias& alias : *_aliases) {
            if ( strcmp(alias.realPath, forImage.path()) == 0 )
                writer.addPath(alias.aliasPath);
        }
    }

    // set uuid, if it has one
    uuid_t uuid;
    if ( macho->getUuid(uuid) )
        writer.setUUID(uuid);

    // set dependents
    writer.setDependents(forImage.dependents);

    // set segments
    addSegments(writer, macho);

    // if the shared cache contains two variants of the same framework (macOS and iOS), mark the iOS one as an override of the macOS one
    if ( _makingDyldCacheImages && iOSSupport(forImage.path()) ) {
        const char* truncName = forImage.path()+18;
        for (const BuilderLoadedImage& li : _loadedImages) {
            if ( strcmp(li.path(), truncName) == 0 ) {
                writer.setAsOverrideOf(li.imageNum);
            }
        }
    }

    // record if this dylib overrides something in the cache
    if ( forImage.overrideImageNum != 0 ) {
        writer.setAsOverrideOf(forImage.overrideImageNum);
        const char* overridePath = _dyldImageArray->imageForNum(forImage.overrideImageNum)->path();
        writer.addPath(overridePath);
        if ( strcmp(overridePath, "/usr/lib/system/libdyld.dylib") == 0 )
            _libDyldImageNum = forImage.imageNum;
        else if ( strcmp(overridePath, "/usr/lib/libSystem.B.dylib") == 0 )
            _libSystemImageNum = forImage.imageNum;
    }

    // record fixup info
    if ( macho->inDyldCache() && !_makingDyldCacheImages ) {
        // when building app closures, don't record fixup info about dylibs in the cache
    }
    else if ( _makeMinimalClosure ) {
        // don't record fixup info in dyld3s mode
        writer.setFixupsNotEncoded();
    }
    else if ( !_makingDyldCacheImages && macho->hasChainedFixups() ) {
        // when building app closures, just evaluate the targets of chained binds and record that table
        addChainedFixupInfo(writer, forImage);
    }
    else {
        // run rebase/bind opcodes or chained fixups
        addFixupInfo(writer, forImage);
    }
    if ( _diag.hasError() ) {
        writer.setInvalid();
        return;
    }

    // add initializers
#if BUILDING_CACHE_BUILDER

    // In the shared cache builder, we'll only ever see 'inDyldCache' images here for the shared
    // cache dylibs themselves. These are in an intermediate state where the cache is not live, the pointers
    // are unslid, but the pointers also don't contain fixup chains
    dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = macho->makeVMAddrConverter(forImage.contentRebased);
    if ( macho->inDyldCache() ) {
        vmAddrConverter.preferredLoadAddress = 0;
        vmAddrConverter.slide = 0;
        vmAddrConverter.chainedPointerFormat = 0;
        vmAddrConverter.contentRebased = false;
        vmAddrConverter.sharedCacheChainedPointerFormat = MachOAnalyzer::VMAddrConverter::SharedCacheFormat::none;
    }

#else

    dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = macho->makeVMAddrConverter(forImage.contentRebased);
#if !(BUILDING_LIBDYLD || BUILDING_DYLD)
    // The shared cache is always live in dyld/libdyld, but if we get here then we are an offline tool
    // In that case, use the shared cache vmAddrConverter if we need it
    if ( macho->inDyldCache() )
        vmAddrConverter = _dyldCache->makeVMAddrConverter(forImage.contentRebased);
#endif

#endif // BUILDING_CACHE_BUILDER

    __block unsigned initCount = 0;
    Diagnostics initializerDiag;
    macho->forEachInitializer(initializerDiag, vmAddrConverter, ^(uint32_t offset) {
        ++initCount;
    }, _dyldCache);
    if ( initializerDiag.noError() ) {
        if ( initCount != 0 ) {
            BLOCK_ACCCESSIBLE_ARRAY(uint32_t, initOffsets, initCount);
            __block unsigned index = 0;
            macho->forEachInitializer(_diag, vmAddrConverter, ^(uint32_t offset) {
                initOffsets[index++] = offset;
            }, _dyldCache);
            writer.setInitOffsets(initOffsets, initCount);
            forImage.hasInits = true;
        }
    }
    else {
        // mod_init_func section is malformed, might be self-modifying pointers
        macho->forEachInitializerPointerSection(_diag, ^(uint32_t sectionOffset, uint32_t sectionSize, const uint8_t* content, bool& stop) {
            writer.setInitSectRange(sectionOffset, sectionSize);
            forImage.hasInits = true;
        });
    }

    // add terminators (except for dylibs in the cache, because they are never unloaded)
    if ( !macho->inDyldCache() ) {
        __block unsigned termCount = 0;
        macho->forEachTerminator(_diag, vmAddrConverter, ^(uint32_t offset) {
            ++termCount;
        });
        if ( termCount != 0 ) {
            BLOCK_ACCCESSIBLE_ARRAY(uint32_t, termOffsets, termCount);
            __block unsigned index = 0;
            macho->forEachTerminator(_diag, vmAddrConverter, ^(uint32_t offset) {
                termOffsets[index++] = offset;
            });
            writer.setTermOffsets(termOffsets, termCount);
        }
    }

    // record if image has DOF sections
    STACK_ALLOC_ARRAY(uint32_t, dofSectionOffsets, 256);
    macho->forEachDOFSection(_diag, ^(uint32_t offset) {
        dofSectionOffsets.push_back(offset);
    });
    if ( !dofSectionOffsets.empty() ) {
        writer.setDofOffsets(dofSectionOffsets);
    }
}

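// Example of the disk-segment encoding below (hypothetical numbers, 4KB pages):
// a segment with fileOffset 0x0, fileSize 0x2345, vmSize 0x3000 is recorded as
// filePageCount = (0x2345 + 0xFFF)/0x1000 = 3 and vmPageCount = 3. Gaps between
// segments in the file or in the vm layout are emitted as extra "padding" entries
// with paddingNotSeg = 1, which is why dsegs reserves segCount*3 slots.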
void ClosureBuilder::addSegments(ImageWriter& writer, const MachOAnalyzer* mh)
{
    const uint32_t segCount = mh->segmentCount();
    if ( mh->inDyldCache() ) {
        uint64_t cacheUnslideBaseAddress = _dyldCache->unslidLoadAddress();
        BLOCK_ACCCESSIBLE_ARRAY(Image::DyldCacheSegment, segs, segCount);
        mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
            segs[info.segIndex] = { (uint32_t)(info.vmAddr-cacheUnslideBaseAddress), (uint32_t)info.vmSize, info.protections };
        });
        writer.setCachedSegments(segs, segCount);
    }
    else {
        const uint32_t pageSize = (mh->uses16KPages() ? 0x4000 : 0x1000);
        __block uint32_t diskSegIndex = 0;
        __block uint32_t totalPageCount = 0;
        __block uint32_t lastFileOffsetEnd = 0;
        __block uint64_t lastVmAddrEnd = 0;
        BLOCK_ACCCESSIBLE_ARRAY(Image::DiskSegment, dsegs, segCount*3); // room for padding
        mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
            if ( (info.fileOffset != 0) && (info.fileOffset != lastFileOffsetEnd) ) {
                Image::DiskSegment filePadding;
                filePadding.filePageCount = (info.fileOffset - lastFileOffsetEnd)/pageSize;
                filePadding.vmPageCount = 0;
                filePadding.permissions = 0;
                filePadding.paddingNotSeg = 1;
                dsegs[diskSegIndex++] = filePadding;
            }
            if ( (lastVmAddrEnd != 0) && (info.vmAddr != lastVmAddrEnd) ) {
                Image::DiskSegment vmPadding;
                vmPadding.filePageCount = 0;
                vmPadding.vmPageCount = (info.vmAddr - lastVmAddrEnd)/pageSize;
                vmPadding.permissions = 0;
                vmPadding.paddingNotSeg = 1;
                dsegs[diskSegIndex++] = vmPadding;
                totalPageCount += vmPadding.vmPageCount;
            }
            {
                Image::DiskSegment segInfo;
                segInfo.filePageCount = (info.fileSize+pageSize-1)/pageSize;
                segInfo.vmPageCount = (info.vmSize+pageSize-1)/pageSize;
                segInfo.permissions = info.protections & 7;
                segInfo.paddingNotSeg = 0;
                if ( info.readOnlyData )
                    segInfo.permissions = Image::DiskSegment::kReadOnlyDataPermissions;
                dsegs[diskSegIndex++] = segInfo;
                totalPageCount += segInfo.vmPageCount;
                if ( info.fileSize != 0 )
                    lastFileOffsetEnd = (uint32_t)(info.fileOffset + info.fileSize);
                if ( info.vmSize != 0 )
                    lastVmAddrEnd = info.vmAddr + info.vmSize;
            }
        });
        writer.setDiskSegments(dsegs, diskSegIndex);
    }
}

static bool isTupleFixup(uint64_t tupleSectVmStartOffset, uint64_t tupleSectVmEndOffset, uint64_t imageOffsetOfFixup, uint32_t entrySize, uint32_t& tupleIndex)
{
    if ( imageOffsetOfFixup < tupleSectVmStartOffset )
        return false;
    if ( imageOffsetOfFixup > tupleSectVmEndOffset )
        return false;
    uint64_t offsetIntoSection = imageOffsetOfFixup - tupleSectVmStartOffset;
    tupleIndex = (uint32_t)(offsetIntoSection/entrySize);
    return (tupleIndex*entrySize == offsetIntoSection) || ((tupleIndex*entrySize+entrySize/2) == offsetIntoSection);
}
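
// Example for isTupleFixup() (hypothetical offsets, 64-bit, so entrySize = 16):
// with a tuple section starting at image offset 0x1000, a fixup at 0x1010 is the
// first pointer of tuple 1 (the new implementation, set by a rebase) and a fixup
// at 0x1018 is the second pointer of the same tuple (the stock implementation, set
// by a bind): both yield tupleIndex == 1. A fixup at 0x1014 lands mid-pointer and
// returns false.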

void ClosureBuilder::addInterposingTuples(LaunchClosureWriter& writer, const Image* image, const MachOAnalyzer* mh)
{
    const unsigned pointerSize = mh->pointerSize();
    const uint64_t baseAddress = mh->preferredLoadAddress();
    mh->forEachInterposingSection(_diag, ^(uint64_t sectVmOffset, uint64_t sectVmSize, bool& stop) {
        const uint32_t entrySize = 2*pointerSize;
        const uint32_t tupleCount = (uint32_t)(sectVmSize/entrySize);
        const uint64_t sectVmEndOffset = sectVmOffset + sectVmSize;
        BLOCK_ACCCESSIBLE_ARRAY(InterposingTuple, resolvedTuples, tupleCount);
        for (uint32_t i=0; i < tupleCount; ++i) {
            resolvedTuples[i].stockImplementation.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
            resolvedTuples[i].stockImplementation.absolute.value = 0;
            resolvedTuples[i].newImplementation.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
            resolvedTuples[i].newImplementation.absolute.value = 0;
        }
        // figure out what the new implementation (rebase) and stock implementation (bind) of each tuple point to
        image->forEachFixup(^(uint64_t imageOffsetToRebase, bool& rebaseStop) {
            uint32_t tupleIndex;
            if ( isTupleFixup(sectVmOffset, sectVmEndOffset, imageOffsetToRebase, entrySize, tupleIndex) ) {
                const void* content = (uint8_t*)mh + imageOffsetToRebase;
                uint64_t unslidTargetAddress = mh->is64() ? *(uint64_t*)content : *(uint32_t*)content;
                resolvedTuples[tupleIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
                resolvedTuples[tupleIndex].newImplementation.image.imageNum = image->imageNum();
                resolvedTuples[tupleIndex].newImplementation.image.offset = unslidTargetAddress - mh->preferredLoadAddress();
            }
        },
        ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool& bindStop) {
            uint32_t tupleIndex;
            if ( isTupleFixup(sectVmOffset, sectVmEndOffset, imageOffsetToBind, entrySize, tupleIndex) ) {
                resolvedTuples[tupleIndex].stockImplementation = bindTarget;
            }
        },
        ^(uint64_t imageOffsetToStartsInfo, const Array<Image::ResolvedSymbolTarget>& targets, bool& chainStop) {
            mh->withChainStarts(_diag, imageOffsetToStartsInfo, ^(const dyld_chained_starts_in_image* startsInfo) {
                mh->forEachFixupInAllChains(_diag, startsInfo, false, ^(MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc, const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
                    uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)mh;
                    uint32_t tupleIndex;
                    if ( !isTupleFixup(sectVmOffset, sectVmEndOffset, fixupOffset, entrySize, tupleIndex) )
                        return;
                    uint32_t bindOrdinal;
                    int64_t addend;
                    uint64_t rebaseTargetOffset;
                    if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal, addend) ) {
                        if ( bindOrdinal < targets.count() ) {
                            resolvedTuples[tupleIndex].stockImplementation = targets[bindOrdinal];
                        }
                        else {
                            _diag.error("out of range bind ordinal %d (max %lu)", bindOrdinal, targets.count());
                            fixupsStop = true;
                        }
                    }
                    else if ( fixupLoc->isRebase(segInfo->pointer_format, baseAddress, rebaseTargetOffset) ) {
                        resolvedTuples[tupleIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
                        resolvedTuples[tupleIndex].newImplementation.image.imageNum = image->imageNum();
                        resolvedTuples[tupleIndex].newImplementation.image.offset = rebaseTargetOffset;
                    }
                });
            });
        },
        ^(uint64_t imageOffsetToFixup) {
            // objc optimisation can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool& bindStop) {
            // objc protocol optimisation fixups can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, uint32_t selectorIndex, bool inSharedCache, bool& fixupStop) {
            // objc selector optimisation fixups can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, bool& fixupStop) {
            // objc stable Swift optimisation fixups can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, bool& fixupStop) {
            // objc method list optimisation fixups can't be interposed so nothing to do here.
        });

        // remove any tuple in which either side is unset (or the target is weak-import NULL)
        STACK_ALLOC_ARRAY(InterposingTuple, goodTuples, tupleCount);
        for (uint32_t i=0; i < tupleCount; ++i) {
            if ( (resolvedTuples[i].stockImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute)
              && (resolvedTuples[i].newImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute) )
                goodTuples.push_back(resolvedTuples[i]);
        }
        writer.addInterposingTuples(goodTuples);
        _interposingTuplesUsed = !goodTuples.empty();

        // if the target of the interposing is in the dyld shared cache, add a PatchEntry so the cache is fixed up at launch
        STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, goodTuples.count());
        for (const InterposingTuple& aTuple : goodTuples) {
            if ( aTuple.stockImplementation.sharedCache.kind == Image::ResolvedSymbolTarget::kindSharedCache ) {
                uint32_t imageIndex;
                assert(_dyldCache->addressInText((uint32_t)aTuple.stockImplementation.sharedCache.offset, &imageIndex));
                ImageNum imageInCache = imageIndex+1;
                Closure::PatchEntry patch;
                patch.exportCacheOffset = (uint32_t)aTuple.stockImplementation.sharedCache.offset;
                patch.overriddenDylibInCache = imageInCache;
                patch.replacement = aTuple.newImplementation;
                patches.push_back(patch);
            }
        }
        writer.addCachePatches(patches);
    });
}

const Image::RebasePattern RebasePatternBuilder::_s_maxLeapPattern = { 0xFFFFF, 0, 0xF };
const uint64_t RebasePatternBuilder::_s_maxLeapCount = _s_maxLeapPattern.repeatCount * _s_maxLeapPattern.skipCount;

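// A RebasePattern is { repeatCount, contigCount, skipCount }: a run-length encoding of
// rebase locations. As a rough sketch of the encoding (my reading of add() below):
// N contiguous pointers collapse into one entry with contigCount == N (up to 255),
// evenly strided runs use repeatCount/skipCount, and large gaps are emitted as
// zero-contig "advance" entries such as _s_maxLeapPattern, which rebase nothing and
// just move the current location forward.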
RebasePatternBuilder::RebasePatternBuilder(OverflowSafeArray<closure::Image::RebasePattern>& entriesStorage, uint64_t ptrSize)
    : _rebaseEntries(entriesStorage), _lastLocation(-ptrSize), _ptrSize(ptrSize)
{
}

void RebasePatternBuilder::add(uint64_t runtimeOffset)
{
    const uint64_t delta = runtimeOffset - _lastLocation;
    const bool aligned = ((delta % _ptrSize) == 0);
    if ( delta == _ptrSize ) {
        // this rebase location is contiguous to the previous one
        if ( _rebaseEntries.back().contigCount < 255 ) {
            // just bump the previous entry's contigCount
            _rebaseEntries.back().contigCount++;
        }
        else {
            // previous contiguous run already has the max of 255, so start a new run
            _rebaseEntries.push_back({ 1, 1, 0 });
        }
    }
    else if ( aligned && (delta <= (_ptrSize*15)) ) {
        // this rebase is within skip distance of the last rebase
        _rebaseEntries.back().skipCount = (uint8_t)((delta-_ptrSize)/_ptrSize);
        int lastIndex = (int)(_rebaseEntries.count() - 1);
        if ( lastIndex > 1 ) {
            if ( (_rebaseEntries[lastIndex].contigCount == _rebaseEntries[lastIndex-1].contigCount)
              && (_rebaseEntries[lastIndex].skipCount == _rebaseEntries[lastIndex-1].skipCount) ) {
                // this entry has the same contig and skip as the previous one, so remove it and bump the previous one's repeat count
                _rebaseEntries.pop_back();
                _rebaseEntries.back().repeatCount += 1;
            }
        }
        _rebaseEntries.push_back({ 1, 1, 0 });
    }
    else {
        uint64_t advanceCount = (delta-_ptrSize);
        if ( (runtimeOffset < _lastLocation) && (_lastLocation != -_ptrSize) ) {
            // out-of-order rebase! handle this by resetting the rebase offset to zero
            _rebaseEntries.push_back({ 0, 0, 0 });
            advanceCount = runtimeOffset;
        }
        // if the next rebase is too far to reach with one pattern, use a series
        while ( advanceCount > _s_maxLeapCount ) {
            _rebaseEntries.push_back(_s_maxLeapPattern);
            advanceCount -= _s_maxLeapCount;
        }
        // if the next rebase is not reachable with skipCount==1 or skipCount==15, add an intermediate
        while ( advanceCount > _s_maxLeapPattern.repeatCount ) {
            uint64_t count = advanceCount / _s_maxLeapPattern.skipCount;
            _rebaseEntries.push_back({ (uint32_t)count, 0, _s_maxLeapPattern.skipCount });
            advanceCount -= (count*_s_maxLeapPattern.skipCount);
        }
        if ( advanceCount != 0 )
            _rebaseEntries.push_back({ (uint32_t)advanceCount, 0, 1 });
        _rebaseEntries.push_back({ 1, 1, 0 });
    }
    _lastLocation = runtimeOffset;
}


BindPatternBuilder::BindPatternBuilder(OverflowSafeArray<closure::Image::BindPattern>& entriesStorage, uint64_t ptrSize)
    : _bindEntries(entriesStorage), _ptrSize(ptrSize), _lastOffset(-ptrSize), _lastTarget({ {0, 0} })
{
}

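// Example of the bind-pattern merging below (hypothetical offsets, 64-bit): three binds
// to the same target at offsets 0x1000, 0x1010, 0x1020 coalesce into a single
// BindPattern { target, startVmOffset = 0x1000, repeatCount = 3, skipCount = 1 },
// i.e. "bind every other pointer, three times".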
void BindPatternBuilder::add(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, bool weakBindCoalesce)
{
    if ( weakBindCoalesce ) {
        // there may be a previous bind to this location
        // if so, update that rather than create a new BindPattern
        for (Image::BindPattern& aBind : _bindEntries) {
            if ( (aBind.startVmOffset == runtimeOffset) && (aBind.repeatCount == 1) && (aBind.skipCount == 0) ) {
                aBind.target = target;
                return;
            }
        }
    }
    bool mergedIntoPrevious = false;
    if ( !mergedIntoPrevious && (target == _lastTarget) && (runtimeOffset > _lastOffset) && !_bindEntries.empty() ) {
        uint64_t skipAmount = (runtimeOffset - _lastOffset - _ptrSize)/_ptrSize;
        if ( skipAmount*_ptrSize != (runtimeOffset - _lastOffset - _ptrSize) ) {
            // misaligned pointer means we cannot optimize
        }
        else {
            if ( (_bindEntries.back().repeatCount == 1) && (_bindEntries.back().skipCount == 0) && (skipAmount <= 255) ) {
                _bindEntries.back().repeatCount = 2;
                _bindEntries.back().skipCount = skipAmount;
                assert(_bindEntries.back().skipCount == skipAmount); // check overflow
                mergedIntoPrevious = true;
            }
            else if ( (_bindEntries.back().skipCount == skipAmount) && (_bindEntries.back().repeatCount < 0xfff) ) {
                uint32_t prevRepeatCount = _bindEntries.back().repeatCount;
                _bindEntries.back().repeatCount += 1;
                assert(_bindEntries.back().repeatCount > prevRepeatCount); // check overflow
                mergedIntoPrevious = true;
            }
        }
    }
    if ( (target == _lastTarget) && (runtimeOffset == _lastOffset) && !_bindEntries.empty() ) {
        // duplicate bind for same location, ignore this one
        mergedIntoPrevious = true;
    }
    if ( !mergedIntoPrevious ) {
        Image::BindPattern pattern;
        pattern.target = target;
        pattern.startVmOffset = runtimeOffset;
        pattern.repeatCount = 1;
        pattern.skipCount = 0;
        assert(pattern.startVmOffset == runtimeOffset);
        _bindEntries.push_back(pattern);
    }
    _lastTarget = target;
    _lastOffset = runtimeOffset;
}
1423
1424
1425 bool ClosureBuilder::mas_fromImageWeakDefLookup(const WrappedMachO& fromWmo, const char* symbolName, uint64_t addend, CachePatchHandler patcher, FixupTarget& target) const
1426 {
1427 // when building dylibs into the dyld cache, there is no load-order, so we cannot use the standard algorithm
1428 // otherwise call through to standard weak-def coalescing algorithm
1429 if ( !_makingDyldCacheImages )
1430 return MachOAnalyzerSet::mas_fromImageWeakDefLookup(fromWmo, symbolName, addend, patcher, target);
1431
1432
// look first in /usr/lib/libc++; most weak-def symbols will be found there
1434 Diagnostics diag;
1435 for (const BuilderLoadedImage& li : _loadedImages) {
1436 if ( li.loadAddress()->hasWeakDefs() && (strncmp(li.path(), "/usr/lib/libc++", 15) == 0) ) {
1437 WrappedMachO libcxxWmo(li.loadAddress(), this, (void*)&li);
1438 if ( libcxxWmo.findSymbolIn(diag, symbolName, addend, target) )
1439 return true;
1440 }
1441 }
1442
// if not found, try looking in the image itself; most custom weak-def symbols have a copy in the defining image
1444 if ( fromWmo.findSymbolIn(diag, symbolName, addend, target) )
1445 return true;
1446
1447 // if we link with something that also defines this weak-def, use it
1448 ClosureBuilder::BuilderLoadedImage* fromImage = (ClosureBuilder::BuilderLoadedImage*)(fromWmo._other);
1449 for (Image::LinkedImage child : fromImage->dependents) {
1450 if (child.imageNum() == kMissingWeakLinkedImage)
1451 continue;
1452 if (child.kind() == Image::LinkKind::upward)
1453 continue;
1454 const BuilderLoadedImage& childLi = findLoadedImage(child.imageNum());
1455 if ( childLi.loadAddress()->hasWeakDefs() ) {
1456 WrappedMachO childWmo(childLi.loadAddress(), this, (void*)&childLi);
1457 if ( childWmo.findSymbolIn(diag, symbolName, addend, target) )
1458 return true;
1459 }
1460 }
1461 return false;
1462 }
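// Editorial summary (not in the original source) of the cache-building
// lookup order above: (1) any loaded /usr/lib/libc++* dylib, (2) the
// image containing the reference, (3) its direct, non-upward dependents.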
1463
1464 void ClosureBuilder::mas_forEachImage(void (^handler)(const WrappedMachO& wmo, bool hidden, bool& stop)) const
1465 {
1466 bool stop = false;
1467 for (const ClosureBuilder::BuilderLoadedImage& li : _loadedImages) {
1468 WrappedMachO wmo(li.loadAddress(), this, (void*)&li);
1469 handler(wmo, li.rtldLocal, stop);
1470 if ( stop )
1471 break;
1472 }
1473 }
1474
1475 bool ClosureBuilder::wmo_missingSymbolResolver(const WrappedMachO* fromWmo, bool weakImport, bool lazyBind, const char* symbolName, const char* expectedInDylibPath, const char* clientPath, FixupTarget& target) const
1476 {
1477 // if weakImport and missing, bind to NULL
1478 if ( weakImport ) {
1479 // construct NULL target
1480 target.offsetInImage = 0;
1481 target.kind = FixupTarget::Kind::bindAbsolute;
1482 target.requestedSymbolName = symbolName;
1483 target.foundSymbolName = nullptr;
// Record that we found a missing weak import so that the objc optimizer doesn't have to check
1485 ClosureBuilder::BuilderLoadedImage* fromBLI = (ClosureBuilder::BuilderLoadedImage*)(fromWmo->_other);
1486 fromBLI->hasMissingWeakImports = true;
1487 return true;
1488 }
// dyld3 binds everything ahead of time; to simulate lazy-bind failure,
// if non-weakImport and lazy, bind to __dyld_missing_symbol_abort()
1491 if ( lazyBind && _allowMissingLazies ) {
1492 for (const BuilderLoadedImage& li : _loadedImages) {
1493 if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) ) {
1494 WrappedMachO libdyldWmo(li.loadAddress(), this, (void*)&li);
1495 Diagnostics diag;
1496 if ( libdyldWmo.findSymbolIn(diag, "__dyld_missing_symbol_abort", 0, target) ) {
1497 // <rdar://problem/44315944> closures should bind missing lazy-bind symbols to a missing symbol handler in libdyld in flat namespace
1498 return true;
1499 }
1500 break;
1501 }
1502 }
1503 }
1504 // support abort payload
1505 if ( _launchErrorInfo != nullptr ) {
1506 _launchErrorInfo->kind = DYLD_EXIT_REASON_SYMBOL_MISSING;
1507 _launchErrorInfo->clientOfDylibPath = strdup_temp(clientPath);
1508 _launchErrorInfo->targetDylibPath = strdup_temp(expectedInDylibPath);
1509 _launchErrorInfo->symbol = symbolName;
1510 }
1511 return false;
1512 }
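// Editorial summary (not in the original source) of the policy above:
//   weakImport                   -> bind to absolute 0 (NULL)
//   lazy && _allowMissingLazies  -> bind to __dyld_missing_symbol_abort in libdyld
//   otherwise                    -> record DYLD_EXIT_REASON_SYMBOL_MISSING and fail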
1513
1514 void ClosureBuilder::mas_mainExecutable(WrappedMachO& wmo) const
1515 {
1516 const ClosureBuilder::BuilderLoadedImage& mainLi = _loadedImages[_mainProgLoadIndex];
1517 WrappedMachO mainWmo(mainLi.loadAddress(), this, (void*)&mainLi);
1518 wmo = mainWmo;
1519 }
1520
1521 void* ClosureBuilder::mas_dyldCache() const
1522 {
1523 return (void*)_dyldCache;
1524 }
1525
1526 bool ClosureBuilder::wmo_dependent(const WrappedMachO* wmo, uint32_t depIndex, WrappedMachO& childWmo, bool& missingWeakDylib) const
1527 {
1528 ClosureBuilder::BuilderLoadedImage* forImage = (ClosureBuilder::BuilderLoadedImage*)(wmo->_other);
1529
1530 if ( depIndex >= forImage->dependents.count() )
1531 return false;
1532
1533 ImageNum childNum = forImage->dependents[depIndex].imageNum();
1534 if ( childNum == kMissingWeakLinkedImage ) {
1535 missingWeakDylib = true;
1536 return true;
1537 }
1538 const BuilderLoadedImage& depLoadedImage = this->findLoadedImage(childNum);
1539 childWmo = WrappedMachO(depLoadedImage.loadAddress(), this, (void*)&depLoadedImage);
1540 missingWeakDylib = false;
1541 return true;
1542 }
1543
1544 const char* ClosureBuilder::wmo_path(const WrappedMachO* wmo) const
1545 {
1546 ClosureBuilder::BuilderLoadedImage* forImage = (ClosureBuilder::BuilderLoadedImage*)(wmo->_other);
1547 return forImage->loadedFileInfo.path;
1548 }
1549
1550 MachOAnalyzerSet::ExportsTrie ClosureBuilder::wmo_getExportsTrie(const WrappedMachO* wmo) const
1551 {
1552 ClosureBuilder::BuilderLoadedImage* forImage = (ClosureBuilder::BuilderLoadedImage*)(wmo->_other);
1553 if ( forImage->exportsTrieOffset == 0 ) {
1554 // if trie location not already cached, look it up
1555 wmo->_mh->hasExportTrie(forImage->exportsTrieOffset, forImage->exportsTrieSize);
1556 }
1557 const uint8_t* start = nullptr;
1558 const uint8_t* end = nullptr;
1559 if ( forImage->exportsTrieOffset != 0 ) {
1560 start = (uint8_t*)wmo->_mh + forImage->exportsTrieOffset;
1561 end = start + forImage->exportsTrieSize;
1562 }
1563 return { start, end };
1564 }
1565
1566
1567 Image::ResolvedSymbolTarget ClosureBuilder::makeResolvedTarget(const FixupTarget& target) const
1568 {
1569 Image::ResolvedSymbolTarget resolvedTarget;
1570 switch ( target.kind ) {
1571 case MachOAnalyzerSet::FixupTarget::Kind::rebase:
1572 assert(0 && "target is a rebase");
1573 break;
1574 case MachOAnalyzerSet::FixupTarget::Kind::bindToImage:
1575 if ( target.foundInImage._mh->inDyldCache() ) {
1576 resolvedTarget.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
1577 resolvedTarget.sharedCache.offset = (uint8_t*)target.foundInImage._mh - (uint8_t*)_dyldCache + target.offsetInImage;
1578 }
1579 else {
1580 ClosureBuilder::BuilderLoadedImage* targetBuildLoaderImage = (ClosureBuilder::BuilderLoadedImage*)(target.foundInImage._other);
1581 resolvedTarget.image.kind = Image::ResolvedSymbolTarget::kindImage;
1582 resolvedTarget.image.imageNum = targetBuildLoaderImage->imageNum;
1583 resolvedTarget.image.offset = target.offsetInImage;
1584 }
1585 return resolvedTarget;
1586 case MachOAnalyzerSet::FixupTarget::Kind::bindAbsolute:
1587 resolvedTarget.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1588 resolvedTarget.absolute.value = target.offsetInImage;
1589 return resolvedTarget;
1590 case MachOAnalyzerSet::FixupTarget::Kind::bindMissingSymbol:
1591 assert(0 && "unknown FixupTarget::Kind::bindMissingSymbol found in closure");
1592 break;
1593 }
1594 assert(0 && "unknown FixupTarget kind");
1595 }
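// Editorial sketch (not in the original source) of the three encodings above:
//   target in dyld cache    -> { kindSharedCache, offset = mh - cacheStart + offsetInImage }
//   target in another image -> { kindImage, imageNum, offset = offsetInImage }
//   absolute (e.g. NULL)    -> { kindAbsolute, value = offsetInImage }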
1596
1597 void ClosureBuilder::addFixupInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
1598 {
1599 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::RebasePattern, rebaseEntries, 1024);
1600 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::BindPattern, binds, 512);
1601 __block RebasePatternBuilder rebaseBuilder(rebaseEntries, forImage.loadAddress()->pointerSize());
1602 __block BindPatternBuilder bindBuilder(binds, forImage.loadAddress()->pointerSize());
1603
1604 const bool stompedLazyOpcodes = forImage.loadAddress()->hasStompedLazyOpcodes();
1605 WrappedMachO forImage_wmo(forImage.loadAddress(), this, (void*)&forImage);
1606 forImage_wmo.forEachFixup(_diag,
1607 ^(uint64_t fixupLocRuntimeOffset, PointerMetaData pmd, const MachOAnalyzerSet::FixupTarget& target, bool& stop) {
1608 if ( target.kind == MachOAnalyzerSet::FixupTarget::Kind::rebase ) {
1609 // normally ignore rebase on lazy pointer because dyld3 will immediately bind that same pointer
// but if the app is licenseware and stomps the lazy bind opcodes, keep the rebases
1611 if ( target.isLazyBindRebase && !stompedLazyOpcodes )
1612 return;
1613 }
1614 if ( _dylibFixupHandler ) {
1615 // applying fixups to dylibs in dyld cache as the cache is being built
1616 _dylibFixupHandler(forImage.loadAddress(), fixupLocRuntimeOffset, pmd, target);
1617 return;
1618 }
1619 switch ( target.kind ) {
1620 case MachOAnalyzerSet::FixupTarget::Kind::rebase:
1621 if ( !_leaveRebasesAsOpcodes )
1622 rebaseBuilder.add(fixupLocRuntimeOffset);
1623 break;
1624 case MachOAnalyzerSet::FixupTarget::Kind::bindToImage:
1625 case MachOAnalyzerSet::FixupTarget::Kind::bindAbsolute:
1626 bindBuilder.add(fixupLocRuntimeOffset, makeResolvedTarget(target), target.weakCoalesced);
1627 break;
1628 case MachOAnalyzerSet::FixupTarget::Kind::bindMissingSymbol:
// this is the last call from forEachFixup() because a symbol could not be resolved
1630 break;
1631 }
1632 },
1633 ^(uint32_t cachedDylibIndex, uint32_t exportCacheOffset, const FixupTarget& target) {
1634 addWeakDefCachePatch(cachedDylibIndex, exportCacheOffset, target);
1635 }
1636 );
1637
1638 // check for __dyld section in main executable to support licenseware
1639 if ( forImage.loadAddress()->filetype == MH_EXECUTE ) {
1640 forImage.loadAddress()->forEachSection(^(const MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stop) {
1641 if ( (strcmp(sectInfo.sectName, "__dyld") == 0) && (strcmp(sectInfo.segInfo.segName, "__DATA") == 0) ) {
1642 // find dyld3::compatFuncLookup in libdyld.dylib
1643 assert(_libDyldImageNum != 0);
1644 const BuilderLoadedImage& libdyldImage = findLoadedImage(_libDyldImageNum);
1645 WrappedMachO libdyldWmo(libdyldImage.loadAddress(), this, (void*)&libdyldImage);
1646 FixupTarget libdyldCompatTarget;
1647 if ( libdyldWmo.findSymbolIn(_diag, "__ZN5dyld316compatFuncLookupEPKcPPv", 0, libdyldCompatTarget) ) {
1648 // dyld_func_lookup is second pointer in __dyld section
1649 uint64_t fixupLocRuntimeOffset = sectInfo.sectAddr - forImage.loadAddress()->preferredLoadAddress() + forImage.loadAddress()->pointerSize();
1650 bindBuilder.add(fixupLocRuntimeOffset, makeResolvedTarget(libdyldCompatTarget), false);
1651 }
1652 else {
1653 _diag.error("libdyld.dylib is missing dyld3::compatFuncLookup");
1654 }
1655 }
1656 });
1657 }
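// Editorial example (not in the original source), with hypothetical numbers:
// if __dyld is at VM address 0x3000 in an image whose preferred load address
// is 0x0, with 8-byte pointers, the bind above lands at runtime offset
// 0x3008, i.e. the section's second pointer (dyld_func_lookup).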
1658
1659 // add all rebase and bind info into closure, unless building dyld cache
1660 if ( !_makingDyldCacheImages ) {
1661 if ( _leaveRebasesAsOpcodes )
1662 writer.setRebasesNotEncoded();
1663 else
1664 writer.setRebaseInfo(rebaseEntries);
1665 writer.setBindInfo(binds);
1666 }
1667
1668 // i386 programs also use text relocs to rebase stubs
1669 if ( (forImage.loadAddress()->cputype == CPU_TYPE_I386) && !_makingDyldCacheImages ) {
1670 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::TextFixupPattern, textRebases, 512);
1671 __block uint64_t lastOffset = -4;
1672 forImage.loadAddress()->forEachTextRebase(_diag, ^(uint64_t runtimeOffset, bool& stop) {
1673 if ( textRebases.freeCount() < 2 ) {
1674 _diag.error("too many text rebase locations (%ld) in %s", textRebases.maxCount(), writer.currentImage()->path());
1675 stop = true;
1676 }
1677 bool mergedIntoPrevious = false;
1678 if ( (runtimeOffset > lastOffset) && !textRebases.empty() ) {
1679 uint32_t skipAmount = (uint32_t)(runtimeOffset - lastOffset);
1680 if ( (textRebases.back().repeatCount == 1) && (textRebases.back().skipCount == 0) ) {
1681 textRebases.back().repeatCount = 2;
1682 textRebases.back().skipCount = skipAmount;
1683 mergedIntoPrevious = true;
1684 }
1685 else if ( textRebases.back().skipCount == skipAmount ) {
1686 textRebases.back().repeatCount += 1;
1687 mergedIntoPrevious = true;
1688 }
1689 }
1690 if ( !mergedIntoPrevious ) {
1691 Image::TextFixupPattern pattern;
1692 pattern.target.raw = 0;
1693 pattern.startVmOffset = (uint32_t)runtimeOffset;
1694 pattern.repeatCount = 1;
1695 pattern.skipCount = 0;
1696 textRebases.push_back(pattern);
1697 }
1698 lastOffset = runtimeOffset;
1699 });
1700 writer.setTextRebaseInfo(textRebases);
1701 }
1702
1703 }
1704
1705
1706
1707
1708 void ClosureBuilder::addWeakDefCachePatch(uint32_t cachedDylibIndex, uint32_t exportCacheOffset, const FixupTarget& patchTarget)
1709 {
1710 // minimal closures don't need weak def patches, they are regenerated at launch
1711 if ( _makeMinimalClosure )
1712 return;
1713
1714 // don't add duplicates
1715 for (const Closure::PatchEntry& aPatch : _weakDefCacheOverrides) {
1716 if ( aPatch.exportCacheOffset == exportCacheOffset )
1717 return;
1718 }
1719 // add new patch entry
1720 ClosureBuilder::BuilderLoadedImage* targetImage = (ClosureBuilder::BuilderLoadedImage*)(patchTarget.foundInImage._other);
1721 Closure::PatchEntry patch;
1722 patch.overriddenDylibInCache = cachedDylibIndex+1; // convert image index to ImageNum
1723 patch.exportCacheOffset = exportCacheOffset;
1724 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
1725 patch.replacement.image.imageNum = targetImage->imageNum;
1726 patch.replacement.image.offset = patchTarget.offsetInImage;
1727 _weakDefCacheOverrides.push_back(patch);
1728 }
1729
1730 void ClosureBuilder::addChainedFixupInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
1731 {
1732 // as a side effect of building targets array, we discover if anything in dyld cache uses weak-defs that need
1733 // to be redirected to an impl in some other dylib (cache patched)
1734 auto patchAddr = ^(uint32_t cachedDylibIndex, uint32_t exportCacheOffset, const FixupTarget& patchTarget) {
1735 addWeakDefCachePatch(cachedDylibIndex, exportCacheOffset, patchTarget);
1736 };
1737
1738 // build array of targets
1739 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ResolvedSymbolTarget, targets, 1024);
1740 forImage.loadAddress()->forEachChainedFixupTarget(_diag, ^(int libOrdinal, const char* symbolName, uint64_t addend, bool weakImport, bool& stop) {
1741 FixupTarget target;
1742 WrappedMachO forImageWmo(forImage.loadAddress(), this, (void*)&forImage);
1743 if ( wmo_findSymbolFrom(&forImageWmo, _diag, libOrdinal, symbolName, weakImport, false, addend, patchAddr, target) )
1744 targets.push_back(makeResolvedTarget(target));
1745 else
1746 stop = true;
1747 });
1748 if ( _diag.hasError() )
1749 return;
1750
// C++ main executables can override operator new, so check for that
1752 if ( forImage.loadAddress()->isMainExecutable() && forImage.loadAddress()->hasWeakDefs() ) {
1753 WrappedMachO mainWmo(forImage.loadAddress(), this, (void*)&forImage);
1754 wmo_findExtraSymbolFrom(&mainWmo, patchAddr);
1755 }
1756
1757 uint64_t chainStartsOffset = forImage.loadAddress()->chainStartsOffset();
1758 writer.setChainedFixups(chainStartsOffset, targets);
1759 }
1760
1761 void ClosureBuilder::depthFirstRecurseSetInitInfo(uint32_t loadIndex, InitInfo initInfos[], uint32_t& initOrder, bool& hasError)
1762 {
1763 if ( initInfos[loadIndex].visited )
1764 return;
1765 initInfos[loadIndex].visited = true;
1766 initInfos[loadIndex].danglingUpward = false;
1767
1768 if (_loadedImages[loadIndex].isBadImage) {
1769 hasError = true;
1770 return;
1771 }
1772 for (const Image::LinkedImage& dep : _loadedImages[loadIndex].dependents) {
1773 if ( dep.imageNum() == kMissingWeakLinkedImage )
1774 continue;
1775 const ClosureBuilder::BuilderLoadedImage& depLi = findLoadedImage(dep.imageNum());
1776 uint32_t depLoadIndex = (uint32_t)_loadedImages.index(depLi);
1777 if ( dep.kind() == Image::LinkKind::upward ) {
1778 if ( !initInfos[depLoadIndex].visited )
1779 initInfos[depLoadIndex].danglingUpward = true;
1780 }
1781 else {
1782 depthFirstRecurseSetInitInfo(depLoadIndex, initInfos, initOrder, hasError);
1783 if (hasError)
1784 return;
1785 }
1786 }
1787 initInfos[loadIndex].initOrder = initOrder++;
1788 }
1789
1790 void ClosureBuilder::computeInitOrder(ImageWriter& imageWriter, uint32_t loadIndex)
1791 {
1792 // allocate array to track initializers
1793 InitInfo initInfos[_loadedImages.count()];
1794 bzero(initInfos, sizeof(initInfos));
1795
1796 // recurse all images and build initializer list from bottom up
1797 uint32_t initOrder = 1;
1798 bool hasMissingDependent = false;
1799 depthFirstRecurseSetInitInfo(loadIndex, initInfos, initOrder, hasMissingDependent);
1800 if (hasMissingDependent) {
1801 imageWriter.setInvalid();
1802 return;
1803 }
1804
// any images not yet visited are dangling; force-add them to the end of the init list
1806 for (uint32_t i=0; i < (uint32_t)_loadedImages.count(); ++i) {
1807 if ( !initInfos[i].visited && initInfos[i].danglingUpward ) {
1808 depthFirstRecurseSetInitInfo(i, initInfos, initOrder, hasMissingDependent);
1809 }
1810 }
1811
1812 if (hasMissingDependent) {
1813 imageWriter.setInvalid();
1814 return;
1815 }
1816
// build array of just the images with initializers
1818 STACK_ALLOC_ARRAY(uint32_t, indexOfImagesWithInits, _loadedImages.count());
1819 uint32_t index = 0;
1820 for (const BuilderLoadedImage& li : _loadedImages) {
1821 if ( initInfos[index].visited && li.hasInits ) {
1822 indexOfImagesWithInits.push_back(index);
1823 }
1824 ++index;
1825 }
1826
1827 // bubble sort (FIXME)
1828 if ( indexOfImagesWithInits.count() > 1 ) {
1829 for (uint32_t i=0; i < indexOfImagesWithInits.count()-1; ++i) {
1830 for (uint32_t j=0; j < indexOfImagesWithInits.count()-i-1; ++j) {
1831 if ( initInfos[indexOfImagesWithInits[j]].initOrder > initInfos[indexOfImagesWithInits[j+1]].initOrder ) {
1832 uint32_t temp = indexOfImagesWithInits[j];
1833 indexOfImagesWithInits[j] = indexOfImagesWithInits[j+1];
1834 indexOfImagesWithInits[j+1] = temp;
1835 }
1836 }
1837 }
1838 }
1839
1840 // copy ImageNum of each image with initializers into array
1841 ImageNum initNums[indexOfImagesWithInits.count()];
1842 for (uint32_t i=0; i < indexOfImagesWithInits.count(); ++i) {
1843 initNums[i] = _loadedImages[indexOfImagesWithInits[i]].imageNum;
1844 }
1845
1846 // add to closure info
1847 imageWriter.setInitsOrder(initNums, (uint32_t)indexOfImagesWithInits.count());
1848 }
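// Editorial example (not in the original source): if main links libA and
// libB, and libA also links libB, the depth-first walk above assigns
// initOrder libB=1, libA=2, main=3, so libB's initializers run first;
// the sort then keeps only images that actually have initializers.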
1849
1850 void ClosureBuilder::addClosureInfo(LaunchClosureWriter& closureWriter)
1851 {
1852 // record which is libSystem
1853 assert(_libSystemImageNum != 0);
1854 closureWriter.setLibSystemImageNum(_libSystemImageNum);
1855
1856 // record which is libdyld
1857 assert(_libDyldImageNum != 0);
1858 const BuilderLoadedImage& libdyldImage = findLoadedImage(_libDyldImageNum);
1859 WrappedMachO libdyldWmo(libdyldImage.loadAddress(), this, (void*)&libdyldImage);
1860 FixupTarget libdyldEntryTarget;
1861 if ( libdyldWmo.findSymbolIn(_diag, "__ZN5dyld318entryVectorForDyldE", 0, libdyldEntryTarget) ) {
1862 const dyld3::LibDyldEntryVector* libDyldEntry = nullptr;
1863 if ( libdyldEntryTarget.kind == MachOAnalyzerSet::FixupTarget::Kind::bindToImage ) {
1864 libDyldEntry = (dyld3::LibDyldEntryVector*)((uint8_t*)libdyldEntryTarget.foundInImage._mh + libdyldEntryTarget.offsetInImage);
1865 }
// peek at the entry vector to see if the version is compatible
1867 if ( (libDyldEntry != nullptr) && ((libDyldEntry->binaryFormatVersion & LibDyldEntryVector::kBinaryFormatVersionMask) == dyld3::closure::kFormatVersion) ) {
1868 Image::ResolvedSymbolTarget entryLocation = makeResolvedTarget(libdyldEntryTarget);
1869 closureWriter.setLibDyldEntry(entryLocation);
1870 }
1871 else
1872 _diag.error("libdyld.dylib entry vector is incompatible");
1873 }
1874 else {
1875 _diag.error("libdyld.dylib is missing entry vector");
1876 }
1877
1878 // record which is main executable
1879 ImageNum mainProgImageNum = _loadedImages[_mainProgLoadIndex].imageNum;
1880 closureWriter.setTopImageNum(mainProgImageNum);
1881
1882 // add entry
1883 uint64_t entryOffset;
1884 bool usesCRT;
1885 if ( _loadedImages[_mainProgLoadIndex].loadAddress()->getEntry(entryOffset, usesCRT) ) {
1886 Image::ResolvedSymbolTarget location;
1887 location.image.kind = Image::ResolvedSymbolTarget::kindImage;
1888 location.image.imageNum = mainProgImageNum;
1889 location.image.offset = (uint32_t)entryOffset;
1890 if ( usesCRT )
1891 closureWriter.setStartEntry(location);
1892 else
1893 closureWriter.setMainEntry(location);
1894 }
1895
1896 // add env vars that must match at launch time
1897 _pathOverrides.forEachEnvVar(^(const char* envVar) {
1898 closureWriter.addEnvVar(envVar);
1899 });
1900
1901 // add list of files which must be missing
1902 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(const char*, paths, 8192);
1903 if ( _mustBeMissingPaths != nullptr ) {
1904 _mustBeMissingPaths->forEachPath(^(const char* aPath) {
1905 paths.push_back(aPath);
1906 });
1907 }
1908 closureWriter.setMustBeMissingFiles(paths);
1909
// add list of files which must be present with a specific inode/mtime
1911 if (!_skippedFiles.empty())
1912 closureWriter.setMustExistFiles(_skippedFiles);
}

void ClosureBuilder::invalidateInitializerRoots()
1915 {
1916 while (true) {
1917 bool madeChange = false;
1918 for (uintptr_t loadedImageIndex = _alreadyInitedIndex; loadedImageIndex != _loadedImages.count(); ++loadedImageIndex) {
1919 BuilderLoadedImage& li = _loadedImages[loadedImageIndex];
1920 if ( li.mustBuildClosure ) {
1921 // Already invalidated
1922 continue;
1923 }
1924 for (Image::LinkedImage depIndex : li.dependents) {
1925 if ( depIndex.imageNum() == kMissingWeakLinkedImage )
1926 continue;
1927 const BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
1928 // If a dependent is bad, or a new image num, or an override, then we need this image to get a new closure
1929 if ( depImage.mustBuildClosure ) {
1930 li.mustBuildClosure = true; // mark bad
1931 madeChange = true;
1932 }
1933 }
1934 }
1935 if (!madeChange)
1936 break;
// If we made a change, then we detected an existing image with a dependent which needed to be rebuilt.
// This corresponds to a shared cache root: the existing image is a shared-cache image and the root is the depImage.
1939 _foundDyldCacheRoots = true;
1940 }
1941 }
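// Editorial example (not in the original source): given app -> libMid ->
// libRoot, where libRoot is a root already marked mustBuildClosure, one
// pass marks libMid and the fixed-point loop repeats until app is marked
// too, so every transitive client of a rebuilt image gets a fresh closure.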
1942
1943 size_t ClosureBuilder::HashCString::hash(const char* v) {
1944 // FIXME: Use hash<string_view> when it has the correct visibility markup
1945 return __gnu_cxx::hash<const char*>{}(v);
1946 }
1947
1948 bool ClosureBuilder::EqualCString::equal(const char* s1, const char* s2) {
1949 return strcmp(s1, s2) == 0;
1950 }
1951
1952
1953
1954 struct HashUInt64 {
1955 static size_t hash(const uint64_t& v) {
1956 return std::hash<uint64_t>{}(v);
1957 }
1958 };
1959
1960 struct EqualUInt64 {
1961 static bool equal(uint64_t s1, uint64_t s2) {
1962 return s1 == s2;
1963 }
1964 };
1965
1966 void ClosureBuilder::writeClassOrProtocolHashTable(bool classes, Array<ObjCOptimizerImage>& objcImages) {
1967 __block MultiMap<const char*, dyld3::closure::Image::ObjCClassImageOffset, HashCString, EqualCString> seenClassesMap;
1968 __block Map<const char*, dyld3::closure::Image::ObjCClassNameImageOffset, HashCString, EqualCString> classNameMap;
1969 __block OverflowSafeArray<const char*> classNames;
1970
1971 // Note we walk the images backwards as we want them in load order to match the order they are registered with objc
1972 for (size_t imageIndex = 0, reverseIndex = (objcImages.count() - 1); imageIndex != objcImages.count(); ++imageIndex, --reverseIndex) {
1973 if (objcImages[reverseIndex].diag.hasError())
1974 continue;
1975 ObjCOptimizerImage& image = objcImages[reverseIndex];
1976 const OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenClasses = classes ? image.seenClasses : image.seenProtocols;
1977
1978 for (const ObjCOptimizerImage::SeenClass& seenClass : seenClasses) {
1979 closure::Image::ObjCClassNameImageOffset classNameTarget = seenClass.first;
1980 dyld3::closure::Image::ObjCClassImageOffset classDataTarget = seenClass.second;
1981 Image::ObjCClassImage classImage = _objcClassesHashTableImages[classNameTarget.classNameImageIndex];
1982
1983 const BuilderLoadedImage& li = findLoadedImage(classImage.imageNum);
1984 const dyld3::MachOAnalyzer* ma = li.loadAddress();
1985
1986 const char* className = ((const char*)ma) + classImage.offsetOfClassNames + classNameTarget.classNameImageOffset;
1987 //uint64_t nameVMAddr = ma->preferredLoadAddress() + classImage.offsetOfClassNames + classNameTarget.classNameImageOffset;
1988 //printf("%s: 0x%08llx = '%s'\n", li.path(), nameVMAddr, className);
1989 seenClassesMap.insert({ className, classDataTarget });
1990
1991 // Also track the name
1992 auto itAndInserted = classNameMap.insert({ className, dyld3::closure::Image::ObjCClassNameImageOffset() });
1993 if (itAndInserted.second) {
1994 // We inserted the class name so we need to add it to the strings for the closure hash table
1995 classNames.push_back(className);
1996
1997 // We already computed a class name target in a previous loop so use that one
1998 itAndInserted.first->second = seenClass.first;
1999
2000 // If we are processing protocols, and this is the first one we've seen, then track its ISA to be fixed up
2001 if ( !classes ) {
2002 uint64_t protocolVMOffset = classImage.offsetOfClasses + classDataTarget.classData.imageOffset;
2003 image.protocolISAFixups.push_back(protocolVMOffset);
2004 }
2005 }
2006 }
2007 }
2008
2009 __block uint32_t duplicateCount = 0;
2010 seenClassesMap.forEachEntry(^(const char *const &key, const Image::ObjCClassImageOffset **values,
2011 uint64_t valuesCount) {
2012 if (valuesCount != 1)
2013 duplicateCount += valuesCount;
2014 });
2015
2016 // If we have closure class names, we need to make a hash table for them.
2017 OverflowSafeArray<uint8_t>& hashTable = classes ? _objcClassesHashTable : _objcProtocolsHashTable;
2018 if (!classNames.empty()) {
2019 objc_opt::perfect_hash phash;
2020 objc_opt::make_perfect(classNames, phash);
2021 size_t size = ObjCClassOpt::size(phash, duplicateCount);
2022 hashTable.resize(size);
2023 //printf("Class table size: %lld\n", size);
2024 ObjCClassOpt* classesHashTable = (ObjCClassOpt*)hashTable.begin();
2025 classesHashTable->write(phash, classNameMap.array(), seenClassesMap, duplicateCount);
2026 }
2027 }
2028
2029 bool ClosureBuilder::optimizeObjC(Array<ImageWriter>& writers) {
2030 if ( _dyldCache == nullptr )
2031 return false;
2032
2033 // If we have the read only data, make sure it has a valid selector table inside.
2034 const objc_opt::objc_clsopt_t* objcClassOpt = nullptr;
2035 const objc_opt::objc_selopt_t* objcSelOpt = nullptr;
2036 const objc_opt::objc_protocolopt2_t* objcProtocolOpt = nullptr;
2037 if (const objc_opt::objc_opt_t* optObjCHeader = _dyldCache->objcOpt()) {
2038 objcClassOpt = optObjCHeader->clsopt();
2039 objcSelOpt = optObjCHeader->selopt();
2040 objcProtocolOpt = optObjCHeader->protocolopt2();
2041 }
2042
2043 if ( !objcClassOpt || !objcSelOpt || !objcProtocolOpt )
2044 return false;
2045
2046 // We have 24 bits of index in SelectorReferenceFixup so we can't handle a
2047 // shared cache selector table larger than that
2048 if ( objcSelOpt->usedCount() >= (1 << 24) )
2049 return false;
2050
2051 // Make sure we have the pointers section with the pointer to the protocol class
2052 const void* objcOptPtrs = _dyldCache->objcOptPtrs();
2053 if ( objcOptPtrs == nullptr )
2054 return false;
2055
2056 uint32_t pointerSize = _loadedImages.begin()->loadAddress()->pointerSize();
2057 uint64_t classProtocolVMAddr = (pointerSize == 8) ? *(uint64_t*)objcOptPtrs : *(uint32_t*)objcOptPtrs;
2058
2059 Image::ResolvedSymbolTarget objcProtocolClassTarget;
2060 objcProtocolClassTarget.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
2061 if ( _dyldCacheIsLive ) {
2062 // If we are on arm64e, the protocol ISA in the shared cache was signed. We don't
2063 // want the signature bits in the encoded value
2064 #if __has_feature(ptrauth_calls)
2065 classProtocolVMAddr = (uint64_t)__builtin_ptrauth_strip((void*)classProtocolVMAddr, ptrauth_key_asda);
2066 #endif
2067 objcProtocolClassTarget.sharedCache.offset = classProtocolVMAddr - (uint64_t)_dyldCache;
2068 } else {
2069 objcProtocolClassTarget.sharedCache.offset = classProtocolVMAddr - _dyldCache->unslidLoadAddress();
2070 }
2071
2072 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(ObjCOptimizerImage, objcImages, 32);
2073 ArrayFinalizer<ObjCOptimizerImage> scopedCleanup(objcImages,
2074 ^(ObjCOptimizerImage& objcImage) {
2075 objcImage.~ObjCOptimizerImage();
2076 });
2077
2078 // Find all the images with valid objc info
2079 // Also add shared cache images to a map so that we can see them later for looking up classes
2080 Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer> sharedCacheImagesMap;
2081 for (size_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
2082 BuilderLoadedImage& li = _loadedImages[imageIndex];
2083
2084 // Skip shared cache images as even if they need a new closure, the objc runtime can still use
2085 // the optimized shared cache tables.
2086 if ( li.loadAddress()->inDyldCache() ) {
2087 sharedCacheImagesMap.insert({ li.loadAddress(), true });
2088 // Bump the writer index if we have a writer for this image
2089 if ( li.mustBuildClosure )
2090 ++writerIndex;
2091 continue;
2092 }
2093 // Images which don't need a closure can be skipped. They are from the shared cache
2094 if ( !li.mustBuildClosure )
2095 continue;
2096
2097 // If we have a root of libobjc, just give up for now
2098 if ( !strcmp(li.path(), "/usr/lib/libobjc.A.dylib"))
2099 return false;
2100
2101 ImageWriter& writer = writers[writerIndex];
2102 ++writerIndex;
2103
2104 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2105
2106 // Skip images with chained fixups other than arm64e legacy fixups until we can test them
2107 // FIXME: Handle chained fixups
2108 if ( ma->hasChainedFixups() ) {
2109 switch ( ma->chainedPointerFormat() ) {
2110 case DYLD_CHAINED_PTR_ARM64E:
2111 case DYLD_CHAINED_PTR_64:
2112 // We've tested the 64-bit chained fixups.
2113 break;
2114 case DYLD_CHAINED_PTR_64_OFFSET:
2115 case DYLD_CHAINED_PTR_ARM64E_USERLAND:
2116 case DYLD_CHAINED_PTR_ARM64E_USERLAND24:
2117 case DYLD_CHAINED_PTR_ARM64E_FIRMWARE:
2118 // FIXME: Test 64-bit offset chained fixups then enable this.
2119 continue;
2120 case DYLD_CHAINED_PTR_32:
2121 case DYLD_CHAINED_PTR_32_CACHE:
2122 case DYLD_CHAINED_PTR_32_FIRMWARE:
2123 // FIXME: Test 32-bit chained fixups then enable this.
2124 continue;
2125 }
2126 }
2127
2128 const MachOAnalyzer::ObjCImageInfo* objcImageInfo = ma->objcImageInfo();
2129 if ( objcImageInfo == nullptr )
2130 continue;
2131
2132 // This image is good so record it for use later.
2133 objcImages.default_constuct_back();
2134 ObjCOptimizerImage& image = objcImages.back();
2135 image.loadedImage = &li;
2136 image.writer = &writer;
2137
2138 // Find FairPlay encryption range if encrypted
2139 uint32_t fairPlayFileOffset;
2140 uint32_t fairPlaySize;
2141 if ( ma->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
2142 image.fairplayFileOffsetStart = fairPlayFileOffset;
image.fairplayFileOffsetEnd = fairPlayFileOffset + fairPlaySize;  // end of the encrypted range, not its start
2144 }
2145
2146 // Set the offset to the objc image info
2147 image.objcImageInfoVMOffset = (uint64_t)objcImageInfo - (uint64_t)ma;
2148 }
2149
2150 // objc supports a linker set which is a magic section of duplicate objc classes to ignore
2151 // We need to match that behaviour
2152 Map<const char*, bool, HashCString, EqualCString> duplicateClassesToIgnore;
2153 parseObjCClassDuplicates(duplicateClassesToIgnore);
2154
2155 OverflowSafeArray<const char*> closureSelectorStrings;
2156 Map<const char*, dyld3::closure::Image::ObjCImageOffset, HashCString, EqualCString> closureSelectorMap;
2157 OverflowSafeArray<const char*> closureDuplicateSharedCacheClassNames;
2158 Map<const char*, dyld3::closure::Image::ObjCDuplicateClass, HashCString, EqualCString> closureDuplicateSharedCacheClassMap;
2159 for (ObjCOptimizerImage& image : objcImages) {
2160 optimizeObjCClasses(objcClassOpt, sharedCacheImagesMap, closureDuplicateSharedCacheClassMap, duplicateClassesToIgnore, image);
2161 if (image.diag.hasError())
2162 continue;
2163
2164 optimizeObjCProtocols(objcProtocolOpt, sharedCacheImagesMap, image);
2165 if (image.diag.hasError())
2166 continue;
2167
2168 optimizeObjCSelectors(objcSelOpt, closureSelectorMap, image);
2169 if (image.diag.hasError())
2170 continue;
2171
2172 // If this image is still valid, then add its intermediate results to the main tables
2173
2174 // Class results
2175 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2176 uint64_t nameVMOffset = nameAndDataVMOffset.first;
2177 uint64_t dataVMOffset = nameAndDataVMOffset.second;
2178 _objcClassesHashTableImages.push_back({ image.loadedImage->imageNum, (uint32_t)nameVMOffset, (uint32_t)dataVMOffset });
2179 }
2180 image.classesNameAndDataVMOffsets.clear();
2181
2182 for (const auto& stringAndDuplicate : image.classSharedCacheDuplicates) {
2183 closureDuplicateSharedCacheClassMap[stringAndDuplicate.first] = stringAndDuplicate.second;
2184 closureDuplicateSharedCacheClassNames.push_back(stringAndDuplicate.first);
2185 }
2186
2187 // Selector results
// Note we don't need to add the selector binds here. It's easier just to process them later from each image
2189 for (const auto& stringAndTarget : image.selectorMap) {
2190 closureSelectorMap[stringAndTarget.first] = stringAndTarget.second;
2191 closureSelectorStrings.push_back(stringAndTarget.first);
2192 }
2193 if (image.methodNameVMOffset)
2194 _objcSelectorsHashTableImages.push_back({ image.loadedImage->imageNum, (uint32_t)*image.methodNameVMOffset });
2195 }
2196
2197 // If we successfully analyzed the classes and selectors, we can now emit their data
2198 // Set all the writers to have optimized objc
2199 for (ObjCOptimizerImage& image : objcImages) {
2200 if (image.diag.hasError())
2201 continue;
2202 image.writer->setHasPrecomputedObjC(true);
2203 }
2204
2205 // Write out the class table
2206 writeClassOrProtocolHashTable(true, objcImages);
2207
2208 // Write out the protocol table
2209 writeClassOrProtocolHashTable(false, objcImages);
2210
2211 // If we have closure duplicate classes, we need to make a hash table for them.
2212 closure::ObjCStringTable* duplicateClassesTable = nullptr;
2213 if (!closureDuplicateSharedCacheClassNames.empty()) {
2214 objc_opt::perfect_hash phash;
2215 objc_opt::make_perfect(closureDuplicateSharedCacheClassNames, phash);
2216 size_t size = ObjCStringTable::size(phash);
2217 _objcClassesDuplicatesHashTable.resize(size);
2218 //printf("Duplicate classes table size: %lld\n", size);
2219 duplicateClassesTable = (closure::ObjCClassDuplicatesOpt*)_objcClassesDuplicatesHashTable.begin();
2220 duplicateClassesTable->write(phash, closureDuplicateSharedCacheClassMap.array());
2221 }
2222
2223 // If we have closure selectors, we need to make a hash table for them.
2224 closure::ObjCStringTable* selectorStringTable = nullptr;
2225 if (!closureSelectorStrings.empty()) {
2226 objc_opt::perfect_hash phash;
2227 objc_opt::make_perfect(closureSelectorStrings, phash);
2228 size_t size = ObjCStringTable::size(phash);
2229 _objcSelectorsHashTable.resize(size);
2230 //printf("Selector table size: %lld\n", size);
2231 selectorStringTable = (closure::ObjCStringTable*)_objcSelectorsHashTable.begin();
2232 selectorStringTable->write(phash, closureSelectorMap.array());
2233 }
2234
2235 // Add fixups for the image info, protocol ISAs, and selector refs
2236 for (ObjCOptimizerImage& image : objcImages) {
2237 if (image.diag.hasError())
2238 continue;
2239
2240 // Protocol ISA references
2241 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ProtocolISAFixup, protocolFixups, 512);
2242 if ( !image.protocolISAFixups.empty() ) {
2243
2244 __block uint64_t lastOffset = -pointerSize;
2245 for (uint64_t runtimeOffset : image.protocolISAFixups) {
2246 bool mergedIntoPrevious = false;
2247 if ( (runtimeOffset > lastOffset) && !protocolFixups.empty() ) {
2248 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2249 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2250 // misaligned pointer means we cannot optimize
2251 }
2252 else {
2253 if ( (protocolFixups.back().repeatCount == 1) && (protocolFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2254 protocolFixups.back().repeatCount = 2;
2255 protocolFixups.back().skipCount = skipAmount;
2256 assert(protocolFixups.back().skipCount == skipAmount); // check overflow
2257 mergedIntoPrevious = true;
2258 }
2259 else if ( (protocolFixups.back().skipCount == skipAmount) && (protocolFixups.back().repeatCount < 0xfff) ) {
2260 uint32_t prevRepeatCount = protocolFixups.back().repeatCount;
2261 protocolFixups.back().repeatCount += 1;
2262 assert(protocolFixups.back().repeatCount > prevRepeatCount); // check overflow
2263 mergedIntoPrevious = true;
2264 }
2265 }
2266 }
2267 if ( !mergedIntoPrevious ) {
2268 Image::ProtocolISAFixup pattern;
2269 pattern.startVmOffset = runtimeOffset;
2270 pattern.repeatCount = 1;
2271 pattern.skipCount = 0;
2272 assert(pattern.startVmOffset == runtimeOffset);
2273 protocolFixups.push_back(pattern);
2274 }
2275 lastOffset = runtimeOffset;
2276 }
2277 }
2278
2279 // Selector references
2280 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::SelectorReferenceFixup, selRefFixups, 512);
2281 if ( !image.selectorFixups.empty() ) {
2282 uint64_t prevVMOffset = 0;
2283 const uint64_t maxChainOffset = (4 * ((1 << 7) - 1));
2284 for (const ObjCOptimizerImage::SelectorFixup& selectorFixup : image.selectorFixups) {
2285 assert( (selectorFixup.fixupVMOffset & 3) == 0 );
2286 if ( (selectorFixup.fixupVMOffset - prevVMOffset) <= maxChainOffset ) {
2287 // Add this to the previous chain
2288 selRefFixups.back().chainEntry.next = (uint32_t)(selectorFixup.fixupVMOffset - prevVMOffset) / 4;
2289 } else {
2290 // Need to start a new chain as the previous offset can't reach
2291 Image::SelectorReferenceFixup fixup;
2292 fixup.chainStartVMOffset = selectorFixup.fixupVMOffset;
2293 selRefFixups.push_back(fixup);
2294 }
2295
2296 if ( selectorFixup.isSharedCache ) {
2297 // If the entry is in the shared cache then we already have the index for it
2298 Image::SelectorReferenceFixup fixup;
2299 fixup.chainEntry.index = selectorFixup.sharedCache.selectorTableIndex;
2300 fixup.chainEntry.next = 0;
2301 fixup.chainEntry.inSharedCache = 1;
2302 selRefFixups.push_back(fixup);
2303 } else {
2304 // We had to record the string for the closure table entries as we don't know the
2305 // index until now
2306 uint32_t selectorTableIndex = selectorStringTable->getIndex(selectorFixup.image.selectorString);
2307 assert(selectorTableIndex != ObjCSelectorOpt::indexNotFound);
2308 Image::SelectorReferenceFixup fixup;
2309 fixup.chainEntry.index = selectorTableIndex;
2310 fixup.chainEntry.next = 0;
2311 fixup.chainEntry.inSharedCache = 0;
2312 selRefFixups.push_back(fixup);
2313 }
2314
2315 prevVMOffset = selectorFixup.fixupVMOffset;
2316 }
2317 }
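// Editorial worked example (not in the original source): chain links are
// stored in 4-byte units with 7 bits of reach, as maxChainOffset ==
// 4 * 127 == 508 above. Hypothetical selector refs at VM offsets 0x4000
// and 0x4100 chain with next == 0x100/4 == 64; a ref at 0x5000 is 0xF00
// bytes past 0x4100, out of reach, so it starts a new chain via
// chainStartVMOffset.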
2318
2319 // Stable Swift fixups
2320 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ClassStableSwiftFixup, stableSwiftFixups, 512);
2321 if ( !image.classStableSwiftFixups.empty() ) {
2322
2323 __block uint64_t lastOffset = -pointerSize;
2324 for (uint64_t runtimeOffset : image.classStableSwiftFixups) {
2325 bool mergedIntoPrevious = false;
2326 if ( (runtimeOffset > lastOffset) && !stableSwiftFixups.empty() ) {
2327 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2328 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2329 // misaligned pointer means we cannot optimize
2330 }
2331 else {
2332 if ( (stableSwiftFixups.back().repeatCount == 1) && (stableSwiftFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2333 stableSwiftFixups.back().repeatCount = 2;
2334 stableSwiftFixups.back().skipCount = skipAmount;
2335 assert(stableSwiftFixups.back().skipCount == skipAmount); // check overflow
2336 mergedIntoPrevious = true;
2337 }
2338 else if ( (stableSwiftFixups.back().skipCount == skipAmount) && (stableSwiftFixups.back().repeatCount < 0xfff) ) {
2339 uint32_t prevRepeatCount = stableSwiftFixups.back().repeatCount;
2340 stableSwiftFixups.back().repeatCount += 1;
2341 assert(stableSwiftFixups.back().repeatCount > prevRepeatCount); // check overflow
2342 mergedIntoPrevious = true;
2343 }
2344 }
2345 }
2346 if ( !mergedIntoPrevious ) {
2347 Image::ClassStableSwiftFixup pattern;
2348 pattern.startVmOffset = runtimeOffset;
2349 pattern.repeatCount = 1;
2350 pattern.skipCount = 0;
2351 assert(pattern.startVmOffset == runtimeOffset);
2352 stableSwiftFixups.push_back(pattern);
2353 }
2354 lastOffset = runtimeOffset;
2355 }
2356 }
2357
2358 // Method list fixups
2359 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::MethodListFixup, methodListFixups, 512);
2360 if ( !image.methodListFixups.empty() ) {
2361
2362 __block uint64_t lastOffset = -pointerSize;
2363 for (uint64_t runtimeOffset : image.methodListFixups) {
2364 bool mergedIntoPrevious = false;
2365 if ( (runtimeOffset > lastOffset) && !methodListFixups.empty() ) {
2366 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2367 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2368 // misaligned pointer means we cannot optimize
2369 }
2370 else {
2371 if ( (methodListFixups.back().repeatCount == 1) && (methodListFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2372 methodListFixups.back().repeatCount = 2;
2373 methodListFixups.back().skipCount = skipAmount;
2374 assert(methodListFixups.back().skipCount == skipAmount); // check overflow
2375 mergedIntoPrevious = true;
2376 }
2377 else if ( (methodListFixups.back().skipCount == skipAmount) && (methodListFixups.back().repeatCount < 0xfff) ) {
2378 uint32_t prevRepeatCount = methodListFixups.back().repeatCount;
2379 methodListFixups.back().repeatCount += 1;
2380 assert(methodListFixups.back().repeatCount > prevRepeatCount); // check overflow
2381 mergedIntoPrevious = true;
2382 }
2383 }
2384 }
2385 if ( !mergedIntoPrevious ) {
2386 Image::MethodListFixup pattern;
2387 pattern.startVmOffset = runtimeOffset;
2388 pattern.repeatCount = 1;
2389 pattern.skipCount = 0;
2390 assert(pattern.startVmOffset == runtimeOffset);
2391 methodListFixups.push_back(pattern);
2392 }
2393 lastOffset = runtimeOffset;
2394 }
2395 }
2396
2397 image.writer->setObjCFixupInfo(objcProtocolClassTarget, image.objcImageInfoVMOffset, protocolFixups,
2398 selRefFixups, stableSwiftFixups, methodListFixups);
2399 }
2400
2401 return true;
2402 }
2403
2404 void ClosureBuilder::optimizeObjCSelectors(const objc_opt::objc_selopt_t* objcSelOpt,
2405 const Map<const char*, dyld3::closure::Image::ObjCImageOffset, HashCString, EqualCString>& closureSelectorMap,
2406 ObjCOptimizerImage& image) {
2407
2408 BuilderLoadedImage& li = *image.loadedImage;
2409
2410 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2411 uint32_t pointerSize = ma->pointerSize();
2412 const uint64_t loadAddress = ma->preferredLoadAddress();
2413 const dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(li.contentRebased);
2414
2415 // The legacy (objc1) codebase uses a bunch of sections we don't want to reason about. If we see them just give up.
2416 __block bool foundBadSection = false;
2417 ma->forEachSection(^(const MachOAnalyzer::SectionInfo &sectInfo, bool malformedSectionRange, bool &stop) {
2418 if ( strcmp(sectInfo.segInfo.segName, "__OBJC") != 0 )
2419 return;
2420 if (strcmp(sectInfo.sectName, "__module_info") == 0) {
2421 foundBadSection = true;
2422 stop = true;
2423 return;
2424 }
2425 if (strcmp(sectInfo.sectName, "__protocol") == 0) {
2426 foundBadSection = true;
2427 stop = true;
2428 return;
2429 }
2430 if (strcmp(sectInfo.sectName, "__message_refs") == 0) {
2431 foundBadSection = true;
2432 stop = true;
2433 return;
2434 }
2435 });
2436 if (foundBadSection) {
2437 image.diag.error("Old objc section");
2438 return;
2439 }
2440
2441 __block MachOAnalyzer::SectionCache selectorStringSectionCache(ma);
2442
2443 uint32_t sharedCacheSentinelIndex = objcSelOpt->getSentinelIndex();
2444
2445 // Track the locations where we've updated selector references. With relative method lists,
2446 // we share selref slots across classes, categories, protocols, and SEL() expressions, so we may
2447 // visit a location more than once
2448 __block Map<uint64_t, bool, HashUInt64, EqualUInt64> seenSelectorReferenceImageOffsets;
2449
2450 auto visitReferenceToObjCSelector = ^void(uint64_t selectorStringVMAddr, uint64_t selectorReferenceVMAddr) {
2451
2452 uint64_t selectorUseImageOffset = selectorReferenceVMAddr - loadAddress;
2453 auto selUseItAndInserted = seenSelectorReferenceImageOffsets.insert({ selectorUseImageOffset, true });
2454 if ( !selUseItAndInserted.second ) {
// If we didn't insert the selector reference, then it's already there, so skip it
2456 return;
2457 }
2458
2459 if ( (selectorUseImageOffset & 3) != 0 ) {
2460 image.diag.error("Unaligned selector reference fixup");
2461 return;
2462 }
2463
2464 // Image::SelectorReferenceFixup only has a 32-bit reach
2465 if ( selectorUseImageOffset >= (1ULL << 32) ) {
2466 image.diag.error("Selector reference fixup exceeds supported vm offset");
2467 return;
2468 }
2469
2470 // Get the section for the name
2471 const char* selectorString = nullptr;
2472 MachOAnalyzer::PrintableStringResult selectorStringResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2473 __block uint64_t selectorStringSectionStartVMAddr = 0;
2474 auto selectorStringSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2475
// We only have 24 bits in ObjCImageOffset to index into the strings
if (sectInfo.sectSize >= Image::ObjCImageOffset::maximumOffset) {
return false;
}

// We use 32-bit offsets so make sure the section is no larger than that.
uint64_t selectorStringVMOffset = sectInfo.sectAddr - loadAddress;
if (selectorStringVMOffset >= (1ULL << 32)) {
2484 return false;
2485 }
2486
2487 selectorStringSectionStartVMAddr = sectInfo.sectAddr;
2488 return true;
2489 };
2490 selectorString = ma->getPrintableString(selectorStringVMAddr, selectorStringResult,
2491 &selectorStringSectionCache, selectorStringSectionHandler);
2492
2493 if ( selectorStringResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2494 image.diag.error("Invalid selector string for objc optimisation");
2495 return;
2496 }
2497
2498 uint32_t cacheSelectorIndex = objcSelOpt->getIndexForKey(selectorString);
2499 //printf("selector: %p -> %p %s\n", methodName, cacheSelector, selectorString);
2500
2501 if ( cacheSelectorIndex != sharedCacheSentinelIndex ) {
2502 // We got the selector from the cache so add a fixup to point there.
2503 ObjCOptimizerImage::SelectorFixup fixup;
2504 fixup.isSharedCache = true;
2505 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2506 fixup.sharedCache.selectorTableIndex = cacheSelectorIndex;
2507
2508 //printf("Overriding fixup at 0x%08llX to cache offset 0x%08llX\n", selectorUseImageOffset, (uint64_t)cacheSelector - (uint64_t)_dyldCache);
2509 image.selectorFixups.push_back(fixup);
2510 return;
2511 }
2512
2513 // See if this selector is already in the closure map from a previous image
2514 auto closureSelectorIt = closureSelectorMap.find(selectorString);
2515 if (closureSelectorIt != closureSelectorMap.end()) {
2516 // This selector was found in a previous image, so use it here.
2517 ObjCOptimizerImage::SelectorFixup fixup;
2518 fixup.isSharedCache = false;
2519 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2520 fixup.image.selectorString = selectorString;
2521
2522 //printf("Overriding fixup at 0x%08llX to '%s' offset 0x%08llX\n", selectorUseImageOffset, findLoadedImage(target.image.imageNum).path(), target.image.offset);
2523 image.selectorFixups.push_back(fixup);
2524 return;
2525 }
2526
2527 // See if this selector is already in the map for this image
2528 auto itAndInserted = image.selectorMap.insert({ selectorString, dyld3::closure::Image::ObjCImageOffset() });
2529 if (itAndInserted.second) {
// We added the selector, so it's pointing into our own image.
// We don't need to add a fixup to our image, but we do need to
// populate the data for other images later to point here.
// First put our image in the list if it's not already there.
2534 uint64_t methodNameVMOffset = selectorStringSectionStartVMAddr - loadAddress;
2535 if (!image.methodNameVMOffset) {
2536 if ( _objcSelectorsHashTableImages.count() == Image::ObjCImageOffset::maximumImageIndex ) {
2537 image.diag.error("Out of space for selector hash images");
2538 return;
2539 }
2540 image.methodNameVMOffset = methodNameVMOffset;
2541 } else {
2542 // If we already set the offset to the start of the method names section, double check that
2543 // the section we are in right now is the same as that one. Otherwise we don't have the code
2544 // to handle both right now.
2545 if (*image.methodNameVMOffset != methodNameVMOffset) {
2546 image.diag.error("Cannot handle more than one selector strings section");
2547 return;
2548 }
2549 }
2550
2551 dyld3::closure::Image::ObjCImageOffset target;
2552 target.imageIndex = (uint32_t)_objcSelectorsHashTableImages.count();
2553 target.imageOffset = (uint32_t)(selectorStringVMAddr - selectorStringSectionStartVMAddr);
2554 itAndInserted.first->second = target;
2555 return;
2556 }
2557
2558 // This selector was found elsewhere in our image. If this reference already points to the same
2559 // selector string as we found before (and it should!) then we have nothing to do. Otherwise we
2560 // need to add a fixup here to make sure we point to our chosen definition.
2561 uint32_t imageOffset = (uint32_t)(selectorStringVMAddr - loadAddress);
2562 if ( imageOffset == (*image.methodNameVMOffset + itAndInserted.first->second.imageOffset) )
2563 return;
2564
2565 ObjCOptimizerImage::SelectorFixup fixup;
2566 fixup.isSharedCache = false;
2567 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2568 fixup.image.selectorString = selectorString;
2569
2570 //printf("Overriding fixup at 0x%08llX to '%s' offset 0x%08llX\n", selectorUseImageOffset, findLoadedImage(target.image.imageNum).path(), target.image.offset);
2571 image.selectorFixups.push_back(fixup);
2572 };
2573
2574 auto visitMethod = ^(uint64_t methodVMAddr, const dyld3::MachOAnalyzer::ObjCMethod& method) {
2575 visitReferenceToObjCSelector(method.nameVMAddr, method.nameLocationVMAddr);
2576 };
2577
2578 auto visitMethodList = ^(uint64_t methodListVMAddr) {
2579 if ( methodListVMAddr == 0 )
2580 return;
2581 bool isRelativeMethodList = false;
2582 ma->forEachObjCMethod(methodListVMAddr, vmAddrConverter, visitMethod, &isRelativeMethodList);
2583 if (image.diag.hasError())
2584 return;
2585 // Record the offset to the method list so that we can mark it as being uniqued
2586 // We can only do this if we have a pointer based method list as relative method lists are
2587 // in read-only memory
2588 if ( !isRelativeMethodList )
2589 image.methodListFixups.push_back(methodListVMAddr - loadAddress);
2590 };
2591
2592 auto visitClass = ^(Diagnostics& diag, uint64_t classVMAddr,
2593 uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
2594 const dyld3::MachOAnalyzer::ObjCClassInfo& objcClass, bool isMetaClass) {
2595 visitMethodList(objcClass.baseMethodsVMAddr(pointerSize));
2596 };
2597
2598 auto visitCategory = ^(Diagnostics& diag, uint64_t categoryVMAddr,
2599 const dyld3::MachOAnalyzer::ObjCCategory& objcCategory) {
2600 visitMethodList(objcCategory.instanceMethodsVMAddr);
2601 visitMethodList(objcCategory.classMethodsVMAddr);
2602 };
2603 auto visitProtocol = ^(Diagnostics& diag, uint64_t protocolVMAddr,
2604 const dyld3::MachOAnalyzer::ObjCProtocol& objCProtocol) {
2605 visitMethodList(objCProtocol.instanceMethodsVMAddr);
2606 visitMethodList(objCProtocol.classMethodsVMAddr);
2607 visitMethodList(objCProtocol.optionalInstanceMethodsVMAddr);
2608 visitMethodList(objCProtocol.optionalClassMethodsVMAddr);
2609 };
2610
2611 // Walk the class list
2612 ma->forEachObjCClass(image.diag, vmAddrConverter, visitClass);
2613 if (image.diag.hasError())
2614 return;
2615
2616 // Walk the category list
2617 ma->forEachObjCCategory(image.diag, vmAddrConverter, visitCategory);
2618 if (image.diag.hasError())
2619 return;
2620
2621 // Walk the protocol list
2622 ma->forEachObjCProtocol(image.diag, vmAddrConverter, visitProtocol);
2623 if (image.diag.hasError())
2624 return;
2625
2626 // Visit the selector refs
2627 ma->forEachObjCSelectorReference(image.diag, vmAddrConverter, ^(uint64_t selRefVMAddr, uint64_t selRefTargetVMAddr) {
2628 visitReferenceToObjCSelector(selRefTargetVMAddr, selRefVMAddr);
2629 });
2630 if (image.diag.hasError())
2631 return;
2632
2633 // Visit the message refs
// Note this isn't actually supported in libobjc any more. Its check for whether to support them is:
// #if (defined(__x86_64__) && (TARGET_OS_OSX || TARGET_OS_SIMULATOR))
// So to keep it simple, let's only do this walk if we are x86_64
2637 if ( ma->isArch("x86_64") || ma->isArch("x86_64h") ) {
2638 if (ma->hasObjCMessageReferences()) {
2639 image.diag.error("Cannot handle message refs");
2640 return;
2641 }
2642 }
2643 }
2644
2645 static const dyld3::MachOAnalyzer* getMachHeaderFromObjCHeaderInfo(const void* opaqueHeaderInfo, uint32_t pointerSize) {
2646 if (pointerSize == 8) {
2647 typedef int64_t PtrTy;
2648 struct HeaderInfo {
2649 PtrTy mhdr_offset; // offset to mach_header_64
2650 PtrTy info_offset; // offset to objc_image_info *
2651 };
2652 const HeaderInfo* headerInfo = (const HeaderInfo*)opaqueHeaderInfo;
2653 return (const dyld3::MachOAnalyzer*)(((const uint8_t*)&headerInfo->mhdr_offset) + headerInfo->mhdr_offset);
2654 } else {
2655 typedef int32_t PtrTy;
2656 struct HeaderInfo {
2657 PtrTy mhdr_offset; // offset to mach_header
2658 PtrTy info_offset; // offset to objc_image_info *
2659 };
2660 const HeaderInfo* headerInfo = (const HeaderInfo*)opaqueHeaderInfo;
2661 return (const dyld3::MachOAnalyzer*)(((const uint8_t*)&headerInfo->mhdr_offset) + headerInfo->mhdr_offset);
2662 }
2663 }
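// Editorial note (not in the original source): mhdr_offset is self-relative,
// so the header is found at &headerInfo->mhdr_offset + mhdr_offset. With
// hypothetical addresses: a HeaderInfo at 0x100001050 whose mhdr_offset is
// -0x1050 resolves to the mach_header at 0x100000000.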
2664
2665 void ClosureBuilder::addDuplicateObjCClassWarning(const char* className,
2666 const char* duplicateDefinitionPath,
2667 const char* canonicalDefinitionPath)
2668 {
2669 if ( _objcDuplicateClassWarnings == nullptr )
2670 _objcDuplicateClassWarnings = PathPool::allocate();
2671 // Use a diagnostic to give us a buffer we can safely print to
2672 Diagnostics diag;
2673 diag.error("Class %s is implemented in both %s and %s. One of the two will be used. Which one is undefined.",
2674 className, canonicalDefinitionPath, duplicateDefinitionPath);
2675 #if BUILDING_CACHE_BUILDER
2676 _objcDuplicateClassWarnings->add(diag.errorMessage().c_str());
2677 #else
2678 _objcDuplicateClassWarnings->add(diag.errorMessage());
2679 #endif
2680 }
2681
2682 void ClosureBuilder::optimizeObjCClasses(const objc_opt::objc_clsopt_t* objcClassOpt,
2683 const Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer>& sharedCacheImagesMap,
2684 const Map<const char*, dyld3::closure::Image::ObjCDuplicateClass, HashCString, EqualCString>& duplicateSharedCacheClasses,
2685 const Map<const char*, bool, HashCString, EqualCString>& duplicateClassesToIgnore,
2686 ObjCOptimizerImage& image) {
2687
2688 BuilderLoadedImage& li = *image.loadedImage;
2689 OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenClasses = image.seenClasses;
2690
2691 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2692 const uint32_t pointerSize = ma->pointerSize();
2693 const uint64_t loadAddress = ma->preferredLoadAddress();
2694 const dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(li.contentRebased);
2695
2696 // Keep track of any missing weak imports so that we can tell if the superclasses are nil
2697 // This is necessary as the shared cache will be marked with 'no missing weak superclasses'
2698 // and so we need to continue to satisfy that constraint
__block Map<uint64_t, bool, HashUInt64, EqualUInt64> missingWeakImportOffsets;
2700 if (li.hasMissingWeakImports) {
2701 const Image* closureImage = image.writer->currentImage();
2702 if ( closureImage->hasChainedFixups() ) {
2703 const Array<Image::ResolvedSymbolTarget> targets = closureImage->chainedTargets();
2704 if ( !targets.empty() ) {
2705 ma->withChainStarts(_diag, closureImage->chainedStartsOffset(), ^(const dyld_chained_starts_in_image* startsInfo) {
2706 ma->forEachFixupInAllChains(_diag, startsInfo, false, ^(MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc,
2707 const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
2708 uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)ma;
2709 uint32_t bindOrdinal;
2710 int64_t addend;
2711 if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal, addend) ) {
2712 if ( bindOrdinal < targets.count() ) {
2713 const Image::ResolvedSymbolTarget& target = targets[bindOrdinal];
2714 if ( (target.absolute.kind == Image::ResolvedSymbolTarget::kindAbsolute) && (target.absolute.value == 0) )
2715 missingWeakImportOffsets[fixupOffset] = true;
2716 }
2717 else {
2718 image.diag.error("out of range bind ordinal %u (max %lu)", bindOrdinal, targets.count());
2719 fixupsStop = true;
2720 }
2721 }
2722 });
2723 });
2724 if (image.diag.hasError())
2725 return;
2726 }
2727 }
2728 else {
2729 closureImage->forEachBind(^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool &stop) {
2730 if ( (bindTarget.absolute.kind == Image::ResolvedSymbolTarget::kindAbsolute) && (bindTarget.absolute.value == 0) )
2731 missingWeakImportOffsets[imageOffsetToBind] = true;
2732 });
2733 }
2734 }
2735
2736 // Class names and data may be in different sections depending on Swift vs ObjC, so handle multiple sections
2737 __block MachOAnalyzer::SectionCache classNameSectionCache(ma);
2738 __block MachOAnalyzer::SectionCache classSectionCache(ma);
2739
2740 ma->forEachObjCClass(image.diag, vmAddrConverter, ^(Diagnostics &diag, uint64_t classVMAddr,
2741 uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
2742 const MachOAnalyzer::ObjCClassInfo &objcClass, bool isMetaClass) {
2743 if (isMetaClass) return;
2744
2745 // Make sure the superclass pointer is not nil
2746 uint64_t superclassRuntimeOffset = classSuperclassVMAddr - loadAddress;
2747 if (missingWeakImportOffsets.find(superclassRuntimeOffset) != missingWeakImportOffsets.end()) {
2748 diag.error("Missing weak superclass");
2749 return;
2750 }
2751
2752 // Does this class need to be fixed up for the stable Swift ABI?
2753 // Note the order matches the objc runtime in that we always do this fix before checking for dupes,
2754 // but after excluding classes with missing weak superclasses.
2755 if (objcClass.isUnfixedBackwardDeployingStableSwift()) {
2756 // Class really is stable Swift, pretending to be pre-stable.
2757 // Fix its lie. This involves fixing the FAST bits on the class data value, so record that vmaddr
2758 image.classStableSwiftFixups.push_back(classDataVMAddr - loadAddress);
2759 }
2760
2761 // Get the section for the name
2762 const char* className = nullptr;
2763 MachOAnalyzer::PrintableStringResult classNameResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2764 __block uint64_t classNameSectionStartVMAddr = 0;
2765 auto classNameSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2766 // We only have 24 bits in ObjCClassNameImageOffset to index into the strings
2767 if (sectInfo.sectSize >= Image::ObjCClassNameImageOffset::maximumOffset) {
2768 return false;
2769 }
2770
2771 // We use 32-bit offsets, so make sure the section's VM offset fits in 32 bits.
2772 uint64_t classNameVMOffset = sectInfo.sectAddr - loadAddress;
2773 if (classNameVMOffset >= (1ULL << 32)) {
2774 return false;
2775 }
2776
2777 classNameSectionStartVMAddr = sectInfo.sectAddr;
2778 return true;
2779 };
2780 uint64_t classNameVMAddr = objcClass.nameVMAddr(pointerSize);
2781 className = ma->getPrintableString(classNameVMAddr, classNameResult,
2782 &classNameSectionCache, classNameSectionHandler);
2783
2784 if ( classNameResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2785 diag.error("Invalid class name for objc optimisation");
2786 return;
2787 }
2788
2789 // If the class also exists in a shared cache image which is loaded, then objc
2790 // would have found that one, regardless of load order. So we can just skip this one.
2791 {
2792 void *cls;
2793 void *hi;
2794 uint32_t index;
2795 uint32_t count = objcClassOpt->getClassHeaderAndIndex(className, cls, hi, index);
2796 if (count == 1) {
2797 // exactly one matching class. Check if it's loaded
2798 const dyld3::MachOAnalyzer* sharedCacheMA = getMachHeaderFromObjCHeaderInfo(hi, pointerSize);
2799 if (sharedCacheImagesMap.find(sharedCacheMA) != sharedCacheImagesMap.end()) {
2800 if ( duplicateClassesToIgnore.find(className) == duplicateClassesToIgnore.end() )
2801 addDuplicateObjCClassWarning(className, li.path(), sharedCacheMA->installName());
2802
2803 // We have a duplicate class, so check if we've already got it in our map.
2804 if ( duplicateSharedCacheClasses.find(className) == duplicateSharedCacheClasses.end() ) {
2805 // We haven't seen this one yet
2806 Image::ObjCDuplicateClass duplicateClass;
2807 duplicateClass.sharedCacheClassOptIndex = index;
2808 duplicateClass.sharedCacheClassDuplicateIndex = 0;
2809 image.classSharedCacheDuplicates.insert({ className, duplicateClass });
2810 }
2811 }
2812 }
2813 else if (count > 1) {
2814 // more than one matching class - find one that is loaded
2815 void *clslist[count];
2816 void *hilist[count];
2817 objcClassOpt->getClassesAndHeaders(className, clslist, hilist);
2818 for (uint32_t i = 0; i < count; i++) {
2819 const dyld3::MachOAnalyzer* sharedCacheMA = getMachHeaderFromObjCHeaderInfo(hilist[i], pointerSize);
2820 if (sharedCacheImagesMap.find(sharedCacheMA) != sharedCacheImagesMap.end()) {
2821 if ( duplicateClassesToIgnore.find(className) == duplicateClassesToIgnore.end() )
2822 addDuplicateObjCClassWarning(className, li.path(), sharedCacheMA->installName());
2823
2824 // We have a duplicate class, so check if we've already got it in our map.
2825 if ( duplicateSharedCacheClasses.find(className) == duplicateSharedCacheClasses.end() ) {
2826 // We haven't seen this one yet
2827 Image::ObjCDuplicateClass duplicateClass;
2828 duplicateClass.sharedCacheClassOptIndex = index;
2829 duplicateClass.sharedCacheClassDuplicateIndex = i;
2830 image.classSharedCacheDuplicates.insert({ className, duplicateClass });
2831 }
2832
2833 break;
2834 }
2835 }
2836 }
2837 }
2838
2839 // Get the section for the class itself
2840 __block uint64_t classSectionStartVMAddr = 0;
2841 auto classSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2842 // We only have 23 bits in ObjCClassImageOffset to index into the classes
2843 if (sectInfo.sectSize > Image::ObjCClassImageOffset::maximumOffset) {
2844 return false;
2845 }
2846
2847 // We use 32-bit offsets, so make sure the section's VM offset fits in 32 bits.
2848 uint64_t classDatasVMOffset = sectInfo.sectAddr - loadAddress;
2849 if (classDatasVMOffset >= (1ULL << 32)) {
2850 return false;
2851 }
2852
2853 classSectionStartVMAddr = sectInfo.sectAddr;
2854 return true;
2855 };
2856 if (!classSectionCache.findSectionForVMAddr(classVMAddr, classSectionHandler)) {
2857 diag.error("Invalid class for objc optimisation");
2858 return;
2859 }
2860
2861 // Make sure we have an entry for this image's offsets for later
2862 uint64_t classNameSectionVMOffset = classNameSectionStartVMAddr - loadAddress;
2863 uint64_t classSectionVMOffset = classSectionStartVMAddr - loadAddress;
2864 uint64_t hashTableVMOffsetsIndex = 0;
2865 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2866 if ( (nameAndDataVMOffset.first == classNameSectionVMOffset) && (nameAndDataVMOffset.second == classSectionVMOffset) )
2867 break;
2868 ++hashTableVMOffsetsIndex;
2869 }
2870
2871 if (hashTableVMOffsetsIndex == image.classesNameAndDataVMOffsets.count()) {
2872 // Didn't find an image entry with this offset. Add one if we have space
2873 uint64_t totalHashTableImages = image.classesNameAndDataVMOffsets.count() + _objcClassesHashTableImages.count();
2874 if ( totalHashTableImages == Image::ObjCClassNameImageOffset::maximumImageIndex ) {
2875 // No more space. We need to give up
2876 diag.error("No more space for class hash table image");
2877 return;
2878 }
2879 image.classesNameAndDataVMOffsets.push_back({ classNameSectionVMOffset, classSectionVMOffset });
2880 }
2881
2882 hashTableVMOffsetsIndex += _objcClassesHashTableImages.count();
2883
2884 uint64_t classNameOffset = classNameVMAddr - classNameSectionStartVMAddr;
2885 uint64_t classDataOffset = classVMAddr - classSectionStartVMAddr;
2886
2887 closure::Image::ObjCClassNameImageOffset classNameTarget;
2888 classNameTarget.classNameImageIndex = (uint32_t)hashTableVMOffsetsIndex;
2889 classNameTarget.classNameImageOffset = (uint32_t)classNameOffset;
2890
2891 dyld3::closure::Image::ObjCClassImageOffset classDataTarget;
2892 classDataTarget.classData.imageIndex = (uint32_t)hashTableVMOffsetsIndex;
2893 classDataTarget.classData.imageOffset = (uint32_t)classDataOffset;
2894 classDataTarget.classData.isDuplicate = 0;
2895
2896 seenClasses.push_back({ classNameTarget, classDataTarget });
2897 });
2898 }
2899
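// Same scheme as optimizeObjCClasses, but for protocols: record each
// protocol's name/data offsets unless a loaded shared cache image already
// supplies a protocol with the same name, and refuse protocols with a
// non-zero ISA.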
2900 void ClosureBuilder::optimizeObjCProtocols(const objc_opt::objc_protocolopt2_t* objcProtocolOpt,
2901 const Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer>& sharedCacheImagesMap,
2902 ObjCOptimizerImage& image) {
2903
2904 BuilderLoadedImage& li = *image.loadedImage;
2905 OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenProtocols = image.seenProtocols;
2906
2907 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2908 const uint32_t pointerSize = ma->pointerSize();
2909 const uint64_t loadAddress = ma->preferredLoadAddress();
2910 const dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(li.contentRebased);
2911
2912 // Protocol names and data may be in different sections depending on Swift vs ObjC, so handle multiple sections
2913 __block MachOAnalyzer::SectionCache protocolNameSectionCache(ma);
2914 __block MachOAnalyzer::SectionCache protocolSectionCache(ma);
2915
2916 ma->forEachObjCProtocol(image.diag, vmAddrConverter, ^(Diagnostics &diag, uint64_t protocolVMAddr,
2917 const dyld3::MachOAnalyzer::ObjCProtocol &objCProtocol) {
2918 if ( objCProtocol.isaVMAddr != 0 ) {
2919 // We can't optimize this protocol if it has an ISA as we want to override it
2920 diag.error("Protocol ISA cannot be non-zero");
2921 return;
2922 }
2923
2924 // Get the section for the name
2925 const char* protocolName = nullptr;
2926 MachOAnalyzer::PrintableStringResult protocolNameResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2927 __block uint64_t protocolNameSectionStartVMAddr = 0;
2928 auto protocolNameSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2929 // We only have 24 bits in ObjCClassNameImageOffset to index into the strings
2930 if (sectInfo.sectSize >= Image::ObjCClassNameImageOffset::maximumOffset) {
2931 return false;
2932 }
2933
2934 // We use 32-bit offsets, so make sure the section's VM offset fits in 32 bits.
2935 uint64_t protocolNameVMOffset = sectInfo.sectAddr - loadAddress;
2936 if (protocolNameVMOffset >= (1ULL << 32)) {
2937 return false;
2938 }
2939
2940 protocolNameSectionStartVMAddr = sectInfo.sectAddr;
2941 return true;
2942 };
2943 uint64_t protocolNameVMAddr = objCProtocol.nameVMAddr;
2944 protocolName = ma->getPrintableString(protocolNameVMAddr, protocolNameResult,
2945 &protocolNameSectionCache, protocolNameSectionHandler);
2946
2947 if ( protocolNameResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2948 diag.error("Invalid protocol name for objc optimisation");
2949 return;
2950 }
2951
2952 // If the protocol also exists in a shared cache image which is loaded, then objc
2953 // would have found that one, regardless of load order. So we can just skip this one.
2954 {
2955 void *cls;
2956 void *hi;
2957 uint32_t count = objcProtocolOpt->getClassAndHeader(protocolName, cls, hi);
2958 if (count == 1) {
2959 // exactly one matching protocol. Check if it's loaded
2960 if (sharedCacheImagesMap.find(getMachHeaderFromObjCHeaderInfo(hi, pointerSize)) != sharedCacheImagesMap.end())
2961 return;
2962 }
2963 else if (count > 1) {
2964 // more than one matching protocol - find one that is loaded
2965 void *clslist[count];
2966 void *hilist[count];
2967 objcProtocolOpt->getClassesAndHeaders(protocolName, clslist, hilist);
2968 for (uint32_t i = 0; i < count; i++) {
2969 if (sharedCacheImagesMap.find(getMachHeaderFromObjCHeaderInfo(hilist[i], pointerSize)) != sharedCacheImagesMap.end())
2970 return;
2971 }
2972 }
2973 }
2974
2975 // Get the section for the protocol itself
2976 __block uint64_t protocolSectionStartVMAddr = 0;
2977 auto protocolSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2978 // We only have 23 bits in ObjCClassImageOffset to index into the protocols
2979 if (sectInfo.sectSize > Image::ObjCClassImageOffset::maximumOffset) {
2980 return false;
2981 }
2982
2983 // We use 32-bit offsets, so make sure the section's VM offset fits in 32 bits.
2984 uint64_t protocolDatasVMOffset = sectInfo.sectAddr - loadAddress;
2985 if (protocolDatasVMOffset >= (1ULL << 32)) {
2986 return false;
2987 }
2988
2989 protocolSectionStartVMAddr = sectInfo.sectAddr;
2990 return true;
2991 };
2992 if (!protocolSectionCache.findSectionForVMAddr(protocolVMAddr, protocolSectionHandler)) {
2993 diag.error("Invalid protocol for objc optimisation");
2994 return;
2995 }
2996
2997 // Make sure we have an entry for this image's offsets for later
2998 uint64_t protocolNameSectionVMOffset = protocolNameSectionStartVMAddr - loadAddress;
2999 uint64_t protocolSectionVMOffset = protocolSectionStartVMAddr - loadAddress;
3000 uint64_t hashTableVMOffsetsIndex = 0;
3001 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
3002 if ( (nameAndDataVMOffset.first == protocolNameSectionVMOffset) && (nameAndDataVMOffset.second == protocolSectionVMOffset) )
3003 break;
3004 ++hashTableVMOffsetsIndex;
3005 }
3006
3007 if (hashTableVMOffsetsIndex == image.classesNameAndDataVMOffsets.count()) {
3008 // Didn't find an image entry with this offset. Add one if we have space
3009 uint64_t totalHashTableImages = image.classesNameAndDataVMOffsets.count() + _objcClassesHashTableImages.count();
3010 if ( totalHashTableImages == Image::ObjCClassNameImageOffset::maximumImageIndex ) {
3011 // No more space. We need to give up
3012 diag.error("No more space for protocol hash table image");
3013 return;
3014 }
3015 image.classesNameAndDataVMOffsets.push_back({ protocolNameSectionVMOffset, protocolSectionVMOffset });
3016 }
3017
3018 hashTableVMOffsetsIndex += _objcClassesHashTableImages.count();
3019
3020 uint64_t protocolNameOffset = protocolNameVMAddr - protocolNameSectionStartVMAddr;
3021 uint64_t protocolDataOffset = protocolVMAddr - protocolSectionStartVMAddr;
3022
3023 closure::Image::ObjCClassNameImageOffset protocolNameTarget;
3024 protocolNameTarget.classNameImageIndex = (uint32_t)hashTableVMOffsetsIndex;
3025 protocolNameTarget.classNameImageOffset = (uint32_t)protocolNameOffset;
3026
3027 dyld3::closure::Image::ObjCClassImageOffset protocolDataTarget;
3028 protocolDataTarget.classData.imageIndex = (uint32_t)hashTableVMOffsetsIndex;
3029 protocolDataTarget.classData.imageOffset = (uint32_t)protocolDataOffset;
3030 protocolDataTarget.classData.isDuplicate = 0;
3031
3032 seenProtocols.push_back({ protocolNameTarget, protocolDataTarget });
3033 });
3034 }
3035
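// Scans the main executable's __DATA,__objc_dupclass section (if present) for
// class names whose duplication is intentional, so the duplicate-class
// warning can be suppressed for them.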
3036 void ClosureBuilder::parseObjCClassDuplicates(Map<const char*, bool, HashCString, EqualCString>& duplicateClassesToIgnore) {
3037 const ClosureBuilder::BuilderLoadedImage& mainLi = _loadedImages[_mainProgLoadIndex];
3038
3039 const dyld3::MachOAnalyzer* ma = mainLi.loadAddress();
3040
3041 const uint32_t pointerSize = ma->pointerSize();
3042 const intptr_t slide = ma->getSlide();
3043 const dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(mainLi.contentRebased);
3044
3045 uint64_t sectionSize = 0;
3046 const void* section = ma->findSectionContent("__DATA", "__objc_dupclass", sectionSize);
3047
3048 if ( !section )
3049 return;
3050
3051 // Ignore sections which are the wrong size
3052 if ( (sectionSize % pointerSize) != 0 )
3053 return;
3054
3055 // Copied from objc-abi.h
3056 typedef struct _objc_duplicate_class {
3057 uint32_t version;
3058 uint32_t flags;
3059 const char name[64];
3060 } objc_duplicate_class;
3061
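// Each pointer-sized entry in the section (possibly chained-fixup encoded,
// hence the vmAddrConverter) is the vmaddr of an objc_duplicate_class record
// whose name is added to the ignore list.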
3062 for (uint64_t offset = 0; offset != sectionSize; offset += pointerSize) {
3063 uint64_t vmAddr = *(uint64_t*)((uint64_t)section + offset);
3064 vmAddr = vmAddrConverter.convertToVMAddr(vmAddr);
3065 const objc_duplicate_class* duplicateClass = (const objc_duplicate_class*)(vmAddr + slide);
3066 duplicateClassesToIgnore.insert({ duplicateClass->name, true });
3067 }
3068 }
3069
3070 // used at launch by dyld when kernel has already mapped main executable
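// A minimal sketch of a call site (argument values illustrative; the real
// setup lives in dyld and dyld_closure_util):
//   ClosureBuilder builder(closure::kFirstLaunchClosureImageNum, fileSystem,
//                          rootsChecker, dyldCache, /*dyldCacheIsLive*/true,
//                          archs, pathOverrides, atPathHandling,
//                          /*allowRelativePaths*/true, &errorInfo, platform,
//                          /*dylibFixupHandler*/nullptr);
//   const LaunchClosure* closure = builder.makeLaunchClosure(fileInfo,
//                                                            /*allowInsertFailures*/false);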
3071 const LaunchClosure* ClosureBuilder::makeLaunchClosure(const LoadedFileInfo& fileInfo, bool allowInsertFailures)
3072 {
3073 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
3074 const mach_header* mainMH = (const mach_header*)fileInfo.fileContent;
3075 // set up stack based storage for all arrays
3076 BuilderLoadedImage loadImagesStorage[512];
3077 Image::LinkedImage dependenciesStorage[512*8];
3078 InterposingTuple tuplesStorage[64];
3079 Closure::PatchEntry cachePatchStorage[64];
3080 _loadedImages.setInitialStorage(loadImagesStorage, 512);
3081 _dependencies.setInitialStorage(dependenciesStorage, 512*8);
3082 _interposingTuples.setInitialStorage(tuplesStorage, 64);
3083 _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
3084 ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });
3085
3086 const MachOAnalyzer* mainExecutable = MachOAnalyzer::validMainExecutable(_diag, mainMH, fileInfo.path, fileInfo.sliceLen, _archs, _platform);
3087 if ( mainExecutable == nullptr )
3088 return nullptr;
3089 if ( !mainExecutable->isDynamicExecutable() ) {
3090 _diag.error("not a main executable");
3091 return nullptr;
3092 }
3093 if ( _platform == Platform::macOS ) {
3094 // If this is an iOSMac program running on macOS, switch platforms
3095 if ( mainExecutable->builtForPlatform(Platform::iOSMac, true) ) {
3096 //_platform = Platform::iOSMac;
3097 Platform* selfPlatform = const_cast<Platform*>(&_platform);
3098 *selfPlatform = Platform::iOSMac;
3099 }
3100 #if (TARGET_OS_OSX && TARGET_CPU_ARM64)
3101 else if ( mainExecutable->builtForPlatform(Platform::iOS, true) ) {
3102 //_platform = Platform::iOS;
3103 Platform* selfPlatform = const_cast<Platform*>(&_platform);
3104 *selfPlatform = Platform::iOS;
3105 }
3106 #endif
3107 if ( mainExecutable->usesObjCGarbageCollection() ) {
3108 _diag.error("program requires ObjC Garbage Collection which is no longer supported");
3109 return nullptr;
3110 }
3111 }
3112 // <rdar://problem/63308841> licenseware apps that zero out lazy bind opcodes cannot be pre-bound
3113 if ( mainExecutable->hasStompedLazyOpcodes() )
3114 _makeMinimalClosure = true;
3115
3116 _isLaunchClosure = true;
3117 _allowMissingLazies = true;
3118
3119 #if BUILDING_CACHE_BUILDER
3120 _makingClosuresInCache = true;
3121 #endif
3122
3123 _nextIndex = 0;
3124
3125 // add main executable
3126 __block BuilderLoadedImage mainEntry;
3127 mainEntry.loadedFileInfo = fileInfo;
3128 mainEntry.imageNum = 0; // We can't fill this in until we've processed inserted dylibs
3129 mainEntry.unmapWhenDone = false;
3130 mainEntry.contentRebased = false;
3131 mainEntry.hasInits = false;
3132 mainEntry.markNeverUnload = true;
3133 mainEntry.rtldLocal = false;
3134 mainEntry.isBadImage = false;
3135 mainEntry.mustBuildClosure = true;
3136 mainEntry.hasMissingWeakImports = false;
3137 mainEntry.hasInterposingTuples = false; // only dylibs not in the dyld cache can have interposing tuples
3138 mainEntry.overrideImageNum = 0;
3139 mainEntry.exportsTrieOffset = 0;
3140 mainEntry.exportsTrieSize = 0;
3141
3142 // Set the executable load path so that @executable_path can use it later
3143 _mainProgLoadPath = fileInfo.path;
3144
3145 // add any DYLD_INSERT_LIBRARIES
3146 _pathOverrides.forEachInsertedDylib(^(const char* dylibPath, bool &stop) {
3147 LoadedImageChain chainMain = { nullptr, mainEntry };
3148 BuilderLoadedImage* foundTopImage;
3149 if ( !findImage(dylibPath, chainMain, foundTopImage, LinkageType::kInserted, 0, true) ) {
3150 if ( !allowInsertFailures ) {
3151 if ( _diag.noError() )
3152 // if no other error was reported while trying to find the library, that means it is missing
3153 _diag.error("could not load inserted dylib '%s' because image not found", dylibPath);
3154 stop = true;
3155 return;
3156 }
3157 _diag.clearError(); // FIXME add way to plumb back warning
3158 }
3159 });
3160
3161 if ( _diag.hasError() )
3162 return nullptr;
3163
3164 _mainProgLoadIndex = (uint32_t)_loadedImages.count();
3165 mainEntry.imageNum = _startImageNum + _nextIndex++;
3166 _loadedImages.push_back(mainEntry);
3167
3168 // get mach_headers for all images needed to launch this main executable
3169 LoadedImageChain chainStart = { nullptr, _loadedImages[_mainProgLoadIndex] };
3170 recursiveLoadDependents(chainStart);
3171 if ( _diag.hasError() )
3172 return nullptr;
3173 for (uint32_t i=0; i < _mainProgLoadIndex; ++i) {
3174 LoadedImageChain insertChainStart = { nullptr, _loadedImages[i] };
3175 recursiveLoadDependents(insertChainStart);
3176 if ( _diag.hasError() )
3177 return nullptr;
3178 }
3179 loadDanglingUpwardLinks();
3180
3181 // If we have an on-disk image then we need all images which are dependent on the disk image to get a new
3182 // initializer order. It's not enough to just do the top level image as we may dlopen while already in a dlopen
3183 invalidateInitializerRoots();
3184
3185 // now that everything loaded, set _libDyldImageNum and _libSystemImageNum
3186 for (BuilderLoadedImage& li : _loadedImages) {
3187 if ( mainExecutable->builtForPlatform(Platform::driverKit) ) {
3188 if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/System/DriverKit/usr/lib/system/libdyld.dylib") == 0) )
3189 _libDyldImageNum = li.imageNum;
3190 else if ( strcmp(li.path(), "/System/DriverKit/usr/lib/libSystem.dylib") == 0 )
3191 _libSystemImageNum = li.imageNum;
3192 } else {
3193 if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) )
3194 _libDyldImageNum = li.imageNum;
3195 else if ( strcmp(li.path(), "/usr/lib/libSystem.B.dylib") == 0 )
3196 _libSystemImageNum = li.imageNum;
3197 }
3198 // don't use minimal closures when interposing is in play because we don't have runtime support to do interposing
3199 if ( li.hasInterposingTuples ) {
3200 _makeMinimalClosure = false;
3201 _leaveRebasesAsOpcodes = false;
3202 }
3203 }
3204
3205 // only some images need to go into closure (non-rooted ones from dyld cache do not)
3206 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3207 for (BuilderLoadedImage& li : _loadedImages) {
3208 if ( li.mustBuildClosure ) {
3209 writers.push_back(ImageWriter());
3210 buildImage(writers.back(), li);
3211 if ( _diag.hasError() )
3212 return nullptr;
3213 }
3214 }
3215
3216 // only build objc closure info when building full closures
3217 bool optimizedObjC = !_makeMinimalClosure && optimizeObjC(writers);
3218
3219 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3220 for (uintptr_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
3221 BuilderLoadedImage& li = _loadedImages[imageIndex];
3222 if ( li.mustBuildClosure ) {
3223 computeInitOrder(writers[writerIndex], (uint32_t)imageIndex);
3224 writerIndex++;
3225 }
3226 }
3227
3228 // combine all Image objects into one ImageArray
3229 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3230 for (ImageWriter& writer : writers) {
3231 imageArrayWriter.appendImage(writer.finalize());
3232 writer.deallocate();
3233 }
3234 const ImageArray* imageArray = imageArrayWriter.finalize();
3235
3236 // merge ImageArray object into LaunchClosure object
3237 __block LaunchClosureWriter closureWriter(imageArray);
3238
3239 if (optimizedObjC) {
3240 if (!_objcSelectorsHashTable.empty())
3241 closureWriter.setObjCSelectorInfo(_objcSelectorsHashTable, _objcSelectorsHashTableImages);
3242
3243 if (!_objcClassesHashTableImages.empty()) {
3244 closureWriter.setObjCClassAndProtocolInfo(_objcClassesHashTable, _objcProtocolsHashTable,
3245 _objcClassesHashTableImages);
3246 }
3247
3248 if ( _objcDuplicateClassWarnings != nullptr ) {
3249 _objcDuplicateClassWarnings->forEachPath(^(const char* warning) {
3250 closureWriter.addWarning(Closure::Warning::duplicateObjCClass, warning);
3251 });
3252 }
3253
3254 if (!_objcClassesDuplicatesHashTable.empty())
3255 closureWriter.setObjCDuplicateClassesInfo(_objcClassesDuplicatesHashTable);
3256 }
3257
3258 // record shared cache info
3259 if ( _dyldCache != nullptr ) {
3260 // record cache UUID
3261 uuid_t cacheUUID;
3262 _dyldCache->getUUID(cacheUUID);
3263 closureWriter.setDyldCacheUUID(cacheUUID);
3264
3265 // record any cache patching needed because of dylib overriding cache
3266 for (const BuilderLoadedImage& li : _loadedImages) {
3267 if ( li.overrideImageNum != 0 ) {
3268 uint32_t imageIndex = li.overrideImageNum - (uint32_t)_dyldImageArray->startImageNum();
3269 STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, _dyldCache->patchableExportCount(imageIndex));
3270 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
3271 return (const MachOLoaded*)findDependent(mh, depIndex);
3272 };
3273 //fprintf(stderr, "'%s' overrides something in cache\n", li.loadedFileInfo.path);
3274 _dyldCache->forEachPatchableExport(imageIndex, ^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
3275 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
3276 Diagnostics patchDiag;
3277 Closure::PatchEntry patch;
3278 patch.overriddenDylibInCache = li.overrideImageNum;
3279 patch.exportCacheOffset = cacheOffsetOfImpl;
3280 if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
3281 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
3282 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
3283 patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
3284 patch.replacement.image.offset = foundInfo.value;
3285 }
3286 else {
3287 // this means the symbol is missing in the cache override dylib, so check if it moved to a sibling
3288 // <rdar://problem/59196856> allow patched impls to move between re-export sibling dylibs
3289 bool foundViaParent = false;
3290 for (const BuilderLoadedImage& li2 : _loadedImages) {
3291 if ( (li2.overrideImageNum != 0) && (li2.imageNum != li.imageNum) ) {
3292 for (Image::LinkedImage aDep : li2.dependents) {
3293 if ( (aDep.kind() == Image::LinkKind::reExport) && (aDep.imageNum() == li.imageNum) ) {
3294 if ( li2.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
3295 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
3296 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
3297 patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
3298 patch.replacement.image.offset = foundInfo.value;
3299 foundViaParent = true;
3300 //fprintf(stderr, "found patch target '%s' previously in '%s', now in '%s'\n", symbolName, li.path(), li2.path());
3301 break;
3302 }
3303 }
3304 }
3305 }
3306 }
3307 if ( !foundViaParent ) {
3308 // symbol is missing from override, set other cached dylibs that used it to NULL
3309 //fprintf(stderr, "could not find symbol '%s' in %s \n", symbolName, li.path());
3310 patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
3311 patch.replacement.absolute.value = 0;
3312 }
3313 }
3314 patches.push_back(patch);
3315 });
3316 closureWriter.addCachePatches(patches);
3317 }
3318 }
3319
3320 // record any cache patching needed because weak-def C++ symbols override dyld cache
3321 if ( !_weakDefCacheOverrides.empty() ) {
3322 closureWriter.addCachePatches(_weakDefCacheOverrides);
3323 }
3324 }
3325
3326 #if TARGET_OS_OSX
3327 uint32_t progVarsOffset;
3328 if ( mainExecutable->hasProgramVars(_diag, progVarsOffset) ) {
3329 // on macOS binaries may have a __dyld section that has ProgramVars to use
3330 closureWriter.setHasProgramVars(progVarsOffset);
3331 }
3332 if ( _diag.hasError() )
3333 return nullptr;
3334 #endif
3335
3336 // record any interposing info
3337 if ( !_interposingDisabled ) {
3338 imageArray->forEachImage(^(const Image* image, bool &stop) {
3339 if ( !image->inDyldCache() )
3340 addInterposingTuples(closureWriter, image, findLoadedImage(image->imageNum()).loadAddress());
3341 });
3342 }
3343
3344 // modify fixups in contained Images by applying interposing tuples
3345 closureWriter.applyInterposing((const LaunchClosure*)closureWriter.currentTypedBytes());
3346
3347 // set flags
3348 closureWriter.setUsedInterposing(_interposingTuplesUsed);
3349 closureWriter.setUsedAtPaths(_atPathUsed);
3350 closureWriter.setUsedFallbackPaths(_fallbackPathUsed);
3351 closureWriter.setHasInsertedLibraries(_mainProgLoadIndex > 0);
3352 closureWriter.setInitImageCount((uint32_t)_loadedImages.count());
3353
3354 // add other closure attributes
3355 addClosureInfo(closureWriter);
3356
3357 // make result
3358 const LaunchClosure* result = closureWriter.finalize();
3359 imageArrayWriter.deallocate();
3360
3361 timer.setData4(dyld3::DyldTimingBuildClosure::LaunchClosure_Built);
3362
3363 return result;
3364 }
3365
3366 // used by libdyld for dlopen()
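// Result conventions (derived from the code below):
//   - a freshly built DlopenClosure, with *topImageNum set;
//   - nullptr with *topImageNum set when no new closure is needed (image
//     already loaded, or covered by a prebuilt shared cache closure);
//   - nullptr on failure, with the error recorded in _diag;
//   - sRetryDlopenClosure when roots were discovered after trying a shared
//     cache closure, so the caller must rebuild without one.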
3367 const DlopenClosure* ClosureBuilder::makeDlopenClosure(const char* path, const LaunchClosure* mainClosure, const Array<LoadedImage>& alreadyLoadedList,
3368 closure::ImageNum callerImageNum, bool noLoad, bool forceBindLazies, bool canUseSharedCacheClosure, closure::ImageNum* topImageNum)
3369 {
3370 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
3371 // set up stack based storage for all arrays
3372 BuilderLoadedImage loadImagesStorage[256];
3373 Image::LinkedImage dependenciesStorage[128];
3374 Closure::PatchEntry cachePatchStorage[64];
3375 _loadedImages.setInitialStorage(loadImagesStorage, 256);
3376 _dependencies.setInitialStorage(dependenciesStorage, 128);
3377 _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
3378 ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });
3379
3380 // fill in builder array from already loaded images
3381 bool cachedDylibsExpectedOnDisk = _dyldCache ? _dyldCache->header.dylibsExpectedOnDisk : true;
3382 uintptr_t callerImageIndex = UINTPTR_MAX;
3383 for (const LoadedImage& ali : alreadyLoadedList) {
3384 const Image* image = ali.image();
3385 const MachOAnalyzer* ma = (MachOAnalyzer*)(ali.loadedAddress());
3386 bool inDyldCache = ma->inDyldCache();
3387 BuilderLoadedImage entry;
3388 ImageNum overrideImageNum;
3389 entry.loadedFileInfo.path = image->path();
3390 entry.loadedFileInfo.fileContent = ma;
3391 entry.loadedFileInfo.sliceOffset = 0;
3392 entry.loadedFileInfo.inode = 0;
3393 entry.loadedFileInfo.mtime = 0;
3394 entry.imageNum = image->imageNum();
3395 entry.dependents = image->dependentsArray();
3396 entry.unmapWhenDone = false;
3397 entry.contentRebased = inDyldCache;
3398 entry.hasInits = false;
3399 entry.markNeverUnload = image->neverUnload();
3400 entry.rtldLocal = ali.hideFromFlatSearch();
3401 entry.isBadImage = false;
3402 entry.mustBuildClosure = false;
3403 entry.hasMissingWeakImports = false;
3404 entry.hasInterposingTuples = !inDyldCache && ma->isDylib() && ma->hasInterposingTuples();
3405 entry.overrideImageNum = 0;
3406 entry.exportsTrieOffset = 0;
3407 entry.exportsTrieSize = 0;
3408 if ( image->isOverrideOfDyldCacheImage(overrideImageNum) ) {
3409 entry.overrideImageNum = overrideImageNum;
3410 canUseSharedCacheClosure = false;
3411 }
3412 if ( !inDyldCache || cachedDylibsExpectedOnDisk )
3413 image->hasFileModTimeAndInode(entry.loadedFileInfo.inode, entry.loadedFileInfo.mtime);
3414 if ( entry.imageNum == callerImageNum )
3415 callerImageIndex = _loadedImages.count();
3416 _loadedImages.push_back(entry);
3417 }
3418 _alreadyInitedIndex = (uint32_t)_loadedImages.count();
3419
3420 // find main executable (may be needed for @executable_path)
3421 _isLaunchClosure = false;
3422 for (uint32_t i=0; i < alreadyLoadedList.count(); ++i) {
3423 if ( _loadedImages[i].loadAddress()->isMainExecutable() ) {
3424 _mainProgLoadIndex = i;
3425 _mainProgLoadPath = _loadedImages[i].path();
3426 break;
3427 }
3428 }
3429
3430 // We can't use an existing dlopen closure if the main closure had interposing tuples
3431 if (canUseSharedCacheClosure) {
3432 if (mainClosure->hasInterposings())
3433 canUseSharedCacheClosure = false;
3434 }
3435
3436 // add top level dylib being dlopen()ed
3437 BuilderLoadedImage* foundTopImage = nullptr;
3438 _nextIndex = 0;
3439 // @rpath uses caller's LC_RPATH, then main executable's LC_RPATH
3440 BuilderLoadedImage& callerImage = (callerImageIndex != UINTPTR_MAX) ? _loadedImages[callerImageIndex] : _loadedImages[_mainProgLoadIndex];
3441 LoadedImageChain chainMain = { nullptr, _loadedImages[_mainProgLoadIndex] };
3442 LoadedImageChain chainCaller = { &chainMain, callerImage };
3443 if ( !findImage(path, chainCaller, foundTopImage, LinkageType::kDynamic, 0, canUseSharedCacheClosure) ) {
3444 // If we didn't find the image, it might be a symlink to something in the dyld cache that is not on disk
3445 if ( (_dyldCache != nullptr) && !_dyldCache->header.dylibsExpectedOnDisk ) {
3446 char resolvedPath[PATH_MAX];
3447 if ( _fileSystem.getRealPath(path, resolvedPath) ) {
3448 _diag.clearError();
3449 if ( !findImage(resolvedPath, chainMain, foundTopImage, LinkageType::kDynamic, 0, canUseSharedCacheClosure) ) {
3450 return nullptr;
3451 }
3452 } else {
3453 // We didn't find a new path from realpath
3454 return nullptr;
3455 }
3456 } else {
3457 // cached dylibs are on disk, so don't call realpath() again; it would have been found by the first call to findImage()
3458 return nullptr;
3459 }
3460 }
3461
3462 // exit early in RTLD_NOLOAD mode
3463 if ( noLoad ) {
3464 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_NoLoad);
3465 // if no new images were added to _loadedImages, then the requested path was already loaded
3466 if ( (uint32_t)_loadedImages.count() == _alreadyInitedIndex )
3467 *topImageNum = foundTopImage->imageNum;
3468 else
3469 *topImageNum = 0;
3470 return nullptr;
3471 }
3472
3473 // fast path if roots are not allowed and the target is a dyld cache dylib or another image with a prebuilt shared cache closure
3474 if ( (_dyldCache != nullptr) && (_dyldCache->header.cacheType == kDyldSharedCacheTypeProduction) ) {
3475 if ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) {
3476 if (foundTopImage->imageNum < closure::kLastDyldCacheImageNum)
3477 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheDylib);
3478 else
3479 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheOther);
3480 *topImageNum = foundTopImage->imageNum;
3481 return nullptr;
3482 }
3483 }
3484
3485 // recursive load dependents
3486 // @rpath for the top dylib's dependents uses LC_RPATHs from the caller, the main executable, and the dylib being dlopen()ed
3487 LoadedImageChain chainTopDylib = { &chainMain, *foundTopImage };
3488 recursiveLoadDependents(chainTopDylib, canUseSharedCacheClosure);
3489 if ( _diag.hasError() )
3490 return nullptr;
3491 loadDanglingUpwardLinks(canUseSharedCacheClosure);
3492 if ( _diag.hasError() )
3493 return nullptr;
3494
3495 // RTLD_NOW means fail the dlopen() if a symbol cannot be bound
3496 _allowMissingLazies = !forceBindLazies;
3497
3498 // If we got this far, we are not using a prebuilt dlopen-closure
3499 // Since dlopen closures are never saved to disk, don't put fixups into the closure
3500 // Except if interposing is used, since we don't have plumbing to apply interposing dynamically
3501 _makeMinimalClosure = !mainClosure->hasInterposings();
3502
3503 // only some images need to go into closure (ones from dyld cache do not, unless the cache format changed)
3504 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3505 if ( _foundNonCachedImage || _foundDyldCacheRoots ) {
3506 // If we have an on-disk image then we need all images which are dependent on the disk image to get a new
3507 // initializer order. It's not enough to just do the top level image as we may dlopen while already in a dlopen
3508 invalidateInitializerRoots();
3509
3510 for (uintptr_t loadedImageIndex = 0; loadedImageIndex != _loadedImages.count(); ++loadedImageIndex) {
3511 BuilderLoadedImage& li = _loadedImages[loadedImageIndex];
3512 if ( li.mustBuildClosure ) {
3513 writers.push_back(ImageWriter());
3514 buildImage(writers.back(), li);
3515 if ( _diag.hasError() )
3516 return nullptr;
3517 }
3518 }
3519
3520 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3521 for (uintptr_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
3522 BuilderLoadedImage& li = _loadedImages[imageIndex];
3523 if ( li.mustBuildClosure ) {
3524 computeInitOrder(writers[writerIndex], (uint32_t)imageIndex);
3525 writerIndex++;
3526 }
3527 }
3528 }
3529 if ( _diag.hasError() )
3530 return nullptr;
3531
3532 // check if top image loaded is in shared cache along with everything it depends on
3533 *topImageNum = foundTopImage->imageNum;
3534 if ( _foundNonCachedImage || _foundDyldCacheRoots ) {
3535 if ( canUseSharedCacheClosure && ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) ) {
3536 // We used a shared cache built closure, but now discovered roots. We need to try again
3537 *topImageNum = 0;
3538 return sRetryDlopenClosure;
3539 }
3540 } else {
3541 if (foundTopImage->imageNum < closure::kLastDyldCacheImageNum)
3542 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheDylib);
3543 else
3544 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheOther);
3545 return nullptr;
3546 }
3547
3548 // combine all Image objects into one ImageArray
3549 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3550 for (ImageWriter& writer : writers) {
3551 imageArrayWriter.appendImage(writer.finalize());
3552 writer.deallocate();
3553 }
3554 const ImageArray* imageArray = imageArrayWriter.finalize();
3555
3556 // merge ImageArray object into LaunchClosure object
3557 DlopenClosureWriter closureWriter(imageArray);
3558
3559 // add other closure attributes
3560 closureWriter.setTopImageNum(foundTopImage->imageNum);
3561
3562 // record any cache patching needed because of dylib overriding cache
3563 if ( _dyldCache != nullptr ) {
3564 for (const BuilderLoadedImage& li : _loadedImages) {
3565 if ( (li.overrideImageNum != 0) && (li.imageNum >= _startImageNum) ) {
3566 const Image* cacheImage = _dyldImageArray->imageForNum(li.overrideImageNum);
3567 uint32_t imageIndex = cacheImage->imageNum() - (uint32_t)_dyldCache->cachedDylibsImageArray()->startImageNum();
3568 STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, _dyldCache->patchableExportCount(imageIndex));
3569 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
3570 return (const MachOLoaded*)findDependent(mh, depIndex);
3571 };
3572 //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
3573 _dyldCache->forEachPatchableExport(imageIndex,
3574 ^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
3575 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
3576 Diagnostics patchDiag;
3577 Closure::PatchEntry patch;
3578 patch.overriddenDylibInCache = li.overrideImageNum;
3579 patch.exportCacheOffset = cacheOffsetOfImpl;
3580 if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
3581 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
3582 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
3583 patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
3584 patch.replacement.image.offset = foundInfo.value;
3585 }
3586 else {
3587 patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
3588 patch.replacement.absolute.value = 0;
3589 }
3590 patches.push_back(patch);
3591 });
3592 closureWriter.addCachePatches(patches);
3593 }
3594 }
3595 }
3596
3597 // modify fixups in contained Images by applying interposing tuples
3598 closureWriter.applyInterposing(mainClosure);
3599
3600 // dlopen closures should never keep track of missing paths as we don't cache these closures.
3601 assert(_mustBeMissingPaths == nullptr);
3602
3603 // make final DlopenClosure object
3604 const DlopenClosure* result = closureWriter.finalize();
3605 imageArrayWriter.deallocate();
3606 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_Built);
3607 return result;
3608 }
3609
3610
3611 // used by dyld_closure_util
3612 const LaunchClosure* ClosureBuilder::makeLaunchClosure(const char* mainPath, bool allowInsertFailures)
3613 {
3614 char realerPath[MAXPATHLEN];
3615 closure::LoadedFileInfo loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, mainPath, _archs, _platform, realerPath);
3616 if ( _diag.hasError() )
3617 return nullptr;
3618 loadedFileInfo.path = mainPath;
3619 const MachOAnalyzer* mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
3620 if (mh == nullptr) {
3621 _diag.error("could not load file");
3622 return nullptr;
3623 }
3624 const_cast<PathOverrides*>(&_pathOverrides)->setMainExecutable(mh, mainPath);
3625 const LaunchClosure* launchClosure = makeLaunchClosure(loadedFileInfo, allowInsertFailures);
3626 loadedFileInfo.unload(loadedFileInfo);
3627 return launchClosure;
3628 }
3629
3630 void ClosureBuilder::setDyldCacheInvalidFormatVersion() {
3631 _dyldCacheInvalidFormatVersion = true;
3632 }
3633
3634
3635 // used by dyld shared cache builder
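// Builds one closure Image per cached dylib, with imageNums starting at
// _startImageNum, wiring up inter-dylib dependents and initializer order for
// embedding in the shared cache.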
3636 const ImageArray* ClosureBuilder::makeDyldCacheImageArray(const Array<CachedDylibInfo>& dylibs, const Array<CachedDylibAlias>& aliases)
3637 {
3638 // because this is run in cache builder using dispatch_apply() there is minimal stack space
3639 // so set up storage for all arrays to be vm_allocated
3640 uintptr_t maxImageCount = dylibs.count() + 16;
3641 _loadedImages.reserve(maxImageCount);
3642 _dependencies.reserve(maxImageCount*16);
3643
3644 _makingDyldCacheImages = true;
3645 _allowMissingLazies = false;
3646 _aliases = &aliases;
3647
3648 // build _loadedImages[] with every dylib in cache
3649 __block ImageNum imageNum = _startImageNum;
3650 for (const CachedDylibInfo& aDylibInfo : dylibs) {
3651 BuilderLoadedImage entry;
3652 entry.loadedFileInfo = aDylibInfo.fileInfo;
3653 entry.imageNum = imageNum++;
3654 entry.unmapWhenDone = false;
3655 entry.contentRebased = false;
3656 entry.hasInits = false;
3657 entry.markNeverUnload = true;
3658 entry.rtldLocal = false;
3659 entry.isBadImage = false;
3660 entry.mustBuildClosure = false;
3661 entry.hasMissingWeakImports = false;
3662 entry.hasInterposingTuples = false; // dylibs in dyld cache cannot have interposing tuples
3663 entry.overrideImageNum = 0;
3664 entry.exportsTrieOffset = 0;
3665 entry.exportsTrieSize = 0;
3666 _loadedImages.push_back(entry);
3667 }
3668
3669 // wire up dependencies between cached dylibs
3670 for (BuilderLoadedImage& li : _loadedImages) {
3671 LoadedImageChain chainStart = { nullptr, li };
3672 recursiveLoadDependents(chainStart);
3673 if ( _diag.hasError() )
3674 break;
3675 }
3676 assert(_loadedImages.count() == dylibs.count());
3677
3678 // create an ImageWriter for each cached dylib
3679 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3680 for (BuilderLoadedImage& li : _loadedImages) {
3681 writers.push_back(ImageWriter());
3682 buildImage(writers.back(), li);
3683 }
3684
3685 // add initializer order into each dylib
3686 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3687 for (const BuilderLoadedImage& li : _loadedImages) {
3688 uint32_t index = li.imageNum - _startImageNum;
3689 computeInitOrder(writers[index], index);
3690 }
3691
3692 // combine all Image objects into one ImageArray
3693 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3694 for (ImageWriter& writer : writers) {
3695 imageArrayWriter.appendImage(writer.finalize());
3696 writer.deallocate();
3697 }
3698 const ImageArray* imageArray = imageArrayWriter.finalize();
3699
3700 return imageArray;
3701 }
3702
3703
3704 #if BUILDING_CACHE_BUILDER
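// Builds prebuilt dlopen closure images for OS dylibs that live outside the
// shared cache. Dylibs whose dependents fail to load, or that depend on
// dylibs outside the shared cache, are marked invalid rather than failing
// the whole array.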
3705 const ImageArray* ClosureBuilder::makeOtherDylibsImageArray(const Array<LoadedFileInfo>& otherDylibs, uint32_t cachedDylibsCount)
3706 {
3707 // because this is run in cache builder using dispatch_apply() there is minimal stack space
3708 // so set up storage for all arrays to be vm_allocated
3709 uintptr_t maxImageCount = otherDylibs.count() + cachedDylibsCount + 128;
3710 _loadedImages.reserve(maxImageCount);
3711 _dependencies.reserve(maxImageCount*16);
3712
3713 // build _loadedImages[] with every dylib in cache, followed by others
3714 _nextIndex = 0;
3715 for (const LoadedFileInfo& aDylibInfo : otherDylibs) {
3716 auto *mh = (const MachOAnalyzer*)aDylibInfo.fileContent;
3717
3718 BuilderLoadedImage entry;
3719 entry.loadedFileInfo = aDylibInfo;
3720 entry.imageNum = _startImageNum + _nextIndex++;
3721 entry.unmapWhenDone = false;
3722 entry.contentRebased = false;
3723 entry.hasInits = false;
3724 entry.markNeverUnload = mh->markNeverUnload(_diag);
3725 entry.rtldLocal = false;
3726 entry.isBadImage = false;
3727 entry.mustBuildClosure = false;
3728 entry.hasMissingWeakImports = false;
3729 entry.hasInterposingTuples = false; // all images here have passed canHavePrecomputedDlopenClosure() which does not allow interposing tuples
3730 entry.overrideImageNum = 0;
3731 entry.exportsTrieOffset = 0;
3732 entry.exportsTrieSize = 0;
3733 _loadedImages.push_back(entry);
3734 }
3735
3736 // wire up dependencies between these other dylibs
3737 // Note, _loadedImages can grow when we call recursiveLoadDependents so we need
3738 // to check the count on each iteration.
3739 for (uint64_t index = 0; index != _loadedImages.count(); ++index) {
3740 BuilderLoadedImage& li = _loadedImages[index];
3741 LoadedImageChain chainStart = { nullptr, li };
3742 recursiveLoadDependents(chainStart);
3743 if ( _diag.hasError() ) {
3744 _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3745 //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3746 _diag.clearError();
3747 li.isBadImage = true; // mark bad
3748 }
3749 }
3750
3751 auto invalidateBadImages = [&]() {
3752 // Invalidate images with bad dependencies
3753 while (true) {
3754 bool madeChange = false;
3755 for (BuilderLoadedImage& li : _loadedImages) {
3756 if (li.isBadImage) {
3757 // Already invalidated
3758 continue;
3759 }
3760 for (Image::LinkedImage depIndex : li.dependents) {
3761 if ( depIndex.imageNum() == kMissingWeakLinkedImage )
3762 continue;
3763 if ( depIndex.imageNum() >= dyld3::closure::kLastDyldCacheImageNum ) {
3764 // dlopen closures can only depend on the shared cache. This is because if foo.dylib links bar.dylib
3765 // and bar.dylib is loaded into the launch closure, then the dlopen closure for foo.dylib wouldn't see
3766 // bar.dylib at the image num in the launch closure
3767 _diag.warning("while building dlopen closure for %s: dependent dylib is not from shared cache", li.loadedFileInfo.path);
3768 li.isBadImage = true; // mark bad
3769 madeChange = true;
3770 continue;
3771 }
3772 BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
3773 if (depImage.isBadImage) {
3774 _diag.warning("while building dlopen closure for %s: dependent dylib had error", li.loadedFileInfo.path);
3775 li.isBadImage = true; // mark bad
3776 madeChange = true;
3777 }
3778 }
3779 }
3780 if (!madeChange)
3781 break;
3782 }
3783 };
3784
3785 invalidateBadImages();
3786
3787 // create an ImageWriter for each dylib that is not in the dyld cache
3788 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3789 for (BuilderLoadedImage& li : _loadedImages) {
3790 if ( li.isBadImage ) {
3791 writers.push_back(ImageWriter());
3792 writers.back().setInvalid();
3793 continue;
3794 }
3795 if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
3796 continue;
3797 writers.push_back(ImageWriter());
3798 buildImage(writers.back(), li);
3799 if ( _diag.hasError() ) {
3800 _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3801 //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3802 _diag.clearError();
3803 li.isBadImage = true; // mark bad
3804 writers.back().setInvalid();
3805 }
3806 }
3807
3808 invalidateBadImages();
3809
3810 // add initializer order into each dylib
3811 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3812 for (const BuilderLoadedImage& li : _loadedImages) {
3813 if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
3814 continue;
3815 if (li.isBadImage)
3816 continue;
3817 uint32_t index = li.imageNum - _startImageNum;
3818 computeInitOrder(writers[index], index);
3819 }
3820
3821 // combine all Image objects into one ImageArray
3822 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3823 for (ImageWriter& writer : writers) {
3824 imageArrayWriter.appendImage(writer.finalize());
3825 writer.deallocate();
3826 }
3827 const ImageArray* imageArray = imageArrayWriter.finalize();
3828
3829 return imageArray;
3830 }
3831 #endif
3832
3833
3834 bool ClosureBuilder::inLoadedImageArray(const Array<LoadedImage>& loadedList, ImageNum imageNum)
3835 {
3836 for (const LoadedImage& ali : loadedList) {
3837 if ( ali.image()->representsImageNum(imageNum) )
3838 return true;
3839 }
3840 return false;
3841 }
3842
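// Appends each dependent of 'image' to loadedList breadth-first, then
// recurses so every image reachable from 'image' appears exactly once in
// load order.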
3843 void ClosureBuilder::buildLoadOrderRecurse(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Image* image)
3844 {
3845 // breadth first load
3846 STACK_ALLOC_ARRAY(const Image*, needToRecurse, 256);
3847 image->forEachDependentImage(^(uint32_t dependentIndex, dyld3::closure::Image::LinkKind kind, ImageNum depImageNum, bool &stop) {
3848 if ( !inLoadedImageArray(loadedList, depImageNum) ) {
3849 const Image* depImage = ImageArray::findImage(imagesArrays, depImageNum);
3850 loadedList.push_back(LoadedImage::make(depImage));
3851 needToRecurse.push_back(depImage);
3852 }
3853 });
3854
3855 // recurse load
3856 for (const Image* img : needToRecurse) {
3857 buildLoadOrderRecurse(loadedList, imagesArrays, img);
3858 }
3859 }
3860
3861 void ClosureBuilder::buildLoadOrder(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Closure* toAdd)
3862 {
3863 const dyld3::closure::Image* topImage = ImageArray::findImage(imagesArrays, toAdd->topImageNum());
3864 loadedList.push_back(LoadedImage::make(topImage));
3865 buildLoadOrderRecurse(loadedList, imagesArrays, topImage);
3866 }
3867
3868
3869
3870 //////////////////////////// ObjCStringTable ////////////////////////////////////////
3871
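// Serializes a precomputed perfect hash into the closure: header fields
// (capacity, occupied, shift, mask, salt), the scramble and tab arrays, one
// string target per slot (sentinel when empty), and one check byte per slot
// so lookups can reject strings that merely collide into an occupied slot.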
3872 template<typename PerfectHashT, typename ImageOffsetT>
3873 void ObjCStringTable::write(const PerfectHashT& phash, const Array<std::pair<const char*, ImageOffsetT>>& strings)
3874 {
3875 ObjCSelectorOpt::StringTarget sentinel = (ObjCSelectorOpt::StringTarget)ImageOffsetT::sentinelValue;
3876 // Set header
3877 capacity = phash.capacity;
3878 occupied = phash.occupied;
3879 shift = phash.shift;
3880 mask = phash.mask;
3881 sentinelTarget = sentinel;
3882 roundedTabSize = std::max(phash.mask+1, 4U);
3883 salt = phash.salt;
3884
3885 // Set hash data
3886 for (uint32_t i = 0; i < 256; i++) {
3887 scramble[i] = phash.scramble[i];
3888 }
3889 for (uint32_t i = 0; i < phash.mask+1; i++) {
3890 tab[i] = phash.tab[i];
3891 }
3892
3893 dyld3::Array<StringTarget> targetsArray = targets();
3894 dyld3::Array<StringHashCheckByte> checkBytesArray = checkBytes();
3895
3896 // Set offsets to the sentinel
3897 for (uint32_t i = 0; i < phash.capacity; i++) {
3898 targetsArray[i] = sentinel;
3899 }
3900 // Set checkbytes to 0
3901 for (uint32_t i = 0; i < phash.capacity; i++) {
3902 checkBytesArray[i] = 0;
3903 }
3904
3905 // Set real string offsets and checkbytes
3906 for (const auto& s : strings) {
3907 assert(s.second.raw != sentinelTarget);
3908 uint32_t h = hash(s.first);
3909 targetsArray[h] = s.second.raw;
3910 checkBytesArray[h] = checkbyte(s.first);
3911 }
3912 }
3913
3914 //////////////////////////// ObjCClassOpt ////////////////////////////////////////
3915
3916
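// Extends ObjCStringTable::write with per-class data: a name with exactly one
// class stores that class's image offset directly, while a name with N
// classes stores a duplicateData placeholder (count + start index) that
// points at N consecutive entries appended to the duplicates array.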
3917 template<typename PerfectHashT, typename ImageOffsetT, typename ClassesMapT>
3918 void ObjCClassOpt::write(const PerfectHashT& phash, const Array<std::pair<const char*, ImageOffsetT>>& strings,
3919 const ClassesMapT& classes, uint32_t preCalculatedDuplicateCount)
3920 {
3921 ObjCStringTable::write(phash, strings);
3922
3923 __block dyld3::Array<ClassTarget> classOffsetsArray = classOffsets();
3924 __block dyld3::Array<ClassTarget> duplicateOffsetsArray = duplicateOffsets(preCalculatedDuplicateCount);
3925
3926 // Set class offsets to the sentinel value
3927 for (uint32_t i = 0; i < capacity; i++) {
3928 classOffsetsArray[i].raw = dyld3::closure::Image::ObjCImageOffset::sentinelValue;
3929 }
3930
3931 classes.forEachEntry(^(const char *const &key, const Image::ObjCClassImageOffset **values, uint64_t valuesCount) {
3932 uint32_t keyIndex = getIndex(key);
3933 assert(keyIndex != indexNotFound);
3934 assert(classOffsetsArray[keyIndex].raw == dyld3::closure::Image::ObjCImageOffset::sentinelValue);
3935
3936 if (valuesCount == 1) {
3937 // Only one entry so write it in to the class offsets directly
3938 Image::ObjCClassImageOffset classImageOffset = *(values[0]);
3939 assert(classImageOffset.classData.isDuplicate == 0);
3940 classOffsetsArray[keyIndex] = classImageOffset;
3941 return;
3942 }
3943
3944 // We have more than one value. We add a placeholder to the class offsets which records the start
3945 // index and count of this class's run of entries in the duplicates array
3946 uint32_t dest = duplicateCount();
3947 duplicateCount() += valuesCount;
3948
3949 Image::ObjCClassImageOffset classImagePlaceholder;
3950 assert(valuesCount < (1 << 8));
3951 classImagePlaceholder.duplicateData.count = (uint32_t)valuesCount;
3952 classImagePlaceholder.duplicateData.index = dest;
3953 classImagePlaceholder.duplicateData.isDuplicate = 1;
3954 classOffsetsArray[keyIndex] = classImagePlaceholder;
3955
3956 for (uint64_t i = 0; i != valuesCount; ++i) {
3957 Image::ObjCClassImageOffset classImageOffset = *(values[i]);
3958 assert(classImageOffset.classData.isDuplicate == 0);
3959 duplicateOffsetsArray.push_back(classImageOffset);
3960 }
3961 });
3962 }
3963
3964
3965 } // namespace closure
3966 } // namespace dyld3