// apple/dyld dyld-832.7.1: dyld3/ClosureBuilder.cpp
/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <ext/__hash>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <sys/sysctl.h>

#include <mach-o/dyld_priv.h>

#include "ClosureWriter.h"
#include "ClosureBuilder.h"
#include "MachOAnalyzer.h"
#include "MachOAnalyzerSet.h"
#include "libdyldEntryVector.h"
#include "RootsChecker.h"
#include "Tracing.h"

#define CLOSURE_SELOPT_WRITE
#include "objc-shared-cache.h"

#if BUILDING_DYLD
namespace dyld { void log(const char*, ...); }
#endif

namespace dyld3 {
namespace closure {

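// Editor's note: sRetryDlopenClosure is a sentinel value, not a dereferenceable pointer.
// Callers appear to compare against it to detect that a pre-built dlopen closure could not
// be used as-is and that the closure should be rebuilt (e.g. without relying on a shared
// cache closure).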
const DlopenClosure* ClosureBuilder::sRetryDlopenClosure = (const DlopenClosure*)(-1);

ClosureBuilder::ClosureBuilder(uint32_t startImageNum, const FileSystem& fileSystem, const RootsChecker& rootsChecker,
                               const DyldSharedCache* dyldCache, bool dyldCacheIsLive,
                               const GradedArchs& archs, const PathOverrides& pathOverrides, AtPath atPathHandling, bool allowRelativePaths,
                               LaunchErrorInfo* errorInfo, Platform platform, DylibFixupHandler handler)
    : _fileSystem(fileSystem), _rootsChecker(rootsChecker), _dyldCache(dyldCache), _pathOverrides(pathOverrides), _archs(archs), _platform(platform), _startImageNum(startImageNum),
      _dylibFixupHandler(handler), _atPathHandling(atPathHandling), _launchErrorInfo(errorInfo), _dyldCacheIsLive(dyldCacheIsLive), _allowRelativePaths(allowRelativePaths)
{
    if ( dyldCache != nullptr ) {
        _dyldImageArray = dyldCache->cachedDylibsImageArray();
    }
}


ClosureBuilder::~ClosureBuilder() {
    if ( _tempPaths != nullptr )
        PathPool::deallocate(_tempPaths);
    if ( _mustBeMissingPaths != nullptr )
        PathPool::deallocate(_mustBeMissingPaths);
    if ( _objcDuplicateClassWarnings != nullptr )
        PathPool::deallocate(_objcDuplicateClassWarnings);
}

static bool iOSSupport(const char* path)
{
    return ( strncmp(path, "/System/iOSSupport/", 19) == 0 );
}
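
// Editor's note: e.g. iOSSupport("/System/iOSSupport/usr/lib/swift/libswiftUIKit.dylib")
// is true (a Catalyst/unzippered-twin install path), while iOSSupport("/usr/lib/libc++.1.dylib")
// is false. The 19 is strlen("/System/iOSSupport/").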

bool ClosureBuilder::findImage(const char* loadPath, const LoadedImageChain& forImageChain, BuilderLoadedImage*& foundImage, LinkageType linkageType,
                               uint32_t compatVersion, bool canUseSharedCacheClosure)
{
    // There shouldn't be an error here as the callers should stop trying to find more images if they get an error for an image
    _diag.assertNoError();

    __block bool result = false;

    // record if this is a non-overridable path
    bool pathIsInDyldCacheWhichCannotBeOverridden = false;
    bool dylibsExpectedOnDisk                     = true;
    if ( _dyldCache != nullptr ) {
        pathIsInDyldCacheWhichCannotBeOverridden = _dyldCache->hasNonOverridablePath(loadPath);
        dylibsExpectedOnDisk                     = _dyldCache->header.dylibsExpectedOnDisk;
    }

    // when building the dyld cache for macOS, if the requesting dylib is an iOSMac unzippered twin, tell the pathOverrides object to look in /System/iOSSupport first
    dyld3::Platform targetPlatform = _platform;
    if ( _makingDyldCacheImages && (_platform == dyld3::Platform::macOS) ) {
        if ( forImageChain.image.loadAddress()->builtForPlatform(Platform::iOSMac, true) )
            targetPlatform = Platform::iOSMac;
    }

    _pathOverrides.forEachPathVariant(loadPath, pathIsInDyldCacheWhichCannotBeOverridden, ^(const char* possibleVariantPath, bool isFallbackPath, bool& stopPathVariant) {

        // This check is within forEachPathVariant() to let DYLD_LIBRARY_PATH override LC_RPATH
        bool isRPath = (strncmp(possibleVariantPath, "@rpath/", 7) == 0);

        // passing a leaf name to dlopen() allows rpath searching for it
        // FIXME: Does this apply to DYLD_INSERT_LIBRARIES too?
        bool implictRPath = (linkageType == LinkageType::kDynamic) && (loadPath[0] != '/') && (loadPath == possibleVariantPath) && (_atPathHandling != AtPath::none);

        // expand @ paths
        forEachResolvedPathVar(possibleVariantPath, forImageChain, implictRPath, linkageType,
                               ^(const char* possiblePath, bool& stop) {
            if ( possibleVariantPath != possiblePath )
                _atPathUsed = true;

            // look at already loaded images
            const char* leafName = strrchr(possiblePath, '/');
            for (BuilderLoadedImage& li: _loadedImages) {
                if ( strcmp(li.path(), possiblePath) == 0 ) {
                    foundImage = &li;
                    result = true;
                    stop = true;
                    return;
                }
                else if ( isRPath ) {
                    // Special case @rpath/ because the name in li.fileInfo.path is a full path.
                    // Getting installName is expensive, so first see if an already loaded image
                    // has the same leaf name and, if so, see if its installName matches the requested @rpath
                    if (const char* aLeaf = strrchr(li.path(), '/')) {
                        if ( strcmp(aLeaf, leafName) == 0 ) {
                            if ( li.loadAddress()->isDylib() && (strcmp(loadPath, li.loadAddress()->installName()) == 0) ) {
                                foundImage = &li;
                                result = true;
                                stop = true;
                                return;
                            }
                        }
                    }
                }
            }

            // look to see if image already loaded via a different symlink
            bool     fileFound          = false;
            uint64_t fileFoundINode     = 0;
            uint64_t fileFoundMTime     = 0;
            bool     inodesMatchRuntime = false;
            // Note, we only do this check if we even expect to find this on-disk
            // We can also use the pathIsInDyldCacheWhichCannotBeOverridden result if we are still trying the same path
            // it was computed from
            if ( dylibsExpectedOnDisk || !pathIsInDyldCacheWhichCannotBeOverridden || (loadPath != possiblePath) ) {
                if ( _fileSystem.fileExists(possiblePath, &fileFoundINode, &fileFoundMTime, nullptr, &inodesMatchRuntime) ) {
                    fileFound = true;
                    for (BuilderLoadedImage& li: _loadedImages) {
                        if ( (li.loadedFileInfo.inode == 0) && (li.loadedFileInfo.mtime == 0) ) {
                            // Some already loaded image does not have an inode/mtime recorded, fix that if we can
                            if ( dylibsExpectedOnDisk || !li.loadAddress()->inDyldCache() ) {
                                _fileSystem.fileExists(li.path(), &li.loadedFileInfo.inode, &li.loadedFileInfo.mtime, nullptr, nullptr);
                            }
                        }
                        if ( (li.loadedFileInfo.inode == fileFoundINode) && (li.loadedFileInfo.mtime == fileFoundMTime) ) {
                            foundImage = &li;
                            result = true;
                            stop = true;
                            return;
                        }
                    }
                }
            }

            // We record the realpath of the file in the loaded images, but we might be loading via a symlink path.
            // We need to search using the realpath just in case the dylib the symlink points to was overwritten while
            // the process is running
            if ( fileFound ) {
                char realPath[MAXPATHLEN];
                if ( _fileSystem.getRealPath(possiblePath, realPath) ) {
                    for (BuilderLoadedImage& li: _loadedImages) {
                        if ( strcmp(li.path(), realPath) == 0 ) {
                            foundImage = &li;
                            result = true;
                            stop = true;
                            return;
                        }
                    }
                }
            }

            bool                 unmapWhenDone    = false;
            bool                 contentRebased   = false;
            bool                 hasInits         = false;
            bool                 markNeverUnload  = false;
            bool                 mustBuildClosure = _dyldCacheInvalidFormatVersion;
            ImageNum             overrideImageNum = 0;
            ImageNum             foundImageNum    = 0;
            const MachOAnalyzer* mh               = nullptr;
            const char*          filePath         = nullptr;
            LoadedFileInfo       loadedFileInfo;

            // look in dyld cache
            filePath = possiblePath;
            char realPath[MAXPATHLEN];
            if ( _dyldImageArray != nullptr ) {
                uint32_t dyldCacheImageIndex;
                bool foundInCache = _dyldCache->hasImagePath(possiblePath, dyldCacheImageIndex);
                if ( !foundInCache && fileFound ) {
                    // see if this is an OS dylib/bundle with a pre-built dlopen closure
                    // We can only use the pre-built closure if we are dynamic linkage (a dlopen) and
                    // there are no roots
                    if ( canUseSharedCacheClosure && (linkageType == LinkageType::kDynamic) ) {
                        if (const dyld3::closure::Image* otherImage = _dyldCache->findDlopenOtherImage(possiblePath) ) {
                            uint64_t expectedInode;
                            uint64_t expectedModTime;
                            if ( !otherImage->isInvalid() ) {
                                bool hasInodeInfo = otherImage->hasFileModTimeAndInode(expectedInode, expectedModTime);
                                // use the pre-built Image if it does not have mtime/inode, or it does and it matches the current file info
                                if ( !hasInodeInfo || ((expectedInode == fileFoundINode) && (expectedModTime == fileFoundMTime)) ) {
                                    loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, possiblePath, _archs, _platform, realPath);
                                    if ( _diag.noError() ) {
                                        mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
                                        foundImageNum = otherImage->imageNum();
                                        unmapWhenDone = true;
                                        contentRebased = false;
                                        hasInits = otherImage->hasInitializers() || otherImage->mayHavePlusLoads();
                                        // Use the realpath in the case where we loaded a symlink
                                        // The closure must have recorded an alias path
                                        if (realPath[0] != '\0')
                                            filePath = realPath;
                                    }
                                }
                            }
                        }
                    }
                }

                // If found in the cache, but not on-disk, this may be an already loaded image, but we are opening the alias.
                // For example, we are trying to open .../AppKit but we already have a loaded root of .../Versions/C/AppKit
                // This doesn't work with the calls to realpath when the symlinks themselves were removed from disk.
                if ( foundInCache && !fileFound ) {
                    ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
                    for (BuilderLoadedImage& li: _loadedImages) {
                        if ( li.overrideImageNum == dyldCacheImageNum ) {
                            foundImage = &li;
                            result = true;
                            stop = true;
                            return;
                        }
                    }
                }

                // if not found in cache, may be a symlink to something in cache
                // We have to do this check even if the symlink target is not on disk as we may
                // have symlinks in iOSMac dlopen paths which are resolved to a dylib removed from disk
                if ( !foundInCache && (mh == nullptr) ) {
                    if ( _fileSystem.getRealPath(possiblePath, realPath) ) {
                        foundInCache = _dyldCache->hasImagePath(realPath, dyldCacheImageIndex);
                        if ( foundInCache ) {
                            filePath = realPath;
#if BUILDING_LIBDYLD
                            // handle case where OS dylib was updated after this process launched
                            if ( foundInCache ) {
                                for (BuilderLoadedImage& li: _loadedImages) {
                                    if ( strcmp(li.path(), realPath) == 0 ) {
                                        foundImage = &li;
                                        result = true;
                                        stop = true;
                                        return;
                                    }
                                }
                            }
#endif
                        }
                    }
                }

                // if using a cached dylib, look to see if there is an override
                if ( foundInCache ) {
                    ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
                    bool useCache = true;
                    markNeverUnload = true; // dylibs in cache, or dylibs that override cache, should not be unloaded at runtime
                    bool ignoreCacheDylib = false;
                    const Image* image = _dyldImageArray->imageForNum(dyldCacheImageNum);
                    if ( image->overridableDylib() ) {
                        if ( fileFound ) {
                            if ( _makingClosuresInCache ) {
                                // during iOS cache build, don't look at files on disk, use ones in cache
                                useCache = true;
                            } else if ( !_rootsChecker.onDiskFileIsRoot(filePath, _dyldCache, image,
                                                                        &_fileSystem, fileFoundINode, fileFoundMTime) ) {
                                // file exists, but is not a root
                                useCache = true;
                            } else {
                                // internal build: a root on disk overrides the dylib in the cache
                                useCache = false;
                            }
                        }
                        if ( useCache && ((targetPlatform == Platform::iOSMac) || (targetPlatform == Platform::macOS)) ) {
                            // check this cached dylib is suitable for a catalyst or mac program
                            mh = (MachOAnalyzer*)_dyldCache->getIndexedImageEntry(dyldCacheImageNum-1, loadedFileInfo.mtime, loadedFileInfo.inode);
                            if ( !mh->loadableIntoProcess(targetPlatform, possiblePath) ) {
                                useCache = false;
                                mh = nullptr;
                                ignoreCacheDylib = true;
                            }
                        }
                        if ( !useCache && !ignoreCacheDylib ) {
                            overrideImageNum = dyldCacheImageNum;
                            _foundDyldCacheRoots = true;
                        }
                    }
                    if ( useCache ) {
                        foundImageNum = dyldCacheImageNum;
                        mh = (MachOAnalyzer*)_dyldCache->getIndexedImageEntry(foundImageNum-1, loadedFileInfo.mtime, loadedFileInfo.inode);
                        unmapWhenDone = false;
                        // if we are building the ImageArray in the dyld cache, content is not rebased
                        contentRebased = !_makingDyldCacheImages && _dyldCacheIsLive;
                        hasInits = image->hasInitializers() || image->mayHavePlusLoads();
                        // If the cache format is different from dyld/libdyld then we can't use this closure.
                        if ( (_dyldCache->header.formatVersion != dyld3::closure::kFormatVersion) || !canUseSharedCacheClosure ) {
                            mustBuildClosure = true;
                            _foundDyldCacheRoots = true;
                        }
                    }
                }
            }

            // If we are building the cache, and don't find an image, then it might be weak so just return
            if (_makingDyldCacheImages) {
                addMustBeMissingPath(possiblePath);
                return;
            }

            // if not found yet, mmap file
            if ( mh == nullptr ) {
                loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, filePath, _archs, _platform, realPath);
                mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
                if ( mh == nullptr ) {
                    // Don't add must-be-missing paths for dlopen as we don't cache dlopen closures
                    if (_isLaunchClosure) {
                        // If we found the file then we want to skip it as it's not a valid mach-o for this platform/arch
                        // We can't record skipped file mtime/inode for caches built on a different machine than the one they run on.
                        // In that case, we expect the file to be mastered out, as otherwise we couldn't detect if it's
                        // changed or not on the device side
                        if (fileFound && inodesMatchRuntime) {
                            addSkippedFile(possiblePath, fileFoundINode, fileFoundMTime);
                        } else {
                            addMustBeMissingPath(possiblePath);
                        }
                    }
                    return;
                }
                if ( linkageType != LinkageType::kDynamic ) {
                    // LC_LOAD_DYLIB can only link with dylibs, and DYLD_INSERT_LIBRARIES can only be dylibs
                    if ( !mh->isDylib() ) {
                        _diag.error("found '%s' which is not a dylib. Needed by '%s'", filePath, forImageChain.image.path());
                        return;
                    }
                    // verify this is a compatible dylib version
                    const char* installName;
                    uint32_t    foundCompatVers;
                    uint32_t    foundCurrentVers;
                    mh->getDylibInstallName(&installName, &foundCompatVers, &foundCurrentVers);
                    if ( (foundCompatVers < compatVersion) && mh->enforceCompatVersion() ) {
                        char foundStr[32];
                        char requiredStr[32];
                        MachOFile::packedVersionToString(foundCompatVers, foundStr);
                        MachOFile::packedVersionToString(compatVersion, requiredStr);
                        _diag.error("found '%s' which has compat version (%s) which is less than required (%s). Needed by '%s'",
                                    filePath, foundStr, requiredStr, forImageChain.image.path());
                        return;
                    }
                }
                else if ( mh->isMainExecutable() ) {
                    // when dlopen()ing a main executable, it must be a dynamic, position-independent executable (PIE)
                    if ( !mh->isPIE() || !mh->isDynamicExecutable() ) {
                        _diag.error("not PIE");
                        return;
                    }
                }
                // Use the realpath in the case where we loaded a symlink
                // The closure must have recorded an alias path
                if (realPath[0] != '\0')
                    filePath = realPath;
                foundImageNum = _startImageNum + _nextIndex++;
                _foundNonCachedImage = true;
                mustBuildClosure = true;
                unmapWhenDone = true;
            } else {
                loadedFileInfo.fileContent = mh;
            }

            if ( mh->inDyldCache() ) {
                // We may be loading from a symlink, so use the path in the cache, which is the realpath
                filePath = _dyldImageArray->imageForNum(foundImageNum)->path();
            }

            // if path is not the original path, or it's an inserted path (as forEachInColonList uses a stack temporary)
            if ( (filePath != loadPath) || (linkageType == LinkageType::kInserted) ) {
                if ( !mh->inDyldCache() ) {
                    // possiblePath may be a temporary (stack) string; since we found a file at that path, make it permanent
                    filePath = strdup_temp(filePath);
                }
                // check if this overrides what would have been found in the cache
                // This is the case where we didn't find the image with the path in the shared cache, perhaps as it used library paths,
                // but the path we requested had pointed in to the cache
                // FIXME: What if load path is via an @rpath and we will override the cache?
                if ( overrideImageNum == 0 ) {
                    if ( _dyldImageArray != nullptr ) {
                        uint32_t dyldCacheImageIndex;
                        if ( _dyldCache->hasImagePath(loadPath, dyldCacheImageIndex) ) {
                            ImageNum possibleOverrideNum = dyldCacheImageIndex+1;
                            if ( possibleOverrideNum != foundImageNum )
                                overrideImageNum = possibleOverrideNum;
                        }
                    }
                }
            }

            // check if this is an iOSMac dylib that is overriding a macOS dylib in the dyld cache
            if ( mh->inDyldCache() && iOSSupport(filePath) ) {
                const char* twinPath = &filePath[18];
                uint32_t dyldCacheImageIndex;
                if ( (_dyldCache != nullptr) && _dyldCache->hasImagePath(twinPath, dyldCacheImageIndex) ) {
                    ImageNum possibleOverrideNum = dyldCacheImageIndex+1;
                    if ( possibleOverrideNum != foundImageNum )
                        overrideImageNum = possibleOverrideNum;
                }
            }

            if ( !markNeverUnload ) {
                switch (linkageType) {
                    case LinkageType::kStatic:
                        // Statically linked images can only be unloaded if the image loading us can be unloaded
                        markNeverUnload = forImageChain.image.markNeverUnload;
                        break;
                    case LinkageType::kDynamic:
                        markNeverUnload = false;
                        break;
                    case LinkageType::kInserted:
                        // Inserted libraries must never be unloaded
                        markNeverUnload = true;
                        break;
                };
            }

            if ( !markNeverUnload ) {
                // If the parent didn't force us to be never-unload, other conditions still may
                if ( mh->hasThreadLocalVariables() ) {
                    markNeverUnload = true;
                } else if ( mh->hasObjC() && mh->isDylib() ) {
                    markNeverUnload = true;
                } else {
                    // record if image has DOF sections
                    __block bool hasDOFs = false;
                    mh->forEachDOFSection(_diag, ^(uint32_t offset) {
                        hasDOFs = true;
                    });
                    if ( hasDOFs )
                        markNeverUnload = true;
                }
            }

            // Set the path again just in case it was strdup'ed.
            loadedFileInfo.path = filePath;

            // add new entry
            BuilderLoadedImage entry;
            entry.loadedFileInfo        = loadedFileInfo;
            entry.imageNum              = foundImageNum;
            entry.unmapWhenDone         = unmapWhenDone;
            entry.contentRebased        = contentRebased;
            entry.hasInits              = hasInits;
            entry.markNeverUnload       = markNeverUnload;
            entry.rtldLocal             = false;
            entry.isBadImage            = false;
            entry.mustBuildClosure      = mustBuildClosure;
            entry.hasMissingWeakImports = false;
            entry.hasInterposingTuples  = !mh->inDyldCache() && mh->hasInterposingTuples();
            entry.overrideImageNum      = overrideImageNum;
            entry.exportsTrieOffset     = 0;
            entry.exportsTrieSize       = 0;
            _loadedImages.push_back(entry);
            foundImage = &_loadedImages.back();
            if ( isFallbackPath )
                _fallbackPathUsed = true;
            stop = true;
            result = true;
        });
        if (result)
            stopPathVariant = true;
    }, targetPlatform);

    // If we found a file, but also had an error, then we must have logged a diagnostic for a file we couldn't use.
    // Clear that for now.
    // FIXME: Surface this to the user in case they wanted to see the error
    if (result && _diag.hasError())
        _diag.clearError();

    return result;
}
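
// Editor's note, a minimal sketch of findImage()'s search order: for each path variant
// (DYLD_LIBRARY_PATH, fallbacks, etc.), each expanded @loader_path/@executable_path/@rpath
// candidate is checked against (1) already-loaded images by exact path, (2) already-loaded
// images by inode/mtime or realpath (symlink aliases), (3) the dyld shared cache, including
// pre-built dlopen closures and root/override detection, and only then (4) mmapped from
// disk, with dylib-ness and compat-version validated for static linkage.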

bool ClosureBuilder::expandAtLoaderPath(const char* loadPath, bool fromLCRPATH, const BuilderLoadedImage& loadedImage, char fixedPath[])
{
    switch ( _atPathHandling ) {
        case AtPath::none:
            return false;
        case AtPath::onlyInRPaths:
            if ( !fromLCRPATH ) {
                // <rdar://42360708> allow @loader_path in LC_LOAD_DYLIB during dlopen()
                if ( _isLaunchClosure )
                    return false;
            }
            break;
        case AtPath::all:
            break;
    }
    if ( strncmp(loadPath, "@loader_path/", 13) == 0 ) {
        strlcpy(fixedPath, loadedImage.path(), PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            strcpy(lastSlash+1, &loadPath[13]);
            return true;
        }
    }
    else if ( fromLCRPATH && (strcmp(loadPath, "@loader_path") == 0) ) {
        // <rdar://problem/52881387> in LC_RPATH allow "@loader_path" without trailing slash
        strlcpy(fixedPath, loadedImage.path(), PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            lastSlash[1] = '\0';
            return true;
        }
    }

    return false;
}
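
// Editor's note, a hypothetical example: if loadedImage is
// "/Applications/Foo.app/Contents/MacOS/Foo" and loadPath is
// "@loader_path/../Frameworks/Bar.framework/Bar", fixedPath becomes
// "/Applications/Foo.app/Contents/MacOS/../Frameworks/Bar.framework/Bar" (the path is not
// canonicalized here; realpath handling happens later in findImage()).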

bool ClosureBuilder::expandAtExecutablePath(const char* loadPath, bool fromLCRPATH, bool fromLCRPATHinMain, char fixedPath[])
{
    switch ( _atPathHandling ) {
        case AtPath::none:
            return false;
        case AtPath::onlyInRPaths:
            if ( !fromLCRPATH )
                return false;
            // main executables can always have an LC_RPATH that uses @executable_path, other images cannot if restricted
            if ( !fromLCRPATHinMain )
                return false;
            break;
        case AtPath::all:
            break;
    }

    if ( strncmp(loadPath, "@executable_path/", 17) == 0 ) {
        strlcpy(fixedPath, _mainProgLoadPath, PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            strcpy(lastSlash+1, &loadPath[17]);
            return true;
        }
    }
    else if ( fromLCRPATH && (strcmp(loadPath, "@executable_path") == 0) ) {
        // <rdar://problem/52881387> in LC_RPATH allow "@executable_path" without trailing slash
        strlcpy(fixedPath, _mainProgLoadPath, PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            lastSlash[1] = '\0';
            return true;
        }
    }

    return false;
}
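
// Editor's note, a hypothetical example: with _mainProgLoadPath "/usr/local/bin/tool",
// "@executable_path/plugins/x.dylib" expands to "/usr/local/bin/plugins/x.dylib". Unlike
// @loader_path, the anchor is always the main executable, regardless of which image's load
// command is being resolved.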

void ClosureBuilder::forEachResolvedPathVar(const char* loadPath, const LoadedImageChain& forImageChain,
                                            bool implictRPath, LinkageType linkageType,
                                            void (^handler)(const char* possiblePath, bool& stop))
{
    // don't expand @loader_path or @executable_path if disallowed
    if ( (_atPathHandling == AtPath::none) && (loadPath[0] == '@') && (loadPath[1] != 'r') ) {
        bool stop = false;
        handler(loadPath, stop);
        return;
    }

    // quick out if not an @ path and not an implicit rpath
    if ( !implictRPath && (loadPath[0] != '@') ) {
        bool stop = false;
        handler(loadPath, stop);
        return;
    }

    // expand @loader_path
    // Note this isn't supported for DYLD_INSERT_LIBRARIES
    BLOCK_ACCCESSIBLE_ARRAY(char, tempPath, PATH_MAX); // read as: char tempPath[PATH_MAX];
    if ( (linkageType != LinkageType::kInserted) && expandAtLoaderPath(loadPath, false, forImageChain.image, tempPath) ) {
        bool stop = false;
        handler(tempPath, stop);
        return;
    }

    // expand @executable_path
    // Note this is supported for DYLD_INSERT_LIBRARIES
    if ( expandAtExecutablePath(loadPath, false, false, tempPath) ) {
        bool stop = false;
        handler(tempPath, stop);
        return;
    }

    // expand @rpath
    // Note this isn't supported for DYLD_INSERT_LIBRARIES
    const char* rpathTail = nullptr;
    char        implicitRpathBuffer[PATH_MAX];
    if ( linkageType != LinkageType::kInserted ) {
        if ( strncmp(loadPath, "@rpath/", 7) == 0 ) {
            // note: rpathTail starts with '/'
            rpathTail = &loadPath[6];
        }
        else if ( implictRPath ) {
            // make rpathTail start with '/'
            strlcpy(implicitRpathBuffer, "/", PATH_MAX);
            strlcat(implicitRpathBuffer, loadPath, PATH_MAX);
            rpathTail = implicitRpathBuffer;
        }
    }
    if ( rpathTail != nullptr ) {
        // rpath expansion is technically a stack of rpath dirs built by starting with the main executable and pushing
        // LC_RPATHs from each dylib as they are recursively loaded. Our imageChain represents that stack.
        __block bool done = false;
        for (const LoadedImageChain* link = &forImageChain; (link != nullptr) && !done; link = link->previous) {
            bool mainExecutable = link->image.loadAddress()->isMainExecutable();
            link->image.loadAddress()->forEachRPath(^(const char* rPath, bool& stop) {
                // fprintf(stderr, "LC_RPATH %s from %s\n", rPath, link->image.loadedFileInfo.path);
                if ( expandAtLoaderPath(rPath, true, link->image, tempPath) || expandAtExecutablePath(rPath, true, mainExecutable, tempPath) ) {
                    // @loader_path or @executable_path allowed and expanded
                    strlcat(tempPath, rpathTail, PATH_MAX);
                    handler(tempPath, stop);
                }
                else if ( rPath[0] == '/' ) {
#if (TARGET_OS_OSX && TARGET_CPU_ARM64)
                    if ( (_platform == Platform::iOS) && (strncmp(rPath, "/usr/lib/swift", 14) == 0) ) {
                        // LC_RPATH is to /usr/lib/swift, but running on macOS that is /System/iOSSupport/usr/lib/swift
                        strlcpy(tempPath, "/System/iOSSupport", PATH_MAX);
                        strlcat(tempPath, rPath, PATH_MAX);
                        strlcat(tempPath, rpathTail, PATH_MAX);
                        handler(tempPath, stop);
                        if (stop) {
                            done = true;
                            return;
                        }
                    }
#endif
                    // LC_RPATH is an absolute path, not blocked by AtPath::none
                    strlcpy(tempPath, rPath, PATH_MAX);
                    strlcat(tempPath, rpathTail, PATH_MAX);
                    handler(tempPath, stop);
                }
                if (stop)
                    done = true;
#if 0
                if ( _fileSystem.fileExists(tempPath) ) {
                    stop = true;
                    result = strdup_temp(tempPath);
                }
                else {
                    // Don't add must-be-missing paths for dlopen as we don't cache dlopen closures
                    if (_isLaunchClosure) {
                        addMustBeMissingPath(tempPath);
                    }
                }
#endif
            });
        }
        if (done)
            return;
    }

    bool stop = false;
    handler(loadPath, stop);
}
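
// Editor's note, a hypothetical example: resolving "@rpath/libBar.dylib" from a dylib whose
// chain runs dylib -> app, where the dylib has LC_RPATH "@loader_path/../lib" and the app
// has LC_RPATH "/opt/lib", yields the candidates "<dylib-dir>/../lib/libBar.dylib" then
// "/opt/lib/libBar.dylib", in that order; the handler's stop flag ends the walk at the
// first hit.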

const char* ClosureBuilder::strdup_temp(const char* path) const
{
    if ( _tempPaths == nullptr )
        _tempPaths = PathPool::allocate();
    return _tempPaths->add(path);
}

void ClosureBuilder::addMustBeMissingPath(const char* path)
{
    //fprintf(stderr, "must be missing: %s\n", path);
    if ( _mustBeMissingPaths == nullptr )
        _mustBeMissingPaths = PathPool::allocate();
    // don't add path if already in list
    if ( !_mustBeMissingPaths->contains(path) )
        _mustBeMissingPaths->add(path);
}

void ClosureBuilder::addSkippedFile(const char* path, uint64_t inode, uint64_t mtime)
{
    _skippedFiles.push_back({ strdup_temp(path), inode, mtime });
}

ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(ImageNum imageNum)
{
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.imageNum == imageNum ) {
            return li;
        }
    }
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.overrideImageNum == imageNum ) {
            return li;
        }
    }
    assert(0 && "LoadedImage not found by num");
}

const ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(ImageNum imageNum) const
{
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.imageNum == imageNum ) {
            return li;
        }
    }
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.overrideImageNum == imageNum ) {
            return li;
        }
    }
    assert(0 && "LoadedImage not found");
}

ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(const MachOAnalyzer* mh)
{
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            return li;
        }
    }
    assert(0 && "LoadedImage not found by mh");
}

const MachOAnalyzer* ClosureBuilder::machOForImageNum(ImageNum imageNum)
{
    return findLoadedImage(imageNum).loadAddress();
}

const MachOAnalyzer* ClosureBuilder::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            if (li.isBadImage) {
                // Bad image during building of group 1 closures, so the dependents array
                // is potentially incomplete.
                return nullptr;
            }
            ImageNum childNum = li.dependents[depIndex].imageNum();
            // This is typically something like a missing weak-dylib we are re-exporting a weak-import symbol from
            if (childNum == kMissingWeakLinkedImage)
                return nullptr;
            return machOForImageNum(childNum);
        }
    }
    return nullptr;
}

ImageNum ClosureBuilder::imageNumForMachO(const MachOAnalyzer* mh)
{
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            return li.imageNum;
        }
    }
    assert(0 && "unknown mach-o");
    return 0;
}

void ClosureBuilder::recursiveLoadDependents(LoadedImageChain& forImageChain, bool canUseSharedCacheClosure)
{
    // if dependents is set, then we have already loaded this
    if ( forImageChain.image.dependents.begin() != nullptr )
        return;

    uintptr_t startDepIndex = _dependencies.count();
    // add dependents
    __block uint32_t depIndex = 0;
    forImageChain.image.loadAddress()->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
        Image::LinkKind kind = Image::LinkKind::regular;
        if ( isWeak )
            kind = Image::LinkKind::weak;
        else if ( isReExport )
            kind = Image::LinkKind::reExport;
        else if ( isUpward )
            kind = Image::LinkKind::upward;
        BuilderLoadedImage* foundImage;
        if ( findImage(loadPath, forImageChain, foundImage, LinkageType::kStatic, compatVersion, canUseSharedCacheClosure) ) {
            ImageNum foundImageNum = foundImage->imageNum;
            if ( _diag.noError() )
                _dependencies.push_back(Image::LinkedImage(kind, foundImageNum));
        }
        else if ( isWeak ) {
            _dependencies.push_back(Image::LinkedImage(Image::LinkKind::weak, kMissingWeakLinkedImage));
            // <rdar://problem/54387345> don't let an error loading a weak dylib cause everything to fail
            // _diag is checked after each dependent load, so if there is an error it was with loading the current dylib.
            // Since it is a weak load, it is ok to ignore it and go on.
            _diag.clearError();
        }
        else {
            BLOCK_ACCCESSIBLE_ARRAY(char, extra, 4096);
            extra[0] = '\0';
            const char* targetLeaf = strrchr(loadPath, '/');
            if ( targetLeaf == nullptr )
                targetLeaf = loadPath;
            if ( _mustBeMissingPaths != nullptr ) {
                strcpy(extra, ", tried but didn't find: ");
                _mustBeMissingPaths->forEachPath(^(const char* aPath) {
                    const char* aLeaf = strrchr(aPath, '/');
                    if ( aLeaf == nullptr )
                        aLeaf = aPath;
                    if ( strcmp(targetLeaf, aLeaf) == 0 ) {
                        strlcat(extra, "'", 4096);
                        strlcat(extra, aPath, 4096);
                        strlcat(extra, "' ", 4096);
                    }
                });
            }
            if ( !_skippedFiles.empty() ) {
                strcpy(extra, ", tried but invalid: ");
                for (const SkippedFile& skippedFile : _skippedFiles) {
                    const char* aPath = skippedFile.path;
                    const char* aLeaf = strrchr(aPath, '/');
                    if ( aLeaf == nullptr )
                        aLeaf = aPath;
                    if ( strcmp(targetLeaf, aLeaf) == 0 ) {
                        strlcat(extra, "'", 4096);
                        strlcat(extra, aPath, 4096);
                        strlcat(extra, "' ", 4096);
                    }
                }
            }
            if ( _diag.hasError() ) {
#if BUILDING_CACHE_BUILDER
                std::string errorMessageBuffer = _diag.errorMessage();
                const char* msg = errorMessageBuffer.c_str();
#else
                const char* msg = _diag.errorMessage();
#endif
                char msgCopy[strlen(msg)+4];
                strcpy(msgCopy, msg);
                _diag.error("dependent dylib '%s' not found for '%s'. %s", loadPath, forImageChain.image.path(), msgCopy);
            }
            else {
                _diag.error("dependent dylib '%s' not found for '%s'%s", loadPath, forImageChain.image.path(), extra);
            }
            if ( _launchErrorInfo != nullptr ) {
                _launchErrorInfo->kind              = DYLD_EXIT_REASON_DYLIB_MISSING;
                _launchErrorInfo->clientOfDylibPath = strdup_temp(forImageChain.image.path());
                _launchErrorInfo->targetDylibPath   = strdup_temp(loadPath);
                _launchErrorInfo->symbol            = nullptr;
            }
        }
        ++depIndex;
        if ( _diag.hasError() )
            stop = true;
    });
    if ( _diag.hasError() )
        return;
    forImageChain.image.dependents = _dependencies.subArray(startDepIndex, depIndex);

    // breadth-first recurse
    for (Image::LinkedImage dep : forImageChain.image.dependents) {
        // don't recurse upwards
        if ( dep.kind() == Image::LinkKind::upward )
            continue;
        // don't recurse down missing weak links
        if ( (dep.kind() == Image::LinkKind::weak) && (dep.imageNum() == kMissingWeakLinkedImage) )
            continue;
        BuilderLoadedImage& depLoadedImage = findLoadedImage(dep.imageNum());
        LoadedImageChain chain = { &forImageChain, depLoadedImage };
        recursiveLoadDependents(chain, canUseSharedCacheClosure);
        if ( _diag.hasError() )
            break;
    }
}

void ClosureBuilder::loadDanglingUpwardLinks(bool canUseSharedCacheClosure)
{
    bool danglingFixed;
    do {
        danglingFixed = false;
        for (BuilderLoadedImage& li : _loadedImages) {
            if ( li.dependents.begin() == nullptr ) {
                // this image does not have its dependents set yet (probably a dangling upward link, or referenced by an upward link)
                LoadedImageChain chain = { nullptr, li };
                recursiveLoadDependents(chain, canUseSharedCacheClosure);
                danglingFixed = true;
                break;
            }
        }
    } while (danglingFixed && _diag.noError());
}
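
// Editor's note: upward links exist to break dependency cycles (for example, in the
// libSystem family, where a lower-level dylib upward-links to a higher-level one). Because
// recursiveLoadDependents() deliberately does not recurse through upward links, an image
// reached only that way can end up loaded but with no dependents array; this loop sweeps
// until every loaded image has one.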

bool ClosureBuilder::overridableDylib(const BuilderLoadedImage& forImage)
{
    // on macOS, the cache can be customer/development in the basesystem/main OS
    // on embedded platforms with an Internal cache, allow overrides
    // on customer caches, only allow libdispatch.dylib to be overridden
    return _dyldCache->isOverridablePath(forImage.path());
}

void ClosureBuilder::buildImage(ImageWriter& writer, BuilderLoadedImage& forImage)
{
    const MachOAnalyzer* macho = forImage.loadAddress();
    // set ImageNum
    writer.setImageNum(forImage.imageNum);

    // set flags
    writer.setHasWeakDefs(macho->hasWeakDefs());
    writer.setIsBundle(macho->isBundle());
    writer.setIsDylib(macho->isDylib());
    writer.setIs64(macho->is64());
    writer.setIsExecutable(macho->isMainExecutable());
    writer.setUses16KPages(macho->uses16KPages());
    if ( macho->inDyldCache() ) {
        // only set on dylibs in the dyld shared cache
        writer.setOverridableDylib(overridableDylib(forImage));
    }
    writer.setInDyldCache(macho->inDyldCache());
    if ( macho->hasObjC() ) {
        writer.setHasObjC(true);
        bool hasPlusLoads = macho->hasPlusLoadMethod(_diag);
        writer.setHasPlusLoads(hasPlusLoads);
        if ( hasPlusLoads )
            forImage.hasInits = true;
    }
    else {
        writer.setHasObjC(false);
        writer.setHasPlusLoads(false);
    }

    if ( forImage.markNeverUnload ) {
        writer.setNeverUnload(true);
    }

#if BUILDING_DYLD || BUILDING_LIBDYLD
    if ( _foundDyldCacheRoots ) {
        // If we had roots, then some images are potentially on-disk while others are
        // being rebuilt for a new initializer order, but do not exist on disk
        if ( macho->inDyldCache() && !_dyldCache->header.dylibsExpectedOnDisk ) {
            // don't add file info for shared cache files mastered out of the final file system
        }
        else {
            // file is either not in cache or is in cache but not mastered out
            writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
        }
    } else {
        // shared cache not built by dyld or libdyld.dylib, so must be a real file
        writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
    }
#else
    // in cache builder code
    if ( !_dyldCache->header.dylibsExpectedOnDisk ) {
        // don't add file info for shared cache files mastered out of the final file system
        // This also covers executable and dlopen closures, as we are not running on a live
        // file system, so we don't have access to accurate inode/mtime
    }
    else {
        // file is either not in cache or is in cache but not mastered out
        writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
    }
#endif

    // add info on how to load image
    if ( !macho->inDyldCache() ) {
        writer.setMappingInfo(forImage.loadedFileInfo.sliceOffset, macho->mappedSize());
        // add code signature, if signed
        uint32_t codeSigFileOffset;
        uint32_t codeSigSize;
        if ( macho->hasCodeSignature(codeSigFileOffset, codeSigSize) ) {
            writer.setCodeSignatureLocation(codeSigFileOffset, codeSigSize);
            macho->forEachCDHash(^(const uint8_t* cdHash) {
                writer.addCDHash(cdHash);
            });
        }
        // add FairPlay encryption range if encrypted
        uint32_t fairPlayFileOffset;
        uint32_t fairPlaySize;
        if ( macho->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
            writer.setFairPlayEncryptionRange(fairPlayFileOffset, fairPlaySize);
        }
    }

    // set path
    writer.addPath(forImage.path());
    if ( _aliases != nullptr ) {
        for (const CachedDylibAlias& alias : *_aliases) {
            if ( strcmp(alias.realPath, forImage.path()) == 0 )
                writer.addPath(alias.aliasPath);
        }
    }

    // set uuid, if it has one
    uuid_t uuid;
    if ( macho->getUuid(uuid) )
        writer.setUUID(uuid);

    // set dependents
    writer.setDependents(forImage.dependents);

    // set segments
    addSegments(writer, macho);

    // if shared cache contains two variants of the same framework (macOS and iOS), mark the iOS one as an override of the macOS one
    if ( _makingDyldCacheImages && iOSSupport(forImage.path()) ) {
        const char* truncName = forImage.path()+18;
        for (const BuilderLoadedImage& li : _loadedImages) {
            if ( strcmp(li.path(), truncName) == 0 ) {
                writer.setAsOverrideOf(li.imageNum);
            }
        }
    }

    // record if this dylib overrides something in the cache
    if ( forImage.overrideImageNum != 0 ) {
        writer.setAsOverrideOf(forImage.overrideImageNum);
        const char* overridePath = _dyldImageArray->imageForNum(forImage.overrideImageNum)->path();
        writer.addPath(overridePath);
        if ( strcmp(overridePath, "/usr/lib/system/libdyld.dylib") == 0 )
            _libDyldImageNum = forImage.imageNum;
        else if ( strcmp(overridePath, "/usr/lib/libSystem.B.dylib") == 0 )
            _libSystemImageNum = forImage.imageNum;
    }

    // record fix-up info
    if ( macho->inDyldCache() && !_makingDyldCacheImages ) {
        // when building app closures, don't record fix-up info about dylibs in the cache
    }
    else if ( _makeMinimalClosure ) {
        // don't record fix-up info in dyld3's minimal-closure mode
        writer.setFixupsNotEncoded();
    }
    else if ( !_makingDyldCacheImages && macho->hasChainedFixups() ) {
        // when building app closures, just evaluate the targets of chained binds and record that table
        addChainedFixupInfo(writer, forImage);
    }
    else {
        // run rebase/bind opcodes or chained fixups
        addFixupInfo(writer, forImage);
    }
    if ( _diag.hasError() ) {
        writer.setInvalid();
        return;
    }


    // add initializers
#if BUILDING_CACHE_BUILDER

    // In the shared cache builder, we'll only ever see 'inDyldCache' images here for the shared
    // cache dylibs themselves. These are in an intermediate state where the cache is not live, the pointers
    // are unslid, but the pointers also don't contain fixup chains
    dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = macho->makeVMAddrConverter(forImage.contentRebased);
    if ( macho->inDyldCache() ) {
        vmAddrConverter.preferredLoadAddress            = 0;
        vmAddrConverter.slide                           = 0;
        vmAddrConverter.chainedPointerFormat            = 0;
        vmAddrConverter.contentRebased                  = false;
        vmAddrConverter.sharedCacheChainedPointerFormat = MachOAnalyzer::VMAddrConverter::SharedCacheFormat::none;
    }

#else

    dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = macho->makeVMAddrConverter(forImage.contentRebased);
#if !(BUILDING_LIBDYLD || BUILDING_DYLD)
    // The shared cache is always live in dyld/libdyld, but if we get here then we are an offline tool
    // In that case, use the shared cache vmAddrConverter if we need it
    if ( macho->inDyldCache() )
        vmAddrConverter = _dyldCache->makeVMAddrConverter(forImage.contentRebased);
#endif

#endif // BUILDING_CACHE_BUILDER

    __block unsigned initCount = 0;
    Diagnostics initializerDiag;
    macho->forEachInitializer(initializerDiag, vmAddrConverter, ^(uint32_t offset) {
        ++initCount;
    }, _dyldCache);
    if ( initializerDiag.noError() ) {
        if ( initCount != 0 ) {
            BLOCK_ACCCESSIBLE_ARRAY(uint32_t, initOffsets, initCount);
            __block unsigned index = 0;
            macho->forEachInitializer(_diag, vmAddrConverter, ^(uint32_t offset) {
                initOffsets[index++] = offset;
            }, _dyldCache);
            writer.setInitOffsets(initOffsets, initCount);
            forImage.hasInits = true;
        }
    }
    else {
        // mod_init_func section is malformed, might be self-modifying pointers
        macho->forEachInitializerPointerSection(_diag, ^(uint32_t sectionOffset, uint32_t sectionSize, const uint8_t* content, bool& stop) {
            writer.setInitSectRange(sectionOffset, sectionSize);
            forImage.hasInits = true;
        });
    }


    // add terminators (except for dylibs in the cache because they are never unloaded)
    if ( !macho->inDyldCache() ) {
        __block unsigned termCount = 0;
        macho->forEachTerminator(_diag, vmAddrConverter, ^(uint32_t offset) {
            ++termCount;
        });
        if ( termCount != 0 ) {
            BLOCK_ACCCESSIBLE_ARRAY(uint32_t, termOffsets, termCount);
            __block unsigned index = 0;
            macho->forEachTerminator(_diag, vmAddrConverter, ^(uint32_t offset) {
                termOffsets[index++] = offset;
            });
            writer.setTermOffsets(termOffsets, termCount);
        }
    }

    // record if image has DOF sections
    STACK_ALLOC_ARRAY(uint32_t, dofSectionOffsets, 256);
    macho->forEachDOFSection(_diag, ^(uint32_t offset) {
        dofSectionOffsets.push_back(offset);
    });
    if ( !dofSectionOffsets.empty() ) {
        writer.setDofOffsets(dofSectionOffsets);
    }

}

void ClosureBuilder::addSegments(ImageWriter& writer, const MachOAnalyzer* mh)
{
    const uint32_t segCount = mh->segmentCount();
    if ( mh->inDyldCache() ) {
        uint64_t cacheUnslideBaseAddress = _dyldCache->unslidLoadAddress();
        BLOCK_ACCCESSIBLE_ARRAY(Image::DyldCacheSegment, segs, segCount);
        mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
            segs[info.segIndex] = { (uint32_t)(info.vmAddr-cacheUnslideBaseAddress), (uint32_t)info.vmSize, info.protections };
        });
        writer.setCachedSegments(segs, segCount);
    }
    else {
        const uint32_t   pageSize          = (mh->uses16KPages() ? 0x4000 : 0x1000);
        __block uint32_t diskSegIndex      = 0;
        __block uint32_t totalPageCount    = 0;
        __block uint32_t lastFileOffsetEnd = 0;
        __block uint64_t lastVmAddrEnd     = 0;
        BLOCK_ACCCESSIBLE_ARRAY(Image::DiskSegment, dsegs, segCount*3); // room for padding
        mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
            if ( (info.fileOffset != 0) && (info.fileOffset != lastFileOffsetEnd) ) {
                Image::DiskSegment filePadding;
                filePadding.filePageCount = (info.fileOffset - lastFileOffsetEnd)/pageSize;
                filePadding.vmPageCount   = 0;
                filePadding.permissions   = 0;
                filePadding.paddingNotSeg = 1;
                dsegs[diskSegIndex++] = filePadding;
            }
            if ( (lastVmAddrEnd != 0) && (info.vmAddr != lastVmAddrEnd) ) {
                Image::DiskSegment vmPadding;
                vmPadding.filePageCount = 0;
                vmPadding.vmPageCount   = (info.vmAddr - lastVmAddrEnd)/pageSize;
                vmPadding.permissions   = 0;
                vmPadding.paddingNotSeg = 1;
                dsegs[diskSegIndex++] = vmPadding;
                totalPageCount += vmPadding.vmPageCount;
            }
            {
                Image::DiskSegment segInfo;
                segInfo.filePageCount = (info.fileSize+pageSize-1)/pageSize;
                segInfo.vmPageCount   = (info.vmSize+pageSize-1)/pageSize;
                segInfo.permissions   = info.protections & 7;
                segInfo.paddingNotSeg = 0;
                if ( info.readOnlyData )
                    segInfo.permissions = Image::DiskSegment::kReadOnlyDataPermissions;
                dsegs[diskSegIndex++] = segInfo;
                totalPageCount += segInfo.vmPageCount;
                if ( info.fileSize != 0 )
                    lastFileOffsetEnd = (uint32_t)(info.fileOffset + info.fileSize);
                if ( info.vmSize != 0 )
                    lastVmAddrEnd = info.vmAddr + info.vmSize;
            }
        });
        writer.setDiskSegments(dsegs, diskSegIndex);
    }
}
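
// Editor's note, a worked example with the 4KB (0x1000) page size: a segment with
// fileSize 0x2800 and vmSize 0x3000 is recorded as filePageCount 3 (rounded up) and
// vmPageCount 3; if the next segment's vmAddr leaves a 0x1000 gap, a one-page
// paddingNotSeg entry is emitted between them, which is why dsegs reserves segCount*3 slots.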

static bool isTupleFixup(uint64_t tupleSectVmStartOffset, uint64_t tupleSectVmEndOffset, uint64_t imageOffsetOfFixup, uint32_t entrySize, uint32_t& tupleIndex)
{
    if ( imageOffsetOfFixup < tupleSectVmStartOffset )
        return false;
    if ( imageOffsetOfFixup > tupleSectVmEndOffset )
        return false;
    uint64_t offsetIntoSection = imageOffsetOfFixup - tupleSectVmStartOffset;
    tupleIndex = (uint32_t)(offsetIntoSection/entrySize);
    return (tupleIndex*entrySize == offsetIntoSection) || ((tupleIndex*entrySize+entrySize/2) == offsetIntoSection);
}
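
// Editor's note, a worked example: with 64-bit pointers entrySize is 16, so for a section
// starting at VM offset 0x8000, a fixup at 0x8010 is the first word of tuple 1 and a fixup
// at 0x8018 is its second word (offsets 0x10 and 0x18 into the section pass the two
// alignment tests); a fixup at 0x8014 lands mid-word, fails both tests, and is rejected.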

void ClosureBuilder::addInterposingTuples(LaunchClosureWriter& writer, const Image* image, const MachOAnalyzer* mh)
{
    const unsigned pointerSize = mh->pointerSize();
    const uint64_t baseAddress = mh->preferredLoadAddress();
    mh->forEachInterposingSection(_diag, ^(uint64_t sectVmOffset, uint64_t sectVmSize, bool& stop) {
        const uint32_t entrySize       = 2*pointerSize;
        const uint32_t tupleCount      = (uint32_t)(sectVmSize/entrySize);
        const uint64_t sectVmEndOffset = sectVmOffset + sectVmSize;
        BLOCK_ACCCESSIBLE_ARRAY(InterposingTuple, resolvedTuples, tupleCount);
        for (uint32_t i=0; i < tupleCount; ++i) {
            resolvedTuples[i].stockImplementation.absolute.kind  = Image::ResolvedSymbolTarget::kindAbsolute;
            resolvedTuples[i].stockImplementation.absolute.value = 0;
            resolvedTuples[i].newImplementation.absolute.kind    = Image::ResolvedSymbolTarget::kindAbsolute;
            resolvedTuples[i].newImplementation.absolute.value   = 0;
        }
        // figure out what the new implementation (the rebase side) and the stock implementation (the bind side) of each tuple point to
        image->forEachFixup(^(uint64_t imageOffsetToRebase, bool& rebaseStop) {
            uint32_t tupleIndex;
            if ( isTupleFixup(sectVmOffset, sectVmEndOffset, imageOffsetToRebase, entrySize, tupleIndex) ) {
                const void* content = (uint8_t*)mh + imageOffsetToRebase;
                uint64_t unslidTargetAddress = mh->is64() ? *(uint64_t*)content : *(uint32_t*)content;
                resolvedTuples[tupleIndex].newImplementation.image.kind     = Image::ResolvedSymbolTarget::kindImage;
                resolvedTuples[tupleIndex].newImplementation.image.imageNum = image->imageNum();
                resolvedTuples[tupleIndex].newImplementation.image.offset   = unslidTargetAddress - mh->preferredLoadAddress();
            }
        },
        ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool& bindStop) {
            uint32_t tupleIndex;
            if ( isTupleFixup(sectVmOffset, sectVmEndOffset, imageOffsetToBind, entrySize, tupleIndex) ) {
                resolvedTuples[tupleIndex].stockImplementation = bindTarget;
            }
        },
        ^(uint64_t imageOffsetToStartsInfo, const Array<Image::ResolvedSymbolTarget>& targets, bool& chainStop) {
            mh->withChainStarts(_diag, imageOffsetToStartsInfo, ^(const dyld_chained_starts_in_image* startsInfo) {
                mh->forEachFixupInAllChains(_diag, startsInfo, false, ^(MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc, const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
                    uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)mh;
                    uint32_t tupleIndex;
                    if ( !isTupleFixup(sectVmOffset, sectVmEndOffset, fixupOffset, entrySize, tupleIndex) )
                        return;
                    uint32_t bindOrdinal;
                    int64_t  addend;
                    uint64_t rebaseTargetOffset;
                    if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal, addend) ) {
                        if ( bindOrdinal < targets.count() ) {
                            resolvedTuples[tupleIndex].stockImplementation = targets[bindOrdinal];
                        }
                        else {
                            _diag.error("out of range bind ordinal %d (max %lu)", bindOrdinal, targets.count());
                            fixupsStop = true;
                        }
                    }
                    else if ( fixupLoc->isRebase(segInfo->pointer_format, baseAddress, rebaseTargetOffset) ) {
                        resolvedTuples[tupleIndex].newImplementation.image.kind     = Image::ResolvedSymbolTarget::kindImage;
                        resolvedTuples[tupleIndex].newImplementation.image.imageNum = image->imageNum();
                        resolvedTuples[tupleIndex].newImplementation.image.offset   = rebaseTargetOffset;
                    }
                });
            });
        },
        ^(uint64_t imageOffsetToFixup) {
            // objc optimisation can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool& bindStop) {
            // objc protocol optimisation fixups can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, uint32_t selectorIndex, bool inSharedCache, bool& fixupStop) {
            // objc selector optimisation fixups can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, bool& fixupStop) {
            // objc stable Swift optimisation fixups can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, bool& fixupStop) {
            // objc method list optimisation fixups can't be interposed so nothing to do here.
        });

        // remove any tuples in which both sides are not set (or the target is a weak-import NULL)
        STACK_ALLOC_ARRAY(InterposingTuple, goodTuples, tupleCount);
        for (uint32_t i=0; i < tupleCount; ++i) {
            if ( (resolvedTuples[i].stockImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute)
              && (resolvedTuples[i].newImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute) )
                goodTuples.push_back(resolvedTuples[i]);
        }
        writer.addInterposingTuples(goodTuples);
        _interposingTuplesUsed = !goodTuples.empty();

        // if the target of the interposing is in the dyld shared cache, add a PatchEntry so the cache is fixed up at launch
        STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, goodTuples.count());
        for (const InterposingTuple& aTuple : goodTuples) {
            if ( aTuple.stockImplementation.sharedCache.kind == Image::ResolvedSymbolTarget::kindSharedCache ) {
                uint32_t imageIndex;
                assert(_dyldCache->addressInText((uint32_t)aTuple.stockImplementation.sharedCache.offset, &imageIndex));
                ImageNum imageInCache = imageIndex+1;
                Closure::PatchEntry patch;
                patch.exportCacheOffset      = (uint32_t)aTuple.stockImplementation.sharedCache.offset;
                patch.overriddenDylibInCache = imageInCache;
                patch.replacement            = aTuple.newImplementation;
                patches.push_back(patch);
            }
        }
        writer.addCachePatches(patches);
    });
}
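
// Editor's note, a minimal sketch of the input this consumes (not from this file): an
// interposing dylib declares { replacement, replacee } pointer pairs in a
// __DATA,__interpose section, along the lines of
//
//     static const struct { const void* replacement; const void* replacee; }
//         gInterpose __attribute__((used, section("__DATA,__interpose"))) =
//             { (const void*)&my_malloc, (const void*)&malloc };   // hypothetical names
//
// The replacement slot is a rebase (it points into this image) and the replacee slot is a
// bind (it points at the interposed symbol), which is how the rebase and bind callbacks
// above split the work of resolving each tuple.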

const Image::RebasePattern RebasePatternBuilder::_s_maxLeapPattern = { 0xFFFFF, 0, 0xF };
const uint64_t             RebasePatternBuilder::_s_maxLeapCount   = _s_maxLeapPattern.repeatCount * _s_maxLeapPattern.skipCount;
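
// Editor's note: 0xFFFFF * 0xF = 0xEFFFF1, so a single _s_maxLeapPattern entry can account
// for up to 0xEFFFF1 skipped units; RebasePatternBuilder::add() below emits as many of
// these entries as needed to cover very large gaps between rebase locations.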



RebasePatternBuilder::RebasePatternBuilder(OverflowSafeArray<closure::Image::RebasePattern>& entriesStorage, uint64_t ptrSize)
    : _rebaseEntries(entriesStorage), _lastLocation(-ptrSize), _ptrSize(ptrSize)
{
}

void RebasePatternBuilder::add(uint64_t runtimeOffset)
{
    const uint64_t delta   = runtimeOffset - _lastLocation;
    const bool     aligned = ((delta % _ptrSize) == 0);
    if ( delta == _ptrSize ) {
        // this rebase location is contiguous to the previous one
        if ( _rebaseEntries.back().contigCount < 255 ) {
            // just bump the previous entry's contigCount
            _rebaseEntries.back().contigCount++;
        }
        else {
            // previous contiguous run already has the max 255, so start a new run
            _rebaseEntries.push_back({ 1, 1, 0 });
        }
    }
    else if ( aligned && (delta <= (_ptrSize*15)) ) {
        // this rebase is within skip distance of the last rebase
        _rebaseEntries.back().skipCount = (uint8_t)((delta-_ptrSize)/_ptrSize);
        int lastIndex = (int)(_rebaseEntries.count() - 1);
        if ( lastIndex > 1 ) {
            if ( (_rebaseEntries[lastIndex].contigCount == _rebaseEntries[lastIndex-1].contigCount)
              && (_rebaseEntries[lastIndex].skipCount   == _rebaseEntries[lastIndex-1].skipCount) ) {
                // this entry has the same contig and skip as the previous one, so remove it and bump the previous entry's repeat count
                _rebaseEntries.pop_back();
                _rebaseEntries.back().repeatCount += 1;
            }
        }
        _rebaseEntries.push_back({ 1, 1, 0 });
    }
    else {
        uint64_t advanceCount = (delta-_ptrSize);
        if ( (runtimeOffset < _lastLocation) && (_lastLocation != -_ptrSize) ) {
            // out-of-order rebase! handle this by resetting the rebase offset to zero
            _rebaseEntries.push_back({ 0, 0, 0 });
            advanceCount = runtimeOffset;
        }
        // if the next rebase is too far to reach with one pattern, use a series
        while ( advanceCount > _s_maxLeapCount ) {
            _rebaseEntries.push_back(_s_maxLeapPattern);
            advanceCount -= _s_maxLeapCount;
        }
        // if the next rebase is not reachable with skipCount==1 or skipCount==15, add an intermediate entry
        while ( advanceCount > _s_maxLeapPattern.repeatCount ) {
            uint64_t count = advanceCount / _s_maxLeapPattern.skipCount;
            _rebaseEntries.push_back({ (uint32_t)count, 0, _s_maxLeapPattern.skipCount });
            advanceCount -= (count*_s_maxLeapPattern.skipCount);
        }
        if ( advanceCount != 0 )
            _rebaseEntries.push_back({ (uint32_t)advanceCount, 0, 1 });
        _rebaseEntries.push_back({ 1, 1, 0 });
    }
    _lastLocation = runtimeOffset;
}
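
// Editor's note, a hypothetical encoding: on a 64-bit image, rebases at offsets 0x1000,
// 0x1008, and 0x1010 collapse into a single trailing pattern entry whose contigCount grows
// to 3, while a following rebase at 0x1030 sets skipCount 3 on that entry and starts a new
// one. The point of the pattern encoding is that closures store runs, not one record per
// rebased pointer.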


BindPatternBuilder::BindPatternBuilder(OverflowSafeArray<closure::Image::BindPattern>& entriesStorage, uint64_t ptrSize)
    : _bindEntries(entriesStorage), _ptrSize(ptrSize), _lastOffset(-ptrSize), _lastTarget({ {0, 0} })
{
}

void BindPatternBuilder::add(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, bool weakBindCoalese)
{
    if ( weakBindCoalese ) {
        // there may be a previous bind to this location
        // if so, update that rather than create a new BindPattern
        for (Image::BindPattern& aBind : _bindEntries) {
            if ( (aBind.startVmOffset == runtimeOffset) && (aBind.repeatCount == 1) && (aBind.skipCount == 0) ) {
                aBind.target = target;
                return;
            }
        }
    }
    bool mergedIntoPrevious = false;
    if ( !mergedIntoPrevious && (target == _lastTarget) && (runtimeOffset > _lastOffset) && !_bindEntries.empty() ) {
        uint64_t skipAmount = (runtimeOffset - _lastOffset - _ptrSize)/_ptrSize;
        if ( skipAmount*_ptrSize != (runtimeOffset - _lastOffset - _ptrSize) ) {
            // misaligned pointer means we cannot optimize
        }
        else {
            if ( (_bindEntries.back().repeatCount == 1) && (_bindEntries.back().skipCount == 0) && (skipAmount <= 255) ) {
                _bindEntries.back().repeatCount = 2;
                _bindEntries.back().skipCount   = skipAmount;
                assert(_bindEntries.back().skipCount == skipAmount); // check overflow
                mergedIntoPrevious = true;
            }
            else if ( (_bindEntries.back().skipCount == skipAmount) && (_bindEntries.back().repeatCount < 0xfff) ) {
                uint32_t prevRepeatCount = _bindEntries.back().repeatCount;
                _bindEntries.back().repeatCount += 1;
                assert(_bindEntries.back().repeatCount > prevRepeatCount); // check overflow
                mergedIntoPrevious = true;
            }
        }
    }
    if ( (target == _lastTarget) && (runtimeOffset == _lastOffset) && !_bindEntries.empty() ) {
        // duplicate bind for same location, ignore this one
        mergedIntoPrevious = true;
    }
    if ( !mergedIntoPrevious ) {
        Image::BindPattern pattern;
        pattern.target        = target;
        pattern.startVmOffset = runtimeOffset;
        pattern.repeatCount   = 1;
        pattern.skipCount     = 0;
        assert(pattern.startVmOffset == runtimeOffset);
        _bindEntries.push_back(pattern);
    }
    _lastTarget = target;
    _lastOffset = runtimeOffset;
}
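
// Editor's note, a hypothetical encoding: three binds of the same symbol at offsets 0x2000,
// 0x2010, and 0x2020 (64-bit) become one BindPattern with startVmOffset 0x2000, skipCount 1
// (one pointer skipped between fixups), and repeatCount 3. A bind to a different target, or
// a misaligned gap, always starts a fresh entry.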
1433
1434
1435 bool ClosureBuilder::mas_fromImageWeakDefLookup(const WrappedMachO& fromWmo, const char* symbolName, uint64_t addend, CachePatchHandler patcher, FixupTarget& target) const
1436 {
1437 // when building dylibs into the dyld cache, there is no load-order, so we cannot use the standard algorithm
1438 // otherwise call through to standard weak-def coalescing algorithm
1439 if ( !_makingDyldCacheImages )
1440 return MachOAnalyzerSet::mas_fromImageWeakDefLookup(fromWmo, symbolName, addend, patcher, target);
1441
1442
1443 // look first in /usr/lib/libc++, most will be here
1444 Diagnostics diag;
1445 for (const BuilderLoadedImage& li : _loadedImages) {
1446 if ( li.loadAddress()->hasWeakDefs() && (strncmp(li.path(), "/usr/lib/libc++", 15) == 0) ) {
1447 WrappedMachO libcxxWmo(li.loadAddress(), this, (void*)&li);
1448 if ( libcxxWmo.findSymbolIn(diag, symbolName, addend, target) )
1449 return true;
1450 }
1451 }
1452
1453 // if not found, try looking in the image itself; most custom weak-def symbols have a copy in the image itself
1454 if ( fromWmo.findSymbolIn(diag, symbolName, addend, target) )
1455 return true;
1456
1457 // if we link with something that also defines this weak-def, use it
1458 ClosureBuilder::BuilderLoadedImage* fromImage = (ClosureBuilder::BuilderLoadedImage*)(fromWmo._other);
1459 for (Image::LinkedImage child : fromImage->dependents) {
1460 if (child.imageNum() == kMissingWeakLinkedImage)
1461 continue;
1462 if (child.kind() == Image::LinkKind::upward)
1463 continue;
1464 const BuilderLoadedImage& childLi = findLoadedImage(child.imageNum());
1465 if ( childLi.loadAddress()->hasWeakDefs() ) {
1466 WrappedMachO childWmo(childLi.loadAddress(), this, (void*)&childLi);
1467 if ( childWmo.findSymbolIn(diag, symbolName, addend, target) )
1468 return true;
1469 }
1470 }
1471 return false;
1472 }
1473
1474 void ClosureBuilder::mas_forEachImage(void (^handler)(const WrappedMachO& wmo, bool hidden, bool& stop)) const
1475 {
1476 bool stop = false;
1477 for (const ClosureBuilder::BuilderLoadedImage& li : _loadedImages) {
1478 WrappedMachO wmo(li.loadAddress(), this, (void*)&li);
1479 handler(wmo, li.rtldLocal, stop);
1480 if ( stop )
1481 break;
1482 }
1483 }
1484
1485 bool ClosureBuilder::wmo_missingSymbolResolver(const WrappedMachO* fromWmo, bool weakImport, bool lazyBind, const char* symbolName, const char* expectedInDylibPath, const char* clientPath, FixupTarget& target) const
1486 {
1487 // if weakImport and missing, bind to NULL
1488 if ( weakImport ) {
1489 // construct NULL target
1490 target.offsetInImage = 0;
1491 target.kind = FixupTarget::Kind::bindAbsolute;
1492 target.requestedSymbolName = symbolName;
1493 target.foundSymbolName = nullptr;
1494 // Record that we found a missing weak import so that the objc optimizer doesn't have to check
1495 ClosureBuilder::BuilderLoadedImage* fromBLI = (ClosureBuilder::BuilderLoadedImage*)(fromWmo->_other);
1496 fromBLI->hasMissingWeakImports = true;
1497 return true;
1498 }
1499 // dyld3 binds everything ahead of time, so to simulate a lazy-bind failure,
1500 // if non-weakImport and lazy, bind to __dyld_missing_symbol_abort()
1501 if ( lazyBind && _allowMissingLazies ) {
1502 for (const BuilderLoadedImage& li : _loadedImages) {
1503 if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) ) {
1504 WrappedMachO libdyldWmo(li.loadAddress(), this, (void*)&li);
1505 Diagnostics diag;
1506 if ( libdyldWmo.findSymbolIn(diag, "__dyld_missing_symbol_abort", 0, target) ) {
1507 // <rdar://problem/44315944> closures should bind missing lazy-bind symbols to a missing symbol handler in libdyld in flat namespace
1508 return true;
1509 }
1510 break;
1511 }
1512 }
1513 }
1514 // support abort payload
1515 if ( _launchErrorInfo != nullptr ) {
1516 _launchErrorInfo->kind = DYLD_EXIT_REASON_SYMBOL_MISSING;
1517 _launchErrorInfo->clientOfDylibPath = strdup_temp(clientPath);
1518 _launchErrorInfo->targetDylibPath = strdup_temp(expectedInDylibPath);
1519 _launchErrorInfo->symbol = symbolName;
1520 }
1521 return false;
1522 }
1523
1524 void ClosureBuilder::mas_mainExecutable(WrappedMachO& wmo) const
1525 {
1526 const ClosureBuilder::BuilderLoadedImage& mainLi = _loadedImages[_mainProgLoadIndex];
1527 WrappedMachO mainWmo(mainLi.loadAddress(), this, (void*)&mainLi);
1528 wmo = mainWmo;
1529 }
1530
1531 void* ClosureBuilder::mas_dyldCache() const
1532 {
1533 return (void*)_dyldCache;
1534 }
1535
1536 bool ClosureBuilder::wmo_dependent(const WrappedMachO* wmo, uint32_t depIndex, WrappedMachO& childWmo, bool& missingWeakDylib) const
1537 {
1538 ClosureBuilder::BuilderLoadedImage* forImage = (ClosureBuilder::BuilderLoadedImage*)(wmo->_other);
1539
1540 if ( depIndex >= forImage->dependents.count() )
1541 return false;
1542
1543 ImageNum childNum = forImage->dependents[depIndex].imageNum();
1544 if ( childNum == kMissingWeakLinkedImage ) {
1545 missingWeakDylib = true;
1546 return true;
1547 }
1548 const BuilderLoadedImage& depLoadedImage = this->findLoadedImage(childNum);
1549 childWmo = WrappedMachO(depLoadedImage.loadAddress(), this, (void*)&depLoadedImage);
1550 missingWeakDylib = false;
1551 return true;
1552 }
1553
1554 const char* ClosureBuilder::wmo_path(const WrappedMachO* wmo) const
1555 {
1556 ClosureBuilder::BuilderLoadedImage* forImage = (ClosureBuilder::BuilderLoadedImage*)(wmo->_other);
1557 return forImage->loadedFileInfo.path;
1558 }
1559
1560 MachOAnalyzerSet::ExportsTrie ClosureBuilder::wmo_getExportsTrie(const WrappedMachO* wmo) const
1561 {
1562 ClosureBuilder::BuilderLoadedImage* forImage = (ClosureBuilder::BuilderLoadedImage*)(wmo->_other);
1563 if ( forImage->exportsTrieOffset == 0 ) {
1564 // if trie location not already cached, look it up
1565 wmo->_mh->hasExportTrie(forImage->exportsTrieOffset, forImage->exportsTrieSize);
1566 }
1567 const uint8_t* start = nullptr;
1568 const uint8_t* end = nullptr;
1569 if ( forImage->exportsTrieOffset != 0 ) {
1570 start = (uint8_t*)wmo->_mh + forImage->exportsTrieOffset;
1571 end = start + forImage->exportsTrieSize;
1572 }
1573 return { start, end };
1574 }
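// Worked example (editor's addition): with exportsTrieOffset=0x8000 and
// exportsTrieSize=0x400, the returned trie spans [mh+0x8000, mh+0x8400).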
1575
1576
1577 Image::ResolvedSymbolTarget ClosureBuilder::makeResolvedTarget(const FixupTarget& target) const
1578 {
1579 Image::ResolvedSymbolTarget resolvedTarget;
1580 switch ( target.kind ) {
1581 case MachOAnalyzerSet::FixupTarget::Kind::rebase:
1582 assert(0 && "target is a rebase");
1583 break;
1584 case MachOAnalyzerSet::FixupTarget::Kind::bindToImage:
1585 if ( target.foundInImage._mh->inDyldCache() ) {
1586 resolvedTarget.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
1587 resolvedTarget.sharedCache.offset = (uint8_t*)target.foundInImage._mh - (uint8_t*)_dyldCache + target.offsetInImage;
1588 }
1589 else {
1590 ClosureBuilder::BuilderLoadedImage* targetBuildLoaderImage = (ClosureBuilder::BuilderLoadedImage*)(target.foundInImage._other);
1591 resolvedTarget.image.kind = Image::ResolvedSymbolTarget::kindImage;
1592 resolvedTarget.image.imageNum = targetBuildLoaderImage->imageNum;
1593 resolvedTarget.image.offset = target.offsetInImage;
1594 }
1595 return resolvedTarget;
1596 case MachOAnalyzerSet::FixupTarget::Kind::bindAbsolute:
1597 resolvedTarget.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1598 resolvedTarget.absolute.value = target.offsetInImage;
1599 return resolvedTarget;
1600 case MachOAnalyzerSet::FixupTarget::Kind::bindMissingSymbol:
1601 assert(0 && "unknown FixupTarget::Kind::bindMissingSymbol found in closure");
1602 break;
1603 }
1604 assert(0 && "unknown FixupTarget kind");
1605 }
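// Worked example (editor's addition): for a symbol found in a cached dylib whose
// mach_header is 0x4A0000 bytes from the start of the shared cache, at offset
// 0x1F0 within that dylib, the encoding is kindSharedCache with
// offset = 0x4A0000 + 0x1F0 = 0x4A01F0.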
1606
1607 void ClosureBuilder::addFixupInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
1608 {
1609 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::RebasePattern, rebaseEntries, 1024);
1610 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::BindPattern, binds, 512);
1611 __block RebasePatternBuilder rebaseBuilder(rebaseEntries, forImage.loadAddress()->pointerSize());
1612 __block BindPatternBuilder bindBuilder(binds, forImage.loadAddress()->pointerSize());
1613
1614 const bool stompedLazyOpcodes = forImage.loadAddress()->hasStompedLazyOpcodes();
1615 WrappedMachO forImage_wmo(forImage.loadAddress(), this, (void*)&forImage);
1616 forImage_wmo.forEachFixup(_diag,
1617 ^(uint64_t fixupLocRuntimeOffset, PointerMetaData pmd, const MachOAnalyzerSet::FixupTarget& target, bool& stop) {
1618 if ( target.kind == MachOAnalyzerSet::FixupTarget::Kind::rebase ) {
1619 // normally ignore rebase on lazy pointer because dyld3 will immediately bind that same pointer
1620 // but if app is licenseware and has stomped the lazy bind opcodes, keep the rebases
1621 if ( target.isLazyBindRebase && !stompedLazyOpcodes )
1622 return;
1623 }
1624 if ( _dylibFixupHandler ) {
1625 // applying fixups to dylibs in dyld cache as the cache is being built
1626 _dylibFixupHandler(forImage.loadAddress(), fixupLocRuntimeOffset, pmd, target);
1627 return;
1628 }
1629 switch ( target.kind ) {
1630 case MachOAnalyzerSet::FixupTarget::Kind::rebase:
1631 if ( !_leaveRebasesAsOpcodes )
1632 rebaseBuilder.add(fixupLocRuntimeOffset);
1633 break;
1634 case MachOAnalyzerSet::FixupTarget::Kind::bindToImage:
1635 case MachOAnalyzerSet::FixupTarget::Kind::bindAbsolute:
1636 bindBuilder.add(fixupLocRuntimeOffset, makeResolvedTarget(target), target.weakCoalesced);
1637 break;
1638 case MachOAnalyzerSet::FixupTarget::Kind::bindMissingSymbol:
1639 // this is the last call from forEachFixup() because a symbol could not be resolved
1640 break;
1641 }
1642 },
1643 ^(uint32_t cachedDylibIndex, uint32_t exportCacheOffset, const FixupTarget& target) {
1644 addWeakDefCachePatch(cachedDylibIndex, exportCacheOffset, target);
1645 }
1646 );
1647
1648 // check for __dyld section in main executable to support licenseware
1649 if ( forImage.loadAddress()->filetype == MH_EXECUTE ) {
1650 forImage.loadAddress()->forEachSection(^(const MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stop) {
1651 if ( (strcmp(sectInfo.sectName, "__dyld") == 0) && (strcmp(sectInfo.segInfo.segName, "__DATA") == 0) ) {
1652 // find dyld3::compatFuncLookup in libdyld.dylib
1653 assert(_libDyldImageNum != 0);
1654 const BuilderLoadedImage& libdyldImage = findLoadedImage(_libDyldImageNum);
1655 WrappedMachO libdyldWmo(libdyldImage.loadAddress(), this, (void*)&libdyldImage);
1656 FixupTarget libdyldCompatTarget;
1657 if ( libdyldWmo.findSymbolIn(_diag, "__ZN5dyld316compatFuncLookupEPKcPPv", 0, libdyldCompatTarget) ) {
1658 // dyld_func_lookup is the second pointer in the __dyld section
1659 uint64_t fixupLocRuntimeOffset = sectInfo.sectAddr - forImage.loadAddress()->preferredLoadAddress() + forImage.loadAddress()->pointerSize();
1660 bindBuilder.add(fixupLocRuntimeOffset, makeResolvedTarget(libdyldCompatTarget), false);
1661 }
1662 else {
1663 _diag.error("libdyld.dylib is missing dyld3::compatFuncLookup");
1664 }
1665 }
1666 });
1667 }
1668
1669 // add all rebase and bind info into closure, unless building dyld cache
1670 if ( !_makingDyldCacheImages ) {
1671 if ( _leaveRebasesAsOpcodes )
1672 writer.setRebasesNotEncoded();
1673 else
1674 writer.setRebaseInfo(rebaseEntries);
1675 writer.setBindInfo(binds);
1676 }
1677
1678 // i386 programs also use text relocs to rebase stubs
1679 if ( (forImage.loadAddress()->cputype == CPU_TYPE_I386) && !_makingDyldCacheImages ) {
1680 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::TextFixupPattern, textRebases, 512);
1681 __block uint64_t lastOffset = -4;
1682 forImage.loadAddress()->forEachTextRebase(_diag, ^(uint64_t runtimeOffset, bool& stop) {
1683 if ( textRebases.freeCount() < 2 ) {
1684 _diag.error("too many text rebase locations (%ld) in %s", textRebases.maxCount(), writer.currentImage()->path());
1685 stop = true;
1686 }
1687 bool mergedIntoPrevious = false;
1688 if ( (runtimeOffset > lastOffset) && !textRebases.empty() ) {
1689 uint32_t skipAmount = (uint32_t)(runtimeOffset - lastOffset);
1690 if ( (textRebases.back().repeatCount == 1) && (textRebases.back().skipCount == 0) ) {
1691 textRebases.back().repeatCount = 2;
1692 textRebases.back().skipCount = skipAmount;
1693 mergedIntoPrevious = true;
1694 }
1695 else if ( textRebases.back().skipCount == skipAmount ) {
1696 textRebases.back().repeatCount += 1;
1697 mergedIntoPrevious = true;
1698 }
1699 }
1700 if ( !mergedIntoPrevious ) {
1701 Image::TextFixupPattern pattern;
1702 pattern.target.raw = 0;
1703 pattern.startVmOffset = (uint32_t)runtimeOffset;
1704 pattern.repeatCount = 1;
1705 pattern.skipCount = 0;
1706 textRebases.push_back(pattern);
1707 }
1708 lastOffset = runtimeOffset;
1709 });
1710 writer.setTextRebaseInfo(textRebases);
1711 }
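// Worked example for the text-rebase merging above (editor's addition): i386 stub
// rebases at offsets 0x10, 0x14, and 0x18 coalesce into one TextFixupPattern
// { startVmOffset=0x10, repeatCount=3, skipCount=4 }; note the skip counts here
// are byte deltas rather than pointer strides.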
1712
1713 }
1714
1715
1716
1717
1718 void ClosureBuilder::addWeakDefCachePatch(uint32_t cachedDylibIndex, uint32_t exportCacheOffset, const FixupTarget& patchTarget)
1719 {
1720 // minimal closures don't need weak def patches, they are regenerated at launch
1721 if ( _makeMinimalClosure )
1722 return;
1723
1724 // don't add duplicates
1725 for (const Closure::PatchEntry& aPatch : _weakDefCacheOverrides) {
1726 if ( aPatch.exportCacheOffset == exportCacheOffset )
1727 return;
1728 }
1729 // add new patch entry
1730 ClosureBuilder::BuilderLoadedImage* targetImage = (ClosureBuilder::BuilderLoadedImage*)(patchTarget.foundInImage._other);
1731 Closure::PatchEntry patch;
1732 patch.overriddenDylibInCache = cachedDylibIndex+1; // convert image index to ImageNum
1733 patch.exportCacheOffset = exportCacheOffset;
1734 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
1735 patch.replacement.image.imageNum = targetImage->imageNum;
1736 patch.replacement.image.offset = patchTarget.offsetInImage;
1737 _weakDefCacheOverrides.push_back(patch);
1738 }
1739
1740 void ClosureBuilder::addChainedFixupInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
1741 {
1742 // as a side effect of building the targets array, we discover if anything in the dyld cache uses weak-defs that need
1743 // to be redirected to an impl in some other dylib (cache patched)
1744 auto patchAddr = ^(uint32_t cachedDylibIndex, uint32_t exportCacheOffset, const FixupTarget& patchTarget) {
1745 addWeakDefCachePatch(cachedDylibIndex, exportCacheOffset, patchTarget);
1746 };
1747
1748 // build array of targets
1749 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ResolvedSymbolTarget, targets, 1024);
1750 forImage.loadAddress()->forEachChainedFixupTarget(_diag, ^(int libOrdinal, const char* symbolName, uint64_t addend, bool weakImport, bool& stop) {
1751 FixupTarget target;
1752 WrappedMachO forImageWmo(forImage.loadAddress(), this, (void*)&forImage);
1753 if ( wmo_findSymbolFrom(&forImageWmo, _diag, libOrdinal, symbolName, weakImport, false, addend, patchAddr, target) )
1754 targets.push_back(makeResolvedTarget(target));
1755 else
1756 stop = true;
1757 });
1758 if ( _diag.hasError() )
1759 return;
1760
1761 // C++ main executables can override operator new, so check for that
1762 if ( forImage.loadAddress()->isMainExecutable() && forImage.loadAddress()->hasWeakDefs() ) {
1763 WrappedMachO mainWmo(forImage.loadAddress(), this, (void*)&forImage);
1764 wmo_findExtraSymbolFrom(&mainWmo, patchAddr);
1765 }
1766
1767 uint64_t chainStartsOffset = forImage.loadAddress()->chainStartsOffset();
1768 writer.setChainedFixups(chainStartsOffset, targets);
1769 }
1770
1771 void ClosureBuilder::depthFirstRecurseSetInitInfo(uint32_t loadIndex, InitInfo initInfos[], uint32_t& initOrder, bool& hasError)
1772 {
1773 if ( initInfos[loadIndex].visited )
1774 return;
1775 initInfos[loadIndex].visited = true;
1776 initInfos[loadIndex].danglingUpward = false;
1777
1778 if (_loadedImages[loadIndex].isBadImage) {
1779 hasError = true;
1780 return;
1781 }
1782 for (const Image::LinkedImage& dep : _loadedImages[loadIndex].dependents) {
1783 if ( dep.imageNum() == kMissingWeakLinkedImage )
1784 continue;
1785 const ClosureBuilder::BuilderLoadedImage& depLi = findLoadedImage(dep.imageNum());
1786 uint32_t depLoadIndex = (uint32_t)_loadedImages.index(depLi);
1787 if ( dep.kind() == Image::LinkKind::upward ) {
1788 if ( !initInfos[depLoadIndex].visited )
1789 initInfos[depLoadIndex].danglingUpward = true;
1790 }
1791 else {
1792 depthFirstRecurseSetInitInfo(depLoadIndex, initInfos, initOrder, hasError);
1793 if (hasError)
1794 return;
1795 }
1796 }
1797 initInfos[loadIndex].initOrder = initOrder++;
1798 }
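// Worked example (editor's addition): for main -> libA -> libB with no upward
// links, the post-order traversal above assigns initOrder 1 to libB, 2 to libA,
// and 3 to main, so the bottom-most dependent runs its initializers first.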
1799
1800 void ClosureBuilder::computeInitOrder(ImageWriter& imageWriter, uint32_t loadIndex)
1801 {
1802 // allocate array to track initializers
1803 InitInfo initInfos[_loadedImages.count()];
1804 bzero(initInfos, sizeof(initInfos));
1805
1806 // recurse all images and build initializer list from bottom up
1807 uint32_t initOrder = 1;
1808 bool hasMissingDependent = false;
1809 depthFirstRecurseSetInitInfo(loadIndex, initInfos, initOrder, hasMissingDependent);
1810 if (hasMissingDependent) {
1811 imageWriter.setInvalid();
1812 return;
1813 }
1814
1815 // any images not visited yet are dangling; force add them to the end of the init list
1816 for (uint32_t i=0; i < (uint32_t)_loadedImages.count(); ++i) {
1817 if ( !initInfos[i].visited && initInfos[i].danglingUpward ) {
1818 depthFirstRecurseSetInitInfo(i, initInfos, initOrder, hasMissingDependent);
1819 }
1820 }
1821
1822 if (hasMissingDependent) {
1823 imageWriter.setInvalid();
1824 return;
1825 }
1826
1827 // build array of just images with initializer
1828 STACK_ALLOC_ARRAY(uint32_t, indexOfImagesWithInits, _loadedImages.count());
1829 uint32_t index = 0;
1830 for (const BuilderLoadedImage& li : _loadedImages) {
1831 if ( initInfos[index].visited && li.hasInits ) {
1832 indexOfImagesWithInits.push_back(index);
1833 }
1834 ++index;
1835 }
1836
1837 // bubble sort (FIXME)
1838 if ( indexOfImagesWithInits.count() > 1 ) {
1839 for (uint32_t i=0; i < indexOfImagesWithInits.count()-1; ++i) {
1840 for (uint32_t j=0; j < indexOfImagesWithInits.count()-i-1; ++j) {
1841 if ( initInfos[indexOfImagesWithInits[j]].initOrder > initInfos[indexOfImagesWithInits[j+1]].initOrder ) {
1842 uint32_t temp = indexOfImagesWithInits[j];
1843 indexOfImagesWithInits[j] = indexOfImagesWithInits[j+1];
1844 indexOfImagesWithInits[j+1] = temp;
1845 }
1846 }
1847 }
1848 }
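// Possible replacement for the bubble sort above (editor's sketch, assuming
// <algorithm> is acceptable in this target): sort the indices by initOrder.
#if 0
std::sort(indexOfImagesWithInits.begin(), indexOfImagesWithInits.end(),
          [&](uint32_t a, uint32_t b) {
              return initInfos[a].initOrder < initInfos[b].initOrder;
          });
#endif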
1849
1850 // copy ImageNum of each image with initializers into array
1851 ImageNum initNums[indexOfImagesWithInits.count()];
1852 for (uint32_t i=0; i < indexOfImagesWithInits.count(); ++i) {
1853 initNums[i] = _loadedImages[indexOfImagesWithInits[i]].imageNum;
1854 }
1855
1856 // add to closure info
1857 imageWriter.setInitsOrder(initNums, (uint32_t)indexOfImagesWithInits.count());
1858 }
1859
1860 void ClosureBuilder::addClosureInfo(LaunchClosureWriter& closureWriter)
1861 {
1862 // record which is libSystem
1863 assert(_libSystemImageNum != 0);
1864 closureWriter.setLibSystemImageNum(_libSystemImageNum);
1865
1866 // record which is libdyld
1867 assert(_libDyldImageNum != 0);
1868 const BuilderLoadedImage& libdyldImage = findLoadedImage(_libDyldImageNum);
1869 WrappedMachO libdyldWmo(libdyldImage.loadAddress(), this, (void*)&libdyldImage);
1870 FixupTarget libdyldEntryTarget;
1871 if ( libdyldWmo.findSymbolIn(_diag, "__ZN5dyld318entryVectorForDyldE", 0, libdyldEntryTarget) ) {
1872 const dyld3::LibDyldEntryVector* libDyldEntry = nullptr;
1873 if ( libdyldEntryTarget.kind == MachOAnalyzerSet::FixupTarget::Kind::bindToImage ) {
1874 libDyldEntry = (dyld3::LibDyldEntryVector*)((uint8_t*)libdyldEntryTarget.foundInImage._mh + libdyldEntryTarget.offsetInImage);
1875 }
1876 // peek at entry vector to see if version is compatible
1877 if ( (libDyldEntry != nullptr) && ((libDyldEntry->binaryFormatVersion & LibDyldEntryVector::kBinaryFormatVersionMask) == dyld3::closure::kFormatVersion) ) {
1878 Image::ResolvedSymbolTarget entryLocation = makeResolvedTarget(libdyldEntryTarget);
1879 closureWriter.setLibDyldEntry(entryLocation);
1880 }
1881 else
1882 _diag.error("libdyld.dylib entry vector is incompatible");
1883 }
1884 else {
1885 _diag.error("libdyld.dylib is missing entry vector");
1886 }
1887
1888 // record which is main executable
1889 ImageNum mainProgImageNum = _loadedImages[_mainProgLoadIndex].imageNum;
1890 closureWriter.setTopImageNum(mainProgImageNum);
1891
1892 // add entry
1893 uint64_t entryOffset;
1894 bool usesCRT;
1895 if ( _loadedImages[_mainProgLoadIndex].loadAddress()->getEntry(entryOffset, usesCRT) ) {
1896 Image::ResolvedSymbolTarget location;
1897 location.image.kind = Image::ResolvedSymbolTarget::kindImage;
1898 location.image.imageNum = mainProgImageNum;
1899 location.image.offset = (uint32_t)entryOffset;
1900 if ( usesCRT )
1901 closureWriter.setStartEntry(location);
1902 else
1903 closureWriter.setMainEntry(location);
1904 }
1905
1906 // add env vars that must match at launch time
1907 _pathOverrides.forEachEnvVar(^(const char* envVar) {
1908 closureWriter.addEnvVar(envVar);
1909 });
1910
1911 // add list of files which must be missing
1912 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(const char*, paths, 8192);
1913 if ( _mustBeMissingPaths != nullptr ) {
1914 _mustBeMissingPaths->forEachPath(^(const char* aPath) {
1915 paths.push_back(aPath);
1916 });
1917 }
1918 closureWriter.setMustBeMissingFiles(paths);
1919
1920 // add list of files which must be present with a specific inode/mtime
1921 if (!_skippedFiles.empty())
1922 closureWriter.setMustExistFiles(_skippedFiles);
1923 }
1924 void ClosureBuilder::invalidateInitializerRoots()
1925 {
1926 while (true) {
1927 bool madeChange = false;
1928 for (uintptr_t loadedImageIndex = _alreadyInitedIndex; loadedImageIndex != _loadedImages.count(); ++loadedImageIndex) {
1929 BuilderLoadedImage& li = _loadedImages[loadedImageIndex];
1930 if ( li.mustBuildClosure ) {
1931 // Already invalidated
1932 continue;
1933 }
1934 for (Image::LinkedImage depIndex : li.dependents) {
1935 if ( depIndex.imageNum() == kMissingWeakLinkedImage )
1936 continue;
1937 const BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
1938 // If a dependent is bad, or has a new image num, or is an override, then we need this image to get a new closure
1939 if ( depImage.mustBuildClosure ) {
1940 li.mustBuildClosure = true; // mark bad
1941 madeChange = true;
1942 }
1943 }
1944 }
1945 if (!madeChange)
1946 break;
1947 // If we made a change, then we detected an existing image with a dependent which needed to be rebuilt.
1948 // This corresponds to a root of the shared cache, where the existing image is a shared cache image and the root is the depImage
1949 _foundDyldCacheRoots = true;
1950 }
1951 }
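// Worked example (editor's addition): if libLeaf.dylib is a root (mustBuildClosure),
// the first pass of the loop above marks its direct clients, the next pass marks
// their clients, and so on until a pass makes no change (a fixed point).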
1952
1953 size_t ClosureBuilder::HashCString::hash(const char* v) {
1954 // FIXME: Use hash<string_view> when it has the correct visibility markup
1955 return __gnu_cxx::hash<const char*>{}(v);
1956 }
1957
1958 bool ClosureBuilder::EqualCString::equal(const char* s1, const char* s2) {
1959 return strcmp(s1, s2) == 0;
1960 }
1961
1962
1963
1964 struct HashUInt64 {
1965 static size_t hash(const uint64_t& v) {
1966 return std::hash<uint64_t>{}(v);
1967 }
1968 };
1969
1970 struct EqualUInt64 {
1971 static bool equal(uint64_t s1, uint64_t s2) {
1972 return s1 == s2;
1973 }
1974 };
1975
1976 void ClosureBuilder::writeClassOrProtocolHashTable(bool classes, Array<ObjCOptimizerImage>& objcImages) {
1977 __block MultiMap<const char*, dyld3::closure::Image::ObjCClassImageOffset, HashCString, EqualCString> seenClassesMap;
1978 __block Map<const char*, dyld3::closure::Image::ObjCClassNameImageOffset, HashCString, EqualCString> classNameMap;
1979 __block OverflowSafeArray<const char*> classNames;
1980
1981 // Note we walk the images backwards, as we want them in load order to match the order in which they are registered with objc
1982 for (size_t imageIndex = 0, reverseIndex = (objcImages.count() - 1); imageIndex != objcImages.count(); ++imageIndex, --reverseIndex) {
1983 if (objcImages[reverseIndex].diag.hasError())
1984 continue;
1985 ObjCOptimizerImage& image = objcImages[reverseIndex];
1986 const OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenClasses = classes ? image.seenClasses : image.seenProtocols;
1987
1988 for (const ObjCOptimizerImage::SeenClass& seenClass : seenClasses) {
1989 closure::Image::ObjCClassNameImageOffset classNameTarget = seenClass.first;
1990 dyld3::closure::Image::ObjCClassImageOffset classDataTarget = seenClass.second;
1991 Image::ObjCClassImage classImage = _objcClassesHashTableImages[classNameTarget.classNameImageIndex];
1992
1993 const BuilderLoadedImage& li = findLoadedImage(classImage.imageNum);
1994 const dyld3::MachOAnalyzer* ma = li.loadAddress();
1995
1996 const char* className = ((const char*)ma) + classImage.offsetOfClassNames + classNameTarget.classNameImageOffset;
1997 //uint64_t nameVMAddr = ma->preferredLoadAddress() + classImage.offsetOfClassNames + classNameTarget.classNameImageOffset;
1998 //printf("%s: 0x%08llx = '%s'\n", li.path(), nameVMAddr, className);
1999 seenClassesMap.insert({ className, classDataTarget });
2000
2001 // Also track the name
2002 auto itAndInserted = classNameMap.insert({ className, dyld3::closure::Image::ObjCClassNameImageOffset() });
2003 if (itAndInserted.second) {
2004 // We inserted the class name so we need to add it to the strings for the closure hash table
2005 classNames.push_back(className);
2006
2007 // We already computed a class name target in a previous loop so use that one
2008 itAndInserted.first->second = seenClass.first;
2009
2010 // If we are processing protocols, and this is the first one we've seen, then track its ISA to be fixed up
2011 if ( !classes ) {
2012 uint64_t protocolVMOffset = classImage.offsetOfClasses + classDataTarget.classData.imageOffset;
2013 image.protocolISAFixups.push_back(protocolVMOffset);
2014 }
2015 }
2016 }
2017 }
2018
2019 __block uint32_t duplicateCount = 0;
2020 seenClassesMap.forEachEntry(^(const char *const &key, const Image::ObjCClassImageOffset **values,
2021 uint64_t valuesCount) {
2022 if (valuesCount != 1)
2023 duplicateCount += valuesCount;
2024 });
2025
2026 // If we have closure class names, we need to make a hash table for them.
2027 OverflowSafeArray<uint8_t>& hashTable = classes ? _objcClassesHashTable : _objcProtocolsHashTable;
2028 if (!classNames.empty()) {
2029 objc_opt::perfect_hash phash;
2030 objc_opt::make_perfect(classNames, phash);
2031 size_t size = ObjCClassOpt::size(phash, duplicateCount);
2032 hashTable.resize(size);
2033 //printf("Class table size: %lld\n", size);
2034 ObjCClassOpt* classesHashTable = (ObjCClassOpt*)hashTable.begin();
2035 classesHashTable->write(phash, classNameMap.array(), seenClassesMap, duplicateCount);
2036 }
2037 }
2038
2039 bool ClosureBuilder::optimizeObjC(Array<ImageWriter>& writers) {
2040 if ( _dyldCache == nullptr )
2041 return false;
2042
2043 // If we have the read only data, make sure it has a valid selector table inside.
2044 const objc_opt::objc_clsopt_t* objcClassOpt = nullptr;
2045 const objc_opt::objc_selopt_t* objcSelOpt = nullptr;
2046 const objc_opt::objc_protocolopt2_t* objcProtocolOpt = nullptr;
2047 if (const objc_opt::objc_opt_t* optObjCHeader = _dyldCache->objcOpt()) {
2048 objcClassOpt = optObjCHeader->clsopt();
2049 objcSelOpt = optObjCHeader->selopt();
2050 objcProtocolOpt = optObjCHeader->protocolopt2();
2051 }
2052
2053 if ( !objcClassOpt || !objcSelOpt || !objcProtocolOpt )
2054 return false;
2055
2056 // We have 24 bits of index in SelectorReferenceFixup so we can't handle a
2057 // shared cache selector table larger than that
2058 if ( objcSelOpt->usedCount() >= (1 << 24) )
2059 return false;
2060
2061 // Make sure we have the pointers section with the pointer to the protocol class
2062 const void* objcOptPtrs = _dyldCache->objcOptPtrs();
2063 if ( objcOptPtrs == nullptr )
2064 return false;
2065
2066 uint32_t pointerSize = _loadedImages.begin()->loadAddress()->pointerSize();
2067 uint64_t classProtocolVMAddr = (pointerSize == 8) ? *(uint64_t*)objcOptPtrs : *(uint32_t*)objcOptPtrs;
2068
2069 Image::ResolvedSymbolTarget objcProtocolClassTarget;
2070 objcProtocolClassTarget.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
2071 if ( _dyldCacheIsLive ) {
2072 // If we are on arm64e, the protocol ISA in the shared cache was signed. We don't
2073 // want the signature bits in the encoded value
2074 #if __has_feature(ptrauth_calls)
2075 classProtocolVMAddr = (uint64_t)__builtin_ptrauth_strip((void*)classProtocolVMAddr, ptrauth_key_asda);
2076 #endif
2077 objcProtocolClassTarget.sharedCache.offset = classProtocolVMAddr - (uint64_t)_dyldCache;
2078 } else {
2079 objcProtocolClassTarget.sharedCache.offset = classProtocolVMAddr - _dyldCache->unslidLoadAddress();
2080 }
2081
2082 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(ObjCOptimizerImage, objcImages, 32);
2083 ArrayFinalizer<ObjCOptimizerImage> scopedCleanup(objcImages,
2084 ^(ObjCOptimizerImage& objcImage) {
2085 objcImage.~ObjCOptimizerImage();
2086 });
2087
2088 // Find all the images with valid objc info
2089 // Also add shared cache images to a map so that we can see them later for looking up classes
2090 Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer> sharedCacheImagesMap;
2091 for (size_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
2092 BuilderLoadedImage& li = _loadedImages[imageIndex];
2093
2094 // Skip shared cache images as even if they need a new closure, the objc runtime can still use
2095 // the optimized shared cache tables.
2096 if ( li.loadAddress()->inDyldCache() ) {
2097 sharedCacheImagesMap.insert({ li.loadAddress(), true });
2098 // Bump the writer index if we have a writer for this image
2099 if ( li.mustBuildClosure )
2100 ++writerIndex;
2101 continue;
2102 }
2103 // Images which don't need a closure can be skipped. They are from the shared cache
2104 if ( !li.mustBuildClosure )
2105 continue;
2106
2107 // If we have a root of libobjc, just give up for now
2108 if ( !strcmp(li.path(), "/usr/lib/libobjc.A.dylib"))
2109 return false;
2110
2111 ImageWriter& writer = writers[writerIndex];
2112 ++writerIndex;
2113
2114 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2115
2116 // Skip images with chained fixup formats we haven't tested yet
2117 // FIXME: Handle chained fixups
2118 if ( ma->hasChainedFixups() ) {
2119 switch ( ma->chainedPointerFormat() ) {
2120 case DYLD_CHAINED_PTR_ARM64E:
2121 case DYLD_CHAINED_PTR_64:
2122 // We've tested the 64-bit chained fixups.
2123 break;
2124 case DYLD_CHAINED_PTR_64_OFFSET:
2125 case DYLD_CHAINED_PTR_ARM64E_USERLAND:
2126 case DYLD_CHAINED_PTR_ARM64E_USERLAND24:
2127 case DYLD_CHAINED_PTR_ARM64E_FIRMWARE:
2128 // FIXME: Test 64-bit offset chained fixups then enable this.
2129 continue;
2130 case DYLD_CHAINED_PTR_32:
2131 case DYLD_CHAINED_PTR_32_CACHE:
2132 case DYLD_CHAINED_PTR_32_FIRMWARE:
2133 // FIXME: Test 32-bit chained fixups then enable this.
2134 continue;
2135 }
2136 }
2137
2138 const MachOAnalyzer::ObjCImageInfo* objcImageInfo = ma->objcImageInfo();
2139 if ( objcImageInfo == nullptr )
2140 continue;
2141
2142 // This image is good so record it for use later.
2143 objcImages.default_constuct_back();
2144 ObjCOptimizerImage& image = objcImages.back();
2145 image.loadedImage = &li;
2146 image.writer = &writer;
2147
2148 // Find FairPlay encryption range if encrypted
2149 uint32_t fairPlayFileOffset;
2150 uint32_t fairPlaySize;
2151 if ( ma->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
2152 image.fairplayFileOffsetStart = fairPlayFileOffset;
2153 image.fairplayFileOffsetEnd = fairPlayFileOffset + fairPlaySize;
2154 }
2155
2156 // Set the offset to the objc image info
2157 image.objcImageInfoVMOffset = (uint64_t)objcImageInfo - (uint64_t)ma;
2158 }
2159
2160 // objc supports a linker set which is a magic section of duplicate objc classes to ignore
2161 // We need to match that behaviour
2162 Map<const char*, bool, HashCString, EqualCString> duplicateClassesToIgnore;
2163 parseObjCClassDuplicates(duplicateClassesToIgnore);
2164
2165 OverflowSafeArray<const char*> closureSelectorStrings;
2166 Map<const char*, dyld3::closure::Image::ObjCImageOffset, HashCString, EqualCString> closureSelectorMap;
2167 OverflowSafeArray<const char*> closureDuplicateSharedCacheClassNames;
2168 Map<const char*, dyld3::closure::Image::ObjCDuplicateClass, HashCString, EqualCString> closureDuplicateSharedCacheClassMap;
2169 for (ObjCOptimizerImage& image : objcImages) {
2170 optimizeObjCClasses(objcClassOpt, sharedCacheImagesMap, closureDuplicateSharedCacheClassMap, duplicateClassesToIgnore, image);
2171 if (image.diag.hasError())
2172 continue;
2173
2174 optimizeObjCProtocols(objcProtocolOpt, sharedCacheImagesMap, image);
2175 if (image.diag.hasError())
2176 continue;
2177
2178 optimizeObjCSelectors(objcSelOpt, closureSelectorMap, image);
2179 if (image.diag.hasError())
2180 continue;
2181
2182 // If this image is still valid, then add its intermediate results to the main tables
2183
2184 // Class results
2185 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2186 uint64_t nameVMOffset = nameAndDataVMOffset.first;
2187 uint64_t dataVMOffset = nameAndDataVMOffset.second;
2188 _objcClassesHashTableImages.push_back({ image.loadedImage->imageNum, (uint32_t)nameVMOffset, (uint32_t)dataVMOffset });
2189 }
2190 image.classesNameAndDataVMOffsets.clear();
2191
2192 for (const auto& stringAndDuplicate : image.classSharedCacheDuplicates) {
2193 closureDuplicateSharedCacheClassMap[stringAndDuplicate.first] = stringAndDuplicate.second;
2194 closureDuplicateSharedCacheClassNames.push_back(stringAndDuplicate.first);
2195 }
2196
2197 // Selector results
2198 // Note we don't need to add the selector binds here. It's easier just to process them later from each image
2199 for (const auto& stringAndTarget : image.selectorMap) {
2200 closureSelectorMap[stringAndTarget.first] = stringAndTarget.second;
2201 closureSelectorStrings.push_back(stringAndTarget.first);
2202 }
2203 if (image.methodNameVMOffset)
2204 _objcSelectorsHashTableImages.push_back({ image.loadedImage->imageNum, (uint32_t)*image.methodNameVMOffset });
2205 }
2206
2207 // If we successfully analyzed the classes and selectors, we can now emit their data
2208 // Set all the writers to have optimized objc
2209 for (ObjCOptimizerImage& image : objcImages) {
2210 if (image.diag.hasError())
2211 continue;
2212 image.writer->setHasPrecomputedObjC(true);
2213 }
2214
2215 // Write out the class table
2216 writeClassOrProtocolHashTable(true, objcImages);
2217
2218 // Write out the protocol table
2219 writeClassOrProtocolHashTable(false, objcImages);
2220
2221 // If we have closure duplicate classes, we need to make a hash table for them.
2222 closure::ObjCStringTable* duplicateClassesTable = nullptr;
2223 if (!closureDuplicateSharedCacheClassNames.empty()) {
2224 objc_opt::perfect_hash phash;
2225 objc_opt::make_perfect(closureDuplicateSharedCacheClassNames, phash);
2226 size_t size = ObjCStringTable::size(phash);
2227 _objcClassesDuplicatesHashTable.resize(size);
2228 //printf("Duplicate classes table size: %lld\n", size);
2229 duplicateClassesTable = (closure::ObjCClassDuplicatesOpt*)_objcClassesDuplicatesHashTable.begin();
2230 duplicateClassesTable->write(phash, closureDuplicateSharedCacheClassMap.array());
2231 }
2232
2233 // If we have closure selectors, we need to make a hash table for them.
2234 closure::ObjCStringTable* selectorStringTable = nullptr;
2235 if (!closureSelectorStrings.empty()) {
2236 objc_opt::perfect_hash phash;
2237 objc_opt::make_perfect(closureSelectorStrings, phash);
2238 size_t size = ObjCStringTable::size(phash);
2239 _objcSelectorsHashTable.resize(size);
2240 //printf("Selector table size: %lld\n", size);
2241 selectorStringTable = (closure::ObjCStringTable*)_objcSelectorsHashTable.begin();
2242 selectorStringTable->write(phash, closureSelectorMap.array());
2243 }
2244
2245 // Add fixups for the image info, protocol ISAs, and selector refs
2246 for (ObjCOptimizerImage& image : objcImages) {
2247 if (image.diag.hasError())
2248 continue;
2249
2250 // Protocol ISA references
2251 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ProtocolISAFixup, protocolFixups, 512);
2252 if ( !image.protocolISAFixups.empty() ) {
2253
2254 __block uint64_t lastOffset = -pointerSize;
2255 for (uint64_t runtimeOffset : image.protocolISAFixups) {
2256 bool mergedIntoPrevious = false;
2257 if ( (runtimeOffset > lastOffset) && !protocolFixups.empty() ) {
2258 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2259 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2260 // misaligned pointer means we cannot optimize
2261 }
2262 else {
2263 if ( (protocolFixups.back().repeatCount == 1) && (protocolFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2264 protocolFixups.back().repeatCount = 2;
2265 protocolFixups.back().skipCount = skipAmount;
2266 assert(protocolFixups.back().skipCount == skipAmount); // check overflow
2267 mergedIntoPrevious = true;
2268 }
2269 else if ( (protocolFixups.back().skipCount == skipAmount) && (protocolFixups.back().repeatCount < 0xfff) ) {
2270 uint32_t prevRepeatCount = protocolFixups.back().repeatCount;
2271 protocolFixups.back().repeatCount += 1;
2272 assert(protocolFixups.back().repeatCount > prevRepeatCount); // check overflow
2273 mergedIntoPrevious = true;
2274 }
2275 }
2276 }
2277 if ( !mergedIntoPrevious ) {
2278 Image::ProtocolISAFixup pattern;
2279 pattern.startVmOffset = runtimeOffset;
2280 pattern.repeatCount = 1;
2281 pattern.skipCount = 0;
2282 assert(pattern.startVmOffset == runtimeOffset);
2283 protocolFixups.push_back(pattern);
2284 }
2285 lastOffset = runtimeOffset;
2286 }
2287 }
2288
2289 // Selector references
2290 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::SelectorReferenceFixup, selRefFixups, 512);
2291 if ( !image.selectorFixups.empty() ) {
2292 uint64_t prevVMOffset = 0;
2293 const uint64_t maxChainOffset = (4 * ((1 << 7) - 1));
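// Worked example (editor's addition): the 7-bit 'next' field is scaled by 4, so a
// chain entry can reach at most 4 * 127 = 508 bytes. Two selector refs at vm
// offsets 0x1000 and 0x11F0 are 0x1F0 (496) bytes apart and share a chain with
// next = 496 / 4 = 124; a larger gap starts a new chain.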
2294 for (const ObjCOptimizerImage::SelectorFixup& selectorFixup : image.selectorFixups) {
2295 assert( (selectorFixup.fixupVMOffset & 3) == 0 );
2296 if ( (selectorFixup.fixupVMOffset - prevVMOffset) <= maxChainOffset ) {
2297 // Add this to the previous chain
2298 selRefFixups.back().chainEntry.next = (uint32_t)(selectorFixup.fixupVMOffset - prevVMOffset) / 4;
2299 } else {
2300 // Need to start a new chain as the previous offset can't reach
2301 Image::SelectorReferenceFixup fixup;
2302 fixup.chainStartVMOffset = selectorFixup.fixupVMOffset;
2303 selRefFixups.push_back(fixup);
2304 }
2305
2306 if ( selectorFixup.isSharedCache ) {
2307 // If the entry is in the shared cache then we already have the index for it
2308 Image::SelectorReferenceFixup fixup;
2309 fixup.chainEntry.index = selectorFixup.sharedCache.selectorTableIndex;
2310 fixup.chainEntry.next = 0;
2311 fixup.chainEntry.inSharedCache = 1;
2312 selRefFixups.push_back(fixup);
2313 } else {
2314 // We had to record the string for the closure table entries as we don't know the
2315 // index until now
2316 uint32_t selectorTableIndex = selectorStringTable->getIndex(selectorFixup.image.selectorString);
2317 assert(selectorTableIndex != ObjCSelectorOpt::indexNotFound);
2318 Image::SelectorReferenceFixup fixup;
2319 fixup.chainEntry.index = selectorTableIndex;
2320 fixup.chainEntry.next = 0;
2321 fixup.chainEntry.inSharedCache = 0;
2322 selRefFixups.push_back(fixup);
2323 }
2324
2325 prevVMOffset = selectorFixup.fixupVMOffset;
2326 }
2327 }
2328
2329 // Stable Swift fixups
2330 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ClassStableSwiftFixup, stableSwiftFixups, 512);
2331 if ( !image.classStableSwiftFixups.empty() ) {
2332
2333 __block uint64_t lastOffset = -pointerSize;
2334 for (uint64_t runtimeOffset : image.classStableSwiftFixups) {
2335 bool mergedIntoPrevious = false;
2336 if ( (runtimeOffset > lastOffset) && !stableSwiftFixups.empty() ) {
2337 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2338 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2339 // misaligned pointer means we cannot optimize
2340 }
2341 else {
2342 if ( (stableSwiftFixups.back().repeatCount == 1) && (stableSwiftFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2343 stableSwiftFixups.back().repeatCount = 2;
2344 stableSwiftFixups.back().skipCount = skipAmount;
2345 assert(stableSwiftFixups.back().skipCount == skipAmount); // check overflow
2346 mergedIntoPrevious = true;
2347 }
2348 else if ( (stableSwiftFixups.back().skipCount == skipAmount) && (stableSwiftFixups.back().repeatCount < 0xfff) ) {
2349 uint32_t prevRepeatCount = stableSwiftFixups.back().repeatCount;
2350 stableSwiftFixups.back().repeatCount += 1;
2351 assert(stableSwiftFixups.back().repeatCount > prevRepeatCount); // check overflow
2352 mergedIntoPrevious = true;
2353 }
2354 }
2355 }
2356 if ( !mergedIntoPrevious ) {
2357 Image::ClassStableSwiftFixup pattern;
2358 pattern.startVmOffset = runtimeOffset;
2359 pattern.repeatCount = 1;
2360 pattern.skipCount = 0;
2361 assert(pattern.startVmOffset == runtimeOffset);
2362 stableSwiftFixups.push_back(pattern);
2363 }
2364 lastOffset = runtimeOffset;
2365 }
2366 }
2367
2368 // Method list fixups
2369 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::MethodListFixup, methodListFixups, 512);
2370 if ( !image.methodListFixups.empty() ) {
2371
2372 __block uint64_t lastOffset = -pointerSize;
2373 for (uint64_t runtimeOffset : image.methodListFixups) {
2374 bool mergedIntoPrevious = false;
2375 if ( (runtimeOffset > lastOffset) && !methodListFixups.empty() ) {
2376 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2377 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2378 // misaligned pointer means we cannot optimize
2379 }
2380 else {
2381 if ( (methodListFixups.back().repeatCount == 1) && (methodListFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2382 methodListFixups.back().repeatCount = 2;
2383 methodListFixups.back().skipCount = skipAmount;
2384 assert(methodListFixups.back().skipCount == skipAmount); // check overflow
2385 mergedIntoPrevious = true;
2386 }
2387 else if ( (methodListFixups.back().skipCount == skipAmount) && (methodListFixups.back().repeatCount < 0xfff) ) {
2388 uint32_t prevRepeatCount = methodListFixups.back().repeatCount;
2389 methodListFixups.back().repeatCount += 1;
2390 assert(methodListFixups.back().repeatCount > prevRepeatCount); // check overflow
2391 mergedIntoPrevious = true;
2392 }
2393 }
2394 }
2395 if ( !mergedIntoPrevious ) {
2396 Image::MethodListFixup pattern;
2397 pattern.startVmOffset = runtimeOffset;
2398 pattern.repeatCount = 1;
2399 pattern.skipCount = 0;
2400 assert(pattern.startVmOffset == runtimeOffset);
2401 methodListFixups.push_back(pattern);
2402 }
2403 lastOffset = runtimeOffset;
2404 }
2405 }
2406
2407 image.writer->setObjCFixupInfo(objcProtocolClassTarget, image.objcImageInfoVMOffset, protocolFixups,
2408 selRefFixups, stableSwiftFixups, methodListFixups);
2409 }
2410
2411 return true;
2412 }
2413
2414 void ClosureBuilder::optimizeObjCSelectors(const objc_opt::objc_selopt_t* objcSelOpt,
2415 const Map<const char*, dyld3::closure::Image::ObjCImageOffset, HashCString, EqualCString>& closureSelectorMap,
2416 ObjCOptimizerImage& image) {
2417
2418 BuilderLoadedImage& li = *image.loadedImage;
2419
2420 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2421 uint32_t pointerSize = ma->pointerSize();
2422 const uint64_t loadAddress = ma->preferredLoadAddress();
2423 const dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(li.contentRebased);
2424
2425 // The legacy (objc1) codebase uses a bunch of sections we don't want to reason about. If we see them just give up.
2426 __block bool foundBadSection = false;
2427 ma->forEachSection(^(const MachOAnalyzer::SectionInfo &sectInfo, bool malformedSectionRange, bool &stop) {
2428 if ( strcmp(sectInfo.segInfo.segName, "__OBJC") != 0 )
2429 return;
2430 if (strcmp(sectInfo.sectName, "__module_info") == 0) {
2431 foundBadSection = true;
2432 stop = true;
2433 return;
2434 }
2435 if (strcmp(sectInfo.sectName, "__protocol") == 0) {
2436 foundBadSection = true;
2437 stop = true;
2438 return;
2439 }
2440 if (strcmp(sectInfo.sectName, "__message_refs") == 0) {
2441 foundBadSection = true;
2442 stop = true;
2443 return;
2444 }
2445 });
2446 if (foundBadSection) {
2447 image.diag.error("Old objc section");
2448 return;
2449 }
2450
2451 __block MachOAnalyzer::SectionCache selectorStringSectionCache(ma);
2452
2453 uint32_t sharedCacheSentinelIndex = objcSelOpt->getSentinelIndex();
2454
2455 // Track the locations where we've updated selector references. With relative method lists,
2456 // we share selref slots across classes, categories, protocols, and SEL() expressions, so we may
2457 // visit a location more than once
2458 __block Map<uint64_t, bool, HashUInt64, EqualUInt64> seenSelectorReferenceImageOffsets;
2459
2460 auto visitReferenceToObjCSelector = ^void(uint64_t selectorStringVMAddr, uint64_t selectorReferenceVMAddr) {
2461
2462 uint64_t selectorUseImageOffset = selectorReferenceVMAddr - loadAddress;
2463 auto selUseItAndInserted = seenSelectorReferenceImageOffsets.insert({ selectorUseImageOffset, true });
2464 if ( !selUseItAndInserted.second ) {
2465 // If we didn't insert the selector reference, then it's already there, so we should skip it
2466 return;
2467 }
2468
2469 if ( (selectorUseImageOffset & 3) != 0 ) {
2470 image.diag.error("Unaligned selector reference fixup");
2471 return;
2472 }
2473
2474 // Image::SelectorReferenceFixup only has a 32-bit reach
2475 if ( selectorUseImageOffset >= (1ULL << 32) ) {
2476 image.diag.error("Selector reference fixup exceeds supported vm offset");
2477 return;
2478 }
2479
2480 // Get the section for the name
2481 const char* selectorString = nullptr;
2482 MachOAnalyzer::PrintableStringResult selectorStringResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2483 __block uint64_t selectorStringSectionStartVMAddr = 0;
2484 auto selectorStringSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2485
2486 // We only have 24 bits in Image::ObjCImageOffset to index into the strings
2487 if (sectInfo.sectSize >= Image::ObjCImageOffset::maximumOffset) {
2488 return false;
2489 }
2490
2491 // We use 32-bit offsets so make sure the section is no larger than that.
2492 uint64_t sectionVMOffset = sectInfo.sectAddr - loadAddress;
2493 if (sectionVMOffset >= (1ULL << 32)) {
2494 return false;
2495 }
2496
2497 selectorStringSectionStartVMAddr = sectInfo.sectAddr;
2498 return true;
2499 };
2500 selectorString = ma->getPrintableString(selectorStringVMAddr, selectorStringResult,
2501 &selectorStringSectionCache, selectorStringSectionHandler);
2502
2503 if ( selectorStringResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2504 image.diag.error("Invalid selector string for objc optimisation");
2505 return;
2506 }
2507
2508 uint32_t cacheSelectorIndex = objcSelOpt->getIndexForKey(selectorString);
2509 //printf("selector: %p -> %p %s\n", methodName, cacheSelector, selectorString);
2510
2511 if ( cacheSelectorIndex != sharedCacheSentinelIndex ) {
2512 // We got the selector from the cache so add a fixup to point there.
2513 ObjCOptimizerImage::SelectorFixup fixup;
2514 fixup.isSharedCache = true;
2515 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2516 fixup.sharedCache.selectorTableIndex = cacheSelectorIndex;
2517
2518 //printf("Overriding fixup at 0x%08llX to cache offset 0x%08llX\n", selectorUseImageOffset, (uint64_t)cacheSelector - (uint64_t)_dyldCache);
2519 image.selectorFixups.push_back(fixup);
2520 return;
2521 }
2522
2523 // See if this selector is already in the closure map from a previous image
2524 auto closureSelectorIt = closureSelectorMap.find(selectorString);
2525 if (closureSelectorIt != closureSelectorMap.end()) {
2526 // This selector was found in a previous image, so use it here.
2527 ObjCOptimizerImage::SelectorFixup fixup;
2528 fixup.isSharedCache = false;
2529 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2530 fixup.image.selectorString = selectorString;
2531
2532 //printf("Overriding fixup at 0x%08llX to '%s' offset 0x%08llX\n", selectorUseImageOffset, findLoadedImage(target.image.imageNum).path(), target.image.offset);
2533 image.selectorFixups.push_back(fixup);
2534 return;
2535 }
2536
2537 // See if this selector is already in the map for this image
2538 auto itAndInserted = image.selectorMap.insert({ selectorString, dyld3::closure::Image::ObjCImageOffset() });
2539 if (itAndInserted.second) {
2540 // We added the selector so it's pointing into our own image.
2541 // We don't need to add a fixup to our image, but we do need to
2542 // populate the data for other images later to point here.
2543 // First put our image in the list if it's not already there.
2544 uint64_t methodNameVMOffset = selectorStringSectionStartVMAddr - loadAddress;
2545 if (!image.methodNameVMOffset) {
2546 if ( _objcSelectorsHashTableImages.count() == Image::ObjCImageOffset::maximumImageIndex ) {
2547 image.diag.error("Out of space for selector hash images");
2548 return;
2549 }
2550 image.methodNameVMOffset = methodNameVMOffset;
2551 } else {
2552 // If we already set the offset to the start of the method names section, double check that
2553 // the section we are in right now is the same as that one. Otherwise we don't have the code
2554 // to handle both right now.
2555 if (*image.methodNameVMOffset != methodNameVMOffset) {
2556 image.diag.error("Cannot handle more than one selector strings section");
2557 return;
2558 }
2559 }
2560
2561 dyld3::closure::Image::ObjCImageOffset target;
2562 target.imageIndex = (uint32_t)_objcSelectorsHashTableImages.count();
2563 target.imageOffset = (uint32_t)(selectorStringVMAddr - selectorStringSectionStartVMAddr);
2564 itAndInserted.first->second = target;
2565 return;
2566 }
2567
2568 // This selector was found elsewhere in our image. If this reference already points to the same
2569 // selector string as we found before (and it should!) then we have nothing to do. Otherwise we
2570 // need to add a fixup here to make sure we point to our chosen definition.
2571 uint32_t imageOffset = (uint32_t)(selectorStringVMAddr - loadAddress);
2572 if ( imageOffset == (*image.methodNameVMOffset + itAndInserted.first->second.imageOffset) )
2573 return;
2574
2575 ObjCOptimizerImage::SelectorFixup fixup;
2576 fixup.isSharedCache = false;
2577 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2578 fixup.image.selectorString = selectorString;
2579
2580 //printf("Overriding fixup at 0x%08llX to '%s' offset 0x%08llX\n", selectorUseImageOffset, findLoadedImage(target.image.imageNum).path(), target.image.offset);
2581 image.selectorFixups.push_back(fixup);
2582 };
2583
2584 auto visitMethod = ^(uint64_t methodVMAddr, const dyld3::MachOAnalyzer::ObjCMethod& method) {
2585 visitReferenceToObjCSelector(method.nameVMAddr, method.nameLocationVMAddr);
2586 };
2587
2588 auto visitMethodList = ^(uint64_t methodListVMAddr) {
2589 if ( methodListVMAddr == 0 )
2590 return;
2591 bool isRelativeMethodList = false;
2592 ma->forEachObjCMethod(methodListVMAddr, vmAddrConverter, visitMethod, &isRelativeMethodList);
2593 if (image.diag.hasError())
2594 return;
2595 // Record the offset to the method list so that we can mark it as being uniqued
2596 // We can only do this if we have a pointer based method list as relative method lists are
2597 // in read-only memory
2598 if ( !isRelativeMethodList )
2599 image.methodListFixups.push_back(methodListVMAddr - loadAddress);
2600 };
2601
2602 auto visitClass = ^(Diagnostics& diag, uint64_t classVMAddr,
2603 uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
2604 const dyld3::MachOAnalyzer::ObjCClassInfo& objcClass, bool isMetaClass) {
2605 visitMethodList(objcClass.baseMethodsVMAddr(pointerSize));
2606 };
2607
2608 auto visitCategory = ^(Diagnostics& diag, uint64_t categoryVMAddr,
2609 const dyld3::MachOAnalyzer::ObjCCategory& objcCategory) {
2610 visitMethodList(objcCategory.instanceMethodsVMAddr);
2611 visitMethodList(objcCategory.classMethodsVMAddr);
2612 };
2613 auto visitProtocol = ^(Diagnostics& diag, uint64_t protocolVMAddr,
2614 const dyld3::MachOAnalyzer::ObjCProtocol& objCProtocol) {
2615 visitMethodList(objCProtocol.instanceMethodsVMAddr);
2616 visitMethodList(objCProtocol.classMethodsVMAddr);
2617 visitMethodList(objCProtocol.optionalInstanceMethodsVMAddr);
2618 visitMethodList(objCProtocol.optionalClassMethodsVMAddr);
2619 };
2620
2621 // Walk the class list
2622 ma->forEachObjCClass(image.diag, vmAddrConverter, visitClass);
2623 if (image.diag.hasError())
2624 return;
2625
2626 // Walk the category list
2627 ma->forEachObjCCategory(image.diag, vmAddrConverter, visitCategory);
2628 if (image.diag.hasError())
2629 return;
2630
2631 // Walk the protocol list
2632 ma->forEachObjCProtocol(image.diag, vmAddrConverter, visitProtocol);
2633 if (image.diag.hasError())
2634 return;
2635
2636 // Visit the selector refs
2637 ma->forEachObjCSelectorReference(image.diag, vmAddrConverter, ^(uint64_t selRefVMAddr, uint64_t selRefTargetVMAddr) {
2638 visitReferenceToObjCSelector(selRefTargetVMAddr, selRefVMAddr);
2639 });
2640 if (image.diag.hasError())
2641 return;
2642
2643 // Visit the message refs
2644 // Note this isn't actually supported in libobjc any more. Its logic for deciding whether to support it is:
2645 // #if (defined(__x86_64__) && (TARGET_OS_OSX || TARGET_OS_SIMULATOR))
2646 // So to keep it simple, let's only do this walk if we are x86_64
2647 if ( ma->isArch("x86_64") || ma->isArch("x86_64h") ) {
2648 if (ma->hasObjCMessageReferences()) {
2649 image.diag.error("Cannot handle message refs");
2650 return;
2651 }
2652 }
2653 }
2654
2655 static const dyld3::MachOAnalyzer* getMachHeaderFromObjCHeaderInfo(const void* opaqueHeaderInfo, uint32_t pointerSize) {
2656 if (pointerSize == 8) {
2657 typedef int64_t PtrTy;
2658 struct HeaderInfo {
2659 PtrTy mhdr_offset; // offset to mach_header_64
2660 PtrTy info_offset; // offset to objc_image_info *
2661 };
2662 const HeaderInfo* headerInfo = (const HeaderInfo*)opaqueHeaderInfo;
2663 return (const dyld3::MachOAnalyzer*)(((const uint8_t*)&headerInfo->mhdr_offset) + headerInfo->mhdr_offset);
2664 } else {
2665 typedef int32_t PtrTy;
2666 struct HeaderInfo {
2667 PtrTy mhdr_offset; // offset to mach_header
2668 PtrTy info_offset; // offset to objc_image_info *
2669 };
2670 const HeaderInfo* headerInfo = (const HeaderInfo*)opaqueHeaderInfo;
2671 return (const dyld3::MachOAnalyzer*)(((const uint8_t*)&headerInfo->mhdr_offset) + headerInfo->mhdr_offset);
2672 }
2673 }
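// Worked example (editor's addition): mhdr_offset is self-relative, so if the
// HeaderInfo struct lives at address 0x10000 and mhdr_offset is -0x8000, the
// mach_header is at 0x10000 + (-0x8000) = 0x8000.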
2674
2675 void ClosureBuilder::addDuplicateObjCClassWarning(const char* className,
2676 const char* duplicateDefinitionPath,
2677 const char* canonicalDefinitionPath)
2678 {
2679 if ( _objcDuplicateClassWarnings == nullptr )
2680 _objcDuplicateClassWarnings = PathPool::allocate();
2681 // Use a diagnostic to give us a buffer we can safely print to
2682 Diagnostics diag;
2683 diag.error("Class %s is implemented in both %s and %s. One of the two will be used. Which one is undefined.",
2684 className, canonicalDefinitionPath, duplicateDefinitionPath);
2685 #if BUILDING_CACHE_BUILDER
2686 _objcDuplicateClassWarnings->add(diag.errorMessage().c_str());
2687 #else
2688 _objcDuplicateClassWarnings->add(diag.errorMessage());
2689 #endif
2690 }
2691
2692 void ClosureBuilder::optimizeObjCClasses(const objc_opt::objc_clsopt_t* objcClassOpt,
2693 const Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer>& sharedCacheImagesMap,
2694 const Map<const char*, dyld3::closure::Image::ObjCDuplicateClass, HashCString, EqualCString>& duplicateSharedCacheClasses,
2695 const Map<const char*, bool, HashCString, EqualCString>& duplicateClassesToIgnore,
2696 ObjCOptimizerImage& image) {
2697
2698 BuilderLoadedImage& li = *image.loadedImage;
2699 OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenClasses = image.seenClasses;
2700
2701 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2702 const uint32_t pointerSize = ma->pointerSize();
2703 const uint64_t loadAddress = ma->preferredLoadAddress();
2704 const dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(li.contentRebased);

    // Keep track of any missing weak imports so that we can tell if the superclasses are nil
    // This is necessary as the shared cache will be marked with 'no missing weak superclasses'
    // and so we need to continue to satisfy that constraint
    __block Map<uint64_t, bool, HashUInt64, EqualUInt64> missingWeakImportOffsets;
    if (li.hasMissingWeakImports) {
        const Image* closureImage = image.writer->currentImage();
        if ( closureImage->hasChainedFixups() ) {
            const Array<Image::ResolvedSymbolTarget> targets = closureImage->chainedTargets();
            if ( !targets.empty() ) {
                ma->withChainStarts(_diag, closureImage->chainedStartsOffset(), ^(const dyld_chained_starts_in_image* startsInfo) {
                    ma->forEachFixupInAllChains(_diag, startsInfo, false, ^(MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc,
                                                                            const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
                        uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)ma;
                        uint32_t bindOrdinal;
                        int64_t addend;
                        if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal, addend) ) {
                            if ( bindOrdinal < targets.count() ) {
                                const Image::ResolvedSymbolTarget& target = targets[bindOrdinal];
                                if ( (target.absolute.kind == Image::ResolvedSymbolTarget::kindAbsolute) && (target.absolute.value == 0) )
                                    missingWeakImportOffsets[fixupOffset] = true;
                            }
                            else {
                                image.diag.error("out of range bind ordinal %u (max %lu)", bindOrdinal, targets.count());
                                fixupsStop = true;
                            }
                        }
                    });
                });
                if (image.diag.hasError())
                    return;
            }
        }
        else {
            closureImage->forEachBind(^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool &stop) {
                if ( (bindTarget.absolute.kind == Image::ResolvedSymbolTarget::kindAbsolute) && (bindTarget.absolute.value == 0) )
                    missingWeakImportOffsets[imageOffsetToBind] = true;
            });
        }
    }

    // Class names and data may be in different sections depending on swift vs objc so handle multiple sections
    __block MachOAnalyzer::SectionCache classNameSectionCache(ma);
    __block MachOAnalyzer::SectionCache classSectionCache(ma);

    ma->forEachObjCClass(image.diag, vmAddrConverter, ^(Diagnostics &diag, uint64_t classVMAddr,
                                                        uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
                                                        const MachOAnalyzer::ObjCClassInfo &objcClass, bool isMetaClass) {
        if (isMetaClass) return;

        // Make sure the superclass pointer is not nil
        uint64_t superclassRuntimeOffset = classSuperclassVMAddr - loadAddress;
        if (missingWeakImportOffsets.find(superclassRuntimeOffset) != missingWeakImportOffsets.end()) {
            diag.error("Missing weak superclass");
            return;
        }

        // Does this class need to be fixed up for the stable Swift ABI?
        // Note the order matches the objc runtime in that we always do this fix before checking for dupes,
        // but after excluding classes with missing weak superclasses.
        if (objcClass.isUnfixedBackwardDeployingStableSwift()) {
            // Class really is stable Swift, pretending to be pre-stable.
            // Fix its lie. This involves fixing the FAST bits on the class data value, so record that vmaddr
            image.classStableSwiftFixups.push_back(classDataVMAddr - loadAddress);
        }
2770
2771 // Get the section for the name
2772 const char* className = nullptr;
2773 MachOAnalyzer::PrintableStringResult classNameResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2774 __block uint64_t classNameSectionStartVMAddr = 0;
2775 auto classNameSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2776 // We only have 24-bits in ObjCClassNameImageOffset to index in to the strings
2777 if (sectInfo.sectSize >= Image::ObjCClassNameImageOffset::maximumOffset) {
2778 return false;
2779 }
2780
2781 // We use 32-bit offsets so make sure the section is no larger than that.
2782 uint64_t classNameVMOffset = sectInfo.sectAddr - loadAddress;
2783 if (classNameVMOffset >= (1ULL << 32)) {
2784 return false;
2785 }
2786
2787 classNameSectionStartVMAddr = sectInfo.sectAddr;
2788 return true;
2789 };
2790 uint64_t classNameVMAddr = objcClass.nameVMAddr(pointerSize);
2791 className = ma->getPrintableString(classNameVMAddr, classNameResult,
2792 &classNameSectionCache, classNameSectionHandler);
2793
2794 if ( classNameResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2795 diag.error("Invalid class name for objc optimisation");
2796 return;
2797 }

        // If the class also exists in a shared cache image which is loaded, then objc
        // would have found that one, regardless of load order. So we can just skip this one.
        {
            void *cls;
            void *hi;
            uint32_t index;
            uint32_t count = objcClassOpt->getClassHeaderAndIndex(className, cls, hi, index);
            if (count == 1) {
                // exactly one matching class. Check if it's loaded
                const dyld3::MachOAnalyzer* sharedCacheMA = getMachHeaderFromObjCHeaderInfo(hi, pointerSize);
                if (sharedCacheImagesMap.find(sharedCacheMA) != sharedCacheImagesMap.end()) {
                    if ( duplicateClassesToIgnore.find(className) == duplicateClassesToIgnore.end() )
                        addDuplicateObjCClassWarning(className, li.path(), sharedCacheMA->installName());

                    // We have a duplicate class, so check if we've already got it in our map.
                    if ( duplicateSharedCacheClasses.find(className) == duplicateSharedCacheClasses.end() ) {
                        // We haven't seen this one yet
                        Image::ObjCDuplicateClass duplicateClass;
                        duplicateClass.sharedCacheClassOptIndex = index;
                        duplicateClass.sharedCacheClassDuplicateIndex = 0;
                        image.classSharedCacheDuplicates.insert({ className, duplicateClass });
                    }
                }
            }
            else if (count > 1) {
                // more than one matching class - find one that is loaded
                void *clslist[count];
                void *hilist[count];
                objcClassOpt->getClassesAndHeaders(className, clslist, hilist);
                for (uint32_t i = 0; i < count; i++) {
                    const dyld3::MachOAnalyzer* sharedCacheMA = getMachHeaderFromObjCHeaderInfo(hilist[i], pointerSize);
                    if (sharedCacheImagesMap.find(sharedCacheMA) != sharedCacheImagesMap.end()) {
                        if ( duplicateClassesToIgnore.find(className) == duplicateClassesToIgnore.end() )
                            addDuplicateObjCClassWarning(className, li.path(), sharedCacheMA->installName());

                        // We have a duplicate class, so check if we've already got it in our map.
                        if ( duplicateSharedCacheClasses.find(className) == duplicateSharedCacheClasses.end() ) {
                            // We haven't seen this one yet
                            Image::ObjCDuplicateClass duplicateClass;
                            duplicateClass.sharedCacheClassOptIndex = index;
                            duplicateClass.sharedCacheClassDuplicateIndex = i;
                            image.classSharedCacheDuplicates.insert({ className, duplicateClass });
                        }

                        break;
                    }
                }
            }
        }

        // Get the section for the class itself
        __block uint64_t classSectionStartVMAddr = 0;
        auto classSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
            // We only have 23 bits in ObjCClassImageOffset to index into the classes
            if (sectInfo.sectSize > Image::ObjCClassImageOffset::maximumOffset) {
                return false;
            }

            // We use 32-bit offsets so make sure the section is no larger than that.
            uint64_t classDatasVMOffset = sectInfo.sectAddr - loadAddress;
            if (classDatasVMOffset >= (1ULL << 32)) {
                return false;
            }

            classSectionStartVMAddr = sectInfo.sectAddr;
            return true;
        };
        if (!classSectionCache.findSectionForVMAddr(classVMAddr, classSectionHandler)) {
            diag.error("Invalid class for objc optimisation");
            return;
        }

        // Make sure we have an entry for our images offsets for later
        uint64_t classNameSectionVMOffset = classNameSectionStartVMAddr - loadAddress;
        uint64_t classSectionVMOffset = classSectionStartVMAddr - loadAddress;
        uint64_t hashTableVMOffsetsIndex = 0;
        for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
            if ( (nameAndDataVMOffset.first == classNameSectionVMOffset) && (nameAndDataVMOffset.second == classSectionVMOffset) )
                break;
            ++hashTableVMOffsetsIndex;
        }

        if (hashTableVMOffsetsIndex == image.classesNameAndDataVMOffsets.count()) {
            // Didn't find an image entry with this offset. Add one if we have space
            uint64_t totalHashTableImages = image.classesNameAndDataVMOffsets.count() + _objcClassesHashTableImages.count();
            if ( totalHashTableImages == Image::ObjCClassNameImageOffset::maximumImageIndex ) {
                // No more space. We need to give up
                diag.error("No more space for class hash table image");
                return;
            }
            image.classesNameAndDataVMOffsets.push_back({ classNameSectionVMOffset, classSectionVMOffset });
        }

        hashTableVMOffsetsIndex += _objcClassesHashTableImages.count();

        uint64_t classNameOffset = classNameVMAddr - classNameSectionStartVMAddr;
        uint64_t classDataOffset = classVMAddr - classSectionStartVMAddr;

        closure::Image::ObjCClassNameImageOffset classNameTarget;
        classNameTarget.classNameImageIndex = (uint32_t)hashTableVMOffsetsIndex;
        classNameTarget.classNameImageOffset = (uint32_t)classNameOffset;

        dyld3::closure::Image::ObjCClassImageOffset classDataTarget;
        classDataTarget.classData.imageIndex = (uint32_t)hashTableVMOffsetsIndex;
        classDataTarget.classData.imageOffset = (uint32_t)classDataOffset;
        classDataTarget.classData.isDuplicate = 0;

        seenClasses.push_back({ classNameTarget, classDataTarget });
    });
}

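// The protocol analogue of optimizeObjCClasses: records (name, protocol) entries for the
// closure's protocol hash table, skipping any protocol already present in a loaded shared
// cache image, and rejecting protocols whose ISA is already set, since the optimization
// wants to override the ISA.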
void ClosureBuilder::optimizeObjCProtocols(const objc_opt::objc_protocolopt2_t* objcProtocolOpt,
                                           const Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer>& sharedCacheImagesMap,
                                           ObjCOptimizerImage& image) {

    BuilderLoadedImage& li = *image.loadedImage;
    OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenProtocols = image.seenProtocols;

    const dyld3::MachOAnalyzer* ma = li.loadAddress();
    const uint32_t pointerSize = ma->pointerSize();
    const uint64_t loadAddress = ma->preferredLoadAddress();
    const dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(li.contentRebased);

    // Protocol names and data may be in different sections depending on swift vs objc so handle multiple sections
    __block MachOAnalyzer::SectionCache protocolNameSectionCache(ma);
    __block MachOAnalyzer::SectionCache protocolSectionCache(ma);

    ma->forEachObjCProtocol(image.diag, vmAddrConverter, ^(Diagnostics &diag, uint64_t protocolVMAddr,
                                                           const dyld3::MachOAnalyzer::ObjCProtocol &objCProtocol) {
        if ( objCProtocol.isaVMAddr != 0 ) {
            // We can't optimize this protocol if it has an ISA as we want to override it
            diag.error("Protocol ISA cannot be non-zero");
            return;
        }

        // Get the section for the name
        const char* protocolName = nullptr;
        MachOAnalyzer::PrintableStringResult protocolNameResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
        __block uint64_t protocolNameSectionStartVMAddr = 0;
        auto protocolNameSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
            // We only have 24 bits in ObjCClassNameImageOffset to index into the strings
            if (sectInfo.sectSize >= Image::ObjCClassNameImageOffset::maximumOffset) {
                return false;
            }

            // We use 32-bit offsets so make sure the section is no larger than that.
            uint64_t protocolNameVMOffset = sectInfo.sectAddr - loadAddress;
            if (protocolNameVMOffset >= (1ULL << 32)) {
                return false;
            }

            protocolNameSectionStartVMAddr = sectInfo.sectAddr;
            return true;
        };
        uint64_t protocolNameVMAddr = objCProtocol.nameVMAddr;
        protocolName = ma->getPrintableString(protocolNameVMAddr, protocolNameResult,
                                              &protocolNameSectionCache, protocolNameSectionHandler);

        if ( protocolNameResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
            diag.error("Invalid protocol name for objc optimisation");
            return;
        }

        // If the protocol also exists in a shared cache image which is loaded, then objc
        // would have found that one, regardless of load order. So we can just skip this one.
        {
            void *cls;
            void *hi;
            uint32_t count = objcProtocolOpt->getClassAndHeader(protocolName, cls, hi);
            if (count == 1) {
                // exactly one matching protocol. Check if it's loaded
                if (sharedCacheImagesMap.find(getMachHeaderFromObjCHeaderInfo(hi, pointerSize)) != sharedCacheImagesMap.end())
                    return;
            }
            else if (count > 1) {
                // more than one matching protocol - find one that is loaded
                void *clslist[count];
                void *hilist[count];
                objcProtocolOpt->getClassesAndHeaders(protocolName, clslist, hilist);
                for (uint32_t i = 0; i < count; i++) {
                    if (sharedCacheImagesMap.find(getMachHeaderFromObjCHeaderInfo(hilist[i], pointerSize)) != sharedCacheImagesMap.end())
                        return;
                }
            }
        }

        // Get the section for the protocol itself
        __block uint64_t protocolSectionStartVMAddr = 0;
        auto protocolSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
            // We only have 23 bits in ObjCClassImageOffset to index into the protocols
            if (sectInfo.sectSize > Image::ObjCClassImageOffset::maximumOffset) {
                return false;
            }

            // We use 32-bit offsets so make sure the section is no larger than that.
            uint64_t protocolDatasVMOffset = sectInfo.sectAddr - loadAddress;
            if (protocolDatasVMOffset >= (1ULL << 32)) {
                return false;
            }

            protocolSectionStartVMAddr = sectInfo.sectAddr;
            return true;
        };
        if (!protocolSectionCache.findSectionForVMAddr(protocolVMAddr, protocolSectionHandler)) {
            diag.error("Invalid protocol for objc optimisation");
            return;
        }

        // Make sure we have an entry for our images offsets for later
        uint64_t protocolNameSectionVMOffset = protocolNameSectionStartVMAddr - loadAddress;
        uint64_t protocolSectionVMOffset = protocolSectionStartVMAddr - loadAddress;
        uint64_t hashTableVMOffsetsIndex = 0;
        for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
            if ( (nameAndDataVMOffset.first == protocolNameSectionVMOffset) && (nameAndDataVMOffset.second == protocolSectionVMOffset) )
                break;
            ++hashTableVMOffsetsIndex;
        }

        if (hashTableVMOffsetsIndex == image.classesNameAndDataVMOffsets.count()) {
            // Didn't find an image entry with this offset. Add one if we have space
            uint64_t totalHashTableImages = image.classesNameAndDataVMOffsets.count() + _objcClassesHashTableImages.count();
            if ( totalHashTableImages == Image::ObjCClassNameImageOffset::maximumImageIndex ) {
                // No more space. We need to give up
                diag.error("No more space for protocol hash table image");
                return;
            }
            image.classesNameAndDataVMOffsets.push_back({ protocolNameSectionVMOffset, protocolSectionVMOffset });
        }

        hashTableVMOffsetsIndex += _objcClassesHashTableImages.count();

        uint64_t protocolNameOffset = protocolNameVMAddr - protocolNameSectionStartVMAddr;
        uint64_t protocolDataOffset = protocolVMAddr - protocolSectionStartVMAddr;

        closure::Image::ObjCClassNameImageOffset protocolNameTarget;
        protocolNameTarget.classNameImageIndex = (uint32_t)hashTableVMOffsetsIndex;
        protocolNameTarget.classNameImageOffset = (uint32_t)protocolNameOffset;

        dyld3::closure::Image::ObjCClassImageOffset protocolDataTarget;
        protocolDataTarget.classData.imageIndex = (uint32_t)hashTableVMOffsetsIndex;
        protocolDataTarget.classData.imageOffset = (uint32_t)protocolDataOffset;
        protocolDataTarget.classData.isDuplicate = 0;

        seenProtocols.push_back({ protocolNameTarget, protocolDataTarget });
    });
}

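// The main executable can suppress duplicate-class warnings for specific class names via a
// __DATA,__objc_dupclass section (format copied from objc-abi.h). Each entry is a pointer
// (read here as a 64-bit value) to an objc_duplicate_class record whose name is collected
// into duplicateClassesToIgnore.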
void ClosureBuilder::parseObjCClassDuplicates(Map<const char*, bool, HashCString, EqualCString>& duplicateClassesToIgnore) {
    const ClosureBuilder::BuilderLoadedImage& mainLi = _loadedImages[_mainProgLoadIndex];

    const dyld3::MachOAnalyzer* ma = mainLi.loadAddress();

    const uint32_t pointerSize = ma->pointerSize();
    const intptr_t slide = ma->getSlide();
    const dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(mainLi.contentRebased);

    uint64_t sectionSize = 0;
    const void* section = ma->findSectionContent("__DATA", "__objc_dupclass", sectionSize);

    if ( !section )
        return;

    // Ignore sections which are the wrong size
    if ( (sectionSize % pointerSize) != 0 )
        return;

    // Copied from objc-abi.h
    typedef struct _objc_duplicate_class {
        uint32_t version;
        uint32_t flags;
        const char name[64];
    } objc_duplicate_class;

    for (uint64_t offset = 0; offset != sectionSize; offset += pointerSize) {
        uint64_t vmAddr = *(uint64_t*)((uint64_t)section + offset);
        vmAddr = vmAddrConverter.convertToVMAddr(vmAddr);
        const objc_duplicate_class* duplicateClass = (const objc_duplicate_class*)(vmAddr + slide);
        duplicateClassesToIgnore.insert({ duplicateClass->name, true });
    }
}

// used at launch by dyld when kernel has already mapped main executable
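// Roughly: validate the main executable, load any DYLD_INSERT_LIBRARIES, recursively load
// all dependents, build an Image for each image not already covered by the dyld cache's
// prebuilt images, run the ObjC optimizations, then record cache patches, program vars,
// interposing info, and flags before finalizing the LaunchClosure.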
const LaunchClosure* ClosureBuilder::makeLaunchClosure(const LoadedFileInfo& fileInfo, bool allowInsertFailures)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
    const mach_header* mainMH = (const mach_header*)fileInfo.fileContent;
    // set up stack based storage for all arrays
    BuilderLoadedImage loadImagesStorage[512];
    Image::LinkedImage dependenciesStorage[512*8];
    InterposingTuple tuplesStorage[64];
    Closure::PatchEntry cachePatchStorage[64];
    _loadedImages.setInitialStorage(loadImagesStorage, 512);
    _dependencies.setInitialStorage(dependenciesStorage, 512*8);
    _interposingTuples.setInitialStorage(tuplesStorage, 64);
    _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
    ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });

    const MachOAnalyzer* mainExecutable = MachOAnalyzer::validMainExecutable(_diag, mainMH, fileInfo.path, fileInfo.sliceLen, _archs, _platform);
    if ( mainExecutable == nullptr )
        return nullptr;
    if ( !mainExecutable->isDynamicExecutable() ) {
        _diag.error("not a main executable");
        return nullptr;
    }
    if ( _platform == Platform::macOS ) {
        // If this is an iOSMac program running on macOS, switch platforms
        if ( mainExecutable->builtForPlatform(Platform::iOSMac, true) ) {
            //_platform = Platform::iOSMac;
            Platform* selfPlatform = const_cast<Platform*>(&_platform);
            *selfPlatform = Platform::iOSMac;
        }
#if (TARGET_OS_OSX && TARGET_CPU_ARM64)
        else if ( mainExecutable->builtForPlatform(Platform::iOS, true) ) {
            //_platform = Platform::iOS;
            Platform* selfPlatform = const_cast<Platform*>(&_platform);
            *selfPlatform = Platform::iOS;
        }
#endif
        if ( mainExecutable->usesObjCGarbageCollection() ) {
            _diag.error("program requires ObjC Garbage Collection which is no longer supported");
            return nullptr;
        }
    }
    // <rdar://problem/63308841> licenseware apps that zero out lazy bind opcodes cannot be pre-bound
    if ( mainExecutable->hasStompedLazyOpcodes() )
        _makeMinimalClosure = true;

    _isLaunchClosure = true;
    _allowMissingLazies = true;

#if BUILDING_CACHE_BUILDER
    _makingClosuresInCache = true;
#endif

    _nextIndex = 0;

    // add main executable
    __block BuilderLoadedImage mainEntry;
    mainEntry.loadedFileInfo = fileInfo;
    mainEntry.imageNum = 0; // We can't fill this in until we've processed inserted dylibs
    mainEntry.unmapWhenDone = false;
    mainEntry.contentRebased = false;
    mainEntry.hasInits = false;
    mainEntry.markNeverUnload = true;
    mainEntry.rtldLocal = false;
    mainEntry.isBadImage = false;
    mainEntry.mustBuildClosure = true;
    mainEntry.hasMissingWeakImports = false;
    mainEntry.hasInterposingTuples = false; // only dylibs not in the dyld cache can have interposing tuples
    mainEntry.overrideImageNum = 0;
    mainEntry.exportsTrieOffset = 0;
    mainEntry.exportsTrieSize = 0;

    // Set the executable load path so that @executable_path can use it later
    _mainProgLoadPath = fileInfo.path;

    // add any DYLD_INSERT_LIBRARIES
    _pathOverrides.forEachInsertedDylib(^(const char* dylibPath, bool &stop) {
        LoadedImageChain chainMain = { nullptr, mainEntry };
        BuilderLoadedImage* foundTopImage;
        if ( !findImage(dylibPath, chainMain, foundTopImage, LinkageType::kInserted, 0, true) ) {
            if ( !allowInsertFailures ) {
                if ( _diag.noError() )
                    // if no other error was reported while trying to find the library, that means it is missing
                    _diag.error("could not load inserted dylib '%s' because image not found", dylibPath);
                stop = true;
                return;
            }
            _diag.clearError(); // FIXME add way to plumb back warning
        }
    });

    if ( _diag.hasError() )
        return nullptr;

    _mainProgLoadIndex = (uint32_t)_loadedImages.count();
    mainEntry.imageNum = _startImageNum + _nextIndex++;
    _loadedImages.push_back(mainEntry);

    // get mach_headers for all images needed to launch this main executable
    LoadedImageChain chainStart = { nullptr, _loadedImages[_mainProgLoadIndex] };
    recursiveLoadDependents(chainStart);
    if ( _diag.hasError() )
        return nullptr;
    for (uint32_t i=0; i < _mainProgLoadIndex; ++i) {
        LoadedImageChain insertChainStart = { nullptr, _loadedImages[i] };
        recursiveLoadDependents(insertChainStart);
        if ( _diag.hasError() )
            return nullptr;
    }
    loadDanglingUpwardLinks();

    // If we have an on-disk image then we need all images which are dependent on the disk image to get a new
    // initializer order. It's not enough to just do the top level image as we may dlopen while in dlopen
    invalidateInitializerRoots();

    // now that everything loaded, set _libDyldImageNum and _libSystemImageNum
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( mainExecutable->builtForPlatform(Platform::driverKit) ) {
            if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/System/DriverKit/usr/lib/system/libdyld.dylib") == 0) )
                _libDyldImageNum = li.imageNum;
            else if ( strcmp(li.path(), "/System/DriverKit/usr/lib/libSystem.dylib") == 0 )
                _libSystemImageNum = li.imageNum;
        } else {
            if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) )
                _libDyldImageNum = li.imageNum;
            else if ( strcmp(li.path(), "/usr/lib/libSystem.B.dylib") == 0 )
                _libSystemImageNum = li.imageNum;
        }
        // don't use minimal closures when interposing is in play because we don't have runtime support to do interposing
        if ( li.hasInterposingTuples ) {
            _makeMinimalClosure = false;
            _leaveRebasesAsOpcodes = false;
        }
    }

    // only some images need to go into closure (non-rooted ones from dyld cache do not)
    STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.mustBuildClosure ) {
            writers.push_back(ImageWriter());
            buildImage(writers.back(), li);
            if ( _diag.hasError() )
                return nullptr;
        }
    }

    // only build objc closure info when building full closures
    bool optimizedObjC = !_makeMinimalClosure && optimizeObjC(writers);

    // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
    for (uintptr_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
        BuilderLoadedImage& li = _loadedImages[imageIndex];
        if ( li.mustBuildClosure ) {
            computeInitOrder(writers[writerIndex], (uint32_t)imageIndex);
            writerIndex++;
        }
    }

    // combine all Image objects into one ImageArray
    ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
    for (ImageWriter& writer : writers) {
        imageArrayWriter.appendImage(writer.finalize());
        writer.deallocate();
    }
    const ImageArray* imageArray = imageArrayWriter.finalize();

    // merge ImageArray object into LaunchClosure object
    __block LaunchClosureWriter closureWriter(imageArray);

    if (optimizedObjC) {
        if (!_objcSelectorsHashTable.empty())
            closureWriter.setObjCSelectorInfo(_objcSelectorsHashTable, _objcSelectorsHashTableImages);

        if (!_objcClassesHashTableImages.empty()) {
            closureWriter.setObjCClassAndProtocolInfo(_objcClassesHashTable, _objcProtocolsHashTable,
                                                      _objcClassesHashTableImages);
        }

        if ( _objcDuplicateClassWarnings != nullptr ) {
            _objcDuplicateClassWarnings->forEachPath(^(const char* warning) {
                closureWriter.addWarning(Closure::Warning::duplicateObjCClass, warning);
            });
        }

        if (!_objcClassesDuplicatesHashTable.empty())
            closureWriter.setObjCDuplicateClassesInfo(_objcClassesDuplicatesHashTable);
    }

    // record shared cache info
    if ( _dyldCache != nullptr ) {
        // record cache UUID
        uuid_t cacheUUID;
        _dyldCache->getUUID(cacheUUID);
        closureWriter.setDyldCacheUUID(cacheUUID);

        // record any cache patching needed because of dylib overriding cache
        for (const BuilderLoadedImage& li : _loadedImages) {
            if ( li.overrideImageNum != 0 ) {
                uint32_t imageIndex = li.overrideImageNum - (uint32_t)_dyldImageArray->startImageNum();
                STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, _dyldCache->patchableExportCount(imageIndex));
                MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
                    return (const MachOLoaded*)findDependent(mh, depIndex);
                };
                //fprintf(stderr, "'%s' overrides something in cache\n", li.loadedFileInfo.path);
                _dyldCache->forEachPatchableExport(imageIndex, ^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
                    dyld3::MachOAnalyzer::FoundSymbol foundInfo;
                    Diagnostics patchDiag;
                    Closure::PatchEntry patch;
                    patch.overriddenDylibInCache = li.overrideImageNum;
                    patch.exportCacheOffset = cacheOffsetOfImpl;
                    if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
                        const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
                        patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
                        patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
                        patch.replacement.image.offset = foundInfo.value;
                    }
                    else {
                        // the symbol is missing in the cache override dylib, so check if it moved to a re-export sibling
                        // <rdar://problem/59196856> allow patched impls to move between re-export sibling dylibs
                        bool foundViaParent = false;
                        for (const BuilderLoadedImage& li2 : _loadedImages) {
                            if ( (li2.overrideImageNum != 0) && (li2.imageNum != li.imageNum) ) {
                                for (Image::LinkedImage aDep : li2.dependents) {
                                    if ( (aDep.kind() == Image::LinkKind::reExport) && (aDep.imageNum() == li.imageNum) ) {
                                        if ( li2.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
                                            const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
                                            patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
                                            patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
                                            patch.replacement.image.offset = foundInfo.value;
                                            foundViaParent = true;
                                            //fprintf(stderr, "found patch target '%s' previously in '%s', now in '%s'\n", symbolName, li.path(), li2.path());
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                        if ( !foundViaParent ) {
                            // symbol is missing from override, set other cached dylibs that used it to NULL
                            //fprintf(stderr, "could not find symbol '%s' in %s \n", symbolName, li.path());
                            patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
                            patch.replacement.absolute.value = 0;
                        }
                    }
                    patches.push_back(patch);
                });
                closureWriter.addCachePatches(patches);
            }
        }

        // record any cache patching needed because weak-def C++ symbols override dyld cache
        if ( !_weakDefCacheOverrides.empty() ) {
            closureWriter.addCachePatches(_weakDefCacheOverrides);
        }
    }

#if TARGET_OS_OSX
    uint32_t progVarsOffset;
    if ( mainExecutable->hasProgramVars(_diag, progVarsOffset) ) {
        // on macOS, binaries may have a __dyld section containing ProgramVars to use
        closureWriter.setHasProgramVars(progVarsOffset);
    }
    if ( _diag.hasError() )
        return nullptr;
#endif

    // record any interposing info
    if ( !_interposingDisabled ) {
        imageArray->forEachImage(^(const Image* image, bool &stop) {
            if ( !image->inDyldCache() )
                addInterposingTuples(closureWriter, image, findLoadedImage(image->imageNum()).loadAddress());
        });
    }

    // modify fixups in contained Images by applying interposing tuples
    closureWriter.applyInterposing((const LaunchClosure*)closureWriter.currentTypedBytes());

    // set flags
    closureWriter.setUsedInterposing(_interposingTuplesUsed);
    closureWriter.setUsedAtPaths(_atPathUsed);
    closureWriter.setUsedFallbackPaths(_fallbackPathUsed);
    closureWriter.setHasInsertedLibraries(_mainProgLoadIndex > 0);
    closureWriter.setInitImageCount((uint32_t)_loadedImages.count());

    // add other closure attributes
    addClosureInfo(closureWriter);

    // make result
    const LaunchClosure* result = closureWriter.finalize();
    imageArrayWriter.deallocate();

    timer.setData4(dyld3::DyldTimingBuildClosure::LaunchClosure_Built);

    return result;
}

// used by libdyld for dlopen()
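// Seeds the builder with everything already loaded (so existing images keep their ImageNums),
// then finds the requested top image. Returns either nullptr with *topImageNum set (image
// already loaded, RTLD_NOLOAD, or an existing prebuilt closure can be used), sRetryDlopenClosure
// when roots were discovered and the caller must retry without the shared cache closure, or a
// newly built DlopenClosure.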
const DlopenClosure* ClosureBuilder::makeDlopenClosure(const char* path, const LaunchClosure* mainClosure, const Array<LoadedImage>& alreadyLoadedList,
                                                       closure::ImageNum callerImageNum, bool noLoad, bool forceBindLazies, bool canUseSharedCacheClosure, closure::ImageNum* topImageNum)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
    // set up stack based storage for all arrays
    BuilderLoadedImage loadImagesStorage[256];
    Image::LinkedImage dependenciesStorage[128];
    Closure::PatchEntry cachePatchStorage[64];
    _loadedImages.setInitialStorage(loadImagesStorage, 256);
    _dependencies.setInitialStorage(dependenciesStorage, 128);
    _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
    ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });

    // fill in builder array from already loaded images
    bool cachedDylibsExpectedOnDisk = _dyldCache ? _dyldCache->header.dylibsExpectedOnDisk : true;
    uintptr_t callerImageIndex = UINTPTR_MAX;
    for (const LoadedImage& ali : alreadyLoadedList) {
        const Image* image = ali.image();
        const MachOAnalyzer* ma = (MachOAnalyzer*)(ali.loadedAddress());
        bool inDyldCache = ma->inDyldCache();
        BuilderLoadedImage entry;
        ImageNum overrideImageNum;
        entry.loadedFileInfo.path = image->path();
        entry.loadedFileInfo.fileContent = ma;
        entry.loadedFileInfo.sliceOffset = 0;
        entry.loadedFileInfo.inode = 0;
        entry.loadedFileInfo.mtime = 0;
        entry.imageNum = image->imageNum();
        entry.dependents = image->dependentsArray();
        entry.unmapWhenDone = false;
        entry.contentRebased = inDyldCache;
        entry.hasInits = false;
        entry.markNeverUnload = image->neverUnload();
        entry.rtldLocal = ali.hideFromFlatSearch();
        entry.isBadImage = false;
        entry.mustBuildClosure = false;
        entry.hasMissingWeakImports = false;
        entry.hasInterposingTuples = !inDyldCache && ma->isDylib() && ma->hasInterposingTuples();
        entry.overrideImageNum = 0;
        entry.exportsTrieOffset = 0;
        entry.exportsTrieSize = 0;
        if ( image->isOverrideOfDyldCacheImage(overrideImageNum) ) {
            entry.overrideImageNum = overrideImageNum;
            canUseSharedCacheClosure = false;
        }
        if ( !inDyldCache || cachedDylibsExpectedOnDisk )
            image->hasFileModTimeAndInode(entry.loadedFileInfo.inode, entry.loadedFileInfo.mtime);
        if ( entry.imageNum == callerImageNum )
            callerImageIndex = _loadedImages.count();
        _loadedImages.push_back(entry);
    }
    _alreadyInitedIndex = (uint32_t)_loadedImages.count();

    // find main executable (may be needed for @executable_path)
    _isLaunchClosure = false;
    for (uint32_t i=0; i < alreadyLoadedList.count(); ++i) {
        if ( _loadedImages[i].loadAddress()->isMainExecutable() ) {
            _mainProgLoadIndex = i;
            _mainProgLoadPath = _loadedImages[i].path();
            break;
        }
    }

    // We can't use an existing dlopen closure if the main closure had interposing tuples
    if (canUseSharedCacheClosure) {
        if (mainClosure->hasInterposings())
            canUseSharedCacheClosure = false;
    }

    // add top level dylib being dlopen()ed
    BuilderLoadedImage* foundTopImage = nullptr;
    _nextIndex = 0;
    // @rpath search uses caller's LC_RPATH, then main executable's LC_RPATH
    BuilderLoadedImage& callerImage = (callerImageIndex != UINTPTR_MAX) ? _loadedImages[callerImageIndex] : _loadedImages[_mainProgLoadIndex];
    LoadedImageChain chainMain = { nullptr, _loadedImages[_mainProgLoadIndex] };
    LoadedImageChain chainCaller = { &chainMain, callerImage };
    if ( !findImage(path, chainCaller, foundTopImage, LinkageType::kDynamic, 0, canUseSharedCacheClosure) ) {
        // If we didn't find the image, it might be a symlink to something in the dyld cache that is not on disk
        if ( (_dyldCache != nullptr) && !_dyldCache->header.dylibsExpectedOnDisk ) {
            char resolvedPath[PATH_MAX];
            if ( _fileSystem.getRealPath(path, resolvedPath) ) {
                _diag.clearError();
                if ( !findImage(resolvedPath, chainMain, foundTopImage, LinkageType::kDynamic, 0, canUseSharedCacheClosure) ) {
                    return nullptr;
                }
            } else {
                // We didn't find a new path from realpath
                return nullptr;
            }
        } else {
            // cached dylibs are on disk, so don't call realpath() again; it would have been found by the first call to findImage()
            return nullptr;
        }
    }

    // exit early in RTLD_NOLOAD mode
    if ( noLoad ) {
        timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_NoLoad);
        // if no new images were added to _loadedImages, then the requested path was already loaded
        if ( (uint32_t)_loadedImages.count() == _alreadyInitedIndex )
            *topImageNum = foundTopImage->imageNum;
        else
            *topImageNum = 0;
        return nullptr;
    }

    // fast path if roots are not allowed and target is in the dyld cache or is an "other" prebuilt dylib
    if ( (_dyldCache != nullptr) && (_dyldCache->header.cacheType == kDyldSharedCacheTypeProduction) ) {
        if ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) {
            if (foundTopImage->imageNum < closure::kLastDyldCacheImageNum)
                timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheDylib);
            else
                timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheOther);
            *topImageNum = foundTopImage->imageNum;
            return nullptr;
        }
    }

    // recursive load dependents
    // @rpath for stuff top dylib depends on uses LC_RPATH from caller, main exe, and dylib being dlopen()ed
    LoadedImageChain chainTopDylib = { &chainMain, *foundTopImage };
    recursiveLoadDependents(chainTopDylib, canUseSharedCacheClosure);
    if ( _diag.hasError() )
        return nullptr;
    loadDanglingUpwardLinks(canUseSharedCacheClosure);
    if ( _diag.hasError() )
        return nullptr;

    // RTLD_NOW means fail the dlopen() if a symbol cannot be bound
    _allowMissingLazies = !forceBindLazies;

    // If we got this far, we are not using a prebuilt dlopen-closure
    // Since dlopen closures are never saved to disk, don't put fixups into the closure
    // Except if interposing is used, since we don't have plumbing to apply interposing dynamically
    _makeMinimalClosure = !mainClosure->hasInterposings();

    // only some images need to go into closure (ones from dyld cache do not, unless the cache format changed)
    STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
    if ( _foundNonCachedImage || _foundDyldCacheRoots ) {
        // If we have an on-disk image then we need all images which are dependent on the disk image to get a new
        // initializer order. It's not enough to just do the top level image as we may dlopen while in dlopen
        invalidateInitializerRoots();

        for (uintptr_t loadedImageIndex = 0; loadedImageIndex != _loadedImages.count(); ++loadedImageIndex) {
            BuilderLoadedImage& li = _loadedImages[loadedImageIndex];
            if ( li.mustBuildClosure ) {
                writers.push_back(ImageWriter());
                buildImage(writers.back(), li);
                if ( _diag.hasError() )
                    return nullptr;
            }
        }

        // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
        for (uintptr_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
            BuilderLoadedImage& li = _loadedImages[imageIndex];
            if ( li.mustBuildClosure ) {
                computeInitOrder(writers[writerIndex], (uint32_t)imageIndex);
                writerIndex++;
            }
        }
    }
    if ( _diag.hasError() )
        return nullptr;

    // check if top image loaded is in shared cache along with everything it depends on
    *topImageNum = foundTopImage->imageNum;
    if ( _foundNonCachedImage || _foundDyldCacheRoots ) {
        if ( canUseSharedCacheClosure && ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) ) {
            // We used a shared cache built closure, but now discovered roots. We need to try again
            *topImageNum = 0;
            return sRetryDlopenClosure;
        }
    } else {
        if (foundTopImage->imageNum < closure::kLastDyldCacheImageNum)
            timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheDylib);
        else
            timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheOther);
        return nullptr;
    }

    // combine all Image objects into one ImageArray
    ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
    for (ImageWriter& writer : writers) {
        imageArrayWriter.appendImage(writer.finalize());
        writer.deallocate();
    }
    const ImageArray* imageArray = imageArrayWriter.finalize();

    // merge ImageArray object into DlopenClosure object
    DlopenClosureWriter closureWriter(imageArray);

    // add other closure attributes
    closureWriter.setTopImageNum(foundTopImage->imageNum);

    // record any cache patching needed because of dylib overriding cache
    if ( _dyldCache != nullptr ) {
        for (const BuilderLoadedImage& li : _loadedImages) {
            if ( (li.overrideImageNum != 0) && (li.imageNum >= _startImageNum) ) {
                const Image* cacheImage = _dyldImageArray->imageForNum(li.overrideImageNum);
                uint32_t imageIndex = cacheImage->imageNum() - (uint32_t)_dyldCache->cachedDylibsImageArray()->startImageNum();
                STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, _dyldCache->patchableExportCount(imageIndex));
                MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
                    return (const MachOLoaded*)findDependent(mh, depIndex);
                };
                //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
                _dyldCache->forEachPatchableExport(imageIndex,
                                                   ^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
                    dyld3::MachOAnalyzer::FoundSymbol foundInfo;
                    Diagnostics patchDiag;
                    Closure::PatchEntry patch;
                    patch.overriddenDylibInCache = li.overrideImageNum;
                    patch.exportCacheOffset = cacheOffsetOfImpl;
                    if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
                        const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
                        patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
                        patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
                        patch.replacement.image.offset = foundInfo.value;
                    }
                    else {
                        patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
                        patch.replacement.absolute.value = 0;
                    }
                    patches.push_back(patch);
                });
                closureWriter.addCachePatches(patches);
            }
        }
    }

    // modify fixups in contained Images by applying interposing tuples
    closureWriter.applyInterposing(mainClosure);

    // dlopen closures are never cached, so we should never be tracking missing paths here
    assert(_mustBeMissingPaths == nullptr);

    // make final DlopenClosure object
    const DlopenClosure* result = closureWriter.finalize();
    imageArrayWriter.deallocate();
    timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_Built);
    return result;
}

// used by dyld_closure_util
const LaunchClosure* ClosureBuilder::makeLaunchClosure(const char* mainPath, bool allowInsertFailures)
{
    char realerPath[MAXPATHLEN];
    closure::LoadedFileInfo loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, mainPath, _archs, _platform, realerPath);
    if ( _diag.hasError() )
        return nullptr;
    loadedFileInfo.path = mainPath;
    const MachOAnalyzer* mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
    if (mh == nullptr) {
        _diag.error("could not load file");
        return nullptr;
    }
    const_cast<PathOverrides*>(&_pathOverrides)->setMainExecutable(mh, mainPath);
    const LaunchClosure* launchClosure = makeLaunchClosure(loadedFileInfo, allowInsertFailures);
    loadedFileInfo.unload(loadedFileInfo);
    return launchClosure;
}

void ClosureBuilder::setDyldCacheInvalidFormatVersion() {
    _dyldCacheInvalidFormatVersion = true;
}

// used by dyld shared cache builder
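// Builds one Image per cached dylib, with sequential ImageNums starting at _startImageNum.
// Runs inside dispatch_apply() with minimal stack, so array storage is reserved
// (vm_allocated) up front instead of using stack-based initial storage.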
const ImageArray* ClosureBuilder::makeDyldCacheImageArray(const Array<CachedDylibInfo>& dylibs, const Array<CachedDylibAlias>& aliases)
{
    // because this is run in cache builder using dispatch_apply() there is minimal stack space
    // so set up storage for all arrays to be vm_allocated
    uintptr_t maxImageCount = dylibs.count() + 16;
    _loadedImages.reserve(maxImageCount);
    _dependencies.reserve(maxImageCount*16);

    _makingDyldCacheImages = true;
    _allowMissingLazies = false;
    _aliases = &aliases;

    // build _loadedImages[] with every dylib in cache
    __block ImageNum imageNum = _startImageNum;
    for (const CachedDylibInfo& aDylibInfo : dylibs) {
        BuilderLoadedImage entry;
        entry.loadedFileInfo = aDylibInfo.fileInfo;
        entry.imageNum = imageNum++;
        entry.unmapWhenDone = false;
        entry.contentRebased = false;
        entry.hasInits = false;
        entry.markNeverUnload = true;
        entry.rtldLocal = false;
        entry.isBadImage = false;
        entry.mustBuildClosure = false;
        entry.hasMissingWeakImports = false;
        entry.hasInterposingTuples = false; // dylibs in dyld cache cannot have interposing tuples
        entry.overrideImageNum = 0;
        entry.exportsTrieOffset = 0;
        entry.exportsTrieSize = 0;
        _loadedImages.push_back(entry);
    }

    // wire up dependencies between cached dylibs
    for (BuilderLoadedImage& li : _loadedImages) {
        LoadedImageChain chainStart = { nullptr, li };
        recursiveLoadDependents(chainStart);
        if ( _diag.hasError() )
            break;
    }
    assert(_loadedImages.count() == dylibs.count());

    // create an ImageWriter for each cached dylib
    STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
    for (BuilderLoadedImage& li : _loadedImages) {
        writers.push_back(ImageWriter());
        buildImage(writers.back(), li);
    }

    // add initializer order into each dylib
    // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
    for (const BuilderLoadedImage& li : _loadedImages) {
        uint32_t index = li.imageNum - _startImageNum;
        computeInitOrder(writers[index], index);
    }

    // combine all Image objects into one ImageArray
    ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
    for (ImageWriter& writer : writers) {
        imageArrayWriter.appendImage(writer.finalize());
        writer.deallocate();
    }
    const ImageArray* imageArray = imageArrayWriter.finalize();

    return imageArray;
}


#if BUILDING_CACHE_BUILDER
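// Builds prebuilt dlopen images for dylibs that are on disk but not in the shared cache.
// Images with bad or non-shared-cache dependencies are marked invalid, and the invalidation
// is propagated to everything that depends on them, rather than failing the whole array.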
const ImageArray* ClosureBuilder::makeOtherDylibsImageArray(const Array<LoadedFileInfo>& otherDylibs, uint32_t cachedDylibsCount)
{
    // because this is run in cache builder using dispatch_apply() there is minimal stack space
    // so set up storage for all arrays to be vm_allocated
    uintptr_t maxImageCount = otherDylibs.count() + cachedDylibsCount + 128;
    _loadedImages.reserve(maxImageCount);
    _dependencies.reserve(maxImageCount*16);

    // build _loadedImages[] with every dylib in cache, followed by others
    _nextIndex = 0;
    for (const LoadedFileInfo& aDylibInfo : otherDylibs) {
        BuilderLoadedImage entry;
        entry.loadedFileInfo = aDylibInfo;
        entry.imageNum = _startImageNum + _nextIndex++;
        entry.unmapWhenDone = false;
        entry.contentRebased = false;
        entry.hasInits = false;
        entry.markNeverUnload = false;
        entry.rtldLocal = false;
        entry.isBadImage = false;
        entry.mustBuildClosure = false;
        entry.hasMissingWeakImports = false;
        entry.hasInterposingTuples = false; // all images here have passed canHavePrecomputedDlopenClosure() which does not allow interposing tuples
        entry.overrideImageNum = 0;
        entry.exportsTrieOffset = 0;
        entry.exportsTrieSize = 0;
        _loadedImages.push_back(entry);
    }

    // wire up dependencies between the other dylibs
    // Note, _loadedImages can grow when we call recursiveLoadDependents so we need
    // to check the count on each iteration.
    for (uint64_t index = 0; index != _loadedImages.count(); ++index) {
        BuilderLoadedImage& li = _loadedImages[index];
        LoadedImageChain chainStart = { nullptr, li };
        recursiveLoadDependents(chainStart);
        if ( _diag.hasError() ) {
            _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
            //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
            _diag.clearError();
            li.isBadImage = true; // mark bad
        }
    }

    auto invalidateBadImages = [&]() {
        // Invalidate images with bad dependencies
        while (true) {
            bool madeChange = false;
            for (BuilderLoadedImage& li : _loadedImages) {
                if (li.isBadImage) {
                    // Already invalidated
                    continue;
                }
                for (Image::LinkedImage depIndex : li.dependents) {
                    if ( depIndex.imageNum() == kMissingWeakLinkedImage )
                        continue;
                    if ( depIndex.imageNum() >= dyld3::closure::kLastDyldCacheImageNum ) {
                        // dlopen closures can only depend on the shared cache. This is because if foo.dylib links bar.dylib
                        // and bar.dylib is loaded into the launch closure, then the dlopen closure for foo.dylib wouldn't see
                        // bar.dylib at the image num in the launch closure
                        _diag.warning("while building dlopen closure for %s: dependent dylib is not from shared cache", li.loadedFileInfo.path);
                        li.isBadImage = true; // mark bad
                        madeChange = true;
                        continue;
                    }
                    BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
                    if (depImage.isBadImage) {
                        _diag.warning("while building dlopen closure for %s: dependent dylib had error", li.loadedFileInfo.path);
                        li.isBadImage = true; // mark bad
                        madeChange = true;
                    }
                }
            }
            if (!madeChange)
                break;
        }
    };

    invalidateBadImages();

    // create an ImageWriter for each other dylib
    STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.isBadImage ) {
            writers.push_back(ImageWriter());
            writers.back().setInvalid();
            continue;
        }
        if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
            continue;
        writers.push_back(ImageWriter());
        buildImage(writers.back(), li);
        if ( _diag.hasError() ) {
            _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
            //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
            _diag.clearError();
            li.isBadImage = true; // mark bad
            writers.back().setInvalid();
        }
    }

    invalidateBadImages();

    // add initializer order into each dylib
    // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
            continue;
        if (li.isBadImage)
            continue;
        uint32_t index = li.imageNum - _startImageNum;
        computeInitOrder(writers[index], index);
    }

    // combine all Image objects into one ImageArray
    ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
    for (ImageWriter& writer : writers) {
        imageArrayWriter.appendImage(writer.finalize());
        writer.deallocate();
    }
    const ImageArray* imageArray = imageArrayWriter.finalize();

    return imageArray;
}
#endif


bool ClosureBuilder::inLoadedImageArray(const Array<LoadedImage>& loadedList, ImageNum imageNum)
{
    for (const LoadedImage& ali : loadedList) {
        if ( ali.image()->representsImageNum(imageNum) )
            return true;
    }
    return false;
}

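// Appends all direct dependents of an image before recursing into any of them
// (breadth-first), so images land in loadedList in closure load order.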
void ClosureBuilder::buildLoadOrderRecurse(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Image* image)
{
    // breadth first load
    STACK_ALLOC_ARRAY(const Image*, needToRecurse, 256);
    image->forEachDependentImage(^(uint32_t dependentIndex, dyld3::closure::Image::LinkKind kind, ImageNum depImageNum, bool &stop) {
        if ( !inLoadedImageArray(loadedList, depImageNum) ) {
            const Image* depImage = ImageArray::findImage(imagesArrays, depImageNum);
            loadedList.push_back(LoadedImage::make(depImage));
            needToRecurse.push_back(depImage);
        }
    });

    // recurse load
    for (const Image* img : needToRecurse) {
        buildLoadOrderRecurse(loadedList, imagesArrays, img);
    }
}

void ClosureBuilder::buildLoadOrder(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Closure* toAdd)
{
    const dyld3::closure::Image* topImage = ImageArray::findImage(imagesArrays, toAdd->topImageNum());
    loadedList.push_back(LoadedImage::make(topImage));
    buildLoadOrderRecurse(loadedList, imagesArrays, topImage);
}



//////////////////////////// ObjCStringTable ////////////////////////////////////////

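// Serializes a precomputed perfect hash table of strings. A later lookup is roughly:
//   uint32_t h = hash(name);                 // scramble/tab/shift/mask based hash
//   if (checkBytes()[h] != checkbyte(name))  // -> not found
//   target = targets()[h];                   // sentinelTarget means empty slot
// Unused slots hold the sentinel target and a zero check byte.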
template<typename PerfectHashT, typename ImageOffsetT>
void ObjCStringTable::write(const PerfectHashT& phash, const Array<std::pair<const char*, ImageOffsetT>>& strings)
{
    ObjCSelectorOpt::StringTarget sentinel = (ObjCSelectorOpt::StringTarget)ImageOffsetT::sentinelValue;
    // Set header
    capacity = phash.capacity;
    occupied = phash.occupied;
    shift = phash.shift;
    mask = phash.mask;
    sentinelTarget = sentinel;
    roundedTabSize = std::max(phash.mask+1, 4U);
    salt = phash.salt;

    // Set hash data
    for (uint32_t i = 0; i < 256; i++) {
        scramble[i] = phash.scramble[i];
    }
    for (uint32_t i = 0; i < phash.mask+1; i++) {
        tab[i] = phash.tab[i];
    }

    dyld3::Array<StringTarget> targetsArray = targets();
    dyld3::Array<StringHashCheckByte> checkBytesArray = checkBytes();

    // Set offsets to the sentinel
    for (uint32_t i = 0; i < phash.capacity; i++) {
        targetsArray[i] = sentinel;
    }
    // Set checkbytes to 0
    for (uint32_t i = 0; i < phash.capacity; i++) {
        checkBytesArray[i] = 0;
    }

    // Set real string offsets and checkbytes
    for (const auto& s : strings) {
        assert(s.second.raw != sentinelTarget);
        uint32_t h = hash(s.first);
        targetsArray[h] = s.second.raw;
        checkBytesArray[h] = checkbyte(s.first);
    }
}

//////////////////////////// ObjCClassOpt ////////////////////////////////////////


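// Extends the string table with per-name class (or protocol) targets. A name with a
// single implementation stores its target directly in classOffsets(); a name with
// several stores a placeholder (isDuplicate=1, plus index and count) pointing at a
// contiguous run of targets in the duplicates array.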
template<typename PerfectHashT, typename ImageOffsetT, typename ClassesMapT>
void ObjCClassOpt::write(const PerfectHashT& phash, const Array<std::pair<const char*, ImageOffsetT>>& strings,
                         const ClassesMapT& classes, uint32_t preCalculatedDuplicateCount)
{
    ObjCStringTable::write(phash, strings);

    __block dyld3::Array<ClassTarget> classOffsetsArray = classOffsets();
    __block dyld3::Array<ClassTarget> duplicateOffsetsArray = duplicateOffsets(preCalculatedDuplicateCount);

    // Set class offsets to the sentinel
    for (uint32_t i = 0; i < capacity; i++) {
        classOffsetsArray[i].raw = dyld3::closure::Image::ObjCImageOffset::sentinelValue;
    }

    classes.forEachEntry(^(const char *const &key, const Image::ObjCClassImageOffset **values, uint64_t valuesCount) {
        uint32_t keyIndex = getIndex(key);
        assert(keyIndex != indexNotFound);
        assert(classOffsetsArray[keyIndex].raw == dyld3::closure::Image::ObjCImageOffset::sentinelValue);

        if (valuesCount == 1) {
            // Only one entry so write it into the class offsets directly
            Image::ObjCClassImageOffset classImageOffset = *(values[0]);
            assert(classImageOffset.classData.isDuplicate == 0);
            classOffsetsArray[keyIndex] = classImageOffset;
            return;
        }

        // We have more than one value. We add a placeholder to the class offsets which tells us
        // where this name's run of classes starts in the duplicates array
        uint32_t dest = duplicateCount();
        duplicateCount() += valuesCount;

        Image::ObjCClassImageOffset classImagePlaceholder;
        assert(valuesCount < (1 << 8));
        classImagePlaceholder.duplicateData.count = (uint32_t)valuesCount;
        classImagePlaceholder.duplicateData.index = dest;
        classImagePlaceholder.duplicateData.isDuplicate = 1;
        classOffsetsArray[keyIndex] = classImagePlaceholder;

        for (uint64_t i = 0; i != valuesCount; ++i) {
            Image::ObjCClassImageOffset classImageOffset = *(values[i]);
            assert(classImageOffset.classData.isDuplicate == 0);
            duplicateOffsetsArray.push_back(classImageOffset);
        }
    });
}


} // namespace closure
} // namespace dyld3