]> git.saurik.com Git - apple/dyld.git/blob - dyld3/shared-cache/DyldSharedCache.cpp
dyld-832.7.3.tar.gz
[apple/dyld.git] / dyld3 / shared-cache / DyldSharedCache.cpp
1 /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
2 *
3 * Copyright (c) 2014 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <mach/mach.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <mach-o/dyld_priv.h>
#include <assert.h>
#include <unistd.h>
#include <dlfcn.h>
37
38 #if BUILDING_CACHE_BUILDER
39 #include <set>
40 #include <string>
41 #include <vector>
42 #include <unordered_map>
43 #include <unordered_set>
44 #include "SharedCacheBuilder.h"
45 #include "FileUtils.h"
46 #endif
47
48 #define NO_ULEB
49 #include "MachOLoaded.h"
50 #include "ClosureFileSystemPhysical.h"
51 #include "DyldSharedCache.h"
52 #include "Trie.hpp"
53 #include "StringUtils.h"
54
55 #include "objc-shared-cache.h"
56
57 #if !(BUILDING_LIBDYLD || BUILDING_DYLD)
58 #include "JSONWriter.h"
59 #include <sstream>
60 #endif
61
62
63 #if BUILDING_CACHE_BUILDER
// Build a new dyld shared cache from the given dylibs and, if the options name
// output paths, write the cache file and its human-readable map file to disk.
//
// options       - platform, output paths, and other build configuration
// fileSystem    - abstraction used by the builder to load file content
// dylibsToCache - dylibs to embed in the cache
// otherOsDylibs - OS dylibs kept outside the cache but tracked for closures
// osExecutables - OS executables whose launch closures are prebuilt
//
// Returns a CreateResults; results.errorMessage is non-empty on failure.
DyldSharedCache::CreateResults DyldSharedCache::create(const CreateOptions&               options,
                                                       const dyld3::closure::FileSystem&  fileSystem,
                                                       const std::vector<MappedMachO>&    dylibsToCache,
                                                       const std::vector<MappedMachO>&    otherOsDylibs,
                                                       const std::vector<MappedMachO>&    osExecutables)
{
    CreateResults       results;
    SharedCacheBuilder  cache(options, fileSystem);
    // bail out early if the builder could not even be constructed
    if ( !cache.errorMessage().empty() ) {
        results.errorMessage = cache.errorMessage();
        return results;
    }

    // on embedded platforms, register well-known install-name aliases by hand
    std::vector<FileAlias> aliases;
    switch ( options.platform ) {
        case dyld3::Platform::iOS:
        case dyld3::Platform::watchOS:
        case dyld3::Platform::tvOS:
            // FIXME: embedded cache builds should be getting aliases from manifest
            aliases.push_back({"/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit", "/System/Library/Frameworks/IOKit.framework/IOKit"});
            aliases.push_back({"/usr/lib/libstdc++.6.dylib",                                  "/usr/lib/libstdc++.dylib"});
            aliases.push_back({"/usr/lib/libstdc++.6.dylib",                                  "/usr/lib/libstdc++.6.0.9.dylib"});
            aliases.push_back({"/usr/lib/libz.1.dylib",                                       "/usr/lib/libz.dylib"});
            aliases.push_back({"/usr/lib/libSystem.B.dylib",                                  "/usr/lib/libSystem.dylib"});
            aliases.push_back({"/System/Library/Frameworks/Foundation.framework/Foundation",  "/usr/lib/libextension.dylib"}); // <rdar://44315703>
            break;
        default:
            break;
    }

    cache.build(dylibsToCache, otherOsDylibs, osExecutables, aliases);

    // collect build artifacts regardless of success so callers can inspect them
    results.agileSignature = cache.agileSignature();
    results.cdHashFirst    = cache.cdHashFirst();
    results.cdHashSecond   = cache.cdHashSecond();
    results.warnings       = cache.warnings();
    results.evictions      = cache.evictions();
    if ( cache.errorMessage().empty() ) {
        if ( !options.outputFilePath.empty() ) {
            // write cache file, if path non-empty
            cache.writeFile(options.outputFilePath);
        }
        if ( !options.outputMapFilePath.empty() ) {
            // write map file, if path non-empty
            cache.writeMapFile(options.outputMapFilePath);
        }
    }
    results.errorMessage = cache.errorMessage();
    // builder buffer is large; release it before returning
    cache.deleteBuffer();
    return results;
}
115
// Verify that every dylib in "dylibsToCache" depends only on other dylibs that
// will also be in the cache.  Missing dependencies are pulled in via the
// "loader" callback when possible; dylibs that cannot be made self-contained
// are removed from the vector and reported (with reasons) in "rejected".
// "badZippered" accumulates paths excluded for zippering conflicts.
// Returns true if no dylib was rejected.
bool DyldSharedCache::verifySelfContained(std::vector<MappedMachO>& dylibsToCache,
                                          std::unordered_set<std::string>& badZippered,
                                          MappedMachO (^loader)(const std::string& runtimePath, Diagnostics& diag),
                                          std::vector<std::pair<DyldSharedCache::MappedMachO, std::set<std::string>>>& rejected)
{
    // build map of dylibs
    __block std::map<std::string, std::set<std::string>> badDylibs;
    __block std::set<std::string> knownDylibs;
    for (const DyldSharedCache::MappedMachO& dylib : dylibsToCache) {
        std::set<std::string> reasons;
        if ( dylib.mh->canBePlacedInDyldCache(dylib.runtimePath.c_str(), ^(const char* msg) { badDylibs[dylib.runtimePath].insert(msg);}) ) {
            // a dylib is reachable by either its runtime path or its install name
            knownDylibs.insert(dylib.runtimePath);
            knownDylibs.insert(dylib.mh->installName());
        } else {
            // empty reason string: canBePlacedInDyldCache already recorded details
            badDylibs[dylib.runtimePath].insert("");
        }
    }

    // check all dependencies to assure every dylib in cache only depends on other dylibs in cache
    __block std::set<std::string> missingWeakDylibs;
    __block bool doAgain = true;
    // iterate to a fixed point: rejecting one dylib may orphan others that depend on it
    while ( doAgain ) {
        __block std::vector<DyldSharedCache::MappedMachO> foundMappings;
        doAgain = false;
        // scan dylib list making sure all dependents are in dylib list
        for (const DyldSharedCache::MappedMachO& dylib : dylibsToCache) {
            if ( badDylibs.count(dylib.runtimePath) != 0 )
                continue;
            dylib.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                // weak dependencies already known to be absent are acceptable
                if ( isWeak && (missingWeakDylibs.count(loadPath) != 0) )
                    return;
                if ( knownDylibs.count(loadPath) == 0 ) {
                    doAgain = true;
                    // a dependency on a bad zippered dylib poisons this dylib too
                    if ( badZippered.count(loadPath) != 0 ) {
                        badDylibs[dylib.runtimePath].insert("");
                        knownDylibs.erase(dylib.runtimePath);
                        knownDylibs.erase(dylib.mh->installName());
                        badZippered.insert(dylib.runtimePath);
                        badZippered.insert(dylib.mh->installName());
                        return;
                    }
                    Diagnostics diag;
                    MappedMachO foundMapping;
                    if ( badDylibs.count(loadPath) == 0 )
                        foundMapping = loader(loadPath, diag);
                    if ( foundMapping.length == 0 ) {
                        // We allow weakly linked dylibs to be missing only if they are not present on disk
                        // The shared cache doesn't contain enough information to patch them in later if they are
                        // found on disk, so we don't want to pull something in to cache and cut it off from a dylib it
                        // could have used.
                        if ( isWeak ) {
                            missingWeakDylibs.insert(loadPath);
                            return;
                        }

                        if (diag.hasError())
                            badDylibs[dylib.runtimePath].insert(diag.errorMessage());
                        else
                            badDylibs[dylib.runtimePath].insert(std::string("Could not find dependency '") + loadPath +"'");
                        knownDylibs.erase(dylib.runtimePath);
                        knownDylibs.erase(dylib.mh->installName());
                    }
                    else {
                        std::set<std::string> reasons;
                        if ( foundMapping.mh->canBePlacedInDyldCache(foundMapping.runtimePath.c_str(), ^(const char* msg) { badDylibs[foundMapping.runtimePath].insert(msg);})) {
                            // see if existing mapping was returned
                            bool alreadyInVector = false;
                            for (const MappedMachO& existing : dylibsToCache) {
                                if ( existing.mh == foundMapping.mh ) {
                                    alreadyInVector = true;
                                    break;
                                }
                            }
                            if ( !alreadyInVector )
                                foundMappings.push_back(foundMapping);
                            knownDylibs.insert(loadPath);
                            knownDylibs.insert(foundMapping.runtimePath);
                            knownDylibs.insert(foundMapping.mh->installName());
                        } else {
                            badDylibs[dylib.runtimePath].insert("");
                        }
                    }
                }
            });
        }
        // dependencies found this pass join the candidate list
        dylibsToCache.insert(dylibsToCache.end(), foundMappings.begin(), foundMappings.end());
        // remove bad dylibs
        const auto badDylibsCopy = badDylibs;
        dylibsToCache.erase(std::remove_if(dylibsToCache.begin(), dylibsToCache.end(), [&](const DyldSharedCache::MappedMachO& dylib) {
            auto i = badDylibsCopy.find(dylib.runtimePath);
            if ( i != badDylibsCopy.end()) {
                // Only add the warning if we are not a bad zippered dylib
                if ( badZippered.count(dylib.runtimePath) == 0 )
                    rejected.push_back(std::make_pair(dylib, i->second));
                return true;
            }
            else {
                return false;
            }
        }), dylibsToCache.end());
    }

    return badDylibs.empty();
}
220 #endif
221
// Convert an unslid cache address stored in a header field into a live pointer
// of type T by applying this cache's slide (difference between where the cache
// is mapped and its preferred unslid load address).
template<typename T>
const T DyldSharedCache::getAddrField(uint64_t addr) const {
    uint64_t slide = (uint64_t)this - unslidLoadAddress();
    return (const T)(addr + slide);
}
227
228 uint64_t DyldSharedCache::getCodeSignAddress() const
229 {
230 auto mappings = (const dyld_cache_mapping_info*)((uint8_t*)this + header.mappingOffset);
231 return mappings[header.mappingCount-1].address + mappings[header.mappingCount-1].size;
232 }
233
// Call "handler" once per mapped cache region with its content pointer, unslid
// vm address, size, protections, and (on newer caches) slide-info flags.
// Silently does nothing if the header fails basic sanity checks.
void DyldSharedCache::forEachRegion(void (^handler)(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions,
                                                    uint64_t flags)) const
{
    // <rdar://problem/49875993> sanity check cache header
    if ( strncmp(header.magic, "dyld_v1", 7) != 0 )
        return;
    if ( header.mappingOffset > 1024 )
        return;
    if ( header.mappingCount > 20 )
        return;
    // Old caches predate mappingWithSlideOffset: walk the plain mapping array
    // (no per-region flags available, so pass 0).
    if ( header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
        const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
        const dyld_cache_mapping_info* mappingsEnd = &mappings[header.mappingCount];
        for (const dyld_cache_mapping_info* m=mappings; m < mappingsEnd; ++m) {
            handler((char*)this + m->fileOffset, m->address, m->size, m->initProt, 0);
        }
    } else {
        // New caches: regions carry slide-related flags
        const dyld_cache_mapping_and_slide_info* mappings = (const dyld_cache_mapping_and_slide_info*)((char*)this + header.mappingWithSlideOffset);
        const dyld_cache_mapping_and_slide_info* mappingsEnd = &mappings[header.mappingCount];
        for (const dyld_cache_mapping_and_slide_info* m=mappings; m < mappingsEnd; ++m) {
            handler((char*)this + m->fileOffset, m->address, m->size, m->initProt, m->flags);
        }
    }
}
258
// Returns true if [addr, addr+length) lies entirely within one mapped cache
// region; on success "readOnly" is set from that region's protections.
bool DyldSharedCache::inCache(const void* addr, size_t length, bool& readOnly) const
{
    // quick out if before start of cache
    if ( addr < this )
        return false;

    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    // slide = where the cache actually is minus where it was built to load
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    uintptr_t unslidStart = (uintptr_t)addr - slide;

    // quick out if after end of cache
    const dyld_cache_mapping_info* lastMapping = &mappings[header.mappingCount - 1];
    if ( unslidStart > (lastMapping->address + lastMapping->size) )
        return false;

    // walk cache regions
    const dyld_cache_mapping_info* mappingsEnd = &mappings[header.mappingCount];
    uintptr_t unslidEnd = unslidStart + length;
    for (const dyld_cache_mapping_info* m=mappings; m < mappingsEnd; ++m) {
        // NOTE(review): the end bound is strict '<', so a range ending exactly at
        // a region boundary is reported as not in cache — confirm this is intended
        if ( (unslidStart >= m->address) && (unslidEnd < (m->address+m->size)) ) {
            readOnly = ((m->initProt & VM_PROT_WRITE) == 0);
            return true;
        }
    }

    return false;
}
286
287 bool DyldSharedCache::isAlias(const char* path) const {
288 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
289 uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
290 // paths for aliases are store between cache header and first segment
291 return path < ((char*)mappings[0].address + slide);
292 }
293
// Call "handler" once per dylib in the cache with its mach_header and install
// name.  Alias entries (extra paths pointing at the same image) are skipped.
void DyldSharedCache::forEachImage(void (^handler)(const mach_header* mh, const char* installName)) const
{
    const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    // only works on caches whose first region starts at file offset 0
    if ( mappings[0].fileOffset != 0 )
        return;
    uint64_t firstImageOffset = 0;
    uint64_t firstRegionAddress = mappings[0].address;
    for (uint32_t i=0; i < header.imagesCount; ++i) {
        const char* dylibPath = (char*)this + dylibs[i].pathFileOffset;
        uint64_t offset = dylibs[i].address - firstRegionAddress;
        if ( firstImageOffset == 0 )
            firstImageOffset = offset;
        // skip over aliases
        // (alias path strings live between the header and the first image, so
        // their pathFileOffset is below the first image's file offset)
        if ( dylibs[i].pathFileOffset < firstImageOffset)
            continue;
        const mach_header* mh = (mach_header*)((char*)this + offset);
        handler(mh, dylibPath);
    }
}
314
// Call "handler" once per dylib in the cache with its path, recorded mod time,
// and inode.  Alias entries are skipped (same scheme as forEachImage).
void DyldSharedCache::forEachImageEntry(void (^handler)(const char* path, uint64_t mTime, uint64_t inode)) const
{
    const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    // only works on caches whose first region starts at file offset 0
    if ( mappings[0].fileOffset != 0 )
        return;
    uint64_t firstImageOffset = 0;
    uint64_t firstRegionAddress = mappings[0].address;
    for (uint32_t i=0; i < header.imagesCount; ++i) {
        const char* dylibPath = (char*)this + dylibs[i].pathFileOffset;
        uint64_t offset = dylibs[i].address - firstRegionAddress;
        if ( firstImageOffset == 0 )
            firstImageOffset = offset;
        // skip over aliases
        if ( dylibs[i].pathFileOffset < firstImageOffset)
            continue;
        handler(dylibPath, dylibs[i].modTime, dylibs[i].inode);
    }
}
334
335 const bool DyldSharedCache::hasLocalSymbolsInfo() const
336 {
337 return (header.localSymbolsOffset != 0 && header.mappingOffset > offsetof(dyld_cache_header,localSymbolsSize));
338 }
339
340 const void* DyldSharedCache::getLocalNlistEntries() const
341 {
342 // check for cache without local symbols info
343 if (!this->hasLocalSymbolsInfo())
344 return nullptr;
345 const auto localInfo = (dyld_cache_local_symbols_info*)((uint8_t*)this + header.localSymbolsOffset);
346 return (uint8_t*)localInfo + localInfo->nlistOffset;
347 }
348
349 const uint32_t DyldSharedCache::getLocalNlistCount() const
350 {
351 // check for cache without local symbols info
352 if (!this->hasLocalSymbolsInfo())
353 return 0;
354 const auto localInfo = (dyld_cache_local_symbols_info*)((uint8_t*)this + header.localSymbolsOffset);
355 return localInfo->nlistCount;
356 }
357
358 const char* DyldSharedCache::getLocalStrings() const
359 {
360 // check for cache without local symbols info
361 if (!this->hasLocalSymbolsInfo())
362 return nullptr;
363 const auto localInfo = (dyld_cache_local_symbols_info*)((uint8_t*)this + header.localSymbolsOffset);
364 return (char*)localInfo + localInfo->stringsOffset;
365 }
366
367 const uint32_t DyldSharedCache::getLocalStringsSize() const
368 {
369 // check for cache without local symbols info
370 if (!this->hasLocalSymbolsInfo())
371 return 0;
372 const auto localInfo = (dyld_cache_local_symbols_info*)((uint8_t*)this + header.localSymbolsOffset);
373 return localInfo->stringsSize;
374 }
375
376 void DyldSharedCache::forEachLocalSymbolEntry(void (^handler)(uint32_t dylibOffset, uint32_t nlistStartIndex, uint32_t nlistCount, bool& stop)) const
377 {
378 // check for cache without local symbols info
379 if (!this->hasLocalSymbolsInfo())
380 return;
381 const auto localInfo = (dyld_cache_local_symbols_info*)((uint8_t*)this + header.localSymbolsOffset);
382 const auto localEntries = (dyld_cache_local_symbols_entry*)((uint8_t*)localInfo + localInfo->entriesOffset);
383 bool stop = false;
384 for (uint32_t i = 0; i < localInfo->entriesCount; i++) {
385 dyld_cache_local_symbols_entry localEntry = localEntries[i];
386 handler(localEntry.dylibOffset, localEntry.nlistStartIndex, localEntry.nlistCount, stop);
387 }
388 }
389
390
// Return the mach_header of the image at "index" and fill in its recorded
// mod time and inode.  "index" is not bounds-checked; callers must pass a
// value below header.imagesCount.
const mach_header* DyldSharedCache::getIndexedImageEntry(uint32_t index, uint64_t& mTime, uint64_t& inode) const
{
    const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    mTime = dylibs[index].modTime;
    inode = dylibs[index].inode;
    // convert the image's unslid address to a live pointer via the first region
    return (mach_header*)((uint8_t*)this + dylibs[index].address - mappings[0].address);
}
399
400
401 const char* DyldSharedCache::getIndexedImagePath(uint32_t index) const
402 {
403 auto dylibs = (const dyld_cache_image_info*)((char*)this + header.imagesOffset);
404 return (char*)this + dylibs[index].pathFileOffset;
405 }
406
407 void DyldSharedCache::forEachImageTextSegment(void (^handler)(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const uuid_t dylibUUID, const char* installName, bool& stop)) const
408 {
409 // check for old cache without imagesText array
410 if ( (header.mappingOffset <= __offsetof(dyld_cache_header, imagesTextOffset)) || (header.imagesTextCount == 0) )
411 return;
412
413 // walk imageText table and call callback for each entry
414 const dyld_cache_image_text_info* imagesText = (dyld_cache_image_text_info*)((char*)this + header.imagesTextOffset);
415 const dyld_cache_image_text_info* imagesTextEnd = &imagesText[header.imagesTextCount];
416 bool stop = false;
417 for (const dyld_cache_image_text_info* p=imagesText; p < imagesTextEnd && !stop; ++p) {
418 handler(p->loadAddress, p->textSegmentSize, p->uuid, (char*)this + p->pathOffset, stop);
419 }
420 }
421
422 bool DyldSharedCache::addressInText(uint32_t cacheOffset, uint32_t* imageIndex) const
423 {
424 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
425 if ( cacheOffset > mappings[0].size )
426 return false;
427 uint64_t targetAddr = mappings[0].address + cacheOffset;
428 // walk imageText table and call callback for each entry
429 const dyld_cache_image_text_info* imagesText = (dyld_cache_image_text_info*)((char*)this + header.imagesTextOffset);
430 const dyld_cache_image_text_info* imagesTextEnd = &imagesText[header.imagesTextCount];
431 for (const dyld_cache_image_text_info* p=imagesText; p < imagesTextEnd; ++p) {
432 if ( (p->loadAddress <= targetAddr) && (targetAddr < p->loadAddress+p->textSegmentSize) ) {
433 *imageIndex = (uint32_t)(p-imagesText);
434 return true;
435 }
436 }
437 return false;
438 }
439
// Return the architecture name embedded in the cache's magic string.  The
// magic is "dyld_v1<spaces><arch>", so skip the 7-character "dyld_v1" prefix
// and any padding spaces to land on the arch substring.
const char* DyldSharedCache::archName() const
{
    const char* archSubString = ((char*)this) + 7;
    while (*archSubString == ' ')
        ++archSubString;
    return archSubString;
}
447
448
449 dyld3::Platform DyldSharedCache::platform() const
450 {
451 return (dyld3::Platform)header.platform;
452 }
453
454 #if BUILDING_CACHE_BUILDER
455 std::string DyldSharedCache::mapFile() const
456 {
457 __block std::string result;
458 __block std::vector<uint64_t> regionStartAddresses;
459 __block std::vector<uint64_t> regionSizes;
460 __block std::vector<uint64_t> regionFileOffsets;
461
462 result.reserve(256*1024);
463 forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions,
464 uint64_t flags) {
465 regionStartAddresses.push_back(vmAddr);
466 regionSizes.push_back(size);
467 regionFileOffsets.push_back((uint8_t*)content - (uint8_t*)this);
468 char lineBuffer[256];
469 const char* prot = "RW";
470 if ( permissions == (VM_PROT_EXECUTE|VM_PROT_READ) )
471 prot = "EX";
472 else if ( permissions == VM_PROT_READ )
473 prot = "RO";
474 if ( size > 1024*1024 )
475 sprintf(lineBuffer, "mapping %s %4lluMB 0x%0llX -> 0x%0llX\n", prot, size/(1024*1024), vmAddr, vmAddr+size);
476 else
477 sprintf(lineBuffer, "mapping %s %4lluKB 0x%0llX -> 0x%0llX\n", prot, size/1024, vmAddr, vmAddr+size);
478 result += lineBuffer;
479 });
480
481 // TODO: add linkedit breakdown
482 result += "\n\n";
483
484 forEachImage(^(const mach_header* mh, const char* installName) {
485 result += std::string(installName) + "\n";
486 const dyld3::MachOFile* mf = (dyld3::MachOFile*)mh;
487 mf->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
488 char lineBuffer[256];
489 sprintf(lineBuffer, "\t%16s 0x%08llX -> 0x%08llX\n", info.segName, info.vmAddr, info.vmAddr+info.vmSize);
490 result += lineBuffer;
491 });
492 result += "\n";
493 });
494
495 return result;
496 }
497 #endif
498
499
500 uint64_t DyldSharedCache::unslidLoadAddress() const
501 {
502 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
503 return mappings[0].address;
504 }
505
// Copy this cache's UUID out of the header into "uuid".
void DyldSharedCache::getUUID(uuid_t uuid) const
{
    memcpy(uuid, header.uuid, sizeof(uuid_t));
}
510
511 uint64_t DyldSharedCache::mappedSize() const
512 {
513 __block uint64_t startAddr = 0;
514 __block uint64_t endAddr = 0;
515 forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions,
516 uint64_t flags) {
517 if ( startAddr == 0 )
518 startAddr = vmAddr;
519 uint64_t end = vmAddr+size;
520 if ( end > endAddr )
521 endAddr = end;
522 });
523 return (endAddr - startAddr);
524 }
525
526 bool DyldSharedCache::findMachHeaderImageIndex(const mach_header* mh, uint32_t& imageIndex) const
527 {
528 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
529 uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
530 uint64_t unslidMh = (uintptr_t)mh - slide;
531 const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
532 for (uint32_t i=0; i < header.imagesCount; ++i) {
533 if ( dylibs[i].address == unslidMh ) {
534 imageIndex = i;
535 return true;
536 }
537 }
538 return false;
539 }
540
541 bool DyldSharedCache::hasImagePath(const char* dylibPath, uint32_t& imageIndex) const
542 {
543 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
544 if ( mappings[0].fileOffset != 0 )
545 return false;
546 if ( header.mappingOffset >= 0x118 ) {
547 uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
548 const uint8_t* dylibTrieStart = (uint8_t*)(this->header.dylibsTrieAddr + slide);
549 const uint8_t* dylibTrieEnd = dylibTrieStart + this->header.dylibsTrieSize;
550
551 Diagnostics diag;
552 const uint8_t* imageNode = dyld3::MachOLoaded::trieWalk(diag, dylibTrieStart, dylibTrieEnd, dylibPath);
553 if ( imageNode != NULL ) {
554 imageIndex = (uint32_t)dyld3::MachOFile::read_uleb128(diag, imageNode, dylibTrieEnd);
555 return true;
556 }
557 }
558 else {
559 const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
560 uint64_t firstImageOffset = 0;
561 uint64_t firstRegionAddress = mappings[0].address;
562 for (uint32_t i=0; i < header.imagesCount; ++i) {
563 const char* aPath = (char*)this + dylibs[i].pathFileOffset;
564 if ( strcmp(aPath, dylibPath) == 0 ) {
565 imageIndex = i;
566 return true;
567 }
568 uint64_t offset = dylibs[i].address - firstRegionAddress;
569 if ( firstImageOffset == 0 )
570 firstImageOffset = offset;
571 // skip over aliases
572 if ( dylibs[i].pathFileOffset < firstImageOffset)
573 continue;
574 }
575 }
576
577 return false;
578 }
579
580 bool DyldSharedCache::isOverridablePath(const char* dylibPath) const
581 {
582 // all dylibs in customer dyld cache cannot be overridden except libdispatch.dylib
583 if ( header.cacheType == kDyldSharedCacheTypeProduction ) {
584 return (strcmp(dylibPath, "/usr/lib/system/libdispatch.dylib") == 0);
585 }
586 // in dev caches we can override all paths
587 return true;
588 }
589
590 bool DyldSharedCache::hasNonOverridablePath(const char* dylibPath) const
591 {
592 // all dylibs in customer dyld cache cannot be overridden except libdispatch.dylib
593 bool pathIsInDyldCacheWhichCannotBeOverridden = false;
594 if ( header.cacheType == kDyldSharedCacheTypeProduction ) {
595 uint32_t imageIndex;
596 pathIsInDyldCacheWhichCannotBeOverridden = this->hasImagePath(dylibPath, imageIndex);
597 if ( pathIsInDyldCacheWhichCannotBeOverridden && isOverridablePath(dylibPath) )
598 pathIsInDyldCacheWhichCannotBeOverridden = false;
599 }
600 return pathIsInDyldCacheWhichCannotBeOverridden;
601 }
602
603 #if !BUILDING_LIBDSC
// Look up a prebuilt closure Image for a non-cached OS dylib by path, using
// the "other" trie.  Returns nullptr if this cache has no other-image data or
// the path is not present.
const dyld3::closure::Image* DyldSharedCache::findDlopenOtherImage(const char* path) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( mappings[0].fileOffset != 0 )
        return nullptr;
    // old caches predate the otherImageArray fields
    if ( header.mappingOffset < __offsetof(dyld_cache_header, otherImageArrayAddr) )
        return nullptr;
    if ( header.otherImageArrayAddr == 0 )
        return nullptr;
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* dylibTrieStart = (uint8_t*)(this->header.otherTrieAddr + slide);
    const uint8_t* dylibTrieEnd = dylibTrieStart + this->header.otherTrieSize;

    Diagnostics diag;
    const uint8_t* imageNode = dyld3::MachOLoaded::trieWalk(diag, dylibTrieStart, dylibTrieEnd, path);
    if ( imageNode != NULL ) {
        // trie leaf holds the ImageNum; resolve it in the other-image array
        dyld3::closure::ImageNum imageNum = (uint32_t)dyld3::MachOFile::read_uleb128(diag, imageNode, dylibTrieEnd);
        uint64_t arrayAddrOffset = header.otherImageArrayAddr - mappings[0].address;
        const dyld3::closure::ImageArray* otherImageArray = (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
        return otherImageArray->imageForNum(imageNum);
    }

    return nullptr;
}
628
// Look up the prebuilt launch closure for "executablePath" in the program
// closures trie.  For /System/ binaries that miss, retry with the realpath in
// case the process was launched via a symlink.  Returns nullptr if not found.
const dyld3::closure::LaunchClosure* DyldSharedCache::findClosure(const char* executablePath) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* executableTrieStart = (uint8_t*)(this->header.progClosuresTrieAddr + slide);
    const uint8_t* executableTrieEnd = executableTrieStart + this->header.progClosuresTrieSize;
    const uint8_t* closuresStart = (uint8_t*)(this->header.progClosuresAddr + slide);

    Diagnostics diag;
    const uint8_t* imageNode = dyld3::MachOLoaded::trieWalk(diag, executableTrieStart, executableTrieEnd, executablePath);
    if ( (imageNode == NULL) && (strncmp(executablePath, "/System/", 8) == 0) ) {
        // anything in /System/ should have a closure.  Perhaps it was launched via symlink path
        char realPath[PATH_MAX];
        if ( realpath(executablePath, realPath) != NULL )
            imageNode = dyld3::MachOLoaded::trieWalk(diag, executableTrieStart, executableTrieEnd, realPath);
    }
    if ( imageNode != NULL ) {
        // trie leaf holds the closure's offset; validate before dereferencing
        uint32_t closureOffset = (uint32_t)dyld3::MachOFile::read_uleb128(diag, imageNode, executableTrieEnd);
        if ( closureOffset < this->header.progClosuresSize )
            return (dyld3::closure::LaunchClosure*)((uint8_t*)closuresStart + closureOffset);
    }

    return nullptr;
}
653
654 #if !BUILDING_LIBDYLD && !BUILDING_DYLD
655 void DyldSharedCache::forEachLaunchClosure(void (^handler)(const char* executableRuntimePath, const dyld3::closure::LaunchClosure* closure)) const
656 {
657 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
658 uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
659 const uint8_t* executableTrieStart = (uint8_t*)(this->header.progClosuresTrieAddr + slide);
660 const uint8_t* executableTrieEnd = executableTrieStart + this->header.progClosuresTrieSize;
661 const uint8_t* closuresStart = (uint8_t*)(this->header.progClosuresAddr + slide);
662
663 std::vector<DylibIndexTrie::Entry> closureEntries;
664 if ( Trie<DylibIndex>::parseTrie(executableTrieStart, executableTrieEnd, closureEntries) ) {
665 for (DylibIndexTrie::Entry& entry : closureEntries ) {
666 uint32_t offset = entry.info.index;
667 if ( offset < this->header.progClosuresSize )
668 handler(entry.name.c_str(), (const dyld3::closure::LaunchClosure*)(closuresStart+offset));
669 }
670 }
671 }
672
673 void DyldSharedCache::forEachDlopenImage(void (^handler)(const char* runtimePath, const dyld3::closure::Image* image)) const
674 {
675 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
676 uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
677 const uint8_t* otherTrieStart = (uint8_t*)(this->header.otherTrieAddr + slide);
678 const uint8_t* otherTrieEnd = otherTrieStart + this->header.otherTrieSize;
679
680 std::vector<DylibIndexTrie::Entry> otherEntries;
681 if ( Trie<DylibIndex>::parseTrie(otherTrieStart, otherTrieEnd, otherEntries) ) {
682 for (const DylibIndexTrie::Entry& entry : otherEntries ) {
683 dyld3::closure::ImageNum imageNum = entry.info.index;
684 uint64_t arrayAddrOffset = header.otherImageArrayAddr - mappings[0].address;
685 const dyld3::closure::ImageArray* otherImageArray = (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
686 handler(entry.name.c_str(), otherImageArray->imageForNum(imageNum));
687 }
688 }
689 }
690
691 void DyldSharedCache::forEachDylibPath(void (^handler)(const char* dylibPath, uint32_t index)) const
692 {
693 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
694 uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
695 const uint8_t* dylibTrieStart = (uint8_t*)(this->header.dylibsTrieAddr + slide);
696 const uint8_t* dylibTrieEnd = dylibTrieStart + this->header.dylibsTrieSize;
697
698 std::vector<DylibIndexTrie::Entry> dylibEntries;
699 if ( Trie<DylibIndex>::parseTrie(dylibTrieStart, dylibTrieEnd, dylibEntries) ) {
700 for (DylibIndexTrie::Entry& entry : dylibEntries ) {
701 handler(entry.name.c_str(), entry.info.index);
702 }
703 }
704 }
705 #endif // !BUILDING_LIBDYLD && !BUILDING_DYLD
706 #endif // !BUILDING_LIBDSC
707
708 const dyld3::closure::ImageArray* DyldSharedCache::cachedDylibsImageArray() const
709 {
710 // check for old cache without imagesArray
711 if ( header.mappingOffset < 0x100 )
712 return nullptr;
713
714 if ( header.dylibsImageArrayAddr == 0 )
715 return nullptr;
716
717 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
718 uint64_t arrayAddrOffset = header.dylibsImageArrayAddr - mappings[0].address;
719 return (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
720 }
721
722 const dyld3::closure::ImageArray* DyldSharedCache::otherOSImageArray() const
723 {
724 // check for old cache without imagesArray
725 if ( header.mappingOffset < __offsetof(dyld_cache_header, otherImageArrayAddr) )
726 return nullptr;
727
728 if ( header.otherImageArrayAddr == 0 )
729 return nullptr;
730
731 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
732 uint64_t arrayAddrOffset = header.otherImageArrayAddr - mappings[0].address;
733 return (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
734 }
735
736
737 uint32_t DyldSharedCache::patchableExportCount(uint32_t imageIndex) const {
738 if ( header.patchInfoAddr == 0 )
739 return 0;
740
741 const dyld_cache_patch_info* patchInfo = getAddrField<dyld_cache_patch_info*>(header.patchInfoAddr);
742 const dyld_cache_image_patches* patchArray = getAddrField<dyld_cache_image_patches*>(patchInfo->patchTableArrayAddr);
743 if (imageIndex > patchInfo->patchTableArrayCount)
744 return 0;
745 return patchArray[imageIndex].patchExportsCount;
746 }
747
748 void DyldSharedCache::forEachPatchableExport(uint32_t imageIndex, void (^handler)(uint32_t cacheOffsetOfImpl, const char* exportName)) const {
749 if ( header.patchInfoAddr == 0 )
750 return;
751
752 const dyld_cache_patch_info* patchInfo = getAddrField<dyld_cache_patch_info*>(header.patchInfoAddr);
753 const dyld_cache_image_patches* patchArray = getAddrField<dyld_cache_image_patches*>(patchInfo->patchTableArrayAddr);
754 if (imageIndex > patchInfo->patchTableArrayCount)
755 return;
756 const dyld_cache_image_patches& patch = patchArray[imageIndex];
757 if ( (patch.patchExportsStartIndex + patch.patchExportsCount) > patchInfo->patchExportArrayCount )
758 return;
759 const dyld_cache_patchable_export* patchExports = getAddrField<dyld_cache_patchable_export*>(patchInfo->patchExportArrayAddr);
760 const char* exportNames = getAddrField<char*>(patchInfo->patchExportNamesAddr);
761 for (uint64_t exportIndex = 0; exportIndex != patch.patchExportsCount; ++exportIndex) {
762 const dyld_cache_patchable_export& patchExport = patchExports[patch.patchExportsStartIndex + exportIndex];
763 const char* exportName = ( patchExport.exportNameOffset < patchInfo->patchExportNamesSize ) ? &exportNames[patchExport.exportNameOffset] : "";
764 handler(patchExport.cacheOffsetOfImpl, exportName);
765 }
766 }
767
768 void DyldSharedCache::forEachPatchableUseOfExport(uint32_t imageIndex, uint32_t cacheOffsetOfImpl,
769 void (^handler)(dyld_cache_patchable_location patchLocation)) const {
770 if ( header.patchInfoAddr == 0 )
771 return;
772
773 // Loading a new cache so get the data from the cache header
774 const dyld_cache_patch_info* patchInfo = getAddrField<dyld_cache_patch_info*>(header.patchInfoAddr);
775 const dyld_cache_image_patches* patchArray = getAddrField<dyld_cache_image_patches*>(patchInfo->patchTableArrayAddr);
776 if (imageIndex > patchInfo->patchTableArrayCount)
777 return;
778 const dyld_cache_image_patches& patch = patchArray[imageIndex];
779 if ( (patch.patchExportsStartIndex + patch.patchExportsCount) > patchInfo->patchExportArrayCount )
780 return;
781 const dyld_cache_patchable_export* patchExports = getAddrField<dyld_cache_patchable_export*>(patchInfo->patchExportArrayAddr);
782 const dyld_cache_patchable_location* patchLocations = getAddrField<dyld_cache_patchable_location*>(patchInfo->patchLocationArrayAddr);
783 for (uint64_t exportIndex = 0; exportIndex != patch.patchExportsCount; ++exportIndex) {
784 const dyld_cache_patchable_export& patchExport = patchExports[patch.patchExportsStartIndex + exportIndex];
785 if ( patchExport.cacheOffsetOfImpl != cacheOffsetOfImpl )
786 continue;
787 if ( (patchExport.patchLocationsStartIndex + patchExport.patchLocationsCount) > patchInfo->patchLocationArrayCount )
788 return;
789 for (uint64_t locationIndex = 0; locationIndex != patchExport.patchLocationsCount; ++locationIndex) {
790 const dyld_cache_patchable_location& patchLocation = patchLocations[patchExport.patchLocationsStartIndex + locationIndex];
791 handler(patchLocation);
792 }
793 }
794 }
795
796 #if !(BUILDING_LIBDYLD || BUILDING_DYLD)
797 // MRM map file generator
798 std::string DyldSharedCache::generateJSONMap(const char* disposition) const {
799 dyld3::json::Node cacheNode;
800
801 cacheNode.map["version"].value = "1";
802 cacheNode.map["disposition"].value = disposition;
803 cacheNode.map["base-address"].value = dyld3::json::hex(unslidLoadAddress());
804 uuid_t cache_uuid;
805 getUUID(cache_uuid);
806 uuid_string_t cache_uuidStr;
807 uuid_unparse(cache_uuid, cache_uuidStr);
808 cacheNode.map["uuid"].value = cache_uuidStr;
809
810 __block dyld3::json::Node imagesNode;
811 forEachImage(^(const mach_header *mh, const char *installName) {
812 dyld3::json::Node imageNode;
813 imageNode.map["path"].value = installName;
814 dyld3::MachOAnalyzer* ma = (dyld3::MachOAnalyzer*)mh;
815 uuid_t uuid;
816 if (ma->getUuid(uuid)) {
817 uuid_string_t uuidStr;
818 uuid_unparse(uuid, uuidStr);
819 imageNode.map["uuid"].value = uuidStr;
820 }
821
822 __block dyld3::json::Node segmentsNode;
823 ma->forEachSegment(^(const dyld3::MachOAnalyzer::SegmentInfo &info, bool &stop) {
824 dyld3::json::Node segmentNode;
825 segmentNode.map["name"].value = info.segName;
826 segmentNode.map["start-vmaddr"].value = dyld3::json::hex(info.vmAddr);
827 segmentNode.map["end-vmaddr"].value = dyld3::json::hex(info.vmAddr + info.vmSize);
828 segmentsNode.array.push_back(segmentNode);
829 });
830 imageNode.map["segments"] = segmentsNode;
831 imagesNode.array.push_back(imageNode);
832 });
833
834 cacheNode.map["images"] = imagesNode;
835
836 std::stringstream stream;
837 printJSON(cacheNode, 0, stream);
838
839 return stream.str();
840 }
841
842 std::string DyldSharedCache::generateJSONDependents() const {
843 std::unordered_map<std::string, std::set<std::string>> dependents;
844 computeTransitiveDependents(dependents);
845
846 std::stringstream stream;
847
848 stream << "{";
849 bool first = true;
850 for (auto p : dependents) {
851 if (!first) stream << "," << std::endl;
852 first = false;
853
854 stream << "\"" << p.first << "\" : [" << std::endl;
855 bool firstDependent = true;
856 for (const std::string & dependent : p.second) {
857 if (!firstDependent) stream << "," << std::endl;
858 firstDependent = false;
859 stream << " \"" << dependent << "\"";
860 }
861 stream << "]" << std::endl;
862 }
863 stream << "}" << std::endl;
864 return stream.str();
865 }
866
867 #endif
868
869 #if !(BUILDING_LIBDYLD || BUILDING_DYLD)
870 dyld3::MachOAnalyzer::VMAddrConverter DyldSharedCache::makeVMAddrConverter(bool contentRebased) const {
871 typedef dyld3::MachOAnalyzer::VMAddrConverter VMAddrConverter;
872
873 __block VMAddrConverter::SharedCacheFormat pointerFormat = VMAddrConverter::SharedCacheFormat::none;
874 __block uint64_t pointerValueAdd = 0;;
875 forEachSlideInfo(^(uint64_t mappingStartAddress, uint64_t mappingSize, const uint8_t *mappingPagesStart, uint64_t slideInfoOffset, uint64_t slideInfoSize, const dyld_cache_slide_info *slideInfoHeader) {
876 assert(slideInfoHeader->version >= 2);
877 if ( slideInfoHeader->version == 2 ) {
878 const dyld_cache_slide_info2* slideInfo = (dyld_cache_slide_info2*)(slideInfoHeader);
879 assert(slideInfo->delta_mask == 0x00FFFF0000000000);
880 pointerFormat = VMAddrConverter::SharedCacheFormat::v2_x86_64_tbi;
881 pointerValueAdd = slideInfo->value_add;
882 } else if ( slideInfoHeader->version == 3 ) {
883 pointerFormat = VMAddrConverter::SharedCacheFormat::v3;
884 pointerValueAdd = unslidLoadAddress();
885 } else {
886 assert(false);
887 }
888 });
889
890 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
891 uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
892
893 VMAddrConverter vmAddrConverter;
894 vmAddrConverter.preferredLoadAddress = pointerValueAdd;
895 vmAddrConverter.slide = slide;
896 vmAddrConverter.chainedPointerFormat = 0;
897 vmAddrConverter.sharedCacheChainedPointerFormat = pointerFormat;
898 vmAddrConverter.contentRebased = contentRebased;
899
900 return vmAddrConverter;
901 }
902 #endif
903
904 const dyld_cache_slide_info* DyldSharedCache::legacyCacheSlideInfo() const
905 {
906 assert(header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset));
907 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
908 uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
909
910 uint64_t offsetInLinkEditRegion = (header.slideInfoOffsetUnused - mappings[2].fileOffset);
911 return (dyld_cache_slide_info*)((uint8_t*)(mappings[2].address) + slide + offsetInLinkEditRegion);
912 }
913
914 const dyld_cache_mapping_info* DyldSharedCache::legacyCacheDataRegionMapping() const
915 {
916 assert(header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset));
917 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
918 return &mappings[1];
919 }
920
921 const uint8_t* DyldSharedCache::legacyCacheDataRegionBuffer() const
922 {
923 assert(header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset));
924 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
925 uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
926
927 return (uint8_t*)(legacyCacheDataRegionMapping()->address) + slide;
928 }
929
930 #if !BUILDING_LIBDSC
931 const objc_opt::objc_opt_t* DyldSharedCache::objcOpt() const {
932 // Find the objc image
933 const dyld3::MachOAnalyzer* objcMA = nullptr;
934
935 uint32_t imageIndex;
936 if ( hasImagePath("/usr/lib/libobjc.A.dylib", imageIndex) ) {
937 const dyld3::closure::ImageArray* images = cachedDylibsImageArray();
938 const dyld3::closure::Image* image = images->imageForNum(imageIndex+1);
939 objcMA = (const dyld3::MachOAnalyzer*)((uintptr_t)this + image->cacheOffset());
940 } else {
941 return nullptr;
942 }
943
944 // If we found the objc image, then try to find the read-only data inside.
945 __block const uint8_t* objcROContent = nullptr;
946 int64_t slide = objcMA->getSlide();
947 objcMA->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo& info, bool malformedSectionRange, bool& stop) {
948 if (strcmp(info.segInfo.segName, "__TEXT") != 0)
949 return;
950 if (strcmp(info.sectName, "__objc_opt_ro") != 0)
951 return;
952 if ( malformedSectionRange ) {
953 stop = true;
954 return;
955 }
956 objcROContent = (uint8_t*)(info.sectAddr + slide);
957 });
958
959 if (objcROContent == nullptr)
960 return nullptr;
961
962 const objc_opt::objc_opt_t* optObjCHeader = (const objc_opt::objc_opt_t*)objcROContent;
963 return optObjCHeader->version == objc_opt::VERSION ? optObjCHeader : nullptr;
964 }
965
966 const void* DyldSharedCache::objcOptPtrs() const {
967 // Find the objc image
968 const dyld3::MachOAnalyzer* objcMA = nullptr;
969
970 uint32_t imageIndex;
971 if ( hasImagePath("/usr/lib/libobjc.A.dylib", imageIndex) ) {
972 const dyld3::closure::ImageArray* images = cachedDylibsImageArray();
973 const dyld3::closure::Image* image = images->imageForNum(imageIndex+1);
974 objcMA = (const dyld3::MachOAnalyzer*)((uintptr_t)this + image->cacheOffset());
975 } else {
976 return nullptr;
977 }
978
979 // If we found the objc image, then try to find the read-only data inside.
980 __block const void* objcPointersContent = nullptr;
981 int64_t slide = objcMA->getSlide();
982 uint32_t pointerSize = objcMA->pointerSize();
983 objcMA->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo& info, bool malformedSectionRange, bool& stop) {
984 if ( (strncmp(info.segInfo.segName, "__DATA", 6) != 0) && (strncmp(info.segInfo.segName, "__AUTH", 6) != 0) )
985 return;
986 if (strcmp(info.sectName, "__objc_opt_ptrs") != 0)
987 return;
988 if ( info.sectSize != pointerSize ) {
989 stop = true;
990 return;
991 }
992 if ( malformedSectionRange ) {
993 stop = true;
994 return;
995 }
996 objcPointersContent = (uint8_t*)(info.sectAddr + slide);
997 });
998
999 return objcPointersContent;
1000 }
1001 #endif
1002
1003 std::pair<const void*, uint64_t> DyldSharedCache::getObjCConstantRange() const {
1004 const dyld3::MachOAnalyzer* libDyldMA = nullptr;
1005 uint32_t imageIndex;
1006 if ( hasImagePath("/usr/lib/system/libdyld.dylib", imageIndex) ) {
1007 const dyld3::closure::ImageArray* images = cachedDylibsImageArray();
1008 const dyld3::closure::Image* image = images->imageForNum(imageIndex+1);
1009 libDyldMA = (const dyld3::MachOAnalyzer*)((uintptr_t)this + image->cacheOffset());
1010
1011 std::pair<const void*, uint64_t> ranges = { nullptr, 0 };
1012 #if TARGET_OS_OSX
1013 ranges.first = libDyldMA->findSectionContent("__DATA", "__objc_ranges", ranges.second);
1014 #else
1015 ranges.first = libDyldMA->findSectionContent("__DATA_CONST", "__objc_ranges", ranges.second);
1016 #endif
1017 return ranges;
1018 }
1019
1020 return { nullptr, 0 };
1021 }
1022
1023 bool DyldSharedCache::hasSlideInfo() const {
1024 if ( header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
1025 return header.slideInfoSizeUnused != 0;
1026 } else {
1027 const dyld_cache_mapping_and_slide_info* slidableMappings = (const dyld_cache_mapping_and_slide_info*)((char*)this + header.mappingWithSlideOffset);
1028 for (uint32_t i = 0; i != header.mappingWithSlideCount; ++i) {
1029 if ( slidableMappings[i].slideInfoFileSize != 0 ) {
1030 return true;
1031 }
1032 }
1033 }
1034 return false;
1035 }
1036
// Calls 'handler' once per slidable region of the cache with that region's
// unslid start address, size, live page pointer, and slide-info header.
// Handles both legacy caches (single DATA region, slide info referenced from
// the cache header) and newer caches (per-mapping slide-info table).
void DyldSharedCache::forEachSlideInfo(void (^handler)(uint64_t mappingStartAddress, uint64_t mappingSize,
                                                       const uint8_t* mappingPagesStart,
                                                       uint64_t slideInfoOffset, uint64_t slideInfoSize,
                                                       const dyld_cache_slide_info* slideInfoHeader)) const {
    if ( header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
        // Old caches should get the slide info from the cache header and assume a single data region.
        const dyld_cache_mapping_info* dataMapping = legacyCacheDataRegionMapping();
        uint64_t dataStartAddress = dataMapping->address;
        uint64_t dataSize = dataMapping->size;
        const uint8_t* dataPagesStart = legacyCacheDataRegionBuffer();
        const dyld_cache_slide_info* slideInfoHeader = legacyCacheSlideInfo();

        handler(dataStartAddress, dataSize, dataPagesStart,
                header.slideInfoOffsetUnused, header.slideInfoSizeUnused, slideInfoHeader);
    } else {
        const dyld_cache_mapping_and_slide_info* slidableMappings = (const dyld_cache_mapping_and_slide_info*)((char*)this + header.mappingWithSlideOffset);
        // The last mapping is LINKEDIT, which holds the slide-info blobs.
        const dyld_cache_mapping_and_slide_info* linkeditMapping = &slidableMappings[header.mappingWithSlideCount - 1];
        // Slide = actual cache base minus its unslid preferred load address.
        uint64_t sharedCacheSlide = (uint64_t)this - unslidLoadAddress();

        for (uint32_t i = 0; i != header.mappingWithSlideCount; ++i) {
            // A zero file offset marks a mapping with no slide info (e.g. TEXT).
            if ( slidableMappings[i].slideInfoFileOffset != 0 ) {
                // Get the data pages
                uint64_t dataStartAddress = slidableMappings[i].address;
                uint64_t dataSize = slidableMappings[i].size;
                const uint8_t* dataPagesStart = (uint8_t*)dataStartAddress + sharedCacheSlide;

                // Get the slide info
                uint64_t offsetInLinkEditRegion = (slidableMappings[i].slideInfoFileOffset - linkeditMapping->fileOffset);
                const dyld_cache_slide_info* slideInfoHeader = (dyld_cache_slide_info*)((uint8_t*)(linkeditMapping->address) + sharedCacheSlide + offsetInLinkEditRegion);
                handler(dataStartAddress, dataSize, dataPagesStart,
                        slidableMappings[i].slideInfoFileOffset, slidableMappings[i].slideInfoFileSize, slideInfoHeader);
            }
        }
    }
}
1072
1073 #if BUILDING_LIBDYLD
1074 const char* DyldSharedCache::getCanonicalPath(const char *path) const {
1075 uint32_t dyldCacheImageIndex;
1076 if ( hasImagePath(path, dyldCacheImageIndex) )
1077 return getIndexedImagePath(dyldCacheImageIndex);
1078 #if TARGET_OS_OSX
1079 // on macOS support "Foo.framework/Foo" symlink
1080 char resolvedPath[PATH_MAX];
1081 realpath(path, resolvedPath);
1082 int realpathErrno = errno;
1083 // If realpath() resolves to a path which does not exist on disk, errno is set to ENOENT
1084 if ( (realpathErrno == ENOENT) || (realpathErrno == 0) ) {
1085 if ( hasImagePath(resolvedPath, dyldCacheImageIndex) )
1086 return getIndexedImagePath(dyldCacheImageIndex);
1087 }
1088 #endif
1089 return nullptr;
1090 }
1091 #endif
1092
1093 #if !(BUILDING_LIBDYLD || BUILDING_DYLD)
1094 void DyldSharedCache::fillMachOAnalyzersMap(std::unordered_map<std::string,dyld3::MachOAnalyzer*> & dylibAnalyzers) const {
1095 forEachImage(^(const mach_header *mh, const char *iteratedInstallName) {
1096 dylibAnalyzers[std::string(iteratedInstallName)] = (dyld3::MachOAnalyzer*)mh;
1097 });
1098 }
1099
1100 void DyldSharedCache::computeReverseDependencyMapForDylib(std::unordered_map<std::string, std::set<std::string>> &reverseDependencyMap, const std::unordered_map<std::string,dyld3::MachOAnalyzer*> & dylibAnalyzers, const std::string &loadPath) const {
1101 dyld3::MachOAnalyzer *ma = dylibAnalyzers.at(loadPath);
1102 if (reverseDependencyMap.find(loadPath) != reverseDependencyMap.end()) return;
1103 reverseDependencyMap[loadPath] = std::set<std::string>();
1104
1105 ma->forEachDependentDylib(^(const char *dependencyLoadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool &stop) {
1106 if (isUpward) return;
1107 std::string dependencyLoadPathString = std::string(dependencyLoadPath);
1108 computeReverseDependencyMapForDylib(reverseDependencyMap, dylibAnalyzers, dependencyLoadPathString);
1109 reverseDependencyMap[dependencyLoadPathString].insert(loadPath);
1110 });
1111 }
1112
1113 // Walks the shared cache and construct the reverse dependency graph (if dylib A depends on B,
1114 // constructs the graph with B -> A edges)
1115 void DyldSharedCache::computeReverseDependencyMap(std::unordered_map<std::string, std::set<std::string>> &reverseDependencyMap) const {
1116 std::unordered_map<std::string,dyld3::MachOAnalyzer*> dylibAnalyzers;
1117
1118 fillMachOAnalyzersMap(dylibAnalyzers);
1119 forEachImage(^(const mach_header *mh, const char *installName) {
1120 computeReverseDependencyMapForDylib(reverseDependencyMap, dylibAnalyzers, std::string(installName));
1121 });
1122 }
1123
1124 // uses the reverse dependency graph constructed above to find the recursive set of dependents for each dylib
1125 void DyldSharedCache::findDependentsRecursively(std::unordered_map<std::string, std::set<std::string>> &transitiveDependents, const std::unordered_map<std::string, std::set<std::string>> &reverseDependencyMap, std::set<std::string> & visited, const std::string &loadPath) const {
1126
1127 if (transitiveDependents.find(loadPath) != transitiveDependents.end()) {
1128 return;
1129 }
1130
1131 if (visited.find(loadPath) != visited.end()) {
1132 return;
1133 }
1134
1135 visited.insert(loadPath);
1136
1137 std::set<std::string> dependents;
1138
1139 for (const std::string & dependent : reverseDependencyMap.at(loadPath)) {
1140 findDependentsRecursively(transitiveDependents, reverseDependencyMap, visited, dependent);
1141 if (transitiveDependents.find(dependent) != transitiveDependents.end()) {
1142 std::set<std::string> & theseTransitiveDependents = transitiveDependents.at(dependent);
1143 dependents.insert(theseTransitiveDependents.begin(), theseTransitiveDependents.end());
1144 }
1145 dependents.insert(dependent);
1146 }
1147
1148 transitiveDependents[loadPath] = dependents;
1149 }
1150
1151 // Fills a map from each install name N to the set of install names depending on N
1152 void DyldSharedCache::computeTransitiveDependents(std::unordered_map<std::string, std::set<std::string>> & transitiveDependents) const {
1153 std::unordered_map<std::string, std::set<std::string>> reverseDependencyMap;
1154 computeReverseDependencyMap(reverseDependencyMap);
1155 forEachImage(^(const mach_header *mh, const char *installName) {
1156 std::set<std::string> visited;
1157 findDependentsRecursively(transitiveDependents, reverseDependencyMap, visited, std::string(installName));
1158 });
1159 }
1160 #endif