// apple/dyld.git (dyld-851.27): dyld3/shared-cache/DyldSharedCache.cpp
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <dirent.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <mach/mach.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <mach-o/dyld_priv.h>
#include <assert.h>
#include <unistd.h>
#include <dlfcn.h>

#if BUILDING_CACHE_BUILDER
#include <set>
#include <string>
#include <vector>
#include <unordered_map>
#include <unordered_set>
#include "SharedCacheBuilder.h"
#include "FileUtils.h"
#endif

#define NO_ULEB
#include "MachOLoaded.h"
#include "ClosureFileSystemPhysical.h"
#include "DyldSharedCache.h"
#include "Trie.hpp"
#include "StringUtils.h"

#include "objc-shared-cache.h"

#if !(BUILDING_LIBDYLD || BUILDING_DYLD)
#include "JSONWriter.h"
#include <sstream>
#endif

#if (BUILDING_LIBDYLD || BUILDING_DYLD)
VIS_HIDDEN bool gEnableSharedCacheDataConst = false;
#endif


#if BUILDING_CACHE_BUILDER
DyldSharedCache::CreateResults DyldSharedCache::create(const CreateOptions&              options,
                                                       const dyld3::closure::FileSystem& fileSystem,
                                                       const std::vector<MappedMachO>&   dylibsToCache,
                                                       const std::vector<MappedMachO>&   otherOsDylibs,
                                                       const std::vector<MappedMachO>&   osExecutables)
{
    CreateResults      results;
    SharedCacheBuilder cache(options, fileSystem);
    if ( !cache.errorMessage().empty() ) {
        results.errorMessage = cache.errorMessage();
        return results;
    }

    std::vector<FileAlias> aliases;
    switch ( options.platform ) {
        case dyld3::Platform::iOS:
        case dyld3::Platform::watchOS:
        case dyld3::Platform::tvOS:
            // FIXME: embedded cache builds should be getting aliases from manifest
            aliases.push_back({"/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit", "/System/Library/Frameworks/IOKit.framework/IOKit"});
            aliases.push_back({"/usr/lib/libstdc++.6.dylib",                                  "/usr/lib/libstdc++.dylib"});
            aliases.push_back({"/usr/lib/libstdc++.6.dylib",                                  "/usr/lib/libstdc++.6.0.9.dylib"});
            aliases.push_back({"/usr/lib/libz.1.dylib",                                       "/usr/lib/libz.dylib"});
            aliases.push_back({"/usr/lib/libSystem.B.dylib",                                  "/usr/lib/libSystem.dylib"});
            aliases.push_back({"/System/Library/Frameworks/Foundation.framework/Foundation",  "/usr/lib/libextension.dylib"}); // <rdar://44315703>
            break;
        default:
            break;
    }

    cache.build(dylibsToCache, otherOsDylibs, osExecutables, aliases);

    results.agileSignature = cache.agileSignature();
    results.cdHashFirst    = cache.cdHashFirst();
    results.cdHashSecond   = cache.cdHashSecond();
    results.warnings       = cache.warnings();
    results.evictions      = cache.evictions();
    if ( cache.errorMessage().empty() ) {
        if ( !options.outputFilePath.empty() ) {
            // write cache file, if path non-empty
            cache.writeFile(options.outputFilePath);
        }
        if ( !options.outputMapFilePath.empty() ) {
            // write map file, if path non-empty
            cache.writeMapFile(options.outputMapFilePath);
        }
    }
    results.errorMessage = cache.errorMessage();
    cache.deleteBuffer();
    return results;
}
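
// Illustrative sketch (not part of the original file): how a cache-builder tool
// might drive DyldSharedCache::create(). It only touches CreateOptions fields that
// create() itself reads above (platform, outputFilePath, outputMapFilePath); the
// output paths are hypothetical, producing the MappedMachO vectors is assumed to
// happen elsewhere, and fprintf assumes <cstdio>.
#if 0
static void exampleBuildCache(const dyld3::closure::FileSystem&                fileSystem,
                              const std::vector<DyldSharedCache::MappedMachO>& dylibsToCache,
                              const std::vector<DyldSharedCache::MappedMachO>& otherOsDylibs,
                              const std::vector<DyldSharedCache::MappedMachO>& osExecutables)
{
    DyldSharedCache::CreateOptions options;
    options.platform          = dyld3::Platform::macOS;
    options.outputFilePath    = "/tmp/example.dyld_shared_cache";  // hypothetical path
    options.outputMapFilePath = "/tmp/example.map";                // hypothetical path
    DyldSharedCache::CreateResults results = DyldSharedCache::create(options, fileSystem,
                                                                     dylibsToCache, otherOsDylibs, osExecutables);
    if ( !results.errorMessage.empty() )
        fprintf(stderr, "cache build failed: %s\n", results.errorMessage.c_str());
    for (const auto& warning : results.warnings)
        fprintf(stderr, "warning: %s\n", warning.c_str());
}
#endif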

bool DyldSharedCache::verifySelfContained(std::vector<MappedMachO>& dylibsToCache,
                                          std::unordered_set<std::string>& badZippered,
                                          MappedMachO (^loader)(const std::string& runtimePath, Diagnostics& diag),
                                          std::vector<std::pair<DyldSharedCache::MappedMachO, std::set<std::string>>>& rejected)
{
    // build map of dylibs
    __block std::map<std::string, std::set<std::string>> badDylibs;
    __block std::set<std::string> knownDylibs;
    for (const DyldSharedCache::MappedMachO& dylib : dylibsToCache) {
        std::set<std::string> reasons;
        if ( dylib.mh->canBePlacedInDyldCache(dylib.runtimePath.c_str(), ^(const char* msg) { badDylibs[dylib.runtimePath].insert(msg);}) ) {
            knownDylibs.insert(dylib.runtimePath);
            knownDylibs.insert(dylib.mh->installName());
        } else {
            badDylibs[dylib.runtimePath].insert("");
        }
    }

    // check all dependencies to ensure every dylib in the cache only depends on other dylibs in the cache
    __block std::set<std::string> missingWeakDylibs;
    __block bool doAgain = true;
    while ( doAgain ) {
        __block std::vector<DyldSharedCache::MappedMachO> foundMappings;
        doAgain = false;
        // scan dylib list making sure all dependents are in the dylib list
        for (const DyldSharedCache::MappedMachO& dylib : dylibsToCache) {
            if ( badDylibs.count(dylib.runtimePath) != 0 )
                continue;
            dylib.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                if ( isWeak && (missingWeakDylibs.count(loadPath) != 0) )
                    return;
                if ( knownDylibs.count(loadPath) == 0 ) {
                    doAgain = true;
                    if ( badZippered.count(loadPath) != 0 ) {
                        badDylibs[dylib.runtimePath].insert("");
                        knownDylibs.erase(dylib.runtimePath);
                        knownDylibs.erase(dylib.mh->installName());
                        badZippered.insert(dylib.runtimePath);
                        badZippered.insert(dylib.mh->installName());
                        return;
                    }
                    Diagnostics diag;
                    MappedMachO foundMapping;
                    if ( badDylibs.count(loadPath) == 0 )
                        foundMapping = loader(loadPath, diag);
                    if ( foundMapping.length == 0 ) {
                        // We allow weakly linked dylibs to be missing only if they are not present on disk.
                        // The shared cache doesn't contain enough information to patch them in later if they are
                        // found on disk, so we don't want to pull something into the cache and cut it off from a dylib it
                        // could have used.
                        if ( isWeak ) {
                            missingWeakDylibs.insert(loadPath);
                            return;
                        }

                        if (diag.hasError())
                            badDylibs[dylib.runtimePath].insert(diag.errorMessage());
                        else
                            badDylibs[dylib.runtimePath].insert(std::string("Could not find dependency '") + loadPath + "'");
                        knownDylibs.erase(dylib.runtimePath);
                        knownDylibs.erase(dylib.mh->installName());
                    }
                    else {
                        std::set<std::string> reasons;
                        if ( foundMapping.mh->canBePlacedInDyldCache(foundMapping.runtimePath.c_str(), ^(const char* msg) { badDylibs[foundMapping.runtimePath].insert(msg);}) ) {
                            // see if existing mapping was returned
                            bool alreadyInVector = false;
                            for (const MappedMachO& existing : dylibsToCache) {
                                if ( existing.mh == foundMapping.mh ) {
                                    alreadyInVector = true;
                                    break;
                                }
                            }
                            if ( !alreadyInVector )
                                foundMappings.push_back(foundMapping);
                            knownDylibs.insert(loadPath);
                            knownDylibs.insert(foundMapping.runtimePath);
                            knownDylibs.insert(foundMapping.mh->installName());
                        } else {
                            badDylibs[dylib.runtimePath].insert("");
                        }
                    }
                }
            });
        }
        dylibsToCache.insert(dylibsToCache.end(), foundMappings.begin(), foundMappings.end());
        // remove bad dylibs
        const auto badDylibsCopy = badDylibs;
        dylibsToCache.erase(std::remove_if(dylibsToCache.begin(), dylibsToCache.end(), [&](const DyldSharedCache::MappedMachO& dylib) {
            auto i = badDylibsCopy.find(dylib.runtimePath);
            if ( i != badDylibsCopy.end() ) {
                // Only add the warning if we are not a bad zippered dylib
                if ( badZippered.count(dylib.runtimePath) == 0 )
                    rejected.push_back(std::make_pair(dylib, i->second));
                return true;
            }
            else {
                return false;
            }
        }), dylibsToCache.end());
    }

    return badDylibs.empty();
}
#endif

// Convert an unslid vm address stored in the cache into a usable pointer in this process
template<typename T>
const T DyldSharedCache::getAddrField(uint64_t addr) const {
    uint64_t slide = (uint64_t)this - unslidLoadAddress();
    return (const T)(addr + slide);
}

uint64_t DyldSharedCache::getCodeSignAddress() const
{
    auto mappings = (const dyld_cache_mapping_info*)((uint8_t*)this + header.mappingOffset);
    return mappings[header.mappingCount-1].address + mappings[header.mappingCount-1].size;
}

void DyldSharedCache::forEachRegion(void (^handler)(const void* content, uint64_t vmAddr, uint64_t size,
                                                    uint32_t initProt, uint32_t maxProt, uint64_t flags)) const
{
    // <rdar://problem/49875993> sanity check cache header
    if ( strncmp(header.magic, "dyld_v1", 7) != 0 )
        return;
    if ( header.mappingOffset > 1024 )
        return;
    if ( header.mappingCount > 20 )
        return;
    if ( header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
        const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
        const dyld_cache_mapping_info* mappingsEnd = &mappings[header.mappingCount];
        for (const dyld_cache_mapping_info* m=mappings; m < mappingsEnd; ++m) {
            handler((char*)this + m->fileOffset, m->address, m->size, m->initProt, m->maxProt, 0);
        }
    } else {
        const dyld_cache_mapping_and_slide_info* mappings = (const dyld_cache_mapping_and_slide_info*)((char*)this + header.mappingWithSlideOffset);
        const dyld_cache_mapping_and_slide_info* mappingsEnd = &mappings[header.mappingCount];
        for (const dyld_cache_mapping_and_slide_info* m=mappings; m < mappingsEnd; ++m) {
            handler((char*)this + m->fileOffset, m->address, m->size, m->initProt, m->maxProt, m->flags);
        }
    }
}
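
// Illustrative sketch (not part of the original file): dumping the region layout
// of the live cache. _dyld_get_shared_cache_range() comes from <mach-o/dyld_priv.h>,
// which is already included above; printf assumes <cstdio>.
#if 0
static void examplePrintRegions()
{
    size_t cacheLen;
    const DyldSharedCache* cache = (const DyldSharedCache*)_dyld_get_shared_cache_range(&cacheLen);
    if ( cache == nullptr )
        return;
    cache->forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size,
                           uint32_t initProt, uint32_t maxProt, uint64_t flags) {
        printf("region: vmAddr=0x%llX size=0x%llX initProt=%u maxProt=%u flags=0x%llX\n",
               vmAddr, size, initProt, maxProt, flags);
    });
}
#endif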

bool DyldSharedCache::inCache(const void* addr, size_t length, bool& readOnly) const
{
    // quick out if before start of cache
    if ( addr < this )
        return false;

    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    uintptr_t unslidStart = (uintptr_t)addr - slide;

    // quick out if after end of cache
    const dyld_cache_mapping_info* lastMapping = &mappings[header.mappingCount - 1];
    if ( unslidStart > (lastMapping->address + lastMapping->size) )
        return false;

    // walk cache regions
    const dyld_cache_mapping_info* mappingsEnd = &mappings[header.mappingCount];
    uintptr_t unslidEnd = unslidStart + length;
    for (const dyld_cache_mapping_info* m=mappings; m < mappingsEnd; ++m) {
        if ( (unslidStart >= m->address) && (unslidEnd < (m->address+m->size)) ) {
            readOnly = ((m->initProt & VM_PROT_WRITE) == 0);
            return true;
        }
    }

    return false;
}
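
// Illustrative sketch (not part of the original file): classifying an arbitrary
// pointer/length pair with inCache() above; printf assumes <cstdio>.
#if 0
static void exampleClassifyPointer(const DyldSharedCache* cache, const void* p, size_t len)
{
    bool readOnly = false;
    if ( cache->inCache(p, len, readOnly) )
        printf("%p is in the shared cache (%s)\n", p, readOnly ? "read-only" : "writable");
    else
        printf("%p is outside the shared cache\n", p);
}
#endif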

bool DyldSharedCache::isAlias(const char* path) const {
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    // paths for aliases are stored between the cache header and the first segment
    return path < ((char*)mappings[0].address + slide);
}

void DyldSharedCache::forEachImage(void (^handler)(const mach_header* mh, const char* installName)) const
{
    const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( mappings[0].fileOffset != 0 )
        return;
    uint64_t firstImageOffset = 0;
    uint64_t firstRegionAddress = mappings[0].address;
    for (uint32_t i=0; i < header.imagesCount; ++i) {
        const char* dylibPath = (char*)this + dylibs[i].pathFileOffset;
        uint64_t offset = dylibs[i].address - firstRegionAddress;
        if ( firstImageOffset == 0 )
            firstImageOffset = offset;
        // skip over aliases
        if ( dylibs[i].pathFileOffset < firstImageOffset)
            continue;
        const mach_header* mh = (mach_header*)((char*)this + offset);
        handler(mh, dylibPath);
    }
}
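
// Illustrative sketch (not part of the original file): listing every dylib in
// the cache along with its load address, using forEachImage() above; printf
// assumes <cstdio>.
#if 0
static void exampleListImages(const DyldSharedCache* cache)
{
    __block unsigned count = 0;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        printf("%p  %s\n", (const void*)mh, installName);
        ++count;
    });
    printf("%u images\n", count);
}
#endif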

void DyldSharedCache::forEachImageEntry(void (^handler)(const char* path, uint64_t mTime, uint64_t inode)) const
{
    const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( mappings[0].fileOffset != 0 )
        return;
    uint64_t firstImageOffset = 0;
    uint64_t firstRegionAddress = mappings[0].address;
    for (uint32_t i=0; i < header.imagesCount; ++i) {
        const char* dylibPath = (char*)this + dylibs[i].pathFileOffset;
        uint64_t offset = dylibs[i].address - firstRegionAddress;
        if ( firstImageOffset == 0 )
            firstImageOffset = offset;
        // skip over aliases
        if ( dylibs[i].pathFileOffset < firstImageOffset)
            continue;
        handler(dylibPath, dylibs[i].modTime, dylibs[i].inode);
    }
}

const bool DyldSharedCache::hasLocalSymbolsInfo() const
{
    return (header.localSymbolsOffset != 0 && header.mappingOffset > offsetof(dyld_cache_header,localSymbolsSize));
}

const void* DyldSharedCache::getLocalNlistEntries() const
{
    // check for cache without local symbols info
    if (!this->hasLocalSymbolsInfo())
        return nullptr;
    const auto localInfo = (dyld_cache_local_symbols_info*)((uint8_t*)this + header.localSymbolsOffset);
    return (uint8_t*)localInfo + localInfo->nlistOffset;
}

const uint32_t DyldSharedCache::getLocalNlistCount() const
{
    // check for cache without local symbols info
    if (!this->hasLocalSymbolsInfo())
        return 0;
    const auto localInfo = (dyld_cache_local_symbols_info*)((uint8_t*)this + header.localSymbolsOffset);
    return localInfo->nlistCount;
}

const char* DyldSharedCache::getLocalStrings() const
{
    // check for cache without local symbols info
    if (!this->hasLocalSymbolsInfo())
        return nullptr;
    const auto localInfo = (dyld_cache_local_symbols_info*)((uint8_t*)this + header.localSymbolsOffset);
    return (char*)localInfo + localInfo->stringsOffset;
}

const uint32_t DyldSharedCache::getLocalStringsSize() const
{
    // check for cache without local symbols info
    if (!this->hasLocalSymbolsInfo())
        return 0;
    const auto localInfo = (dyld_cache_local_symbols_info*)((uint8_t*)this + header.localSymbolsOffset);
    return localInfo->stringsSize;
}

void DyldSharedCache::forEachLocalSymbolEntry(void (^handler)(uint32_t dylibOffset, uint32_t nlistStartIndex, uint32_t nlistCount, bool& stop)) const
{
    // check for cache without local symbols info
    if (!this->hasLocalSymbolsInfo())
        return;
    const auto localInfo = (dyld_cache_local_symbols_info*)((uint8_t*)this + header.localSymbolsOffset);
    const auto localEntries = (dyld_cache_local_symbols_entry*)((uint8_t*)localInfo + localInfo->entriesOffset);
    bool stop = false;
    for (uint32_t i = 0; i < localInfo->entriesCount; i++) {
        dyld_cache_local_symbols_entry localEntry = localEntries[i];
        handler(localEntry.dylibOffset, localEntry.nlistStartIndex, localEntry.nlistCount, stop);
        // honor the handler's stop request
        if ( stop )
            break;
    }
}


const mach_header* DyldSharedCache::getIndexedImageEntry(uint32_t index, uint64_t& mTime, uint64_t& inode) const
{
    const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    mTime = dylibs[index].modTime;
    inode = dylibs[index].inode;
    return (mach_header*)((uint8_t*)this + dylibs[index].address - mappings[0].address);
}


const char* DyldSharedCache::getIndexedImagePath(uint32_t index) const
{
    auto dylibs = (const dyld_cache_image_info*)((char*)this + header.imagesOffset);
    return (char*)this + dylibs[index].pathFileOffset;
}

void DyldSharedCache::forEachImageTextSegment(void (^handler)(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const uuid_t dylibUUID, const char* installName, bool& stop)) const
{
    // check for old cache without imagesText array
    if ( (header.mappingOffset <= __offsetof(dyld_cache_header, imagesTextOffset)) || (header.imagesTextCount == 0) )
        return;

    // walk imageText table and call callback for each entry
    const dyld_cache_image_text_info* imagesText = (dyld_cache_image_text_info*)((char*)this + header.imagesTextOffset);
    const dyld_cache_image_text_info* imagesTextEnd = &imagesText[header.imagesTextCount];
    bool stop = false;
    for (const dyld_cache_image_text_info* p=imagesText; p < imagesTextEnd && !stop; ++p) {
        handler(p->loadAddress, p->textSegmentSize, p->uuid, (char*)this + p->pathOffset, stop);
    }
}

bool DyldSharedCache::addressInText(uint32_t cacheOffset, uint32_t* imageIndex) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( cacheOffset > mappings[0].size )
        return false;
    uint64_t targetAddr = mappings[0].address + cacheOffset;
    // walk imageText table looking for the entry whose text segment contains the target address
    const dyld_cache_image_text_info* imagesText = (dyld_cache_image_text_info*)((char*)this + header.imagesTextOffset);
    const dyld_cache_image_text_info* imagesTextEnd = &imagesText[header.imagesTextCount];
    for (const dyld_cache_image_text_info* p=imagesText; p < imagesTextEnd; ++p) {
        if ( (p->loadAddress <= targetAddr) && (targetAddr < p->loadAddress+p->textSegmentSize) ) {
            *imageIndex = (uint32_t)(p-imagesText);
            return true;
        }
    }
    return false;
}

const char* DyldSharedCache::archName() const
{
    // the cache header magic is "dyld_v1 <arch>", so the arch name follows the 7-char prefix (after any padding spaces)
    const char* archSubString = ((char*)this) + 7;
    while (*archSubString == ' ')
        ++archSubString;
    return archSubString;
}


dyld3::Platform DyldSharedCache::platform() const
{
    return (dyld3::Platform)header.platform;
}

#if BUILDING_CACHE_BUILDER
std::string DyldSharedCache::mapFile() const
{
    __block std::string           result;
    __block std::vector<uint64_t> regionStartAddresses;
    __block std::vector<uint64_t> regionSizes;
    __block std::vector<uint64_t> regionFileOffsets;

    result.reserve(256*1024);
    forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size,
                    uint32_t initProt, uint32_t maxProt, uint64_t flags) {
        regionStartAddresses.push_back(vmAddr);
        regionSizes.push_back(size);
        regionFileOffsets.push_back((uint8_t*)content - (uint8_t*)this);
        char lineBuffer[256];
        const char* prot = "RW";
        if ( maxProt == (VM_PROT_EXECUTE|VM_PROT_READ) )
            prot = "EX";
        else if ( maxProt == VM_PROT_READ )
            prot = "RO";
        if ( size > 1024*1024 )
            sprintf(lineBuffer, "mapping %s %4lluMB 0x%0llX -> 0x%0llX\n", prot, size/(1024*1024), vmAddr, vmAddr+size);
        else
            sprintf(lineBuffer, "mapping %s %4lluKB 0x%0llX -> 0x%0llX\n", prot, size/1024, vmAddr, vmAddr+size);
        result += lineBuffer;
    });

    // TODO: add linkedit breakdown
    result += "\n\n";

    forEachImage(^(const mach_header* mh, const char* installName) {
        result += std::string(installName) + "\n";
        const dyld3::MachOFile* mf = (dyld3::MachOFile*)mh;
        mf->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
            char lineBuffer[256];
            sprintf(lineBuffer, "\t%16s 0x%08llX -> 0x%08llX\n", info.segName, info.vmAddr, info.vmAddr+info.vmSize);
            result += lineBuffer;
        });
        result += "\n";
    });

    return result;
}
#endif


uint64_t DyldSharedCache::unslidLoadAddress() const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    return mappings[0].address;
}

void DyldSharedCache::getUUID(uuid_t uuid) const
{
    memcpy(uuid, header.uuid, sizeof(uuid_t));
}

uint64_t DyldSharedCache::mappedSize() const
{
    __block uint64_t startAddr = 0;
    __block uint64_t endAddr = 0;
    forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size,
                    uint32_t initProt, uint32_t maxProt, uint64_t flags) {
        if ( startAddr == 0 )
            startAddr = vmAddr;
        uint64_t end = vmAddr+size;
        if ( end > endAddr )
            endAddr = end;
    });
    return (endAddr - startAddr);
}

bool DyldSharedCache::findMachHeaderImageIndex(const mach_header* mh, uint32_t& imageIndex) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    uint64_t unslidMh = (uintptr_t)mh - slide;
    const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    for (uint32_t i=0; i < header.imagesCount; ++i) {
        if ( dylibs[i].address == unslidMh ) {
            imageIndex = i;
            return true;
        }
    }
    return false;
}

bool DyldSharedCache::hasImagePath(const char* dylibPath, uint32_t& imageIndex) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( mappings[0].fileOffset != 0 )
        return false;
    if ( header.mappingOffset >= 0x118 ) {
        uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
        const uint8_t* dylibTrieStart = (uint8_t*)(this->header.dylibsTrieAddr + slide);
        const uint8_t* dylibTrieEnd = dylibTrieStart + this->header.dylibsTrieSize;

        Diagnostics diag;
        const uint8_t* imageNode = dyld3::MachOLoaded::trieWalk(diag, dylibTrieStart, dylibTrieEnd, dylibPath);
        if ( imageNode != NULL ) {
            imageIndex = (uint32_t)dyld3::MachOFile::read_uleb128(diag, imageNode, dylibTrieEnd);
            return true;
        }
    }
    else {
        const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
        uint64_t firstImageOffset = 0;
        uint64_t firstRegionAddress = mappings[0].address;
        for (uint32_t i=0; i < header.imagesCount; ++i) {
            const char* aPath = (char*)this + dylibs[i].pathFileOffset;
            if ( strcmp(aPath, dylibPath) == 0 ) {
                imageIndex = i;
                return true;
            }
            uint64_t offset = dylibs[i].address - firstRegionAddress;
            if ( firstImageOffset == 0 )
                firstImageOffset = offset;
            // skip over aliases
            if ( dylibs[i].pathFileOffset < firstImageOffset)
                continue;
        }
    }

    return false;
}
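
// Illustrative sketch (not part of the original file): resolving a path to an
// image index with hasImagePath(), then mapping the index back to the canonical
// install name with getIndexedImagePath() above; printf assumes <cstdio>.
#if 0
static void exampleLookupPath(const DyldSharedCache* cache, const char* path)
{
    uint32_t imageIndex;
    if ( cache->hasImagePath(path, imageIndex) )
        printf("'%s' is image #%u: %s\n", path, imageIndex, cache->getIndexedImagePath(imageIndex));
    else
        printf("'%s' is not in the cache\n", path);
}
#endif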

bool DyldSharedCache::isOverridablePath(const char* dylibPath) const
{
    // in customer (production) caches, no dylib can be overridden except libdispatch.dylib
    if ( header.cacheType == kDyldSharedCacheTypeProduction ) {
        return (strcmp(dylibPath, "/usr/lib/system/libdispatch.dylib") == 0);
    }
    // in dev caches we can override all paths
    return true;
}

bool DyldSharedCache::hasNonOverridablePath(const char* dylibPath) const
{
    // in customer (production) caches, no dylib can be overridden except libdispatch.dylib
    bool pathIsInDyldCacheWhichCannotBeOverridden = false;
    if ( header.cacheType == kDyldSharedCacheTypeProduction ) {
        uint32_t imageIndex;
        pathIsInDyldCacheWhichCannotBeOverridden = this->hasImagePath(dylibPath, imageIndex);
        if ( pathIsInDyldCacheWhichCannotBeOverridden && isOverridablePath(dylibPath) )
            pathIsInDyldCacheWhichCannotBeOverridden = false;
    }
    return pathIsInDyldCacheWhichCannotBeOverridden;
}

#if !BUILDING_LIBDSC
const dyld3::closure::Image* DyldSharedCache::findDlopenOtherImage(const char* path) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( mappings[0].fileOffset != 0 )
        return nullptr;
    if ( header.mappingOffset < __offsetof(dyld_cache_header, otherImageArrayAddr) )
        return nullptr;
    if ( header.otherImageArrayAddr == 0 )
        return nullptr;
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* dylibTrieStart = (uint8_t*)(this->header.otherTrieAddr + slide);
    const uint8_t* dylibTrieEnd = dylibTrieStart + this->header.otherTrieSize;

    Diagnostics diag;
    const uint8_t* imageNode = dyld3::MachOLoaded::trieWalk(diag, dylibTrieStart, dylibTrieEnd, path);
    if ( imageNode != NULL ) {
        dyld3::closure::ImageNum imageNum = (uint32_t)dyld3::MachOFile::read_uleb128(diag, imageNode, dylibTrieEnd);
        uint64_t arrayAddrOffset = header.otherImageArrayAddr - mappings[0].address;
        const dyld3::closure::ImageArray* otherImageArray = (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
        return otherImageArray->imageForNum(imageNum);
    }

    return nullptr;
}

const dyld3::closure::LaunchClosure* DyldSharedCache::findClosure(const char* executablePath) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* executableTrieStart = (uint8_t*)(this->header.progClosuresTrieAddr + slide);
    const uint8_t* executableTrieEnd = executableTrieStart + this->header.progClosuresTrieSize;
    const uint8_t* closuresStart = (uint8_t*)(this->header.progClosuresAddr + slide);

    Diagnostics diag;
    const uint8_t* imageNode = dyld3::MachOLoaded::trieWalk(diag, executableTrieStart, executableTrieEnd, executablePath);
    if ( (imageNode == NULL) && (strncmp(executablePath, "/System/", 8) == 0) ) {
        // anything in /System/ should have a closure. Perhaps it was launched via symlink path
        char realPath[PATH_MAX];
        if ( realpath(executablePath, realPath) != NULL )
            imageNode = dyld3::MachOLoaded::trieWalk(diag, executableTrieStart, executableTrieEnd, realPath);
    }
    if ( imageNode != NULL ) {
        uint32_t closureOffset = (uint32_t)dyld3::MachOFile::read_uleb128(diag, imageNode, executableTrieEnd);
        if ( closureOffset < this->header.progClosuresSize )
            return (dyld3::closure::LaunchClosure*)((uint8_t*)closuresStart + closureOffset);
    }

    return nullptr;
}

#if !BUILDING_LIBDYLD && !BUILDING_DYLD
void DyldSharedCache::forEachLaunchClosure(void (^handler)(const char* executableRuntimePath, const dyld3::closure::LaunchClosure* closure)) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* executableTrieStart = (uint8_t*)(this->header.progClosuresTrieAddr + slide);
    const uint8_t* executableTrieEnd = executableTrieStart + this->header.progClosuresTrieSize;
    const uint8_t* closuresStart = (uint8_t*)(this->header.progClosuresAddr + slide);

    std::vector<DylibIndexTrie::Entry> closureEntries;
    if ( Trie<DylibIndex>::parseTrie(executableTrieStart, executableTrieEnd, closureEntries) ) {
        for (DylibIndexTrie::Entry& entry : closureEntries ) {
            uint32_t offset = entry.info.index;
            if ( offset < this->header.progClosuresSize )
                handler(entry.name.c_str(), (const dyld3::closure::LaunchClosure*)(closuresStart+offset));
        }
    }
}

void DyldSharedCache::forEachDlopenImage(void (^handler)(const char* runtimePath, const dyld3::closure::Image* image)) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* otherTrieStart = (uint8_t*)(this->header.otherTrieAddr + slide);
    const uint8_t* otherTrieEnd = otherTrieStart + this->header.otherTrieSize;

    std::vector<DylibIndexTrie::Entry> otherEntries;
    if ( Trie<DylibIndex>::parseTrie(otherTrieStart, otherTrieEnd, otherEntries) ) {
        for (const DylibIndexTrie::Entry& entry : otherEntries ) {
            dyld3::closure::ImageNum imageNum = entry.info.index;
            uint64_t arrayAddrOffset = header.otherImageArrayAddr - mappings[0].address;
            const dyld3::closure::ImageArray* otherImageArray = (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
            handler(entry.name.c_str(), otherImageArray->imageForNum(imageNum));
        }
    }
}

void DyldSharedCache::forEachDylibPath(void (^handler)(const char* dylibPath, uint32_t index)) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* dylibTrieStart = (uint8_t*)(this->header.dylibsTrieAddr + slide);
    const uint8_t* dylibTrieEnd = dylibTrieStart + this->header.dylibsTrieSize;

    std::vector<DylibIndexTrie::Entry> dylibEntries;
    if ( Trie<DylibIndex>::parseTrie(dylibTrieStart, dylibTrieEnd, dylibEntries) ) {
        for (DylibIndexTrie::Entry& entry : dylibEntries ) {
            handler(entry.name.c_str(), entry.info.index);
        }
    }
}
#endif // !BUILDING_LIBDYLD && !BUILDING_DYLD
#endif // !BUILDING_LIBDSC

const dyld3::closure::ImageArray* DyldSharedCache::cachedDylibsImageArray() const
{
    // check for old cache without imagesArray
    if ( header.mappingOffset < 0x100 )
        return nullptr;

    if ( header.dylibsImageArrayAddr == 0 )
        return nullptr;

    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uint64_t arrayAddrOffset = header.dylibsImageArrayAddr - mappings[0].address;
    return (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
}

const dyld3::closure::ImageArray* DyldSharedCache::otherOSImageArray() const
{
    // check for old cache without imagesArray
    if ( header.mappingOffset < __offsetof(dyld_cache_header, otherImageArrayAddr) )
        return nullptr;

    if ( header.otherImageArrayAddr == 0 )
        return nullptr;

    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uint64_t arrayAddrOffset = header.otherImageArrayAddr - mappings[0].address;
    return (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
}


uint32_t DyldSharedCache::patchableExportCount(uint32_t imageIndex) const {
    if ( header.patchInfoAddr == 0 )
        return 0;

    const dyld_cache_patch_info* patchInfo = getAddrField<dyld_cache_patch_info*>(header.patchInfoAddr);
    const dyld_cache_image_patches* patchArray = getAddrField<dyld_cache_image_patches*>(patchInfo->patchTableArrayAddr);
    if ( imageIndex >= patchInfo->patchTableArrayCount )    // >=: valid indexes are 0..count-1
        return 0;
    return patchArray[imageIndex].patchExportsCount;
}

void DyldSharedCache::forEachPatchableExport(uint32_t imageIndex, void (^handler)(uint32_t cacheOffsetOfImpl, const char* exportName)) const {
    if ( header.patchInfoAddr == 0 )
        return;

    const dyld_cache_patch_info* patchInfo = getAddrField<dyld_cache_patch_info*>(header.patchInfoAddr);
    const dyld_cache_image_patches* patchArray = getAddrField<dyld_cache_image_patches*>(patchInfo->patchTableArrayAddr);
    if ( imageIndex >= patchInfo->patchTableArrayCount )    // >=: valid indexes are 0..count-1
        return;
    const dyld_cache_image_patches& patch = patchArray[imageIndex];
    if ( (patch.patchExportsStartIndex + patch.patchExportsCount) > patchInfo->patchExportArrayCount )
        return;
    const dyld_cache_patchable_export* patchExports = getAddrField<dyld_cache_patchable_export*>(patchInfo->patchExportArrayAddr);
    const char* exportNames = getAddrField<char*>(patchInfo->patchExportNamesAddr);
    for (uint64_t exportIndex = 0; exportIndex != patch.patchExportsCount; ++exportIndex) {
        const dyld_cache_patchable_export& patchExport = patchExports[patch.patchExportsStartIndex + exportIndex];
        const char* exportName = ( patchExport.exportNameOffset < patchInfo->patchExportNamesSize ) ? &exportNames[patchExport.exportNameOffset] : "";
        handler(patchExport.cacheOffsetOfImpl, exportName);
    }
}

void DyldSharedCache::forEachPatchableUseOfExport(uint32_t imageIndex, uint32_t cacheOffsetOfImpl,
                                                  void (^handler)(dyld_cache_patchable_location patchLocation)) const {
    if ( header.patchInfoAddr == 0 )
        return;

    // Loading a new cache so get the data from the cache header
    const dyld_cache_patch_info* patchInfo = getAddrField<dyld_cache_patch_info*>(header.patchInfoAddr);
    const dyld_cache_image_patches* patchArray = getAddrField<dyld_cache_image_patches*>(patchInfo->patchTableArrayAddr);
    if ( imageIndex >= patchInfo->patchTableArrayCount )    // >=: valid indexes are 0..count-1
        return;
    const dyld_cache_image_patches& patch = patchArray[imageIndex];
    if ( (patch.patchExportsStartIndex + patch.patchExportsCount) > patchInfo->patchExportArrayCount )
        return;
    const dyld_cache_patchable_export* patchExports = getAddrField<dyld_cache_patchable_export*>(patchInfo->patchExportArrayAddr);
    const dyld_cache_patchable_location* patchLocations = getAddrField<dyld_cache_patchable_location*>(patchInfo->patchLocationArrayAddr);
    for (uint64_t exportIndex = 0; exportIndex != patch.patchExportsCount; ++exportIndex) {
        const dyld_cache_patchable_export& patchExport = patchExports[patch.patchExportsStartIndex + exportIndex];
        if ( patchExport.cacheOffsetOfImpl != cacheOffsetOfImpl )
            continue;
        if ( (patchExport.patchLocationsStartIndex + patchExport.patchLocationsCount) > patchInfo->patchLocationArrayCount )
            return;
        for (uint64_t locationIndex = 0; locationIndex != patchExport.patchLocationsCount; ++locationIndex) {
            const dyld_cache_patchable_location& patchLocation = patchLocations[patchExport.patchLocationsStartIndex + locationIndex];
            handler(patchLocation);
        }
    }
}
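
// Illustrative sketch (not part of the original file): walking the patch table
// for one image by combining the two iterators above. It prints each patchable
// export's implementation offset and every location that would need patching if
// the export were overridden; the cacheOffset bitfield is assumed to be declared
// as in dyld_cache_format.h, and printf assumes <cstdio>.
#if 0
static void exampleDumpPatchTable(const DyldSharedCache* cache, uint32_t imageIndex)
{
    cache->forEachPatchableExport(imageIndex, ^(uint32_t cacheOffsetOfImpl, const char* exportName) {
        printf("export %s at cache offset 0x%X\n", exportName, cacheOffsetOfImpl);
        cache->forEachPatchableUseOfExport(imageIndex, cacheOffsetOfImpl,
                                           ^(dyld_cache_patchable_location patchLocation) {
            printf("  patch location at cache offset 0x%llX\n", (uint64_t)patchLocation.cacheOffset);
        });
    });
}
#endif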

#if (BUILDING_LIBDYLD || BUILDING_DYLD)
void DyldSharedCache::changeDataConstPermissions(mach_port_t machTask, uint32_t permissions,
                                                 DataConstLogFunc logFunc) const {

    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);

    if ( (permissions & VM_PROT_WRITE) != 0 )
        permissions |= VM_PROT_COPY;

    forEachRegion(^(const void*, uint64_t vmAddr, uint64_t size,
                    uint32_t initProt, uint32_t maxProt, uint64_t flags) {
        void* content = (void*)(vmAddr + slide);
        if ( (flags & DYLD_CACHE_MAPPING_CONST_DATA) == 0 )
            return;
        if ( logFunc != nullptr ) {
            logFunc("dyld: marking shared cache range 0x%x permissions: 0x%09lX -> 0x%09lX\n",
                    permissions, (long)content, (long)content + size);
        }
        kern_return_t result = vm_protect(machTask, (vm_address_t)content, (vm_size_t)size, false, permissions);
        if ( result != KERN_SUCCESS ) {
            if ( logFunc != nullptr )
                logFunc("dyld: failed to mprotect shared cache due to: %d\n", result);
        }
    });
}

DyldSharedCache::DataConstLazyScopedWriter::DataConstLazyScopedWriter(const DyldSharedCache* cache, mach_port_t machTask, DataConstLogFunc logFunc)
    : cache(cache), machTask(machTask), logFunc(logFunc) {
}

DyldSharedCache::DataConstLazyScopedWriter::~DataConstLazyScopedWriter() {
    if ( wasMadeWritable )
        cache->changeDataConstPermissions(machTask, VM_PROT_READ, logFunc);
}

void DyldSharedCache::DataConstLazyScopedWriter::makeWriteable() {
    if ( wasMadeWritable )
        return;
    if ( !gEnableSharedCacheDataConst )
        return;
    if ( cache == nullptr )
        return;
    wasMadeWritable = true;
    cache->changeDataConstPermissions(machTask, VM_PROT_READ | VM_PROT_WRITE, logFunc);
}

DyldSharedCache::DataConstScopedWriter::DataConstScopedWriter(const DyldSharedCache* cache, mach_port_t machTask, DataConstLogFunc logFunc)
    : writer(cache, machTask, logFunc) {
    writer.makeWriteable();
}
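
// Illustrative sketch (not part of the original file): the intended usage pattern
// for the lazy writer above. A caller creates one writer per operation that may
// touch DATA_CONST, calls makeWriteable() only on paths that actually write, and
// relies on the destructor to restore read-only permissions. The patch target
// here is hypothetical.
#if 0
static void examplePatchDataConst(const DyldSharedCache* cache, uintptr_t* locInDataConst, uintptr_t newValue)
{
    DyldSharedCache::DataConstLazyScopedWriter writer(cache, mach_task_self(), nullptr);
    writer.makeWriteable();        // no-op unless gEnableSharedCacheDataConst is set
    *locInDataConst = newValue;    // safe while the writer holds VM_PROT_WRITE
}                                  // destructor flips DATA_CONST back to read-only
#endif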
#endif

#if !(BUILDING_LIBDYLD || BUILDING_DYLD)
// MRM map file generator
std::string DyldSharedCache::generateJSONMap(const char* disposition) const {
    dyld3::json::Node cacheNode;

    cacheNode.map["version"].value = "1";
    cacheNode.map["disposition"].value = disposition;
    cacheNode.map["base-address"].value = dyld3::json::hex(unslidLoadAddress());
    uuid_t cache_uuid;
    getUUID(cache_uuid);
    uuid_string_t cache_uuidStr;
    uuid_unparse(cache_uuid, cache_uuidStr);
    cacheNode.map["uuid"].value = cache_uuidStr;

    __block dyld3::json::Node imagesNode;
    forEachImage(^(const mach_header* mh, const char* installName) {
        dyld3::json::Node imageNode;
        imageNode.map["path"].value = installName;
        dyld3::MachOAnalyzer* ma = (dyld3::MachOAnalyzer*)mh;
        uuid_t uuid;
        if (ma->getUuid(uuid)) {
            uuid_string_t uuidStr;
            uuid_unparse(uuid, uuidStr);
            imageNode.map["uuid"].value = uuidStr;
        }

        __block dyld3::json::Node segmentsNode;
        ma->forEachSegment(^(const dyld3::MachOAnalyzer::SegmentInfo& info, bool& stop) {
            dyld3::json::Node segmentNode;
            segmentNode.map["name"].value = info.segName;
            segmentNode.map["start-vmaddr"].value = dyld3::json::hex(info.vmAddr);
            segmentNode.map["end-vmaddr"].value = dyld3::json::hex(info.vmAddr + info.vmSize);
            segmentsNode.array.push_back(segmentNode);
        });
        imageNode.map["segments"] = segmentsNode;
        imagesNode.array.push_back(imageNode);
    });

    cacheNode.map["images"] = imagesNode;

    std::stringstream stream;
    printJSON(cacheNode, 0, stream);

    return stream.str();
}

std::string DyldSharedCache::generateJSONDependents() const {
    std::unordered_map<std::string, std::set<std::string>> dependents;
    computeTransitiveDependents(dependents);

    std::stringstream stream;

    stream << "{";
    bool first = true;
    for (const auto& p : dependents) {
        if (!first) stream << "," << std::endl;
        first = false;

        stream << "\"" << p.first << "\" : [" << std::endl;
        bool firstDependent = true;
        for (const std::string& dependent : p.second) {
            if (!firstDependent) stream << "," << std::endl;
            firstDependent = false;
            stream << "  \"" << dependent << "\"";
        }
        stream << "]" << std::endl;
    }
    stream << "}" << std::endl;
    return stream.str();
}
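
// Illustrative sketch (not part of the original file): how an offline tool might
// emit both JSON documents produced above for a cache it has already mapped
// (mapping the cache file is assumed to happen elsewhere); the disposition
// string is tool-defined, and printf assumes <cstdio>.
#if 0
static void exampleEmitJSON(const DyldSharedCache* cache)
{
    std::string map  = cache->generateJSONMap("unknown");
    std::string deps = cache->generateJSONDependents();
    printf("%s\n%s\n", map.c_str(), deps.c_str());
}
#endif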

#endif

#if !(BUILDING_LIBDYLD || BUILDING_DYLD)
dyld3::MachOAnalyzer::VMAddrConverter DyldSharedCache::makeVMAddrConverter(bool contentRebased) const {
    typedef dyld3::MachOAnalyzer::VMAddrConverter VMAddrConverter;

    __block VMAddrConverter::SharedCacheFormat pointerFormat = VMAddrConverter::SharedCacheFormat::none;
    __block uint64_t pointerValueAdd = 0;
    forEachSlideInfo(^(uint64_t mappingStartAddress, uint64_t mappingSize, const uint8_t* mappingPagesStart, uint64_t slideInfoOffset, uint64_t slideInfoSize, const dyld_cache_slide_info* slideInfoHeader) {
        assert(slideInfoHeader->version >= 2);
        if ( slideInfoHeader->version == 2 ) {
            const dyld_cache_slide_info2* slideInfo = (dyld_cache_slide_info2*)(slideInfoHeader);
            assert(slideInfo->delta_mask == 0x00FFFF0000000000);
            pointerFormat = VMAddrConverter::SharedCacheFormat::v2_x86_64_tbi;
            pointerValueAdd = slideInfo->value_add;
        } else if ( slideInfoHeader->version == 3 ) {
            pointerFormat = VMAddrConverter::SharedCacheFormat::v3;
            pointerValueAdd = unslidLoadAddress();
        } else {
            assert(false);
        }
    });

    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);

    VMAddrConverter vmAddrConverter;
    vmAddrConverter.preferredLoadAddress = pointerValueAdd;
    vmAddrConverter.slide = slide;
    vmAddrConverter.chainedPointerFormat = 0;
    vmAddrConverter.sharedCacheChainedPointerFormat = pointerFormat;
    vmAddrConverter.contentRebased = contentRebased;

    return vmAddrConverter;
}
#endif

// Legacy caches (those whose header predates mappingWithSlideOffset) always have exactly
// three mappings: __TEXT (mappings[0]), __DATA (mappings[1]), and __LINKEDIT (mappings[2]).
const dyld_cache_slide_info* DyldSharedCache::legacyCacheSlideInfo() const
{
    assert(header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset));
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);

    uint64_t offsetInLinkEditRegion = (header.slideInfoOffsetUnused - mappings[2].fileOffset);
    return (dyld_cache_slide_info*)((uint8_t*)(mappings[2].address) + slide + offsetInLinkEditRegion);
}

const dyld_cache_mapping_info* DyldSharedCache::legacyCacheDataRegionMapping() const
{
    assert(header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset));
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    return &mappings[1];
}

const uint8_t* DyldSharedCache::legacyCacheDataRegionBuffer() const
{
    assert(header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset));
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);

    return (uint8_t*)(legacyCacheDataRegionMapping()->address) + slide;
}

#if !BUILDING_LIBDSC
const objc_opt::objc_opt_t* DyldSharedCache::objcOpt() const {
    // Find the objc image
    const dyld3::MachOAnalyzer* objcMA = nullptr;

    uint32_t imageIndex;
    if ( hasImagePath("/usr/lib/libobjc.A.dylib", imageIndex) ) {
        const dyld3::closure::ImageArray* images = cachedDylibsImageArray();
        const dyld3::closure::Image* image = images->imageForNum(imageIndex+1);
        objcMA = (const dyld3::MachOAnalyzer*)((uintptr_t)this + image->cacheOffset());
    } else {
        return nullptr;
    }

    // If we found the objc image, then try to find the read-only data inside.
    __block const uint8_t* objcROContent = nullptr;
    int64_t slide = objcMA->getSlide();
    objcMA->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo& info, bool malformedSectionRange, bool& stop) {
        if (strcmp(info.segInfo.segName, "__TEXT") != 0)
            return;
        if (strcmp(info.sectName, "__objc_opt_ro") != 0)
            return;
        if ( malformedSectionRange ) {
            stop = true;
            return;
        }
        objcROContent = (uint8_t*)(info.sectAddr + slide);
    });

    if (objcROContent == nullptr)
        return nullptr;

    const objc_opt::objc_opt_t* optObjCHeader = (const objc_opt::objc_opt_t*)objcROContent;
    return optObjCHeader->version == objc_opt::VERSION ? optObjCHeader : nullptr;
}

const void* DyldSharedCache::objcOptPtrs() const {
    // Find the objc image
    const dyld3::MachOAnalyzer* objcMA = nullptr;

    uint32_t imageIndex;
    if ( hasImagePath("/usr/lib/libobjc.A.dylib", imageIndex) ) {
        const dyld3::closure::ImageArray* images = cachedDylibsImageArray();
        const dyld3::closure::Image* image = images->imageForNum(imageIndex+1);
        objcMA = (const dyld3::MachOAnalyzer*)((uintptr_t)this + image->cacheOffset());
    } else {
        return nullptr;
    }

    // If we found the objc image, then try to find the pointers section inside.
    __block const void* objcPointersContent = nullptr;
    int64_t slide = objcMA->getSlide();
    uint32_t pointerSize = objcMA->pointerSize();
    objcMA->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo& info, bool malformedSectionRange, bool& stop) {
        if ( (strncmp(info.segInfo.segName, "__DATA", 6) != 0) && (strncmp(info.segInfo.segName, "__AUTH", 6) != 0) )
            return;
        if (strcmp(info.sectName, "__objc_opt_ptrs") != 0)
            return;
        if ( info.sectSize != pointerSize ) {
            stop = true;
            return;
        }
        if ( malformedSectionRange ) {
            stop = true;
            return;
        }
        objcPointersContent = (uint8_t*)(info.sectAddr + slide);
    });

    return objcPointersContent;
}
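
// Illustrative sketch (not part of the original file): querying the optimized
// Objective-C metadata located above. Only objc_opt_t's version field is relied
// on here; the full layout lives in objc-shared-cache.h. printf assumes <cstdio>.
#if 0
static void examplePrintObjCOptVersion(const DyldSharedCache* cache)
{
    if ( const objc_opt::objc_opt_t* opts = cache->objcOpt() )
        printf("objc optimization tables: version %u\n", opts->version);
    else
        printf("no usable objc optimization tables in this cache\n");
}
#endif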
#endif

std::pair<const void*, uint64_t> DyldSharedCache::getObjCConstantRange() const {
    const dyld3::MachOAnalyzer* libDyldMA = nullptr;
    uint32_t imageIndex;
    if ( hasImagePath("/usr/lib/system/libdyld.dylib", imageIndex) ) {
        const dyld3::closure::ImageArray* images = cachedDylibsImageArray();
        const dyld3::closure::Image* image = images->imageForNum(imageIndex+1);
        libDyldMA = (const dyld3::MachOAnalyzer*)((uintptr_t)this + image->cacheOffset());

        std::pair<const void*, uint64_t> ranges = { nullptr, 0 };
#if TARGET_OS_OSX
        ranges.first = libDyldMA->findSectionContent("__DATA", "__objc_ranges", ranges.second);
#else
        ranges.first = libDyldMA->findSectionContent("__DATA_CONST", "__objc_ranges", ranges.second);
#endif
        return ranges;
    }

    return { nullptr, 0 };
}

bool DyldSharedCache::hasSlideInfo() const {
    if ( header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
        return header.slideInfoSizeUnused != 0;
    } else {
        const dyld_cache_mapping_and_slide_info* slidableMappings = (const dyld_cache_mapping_and_slide_info*)((char*)this + header.mappingWithSlideOffset);
        for (uint32_t i = 0; i != header.mappingWithSlideCount; ++i) {
            if ( slidableMappings[i].slideInfoFileSize != 0 ) {
                return true;
            }
        }
    }
    return false;
}

void DyldSharedCache::forEachSlideInfo(void (^handler)(uint64_t mappingStartAddress, uint64_t mappingSize,
                                                       const uint8_t* mappingPagesStart,
                                                       uint64_t slideInfoOffset, uint64_t slideInfoSize,
                                                       const dyld_cache_slide_info* slideInfoHeader)) const {
    if ( header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
        // Old caches should get the slide info from the cache header and assume a single data region.
        const dyld_cache_mapping_info* dataMapping = legacyCacheDataRegionMapping();
        uint64_t dataStartAddress = dataMapping->address;
        uint64_t dataSize = dataMapping->size;
        const uint8_t* dataPagesStart = legacyCacheDataRegionBuffer();
        const dyld_cache_slide_info* slideInfoHeader = legacyCacheSlideInfo();

        handler(dataStartAddress, dataSize, dataPagesStart,
                header.slideInfoOffsetUnused, header.slideInfoSizeUnused, slideInfoHeader);
    } else {
        const dyld_cache_mapping_and_slide_info* slidableMappings = (const dyld_cache_mapping_and_slide_info*)((char*)this + header.mappingWithSlideOffset);
        const dyld_cache_mapping_and_slide_info* linkeditMapping = &slidableMappings[header.mappingWithSlideCount - 1];
        uint64_t sharedCacheSlide = (uint64_t)this - unslidLoadAddress();

        for (uint32_t i = 0; i != header.mappingWithSlideCount; ++i) {
            if ( slidableMappings[i].slideInfoFileOffset != 0 ) {
                // Get the data pages
                uint64_t dataStartAddress = slidableMappings[i].address;
                uint64_t dataSize = slidableMappings[i].size;
                const uint8_t* dataPagesStart = (uint8_t*)dataStartAddress + sharedCacheSlide;

                // Get the slide info
                uint64_t offsetInLinkEditRegion = (slidableMappings[i].slideInfoFileOffset - linkeditMapping->fileOffset);
                const dyld_cache_slide_info* slideInfoHeader = (dyld_cache_slide_info*)((uint8_t*)(linkeditMapping->address) + sharedCacheSlide + offsetInLinkEditRegion);
                handler(dataStartAddress, dataSize, dataPagesStart,
                        slidableMappings[i].slideInfoFileOffset, slidableMappings[i].slideInfoFileSize, slideInfoHeader);
            }
        }
    }
}
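
// Illustrative sketch (not part of the original file): reporting which slide
// info format each slidable region uses, via hasSlideInfo() and forEachSlideInfo()
// above; printf assumes <cstdio>.
#if 0
static void examplePrintSlideInfoVersions(const DyldSharedCache* cache)
{
    if ( !cache->hasSlideInfo() )
        return;
    cache->forEachSlideInfo(^(uint64_t mappingStartAddress, uint64_t mappingSize,
                              const uint8_t* mappingPagesStart,
                              uint64_t slideInfoOffset, uint64_t slideInfoSize,
                              const dyld_cache_slide_info* slideInfoHeader) {
        printf("mapping 0x%llX..0x%llX: slide info v%u (%llu bytes)\n",
               mappingStartAddress, mappingStartAddress + mappingSize,
               slideInfoHeader->version, slideInfoSize);
    });
}
#endif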

#if BUILDING_LIBDYLD
const char* DyldSharedCache::getCanonicalPath(const char* path) const {
    uint32_t dyldCacheImageIndex;
    if ( hasImagePath(path, dyldCacheImageIndex) )
        return getIndexedImagePath(dyldCacheImageIndex);
#if TARGET_OS_OSX
    // on macOS support "Foo.framework/Foo" symlink
    char resolvedPath[PATH_MAX];
    errno = 0;   // errno is only set on failure, so clear any stale value first
    realpath(path, resolvedPath);
    int realpathErrno = errno;
    // If realpath() resolves to a path which does not exist on disk, errno is set to ENOENT
    if ( (realpathErrno == ENOENT) || (realpathErrno == 0) ) {
        if ( hasImagePath(resolvedPath, dyldCacheImageIndex) )
            return getIndexedImagePath(dyldCacheImageIndex);
    }
#endif
    return nullptr;
}
#endif

#if !(BUILDING_LIBDYLD || BUILDING_DYLD)
void DyldSharedCache::fillMachOAnalyzersMap(std::unordered_map<std::string, dyld3::MachOAnalyzer*>& dylibAnalyzers) const {
    forEachImage(^(const mach_header* mh, const char* iteratedInstallName) {
        dylibAnalyzers[std::string(iteratedInstallName)] = (dyld3::MachOAnalyzer*)mh;
    });
}

void DyldSharedCache::computeReverseDependencyMapForDylib(std::unordered_map<std::string, std::set<std::string>>& reverseDependencyMap, const std::unordered_map<std::string, dyld3::MachOAnalyzer*>& dylibAnalyzers, const std::string& loadPath) const {
    dyld3::MachOAnalyzer* ma = dylibAnalyzers.at(loadPath);
    if (reverseDependencyMap.find(loadPath) != reverseDependencyMap.end()) return;
    reverseDependencyMap[loadPath] = std::set<std::string>();

    ma->forEachDependentDylib(^(const char* dependencyLoadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
        if (isUpward) return;
        std::string dependencyLoadPathString = std::string(dependencyLoadPath);
        computeReverseDependencyMapForDylib(reverseDependencyMap, dylibAnalyzers, dependencyLoadPathString);
        reverseDependencyMap[dependencyLoadPathString].insert(loadPath);
    });
}

// Walks the shared cache and constructs the reverse dependency graph (if dylib A depends on B,
// constructs the graph with B -> A edges)
void DyldSharedCache::computeReverseDependencyMap(std::unordered_map<std::string, std::set<std::string>>& reverseDependencyMap) const {
    std::unordered_map<std::string, dyld3::MachOAnalyzer*> dylibAnalyzers;

    fillMachOAnalyzersMap(dylibAnalyzers);
    forEachImage(^(const mach_header* mh, const char* installName) {
        computeReverseDependencyMapForDylib(reverseDependencyMap, dylibAnalyzers, std::string(installName));
    });
}

// Uses the reverse dependency graph constructed above to find the recursive set of dependents for each dylib
void DyldSharedCache::findDependentsRecursively(std::unordered_map<std::string, std::set<std::string>>& transitiveDependents, const std::unordered_map<std::string, std::set<std::string>>& reverseDependencyMap, std::set<std::string>& visited, const std::string& loadPath) const {

    if (transitiveDependents.find(loadPath) != transitiveDependents.end()) {
        return;
    }

    if (visited.find(loadPath) != visited.end()) {
        return;
    }

    visited.insert(loadPath);

    std::set<std::string> dependents;

    for (const std::string& dependent : reverseDependencyMap.at(loadPath)) {
        findDependentsRecursively(transitiveDependents, reverseDependencyMap, visited, dependent);
        if (transitiveDependents.find(dependent) != transitiveDependents.end()) {
            std::set<std::string>& theseTransitiveDependents = transitiveDependents.at(dependent);
            dependents.insert(theseTransitiveDependents.begin(), theseTransitiveDependents.end());
        }
        dependents.insert(dependent);
    }

    transitiveDependents[loadPath] = dependents;
}

// Fills a map from each install name N to the set of install names depending on N
void DyldSharedCache::computeTransitiveDependents(std::unordered_map<std::string, std::set<std::string>>& transitiveDependents) const {
    std::unordered_map<std::string, std::set<std::string>> reverseDependencyMap;
    computeReverseDependencyMap(reverseDependencyMap);
    forEachImage(^(const mach_header* mh, const char* installName) {
        std::set<std::string> visited;
        findDependentsRecursively(transitiveDependents, reverseDependencyMap, visited, std::string(installName));
    });
}
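
// Illustrative sketch (not part of the original file): using the transitive
// dependents map built above to answer "what would be impacted if this dylib
// changed?" for a single install name; printf assumes <cstdio>.
#if 0
static void examplePrintDependents(const DyldSharedCache* cache, const std::string& installName)
{
    std::unordered_map<std::string, std::set<std::string>> transitiveDependents;
    cache->computeTransitiveDependents(transitiveDependents);
    auto it = transitiveDependents.find(installName);
    if ( it == transitiveDependents.end() )
        return;
    for (const std::string& dependent : it->second)
        printf("%s depends (possibly transitively) on %s\n", dependent.c_str(), installName.c_str());
}
#endif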
#endif