apple/dyld (dyld-625.13): dyld3/shared-cache/DyldSharedCache.cpp
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <dirent.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <mach/mach.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <mach-o/dyld_priv.h>
#include <assert.h>
#include <unistd.h>
#include <dlfcn.h>
#include <CommonCrypto/CommonDigest.h>
#include <CommonCrypto/CommonDigestSPI.h>

#if BUILDING_CACHE_BUILDER
#include <set>
#include <string>
#include <vector>
#include <unordered_map>
#include <unordered_set>
#endif

#define NO_ULEB
#include "MachOLoaded.h"
#include "ClosureFileSystemPhysical.h"
#include "CacheBuilder.h"
#include "DyldSharedCache.h"
#include "Trie.hpp"
#include "StringUtils.h"
#include "FileUtils.h"


#if BUILDING_CACHE_BUILDER
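// Build a new dyld shared cache from the given sets of cache dylibs, other OS
// dylibs, and OS executables. A single path prefix from the options (if any)
// is applied when opening files, platform-specific install-name aliases are
// added for embedded platforms, and the cache (and its map file) is written
// out when the corresponding output paths are non-empty. The results carry any
// error message, warnings, evicted dylibs, and the code-signature (cdHash)
// values of the built cache.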
DyldSharedCache::CreateResults DyldSharedCache::create(const CreateOptions&             options,
                                                       const std::vector<MappedMachO>&  dylibsToCache,
                                                       const std::vector<MappedMachO>&  otherOsDylibs,
                                                       const std::vector<MappedMachO>&  osExecutables)
{
    CreateResults  results;
    const char*    prefix = nullptr;
    if ( (options.pathPrefixes.size() == 1) && !options.pathPrefixes[0].empty() )
        prefix = options.pathPrefixes[0].c_str();
    // FIXME: This prefix will be applied to dylib closures and executable closures, even though
    // the old code didn't have a prefix on cache dylib closures
    dyld3::closure::FileSystemPhysical fileSystem(prefix);
    CacheBuilder cache(options, fileSystem);
    if ( !cache.errorMessage().empty() ) {
        results.errorMessage = cache.errorMessage();
        return results;
    }

    std::vector<FileAlias> aliases;
    switch ( options.platform ) {
        case dyld3::Platform::iOS:
        case dyld3::Platform::watchOS:
        case dyld3::Platform::tvOS:
            // FIXME: embedded cache builds should be getting aliases from manifest
            aliases.push_back({"/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit", "/System/Library/Frameworks/IOKit.framework/IOKit"});
            aliases.push_back({"/usr/lib/libstdc++.6.dylib", "/usr/lib/libstdc++.dylib"});
            aliases.push_back({"/usr/lib/libstdc++.6.dylib", "/usr/lib/libstdc++.6.0.9.dylib"});
            aliases.push_back({"/usr/lib/libz.1.dylib", "/usr/lib/libz.dylib"});
            aliases.push_back({"/usr/lib/libSystem.B.dylib", "/usr/lib/libSystem.dylib"});
            break;
        default:
            break;
    }

    cache.build(dylibsToCache, otherOsDylibs, osExecutables, aliases);

    results.agileSignature = cache.agileSignature();
    results.cdHashFirst    = cache.cdHashFirst();
    results.cdHashSecond   = cache.cdHashSecond();
    results.warnings       = cache.warnings();
    results.evictions      = cache.evictions();
    if ( cache.errorMessage().empty() ) {
        if ( !options.outputFilePath.empty() ) {
            // write cache file, if path non-empty
            cache.writeFile(options.outputFilePath);
        }
        if ( !options.outputMapFilePath.empty() ) {
            // write map file, if path non-empty
            cache.writeMapFile(options.outputMapFilePath);
        }
    }
    results.errorMessage = cache.errorMessage();
    cache.deleteBuffer();
    return results;
}

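// Check that the proposed cache contents are self-contained: every dylib that
// a cache member depends on must itself be placeable in the cache. Missing
// dependents are located with the supplied loader block and appended to
// dylibsToCache; dylibs that cannot be included are removed and reported in
// 'rejected' together with the reasons. Returns true if nothing was rejected.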
bool DyldSharedCache::verifySelfContained(std::vector<MappedMachO>& dylibsToCache, MappedMachO (^loader)(const std::string& runtimePath), std::vector<std::pair<DyldSharedCache::MappedMachO, std::set<std::string>>>& rejected)
{
    // build map of dylibs
    __block std::map<std::string, std::set<std::string>> badDylibs;
    __block std::set<std::string> knownDylibs;
    for (const DyldSharedCache::MappedMachO& dylib : dylibsToCache) {
        std::set<std::string> reasons;
        if ( dylib.mh->canBePlacedInDyldCache(dylib.runtimePath.c_str(), ^(const char* msg) { badDylibs[dylib.runtimePath].insert(msg);}) ) {
            knownDylibs.insert(dylib.runtimePath);
            knownDylibs.insert(dylib.mh->installName());
        }
    }

    // check all dependencies to assure every dylib in cache only depends on other dylibs in cache
    __block bool doAgain = true;
    while ( doAgain ) {
        __block std::vector<DyldSharedCache::MappedMachO> foundMappings;
        doAgain = false;
        // scan dylib list making sure all dependents are in dylib list
        for (const DyldSharedCache::MappedMachO& dylib : dylibsToCache) {
            if ( badDylibs.count(dylib.runtimePath) != 0 )
                continue;
            dylib.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                if ( knownDylibs.count(loadPath) == 0 ) {
                    doAgain = true;
                    MappedMachO foundMapping;
                    if ( badDylibs.count(loadPath) == 0 )
                        foundMapping = loader(loadPath);
                    if ( foundMapping.length == 0 ) {
                        badDylibs[dylib.runtimePath].insert(std::string("Could not find dependency '") + loadPath + "'");
                        knownDylibs.erase(dylib.runtimePath);
                        knownDylibs.erase(dylib.mh->installName());
                    }
                    else {
                        std::set<std::string> reasons;
                        if ( foundMapping.mh->canBePlacedInDyldCache(foundMapping.runtimePath.c_str(), ^(const char* msg) { badDylibs[foundMapping.runtimePath].insert(msg);}) ) {
                            // see if existing mapping was returned
                            bool alreadyInVector = false;
                            for (const MappedMachO& existing : dylibsToCache) {
                                if ( existing.mh == foundMapping.mh ) {
                                    alreadyInVector = true;
                                    break;
                                }
                            }
                            if ( !alreadyInVector )
                                foundMappings.push_back(foundMapping);
                            knownDylibs.insert(loadPath);
                            knownDylibs.insert(foundMapping.runtimePath);
                            knownDylibs.insert(foundMapping.mh->installName());
                        }
                    }
                }
            });
        }
        dylibsToCache.insert(dylibsToCache.end(), foundMappings.begin(), foundMappings.end());
        // remove bad dylibs
        const auto badDylibsCopy = badDylibs;
        dylibsToCache.erase(std::remove_if(dylibsToCache.begin(), dylibsToCache.end(), [&](const DyldSharedCache::MappedMachO& dylib) {
            auto i = badDylibsCopy.find(dylib.runtimePath);
            if ( i != badDylibsCopy.end() ) {
                rejected.push_back(std::make_pair(dylib, i->second));
                return true;
            }
            else {
                return false;
            }
        }), dylibsToCache.end());
    }

    return badDylibs.empty();
}
#endif

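// Iterate the cache's mapping table and invoke the handler once per mapped
// region with its content pointer, unslid vm address, size, and initial
// protections.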
void DyldSharedCache::forEachRegion(void (^handler)(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions)) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    const dyld_cache_mapping_info* mappingsEnd = &mappings[header.mappingCount];
    for (const dyld_cache_mapping_info* m=mappings; m < mappingsEnd; ++m) {
        handler((char*)this + m->fileOffset, m->address, m->size, m->initProt);
    }
}

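// Return true if [addr, addr+length) lies entirely inside one of the cache's
// mapped regions; 'readOnly' reports whether that region is mapped without
// write permission. The early-out upper-bound check uses mappings[2], i.e. it
// assumes the usual three-region (TEXT/DATA/LINKEDIT) cache layout.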
bool DyldSharedCache::inCache(const void* addr, size_t length, bool& readOnly) const
{
    // quick out if before start of cache
    if ( addr < this )
        return false;

    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    uintptr_t unslidStart = (uintptr_t)addr - slide;

    // quick out if after end of cache
    if ( unslidStart > (mappings[2].address + mappings[2].size) )
        return false;

    // walk cache regions
    const dyld_cache_mapping_info* mappingsEnd = &mappings[header.mappingCount];
    uintptr_t unslidEnd = unslidStart + length;
    for (const dyld_cache_mapping_info* m=mappings; m < mappingsEnd; ++m) {
        if ( (unslidStart >= m->address) && (unslidEnd < (m->address+m->size)) ) {
            readOnly = ((m->initProt & VM_PROT_WRITE) == 0);
            return true;
        }
    }

    return false;
}

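// Invoke the handler once per dylib in the cache with its mach_header and
// install name. Alias entries are skipped: their path strings live in the
// cache header area, before the file offset of the first image, which is what
// the pathFileOffset comparison below detects.
//
// Illustrative use (not part of this file):
//     cache->forEachImage(^(const mach_header* mh, const char* installName) {
//         printf("%s\n", installName);
//     });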
void DyldSharedCache::forEachImage(void (^handler)(const mach_header* mh, const char* installName)) const
{
    const dyld_cache_image_info*   dylibs   = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( mappings[0].fileOffset != 0 )
        return;
    uint64_t firstImageOffset = 0;
    uint64_t firstRegionAddress = mappings[0].address;
    for (uint32_t i=0; i < header.imagesCount; ++i) {
        const char* dylibPath = (char*)this + dylibs[i].pathFileOffset;
        uint64_t offset = dylibs[i].address - firstRegionAddress;
        if ( firstImageOffset == 0 )
            firstImageOffset = offset;
        // skip over aliases
        if ( dylibs[i].pathFileOffset < firstImageOffset )
            continue;
        const mach_header* mh = (mach_header*)((char*)this + offset);
        handler(mh, dylibPath);
    }
}

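// Same walk and alias-skipping as forEachImage(), but reports each image's
// recorded path, modification time, and inode instead of its mach_header.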
void DyldSharedCache::forEachImageEntry(void (^handler)(const char* path, uint64_t mTime, uint64_t inode)) const
{
    const dyld_cache_image_info*   dylibs   = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( mappings[0].fileOffset != 0 )
        return;
    uint64_t firstImageOffset = 0;
    uint64_t firstRegionAddress = mappings[0].address;
    for (uint32_t i=0; i < header.imagesCount; ++i) {
        const char* dylibPath = (char*)this + dylibs[i].pathFileOffset;
        uint64_t offset = dylibs[i].address - firstRegionAddress;
        if ( firstImageOffset == 0 )
            firstImageOffset = offset;
        // skip over aliases
        if ( dylibs[i].pathFileOffset < firstImageOffset )
            continue;
        handler(dylibPath, dylibs[i].modTime, dylibs[i].inode);
    }
}

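// Return the mach_header of the image at 'index' in the cache's image table,
// along with the modification time and inode recorded for it at cache-build
// time. No bounds checking is performed on 'index'.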
const mach_header* DyldSharedCache::getIndexedImageEntry(uint32_t index, uint64_t& mTime, uint64_t& inode) const
{
    const dyld_cache_image_info*   dylibs   = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    mTime = dylibs[index].modTime;
    inode = dylibs[index].inode;
    return (mach_header*)((uint8_t*)this + dylibs[index].address - mappings[0].address);
}

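// Iterate the imagesText table and report each entry's unslid load address,
// TEXT segment size, UUID, and install name. Caches old enough not to have an
// imagesText array are detected via the header size (mappingOffset) and
// silently skipped.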
void DyldSharedCache::forEachImageTextSegment(void (^handler)(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const uuid_t dylibUUID, const char* installName, bool& stop)) const
{
    // check for old cache without imagesText array
    if ( header.mappingOffset < 123 )
        return;

    // walk imageText table and call callback for each entry
    const dyld_cache_image_text_info* imagesText = (dyld_cache_image_text_info*)((char*)this + header.imagesTextOffset);
    const dyld_cache_image_text_info* imagesTextEnd = &imagesText[header.imagesTextCount];
    bool stop = false;
    for (const dyld_cache_image_text_info* p=imagesText; p < imagesTextEnd && !stop; ++p) {
        handler(p->loadAddress, p->textSegmentSize, p->uuid, (char*)this + p->pathOffset, stop);
    }
}

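// Given an offset into the cache's first (TEXT) region, find which cached
// dylib's TEXT segment contains it. On success, stores the index into the
// imagesText table in *imageIndex and returns true.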
bool DyldSharedCache::addressInText(uint32_t cacheOffset, uint32_t* imageIndex) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( cacheOffset > mappings[0].size )
        return false;
    uint64_t targetAddr = mappings[0].address + cacheOffset;
    // walk imageText table and call callback for each entry
    const dyld_cache_image_text_info* imagesText = (dyld_cache_image_text_info*)((char*)this + header.imagesTextOffset);
    const dyld_cache_image_text_info* imagesTextEnd = &imagesText[header.imagesTextCount];
    for (const dyld_cache_image_text_info* p=imagesText; p < imagesTextEnd; ++p) {
        if ( (p->loadAddress <= targetAddr) && (targetAddr < p->loadAddress+p->textSegmentSize) ) {
            *imageIndex = (uint32_t)(p-imagesText);
            return true;
        }
    }
    return false;
}

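// The cache magic is of the form "dyld_v1  <arch>": the architecture name
// starts at byte offset 8, possibly preceded by padding spaces, which this
// skips over.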
const char* DyldSharedCache::archName() const
{
    const char* archSubString = ((char*)this) + 8;
    while (*archSubString == ' ')
        ++archSubString;
    return archSubString;
}


dyld3::Platform DyldSharedCache::platform() const
{
    return (dyld3::Platform)header.platform;
}

#if BUILDING_CACHE_BUILDER
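// Produce a human-readable text map of the cache: one line per mapped region
// (protection, size, address range), followed by each cached dylib's install
// name and the address range of every one of its segments. Only compiled into
// the cache builder.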
std::string DyldSharedCache::mapFile() const
{
    __block std::string             result;
    __block std::vector<uint64_t>   regionStartAddresses;
    __block std::vector<uint64_t>   regionSizes;
    __block std::vector<uint64_t>   regionFileOffsets;

    result.reserve(256*1024);
    forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
        regionStartAddresses.push_back(vmAddr);
        regionSizes.push_back(size);
        regionFileOffsets.push_back((uint8_t*)content - (uint8_t*)this);
        char lineBuffer[256];
        const char* prot = "RW";
        if ( permissions == (VM_PROT_EXECUTE|VM_PROT_READ) )
            prot = "EX";
        else if ( permissions == VM_PROT_READ )
            prot = "RO";
        if ( size > 1024*1024 )
            sprintf(lineBuffer, "mapping %s %4lluMB 0x%0llX -> 0x%0llX\n", prot, size/(1024*1024), vmAddr, vmAddr+size);
        else
            sprintf(lineBuffer, "mapping %s %4lluKB 0x%0llX -> 0x%0llX\n", prot, size/1024, vmAddr, vmAddr+size);
        result += lineBuffer;
    });

    // TODO: add linkedit breakdown
    result += "\n\n";

    forEachImage(^(const mach_header* mh, const char* installName) {
        result += std::string(installName) + "\n";
        const dyld3::MachOFile* mf = (dyld3::MachOFile*)mh;
        mf->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
            char lineBuffer[256];
            sprintf(lineBuffer, "\t%16s 0x%08llX -> 0x%08llX\n", info.segName, info.vmAddr, info.vmAddr+info.vmSize);
            result += lineBuffer;
        });
        result += "\n";
    });

    return result;
}
#endif


uint64_t DyldSharedCache::unslidLoadAddress() const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    return mappings[0].address;
}

void DyldSharedCache::getUUID(uuid_t uuid) const
{
    memcpy(uuid, header.uuid, sizeof(uuid_t));
}

uint64_t DyldSharedCache::mappedSize() const
{
    __block uint64_t startAddr = 0;
    __block uint64_t endAddr = 0;
    forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
        if ( startAddr == 0 )
            startAddr = vmAddr;
        uint64_t end = vmAddr+size;
        if ( end > endAddr )
            endAddr = end;
    });
    return (endAddr - startAddr);
}

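// Given a pointer to a mach_header inside this (possibly slid) cache, find the
// corresponding entry in the image table and return its index. Returns false
// if the pointer does not match any image's load address.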
bool DyldSharedCache::findMachHeaderImageIndex(const mach_header* mh, uint32_t& imageIndex) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    uint64_t unslidMh = (uintptr_t)mh - slide;
    const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
    for (uint32_t i=0; i < header.imagesCount; ++i) {
        if ( dylibs[i].address == unslidMh ) {
            imageIndex = i;
            return true;
        }
    }
    return false;
}

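// Look up a dylib path in the cache. Newer caches (detected by header size)
// are searched via the dylibs trie; older caches fall back to a linear scan of
// the image table. On success the image index is returned in imageIndex.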
bool DyldSharedCache::hasImagePath(const char* dylibPath, uint32_t& imageIndex) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( mappings[0].fileOffset != 0 )
        return false;
    if ( header.mappingOffset >= 0x118 ) {
        uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
        const uint8_t* dylibTrieStart = (uint8_t*)(this->header.dylibsTrieAddr + slide);
        const uint8_t* dylibTrieEnd = dylibTrieStart + this->header.dylibsTrieSize;

        Diagnostics diag;
        const uint8_t* imageNode = dyld3::MachOLoaded::trieWalk(diag, dylibTrieStart, dylibTrieEnd, dylibPath);
        if ( imageNode != NULL ) {
            imageIndex = (uint32_t)dyld3::MachOFile::read_uleb128(diag, imageNode, dylibTrieEnd);
            return true;
        }
    }
    else {
        const dyld_cache_image_info* dylibs = (dyld_cache_image_info*)((char*)this + header.imagesOffset);
        uint64_t firstImageOffset = 0;
        uint64_t firstRegionAddress = mappings[0].address;
        for (uint32_t i=0; i < header.imagesCount; ++i) {
            const char* aPath = (char*)this + dylibs[i].pathFileOffset;
            if ( strcmp(aPath, dylibPath) == 0 ) {
                imageIndex = i;
                return true;
            }
            uint64_t offset = dylibs[i].address - firstRegionAddress;
            if ( firstImageOffset == 0 )
                firstImageOffset = offset;
            // skip over aliases
            if ( dylibs[i].pathFileOffset < firstImageOffset )
                continue;
        }
    }

    return false;
}

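// Look up a path in the "other" trie, which indexes prebuilt dyld3 Image info
// for OS dylibs and bundles that are not part of the cache but may be
// dlopen()ed. Returns the matching Image from the other-image array, or
// nullptr if the cache has no such data or the path is not found.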
const dyld3::closure::Image* DyldSharedCache::findDlopenOtherImage(const char* path) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    if ( mappings[0].fileOffset != 0 )
        return nullptr;
    if ( header.mappingOffset < sizeof(dyld_cache_header) )
        return nullptr;
    if ( header.otherImageArrayAddr == 0 )
        return nullptr;
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* dylibTrieStart = (uint8_t*)(this->header.otherTrieAddr + slide);
    const uint8_t* dylibTrieEnd = dylibTrieStart + this->header.otherTrieSize;

    Diagnostics diag;
    const uint8_t* imageNode = dyld3::MachOLoaded::trieWalk(diag, dylibTrieStart, dylibTrieEnd, path);
    if ( imageNode != NULL ) {
        dyld3::closure::ImageNum imageNum = (uint32_t)dyld3::MachOFile::read_uleb128(diag, imageNode, dylibTrieEnd);
        uint64_t arrayAddrOffset = header.otherImageArrayAddr - mappings[0].address;
        const dyld3::closure::ImageArray* otherImageArray = (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
        return otherImageArray->imageForNum(imageNum);
    }

    return nullptr;
}


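// Look up the prebuilt launch closure for an executable path in the program
// closures trie. Paths under /System/ that miss on the first lookup are also
// tried via realpath(), since the executable may have been launched through a
// symlink.
//
// Illustrative use (not part of this file):
//     if ( const dyld3::closure::LaunchClosure* lc = cache->findClosure(mainPath) )
//         ; // use the prebuilt closure instead of building one at launch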
const dyld3::closure::LaunchClosure* DyldSharedCache::findClosure(const char* executablePath) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* executableTrieStart = (uint8_t*)(this->header.progClosuresTrieAddr + slide);
    const uint8_t* executableTrieEnd = executableTrieStart + this->header.progClosuresTrieSize;
    const uint8_t* closuresStart = (uint8_t*)(this->header.progClosuresAddr + slide);

    Diagnostics diag;
    const uint8_t* imageNode = dyld3::MachOLoaded::trieWalk(diag, executableTrieStart, executableTrieEnd, executablePath);
    if ( (imageNode == NULL) && (strncmp(executablePath, "/System/", 8) == 0) ) {
        // anything in /System/ should have a closure. Perhaps it was launched via symlink path
        char realPath[PATH_MAX];
        if ( realpath(executablePath, realPath) != NULL )
            imageNode = dyld3::MachOLoaded::trieWalk(diag, executableTrieStart, executableTrieEnd, realPath);
    }
    if ( imageNode != NULL ) {
        uint32_t closureOffset = (uint32_t)dyld3::MachOFile::read_uleb128(diag, imageNode, executableTrieEnd);
        if ( closureOffset < this->header.progClosuresSize )
            return (dyld3::closure::LaunchClosure*)((uint8_t*)closuresStart + closureOffset);
    }

    return nullptr;
}

#if !BUILDING_LIBDYLD && !BUILDING_DYLD
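// Enumerate every prebuilt launch closure in the cache by parsing the program
// closures trie, calling the handler with each executable's runtime path and
// its closure. Only compiled outside of dyld and libdyld.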
void DyldSharedCache::forEachLaunchClosure(void (^handler)(const char* executableRuntimePath, const dyld3::closure::LaunchClosure* closure)) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* executableTrieStart = (uint8_t*)(this->header.progClosuresTrieAddr + slide);
    const uint8_t* executableTrieEnd = executableTrieStart + this->header.progClosuresTrieSize;
    const uint8_t* closuresStart = (uint8_t*)(this->header.progClosuresAddr + slide);

    std::vector<DylibIndexTrie::Entry> closureEntries;
    if ( Trie<DylibIndex>::parseTrie(executableTrieStart, executableTrieEnd, closureEntries) ) {
        for (DylibIndexTrie::Entry& entry : closureEntries ) {
            uint32_t offset = entry.info.index;
            if ( offset < this->header.progClosuresSize )
                handler(entry.name.c_str(), (const dyld3::closure::LaunchClosure*)(closuresStart+offset));
        }
    }
}

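// Enumerate every "other" (non-cached, dlopen-able) prebuilt Image by parsing
// the other-images trie, calling the handler with each runtime path and its
// Image from the other-image array.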
void DyldSharedCache::forEachDlopenImage(void (^handler)(const char* runtimePath, const dyld3::closure::Image* image)) const
{
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
    const uint8_t* otherTrieStart = (uint8_t*)(this->header.otherTrieAddr + slide);
    const uint8_t* otherTrieEnd = otherTrieStart + this->header.otherTrieSize;

    std::vector<DylibIndexTrie::Entry> otherEntries;
    if ( Trie<DylibIndex>::parseTrie(otherTrieStart, otherTrieEnd, otherEntries) ) {
        for (const DylibIndexTrie::Entry& entry : otherEntries ) {
            dyld3::closure::ImageNum imageNum = entry.info.index;
            uint64_t arrayAddrOffset = header.otherImageArrayAddr - mappings[0].address;
            const dyld3::closure::ImageArray* otherImageArray = (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
            handler(entry.name.c_str(), otherImageArray->imageForNum(imageNum));
        }
    }
}
#endif

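// Return the prebuilt dyld3 ImageArray describing the dylibs in this cache, or
// nullptr for caches old enough not to carry one (detected by header size) or
// caches built without it.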
const dyld3::closure::ImageArray* DyldSharedCache::cachedDylibsImageArray() const
{
    // check for old cache without imagesArray
    if ( header.mappingOffset < 0x100 )
        return nullptr;

    if ( header.dylibsImageArrayAddr == 0 )
        return nullptr;

    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uint64_t arrayAddrOffset = header.dylibsImageArrayAddr - mappings[0].address;
    return (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
}

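// Return the prebuilt dyld3 ImageArray for OS images that are not in the cache
// but have precomputed closure info, or nullptr if the cache has none.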
const dyld3::closure::ImageArray* DyldSharedCache::otherOSImageArray() const
{
    // check for old cache without imagesArray
    if ( header.mappingOffset < sizeof(dyld_cache_header) )
        return nullptr;

    if ( header.otherImageArrayAddr == 0 )
        return nullptr;

    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
    uint64_t arrayAddrOffset = header.otherImageArrayAddr - mappings[0].address;
    return (dyld3::closure::ImageArray*)((char*)this + arrayAddrOffset);
}
