]> git.saurik.com Git - apple/dyld.git/blob - dyld3/SharedCacheRuntime.cpp
dyld-519.2.2.tar.gz
[apple/dyld.git] / dyld3 / SharedCacheRuntime.cpp
1 /*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24
25
26 #include <stdint.h>
27 #include <string.h>
28 #include <unistd.h>
29 #include <fcntl.h>
30 #include <sys/param.h>
31 #include <sys/types.h>
32 #include <sys/stat.h>
33 #include <sys/syscall.h>
34 #include <sys/syslog.h>
35 #include <sys/sysctl.h>
36 #include <sys/mman.h>
37 #include <mach/mach.h>
38 #include <mach-o/fat.h>
39 #include <mach-o/loader.h>
40 #include <mach-o/ldsyms.h>
41 #include <mach/shared_region.h>
42 #include <mach/mach.h>
43 #include <Availability.h>
44 #include <TargetConditionals.h>
45
46 #include "dyld_cache_format.h"
47 #include "SharedCacheRuntime.h"
48 #include "LaunchCache.h"
49 #include "LaunchCacheFormat.h"
50 #include "Loading.h"
51
52 #define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024
53
54 // should be in mach/shared_region.h
55 extern "C" int __shared_region_check_np(uint64_t* startaddress);
56 extern "C" int __shared_region_map_and_slide_np(int fd, uint32_t count, const shared_file_mapping_np mappings[], long slide, const dyld_cache_slide_info2* slideInfo, size_t slideInfoSize);
57
58
59 namespace dyld {
60 extern int my_stat(const char* path, struct stat* buf);
61 extern int my_open(const char* path, int flag, int other);
62 extern void log(const char*, ...);
63 }
64
65
66 namespace dyld3 {
67
68
// Aggregates everything preflightCacheFile() learns about a cache file that
// the mapping routines later need: the open file descriptor, the three
// region mappings, and the slide/layout metadata read from the cache header.
struct CacheInfo
{
    int                     fd;                         // read-only fd for the cache file
    shared_file_mapping_np  mappings[3];                // the three cache regions (see maxProt checks: r-x, rw-, r--)
    uint64_t                slideInfoAddressUnslid;     // unslid vm address of the slide info blob
    size_t                  slideInfoSize;              // size of slide info; 0 means cache cannot slide
    uint64_t                cachedDylibsGroupUnslid;    // unslid address of dylibs ImageGroup, or 0 for old caches
    uint64_t                sharedRegionStart;          // base vm address of the shared region
    uint64_t                sharedRegionSize;           // size of the shared region
    uint64_t                maxSlide;                   // largest ASLR slide that keeps the cache inside the region
};
80
81
82
83
84 #if __i386__
85 #define ARCH_NAME "i386"
86 #define ARCH_CACHE_MAGIC "dyld_v1 i386"
87 #elif __x86_64__
88 #define ARCH_NAME "x86_64"
89 #define ARCH_CACHE_MAGIC "dyld_v1 x86_64"
90 #define ARCH_NAME_H "x86_64h"
91 #define ARCH_CACHE_MAGIC_H "dyld_v1 x86_64h"
92 #elif __ARM_ARCH_7K__
93 #define ARCH_NAME "armv7k"
94 #define ARCH_CACHE_MAGIC "dyld_v1 armv7k"
95 #elif __ARM_ARCH_7A__
96 #define ARCH_NAME "armv7"
97 #define ARCH_CACHE_MAGIC "dyld_v1 armv7"
98 #elif __ARM_ARCH_7S__
99 #define ARCH_NAME "armv7s"
100 #define ARCH_CACHE_MAGIC "dyld_v1 armv7s"
101 #elif __arm64e__
102 #define ARCH_NAME "arm64e"
103 #define ARCH_CACHE_MAGIC "dyld_v1 arm64e"
104 #elif __arm64__
105 #define ARCH_NAME "arm64"
106 #define ARCH_CACHE_MAGIC "dyld_v1 arm64"
107 #endif
108
109
110
// Applies one chain of version-2 slide-info rebases within a single page.
// 'startOffset' is the byte offset in 'pageContent' of the first pointer of
// the chain.  Each raw pointer value encodes both the delta to the next
// pointer in the chain (in the bits selected by delta_mask) and the actual
// pointer value (in the remaining bits).  A delta of 0 terminates the chain.
static void rebaseChain(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info2* slideInfo)
{
    const uintptr_t deltaMask  = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t valueMask  = ~deltaMask;
    const uintptr_t valueAdd   = (uintptr_t)(slideInfo->value_add);
    // deltas are stored in 4-byte units, hence the extra -2 on the shift
    const unsigned  deltaShift = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;   // non-zero so the loop processes the first entry
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( value != 0 ) {
            // non-zero targets get rebased; zero stays zero (NULL pointer)
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
134
135
136 static void getCachePath(const SharedCacheOptions& options, size_t pathBufferSize, char pathBuffer[])
137 {
138 // set cache dir
139 if ( options.cacheDirOverride != nullptr ) {
140 strlcpy(pathBuffer, options.cacheDirOverride, pathBufferSize);
141 }
142 else {
143 #if __IPHONE_OS_VERSION_MIN_REQUIRED
144 strlcpy(pathBuffer, IPHONE_DYLD_SHARED_CACHE_DIR, sizeof(IPHONE_DYLD_SHARED_CACHE_DIR));
145 #else
146 strlcpy(pathBuffer, MACOSX_DYLD_SHARED_CACHE_DIR, sizeof(MACOSX_DYLD_SHARED_CACHE_DIR));
147 #endif
148 }
149
150 // append file component of cache file
151 if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
152 strlcat(pathBuffer, "/", pathBufferSize);
153 #if __x86_64__ && !__IPHONE_OS_VERSION_MIN_REQUIRED
154 if ( options.useHaswell ) {
155 size_t len = strlen(pathBuffer);
156 struct stat haswellStatBuf;
157 strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H, pathBufferSize);
158 if ( dyld::my_stat(pathBuffer, &haswellStatBuf) == 0 )
159 return;
160 // no haswell cache file, use regular x86_64 cache
161 pathBuffer[len] = '\0';
162 }
163 #endif
164 strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);
165
166 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
167 // use .development cache if it exists
168 struct stat enableStatBuf;
169 struct stat devCacheStatBuf;
170 struct stat optCacheStatBuf;
171 bool enableFileExists = (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR "enable-dylibs-to-override-cache", &enableStatBuf) == 0);
172 bool devCacheExists = (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT, &devCacheStatBuf) == 0);
173 bool optCacheExists = (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &optCacheStatBuf) == 0);
174 if ( (enableFileExists && (enableStatBuf.st_size < ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE) && devCacheExists) || !optCacheExists )
175 strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
176 #endif
177
178 }
179
180
181 int openSharedCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
182 {
183 getCachePath(options, sizeof(results->path), results->path);
184 return dyld::my_open(results->path, O_RDONLY, 0);
185 }
186
187 static bool validMagic(const SharedCacheOptions& options, const DyldSharedCache* cache)
188 {
189 if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC) == 0 )
190 return true;
191
192 #if __x86_64__
193 if ( options.useHaswell ) {
194 if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC_H) == 0 )
195 return true;
196 }
197 #endif
198 return false;
199 }
200
201
// Returns true if the cache was built for the platform (and simulator-ness)
// this process is running on.
static bool validPlatform(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    // grandfather in old caches that pre-date the platform field in the header
    if ( cache->header.mappingOffset < 0xE0 )
        return true;

    if ( cache->header.platform != (uint32_t)MachOParser::currentPlatform() )
        return false;

    // simulator processes require a simulator cache, and vice versa
#if TARGET_IPHONE_SIMULATOR
    return (cache->header.simulator != 0);
#else
    return (cache->header.simulator == 0);
#endif
}
221
222
// Logs the address range and protections of each of the three cache
// mappings (used for verbose mode diagnostics).
static void verboseSharedCacheMappings(const shared_file_mapping_np mappings[3])
{
    for (int i=0; i < 3; ++i) {
        dyld::log(" 0x%08llX->0x%08llX init=%x, max=%x %s%s%s\n",
            mappings[i].sfm_address, mappings[i].sfm_address+mappings[i].sfm_size-1,
            // BUG FIX: the "max=%x" field was printing sfm_init_prot a
            // second time instead of sfm_max_prot
            mappings[i].sfm_init_prot, mappings[i].sfm_max_prot,
            ((mappings[i].sfm_init_prot & VM_PROT_READ) ? "read " : ""),
            ((mappings[i].sfm_init_prot & VM_PROT_WRITE) ? "write " : ""),
            ((mappings[i].sfm_init_prot & VM_PROT_EXECUTE) ? "execute " : ""));
    }
}
234
// Opens the shared cache file, sanity checks its header and mappings,
// registers its code signature with the kernel, and fills in 'info' with
// everything needed to map it.  On any failure: sets results->errorMessage,
// closes the fd, and returns false.  On success the fd is left open in
// info->fd for the caller to map and close.
static bool preflightCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results, CacheInfo* info)
{
    // find and open shared cache file
    int fd = openSharedCacheFile(options, results);
    if ( fd == -1 ) {
        results->errorMessage = "shared cache file cannot be opened";
        return false;
    }
    struct stat cacheStatBuf;
    if ( dyld::my_stat(results->path, &cacheStatBuf) != 0 ) {
        results->errorMessage = "shared cache file cannot be stat()ed";
        ::close(fd);
        return false;
    }
    size_t cacheFileLength = (size_t)(cacheStatBuf.st_size);

    // sanity check header and mappings; 16KB is enough to cover the header
    // plus the mapping records (mappingOffset is validated to be <= 0x120)
    uint8_t firstPage[0x4000];
    if ( ::pread(fd, firstPage, sizeof(firstPage), 0) != sizeof(firstPage) ) {
        results->errorMessage = "shared cache header could not be read";
        ::close(fd);
        return false;
    }
    const DyldSharedCache* cache = (DyldSharedCache*)firstPage;
    if ( !validMagic(options, cache) ) {
        results->errorMessage = "shared cache file has wrong magic";
        ::close(fd);
        return false;
    }
    if ( !validPlatform(options, cache) ) {
        results->errorMessage = "shared cache file is for a different platform";
        ::close(fd);
        return false;
    }
    // expect exactly three mappings (r-x, rw-, r--), contiguous in the file
    // starting at offset 0, non-overlapping and ascending in vm address,
    // with the code signature running to the exact end of the file
    const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)&firstPage[cache->header.mappingOffset];
    if ( (cache->header.mappingCount != 3)
      || (cache->header.mappingOffset > 0x120)
      || (fileMappings[0].fileOffset != 0)
      || ((fileMappings[0].address + fileMappings[0].size) > fileMappings[1].address)
      || ((fileMappings[1].address + fileMappings[1].size) > fileMappings[2].address)
      || ((fileMappings[0].fileOffset + fileMappings[0].size) != fileMappings[1].fileOffset)
      || ((fileMappings[1].fileOffset + fileMappings[1].size) != fileMappings[2].fileOffset)
      || ((cache->header.codeSignatureOffset + cache->header.codeSignatureSize) != cacheFileLength)
      || (fileMappings[0].maxProt != (VM_PROT_READ|VM_PROT_EXECUTE))
      || (fileMappings[1].maxProt != (VM_PROT_READ|VM_PROT_WRITE))
      || (fileMappings[2].maxProt != VM_PROT_READ) ) {
        results->errorMessage = "shared cache file mappings are invalid";
        ::close(fd);
        return false;
    }

    // the whole cache must fit inside the shared region; newer headers
    // (mappingOffset >= 0xF8) carry their own region start/size
    if ( cache->header.mappingOffset >= 0xF8 ) {
        if ( (fileMappings[0].address != cache->header.sharedRegionStart) || ((fileMappings[2].address + fileMappings[2].size) > (cache->header.sharedRegionStart+cache->header.sharedRegionSize)) ) {
            results->errorMessage = "shared cache file mapping addressses invalid";
            ::close(fd);
            return false;
        }
    }
    else {
        if ( (fileMappings[0].address != SHARED_REGION_BASE) || ((fileMappings[2].address + fileMappings[2].size) > (SHARED_REGION_BASE+SHARED_REGION_SIZE)) ) {
            results->errorMessage = "shared cache file mapping addressses invalid";
            ::close(fd);
            return false;
        }
    }

    // register code signature of cache file
    fsignatures_t siginfo;
    siginfo.fs_file_start = 0;  // cache always starts at beginning of file
    siginfo.fs_blob_start = (void*)cache->header.codeSignatureOffset;
    siginfo.fs_blob_size  = (size_t)(cache->header.codeSignatureSize);
    int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
    if ( result == -1 ) {
        results->errorMessage = "code signature registration for shared cache failed";
        ::close(fd);
        return false;
    }

    // <rdar://problem/23188073> validate code signature covers entire shared cache
    // NOTE(review): F_ADDFILESIGS_RETURN is assumed to write back into
    // fs_file_start how much of the file the signature covers — verify
    // against the fcntl(2) documentation
    uint64_t codeSignedLength = siginfo.fs_file_start;
    if ( codeSignedLength < cache->header.codeSignatureOffset ) {
        results->errorMessage = "code signature does not cover entire shared cache file";
        ::close(fd);
        return false;
    }
    // map the first 16KB executable, forcing the kernel to validate that
    // content against the just-registered signature
    void* mappedData = ::mmap(NULL, sizeof(firstPage), PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
    if ( mappedData == MAP_FAILED ) {
        results->errorMessage = "first page of shared cache not mmap()able";
        ::close(fd);
        return false;
    }
    // the mapped bytes must match what pread() returned earlier
    // (note: this check reuses the mmap error message above)
    if ( memcmp(mappedData, firstPage, sizeof(firstPage)) != 0 ) {
        results->errorMessage = "first page of shared cache not mmap()able";
        ::close(fd);
        return false;
    }
    ::munmap(mappedData, sizeof(firstPage));

    // fill out results
    info->fd = fd;
    for (int i=0; i < 3; ++i) {
        info->mappings[i].sfm_address     = fileMappings[i].address;
        info->mappings[i].sfm_size        = fileMappings[i].size;
        info->mappings[i].sfm_file_offset = fileMappings[i].fileOffset;
        info->mappings[i].sfm_max_prot    = fileMappings[i].maxProt;
        info->mappings[i].sfm_init_prot   = fileMappings[i].initProt;
    }
    // mark the DATA mapping as slidable for the shared-region syscall
    info->mappings[1].sfm_max_prot  |= VM_PROT_SLIDE;
    info->mappings[1].sfm_init_prot |= VM_PROT_SLIDE;
    // convert slide info file offset (in LINKEDIT) to an unslid vm address
    info->slideInfoAddressUnslid = fileMappings[2].address + cache->header.slideInfoOffset - fileMappings[2].fileOffset;
    info->slideInfoSize = (long)cache->header.slideInfoSize;
    // dylibsImageGroupAddr only exists in newer headers
    if ( cache->header.mappingOffset > 0xD0 )
        info->cachedDylibsGroupUnslid = cache->header.dylibsImageGroupAddr;
    else
        info->cachedDylibsGroupUnslid = 0;
    if ( cache->header.mappingOffset >= 0xf8 ) {
        info->sharedRegionStart = cache->header.sharedRegionStart;
        info->sharedRegionSize  = cache->header.sharedRegionSize;
        info->maxSlide          = cache->header.maxSlide;
    }
    else {
        // old header: derive region and max slide from the fixed constants
        info->sharedRegionStart = SHARED_REGION_BASE;
        info->sharedRegionSize  = SHARED_REGION_SIZE;
        info->maxSlide          = SHARED_REGION_SIZE - (fileMappings[2].address + fileMappings[2].size - fileMappings[0].address);
    }
    return true;
}
362
363 #if !TARGET_IPHONE_SIMULATOR
// Checks whether another process has already mapped a shared cache into the
// shared region.  Returns true when a cache is present there — either
// filling in 'results' (compatible cache) or setting results->errorMessage
// (incompatible cache).  Returns false when the shared region is empty.
static bool reuseExistingCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    uint64_t cacheBaseAddress;
#if __i386__
    // NOTE(review): 294 is presumably __shared_region_check_np's syscall
    // number, called directly because no i386 stub is available — verify
    if ( syscall(294, &cacheBaseAddress) == 0 ) {
#else
    if ( __shared_region_check_np(&cacheBaseAddress) == 0 ) {
#endif
        const DyldSharedCache* existingCache = (DyldSharedCache*)cacheBaseAddress;
        if ( validMagic(options, existingCache) ) {
            const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)(cacheBaseAddress + existingCache->header.mappingOffset);
            results->loadAddress = existingCache;
            // slide = where the cache actually is vs. where it was built to be
            results->slide = (long)(cacheBaseAddress - fileMappings[0].address);
            // dylibsImageGroupAddr only exists in newer headers
            if ( (existingCache->header.mappingOffset > 0xD0) && (existingCache->header.dylibsImageGroupAddr != 0) )
                results->cachedDylibsGroup = (const launch_cache::binary_format::ImageGroup*)(existingCache->header.dylibsImageGroupAddr + results->slide);
            else
                results->cachedDylibsGroup = nullptr;
            // we don't know the path this cache was previously loaded from, assume default
            getCachePath(options, sizeof(results->path), results->path);
            if ( options.verbose ) {
                const shared_file_mapping_np* const mappings = (shared_file_mapping_np*)(cacheBaseAddress + existingCache->header.mappingOffset);
                dyld::log("re-using existing shared cache (%s):\n", results->path);
                // log the actual (slid) addresses, not the built-at addresses
                shared_file_mapping_np slidMappings[3];
                for (int i=0; i < 3; ++i) {
                    slidMappings[i] = mappings[i];
                    slidMappings[i].sfm_address += results->slide;
                }
                verboseSharedCacheMappings(slidMappings);
            }
        }
        else {
            results->errorMessage = "existing shared cache in memory is not compatible";
        }
        return true;
    }
    return false;
}
401
402 static long pickCacheASLR(CacheInfo& info)
403 {
404 // choose new random slide
405 #if __IPHONE_OS_VERSION_MIN_REQUIRED
406 // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
407 long slide = ((arc4random() % info.maxSlide) & (-16384));
408 #else
409 long slide = ((arc4random() % info.maxSlide) & (-4096));
410 #endif
411
412 // <rdar://problem/32031197> respect -disable_aslr boot-arg
413 if ( dyld3::loader::bootArgsContains("-disable_aslr") )
414 slide = 0;
415
416 // update mappings
417 for (uint32_t i=0; i < 3; ++i) {
418 info.mappings[i].sfm_address += slide;
419 }
420
421 return slide;
422 }
423
// Maps the shared cache into the system-wide shared region via the
// __shared_region_map_and_slide_np syscall, so all later processes can
// reuse it.  Falls back to reusing an existing mapping if another process
// won the race.  Returns true on success.
static bool mapCacheSystemWide(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    // pick a slide only for caches that carry slide info
    const dyld_cache_slide_info2* slideInfo = nullptr;
    if ( info.slideInfoSize != 0 ) {
        results->slide = pickCacheASLR(info);
        slideInfo = (dyld_cache_slide_info2*)(info.slideInfoAddressUnslid + results->slide);
    }
    if ( info.cachedDylibsGroupUnslid != 0 )
        results->cachedDylibsGroup = (const launch_cache::binary_format::ImageGroup*)(info.cachedDylibsGroupUnslid + results->slide);
    else
        results->cachedDylibsGroup = nullptr;

    // the kernel applies the slide info; fd can be closed once mapped
    int result = __shared_region_map_and_slide_np(info.fd, 3, info.mappings, results->slide, slideInfo, info.slideInfoSize);
    ::close(info.fd);
    if ( result == 0 ) {
        // mappings were already slid by pickCacheASLR(), so [0] is the load address
        results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sfm_address);
    }
    else {
        // could be another process beat us to it
        if ( reuseExistingCache(options, results) )
            return true;
        // if cache does not exist, then really is an error
        results->errorMessage = "syscall to map cache into shared region failed";
        return false;
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file system wide: %s\n", results->path);
        verboseSharedCacheMappings(info.mappings);
    }
    return true;
}
460 #endif
461
// Maps the shared cache into this process only (used by the simulator and
// when forcePrivate is set): tears down the shared-region sub-map, mmap()s
// each cache region at its (slid) address, then applies the v2 slide-info
// rebases to the DATA region in user space.  Returns true on success.
static bool mapCachePrivate(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    // open and validate cache file
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    // compute ALSR slide
    results->slide = 0;
    const dyld_cache_slide_info2* slideInfo = nullptr;
#if !TARGET_IPHONE_SIMULATOR // simulator caches do not support sliding
    if ( info.slideInfoSize != 0 ) {
        results->slide = pickCacheASLR(info);
        slideInfo = (dyld_cache_slide_info2*)(info.slideInfoAddressUnslid + results->slide);
    }
#endif
    // mappings[0] was slid by pickCacheASLR(), so it is the final load address
    results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sfm_address);
    if ( info.cachedDylibsGroupUnslid != 0 )
        results->cachedDylibsGroup = (const launch_cache::binary_format::ImageGroup*)(info.cachedDylibsGroupUnslid + results->slide);
    else
        results->cachedDylibsGroup = nullptr;

    // remove the shared region sub-map so we can mmap() into that range
    vm_deallocate(mach_task_self(), (vm_address_t)info.sharedRegionStart, (vm_size_t)info.sharedRegionSize);

    // map cache just for this process with mmap()
    for (int i=0; i < 3; ++i) {
        void* mmapAddress = (void*)(uintptr_t)(info.mappings[i].sfm_address);
        size_t size = (size_t)(info.mappings[i].sfm_size);
        //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
        // translate VM_PROT_* bits into PROT_* bits for mmap()
        int protection = 0;
        if ( info.mappings[i].sfm_init_prot & VM_PROT_EXECUTE )
            protection |= PROT_EXEC;
        if ( info.mappings[i].sfm_init_prot & VM_PROT_READ )
            protection |= PROT_READ;
        if ( info.mappings[i].sfm_init_prot & VM_PROT_WRITE )
            protection |= PROT_WRITE;
        off_t offset = info.mappings[i].sfm_file_offset;
        if ( ::mmap(mmapAddress, size, protection, MAP_FIXED | MAP_PRIVATE, info.fd, offset) != mmapAddress ) {
            // failed to map some chunk of this shared cache file
            // clear shared region
            vm_deallocate(mach_task_self(), (vm_address_t)info.sharedRegionStart, (vm_size_t)info.sharedRegionSize);
            // return failure
            results->loadAddress = nullptr;
            results->cachedDylibsGroup = nullptr;
            results->errorMessage = "could not mmap() part of dyld cache";
            return false;
        }
    }

    // update all __DATA pages with slide info
    const dyld_cache_slide_info* slideInfoHeader = (dyld_cache_slide_info*)slideInfo;
    if ( slideInfoHeader != nullptr ) {
        // only version 2 slide info is supported here
        if ( slideInfoHeader->version != 2 ) {
            results->errorMessage = "invalide slide info in cache file";
            return false;
        }
        const dyld_cache_slide_info2* slideHeader = (dyld_cache_slide_info2*)slideInfo;
        const uint32_t  page_size   = slideHeader->page_size;
        const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
        const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
        const uintptr_t dataPagesStart = (uintptr_t)info.mappings[1].sfm_address;
        for (int i=0; i < slideHeader->page_starts_count; ++i) {
            uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
            uint16_t pageEntry = page_starts[i];
            //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
            if ( pageEntry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE )
                continue;
            if ( pageEntry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
                // page has multiple rebase chains; walk the extras table
                // until an entry with the END bit is processed
                uint16_t chainIndex = (pageEntry & 0x3FFF);
                bool done = false;
                while ( !done ) {
                    uint16_t pInfo = page_extras[chainIndex];
                    uint16_t pageStartOffset = (pInfo & 0x3FFF)*4;   // offsets stored in 4-byte units
                    //dyld::log("     chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                    rebaseChain(page, pageStartOffset, results->slide, slideInfo);
                    done = (pInfo & DYLD_CACHE_SLIDE_PAGE_ATTR_END);
                    ++chainIndex;
                }
            }
            else {
                // single chain starting at pageEntry*4 bytes into the page
                uint32_t pageOffset = pageEntry * 4;
                //dyld::log("     start pageOffset=0x%03X\n", pageOffset);
                rebaseChain(page, pageOffset, results->slide, slideInfo);
            }
        }
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file private to process (%s):\n", results->path);
        verboseSharedCacheMappings(info.mappings);
    }
    return true;
}
556
557
558
// Loads the dyld shared cache for this process, filling in 'results'.
// Three paths: mmap() privately (simulator or forcePrivate), reuse a cache
// already in the shared region, or map it system wide as the first process.
// Returns true on success (results->loadAddress set, errorMessage null).
bool loadDyldCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    results->loadAddress        = 0;
    results->slide              = 0;
    results->cachedDylibsGroup  = nullptr;
    results->errorMessage       = nullptr;

#if TARGET_IPHONE_SIMULATOR
    // simulator only supports mmap()ing cache privately into process
    return mapCachePrivate(options, results);
#else
    if ( options.forcePrivate ) {
        // mmap cache into this process only
        return mapCachePrivate(options, results);
    }
    else {
        // fast path: when cache is already mapped into shared region
        // BUG FIX: the comparison was '!=', which returned failure when an
        // existing cache was successfully reused (errorMessage still null)
        // and success when it was incompatible — inverted relative to the
        // other two paths, which return true on success
        if ( reuseExistingCache(options, results) )
            return (results->errorMessage == nullptr);

        // slow path: this is first process to load cache
        return mapCacheSystemWide(options, results);
    }
#endif
}
584
585
586 bool findInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind, SharedCacheFindDylibResults* results)
587 {
588 if ( loadInfo.loadAddress == nullptr )
589 return false;
590
591 // HACK: temp support for old caches
592 if ( (loadInfo.cachedDylibsGroup == nullptr) || (loadInfo.loadAddress->header.formatVersion != launch_cache::binary_format::kFormatVersion) ) {
593 const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)loadInfo.loadAddress + loadInfo.loadAddress->header.imagesOffset);
594 const dyld_cache_image_info* const end = &start[loadInfo.loadAddress->header.imagesCount];
595 for (const dyld_cache_image_info* p = start; p != end; ++p) {
596 const char* aPath = (char*)loadInfo.loadAddress + p->pathFileOffset;
597 if ( strcmp(aPath, dylibPathToFind) == 0 ) {
598 results->mhInCache = (const mach_header*)(p->address+loadInfo.slide);
599 results->pathInCache = aPath;
600 results->slideInCache = loadInfo.slide;
601 results->imageData = nullptr;
602 return true;
603 }
604 }
605 return false;
606 }
607 // HACK: end
608
609 launch_cache::ImageGroup dylibsGroup(loadInfo.cachedDylibsGroup);
610 uint32_t foundIndex;
611 const launch_cache::binary_format::Image* imageData = dylibsGroup.findImageByPath(dylibPathToFind, foundIndex);
612 #if __MAC_OS_X_VERSION_MIN_REQUIRED
613 // <rdar://problem/32740215> handle symlink to cached dylib
614 if ( imageData == nullptr ) {
615 char resolvedPath[PATH_MAX];
616 if ( realpath(dylibPathToFind, resolvedPath) != nullptr )
617 imageData = dylibsGroup.findImageByPath(resolvedPath, foundIndex);
618 }
619 #endif
620 if ( imageData == nullptr )
621 return false;
622
623 launch_cache::Image image(imageData);
624 results->mhInCache = (const mach_header*)((uintptr_t)loadInfo.loadAddress + image.cacheOffset());
625 results->pathInCache = image.path();
626 results->slideInCache = loadInfo.slide;
627 results->imageData = imageData;
628 return true;
629 }
630
631
632 bool pathIsInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind)
633 {
634 if ( (loadInfo.loadAddress == nullptr) || (loadInfo.cachedDylibsGroup == nullptr) || (loadInfo.loadAddress->header.formatVersion != launch_cache::binary_format::kFormatVersion) )
635 return false;
636
637 launch_cache::ImageGroup dylibsGroup(loadInfo.cachedDylibsGroup);
638 uint32_t foundIndex;
639 const launch_cache::binary_format::Image* imageData = dylibsGroup.findImageByPath(dylibPathToFind, foundIndex);
640 return (imageData != nullptr);
641 }
642
643
644 } // namespace dyld3
645