2 * Copyright (c) 2017 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <mach/mach.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <mach-o/ldsyms.h>
#include <mach/shared_region.h>
#include <Availability.h>
#include <TargetConditionals.h>

#include "dyld_cache_format.h"
#include "SharedCacheRuntime.h"
50 #define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024
52 // should be in mach/shared_region.h
53 extern "C" int __shared_region_check_np(uint64_t* startaddress
);
54 extern "C" int __shared_region_map_and_slide_np(int fd
, uint32_t count
, const shared_file_mapping_np mappings
[], long slide
, const dyld_cache_slide_info2
* slideInfo
, size_t slideInfoSize
);
// Low-level syscall wrappers used instead of libc during early launch.
// NOTE(review): call sites use the qualified forms dyld::my_stat()/dyld::my_open(),
// so in the full file these declarations presumably live in namespace dyld — confirm.
extern int my_stat(const char* path, struct stat* buf);
extern int my_open(const char* path, int flag, int other);
extern void log(const char*, ...);
70 shared_file_mapping_np mappings
[3];
71 uint64_t slideInfoAddressUnslid
;
73 uint64_t cachedDylibsGroupUnslid
;
74 uint64_t sharedRegionStart
;
75 uint64_t sharedRegionSize
;
// Per-architecture cache file name suffix and expected header magic.
// The magic strings are space-padded to exactly 15 characters so the header
// field is a fixed width (note arm64_32 needs no padding).
#if __i386__
    #define ARCH_NAME            "i386"
    #define ARCH_CACHE_MAGIC     "dyld_v1    i386"
#elif __x86_64__
    #define ARCH_NAME            "x86_64"
    #define ARCH_CACHE_MAGIC     "dyld_v1  x86_64"
    #define ARCH_NAME_H          "x86_64h"
    #define ARCH_CACHE_MAGIC_H   "dyld_v1 x86_64h"
#elif __ARM_ARCH_7K__
    #define ARCH_NAME            "armv7k"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7k"
#elif __ARM_ARCH_7A__
    #define ARCH_NAME            "armv7"
    #define ARCH_CACHE_MAGIC     "dyld_v1   armv7"
#elif __ARM_ARCH_7S__
    #define ARCH_NAME            "armv7s"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7s"
#elif __arm64e__
    #define ARCH_NAME            "arm64e"
    #define ARCH_CACHE_MAGIC     "dyld_v1  arm64e"
#elif __arm64__
  #if __LP64__
    #define ARCH_NAME            "arm64"
    #define ARCH_CACHE_MAGIC     "dyld_v1   arm64"
  #else
    #define ARCH_NAME            "arm64_32"
    #define ARCH_CACHE_MAGIC     "dyld_v1arm64_32"
  #endif
#endif
114 static void rebaseChainV2(uint8_t* pageContent
, uint16_t startOffset
, uintptr_t slideAmount
, const dyld_cache_slide_info2
* slideInfo
)
116 const uintptr_t deltaMask
= (uintptr_t)(slideInfo
->delta_mask
);
117 const uintptr_t valueMask
= ~deltaMask
;
118 const uintptr_t valueAdd
= (uintptr_t)(slideInfo
->value_add
);
119 const unsigned deltaShift
= __builtin_ctzll(deltaMask
) - 2;
121 uint32_t pageOffset
= startOffset
;
123 while ( delta
!= 0 ) {
124 uint8_t* loc
= pageContent
+ pageOffset
;
125 uintptr_t rawValue
= *((uintptr_t*)loc
);
126 delta
= (uint32_t)((rawValue
& deltaMask
) >> deltaShift
);
127 uintptr_t value
= (rawValue
& valueMask
);
130 value
+= slideAmount
;
132 *((uintptr_t*)loc
) = value
;
133 //dyld::log(" pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
139 static void rebaseChainV4(uint8_t* pageContent
, uint16_t startOffset
, uintptr_t slideAmount
, const dyld_cache_slide_info4
* slideInfo
)
141 const uintptr_t deltaMask
= (uintptr_t)(slideInfo
->delta_mask
);
142 const uintptr_t valueMask
= ~deltaMask
;
143 const uintptr_t valueAdd
= (uintptr_t)(slideInfo
->value_add
);
144 const unsigned deltaShift
= __builtin_ctzll(deltaMask
) - 2;
146 uint32_t pageOffset
= startOffset
;
148 while ( delta
!= 0 ) {
149 uint8_t* loc
= pageContent
+ pageOffset
;
150 uintptr_t rawValue
= *((uintptr_t*)loc
);
151 delta
= (uint32_t)((rawValue
& deltaMask
) >> deltaShift
);
152 uintptr_t value
= (rawValue
& valueMask
);
153 if ( (value
& 0xFFFF8000) == 0 ) {
154 // small positive non-pointer, use as-is
156 else if ( (value
& 0x3FFF8000) == 0x3FFF8000 ) {
157 // small negative non-pointer
162 value
+= slideAmount
;
164 *((uintptr_t*)loc
) = value
;
165 //dyld::log(" pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
171 static void getCachePath(const SharedCacheOptions
& options
, size_t pathBufferSize
, char pathBuffer
[])
174 if ( options
.cacheDirOverride
!= nullptr ) {
175 strlcpy(pathBuffer
, options
.cacheDirOverride
, pathBufferSize
);
178 #if __IPHONE_OS_VERSION_MIN_REQUIRED
179 strlcpy(pathBuffer
, IPHONE_DYLD_SHARED_CACHE_DIR
, sizeof(IPHONE_DYLD_SHARED_CACHE_DIR
));
181 strlcpy(pathBuffer
, MACOSX_DYLD_SHARED_CACHE_DIR
, sizeof(MACOSX_DYLD_SHARED_CACHE_DIR
));
185 // append file component of cache file
186 if ( pathBuffer
[strlen(pathBuffer
)-1] != '/' )
187 strlcat(pathBuffer
, "/", pathBufferSize
);
188 #if __x86_64__ && !__IPHONE_OS_VERSION_MIN_REQUIRED
189 if ( options
.useHaswell
) {
190 size_t len
= strlen(pathBuffer
);
191 struct stat haswellStatBuf
;
192 strlcat(pathBuffer
, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H
, pathBufferSize
);
193 if ( dyld::my_stat(pathBuffer
, &haswellStatBuf
) == 0 )
195 // no haswell cache file, use regular x86_64 cache
196 pathBuffer
[len
] = '\0';
199 strlcat(pathBuffer
, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME
, pathBufferSize
);
201 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
202 // use .development cache if it exists
203 struct stat enableStatBuf
;
204 struct stat devCacheStatBuf
;
205 struct stat optCacheStatBuf
;
206 bool developmentDevice
= dyld3::internalInstall();
207 bool enableFileExists
= (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR
"enable-dylibs-to-override-cache", &enableStatBuf
) == 0);
208 bool devCacheExists
= (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT
, &devCacheStatBuf
) == 0);
209 bool optCacheExists
= (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME
, &optCacheStatBuf
) == 0);
210 if ( developmentDevice
&& ((enableFileExists
&& (enableStatBuf
.st_size
< ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE
) && devCacheExists
) || !optCacheExists
) )
211 strlcat(pathBuffer
, DYLD_SHARED_CACHE_DEVELOPMENT_EXT
, pathBufferSize
);
217 int openSharedCacheFile(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
)
219 getCachePath(options
, sizeof(results
->path
), results
->path
);
220 return dyld::my_open(results
->path
, O_RDONLY
, 0);
223 static bool validMagic(const SharedCacheOptions
& options
, const DyldSharedCache
* cache
)
225 if ( strcmp(cache
->header
.magic
, ARCH_CACHE_MAGIC
) == 0 )
229 if ( options
.useHaswell
) {
230 if ( strcmp(cache
->header
.magic
, ARCH_CACHE_MAGIC_H
) == 0 )
238 static bool validPlatform(const SharedCacheOptions
& options
, const DyldSharedCache
* cache
)
240 // grandfather in old cache that does not have platform in header
241 if ( cache
->header
.mappingOffset
< 0xE0 )
244 if ( cache
->header
.platform
!= (uint32_t)MachOFile::currentPlatform() )
247 #if TARGET_IPHONE_SIMULATOR
248 if ( cache
->header
.simulator
== 0 )
251 if ( cache
->header
.simulator
!= 0 )
259 static void verboseSharedCacheMappings(const shared_file_mapping_np mappings
[3])
261 for (int i
=0; i
< 3; ++i
) {
262 dyld::log(" 0x%08llX->0x%08llX init=%x, max=%x %s%s%s\n",
263 mappings
[i
].sfm_address
, mappings
[i
].sfm_address
+mappings
[i
].sfm_size
-1,
264 mappings
[i
].sfm_init_prot
, mappings
[i
].sfm_init_prot
,
265 ((mappings
[i
].sfm_init_prot
& VM_PROT_READ
) ? "read " : ""),
266 ((mappings
[i
].sfm_init_prot
& VM_PROT_WRITE
) ? "write " : ""),
267 ((mappings
[i
].sfm_init_prot
& VM_PROT_EXECUTE
) ? "execute " : ""));
271 static bool preflightCacheFile(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
, CacheInfo
* info
)
273 // find and open shared cache file
274 int fd
= openSharedCacheFile(options
, results
);
276 results
->errorMessage
= "shared cache file cannot be opened";
280 struct stat cacheStatBuf
;
281 if ( dyld::my_stat(results
->path
, &cacheStatBuf
) != 0 ) {
282 results
->errorMessage
= "shared cache file cannot be stat()ed";
286 size_t cacheFileLength
= (size_t)(cacheStatBuf
.st_size
);
288 // sanity check header and mappings
289 uint8_t firstPage
[0x4000];
290 if ( ::pread(fd
, firstPage
, sizeof(firstPage
), 0) != sizeof(firstPage
) ) {
291 results
->errorMessage
= "shared cache header could not be read";
295 const DyldSharedCache
* cache
= (DyldSharedCache
*)firstPage
;
296 if ( !validMagic(options
, cache
) ) {
297 results
->errorMessage
= "shared cache file has wrong magic";
301 if ( !validPlatform(options
, cache
) ) {
302 results
->errorMessage
= "shared cache file is for a different platform";
306 if ( (cache
->header
.mappingCount
!= 3) || (cache
->header
.mappingOffset
> 0x138) ) {
307 results
->errorMessage
= "shared cache file mappings are invalid";
311 const dyld_cache_mapping_info
* const fileMappings
= (dyld_cache_mapping_info
*)&firstPage
[cache
->header
.mappingOffset
];
312 if ( (fileMappings
[0].fileOffset
!= 0)
313 || ((fileMappings
[0].address
+ fileMappings
[0].size
) > fileMappings
[1].address
)
314 || ((fileMappings
[1].address
+ fileMappings
[1].size
) > fileMappings
[2].address
)
315 || ((fileMappings
[0].fileOffset
+ fileMappings
[0].size
) != fileMappings
[1].fileOffset
)
316 || ((fileMappings
[1].fileOffset
+ fileMappings
[1].size
) != fileMappings
[2].fileOffset
)
317 || ((cache
->header
.codeSignatureOffset
+ cache
->header
.codeSignatureSize
) != cacheFileLength
)
318 || (fileMappings
[0].maxProt
!= (VM_PROT_READ
|VM_PROT_EXECUTE
))
319 || (fileMappings
[1].maxProt
!= (VM_PROT_READ
|VM_PROT_WRITE
))
320 || (fileMappings
[2].maxProt
!= VM_PROT_READ
) ) {
321 results
->errorMessage
= "shared cache file mappings are invalid";
326 if ( cache
->header
.mappingOffset
>= 0xF8 ) {
327 if ( (fileMappings
[0].address
!= cache
->header
.sharedRegionStart
) || ((fileMappings
[2].address
+ fileMappings
[2].size
) > (cache
->header
.sharedRegionStart
+cache
->header
.sharedRegionSize
)) ) {
328 results
->errorMessage
= "shared cache file mapping addressses invalid";
334 if ( (fileMappings
[0].address
!= SHARED_REGION_BASE
) || ((fileMappings
[2].address
+ fileMappings
[2].size
) > (SHARED_REGION_BASE
+SHARED_REGION_SIZE
)) ) {
335 results
->errorMessage
= "shared cache file mapping addressses invalid";
341 // register code signature of cache file
342 fsignatures_t siginfo
;
343 siginfo
.fs_file_start
= 0; // cache always starts at beginning of file
344 siginfo
.fs_blob_start
= (void*)cache
->header
.codeSignatureOffset
;
345 siginfo
.fs_blob_size
= (size_t)(cache
->header
.codeSignatureSize
);
346 int result
= fcntl(fd
, F_ADDFILESIGS_RETURN
, &siginfo
);
347 if ( result
== -1 ) {
348 results
->errorMessage
= "code signature registration for shared cache failed";
353 // <rdar://problem/23188073> validate code signature covers entire shared cache
354 uint64_t codeSignedLength
= siginfo
.fs_file_start
;
355 if ( codeSignedLength
< cache
->header
.codeSignatureOffset
) {
356 results
->errorMessage
= "code signature does not cover entire shared cache file";
360 void* mappedData
= ::mmap(NULL
, sizeof(firstPage
), PROT_READ
|PROT_EXEC
, MAP_PRIVATE
, fd
, 0);
361 if ( mappedData
== MAP_FAILED
) {
362 results
->errorMessage
= "first page of shared cache not mmap()able";
366 if ( memcmp(mappedData
, firstPage
, sizeof(firstPage
)) != 0 ) {
367 results
->errorMessage
= "first page of mmap()ed shared cache not valid";
371 ::munmap(mappedData
, sizeof(firstPage
));
375 for (int i
=0; i
< 3; ++i
) {
376 info
->mappings
[i
].sfm_address
= fileMappings
[i
].address
;
377 info
->mappings
[i
].sfm_size
= fileMappings
[i
].size
;
378 info
->mappings
[i
].sfm_file_offset
= fileMappings
[i
].fileOffset
;
379 info
->mappings
[i
].sfm_max_prot
= fileMappings
[i
].maxProt
;
380 info
->mappings
[i
].sfm_init_prot
= fileMappings
[i
].initProt
;
382 info
->mappings
[1].sfm_max_prot
|= VM_PROT_SLIDE
;
383 info
->mappings
[1].sfm_init_prot
|= VM_PROT_SLIDE
;
384 info
->slideInfoAddressUnslid
= fileMappings
[2].address
+ cache
->header
.slideInfoOffset
- fileMappings
[2].fileOffset
;
385 info
->slideInfoSize
= (long)cache
->header
.slideInfoSize
;
386 if ( cache
->header
.mappingOffset
> 0xD0 )
387 info
->cachedDylibsGroupUnslid
= cache
->header
.dylibsImageGroupAddr
;
389 info
->cachedDylibsGroupUnslid
= 0;
390 if ( cache
->header
.mappingOffset
>= 0xf8 ) {
391 info
->sharedRegionStart
= cache
->header
.sharedRegionStart
;
392 info
->sharedRegionSize
= cache
->header
.sharedRegionSize
;
393 info
->maxSlide
= cache
->header
.maxSlide
;
396 info
->sharedRegionStart
= SHARED_REGION_BASE
;
397 info
->sharedRegionSize
= SHARED_REGION_SIZE
;
398 info
->maxSlide
= SHARED_REGION_SIZE
- (fileMappings
[2].address
+ fileMappings
[2].size
- fileMappings
[0].address
);
404 #if !TARGET_IPHONE_SIMULATOR
406 // update all __DATA pages with slide info
407 static bool rebaseDataPages(bool isVerbose
, CacheInfo
& info
, SharedCacheLoadInfo
* results
)
409 uint64_t dataPagesStart
= info
.mappings
[1].sfm_address
;
410 const dyld_cache_slide_info
* slideInfo
= nullptr;
411 if ( info
.slideInfoSize
!= 0 ) {
412 slideInfo
= (dyld_cache_slide_info
*)(info
.slideInfoAddressUnslid
+ results
->slide
);
414 const dyld_cache_slide_info
* slideInfoHeader
= (dyld_cache_slide_info
*)slideInfo
;
415 if ( slideInfoHeader
!= nullptr ) {
416 if ( slideInfoHeader
->version
== 2 ) {
417 const dyld_cache_slide_info2
* slideHeader
= (dyld_cache_slide_info2
*)slideInfo
;
418 const uint32_t page_size
= slideHeader
->page_size
;
419 const uint16_t* page_starts
= (uint16_t*)((long)(slideInfo
) + slideHeader
->page_starts_offset
);
420 const uint16_t* page_extras
= (uint16_t*)((long)(slideInfo
) + slideHeader
->page_extras_offset
);
421 for (int i
=0; i
< slideHeader
->page_starts_count
; ++i
) {
422 uint8_t* page
= (uint8_t*)(long)(dataPagesStart
+ (page_size
*i
));
423 uint16_t pageEntry
= page_starts
[i
];
424 //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
425 if ( pageEntry
== DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE
)
427 if ( pageEntry
& DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA
) {
428 uint16_t chainIndex
= (pageEntry
& 0x3FFF);
431 uint16_t pInfo
= page_extras
[chainIndex
];
432 uint16_t pageStartOffset
= (pInfo
& 0x3FFF)*4;
433 //dyld::log(" chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
434 rebaseChainV2(page
, pageStartOffset
, results
->slide
, slideHeader
);
435 done
= (pInfo
& DYLD_CACHE_SLIDE_PAGE_ATTR_END
);
440 uint32_t pageOffset
= pageEntry
* 4;
441 //dyld::log(" start pageOffset=0x%03X\n", pageOffset);
442 rebaseChainV2(page
, pageOffset
, results
->slide
, slideHeader
);
447 else if ( slideInfoHeader
->version
== 3 ) {
448 const dyld_cache_slide_info3
* slideHeader
= (dyld_cache_slide_info3
*)slideInfo
;
449 const uint32_t pageSize
= slideHeader
->page_size
;
450 for (int i
=0; i
< slideHeader
->page_starts_count
; ++i
) {
451 uint8_t* page
= (uint8_t*)(dataPagesStart
+ (pageSize
*i
));
452 uint64_t delta
= slideHeader
->page_starts
[i
];
453 if ( delta
== DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE
)
455 delta
= delta
/sizeof(uint64_t); // initial offset is byte based
456 dyld_cache_slide_pointer3
* loc
= (dyld_cache_slide_pointer3
*)page
;
459 delta
= loc
->plain
.offsetToNextPointer
;
460 if ( loc
->auth
.authenticated
) {
461 #if __has_feature(ptrauth_calls)
462 uint64_t target
= info
.sharedRegionStart
+ loc
->auth
.offsetFromSharedCacheBase
+ results
->slide
;
463 MachOLoaded::ChainedFixupPointerOnDisk ptr
;
464 ptr
.raw
= *((uint64_t*)loc
);
465 loc
->raw
= ptr
.signPointer(loc
, target
);
467 results
->errorMessage
= "invalid pointer kind in cache file";
472 loc
->raw
= MachOLoaded::ChainedFixupPointerOnDisk::signExtend51(loc
->plain
.pointerValue
) + results
->slide
;
474 } while (delta
!= 0);
478 else if ( slideInfoHeader
->version
== 4 ) {
479 const dyld_cache_slide_info4
* slideHeader
= (dyld_cache_slide_info4
*)slideInfo
;
480 const uint32_t page_size
= slideHeader
->page_size
;
481 const uint16_t* page_starts
= (uint16_t*)((long)(slideInfo
) + slideHeader
->page_starts_offset
);
482 const uint16_t* page_extras
= (uint16_t*)((long)(slideInfo
) + slideHeader
->page_extras_offset
);
483 for (int i
=0; i
< slideHeader
->page_starts_count
; ++i
) {
484 uint8_t* page
= (uint8_t*)(long)(dataPagesStart
+ (page_size
*i
));
485 uint16_t pageEntry
= page_starts
[i
];
486 //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
487 if ( pageEntry
== DYLD_CACHE_SLIDE4_PAGE_NO_REBASE
)
489 if ( pageEntry
& DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA
) {
490 uint16_t chainIndex
= (pageEntry
& DYLD_CACHE_SLIDE4_PAGE_INDEX
);
493 uint16_t pInfo
= page_extras
[chainIndex
];
494 uint16_t pageStartOffset
= (pInfo
& DYLD_CACHE_SLIDE4_PAGE_INDEX
)*4;
495 //dyld::log(" chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
496 rebaseChainV4(page
, pageStartOffset
, results
->slide
, slideHeader
);
497 done
= (pInfo
& DYLD_CACHE_SLIDE4_PAGE_EXTRA_END
);
502 uint32_t pageOffset
= pageEntry
* 4;
503 //dyld::log(" start pageOffset=0x%03X\n", pageOffset);
504 rebaseChainV4(page
, pageOffset
, results
->slide
, slideHeader
);
510 results
->errorMessage
= "invalid slide info in cache file";
517 static bool reuseExistingCache(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
)
519 uint64_t cacheBaseAddress
;
521 if ( syscall(294, &cacheBaseAddress
) == 0 ) {
523 if ( __shared_region_check_np(&cacheBaseAddress
) == 0 ) {
525 const DyldSharedCache
* existingCache
= (DyldSharedCache
*)cacheBaseAddress
;
526 if ( validMagic(options
, existingCache
) ) {
527 const dyld_cache_mapping_info
* const fileMappings
= (dyld_cache_mapping_info
*)(cacheBaseAddress
+ existingCache
->header
.mappingOffset
);
528 results
->loadAddress
= existingCache
;
529 results
->slide
= (long)(cacheBaseAddress
- fileMappings
[0].address
);
530 // we don't know the path this cache was previously loaded from, assume default
531 getCachePath(options
, sizeof(results
->path
), results
->path
);
532 if ( options
.verbose
) {
533 const shared_file_mapping_np
* const mappings
= (shared_file_mapping_np
*)(cacheBaseAddress
+ existingCache
->header
.mappingOffset
);
534 dyld::log("re-using existing shared cache (%s):\n", results
->path
);
535 shared_file_mapping_np slidMappings
[3];
536 for (int i
=0; i
< 3; ++i
) {
537 slidMappings
[i
] = mappings
[i
];
538 slidMappings
[i
].sfm_address
+= results
->slide
;
540 verboseSharedCacheMappings(slidMappings
);
544 results
->errorMessage
= "existing shared cache in memory is not compatible";
551 static long pickCacheASLR(CacheInfo
& info
)
553 // choose new random slide
554 #if __IPHONE_OS_VERSION_MIN_REQUIRED
555 // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
556 long slide
= ((arc4random() % info
.maxSlide
) & (-16384));
558 long slide
= ((arc4random() % info
.maxSlide
) & (-4096));
561 // <rdar://problem/32031197> respect -disable_aslr boot-arg
562 if ( dyld3::bootArgsContains("-disable_aslr") )
566 for (uint32_t i
=0; i
< 3; ++i
) {
567 info
.mappings
[i
].sfm_address
+= slide
;
573 static bool mapCacheSystemWide(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
)
576 if ( !preflightCacheFile(options
, results
, &info
) )
579 const dyld_cache_slide_info2
* slideInfo
= nullptr;
580 if ( info
.slideInfoSize
!= 0 ) {
581 results
->slide
= pickCacheASLR(info
);
582 slideInfo
= (dyld_cache_slide_info2
*)(info
.slideInfoAddressUnslid
+ results
->slide
);
585 int result
= __shared_region_map_and_slide_np(info
.fd
, 3, info
.mappings
, results
->slide
, slideInfo
, info
.slideInfoSize
);
588 results
->loadAddress
= (const DyldSharedCache
*)(info
.mappings
[0].sfm_address
);
591 // could be another process beat us to it
592 if ( reuseExistingCache(options
, results
) )
594 // if cache does not exist, then really is an error
595 if ( results
->errorMessage
== nullptr )
596 results
->errorMessage
= "syscall to map cache into shared region failed";
600 if ( options
.verbose
) {
601 dyld::log("mapped dyld cache file system wide: %s\n", results
->path
);
602 verboseSharedCacheMappings(info
.mappings
);
606 #endif // TARGET_IPHONE_SIMULATOR
608 static bool mapCachePrivate(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
)
610 // open and validate cache file
612 if ( !preflightCacheFile(options
, results
, &info
) )
615 // compute ALSR slide
617 #if !TARGET_IPHONE_SIMULATOR // simulator caches do not support sliding
618 if ( info
.slideInfoSize
!= 0 ) {
619 results
->slide
= pickCacheASLR(info
);
622 results
->loadAddress
= (const DyldSharedCache
*)(info
.mappings
[0].sfm_address
);
624 // remove the shared region sub-map
625 vm_deallocate(mach_task_self(), (vm_address_t
)info
.sharedRegionStart
, (vm_size_t
)info
.sharedRegionSize
);
627 // map cache just for this process with mmap()
628 for (int i
=0; i
< 3; ++i
) {
629 void* mmapAddress
= (void*)(uintptr_t)(info
.mappings
[i
].sfm_address
);
630 size_t size
= (size_t)(info
.mappings
[i
].sfm_size
);
631 //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
633 if ( info
.mappings
[i
].sfm_init_prot
& VM_PROT_EXECUTE
)
634 protection
|= PROT_EXEC
;
635 if ( info
.mappings
[i
].sfm_init_prot
& VM_PROT_READ
)
636 protection
|= PROT_READ
;
637 if ( info
.mappings
[i
].sfm_init_prot
& VM_PROT_WRITE
)
638 protection
|= PROT_WRITE
;
639 off_t offset
= info
.mappings
[i
].sfm_file_offset
;
640 if ( ::mmap(mmapAddress
, size
, protection
, MAP_FIXED
| MAP_PRIVATE
, info
.fd
, offset
) != mmapAddress
) {
641 // failed to map some chunk of this shared cache file
642 // clear shared region
643 vm_deallocate(mach_task_self(), (vm_address_t
)info
.sharedRegionStart
, (vm_size_t
)info
.sharedRegionSize
);
645 results
->loadAddress
= nullptr;
646 results
->errorMessage
= "could not mmap() part of dyld cache";
651 #if TARGET_IPHONE_SIMULATOR // simulator caches do not support sliding
654 bool success
= rebaseDataPages(options
.verbose
, info
, results
);
656 if ( options
.verbose
) {
657 dyld::log("mapped dyld cache file private to process (%s):\n", results
->path
);
658 verboseSharedCacheMappings(info
.mappings
);
666 bool loadDyldCache(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
)
668 results
->loadAddress
= 0;
670 results
->errorMessage
= nullptr;
672 #if TARGET_IPHONE_SIMULATOR
673 // simulator only supports mmap()ing cache privately into process
674 return mapCachePrivate(options
, results
);
676 if ( options
.forcePrivate
) {
677 // mmap cache into this process only
678 return mapCachePrivate(options
, results
);
681 // fast path: when cache is already mapped into shared region
682 bool hasError
= false;
683 if ( reuseExistingCache(options
, results
) ) {
684 hasError
= (results
->errorMessage
!= nullptr);
686 // slow path: this is first process to load cache
687 hasError
= mapCacheSystemWide(options
, results
);
695 bool findInSharedCacheImage(const SharedCacheLoadInfo
& loadInfo
, const char* dylibPathToFind
, SharedCacheFindDylibResults
* results
)
697 if ( loadInfo
.loadAddress
== nullptr )
700 if ( loadInfo
.loadAddress
->header
.formatVersion
!= dyld3::closure::kFormatVersion
) {
701 // support for older cache with a different Image* format
702 #if __IPHONE_OS_VERSION_MIN_REQUIRED
704 for (const char* s
=dylibPathToFind
; *s
!= '\0'; ++s
)
707 const dyld_cache_image_info
* const start
= (dyld_cache_image_info
*)((uint8_t*)loadInfo
.loadAddress
+ loadInfo
.loadAddress
->header
.imagesOffset
);
708 const dyld_cache_image_info
* const end
= &start
[loadInfo
.loadAddress
->header
.imagesCount
];
709 for (const dyld_cache_image_info
* p
= start
; p
!= end
; ++p
) {
710 #if __IPHONE_OS_VERSION_MIN_REQUIRED
711 // on iOS, inode is used to hold hash of path
712 if ( (p
->modTime
== 0) && (p
->inode
!= hash
) )
715 const char* aPath
= (char*)loadInfo
.loadAddress
+ p
->pathFileOffset
;
716 if ( strcmp(aPath
, dylibPathToFind
) == 0 ) {
717 results
->mhInCache
= (const mach_header
*)(p
->address
+loadInfo
.slide
);
718 results
->pathInCache
= aPath
;
719 results
->slideInCache
= loadInfo
.slide
;
720 results
->image
= nullptr;
727 const dyld3::closure::ImageArray
* images
= loadInfo
.loadAddress
->cachedDylibsImageArray();
728 results
->image
= nullptr;
730 if ( loadInfo
.loadAddress
->hasImagePath(dylibPathToFind
, imageIndex
) ) {
731 results
->image
= images
->imageForNum(imageIndex
+1);
733 #if __MAC_OS_X_VERSION_MIN_REQUIRED
735 // <rdar://problem/32740215> handle symlink to cached dylib
736 if ( loadInfo
.loadAddress
->header
.dylibsExpectedOnDisk
) {
738 if ( dyld::my_stat(dylibPathToFind
, &statBuf
) == 0 ) {
739 // on macOS we store the inode and mtime of each dylib in the cache in the dyld_cache_image_info array
740 const dyld_cache_image_info
* const start
= (dyld_cache_image_info
*)((uint8_t*)loadInfo
.loadAddress
+ loadInfo
.loadAddress
->header
.imagesOffset
);
741 const dyld_cache_image_info
* const end
= &start
[loadInfo
.loadAddress
->header
.imagesCount
];
742 for (const dyld_cache_image_info
* p
= start
; p
!= end
; ++p
) {
743 if ( (p
->inode
== statBuf
.st_ino
) && (p
->modTime
== statBuf
.st_mtime
) ) {
744 imageIndex
= (uint32_t)(p
- start
);
745 results
->image
= images
->imageForNum(imageIndex
+1);
752 char resolvedPath
[PATH_MAX
];
753 if ( realpath(dylibPathToFind
, resolvedPath
) != nullptr ) {
754 if ( loadInfo
.loadAddress
->hasImagePath(resolvedPath
, imageIndex
) ) {
755 results
->image
= images
->imageForNum(imageIndex
+1);
761 if ( results
->image
== nullptr )
764 results
->mhInCache
= (const mach_header
*)((uintptr_t)loadInfo
.loadAddress
+ results
->image
->cacheOffset());
765 results
->pathInCache
= results
->image
->path();
766 results
->slideInCache
= loadInfo
.slide
;
771 bool pathIsInSharedCacheImage(const SharedCacheLoadInfo
& loadInfo
, const char* dylibPathToFind
)
773 if ( (loadInfo
.loadAddress
== nullptr) || (loadInfo
.loadAddress
->header
.formatVersion
!= closure::kFormatVersion
) )
777 return loadInfo
.loadAddress
->hasImagePath(dylibPathToFind
, imageIndex
);