2 * Copyright (c) 2017 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
30 #include <sys/param.h>
31 #include <sys/types.h>
33 #include <sys/syscall.h>
34 #include <sys/syslog.h>
35 #include <sys/sysctl.h>
37 #include <mach/mach.h>
38 #include <mach-o/fat.h>
39 #include <mach-o/loader.h>
40 #include <mach-o/ldsyms.h>
41 #include <mach/shared_region.h>
42 #include <mach/mach.h>
43 #include <Availability.h>
44 #include <TargetConditionals.h>
46 #include "dyld_cache_format.h"
47 #include "SharedCacheRuntime.h"
51 #define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024
53 // should be in mach/shared_region.h
54 extern "C" int __shared_region_check_np(uint64_t* startaddress
);
55 extern "C" int __shared_region_map_and_slide_np(int fd
, uint32_t count
, const shared_file_mapping_np mappings
[], long slide
, const dyld_cache_slide_info2
* slideInfo
, size_t slideInfoSize
);
59 extern int my_stat(const char* path
, struct stat
* buf
);
60 extern int my_open(const char* path
, int flag
, int other
);
61 extern void log(const char*, ...);
71 shared_file_mapping_np mappings
[3];
72 uint64_t slideInfoAddressUnslid
;
74 uint64_t sharedRegionStart
;
75 uint64_t sharedRegionSize
;
// Per-architecture cache file name suffix and the 16-byte magic expected at
// the start of the cache header.  The magic pads the arch name so the whole
// string (including trailing NUL) is exactly 16 bytes, matching the fixed-size
// magic field in dyld_cache_header.
#if __i386__
    #define ARCH_NAME            "i386"
    #define ARCH_CACHE_MAGIC     "dyld_v1    i386"
#elif __x86_64__
    #define ARCH_NAME            "x86_64"
    #define ARCH_CACHE_MAGIC     "dyld_v1  x86_64"
    #define ARCH_NAME_H          "x86_64h"
    #define ARCH_CACHE_MAGIC_H   "dyld_v1 x86_64h"
#elif __ARM_ARCH_7K__
    #define ARCH_NAME            "armv7k"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7k"
#elif __ARM_ARCH_7A__
    #define ARCH_NAME            "armv7"
    #define ARCH_CACHE_MAGIC     "dyld_v1   armv7"
#elif __ARM_ARCH_7S__
    #define ARCH_NAME            "armv7s"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7s"
#elif __arm64e__
    #define ARCH_NAME            "arm64e"
    #define ARCH_CACHE_MAGIC     "dyld_v1  arm64e"
#elif __arm64__
  #if __LP64__
    #define ARCH_NAME            "arm64"
    #define ARCH_CACHE_MAGIC     "dyld_v1   arm64"
  #else
    // 8-char arch name exactly fills the field, so no padding space
    #define ARCH_NAME            "arm64_32"
    #define ARCH_CACHE_MAGIC     "dyld_v1arm64_32"
  #endif
#endif
113 #if !TARGET_OS_SIMULATOR
114 static void rebaseChainV2(uint8_t* pageContent
, uint16_t startOffset
, uintptr_t slideAmount
, const dyld_cache_slide_info2
* slideInfo
)
116 const uintptr_t deltaMask
= (uintptr_t)(slideInfo
->delta_mask
);
117 const uintptr_t valueMask
= ~deltaMask
;
118 const uintptr_t valueAdd
= (uintptr_t)(slideInfo
->value_add
);
119 const unsigned deltaShift
= __builtin_ctzll(deltaMask
) - 2;
121 uint32_t pageOffset
= startOffset
;
123 while ( delta
!= 0 ) {
124 uint8_t* loc
= pageContent
+ pageOffset
;
125 uintptr_t rawValue
= *((uintptr_t*)loc
);
126 delta
= (uint32_t)((rawValue
& deltaMask
) >> deltaShift
);
127 uintptr_t value
= (rawValue
& valueMask
);
130 value
+= slideAmount
;
132 *((uintptr_t*)loc
) = value
;
133 //dyld::log(" pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
#if !__LP64__ && !TARGET_OS_SIMULATOR
// Walks one v4 (32-bit) rebase chain starting at startOffset within pageContent.
// Like v2, but small integers may be encoded in-line: small positives are kept
// as-is and small negatives are sign-extended instead of being slid.
static void rebaseChainV4(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info4* slideInfo)
{
    const uintptr_t   deltaMask    = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t   valueMask    = ~deltaMask;
    const uintptr_t   valueAdd     = (uintptr_t)(slideInfo->value_add);
    // delta is stored in units of 4 bytes, hence the -2
    const unsigned    deltaShift   = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( (value & 0xFFFF8000) == 0 ) {
            // small positive non-pointer, use as-is
        }
        else if ( (value & 0x3FFF8000) == 0x3FFF8000 ) {
            // small negative non-pointer
            value |= 0xC0000000;
        }
        else {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("         pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif
172 static void getCachePath(const SharedCacheOptions
& options
, size_t pathBufferSize
, char pathBuffer
[])
175 if ( options
.cacheDirOverride
!= nullptr ) {
176 strlcpy(pathBuffer
, options
.cacheDirOverride
, pathBufferSize
);
179 #if __IPHONE_OS_VERSION_MIN_REQUIRED
180 strlcpy(pathBuffer
, IPHONE_DYLD_SHARED_CACHE_DIR
, sizeof(IPHONE_DYLD_SHARED_CACHE_DIR
));
182 strlcpy(pathBuffer
, MACOSX_DYLD_SHARED_CACHE_DIR
, sizeof(MACOSX_DYLD_SHARED_CACHE_DIR
));
186 // append file component of cache file
187 if ( pathBuffer
[strlen(pathBuffer
)-1] != '/' )
188 strlcat(pathBuffer
, "/", pathBufferSize
);
189 #if __x86_64__ && !__IPHONE_OS_VERSION_MIN_REQUIRED
190 if ( options
.useHaswell
) {
191 size_t len
= strlen(pathBuffer
);
192 struct stat haswellStatBuf
;
193 strlcat(pathBuffer
, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H
, pathBufferSize
);
194 if ( dyld::my_stat(pathBuffer
, &haswellStatBuf
) == 0 )
196 // no haswell cache file, use regular x86_64 cache
197 pathBuffer
[len
] = '\0';
201 strlcat(pathBuffer
, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME
, pathBufferSize
);
203 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_OS_SIMULATOR
204 // use .development cache if it exists
205 struct stat enableStatBuf
;
206 struct stat devCacheStatBuf
;
207 struct stat optCacheStatBuf
;
208 bool developmentDevice
= dyld3::internalInstall();
209 bool enableFileExists
= (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR
"enable-dylibs-to-override-cache", &enableStatBuf
) == 0);
210 bool devCacheExists
= (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT
, &devCacheStatBuf
) == 0);
211 bool optCacheExists
= (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME
, &optCacheStatBuf
) == 0);
212 if ( !BootArgs::forceCustomerCache() && developmentDevice
&& ((enableFileExists
&& (enableStatBuf
.st_size
< ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE
) && devCacheExists
) || !optCacheExists
) )
213 strlcat(pathBuffer
, DYLD_SHARED_CACHE_DEVELOPMENT_EXT
, pathBufferSize
);
219 int openSharedCacheFile(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
)
221 getCachePath(options
, sizeof(results
->path
), results
->path
);
222 return dyld::my_open(results
->path
, O_RDONLY
, 0);
225 static bool validMagic(const SharedCacheOptions
& options
, const DyldSharedCache
* cache
)
227 if ( strcmp(cache
->header
.magic
, ARCH_CACHE_MAGIC
) == 0 )
231 if ( options
.useHaswell
) {
232 if ( strcmp(cache
->header
.magic
, ARCH_CACHE_MAGIC_H
) == 0 )
240 static bool validPlatform(const SharedCacheOptions
& options
, const DyldSharedCache
* cache
)
242 // grandfather in old cache that does not have platform in header
243 if ( cache
->header
.mappingOffset
< 0xE0 )
246 if ( cache
->header
.platform
!= (uint32_t)MachOFile::currentPlatform() )
249 #if TARGET_OS_SIMULATOR
250 if ( cache
->header
.simulator
== 0 )
253 if ( cache
->header
.simulator
!= 0 )
260 #if !TARGET_OS_SIMULATOR
261 static void verboseSharedCacheMappings(const shared_file_mapping_np mappings
[3])
263 for (int i
=0; i
< 3; ++i
) {
264 dyld::log(" 0x%08llX->0x%08llX init=%x, max=%x %s%s%s\n",
265 mappings
[i
].sfm_address
, mappings
[i
].sfm_address
+mappings
[i
].sfm_size
-1,
266 mappings
[i
].sfm_init_prot
, mappings
[i
].sfm_init_prot
,
267 ((mappings
[i
].sfm_init_prot
& VM_PROT_READ
) ? "read " : ""),
268 ((mappings
[i
].sfm_init_prot
& VM_PROT_WRITE
) ? "write " : ""),
269 ((mappings
[i
].sfm_init_prot
& VM_PROT_EXECUTE
) ? "execute " : ""));
274 static bool preflightCacheFile(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
, CacheInfo
* info
)
277 // find and open shared cache file
278 int fd
= openSharedCacheFile(options
, results
);
280 results
->errorMessage
= "shared cache file open() failed";
284 struct stat cacheStatBuf
;
285 if ( dyld::my_stat(results
->path
, &cacheStatBuf
) != 0 ) {
286 results
->errorMessage
= "shared cache file stat() failed";
290 size_t cacheFileLength
= (size_t)(cacheStatBuf
.st_size
);
292 // sanity check header and mappings
293 uint8_t firstPage
[0x4000];
294 if ( ::pread(fd
, firstPage
, sizeof(firstPage
), 0) != sizeof(firstPage
) ) {
295 results
->errorMessage
= "shared cache file pread() failed";
299 const DyldSharedCache
* cache
= (DyldSharedCache
*)firstPage
;
300 if ( !validMagic(options
, cache
) ) {
301 results
->errorMessage
= "shared cache file has wrong magic";
305 if ( !validPlatform(options
, cache
) ) {
306 results
->errorMessage
= "shared cache file is for a different platform";
310 if ( (cache
->header
.mappingCount
!= 3) || (cache
->header
.mappingOffset
> 0x148) ) {
311 results
->errorMessage
= "shared cache file mappings are invalid";
315 const dyld_cache_mapping_info
* const fileMappings
= (dyld_cache_mapping_info
*)&firstPage
[cache
->header
.mappingOffset
];
316 if ( (fileMappings
[0].fileOffset
!= 0)
317 || ((fileMappings
[0].address
+ fileMappings
[0].size
) > fileMappings
[1].address
)
318 || ((fileMappings
[1].address
+ fileMappings
[1].size
) > fileMappings
[2].address
)
319 || ((fileMappings
[0].fileOffset
+ fileMappings
[0].size
) != fileMappings
[1].fileOffset
)
320 || ((fileMappings
[1].fileOffset
+ fileMappings
[1].size
) != fileMappings
[2].fileOffset
)
321 || ((cache
->header
.codeSignatureOffset
+ cache
->header
.codeSignatureSize
) != cacheFileLength
)
322 || (fileMappings
[0].maxProt
!= (VM_PROT_READ
|VM_PROT_EXECUTE
))
323 || (fileMappings
[1].maxProt
!= (VM_PROT_READ
|VM_PROT_WRITE
))
324 || (fileMappings
[2].maxProt
!= VM_PROT_READ
) ) {
325 results
->errorMessage
= "shared cache file mappings are invalid";
330 if ( cache
->header
.mappingOffset
>= 0xF8 ) {
331 if ( (fileMappings
[0].address
!= cache
->header
.sharedRegionStart
) || ((fileMappings
[2].address
+ fileMappings
[2].size
) > (cache
->header
.sharedRegionStart
+cache
->header
.sharedRegionSize
)) ) {
332 results
->errorMessage
= "shared cache file mapping addressses invalid";
338 if ( (fileMappings
[0].address
!= SHARED_REGION_BASE
) || ((fileMappings
[2].address
+ fileMappings
[2].size
) > (SHARED_REGION_BASE
+SHARED_REGION_SIZE
)) ) {
339 results
->errorMessage
= "shared cache file mapping addressses invalid";
345 // register code signature of cache file
346 fsignatures_t siginfo
;
347 siginfo
.fs_file_start
= 0; // cache always starts at beginning of file
348 siginfo
.fs_blob_start
= (void*)cache
->header
.codeSignatureOffset
;
349 siginfo
.fs_blob_size
= (size_t)(cache
->header
.codeSignatureSize
);
350 int result
= fcntl(fd
, F_ADDFILESIGS_RETURN
, &siginfo
);
351 if ( result
== -1 ) {
352 results
->errorMessage
= "code signature registration for shared cache failed";
357 // <rdar://problem/23188073> validate code signature covers entire shared cache
358 uint64_t codeSignedLength
= siginfo
.fs_file_start
;
359 if ( codeSignedLength
< cache
->header
.codeSignatureOffset
) {
360 results
->errorMessage
= "code signature does not cover entire shared cache file";
364 void* mappedData
= ::mmap(NULL
, sizeof(firstPage
), PROT_READ
|PROT_EXEC
, MAP_PRIVATE
, fd
, 0);
365 if ( mappedData
== MAP_FAILED
) {
366 results
->errorMessage
= "first page of shared cache not mmap()able";
370 if ( memcmp(mappedData
, firstPage
, sizeof(firstPage
)) != 0 ) {
371 results
->errorMessage
= "first page of mmap()ed shared cache not valid";
375 ::munmap(mappedData
, sizeof(firstPage
));
379 for (int i
=0; i
< 3; ++i
) {
380 info
->mappings
[i
].sfm_address
= fileMappings
[i
].address
;
381 info
->mappings
[i
].sfm_size
= fileMappings
[i
].size
;
382 info
->mappings
[i
].sfm_file_offset
= fileMappings
[i
].fileOffset
;
383 info
->mappings
[i
].sfm_max_prot
= fileMappings
[i
].maxProt
;
384 info
->mappings
[i
].sfm_init_prot
= fileMappings
[i
].initProt
;
386 info
->mappings
[1].sfm_max_prot
|= VM_PROT_SLIDE
;
387 info
->mappings
[1].sfm_init_prot
|= VM_PROT_SLIDE
;
388 info
->slideInfoAddressUnslid
= fileMappings
[2].address
+ cache
->header
.slideInfoOffset
- fileMappings
[2].fileOffset
;
389 info
->slideInfoSize
= (long)cache
->header
.slideInfoSize
;
390 if ( cache
->header
.mappingOffset
>= 0xf8 ) {
391 info
->sharedRegionStart
= cache
->header
.sharedRegionStart
;
392 info
->sharedRegionSize
= cache
->header
.sharedRegionSize
;
393 info
->maxSlide
= cache
->header
.maxSlide
;
396 info
->sharedRegionStart
= SHARED_REGION_BASE
;
397 info
->sharedRegionSize
= SHARED_REGION_SIZE
;
398 info
->maxSlide
= SHARED_REGION_SIZE
- (fileMappings
[2].address
+ fileMappings
[2].size
- fileMappings
[0].address
);
404 #if !TARGET_OS_SIMULATOR
406 // update all __DATA pages with slide info
407 static bool rebaseDataPages(bool isVerbose
, CacheInfo
& info
, SharedCacheLoadInfo
* results
)
409 uint64_t dataPagesStart
= info
.mappings
[1].sfm_address
;
410 const dyld_cache_slide_info
* slideInfo
= nullptr;
411 if ( info
.slideInfoSize
!= 0 ) {
412 slideInfo
= (dyld_cache_slide_info
*)(info
.slideInfoAddressUnslid
+ results
->slide
);
414 const dyld_cache_slide_info
* slideInfoHeader
= (dyld_cache_slide_info
*)slideInfo
;
415 if ( slideInfoHeader
!= nullptr ) {
416 if ( slideInfoHeader
->version
== 2 ) {
417 const dyld_cache_slide_info2
* slideHeader
= (dyld_cache_slide_info2
*)slideInfo
;
418 const uint32_t page_size
= slideHeader
->page_size
;
419 const uint16_t* page_starts
= (uint16_t*)((long)(slideInfo
) + slideHeader
->page_starts_offset
);
420 const uint16_t* page_extras
= (uint16_t*)((long)(slideInfo
) + slideHeader
->page_extras_offset
);
421 for (int i
=0; i
< slideHeader
->page_starts_count
; ++i
) {
422 uint8_t* page
= (uint8_t*)(long)(dataPagesStart
+ (page_size
*i
));
423 uint16_t pageEntry
= page_starts
[i
];
424 //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
425 if ( pageEntry
== DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE
)
427 if ( pageEntry
& DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA
) {
428 uint16_t chainIndex
= (pageEntry
& 0x3FFF);
431 uint16_t pInfo
= page_extras
[chainIndex
];
432 uint16_t pageStartOffset
= (pInfo
& 0x3FFF)*4;
433 //dyld::log(" chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
434 rebaseChainV2(page
, pageStartOffset
, results
->slide
, slideHeader
);
435 done
= (pInfo
& DYLD_CACHE_SLIDE_PAGE_ATTR_END
);
440 uint32_t pageOffset
= pageEntry
* 4;
441 //dyld::log(" start pageOffset=0x%03X\n", pageOffset);
442 rebaseChainV2(page
, pageOffset
, results
->slide
, slideHeader
);
447 else if ( slideInfoHeader
->version
== 3 ) {
448 const dyld_cache_slide_info3
* slideHeader
= (dyld_cache_slide_info3
*)slideInfo
;
449 const uint32_t pageSize
= slideHeader
->page_size
;
450 for (int i
=0; i
< slideHeader
->page_starts_count
; ++i
) {
451 uint8_t* page
= (uint8_t*)(dataPagesStart
+ (pageSize
*i
));
452 uint64_t delta
= slideHeader
->page_starts
[i
];
453 if ( delta
== DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE
)
455 delta
= delta
/sizeof(uint64_t); // initial offset is byte based
456 dyld_cache_slide_pointer3
* loc
= (dyld_cache_slide_pointer3
*)page
;
459 delta
= loc
->plain
.offsetToNextPointer
;
460 if ( loc
->auth
.authenticated
) {
461 #if __has_feature(ptrauth_calls)
462 uint64_t target
= info
.sharedRegionStart
+ loc
->auth
.offsetFromSharedCacheBase
+ results
->slide
;
463 MachOLoaded::ChainedFixupPointerOnDisk ptr
;
464 ptr
.raw64
= *((uint64_t*)loc
);
465 loc
->raw
= ptr
.arm64e
.signPointer(loc
, target
);
467 results
->errorMessage
= "invalid pointer kind in cache file";
472 MachOLoaded::ChainedFixupPointerOnDisk ptr
;
473 ptr
.raw64
= *((uint64_t*)loc
);
474 loc
->raw
= ptr
.arm64e
.unpackTarget() + results
->slide
;
476 } while (delta
!= 0);
480 else if ( slideInfoHeader
->version
== 4 ) {
481 const dyld_cache_slide_info4
* slideHeader
= (dyld_cache_slide_info4
*)slideInfo
;
482 const uint32_t page_size
= slideHeader
->page_size
;
483 const uint16_t* page_starts
= (uint16_t*)((long)(slideInfo
) + slideHeader
->page_starts_offset
);
484 const uint16_t* page_extras
= (uint16_t*)((long)(slideInfo
) + slideHeader
->page_extras_offset
);
485 for (int i
=0; i
< slideHeader
->page_starts_count
; ++i
) {
486 uint8_t* page
= (uint8_t*)(long)(dataPagesStart
+ (page_size
*i
));
487 uint16_t pageEntry
= page_starts
[i
];
488 //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
489 if ( pageEntry
== DYLD_CACHE_SLIDE4_PAGE_NO_REBASE
)
491 if ( pageEntry
& DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA
) {
492 uint16_t chainIndex
= (pageEntry
& DYLD_CACHE_SLIDE4_PAGE_INDEX
);
495 uint16_t pInfo
= page_extras
[chainIndex
];
496 uint16_t pageStartOffset
= (pInfo
& DYLD_CACHE_SLIDE4_PAGE_INDEX
)*4;
497 //dyld::log(" chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
498 rebaseChainV4(page
, pageStartOffset
, results
->slide
, slideHeader
);
499 done
= (pInfo
& DYLD_CACHE_SLIDE4_PAGE_EXTRA_END
);
504 uint32_t pageOffset
= pageEntry
* 4;
505 //dyld::log(" start pageOffset=0x%03X\n", pageOffset);
506 rebaseChainV4(page
, pageOffset
, results
->slide
, slideHeader
);
512 results
->errorMessage
= "invalid slide info in cache file";
519 static bool reuseExistingCache(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
)
521 uint64_t cacheBaseAddress
;
523 if ( syscall(294, &cacheBaseAddress
) == 0 ) {
525 if ( __shared_region_check_np(&cacheBaseAddress
) == 0 ) {
527 const DyldSharedCache
* existingCache
= (DyldSharedCache
*)cacheBaseAddress
;
528 if ( validMagic(options
, existingCache
) ) {
529 const dyld_cache_mapping_info
* const fileMappings
= (dyld_cache_mapping_info
*)(cacheBaseAddress
+ existingCache
->header
.mappingOffset
);
530 results
->loadAddress
= existingCache
;
531 results
->slide
= (long)(cacheBaseAddress
- fileMappings
[0].address
);
532 // we don't know the path this cache was previously loaded from, assume default
533 getCachePath(options
, sizeof(results
->path
), results
->path
);
534 if ( options
.verbose
) {
535 const shared_file_mapping_np
* const mappings
= (shared_file_mapping_np
*)(cacheBaseAddress
+ existingCache
->header
.mappingOffset
);
536 dyld::log("re-using existing shared cache (%s):\n", results
->path
);
537 shared_file_mapping_np slidMappings
[3];
538 for (int i
=0; i
< 3; ++i
) {
539 slidMappings
[i
] = mappings
[i
];
540 slidMappings
[i
].sfm_address
+= results
->slide
;
542 verboseSharedCacheMappings(slidMappings
);
546 results
->errorMessage
= "existing shared cache in memory is not compatible";
553 static long pickCacheASLR(CacheInfo
& info
)
555 // choose new random slide
556 #if __IPHONE_OS_VERSION_MIN_REQUIRED
557 // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
558 long slide
= ((arc4random() % info
.maxSlide
) & (-16384));
560 long slide
= ((arc4random() % info
.maxSlide
) & (-4096));
563 // <rdar://problem/32031197> respect -disable_aslr boot-arg
564 if ( BootArgs::contains("-disable_aslr") )
568 for (uint32_t i
=0; i
< 3; ++i
) {
569 info
.mappings
[i
].sfm_address
+= slide
;
575 static bool mapCacheSystemWide(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
)
578 if ( !preflightCacheFile(options
, results
, &info
) )
581 const dyld_cache_slide_info2
* slideInfo
= nullptr;
582 if ( info
.slideInfoSize
!= 0 ) {
583 results
->slide
= pickCacheASLR(info
);
584 slideInfo
= (dyld_cache_slide_info2
*)(info
.slideInfoAddressUnslid
+ results
->slide
);
587 int result
= __shared_region_map_and_slide_np(info
.fd
, 3, info
.mappings
, results
->slide
, slideInfo
, info
.slideInfoSize
);
590 results
->loadAddress
= (const DyldSharedCache
*)(info
.mappings
[0].sfm_address
);
593 // could be another process beat us to it
594 if ( reuseExistingCache(options
, results
) )
596 // if cache does not exist, then really is an error
597 if ( results
->errorMessage
== nullptr )
598 results
->errorMessage
= "syscall to map cache into shared region failed";
602 if ( options
.verbose
) {
603 dyld::log("mapped dyld cache file system wide: %s\n", results
->path
);
604 verboseSharedCacheMappings(info
.mappings
);
608 #endif // TARGET_OS_SIMULATOR
610 static bool mapCachePrivate(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
)
612 // open and validate cache file
614 if ( !preflightCacheFile(options
, results
, &info
) )
617 // compute ALSR slide
619 #if !TARGET_OS_SIMULATOR // simulator caches do not support sliding
620 if ( info
.slideInfoSize
!= 0 ) {
621 results
->slide
= pickCacheASLR(info
);
624 results
->loadAddress
= (const DyldSharedCache
*)(info
.mappings
[0].sfm_address
);
626 // deallocate any existing system wide shared cache
627 deallocateExistingSharedCache();
629 #if TARGET_OS_SIMULATOR && TARGET_OS_WATCH
630 // <rdar://problem/50887685> watchOS 32-bit cache does not overlap macOS dyld cache address range
631 // mmap() of a file needs a vm_allocation behind it, so make one
632 vm_address_t loadAddress
= 0x40000000;
633 ::vm_allocate(mach_task_self(), &loadAddress
, 0x40000000, VM_FLAGS_FIXED
);
636 // map cache just for this process with mmap()
637 for (int i
=0; i
< 3; ++i
) {
638 void* mmapAddress
= (void*)(uintptr_t)(info
.mappings
[i
].sfm_address
);
639 size_t size
= (size_t)(info
.mappings
[i
].sfm_size
);
640 //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
642 if ( info
.mappings
[i
].sfm_init_prot
& VM_PROT_EXECUTE
)
643 protection
|= PROT_EXEC
;
644 if ( info
.mappings
[i
].sfm_init_prot
& VM_PROT_READ
)
645 protection
|= PROT_READ
;
646 if ( info
.mappings
[i
].sfm_init_prot
& VM_PROT_WRITE
)
647 protection
|= PROT_WRITE
;
648 off_t offset
= info
.mappings
[i
].sfm_file_offset
;
649 if ( ::mmap(mmapAddress
, size
, protection
, MAP_FIXED
| MAP_PRIVATE
, info
.fd
, offset
) != mmapAddress
) {
650 // failed to map some chunk of this shared cache file
651 // clear shared region
652 ::mmap((void*)((long)SHARED_REGION_BASE
), SHARED_REGION_SIZE
, PROT_NONE
, MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, 0, 0);
654 results
->loadAddress
= nullptr;
655 results
->errorMessage
= "could not mmap() part of dyld cache";
662 #if TARGET_OS_SIMULATOR // simulator caches do not support sliding
665 bool success
= rebaseDataPages(options
.verbose
, info
, results
);
667 if ( options
.verbose
) {
668 dyld::log("mapped dyld cache file private to process (%s):\n", results
->path
);
669 verboseSharedCacheMappings(info
.mappings
);
677 bool loadDyldCache(const SharedCacheOptions
& options
, SharedCacheLoadInfo
* results
)
679 results
->loadAddress
= 0;
681 results
->errorMessage
= nullptr;
683 #if TARGET_OS_SIMULATOR
684 // simulator only supports mmap()ing cache privately into process
685 return mapCachePrivate(options
, results
);
687 if ( options
.forcePrivate
) {
688 // mmap cache into this process only
689 return mapCachePrivate(options
, results
);
692 // fast path: when cache is already mapped into shared region
693 bool hasError
= false;
694 if ( reuseExistingCache(options
, results
) ) {
695 hasError
= (results
->errorMessage
!= nullptr);
697 // slow path: this is first process to load cache
698 hasError
= mapCacheSystemWide(options
, results
);
706 bool findInSharedCacheImage(const SharedCacheLoadInfo
& loadInfo
, const char* dylibPathToFind
, SharedCacheFindDylibResults
* results
)
708 if ( loadInfo
.loadAddress
== nullptr )
711 if ( loadInfo
.loadAddress
->header
.formatVersion
!= dyld3::closure::kFormatVersion
) {
712 // support for older cache with a different Image* format
713 #if __IPHONE_OS_VERSION_MIN_REQUIRED
715 for (const char* s
=dylibPathToFind
; *s
!= '\0'; ++s
)
718 const dyld_cache_image_info
* const start
= (dyld_cache_image_info
*)((uint8_t*)loadInfo
.loadAddress
+ loadInfo
.loadAddress
->header
.imagesOffset
);
719 const dyld_cache_image_info
* const end
= &start
[loadInfo
.loadAddress
->header
.imagesCount
];
720 for (const dyld_cache_image_info
* p
= start
; p
!= end
; ++p
) {
721 #if __IPHONE_OS_VERSION_MIN_REQUIRED
722 // on iOS, inode is used to hold hash of path
723 if ( (p
->modTime
== 0) && (p
->inode
!= hash
) )
726 const char* aPath
= (char*)loadInfo
.loadAddress
+ p
->pathFileOffset
;
727 if ( strcmp(aPath
, dylibPathToFind
) == 0 ) {
728 results
->mhInCache
= (const mach_header
*)(p
->address
+loadInfo
.slide
);
729 results
->pathInCache
= aPath
;
730 results
->slideInCache
= loadInfo
.slide
;
731 results
->image
= nullptr;
738 const dyld3::closure::ImageArray
* images
= loadInfo
.loadAddress
->cachedDylibsImageArray();
739 results
->image
= nullptr;
741 if ( loadInfo
.loadAddress
->hasImagePath(dylibPathToFind
, imageIndex
) ) {
742 results
->image
= images
->imageForNum(imageIndex
+1);
744 #if __MAC_OS_X_VERSION_MIN_REQUIRED
746 // <rdar://problem/32740215> handle symlink to cached dylib
747 if ( loadInfo
.loadAddress
->header
.dylibsExpectedOnDisk
) {
749 if ( dyld::my_stat(dylibPathToFind
, &statBuf
) == 0 ) {
750 // on macOS we store the inode and mtime of each dylib in the cache in the dyld_cache_image_info array
751 const dyld_cache_image_info
* const start
= (dyld_cache_image_info
*)((uint8_t*)loadInfo
.loadAddress
+ loadInfo
.loadAddress
->header
.imagesOffset
);
752 const dyld_cache_image_info
* const end
= &start
[loadInfo
.loadAddress
->header
.imagesCount
];
753 for (const dyld_cache_image_info
* p
= start
; p
!= end
; ++p
) {
754 if ( (p
->inode
== statBuf
.st_ino
) && (p
->modTime
== statBuf
.st_mtime
) ) {
755 imageIndex
= (uint32_t)(p
- start
);
756 results
->image
= images
->imageForNum(imageIndex
+1);
763 char resolvedPath
[PATH_MAX
];
764 if ( realpath(dylibPathToFind
, resolvedPath
) != nullptr ) {
765 if ( loadInfo
.loadAddress
->hasImagePath(resolvedPath
, imageIndex
) ) {
766 results
->image
= images
->imageForNum(imageIndex
+1);
772 if ( results
->image
== nullptr )
775 results
->mhInCache
= (const mach_header
*)((uintptr_t)loadInfo
.loadAddress
+ results
->image
->cacheOffset());
776 results
->pathInCache
= results
->image
->path();
777 results
->slideInCache
= loadInfo
.slide
;
782 bool pathIsInSharedCacheImage(const SharedCacheLoadInfo
& loadInfo
, const char* dylibPathToFind
)
784 if ( (loadInfo
.loadAddress
== nullptr) )
788 return loadInfo
.loadAddress
->hasImagePath(dylibPathToFind
, imageIndex
);
791 void deallocateExistingSharedCache()
793 #if TARGET_OS_SIMULATOR
794 // dyld deallocated macOS shared cache before jumping into dyld_sim
796 // <rdar://problem/5077374> remove the shared region sub-map
797 uint64_t existingCacheAddress
= 0;
798 if ( __shared_region_check_np(&existingCacheAddress
) == 0 ) {
799 ::mmap((void*)((long)SHARED_REGION_BASE
), SHARED_REGION_SIZE
, PROT_NONE
, MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, 0, 0);