/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <mach/mach.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <mach-o/ldsyms.h>
#include <mach/shared_region.h>
#include <Availability.h>
#include <TargetConditionals.h>

#include "dyld_cache_format.h"
#include "SharedCacheRuntime.h"
#include "DyldSharedCache.h"
#include "MachOFile.h"
#include "MachOLoaded.h"
#include "Loading.h"
#include "BootArgs.h"
#define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024

// should be in mach/shared_region.h
extern "C" int __shared_region_check_np(uint64_t* startaddress);
extern "C" int __shared_region_map_and_slide_np(int fd, uint32_t count, const shared_file_mapping_np mappings[],
                                                long slide, const dyld_cache_slide_info2* slideInfo, size_t slideInfoSize);
extern "C" int __shared_region_map_and_slide_2_np(uint32_t files_count, const shared_file_np files[],
                                                  uint32_t mappings_count, const shared_file_mapping_slide_np mappings[]);
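
// Two generations of this syscall are used below: with
// __shared_region_map_and_slide_np(), dyld picks the slide itself and passes
// the v2 slide info down; with __shared_region_map_and_slide_2_np(), dyld only
// passes a maximum slide, the kernel picks (and owns) the actual slide, and
// dyld queries it back afterwards with __shared_region_check_np().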
#ifndef VM_PROT_NOAUTH
#define VM_PROT_NOAUTH  0x40  /* must not interfere with normal prot assignments */
#endif
namespace dyld {
    extern void log(const char*, ...);
    extern void logToConsole(const char* format, ...);
#if defined(__x86_64__) && !TARGET_OS_SIMULATOR
    extern bool isTranslated();
#endif
}

namespace dyld3 {

struct CacheInfo
{
    shared_file_mapping_slide_np    mappings[DyldSharedCache::MaxMappings];
    uint32_t                        mappingsCount;
    // All mappings come from the same file
    int                             fd = 0;
    uint64_t                        sharedRegionStart;
    uint64_t                        sharedRegionSize;
    uint64_t                        maxSlide;
};
#if __i386__
    #define ARCH_NAME            "i386"
    #define ARCH_CACHE_MAGIC     "dyld_v1    i386"
#elif __x86_64__
    #define ARCH_NAME            "x86_64"
    #define ARCH_CACHE_MAGIC     "dyld_v1  x86_64"
    #define ARCH_NAME_H          "x86_64h"
    #define ARCH_CACHE_MAGIC_H   "dyld_v1 x86_64h"
#elif __ARM_ARCH_7K__
    #define ARCH_NAME            "armv7k"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7k"
#elif __ARM_ARCH_7A__
    #define ARCH_NAME            "armv7"
    #define ARCH_CACHE_MAGIC     "dyld_v1   armv7"
#elif __ARM_ARCH_7S__
    #define ARCH_NAME            "armv7s"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7s"
#elif __arm64e__
    #define ARCH_NAME            "arm64e"
    #define ARCH_CACHE_MAGIC     "dyld_v1  arm64e"
#elif __arm64__
  #if __LP64__
    #define ARCH_NAME            "arm64"
    #define ARCH_CACHE_MAGIC     "dyld_v1   arm64"
  #else
    #define ARCH_NAME            "arm64_32"
    #define ARCH_CACHE_MAGIC     "dyld_v1arm64_32"
  #endif
#endif
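
// The magic doubles as the first field of dyld_cache_header. The arch name is
// right-justified with spaces so that every magic is exactly 15 characters,
// e.g. an x86_64h cache file starts with the bytes "dyld_v1 x86_64h";
// "dyld_v1arm64_32" is the one name long enough to need no padding.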
#if !TARGET_OS_SIMULATOR
static void rebaseChainV2(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info2* slideInfo)
{
    const uintptr_t   deltaMask    = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t   valueMask    = ~deltaMask;
    const uintptr_t   valueAdd     = (uintptr_t)(slideInfo->value_add);
    const unsigned    deltaShift   = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t*  loc      = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( value != 0 ) {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif
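
// Worked example of the v2 encoding (illustrative numbers, not from any real
// cache): with delta_mask = 0x00FFFF0000000000, deltaShift is
// __builtin_ctzll(delta_mask) - 2 = 40 - 2 = 38, so extracting the delta field
// implicitly multiplies it by 4 (the chain is expressed in 4-byte units).
// For rawValue = 0x0001000045678000:
//     delta = (rawValue & deltaMask) >> 38 = 0x400 bytes to the next fixup
//     value = rawValue & ~deltaMask        = 0x45678000
// and the stored pointer becomes value + value_add + slideAmount.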
#if !__LP64__ && !TARGET_OS_SIMULATOR
static void rebaseChainV4(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info4* slideInfo)
{
    const uintptr_t   deltaMask    = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t   valueMask    = ~deltaMask;
    const uintptr_t   valueAdd     = (uintptr_t)(slideInfo->value_add);
    const unsigned    deltaShift   = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t*  loc      = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( (value & 0xFFFF8000) == 0 ) {
            // small positive non-pointer, use as-is
        }
        else if ( (value & 0x3FFF8000) == 0x3FFF8000 ) {
            // small negative non-pointer
            value |= 0xC0000000;
        }
        else {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif
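
// The v4 format packs small non-pointers into the same 32-bit slots
// (illustrative numbers): value 0x00004132 has (value & 0xFFFF8000) == 0, so
// it is stored back unchanged; value 0x3FFFFFF4 matches the 0x3FFF8000
// pattern, so its top bits are restored with value |= 0xC0000000, giving
// 0xFFFFFFF4, i.e. the small negative -12.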
#if TARGET_OS_OSX
bool getMacOSCachePath(char pathBuffer[], size_t pathBufferSize,
                       const char* cacheDir, bool useHaswell) {
    // Clear old attempts at finding a cache, if any
    pathBuffer[0] = '\0';

    // set cache dir
    strlcpy(pathBuffer, cacheDir, pathBufferSize);

    // append file component of cache file
    if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
        strlcat(pathBuffer, "/", pathBufferSize);

#if __x86_64__
    if ( useHaswell ) {
        size_t len = strlen(pathBuffer);
        struct stat haswellStatBuf;
        strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H, pathBufferSize);
        if ( dyld3::stat(pathBuffer, &haswellStatBuf) == 0 )
            return true;
        // no haswell cache file, use regular x86_64 cache
        pathBuffer[len] = '\0';
    }
#endif

    struct stat statBuf;
    strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);
    if ( dyld3::stat(pathBuffer, &statBuf) == 0 )
        return true;

    return false;
}
#endif // TARGET_OS_OSX
static void getCachePath(const SharedCacheOptions& options, size_t pathBufferSize, char pathBuffer[])
{
#if TARGET_OS_OSX
    if ( options.cacheDirOverride != nullptr ) {
        getMacOSCachePath(pathBuffer, pathBufferSize, options.cacheDirOverride, options.useHaswell);
    } else {
        getMacOSCachePath(pathBuffer, pathBufferSize, MACOSX_MRM_DYLD_SHARED_CACHE_DIR, options.useHaswell);
    }

#else // TARGET_OS_OSX

    // set cache dir
    if ( options.cacheDirOverride != nullptr ) {
        strlcpy(pathBuffer, options.cacheDirOverride, pathBufferSize);
    } else {
        strlcpy(pathBuffer, IPHONE_DYLD_SHARED_CACHE_DIR, sizeof(IPHONE_DYLD_SHARED_CACHE_DIR));
    }

    // append file component of cache file
    if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
        strlcat(pathBuffer, "/", pathBufferSize);

    strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);

#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
    // use .development cache if it exists
    if ( BootArgs::forceCustomerCache() ) {
        // The boot-arg always wins.  Use the customer cache if we are told to
        return;
    }
    if ( !dyld3::internalInstall() ) {
        // We can't use the development cache on customer installs
        return;
    }
    if ( BootArgs::forceDevelopmentCache() ) {
        // The boot-arg always wins.  Use the development cache if we are told to
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }

    // If only one or the other caches exists, then use the one we have
    struct stat devCacheStatBuf;
    struct stat optCacheStatBuf;
    bool devCacheExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT, &devCacheStatBuf) == 0);
    bool optCacheExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &optCacheStatBuf) == 0);
    if ( !devCacheExists ) {
        // If the dev cache doesn't exist, then use the customer cache
        return;
    }
    if ( !optCacheExists ) {
        // If the customer cache doesn't exist, then use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }

    // Finally, check for the sentinels
    struct stat enableStatBuf;
    //struct stat sentinelStatBuf;
    bool enableFileExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR "enable-dylibs-to-override-cache", &enableStatBuf) == 0);
    // FIXME: rdar://problem/59813537 Re-enable once automation is updated to use boot-arg
    bool sentinelFileExists = false;
    //bool sentinelFileExists = (dyld3::stat(MACOSX_MRM_DYLD_SHARED_CACHE_DIR "enable_development_mode", &sentinelStatBuf) == 0);
    if ( enableFileExists && (enableStatBuf.st_size < ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE) ) {
        // if the old enable file exists, use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }
    if ( sentinelFileExists ) {
        // If the new sentinel exists, then use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }
#endif // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR

#endif //!TARGET_OS_OSX
}
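
// On a typical install the pieces assemble to paths like (macro values here
// are shown for illustration, from the dyld headers):
//     macOS:  /System/Library/dyld/dyld_shared_cache_x86_64h
//     iOS:    /System/Library/Caches/com.apple.dyld/dyld_shared_cache_arm64e
// with ".development" appended when an internal install selects the
// development cache.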
int openSharedCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    getCachePath(options, sizeof(results->path), results->path);
    return dyld3::open(results->path, O_RDONLY, 0);
}
static bool validMagic(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC) == 0 )
        return true;

#if __x86_64__
    if ( options.useHaswell ) {
        if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC_H) == 0 )
            return true;
    }
#endif
    return false;
}
static bool validPlatform(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    // grandfather in old cache that does not have platform in header
    if ( cache->header.mappingOffset < 0xE0 )
        return true;

    if ( cache->header.platform != (uint32_t)MachOFile::currentPlatform() )
        return false;

#if TARGET_OS_SIMULATOR
    if ( cache->header.simulator == 0 )
        return false;
#else
    if ( cache->header.simulator != 0 )
        return false;
#endif

    return true;
}
#if !TARGET_OS_SIMULATOR
static void verboseSharedCacheMappings(const shared_file_mapping_slide_np mappings[DyldSharedCache::MaxMappings],
                                       uint32_t mappingsCount)
{
    for (int i=0; i < mappingsCount; ++i) {
        const char* mappingName = "";
        if ( mappings[i].sms_init_prot & VM_PROT_WRITE ) {
            if ( mappings[i].sms_init_prot & VM_PROT_NOAUTH )
                mappingName = "data";
            else
                mappingName = "auth";
        }
        uint32_t init_prot = mappings[i].sms_init_prot & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        uint32_t max_prot  = mappings[i].sms_max_prot  & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        dyld::log("        0x%08llX->0x%08llX init=%x, max=%x %s%s%s%s\n",
            mappings[i].sms_address, mappings[i].sms_address+mappings[i].sms_size-1,
            init_prot, max_prot,
            ((mappings[i].sms_init_prot & VM_PROT_READ) ? "read " : ""),
            ((mappings[i].sms_init_prot & VM_PROT_WRITE) ? "write " : ""),
            ((mappings[i].sms_init_prot & VM_PROT_EXECUTE) ? "execute " : ""),
            mappingName);
    }
}
#endif
static bool preflightCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results, CacheInfo* info)
{
    // find and open shared cache file
    int fd = openSharedCacheFile(options, results);
    if ( fd == -1 ) {
        results->errorMessage = "shared cache file open() failed";
        return false;
    }

    struct stat cacheStatBuf;
    if ( dyld3::stat(results->path, &cacheStatBuf) != 0 ) {
        results->errorMessage = "shared cache file stat() failed";
        ::close(fd);
        return false;
    }
    size_t cacheFileLength = (size_t)(cacheStatBuf.st_size);

    // sanity check header and mappings
    uint8_t firstPage[0x4000];
    if ( ::pread(fd, firstPage, sizeof(firstPage), 0) != sizeof(firstPage) ) {
        results->errorMessage = "shared cache file pread() failed";
        ::close(fd);
        return false;
    }
    const DyldSharedCache* cache = (DyldSharedCache*)firstPage;
    if ( !validMagic(options, cache) ) {
        results->errorMessage = "shared cache file has wrong magic";
        ::close(fd);
        return false;
    }
    if ( !validPlatform(options, cache) ) {
        results->errorMessage = "shared cache file is for a different platform";
        ::close(fd);
        return false;
    }
    if ( (cache->header.mappingCount < 3) || (cache->header.mappingCount > DyldSharedCache::MaxMappings) || (cache->header.mappingOffset > 0x168) ) {
        results->errorMessage = "shared cache file mappings are invalid";
        ::close(fd);
        return false;
    }
    const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)&firstPage[cache->header.mappingOffset];
    const dyld_cache_mapping_info* textMapping      = &fileMappings[0];
    const dyld_cache_mapping_info* firstDataMapping = &fileMappings[1];
    const dyld_cache_mapping_info* linkeditMapping  = &fileMappings[cache->header.mappingCount - 1];
    if (   (textMapping->fileOffset != 0)
        || ((fileMappings[0].address + fileMappings[0].size) > firstDataMapping->address)
        || ((fileMappings[0].fileOffset + fileMappings[0].size) != firstDataMapping->fileOffset)
        || ((cache->header.codeSignatureOffset + cache->header.codeSignatureSize) != cacheFileLength)
        || (textMapping->maxProt != (VM_PROT_READ|VM_PROT_EXECUTE))
        || (linkeditMapping->maxProt != VM_PROT_READ) ) {
        results->errorMessage = "shared cache text/linkedit mappings are invalid";
        ::close(fd);
        return false;
    }

    // Check the __DATA mappings
    for (unsigned i = 1; i != (cache->header.mappingCount - 1); ++i) {
        if (   ((fileMappings[i].address + fileMappings[i].size) > fileMappings[i + 1].address)
            || ((fileMappings[i].fileOffset + fileMappings[i].size) != fileMappings[i + 1].fileOffset)
            || (fileMappings[i].maxProt != (VM_PROT_READ|VM_PROT_WRITE)) ) {
            results->errorMessage = "shared cache data mappings are invalid";
            ::close(fd);
            return false;
        }
    }

    if ( (textMapping->address != cache->header.sharedRegionStart) || ((linkeditMapping->address + linkeditMapping->size) > (cache->header.sharedRegionStart+cache->header.sharedRegionSize)) ) {
        results->errorMessage = "shared cache file mapping addresses invalid";
        ::close(fd);
        return false;
    }

    // register code signature of cache file
    fsignatures_t siginfo;
    siginfo.fs_file_start = 0;  // cache always starts at beginning of file
    siginfo.fs_blob_start = (void*)cache->header.codeSignatureOffset;
    siginfo.fs_blob_size  = (size_t)(cache->header.codeSignatureSize);
    int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
    if ( result == -1 ) {
        results->errorMessage = "code signature registration for shared cache failed";
        ::close(fd);
        return false;
    }

    // <rdar://problem/23188073> validate code signature covers entire shared cache
    uint64_t codeSignedLength = siginfo.fs_file_start;
    if ( codeSignedLength < cache->header.codeSignatureOffset ) {
        results->errorMessage = "code signature does not cover entire shared cache file";
        ::close(fd);
        return false;
    }
    void* mappedData = ::mmap(NULL, sizeof(firstPage), PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
    if ( mappedData == MAP_FAILED ) {
        results->errorMessage = "first page of shared cache not mmap()able";
        ::close(fd);
        return false;
    }
    if ( memcmp(mappedData, firstPage, sizeof(firstPage)) != 0 ) {
        results->errorMessage = "first page of mmap()ed shared cache not valid";
        ::close(fd);
        return false;
    }
    ::munmap(mappedData, sizeof(firstPage));

    info->fd = fd;
    info->mappingsCount = cache->header.mappingCount;
    // We have to emit the mapping for the __LINKEDIT before the slid mappings
    // This is so that the kernel has already mapped __LINKEDIT in to its address space
    // for when it copies the slid info for each __DATA mapping
    for (int i=0; i < cache->header.mappingCount; ++i) {
        uint64_t  slideInfoFileOffset = 0;
        uint64_t  slideInfoFileSize   = 0;
        vm_prot_t authProt            = 0;
        if ( cache->header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
            // Old cache without the new slid mappings
            if ( i == 1 ) {
                // Add slide info to the __DATA mapping
                slideInfoFileOffset = cache->header.slideInfoOffsetUnused;
                slideInfoFileSize   = cache->header.slideInfoSizeUnused;
                // Don't set auth prot to anything interesting on the old mappings
                authProt = 0;
            }
        }
        else {
            // New cache where each mapping has a corresponding slid mapping
            const dyld_cache_mapping_and_slide_info* slidableMappings = (const dyld_cache_mapping_and_slide_info*)&firstPage[cache->header.mappingWithSlideOffset];
            slideInfoFileOffset = slidableMappings[i].slideInfoFileOffset;
            slideInfoFileSize   = slidableMappings[i].slideInfoFileSize;
            if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_AUTH_DATA) == 0 )
                authProt = VM_PROT_NOAUTH;
        }

        // Add a file for each mapping
        info->mappings[i].sms_address     = fileMappings[i].address;
        info->mappings[i].sms_size        = fileMappings[i].size;
        info->mappings[i].sms_file_offset = fileMappings[i].fileOffset;
        info->mappings[i].sms_slide_size  = 0;
        info->mappings[i].sms_slide_start = 0;
        info->mappings[i].sms_max_prot    = fileMappings[i].maxProt;
        info->mappings[i].sms_init_prot   = fileMappings[i].initProt;
        if ( slideInfoFileSize != 0 ) {
            uint64_t offsetInLinkEditRegion = (slideInfoFileOffset - linkeditMapping->fileOffset);
            info->mappings[i].sms_slide_start = (user_addr_t)(linkeditMapping->address + offsetInLinkEditRegion);
            info->mappings[i].sms_slide_size  = (user_addr_t)slideInfoFileSize;
            info->mappings[i].sms_init_prot  |= (VM_PROT_SLIDE | authProt);
            info->mappings[i].sms_max_prot   |= (VM_PROT_SLIDE | authProt);
        }
    }
    info->sharedRegionStart = cache->header.sharedRegionStart;
    info->sharedRegionSize  = cache->header.sharedRegionSize;
    info->maxSlide          = cache->header.maxSlide;
    return true;
}
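
// Example of the sms_slide_start computation above (illustrative numbers): if
// __LINKEDIT is mapped at 0x1FC000000 from file offset 0x5C000000 and a
// mapping's slide info lives at file offset 0x5C010000, the kernel is told to
// read it at 0x1FC000000 + (0x5C010000 - 0x5C000000) = 0x1FC010000.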
#if !TARGET_OS_SIMULATOR

// update all __DATA pages with slide info
static bool rebaseDataPages(bool isVerbose, const dyld_cache_slide_info* slideInfo, const uint8_t *dataPagesStart,
                            uint64_t sharedRegionStart, SharedCacheLoadInfo* results)
{
    const dyld_cache_slide_info* slideInfoHeader = slideInfo;
    if ( slideInfoHeader != nullptr ) {
        if ( slideInfoHeader->version == 2 ) {
            const dyld_cache_slide_info2* slideHeader = (dyld_cache_slide_info2*)slideInfo;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
                    uint16_t chainIndex = (pageEntry & 0x3FFF);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & 0x3FFF)*4;
                        //dyld::log("     chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV2(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE_PAGE_ATTR_END);
                        ++chainIndex;
                    }
                }
                else {
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log("     start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV2(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
        else if ( slideInfoHeader->version == 3 ) {
            const dyld_cache_slide_info3* slideHeader = (dyld_cache_slide_info3*)slideInfo;
            const uint32_t pageSize = slideHeader->page_size;
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(dataPagesStart + (pageSize*i));
                uint64_t delta = slideHeader->page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, delta);
                if ( delta == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE )
                    continue;
                delta = delta/sizeof(uint64_t); // initial offset is byte based
                dyld_cache_slide_pointer3* loc = (dyld_cache_slide_pointer3*)page;
                do {
                    loc += delta;
                    delta = loc->plain.offsetToNextPointer;
                    if ( loc->auth.authenticated ) {
#if __has_feature(ptrauth_calls)
                        uint64_t target = sharedRegionStart + loc->auth.offsetFromSharedCacheBase + results->slide;
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw64 = *((uint64_t*)loc);
                        loc->raw = ptr.arm64e.signPointer(loc, target);
#else
                        results->errorMessage = "invalid pointer kind in cache file";
                        return false;
#endif
                    }
                    else {
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw64 = *((uint64_t*)loc);
                        loc->raw = ptr.arm64e.unpackTarget() + results->slide;
                    }
                } while (delta != 0);
            }
        }
#if !__LP64__
        else if ( slideInfoHeader->version == 4 ) {
            const dyld_cache_slide_info4* slideHeader = (dyld_cache_slide_info4*)slideInfo;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
                    uint16_t chainIndex = (pageEntry & DYLD_CACHE_SLIDE4_PAGE_INDEX);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & DYLD_CACHE_SLIDE4_PAGE_INDEX)*4;
                        //dyld::log("     chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV4(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END);
                        ++chainIndex;
                    }
                }
                else {
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log("     start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV4(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
#endif // !__LP64__
        else {
            results->errorMessage = "invalid slide info in cache file";
            return false;
        }
    }
    return true;
}
static bool reuseExistingCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    uint64_t cacheBaseAddress;
#if __i386__
    if ( syscall(294, &cacheBaseAddress) == 0 ) {
#else
    if ( __shared_region_check_np(&cacheBaseAddress) == 0 ) {
#endif
        const DyldSharedCache* existingCache = (DyldSharedCache*)cacheBaseAddress;
        if ( validMagic(options, existingCache) ) {
            const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)(cacheBaseAddress + existingCache->header.mappingOffset);
            results->loadAddress = existingCache;
            results->slide = (long)(cacheBaseAddress - fileMappings[0].address);
            // we don't know the path this cache was previously loaded from, assume default
            getCachePath(options, sizeof(results->path), results->path);
            if ( options.verbose ) {
                const shared_file_mapping_np* const mappings = (shared_file_mapping_np*)(cacheBaseAddress + existingCache->header.mappingOffset);
                dyld::log("re-using existing shared cache (%s):\n", results->path);
                shared_file_mapping_slide_np slidMappings[DyldSharedCache::MaxMappings];
                for (int i=0; i < DyldSharedCache::MaxMappings; ++i) {
                    slidMappings[i].sms_address     = mappings[i].sfm_address;
                    slidMappings[i].sms_size        = mappings[i].sfm_size;
                    slidMappings[i].sms_file_offset = mappings[i].sfm_file_offset;
                    slidMappings[i].sms_max_prot    = mappings[i].sfm_max_prot;
                    slidMappings[i].sms_init_prot   = mappings[i].sfm_init_prot;

                    slidMappings[i].sms_address += results->slide;
                    if ( existingCache->header.mappingOffset > __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
                        // New caches have slide info on each new mapping
                        const dyld_cache_mapping_and_slide_info* const slidableMappings = (dyld_cache_mapping_and_slide_info*)(cacheBaseAddress + existingCache->header.mappingWithSlideOffset);
                        assert(existingCache->header.mappingWithSlideCount <= DyldSharedCache::MaxMappings);
                        if ( !(slidableMappings[i].flags & DYLD_CACHE_MAPPING_AUTH_DATA) ) {
                            slidMappings[i].sms_max_prot  |= VM_PROT_NOAUTH;
                            slidMappings[i].sms_init_prot |= VM_PROT_NOAUTH;
                        }
                    }
                }
                verboseSharedCacheMappings(slidMappings, existingCache->header.mappingCount);
            }
        }
        else {
            results->errorMessage = "existing shared cache in memory is not compatible";
        }
        return true;
    }
    return false;
}
static long pickCacheASLRSlide(CacheInfo& info)
{
    // choose new random slide
#if TARGET_OS_IPHONE || (TARGET_OS_OSX && TARGET_CPU_ARM64)
    // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
    long slide;
    if (info.maxSlide == 0)
        slide = 0;
    else
        slide = ((arc4random() % info.maxSlide) & (-16384));
#else
    long slide;
    if (info.maxSlide == 0)
        slide = 0;
    else
        slide = ((arc4random() % info.maxSlide) & (-4096));
#if defined(__x86_64__) && !TARGET_OS_SIMULATOR
    if (dyld::isTranslated()) {
        slide &= (-16384);
    }
#endif
#endif

    return slide;
}
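
// Example (illustrative numbers): with info.maxSlide = 0x20000000 and
// arc4random() returning 0x12345678, the 16KB-aligned branch yields
// (0x12345678 % 0x20000000) & -16384 = 0x12344000 as the slide.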
static bool mapCacheSystemWide(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    int result = 0;
    if ( info.mappingsCount != 3 ) {
        uint32_t maxSlide = options.disableASLR ? 0 : (uint32_t)info.maxSlide;

        shared_file_np file;
        file.sf_fd             = info.fd;
        file.sf_mappings_count = info.mappingsCount;
        // For the new syscall, this is actually the max slide.  The kernel now owns the actual slide
        file.sf_slide          = maxSlide;
        result = __shared_region_map_and_slide_2_np(1, &file, info.mappingsCount, info.mappings);
    }
    else {
        // With the old syscall, dyld has to choose the slide
        results->slide = options.disableASLR ? 0 : pickCacheASLRSlide(info);

        // update mappings based on the slide we choose
        for (uint32_t i=0; i < info.mappingsCount; ++i) {
            info.mappings[i].sms_address += results->slide;
            if ( info.mappings[i].sms_slide_size != 0 )
                info.mappings[i].sms_slide_start += (uint32_t)results->slide;
        }

        // If we get here then we don't have the new kernel function, so use the old one
        const dyld_cache_slide_info2* slideInfo = nullptr;
        size_t slideInfoSize = 0;
        shared_file_mapping_np mappings[3];
        for (unsigned i = 0; i != 3; ++i) {
            mappings[i].sfm_address     = info.mappings[i].sms_address;
            mappings[i].sfm_size        = info.mappings[i].sms_size;
            mappings[i].sfm_file_offset = info.mappings[i].sms_file_offset;
            mappings[i].sfm_max_prot    = info.mappings[i].sms_max_prot;
            mappings[i].sfm_init_prot   = info.mappings[i].sms_init_prot;
            if ( info.mappings[i].sms_slide_size != 0 ) {
                slideInfo     = (dyld_cache_slide_info2*)info.mappings[i].sms_slide_start;
                slideInfoSize = (size_t)info.mappings[i].sms_slide_size;
            }
        }
        result = __shared_region_map_and_slide_np(info.fd, 3, mappings, results->slide, slideInfo, slideInfoSize);
    }

    ::close(info.fd);
    if ( result == 0 ) {
        results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sms_address);
        if ( info.mappingsCount != 3 ) {
            // We don't know our own slide any more as the kernel owns it, so ask for it again now
            if ( reuseExistingCache(options, results) )
                return true;

            // Uh oh, we mapped the kernel, but we didn't find the slide
            dyld::logToConsole("dyld: error finding shared cache slide for system wide mapping\n");
            return false;
        }
    }
    else {
        // could be another process beat us to it
        if ( reuseExistingCache(options, results) )
            return true;
        // if cache does not exist, then really is an error
        if ( results->errorMessage == nullptr )
            results->errorMessage = "syscall to map cache into shared region failed";
        return false;
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file system wide: %s\n", results->path);
        verboseSharedCacheMappings(info.mappings, info.mappingsCount);
    }
    return true;
}
#endif // TARGET_OS_SIMULATOR
static bool mapCachePrivate(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    // open and validate cache file
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    // compute ASLR slide
    results->slide = 0;
#if !TARGET_OS_SIMULATOR
    results->slide = options.disableASLR ? 0 : pickCacheASLRSlide(info);
#endif

    // update mappings
    for (uint32_t i=0; i < info.mappingsCount; ++i) {
        info.mappings[i].sms_address += (uint32_t)results->slide;
        if ( info.mappings[i].sms_slide_size != 0 )
            info.mappings[i].sms_slide_start += (uint32_t)results->slide;
    }

    results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sms_address);

    // deallocate any existing system wide shared cache
    deallocateExistingSharedCache();

#if TARGET_OS_SIMULATOR && TARGET_OS_WATCH
    // <rdar://problem/50887685> watchOS 32-bit cache does not overlap macOS dyld cache address range
    // mmap() of a file needs a vm_allocation behind it, so make one
    vm_address_t loadAddress = 0x40000000;
    ::vm_allocate(mach_task_self(), &loadAddress, 0x40000000, VM_FLAGS_FIXED);
#endif

    // map cache just for this process with mmap()
    for (int i=0; i < info.mappingsCount; ++i) {
        void* mmapAddress = (void*)(uintptr_t)(info.mappings[i].sms_address);
        size_t size = (size_t)(info.mappings[i].sms_size);
        //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
        int protection = 0;
        if ( info.mappings[i].sms_init_prot & VM_PROT_EXECUTE )
            protection |= PROT_EXEC;
        if ( info.mappings[i].sms_init_prot & VM_PROT_READ )
            protection |= PROT_READ;
        if ( info.mappings[i].sms_init_prot & VM_PROT_WRITE )
            protection |= PROT_WRITE;
        off_t offset = info.mappings[i].sms_file_offset;
        if ( ::mmap(mmapAddress, size, protection, MAP_FIXED | MAP_PRIVATE, info.fd, offset) != mmapAddress ) {
            // failed to map some chunk of this shared cache file
            // clear shared region
            ::mmap((void*)((long)SHARED_REGION_BASE), SHARED_REGION_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
            // return failure
            results->loadAddress  = nullptr;
            results->errorMessage = "could not mmap() part of dyld cache";
            ::close(info.fd);
            return false;
        }
    }
    ::close(info.fd);

#if TARGET_OS_SIMULATOR // simulator caches do not support sliding
    return true;
#else
    __block bool success = true;
    for (int i=0; i < info.mappingsCount; ++i) {
        if ( info.mappings[i].sms_slide_size == 0 )
            continue;
        const dyld_cache_slide_info* slideInfoHeader = (const dyld_cache_slide_info*)info.mappings[i].sms_slide_start;
        const uint8_t* mappingPagesStart = (const uint8_t*)info.mappings[i].sms_address;
        success &= rebaseDataPages(options.verbose, slideInfoHeader, mappingPagesStart, info.sharedRegionStart, results);
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file private to process (%s):\n", results->path);
        verboseSharedCacheMappings(info.mappings, info.mappingsCount);
    }
    return success;
#endif
}
bool loadDyldCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    results->loadAddress  = 0;
    results->slide        = 0;
    results->errorMessage = nullptr;

#if TARGET_OS_SIMULATOR
    // simulator only supports mmap()ing cache privately into process
    return mapCachePrivate(options, results);
#else
    if ( options.forcePrivate ) {
        // mmap cache into this process only
        return mapCachePrivate(options, results);
    }
    else {
        // fast path: when cache is already mapped into shared region
        bool hasError = false;
        if ( reuseExistingCache(options, results) ) {
            hasError = (results->errorMessage != nullptr);
        }
        else {
            // slow path: this is first process to load cache
            hasError = mapCacheSystemWide(options, results);
        }
        return hasError;
    }
#endif
}
bool findInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind, SharedCacheFindDylibResults* results)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    if ( loadInfo.loadAddress->header.formatVersion != dyld3::closure::kFormatVersion ) {
        // support for older cache with a different Image* format
#if TARGET_OS_IPHONE
        uint64_t hash = 0;
        for (const char* s=dylibPathToFind; *s != '\0'; ++s)
            hash += hash*4 + *s;
#endif
        const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)loadInfo.loadAddress + loadInfo.loadAddress->header.imagesOffset);
        const dyld_cache_image_info* const end = &start[loadInfo.loadAddress->header.imagesCount];
        for (const dyld_cache_image_info* p = start; p != end; ++p) {
#if TARGET_OS_IPHONE
            // on iOS, inode is used to hold hash of path
            if ( (p->modTime == 0) && (p->inode != hash) )
                continue;
#endif
            const char* aPath = (char*)loadInfo.loadAddress + p->pathFileOffset;
            if ( strcmp(aPath, dylibPathToFind) == 0 ) {
                results->mhInCache    = (const mach_header*)(p->address+loadInfo.slide);
                results->pathInCache  = aPath;
                results->slideInCache = loadInfo.slide;
                results->image        = nullptr;
                return true;
            }
        }
        return false;
    }

    const dyld3::closure::ImageArray* images = loadInfo.loadAddress->cachedDylibsImageArray();
    results->image = nullptr;
    uint32_t imageIndex;
    if ( loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex) ) {
        results->image = images->imageForNum(imageIndex+1);
    }

    if ( results->image == nullptr )
        return false;

    results->mhInCache    = (const mach_header*)((uintptr_t)loadInfo.loadAddress + results->image->cacheOffset());
    results->pathInCache  = results->image->path();
    results->slideInCache = loadInfo.slide;
    return true;
}
bool pathIsInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    uint32_t imageIndex;
    return loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex);
}
void deallocateExistingSharedCache()
{
#if TARGET_OS_SIMULATOR
    // dyld deallocated macOS shared cache before jumping into dyld_sim
#else
    // <rdar://problem/50773474> remove the shared region sub-map
    uint64_t existingCacheAddress = 0;
    if ( __shared_region_check_np(&existingCacheAddress) == 0 ) {
        ::mmap((void*)((long)SHARED_REGION_BASE), SHARED_REGION_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
    }
#endif
}

} // namespace dyld3