/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <mach/mach.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <mach-o/ldsyms.h>
#include <mach/shared_region.h>
#include <Availability.h>
#include <TargetConditionals.h>

#include "dyld_cache_format.h"
#include "SharedCacheRuntime.h"
#include "Loading.h"
#include "BootArgs.h"
#define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024

// should be in mach/shared_region.h
extern "C" int __shared_region_check_np(uint64_t* startaddress);
extern "C" int __shared_region_map_and_slide_np(int fd, uint32_t count, const shared_file_mapping_np mappings[],
                                                long slide, const dyld_cache_slide_info2* slideInfo, size_t slideInfoSize);
extern "C" int __shared_region_map_and_slide_2_np(uint32_t files_count, const shared_file_np files[],
                                                  uint32_t mappings_count, const shared_file_mapping_slide_np mappings[]);
#ifndef VM_PROT_NOAUTH
#define VM_PROT_NOAUTH  0x40  /* must not interfere with normal prot assignments */
#endif

extern bool gEnableSharedCacheDataConst;

namespace dyld {
    extern void log(const char*, ...);
    extern void logToConsole(const char* format, ...);
#if defined(__x86_64__) && !TARGET_OS_SIMULATOR
    extern bool isTranslated();
#endif
}

namespace dyld3 {

struct CacheInfo
{
    shared_file_mapping_slide_np    mappings[DyldSharedCache::MaxMappings];
    uint32_t                        mappingsCount;
    // All mappings come from the same file
    int                             fd = 0;
    uint64_t                        sharedRegionStart;
    uint64_t                        sharedRegionSize;
    uint64_t                        maxSlide;
};
#if __i386__
    #define ARCH_NAME            "i386"
    #define ARCH_CACHE_MAGIC     "dyld_v1    i386"
#elif __x86_64__
    #define ARCH_NAME            "x86_64"
    #define ARCH_CACHE_MAGIC     "dyld_v1  x86_64"
    #define ARCH_NAME_H          "x86_64h"
    #define ARCH_CACHE_MAGIC_H   "dyld_v1 x86_64h"
#elif __ARM_ARCH_7K__
    #define ARCH_NAME            "armv7k"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7k"
#elif __ARM_ARCH_7A__
    #define ARCH_NAME            "armv7"
    #define ARCH_CACHE_MAGIC     "dyld_v1   armv7"
#elif __ARM_ARCH_7S__
    #define ARCH_NAME            "armv7s"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7s"
#elif __arm64e__
    #define ARCH_NAME            "arm64e"
    #define ARCH_CACHE_MAGIC     "dyld_v1  arm64e"
#elif __arm64__
  #if __LP64__
    #define ARCH_NAME            "arm64"
    #define ARCH_CACHE_MAGIC     "dyld_v1   arm64"
  #else
    #define ARCH_NAME            "arm64_32"
    #define ARCH_CACHE_MAGIC     "dyld_v1arm64_32"
  #endif
#endif
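// Note: each ARCH_CACHE_MAGIC is padded with spaces to exactly 15 characters
// so that, with its terminating NUL, it fills the fixed 16-byte magic field
// at the start of dyld_cache_header ("dyld_v1arm64_32" needs no padding).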
#if !TARGET_OS_SIMULATOR
static void rebaseChainV2(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info2* slideInfo)
{
    const uintptr_t deltaMask  = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t valueMask  = ~deltaMask;
    const uintptr_t valueAdd   = (uintptr_t)(slideInfo->value_add);
    const unsigned  deltaShift = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( value != 0 ) {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif
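// For illustration of the v2 encoding, assume delta_mask == 0x00FF000000000000
// and value_add == 0: deltaShift is __builtin_ctzll(0x00FF000000000000) - 2
// == 48 - 2 == 46, so a raw value of 0x0001000012345678 decodes to a delta of
// (rawValue & deltaMask) >> 46 == 4 and an unslid pointer of 0x12345678; the
// slide is added to the pointer, and the next link in the chain lives 4 bytes
// further into the page. A delta of 0 terminates the chain.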
#if !__LP64__ && !TARGET_OS_SIMULATOR
static void rebaseChainV4(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info4* slideInfo)
{
    const uintptr_t deltaMask  = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t valueMask  = ~deltaMask;
    const uintptr_t valueAdd   = (uintptr_t)(slideInfo->value_add);
    const unsigned  deltaShift = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( (value & 0xFFFF8000) == 0 ) {
            // small positive non-pointer, use as-is
        }
        else if ( (value & 0x3FFF8000) == 0x3FFF8000 ) {
            // small negative non-pointer
            value |= 0xC0000000;
        }
        else {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif
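// v4 is the 32-bit variant of the same chain walk: values with the bits in
// 0xFFFF8000 all clear are small positive non-pointers stored as-is, values
// matching the 0x3FFF8000 pattern are small negatives re-sign-extended by
// OR-ing in 0xC0000000, and only everything else is treated as a pointer
// that gets value_add plus the slide applied.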
#if TARGET_OS_OSX
bool getMacOSCachePath(char pathBuffer[], size_t pathBufferSize,
                       const char* cacheDir, bool useHaswell) {
    // Clear old attempts at finding a cache, if any
    pathBuffer[0] = '\0';

    // set cache dir
    strlcpy(pathBuffer, cacheDir, pathBufferSize);

    // append file component of cache file
    if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
        strlcat(pathBuffer, "/", pathBufferSize);

#if __x86_64__
    if ( useHaswell ) {
        size_t len = strlen(pathBuffer);
        struct stat haswellStatBuf;
        strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H, pathBufferSize);
        if ( dyld3::stat(pathBuffer, &haswellStatBuf) == 0 )
            return true;
        // no haswell cache file, use regular x86_64 cache
        pathBuffer[len] = '\0';
    }
#endif

    struct stat statBuf;
    strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);
    if ( dyld3::stat(pathBuffer, &statBuf) == 0 )
        return true;

    return false;
}
#endif // TARGET_OS_OSX
static void getCachePath(const SharedCacheOptions& options, size_t pathBufferSize, char pathBuffer[])
{
#if TARGET_OS_OSX

    if ( options.cacheDirOverride != nullptr ) {
        getMacOSCachePath(pathBuffer, pathBufferSize, options.cacheDirOverride, options.useHaswell);
    } else {
        getMacOSCachePath(pathBuffer, pathBufferSize, MACOSX_MRM_DYLD_SHARED_CACHE_DIR, options.useHaswell);
    }

#else // TARGET_OS_OSX

    // set cache dir
    if ( options.cacheDirOverride != nullptr ) {
        strlcpy(pathBuffer, options.cacheDirOverride, pathBufferSize);
    } else {
        strlcpy(pathBuffer, IPHONE_DYLD_SHARED_CACHE_DIR, sizeof(IPHONE_DYLD_SHARED_CACHE_DIR));
    }

    // append file component of cache file
    if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
        strlcat(pathBuffer, "/", pathBufferSize);

    strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);

#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
    // use .development cache if it exists
    if ( BootArgs::forceCustomerCache() ) {
        // The boot-arg always wins.  Use the customer cache if we are told to
        return;
    }
    if ( !dyld3::internalInstall() ) {
        // We can't use the development cache on customer installs
        return;
    }
    if ( BootArgs::forceDevelopmentCache() ) {
        // The boot-arg always wins.  Use the development cache if we are told to
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }

    // If only one or the other caches exists, then use the one we have
    struct stat devCacheStatBuf;
    struct stat optCacheStatBuf;
    bool devCacheExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT, &devCacheStatBuf) == 0);
    bool optCacheExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &optCacheStatBuf) == 0);
    if ( !devCacheExists ) {
        // If the dev cache doesn't exist, then use the customer cache
        return;
    }
    if ( !optCacheExists ) {
        // If the customer cache doesn't exist, then use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }

    // Finally, check for the sentinels
    struct stat enableStatBuf;
    //struct stat sentinelStatBuf;
    bool enableFileExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR "enable-dylibs-to-override-cache", &enableStatBuf) == 0);
    // FIXME: rdar://problem/59813537 Re-enable once automation is updated to use boot-arg
    bool sentinelFileExists = false;
    //bool sentinelFileExists = (dyld3::stat(MACOSX_MRM_DYLD_SHARED_CACHE_DIR "enable_development_mode", &sentinelStatBuf) == 0);
    if ( enableFileExists && (enableStatBuf.st_size < ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE) ) {
        // if the old enable file exists, use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }
    if ( sentinelFileExists ) {
        // If the new sentinel exists, then use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }
#endif

#endif //!TARGET_OS_OSX
}
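// For illustration (the exact strings depend on the SDK's macro definitions):
// on macOS this typically yields something like
//     <MACOSX_MRM_DYLD_SHARED_CACHE_DIR>/dyld_shared_cache_x86_64h
// while on iOS it yields
//     <IPHONE_DYLD_SHARED_CACHE_DIR>/dyld_shared_cache_arm64e
// with ".development" appended when the development cache is selected.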
int openSharedCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    getCachePath(options, sizeof(results->path), results->path);
    return dyld3::open(results->path, O_RDONLY, 0);
}
static bool validMagic(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC) == 0 )
        return true;

#if __x86_64__
    if ( options.useHaswell ) {
        if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC_H) == 0 )
            return true;
    }
#endif
    return false;
}
static bool validPlatform(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    // grandfather in old cache that does not have platform in header
    if ( cache->header.mappingOffset < 0xE0 )
        return true;

    if ( cache->header.platform != (uint32_t)MachOFile::currentPlatform() )
        return false;

#if TARGET_OS_SIMULATOR
    if ( cache->header.simulator == 0 )
        return false;
#else
    if ( cache->header.simulator != 0 )
        return false;
#endif

    return true;
}
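// mappingOffset doubles as a version indicator here: caches built before the
// platform field existed have a header shorter than 0xE0 bytes, so any such
// cache is accepted without a platform or simulator check.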
#if !TARGET_OS_SIMULATOR
static void verboseSharedCacheMappings(const shared_file_mapping_slide_np mappings[DyldSharedCache::MaxMappings],
                                       uint32_t mappingsCount)
{
    for (int i=0; i < mappingsCount; ++i) {
        const char* mappingName = "";
        if ( mappings[i].sms_max_prot & VM_PROT_WRITE ) {
            if ( mappings[i].sms_max_prot & VM_PROT_NOAUTH ) {
                // __DATA* mapping
                mappingName = "data";
            }
            else {
                // __AUTH* mapping
                mappingName = "auth";
            }
        }
        uint32_t init_prot = mappings[i].sms_init_prot & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        uint32_t max_prot  = mappings[i].sms_max_prot  & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        dyld::log("        0x%08llX->0x%08llX init=%x, max=%x %s%s%s%s\n",
                  mappings[i].sms_address, mappings[i].sms_address+mappings[i].sms_size-1,
                  init_prot, max_prot,
                  ((mappings[i].sms_init_prot & VM_PROT_READ) ? "read " : ""),
                  ((mappings[i].sms_init_prot & VM_PROT_WRITE) ? "write " : ""),
                  ((mappings[i].sms_init_prot & VM_PROT_EXECUTE) ? "execute " : ""),
                  mappingName);
    }
}
static void verboseSharedCacheMappingsToConsole(const shared_file_mapping_slide_np mappings[DyldSharedCache::MaxMappings],
                                                uint32_t mappingsCount)
{
    for (int i=0; i < mappingsCount; ++i) {
        const char* mappingName = "";
        if ( mappings[i].sms_max_prot & VM_PROT_WRITE ) {
            if ( mappings[i].sms_max_prot & VM_PROT_NOAUTH ) {
                // __DATA* mapping
                mappingName = "data";
            }
            else {
                // __AUTH* mapping
                mappingName = "auth";
            }
        }
        uint32_t init_prot = mappings[i].sms_init_prot & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        uint32_t max_prot  = mappings[i].sms_max_prot  & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        dyld::logToConsole("dyld: mapping 0x%08llX->0x%08llX init=%x, max=%x %s%s%s%s\n",
                           mappings[i].sms_address, mappings[i].sms_address+mappings[i].sms_size-1,
                           init_prot, max_prot,
                           ((mappings[i].sms_init_prot & VM_PROT_READ) ? "read " : ""),
                           ((mappings[i].sms_init_prot & VM_PROT_WRITE) ? "write " : ""),
                           ((mappings[i].sms_init_prot & VM_PROT_EXECUTE) ? "execute " : ""),
                           mappingName);
    }
}
#endif // !TARGET_OS_SIMULATOR
static bool preflightCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results, CacheInfo* info)
{
    // find and open shared cache file
    int fd = openSharedCacheFile(options, results);
    if ( fd == -1 ) {
        results->errorMessage = "shared cache file open() failed";
        return false;
    }

    struct stat cacheStatBuf;
    if ( dyld3::stat(results->path, &cacheStatBuf) != 0 ) {
        results->errorMessage = "shared cache file stat() failed";
        ::close(fd);
        return false;
    }
    size_t cacheFileLength = (size_t)(cacheStatBuf.st_size);

    // sanity check header and mappings
    uint8_t firstPage[0x4000];
    if ( ::pread(fd, firstPage, sizeof(firstPage), 0) != sizeof(firstPage) ) {
        results->errorMessage = "shared cache file pread() failed";
        ::close(fd);
        return false;
    }
    const DyldSharedCache* cache = (DyldSharedCache*)firstPage;
    if ( !validMagic(options, cache) ) {
        results->errorMessage = "shared cache file has wrong magic";
        ::close(fd);
        return false;
    }
    if ( !validPlatform(options, cache) ) {
        results->errorMessage = "shared cache file is for a different platform";
        ::close(fd);
        return false;
    }
    if ( (cache->header.mappingCount < 3) || (cache->header.mappingCount > DyldSharedCache::MaxMappings) || (cache->header.mappingOffset > 0x168) ) {
        results->errorMessage = "shared cache file mappings are invalid";
        ::close(fd);
        return false;
    }
    const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)&firstPage[cache->header.mappingOffset];
    const dyld_cache_mapping_info* textMapping      = &fileMappings[0];
    const dyld_cache_mapping_info* firstDataMapping = &fileMappings[1];
    const dyld_cache_mapping_info* linkeditMapping  = &fileMappings[cache->header.mappingCount - 1];
    if (   (textMapping->fileOffset != 0)
        || ((fileMappings[0].address + fileMappings[0].size) > firstDataMapping->address)
        || ((fileMappings[0].fileOffset + fileMappings[0].size) != firstDataMapping->fileOffset)
        || ((cache->header.codeSignatureOffset + cache->header.codeSignatureSize) != cacheFileLength)
        || (textMapping->maxProt != (VM_PROT_READ|VM_PROT_EXECUTE))
        || (linkeditMapping->maxProt != VM_PROT_READ) ) {
        results->errorMessage = "shared cache text/linkedit mappings are invalid";
        ::close(fd);
        return false;
    }

    // Check the __DATA mappings
    for (unsigned i = 1; i != (cache->header.mappingCount - 1); ++i) {
        if (   ((fileMappings[i].address + fileMappings[i].size) > fileMappings[i + 1].address)
            || ((fileMappings[i].fileOffset + fileMappings[i].size) != fileMappings[i + 1].fileOffset)
            || (fileMappings[i].maxProt != (VM_PROT_READ|VM_PROT_WRITE)) ) {
            results->errorMessage = "shared cache data mappings are invalid";
            ::close(fd);
            return false;
        }
    }

    if ( (textMapping->address != cache->header.sharedRegionStart) || ((linkeditMapping->address + linkeditMapping->size) > (cache->header.sharedRegionStart+cache->header.sharedRegionSize)) ) {
        results->errorMessage = "shared cache file mapping addresses invalid";
        ::close(fd);
        return false;
    }

    // register code signature of cache file
    fsignatures_t siginfo;
    siginfo.fs_file_start = 0;  // cache always starts at beginning of file
    siginfo.fs_blob_start = (void*)cache->header.codeSignatureOffset;
    siginfo.fs_blob_size  = (size_t)(cache->header.codeSignatureSize);
    int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
    if ( result == -1 ) {
        results->errorMessage = "code signature registration for shared cache failed";
        ::close(fd);
        return false;
    }

    // <rdar://problem/23188073> validate code signature covers entire shared cache
    uint64_t codeSignedLength = siginfo.fs_file_start;
    if ( codeSignedLength < cache->header.codeSignatureOffset ) {
        results->errorMessage = "code signature does not cover entire shared cache file";
        ::close(fd);
        return false;
    }
    void* mappedData = ::mmap(NULL, sizeof(firstPage), PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
    if ( mappedData == MAP_FAILED ) {
        results->errorMessage = "first page of shared cache not mmap()able";
        ::close(fd);
        return false;
    }
    if ( memcmp(mappedData, firstPage, sizeof(firstPage)) != 0 ) {
        results->errorMessage = "first page of mmap()ed shared cache not valid";
        ::close(fd);
        return false;
    }
    ::munmap(mappedData, sizeof(firstPage));

    // fill out results
    info->fd            = fd;
    info->mappingsCount = cache->header.mappingCount;
    // We have to emit the mapping for the __LINKEDIT before the slid mappings
    // This is so that the kernel has already mapped __LINKEDIT in to its address space
    // for when it copies the slid info for each __DATA mapping
    for (int i=0; i < cache->header.mappingCount; ++i) {
        uint64_t  slideInfoFileOffset = 0;
        uint64_t  slideInfoFileSize   = 0;
        vm_prot_t authProt            = 0;
        vm_prot_t initProt            = fileMappings[i].initProt;
        if ( cache->header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
            // Old cache without the new slid mappings
            if ( i == 1 ) {
                // Add slide info to the __DATA mapping
                slideInfoFileOffset = cache->header.slideInfoOffsetUnused;
                slideInfoFileSize   = cache->header.slideInfoSizeUnused;
                // Don't set auth prot to anything interesting on the old mappings
                authProt = 0;
            }
        }
        else {
            // New cache where each mapping has a corresponding slid mapping
            const dyld_cache_mapping_and_slide_info* slidableMappings = (const dyld_cache_mapping_and_slide_info*)&firstPage[cache->header.mappingWithSlideOffset];
            slideInfoFileOffset = slidableMappings[i].slideInfoFileOffset;
            slideInfoFileSize   = slidableMappings[i].slideInfoFileSize;
            if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_AUTH_DATA) == 0 )
                authProt = VM_PROT_NOAUTH;
            if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_CONST_DATA) != 0 ) {
                // The cache was built with __DATA_CONST being read-only.  We can override that
                // with a boot-arg
                if ( !gEnableSharedCacheDataConst )
                    initProt |= VM_PROT_WRITE;
            }
        }

        // Add a file for each mapping
        info->mappings[i].sms_address     = fileMappings[i].address;
        info->mappings[i].sms_size        = fileMappings[i].size;
        info->mappings[i].sms_file_offset = fileMappings[i].fileOffset;
        info->mappings[i].sms_slide_size  = 0;
        info->mappings[i].sms_slide_start = 0;
        info->mappings[i].sms_max_prot    = fileMappings[i].maxProt;
        info->mappings[i].sms_init_prot   = initProt;
        if ( slideInfoFileSize != 0 ) {
            uint64_t offsetInLinkEditRegion = (slideInfoFileOffset - linkeditMapping->fileOffset);
            info->mappings[i].sms_slide_start = (user_addr_t)(linkeditMapping->address + offsetInLinkEditRegion);
            info->mappings[i].sms_slide_size  = (user_addr_t)slideInfoFileSize;
            info->mappings[i].sms_init_prot  |= (VM_PROT_SLIDE | authProt);
            info->mappings[i].sms_max_prot   |= (VM_PROT_SLIDE | authProt);
        }
    }
    info->sharedRegionStart = cache->header.sharedRegionStart;
    info->sharedRegionSize  = cache->header.sharedRegionSize;
    info->maxSlide          = cache->header.maxSlide;
    return true;
}
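// At this point every mapping in CacheInfo is expressed as a
// shared_file_mapping_slide_np: mappings that need rebasing carry
// VM_PROT_SLIDE (plus VM_PROT_NOAUTH for non-authenticated data) in their
// protections, and sms_slide_start/sms_slide_size point at their slide info,
// which lives inside the __LINKEDIT mapping.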
#if !TARGET_OS_SIMULATOR

// update all __DATA pages with slide info
static bool rebaseDataPages(bool isVerbose, const dyld_cache_slide_info* slideInfo, const uint8_t *dataPagesStart,
                            uint64_t sharedRegionStart, SharedCacheLoadInfo* results)
{
    const dyld_cache_slide_info* slideInfoHeader = slideInfo;
    if ( slideInfoHeader != nullptr ) {
        if ( slideInfoHeader->version == 2 ) {
            const dyld_cache_slide_info2* slideHeader = (dyld_cache_slide_info2*)slideInfo;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
                    uint16_t chainIndex = (pageEntry & 0x3FFF);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & 0x3FFF)*4;
                        //dyld::log("     chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV2(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE_PAGE_ATTR_END);
                        ++chainIndex;
                    }
                }
                else {
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log("     start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV2(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
        else if ( slideInfoHeader->version == 3 ) {
            const dyld_cache_slide_info3* slideHeader = (dyld_cache_slide_info3*)slideInfo;
            const uint32_t pageSize = slideHeader->page_size;
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(dataPagesStart + (pageSize*i));
                uint64_t delta = slideHeader->page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, delta);
                if ( delta == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE )
                    continue;
                delta = delta/sizeof(uint64_t); // initial offset is byte based
                dyld_cache_slide_pointer3* loc = (dyld_cache_slide_pointer3*)page;
                do {
                    loc += delta;
                    delta = loc->plain.offsetToNextPointer;
                    if ( loc->auth.authenticated ) {
#if __has_feature(ptrauth_calls)
                        uint64_t target = sharedRegionStart + loc->auth.offsetFromSharedCacheBase + results->slide;
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw64 = *((uint64_t*)loc);
                        loc->raw = ptr.arm64e.signPointer(loc, target);
#else
                        results->errorMessage = "invalid pointer kind in cache file";
                        return false;
#endif
                    }
                    else {
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw64 = *((uint64_t*)loc);
                        loc->raw = ptr.arm64e.unpackTarget() + results->slide;
                    }
                } while (delta != 0);
            }
        }
#if !__LP64__
        else if ( slideInfoHeader->version == 4 ) {
            const dyld_cache_slide_info4* slideHeader = (dyld_cache_slide_info4*)slideInfo;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
                    uint16_t chainIndex = (pageEntry & DYLD_CACHE_SLIDE4_PAGE_INDEX);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & DYLD_CACHE_SLIDE4_PAGE_INDEX)*4;
                        //dyld::log("     chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV4(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END);
                        ++chainIndex;
                    }
                }
                else {
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log("     start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV4(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
#endif // !__LP64__
        else {
            results->errorMessage = "invalid slide info in cache file";
            return false;
        }
    }
    return true;
}
static bool reuseExistingCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    uint64_t cacheBaseAddress;
#if __i386__
    if ( syscall(294, &cacheBaseAddress) == 0 ) {
#else
    if ( __shared_region_check_np(&cacheBaseAddress) == 0 ) {
#endif
        const DyldSharedCache* existingCache = (DyldSharedCache*)cacheBaseAddress;
        if ( validMagic(options, existingCache) ) {
            const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)(cacheBaseAddress + existingCache->header.mappingOffset);
            results->loadAddress = existingCache;
            results->slide = (long)(cacheBaseAddress - fileMappings[0].address);
            // we don't know the path this cache was previously loaded from, assume default
            getCachePath(options, sizeof(results->path), results->path);
            if ( options.verbose ) {
                const dyld_cache_mapping_and_slide_info* const mappings = (const dyld_cache_mapping_and_slide_info*)(cacheBaseAddress + existingCache->header.mappingWithSlideOffset);
                dyld::log("re-using existing shared cache (%s):\n", results->path);
                shared_file_mapping_slide_np slidMappings[DyldSharedCache::MaxMappings];
                for (int i=0; i < DyldSharedCache::MaxMappings; ++i) {
                    slidMappings[i].sms_address     = mappings[i].address;
                    slidMappings[i].sms_size        = mappings[i].size;
                    slidMappings[i].sms_file_offset = mappings[i].fileOffset;
                    slidMappings[i].sms_max_prot    = mappings[i].maxProt;
                    slidMappings[i].sms_init_prot   = mappings[i].initProt;
                    slidMappings[i].sms_address    += results->slide;
                    if ( existingCache->header.mappingOffset > __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
                        // New caches have slide info on each new mapping
                        const dyld_cache_mapping_and_slide_info* const slidableMappings = (dyld_cache_mapping_and_slide_info*)(cacheBaseAddress + existingCache->header.mappingWithSlideOffset);
                        assert(existingCache->header.mappingWithSlideCount <= DyldSharedCache::MaxMappings);
                        if ( !(slidableMappings[i].flags & DYLD_CACHE_MAPPING_AUTH_DATA) ) {
                            slidMappings[i].sms_max_prot  |= VM_PROT_NOAUTH;
                            slidMappings[i].sms_init_prot |= VM_PROT_NOAUTH;
                        }
                        if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_CONST_DATA) != 0 ) {
                            // The cache was built with __DATA_CONST being read-only.  We can override that
                            // with a boot-arg
                            if ( !gEnableSharedCacheDataConst )
                                slidMappings[i].sms_init_prot |= VM_PROT_WRITE;
                        }
                    }
                }
                verboseSharedCacheMappings(slidMappings, existingCache->header.mappingCount);
            }
        }
        else {
            results->errorMessage = "existing shared cache in memory is not compatible";
        }
        return true;
    }
    return false;
}
static long pickCacheASLRSlide(CacheInfo& info)
{
    // choose new random slide
#if TARGET_OS_IPHONE || (TARGET_OS_OSX && TARGET_CPU_ARM64)
    // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
    long slide;
    if (info.maxSlide == 0)
        slide = 0;
    else
        slide = ((arc4random() % info.maxSlide) & (-16384));
#else
    long slide;
    if (info.maxSlide == 0)
        slide = 0;
    else
        slide = ((arc4random() % info.maxSlide) & (-4096));
#if defined(__x86_64__) && !TARGET_OS_SIMULATOR
    if (dyld::isTranslated()) {
        // translated processes need 16KB-aligned mappings
        slide &= (-16384);
    }
#endif
#endif

    return slide;
}
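// For example, with info.maxSlide == 0x20000000 the expression
// (arc4random() % info.maxSlide) & (-16384) yields a uniformly random,
// 16KB-aligned slide in [0, 0x20000000); masking with -4096 gives 4KB
// alignment instead. (-16384 == ~0x3FFF, so the mask clears the low 14 bits.)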
static bool mapCacheSystemWide(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    int result = 0;
    if ( info.mappingsCount != 3 ) {
        uint32_t maxSlide = options.disableASLR ? 0 : (uint32_t)info.maxSlide;

        shared_file_np file;
        file.sf_fd             = info.fd;
        file.sf_mappings_count = info.mappingsCount;
        // For the new syscall, this is actually the max slide.  The kernel now owns the actual slide
        file.sf_slide          = maxSlide;
        result = __shared_region_map_and_slide_2_np(1, &file, info.mappingsCount, info.mappings);
    }
    else {
        // With the old syscall, dyld has to choose the slide
        results->slide = options.disableASLR ? 0 : pickCacheASLRSlide(info);

        // update mappings based on the slide we choose
        for (uint32_t i=0; i < info.mappingsCount; ++i) {
            info.mappings[i].sms_address += results->slide;
            if ( info.mappings[i].sms_slide_size != 0 )
                info.mappings[i].sms_slide_start += (uint32_t)results->slide;
        }

        // If we get here then we don't have the new kernel function, so use the old one
        const dyld_cache_slide_info2* slideInfo = nullptr;
        size_t slideInfoSize = 0;
        shared_file_mapping_np mappings[3];
        for (unsigned i = 0; i != 3; ++i) {
            mappings[i].sfm_address     = info.mappings[i].sms_address;
            mappings[i].sfm_size        = info.mappings[i].sms_size;
            mappings[i].sfm_file_offset = info.mappings[i].sms_file_offset;
            mappings[i].sfm_max_prot    = info.mappings[i].sms_max_prot;
            mappings[i].sfm_init_prot   = info.mappings[i].sms_init_prot;
            if ( info.mappings[i].sms_slide_size != 0 ) {
                slideInfo     = (dyld_cache_slide_info2*)info.mappings[i].sms_slide_start;
                slideInfoSize = (size_t)info.mappings[i].sms_slide_size;
            }
        }
        result = __shared_region_map_and_slide_np(info.fd, 3, mappings, results->slide, slideInfo, slideInfoSize);
    }
    ::close(info.fd);

    if ( result == 0 ) {
        results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sms_address);
        if ( info.mappingsCount != 3 ) {
            // We don't know our own slide any more as the kernel owns it, so ask for it again now
            if ( reuseExistingCache(options, results) ) {

                // update mappings based on the slide the kernel chose
                for (uint32_t i=0; i < info.mappingsCount; ++i) {
                    info.mappings[i].sms_address += results->slide;
                    if ( info.mappings[i].sms_slide_size != 0 )
                        info.mappings[i].sms_slide_start += (uint32_t)results->slide;
                }

                if ( options.verbose )
                    verboseSharedCacheMappingsToConsole(info.mappings, info.mappingsCount);
                return true;
            }
            // Uh oh, the cache was mapped, but we couldn't find the slide
            if ( options.verbose )
                dyld::logToConsole("dyld: error finding shared cache slide for system wide mapping\n");
            return false;
        }
    }
    else {
        // could be another process beat us to it
        if ( reuseExistingCache(options, results) )
            return true;
        // if cache does not exist, then really is an error
        if ( results->errorMessage == nullptr )
            results->errorMessage = "syscall to map cache into shared region failed";
        return false;
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file system wide: %s\n", results->path);
        verboseSharedCacheMappings(info.mappings, info.mappingsCount);
    }
    return true;
}
#endif // !TARGET_OS_SIMULATOR
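// Two kernel interfaces are in play above: the newer
// __shared_region_map_and_slide_2_np(), where dyld passes only a maximum
// slide and the kernel picks (and owns) the actual slide, and the legacy
// __shared_region_map_and_slide_np(), where dyld picks the slide itself and
// hands the kernel pre-slid mappings plus the v2 slide info to apply.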
static bool mapCachePrivate(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    // open and validate cache file
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    // compute ASLR slide
    results->slide = 0;
#if !TARGET_OS_SIMULATOR
    results->slide = options.disableASLR ? 0 : pickCacheASLRSlide(info);
#endif

    // update mappings
    for (uint32_t i=0; i < info.mappingsCount; ++i) {
        info.mappings[i].sms_address += (uint32_t)results->slide;
        if ( info.mappings[i].sms_slide_size != 0 )
            info.mappings[i].sms_slide_start += (uint32_t)results->slide;
    }

    results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sms_address);

    // deallocate any existing system wide shared cache
    deallocateExistingSharedCache();

#if TARGET_OS_SIMULATOR && TARGET_OS_WATCH
    // <rdar://problem/50887685> watchOS 32-bit cache does not overlap macOS dyld cache address range
    // mmap() of a file needs a vm_allocation behind it, so make one
    vm_address_t loadAddress = 0x40000000;
    ::vm_allocate(mach_task_self(), &loadAddress, 0x40000000, VM_FLAGS_FIXED);
#endif

    // map cache just for this process with mmap()
    for (int i=0; i < info.mappingsCount; ++i) {
        void* mmapAddress = (void*)(uintptr_t)(info.mappings[i].sms_address);
        size_t size = (size_t)(info.mappings[i].sms_size);
        //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
        int protection = 0;
        if ( info.mappings[i].sms_init_prot & VM_PROT_EXECUTE )
            protection |= PROT_EXEC;
        if ( info.mappings[i].sms_init_prot & VM_PROT_READ )
            protection |= PROT_READ;
        if ( info.mappings[i].sms_init_prot & VM_PROT_WRITE )
            protection |= PROT_WRITE;
        off_t offset = info.mappings[i].sms_file_offset;
        if ( ::mmap(mmapAddress, size, protection, MAP_FIXED | MAP_PRIVATE, info.fd, offset) != mmapAddress ) {
            // failed to map some chunk of this shared cache file
            // clear shared region
            ::mmap((void*)((long)SHARED_REGION_BASE), SHARED_REGION_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
            // return failure
            results->loadAddress  = nullptr;
            results->errorMessage = "could not mmap() part of dyld cache";
            ::close(info.fd);
            return false;
        }
    }
    ::close(info.fd);

#if TARGET_OS_SIMULATOR // simulator caches do not support sliding
    return true;
#else

    // Change __DATA_CONST to read-write for this block
    DyldSharedCache::DataConstScopedWriter patcher(results->loadAddress, mach_task_self(), options.verbose ? &dyld::log : nullptr);

    __block bool success = true;
    for (int i=0; i < info.mappingsCount; ++i) {
        if ( info.mappings[i].sms_slide_size == 0 )
            continue;
        const dyld_cache_slide_info* slideInfoHeader = (const dyld_cache_slide_info*)info.mappings[i].sms_slide_start;
        const uint8_t* mappingPagesStart = (const uint8_t*)info.mappings[i].sms_address;
        success &= rebaseDataPages(options.verbose, slideInfoHeader, mappingPagesStart, info.sharedRegionStart, results);
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file private to process (%s):\n", results->path);
        verboseSharedCacheMappings(info.mappings, info.mappingsCount);
    }
    return success;
#endif
}
& options
, SharedCacheLoadInfo
* results
)
918 results
->loadAddress
= 0;
920 results
->errorMessage
= nullptr;
922 #if TARGET_OS_SIMULATOR
923 // simulator only supports mmap()ing cache privately into process
924 return mapCachePrivate(options
, results
);
926 if ( options
.forcePrivate
) {
927 // mmap cache into this process only
928 return mapCachePrivate(options
, results
);
931 // fast path: when cache is already mapped into shared region
932 bool hasError
= false;
933 if ( reuseExistingCache(options
, results
) ) {
934 hasError
= (results
->errorMessage
!= nullptr);
936 // slow path: this is first process to load cache
937 hasError
= mapCacheSystemWide(options
, results
);
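// A minimal usage sketch (hypothetical caller; only option fields referenced
// in this file are shown, and use() stands in for the caller's own code):
//
//     SharedCacheOptions opts = {};
//     opts.forcePrivate = false;   // map system-wide if possible
//     opts.disableASLR  = false;
//     opts.verbose      = true;
//     SharedCacheLoadInfo loadInfo;
//     if ( loadDyldCache(opts, &loadInfo) && (loadInfo.errorMessage == nullptr) )
//         use(loadInfo.loadAddress, loadInfo.slide);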
bool findInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind, SharedCacheFindDylibResults* results)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    if ( loadInfo.loadAddress->header.formatVersion != dyld3::closure::kFormatVersion ) {
        // support for older cache with a different Image* format
        uint64_t hash = 0;
        for (const char* s=dylibPathToFind; *s != '\0'; ++s)
            hash += hash*4 + *s;
        const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)loadInfo.loadAddress + loadInfo.loadAddress->header.imagesOffset);
        const dyld_cache_image_info* const end = &start[loadInfo.loadAddress->header.imagesCount];
        for (const dyld_cache_image_info* p = start; p != end; ++p) {
#if TARGET_OS_IPHONE
            // on iOS, inode is used to hold hash of path
            if ( (p->modTime == 0) && (p->inode != hash) )
                continue;
#endif
            const char* aPath = (char*)loadInfo.loadAddress + p->pathFileOffset;
            if ( strcmp(aPath, dylibPathToFind) == 0 ) {
                results->mhInCache    = (const mach_header*)(p->address+loadInfo.slide);
                results->pathInCache  = aPath;
                results->slideInCache = loadInfo.slide;
                results->image        = nullptr;
                return true;
            }
        }
        return false;
    }

    const dyld3::closure::ImageArray* images = loadInfo.loadAddress->cachedDylibsImageArray();
    results->image = nullptr;
    uint32_t imageIndex;
    if ( loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex) ) {
        results->image = images->imageForNum(imageIndex+1);
    }

    if ( results->image == nullptr )
        return false;

    results->mhInCache    = (const mach_header*)((uintptr_t)loadInfo.loadAddress + results->image->cacheOffset());
    results->pathInCache  = results->image->path();
    results->slideInCache = loadInfo.slide;
    return true;
}
bool pathIsInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    uint32_t imageIndex;
    return loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex);
}
void deallocateExistingSharedCache()
{
#if TARGET_OS_SIMULATOR
    // dyld deallocated the macOS shared cache before jumping into dyld_sim
#else
    // <rdar://problem/50773474> remove the shared region sub-map
    uint64_t existingCacheAddress = 0;
    if ( __shared_region_check_np(&existingCacheAddress) == 0 ) {
        ::mmap((void*)((long)SHARED_REGION_BASE), SHARED_REGION_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
    }
#endif
}

} // namespace dyld3