dyld3/SharedCacheRuntime.cpp (dyld-832.7.1)
/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */



#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <mach/mach.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <mach-o/ldsyms.h>
#include <mach/shared_region.h>
#include <Availability.h>
#include <TargetConditionals.h>

#include "dyld_cache_format.h"
#include "SharedCacheRuntime.h"
#include "Loading.h"
#include "BootArgs.h"

#define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024
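// The "enable-dylibs-to-override-cache" sentinel file is only honored if it is
// smaller than this many bytes (see the st_size check in getCachePath below).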

// should be in mach/shared_region.h
extern "C" int __shared_region_check_np(uint64_t* startaddress);
extern "C" int __shared_region_map_and_slide_np(int fd, uint32_t count, const shared_file_mapping_np mappings[], long slide, const dyld_cache_slide_info2* slideInfo, size_t slideInfoSize);
extern "C" int __shared_region_map_and_slide_2_np(uint32_t files_count, const shared_file_np files[], uint32_t mappings_count, const shared_file_mapping_slide_np mappings[]);
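// Two generations of the shared-region syscall are used below (see mapCacheSystemWide):
// with the older __shared_region_map_and_slide_np, dyld picks the ASLR slide itself and
// passes exactly three mappings plus one blob of slide info; with the newer
// __shared_region_map_and_slide_2_np, dyld only passes a maximum slide and per-mapping
// slide info, the kernel chooses the actual slide, and dyld reads it back afterwards
// via __shared_region_check_np.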

#ifndef VM_PROT_NOAUTH
#define VM_PROT_NOAUTH  0x40  /* must not interfere with normal prot assignments */
#endif

namespace dyld {
    extern void log(const char*, ...);
    extern void logToConsole(const char* format, ...);
#if defined(__x86_64__) && !TARGET_OS_SIMULATOR
    bool isTranslated();
#endif
}


namespace dyld3 {


struct CacheInfo
{
    shared_file_mapping_slide_np    mappings[DyldSharedCache::MaxMappings];
    uint32_t                        mappingsCount;
    // All mappings come from the same file
    int                             fd = 0;
    uint64_t                        sharedRegionStart;
    uint64_t                        sharedRegionSize;
    uint64_t                        maxSlide;
};




#if __i386__
    #define ARCH_NAME            "i386"
    #define ARCH_CACHE_MAGIC     "dyld_v1    i386"
#elif __x86_64__
    #define ARCH_NAME            "x86_64"
    #define ARCH_CACHE_MAGIC     "dyld_v1  x86_64"
    #define ARCH_NAME_H          "x86_64h"
    #define ARCH_CACHE_MAGIC_H   "dyld_v1 x86_64h"
#elif __ARM_ARCH_7K__
    #define ARCH_NAME            "armv7k"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7k"
#elif __ARM_ARCH_7A__
    #define ARCH_NAME            "armv7"
    #define ARCH_CACHE_MAGIC     "dyld_v1   armv7"
#elif __ARM_ARCH_7S__
    #define ARCH_NAME            "armv7s"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7s"
#elif __arm64e__
    #define ARCH_NAME            "arm64e"
    #define ARCH_CACHE_MAGIC     "dyld_v1  arm64e"
#elif __arm64__
  #if __LP64__
    #define ARCH_NAME            "arm64"
    #define ARCH_CACHE_MAGIC     "dyld_v1   arm64"
  #else
    #define ARCH_NAME            "arm64_32"
    #define ARCH_CACHE_MAGIC     "dyld_v1arm64_32"
  #endif
#endif
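// Each magic string is the 16-byte magic field of dyld_cache_header: "dyld_v1" with
// the architecture name right-justified so the whole string is 15 characters plus a
// terminating NUL, which is why the space padding varies and "arm64_32" has none.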


#if !TARGET_OS_SIMULATOR
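// Walk one page's chain of v2 rebase locations.  Each pointer-sized slot encodes both
// its value (rawValue & valueMask) and the distance to the next slot in the chain
// (rawValue & deltaMask).  Deltas are stored in 4-byte units, so shifting right by
// (ctz(deltaMask) - 2) yields a byte delta directly.  Worked example: with
// delta_mask == 0x00FFFF0000000000, ctz is 40 and deltaShift is 38; a stored delta
// field of 2 gives (2 << 40) >> 38 == 8, i.e. the next fixup is 8 bytes further into
// the page.  A delta of 0 ends the chain.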
static void rebaseChainV2(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info2* slideInfo)
{
    const uintptr_t   deltaMask    = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t   valueMask    = ~deltaMask;
    const uintptr_t   valueAdd     = (uintptr_t)(slideInfo->value_add);
    const unsigned    deltaShift   = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( value != 0 ) {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif

#if !__LP64__ && !TARGET_OS_SIMULATOR
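// 32-bit variant of the chain walk (v4 slide info).  Unlike v2, some slots hold small
// non-pointer immediates that must not be slid: values whose 0xFFFF8000 bits are all
// clear are small positives used as-is, and values whose 0x3FFF8000 bits are all set
// are small negatives whose top bits (0xC0000000) are restored.  Everything else is a
// real pointer and gets value_add plus the slide.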
static void rebaseChainV4(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info4* slideInfo)
{
    const uintptr_t   deltaMask    = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t   valueMask    = ~deltaMask;
    const uintptr_t   valueAdd     = (uintptr_t)(slideInfo->value_add);
    const unsigned    deltaShift   = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( (value & 0xFFFF8000) == 0 ) {
            // small positive non-pointer, use as-is
        }
        else if ( (value & 0x3FFF8000) == 0x3FFF8000 ) {
            // small negative non-pointer
            value |= 0xC0000000;
        }
        else {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif

#if TARGET_OS_OSX
bool getMacOSCachePath(char pathBuffer[], size_t pathBufferSize,
                       const char* cacheDir, bool useHaswell) {
    // Clear old attempts at finding a cache, if any
    pathBuffer[0] = '\0';

    // set cache dir
    strlcpy(pathBuffer, cacheDir, pathBufferSize);

    // append file component of cache file
    if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
        strlcat(pathBuffer, "/", pathBufferSize);

#if __x86_64__
    if ( useHaswell ) {
        size_t len = strlen(pathBuffer);
        struct stat haswellStatBuf;
        strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H, pathBufferSize);
        if ( dyld3::stat(pathBuffer, &haswellStatBuf) == 0 )
            return true;
        // no haswell cache file, use regular x86_64 cache
        pathBuffer[len] = '\0';
    }
#endif

    struct stat statBuf;
    strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);
    if ( dyld3::stat(pathBuffer, &statBuf) == 0 )
        return true;

    return false;
}
#endif // TARGET_OS_OSX
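// On a Haswell-capable x86_64 Mac this resolves to something like
// "/System/Library/dyld/dyld_shared_cache_x86_64h" (assuming the usual expansions of
// MACOSX_MRM_DYLD_SHARED_CACHE_DIR and DYLD_SHARED_CACHE_BASE_NAME), falling back to
// ".../dyld_shared_cache_x86_64" when the h variant is absent.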

static void getCachePath(const SharedCacheOptions& options, size_t pathBufferSize, char pathBuffer[])
{
#if TARGET_OS_OSX

    if ( options.cacheDirOverride != nullptr ) {
        getMacOSCachePath(pathBuffer, pathBufferSize, options.cacheDirOverride, options.useHaswell);
    } else {
        getMacOSCachePath(pathBuffer, pathBufferSize, MACOSX_MRM_DYLD_SHARED_CACHE_DIR, options.useHaswell);
    }

#else // TARGET_OS_OSX

    // Non-macOS path
    if ( options.cacheDirOverride != nullptr ) {
        strlcpy(pathBuffer, options.cacheDirOverride, pathBufferSize);
    } else {
        strlcpy(pathBuffer, IPHONE_DYLD_SHARED_CACHE_DIR, pathBufferSize);
    }

    // append file component of cache file
    if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
        strlcat(pathBuffer, "/", pathBufferSize);

    strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);

#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
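    // Decide between the customer cache and the ".development" cache.  In order of
    // precedence: the force-customer boot-arg wins, customer installs never get the
    // development cache, the force-development boot-arg is honored next, then
    // whichever of the two caches actually exists on disk, and finally the sentinel
    // files checked at the end.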
    // use .development cache if it exists
    if ( BootArgs::forceCustomerCache() ) {
        // The boot-arg always wins.  Use the customer cache if we are told to
        return;
    }
    if ( !dyld3::internalInstall() ) {
        // We can't use the development cache on customer installs
        return;
    }
    if ( BootArgs::forceDevelopmentCache() ) {
        // The boot-arg always wins.  Use the development cache if we are told to
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }

    // If only one or the other caches exists, then use the one we have
    struct stat devCacheStatBuf;
    struct stat optCacheStatBuf;
    bool devCacheExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT, &devCacheStatBuf) == 0);
    bool optCacheExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &optCacheStatBuf) == 0);
    if ( !devCacheExists ) {
        // If the dev cache doesn't exist, then use the customer cache
        return;
    }
    if ( !optCacheExists ) {
        // If the customer cache doesn't exist, then use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }

    // Finally, check for the sentinels
    struct stat enableStatBuf;
    //struct stat sentinelStatBuf;
    bool enableFileExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR "enable-dylibs-to-override-cache", &enableStatBuf) == 0);
    // FIXME: rdar://problem/59813537 Re-enable once automation is updated to use boot-arg
    bool sentinelFileExists = false;
    //bool sentinelFileExists = (dyld3::stat(MACOSX_MRM_DYLD_SHARED_CACHE_DIR "enable_development_mode", &sentinelStatBuf) == 0);
    if ( enableFileExists && (enableStatBuf.st_size < ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE) ) {
        // if the old enable file exists, use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }
    if ( sentinelFileExists ) {
        // If the new sentinel exists, then use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }
#endif

#endif //!TARGET_OS_OSX
}


int openSharedCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    getCachePath(options, sizeof(results->path), results->path);
    return dyld3::open(results->path, O_RDONLY, 0);
}

static bool validMagic(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC) == 0 )
        return true;

#if __x86_64__
    if ( options.useHaswell ) {
        if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC_H) == 0 )
            return true;
    }
#endif
    return false;
}


static bool validPlatform(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    // grandfather in old cache that does not have platform in header
    if ( cache->header.mappingOffset < 0xE0 )
        return true;

    if ( cache->header.platform != (uint32_t)MachOFile::currentPlatform() )
        return false;

#if TARGET_OS_SIMULATOR
    if ( cache->header.simulator == 0 )
        return false;
#else
    if ( cache->header.simulator != 0 )
        return false;
#endif

    return true;
}

#if !TARGET_OS_SIMULATOR
static void verboseSharedCacheMappings(const shared_file_mapping_slide_np mappings[DyldSharedCache::MaxMappings],
                                       uint32_t mappingsCount)
{
    for (int i=0; i < mappingsCount; ++i) {
        const char* mappingName = "";
        if ( mappings[i].sms_init_prot & VM_PROT_WRITE ) {
            if ( mappings[i].sms_init_prot & VM_PROT_NOAUTH ) {
                // __DATA*
                mappingName = "data";
            } else {
                // __AUTH*
                mappingName = "auth";
            }
        }
        uint32_t init_prot = mappings[i].sms_init_prot & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        uint32_t max_prot  = mappings[i].sms_max_prot  & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        dyld::log("        0x%08llX->0x%08llX init=%x, max=%x %s%s%s%s\n",
                  mappings[i].sms_address, mappings[i].sms_address+mappings[i].sms_size-1,
                  init_prot, max_prot,
                  ((mappings[i].sms_init_prot & VM_PROT_READ) ? "read " : ""),
                  ((mappings[i].sms_init_prot & VM_PROT_WRITE) ? "write " : ""),
                  ((mappings[i].sms_init_prot & VM_PROT_EXECUTE) ? "execute " : ""),
                  mappingName);
    }
}
#endif
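// Open and sanity-check the cache file without mapping it: validate the header magic
// and platform, verify the mapping table describes a contiguous file layout (TEXT at
// file offset 0, each region's file range abutting the next, LINKEDIT last and
// read-only), register the code signature, and confirm the signature covers the whole
// file.  On success, fill in the CacheInfo mappings used by the actual map calls.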
static bool preflightCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results, CacheInfo* info)
{

    // find and open shared cache file
    int fd = openSharedCacheFile(options, results);
    if ( fd == -1 ) {
        results->errorMessage = "shared cache file open() failed";
        return false;
    }

    struct stat cacheStatBuf;
    if ( dyld3::stat(results->path, &cacheStatBuf) != 0 ) {
        results->errorMessage = "shared cache file stat() failed";
        ::close(fd);
        return false;
    }
    size_t cacheFileLength = (size_t)(cacheStatBuf.st_size);

    // sanity check header and mappings
    uint8_t firstPage[0x4000];
    if ( ::pread(fd, firstPage, sizeof(firstPage), 0) != sizeof(firstPage) ) {
        results->errorMessage = "shared cache file pread() failed";
        ::close(fd);
        return false;
    }
    const DyldSharedCache* cache = (DyldSharedCache*)firstPage;
    if ( !validMagic(options, cache) ) {
        results->errorMessage = "shared cache file has wrong magic";
        ::close(fd);
        return false;
    }
    if ( !validPlatform(options, cache) ) {
        results->errorMessage = "shared cache file is for a different platform";
        ::close(fd);
        return false;
    }
    if ( (cache->header.mappingCount < 3) || (cache->header.mappingCount > DyldSharedCache::MaxMappings) || (cache->header.mappingOffset > 0x168) ) {
        results->errorMessage = "shared cache file mappings are invalid";
        ::close(fd);
        return false;
    }
    const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)&firstPage[cache->header.mappingOffset];
    const dyld_cache_mapping_info* textMapping      = &fileMappings[0];
    const dyld_cache_mapping_info* firstDataMapping = &fileMappings[1];
    const dyld_cache_mapping_info* linkeditMapping  = &fileMappings[cache->header.mappingCount - 1];
    if (   (textMapping->fileOffset != 0)
        || ((fileMappings[0].address + fileMappings[0].size) > firstDataMapping->address)
        || ((fileMappings[0].fileOffset + fileMappings[0].size) != firstDataMapping->fileOffset)
        || ((cache->header.codeSignatureOffset + cache->header.codeSignatureSize) != cacheFileLength)
        || (textMapping->maxProt != (VM_PROT_READ|VM_PROT_EXECUTE))
        || (linkeditMapping->maxProt != VM_PROT_READ) ) {
        results->errorMessage = "shared cache text/linkedit mappings are invalid";
        ::close(fd);
        return false;
    }

    // Check the __DATA mappings
    for (unsigned i = 1; i != (cache->header.mappingCount - 1); ++i) {
        if (   ((fileMappings[i].address + fileMappings[i].size) > fileMappings[i + 1].address)
            || ((fileMappings[i].fileOffset + fileMappings[i].size) != fileMappings[i + 1].fileOffset)
            || (fileMappings[i].maxProt != (VM_PROT_READ|VM_PROT_WRITE)) ) {
            results->errorMessage = "shared cache data mappings are invalid";
            ::close(fd);
            return false;
        }
    }

    if ( (textMapping->address != cache->header.sharedRegionStart) || ((linkeditMapping->address + linkeditMapping->size) > (cache->header.sharedRegionStart+cache->header.sharedRegionSize)) ) {
        results->errorMessage = "shared cache file mapping addresses invalid";
        ::close(fd);
        return false;
    }

    // register code signature of cache file
    fsignatures_t siginfo;
    siginfo.fs_file_start = 0;  // cache always starts at beginning of file
    siginfo.fs_blob_start = (void*)cache->header.codeSignatureOffset;
    siginfo.fs_blob_size  = (size_t)(cache->header.codeSignatureSize);
    int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
    if ( result == -1 ) {
        results->errorMessage = "code signature registration for shared cache failed";
        ::close(fd);
        return false;
    }

    // <rdar://problem/23188073> validate code signature covers entire shared cache
    uint64_t codeSignedLength = siginfo.fs_file_start;
    if ( codeSignedLength < cache->header.codeSignatureOffset ) {
        results->errorMessage = "code signature does not cover entire shared cache file";
        ::close(fd);
        return false;
    }
    void* mappedData = ::mmap(NULL, sizeof(firstPage), PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
    if ( mappedData == MAP_FAILED ) {
        results->errorMessage = "first page of shared cache not mmap()able";
        ::close(fd);
        return false;
    }
    if ( memcmp(mappedData, firstPage, sizeof(firstPage)) != 0 ) {
        results->errorMessage = "first page of mmap()ed shared cache not valid";
        ::close(fd);
        return false;
    }
    ::munmap(mappedData, sizeof(firstPage));

    // fill out results
    info->mappingsCount = cache->header.mappingCount;
    // We have to emit the mapping for the __LINKEDIT before the slid mappings
    // This is so that the kernel has already mapped __LINKEDIT into its address space
    // for when it copies the slid info for each __DATA mapping
    for (int i=0; i < cache->header.mappingCount; ++i) {
        uint64_t    slideInfoFileOffset = 0;
        uint64_t    slideInfoFileSize   = 0;
        vm_prot_t   authProt            = 0;
        if ( cache->header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
            // Old cache without the new slid mappings
            if ( i == 1 ) {
                // Add slide info to the __DATA mapping
                slideInfoFileOffset = cache->header.slideInfoOffsetUnused;
                slideInfoFileSize   = cache->header.slideInfoSizeUnused;
                // Don't set auth prot to anything interesting on the old mappings
                authProt = 0;
            }
        } else {
            // New cache where each mapping has a corresponding slid mapping
            const dyld_cache_mapping_and_slide_info* slidableMappings = (const dyld_cache_mapping_and_slide_info*)&firstPage[cache->header.mappingWithSlideOffset];
            slideInfoFileOffset = slidableMappings[i].slideInfoFileOffset;
            slideInfoFileSize   = slidableMappings[i].slideInfoFileSize;
            if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_AUTH_DATA) == 0 )
                authProt = VM_PROT_NOAUTH;
        }

        // Add a file for each mapping
        info->fd                          = fd;
        info->mappings[i].sms_address     = fileMappings[i].address;
        info->mappings[i].sms_size        = fileMappings[i].size;
        info->mappings[i].sms_file_offset = fileMappings[i].fileOffset;
        info->mappings[i].sms_slide_size  = 0;
        info->mappings[i].sms_slide_start = 0;
        info->mappings[i].sms_max_prot    = fileMappings[i].maxProt;
        info->mappings[i].sms_init_prot   = fileMappings[i].initProt;
        if ( slideInfoFileSize != 0 ) {
            uint64_t offsetInLinkEditRegion = (slideInfoFileOffset - linkeditMapping->fileOffset);
            info->mappings[i].sms_slide_start = (user_addr_t)(linkeditMapping->address + offsetInLinkEditRegion);
            info->mappings[i].sms_slide_size  = (user_addr_t)slideInfoFileSize;
            info->mappings[i].sms_init_prot  |= (VM_PROT_SLIDE | authProt);
            info->mappings[i].sms_max_prot   |= (VM_PROT_SLIDE | authProt);
        }
    }
    info->sharedRegionStart = cache->header.sharedRegionStart;
    info->sharedRegionSize  = cache->header.sharedRegionSize;
    info->maxSlide          = cache->header.maxSlide;
    return true;
}


#if !TARGET_OS_SIMULATOR

// update all __DATA pages with slide info
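// Slide info format versions handled here: v2 uses per-page chains of delta-encoded
// pointers (rebaseChainV2); v3 (64-bit only) uses dyld_cache_slide_pointer3 chains
// whose entries may carry arm64e ptrauth signatures and must be re-signed at their
// slid address; v4 (32-bit only) uses chains with small non-pointer immediates
// (rebaseChainV4).  Any other version is rejected as invalid.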
static bool rebaseDataPages(bool isVerbose, const dyld_cache_slide_info* slideInfo, const uint8_t *dataPagesStart,
                            uint64_t sharedRegionStart, SharedCacheLoadInfo* results)
{
    const dyld_cache_slide_info* slideInfoHeader = slideInfo;
    if ( slideInfoHeader != nullptr ) {
        if ( slideInfoHeader->version == 2 ) {
            const dyld_cache_slide_info2* slideHeader = (dyld_cache_slide_info2*)slideInfo;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
                    uint16_t chainIndex = (pageEntry & 0x3FFF);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & 0x3FFF)*4;
                        //dyld::log("     chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV2(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE_PAGE_ATTR_END);
                        ++chainIndex;
                    }
                }
                else {
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log("     start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV2(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
#if __LP64__
        else if ( slideInfoHeader->version == 3 ) {
            const dyld_cache_slide_info3* slideHeader = (dyld_cache_slide_info3*)slideInfo;
            const uint32_t pageSize = slideHeader->page_size;
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(dataPagesStart + (pageSize*i));
                uint64_t delta = slideHeader->page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, delta);
                if ( delta == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE )
                    continue;
                delta = delta/sizeof(uint64_t); // initial offset is byte based
                dyld_cache_slide_pointer3* loc = (dyld_cache_slide_pointer3*)page;
                do {
                    loc += delta;
                    delta = loc->plain.offsetToNextPointer;
                    if ( loc->auth.authenticated ) {
#if __has_feature(ptrauth_calls)
                        uint64_t target = sharedRegionStart + loc->auth.offsetFromSharedCacheBase + results->slide;
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw64 = *((uint64_t*)loc);
                        loc->raw = ptr.arm64e.signPointer(loc, target);
#else
                        results->errorMessage = "invalid pointer kind in cache file";
                        return false;
#endif
                    }
                    else {
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw64 = *((uint64_t*)loc);
                        loc->raw = ptr.arm64e.unpackTarget() + results->slide;
                    }
                } while (delta != 0);
            }
        }
#else
        else if ( slideInfoHeader->version == 4 ) {
            const dyld_cache_slide_info4* slideHeader = (dyld_cache_slide_info4*)slideInfo;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
                    uint16_t chainIndex = (pageEntry & DYLD_CACHE_SLIDE4_PAGE_INDEX);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & DYLD_CACHE_SLIDE4_PAGE_INDEX)*4;
                        //dyld::log("     chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV4(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END);
                        ++chainIndex;
                    }
                }
                else {
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log("     start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV4(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
#endif // LP64
        else {
            results->errorMessage = "invalid slide info in cache file";
            return false;
        }
    }
    return true;
}
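// Check (via __shared_region_check_np) whether a cache is already mapped into the
// shared region.  Returns true whenever one is present, compatible or not;
// results->errorMessage distinguishes the incompatible case.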
static bool reuseExistingCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    uint64_t cacheBaseAddress;
#if __i386__
    if ( syscall(294, &cacheBaseAddress) == 0 ) {
#else
    if ( __shared_region_check_np(&cacheBaseAddress) == 0 ) {
#endif
        const DyldSharedCache* existingCache = (DyldSharedCache*)cacheBaseAddress;
        if ( validMagic(options, existingCache) ) {
            const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)(cacheBaseAddress + existingCache->header.mappingOffset);
            results->loadAddress = existingCache;
            results->slide = (long)(cacheBaseAddress - fileMappings[0].address);
            // we don't know the path this cache was previously loaded from, assume default
            getCachePath(options, sizeof(results->path), results->path);
            if ( options.verbose ) {
                const shared_file_mapping_np* const mappings = (shared_file_mapping_np*)(cacheBaseAddress + existingCache->header.mappingOffset);
                dyld::log("re-using existing shared cache (%s):\n", results->path);
                shared_file_mapping_slide_np slidMappings[DyldSharedCache::MaxMappings];
                for (int i=0; i < DyldSharedCache::MaxMappings; ++i) {
                    slidMappings[i].sms_address     = mappings[i].sfm_address;
                    slidMappings[i].sms_size        = mappings[i].sfm_size;
                    slidMappings[i].sms_file_offset = mappings[i].sfm_file_offset;
                    slidMappings[i].sms_max_prot    = mappings[i].sfm_max_prot;
                    slidMappings[i].sms_init_prot   = mappings[i].sfm_init_prot;

                    slidMappings[i].sms_address += results->slide;
                    if ( existingCache->header.mappingOffset > __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
                        // New caches have slide info on each new mapping
                        const dyld_cache_mapping_and_slide_info* const slidableMappings = (dyld_cache_mapping_and_slide_info*)(cacheBaseAddress + existingCache->header.mappingWithSlideOffset);
                        assert(existingCache->header.mappingWithSlideCount <= DyldSharedCache::MaxMappings);
                        if ( !(slidableMappings[i].flags & DYLD_CACHE_MAPPING_AUTH_DATA) ) {
                            slidMappings[i].sms_max_prot  |= VM_PROT_NOAUTH;
                            slidMappings[i].sms_init_prot |= VM_PROT_NOAUTH;
                        }
                    }
                }
                verboseSharedCacheMappings(slidMappings, existingCache->header.mappingCount);
            }
        }
        else {
            results->errorMessage = "existing shared cache in memory is not compatible";
        }
        return true;
    }
    return false;
}
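// Pick a random ASLR slide below maxSlide, truncated to the allocation granularity:
// on 16KB-page targets "& (-16384)" clears the low 14 bits, so for example a draw of
// 0x12345678 becomes 0x12344000.  Translated x86_64 processes (e.g. under Rosetta)
// are likewise forced to 16KB alignment.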
static long pickCacheASLRSlide(CacheInfo& info)
{
    // choose new random slide
#if TARGET_OS_IPHONE || (TARGET_OS_OSX && TARGET_CPU_ARM64)
    // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
    long slide;
    if (info.maxSlide == 0)
        slide = 0;
    else
        slide = ((arc4random() % info.maxSlide) & (-16384));
#else
    long slide;
    if (info.maxSlide == 0)
        slide = 0;
    else
        slide = ((arc4random() % info.maxSlide) & (-4096));
#if defined(__x86_64__) && !TARGET_OS_SIMULATOR
    if (dyld::isTranslated()) {
        slide &= (-16384);
    }
#endif
#endif

    return slide;
}
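// Map the cache into the shared region for all processes.  A cache with other than
// the classic three mappings requires the newer __shared_region_map_and_slide_2_np
// syscall, where the kernel picks the slide; with exactly three mappings, dyld picks
// the slide itself and uses the older __shared_region_map_and_slide_np.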
static bool mapCacheSystemWide(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    int result = 0;
    if ( info.mappingsCount != 3 ) {
        uint32_t maxSlide = options.disableASLR ? 0 : (uint32_t)info.maxSlide;

        shared_file_np file;
        file.sf_fd             = info.fd;
        file.sf_mappings_count = info.mappingsCount;
        // For the new syscall, this is actually the max slide.  The kernel now owns the actual slide
        file.sf_slide          = maxSlide;
        result = __shared_region_map_and_slide_2_np(1, &file, info.mappingsCount, info.mappings);
    } else {
        // With the old syscall, dyld has to choose the slide
        results->slide = options.disableASLR ? 0 : pickCacheASLRSlide(info);

        // update mappings based on the slide we chose
        for (uint32_t i=0; i < info.mappingsCount; ++i) {
            info.mappings[i].sms_address += results->slide;
            if ( info.mappings[i].sms_slide_size != 0 )
                info.mappings[i].sms_slide_start += (uint32_t)results->slide;
        }

        // If we get here then we don't have the new kernel function, so use the old one
        const dyld_cache_slide_info2* slideInfo = nullptr;
        size_t slideInfoSize = 0;
        shared_file_mapping_np mappings[3];
        for (unsigned i = 0; i != 3; ++i) {
            mappings[i].sfm_address     = info.mappings[i].sms_address;
            mappings[i].sfm_size        = info.mappings[i].sms_size;
            mappings[i].sfm_file_offset = info.mappings[i].sms_file_offset;
            mappings[i].sfm_max_prot    = info.mappings[i].sms_max_prot;
            mappings[i].sfm_init_prot   = info.mappings[i].sms_init_prot;
            if ( info.mappings[i].sms_slide_size != 0 ) {
                slideInfo     = (dyld_cache_slide_info2*)info.mappings[i].sms_slide_start;
                slideInfoSize = (size_t)info.mappings[i].sms_slide_size;
            }
        }
        result = __shared_region_map_and_slide_np(info.fd, 3, mappings, results->slide, slideInfo, slideInfoSize);
    }

    ::close(info.fd);
    if ( result == 0 ) {
        results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sms_address);
        if ( info.mappingsCount != 3 ) {
            // We don't know our own slide any more as the kernel owns it, so ask for it again now
            if ( reuseExistingCache(options, results) )
                return true;
            // Uh oh, the mapping succeeded but we could not find the shared region to read the slide back
            dyld::logToConsole("dyld: error finding shared cache slide for system wide mapping\n");
            return false;
        }
    }
    else {
        // another process may have beaten us to mapping the cache
        if ( reuseExistingCache(options, results) )
            return true;
        // if cache does not exist, then really is an error
        if ( results->errorMessage == nullptr )
            results->errorMessage = "syscall to map cache into shared region failed";
        return false;
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file system wide: %s\n", results->path);
        verboseSharedCacheMappings(info.mappings, info.mappingsCount);
    }
    return true;
}
#endif // TARGET_OS_SIMULATOR
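// Map the cache into this process only: the simulator always takes this path, and
// options.forcePrivate selects it otherwise (see loadDyldCache).  Each region is
// mmap()ed at its slid address and any slide info is applied in-process by
// rebaseDataPages().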
static bool mapCachePrivate(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    // open and validate cache file
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    // compute ASLR slide
    results->slide = 0;
#if !TARGET_OS_SIMULATOR
    results->slide = options.disableASLR ? 0 : pickCacheASLRSlide(info);
#endif

    // update mappings
    for (uint32_t i=0; i < info.mappingsCount; ++i) {
        info.mappings[i].sms_address += (uint32_t)results->slide;
        if ( info.mappings[i].sms_slide_size != 0 )
            info.mappings[i].sms_slide_start += (uint32_t)results->slide;
    }

    results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sms_address);

    // deallocate any existing system wide shared cache
    deallocateExistingSharedCache();

#if TARGET_OS_SIMULATOR && TARGET_OS_WATCH
    // <rdar://problem/50887685> watchOS 32-bit cache does not overlap macOS dyld cache address range
    // mmap() of a file needs a vm_allocation behind it, so make one
    vm_address_t loadAddress = 0x40000000;
    ::vm_allocate(mach_task_self(), &loadAddress, 0x40000000, VM_FLAGS_FIXED);
#endif

    // map cache just for this process with mmap()
    for (int i=0; i < info.mappingsCount; ++i) {
        void* mmapAddress = (void*)(uintptr_t)(info.mappings[i].sms_address);
        size_t size = (size_t)(info.mappings[i].sms_size);
        //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
        int protection = 0;
        if ( info.mappings[i].sms_init_prot & VM_PROT_EXECUTE )
            protection |= PROT_EXEC;
        if ( info.mappings[i].sms_init_prot & VM_PROT_READ )
            protection |= PROT_READ;
        if ( info.mappings[i].sms_init_prot & VM_PROT_WRITE )
            protection |= PROT_WRITE;
        off_t offset = info.mappings[i].sms_file_offset;
        if ( ::mmap(mmapAddress, size, protection, MAP_FIXED | MAP_PRIVATE, info.fd, offset) != mmapAddress ) {
            // failed to map some chunk of this shared cache file
            // clear shared region
            ::mmap((void*)((long)SHARED_REGION_BASE), SHARED_REGION_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
            // return failure
            results->loadAddress  = nullptr;
            results->errorMessage = "could not mmap() part of dyld cache";
            ::close(info.fd);
            return false;
        }
    }
    ::close(info.fd);

#if TARGET_OS_SIMULATOR // simulator caches do not support sliding
    return true;
#else
    __block bool success = true;
    for (int i=0; i < info.mappingsCount; ++i) {
        if ( info.mappings[i].sms_slide_size == 0 )
            continue;
        const dyld_cache_slide_info* slideInfoHeader = (const dyld_cache_slide_info*)info.mappings[i].sms_slide_start;
        const uint8_t* mappingPagesStart = (const uint8_t*)info.mappings[i].sms_address;
        success &= rebaseDataPages(options.verbose, slideInfoHeader, mappingPagesStart, info.sharedRegionStart, results);
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file private to process (%s):\n", results->path);
        verboseSharedCacheMappings(info.mappings, info.mappingsCount);
    }
    return success;
#endif
}



bool loadDyldCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    results->loadAddress  = 0;
    results->slide        = 0;
    results->errorMessage = nullptr;

#if TARGET_OS_SIMULATOR
    // simulator only supports mmap()ing cache privately into process
    return mapCachePrivate(options, results);
#else
    if ( options.forcePrivate ) {
        // mmap cache into this process only
        return mapCachePrivate(options, results);
    }
    else {
        // fast path: when cache is already mapped into shared region
        bool hasError = false;
        if ( reuseExistingCache(options, results) ) {
            hasError = (results->errorMessage != nullptr);
        } else {
            // slow path: this is first process to load cache
            hasError = mapCacheSystemWide(options, results);
        }
        return hasError;
    }
#endif
}


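// Look up a dylib by path in the cache.  Old-format caches are searched linearly
// through the image table; on embedded platforms an entry with modTime == 0 stores a
// simple hash of the path in the otherwise-unused inode field (the loop below computes
// hash = hash*5 + c per character), letting most non-matches be skipped without a
// strcmp.  New-format caches are looked up via hasImagePath() on the closure
// ImageArray.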
bool findInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind, SharedCacheFindDylibResults* results)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    if ( loadInfo.loadAddress->header.formatVersion != dyld3::closure::kFormatVersion ) {
        // support for older cache with a different Image* format
#if TARGET_OS_IPHONE
        uint64_t hash = 0;
        for (const char* s=dylibPathToFind; *s != '\0'; ++s)
            hash += hash*4 + *s;
#endif
        const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)loadInfo.loadAddress + loadInfo.loadAddress->header.imagesOffset);
        const dyld_cache_image_info* const end = &start[loadInfo.loadAddress->header.imagesCount];
        for (const dyld_cache_image_info* p = start; p != end; ++p) {
#if TARGET_OS_IPHONE
            // on iOS, inode is used to hold hash of path
            if ( (p->modTime == 0) && (p->inode != hash) )
                continue;
#endif
            const char* aPath = (char*)loadInfo.loadAddress + p->pathFileOffset;
            if ( strcmp(aPath, dylibPathToFind) == 0 ) {
                results->mhInCache    = (const mach_header*)(p->address+loadInfo.slide);
                results->pathInCache  = aPath;
                results->slideInCache = loadInfo.slide;
                results->image        = nullptr;
                return true;
            }
        }
        return false;
    }

    const dyld3::closure::ImageArray* images = loadInfo.loadAddress->cachedDylibsImageArray();
    results->image = nullptr;
    uint32_t imageIndex;
    if ( loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex) ) {
        results->image = images->imageForNum(imageIndex+1);
    }

    if ( results->image == nullptr )
        return false;

    results->mhInCache    = (const mach_header*)((uintptr_t)loadInfo.loadAddress + results->image->cacheOffset());
    results->pathInCache  = results->image->path();
    results->slideInCache = loadInfo.slide;
    return true;
}


bool pathIsInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    uint32_t imageIndex;
    return loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex);
}

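// Remove any shared-cache submap already present in this process by replacing the
// entire shared region with a PROT_NONE anonymous mapping (see rdar://50773474).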
void deallocateExistingSharedCache()
{
#if TARGET_OS_SIMULATOR
    // dyld already deallocated the macOS shared cache before jumping into dyld_sim
#else
    // <rdar://problem/50773474> remove the shared region sub-map
    uint64_t existingCacheAddress = 0;
    if ( __shared_region_check_np(&existingCacheAddress) == 0 ) {
        ::mmap((void*)((long)SHARED_REGION_BASE), SHARED_REGION_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
    }
#endif
}

} // namespace dyld3