// dyld3/SharedCacheRuntime.cpp (dyld-851.27)
/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <mach/mach.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <mach-o/ldsyms.h>
#include <mach/shared_region.h>
#include <Availability.h>
#include <TargetConditionals.h>

#include "dyld_cache_format.h"
#include "SharedCacheRuntime.h"
#include "Loading.h"
#include "BootArgs.h"

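// Maximum size, in bytes, of the "enable-dylibs-to-override-cache" sentinel file.
// getCachePath() below only honors the sentinel when the file is smaller than this.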
#define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024

// should be in mach/shared_region.h
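// Two generations of the shared-region syscall are declared here:
//  - __shared_region_map_and_slide_np(): legacy interface; dyld picks the ASLR
//    slide itself and passes a single file with exactly three mappings plus one
//    blob of v2 slide info for the kernel to apply.
//  - __shared_region_map_and_slide_2_np(): newer interface used when the cache
//    has a mapping count other than three (e.g. split __DATA*/__AUTH* regions);
//    dyld only passes a maximum slide and the kernel chooses the actual slide.
// mapCacheSystemWide() below selects between them based on the mapping count.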
extern "C" int __shared_region_check_np(uint64_t* startaddress);
extern "C" int __shared_region_map_and_slide_np(int fd, uint32_t count, const shared_file_mapping_np mappings[], long slide, const dyld_cache_slide_info2* slideInfo, size_t slideInfoSize);
extern "C" int __shared_region_map_and_slide_2_np(uint32_t files_count, const shared_file_np files[], uint32_t mappings_count, const shared_file_mapping_slide_np mappings[]);

#ifndef VM_PROT_NOAUTH
#define VM_PROT_NOAUTH  0x40  /* must not interfere with normal prot assignments */
#endif

extern bool gEnableSharedCacheDataConst;

namespace dyld {
    extern void log(const char*, ...);
    extern void logToConsole(const char* format, ...);
#if defined(__x86_64__) && !TARGET_OS_SIMULATOR
    bool isTranslated();
#endif
}


namespace dyld3 {


struct CacheInfo
{
    shared_file_mapping_slide_np    mappings[DyldSharedCache::MaxMappings];
    uint32_t                        mappingsCount;
    // All mappings come from the same file
    int                             fd = 0;
    uint64_t                        sharedRegionStart;
    uint64_t                        sharedRegionSize;
    uint64_t                        maxSlide;
};


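// Per-architecture cache file suffix and header magic. The magic field in the
// cache header is a fixed 16-byte string, so the architecture name is padded
// with spaces to make every "dyld_v1..." string exactly 15 characters plus the
// terminating NUL, which is why "dyld_v1arm64_32" has no space at all:
// arm64_32 is the longest architecture name.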
#if __i386__
    #define ARCH_NAME            "i386"
    #define ARCH_CACHE_MAGIC     "dyld_v1    i386"
#elif __x86_64__
    #define ARCH_NAME            "x86_64"
    #define ARCH_CACHE_MAGIC     "dyld_v1  x86_64"
    #define ARCH_NAME_H          "x86_64h"
    #define ARCH_CACHE_MAGIC_H   "dyld_v1 x86_64h"
#elif __ARM_ARCH_7K__
    #define ARCH_NAME            "armv7k"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7k"
#elif __ARM_ARCH_7A__
    #define ARCH_NAME            "armv7"
    #define ARCH_CACHE_MAGIC     "dyld_v1   armv7"
#elif __ARM_ARCH_7S__
    #define ARCH_NAME            "armv7s"
    #define ARCH_CACHE_MAGIC     "dyld_v1  armv7s"
#elif __arm64e__
    #define ARCH_NAME            "arm64e"
    #define ARCH_CACHE_MAGIC     "dyld_v1  arm64e"
#elif __arm64__
    #if __LP64__
        #define ARCH_NAME        "arm64"
        #define ARCH_CACHE_MAGIC "dyld_v1   arm64"
    #else
        #define ARCH_NAME        "arm64_32"
        #define ARCH_CACHE_MAGIC "dyld_v1arm64_32"
    #endif
#endif


#if !TARGET_OS_SIMULATOR
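// Walk one page's v2 rebase chain. Each rebase location packs two things into
// the stored pointer-sized word: the bits under delta_mask encode the distance
// to the next rebase location on the page (in 4-byte units, hence the "- 2" in
// deltaShift, which converts the field directly to a byte offset), and the
// remaining bits hold the unslid target value, to which value_add and the ASLR
// slide are applied. A delta of 0 terminates the chain.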
static void rebaseChainV2(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info2* slideInfo)
{
    const uintptr_t deltaMask  = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t valueMask  = ~deltaMask;
    const uintptr_t valueAdd   = (uintptr_t)(slideInfo->value_add);
    const unsigned  deltaShift = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( value != 0 ) {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif

#if !__LP64__ && !TARGET_OS_SIMULATOR
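// 32-bit variant of the chain walk (slide-info v4). The value bits additionally
// encode small non-pointers: values with the top bits clear are used as-is, and
// values tagged 0x3FFF8000 are sign-extended negatives (0xC0000000 is OR'd back
// in), so only real pointers get value_add and the slide applied.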
static void rebaseChainV4(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info4* slideInfo)
{
    const uintptr_t deltaMask  = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t valueMask  = ~deltaMask;
    const uintptr_t valueAdd   = (uintptr_t)(slideInfo->value_add);
    const unsigned  deltaShift = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( (value & 0xFFFF8000) == 0 ) {
            // small positive non-pointer, use as-is
        }
        else if ( (value & 0x3FFF8000) == 0x3FFF8000 ) {
            // small negative non-pointer
            value |= 0xC0000000;
        }
        else {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif

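// Cache path selection. On macOS the cache lives in the cache directory (the
// override from options, else the default system directory), with an optional
// x86_64h (Haswell) variant preferred when present. On embedded platforms the
// base name is extended with a ".development" suffix according to boot-args,
// internal-install state, and which of the two cache files actually exist.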
#if TARGET_OS_OSX
bool getMacOSCachePath(char pathBuffer[], size_t pathBufferSize,
                       const char* cacheDir, bool useHaswell) {
    // Clear old attempts at finding a cache, if any
    pathBuffer[0] = '\0';

    // set cache dir
    strlcpy(pathBuffer, cacheDir, pathBufferSize);

    // append file component of cache file
    if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
        strlcat(pathBuffer, "/", pathBufferSize);

#if __x86_64__
    if ( useHaswell ) {
        size_t len = strlen(pathBuffer);
        struct stat haswellStatBuf;
        strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H, pathBufferSize);
        if ( dyld3::stat(pathBuffer, &haswellStatBuf) == 0 )
            return true;
        // no haswell cache file, use regular x86_64 cache
        pathBuffer[len] = '\0';
    }
#endif

    struct stat statBuf;
    strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);
    if ( dyld3::stat(pathBuffer, &statBuf) == 0 )
        return true;

    return false;
}
#endif // TARGET_OS_OSX

static void getCachePath(const SharedCacheOptions& options, size_t pathBufferSize, char pathBuffer[])
{
#if TARGET_OS_OSX

    if ( options.cacheDirOverride != nullptr ) {
        getMacOSCachePath(pathBuffer, pathBufferSize, options.cacheDirOverride, options.useHaswell);
    } else {
        getMacOSCachePath(pathBuffer, pathBufferSize, MACOSX_MRM_DYLD_SHARED_CACHE_DIR, options.useHaswell);
    }

#else // TARGET_OS_OSX

    // Non-macOS path
    if ( options.cacheDirOverride != nullptr ) {
        strlcpy(pathBuffer, options.cacheDirOverride, pathBufferSize);
    } else {
        strlcpy(pathBuffer, IPHONE_DYLD_SHARED_CACHE_DIR, sizeof(IPHONE_DYLD_SHARED_CACHE_DIR));
    }

    // append file component of cache file
    if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
        strlcat(pathBuffer, "/", pathBufferSize);

    strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);

#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
    // use .development cache if it exists
    if ( BootArgs::forceCustomerCache() ) {
        // The boot-arg always wins.  Use the customer cache if we are told to
        return;
    }
    if ( !dyld3::internalInstall() ) {
        // We can't use the development cache on customer installs
        return;
    }
    if ( BootArgs::forceDevelopmentCache() ) {
        // The boot-arg always wins.  Use the development cache if we are told to
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }

    // If only one or the other caches exists, then use the one we have
    struct stat devCacheStatBuf;
    struct stat optCacheStatBuf;
    bool devCacheExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT, &devCacheStatBuf) == 0);
    bool optCacheExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &optCacheStatBuf) == 0);
    if ( !devCacheExists ) {
        // If the dev cache doesn't exist, then use the customer cache
        return;
    }
    if ( !optCacheExists ) {
        // If the customer cache doesn't exist, then use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }

    // Finally, check for the sentinels
    struct stat enableStatBuf;
    //struct stat sentinelStatBuf;
    bool enableFileExists = (dyld3::stat(IPHONE_DYLD_SHARED_CACHE_DIR "enable-dylibs-to-override-cache", &enableStatBuf) == 0);
    // FIXME: rdar://problem/59813537 Re-enable once automation is updated to use boot-arg
    bool sentinelFileExists = false;
    //bool sentinelFileExists = (dyld3::stat(MACOSX_MRM_DYLD_SHARED_CACHE_DIR "enable_development_mode", &sentinelStatBuf) == 0);
    if ( enableFileExists && (enableStatBuf.st_size < ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE) ) {
        // if the old enable file exists, use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }
    if ( sentinelFileExists ) {
        // If the new sentinel exists, then use the development cache
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
        return;
    }
#endif

#endif //!TARGET_OS_OSX
}


int openSharedCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    getCachePath(options, sizeof(results->path), results->path);
    return dyld3::open(results->path, O_RDONLY, 0);
}

static bool validMagic(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC) == 0 )
        return true;

#if __x86_64__
    if ( options.useHaswell ) {
        if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC_H) == 0 )
            return true;
    }
#endif
    return false;
}


static bool validPlatform(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    // grandfather in old cache that does not have platform in header
    if ( cache->header.mappingOffset < 0xE0 )
        return true;

    if ( cache->header.platform != (uint32_t)MachOFile::currentPlatform() )
        return false;

#if TARGET_OS_SIMULATOR
    if ( cache->header.simulator == 0 )
        return false;
#else
    if ( cache->header.simulator != 0 )
        return false;
#endif

    return true;
}

#if !TARGET_OS_SIMULATOR
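// Log each mapping's slid address range and protections. Writable mappings are
// labelled "data" (VM_PROT_NOAUTH set) or "auth" (pointer-authenticated __AUTH*
// regions). The ...ToConsole variant below is identical except that it writes
// via dyld::logToConsole(), which mapCacheSystemWide() uses when mapping the
// cache for the whole system.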
static void verboseSharedCacheMappings(const shared_file_mapping_slide_np mappings[DyldSharedCache::MaxMappings],
                                       uint32_t mappingsCount)
{
    for (int i=0; i < mappingsCount; ++i) {
        const char* mappingName = "";
        if ( mappings[i].sms_max_prot & VM_PROT_WRITE ) {
            if ( mappings[i].sms_max_prot & VM_PROT_NOAUTH ) {
                // __DATA*
                mappingName = "data";
            } else {
                // __AUTH*
                mappingName = "auth";
            }
        }
        uint32_t init_prot = mappings[i].sms_init_prot & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        uint32_t max_prot  = mappings[i].sms_max_prot  & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        dyld::log("        0x%08llX->0x%08llX init=%x, max=%x %s%s%s%s\n",
                  mappings[i].sms_address, mappings[i].sms_address+mappings[i].sms_size-1,
                  init_prot, max_prot,
                  ((mappings[i].sms_init_prot & VM_PROT_READ) ? "read " : ""),
                  ((mappings[i].sms_init_prot & VM_PROT_WRITE) ? "write " : ""),
                  ((mappings[i].sms_init_prot & VM_PROT_EXECUTE) ? "execute " : ""),
                  mappingName);
    }
}


static void verboseSharedCacheMappingsToConsole(const shared_file_mapping_slide_np mappings[DyldSharedCache::MaxMappings],
                                                uint32_t mappingsCount)
{
    for (int i=0; i < mappingsCount; ++i) {
        const char* mappingName = "";
        if ( mappings[i].sms_max_prot & VM_PROT_WRITE ) {
            if ( mappings[i].sms_max_prot & VM_PROT_NOAUTH ) {
                // __DATA*
                mappingName = "data";
            } else {
                // __AUTH*
                mappingName = "auth";
            }
        }
        uint32_t init_prot = mappings[i].sms_init_prot & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        uint32_t max_prot  = mappings[i].sms_max_prot  & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
        dyld::logToConsole("dyld: mapping 0x%08llX->0x%08llX init=%x, max=%x %s%s%s%s\n",
                           mappings[i].sms_address, mappings[i].sms_address+mappings[i].sms_size-1,
                           init_prot, max_prot,
                           ((mappings[i].sms_init_prot & VM_PROT_READ) ? "read " : ""),
                           ((mappings[i].sms_init_prot & VM_PROT_WRITE) ? "write " : ""),
                           ((mappings[i].sms_init_prot & VM_PROT_EXECUTE) ? "execute " : ""),
                           mappingName);
    }
}
#endif

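// Open and validate the cache file before mapping it:
//  1. open() and stat() the file found by getCachePath().
//  2. Read the first 16KB and check the header magic and platform.
//  3. Verify the mapping table: __TEXT first (r-x, file offset 0), then one or
//     more contiguous __DATA*/__AUTH* mappings (rw-), with __LINKEDIT (r--)
//     last, and the code signature running to exactly the end of the file.
//  4. Register the code signature (F_ADDFILESIGS_RETURN) and confirm it covers
//     the whole cache, then re-read the first page through a signature-checked
//     mmap() and compare it to the bytes read earlier.
//  5. Fill in CacheInfo with one shared_file_mapping_slide_np per mapping,
//     attaching each mapping's slide-info location inside __LINKEDIT.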
static bool preflightCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results, CacheInfo* info)
{

    // find and open shared cache file
    int fd = openSharedCacheFile(options, results);
    if ( fd == -1 ) {
        results->errorMessage = "shared cache file open() failed";
        return false;
    }

    struct stat cacheStatBuf;
    if ( dyld3::stat(results->path, &cacheStatBuf) != 0 ) {
        results->errorMessage = "shared cache file stat() failed";
        ::close(fd);
        return false;
    }
    size_t cacheFileLength = (size_t)(cacheStatBuf.st_size);

    // sanity check header and mappings
    uint8_t firstPage[0x4000];
    if ( ::pread(fd, firstPage, sizeof(firstPage), 0) != sizeof(firstPage) ) {
        results->errorMessage = "shared cache file pread() failed";
        ::close(fd);
        return false;
    }
    const DyldSharedCache* cache = (DyldSharedCache*)firstPage;
    if ( !validMagic(options, cache) ) {
        results->errorMessage = "shared cache file has wrong magic";
        ::close(fd);
        return false;
    }
    if ( !validPlatform(options, cache) ) {
        results->errorMessage = "shared cache file is for a different platform";
        ::close(fd);
        return false;
    }
    if ( (cache->header.mappingCount < 3) || (cache->header.mappingCount > DyldSharedCache::MaxMappings) || (cache->header.mappingOffset > 0x168) ) {
        results->errorMessage = "shared cache file mappings are invalid";
        ::close(fd);
        return false;
    }
    const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)&firstPage[cache->header.mappingOffset];
    const dyld_cache_mapping_info* textMapping      = &fileMappings[0];
    const dyld_cache_mapping_info* firstDataMapping = &fileMappings[1];
    const dyld_cache_mapping_info* linkeditMapping  = &fileMappings[cache->header.mappingCount - 1];
    if (   (textMapping->fileOffset != 0)
        || ((fileMappings[0].address + fileMappings[0].size) > firstDataMapping->address)
        || ((fileMappings[0].fileOffset + fileMappings[0].size) != firstDataMapping->fileOffset)
        || ((cache->header.codeSignatureOffset + cache->header.codeSignatureSize) != cacheFileLength)
        || (textMapping->maxProt != (VM_PROT_READ|VM_PROT_EXECUTE))
        || (linkeditMapping->maxProt != VM_PROT_READ) ) {
        results->errorMessage = "shared cache text/linkedit mappings are invalid";
        ::close(fd);
        return false;
    }

    // Check the __DATA mappings
    for (unsigned i = 1; i != (cache->header.mappingCount - 1); ++i) {
        if (   ((fileMappings[i].address + fileMappings[i].size) > fileMappings[i + 1].address)
            || ((fileMappings[i].fileOffset + fileMappings[i].size) != fileMappings[i + 1].fileOffset)
            || (fileMappings[i].maxProt != (VM_PROT_READ|VM_PROT_WRITE)) ) {
            results->errorMessage = "shared cache data mappings are invalid";
            ::close(fd);
            return false;
        }
    }

    if ( (textMapping->address != cache->header.sharedRegionStart) || ((linkeditMapping->address + linkeditMapping->size) > (cache->header.sharedRegionStart+cache->header.sharedRegionSize)) ) {
        results->errorMessage = "shared cache file mapping addresses invalid";
        ::close(fd);
        return false;
    }

    // register code signature of cache file
    fsignatures_t siginfo;
    siginfo.fs_file_start = 0;  // cache always starts at beginning of file
    siginfo.fs_blob_start = (void*)cache->header.codeSignatureOffset;
    siginfo.fs_blob_size  = (size_t)(cache->header.codeSignatureSize);
    int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
    if ( result == -1 ) {
        results->errorMessage = "code signature registration for shared cache failed";
        ::close(fd);
        return false;
    }

    // <rdar://problem/23188073> validate code signature covers entire shared cache
    uint64_t codeSignedLength = siginfo.fs_file_start;
    if ( codeSignedLength < cache->header.codeSignatureOffset ) {
        results->errorMessage = "code signature does not cover entire shared cache file";
        ::close(fd);
        return false;
    }
    void* mappedData = ::mmap(NULL, sizeof(firstPage), PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
    if ( mappedData == MAP_FAILED ) {
        results->errorMessage = "first page of shared cache not mmap()able";
        ::close(fd);
        return false;
    }
    if ( memcmp(mappedData, firstPage, sizeof(firstPage)) != 0 ) {
        results->errorMessage = "first page of mmap()ed shared cache not valid";
        ::close(fd);
        return false;
    }
    ::munmap(mappedData, sizeof(firstPage));

    // fill out results
    info->mappingsCount = cache->header.mappingCount;
    // We have to emit the mapping for the __LINKEDIT before the slid mappings
    // This is so that the kernel has already mapped __LINKEDIT in to its address space
    // for when it copies the slid info for each __DATA mapping
    for (int i=0; i < cache->header.mappingCount; ++i) {
        uint64_t  slideInfoFileOffset = 0;
        uint64_t  slideInfoFileSize   = 0;
        vm_prot_t authProt            = 0;
        vm_prot_t initProt            = fileMappings[i].initProt;
        if ( cache->header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
            // Old cache without the new slid mappings
            if ( i == 1 ) {
                // Add slide info to the __DATA mapping
                slideInfoFileOffset = cache->header.slideInfoOffsetUnused;
                slideInfoFileSize   = cache->header.slideInfoSizeUnused;
                // Don't set auth prot to anything interesting on the old mappings
                authProt = 0;
            }
        } else {
            // New cache where each mapping has a corresponding slid mapping
            const dyld_cache_mapping_and_slide_info* slidableMappings = (const dyld_cache_mapping_and_slide_info*)&firstPage[cache->header.mappingWithSlideOffset];
            slideInfoFileOffset = slidableMappings[i].slideInfoFileOffset;
            slideInfoFileSize   = slidableMappings[i].slideInfoFileSize;
            if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_AUTH_DATA) == 0 )
                authProt = VM_PROT_NOAUTH;
            if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_CONST_DATA) != 0 ) {
                // The cache was built with __DATA_CONST being read-only.  We can override that
                // with a boot-arg
                if ( !gEnableSharedCacheDataConst )
                    initProt |= VM_PROT_WRITE;
            }
        }

        // Add a file for each mapping
        info->fd                          = fd;
        info->mappings[i].sms_address     = fileMappings[i].address;
        info->mappings[i].sms_size        = fileMappings[i].size;
        info->mappings[i].sms_file_offset = fileMappings[i].fileOffset;
        info->mappings[i].sms_slide_size  = 0;
        info->mappings[i].sms_slide_start = 0;
        info->mappings[i].sms_max_prot    = fileMappings[i].maxProt;
        info->mappings[i].sms_init_prot   = initProt;
        if ( slideInfoFileSize != 0 ) {
            uint64_t offsetInLinkEditRegion = (slideInfoFileOffset - linkeditMapping->fileOffset);
            info->mappings[i].sms_slide_start = (user_addr_t)(linkeditMapping->address + offsetInLinkEditRegion);
            info->mappings[i].sms_slide_size  = (user_addr_t)slideInfoFileSize;
            info->mappings[i].sms_init_prot  |= (VM_PROT_SLIDE | authProt);
            info->mappings[i].sms_max_prot   |= (VM_PROT_SLIDE | authProt);
        }
    }
    info->sharedRegionStart = cache->header.sharedRegionStart;
    info->sharedRegionSize  = cache->header.sharedRegionSize;
    info->maxSlide          = cache->header.maxSlide;
    return true;
}


#if !TARGET_OS_SIMULATOR

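// Apply slide info to all pages of one __DATA*/__AUTH* region, dispatching on
// the slide-info version: v2 (older 64-bit caches) and v4 (32-bit caches) walk
// delta-encoded chains via rebaseChainV2/V4 above; v3 (arm64e) walks a chain of
// dyld_cache_slide_pointer3 words and, when built with ptrauth, re-signs each
// authenticated pointer against its slid target.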
// update all __DATA pages with slide info
static bool rebaseDataPages(bool isVerbose, const dyld_cache_slide_info* slideInfo, const uint8_t *dataPagesStart,
                            uint64_t sharedRegionStart, SharedCacheLoadInfo* results)
{
    const dyld_cache_slide_info* slideInfoHeader = slideInfo;
    if ( slideInfoHeader != nullptr ) {
        if ( slideInfoHeader->version == 2 ) {
            const dyld_cache_slide_info2* slideHeader = (dyld_cache_slide_info2*)slideInfo;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
                    uint16_t chainIndex = (pageEntry & 0x3FFF);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & 0x3FFF)*4;
                        //dyld::log("  chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV2(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE_PAGE_ATTR_END);
                        ++chainIndex;
                    }
                }
                else {
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log("  start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV2(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
#if __LP64__
        else if ( slideInfoHeader->version == 3 ) {
            const dyld_cache_slide_info3* slideHeader = (dyld_cache_slide_info3*)slideInfo;
            const uint32_t pageSize = slideHeader->page_size;
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(dataPagesStart + (pageSize*i));
                uint64_t delta = slideHeader->page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, delta);
                if ( delta == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE )
                    continue;
                delta = delta/sizeof(uint64_t); // initial offset is byte based
                dyld_cache_slide_pointer3* loc = (dyld_cache_slide_pointer3*)page;
                do {
                    loc += delta;
                    delta = loc->plain.offsetToNextPointer;
                    if ( loc->auth.authenticated ) {
#if __has_feature(ptrauth_calls)
                        uint64_t target = sharedRegionStart + loc->auth.offsetFromSharedCacheBase + results->slide;
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw64 = *((uint64_t*)loc);
                        loc->raw = ptr.arm64e.signPointer(loc, target);
#else
                        results->errorMessage = "invalid pointer kind in cache file";
                        return false;
#endif
                    }
                    else {
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw64 = *((uint64_t*)loc);
                        loc->raw = ptr.arm64e.unpackTarget() + results->slide;
                    }
                } while (delta != 0);
            }
        }
#else
        else if ( slideInfoHeader->version == 4 ) {
            const dyld_cache_slide_info4* slideHeader = (dyld_cache_slide_info4*)slideInfo;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
                    uint16_t chainIndex = (pageEntry & DYLD_CACHE_SLIDE4_PAGE_INDEX);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & DYLD_CACHE_SLIDE4_PAGE_INDEX)*4;
                        //dyld::log("  chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV4(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END);
                        ++chainIndex;
                    }
                }
                else {
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log("  start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV4(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
#endif // LP64
        else {
            results->errorMessage = "invalid slide info in cache file";
            return false;
        }
    }
    return true;
}

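// Ask the kernel (__shared_region_check_np) whether a cache is already mapped
// into the shared region. Returns true if one is present, filling in its load
// address and recomputing the slide as the difference between the actual base
// and the unslid base from the mapping table. The cache found may still be
// incompatible, in which case only errorMessage is set.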
static bool reuseExistingCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    uint64_t cacheBaseAddress;
#if __i386__
    if ( syscall(294, &cacheBaseAddress) == 0 ) {
#else
    if ( __shared_region_check_np(&cacheBaseAddress) == 0 ) {
#endif
        const DyldSharedCache* existingCache = (DyldSharedCache*)cacheBaseAddress;
        if ( validMagic(options, existingCache) ) {
            const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)(cacheBaseAddress + existingCache->header.mappingOffset);
            results->loadAddress = existingCache;
            results->slide = (long)(cacheBaseAddress - fileMappings[0].address);
            // we don't know the path this cache was previously loaded from, assume default
            getCachePath(options, sizeof(results->path), results->path);
            if ( options.verbose ) {
                const dyld_cache_mapping_and_slide_info* const mappings = (const dyld_cache_mapping_and_slide_info*)(cacheBaseAddress + existingCache->header.mappingWithSlideOffset);
                dyld::log("re-using existing shared cache (%s):\n", results->path);
                shared_file_mapping_slide_np slidMappings[DyldSharedCache::MaxMappings];
                for (int i=0; i < DyldSharedCache::MaxMappings; ++i) {
                    slidMappings[i].sms_address     = mappings[i].address;
                    slidMappings[i].sms_size        = mappings[i].size;
                    slidMappings[i].sms_file_offset = mappings[i].fileOffset;
                    slidMappings[i].sms_max_prot    = mappings[i].maxProt;
                    slidMappings[i].sms_init_prot   = mappings[i].initProt;
                    slidMappings[i].sms_address    += results->slide;
                    if ( existingCache->header.mappingOffset > __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
                        // New caches have slide info on each new mapping
                        const dyld_cache_mapping_and_slide_info* const slidableMappings = (dyld_cache_mapping_and_slide_info*)(cacheBaseAddress + existingCache->header.mappingWithSlideOffset);
                        assert(existingCache->header.mappingWithSlideCount <= DyldSharedCache::MaxMappings);
                        if ( !(slidableMappings[i].flags & DYLD_CACHE_MAPPING_AUTH_DATA) ) {
                            slidMappings[i].sms_max_prot  |= VM_PROT_NOAUTH;
                            slidMappings[i].sms_init_prot |= VM_PROT_NOAUTH;
                        }
                        if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_CONST_DATA) != 0 ) {
                            // The cache was built with __DATA_CONST being read-only.  We can override that
                            // with a boot-arg
                            if ( !gEnableSharedCacheDataConst )
                                slidMappings[i].sms_init_prot |= VM_PROT_WRITE;
                        }
                    }
                }
                verboseSharedCacheMappings(slidMappings, existingCache->header.mappingCount);
            }
        }
        else {
            results->errorMessage = "existing shared cache in memory is not compatible";
        }

        return true;
    }
    return false;
}

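// Pick a random ASLR slide in [0, maxSlide). On iPhone and Apple Silicon macOS
// the slide is truncated to a 16KB page boundary; elsewhere to 4KB, except that
// x86_64 processes running translated (Rosetta) are also forced to 16KB
// alignment. A maxSlide of 0 disables sliding.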
static long pickCacheASLRSlide(CacheInfo& info)
{
    // choose new random slide
#if TARGET_OS_IPHONE || (TARGET_OS_OSX && TARGET_CPU_ARM64)
    // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
    long slide;
    if (info.maxSlide == 0)
        slide = 0;
    else
        slide = ((arc4random() % info.maxSlide) & (-16384));
#else
    long slide;
    if (info.maxSlide == 0)
        slide = 0;
    else
        slide = ((arc4random() % info.maxSlide) & (-4096));
#if defined(__x86_64__) && !TARGET_OS_SIMULATOR
    if (dyld::isTranslated()) {
        slide &= (-16384);
    }
#endif
#endif

    return slide;
}

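// Map the cache into the kernel-managed shared region for all processes.
// Caches with a mapping count other than three use __shared_region_map_and_slide_2_np():
// dyld passes only the maximum slide and the kernel picks the actual one, so on
// success we call reuseExistingCache() to learn the slide that was chosen.
// Three-mapping caches fall back to the legacy syscall, for which dyld picks
// the slide, pre-slides the mapping addresses, and hands the kernel the single
// v2 slide-info blob. If the syscall fails, another process may have mapped the
// cache first, so reuseExistingCache() is tried before reporting an error.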
static bool mapCacheSystemWide(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    int result = 0;
    if ( info.mappingsCount != 3 ) {
        uint32_t maxSlide = options.disableASLR ? 0 : (uint32_t)info.maxSlide;

        shared_file_np file;
        file.sf_fd             = info.fd;
        file.sf_mappings_count = info.mappingsCount;
        // For the new syscall, this is actually the max slide.  The kernel now owns the actual slide
        file.sf_slide          = maxSlide;
        result = __shared_region_map_and_slide_2_np(1, &file, info.mappingsCount, info.mappings);
    } else {
        // With the old syscall, dyld has to choose the slide
        results->slide = options.disableASLR ? 0 : pickCacheASLRSlide(info);

        // update mappings based on the slide we choose
        for (uint32_t i=0; i < info.mappingsCount; ++i) {
            info.mappings[i].sms_address += results->slide;
            if ( info.mappings[i].sms_slide_size != 0 )
                info.mappings[i].sms_slide_start += (uint32_t)results->slide;
        }

        // If we get here then we don't have the new kernel function, so use the old one
        const dyld_cache_slide_info2* slideInfo = nullptr;
        size_t slideInfoSize = 0;
        shared_file_mapping_np mappings[3];
        for (unsigned i = 0; i != 3; ++i) {
            mappings[i].sfm_address     = info.mappings[i].sms_address;
            mappings[i].sfm_size        = info.mappings[i].sms_size;
            mappings[i].sfm_file_offset = info.mappings[i].sms_file_offset;
            mappings[i].sfm_max_prot    = info.mappings[i].sms_max_prot;
            mappings[i].sfm_init_prot   = info.mappings[i].sms_init_prot;
            if ( info.mappings[i].sms_slide_size != 0 ) {
                slideInfo     = (dyld_cache_slide_info2*)info.mappings[i].sms_slide_start;
                slideInfoSize = (size_t)info.mappings[i].sms_slide_size;
            }
        }
        result = __shared_region_map_and_slide_np(info.fd, 3, mappings, results->slide, slideInfo, slideInfoSize);
    }

    ::close(info.fd);
    if ( result == 0 ) {
        results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sms_address);
        if ( info.mappingsCount != 3 ) {
            // We don't know our own slide any more as the kernel owns it, so ask for it again now
            if ( reuseExistingCache(options, results) ) {

                // update mappings based on the slide the kernel chose
                for (uint32_t i=0; i < info.mappingsCount; ++i) {
                    info.mappings[i].sms_address += results->slide;
                    if ( info.mappings[i].sms_slide_size != 0 )
                        info.mappings[i].sms_slide_start += (uint32_t)results->slide;
                }

                if ( options.verbose )
                    verboseSharedCacheMappingsToConsole(info.mappings, info.mappingsCount);
                return true;
            }
            // Uh oh, the kernel mapped the cache, but we could not find the slide
            if ( options.verbose )
                dyld::logToConsole("dyld: error finding shared cache slide for system wide mapping\n");
            return false;
        }
    }
    else {
        // could be another process beat us to it
        if ( reuseExistingCache(options, results) )
            return true;
        // if cache does not exist, then really is an error
        if ( results->errorMessage == nullptr )
            results->errorMessage = "syscall to map cache into shared region failed";
        return false;
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file system wide: %s\n", results->path);
        verboseSharedCacheMappings(info.mappings, info.mappingsCount);
    }
    return true;
}
#endif // TARGET_OS_SIMULATOR

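// Map the cache into this process only. Each region is mmap()ed MAP_PRIVATE at
// its slid address; a failed mmap() tears down the whole shared region and
// reports an error. Outside the simulator, dyld itself then applies the slide
// info to every writable region via rebaseDataPages(), temporarily making
// __DATA_CONST writable with DataConstScopedWriter while it does so.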
static bool mapCachePrivate(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    // open and validate cache file
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    // compute ASLR slide
    results->slide = 0;
#if !TARGET_OS_SIMULATOR
    results->slide = options.disableASLR ? 0 : pickCacheASLRSlide(info);
#endif

    // update mappings
    for (uint32_t i=0; i < info.mappingsCount; ++i) {
        info.mappings[i].sms_address += (uint32_t)results->slide;
        if ( info.mappings[i].sms_slide_size != 0 )
            info.mappings[i].sms_slide_start += (uint32_t)results->slide;
    }

    results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sms_address);

    // deallocate any existing system wide shared cache
    deallocateExistingSharedCache();

#if TARGET_OS_SIMULATOR && TARGET_OS_WATCH
    // <rdar://problem/50887685> watchOS 32-bit cache does not overlap macOS dyld cache address range
    // mmap() of a file needs a vm_allocation behind it, so make one
    vm_address_t loadAddress = 0x40000000;
    ::vm_allocate(mach_task_self(), &loadAddress, 0x40000000, VM_FLAGS_FIXED);
#endif

    // map cache just for this process with mmap()
    for (int i=0; i < info.mappingsCount; ++i) {
        void* mmapAddress = (void*)(uintptr_t)(info.mappings[i].sms_address);
        size_t size = (size_t)(info.mappings[i].sms_size);
        //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
        int protection = 0;
        if ( info.mappings[i].sms_init_prot & VM_PROT_EXECUTE )
            protection |= PROT_EXEC;
        if ( info.mappings[i].sms_init_prot & VM_PROT_READ )
            protection |= PROT_READ;
        if ( info.mappings[i].sms_init_prot & VM_PROT_WRITE )
            protection |= PROT_WRITE;
        off_t offset = info.mappings[i].sms_file_offset;
        if ( ::mmap(mmapAddress, size, protection, MAP_FIXED | MAP_PRIVATE, info.fd, offset) != mmapAddress ) {
            // failed to map some chunk of this shared cache file
            // clear shared region
            ::mmap((void*)((long)SHARED_REGION_BASE), SHARED_REGION_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
            // return failure
            results->loadAddress  = nullptr;
            results->errorMessage = "could not mmap() part of dyld cache";
            ::close(info.fd);
            return false;
        }
    }
    ::close(info.fd);

#if TARGET_OS_SIMULATOR // simulator caches do not support sliding
    return true;
#else

    // Change __DATA_CONST to read-write for this block
    DyldSharedCache::DataConstScopedWriter patcher(results->loadAddress, mach_task_self(), options.verbose ? &dyld::log : nullptr);

    __block bool success = true;
    for (int i=0; i < info.mappingsCount; ++i) {
        if ( info.mappings[i].sms_slide_size == 0 )
            continue;
        const dyld_cache_slide_info* slideInfoHeader = (const dyld_cache_slide_info*)info.mappings[i].sms_slide_start;
        const uint8_t* mappingPagesStart = (const uint8_t*)info.mappings[i].sms_address;
        success &= rebaseDataPages(options.verbose, slideInfoHeader, mappingPagesStart, info.sharedRegionStart, results);
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file private to process (%s):\n", results->path);
        verboseSharedCacheMappings(info.mappings, info.mappingsCount);
    }
    return success;
#endif
}


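// Entry point for mapping (or adopting) the shared cache. The simulator and
// the forcePrivate option always map privately into this process; otherwise
// dyld first tries to adopt a cache already present in the shared region and
// only falls back to mapping it system-wide as the first process to do so.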
bool loadDyldCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    results->loadAddress  = 0;
    results->slide        = 0;
    results->errorMessage = nullptr;

#if TARGET_OS_SIMULATOR
    // simulator only supports mmap()ing cache privately into process
    return mapCachePrivate(options, results);
#else
    if ( options.forcePrivate ) {
        // mmap cache into this process only
        return mapCachePrivate(options, results);
    }
    else {
        // fast path: when cache is already mapped into shared region
        bool hasError = false;
        if ( reuseExistingCache(options, results) ) {
            hasError = (results->errorMessage != nullptr);
        } else {
            // slow path: this is first process to load cache
            hasError = mapCacheSystemWide(options, results);
        }
        return hasError;
    }
#endif
}

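// Look up a dylib in the cache by install path. Caches whose closure format
// does not match this dyld are searched via the old dyld_cache_image_info
// table; on iOS those entries reuse the inode field to hold a simple hash of
// the path (hash = hash*5 + c per character), letting most non-matches be
// rejected without a strcmp(). Current-format caches are searched through the
// cached-dylibs ImageArray instead.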
bool findInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind, SharedCacheFindDylibResults* results)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    if ( loadInfo.loadAddress->header.formatVersion != dyld3::closure::kFormatVersion ) {
        // support for older cache with a different Image* format
#if TARGET_OS_IPHONE
        uint64_t hash = 0;
        for (const char* s=dylibPathToFind; *s != '\0'; ++s)
            hash += hash*4 + *s;
#endif
        const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)loadInfo.loadAddress + loadInfo.loadAddress->header.imagesOffset);
        const dyld_cache_image_info* const end = &start[loadInfo.loadAddress->header.imagesCount];
        for (const dyld_cache_image_info* p = start; p != end; ++p) {
#if TARGET_OS_IPHONE
            // on iOS, inode is used to hold hash of path
            if ( (p->modTime == 0) && (p->inode != hash) )
                continue;
#endif
            const char* aPath = (char*)loadInfo.loadAddress + p->pathFileOffset;
            if ( strcmp(aPath, dylibPathToFind) == 0 ) {
                results->mhInCache    = (const mach_header*)(p->address+loadInfo.slide);
                results->pathInCache  = aPath;
                results->slideInCache = loadInfo.slide;
                results->image        = nullptr;
                return true;
            }
        }
        return false;
    }

    const dyld3::closure::ImageArray* images = loadInfo.loadAddress->cachedDylibsImageArray();
    results->image = nullptr;
    uint32_t imageIndex;
    if ( loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex) ) {
        results->image = images->imageForNum(imageIndex+1);
    }

    if ( results->image == nullptr )
        return false;

    results->mhInCache    = (const mach_header*)((uintptr_t)loadInfo.loadAddress + results->image->cacheOffset());
    results->pathInCache  = results->image->path();
    results->slideInCache = loadInfo.slide;
    return true;
}


bool pathIsInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    uint32_t imageIndex;
    return loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex);
}

void deallocateExistingSharedCache()
{
#if TARGET_OS_SIMULATOR
    // dyld deallocated macOS shared cache before jumping into dyld_sim
#else
    // <rdar://problem/50773474> remove the shared region sub-map
    uint64_t existingCacheAddress = 0;
    if ( __shared_region_check_np(&existingCacheAddress) == 0 ) {
        ::mmap((void*)((long)SHARED_REGION_BASE), SHARED_REGION_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
    }
#endif

}

} // namespace dyld3
