/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <mach/mach.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <mach-o/ldsyms.h>
#include <mach/shared_region.h>
#include <Availability.h>
#include <TargetConditionals.h>

#include "dyld_cache_format.h"
#include "SharedCacheRuntime.h"
#include "Loading.h"
#include "BootArgs.h"

#define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024
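// The enable-dylibs-to-override-cache marker file is only honored when it is
// smaller than this many bytes (see the st_size check in getCachePath() below).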

// should be in mach/shared_region.h
extern "C" int __shared_region_check_np(uint64_t* startaddress);
extern "C" int __shared_region_map_and_slide_np(int fd, uint32_t count, const shared_file_mapping_np mappings[], long slide, const dyld_cache_slide_info2* slideInfo, size_t slideInfoSize);


namespace dyld {
    extern int  my_stat(const char* path, struct stat* buf);
    extern int  my_open(const char* path, int flag, int other);
    extern void log(const char*, ...);
}


namespace dyld3 {


struct CacheInfo
{
    int                     fd;
    shared_file_mapping_np  mappings[3];
    uint64_t                slideInfoAddressUnslid;
    size_t                  slideInfoSize;
    uint64_t                sharedRegionStart;
    uint64_t                sharedRegionSize;
    uint64_t                maxSlide;
};


// Note: the magic strings are space-padded to exactly 15 characters (plus NUL)
// so they fill the 16-byte magic field in the cache header; validMagic()
// compares them with strcmp(), so the padding is significant.
#if __i386__
#define ARCH_NAME          "i386"
#define ARCH_CACHE_MAGIC   "dyld_v1    i386"
#elif __x86_64__
#define ARCH_NAME          "x86_64"
#define ARCH_CACHE_MAGIC   "dyld_v1  x86_64"
#define ARCH_NAME_H        "x86_64h"
#define ARCH_CACHE_MAGIC_H "dyld_v1 x86_64h"
#elif __ARM_ARCH_7K__
#define ARCH_NAME          "armv7k"
#define ARCH_CACHE_MAGIC   "dyld_v1  armv7k"
#elif __ARM_ARCH_7A__
#define ARCH_NAME          "armv7"
#define ARCH_CACHE_MAGIC   "dyld_v1   armv7"
#elif __ARM_ARCH_7S__
#define ARCH_NAME          "armv7s"
#define ARCH_CACHE_MAGIC   "dyld_v1  armv7s"
#elif __arm64e__
#define ARCH_NAME          "arm64e"
#define ARCH_CACHE_MAGIC   "dyld_v1  arm64e"
#elif __arm64__
#if __LP64__
#define ARCH_NAME          "arm64"
#define ARCH_CACHE_MAGIC   "dyld_v1   arm64"
#else
#define ARCH_NAME          "arm64_32"
#define ARCH_CACHE_MAGIC   "dyld_v1arm64_32"
#endif
#endif


#if !TARGET_OS_SIMULATOR
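// Walks one chain of rebase locations in the v2 slide-info format. Each
// pointer-sized slot packs two fields into its raw value: the bits selected by
// slideInfo->delta_mask give the distance (in 4-byte units) to the next fixup
// on the page, and the remaining bits give the target, to which value_add and
// the slide are applied (zero means NULL and is left alone). A delta of zero
// ends the chain. For example, with delta_mask 0x00FFFF0000000000, a raw value
// of 0x0000010000001000 rebases to (0x1000 + value_add + slide) and says the
// next fixup is 4 bytes further on.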
static void rebaseChainV2(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info2* slideInfo)
{
    const uintptr_t deltaMask  = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t valueMask  = ~deltaMask;
    const uintptr_t valueAdd   = (uintptr_t)(slideInfo->value_add);
    const unsigned  deltaShift = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( value != 0 ) {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif

#if !__LP64__ && !TARGET_OS_SIMULATOR
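// The v4 slide-info format is used by 32-bit caches (e.g. arm64_32). It works
// like v2, but reserves two encodings for non-pointer values that must not be
// slid: small positives (top 17 bits clear) are stored as-is, and small
// negatives (bits 0x3FFF8000 all set) are restored by or-ing in 0xC0000000.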
static void rebaseChainV4(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info4* slideInfo)
{
    const uintptr_t deltaMask  = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t valueMask  = ~deltaMask;
    const uintptr_t valueAdd   = (uintptr_t)(slideInfo->value_add);
    const unsigned  deltaShift = __builtin_ctzll(deltaMask) - 2;

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( (value & 0xFFFF8000) == 0 ) {
            // small positive non-pointer, use as-is
        }
        else if ( (value & 0x3FFF8000) == 0x3FFF8000 ) {
            // small negative non-pointer
            value |= 0xC0000000;
        }
        else {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif

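// Builds the full path to the shared cache file for this process: the cache
// directory (which may be overridden), then the base name plus architecture
// suffix, and on internal iOS installs possibly the .development extension.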
static void getCachePath(const SharedCacheOptions& options, size_t pathBufferSize, char pathBuffer[])
{
    // set cache dir
    if ( options.cacheDirOverride != nullptr ) {
        strlcpy(pathBuffer, options.cacheDirOverride, pathBufferSize);
    }
    else {
#if __IPHONE_OS_VERSION_MIN_REQUIRED
        strlcpy(pathBuffer, IPHONE_DYLD_SHARED_CACHE_DIR, pathBufferSize);
#else
        strlcpy(pathBuffer, MACOSX_DYLD_SHARED_CACHE_DIR, pathBufferSize);
#endif
    }

    // append file component of cache file
    if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
        strlcat(pathBuffer, "/", pathBufferSize);

#if __x86_64__ && !__IPHONE_OS_VERSION_MIN_REQUIRED
    if ( options.useHaswell ) {
        size_t len = strlen(pathBuffer);
        struct stat haswellStatBuf;
        strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H, pathBufferSize);
        if ( dyld::my_stat(pathBuffer, &haswellStatBuf) == 0 )
            return;
        // no haswell cache file, use regular x86_64 cache
        pathBuffer[len] = '\0';
    }
#endif

    strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);

#if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_OS_SIMULATOR
    // use .development cache if it exists
    struct stat enableStatBuf;
    struct stat devCacheStatBuf;
    struct stat optCacheStatBuf;
    bool developmentDevice = dyld3::internalInstall();
    bool enableFileExists  = (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR "enable-dylibs-to-override-cache", &enableStatBuf) == 0);
    bool devCacheExists    = (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT, &devCacheStatBuf) == 0);
    bool optCacheExists    = (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &optCacheStatBuf) == 0);
    if ( !BootArgs::forceCustomerCache() && developmentDevice && ((enableFileExists && (enableStatBuf.st_size < ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE) && devCacheExists) || !optCacheExists) )
        strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
#endif
}


int openSharedCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    getCachePath(options, sizeof(results->path), results->path);
    return dyld::my_open(results->path, O_RDONLY, 0);
}

static bool validMagic(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC) == 0 )
        return true;

#if __x86_64__
    if ( options.useHaswell ) {
        if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC_H) == 0 )
            return true;
    }
#endif
    return false;
}


static bool validPlatform(const SharedCacheOptions& options, const DyldSharedCache* cache)
{
    // grandfather in old cache that does not have platform in header
    if ( cache->header.mappingOffset < 0xE0 )
        return true;

    if ( cache->header.platform != (uint32_t)MachOFile::currentPlatform() )
        return false;

#if TARGET_OS_SIMULATOR
    if ( cache->header.simulator == 0 )
        return false;
#else
    if ( cache->header.simulator != 0 )
        return false;
#endif

    return true;
}

#if !TARGET_OS_SIMULATOR
static void verboseSharedCacheMappings(const shared_file_mapping_np mappings[3])
{
    for (int i=0; i < 3; ++i) {
        dyld::log("        0x%08llX->0x%08llX init=%x, max=%x %s%s%s\n",
            mappings[i].sfm_address, mappings[i].sfm_address+mappings[i].sfm_size-1,
            mappings[i].sfm_init_prot, mappings[i].sfm_max_prot,
            ((mappings[i].sfm_init_prot & VM_PROT_READ)    ? "read "    : ""),
            ((mappings[i].sfm_init_prot & VM_PROT_WRITE)   ? "write "   : ""),
            ((mappings[i].sfm_init_prot & VM_PROT_EXECUTE) ? "execute " : ""));
    }
}
#endif

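// Opens and validates the cache file before anything is mapped: checks the
// magic and platform, sanity checks the three mappings, registers the code
// signature with the kernel, and verifies the signature covers the whole file.
// On success, fills in CacheInfo with the (unslid) mappings and slide info.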
static bool preflightCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results, CacheInfo* info)
{
    // find and open shared cache file
    int fd = openSharedCacheFile(options, results);
    if ( fd == -1 ) {
        results->errorMessage = "shared cache file open() failed";
        return false;
    }

    struct stat cacheStatBuf;
    if ( dyld::my_stat(results->path, &cacheStatBuf) != 0 ) {
        results->errorMessage = "shared cache file stat() failed";
        ::close(fd);
        return false;
    }
    size_t cacheFileLength = (size_t)(cacheStatBuf.st_size);

    // sanity check header and mappings
    uint8_t firstPage[0x4000];
    if ( ::pread(fd, firstPage, sizeof(firstPage), 0) != sizeof(firstPage) ) {
        results->errorMessage = "shared cache file pread() failed";
        ::close(fd);
        return false;
    }
    const DyldSharedCache* cache = (DyldSharedCache*)firstPage;
    if ( !validMagic(options, cache) ) {
        results->errorMessage = "shared cache file has wrong magic";
        ::close(fd);
        return false;
    }
    if ( !validPlatform(options, cache) ) {
        results->errorMessage = "shared cache file is for a different platform";
        ::close(fd);
        return false;
    }
    if ( (cache->header.mappingCount != 3) || (cache->header.mappingOffset > 0x148) ) {
        results->errorMessage = "shared cache file mappings are invalid";
        ::close(fd);
        return false;
    }
    const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)&firstPage[cache->header.mappingOffset];
    if (   (fileMappings[0].fileOffset != 0)
        || ((fileMappings[0].address + fileMappings[0].size) > fileMappings[1].address)
        || ((fileMappings[1].address + fileMappings[1].size) > fileMappings[2].address)
        || ((fileMappings[0].fileOffset + fileMappings[0].size) != fileMappings[1].fileOffset)
        || ((fileMappings[1].fileOffset + fileMappings[1].size) != fileMappings[2].fileOffset)
        || ((cache->header.codeSignatureOffset + cache->header.codeSignatureSize) != cacheFileLength)
        || (fileMappings[0].maxProt != (VM_PROT_READ|VM_PROT_EXECUTE))
        || (fileMappings[1].maxProt != (VM_PROT_READ|VM_PROT_WRITE))
        || (fileMappings[2].maxProt != VM_PROT_READ) ) {
        results->errorMessage = "shared cache file mappings are invalid";
        ::close(fd);
        return false;
    }

    if ( cache->header.mappingOffset >= 0xF8 ) {
        if ( (fileMappings[0].address != cache->header.sharedRegionStart) || ((fileMappings[2].address + fileMappings[2].size) > (cache->header.sharedRegionStart+cache->header.sharedRegionSize)) ) {
            results->errorMessage = "shared cache file mapping addresses invalid";
            ::close(fd);
            return false;
        }
    }
    else {
        if ( (fileMappings[0].address != SHARED_REGION_BASE) || ((fileMappings[2].address + fileMappings[2].size) > (SHARED_REGION_BASE+SHARED_REGION_SIZE)) ) {
            results->errorMessage = "shared cache file mapping addresses invalid";
            ::close(fd);
            return false;
        }
    }

    // register code signature of cache file
    fsignatures_t siginfo;
    siginfo.fs_file_start = 0;  // cache always starts at beginning of file
    siginfo.fs_blob_start = (void*)cache->header.codeSignatureOffset;
    siginfo.fs_blob_size  = (size_t)(cache->header.codeSignatureSize);
    int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
    if ( result == -1 ) {
        results->errorMessage = "code signature registration for shared cache failed";
        ::close(fd);
        return false;
    }

    // <rdar://problem/23188073> validate code signature covers entire shared cache
    uint64_t codeSignedLength = siginfo.fs_file_start;
    if ( codeSignedLength < cache->header.codeSignatureOffset ) {
        results->errorMessage = "code signature does not cover entire shared cache file";
        ::close(fd);
        return false;
    }
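    // Map the first page executable and compare it with what pread() returned.
    // On systems that enforce code signing, the mmap() or the compare should
    // fail if the kernel rejects the just-registered signature, catching a
    // corrupt or mis-signed cache before it is mapped for real.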
    void* mappedData = ::mmap(NULL, sizeof(firstPage), PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
    if ( mappedData == MAP_FAILED ) {
        results->errorMessage = "first page of shared cache not mmap()able";
        ::close(fd);
        return false;
    }
    if ( memcmp(mappedData, firstPage, sizeof(firstPage)) != 0 ) {
        results->errorMessage = "first page of mmap()ed shared cache not valid";
        ::close(fd);
        return false;
    }
    ::munmap(mappedData, sizeof(firstPage));

    // fill out results
    info->fd = fd;
    for (int i=0; i < 3; ++i) {
        info->mappings[i].sfm_address     = fileMappings[i].address;
        info->mappings[i].sfm_size        = fileMappings[i].size;
        info->mappings[i].sfm_file_offset = fileMappings[i].fileOffset;
        info->mappings[i].sfm_max_prot    = fileMappings[i].maxProt;
        info->mappings[i].sfm_init_prot   = fileMappings[i].initProt;
    }
    // the writable __DATA mapping is the one the slide info applies to
    info->mappings[1].sfm_max_prot  |= VM_PROT_SLIDE;
    info->mappings[1].sfm_init_prot |= VM_PROT_SLIDE;
    info->slideInfoAddressUnslid = fileMappings[2].address + cache->header.slideInfoOffset - fileMappings[2].fileOffset;
    info->slideInfoSize = (size_t)cache->header.slideInfoSize;
    if ( cache->header.mappingOffset >= 0xF8 ) {
        info->sharedRegionStart = cache->header.sharedRegionStart;
        info->sharedRegionSize  = cache->header.sharedRegionSize;
        info->maxSlide          = cache->header.maxSlide;
    }
    else {
        info->sharedRegionStart = SHARED_REGION_BASE;
        info->sharedRegionSize  = SHARED_REGION_SIZE;
        info->maxSlide          = SHARED_REGION_SIZE - (fileMappings[2].address + fileMappings[2].size - fileMappings[0].address);
    }
    return true;
}


#if !TARGET_OS_SIMULATOR

// update all __DATA pages with slide info
static bool rebaseDataPages(bool isVerbose, CacheInfo& info, SharedCacheLoadInfo* results)
{
    uint64_t dataPagesStart = info.mappings[1].sfm_address;
    const dyld_cache_slide_info* slideInfoHeader = nullptr;
    if ( info.slideInfoSize != 0 ) {
        slideInfoHeader = (dyld_cache_slide_info*)(info.slideInfoAddressUnslid + results->slide);
    }
    if ( slideInfoHeader != nullptr ) {
        if ( slideInfoHeader->version == 2 ) {
            const dyld_cache_slide_info2* slideHeader = (dyld_cache_slide_info2*)slideInfoHeader;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideHeader) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideHeader) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
                    // page has multiple chains; walk the page_extras list until the END bit
                    uint16_t chainIndex = (pageEntry & 0x3FFF);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & 0x3FFF)*4;
                        //dyld::log("     chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV2(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE_PAGE_ATTR_END);
                        ++chainIndex;
                    }
                }
                else {
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log("     start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV2(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
#if __LP64__
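        // The v3 slide-info format is used by 64-bit caches: page_starts[] holds
        // the byte offset of the first fixup on each page, and each fixup's high
        // bits encode the offset to the next one plus, on arm64e, pointer
        // authentication state. Authenticated pointers must be re-signed at their
        // new (slid) address rather than simply adjusted.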
        else if ( slideInfoHeader->version == 3 ) {
            const dyld_cache_slide_info3* slideHeader = (dyld_cache_slide_info3*)slideInfoHeader;
            const uint32_t pageSize = slideHeader->page_size;
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(dataPagesStart + (pageSize*i));
                uint64_t delta = slideHeader->page_starts[i];
                if ( delta == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE )
                    continue;
                delta = delta/sizeof(uint64_t); // initial offset is byte based
                dyld_cache_slide_pointer3* loc = (dyld_cache_slide_pointer3*)page;
                do {
                    loc += delta;
                    delta = loc->plain.offsetToNextPointer;
                    if ( loc->auth.authenticated ) {
#if __has_feature(ptrauth_calls)
                        uint64_t target = info.sharedRegionStart + loc->auth.offsetFromSharedCacheBase + results->slide;
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw64 = *((uint64_t*)loc);
                        loc->raw = ptr.arm64e.signPointer(loc, target);
#else
                        results->errorMessage = "invalid pointer kind in cache file";
                        return false;
#endif
                    }
                    else {
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw64 = *((uint64_t*)loc);
                        loc->raw = ptr.arm64e.unpackTarget() + results->slide;
                    }
                } while (delta != 0);
            }
        }
#else
        else if ( slideInfoHeader->version == 4 ) {
            const dyld_cache_slide_info4* slideHeader = (dyld_cache_slide_info4*)slideInfoHeader;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideHeader) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideHeader) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
                    uint16_t chainIndex = (pageEntry & DYLD_CACHE_SLIDE4_PAGE_INDEX);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & DYLD_CACHE_SLIDE4_PAGE_INDEX)*4;
                        //dyld::log("     chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV4(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END);
                        ++chainIndex;
                    }
                }
                else {
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log("     start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV4(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
#endif // __LP64__
        else {
            results->errorMessage = "invalid slide info in cache file";
            return false;
        }
    }
    return true;
}

static bool reuseExistingCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
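    // Ask the kernel if some other process already populated the shared region;
    // if so, __shared_region_check_np() returns the base address at which the
    // cache is mapped. (On i386 the call is made by syscall number; 294 is
    // shared_region_check_np in the BSD syscall table.)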
    uint64_t cacheBaseAddress;
#if __i386__
    if ( syscall(294, &cacheBaseAddress) == 0 ) {
#else
    if ( __shared_region_check_np(&cacheBaseAddress) == 0 ) {
#endif
        const DyldSharedCache* existingCache = (DyldSharedCache*)cacheBaseAddress;
        if ( validMagic(options, existingCache) ) {
            const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)(cacheBaseAddress + existingCache->header.mappingOffset);
            results->loadAddress = existingCache;
            results->slide = (long)(cacheBaseAddress - fileMappings[0].address);
            // we don't know the path this cache was previously loaded from, assume default
            getCachePath(options, sizeof(results->path), results->path);
            if ( options.verbose ) {
                // dyld_cache_mapping_info and shared_file_mapping_np have the same field layout
                const shared_file_mapping_np* const mappings = (shared_file_mapping_np*)(cacheBaseAddress + existingCache->header.mappingOffset);
                dyld::log("re-using existing shared cache (%s):\n", results->path);
                shared_file_mapping_np slidMappings[3];
                for (int i=0; i < 3; ++i) {
                    slidMappings[i] = mappings[i];
                    slidMappings[i].sfm_address += results->slide;
                }
                verboseSharedCacheMappings(slidMappings);
            }
        }
        else {
            results->errorMessage = "existing shared cache in memory is not compatible";
        }
        return true;
    }
    return false;
}

static long pickCacheASLR(CacheInfo& info)
{
    // choose new random slide
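    // maxSlide is the unused space at the top of the shared region, so any
    // slide in [0, maxSlide) keeps all three mappings inside the region. The
    // random value is truncated down to a page boundary; e.g. with 16KB pages
    // a raw value of 0x123456 becomes a slide of 0x120000.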
#if __IPHONE_OS_VERSION_MIN_REQUIRED
    // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
    long slide = ((arc4random() % info.maxSlide) & (-16384));
#else
    long slide = ((arc4random() % info.maxSlide) & (-4096));
#endif

    // <rdar://problem/32031197> respect -disable_aslr boot-arg
    if ( BootArgs::contains("-disable_aslr") )
        slide = 0;

    // update mappings
    for (uint32_t i=0; i < 3; ++i) {
        info.mappings[i].sfm_address += slide;
    }

    return slide;
}

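// Maps the cache into the shared region for all processes via the
// __shared_region_map_and_slide_np() syscall; the kernel performs the mapping
// and applies the slide. If another process won the race, fall back to
// re-using the cache that process mapped.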
static bool mapCacheSystemWide(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    const dyld_cache_slide_info2* slideInfo = nullptr;
    if ( info.slideInfoSize != 0 ) {
        results->slide = pickCacheASLR(info);
        slideInfo = (dyld_cache_slide_info2*)(info.slideInfoAddressUnslid + results->slide);
    }

    int result = __shared_region_map_and_slide_np(info.fd, 3, info.mappings, results->slide, slideInfo, info.slideInfoSize);
    ::close(info.fd);
    if ( result == 0 ) {
        results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sfm_address);
    }
    else {
        // could be another process beat us to it
        if ( reuseExistingCache(options, results) )
            return true;
        // if cache does not exist, then really is an error
        if ( results->errorMessage == nullptr )
            results->errorMessage = "syscall to map cache into shared region failed";
        return false;
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file system wide: %s\n", results->path);
        verboseSharedCacheMappings(info.mappings);
    }
    return true;
}
#endif // !TARGET_OS_SIMULATOR

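// Maps the cache into this process only: a plain mmap() of each of the three
// regions, after which the slide is applied in user space by rebaseDataPages().
// Used by the simulator and when a private mapping is forced (options.forcePrivate).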
static bool mapCachePrivate(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    // open and validate cache file
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    // compute ASLR slide
    results->slide = 0;
#if !TARGET_OS_SIMULATOR // simulator caches do not support sliding
    if ( info.slideInfoSize != 0 ) {
        results->slide = pickCacheASLR(info);
    }
#endif
    results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sfm_address);

    // deallocate any existing system wide shared cache
    deallocateExistingSharedCache();

#if TARGET_OS_SIMULATOR && TARGET_OS_WATCH
    // <rdar://problem/50887685> watchOS 32-bit cache does not overlap macOS dyld cache address range
    // mmap() of a file needs a vm_allocation behind it, so make one
    vm_address_t loadAddress = 0x40000000;
    ::vm_allocate(mach_task_self(), &loadAddress, 0x40000000, VM_FLAGS_FIXED);
#endif

    // map cache just for this process with mmap()
    for (int i=0; i < 3; ++i) {
        void* mmapAddress = (void*)(uintptr_t)(info.mappings[i].sfm_address);
        size_t size = (size_t)(info.mappings[i].sfm_size);
        //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
        int protection = 0;
        if ( info.mappings[i].sfm_init_prot & VM_PROT_EXECUTE )
            protection |= PROT_EXEC;
        if ( info.mappings[i].sfm_init_prot & VM_PROT_READ )
            protection |= PROT_READ;
        if ( info.mappings[i].sfm_init_prot & VM_PROT_WRITE )
            protection |= PROT_WRITE;
        off_t offset = info.mappings[i].sfm_file_offset;
        if ( ::mmap(mmapAddress, size, protection, MAP_FIXED | MAP_PRIVATE, info.fd, offset) != mmapAddress ) {
            // failed to map some chunk of this shared cache file
            // clear shared region
            ::mmap((void*)((long)SHARED_REGION_BASE), SHARED_REGION_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
            // return failure
            results->loadAddress = nullptr;
            results->errorMessage = "could not mmap() part of dyld cache";
            ::close(info.fd);
            return false;
        }
    }
    ::close(info.fd);

#if TARGET_OS_SIMULATOR // simulator caches do not support sliding
    return true;
#else
    bool success = rebaseDataPages(options.verbose, info, results);

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file private to process (%s):\n", results->path);
        verboseSharedCacheMappings(info.mappings);
    }
    return success;
#endif
}

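// Entry point for mapping or re-using the dyld shared cache. The simulator
// always maps privately; otherwise the cache is re-used if it is already in
// the shared region, or mapped system wide if this is the first process to
// load it.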
bool loadDyldCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    results->loadAddress  = nullptr;
    results->slide        = 0;
    results->errorMessage = nullptr;

#if TARGET_OS_SIMULATOR
    // simulator only supports mmap()ing cache privately into process
    return mapCachePrivate(options, results);
#else
    if ( options.forcePrivate ) {
        // mmap cache into this process only
        return mapCachePrivate(options, results);
    }
    else {
        // fast path: when cache is already mapped into shared region
        bool success = false;
        if ( reuseExistingCache(options, results) ) {
            success = (results->errorMessage == nullptr);
        }
        else {
            // slow path: this is first process to load cache
            success = mapCacheSystemWide(options, results);
        }
        return success;
    }
#endif
}

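// Looks up a dylib path in the cache. Modern caches are searched via the
// closure ImageArray; older caches fall back to a linear scan of the
// dyld_cache_image_info array (on iOS, a hash of the path stored in the
// unused inode field speeds up the comparison).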
bool findInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind, SharedCacheFindDylibResults* results)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    if ( loadInfo.loadAddress->header.formatVersion != dyld3::closure::kFormatVersion ) {
        // support for older caches with a different Image* format
#if __IPHONE_OS_VERSION_MIN_REQUIRED
        uint64_t hash = 0;
        for (const char* s=dylibPathToFind; *s != '\0'; ++s)
            hash += hash*4 + *s;
#endif
        const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)loadInfo.loadAddress + loadInfo.loadAddress->header.imagesOffset);
        const dyld_cache_image_info* const end = &start[loadInfo.loadAddress->header.imagesCount];
        for (const dyld_cache_image_info* p = start; p != end; ++p) {
#if __IPHONE_OS_VERSION_MIN_REQUIRED
            // on iOS, inode is used to hold hash of path
            if ( (p->modTime == 0) && (p->inode != hash) )
                continue;
#endif
            const char* aPath = (char*)loadInfo.loadAddress + p->pathFileOffset;
            if ( strcmp(aPath, dylibPathToFind) == 0 ) {
                results->mhInCache    = (const mach_header*)(p->address+loadInfo.slide);
                results->pathInCache  = aPath;
                results->slideInCache = loadInfo.slide;
                results->image        = nullptr;
                return true;
            }
        }
        return false;
    }

    const dyld3::closure::ImageArray* images = loadInfo.loadAddress->cachedDylibsImageArray();
    results->image = nullptr;
    uint32_t imageIndex;
    if ( loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex) ) {
        results->image = images->imageForNum(imageIndex+1);
    }
#if __MAC_OS_X_VERSION_MIN_REQUIRED
    else {
        // <rdar://problem/32740215> handle symlink to cached dylib
        if ( loadInfo.loadAddress->header.dylibsExpectedOnDisk ) {
            struct stat statBuf;
            if ( dyld::my_stat(dylibPathToFind, &statBuf) == 0 ) {
                // on macOS, the cache stores the inode and mtime of each dylib in the dyld_cache_image_info array
                const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)loadInfo.loadAddress + loadInfo.loadAddress->header.imagesOffset);
                const dyld_cache_image_info* const end = &start[loadInfo.loadAddress->header.imagesCount];
                for (const dyld_cache_image_info* p = start; p != end; ++p) {
                    if ( (p->inode == statBuf.st_ino) && (p->modTime == statBuf.st_mtime) ) {
                        imageIndex = (uint32_t)(p - start);
                        results->image = images->imageForNum(imageIndex+1);
                        break;
                    }
                }
            }
        }
        else {
            char resolvedPath[PATH_MAX];
            if ( realpath(dylibPathToFind, resolvedPath) != nullptr ) {
                if ( loadInfo.loadAddress->hasImagePath(resolvedPath, imageIndex) ) {
                    results->image = images->imageForNum(imageIndex+1);
                }
            }
        }
    }
#endif
    if ( results->image == nullptr )
        return false;

    results->mhInCache    = (const mach_header*)((uintptr_t)loadInfo.loadAddress + results->image->cacheOffset());
    results->pathInCache  = results->image->path();
    results->slideInCache = loadInfo.slide;
    return true;
}


bool pathIsInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    uint32_t imageIndex;
    return loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex);
}

void deallocateExistingSharedCache()
{
#if TARGET_OS_SIMULATOR
    // dyld already deallocated the macOS shared cache before jumping into dyld_sim
#else
    // <rdar://problem/5077374> remove the shared region sub-map
    uint64_t existingCacheAddress = 0;
    if ( __shared_region_check_np(&existingCacheAddress) == 0 ) {
        ::mmap((void*)((long)SHARED_REGION_BASE), SHARED_REGION_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
    }
#endif
}

} // namespace dyld3