]> git.saurik.com Git - apple/dyld.git/blob - dyld3/SharedCacheRuntime.cpp
dyld-635.2.tar.gz
[apple/dyld.git] / dyld3 / SharedCacheRuntime.cpp
1 /*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24
25
26 #include <stdint.h>
27 #include <string.h>
28 #include <unistd.h>
29 #include <fcntl.h>
30 #include <sys/param.h>
31 #include <sys/types.h>
32 #include <sys/stat.h>
33 #include <sys/syscall.h>
34 #include <sys/syslog.h>
35 #include <sys/sysctl.h>
36 #include <sys/mman.h>
37 #include <mach/mach.h>
38 #include <mach-o/fat.h>
39 #include <mach-o/loader.h>
40 #include <mach-o/ldsyms.h>
41 #include <mach/shared_region.h>
42 #include <mach/mach.h>
43 #include <Availability.h>
44 #include <TargetConditionals.h>
45
46 #include "dyld_cache_format.h"
47 #include "SharedCacheRuntime.h"
48 #include "Loading.h"
49
50 #define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024
51
52 // should be in mach/shared_region.h
53 extern "C" int __shared_region_check_np(uint64_t* startaddress);
54 extern "C" int __shared_region_map_and_slide_np(int fd, uint32_t count, const shared_file_mapping_np mappings[], long slide, const dyld_cache_slide_info2* slideInfo, size_t slideInfoSize);
55
56
57 namespace dyld {
58 extern int my_stat(const char* path, struct stat* buf);
59 extern int my_open(const char* path, int flag, int other);
60 extern void log(const char*, ...);
61 }
62
63
64 namespace dyld3 {
65
66
// Everything gathered from the cache file's header that is needed to map the
// cache (privately or system-wide) and later apply slide fixups.
// Filled in by preflightCacheFile().
struct CacheInfo
{
    int                     fd;                         // open file descriptor for the cache file
    shared_file_mapping_np  mappings[3];                // the three regions to map (r-x, rw-, r-- per preflight checks)
    uint64_t                slideInfoAddressUnslid;     // address of slide-info blob before any slide is applied
    size_t                  slideInfoSize;              // size of slide-info blob; 0 means the cache cannot slide
    uint64_t                cachedDylibsGroupUnslid;    // unslid dylibs ImageGroup address, or 0 for older caches
    uint64_t                sharedRegionStart;          // base address the cache was built to occupy
    uint64_t                sharedRegionSize;           // size of the shared region
    uint64_t                maxSlide;                   // maximum ASLR slide this cache supports
};
78
79
80
81
82 #if __i386__
83 #define ARCH_NAME "i386"
84 #define ARCH_CACHE_MAGIC "dyld_v1 i386"
85 #elif __x86_64__
86 #define ARCH_NAME "x86_64"
87 #define ARCH_CACHE_MAGIC "dyld_v1 x86_64"
88 #define ARCH_NAME_H "x86_64h"
89 #define ARCH_CACHE_MAGIC_H "dyld_v1 x86_64h"
90 #elif __ARM_ARCH_7K__
91 #define ARCH_NAME "armv7k"
92 #define ARCH_CACHE_MAGIC "dyld_v1 armv7k"
93 #elif __ARM_ARCH_7A__
94 #define ARCH_NAME "armv7"
95 #define ARCH_CACHE_MAGIC "dyld_v1 armv7"
96 #elif __ARM_ARCH_7S__
97 #define ARCH_NAME "armv7s"
98 #define ARCH_CACHE_MAGIC "dyld_v1 armv7s"
99 #elif __arm64e__
100 #define ARCH_NAME "arm64e"
101 #define ARCH_CACHE_MAGIC "dyld_v1 arm64e"
102 #elif __arm64__
103 #if __LP64__
104 #define ARCH_NAME "arm64"
105 #define ARCH_CACHE_MAGIC "dyld_v1 arm64"
106 #else
107 #define ARCH_NAME "arm64_32"
108 #define ARCH_CACHE_MAGIC "dyld_v1arm64_32"
109 #endif
110 #endif
111
112
113
// Walks one chain of rebase locations within a page of the cache's writable
// mapping (v2 slide-info format) and adds 'slideAmount' to each pointer.
// Each raw value encodes both the pointer target (under ~delta_mask) and the
// delta to the next location in the chain (under delta_mask); a delta of
// zero terminates the chain.  Zero values are left as zero (no slide).
static void rebaseChainV2(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info2* slideInfo)
{
    const uintptr_t deltaMask  = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t valueMask  = ~deltaMask;
    const uintptr_t valueAdd   = (uintptr_t)(slideInfo->value_add);
    const unsigned  deltaShift = __builtin_ctzll(deltaMask) - 2;    // "- 2": deltas are stored in units of 4 bytes

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;     // non-zero so the loop processes the first location
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( value != 0 ) {
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
137
#if !__LP64__
// Walks one chain of rebase locations within a page (v4 slide-info format,
// 32-bit caches only) and adds 'slideAmount' to each real pointer.  Values
// tagged as small positive non-pointers are passed through unchanged; small
// negative non-pointers have their top bits restored; a delta of zero
// terminates the chain.
static void rebaseChainV4(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info4* slideInfo)
{
    const uintptr_t deltaMask  = (uintptr_t)(slideInfo->delta_mask);
    const uintptr_t valueMask  = ~deltaMask;
    const uintptr_t valueAdd   = (uintptr_t)(slideInfo->value_add);
    const unsigned  deltaShift = __builtin_ctzll(deltaMask) - 2;    // "- 2": deltas are stored in units of 4 bytes

    uint32_t pageOffset = startOffset;
    uint32_t delta = 1;     // non-zero so the loop processes the first location
    while ( delta != 0 ) {
        uint8_t* loc = pageContent + pageOffset;
        uintptr_t rawValue = *((uintptr_t*)loc);
        delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
        uintptr_t value = (rawValue & valueMask);
        if ( (value & 0xFFFF8000) == 0 ) {
            // small positive non-pointer, use as-is
        }
        else if ( (value & 0x3FFF8000) == 0x3FFF8000 ) {
            // small negative non-pointer: restore the top bits lost to the encoding
            value |= 0xC0000000;
        }
        else {
            // a real pointer: rebase it
            value += valueAdd;
            value += slideAmount;
        }
        *((uintptr_t*)loc) = value;
        //dyld::log("    pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
        pageOffset += delta;
    }
}
#endif
170
171 static void getCachePath(const SharedCacheOptions& options, size_t pathBufferSize, char pathBuffer[])
172 {
173 // set cache dir
174 if ( options.cacheDirOverride != nullptr ) {
175 strlcpy(pathBuffer, options.cacheDirOverride, pathBufferSize);
176 }
177 else {
178 #if __IPHONE_OS_VERSION_MIN_REQUIRED
179 strlcpy(pathBuffer, IPHONE_DYLD_SHARED_CACHE_DIR, sizeof(IPHONE_DYLD_SHARED_CACHE_DIR));
180 #else
181 strlcpy(pathBuffer, MACOSX_DYLD_SHARED_CACHE_DIR, sizeof(MACOSX_DYLD_SHARED_CACHE_DIR));
182 #endif
183 }
184
185 // append file component of cache file
186 if ( pathBuffer[strlen(pathBuffer)-1] != '/' )
187 strlcat(pathBuffer, "/", pathBufferSize);
188 #if __x86_64__ && !__IPHONE_OS_VERSION_MIN_REQUIRED
189 if ( options.useHaswell ) {
190 size_t len = strlen(pathBuffer);
191 struct stat haswellStatBuf;
192 strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H, pathBufferSize);
193 if ( dyld::my_stat(pathBuffer, &haswellStatBuf) == 0 )
194 return;
195 // no haswell cache file, use regular x86_64 cache
196 pathBuffer[len] = '\0';
197 }
198 #endif
199 strlcat(pathBuffer, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, pathBufferSize);
200
201 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
202 // use .development cache if it exists
203 struct stat enableStatBuf;
204 struct stat devCacheStatBuf;
205 struct stat optCacheStatBuf;
206 bool developmentDevice = dyld3::internalInstall();
207 bool enableFileExists = (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR "enable-dylibs-to-override-cache", &enableStatBuf) == 0);
208 bool devCacheExists = (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT, &devCacheStatBuf) == 0);
209 bool optCacheExists = (dyld::my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &optCacheStatBuf) == 0);
210 if ( developmentDevice && ((enableFileExists && (enableStatBuf.st_size < ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE) && devCacheExists) || !optCacheExists) )
211 strlcat(pathBuffer, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, pathBufferSize);
212 #endif
213
214 }
215
216
// Computes the shared cache file path into results->path, then opens it
// read-only.  Returns the open file descriptor, or -1 on failure.
int openSharedCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    getCachePath(options, sizeof(results->path), results->path);
    return dyld::my_open(results->path, O_RDONLY, 0);
}
222
223 static bool validMagic(const SharedCacheOptions& options, const DyldSharedCache* cache)
224 {
225 if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC) == 0 )
226 return true;
227
228 #if __x86_64__
229 if ( options.useHaswell ) {
230 if ( strcmp(cache->header.magic, ARCH_CACHE_MAGIC_H) == 0 )
231 return true;
232 }
233 #endif
234 return false;
235 }
236
237
238 static bool validPlatform(const SharedCacheOptions& options, const DyldSharedCache* cache)
239 {
240 // grandfather in old cache that does not have platform in header
241 if ( cache->header.mappingOffset < 0xE0 )
242 return true;
243
244 if ( cache->header.platform != (uint32_t)MachOFile::currentPlatform() )
245 return false;
246
247 #if TARGET_IPHONE_SIMULATOR
248 if ( cache->header.simulator == 0 )
249 return false;
250 #else
251 if ( cache->header.simulator != 0 )
252 return false;
253 #endif
254
255 return true;
256 }
257
258
259 static void verboseSharedCacheMappings(const shared_file_mapping_np mappings[3])
260 {
261 for (int i=0; i < 3; ++i) {
262 dyld::log(" 0x%08llX->0x%08llX init=%x, max=%x %s%s%s\n",
263 mappings[i].sfm_address, mappings[i].sfm_address+mappings[i].sfm_size-1,
264 mappings[i].sfm_init_prot, mappings[i].sfm_init_prot,
265 ((mappings[i].sfm_init_prot & VM_PROT_READ) ? "read " : ""),
266 ((mappings[i].sfm_init_prot & VM_PROT_WRITE) ? "write " : ""),
267 ((mappings[i].sfm_init_prot & VM_PROT_EXECUTE) ? "execute " : ""));
268 }
269 }
270
// Opens the shared cache file, validates its header, mappings, and code
// signature, and fills in 'info' with everything needed to map the cache.
// On failure: sets results->errorMessage, closes the file, returns false.
// On success: the still-open file descriptor is returned in info->fd
// (ownership passes to the caller).
static bool preflightCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results, CacheInfo* info)
{
    // find and open shared cache file
    int fd = openSharedCacheFile(options, results);
    if ( fd == -1 ) {
        results->errorMessage = "shared cache file cannot be opened";
        return false;
    }

    struct stat cacheStatBuf;
    if ( dyld::my_stat(results->path, &cacheStatBuf) != 0 ) {
        results->errorMessage = "shared cache file cannot be stat()ed";
        ::close(fd);
        return false;
    }
    size_t cacheFileLength = (size_t)(cacheStatBuf.st_size);

    // sanity check header and mappings
    uint8_t firstPage[0x4000];
    if ( ::pread(fd, firstPage, sizeof(firstPage), 0) != sizeof(firstPage) ) {
        results->errorMessage = "shared cache header could not be read";
        ::close(fd);
        return false;
    }
    const DyldSharedCache* cache = (DyldSharedCache*)firstPage;
    if ( !validMagic(options, cache) ) {
        results->errorMessage = "shared cache file has wrong magic";
        ::close(fd);
        return false;
    }
    if ( !validPlatform(options, cache) ) {
        results->errorMessage = "shared cache file is for a different platform";
        ::close(fd);
        return false;
    }
    // expect exactly three mappings and a header no larger than any known format
    if ( (cache->header.mappingCount != 3) || (cache->header.mappingOffset > 0x138) ) {
        results->errorMessage = "shared cache file mappings are invalid";
        ::close(fd);
        return false;
    }
    // mappings must be contiguous in the file, cover it up to the code
    // signature, and carry the expected max protections (r-x, rw-, r--)
    const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)&firstPage[cache->header.mappingOffset];
    if ( (fileMappings[0].fileOffset != 0)
      || ((fileMappings[0].address + fileMappings[0].size) > fileMappings[1].address)
      || ((fileMappings[1].address + fileMappings[1].size) > fileMappings[2].address)
      || ((fileMappings[0].fileOffset + fileMappings[0].size) != fileMappings[1].fileOffset)
      || ((fileMappings[1].fileOffset + fileMappings[1].size) != fileMappings[2].fileOffset)
      || ((cache->header.codeSignatureOffset + cache->header.codeSignatureSize) != cacheFileLength)
      || (fileMappings[0].maxProt != (VM_PROT_READ|VM_PROT_EXECUTE))
      || (fileMappings[1].maxProt != (VM_PROT_READ|VM_PROT_WRITE))
      || (fileMappings[2].maxProt != VM_PROT_READ) ) {
        results->errorMessage = "shared cache file mappings are invalid";
        ::close(fd);
        return false;
    }

    // newer caches (mappingOffset >= 0xF8) record their intended address range
    // in the header; older ones implicitly use the fixed shared region
    if ( cache->header.mappingOffset >= 0xF8 ) {
        if ( (fileMappings[0].address != cache->header.sharedRegionStart) || ((fileMappings[2].address + fileMappings[2].size) > (cache->header.sharedRegionStart+cache->header.sharedRegionSize)) ) {
            results->errorMessage = "shared cache file mapping addressses invalid";
            ::close(fd);
            return false;
        }
    }
    else {
        if ( (fileMappings[0].address != SHARED_REGION_BASE) || ((fileMappings[2].address + fileMappings[2].size) > (SHARED_REGION_BASE+SHARED_REGION_SIZE)) ) {
            results->errorMessage = "shared cache file mapping addressses invalid";
            ::close(fd);
            return false;
        }
    }

    // register code signature of cache file
    fsignatures_t siginfo;
    siginfo.fs_file_start = 0;  // cache always starts at beginning of file
    siginfo.fs_blob_start = (void*)cache->header.codeSignatureOffset;
    siginfo.fs_blob_size  = (size_t)(cache->header.codeSignatureSize);
    int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
    if ( result == -1 ) {
        results->errorMessage = "code signature registration for shared cache failed";
        ::close(fd);
        return false;
    }

    // <rdar://problem/23188073> validate code signature covers entire shared cache
    // (after F_ADDFILESIGS_RETURN, fs_file_start holds the length of the file
    // that the registered signature covers)
    uint64_t codeSignedLength = siginfo.fs_file_start;
    if ( codeSignedLength < cache->header.codeSignatureOffset ) {
        results->errorMessage = "code signature does not cover entire shared cache file";
        ::close(fd);
        return false;
    }
    // map the first page with PROT_EXEC so the kernel checks it against the
    // registered signature, then compare with the bytes read earlier
    void* mappedData = ::mmap(NULL, sizeof(firstPage), PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
    if ( mappedData == MAP_FAILED ) {
        results->errorMessage = "first page of shared cache not mmap()able";
        ::close(fd);
        return false;
    }
    if ( memcmp(mappedData, firstPage, sizeof(firstPage)) != 0 ) {
        results->errorMessage = "first page of mmap()ed shared cache not valid";
        ::close(fd);
        return false;
    }
    ::munmap(mappedData, sizeof(firstPage));

    // fill out results
    info->fd = fd;
    for (int i=0; i < 3; ++i) {
        info->mappings[i].sfm_address     = fileMappings[i].address;
        info->mappings[i].sfm_size        = fileMappings[i].size;
        info->mappings[i].sfm_file_offset = fileMappings[i].fileOffset;
        info->mappings[i].sfm_max_prot    = fileMappings[i].maxProt;
        info->mappings[i].sfm_init_prot   = fileMappings[i].initProt;
    }
    // mark the writable mapping as slidable by the kernel
    info->mappings[1].sfm_max_prot  |= VM_PROT_SLIDE;
    info->mappings[1].sfm_init_prot |= VM_PROT_SLIDE;
    info->slideInfoAddressUnslid = fileMappings[2].address + cache->header.slideInfoOffset - fileMappings[2].fileOffset;
    info->slideInfoSize = (long)cache->header.slideInfoSize;
    if ( cache->header.mappingOffset > 0xD0 )
        info->cachedDylibsGroupUnslid = cache->header.dylibsImageGroupAddr;
    else
        info->cachedDylibsGroupUnslid = 0;  // cache predates dylibs image groups
    if ( cache->header.mappingOffset >= 0xf8 ) {
        info->sharedRegionStart = cache->header.sharedRegionStart;
        info->sharedRegionSize  = cache->header.sharedRegionSize;
        info->maxSlide          = cache->header.maxSlide;
    }
    else {
        // old cache without an address range in its header: assume the fixed
        // shared region and derive max slide from the remaining room in it
        info->sharedRegionStart = SHARED_REGION_BASE;
        info->sharedRegionSize  = SHARED_REGION_SIZE;
        info->maxSlide          = SHARED_REGION_SIZE - (fileMappings[2].address + fileMappings[2].size - fileMappings[0].address);
    }
    return true;
}
402
403
404 #if !TARGET_IPHONE_SIMULATOR
405
// update all __DATA pages with slide info
// Applies the cache's slide info to every page of the writable mapping,
// adding results->slide to each encoded pointer.  Handles slide-info
// format v2 on all targets, plus v3 on 64-bit and v4 on 32-bit.
// Returns false (with results->errorMessage set) for an unknown format or
// an unsupported pointer kind.  Note: isVerbose is currently unused.
static bool rebaseDataPages(bool isVerbose, CacheInfo& info, SharedCacheLoadInfo* results)
{
    uint64_t dataPagesStart = info.mappings[1].sfm_address;
    const dyld_cache_slide_info* slideInfo = nullptr;
    if ( info.slideInfoSize != 0 ) {
        // slide info was stored at an unslid address; adjust by the chosen slide
        slideInfo = (dyld_cache_slide_info*)(info.slideInfoAddressUnslid + results->slide);
    }
    const dyld_cache_slide_info* slideInfoHeader = (dyld_cache_slide_info*)slideInfo;
    if ( slideInfoHeader != nullptr ) {
        if ( slideInfoHeader->version == 2 ) {
            const dyld_cache_slide_info2* slideHeader = (dyld_cache_slide_info2*)slideInfo;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
                    // page has multiple chains: entry is an index into page_extras;
                    // walk entries until one carries the END attribute
                    uint16_t chainIndex = (pageEntry & 0x3FFF);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & 0x3FFF)*4;  // offsets stored in 4-byte units
                        //dyld::log(" chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV2(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE_PAGE_ATTR_END);
                        ++chainIndex;
                    }
                }
                else {
                    // single chain starting pageEntry*4 bytes into the page
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log(" start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV2(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
#if __LP64__
        else if ( slideInfoHeader->version == 3 ) {
            const dyld_cache_slide_info3* slideHeader = (dyld_cache_slide_info3*)slideInfo;
            const uint32_t pageSize = slideHeader->page_size;
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(dataPagesStart + (pageSize*i));
                uint64_t delta = slideHeader->page_starts[i];
                if ( delta == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE )
                    continue;
                delta = delta/sizeof(uint64_t); // initial offset is byte based
                dyld_cache_slide_pointer3* loc = (dyld_cache_slide_pointer3*)page;
                do {
                    loc += delta;
                    delta = loc->plain.offsetToNextPointer;
                    if ( loc->auth.authenticated ) {
#if __has_feature(ptrauth_calls)
                        // re-sign the authenticated pointer for its slid target
                        uint64_t target = info.sharedRegionStart + loc->auth.offsetFromSharedCacheBase + results->slide;
                        MachOLoaded::ChainedFixupPointerOnDisk ptr;
                        ptr.raw = *((uint64_t*)loc);
                        loc->raw = ptr.signPointer(loc, target);
#else
                        // cache has authenticated pointers but this process was
                        // not built with pointer authentication support
                        results->errorMessage = "invalid pointer kind in cache file";
                        return false;
#endif
                    }
                    else {
                        loc->raw = MachOLoaded::ChainedFixupPointerOnDisk::signExtend51(loc->plain.pointerValue) + results->slide;
                    }
                } while (delta != 0);
            }
        }
#else
        else if ( slideInfoHeader->version == 4 ) {
            // same chain-walking scheme as v2, but with the v4 32-bit encoding
            const dyld_cache_slide_info4* slideHeader = (dyld_cache_slide_info4*)slideInfo;
            const uint32_t  page_size   = slideHeader->page_size;
            const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
            const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
            for (int i=0; i < slideHeader->page_starts_count; ++i) {
                uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
                uint16_t pageEntry = page_starts[i];
                //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
                if ( pageEntry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE )
                    continue;
                if ( pageEntry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
                    // page has multiple chains: walk page_extras until END
                    uint16_t chainIndex = (pageEntry & DYLD_CACHE_SLIDE4_PAGE_INDEX);
                    bool done = false;
                    while ( !done ) {
                        uint16_t pInfo = page_extras[chainIndex];
                        uint16_t pageStartOffset = (pInfo & DYLD_CACHE_SLIDE4_PAGE_INDEX)*4;    // 4-byte units
                        //dyld::log(" chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
                        rebaseChainV4(page, pageStartOffset, results->slide, slideHeader);
                        done = (pInfo & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END);
                        ++chainIndex;
                    }
                }
                else {
                    // single chain starting pageEntry*4 bytes into the page
                    uint32_t pageOffset = pageEntry * 4;
                    //dyld::log(" start pageOffset=0x%03X\n", pageOffset);
                    rebaseChainV4(page, pageOffset, results->slide, slideHeader);
                }
            }
        }
#endif // LP64
        else {
            results->errorMessage = "invalid slide info in cache file";
            return false;
        }
    }
    return true;
}
516
// Checks whether a shared cache is already mapped into this process's shared
// region (e.g. by an earlier process).  Returns true whenever a cache is
// present -- even an incompatible one, in which case results->errorMessage is
// set.  Returns false only when no cache is mapped at all.
static bool reuseExistingCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    uint64_t cacheBaseAddress;
#if __i386__
    // i386 lacks the libsystem wrapper used below; 294 is presumably the
    // syscall number of __shared_region_check_np -- TODO confirm
    if ( syscall(294, &cacheBaseAddress) == 0 ) {
#else
    if ( __shared_region_check_np(&cacheBaseAddress) == 0 ) {
#endif
        const DyldSharedCache* existingCache = (DyldSharedCache*)cacheBaseAddress;
        if ( validMagic(options, existingCache) ) {
            const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)(cacheBaseAddress + existingCache->header.mappingOffset);
            results->loadAddress = existingCache;
            // slide is the delta between where the cache is mapped and where it
            // was built to be mapped
            results->slide = (long)(cacheBaseAddress - fileMappings[0].address);
            // we don't know the path this cache was previously loaded from, assume default
            getCachePath(options, sizeof(results->path), results->path);
            if ( options.verbose ) {
                const shared_file_mapping_np* const mappings = (shared_file_mapping_np*)(cacheBaseAddress + existingCache->header.mappingOffset);
                dyld::log("re-using existing shared cache (%s):\n", results->path);
                // log the mappings at their actual (slid) addresses
                shared_file_mapping_np slidMappings[3];
                for (int i=0; i < 3; ++i) {
                    slidMappings[i] = mappings[i];
                    slidMappings[i].sfm_address += results->slide;
                }
                verboseSharedCacheMappings(slidMappings);
            }
        }
        else {
            results->errorMessage = "existing shared cache in memory is not compatible";
        }
        return true;
    }
    return false;
}
550
551 static long pickCacheASLR(CacheInfo& info)
552 {
553 // choose new random slide
554 #if __IPHONE_OS_VERSION_MIN_REQUIRED
555 // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
556 long slide = ((arc4random() % info.maxSlide) & (-16384));
557 #else
558 long slide = ((arc4random() % info.maxSlide) & (-4096));
559 #endif
560
561 // <rdar://problem/32031197> respect -disable_aslr boot-arg
562 if ( dyld3::bootArgsContains("-disable_aslr") )
563 slide = 0;
564
565 // update mappings
566 for (uint32_t i=0; i < 3; ++i) {
567 info.mappings[i].sfm_address += slide;
568 }
569
570 return slide;
571 }
572
// Maps the shared cache into the system-wide shared region via
// __shared_region_map_and_slide_np() so subsequent processes can reuse it.
// If the kernel call fails because another process won the race, falls back
// to reusing that process's cache.  Returns true on success.
static bool mapCacheSystemWide(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    // pick a slide only when this cache supports sliding
    const dyld_cache_slide_info2* slideInfo = nullptr;
    if ( info.slideInfoSize != 0 ) {
        results->slide = pickCacheASLR(info);
        slideInfo = (dyld_cache_slide_info2*)(info.slideInfoAddressUnslid + results->slide);
    }

    int result = __shared_region_map_and_slide_np(info.fd, 3, info.mappings, results->slide, slideInfo, info.slideInfoSize);
    ::close(info.fd);
    if ( result == 0 ) {
        // kernel applied the slide; mappings[0] already holds the slid address
        results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sfm_address);
    }
    else {
        // could be another process beat us to it
        // NOTE(review): reuseExistingCache() can return true with
        // results->errorMessage set (incompatible cache); that case is treated
        // as success here -- confirm this is intended
        if ( reuseExistingCache(options, results) )
            return true;
        // if cache does not exist, then really is an error
        if ( results->errorMessage == nullptr )
            results->errorMessage = "syscall to map cache into shared region failed";
        return false;
    }

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file system wide: %s\n", results->path);
        verboseSharedCacheMappings(info.mappings);
    }
    return true;
}
606 #endif // TARGET_IPHONE_SIMULATOR
607
// Maps the shared cache into this process only, using mmap() over the
// (cleared) shared region, then applies slide fixups in-process.  Used by
// the simulator and when options.forcePrivate is set.  Returns true on
// success; on mmap failure the shared region is cleared again and
// results->errorMessage is set.
static bool mapCachePrivate(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    // open and validate cache file
    CacheInfo info;
    if ( !preflightCacheFile(options, results, &info) )
        return false;

    // compute ASLR slide
    results->slide = 0;
#if !TARGET_IPHONE_SIMULATOR // simulator caches do not support sliding
    if ( info.slideInfoSize != 0 ) {
        results->slide = pickCacheASLR(info);
    }
#endif
    results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sfm_address);

    // remove the shared region sub-map
    vm_deallocate(mach_task_self(), (vm_address_t)info.sharedRegionStart, (vm_size_t)info.sharedRegionSize);

    // map cache just for this process with mmap()
    for (int i=0; i < 3; ++i) {
        void* mmapAddress = (void*)(uintptr_t)(info.mappings[i].sfm_address);
        size_t size = (size_t)(info.mappings[i].sfm_size);
        //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
        // translate VM_PROT_* init protections into mmap PROT_* flags
        int protection = 0;
        if ( info.mappings[i].sfm_init_prot & VM_PROT_EXECUTE )
            protection |= PROT_EXEC;
        if ( info.mappings[i].sfm_init_prot & VM_PROT_READ )
            protection |= PROT_READ;
        if ( info.mappings[i].sfm_init_prot & VM_PROT_WRITE )
            protection |= PROT_WRITE;
        off_t offset = info.mappings[i].sfm_file_offset;
        if ( ::mmap(mmapAddress, size, protection, MAP_FIXED | MAP_PRIVATE, info.fd, offset) != mmapAddress ) {
            // failed to map some chunk of this shared cache file
            // clear shared region
            vm_deallocate(mach_task_self(), (vm_address_t)info.sharedRegionStart, (vm_size_t)info.sharedRegionSize);
            // return failure
            results->loadAddress = nullptr;
            results->errorMessage = "could not mmap() part of dyld cache";
            return false;
        }
    }
    // NOTE(review): info.fd is never closed on this path -- confirm whether
    // the descriptor is intentionally kept open for the mmap'd regions

#if TARGET_IPHONE_SIMULATOR // simulator caches do not support sliding
    return true;
#else
    // apply slide fixups to the writable mapping in-process
    bool success = rebaseDataPages(options.verbose, info, results);

    if ( options.verbose ) {
        dyld::log("mapped dyld cache file private to process (%s):\n", results->path);
        verboseSharedCacheMappings(info.mappings);
    }
    return success;
#endif
}
663
664
665
// Loads the dyld shared cache into this process, either privately (mmap) or
// via the system-wide shared region, depending on the platform and
// options.forcePrivate.  Fills in results (loadAddress, slide, errorMessage).
bool loadDyldCache(const SharedCacheOptions& options, SharedCacheLoadInfo* results)
{
    results->loadAddress = 0;
    results->slide = 0;
    results->errorMessage = nullptr;

#if TARGET_IPHONE_SIMULATOR
    // simulator only supports mmap()ing cache privately into process
    return mapCachePrivate(options, results);
#else
    if ( options.forcePrivate ) {
        // mmap cache into this process only
        return mapCachePrivate(options, results);
    }
    else {
        // fast path: when cache is already mapped into shared region
        bool hasError = false;
        if ( reuseExistingCache(options, results) ) {
            hasError = (results->errorMessage != nullptr);
        } else {
            // slow path: this is first process to load cache
            hasError = mapCacheSystemWide(options, results);
        }
        // NOTE(review): the two branches assign opposite senses to 'hasError'
        // (reuse path: true means an error occurred; mapCacheSystemWide():
        // true means success).  Behavior preserved as-is -- confirm intended
        // return semantics against callers, which may rely on
        // results->loadAddress rather than this return value.
        return hasError;
    }
#endif
}
693
694
// Looks up 'dylibPathToFind' among the dylibs in the mapped shared cache.
// On success fills in results (mach_header in cache, canonical path, slide,
// and the closure Image when the cache uses the current format) and returns
// true.  Returns false if no cache is mapped or the dylib is not in it.
bool findInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind, SharedCacheFindDylibResults* results)
{
    if ( loadInfo.loadAddress == nullptr )
        return false;

    if ( loadInfo.loadAddress->header.formatVersion != dyld3::closure::kFormatVersion ) {
        // support for older cache with a different Image* format:
        // linear scan of the dyld_cache_image_info array by path
#if __IPHONE_OS_VERSION_MIN_REQUIRED
        // compute the same path hash stored in each image's inode field,
        // used as a cheap pre-filter before the strcmp below
        uint64_t hash = 0;
        for (const char* s=dylibPathToFind; *s != '\0'; ++s)
            hash += hash*4 + *s;
#endif
        const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)loadInfo.loadAddress + loadInfo.loadAddress->header.imagesOffset);
        const dyld_cache_image_info* const end = &start[loadInfo.loadAddress->header.imagesCount];
        for (const dyld_cache_image_info* p = start; p != end; ++p) {
#if __IPHONE_OS_VERSION_MIN_REQUIRED
            // on iOS, inode is used to hold hash of path
            if ( (p->modTime == 0) && (p->inode != hash) )
                continue;
#endif
            const char* aPath = (char*)loadInfo.loadAddress + p->pathFileOffset;
            if ( strcmp(aPath, dylibPathToFind) == 0 ) {
                results->mhInCache    = (const mach_header*)(p->address+loadInfo.slide);
                results->pathInCache  = aPath;
                results->slideInCache = loadInfo.slide;
                results->image        = nullptr;    // old format has no closure image
                return true;
            }
        }
        return false;
    }

    // current format: use the closure image array's path index
    const dyld3::closure::ImageArray* images = loadInfo.loadAddress->cachedDylibsImageArray();
    results->image = nullptr;
    uint32_t imageIndex;
    if ( loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex) ) {
        results->image = images->imageForNum(imageIndex+1);
    }
#if __MAC_OS_X_VERSION_MIN_REQUIRED
    else {
        // <rdar://problem/32740215> handle symlink to cached dylib
        if ( loadInfo.loadAddress->header.dylibsExpectedOnDisk ) {
            struct stat statBuf;
            if ( dyld::my_stat(dylibPathToFind, &statBuf) == 0 ) {
                // on macOS we store the inode and mtime of each dylib in the cache in the dyld_cache_image_info array
                const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)loadInfo.loadAddress + loadInfo.loadAddress->header.imagesOffset);
                const dyld_cache_image_info* const end = &start[loadInfo.loadAddress->header.imagesCount];
                for (const dyld_cache_image_info* p = start; p != end; ++p) {
                    if ( (p->inode == statBuf.st_ino) && (p->modTime == statBuf.st_mtime) ) {
                        imageIndex = (uint32_t)(p - start);
                        results->image = images->imageForNum(imageIndex+1);
                        break;
                    }
                }
            }
        }
        else {
            // dylibs are not on disk: resolve symlinks and retry the path lookup
            char resolvedPath[PATH_MAX];
            if ( realpath(dylibPathToFind, resolvedPath) != nullptr ) {
                if ( loadInfo.loadAddress->hasImagePath(resolvedPath, imageIndex) ) {
                    results->image = images->imageForNum(imageIndex+1);
                }
            }
        }
    }
#endif
    if ( results->image == nullptr )
        return false;

    results->mhInCache    = (const mach_header*)((uintptr_t)loadInfo.loadAddress + results->image->cacheOffset());
    results->pathInCache  = results->image->path();
    results->slideInCache = loadInfo.slide;
    return true;
}
769
770
771 bool pathIsInSharedCacheImage(const SharedCacheLoadInfo& loadInfo, const char* dylibPathToFind)
772 {
773 if ( (loadInfo.loadAddress == nullptr) || (loadInfo.loadAddress->header.formatVersion != closure::kFormatVersion) )
774 return false;
775
776 uint32_t imageIndex;
777 return loadInfo.loadAddress->hasImagePath(dylibPathToFind, imageIndex);
778 }
779
780
781 } // namespace dyld3
782