// dyld3/shared-cache/CacheBuilder.cpp — from Apple dyld-635.2
1 /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
2 *
3 * Copyright (c) 2014 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <unistd.h>
27 #include <dirent.h>
28 #include <sys/errno.h>
29 #include <sys/fcntl.h>
30 #include <sys/param.h>
31 #include <sys/types.h>
32 #include <sys/stat.h>
33 #include <mach/mach.h>
34 #include <mach/mach_time.h>
35 #include <mach/mach_vm.h>
36 #include <mach-o/loader.h>
37 #include <mach-o/fat.h>
38 #include <mach/shared_region.h>
39 #include <assert.h>
40 #include <CommonCrypto/CommonHMAC.h>
41 #include <CommonCrypto/CommonDigest.h>
42 #include <CommonCrypto/CommonDigestSPI.h>
43 #include <pthread/pthread.h>
44
45 #include <string>
46 #include <vector>
47 #include <unordered_map>
48 #include <unordered_set>
49
50 #include "MachOFileAbstraction.hpp"
51 #include "CodeSigningTypes.h"
52 #include "DyldSharedCache.h"
53 #include "CacheBuilder.h"
54 #include "FileAbstraction.hpp"
55 #include "Trie.hpp"
56 #include "FileUtils.h"
57 #include "Diagnostics.h"
58 #include "ClosureBuilder.h"
59 #include "Closure.h"
60 #include "StringUtils.h"
61
62 #if __has_include("dyld_cache_config.h")
63 #include "dyld_cache_config.h"
64 #else
65 #define ARM_SHARED_REGION_START 0x1A000000ULL
66 #define ARM_SHARED_REGION_SIZE 0x26000000ULL
67 #define ARM64_SHARED_REGION_START 0x180000000ULL
68 #define ARM64_SHARED_REGION_SIZE 0x40000000ULL
69 #endif
70
71 #ifndef ARM64_32_SHARED_REGION_START
72 #define ARM64_32_SHARED_REGION_START 0x1A000000ULL
73 #define ARM64_32_SHARED_REGION_SIZE 0x26000000ULL
74 #endif
75
// Static table of per-architecture shared-cache layout parameters.
// Field order matches CacheBuilder::ArchLayout (declared in CacheBuilder.h).
// Grounded in this file: column 1 is the cache's unslid base address
// (sharedMemoryStart, see the stub-optimization pass below), column 2 is the
// maximum cache VM size (sharedMemorySize, see cacheOverflowAmount() and
// deleteBuffer()), and column 5 is the arch name matched against
// options.archName in the constructor.  One of the two trailing bools is
// sharedRegionsAreDiscontiguous — true only for the x86_64 variants, whose
// regions are checked individually in cacheOverflowAmount().
// NOTE(review): remaining column meanings (alignment/padding/branch-pool
// fields) should be confirmed against the ArchLayout declaration.
const CacheBuilder::ArchLayout CacheBuilder::_s_archLayout[] = {
    { 0x7FFF20000000ULL,            0xEFE00000ULL,              0x40000000, 0xFFFF000000000000, "x86_64",   0,          0,          0,          12, 2,  true,  true  },
    { 0x7FFF20000000ULL,            0xEFE00000ULL,              0x40000000, 0xFFFF000000000000, "x86_64h",  0,          0,          0,          12, 2,  true,  true  },
    { SHARED_REGION_BASE_I386,      SHARED_REGION_SIZE_I386,    0x00200000, 0x0,                "i386",     0,          0,          0,          12, 0,  false, false },
    { ARM64_SHARED_REGION_START,    ARM64_SHARED_REGION_SIZE,   0x02000000, 0x00FFFF0000000000, "arm64",    0x0000C000, 0x00100000, 0x07F00000, 14, 2,  false, true  },
#if SUPPORT_ARCH_arm64e
    { ARM64_SHARED_REGION_START,    ARM64_SHARED_REGION_SIZE,   0x02000000, 0x00FFFF0000000000, "arm64e",   0x0000C000, 0x00100000, 0x07F00000, 14, 2,  false, true  },
#endif
#if SUPPORT_ARCH_arm64_32
    { ARM64_32_SHARED_REGION_START, ARM64_32_SHARED_REGION_SIZE,0x02000000, 0xC0000000,         "arm64_32", 0x0000C000, 0x00100000, 0x07F00000, 14, 6,  false, false },
#endif
    { ARM_SHARED_REGION_START,      ARM_SHARED_REGION_SIZE,     0x02000000, 0xE0000000,         "armv7s",   0,          0,          0,          14, 4,  false, false },
    { ARM_SHARED_REGION_START,      ARM_SHARED_REGION_SIZE,     0x00400000, 0xE0000000,         "armv7k",   0,          0,          0,          14, 4,  false, false },
    { 0x40000000,                   0x40000000,                 0x02000000, 0x0,                "sim-x86",  0,          0,          0,          14, 0,  false, false }
};
91
92
93 // These are dylibs that may be interposed, so stubs calling into them should never be bypassed
// These are dylibs that may be interposed, so stubs calling into them should never be bypassed.
// The list is nullptr-terminated.
const char* const CacheBuilder::_s_neverStubEliminate[] = {
    "/usr/lib/system/libdispatch.dylib",
    nullptr
};
98
99
100 CacheBuilder::CacheBuilder(const DyldSharedCache::CreateOptions& options, const dyld3::closure::FileSystem& fileSystem)
101 : _options(options)
102 , _fileSystem(fileSystem)
103 , _fullAllocatedBuffer(0)
104 , _diagnostics(options.loggingPrefix, options.verbose)
105 , _archLayout(nullptr)
106 , _aliasCount(0)
107 , _slideInfoFileOffset(0)
108 , _slideInfoBufferSizeAllocated(0)
109 , _allocatedBufferSize(0)
110 , _branchPoolsLinkEditStartAddr(0)
111 {
112
113 std::string targetArch = options.archName;
114 if ( options.forSimulator && (options.archName == "i386") )
115 targetArch = "sim-x86";
116
117 for (const ArchLayout& layout : _s_archLayout) {
118 if ( layout.archName == targetArch ) {
119 _archLayout = &layout;
120 break;
121 }
122 }
123
124 if (!_archLayout) {
125 _diagnostics.error("Tool was built without support for: '%s'", targetArch.c_str());
126 }
127 }
128
129
// Returns the builder's accumulated error message (empty if no error so far).
std::string CacheBuilder::errorMessage()
{
    return _diagnostics.errorMessage();
}
134
// Returns a copy of all warnings recorded on the builder's diagnostics object.
const std::set<std::string> CacheBuilder::warnings()
{
    return _diagnostics.warnings();
}
139
// Returns the set of dylibs evicted from the cache (see evictLeafDylibs()).
const std::set<const dyld3::MachOAnalyzer*> CacheBuilder::evictions()
{
    return _evictions;
}
144
145 void CacheBuilder::deleteBuffer()
146 {
147 vm_deallocate(mach_task_self(), _fullAllocatedBuffer, _archLayout->sharedMemorySize);
148 _fullAllocatedBuffer = 0;
149 _allocatedBufferSize = 0;
150 }
151
152
153 void CacheBuilder::makeSortedDylibs(const std::vector<LoadedMachO>& dylibs, const std::unordered_map<std::string, unsigned> sortOrder)
154 {
155 for (const LoadedMachO& dylib : dylibs) {
156 _sortedDylibs.push_back({ &dylib, dylib.mappedFile.runtimePath, {} });
157 }
158
159 std::sort(_sortedDylibs.begin(), _sortedDylibs.end(), [&](const DylibInfo& a, const DylibInfo& b) {
160 const auto& orderA = sortOrder.find(a.input->mappedFile.runtimePath);
161 const auto& orderB = sortOrder.find(b.input->mappedFile.runtimePath);
162 bool foundA = (orderA != sortOrder.end());
163 bool foundB = (orderB != sortOrder.end());
164
165 // Order all __DATA_DIRTY segments specified in the order file first, in
166 // the order specified in the file, followed by any other __DATA_DIRTY
167 // segments in lexicographic order.
168 if ( foundA && foundB )
169 return orderA->second < orderB->second;
170 else if ( foundA )
171 return true;
172 else if ( foundB )
173 return false;
174 else
175 return a.input->mappedFile.runtimePath < b.input->mappedFile.runtimePath;
176 });
177 }
178
179
// Converts a mach_absolute_time() delta to whole milliseconds (truncating).
// NOTE(review): assumes one tick == 1ns, i.e. a 1/1 timebase — used only for
// build-timing diagnostics, so the approximation is acceptable here.
inline uint32_t absolutetime_to_milliseconds(uint64_t abstime)
{
    return static_cast<uint32_t>(abstime / 1000000);
}
184
// Helper record used by the eviction passes: a candidate dylib together with
// its install name and its total non-LINKEDIT segment vmSize.
struct DylibAndSize
{
    const CacheBuilder::LoadedMachO*    input;          // the loaded dylib under consideration
    const char*                         installName;    // its install name (from mh->installName())
    uint64_t                            size;           // sum of segment vmSizes, excluding __LINKEDIT
};
191
// Returns how many bytes the current layout exceeds the shared region budget
// by, or 0 if it fits.
uint64_t CacheBuilder::cacheOverflowAmount()
{
    if ( _archLayout->sharedRegionsAreDiscontiguous ) {
        // for macOS x86_64 cache, need to check each region for overflow
        // (each mapping has its own fixed budget)
        if ( _readExecuteRegion.sizeInUse > 0x60000000 )
            return (_readExecuteRegion.sizeInUse - 0x60000000);

        if ( _readWriteRegion.sizeInUse > 0x40000000 )
            return (_readWriteRegion.sizeInUse - 0x40000000);

        if ( _readOnlyRegion.sizeInUse > 0x3FE00000 )
            return (_readOnlyRegion.sizeInUse - 0x3FE00000);
    }
    else {
        // Contiguous layout: estimate the final VM size.  If LINKEDIT has not
        // been optimized yet (sizeInUse still equals bufferSize), scale the
        // read-only region by the expected post-optimization fraction.
        bool alreadyOptimized = (_readOnlyRegion.sizeInUse != _readOnlyRegion.bufferSize);
        uint64_t vmSize = _readOnlyRegion.unslidLoadAddress - _readExecuteRegion.unslidLoadAddress;
        if ( alreadyOptimized )
            vmSize += _readOnlyRegion.sizeInUse;
        else if ( _options.excludeLocalSymbols )
            vmSize += (_readOnlyRegion.sizeInUse * 37/100); // assume locals removal and LINKEDIT optimization reduce LINKEDITs to 37% of original size
        else
            vmSize += (_readOnlyRegion.sizeInUse * 80/100); // assume LINKEDIT optimization reduces LINKEDITs to 80% of original size
        if ( vmSize > _archLayout->sharedMemorySize )
            return vmSize - _archLayout->sharedMemorySize;
    }
    // fits in shared region
    return 0;
}
220
// Evicts unreferenced ("leaf") dylibs, largest first, until at least
// `reductionTarget` bytes have been reclaimed.  Evicted dylibs are recorded
// in _evictions, appended to `overflowDylibs` (so "other" dlopen closures can
// still be built for them), and removed from _sortedDylibs.  Returns the
// cumulative number of evictions.
size_t CacheBuilder::evictLeafDylibs(uint64_t reductionTarget, std::vector<const LoadedMachO*>& overflowDylibs)
{
    // build count of how many references there are to each dylib
    __block std::map<std::string, unsigned int> referenceCount;
    for (const DylibInfo& dylib : _sortedDylibs) {
        dylib.input->mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool &stop) {
            referenceCount[loadPath] += 1;
        });
    }

    // find all dylibs not referenced
    std::vector<DylibAndSize> unreferencedDylibs;
    for (const DylibInfo& dylib : _sortedDylibs) {
        const char* installName = dylib.input->mappedFile.mh->installName();
        if ( referenceCount.count(installName) == 0 ) {
            // conservative: sum up all segments except LINKEDIT
            __block uint64_t segsSize = 0;
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
                if ( strcmp(info.segName, "__LINKEDIT") != 0 )
                    segsSize += info.vmSize;
            });
            unreferencedDylibs.push_back({ dylib.input, installName, segsSize });
        }
    }
    // sort leaf dylibs by size, largest first, so fewer evictions are needed
    std::sort(unreferencedDylibs.begin(), unreferencedDylibs.end(), [&](const DylibAndSize& a, const DylibAndSize& b) {
        return ( a.size > b.size );
    });

    // build set of dylibs that if removed will allow cache to build
    for (DylibAndSize& dylib : unreferencedDylibs) {
        if ( _options.verbose )
            _diagnostics.warning("to prevent cache overflow, not caching %s", dylib.installName);
        _evictions.insert(dylib.input->mappedFile.mh);
        // Track the evicted dylibs so we can try build "other" dlopen closures for them.
        overflowDylibs.push_back(dylib.input);
        // Note: the dylib whose size crosses the remaining target is still
        // evicted before we stop, guaranteeing the target is met.
        if ( dylib.size > reductionTarget )
            break;
        reductionTarget -= dylib.size;
    }

    // prune _sortedDylibs
    _sortedDylibs.erase(std::remove_if(_sortedDylibs.begin(), _sortedDylibs.end(), [&](const DylibInfo& dylib) {
        return (_evictions.count(dylib.input->mappedFile.mh) != 0);
    }),_sortedDylibs.end());

    return _evictions.size();
}
269
270 // Handles building a list of input files to the CacheBuilder itself.
271 class CacheInputBuilder {
272 public:
273 CacheInputBuilder(const dyld3::closure::FileSystem& fileSystem,
274 std::string reqArchitecture, dyld3::Platform reqPlatform)
275 : fileSystem(fileSystem), reqArchitecture(reqArchitecture), reqPlatform(reqPlatform) { }
276
277 // Loads and maps any MachOs in the given list of files.
278 void loadMachOs(std::vector<CacheBuilder::InputFile>& inputFiles,
279 std::vector<CacheBuilder::LoadedMachO>& dylibsToCache,
280 std::vector<CacheBuilder::LoadedMachO>& otherDylibs,
281 std::vector<CacheBuilder::LoadedMachO>& executables,
282 std::vector<CacheBuilder::LoadedMachO>& couldNotLoadFiles) {
283
284 std::map<std::string, uint64_t> dylibInstallNameMap;
285 for (CacheBuilder::InputFile& inputFile : inputFiles) {
286 dyld3::closure::LoadedFileInfo loadedFileInfo = dyld3::MachOAnalyzer::load(inputFile.diag, fileSystem, inputFile.path, reqArchitecture.c_str(), reqPlatform);
287 const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)loadedFileInfo.fileContent;
288 if (ma == nullptr) {
289 couldNotLoadFiles.emplace_back((CacheBuilder::LoadedMachO){ DyldSharedCache::MappedMachO(), loadedFileInfo, &inputFile });
290 continue;
291 }
292
293 DyldSharedCache::MappedMachO mappedFile(inputFile.path, ma, loadedFileInfo.sliceLen, false, false,
294 loadedFileInfo.sliceOffset, loadedFileInfo.mtime, loadedFileInfo.inode);
295
296 // The file can be loaded with the given slice, but we may still want to exlude it from the cache.
297 if (ma->isDylib()) {
298 std::string installName = ma->installName();
299
300 // Let the platform exclude the file before we do anything else.
301 if (platformExcludesInstallName(installName)) {
302 inputFile.diag.verbose("Platform excluded file\n");
303 fileSystem.unloadFile(loadedFileInfo);
304 continue;
305 }
306
307 if (!ma->canBePlacedInDyldCache(inputFile.path, ^(const char* msg) {
308 inputFile.diag.warning("Dylib located at '%s' cannot be placed in cache because: %s", inputFile.path, msg);
309 })) {
310 // TODO: Add exclusion lists here?
311 // Probably not as we already applied the dylib exclusion list.
312 otherDylibs.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
313 continue;
314 }
315
316 // Otherwise see if we have another file with this install name
317 auto iteratorAndInserted = dylibInstallNameMap.insert(std::make_pair(installName, dylibsToCache.size()));
318 if (iteratorAndInserted.second) {
319 // We inserted the dylib so we haven't seen another with this name.
320 if (installName[0] != '@' && installName != inputFile.path) {
321 inputFile.diag.warning("Dylib located at '%s' has installname '%s'", inputFile.path, installName.c_str());
322 }
323
324 dylibsToCache.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
325 } else {
326 // We didn't insert this one so we've seen it before.
327 CacheBuilder::LoadedMachO& previousLoadedMachO = dylibsToCache[iteratorAndInserted.first->second];
328 inputFile.diag.warning("Multiple dylibs claim installname '%s' ('%s' and '%s')", installName.c_str(), inputFile.path, previousLoadedMachO.mappedFile.runtimePath.c_str());
329
330 // This is the "Good" one, overwrite
331 if (inputFile.path == installName) {
332 // Unload the old one
333 fileSystem.unloadFile(previousLoadedMachO.loadedFileInfo);
334
335 // And replace with this one.
336 previousLoadedMachO.mappedFile = mappedFile;
337 previousLoadedMachO.loadedFileInfo = loadedFileInfo;
338 }
339 }
340 } else if (ma->isBundle()) {
341 // TODO: Add exclusion lists here?
342 otherDylibs.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
343 } else if (ma->isDynamicExecutable()) {
344 if (platformExcludesExecutablePath_macOS(inputFile.path)) {
345 inputFile.diag.verbose("Platform excluded file\n");
346 fileSystem.unloadFile(loadedFileInfo);
347 continue;
348 }
349 executables.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
350 } else {
351 inputFile.diag.verbose("Unsupported mach file type\n");
352 fileSystem.unloadFile(loadedFileInfo);
353 }
354 }
355 }
356
357 private:
358
359
360
361 static bool platformExcludesInstallName_macOS(const std::string& installName) {
362 return false;
363 }
364
365 static bool platformExcludesInstallName_iOS(const std::string& installName) {
366 if ( installName == "/System/Library/Caches/com.apple.xpc/sdk.dylib" )
367 return true;
368 if ( installName == "/System/Library/Caches/com.apple.xpcd/xpcd_cache.dylib" )
369 return true;
370 return false;
371 }
372
373 static bool platformExcludesInstallName_tvOS(const std::string& installName) {
374 return platformExcludesInstallName_iOS(installName);
375 }
376
377 static bool platformExcludesInstallName_watchOS(const std::string& installName) {
378 return platformExcludesInstallName_iOS(installName);
379 }
380
381 static bool platformExcludesInstallName_bridgeOS(const std::string& installName) {
382 return platformExcludesInstallName_iOS(installName);
383 }
384
385 // Returns true if the current platform requires that this install name be excluded from the shared cache
386 // Note that this overrides any exclusion from anywhere else.
387 bool platformExcludesInstallName(const std::string& installName) {
388 switch (reqPlatform) {
389 case dyld3::Platform::unknown:
390 return false;
391 case dyld3::Platform::macOS:
392 return platformExcludesInstallName_macOS(installName);
393 case dyld3::Platform::iOS:
394 return platformExcludesInstallName_iOS(installName);
395 case dyld3::Platform::tvOS:
396 return platformExcludesInstallName_tvOS(installName);
397 case dyld3::Platform::watchOS:
398 return platformExcludesInstallName_watchOS(installName);
399 case dyld3::Platform::bridgeOS:
400 return platformExcludesInstallName_bridgeOS(installName);
401 case dyld3::Platform::iOSMac:
402 return false;
403 case dyld3::Platform::iOS_simulator:
404 return false;
405 case dyld3::Platform::tvOS_simulator:
406 return false;
407 case dyld3::Platform::watchOS_simulator:
408 return false;
409 }
410 }
411
412
413
414
415 static bool platformExcludesExecutablePath_macOS(const std::string& path) {
416 return false;
417 }
418
419 static bool platformExcludesExecutablePath_iOS(const std::string& path) {
420 //HACK exclude all launchd and installd variants until we can do something about xpcd_cache.dylib and friends
421 if (path == "/sbin/launchd"
422 || path == "/usr/local/sbin/launchd.debug"
423 || path == "/usr/local/sbin/launchd.development"
424 || path == "/usr/libexec/installd") {
425 return true;
426 }
427 return false;
428 }
429
430 static bool platformExcludesExecutablePath_tvOS(const std::string& path) {
431 return platformExcludesExecutablePath_iOS(path);
432 }
433
434 static bool platformExcludesExecutablePath_watchOS(const std::string& path) {
435 return platformExcludesExecutablePath_iOS(path);
436 }
437
438 static bool platformExcludesExecutablePath_bridgeOS(const std::string& path) {
439 return platformExcludesExecutablePath_iOS(path);
440 }
441
442 // Returns true if the current platform requires that this path be excluded from the shared cache
443 // Note that this overrides any exclusion from anywhere else.
444 bool platformExcludesExecutablePath(const std::string& path) {
445 switch (reqPlatform) {
446 case dyld3::Platform::unknown:
447 return false;
448 case dyld3::Platform::macOS:
449 return platformExcludesExecutablePath_macOS(path);
450 case dyld3::Platform::iOS:
451 return platformExcludesExecutablePath_iOS(path);
452 case dyld3::Platform::tvOS:
453 return platformExcludesExecutablePath_tvOS(path);
454 case dyld3::Platform::watchOS:
455 return platformExcludesExecutablePath_watchOS(path);
456 case dyld3::Platform::bridgeOS:
457 return platformExcludesExecutablePath_bridgeOS(path);
458 case dyld3::Platform::iOSMac:
459 return false;
460 case dyld3::Platform::iOS_simulator:
461 return false;
462 case dyld3::Platform::tvOS_simulator:
463 return false;
464 case dyld3::Platform::watchOS_simulator:
465 return false;
466 }
467 }
468
469 const dyld3::closure::FileSystem& fileSystem;
470 std::string reqArchitecture;
471 dyld3::Platform reqPlatform;
472 };
473
// Verifies the proposed cache contents are self-contained: every dylib in
// dylibsToCache must only depend (non-weakly) on other dylibs in the cache.
// Dylibs that fail are moved to otherDylibs, with one warning per reason.
// Required binaries that transitively depend on missing dylibs get their
// dependencies force-marked as required.
static void verifySelfContained(std::vector<CacheBuilder::LoadedMachO>& dylibsToCache,
                                std::vector<CacheBuilder::LoadedMachO>& otherDylibs,
                                std::vector<CacheBuilder::LoadedMachO>& couldNotLoadFiles)
{
    // build map of dylibs
    __block std::map<std::string, const CacheBuilder::LoadedMachO*> knownDylibs;    // cache-eligible only
    __block std::map<std::string, const CacheBuilder::LoadedMachO*> allDylibs;      // everything we know about
    // Cache-eligible dylibs are reachable by both runtime path and install name.
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        knownDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        allDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        if (const char* installName = dylib.mappedFile.mh->installName()) {
            knownDylibs.insert({ installName, &dylib });
            allDylibs.insert({ installName, &dylib });
        }
    }

    for (const CacheBuilder::LoadedMachO& dylib : otherDylibs) {
        allDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        if (const char* installName = dylib.mappedFile.mh->installName())
            allDylibs.insert({ installName, &dylib });
    }

    for (const CacheBuilder::LoadedMachO& dylib : couldNotLoadFiles) {
        allDylibs.insert({ dylib.inputFile->path, &dylib });
    }

    // check all dependencies to assure every dylib in cache only depends on other dylibs in cache
    __block std::map<std::string, std::set<std::string>> badDylibs;   // runtimePath -> reasons it was rejected
    __block bool doAgain = true;
    // Iterate to a fixed point: rejecting one dylib can orphan its dependents.
    while ( doAgain ) {
        doAgain = false;
        // scan dylib list making sure all dependents are in dylib list
        for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
            if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                continue;
            dylib.mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                if (isWeak)
                    return;   // weak dependencies may legitimately be absent
                if ( knownDylibs.count(loadPath) == 0 ) {
                    badDylibs[dylib.mappedFile.runtimePath].insert(std::string("Could not find dependency '") + loadPath + "'");
                    // This dylib is now ineligible too, so remove it and re-scan.
                    knownDylibs.erase(dylib.mappedFile.runtimePath);
                    knownDylibs.erase(dylib.mappedFile.mh->installName());
                    doAgain = true;
                }
            });
        }
    }

    // Now walk the dylibs which depend on missing dylibs and see if any of them are required binaries.
    for (auto badDylibsIterator : badDylibs) {
        const std::string& dylibRuntimePath = badDylibsIterator.first;
        auto requiredDylibIterator = allDylibs.find(dylibRuntimePath);
        if (requiredDylibIterator == allDylibs.end())
            continue;
        if (!requiredDylibIterator->second->inputFile->mustBeIncluded())
            continue;
        // This dylib is required so mark all dependencies as requried too
        __block std::vector<const CacheBuilder::LoadedMachO*> worklist;
        worklist.push_back(requiredDylibIterator->second);
        while (!worklist.empty()) {
            const CacheBuilder::LoadedMachO* dylib = worklist.back();
            worklist.pop_back();
            if (!dylib->mappedFile.mh)
                continue;   // files that failed to load have no parsed header
            dylib->mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                if (isWeak)
                    return;
                auto dylibIterator = allDylibs.find(loadPath);
                if (dylibIterator != allDylibs.end()) {
                    // Only promote dylibs whose requirement state is still unset,
                    // which also prevents revisiting nodes in the worklist walk.
                    if (dylibIterator->second->inputFile->state == CacheBuilder::InputFile::Unset) {
                        dylibIterator->second->inputFile->state = CacheBuilder::InputFile::MustBeIncludedForDependent;
                        worklist.push_back(dylibIterator->second);
                    }
                }
            });
        }
    }

    // FIXME: Make this an option we can pass in
    const bool evictLeafDylibs = true;
    if (evictLeafDylibs) {
        // Repeatedly reject unused "must be excluded if unused" dylibs; each
        // rejection may make further dylibs unreferenced, hence the loop.
        doAgain = true;
        while ( doAgain ) {
            doAgain = false;

            // build count of how many references there are to each dylib
            __block std::set<std::string> referencedDylibs;
            for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
                if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                    continue;
                dylib.mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool &stop) {
                    referencedDylibs.insert(loadPath);
                });
            }

            // find all dylibs not referenced
            std::vector<DylibAndSize> unreferencedDylibs;
            for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
                if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                    continue;
                const char* installName = dylib.mappedFile.mh->installName();
                if ( (referencedDylibs.count(installName) == 0) && (dylib.inputFile->state == CacheBuilder::InputFile::MustBeExcludedIfUnused) ) {
                    badDylibs[dylib.mappedFile.runtimePath].insert(std::string("It has been explicitly excluded as it is unused"));
                    doAgain = true;
                }
            }
        }
    }

    // Move bad dylibs from dylibs to cache to other dylibs.
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        auto i = badDylibs.find(dylib.mappedFile.runtimePath);
        if ( i != badDylibs.end()) {
            otherDylibs.push_back(dylib);
            for (const std::string& reason : i->second )
                otherDylibs.back().inputFile->diag.warning("Dylib located at '%s' not placed in shared cache because: %s", dylib.mappedFile.runtimePath.c_str(), reason.c_str());
        }
    }

    // Capture by const reference so the erase predicate cannot mutate the map.
    const auto& badDylibsLambdaRef = badDylibs;
    dylibsToCache.erase(std::remove_if(dylibsToCache.begin(), dylibsToCache.end(), [&](const CacheBuilder::LoadedMachO& dylib) {
        if (badDylibsLambdaRef.find(dylib.mappedFile.runtimePath) != badDylibsLambdaRef.end())
            return true;
        return false;
    }), dylibsToCache.end());
}
600
601 // This is the new build API which takes the raw files (which could be FAT) and tries to build a cache from them.
602 // We should remove the other build() method, or make it private so that this can wrap it.
// This is the new build API which takes the raw files (which could be FAT) and tries to build a cache from them.
// We should remove the other build() method, or make it private so that this can wrap it.
void CacheBuilder::build(std::vector<CacheBuilder::InputFile>& inputFiles,
                         std::vector<DyldSharedCache::FileAlias>& aliases) {
    // First filter down to files which are actually MachO's
    CacheInputBuilder cacheInputBuilder(_fileSystem, _archLayout->archName, _options.platform);

    std::vector<LoadedMachO> dylibsToCache;
    std::vector<LoadedMachO> otherDylibs;
    std::vector<LoadedMachO> executables;
    std::vector<LoadedMachO> couldNotLoadFiles;
    cacheInputBuilder.loadMachOs(inputFiles, dylibsToCache, otherDylibs, executables, couldNotLoadFiles);

    // May demote dylibs from dylibsToCache into otherDylibs (see above).
    verifySelfContained(dylibsToCache, otherDylibs, couldNotLoadFiles);

    // Check for required binaries before we try to build the cache
    if (!_diagnostics.hasError()) {
        // Surface any required binary that was demoted or failed to load as a
        // top-level error, with the per-file reason when one was recorded.
        std::string errorString;
        for (const LoadedMachO& dylib : otherDylibs) {
            if (dylib.inputFile->mustBeIncluded()) {
                // An error loading a required file must be propagated up to the top level diagnostic handler.
                bool gotWarning = false;
                for (const std::string& warning : dylib.inputFile->diag.warnings()) {
                    gotWarning = true;
                    std::string message = warning;
                    // trim trailing newline so the concatenated report stays one line per reason
                    if (message.back() == '\n')
                        message.pop_back();
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: " + message + "\n";
                }
                if (!gotWarning) {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: 'unknown error. Please report to dyld'\n";
                }
            }
        }
        for (const LoadedMachO& dylib : couldNotLoadFiles) {
            if (dylib.inputFile->mustBeIncluded()) {
                if (dylib.inputFile->diag.hasError()) {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: " + dylib.inputFile->diag.errorMessage() + "\n";
                } else {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: 'unknown error. Please report to dyld'\n";

                }
            }
        }
        if (!errorString.empty()) {
            _diagnostics.error("%s", errorString.c_str());
        }
    }

    // Run the actual cache build on the filtered inputs.
    if (!_diagnostics.hasError())
        build(dylibsToCache, otherDylibs, executables, aliases);

    if (!_diagnostics.hasError()) {
        // If we succeeded in building, then now see if there was a missing required file, and if so why its missing.
        std::string errorString;
        for (CacheBuilder::InputFile& inputFile : inputFiles) {
            if (inputFile.mustBeIncluded() && inputFile.diag.hasError()) {
                // An error loading a required file must be propagated up to the top level diagnostic handler.
                std::string message = inputFile.diag.errorMessage();
                if (message.back() == '\n')
                    message.pop_back();
                errorString += "Required binary was not included in the shared cache '" + std::string(inputFile.path) + "' because: " + message + "\n";
            }
        }
        if (!errorString.empty()) {
            _diagnostics.error("%s", errorString.c_str());
        }
    }

    // Add all the warnings from the input files to the top level warnings on the main diagnostics object.
    for (CacheBuilder::InputFile& inputFile : inputFiles) {
        for (const std::string& warning : inputFile.diag.warnings())
            _diagnostics.warning("%s", warning.c_str());
    }

    // Clean up the loaded files
    for (LoadedMachO& loadedMachO : dylibsToCache)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
    for (LoadedMachO& loadedMachO : otherDylibs)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
    for (LoadedMachO& loadedMachO : executables)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
}
693
694 void CacheBuilder::build(const std::vector<DyldSharedCache::MappedMachO>& dylibs,
695 const std::vector<DyldSharedCache::MappedMachO>& otherOsDylibsInput,
696 const std::vector<DyldSharedCache::MappedMachO>& osExecutables,
697 std::vector<DyldSharedCache::FileAlias>& aliases) {
698
699 std::vector<LoadedMachO> dylibsToCache;
700 std::vector<LoadedMachO> otherDylibs;
701 std::vector<LoadedMachO> executables;
702
703 for (const DyldSharedCache::MappedMachO& mappedMachO : dylibs) {
704 dyld3::closure::LoadedFileInfo loadedFileInfo;
705 loadedFileInfo.fileContent = mappedMachO.mh;
706 loadedFileInfo.fileContentLen = mappedMachO.length;
707 loadedFileInfo.sliceOffset = mappedMachO.sliceFileOffset;
708 loadedFileInfo.sliceLen = mappedMachO.length;
709 loadedFileInfo.inode = mappedMachO.inode;
710 loadedFileInfo.mtime = mappedMachO.modTime;
711 loadedFileInfo.path = mappedMachO.runtimePath.c_str();
712 dylibsToCache.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
713 }
714
715 for (const DyldSharedCache::MappedMachO& mappedMachO : otherOsDylibsInput) {
716 dyld3::closure::LoadedFileInfo loadedFileInfo;
717 loadedFileInfo.fileContent = mappedMachO.mh;
718 loadedFileInfo.fileContentLen = mappedMachO.length;
719 loadedFileInfo.sliceOffset = mappedMachO.sliceFileOffset;
720 loadedFileInfo.sliceLen = mappedMachO.length;
721 loadedFileInfo.inode = mappedMachO.inode;
722 loadedFileInfo.mtime = mappedMachO.modTime;
723 loadedFileInfo.path = mappedMachO.runtimePath.c_str();
724 otherDylibs.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
725 }
726
727 for (const DyldSharedCache::MappedMachO& mappedMachO : osExecutables) {
728 dyld3::closure::LoadedFileInfo loadedFileInfo;
729 loadedFileInfo.fileContent = mappedMachO.mh;
730 loadedFileInfo.fileContentLen = mappedMachO.length;
731 loadedFileInfo.sliceOffset = mappedMachO.sliceFileOffset;
732 loadedFileInfo.sliceLen = mappedMachO.length;
733 loadedFileInfo.inode = mappedMachO.inode;
734 loadedFileInfo.mtime = mappedMachO.modTime;
735 loadedFileInfo.path = mappedMachO.runtimePath.c_str();
736 executables.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
737 }
738
739 build(dylibsToCache, otherDylibs, executables, aliases);
740 }
741
// Top-level cache-construction pipeline.  Phases run in a fixed order because
// each mutates the cache buffer in place: layout -> copy raw segments ->
// rebase -> bind/build image arrays -> ObjC/stub/LINKEDIT optimization ->
// closures -> slide info -> code signing.  Any phase that fails records an
// error in _diagnostics and this method returns early; callers must check
// _diagnostics.hasError() afterwards.
//
//   dylibs            - dylibs to place inside the cache
//   otherOsDylibsInput- OS dylibs not cached, but given dlopen closures
//   osExecutables     - executables to build launch closures for
//   aliases           - extra install-name aliases recorded in the image table
void CacheBuilder::build(const std::vector<LoadedMachO>& dylibs,
                         const std::vector<LoadedMachO>& otherOsDylibsInput,
                         const std::vector<LoadedMachO>& osExecutables,
                         std::vector<DyldSharedCache::FileAlias>& aliases)
{
    // <rdar://problem/21317611> error out instead of crash if cache has no dylibs
    // FIXME: plist should specify required vs optional dylibs
    if ( dylibs.size() < 30 ) {
        _diagnostics.error("missing required minimum set of dylibs");
        return;
    }
    uint64_t t1 = mach_absolute_time();    // t1..t11 bracket each phase for the verbose timing report below

    // make copy of dylib list and sort
    makeSortedDylibs(dylibs, _options.dylibOrdering);

    // allocate space used by largest possible cache plus room for LINKEDITS before optimization
    // (1.5x because LINKEDITs are merged/compacted later, shrinking actual use)
    _allocatedBufferSize = _archLayout->sharedMemorySize * 1.50;
    if ( vm_allocate(mach_task_self(), &_fullAllocatedBuffer, _allocatedBufferSize, VM_FLAGS_ANYWHERE) != 0 ) {
        _diagnostics.error("could not allocate buffer");
        return;
    }

    // assign addresses for each segment of each dylib in new cache
    assignSegmentAddresses();
    std::vector<const LoadedMachO*> overflowDylibs;
    // If the layout does not fit the arch's shared region, evict leaf dylibs
    // (when allowed) and re-layout until it fits; evicted dylibs are kept in
    // overflowDylibs so they still get dlopen closures below.
    while ( cacheOverflowAmount() != 0 ) {
        if ( !_options.evictLeafDylibsOnOverflow ) {
            _diagnostics.error("cache overflow by %lluMB", cacheOverflowAmount() / 1024 / 1024);
            return;
        }
        size_t evictionCount = evictLeafDylibs(cacheOverflowAmount(), overflowDylibs);
        // re-layout cache
        for (DylibInfo& dylib : _sortedDylibs)
            dylib.cacheLocation.clear();
        assignSegmentAddresses();

        _diagnostics.verbose("cache overflow, evicted %lu leaf dylibs\n", evictionCount);
    }
    markPaddingInaccessible();

    // copy all segments into cache
    uint64_t t2 = mach_absolute_time();
    writeCacheHeader();
    copyRawSegments();

    // rebase all dylibs for new location in cache
    uint64_t t3 = mach_absolute_time();
    _aslrTracker.setDataRegion(_readWriteRegion.buffer, _readWriteRegion.sizeInUse);
    adjustAllImagesForNewSegmentLocations();
    if ( _diagnostics.hasError() )
        return;

    // build ImageArray for dyld3, which has side effect of binding all cached dylibs
    uint64_t t4 = mach_absolute_time();
    buildImageArray(aliases);
    if ( _diagnostics.hasError() )
        return;

    // optimize ObjC
    uint64_t t5 = mach_absolute_time();
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    if ( _options.optimizeObjC )
        optimizeObjC();
    if ( _diagnostics.hasError() )
        return;


    // optimize away stubs
    uint64_t t6 = mach_absolute_time();
    std::vector<uint64_t> branchPoolOffsets;
    uint64_t cacheStartAddress = _archLayout->sharedMemoryStart;
    if ( _options.optimizeStubs ) {
        // read back the branch pool addresses writeCacheHeader() recorded in the header
        std::vector<uint64_t> branchPoolStartAddrs;
        const uint64_t* p = (uint64_t*)((uint8_t*)dyldCache + dyldCache->header.branchPoolsOffset);
        for (uint32_t i=0; i < dyldCache->header.branchPoolsCount; ++i) {
            uint64_t poolAddr = p[i];
            branchPoolStartAddrs.push_back(poolAddr);
            branchPoolOffsets.push_back(poolAddr - cacheStartAddress);
        }
        optimizeAwayStubs(branchPoolStartAddrs, _branchPoolsLinkEditStartAddr);
    }


    // FIPS seal corecrypto, This must be done after stub elimination (so that __TEXT,__text is not changed after sealing)
    fipsSign();

    // merge and compact LINKEDIT segments
    uint64_t t7 = mach_absolute_time();
    optimizeLinkedit(branchPoolOffsets);

    // copy ImageArray to end of read-only region
    addImageArray();
    if ( _diagnostics.hasError() )
        return;

    // compute and add dlopen closures for all other dylibs
    addOtherImageArray(otherOsDylibsInput, overflowDylibs);
    if ( _diagnostics.hasError() )
        return;

    // compute and add launch closures to end of read-only region
    uint64_t t8 = mach_absolute_time();
    addClosures(osExecutables);
    if ( _diagnostics.hasError() )
        return;

    // update final readOnly region size (grew as image arrays/closures were appended)
    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + dyldCache->header.mappingOffset);
    mappings[2].size = _readOnlyRegion.sizeInUse;
    if ( _options.excludeLocalSymbols )
        dyldCache->header.localSymbolsOffset = _readOnlyRegion.cacheFileOffset + _readOnlyRegion.sizeInUse;

    // record max slide now that final size is established
    if ( _archLayout->sharedRegionsAreDiscontiguous ) {
        // special case x86_64 which has three non-contiguous chunks each in their own 1GB regions
        uint64_t maxSlide0 = 0x60000000 - _readExecuteRegion.sizeInUse; // TEXT region has 1.5GB region
        uint64_t maxSlide1 = 0x40000000 - _readWriteRegion.sizeInUse;
        uint64_t maxSlide2 = 0x3FE00000 - _readOnlyRegion.sizeInUse;
        dyldCache->header.maxSlide = std::min(std::min(maxSlide0, maxSlide1), maxSlide2);
    }
    else {
        dyldCache->header.maxSlide = (_archLayout->sharedMemoryStart + _archLayout->sharedMemorySize) - (_readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse);
    }

    uint64_t t9 = mach_absolute_time();

    // fill in slide info at start of region[2]
    // do this last because it modifies pointers in DATA segments
    // (slide-info format is chosen per arch: V3 for arm64e, V4 for arm64_32, V2 otherwise)
    if ( _options.cacheSupportsASLR ) {
#if SUPPORT_ARCH_arm64e
        if ( strcmp(_archLayout->archName, "arm64e") == 0 )
            writeSlideInfoV3(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
        else
#endif
        if ( _archLayout->is64 )
            writeSlideInfoV2<Pointer64<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
        else
#if SUPPORT_ARCH_arm64_32
        if ( strcmp(_archLayout->archName, "arm64_32") == 0 )
            writeSlideInfoV4<Pointer32<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
        else
#endif
            writeSlideInfoV2<Pointer32<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
    }

    uint64_t t10 = mach_absolute_time();

    // last sanity check on size
    if ( cacheOverflowAmount() != 0 ) {
        _diagnostics.error("cache overflow after optimizations 0x%llX -> 0x%llX", _readExecuteRegion.unslidLoadAddress, _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse);
        return;
    }

    // codesignature is part of file, but is not mapped
    codeSign();
    if ( _diagnostics.hasError() )
        return;

    uint64_t t11 = mach_absolute_time();

    if ( _options.verbose ) {
        fprintf(stderr, "time to layout cache: %ums\n", absolutetime_to_milliseconds(t2-t1));
        fprintf(stderr, "time to copy cached dylibs into buffer: %ums\n", absolutetime_to_milliseconds(t3-t2));
        fprintf(stderr, "time to adjust segments for new split locations: %ums\n", absolutetime_to_milliseconds(t4-t3));
        fprintf(stderr, "time to bind all images: %ums\n", absolutetime_to_milliseconds(t5-t4));
        fprintf(stderr, "time to optimize Objective-C: %ums\n", absolutetime_to_milliseconds(t6-t5));
        fprintf(stderr, "time to do stub elimination: %ums\n", absolutetime_to_milliseconds(t7-t6));
        fprintf(stderr, "time to optimize LINKEDITs: %ums\n", absolutetime_to_milliseconds(t8-t7));
        fprintf(stderr, "time to build %lu closures: %ums\n", osExecutables.size(), absolutetime_to_milliseconds(t9-t8));
        fprintf(stderr, "time to compute slide info: %ums\n", absolutetime_to_milliseconds(t10-t9));
        fprintf(stderr, "time to compute UUID and codesign cache file: %ums\n", absolutetime_to_milliseconds(t11-t10));
    }

    return;
}
918
919
920 void CacheBuilder::writeCacheHeader()
921 {
922 // "dyld_v1" + spaces + archName(), with enough spaces to pad to 15 bytes
923 std::string magic = "dyld_v1";
924 magic.append(15 - magic.length() - _options.archName.length(), ' ');
925 magic.append(_options.archName);
926 assert(magic.length() == 15);
927
928 // fill in header
929 dyld_cache_header* dyldCacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
930 memcpy(dyldCacheHeader->magic, magic.c_str(), 16);
931 dyldCacheHeader->mappingOffset = sizeof(dyld_cache_header);
932 dyldCacheHeader->mappingCount = 3;
933 dyldCacheHeader->imagesOffset = (uint32_t)(dyldCacheHeader->mappingOffset + 3*sizeof(dyld_cache_mapping_info) + sizeof(uint64_t)*_branchPoolStarts.size());
934 dyldCacheHeader->imagesCount = (uint32_t)_sortedDylibs.size() + _aliasCount;
935 dyldCacheHeader->dyldBaseAddress = 0;
936 dyldCacheHeader->codeSignatureOffset = 0;
937 dyldCacheHeader->codeSignatureSize = 0;
938 dyldCacheHeader->slideInfoOffset = _slideInfoFileOffset;
939 dyldCacheHeader->slideInfoSize = _slideInfoBufferSizeAllocated;
940 dyldCacheHeader->localSymbolsOffset = 0;
941 dyldCacheHeader->localSymbolsSize = 0;
942 dyldCacheHeader->cacheType = _options.optimizeStubs ? kDyldSharedCacheTypeProduction : kDyldSharedCacheTypeDevelopment;
943 dyldCacheHeader->accelerateInfoAddr = 0;
944 dyldCacheHeader->accelerateInfoSize = 0;
945 bzero(dyldCacheHeader->uuid, 16);// overwritten later by recomputeCacheUUID()
946 dyldCacheHeader->branchPoolsOffset = dyldCacheHeader->mappingOffset + 3*sizeof(dyld_cache_mapping_info);
947 dyldCacheHeader->branchPoolsCount = (uint32_t)_branchPoolStarts.size();
948 dyldCacheHeader->imagesTextOffset = dyldCacheHeader->imagesOffset + sizeof(dyld_cache_image_info)*dyldCacheHeader->imagesCount;
949 dyldCacheHeader->imagesTextCount = _sortedDylibs.size();
950 dyldCacheHeader->dylibsImageGroupAddr = 0;
951 dyldCacheHeader->dylibsImageGroupSize = 0;
952 dyldCacheHeader->otherImageGroupAddr = 0;
953 dyldCacheHeader->otherImageGroupSize = 0;
954 dyldCacheHeader->progClosuresAddr = 0;
955 dyldCacheHeader->progClosuresSize = 0;
956 dyldCacheHeader->progClosuresTrieAddr = 0;
957 dyldCacheHeader->progClosuresTrieSize = 0;
958 dyldCacheHeader->platform = (uint8_t)_options.platform;
959 dyldCacheHeader->formatVersion = dyld3::closure::kFormatVersion;
960 dyldCacheHeader->dylibsExpectedOnDisk = !_options.dylibsRemovedDuringMastering;
961 dyldCacheHeader->simulator = _options.forSimulator;
962 dyldCacheHeader->locallyBuiltCache = _options.isLocallyBuiltCache;
963 dyldCacheHeader->formatVersion = dyld3::closure::kFormatVersion;
964 dyldCacheHeader->sharedRegionStart = _archLayout->sharedMemoryStart;
965 dyldCacheHeader->sharedRegionSize = _archLayout->sharedMemorySize;
966
967 // fill in mappings
968 dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + dyldCacheHeader->mappingOffset);
969 mappings[0].address = _readExecuteRegion.unslidLoadAddress;
970 mappings[0].fileOffset = 0;
971 mappings[0].size = _readExecuteRegion.sizeInUse;
972 mappings[0].maxProt = VM_PROT_READ | VM_PROT_EXECUTE;
973 mappings[0].initProt = VM_PROT_READ | VM_PROT_EXECUTE;
974 mappings[1].address = _readWriteRegion.unslidLoadAddress;
975 mappings[1].fileOffset = _readExecuteRegion.sizeInUse;
976 mappings[1].size = _readWriteRegion.sizeInUse;
977 mappings[1].maxProt = VM_PROT_READ | VM_PROT_WRITE;
978 mappings[1].initProt = VM_PROT_READ | VM_PROT_WRITE;
979 mappings[2].address = _readOnlyRegion.unslidLoadAddress;
980 mappings[2].fileOffset = _readExecuteRegion.sizeInUse + _readWriteRegion.sizeInUse;
981 mappings[2].size = _readOnlyRegion.sizeInUse;
982 mappings[2].maxProt = VM_PROT_READ;
983 mappings[2].initProt = VM_PROT_READ;
984
985 // fill in branch pool addresses
986 uint64_t* p = (uint64_t*)(_readExecuteRegion.buffer + dyldCacheHeader->branchPoolsOffset);
987 for (uint64_t pool : _branchPoolStarts) {
988 *p++ = pool;
989 }
990
991 // fill in image table
992 dyld_cache_image_info* images = (dyld_cache_image_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesOffset);
993 for (const DylibInfo& dylib : _sortedDylibs) {
994 const char* installName = dylib.input->mappedFile.mh->installName();
995 images->address = dylib.cacheLocation[0].dstCacheUnslidAddress;
996 if ( _options.dylibsRemovedDuringMastering ) {
997 images->modTime = 0;
998 images->inode = pathHash(installName);
999 }
1000 else {
1001 images->modTime = dylib.input->mappedFile.modTime;
1002 images->inode = dylib.input->mappedFile.inode;
1003 }
1004 uint32_t installNameOffsetInTEXT = (uint32_t)(installName - (char*)dylib.input->mappedFile.mh);
1005 images->pathFileOffset = (uint32_t)dylib.cacheLocation[0].dstCacheFileOffset + installNameOffsetInTEXT;
1006 ++images;
1007 }
1008 // append aliases image records and strings
1009 /*
1010 for (auto &dylib : _dylibs) {
1011 if (!dylib->installNameAliases.empty()) {
1012 for (const std::string& alias : dylib->installNameAliases) {
1013 images->set_address(_segmentMap[dylib][0].address);
1014 if (_manifest.platform() == "osx") {
1015 images->modTime = dylib->lastModTime;
1016 images->inode = dylib->inode;
1017 }
1018 else {
1019 images->modTime = 0;
1020 images->inode = pathHash(alias.c_str());
1021 }
1022 images->pathFileOffset = offset;
1023 //fprintf(stderr, "adding alias %s for %s\n", alias.c_str(), dylib->installName.c_str());
1024 ::strcpy((char*)&_buffer[offset], alias.c_str());
1025 offset += alias.size() + 1;
1026 ++images;
1027 }
1028 }
1029 }
1030 */
1031 // calculate start of text image array and trailing string pool
1032 dyld_cache_image_text_info* textImages = (dyld_cache_image_text_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesTextOffset);
1033 uint32_t stringOffset = (uint32_t)(dyldCacheHeader->imagesTextOffset + sizeof(dyld_cache_image_text_info) * _sortedDylibs.size());
1034
1035 // write text image array and image names pool at same time
1036 for (const DylibInfo& dylib : _sortedDylibs) {
1037 dylib.input->mappedFile.mh->getUuid(textImages->uuid);
1038 textImages->loadAddress = dylib.cacheLocation[0].dstCacheUnslidAddress;
1039 textImages->textSegmentSize = (uint32_t)dylib.cacheLocation[0].dstCacheSegmentSize;
1040 textImages->pathOffset = stringOffset;
1041 const char* installName = dylib.input->mappedFile.mh->installName();
1042 ::strcpy((char*)_readExecuteRegion.buffer + stringOffset, installName);
1043 stringOffset += (uint32_t)strlen(installName)+1;
1044 ++textImages;
1045 }
1046
1047 // make sure header did not overflow into first mapped image
1048 const dyld_cache_image_info* firstImage = (dyld_cache_image_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesOffset);
1049 assert(stringOffset <= (firstImage->address - mappings[0].address));
1050 }
1051
1052 void CacheBuilder::copyRawSegments()
1053 {
1054 const bool log = false;
1055 dispatch_apply(_sortedDylibs.size(), DISPATCH_APPLY_AUTO, ^(size_t index) {
1056 const DylibInfo& dylib = _sortedDylibs[index];
1057 for (const SegmentMappingInfo& info : dylib.cacheLocation) {
1058 if (log) fprintf(stderr, "copy %s segment %s (0x%08X bytes) from %p to %p (logical addr 0x%llX) for %s\n",
1059 _options.archName.c_str(), info.segName, info.copySegmentSize, info.srcSegment, info.dstSegment, info.dstCacheUnslidAddress, dylib.input->mappedFile.runtimePath.c_str());
1060 ::memcpy(info.dstSegment, info.srcSegment, info.copySegmentSize);
1061 if (uint64_t paddingSize = info.dstCacheSegmentSize - info.copySegmentSize) {
1062 ::memset((char*)info.dstSegment + info.copySegmentSize, 0, paddingSize);
1063 }
1064 }
1065 });
1066 }
1067
1068 void CacheBuilder::adjustAllImagesForNewSegmentLocations()
1069 {
1070 __block std::vector<Diagnostics> diags;
1071 diags.resize(_sortedDylibs.size());
1072
1073 if (_options.platform == dyld3::Platform::macOS) {
1074 dispatch_apply(_sortedDylibs.size(), DISPATCH_APPLY_AUTO, ^(size_t index) {
1075 const DylibInfo& dylib = _sortedDylibs[index];
1076 adjustDylibSegments(dylib, diags[index]);
1077 });
1078 } else {
1079 // Note this has to be done in serial because the LOH Tracker isn't thread safe
1080 for (size_t index = 0; index != _sortedDylibs.size(); ++index) {
1081 const DylibInfo& dylib = _sortedDylibs[index];
1082 adjustDylibSegments(dylib, diags[index]);
1083 }
1084 }
1085
1086 for (const Diagnostics& diag : diags) {
1087 if ( diag.hasError() ) {
1088 _diagnostics.error("%s", diag.errorMessage().c_str());
1089 break;
1090 }
1091 }
1092 }
1093
// Lay out every dylib segment at its final cache address, populating each
// DylibInfo's cacheLocation and the three region descriptors
// (_readExecuteRegion, _readWriteRegion, _readOnlyRegion).  Ordering within
// the cache is: header, TEXT (with interleaved branch-island pools), then
// DATA_CONST / DATA / DATA_DIRTY packed together, then slide-info space,
// other read-only segments, and finally all LINKEDITs.  No bytes are copied
// here; copyRawSegments() does that later using the SegmentMappingInfo
// records built here.
void CacheBuilder::assignSegmentAddresses()
{
    // calculate size of header info and where first dylib's mach_header should start
    size_t startOffset = sizeof(dyld_cache_header) + 3*sizeof(dyld_cache_mapping_info);
    size_t maxPoolCount = 0;
    if ( _archLayout->branchReach != 0 )
        maxPoolCount = (_archLayout->sharedMemorySize / _archLayout->branchReach);
    // reserve worst-case room for branch pool addresses, image records,
    // image-text records, and the install-name string pool
    startOffset += maxPoolCount * sizeof(uint64_t);
    startOffset += sizeof(dyld_cache_image_info) * _sortedDylibs.size();
    startOffset += sizeof(dyld_cache_image_text_info) * _sortedDylibs.size();
    for (const DylibInfo& dylib : _sortedDylibs) {
        startOffset += (strlen(dylib.input->mappedFile.mh->installName()) + 1);
    }
    //fprintf(stderr, "%s total header size = 0x%08lX\n", _options.archName.c_str(), startOffset);
    startOffset = align(startOffset, 12);   // align(value, p2): 12 => 4KB boundary

    _branchPoolStarts.clear();

    // assign TEXT segment addresses
    _readExecuteRegion.buffer               = (uint8_t*)_fullAllocatedBuffer;
    _readExecuteRegion.bufferSize           = 0;
    _readExecuteRegion.sizeInUse            = 0;
    _readExecuteRegion.unslidLoadAddress    = _archLayout->sharedMemoryStart;
    _readExecuteRegion.cacheFileOffset      = 0;
    // 'addr' is the running allocation cursor threaded through every segment
    // loop below; it must only move forward
    __block uint64_t addr = _readExecuteRegion.unslidLoadAddress + startOffset; // header
    __block uint64_t lastPoolAddress = addr;
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_EXECUTE) )
                return;
            // Insert branch island pools every 128MB for arm64
            if ( (_archLayout->branchPoolTextSize != 0) && ((addr + segInfo.vmSize - lastPoolAddress) > _archLayout->branchReach) ) {
                _branchPoolStarts.push_back(addr);
                _diagnostics.verbose("adding branch pool at 0x%llX\n", addr);
                lastPoolAddress = addr;
                addr += _archLayout->branchPoolTextSize;
            }
            // Keep __TEXT segments 4K or more aligned
            addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            uint64_t offsetInRegion = addr - _readExecuteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            // source bytes located relative to __TEXT's vmaddr (mh is start of __TEXT)
            loc.srcSegment             = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName                = segInfo.segName;
            loc.dstSegment             = _readExecuteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress  = addr;
            loc.dstCacheFileOffset     = (uint32_t)offsetInRegion;
            loc.dstCacheSegmentSize    = (uint32_t)align(segInfo.sizeOfSections, 12);
            loc.copySegmentSize        = (uint32_t)align(segInfo.sizeOfSections, 12);
            loc.srcSegmentIndex        = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }
    // align TEXT region end
    uint64_t endTextAddress = align(addr, _archLayout->sharedRegionAlignP2);
    _readExecuteRegion.bufferSize = endTextAddress - _readExecuteRegion.unslidLoadAddress;
    _readExecuteRegion.sizeInUse  = _readExecuteRegion.bufferSize;

    // assign __DATA* addresses
    if ( _archLayout->sharedRegionsAreDiscontiguous )
        addr = _archLayout->sharedMemoryStart + 0x60000000;   // x86_64: DATA starts at a fixed offset in its own chunk
    else
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
    _readWriteRegion.buffer               = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
    _readWriteRegion.bufferSize           = 0;
    _readWriteRegion.sizeInUse            = 0;
    _readWriteRegion.unslidLoadAddress    = addr;
    _readWriteRegion.cacheFileOffset      = _readExecuteRegion.sizeInUse;

    // layout all __DATA_CONST segments
    __block int dataConstSegmentCount = 0;
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( strcmp(segInfo.segName, "__DATA_CONST") != 0 )
                return;
            ++dataConstSegmentCount;
            // Pack __DATA_CONST segments
            addr = align(addr, segInfo.p2align);
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readWriteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment             = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName                = segInfo.segName;
            loc.dstSegment             = _readWriteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress  = addr;
            loc.dstCacheFileOffset     = (uint32_t)(_readWriteRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize    = (uint32_t)segInfo.sizeOfSections;
            loc.copySegmentSize        = (uint32_t)copySize;
            loc.srcSegmentIndex        = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }

    // layout all __DATA segments (and other r/w non-dirty, non-const) segments
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( strcmp(segInfo.segName, "__DATA_CONST") == 0 )
                return;
            if ( strcmp(segInfo.segName, "__DATA_DIRTY") == 0 )
                return;
            // NOTE(review): the comment below says "if we also have __DATA_CONST
            // segments" but the check is for more than 10 of them — the threshold
            // appears intentional; confirm against upstream history
            if ( dataConstSegmentCount > 10 ) {
                // Pack __DATA segments only if we also have __DATA_CONST segments
                addr = align(addr, segInfo.p2align);
            }
            else {
                // Keep __DATA segments 4K or more aligned
                addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            }
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readWriteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment             = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName                = segInfo.segName;
            loc.dstSegment             = _readWriteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress  = addr;
            loc.dstCacheFileOffset     = (uint32_t)(_readWriteRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize    = (uint32_t)segInfo.sizeOfSections;
            loc.copySegmentSize        = (uint32_t)copySize;
            loc.srcSegmentIndex        = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }

    // layout all __DATA_DIRTY segments, sorted (FIXME)
    // dirty data is clustered so pages shared between processes stay clean
    const size_t dylibCount = _sortedDylibs.size();
    uint32_t dirtyDataSortIndexes[dylibCount];
    for (size_t i=0; i < dylibCount; ++i)
        dirtyDataSortIndexes[i] = (uint32_t)i;
    std::sort(&dirtyDataSortIndexes[0], &dirtyDataSortIndexes[dylibCount], [&](const uint32_t& a, const uint32_t& b) {
        const auto& orderA = _options.dirtyDataSegmentOrdering.find(_sortedDylibs[a].input->mappedFile.runtimePath);
        const auto& orderB = _options.dirtyDataSegmentOrdering.find(_sortedDylibs[b].input->mappedFile.runtimePath);
        bool foundA = (orderA != _options.dirtyDataSegmentOrdering.end());
        bool foundB = (orderB != _options.dirtyDataSegmentOrdering.end());

        // Order all __DATA_DIRTY segments specified in the order file first, in the order specified in the file,
        // followed by any other __DATA_DIRTY segments in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
             return false;
        else
             return _sortedDylibs[a].input->mappedFile.runtimePath < _sortedDylibs[b].input->mappedFile.runtimePath;
    });
    addr = align(addr, 12);
    for (size_t i=0; i < dylibCount; ++i) {
        DylibInfo& dylib = _sortedDylibs[dirtyDataSortIndexes[i]];
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( strcmp(segInfo.segName, "__DATA_DIRTY") != 0 )
                return;
            // Pack __DATA_DIRTY segments
            addr = align(addr, segInfo.p2align);
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readWriteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment             = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName                = segInfo.segName;
            loc.dstSegment             = _readWriteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress  = addr;
            loc.dstCacheFileOffset     = (uint32_t)(_readWriteRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize    = (uint32_t)segInfo.sizeOfSections;
            loc.copySegmentSize        = (uint32_t)copySize;
            loc.srcSegmentIndex        = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }

    // align DATA region end
    uint64_t endDataAddress = align(addr, _archLayout->sharedRegionAlignP2);
    _readWriteRegion.bufferSize   = endDataAddress - _readWriteRegion.unslidLoadAddress;
    _readWriteRegion.sizeInUse    = _readWriteRegion.bufferSize;

    // start read-only region
    if ( _archLayout->sharedRegionsAreDiscontiguous )
        addr = _archLayout->sharedMemoryStart + 0xA0000000;   // x86_64: RO chunk at fixed offset
    else
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
    _readOnlyRegion.buffer               = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
    _readOnlyRegion.bufferSize           = 0;
    _readOnlyRegion.sizeInUse            = 0;
    _readOnlyRegion.unslidLoadAddress    = addr;
    _readOnlyRegion.cacheFileOffset      = _readWriteRegion.cacheFileOffset + _readWriteRegion.sizeInUse;

    // reserve space for kernel ASLR slide info at start of r/o region
    // (size covers the largest slide-info header variant plus per-page entries)
    if ( _options.cacheSupportsASLR ) {
        size_t slideInfoSize = sizeof(dyld_cache_slide_info);
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info2));
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info3));
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info4));
        _slideInfoBufferSizeAllocated = align(slideInfoSize + (_readWriteRegion.sizeInUse/4096) * _archLayout->slideInfoBytesPerPage, _archLayout->sharedRegionAlignP2);
        _slideInfoFileOffset = _readOnlyRegion.cacheFileOffset;
        addr += _slideInfoBufferSizeAllocated;
    }

    // layout all read-only (but not LINKEDIT) segments
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != VM_PROT_READ )
                return;
            if ( strcmp(segInfo.segName, "__LINKEDIT") == 0 )
                return;
            // Keep segments segments 4K or more aligned
            addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            uint64_t offsetInRegion = addr - _readOnlyRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment             = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName                = segInfo.segName;
            loc.dstSegment             = _readOnlyRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress  = addr;
            loc.dstCacheFileOffset     = (uint32_t)(_readOnlyRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize    = (uint32_t)align(segInfo.sizeOfSections, 12);
            loc.copySegmentSize        = (uint32_t)segInfo.sizeOfSections;
            loc.srcSegmentIndex        = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }
    // layout all LINKEDIT segments (after other read-only segments), aligned to 16KB
    addr = align(addr, 14);
    _nonLinkEditReadOnlySize =  addr - _readOnlyRegion.unslidLoadAddress;
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != VM_PROT_READ )
                return;
            if ( strcmp(segInfo.segName, "__LINKEDIT") != 0 )
                return;
            // Keep segments segments 4K or more aligned
            addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readOnlyRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment             = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName                = segInfo.segName;
            loc.dstSegment             = _readOnlyRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress  = addr;
            loc.dstCacheFileOffset     = (uint32_t)(_readOnlyRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize    = (uint32_t)align(segInfo.sizeOfSections, 12);
            loc.copySegmentSize        = (uint32_t)copySize;
            loc.srcSegmentIndex        = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }
    // add room for branch pool linkedits
    _branchPoolsLinkEditStartAddr = addr;
    addr += (_branchPoolStarts.size() * _archLayout->branchPoolLinkEditSize);

    // align r/o region end
    uint64_t endReadOnlyAddress = align(addr, _archLayout->sharedRegionAlignP2);
    _readOnlyRegion.bufferSize  = endReadOnlyAddress - _readOnlyRegion.unslidLoadAddress;
    _readOnlyRegion.sizeInUse   = _readOnlyRegion.bufferSize;

    //fprintf(stderr, "RX region=%p -> %p, logical addr=0x%llX\n", _readExecuteRegion.buffer, _readExecuteRegion.buffer+_readExecuteRegion.bufferSize, _readExecuteRegion.unslidLoadAddress);
    //fprintf(stderr, "RW region=%p -> %p, logical addr=0x%llX\n", _readWriteRegion.buffer, _readWriteRegion.buffer+_readWriteRegion.bufferSize, _readWriteRegion.unslidLoadAddress);
    //fprintf(stderr, "RO region=%p -> %p, logical addr=0x%llX\n", _readOnlyRegion.buffer, _readOnlyRegion.buffer+_readOnlyRegion.bufferSize, _readOnlyRegion.unslidLoadAddress);

    // sort SegmentMappingInfo for each image to be in the same order as original segments
    for (DylibInfo& dylib : _sortedDylibs) {
        std::sort(dylib.cacheLocation.begin(), dylib.cacheLocation.end(), [&](const SegmentMappingInfo& a, const SegmentMappingInfo& b) {
            return a.srcSegmentIndex < b.srcSegmentIndex;
        });
    }
}
1385
1386 void CacheBuilder::markPaddingInaccessible()
1387 {
1388 // region between RX and RW
1389 uint8_t* startPad1 = _readExecuteRegion.buffer+_readExecuteRegion.sizeInUse;
1390 uint8_t* endPad1 = _readWriteRegion.buffer;
1391 ::vm_protect(mach_task_self(), (vm_address_t)startPad1, endPad1-startPad1, false, 0);
1392
1393 // region between RW and RO
1394 uint8_t* startPad2 = _readWriteRegion.buffer+_readWriteRegion.sizeInUse;
1395 uint8_t* endPad2 = _readOnlyRegion.buffer;
1396 ::vm_protect(mach_task_self(), (vm_address_t)startPad2, endPad2-startPad2, false, 0);
1397 }
1398
1399
1400 uint64_t CacheBuilder::pathHash(const char* path)
1401 {
1402 uint64_t sum = 0;
1403 for (const char* s=path; *s != '\0'; ++s)
1404 sum += sum*4 + *s;
1405 return sum;
1406 }
1407
1408
1409 void CacheBuilder::findDylibAndSegment(const void* contentPtr, std::string& foundDylibName, std::string& foundSegName)
1410 {
1411 foundDylibName = "???";
1412 foundSegName = "???";
1413 uint64_t unslidVmAddr = ((uint8_t*)contentPtr - _readExecuteRegion.buffer) + _readExecuteRegion.unslidLoadAddress;
1414 const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
1415 cache->forEachImage(^(const mach_header* mh, const char* installName) {
1416 ((dyld3::MachOLoaded*)mh)->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool &stop) {
1417 if ( (unslidVmAddr >= info.vmAddr) && (unslidVmAddr < (info.vmAddr+info.vmSize)) ) {
1418 foundDylibName = installName;
1419 foundSegName = info.segName;
1420 stop = true;
1421 }
1422 });
1423 });
1424 }
1425
1426
// Extends the V2 rebase chain within one page so that the rebase at
// 'lastLocationOffset' links forward to the rebase at 'offset'.  Each rebased
// pointer stores the 4-byte-unit delta to the next rebase location in the bits
// selected by info->delta_mask.  Returns true if the chain was extended,
// false if it had to be terminated (the caller then records a new chain start).
template <typename P>
bool CacheBuilder::makeRebaseChainV2(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyld_cache_slide_info2* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t deltaMask = (pint_t)(info->delta_mask);
    const pint_t valueMask = ~deltaMask;
    const pint_t valueAdd = (pint_t)(info->value_add);
    // deltas count 4-byte units, so the shift is 2 less than the mask's lowest set bit
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    // farthest byte offset reachable from one link
    const uint32_t maxDelta = (uint32_t)(deltaMask >> deltaShift);

    pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        // pointer value has bits where the delta must go, so it cannot be chained
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        _diagnostics.error("rebase pointer does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                           lastLocationOffset, segName.c_str(), dylibName.c_str());
        return false;
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //fprintf(stderr, "  too big delta = %d, lastOffset=0x%03X, offset=0x%03X\n", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        // scan backward from the farthest reachable slot, looking for a zero word to borrow
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( value == 0 ) {
                // Steal values of 0 to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            // no zero word in reach: terminate the chain at lastLoc and give up
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //warning("  no way to make non-rebase delta chain, terminate off=0x%03X, old value=0x%08lX, new value=0x%08lX", lastLocationOffset, (long)value, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
        // NOTE(review): nrIndex is not bounds-checked against the 1024-entry array — confirm pages cannot need more hops
    }

    // we can make chain. go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t* prevLoc = (pint_t*)&pageContent[prevOffset];
    for (unsigned n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t* nLoc = (pint_t*)&pageContent[nOffset];
        uint32_t delta2 = nOffset - prevOffset;
        pint_t value = (pint_t)P::getP(*prevLoc);
        pint_t newValue;
        if ( value == 0 )
            newValue = (delta2 << deltaShift);
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc = nLoc;
    }
    // finally link the last borrowed location to the requested 'offset'
    uint32_t delta3 = offset - prevOffset;
    pint_t value = (pint_t)P::getP(*prevLoc);
    pint_t newValue;
    if ( value == 0 )
        newValue = (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}
1516
1517
// Computes the V2 slide-info page_starts entry for one page of DATA.
// 'bitmap' has one bool per 4-byte slot indicating a rebase location.
// Normally the page is encoded as a single chain (startValue = first slot
// index); when locations are too far apart for one chain, multiple chain
// starts are appended to 'pageExtras' and the page's start value becomes an
// index into that list (flagged with DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA).
template <typename P>
void CacheBuilder::addPageStartsV2(uint8_t* pageContent, const bool bitmap[], const dyld_cache_slide_info2* info,
                                   std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t deltaMask = (pint_t)(info->delta_mask);
    const pint_t valueMask = ~deltaMask;
    const uint32_t pageSize = info->page_size;
    const pint_t valueAdd = (pint_t)(info->value_add);

    uint16_t startValue = DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;      // 0xFFFF == no rebase seen yet
    for(uint32_t i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChainV2<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras > 0x3FFF ) {
                        // index must fit in the 14 bits below the attribute bits
                        _diagnostics.error("rebase overflow in v2 page extras");
                        return;
                    }
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset];
        pint_t lastValue = (pint_t)P::getP(*lastLoc);
        pint_t newValue = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE_PAGE_ATTR_END;
    }
    pageStarts.push_back(startValue);
}
1568
// Builds version 2 slide info for the whole DATA region.  'bitmap' holds one
// bool per 4-byte slot of DATA; 'dataPageCount' is the number of 4KB pages.
// Writes the dyld_cache_slide_info2 header plus page starts/extras into the
// read-only region, then records the final aligned size in the cache header.
template <typename P>
void CacheBuilder::writeSlideInfoV2(const bool bitmap[], unsigned dataPageCount)
{
    typedef typename P::uint_t pint_t;
    typedef typename P::E E;
    const uint32_t pageSize = 4096;

    // fill in fixed info
    assert(_slideInfoFileOffset != 0);
    dyld_cache_slide_info2* info = (dyld_cache_slide_info2*)_readOnlyRegion.buffer;
    info->version = 2;
    info->page_size = pageSize;
    info->delta_mask = _archLayout->pointerDeltaMask;
    info->value_add = (sizeof(pint_t) == 8) ? 0 : _archLayout->sharedMemoryStart; // only value_add for 32-bit archs

    // set page starts and extras for each page
    std::vector<uint16_t> pageStarts;
    std::vector<uint16_t> pageExtras;
    pageStarts.reserve(dataPageCount);
    uint8_t* pageContent = _readWriteRegion.buffer;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < dataPageCount; ++i) {
        //warning("page[%d]", i);
        addPageStartsV2<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
        if ( _diagnostics.hasError() ) {
            return;
        }
        pageContent += pageSize;
        // advance to the next page's slice of the bitmap (one bool per 4-byte slot)
        // NOTE(review): scaling element arithmetic by sizeof(bool) assumes sizeof(bool)==1 — confirm
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }

    // fill in computed info
    info->page_starts_offset = sizeof(dyld_cache_slide_info2);
    info->page_starts_count = (unsigned)pageStarts.size();
    info->page_extras_offset = (unsigned)(sizeof(dyld_cache_slide_info2)+pageStarts.size()*sizeof(uint16_t));
    info->page_extras_count = (unsigned)pageExtras.size();
    uint16_t* pageStartsBuffer = (uint16_t*)((char*)info + info->page_starts_offset);
    uint16_t* pageExtrasBuffer = (uint16_t*)((char*)info + info->page_extras_offset);
    for (unsigned i=0; i < pageStarts.size(); ++i)
        pageStartsBuffer[i] = pageStarts[i];
    for (unsigned i=0; i < pageExtras.size(); ++i)
        pageExtrasBuffer[i] = pageExtras[i];
    // update header with final size
    uint64_t slideInfoSize = align(info->page_extras_offset + pageExtras.size()*sizeof(uint16_t), _archLayout->sharedRegionAlignP2);
    if ( slideInfoSize > _slideInfoBufferSizeAllocated ) {
        _diagnostics.error("kernel slide info overflow buffer");
    }
    ((dyld_cache_header*)_readExecuteRegion.buffer)->slideInfoSize = slideInfoSize;
    //fprintf(stderr, "pageCount=%u, page_starts_count=%lu, page_extras_count=%lu\n", dataPageCount, pageStarts.size(), pageExtras.size());
}
1619
// Returns true when the low 32 bits of 'value' sign-extend from an int16_t,
// i.e. bits 15..31 are all zero or all one.  Bits above 32 are deliberately
// ignored (the mask is a 32-bit constant).
static bool smallValue(uint64_t value)
{
    const uint32_t signAndHighBits = (uint32_t)(value & 0xFFFF8000);
    if ( signAndHighBits == 0 )
        return true;
    return (signAndHighBits == 0xFFFF8000);
}
1626
// V4 variant of makeRebaseChainV2 (used for 32-bit caches).  Same chain
// layout, but instead of only borrowing zero words to bridge large gaps, any
// word whose value sign-extends from int16_t (see smallValue) can be borrowed;
// such small values are stored without the value_add adjustment.
// Returns true if the chain was extended, false if it had to be terminated.
template <typename P>
bool CacheBuilder::makeRebaseChainV4(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyld_cache_slide_info4* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t deltaMask = (pint_t)(info->delta_mask);
    const pint_t valueMask = ~deltaMask;
    const pint_t valueAdd = (pint_t)(info->value_add);
    // deltas count 4-byte units, so the shift is 2 less than the mask's lowest set bit
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    // farthest byte offset reachable from one link
    const uint32_t maxDelta = (uint32_t)(deltaMask >> deltaShift);

    pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        // pointer value has bits where the delta must go, so it cannot be chained
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        _diagnostics.error("rebase pointer does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                           lastLocationOffset, segName.c_str(), dylibName.c_str());
        return false;
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //fprintf(stderr, "  too big delta = %d, lastOffset=0x%03X, offset=0x%03X\n", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        // scan backward from the farthest reachable slot, looking for a borrowable word
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( smallValue(value) ) {
                // Steal small (int16-sign-extendable) values to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            // no borrowable word in reach: terminate the chain at lastLoc and give up
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //fprintf(stderr, "  no way to make non-rebase delta chain, terminate off=0x%03X, old value=0x%08lX, new value=0x%08lX\n",
            //       lastLocationOffset, (long)lastValue, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
        // NOTE(review): nrIndex is not bounds-checked against the 1024-entry array — confirm pages cannot need more hops
    }

    // we can make chain. go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t* prevLoc = (pint_t*)&pageContent[prevOffset];
    for (unsigned n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t* nLoc = (pint_t*)&pageContent[nOffset];
        uint32_t delta2 = nOffset - prevOffset;
        pint_t value = (pint_t)P::getP(*prevLoc);
        pint_t newValue;
        if ( smallValue(value) )
            newValue = (value & valueMask) | (delta2 << deltaShift);    // small values are not value_add adjusted
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc = nLoc;
    }
    // finally link the last borrowed location to the requested 'offset'
    uint32_t delta3 = offset - prevOffset;
    pint_t value = (pint_t)P::getP(*prevLoc);
    pint_t newValue;
    if ( smallValue(value) )
        newValue = (value & valueMask) | (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}
1717
1718
// Computes the V4 slide-info page_starts entry for one page of DATA
// (32-bit cache variant of addPageStartsV2).  'bitmap' has one bool per
// 4-byte slot indicating a rebase location.  When a single chain cannot cover
// the page, chain starts are appended to 'pageExtras' and the page's start
// value becomes an index into that list (flagged DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA).
template <typename P>
void CacheBuilder::addPageStartsV4(uint8_t* pageContent, const bool bitmap[], const dyld_cache_slide_info4* info,
                                   std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t deltaMask = (pint_t)(info->delta_mask);
    const pint_t valueMask = ~deltaMask;
    const uint32_t pageSize = info->page_size;
    const pint_t valueAdd = (pint_t)(info->value_add);

    uint16_t startValue = DYLD_CACHE_SLIDE4_PAGE_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;      // 0xFFFF == no rebase seen yet
    for(uint32_t i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChainV4<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras >= DYLD_CACHE_SLIDE4_PAGE_INDEX ) {
                        // extras index must fit below the attribute bits
                        _diagnostics.error("rebase overflow in v4 page extras");
                        return;
                    }
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset];
        pint_t lastValue = (pint_t)P::getP(*lastLoc);
        pint_t newValue = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE4_PAGE_EXTRA_END;
    }
    pageStarts.push_back(startValue);
}
1769
1770
1771
// Builds version 4 slide info for the whole DATA region (32-bit caches).
// Same structure as writeSlideInfoV2 but emits dyld_cache_slide_info4 and
// uses the V4 page-start encoding.  Records the final aligned size in the
// cache header.
template <typename P>
void CacheBuilder::writeSlideInfoV4(const bool bitmap[], unsigned dataPageCount)
{
    typedef typename P::uint_t pint_t;
    typedef typename P::E E;
    const uint32_t pageSize = 4096;

    // fill in fixed info
    assert(_slideInfoFileOffset != 0);
    dyld_cache_slide_info4* info = (dyld_cache_slide_info4*)_readOnlyRegion.buffer;
    info->version = 4;
    info->page_size = pageSize;
    info->delta_mask = _archLayout->pointerDeltaMask;
    info->value_add = (sizeof(pint_t) == 8) ? 0 : _archLayout->sharedMemoryStart; // only value_add for 32-bit archs

    // set page starts and extras for each page
    std::vector<uint16_t> pageStarts;
    std::vector<uint16_t> pageExtras;
    pageStarts.reserve(dataPageCount);
    uint8_t* pageContent = _readWriteRegion.buffer;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < dataPageCount; ++i) {
        addPageStartsV4<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
        if ( _diagnostics.hasError() ) {
            return;
        }
        pageContent += pageSize;
        // advance to the next page's slice of the bitmap (one bool per 4-byte slot)
        // NOTE(review): scaling element arithmetic by sizeof(bool) assumes sizeof(bool)==1 — confirm
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }
    // fill in computed info
    info->page_starts_offset = sizeof(dyld_cache_slide_info4);
    info->page_starts_count = (unsigned)pageStarts.size();
    info->page_extras_offset = (unsigned)(sizeof(dyld_cache_slide_info4)+pageStarts.size()*sizeof(uint16_t));
    info->page_extras_count = (unsigned)pageExtras.size();
    uint16_t* pageStartsBuffer = (uint16_t*)((char*)info + info->page_starts_offset);
    uint16_t* pageExtrasBuffer = (uint16_t*)((char*)info + info->page_extras_offset);
    for (unsigned i=0; i < pageStarts.size(); ++i)
        pageStartsBuffer[i] = pageStarts[i];
    for (unsigned i=0; i < pageExtras.size(); ++i)
        pageExtrasBuffer[i] = pageExtras[i];
    // update header with final size
    uint64_t slideInfoSize = align(info->page_extras_offset + pageExtras.size()*sizeof(uint16_t), _archLayout->sharedRegionAlignP2);
    if ( slideInfoSize > _slideInfoBufferSizeAllocated ) {
        _diagnostics.error("kernel slide info v4 overflow buffer");
    }
    ((dyld_cache_header*)_readExecuteRegion.buffer)->slideInfoSize = slideInfoSize;
    //fprintf(stderr, "pageCount=%u, page_starts_count=%lu, page_extras_count=%lu\n", dataPageCount, pageStarts.size(), pageExtras.size());
}
1820
1821
1822 /*
1823 void CacheBuilder::writeSlideInfoV1()
1824 {
1825 // build one 128-byte bitmap per page (4096) of DATA
1826 uint8_t* const dataStart = (uint8_t*)_buffer.get() + regions[1].fileOffset;
1827 uint8_t* const dataEnd = dataStart + regions[1].size;
1828 const long bitmapSize = (dataEnd - dataStart)/(4*8);
1829 uint8_t* bitmap = (uint8_t*)calloc(bitmapSize, 1);
1830 for (void* p : _pointersForASLR) {
1831 if ( (p < dataStart) || ( p > dataEnd) )
1832 terminate("DATA pointer for sliding, out of range\n");
1833 long offset = (long)((uint8_t*)p - dataStart);
1834 if ( (offset % 4) != 0 )
1835 terminate("pointer not 4-byte aligned in DATA offset 0x%08lX\n", offset);
1836 long byteIndex = offset / (4*8);
1837 long bitInByte = (offset % 32) >> 2;
1838 bitmap[byteIndex] |= (1 << bitInByte);
1839 }
1840
1841 // allocate worst case size block of all slide info
1842 const unsigned entry_size = 4096/(8*4); // 8 bits per byte, possible pointer every 4 bytes.
1843 const unsigned toc_count = (unsigned)bitmapSize/entry_size;
1844 dyld_cache_slide_info* slideInfo = (dyld_cache_slide_info*)((uint8_t*)_buffer + _slideInfoFileOffset);
1845 slideInfo->version = 1;
1846 slideInfo->toc_offset = sizeof(dyld_cache_slide_info);
1847 slideInfo->toc_count = toc_count;
1848 slideInfo->entries_offset = (slideInfo->toc_offset+2*toc_count+127)&(-128);
1849 slideInfo->entries_count = 0;
1850 slideInfo->entries_size = entry_size;
1851 // append each unique entry
1852 const dyldCacheSlideInfoEntry* bitmapAsEntries = (dyldCacheSlideInfoEntry*)bitmap;
1853 dyldCacheSlideInfoEntry* const entriesInSlidInfo = (dyldCacheSlideInfoEntry*)((char*)slideInfo+slideInfo->entries_offset());
1854 int entry_count = 0;
1855 for (int i=0; i < toc_count; ++i) {
1856 const dyldCacheSlideInfoEntry* thisEntry = &bitmapAsEntries[i];
1857 // see if it is same as one already added
1858 bool found = false;
1859 for (int j=0; j < entry_count; ++j) {
1860 if ( memcmp(thisEntry, &entriesInSlidInfo[j], entry_size) == 0 ) {
1861 slideInfo->set_toc(i, j);
1862 found = true;
1863 break;
1864 }
1865 }
1866 if ( !found ) {
1867 // append to end
1868 memcpy(&entriesInSlidInfo[entry_count], thisEntry, entry_size);
1869 slideInfo->set_toc(i, entry_count++);
1870 }
1871 }
1872 slideInfo->entries_count = entry_count;
1873 ::free((void*)bitmap);
1874
1875 _buffer.header->slideInfoSize = align(slideInfo->entries_offset + entry_count*entry_size, _archLayout->sharedRegionAlignP2);
1876 }
1877
1878 */
1879
1880
1881
1882 uint16_t CacheBuilder::pageStartV3(uint8_t* pageContent, uint32_t pageSize, const bool bitmap[])
1883 {
1884 const int maxPerPage = pageSize / 4;
1885 uint16_t result = DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE;
1886 dyld3::MachOLoaded::ChainedFixupPointerOnDisk* lastLoc = nullptr;
1887 for (int i=0; i < maxPerPage; ++i) {
1888 if ( bitmap[i] ) {
1889 if ( result == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE ) {
1890 // found first rebase location in page
1891 result = i * 4;
1892 }
1893 dyld3::MachOLoaded::ChainedFixupPointerOnDisk* loc = (dyld3::MachOLoaded::ChainedFixupPointerOnDisk*)(pageContent + i*4);;
1894 if ( lastLoc != nullptr ) {
1895 // update chain (original chain may be wrong because of segment packing)
1896 lastLoc->plainRebase.next = loc - lastLoc;
1897 }
1898 lastLoc = loc;
1899 }
1900 }
1901 if ( lastLoc != nullptr ) {
1902 // mark last one as end of chain
1903 lastLoc->plainRebase.next = 0;
1904 }
1905 return result;
1906 }
1907
1908
// Builds version 3 slide info (chained authenticated/plain pointers) for the
// whole DATA region.  Writes the dyld_cache_slide_info3 header and per-page
// starts into the read-only region, then records the final aligned size in
// the cache header.
void CacheBuilder::writeSlideInfoV3(const bool bitmap[], unsigned dataPageCount)
{
    const uint32_t pageSize = 4096;

    // fill in fixed info
    assert(_slideInfoFileOffset != 0);
    dyld_cache_slide_info3* info = (dyld_cache_slide_info3*)_readOnlyRegion.buffer;
    info->version = 3;
    info->page_size = pageSize;
    info->page_starts_count = dataPageCount;
    info->auth_value_add = _archLayout->sharedMemoryStart;

    // fill in per-page starts
    uint8_t* pageContent = _readWriteRegion.buffer;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < dataPageCount; ++i) {
        info->page_starts[i] = pageStartV3(pageContent, pageSize, bitmapForPage);
        pageContent += pageSize;
        // advance to the next page's slice of the bitmap (one bool per 4-byte slot)
        // NOTE(review): scaling element arithmetic by sizeof(bool) assumes sizeof(bool)==1 — confirm
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }

    // update header with final size
    dyld_cache_header* dyldCacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
    dyldCacheHeader->slideInfoSize = align(__offsetof(dyld_cache_slide_info3, page_starts[dataPageCount]), _archLayout->sharedRegionAlignP2);
    if ( dyldCacheHeader->slideInfoSize > _slideInfoBufferSizeAllocated ) {
        _diagnostics.error("kernel slide info overflow buffer");
    }
}
1937
1938
// "FIPS seals" libcorecrypto in the cache being built: computes an
// HMAC-SHA256 (with a fixed one-byte zero key) of libcorecrypto's
// __TEXT/__text and stores the 32-byte result into its __TEXT/__fips_hmacs
// section.  All failures are soft — a warning is logged and the cache build
// continues unsealed.
void CacheBuilder::fipsSign()
{
    // find libcorecrypto.dylib in cache being built
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    __block const dyld3::MachOLoaded* ml = nullptr;
    dyldCache->forEachImage(^(const mach_header* mh, const char* installName) {
        if ( strcmp(installName, "/usr/lib/system/libcorecrypto.dylib") == 0 )
            ml = (dyld3::MachOLoaded*)mh;
    });
    if ( ml == nullptr ) {
        _diagnostics.warning("Could not find libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }

    // find location in libcorecrypto.dylib to store hash of __text section
    uint64_t hashStoreSize;
    const void* hashStoreLocation = ml->findSectionContent("__TEXT", "__fips_hmacs", hashStoreSize);
    if ( hashStoreLocation == nullptr ) {
        _diagnostics.warning("Could not find __TEXT/__fips_hmacs section in libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }
    if ( hashStoreSize != 32 ) {
        // must be exactly one SHA256-sized slot
        _diagnostics.warning("__TEXT/__fips_hmacs section in libcorecrypto.dylib is not 32 bytes in size, skipping FIPS sealing");
        return;
    }

    // compute hmac hash of __text section
    uint64_t textSize;
    const void* textLocation = ml->findSectionContent("__TEXT", "__text", textSize);
    if ( textLocation == nullptr ) {
        _diagnostics.warning("Could not find __TEXT/__text section in libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }
    unsigned char hmac_key = 0;   // fixed 1-byte zero key
    CCHmac(kCCHmacAlgSHA256, &hmac_key, 1, textLocation, textSize, (void*)hashStoreLocation); // store hash directly into hashStoreLocation
}
1975
// Ad-hoc code signs the whole cache file (TEXT + DATA + LINKEDIT + local
// symbols).  Builds a CS_SuperBlob containing a code directory (SHA1 or
// SHA256; both when the digest mode is Agile), an empty requirements blob,
// and an empty CMS blob wrapper.  Every CS_PAGE_SIZE bytes of the cache gets
// one hash slot.  Afterwards the cache UUID is derived from the code
// directory, and the cdHash(es) are saved into _cdHashFirst/_cdHashSecond.
// On any failure an error is recorded in _diagnostics and the method returns.
void CacheBuilder::codeSign()
{
    uint8_t dscHashType;
    uint8_t dscHashSize;
    uint32_t dscDigestFormat;
    bool agile = false;

    // select which codesigning hash
    switch (_options.codeSigningDigestMode) {
        case DyldSharedCache::Agile:
            agile = true;
            // Fall through to SHA1, because the main code directory remains SHA1 for compatibility.
        case DyldSharedCache::SHA1only:
            dscHashType = CS_HASHTYPE_SHA1;
            dscHashSize = CS_HASH_SIZE_SHA1;
            dscDigestFormat = kCCDigestSHA1;
            break;
        case DyldSharedCache::SHA256only:
            dscHashType = CS_HASHTYPE_SHA256;
            dscHashSize = CS_HASH_SIZE_SHA256;
            dscDigestFormat = kCCDigestSHA256;
            break;
        default:
            _diagnostics.error("codeSigningDigestMode has unknown, unexpected value %d, bailing out.",
                               _options.codeSigningDigestMode);
            return;
    }

    // signing identifier encodes arch and (when mastered) release vs development flavor
    std::string cacheIdentifier = "com.apple.dyld.cache." + _options.archName;
    if ( _options.dylibsRemovedDuringMastering ) {
        if ( _options.optimizeStubs )
            cacheIdentifier = "com.apple.dyld.cache." + _options.archName + ".release";
        else
            cacheIdentifier = "com.apple.dyld.cache." + _options.archName + ".development";
    }
    // get pointers into shared cache buffer
    size_t inBbufferSize = _readExecuteRegion.sizeInUse+_readWriteRegion.sizeInUse+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse;

    // layout code signature contents
    uint32_t blobCount = agile ? 4 : 3;                 // +1 for the alternate SHA256 code directory
    size_t idSize = cacheIdentifier.size()+1; // +1 for terminating 0
    uint32_t slotCount = (uint32_t)((inBbufferSize + CS_PAGE_SIZE - 1) / CS_PAGE_SIZE);
    uint32_t xSlotCount = CSSLOT_REQUIREMENTS;          // number of special slots (stored before hashOffset)
    size_t idOffset = offsetof(CS_CodeDirectory, end_withExecSeg);
    size_t hashOffset = idOffset+idSize + dscHashSize*xSlotCount;
    size_t hash256Offset = idOffset+idSize + CS_HASH_SIZE_SHA256*xSlotCount;
    size_t cdSize = hashOffset + (slotCount * dscHashSize);
    size_t cd256Size = agile ? hash256Offset + (slotCount * CS_HASH_SIZE_SHA256) : 0;
    size_t reqsSize = 12;
    size_t cmsSize = sizeof(CS_Blob);
    size_t cdOffset = sizeof(CS_SuperBlob) + blobCount*sizeof(CS_BlobIndex);
    size_t cd256Offset = cdOffset + cdSize;
    size_t reqsOffset = cd256Offset + cd256Size; // equals cdOffset + cdSize if not agile
    size_t cmsOffset = reqsOffset + reqsSize;
    size_t sbSize = cmsOffset + cmsSize;
    size_t sigSize = align(sbSize, 14); // keep whole cache 16KB aligned

    // allocate space for blob
    vm_address_t codeSigAlloc;
    if ( vm_allocate(mach_task_self(), &codeSigAlloc, sigSize, VM_FLAGS_ANYWHERE) != 0 ) {
        _diagnostics.error("could not allocate code signature buffer");
        return;
    }
    _codeSignatureRegion.buffer = (uint8_t*)codeSigAlloc;
    _codeSignatureRegion.bufferSize = sigSize;
    _codeSignatureRegion.sizeInUse = sigSize;

    // create overall code signature which is a superblob
    CS_SuperBlob* sb = reinterpret_cast<CS_SuperBlob*>(_codeSignatureRegion.buffer);
    sb->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
    sb->length = htonl(sbSize);
    sb->count = htonl(blobCount);
    sb->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
    sb->index[0].offset = htonl(cdOffset);
    sb->index[1].type = htonl(CSSLOT_REQUIREMENTS);
    sb->index[1].offset = htonl(reqsOffset);
    sb->index[2].type = htonl(CSSLOT_CMS_SIGNATURE);
    sb->index[2].offset = htonl(cmsOffset);
    if ( agile ) {
        sb->index[3].type = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES + 0);
        sb->index[3].offset = htonl(cd256Offset);
    }

    // fill in empty requirements
    CS_RequirementsBlob* reqs = (CS_RequirementsBlob*)(((char*)sb)+reqsOffset);
    reqs->magic = htonl(CSMAGIC_REQUIREMENTS);
    reqs->length = htonl(sizeof(CS_RequirementsBlob));
    reqs->data = 0;

    // initialize fixed fields of Code Directory
    CS_CodeDirectory* cd = (CS_CodeDirectory*)(((char*)sb)+cdOffset);
    cd->magic = htonl(CSMAGIC_CODEDIRECTORY);
    cd->length = htonl(cdSize);
    cd->version = htonl(0x20400); // supports exec segment
    cd->flags = htonl(kSecCodeSignatureAdhoc);
    cd->hashOffset = htonl(hashOffset);
    cd->identOffset = htonl(idOffset);
    cd->nSpecialSlots = htonl(xSlotCount);
    cd->nCodeSlots = htonl(slotCount);
    cd->codeLimit = htonl(inBbufferSize);
    cd->hashSize = dscHashSize;
    cd->hashType = dscHashType;
    cd->platform = 0; // not platform binary
    cd->pageSize = __builtin_ctz(CS_PAGE_SIZE); // log2(CS_PAGE_SIZE);
    cd->spare2 = 0; // unused (must be zero)
    cd->scatterOffset = 0; // not supported anymore
    cd->teamOffset = 0; // no team ID
    cd->spare3 = 0; // unused (must be zero)
    cd->codeLimit64 = 0; // falls back to codeLimit

    // executable segment info
    cd->execSegBase = htonll(_readExecuteRegion.cacheFileOffset); // base of TEXT segment
    cd->execSegLimit = htonll(_readExecuteRegion.sizeInUse); // size of TEXT segment
    cd->execSegFlags = 0; // not a main binary

    // initialize dynamic fields of Code Directory
    strcpy((char*)cd + idOffset, cacheIdentifier.c_str());

    // add special slot hashes (stored backwards from hashOffset)
    uint8_t* hashSlot = (uint8_t*)cd + hashOffset;
    uint8_t* reqsHashSlot = &hashSlot[-CSSLOT_REQUIREMENTS*dscHashSize];
    CCDigest(dscDigestFormat, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHashSlot);

    CS_CodeDirectory* cd256;
    uint8_t* hash256Slot;
    uint8_t* reqsHash256Slot;
    if ( agile ) {
        // Note that the assumption here is that the size up to the hashes is the same as for
        // sha1 code directory, and that they come last, after everything else.

        cd256 = (CS_CodeDirectory*)(((char*)sb)+cd256Offset);
        cd256->magic = htonl(CSMAGIC_CODEDIRECTORY);
        cd256->length = htonl(cd256Size);
        cd256->version = htonl(0x20400); // supports exec segment
        cd256->flags = htonl(kSecCodeSignatureAdhoc);
        cd256->hashOffset = htonl(hash256Offset);
        cd256->identOffset = htonl(idOffset);
        cd256->nSpecialSlots = htonl(xSlotCount);
        cd256->nCodeSlots = htonl(slotCount);
        cd256->codeLimit = htonl(inBbufferSize);
        cd256->hashSize = CS_HASH_SIZE_SHA256;
        cd256->hashType = CS_HASHTYPE_SHA256;
        cd256->platform = 0; // not platform binary
        cd256->pageSize = __builtin_ctz(CS_PAGE_SIZE); // log2(CS_PAGE_SIZE);
        cd256->spare2 = 0; // unused (must be zero)
        cd256->scatterOffset = 0; // not supported anymore
        cd256->teamOffset = 0; // no team ID
        cd256->spare3 = 0; // unused (must be zero)
        cd256->codeLimit64 = 0; // falls back to codeLimit

        // executable segment info
        cd256->execSegBase = cd->execSegBase;
        cd256->execSegLimit = cd->execSegLimit;
        cd256->execSegFlags = cd->execSegFlags;

        // initialize dynamic fields of Code Directory
        strcpy((char*)cd256 + idOffset, cacheIdentifier.c_str());

        // add special slot hashes
        hash256Slot = (uint8_t*)cd256 + hash256Offset;
        reqsHash256Slot = &hash256Slot[-CSSLOT_REQUIREMENTS*CS_HASH_SIZE_SHA256];
        CCDigest(kCCDigestSHA256, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHash256Slot);
    }
    else {
        cd256 = NULL;
        hash256Slot = NULL;
        reqsHash256Slot = NULL;
    }

    // fill in empty CMS blob for ad-hoc signing
    CS_Blob* cms = (CS_Blob*)(((char*)sb)+cmsOffset);
    cms->magic = htonl(CSMAGIC_BLOBWRAPPER);
    cms->length = htonl(sizeof(CS_Blob));


    // alter header of cache to record size and location of code signature
    // do this *before* hashing each page
    dyld_cache_header* cache = (dyld_cache_header*)_readExecuteRegion.buffer;
    cache->codeSignatureOffset = inBbufferSize;
    cache->codeSignatureSize = sigSize;

    // map a global slot index to the region (and offset within it) that backs it
    const uint32_t rwSlotStart = (uint32_t)(_readExecuteRegion.sizeInUse / CS_PAGE_SIZE);
    const uint32_t roSlotStart = (uint32_t)(rwSlotStart + _readWriteRegion.sizeInUse / CS_PAGE_SIZE);
    const uint32_t localsSlotStart = (uint32_t)(roSlotStart + _readOnlyRegion.sizeInUse / CS_PAGE_SIZE);
    auto codeSignPage = ^(size_t i) {
        const uint8_t* code = nullptr;
        // move to correct region
        if ( i < rwSlotStart )
            code = _readExecuteRegion.buffer + (i * CS_PAGE_SIZE);
        else if ( i >= rwSlotStart && i < roSlotStart )
            code = _readWriteRegion.buffer + ((i - rwSlotStart) * CS_PAGE_SIZE);
        else if ( i >= roSlotStart && i < localsSlotStart )
            code = _readOnlyRegion.buffer + ((i - roSlotStart) * CS_PAGE_SIZE);
        else
            code = _localSymbolsRegion.buffer + ((i - localsSlotStart) * CS_PAGE_SIZE);

        CCDigest(dscDigestFormat, code, CS_PAGE_SIZE, hashSlot + (i * dscHashSize));

        if ( agile ) {
            CCDigest(kCCDigestSHA256, code, CS_PAGE_SIZE, hash256Slot + (i * CS_HASH_SIZE_SHA256));
        }
    };

    // compute hashes (one slot per page, in parallel)
    dispatch_apply(slotCount, DISPATCH_APPLY_AUTO, ^(size_t i) {
        codeSignPage(i);
    });

    // Now that we have a code signature, compute a UUID from it.

    // Clear existing UUID, then MD5 whole cache buffer.
    {
        uint8_t* uuidLoc = cache->uuid;
        assert(uuid_is_null(uuidLoc));
        static_assert(offsetof(dyld_cache_header, uuid) / CS_PAGE_SIZE == 0, "uuid is expected in the first page of the cache");
        CC_MD5((const void*)cd, (unsigned)cdSize, uuidLoc);
        // <rdar://problem/6723729> uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
        uuidLoc[6] = ( uuidLoc[6] & 0x0F ) | ( 3 << 4 );
        uuidLoc[8] = ( uuidLoc[8] & 0x3F ) | 0x80;

        // Now codesign page 0 again (writing the uuid changed the first page's content)
        codeSignPage(0);
    }

    // hash of entire code directory (cdHash) uses same hash as each page
    uint8_t fullCdHash[dscHashSize];
    CCDigest(dscDigestFormat, (const uint8_t*)cd, cdSize, fullCdHash);
    // Note: cdHash is defined as first 20 bytes of hash
    memcpy(_cdHashFirst, fullCdHash, 20);
    if ( agile ) {
        uint8_t fullCdHash256[CS_HASH_SIZE_SHA256];
        CCDigest(kCCDigestSHA256, (const uint8_t*)cd256, cd256Size, fullCdHash256);
        // Note: cdHash is defined as first 20 bytes of hash, even for sha256
        memcpy(_cdHashSecond, fullCdHash256, 20);
    }
    else {
        memset(_cdHashSecond, 0, 20);
    }
}
2215
2216 const bool CacheBuilder::agileSignature()
2217 {
2218 return _options.codeSigningDigestMode == DyldSharedCache::Agile;
2219 }
2220
// Convert a 20-byte cdHash into its lowercase hex-string form.
// Note: callers pass the first 20 bytes of a digest, even for SHA256.
static const std::string cdHash(uint8_t hash[20])
{
    char buff[48];
    for (int i = 0; i < 20; ++i) {
        // snprintf bounds each 2-char write (plus NUL) within the buffer,
        // unlike the original sprintf which had no bound at all
        snprintf(&buff[2*i], sizeof(buff) - 2*i, "%2.2x", hash[i]);
    }
    return buff;
}
2228
2229 const std::string CacheBuilder::cdHashFirst()
2230 {
2231 return cdHash(_cdHashFirst);
2232 }
2233
2234 const std::string CacheBuilder::cdHashSecond()
2235 {
2236 return cdHash(_cdHashSecond);
2237 }
2238
2239
// Runs the ClosureBuilder over every dylib already laid out in the cache to
// produce the dylib ImageArray (prebuilt dyld3 closures), storing the result
// in _imageArray.  The binding handlers below do the real fixup work as the
// builder walks each dylib: binds are converted to rebases (cache addresses
// are known at build time), slide locations are recorded in _aslrTracker,
// and every use of an exported symbol is accumulated into a patch table so
// exports can be re-patched if a dylib is later overridden.
void CacheBuilder::buildImageArray(std::vector<DyldSharedCache::FileAlias>& aliases)
{
    typedef dyld3::closure::ClosureBuilder::CachedDylibInfo CachedDylibInfo;
    typedef dyld3::closure::Image::PatchableExport::PatchLocation PatchLocation;
    typedef uint64_t CacheOffset;

    // convert STL data structures to simple arrays to pass to makeDyldCacheImageArray()
    __block std::vector<CachedDylibInfo> dylibInfos;
    __block std::unordered_map<dyld3::closure::ImageNum, const dyld3::MachOLoaded*> imageNumToML;
    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        uint64_t mtime;
        uint64_t inode;
        // index of this image equals dylibInfos.size() because entries are
        // appended in the same order forEachImage() visits them
        cache->getIndexedImageEntry((uint32_t)dylibInfos.size(), mtime, inode);
        CachedDylibInfo entry;
        entry.fileInfo.fileContent = mh;
        entry.fileInfo.path = installName;
        entry.fileInfo.sliceOffset = 0;
        entry.fileInfo.inode = inode;
        entry.fileInfo.mtime = mtime;
        dylibInfos.push_back(entry);
        // ImageNums are 1-based, so size() after push_back is this image's num
        imageNumToML[(dyld3::closure::ImageNum)(dylibInfos.size())] = (dyld3::MachOLoaded*)mh;
    });

    // Convert symlinks from STL to simple char pointers.
    std::vector<dyld3::closure::ClosureBuilder::CachedDylibAlias> dylibAliases;
    dylibAliases.reserve(aliases.size());
    for (const auto& alias : aliases)
        dylibAliases.push_back({ alias.realPath.c_str(), alias.aliasPath.c_str() });

    // patch-table accumulators, filled in by the handlers below:
    //   dylibToItsExports: per-dylib set of used export offsets (cache-relative)
    //   exportsToUses:     every fixup location that targets an export
    //   exportsToName:     symbol name for each export offset
    __block std::unordered_map<const dyld3::MachOLoaded*, std::set<CacheOffset>> dylibToItsExports;
    __block std::unordered_map<CacheOffset, std::vector<PatchLocation>> exportsToUses;
    __block std::unordered_map<CacheOffset, const char*> exportsToName;

    dyld3::closure::ClosureBuilder::CacheDylibsBindingHandlers handlers;

    // called for images using chained fixups (one fixup chain per 'start')
    handlers.chainedBind = ^(dyld3::closure::ImageNum, const dyld3::MachOLoaded* imageLoadAddress,
                             const dyld3::Array<uint64_t>& starts,
                             const dyld3::Array<dyld3::closure::Image::ResolvedSymbolTarget>& targets,
                             const dyld3::Array<dyld3::closure::ClosureBuilder::ResolvedTargetInfo>& targetInfos) {
        for (uint64_t start : starts) {
            dyld3::closure::Image::forEachChainedFixup((void*)imageLoadAddress, start, ^(uint64_t* fixupLoc, dyld3::MachOLoaded::ChainedFixupPointerOnDisk fixupInfo, bool& stop) {
                // record location in aslr tracker so kernel can slide this on page-in
                _aslrTracker.add(fixupLoc);

                // if bind, record info for patch table and convert to rebase
                if ( fixupInfo.plainBind.bind ) {
                    dyld3::closure::Image::ResolvedSymbolTarget target = targets[fixupInfo.plainBind.ordinal];
                    const dyld3::closure::ClosureBuilder::ResolvedTargetInfo& targetInfo = targetInfos[fixupInfo.plainBind.ordinal];
                    dyld3::MachOLoaded::ChainedFixupPointerOnDisk* loc;
                    uint64_t offsetInCache;
                    switch ( target.sharedCache.kind ) {
                        case dyld3::closure::Image::ResolvedSymbolTarget::kindSharedCache:
                            loc = (dyld3::MachOLoaded::ChainedFixupPointerOnDisk*)fixupLoc;
                            // subtract addend so the patch table keys on the export itself
                            offsetInCache = target.sharedCache.offset - targetInfo.addend;
                            dylibToItsExports[targetInfo.foundInDylib].insert(offsetInCache);
                            exportsToName[offsetInCache] = targetInfo.foundSymbolName;
                            if ( fixupInfo.authBind.auth ) {
                                // turn this auth bind into an auth rebase into the cache
                                loc->authRebase.bind = 0;
                                loc->authRebase.target = target.sharedCache.offset;
                                exportsToUses[offsetInCache].push_back(PatchLocation((uint8_t*)fixupLoc - _readExecuteRegion.buffer, targetInfo.addend, *loc));
                            }
                            else {
                                // turn this plain bind into a plain rebase into the cache
                                loc->plainRebase.bind = 0;
                                loc->plainRebase.target = _archLayout->sharedMemoryStart + target.sharedCache.offset;
                                exportsToUses[offsetInCache].push_back(PatchLocation((uint8_t*)fixupLoc - _readExecuteRegion.buffer, targetInfo.addend));
                            }
                            break;
                        case dyld3::closure::Image::ResolvedSymbolTarget::kindAbsolute:
                            if ( _archLayout->is64 )
                                *((uint64_t*)fixupLoc) = target.absolute.value;
                            else
                                *((uint32_t*)fixupLoc) = (uint32_t)(target.absolute.value);
                            // don't record absolute targets for ASLR
                            break;
                        default:
                            assert(0 && "unsupported ResolvedSymbolTarget kind in dyld cache");
                    }
                }
            });
        }
    };

    handlers.rebase = ^(dyld3::closure::ImageNum imageNum, const dyld3::MachOLoaded* imageToFix, uint32_t runtimeOffset) {
        // record location in aslr tracker so kernel can slide this on page-in
        uint8_t* fixupLoc = (uint8_t*)imageToFix+runtimeOffset;
        _aslrTracker.add(fixupLoc);
    };

    // called for images using classic (opcode-based) binds
    handlers.bind = ^(dyld3::closure::ImageNum imageNum, const dyld3::MachOLoaded* mh,
                      uint32_t runtimeOffset, dyld3::closure::Image::ResolvedSymbolTarget target,
                      const dyld3::closure::ClosureBuilder::ResolvedTargetInfo& targetInfo) {
        uint8_t* fixupLoc = (uint8_t*)mh+runtimeOffset;

        // binder is called a second time for weak_bind info, which we ignore when building cache
        const bool weakDefUseAlreadySet = targetInfo.weakBindCoalese && _aslrTracker.has(fixupLoc);

        // do actual bind that sets pointer in image to unslid target address
        uint64_t offsetInCache;
        switch ( target.sharedCache.kind ) {
            case dyld3::closure::Image::ResolvedSymbolTarget::kindSharedCache:
                offsetInCache = target.sharedCache.offset - targetInfo.addend;
                dylibToItsExports[targetInfo.foundInDylib].insert(offsetInCache);
                exportsToUses[offsetInCache].push_back(PatchLocation(fixupLoc - _readExecuteRegion.buffer, targetInfo.addend));
                exportsToName[offsetInCache] = targetInfo.foundSymbolName;
                if ( !weakDefUseAlreadySet ) {
                    if ( _archLayout->is64 )
                        *((uint64_t*)fixupLoc) = _archLayout->sharedMemoryStart + target.sharedCache.offset;
                    else
                        *((uint32_t*)fixupLoc) = (uint32_t)(_archLayout->sharedMemoryStart + target.sharedCache.offset);
                    // record location in aslr tracker so kernel can slide this on page-in
                    _aslrTracker.add(fixupLoc);
                }
                break;
            case dyld3::closure::Image::ResolvedSymbolTarget::kindAbsolute:
                if ( _archLayout->is64 )
                    *((uint64_t*)fixupLoc) = target.absolute.value;
                else
                    *((uint32_t*)fixupLoc) = (uint32_t)(target.absolute.value);
                // don't record absolute targets for ASLR
                // HACK: Split seg may have added a target. Remove it
                _aslrTracker.remove(fixupLoc);
                // absolute 0 here may mean a missing weak import; remember which
                // dylib it was expected from so dlopen can patch it in later
                if ( (targetInfo.libOrdinal > 0) && (targetInfo.libOrdinal <= mh->dependentDylibCount()) ) {
                    _missingWeakImports[fixupLoc] = mh->dependentDylibLoadPath(targetInfo.libOrdinal - 1);
                }
                break;
            default:
                assert(0 && "unsupported ResolvedSymbolTarget kind in dyld cache");
        }
    };

    // hands the accumulated patch info for one image back to the closure builder
    handlers.forEachExportsPatch = ^(dyld3::closure::ImageNum imageNum, void (^handler)(const dyld3::closure::ClosureBuilder::CacheDylibsBindingHandlers::PatchInfo&)) {
        const dyld3::MachOLoaded* ml = imageNumToML[imageNum];
        for (CacheOffset exportCacheOffset : dylibToItsExports[ml]) {
            dyld3::closure::ClosureBuilder::CacheDylibsBindingHandlers::PatchInfo info;
            std::vector<PatchLocation>& uses = exportsToUses[exportCacheOffset];
            // drop adjacent duplicates; NOTE(review): std::unique only removes
            // *consecutive* duplicates — presumably uses arrive grouped; confirm
            uses.erase(std::unique(uses.begin(), uses.end()), uses.end());
            info.exportCacheOffset = (uint32_t)exportCacheOffset;
            info.exportSymbolName = exportsToName[exportCacheOffset];
            info.usesCount = (uint32_t)uses.size();
            info.usesArray = &uses.front();
            handler(info);
        }
    };


    // build ImageArray for all dylibs in dyld cache
    dyld3::closure::PathOverrides pathOverrides;
    dyld3::closure::ClosureBuilder cb(dyld3::closure::kFirstDyldCacheImageNum, _fileSystem, cache, false, pathOverrides, dyld3::closure::ClosureBuilder::AtPath::none, nullptr, _archLayout->archName, _options.platform, &handlers);
    dyld3::Array<CachedDylibInfo> dylibs(&dylibInfos[0], dylibInfos.size(), dylibInfos.size());
    const dyld3::Array<dyld3::closure::ClosureBuilder::CachedDylibAlias> aliasesArray(dylibAliases.data(), dylibAliases.size(), dylibAliases.size());
    _imageArray = cb.makeDyldCacheImageArray(_options.optimizeStubs, dylibs, aliasesArray);
    if ( cb.diagnostics().hasError() ) {
        _diagnostics.error("%s", cb.diagnostics().errorMessage().c_str());
        return;
    }
}
2400
2401 void CacheBuilder::addImageArray()
2402 {
2403 // build trie of dylib paths
2404 __block std::vector<DylibIndexTrie::Entry> dylibEntrys;
2405 _imageArray->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
2406 dylibEntrys.push_back(DylibIndexTrie::Entry(image->path(), DylibIndex(image->imageNum()-1)));
2407 image->forEachAlias(^(const char *aliasPath, bool &innerStop) {
2408 dylibEntrys.push_back(DylibIndexTrie::Entry(aliasPath, DylibIndex(image->imageNum()-1)));
2409 });
2410 });
2411 DylibIndexTrie dylibsTrie(dylibEntrys);
2412 std::vector<uint8_t> trieBytes;
2413 dylibsTrie.emit(trieBytes);
2414 while ( (trieBytes.size() % 4) != 0 )
2415 trieBytes.push_back(0);
2416
2417 // check for fit
2418 uint64_t imageArraySize = _imageArray->size();
2419 size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
2420 if ( imageArraySize+trieBytes.size() > freeSpace ) {
2421 _diagnostics.error("cache buffer too small to hold ImageArray and Trie (buffer size=%lldMB, imageArray size=%lldMB, trie size=%luKB, free space=%ldMB)",
2422 _allocatedBufferSize/1024/1024, imageArraySize/1024/1024, trieBytes.size()/1024, freeSpace/1024/1024);
2423 return;
2424 }
2425
2426 // copy into cache and update header
2427 DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
2428 dyldCache->header.dylibsImageArrayAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
2429 dyldCache->header.dylibsImageArraySize = imageArraySize;
2430 dyldCache->header.dylibsTrieAddr = dyldCache->header.dylibsImageArrayAddr + imageArraySize;
2431 dyldCache->header.dylibsTrieSize = trieBytes.size();
2432 ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, _imageArray, imageArraySize);
2433 ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse + imageArraySize, &trieBytes[0], trieBytes.size());
2434 _readOnlyRegion.sizeInUse += align(imageArraySize+trieBytes.size(),14);
2435 }
2436
2437 void CacheBuilder::addOtherImageArray(const std::vector<LoadedMachO>& otherDylibsAndBundles, std::vector<const LoadedMachO*>& overflowDylibs)
2438 {
2439 DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
2440 dyld3::closure::PathOverrides pathOverrides;
2441 dyld3::closure::ClosureBuilder cb(dyld3::closure::kFirstOtherOSImageNum, _fileSystem, cache, false, pathOverrides, dyld3::closure::ClosureBuilder::AtPath::none, nullptr, _archLayout->archName, _options.platform);
2442
2443 // make ImageArray for other dylibs and bundles
2444 STACK_ALLOC_ARRAY(dyld3::closure::LoadedFileInfo, others, otherDylibsAndBundles.size() + overflowDylibs.size());
2445 for (const LoadedMachO& other : otherDylibsAndBundles) {
2446 if ( !contains(other.loadedFileInfo.path, ".app/") )
2447 others.push_back(other.loadedFileInfo);
2448 }
2449
2450 for (const LoadedMachO* dylib : overflowDylibs) {
2451 if (dylib->mappedFile.mh->canHavePrecomputedDlopenClosure(dylib->mappedFile.runtimePath.c_str(), ^(const char*) {}) )
2452 others.push_back(dylib->loadedFileInfo);
2453 }
2454
2455 // Sort the others array by name so that it is deterministic
2456 std::sort(others.begin(), others.end(),
2457 [](const dyld3::closure::LoadedFileInfo& a, const dyld3::closure::LoadedFileInfo& b) {
2458 return strcmp(a.path, b.path) < 0;
2459 });
2460
2461 const dyld3::closure::ImageArray* otherImageArray = cb.makeOtherDylibsImageArray(others, (uint32_t)_sortedDylibs.size());
2462
2463 // build trie of paths
2464 __block std::vector<DylibIndexTrie::Entry> otherEntrys;
2465 otherImageArray->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
2466 if ( !image->isInvalid() )
2467 otherEntrys.push_back(DylibIndexTrie::Entry(image->path(), DylibIndex(image->imageNum())));
2468 });
2469 DylibIndexTrie dylibsTrie(otherEntrys);
2470 std::vector<uint8_t> trieBytes;
2471 dylibsTrie.emit(trieBytes);
2472 while ( (trieBytes.size() % 4) != 0 )
2473 trieBytes.push_back(0);
2474
2475 // check for fit
2476 uint64_t imageArraySize = otherImageArray->size();
2477 size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
2478 if ( imageArraySize+trieBytes.size() > freeSpace ) {
2479 _diagnostics.error("cache buffer too small to hold ImageArray and Trie (buffer size=%lldMB, imageArray size=%lldMB, trie size=%luKB, free space=%ldMB)",
2480 _allocatedBufferSize/1024/1024, imageArraySize/1024/1024, trieBytes.size()/1024, freeSpace/1024/1024);
2481 return;
2482 }
2483
2484 // copy into cache and update header
2485 DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
2486 dyldCache->header.otherImageArrayAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
2487 dyldCache->header.otherImageArraySize = imageArraySize;
2488 dyldCache->header.otherTrieAddr = dyldCache->header.otherImageArrayAddr + imageArraySize;
2489 dyldCache->header.otherTrieSize = trieBytes.size();
2490 ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, otherImageArray, imageArraySize);
2491 ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse + imageArraySize, &trieBytes[0], trieBytes.size());
2492 _readOnlyRegion.sizeInUse += align(imageArraySize+trieBytes.size(),14);
2493 }
2494
2495
// Builds a launch closure for every OS executable (in parallel via
// dispatch_apply), then appends the surviving closures plus a trie mapping
// runtime path -> closure offset to the read-only region, recording both
// in the cache header.  Sets _diagnostics.error() on overflow.
void CacheBuilder::addClosures(const std::vector<LoadedMachO>& osExecutables)
{
    const DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;

    // one diagnostics/closure slot per executable so the parallel workers
    // never write to shared state
    __block std::vector<Diagnostics> osExecutablesDiags;
    __block std::vector<const dyld3::closure::LaunchClosure*> osExecutablesClosures;
    osExecutablesDiags.resize(osExecutables.size());
    osExecutablesClosures.resize(osExecutables.size());

    dispatch_apply(osExecutables.size(), DISPATCH_APPLY_AUTO, ^(size_t index) {
        const LoadedMachO& loadedMachO = osExecutables[index];
        // don't pre-build closures for staged apps into dyld cache, since they won't run from that location
        if ( startsWith(loadedMachO.mappedFile.runtimePath, "/private/var/staged_system_apps/") ) {
            return;
        }
        dyld3::closure::PathOverrides pathOverrides;
        dyld3::closure::ClosureBuilder builder(dyld3::closure::kFirstLaunchClosureImageNum, _fileSystem, dyldCache, false, pathOverrides, dyld3::closure::ClosureBuilder::AtPath::all, nullptr, _archLayout->archName, _options.platform, nullptr);
        // setuid binaries get a restricted closure; only checked on macOS
        bool issetuid = false;
        if ( this->_options.platform == dyld3::Platform::macOS )
            _fileSystem.fileExists(loadedMachO.loadedFileInfo.path, nullptr, nullptr, &issetuid);
        const dyld3::closure::LaunchClosure* mainClosure = builder.makeLaunchClosure(loadedMachO.loadedFileInfo, issetuid);
        if ( builder.diagnostics().hasError() ) {
            osExecutablesDiags[index].error("%s", builder.diagnostics().errorMessage().c_str());
        }
        else {
            assert(mainClosure != nullptr);
            osExecutablesClosures[index] = mainClosure;
        }
    });

    // gather successful closures, keyed (and therefore sorted) by runtime path
    std::map<std::string, const dyld3::closure::LaunchClosure*> closures;
    for (uint64_t i = 0, e = osExecutables.size(); i != e; ++i) {
        const LoadedMachO& loadedMachO = osExecutables[i];
        const Diagnostics& diag = osExecutablesDiags[i];
        if (diag.hasError()) {
            if ( _options.verbose ) {
                _diagnostics.warning("building closure for '%s': %s", loadedMachO.mappedFile.runtimePath.c_str(), diag.errorMessage().c_str());
                for (const std::string& warn : diag.warnings() )
                    _diagnostics.warning("%s", warn.c_str());
            }
            // a failed closure is fatal only when the input file was required
            if ( loadedMachO.inputFile && (loadedMachO.inputFile->mustBeIncluded()) ) {
                loadedMachO.inputFile->diag.error("%s", diag.errorMessage().c_str());
            }
        } else {
            // Note, a closure could be null here if it has a path we skip.
            if (osExecutablesClosures[i] != nullptr)
                closures[loadedMachO.mappedFile.runtimePath] = osExecutablesClosures[i];
        }
    }

    osExecutablesDiags.clear();
    osExecutablesClosures.clear();

    // preflight space needed
    size_t closuresSpace = 0;
    for (const auto& entry : closures) {
        closuresSpace += entry.second->size();
    }
    size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    if ( closuresSpace > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold all closures (buffer size=%lldMB, closures size=%ldMB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, closuresSpace/1024/1024, freeSpace/1024/1024);
        return;
    }
    // copy each closure into the cache; the trie maps its path to its byte offset
    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->header.progClosuresAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    uint8_t* closuresBase = _readOnlyRegion.buffer + _readOnlyRegion.sizeInUse;
    std::vector<DylibIndexTrie::Entry> closureEntrys;
    uint32_t currentClosureOffset = 0;
    for (const auto& entry : closures) {
        const dyld3::closure::LaunchClosure* closure = entry.second;
        closureEntrys.push_back(DylibIndexTrie::Entry(entry.first, DylibIndex(currentClosureOffset)));
        size_t size = closure->size();
        assert((size % 4) == 0);
        memcpy(closuresBase+currentClosureOffset, closure, size);
        currentClosureOffset += size;
        freeSpace -= size;
        // the cache now owns the bytes; release the builder's allocation
        closure->deallocate();
    }
    cache->header.progClosuresSize = currentClosureOffset;
    _readOnlyRegion.sizeInUse += currentClosureOffset;
    // recompute free space now that the closures have been appended
    freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    // build trie of indexes into closures list
    DylibIndexTrie closureTrie(closureEntrys);
    std::vector<uint8_t> trieBytes;
    closureTrie.emit(trieBytes);
    // pad trie to an 8-byte boundary
    while ( (trieBytes.size() % 8) != 0 )
        trieBytes.push_back(0);
    if ( trieBytes.size() > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold all closures trie (buffer size=%lldMB, trie size=%ldMB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, trieBytes.size()/1024/1024, freeSpace/1024/1024);
        return;
    }
    memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, &trieBytes[0], trieBytes.size());
    cache->header.progClosuresTrieAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    cache->header.progClosuresTrieSize = trieBytes.size();
    _readOnlyRegion.sizeInUse += trieBytes.size();
    _readOnlyRegion.sizeInUse = align(_readOnlyRegion.sizeInUse, 14);
}
2595
2596
2597 bool CacheBuilder::writeCache(void (^cacheSizeCallback)(uint64_t size), bool (^copyCallback)(const uint8_t* src, uint64_t size, uint64_t dstOffset))
2598 {
2599 const dyld_cache_header* cacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
2600 const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + cacheHeader->mappingOffset);
2601 assert(_readExecuteRegion.sizeInUse == mappings[0].size);
2602 assert(_readWriteRegion.sizeInUse == mappings[1].size);
2603 assert(_readOnlyRegion.sizeInUse == mappings[2].size);
2604 assert(_readExecuteRegion.cacheFileOffset == mappings[0].fileOffset);
2605 assert(_readWriteRegion.cacheFileOffset == mappings[1].fileOffset);
2606 assert(_readOnlyRegion.cacheFileOffset == mappings[2].fileOffset);
2607 assert(_codeSignatureRegion.sizeInUse == cacheHeader->codeSignatureSize);
2608 assert(cacheHeader->codeSignatureOffset == mappings[2].fileOffset+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse);
2609 cacheSizeCallback(_readExecuteRegion.sizeInUse+_readWriteRegion.sizeInUse+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse+_codeSignatureRegion.sizeInUse);
2610 bool fullyWritten = copyCallback(_readExecuteRegion.buffer, _readExecuteRegion.sizeInUse, mappings[0].fileOffset);
2611 fullyWritten &= copyCallback(_readWriteRegion.buffer, _readWriteRegion.sizeInUse, mappings[1].fileOffset);
2612 fullyWritten &= copyCallback(_readOnlyRegion.buffer, _readOnlyRegion.sizeInUse, mappings[2].fileOffset);
2613 if ( _localSymbolsRegion.sizeInUse != 0 ) {
2614 assert(cacheHeader->localSymbolsOffset == mappings[2].fileOffset+_readOnlyRegion.sizeInUse);
2615 fullyWritten &= copyCallback(_localSymbolsRegion.buffer, _localSymbolsRegion.sizeInUse, cacheHeader->localSymbolsOffset);
2616 }
2617 fullyWritten &= copyCallback(_codeSignatureRegion.buffer, _codeSignatureRegion.sizeInUse, cacheHeader->codeSignatureOffset);
2618 return fullyWritten;
2619 }
2620
2621
2622 void CacheBuilder::writeFile(const std::string& path)
2623 {
2624 std::string pathTemplate = path + "-XXXXXX";
2625 size_t templateLen = strlen(pathTemplate.c_str())+2;
2626 char pathTemplateSpace[templateLen];
2627 strlcpy(pathTemplateSpace, pathTemplate.c_str(), templateLen);
2628 int fd = mkstemp(pathTemplateSpace);
2629 if ( fd != -1 ) {
2630 auto cacheSizeCallback = ^(uint64_t size) {
2631 ::ftruncate(fd, size);
2632 };
2633 auto copyCallback = ^(const uint8_t* src, uint64_t size, uint64_t dstOffset) {
2634 uint64_t writtenSize = pwrite(fd, src, size, dstOffset);
2635 return writtenSize == size;
2636 };
2637 bool fullyWritten = writeCache(cacheSizeCallback, copyCallback);
2638 if ( fullyWritten ) {
2639 ::fchmod(fd, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH); // mkstemp() makes file "rw-------", switch it to "rw-r--r--"
2640 if ( ::rename(pathTemplateSpace, path.c_str()) == 0) {
2641 ::close(fd);
2642 return; // success
2643 }
2644 }
2645 else {
2646 _diagnostics.error("could not write file %s", pathTemplateSpace);
2647 }
2648 ::close(fd);
2649 ::unlink(pathTemplateSpace);
2650 }
2651 else {
2652 _diagnostics.error("could not open file %s", pathTemplateSpace);
2653 }
2654 }
2655
2656 void CacheBuilder::writeBuffer(uint8_t*& buffer, uint64_t& bufferSize) {
2657 auto cacheSizeCallback = ^(uint64_t size) {
2658 buffer = (uint8_t*)malloc(size);
2659 bufferSize = size;
2660 };
2661 auto copyCallback = ^(const uint8_t* src, uint64_t size, uint64_t dstOffset) {
2662 memcpy(buffer + dstOffset, src, size);
2663 return true;
2664 };
2665 bool fullyWritten = writeCache(cacheSizeCallback, copyCallback);
2666 assert(fullyWritten);
2667 }
2668
2669 void CacheBuilder::writeMapFile(const std::string& path)
2670 {
2671 const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
2672 std::string mapContent = cache->mapFile();
2673 safeSave(mapContent.c_str(), mapContent.size(), path);
2674 }
2675
2676 void CacheBuilder::writeMapFileBuffer(uint8_t*& buffer, uint64_t& bufferSize)
2677 {
2678 const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
2679 std::string mapContent = cache->mapFile();
2680 buffer = (uint8_t*)malloc(mapContent.size() + 1);
2681 bufferSize = mapContent.size() + 1;
2682 memcpy(buffer, mapContent.data(), bufferSize);
2683 }
2684
2685
2686 void CacheBuilder::forEachCacheDylib(void (^callback)(const std::string& path)) {
2687 for (const DylibInfo& dylibInfo : _sortedDylibs)
2688 callback(dylibInfo.runtimePath);
2689 }
2690
2691
2692 CacheBuilder::ASLR_Tracker::~ASLR_Tracker()
2693 {
2694 if ( _bitmap != nullptr )
2695 ::free(_bitmap);
2696 }
2697
// Sizes the slide-location bitmap for the read-write region: one bool per
// 4-byte slot.  Page count is rounded up, so the bitmap covers at least
// rwRegionSize/4 slots.
// NOTE(review): calling this a second time leaks the previous _bitmap —
// presumably it is only called once per build; confirm with callers.
void CacheBuilder::ASLR_Tracker::setDataRegion(const void* rwRegionStart, size_t rwRegionSize)
{
    _pageCount = (unsigned)(rwRegionSize+_pageSize-1)/_pageSize;   // round up to whole pages
    _regionStart = (uint8_t*)rwRegionStart;
    _endStart = (uint8_t*)rwRegionStart + rwRegionSize;
    // calloc zero-fills, so every slot starts out "no fixup here"
    _bitmap = (bool*)calloc(_pageCount*(_pageSize/4)*sizeof(bool), 1);
}
2705
2706 void CacheBuilder::ASLR_Tracker::add(void* loc)
2707 {
2708 uint8_t* p = (uint8_t*)loc;
2709 assert(p >= _regionStart);
2710 assert(p < _endStart);
2711 _bitmap[(p-_regionStart)/4] = true;
2712 }
2713
2714 void CacheBuilder::ASLR_Tracker::remove(void* loc)
2715 {
2716 uint8_t* p = (uint8_t*)loc;
2717 assert(p >= _regionStart);
2718 assert(p < _endStart);
2719 _bitmap[(p-_regionStart)/4] = false;
2720 }
2721
2722 bool CacheBuilder::ASLR_Tracker::has(void* loc)
2723 {
2724 uint8_t* p = (uint8_t*)loc;
2725 assert(p >= _regionStart);
2726 assert(p < _endStart);
2727 return _bitmap[(p-_regionStart)/4];
2728 }
2729
2730
2731
2732