/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <sys/types.h>

#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach/mach_vm.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <mach/shared_region.h>

#include <CommonCrypto/CommonHMAC.h>
#include <CommonCrypto/CommonDigest.h>
#include <CommonCrypto/CommonDigestSPI.h>
#include <pthread/pthread.h>
#include <apfs/apfs_fsctl.h>
#include <dispatch/dispatch.h>  // for dispatch_apply() used below

#include <unordered_map>
#include <unordered_set>
51 #include "MachOFileAbstraction.hpp"
52 #include "CodeSigningTypes.h"
53 #include "DyldSharedCache.h"
54 #include "CacheBuilder.h"
55 #include "FileAbstraction.hpp"
57 #include "FileUtils.h"
58 #include "Diagnostics.h"
59 #include "ClosureBuilder.h"
61 #include "ClosureFileSystemNull.h"
62 #include "StringUtils.h"
#if __has_include("dyld_cache_config.h")
    #include "dyld_cache_config.h"
#else
    #define ARM_SHARED_REGION_START      0x1A000000ULL
    #define ARM_SHARED_REGION_SIZE       0x26000000ULL
    #define ARM64_SHARED_REGION_START   0x180000000ULL
    #define ARM64_SHARED_REGION_SIZE    0x100000000ULL
#endif

#ifndef ARM64_32_SHARED_REGION_START
    #define ARM64_32_SHARED_REGION_START 0x1A000000ULL
    #define ARM64_32_SHARED_REGION_SIZE  0x26000000ULL
#endif

#if ARM_SHARED_REGION_SIZE > 0x26000000ULL
    #define ARMV7K_CHAIN_BITS 0xC0000000
    #define ARMV7K_MAX        0x0
#else
    #define ARMV7K_CHAIN_BITS 0xE0000000
    #define ARMV7K_MAX        0x20000000
#endif
const CacheBuilder::ArchLayout CacheBuilder::_s_archLayout[] = {
    { 0x7FFF20000000ULL,            0xEFE00000ULL,               0x0,        0x40000000, 0x00FFFF0000000000, "x86_64",   CS_PAGE_SIZE_4K,  12, 2, true,  true,  true  },
    { 0x7FFF20000000ULL,            0xEFE00000ULL,               0x0,        0x40000000, 0x00FFFF0000000000, "x86_64h",  CS_PAGE_SIZE_4K,  12, 2, true,  true,  true  },
    { SHARED_REGION_BASE_I386,      SHARED_REGION_SIZE_I386,     0x0,        0x00200000, 0x0,                "i386",     CS_PAGE_SIZE_4K,  12, 0, false, false, true  },
    { ARM64_SHARED_REGION_START,    ARM64_SHARED_REGION_SIZE,    0x0,        0x02000000, 0x00FFFF0000000000, "arm64",    CS_PAGE_SIZE_4K,  14, 2, false, true,  false },
#if SUPPORT_ARCH_arm64e
    { ARM64_SHARED_REGION_START,    ARM64_SHARED_REGION_SIZE,    0x0,        0x02000000, 0x00FFFF0000000000, "arm64e",   CS_PAGE_SIZE_16K, 14, 2, false, true,  false },
#endif
#if SUPPORT_ARCH_arm64_32
    { ARM64_32_SHARED_REGION_START, ARM64_32_SHARED_REGION_SIZE, 0x0,        0x02000000, 0xC0000000,         "arm64_32", CS_PAGE_SIZE_16K, 14, 6, false, false, true  },
#endif
    { ARM_SHARED_REGION_START,      ARM_SHARED_REGION_SIZE,      0x0,        0x02000000, 0xE0000000,         "armv7s",   CS_PAGE_SIZE_4K,  14, 4, false, false, true  },
    { ARM_SHARED_REGION_START,      ARM_SHARED_REGION_SIZE,      ARMV7K_MAX, 0x00400000, ARMV7K_CHAIN_BITS,  "armv7k",   CS_PAGE_SIZE_4K,  14, 4, false, false, true  },
    { 0x40000000,                   0x40000000,                  0x0,        0x02000000, 0x0,                "sim-x86",  CS_PAGE_SIZE_4K,  14, 0, false, false, true  }
};
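
// Column meanings above, inferred from how _archLayout is read later in this
// file (CacheBuilder.h holds the authoritative ArchLayout definition):
//   sharedMemoryStart, sharedMemorySize, textAndDataMaxSize, sharedRegionPadding,
//   pointerDeltaMask, archName, csPageSize, sharedRegionAlignP2, ...,
//   sharedRegionsAreDiscontiguous, is64, ...
// For example, the x86_64 row places the cache at 0x7FFF20000000 with three
// discontiguous 1GB-aligned regions, while arm64 uses one contiguous 4GB
// shared region starting at 0x180000000.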
// These are dylibs that may be interposed, so stubs calling into them should never be bypassed
const char* const CacheBuilder::_s_neverStubEliminateDylibs[] = {
    "/usr/lib/system/libdispatch.dylib",
    nullptr
};
// These are functions that are interposed by Instruments.app or ASan
const char* const CacheBuilder::_s_neverStubEliminateSymbols[] = {
    "__objc_autoreleasePoolPop",
    "_dispatch_barrier_async_f",
    "_dispatch_group_async",
    "_dispatch_group_async_f",
    "_dispatch_source_set_cancel_handler",
    "_dispatch_source_set_event_handler",
    "_malloc_create_zone",
    "_malloc_default_purgeable_zone",
    "_malloc_default_zone",
    "_malloc_make_nonpurgeable",
    "_malloc_make_purgeable",
    "_malloc_set_zone_name",
    "_objc_autoreleasePoolPop",
    "_objc_setProperty_atomic",
    "_objc_setProperty_atomic_copy",
    "_objc_setProperty_nonatomic",
    "_objc_setProperty_nonatomic_copy",
    "_pthread_attr_getdetachstate",
    "_pthread_attr_getguardsize",
    "_pthread_attr_getinheritsched",
    "_pthread_attr_getschedparam",
    "_pthread_attr_getschedpolicy",
    "_pthread_attr_getscope",
    "_pthread_attr_getstack",
    "_pthread_attr_getstacksize",
    "_pthread_condattr_getpshared",
    "_pthread_getschedparam",
    "_pthread_mutex_lock",
    "_pthread_mutex_unlock",
    "_pthread_mutexattr_getprioceiling",
    "_pthread_mutexattr_getprotocol",
    "_pthread_mutexattr_getpshared",
    "_pthread_mutexattr_gettype",
    "_pthread_rwlockattr_getpshared",
    // <rdar://problem/22050956> always use stubs for C++ symbols that can be overridden
    nullptr
};
CacheBuilder::CacheBuilder(const DyldSharedCache::CreateOptions& options, const dyld3::closure::FileSystem& fileSystem)
    : _options(options)
    , _fileSystem(fileSystem)
    , _fullAllocatedBuffer(0)
    , _diagnostics(options.loggingPrefix, options.verbose)
    , _archLayout(nullptr)
    , _aliasCount(0)
    , _slideInfoFileOffset(0)
    , _slideInfoBufferSizeAllocated(0)
    , _allocatedBufferSize(0)
    , _selectorStringsFromExecutables(0)
{
    std::string targetArch = options.archs->name();
    if ( options.forSimulator && (options.archs == &dyld3::GradedArchs::i386) )
        targetArch = "sim-x86";

    for (const ArchLayout& layout : _s_archLayout) {
        if ( layout.archName == targetArch ) {
            _archLayout = &layout;
            return;
        }
    }

    _diagnostics.error("Tool was built without support for: '%s'", targetArch.c_str());
}
std::string CacheBuilder::errorMessage()
{
    return _diagnostics.errorMessage();
}

const std::set<std::string> CacheBuilder::warnings()
{
    return _diagnostics.warnings();
}

const std::set<const dyld3::MachOAnalyzer*> CacheBuilder::evictions()
{
    return _evictions;
}
void CacheBuilder::deleteBuffer()
{
    vm_deallocate(mach_task_self(), _fullAllocatedBuffer, _archLayout->sharedMemorySize);
    _fullAllocatedBuffer = 0;
    _allocatedBufferSize = 0;
    // Local symbols buffer
    if ( _localSymbolsRegion.bufferSize != 0 ) {
        vm_deallocate(mach_task_self(), (vm_address_t)_localSymbolsRegion.buffer, _localSymbolsRegion.bufferSize);
        _localSymbolsRegion.buffer = 0;
        _localSymbolsRegion.bufferSize = 0;
    }
    vm_deallocate(mach_task_self(), (vm_address_t)_codeSignatureRegion.buffer, _codeSignatureRegion.bufferSize);
    _codeSignatureRegion.buffer = 0;
    _codeSignatureRegion.bufferSize = 0;
}
void CacheBuilder::makeSortedDylibs(const std::vector<LoadedMachO>& dylibs, const std::unordered_map<std::string, unsigned> sortOrder)
{
    for (const LoadedMachO& dylib : dylibs) {
        _sortedDylibs.push_back({ &dylib, dylib.mappedFile.runtimePath, {} });
    }

    std::sort(_sortedDylibs.begin(), _sortedDylibs.end(), [&](const DylibInfo& a, const DylibInfo& b) {
        const auto& orderA = sortOrder.find(a.input->mappedFile.runtimePath);
        const auto& orderB = sortOrder.find(b.input->mappedFile.runtimePath);
        bool foundA = (orderA != sortOrder.end());
        bool foundB = (orderB != sortOrder.end());

        // Order all __DATA_DIRTY segments specified in the order file first, in
        // the order specified in the file, followed by any other __DATA_DIRTY
        // segments in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;

        // Sort mac before iOSMac
        bool isIOSMacA = strncmp(a.input->mappedFile.runtimePath.c_str(), "/System/iOSSupport/", 19) == 0;
        bool isIOSMacB = strncmp(b.input->mappedFile.runtimePath.c_str(), "/System/iOSSupport/", 19) == 0;
        if (isIOSMacA != isIOSMacB)
            return !isIOSMacA;

        // Finally sort by path
        return a.input->mappedFile.runtimePath < b.input->mappedFile.runtimePath;
    });
}
inline uint32_t absolutetime_to_milliseconds(uint64_t abstime)
{
    return (uint32_t)(abstime/1000/1000);
}
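
// Note: the conversion above treats mach_absolute_time() ticks as nanoseconds,
// which holds where the mach timebase is 1/1 (e.g. x86 Macs). A fully portable
// version would scale by mach_timebase_info(), roughly:
//   mach_timebase_info_data_t tb;
//   mach_timebase_info(&tb);
//   ms = (uint32_t)(abstime * tb.numer / tb.denom / 1000000);
// Since these values only feed the verbose timing log below, the shortcut is fine.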
struct DylibAndSize
{
    const CacheBuilder::LoadedMachO*    input;
    const char*                         installName;
    uint64_t                            size;
};
uint64_t CacheBuilder::cacheOverflowAmount()
{
    if ( _archLayout->sharedRegionsAreDiscontiguous ) {
        // for macOS x86_64 cache, need to check each region for overflow
        if ( _readExecuteRegion.sizeInUse > 0x60000000 )
            return (_readExecuteRegion.sizeInUse - 0x60000000);

        if ( _readWriteRegion.sizeInUse > 0x40000000 )
            return (_readWriteRegion.sizeInUse - 0x40000000);

        if ( _readOnlyRegion.sizeInUse > 0x3FE00000 )
            return (_readOnlyRegion.sizeInUse - 0x3FE00000);
    }
    else if ( _archLayout->textAndDataMaxSize != 0 ) {
        // for armv7k, limit is 512MB of TEXT+DATA
        uint64_t totalTextAndData = _readWriteRegion.unslidLoadAddress + _readWriteRegion.sizeInUse - _readExecuteRegion.unslidLoadAddress;
        if ( totalTextAndData < _archLayout->textAndDataMaxSize )
            return 0;
        else
            return totalTextAndData - _archLayout->textAndDataMaxSize;
    }
    else {
        bool alreadyOptimized = (_readOnlyRegion.sizeInUse != _readOnlyRegion.bufferSize);
        uint64_t vmSize = _readOnlyRegion.unslidLoadAddress - _readExecuteRegion.unslidLoadAddress;
        if ( alreadyOptimized )
            vmSize += _readOnlyRegion.sizeInUse;
        else if ( _options.excludeLocalSymbols )
            vmSize += (_readOnlyRegion.sizeInUse * 37/100); // assume locals removal and LINKEDIT optimization reduces LINKEDITs to 37% of original size
        else
            vmSize += (_readOnlyRegion.sizeInUse * 80/100); // assume LINKEDIT optimization reduces LINKEDITs to 80% of original size
        if ( vmSize > _archLayout->sharedMemorySize )
            return vmSize - _archLayout->sharedMemorySize;
    }
    // fits in shared region
    return 0;
}
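
// Worked example for the estimate above: with a 1GB read-only region that has
// not yet been optimized and excludeLocalSymbols set, vmSize grows by roughly
// 0.37GB on the assumption that stripping local symbols shrinks the merged
// LINKEDIT to about 37% of its input size; without stripping, the more
// conservative 80% estimate is used instead.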
size_t CacheBuilder::evictLeafDylibs(uint64_t reductionTarget, std::vector<const LoadedMachO*>& overflowDylibs)
{
    // build a reverse map of all dylib dependencies
    __block std::map<std::string, std::set<std::string>> references;
    std::map<std::string, std::set<std::string>>* referencesPtr = &references;
    for (const DylibInfo& dylib : _sortedDylibs) {
        // Ensure we have an entry (even if it is empty)
        if (references.count(dylib.input->mappedFile.mh->installName()) == 0) {
            references[dylib.input->mappedFile.mh->installName()] = std::set<std::string>();
        }
        dylib.input->mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
            references[loadPath].insert(dylib.input->mappedFile.mh->installName());
        });
    }

    // Find the sizes of all the dylibs
    std::vector<DylibAndSize> dylibsToSort;
    std::vector<DylibAndSize> sortedDylibs;
    for (const DylibInfo& dylib : _sortedDylibs) {
        const char* installName = dylib.input->mappedFile.mh->installName();
        __block uint64_t segsSize = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
            if ( strcmp(info.segName, "__LINKEDIT") != 0 )
                segsSize += info.vmSize;
        });
        dylibsToSort.push_back({ dylib.input, installName, segsSize });
    }

    // Build an ordered list of what to remove. At each step we do the following:
    // 1) Find all dylibs that nothing else depends on
    // 2a) If any of those dylibs are not in the order file, select the largest one of them
    // 2b) If all the leaf dylibs are in the order file, select the dylib that appears last in the order file
    // 3) Remove all entries for the removed file from the reverse dependency map
    // 4) Go back to 1 and repeat until there are no more evictable dylibs
    // This results in us always choosing the locally optimal selection, and then taking into account how that impacts
    // the dependency graph for subsequent selections

    bool candidateFound = true;
    while (candidateFound) {
        candidateFound = false;
        DylibAndSize candidate;
        uint64_t candidateOrder = 0;
        for (const auto& dylib : dylibsToSort) {
            const auto& i = referencesPtr->find(dylib.installName);
            assert(i != referencesPtr->end());
            if (!i->second.empty()) {
                continue;
            }
            const auto& j = _options.dylibOrdering.find(dylib.input->mappedFile.runtimePath);
            uint64_t order = 0;
            if (j != _options.dylibOrdering.end()) {
                order = j->second;
            } else {
                // Not in the order file, set order so it goes to the front of the list
                order = UINT64_MAX;
            }
            if (order > candidateOrder ||
                (order == UINT64_MAX && candidate.size < dylib.size)) {
                // The new file is either a lower priority in the order file
                // or the same priority as the candidate but larger
                candidate = dylib;
                candidateOrder = order;
                candidateFound = true;
            }
        }
        if (candidateFound) {
            sortedDylibs.push_back(candidate);
            referencesPtr->erase(candidate.installName);
            for (auto& dependent : references) {
                (void)dependent.second.erase(candidate.installName);
            }
            auto j = std::find_if(dylibsToSort.begin(), dylibsToSort.end(), [&candidate](const DylibAndSize& dylib) {
                return (strcmp(candidate.installName, dylib.installName) == 0);
            });
            if (j != dylibsToSort.end()) {
                dylibsToSort.erase(j);
            }
        }
    }

    // build set of dylibs that if removed will allow cache to build
    for (DylibAndSize& dylib : sortedDylibs) {
        if ( _options.verbose )
            _diagnostics.warning("to prevent cache overflow, not caching %s", dylib.installName);
        _evictions.insert(dylib.input->mappedFile.mh);
        // Track the evicted dylibs so we can try to build "other" dlopen closures for them.
        overflowDylibs.push_back(dylib.input);
        if ( dylib.size > reductionTarget )
            break;
        reductionTarget -= dylib.size;
    }

    // prune _sortedDylibs
    _sortedDylibs.erase(std::remove_if(_sortedDylibs.begin(), _sortedDylibs.end(), [&](const DylibInfo& dylib) {
        return (_evictions.count(dylib.input->mappedFile.mh) != 0);
    }), _sortedDylibs.end());

    return _evictions.size();
}
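
// Worked example of the selection loop above (hypothetical dylibs): if
// libA.dylib (8MB) and libB.dylib (2MB) are both leaves (nothing depends on
// them) and neither appears in the order file, both get order UINT64_MAX and
// the larger libA is evicted first. Erasing libA from the reverse dependency
// map may turn one of its own dependencies into a new leaf, which the next
// iteration can then consider.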
// Handles building a list of input files to the CacheBuilder itself.
class CacheInputBuilder {
public:
    CacheInputBuilder(const dyld3::closure::FileSystem& fileSystem,
                      const dyld3::GradedArchs& archs, dyld3::Platform reqPlatform)
        : fileSystem(fileSystem), reqArchs(archs), reqPlatform(reqPlatform) { }

    // Loads and maps any MachOs in the given list of files.
    void loadMachOs(std::vector<CacheBuilder::InputFile>& inputFiles,
                    std::vector<CacheBuilder::LoadedMachO>& dylibsToCache,
                    std::vector<CacheBuilder::LoadedMachO>& otherDylibs,
                    std::vector<CacheBuilder::LoadedMachO>& executables,
                    std::vector<CacheBuilder::LoadedMachO>& couldNotLoadFiles) {

        std::map<std::string, uint64_t> dylibInstallNameMap;
        for (CacheBuilder::InputFile& inputFile : inputFiles) {
            char realerPath[MAXPATHLEN];
            dyld3::closure::LoadedFileInfo loadedFileInfo = dyld3::MachOAnalyzer::load(inputFile.diag, fileSystem, inputFile.path, reqArchs, reqPlatform, realerPath);
            const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)loadedFileInfo.fileContent;
            if (ma == nullptr) {
                couldNotLoadFiles.emplace_back((CacheBuilder::LoadedMachO){ DyldSharedCache::MappedMachO(), loadedFileInfo, &inputFile });
                continue;
            }

            DyldSharedCache::MappedMachO mappedFile(inputFile.path, ma, loadedFileInfo.sliceLen, false, false,
                                                    loadedFileInfo.sliceOffset, loadedFileInfo.mtime, loadedFileInfo.inode);

            // The file can be loaded with the given slice, but we may still want to exclude it from the cache.
            if (ma->isDylib()) {
                std::string installName = ma->installName();

                // Let the platform exclude the file before we do anything else.
                if (platformExcludesInstallName(installName)) {
                    inputFile.diag.verbose("Platform excluded file\n");
                    fileSystem.unloadFile(loadedFileInfo);
                    continue;
                }

                if (!ma->canBePlacedInDyldCache(inputFile.path, ^(const char* msg) {
                    inputFile.diag.warning("Dylib located at '%s' cannot be placed in cache because: %s", inputFile.path, msg);
                })) {
                    // TODO: Add exclusion lists here?
                    // Probably not as we already applied the dylib exclusion list.
                    if (!ma->canHavePrecomputedDlopenClosure(inputFile.path, ^(const char* msg) {
                        inputFile.diag.verbose("Dylib located at '%s' cannot prebuild dlopen closure in cache because: %s", inputFile.path, msg);
                    }) ) {
                        fileSystem.unloadFile(loadedFileInfo);
                        continue;
                    }
                    otherDylibs.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
                    continue;
                }

                // Otherwise see if we have another file with this install name
                auto iteratorAndInserted = dylibInstallNameMap.insert(std::make_pair(installName, dylibsToCache.size()));
                if (iteratorAndInserted.second) {
                    // We inserted the dylib so we haven't seen another with this name.
                    if (installName[0] != '@' && installName != inputFile.path) {
                        inputFile.diag.warning("Dylib located at '%s' has installname '%s'", inputFile.path, installName.c_str());
                    }

                    dylibsToCache.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
                } else {
                    // We didn't insert this one so we've seen it before.
                    CacheBuilder::LoadedMachO& previousLoadedMachO = dylibsToCache[iteratorAndInserted.first->second];
                    inputFile.diag.warning("Multiple dylibs claim installname '%s' ('%s' and '%s')", installName.c_str(), inputFile.path, previousLoadedMachO.mappedFile.runtimePath.c_str());

                    // This is the "Good" one, overwrite
                    if (inputFile.path == installName) {
                        // Unload the old one
                        fileSystem.unloadFile(previousLoadedMachO.loadedFileInfo);

                        // And replace with this one.
                        previousLoadedMachO.mappedFile = mappedFile;
                        previousLoadedMachO.loadedFileInfo = loadedFileInfo;
                    }
                }
            } else if (ma->isBundle()) {
                // TODO: Add exclusion lists here?
                if (!ma->canHavePrecomputedDlopenClosure(inputFile.path, ^(const char* msg) {
                    inputFile.diag.verbose("Dylib located at '%s' cannot prebuild dlopen closure in cache because: %s", inputFile.path, msg);
                }) ) {
                    fileSystem.unloadFile(loadedFileInfo);
                    continue;
                }
                otherDylibs.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
            } else if (ma->isDynamicExecutable()) {
                if (platformExcludesExecutablePath_macOS(inputFile.path)) {
                    inputFile.diag.verbose("Platform excluded file\n");
                    fileSystem.unloadFile(loadedFileInfo);
                    continue;
                }
                executables.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
            } else {
                inputFile.diag.verbose("Unsupported mach file type\n");
                fileSystem.unloadFile(loadedFileInfo);
            }
        }
    }
private:

    static bool platformExcludesInstallName_macOS(const std::string& installName) {
        return false;
    }

    static bool platformExcludesInstallName_iOS(const std::string& installName) {
        if ( installName == "/System/Library/Caches/com.apple.xpc/sdk.dylib" )
            return true;
        if ( installName == "/System/Library/Caches/com.apple.xpcd/xpcd_cache.dylib" )
            return true;
        return false;
    }

    static bool platformExcludesInstallName_tvOS(const std::string& installName) {
        return platformExcludesInstallName_iOS(installName);
    }

    static bool platformExcludesInstallName_watchOS(const std::string& installName) {
        return platformExcludesInstallName_iOS(installName);
    }

    static bool platformExcludesInstallName_bridgeOS(const std::string& installName) {
        return platformExcludesInstallName_iOS(installName);
    }

    // Returns true if the current platform requires that this install name be excluded from the shared cache
    // Note that this overrides any exclusion from anywhere else.
    bool platformExcludesInstallName(const std::string& installName) {
        switch (reqPlatform) {
            case dyld3::Platform::unknown:
                return false;
            case dyld3::Platform::macOS:
                return platformExcludesInstallName_macOS(installName);
            case dyld3::Platform::iOS:
                return platformExcludesInstallName_iOS(installName);
            case dyld3::Platform::tvOS:
                return platformExcludesInstallName_tvOS(installName);
            case dyld3::Platform::watchOS:
                return platformExcludesInstallName_watchOS(installName);
            case dyld3::Platform::bridgeOS:
                return platformExcludesInstallName_bridgeOS(installName);
            case dyld3::Platform::iOSMac:
                return false;
            case dyld3::Platform::iOS_simulator:
                return false;
            case dyld3::Platform::tvOS_simulator:
                return false;
            case dyld3::Platform::watchOS_simulator:
                return false;
            case dyld3::Platform::driverKit:
                return false;
        }
    }

    static bool platformExcludesExecutablePath_macOS(const std::string& path) {
        return false;
    }

    static bool platformExcludesExecutablePath_iOS(const std::string& path) {
        //HACK exclude all launchd and installd variants until we can do something about xpcd_cache.dylib and friends
        if (path == "/sbin/launchd"
            || path == "/usr/local/sbin/launchd.debug"
            || path == "/usr/local/sbin/launchd.development"
            || path == "/usr/libexec/installd") {
            return true;
        }
        return false;
    }

    static bool platformExcludesExecutablePath_tvOS(const std::string& path) {
        return platformExcludesExecutablePath_iOS(path);
    }

    static bool platformExcludesExecutablePath_watchOS(const std::string& path) {
        return platformExcludesExecutablePath_iOS(path);
    }

    static bool platformExcludesExecutablePath_bridgeOS(const std::string& path) {
        return platformExcludesExecutablePath_iOS(path);
    }

    // Returns true if the current platform requires that this path be excluded from the shared cache
    // Note that this overrides any exclusion from anywhere else.
    bool platformExcludesExecutablePath(const std::string& path) {
        switch (reqPlatform) {
            case dyld3::Platform::unknown:
                return false;
            case dyld3::Platform::macOS:
                return platformExcludesExecutablePath_macOS(path);
            case dyld3::Platform::iOS:
                return platformExcludesExecutablePath_iOS(path);
            case dyld3::Platform::tvOS:
                return platformExcludesExecutablePath_tvOS(path);
            case dyld3::Platform::watchOS:
                return platformExcludesExecutablePath_watchOS(path);
            case dyld3::Platform::bridgeOS:
                return platformExcludesExecutablePath_bridgeOS(path);
            case dyld3::Platform::iOSMac:
                return false;
            case dyld3::Platform::iOS_simulator:
                return false;
            case dyld3::Platform::tvOS_simulator:
                return false;
            case dyld3::Platform::watchOS_simulator:
                return false;
            case dyld3::Platform::driverKit:
                return false;
        }
    }

    const dyld3::closure::FileSystem& fileSystem;
    const dyld3::GradedArchs&         reqArchs;
    dyld3::Platform                   reqPlatform;
};
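
// Typical use (see CacheBuilder::build(std::vector<InputFile>&, ...) below):
// construct one CacheInputBuilder per cache configuration, then call
// loadMachOs() once to partition the raw input files into cacheable dylibs,
// "other" dylibs, executables, and files that could not be loaded.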
static void verifySelfContained(std::vector<CacheBuilder::LoadedMachO>& dylibsToCache,
                                std::vector<CacheBuilder::LoadedMachO>& otherDylibs,
                                std::vector<CacheBuilder::LoadedMachO>& couldNotLoadFiles)
{
    // build map of dylibs
    __block std::map<std::string, const CacheBuilder::LoadedMachO*> knownDylibs;
    __block std::map<std::string, const CacheBuilder::LoadedMachO*> allDylibs;
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        knownDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        allDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        if (const char* installName = dylib.mappedFile.mh->installName()) {
            knownDylibs.insert({ installName, &dylib });
            allDylibs.insert({ installName, &dylib });
        }
    }
    for (const CacheBuilder::LoadedMachO& dylib : otherDylibs) {
        allDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        if (const char* installName = dylib.mappedFile.mh->installName())
            allDylibs.insert({ installName, &dylib });
    }
    for (const CacheBuilder::LoadedMachO& dylib : couldNotLoadFiles) {
        allDylibs.insert({ dylib.inputFile->path, &dylib });
    }

    // check all dependencies to assure every dylib in cache only depends on other dylibs in cache
    __block std::map<std::string, std::set<std::string>> badDylibs;
    __block bool doAgain = true;
    while (doAgain) {
        doAgain = false;
        // scan dylib list making sure all dependents are in dylib list
        for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
            if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                continue;
            dylib.mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                if (isWeak)
                    return;
                if ( knownDylibs.count(loadPath) == 0 ) {
                    badDylibs[dylib.mappedFile.runtimePath].insert(std::string("Could not find dependency '") + loadPath + "'");
                    knownDylibs.erase(dylib.mappedFile.runtimePath);
                    knownDylibs.erase(dylib.mappedFile.mh->installName());
                    doAgain = true;
                }
            });
        }
    }

    // Now walk the dylibs which depend on missing dylibs and see if any of them are required binaries.
    for (auto badDylibsIterator : badDylibs) {
        const std::string& dylibRuntimePath = badDylibsIterator.first;
        auto requiredDylibIterator = allDylibs.find(dylibRuntimePath);
        if (requiredDylibIterator == allDylibs.end())
            continue;
        if (!requiredDylibIterator->second->inputFile->mustBeIncluded())
            continue;
        // This dylib is required so mark all dependencies as required too
        __block std::vector<const CacheBuilder::LoadedMachO*> worklist;
        worklist.push_back(requiredDylibIterator->second);
        while (!worklist.empty()) {
            const CacheBuilder::LoadedMachO* dylib = worklist.back();
            worklist.pop_back();
            if (!dylib->mappedFile.mh)
                continue;
            dylib->mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                auto dylibIterator = allDylibs.find(loadPath);
                if (dylibIterator != allDylibs.end()) {
                    if (dylibIterator->second->inputFile->state == CacheBuilder::InputFile::Unset) {
                        dylibIterator->second->inputFile->state = CacheBuilder::InputFile::MustBeIncludedForDependent;
                        worklist.push_back(dylibIterator->second);
                    }
                }
            });
        }
    }

    // FIXME: Make this an option we can pass in
    const bool evictLeafDylibs = true;
    if (evictLeafDylibs) {
        doAgain = true;
        while (doAgain) {
            doAgain = false;

            // build count of how many references there are to each dylib
            __block std::set<std::string> referencedDylibs;
            for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
                if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                    continue;
                dylib.mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                    referencedDylibs.insert(loadPath);
                });
            }

            // find all dylibs not referenced
            for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
                if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                    continue;
                const char* installName = dylib.mappedFile.mh->installName();
                if ( (referencedDylibs.count(installName) == 0) && (dylib.inputFile->state == CacheBuilder::InputFile::MustBeExcludedIfUnused) ) {
                    badDylibs[dylib.mappedFile.runtimePath].insert(std::string("It has been explicitly excluded as it is unused"));
                    doAgain = true;
                }
            }
        }
    }

    // Move bad dylibs from dylibs to cache to other dylibs.
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        auto i = badDylibs.find(dylib.mappedFile.runtimePath);
        if ( i != badDylibs.end()) {
            otherDylibs.push_back(dylib);
            for (const std::string& reason : i->second)
                otherDylibs.back().inputFile->diag.warning("Dylib located at '%s' not placed in shared cache because: %s", dylib.mappedFile.runtimePath.c_str(), reason.c_str());
        }
    }

    const auto& badDylibsLambdaRef = badDylibs;
    dylibsToCache.erase(std::remove_if(dylibsToCache.begin(), dylibsToCache.end(), [&](const CacheBuilder::LoadedMachO& dylib) {
        if (badDylibsLambdaRef.find(dylib.mappedFile.runtimePath) != badDylibsLambdaRef.end())
            return true;
        return false;
    }), dylibsToCache.end());
}
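
// The while(doAgain) loops above are fixed-point iterations: rejecting one
// dylib can orphan its clients (or leave a MustBeExcludedIfUnused dylib
// unreferenced), so scanning repeats until a full pass makes no new change.
// Each pass only ever adds entries to badDylibs, so termination is guaranteed.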
// This is the new build API which takes the raw files (which could be FAT) and tries to build a cache from them.
// We should remove the other build() method, or make it private so that this can wrap it.
void CacheBuilder::build(std::vector<CacheBuilder::InputFile>& inputFiles,
                         std::vector<DyldSharedCache::FileAlias>& aliases) {
    // First filter down to files which are actually MachO's
    CacheInputBuilder cacheInputBuilder(_fileSystem, *_options.archs, _options.platform);

    std::vector<LoadedMachO> dylibsToCache;
    std::vector<LoadedMachO> otherDylibs;
    std::vector<LoadedMachO> executables;
    std::vector<LoadedMachO> couldNotLoadFiles;
    cacheInputBuilder.loadMachOs(inputFiles, dylibsToCache, otherDylibs, executables, couldNotLoadFiles);

    verifySelfContained(dylibsToCache, otherDylibs, couldNotLoadFiles);

    // Check for required binaries before we try to build the cache
    if (!_diagnostics.hasError()) {
        // If we succeeded in building, then now see if there was a missing required file, and if so why it's missing.
        std::string errorString;
        for (const LoadedMachO& dylib : otherDylibs) {
            if (dylib.inputFile->mustBeIncluded()) {
                // An error loading a required file must be propagated up to the top level diagnostic handler.
                bool gotWarning = false;
                for (const std::string& warning : dylib.inputFile->diag.warnings()) {
                    gotWarning = true;
                    std::string message = warning;
                    if (message.back() == '\n')
                        message.pop_back();
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: " + message + "\n";
                }
                if (!gotWarning) {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: 'unknown error. Please report to dyld'\n";
                }
            }
        }
        for (const LoadedMachO& dylib : couldNotLoadFiles) {
            if (dylib.inputFile->mustBeIncluded()) {
                if (dylib.inputFile->diag.hasError()) {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: " + dylib.inputFile->diag.errorMessage() + "\n";
                } else {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: 'unknown error. Please report to dyld'\n";
                }
            }
        }
        if (!errorString.empty()) {
            _diagnostics.error("%s", errorString.c_str());
        }
    }

    if (!_diagnostics.hasError())
        build(dylibsToCache, otherDylibs, executables, aliases);

    if (!_diagnostics.hasError()) {
        // If we succeeded in building, then now see if there was a missing required file, and if so why it's missing.
        std::string errorString;
        for (CacheBuilder::InputFile& inputFile : inputFiles) {
            if (inputFile.mustBeIncluded() && inputFile.diag.hasError()) {
                // An error loading a required file must be propagated up to the top level diagnostic handler.
                std::string message = inputFile.diag.errorMessage();
                if (message.back() == '\n')
                    message.pop_back();
                errorString += "Required binary was not included in the shared cache '" + std::string(inputFile.path) + "' because: " + message + "\n";
            }
        }
        if (!errorString.empty()) {
            _diagnostics.error("%s", errorString.c_str());
        }
    }

    // Add all the warnings from the input files to the top level warnings on the main diagnostics object.
    for (CacheBuilder::InputFile& inputFile : inputFiles) {
        for (const std::string& warning : inputFile.diag.warnings())
            _diagnostics.warning("%s", warning.c_str());
    }

    // Clean up the loaded files
    for (LoadedMachO& loadedMachO : dylibsToCache)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
    for (LoadedMachO& loadedMachO : otherDylibs)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
    for (LoadedMachO& loadedMachO : executables)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
}
void CacheBuilder::build(const std::vector<DyldSharedCache::MappedMachO>& dylibs,
                         const std::vector<DyldSharedCache::MappedMachO>& otherOsDylibsInput,
                         const std::vector<DyldSharedCache::MappedMachO>& osExecutables,
                         std::vector<DyldSharedCache::FileAlias>& aliases) {

    std::vector<LoadedMachO> dylibsToCache;
    std::vector<LoadedMachO> otherDylibs;
    std::vector<LoadedMachO> executables;

    for (const DyldSharedCache::MappedMachO& mappedMachO : dylibs) {
        dyld3::closure::LoadedFileInfo loadedFileInfo;
        loadedFileInfo.fileContent    = mappedMachO.mh;
        loadedFileInfo.fileContentLen = mappedMachO.length;
        loadedFileInfo.sliceOffset    = mappedMachO.sliceFileOffset;
        loadedFileInfo.sliceLen       = mappedMachO.length;
        loadedFileInfo.inode          = mappedMachO.inode;
        loadedFileInfo.mtime          = mappedMachO.modTime;
        loadedFileInfo.path           = mappedMachO.runtimePath.c_str();
        dylibsToCache.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
    }

    for (const DyldSharedCache::MappedMachO& mappedMachO : otherOsDylibsInput) {
        dyld3::closure::LoadedFileInfo loadedFileInfo;
        loadedFileInfo.fileContent    = mappedMachO.mh;
        loadedFileInfo.fileContentLen = mappedMachO.length;
        loadedFileInfo.sliceOffset    = mappedMachO.sliceFileOffset;
        loadedFileInfo.sliceLen       = mappedMachO.length;
        loadedFileInfo.inode          = mappedMachO.inode;
        loadedFileInfo.mtime          = mappedMachO.modTime;
        loadedFileInfo.path           = mappedMachO.runtimePath.c_str();
        otherDylibs.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
    }

    for (const DyldSharedCache::MappedMachO& mappedMachO : osExecutables) {
        dyld3::closure::LoadedFileInfo loadedFileInfo;
        loadedFileInfo.fileContent    = mappedMachO.mh;
        loadedFileInfo.fileContentLen = mappedMachO.length;
        loadedFileInfo.sliceOffset    = mappedMachO.sliceFileOffset;
        loadedFileInfo.sliceLen       = mappedMachO.length;
        loadedFileInfo.inode          = mappedMachO.inode;
        loadedFileInfo.mtime          = mappedMachO.modTime;
        loadedFileInfo.path           = mappedMachO.runtimePath.c_str();
        executables.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
    }

    build(dylibsToCache, otherDylibs, executables, aliases);
}
void CacheBuilder::build(const std::vector<LoadedMachO>& dylibs,
                         const std::vector<LoadedMachO>& otherOsDylibsInput,
                         const std::vector<LoadedMachO>& osExecutables,
                         std::vector<DyldSharedCache::FileAlias>& aliases)
{
    // <rdar://problem/21317611> error out instead of crash if cache has no dylibs
    // FIXME: plist should specify required vs optional dylibs
    if ( dylibs.size() < 30 ) {
        _diagnostics.error("missing required minimum set of dylibs");
        return;
    }
    uint64_t t1 = mach_absolute_time();

    // make copy of dylib list and sort
    makeSortedDylibs(dylibs, _options.dylibOrdering);

    // allocate space used by largest possible cache plus room for LINKEDITS before optimization
    _allocatedBufferSize = _archLayout->sharedMemorySize * 1.50;
    if ( vm_allocate(mach_task_self(), &_fullAllocatedBuffer, _allocatedBufferSize, VM_FLAGS_ANYWHERE) != 0 ) {
        _diagnostics.error("could not allocate buffer");
        return;
    }

    // assign addresses for each segment of each dylib in new cache
    parseCoalescableSegments();
    processSelectorStrings(osExecutables);
    assignSegmentAddresses();
    std::vector<const LoadedMachO*> overflowDylibs;
    while ( cacheOverflowAmount() != 0 ) {
        if ( !_options.evictLeafDylibsOnOverflow ) {
            _diagnostics.error("cache overflow by %lluMB", cacheOverflowAmount() / 1024 / 1024);
            return;
        }
        size_t evictionCount = evictLeafDylibs(cacheOverflowAmount(), overflowDylibs);
        // re-layout the cache after evictions
        for (DylibInfo& dylib : _sortedDylibs)
            dylib.cacheLocation.clear();
        _coalescedText.clear();
        parseCoalescableSegments();
        processSelectorStrings(osExecutables);
        assignSegmentAddresses();

        _diagnostics.verbose("cache overflow, evicted %lu leaf dylibs\n", evictionCount);
    }
    markPaddingInaccessible();

    // copy all segments into cache
    uint64_t t2 = mach_absolute_time();
    writeCacheHeader();
    copyRawSegments();

    // rebase all dylibs for new location in cache
    uint64_t t3 = mach_absolute_time();
    _aslrTracker.setDataRegion(_readWriteRegion.buffer, _readWriteRegion.sizeInUse);
    if ( !_options.cacheSupportsASLR )
        _aslrTracker.disable();
    adjustAllImagesForNewSegmentLocations();
    if ( _diagnostics.hasError() )
        return;

    // build ImageArray for dyld3, which has side effect of binding all cached dylibs
    uint64_t t4 = mach_absolute_time();
    buildImageArray(aliases);
    if ( _diagnostics.hasError() )
        return;

    // optimize ObjC
    uint64_t t5 = mach_absolute_time();
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    if ( _options.optimizeObjC )
        optimizeObjC();
    if ( _diagnostics.hasError() )
        return;

    // optimize away stubs
    uint64_t t6 = mach_absolute_time();
    if ( _options.optimizeStubs )
        optimizeAwayStubs();

    // FIPS seal corecrypto, This must be done after stub elimination (so that __TEXT,__text is not changed after sealing)
    fipsSign();

    // merge and compact LINKEDIT segments
    uint64_t t7 = mach_absolute_time();
    optimizeLinkedit();

    // copy ImageArray to end of read-only region
    addImageArray();
    if ( _diagnostics.hasError() )
        return;
    uint64_t t8 = mach_absolute_time();

    // don't add dyld3 closures to simulator cache
    if ( !dyld3::MachOFile::isSimulatorPlatform(_options.platform) ) {
        // compute and add dlopen closures for all other dylibs
        addOtherImageArray(otherOsDylibsInput, overflowDylibs);
        if ( _diagnostics.hasError() )
            return;

        // compute and add launch closures to end of read-only region
        addClosures(osExecutables);
        if ( _diagnostics.hasError() )
            return;
    }

    // update final readOnly region size
    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + dyldCache->header.mappingOffset);
    mappings[2].size = _readOnlyRegion.sizeInUse;
    if ( _options.excludeLocalSymbols )
        dyldCache->header.localSymbolsOffset = _readOnlyRegion.cacheFileOffset + _readOnlyRegion.sizeInUse;

    // record max slide now that final size is established
    if ( _archLayout->sharedRegionsAreDiscontiguous ) {
        // special case x86_64 which has three non-contiguous chunks each in their own 1GB regions
        uint64_t maxSlide0 = 0x60000000 - _readExecuteRegion.sizeInUse; // TEXT region has 1.5GB region
        uint64_t maxSlide1 = 0x40000000 - _readWriteRegion.sizeInUse;
        uint64_t maxSlide2 = 0x3FE00000 - _readOnlyRegion.sizeInUse;
        dyldCache->header.maxSlide = std::min(std::min(maxSlide0, maxSlide1), maxSlide2);
    }
    else {
        // <rdar://problem/49852839> branch predictor on arm64 currently only looks at low 32-bits, so don't slide cache more than 2GB
        if ( (_archLayout->sharedMemorySize == 0x100000000) && (_readExecuteRegion.sizeInUse < 0x80000000) )
            dyldCache->header.maxSlide = 0x80000000 - _readExecuteRegion.sizeInUse;
        else
            dyldCache->header.maxSlide = (_archLayout->sharedMemoryStart + _archLayout->sharedMemorySize) - (_readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse);
    }

    uint64_t t9 = mach_absolute_time();

    // fill in slide info at start of region[2]
    // do this last because it modifies pointers in DATA segments
    if ( _options.cacheSupportsASLR ) {
#if SUPPORT_ARCH_arm64e
        if ( strcmp(_archLayout->archName, "arm64e") == 0 )
            writeSlideInfoV3(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
        else
#endif
        if ( _archLayout->is64 )
            writeSlideInfoV2<Pointer64<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
#if SUPPORT_ARCH_arm64_32 || SUPPORT_ARCH_armv7k
        else if ( _archLayout->pointerDeltaMask == 0xC0000000 )
            writeSlideInfoV4<Pointer32<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
#endif
        else
            writeSlideInfoV2<Pointer32<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
    }

    uint64_t t10 = mach_absolute_time();

    // last sanity check on size
    if ( cacheOverflowAmount() != 0 ) {
        _diagnostics.error("cache overflow after optimizations 0x%llX -> 0x%llX", _readExecuteRegion.unslidLoadAddress, _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse);
        return;
    }

    // codesignature is part of file, but is not mapped
    codeSign();
    if ( _diagnostics.hasError() )
        return;

    uint64_t t11 = mach_absolute_time();

    if ( _options.verbose ) {
        fprintf(stderr, "time to layout cache: %ums\n", absolutetime_to_milliseconds(t2-t1));
        fprintf(stderr, "time to copy cached dylibs into buffer: %ums\n", absolutetime_to_milliseconds(t3-t2));
        fprintf(stderr, "time to adjust segments for new split locations: %ums\n", absolutetime_to_milliseconds(t4-t3));
        fprintf(stderr, "time to bind all images: %ums\n", absolutetime_to_milliseconds(t5-t4));
        fprintf(stderr, "time to optimize Objective-C: %ums\n", absolutetime_to_milliseconds(t6-t5));
        fprintf(stderr, "time to do stub elimination: %ums\n", absolutetime_to_milliseconds(t7-t6));
        fprintf(stderr, "time to optimize LINKEDITs: %ums\n", absolutetime_to_milliseconds(t8-t7));
        fprintf(stderr, "time to build %lu closures: %ums\n", osExecutables.size(), absolutetime_to_milliseconds(t9-t8));
        fprintf(stderr, "time to compute slide info: %ums\n", absolutetime_to_milliseconds(t10-t9));
        fprintf(stderr, "time to compute UUID and codesign cache file: %ums\n", absolutetime_to_milliseconds(t11-t10));
    }
}
void CacheBuilder::writeCacheHeader()
{
    // "dyld_v1" + spaces + archName(), with enough spaces to pad to 15 bytes
    std::string magic = "dyld_v1";
    magic.append(15 - magic.length() - strlen(_options.archs->name()), ' ');
    magic.append(_options.archs->name());
    assert(magic.length() == 15);

    // fill in header
    dyld_cache_header* dyldCacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
    memcpy(dyldCacheHeader->magic, magic.c_str(), 16);
    dyldCacheHeader->mappingOffset        = sizeof(dyld_cache_header);
    dyldCacheHeader->mappingCount         = 3;
    dyldCacheHeader->imagesOffset         = (uint32_t)(dyldCacheHeader->mappingOffset + 3*sizeof(dyld_cache_mapping_info));
    dyldCacheHeader->imagesCount          = (uint32_t)_sortedDylibs.size() + _aliasCount;
    dyldCacheHeader->dyldBaseAddress      = 0;
    dyldCacheHeader->codeSignatureOffset  = 0;
    dyldCacheHeader->codeSignatureSize    = 0;
    dyldCacheHeader->slideInfoOffset      = _slideInfoFileOffset;
    dyldCacheHeader->slideInfoSize        = _slideInfoBufferSizeAllocated;
    dyldCacheHeader->localSymbolsOffset   = 0;
    dyldCacheHeader->localSymbolsSize     = 0;
    dyldCacheHeader->cacheType            = _options.optimizeStubs ? kDyldSharedCacheTypeProduction : kDyldSharedCacheTypeDevelopment;
    dyldCacheHeader->accelerateInfoAddr   = 0;
    dyldCacheHeader->accelerateInfoSize   = 0;
    bzero(dyldCacheHeader->uuid, 16);     // overwritten later by recomputeCacheUUID()
    dyldCacheHeader->branchPoolsOffset    = 0;
    dyldCacheHeader->branchPoolsCount     = 0;
    dyldCacheHeader->imagesTextOffset     = dyldCacheHeader->imagesOffset + sizeof(dyld_cache_image_info)*dyldCacheHeader->imagesCount;
    dyldCacheHeader->imagesTextCount      = _sortedDylibs.size();
    dyldCacheHeader->patchInfoAddr        = 0;
    dyldCacheHeader->patchInfoSize        = 0;
    dyldCacheHeader->otherImageGroupAddrUnused = 0;
    dyldCacheHeader->otherImageGroupSizeUnused = 0;
    dyldCacheHeader->progClosuresAddr     = 0;
    dyldCacheHeader->progClosuresSize     = 0;
    dyldCacheHeader->progClosuresTrieAddr = 0;
    dyldCacheHeader->progClosuresTrieSize = 0;
    dyldCacheHeader->platform             = (uint8_t)_options.platform;
    dyldCacheHeader->formatVersion        = dyld3::closure::kFormatVersion;
    dyldCacheHeader->dylibsExpectedOnDisk = !_options.dylibsRemovedDuringMastering;
    dyldCacheHeader->simulator            = _options.forSimulator;
    dyldCacheHeader->locallyBuiltCache    = _options.isLocallyBuiltCache;
    dyldCacheHeader->builtFromChainedFixups = (strcmp(_options.archs->name(), "arm64e") == 0); // FIXME
    dyldCacheHeader->formatVersion        = dyld3::closure::kFormatVersion;
    dyldCacheHeader->sharedRegionStart    = _archLayout->sharedMemoryStart;
    dyldCacheHeader->sharedRegionSize     = _archLayout->sharedMemorySize;

    // fill in mappings
    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + dyldCacheHeader->mappingOffset);
    mappings[0].address    = _readExecuteRegion.unslidLoadAddress;
    mappings[0].fileOffset = 0;
    mappings[0].size       = _readExecuteRegion.sizeInUse;
    mappings[0].maxProt    = VM_PROT_READ | VM_PROT_EXECUTE;
    mappings[0].initProt   = VM_PROT_READ | VM_PROT_EXECUTE;
    mappings[1].address    = _readWriteRegion.unslidLoadAddress;
    mappings[1].fileOffset = _readExecuteRegion.sizeInUse;
    mappings[1].size       = _readWriteRegion.sizeInUse;
    mappings[1].maxProt    = VM_PROT_READ | VM_PROT_WRITE;
    mappings[1].initProt   = VM_PROT_READ | VM_PROT_WRITE;
    mappings[2].address    = _readOnlyRegion.unslidLoadAddress;
    mappings[2].fileOffset = _readExecuteRegion.sizeInUse + _readWriteRegion.sizeInUse;
    mappings[2].size       = _readOnlyRegion.sizeInUse;
    mappings[2].maxProt    = VM_PROT_READ;
    mappings[2].initProt   = VM_PROT_READ;

    // fill in image table
    dyld_cache_image_info* images = (dyld_cache_image_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesOffset);
    for (const DylibInfo& dylib : _sortedDylibs) {
        const char* installName = dylib.input->mappedFile.mh->installName();
        images->address = dylib.cacheLocation[0].dstCacheUnslidAddress;
        if ( _options.dylibsRemovedDuringMastering ) {
            images->modTime = 0;
            images->inode   = pathHash(installName);
        }
        else {
            images->modTime = dylib.input->mappedFile.modTime;
            images->inode   = dylib.input->mappedFile.inode;
        }
        uint32_t installNameOffsetInTEXT = (uint32_t)(installName - (char*)dylib.input->mappedFile.mh);
        images->pathFileOffset = (uint32_t)dylib.cacheLocation[0].dstCacheFileOffset + installNameOffsetInTEXT;
        images++;
    }

    // append aliases image records and strings
/*
    for (auto &dylib : _dylibs) {
        if (!dylib->installNameAliases.empty()) {
            for (const std::string& alias : dylib->installNameAliases) {
                images->set_address(_segmentMap[dylib][0].address);
                if (_manifest.platform() == "osx") {
                    images->modTime = dylib->lastModTime;
                    images->inode = dylib->inode;
                }
                else {
                    images->modTime = 0;
                    images->inode = pathHash(alias.c_str());
                }
                images->pathFileOffset = offset;
                //fprintf(stderr, "adding alias %s for %s\n", alias.c_str(), dylib->installName.c_str());
                ::strcpy((char*)&_buffer[offset], alias.c_str());
                offset += alias.size() + 1;
                images++;
            }
        }
    }
*/

    // calculate start of text image array and trailing string pool
    dyld_cache_image_text_info* textImages = (dyld_cache_image_text_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesTextOffset);
    uint32_t stringOffset = (uint32_t)(dyldCacheHeader->imagesTextOffset + sizeof(dyld_cache_image_text_info) * _sortedDylibs.size());

    // write text image array and image names pool at same time
    for (const DylibInfo& dylib : _sortedDylibs) {
        dylib.input->mappedFile.mh->getUuid(textImages->uuid);
        textImages->loadAddress     = dylib.cacheLocation[0].dstCacheUnslidAddress;
        textImages->textSegmentSize = (uint32_t)dylib.cacheLocation[0].dstCacheSegmentSize;
        textImages->pathOffset      = stringOffset;
        const char* installName = dylib.input->mappedFile.mh->installName();
        ::strcpy((char*)_readExecuteRegion.buffer + stringOffset, installName);
        stringOffset += (uint32_t)strlen(installName)+1;
        textImages++;
    }

    // make sure header did not overflow into first mapped image
    const dyld_cache_image_info* firstImage = (dyld_cache_image_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesOffset);
    assert(stringOffset <= (firstImage->address - mappings[0].address));
}
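
// Resulting magic, for reference: "dyld_v1" is 7 characters, padded with
// spaces to 15 bytes before the arch name is appended, e.g. an arm64 cache
// begins with "dyld_v1   arm64"; the memcpy of 16 bytes then carries the
// std::string's trailing NUL into the header.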
void CacheBuilder::copyRawSegments()
{
    const bool log = false;
    dispatch_apply(_sortedDylibs.size(), DISPATCH_APPLY_AUTO, ^(size_t index) {
        const DylibInfo& dylib = _sortedDylibs[index];
        for (const SegmentMappingInfo& info : dylib.cacheLocation) {
            if (log) fprintf(stderr, "copy %s segment %s (0x%08X bytes) from %p to %p (logical addr 0x%llX) for %s\n",
                             _options.archs->name(), info.segName, info.copySegmentSize, info.srcSegment, info.dstSegment,
                             info.dstCacheUnslidAddress, dylib.input->mappedFile.runtimePath.c_str());
            ::memcpy(info.dstSegment, info.srcSegment, info.copySegmentSize);
        }
    });

    // Copy the coalesced sections
    const uint64_t numCoalescedSections = sizeof(CacheCoalescedText::SupportedSections) / sizeof(*CacheCoalescedText::SupportedSections);
    dispatch_apply(numCoalescedSections, DISPATCH_APPLY_AUTO, ^(size_t index) {
        const CacheCoalescedText::StringSection& cacheStringSection = _coalescedText.getSectionData(CacheCoalescedText::SupportedSections[index]);
        if (log) fprintf(stderr, "copy %s __TEXT_COAL section %s (0x%08X bytes) to %p (logical addr 0x%llX)\n",
                         _options.archs->name(), CacheCoalescedText::SupportedSections[index],
                         cacheStringSection.bufferSize, cacheStringSection.bufferAddr, cacheStringSection.bufferVMAddr);
        for (const auto& stringAndOffset : cacheStringSection.stringsToOffsets)
            ::memcpy(cacheStringSection.bufferAddr + stringAndOffset.second, stringAndOffset.first.data(), stringAndOffset.first.size() + 1);
    });
}
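
// dispatch_apply() is safe in both loops above because each dylib copies into
// a disjoint destination range assigned by assignSegmentAddresses(), and each
// coalesced string lands at a unique precomputed offset, so the parallel
// blocks never write to overlapping memory.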
void CacheBuilder::adjustAllImagesForNewSegmentLocations()
{
    __block std::vector<Diagnostics> diags;
    diags.resize(_sortedDylibs.size());

    if (_options.platform == dyld3::Platform::macOS) {
        dispatch_apply(_sortedDylibs.size(), DISPATCH_APPLY_AUTO, ^(size_t index) {
            const DylibInfo& dylib = _sortedDylibs[index];
            adjustDylibSegments(dylib, diags[index]);
        });
    } else {
        // Note this has to be done in serial because the LOH Tracker isn't thread safe
        for (size_t index = 0; index != _sortedDylibs.size(); ++index) {
            const DylibInfo& dylib = _sortedDylibs[index];
            adjustDylibSegments(dylib, diags[index]);
        }
    }

    for (const Diagnostics& diag : diags) {
        if ( diag.hasError() ) {
            _diagnostics.error("%s", diag.errorMessage().c_str());
            break;
        }
    }
}
void CacheBuilder::processSelectorStrings(const std::vector<LoadedMachO>& executables) {
    const bool log = false;

    _selectorStringsFromExecutables = 0;

    // Don't do this optimisation on watchOS where the shared cache is too small
    if (_options.platform == dyld3::Platform::watchOS)
        return;

    // Get the method name coalesced section as that is where we need to put these strings
    CacheBuilder::CacheCoalescedText::StringSection& cacheStringSection = _coalescedText.getSectionData("__objc_methname");
    for (const LoadedMachO& executable : executables) {
        const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)executable.loadedFileInfo.fileContent;

        uint64_t sizeBeforeProcessing = cacheStringSection.bufferSize;

        ma->forEachObjCMethodName(^(const char* methodName) {
            std::string_view str = methodName;
            auto itAndInserted = cacheStringSection.stringsToOffsets.insert({ str, cacheStringSection.bufferSize });
            if (itAndInserted.second) {
                // If we inserted the string then we need to include it in the total
                cacheStringSection.bufferSize += str.size() + 1;
                // if (log) printf("Selector: %s -> %s\n", ma->installName(), methodName);
                ++_selectorStringsFromExecutables;
            }
        });

        uint64_t sizeAfterProcessing = cacheStringSection.bufferSize;
        if ( log && (sizeBeforeProcessing != sizeAfterProcessing) ) {
            printf("Pulled in % 6lld bytes of selectors from %s\n",
                   sizeAfterProcessing - sizeBeforeProcessing, executable.loadedFileInfo.path);
        }
    }

    _diagnostics.verbose("Pulled in %lld selector strings from executables\n", _selectorStringsFromExecutables);
}
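
// Pulling executables' selector strings into the cache's __objc_methname pool
// lets the objc runtime's selector uniquing resolve app selectors to the
// shared, pre-uniqued copies instead of inserting new entries at launch.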
void CacheBuilder::parseCoalescableSegments() {
    const bool log = false;

    for (DylibInfo& dylib : _sortedDylibs)
        _coalescedText.parseCoalescableText(dylib.input->mappedFile.mh, dylib.textCoalescer);

    if (log) {
        for (const char* section : CacheCoalescedText::SupportedSections) {
            CacheCoalescedText::StringSection& sectionData = _coalescedText.getSectionData(section);
            printf("Coalesced %s from % 10lld -> % 10d, saving % 10lld bytes\n", section,
                   sectionData.bufferSize + sectionData.savedSpace, sectionData.bufferSize, sectionData.savedSpace);
        }
    }
}
1517 void CacheBuilder::assignSegmentAddresses()
1519 // calculate size of header info and where first dylib's mach_header should start
1520 size_t startOffset
= sizeof(dyld_cache_header
) + 3*sizeof(dyld_cache_mapping_info
);
1521 startOffset
+= sizeof(dyld_cache_image_info
) * _sortedDylibs
.size();
1522 startOffset
+= sizeof(dyld_cache_image_text_info
) * _sortedDylibs
.size();
1523 for (const DylibInfo
& dylib
: _sortedDylibs
) {
1524 startOffset
+= (strlen(dylib
.input
->mappedFile
.mh
->installName()) + 1);
1526 //fprintf(stderr, "%s total header size = 0x%08lX\n", _options.archName.c_str(), startOffset);
1527 startOffset
= align(startOffset
, 12);
1529 // assign TEXT segment addresses
1530 _readExecuteRegion
.buffer
= (uint8_t*)_fullAllocatedBuffer
;
1531 _readExecuteRegion
.bufferSize
= 0;
1532 _readExecuteRegion
.sizeInUse
= 0;
1533 _readExecuteRegion
.unslidLoadAddress
= _archLayout
->sharedMemoryStart
;
1534 _readExecuteRegion
.cacheFileOffset
= 0;
1535 __block
uint64_t addr
= _readExecuteRegion
.unslidLoadAddress
+ startOffset
; // header
1536 for (DylibInfo
& dylib
: _sortedDylibs
) {
1537 __block
uint64_t textSegVmAddr
= 0;
1538 dylib
.input
->mappedFile
.mh
->forEachSegment(^(const dyld3::MachOFile::SegmentInfo
& segInfo
, bool& stop
) {
1539 if ( strcmp(segInfo
.segName
, "__TEXT") == 0 )
1540 textSegVmAddr
= segInfo
.vmAddr
;
1541 if ( segInfo
.protections
!= (VM_PROT_READ
| VM_PROT_EXECUTE
) )
1543 // We may have coalesced the sections at the end of this segment. In that case, shrink the segment to remove them.
1544 __block
size_t sizeOfSections
= 0;
1545 __block
bool foundCoalescedSection
= false;
1546 dylib
.input
->mappedFile
.mh
->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo
§Info
, bool malformedSectionRange
, bool &stopSection
) {
1547 if (strcmp(sectInfo
.segInfo
.segName
, segInfo
.segName
) != 0)
1549 if ( dylib
.textCoalescer
.sectionWasCoalesced(sectInfo
.sectName
)) {
1550 foundCoalescedSection
= true;
1552 sizeOfSections
= sectInfo
.sectAddr
+ sectInfo
.sectSize
- segInfo
.vmAddr
;
1555 if (!foundCoalescedSection
)
1556 sizeOfSections
= segInfo
.sizeOfSections
;
1558 // Keep __TEXT segments 4K or more aligned
1559 addr
= align(addr
, std::max((int)segInfo
.p2align
, (int)12));
1560 uint64_t offsetInRegion
= addr
- _readExecuteRegion
.unslidLoadAddress
;
1561 SegmentMappingInfo loc
;
1562 loc
.srcSegment
= (uint8_t*)dylib
.input
->mappedFile
.mh
+ segInfo
.vmAddr
- textSegVmAddr
;
1563 loc
.segName
= segInfo
.segName
;
1564 loc
.dstSegment
= _readExecuteRegion
.buffer
+ offsetInRegion
;
1565 loc
.dstCacheUnslidAddress
= addr
;
1566 loc
.dstCacheFileOffset
= (uint32_t)offsetInRegion
;
1567 loc
.dstCacheSegmentSize
= (uint32_t)align(sizeOfSections
, 12);
1568 loc
.dstCacheFileSize
= (uint32_t)align(sizeOfSections
, 12);
1569 loc
.copySegmentSize
= (uint32_t)sizeOfSections
;
1570 loc
.srcSegmentIndex
= segInfo
.segIndex
;
1571 dylib
.cacheLocation
.push_back(loc
);
1572 addr
+= loc
.dstCacheSegmentSize
;
    // move read-only segments to end of TEXT
    if ( _archLayout->textAndDataMaxSize != 0 ) {
        for (DylibInfo& dylib : _sortedDylibs) {
            __block uint64_t textSegVmAddr = 0;
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                    textSegVmAddr = segInfo.vmAddr;
                if ( segInfo.protections != VM_PROT_READ )
                    return;
                if ( strcmp(segInfo.segName, "__LINKEDIT") == 0 )
                    return;

                // Keep segments 4K or more aligned
                addr = align(addr, std::max((int)segInfo.p2align, (int)12));
                uint64_t offsetInRegion = addr - _readExecuteRegion.unslidLoadAddress;
                SegmentMappingInfo loc;
                loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
                loc.segName               = segInfo.segName;
                loc.dstSegment            = _readExecuteRegion.buffer + offsetInRegion;
                loc.dstCacheUnslidAddress = addr;
                loc.dstCacheFileOffset    = (uint32_t)(_readExecuteRegion.cacheFileOffset + offsetInRegion);
                loc.dstCacheSegmentSize   = (uint32_t)align(segInfo.sizeOfSections, 12);
                loc.dstCacheFileSize      = (uint32_t)segInfo.sizeOfSections;
                loc.copySegmentSize       = (uint32_t)segInfo.sizeOfSections;
                loc.srcSegmentIndex       = segInfo.segIndex;
                dylib.cacheLocation.push_back(loc);
                addr += loc.dstCacheSegmentSize;
            });
        }
    }
    // reserve space for objc optimization tables and deduped strings
    uint64_t objcReadOnlyBufferVMAddr = addr;
    _objcReadOnlyBuffer = _readExecuteRegion.buffer + (addr - _readExecuteRegion.unslidLoadAddress);

    // First the strings as we'll fill in the objc tables later in the optimizer
    for (const char* section : CacheCoalescedText::SupportedSections) {
        CacheCoalescedText::StringSection& cacheStringSection = _coalescedText.getSectionData(section);
        cacheStringSection.bufferAddr   = _readExecuteRegion.buffer + (addr - _readExecuteRegion.unslidLoadAddress);
        cacheStringSection.bufferVMAddr = addr;
        addr += cacheStringSection.bufferSize;
    }

    addr = align(addr, 14);
    _objcReadOnlyBufferSizeUsed = addr - objcReadOnlyBufferVMAddr;

    uint32_t totalSelectorRefCount = (uint32_t)_selectorStringsFromExecutables;
    uint32_t totalClassDefCount    = 0;
    uint32_t totalProtocolDefCount = 0;
    for (DylibInfo& dylib : _sortedDylibs) {
        dyld3::MachOAnalyzer::ObjCInfo info = dylib.input->mappedFile.mh->getObjCInfo();
        totalSelectorRefCount   += info.selRefCount;
        totalClassDefCount      += info.classDefCount;
        totalProtocolDefCount   += info.protocolDefCount;
    }

    // now that the shared cache coalesces all selector strings, use that better count
    uint32_t coalescedSelectorCount = (uint32_t)_coalescedText.objcMethNames.stringsToOffsets.size();
    if ( coalescedSelectorCount > totalSelectorRefCount )
        totalSelectorRefCount = coalescedSelectorCount;
    addr += align(computeReadOnlyObjC(totalSelectorRefCount, totalClassDefCount, totalProtocolDefCount), 14);
    _objcReadOnlyBufferSizeAllocated = addr - objcReadOnlyBufferVMAddr;

    // align TEXT region end
    uint64_t endTextAddress = align(addr, _archLayout->sharedRegionAlignP2);
    _readExecuteRegion.bufferSize = endTextAddress - _readExecuteRegion.unslidLoadAddress;
    _readExecuteRegion.sizeInUse  = _readExecuteRegion.bufferSize;
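    // Note: bufferSize and sizeInUse are kept in sync here; sharedRegionAlignP2 comes from the
    // per-arch layout table (e.g. 14 == 16 KB), so each region ends on a shared-region page.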
    // assign __DATA* addresses
    if ( _archLayout->sharedRegionsAreDiscontiguous )
        addr = _archLayout->sharedMemoryStart + 0x60000000;
    else
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
    _readWriteRegion.buffer            = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
    _readWriteRegion.bufferSize        = 0;
    _readWriteRegion.sizeInUse         = 0;
    _readWriteRegion.unslidLoadAddress = addr;
    _readWriteRegion.cacheFileOffset   = _readExecuteRegion.sizeInUse;
    // layout all __DATA_CONST segments
    __block int dataConstSegmentCount = 0;
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( _options.platform == dyld3::Platform::watchOS_simulator )
                return;
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( strcmp(segInfo.segName, "__DATA_CONST") != 0 )
                return;
            ++dataConstSegmentCount;
            // Pack __DATA_CONST segments
            addr = align(addr, segInfo.p2align);
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readWriteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName               = segInfo.segName;
            loc.dstSegment            = _readWriteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress = addr;
            loc.dstCacheFileOffset    = (uint32_t)(_readWriteRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize   = (uint32_t)segInfo.sizeOfSections;
            loc.dstCacheFileSize      = (uint32_t)copySize;
            loc.copySegmentSize       = (uint32_t)copySize;
            loc.srcSegmentIndex       = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }
    // layout all __DATA segments (and other r/w non-dirty, non-const) segments
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( _options.platform != dyld3::Platform::watchOS_simulator ) {
                if ( strcmp(segInfo.segName, "__DATA_CONST") == 0 )
                    return;
                if ( strcmp(segInfo.segName, "__DATA_DIRTY") == 0 )
                    return;
            }
            bool forcePageAlignedData = false;
            if ( _options.platform == dyld3::Platform::macOS ) {
                forcePageAlignedData = dylib.input->mappedFile.mh->hasUnalignedPointerFixups();
                //if ( forcePageAlignedData )
                //    warning("unaligned pointer in %s\n", dylib.input->mappedFile.runtimePath.c_str());
            }
            if ( (dataConstSegmentCount > 10) && !forcePageAlignedData ) {
                // Pack __DATA segments only if we also have __DATA_CONST segments
                addr = align(addr, segInfo.p2align);
            }
            else {
                // Keep __DATA segments 4K or more aligned
                addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            }
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readWriteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName               = segInfo.segName;
            loc.dstSegment            = _readWriteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress = addr;
            loc.dstCacheFileOffset    = (uint32_t)(_readWriteRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize   = (uint32_t)segInfo.sizeOfSections;
            loc.dstCacheFileSize      = (uint32_t)copySize;
            loc.copySegmentSize       = (uint32_t)copySize;
            loc.srcSegmentIndex       = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }
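    // Packing to each segment's own p2align saves RW pages, but dylibs with unaligned pointer
    // fixups are kept page aligned (the slide-info encodings assume pointers do not straddle
    // page boundaries), and caches with few __DATA_CONST segments stay conservative too.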
    // layout all __DATA_DIRTY segments, sorted (FIXME)
    const size_t dylibCount = _sortedDylibs.size();
    uint32_t dirtyDataSortIndexes[dylibCount];
    for (size_t i=0; i < dylibCount; ++i)
        dirtyDataSortIndexes[i] = (uint32_t)i;
    std::sort(&dirtyDataSortIndexes[0], &dirtyDataSortIndexes[dylibCount], [&](const uint32_t& a, const uint32_t& b) {
        const auto& orderA = _options.dirtyDataSegmentOrdering.find(_sortedDylibs[a].input->mappedFile.runtimePath);
        const auto& orderB = _options.dirtyDataSegmentOrdering.find(_sortedDylibs[b].input->mappedFile.runtimePath);
        bool foundA = (orderA != _options.dirtyDataSegmentOrdering.end());
        bool foundB = (orderB != _options.dirtyDataSegmentOrdering.end());

        // Order all __DATA_DIRTY segments specified in the order file first, in the order specified in the file,
        // followed by any other __DATA_DIRTY segments in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;
        else
            return _sortedDylibs[a].input->mappedFile.runtimePath < _sortedDylibs[b].input->mappedFile.runtimePath;
    });
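    // Grouping dirty data by the order file clusters the most frequently written segments onto
    // the same pages, which reduces how many RW cache pages actually become dirty at runtime.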
    addr = align(addr, 12);
    for (size_t i=0; i < dylibCount; ++i) {
        DylibInfo& dylib = _sortedDylibs[dirtyDataSortIndexes[i]];
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( _options.platform == dyld3::Platform::watchOS_simulator )
                return;
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( strcmp(segInfo.segName, "__DATA_DIRTY") != 0 )
                return;
            // Pack __DATA_DIRTY segments
            addr = align(addr, segInfo.p2align);
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readWriteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName               = segInfo.segName;
            loc.dstSegment            = _readWriteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress = addr;
            loc.dstCacheFileOffset    = (uint32_t)(_readWriteRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize   = (uint32_t)segInfo.sizeOfSections;
            loc.dstCacheFileSize      = (uint32_t)copySize;
            loc.copySegmentSize       = (uint32_t)copySize;
            loc.srcSegmentIndex       = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }
    // reserve space for objc r/w optimization tables
    _objcReadWriteBufferSizeAllocated = align(computeReadWriteObjC((uint32_t)_sortedDylibs.size(), totalProtocolDefCount), 14);
    addr = align(addr, 4); // objc r/w section contains pointers and must be at least pointer aligned
    _objcReadWriteBuffer = _readWriteRegion.buffer + (addr - _readWriteRegion.unslidLoadAddress);
    addr += _objcReadWriteBufferSizeAllocated;

    // align DATA region end
    uint64_t endDataAddress = align(addr, _archLayout->sharedRegionAlignP2);
    _readWriteRegion.bufferSize = endDataAddress - _readWriteRegion.unslidLoadAddress;
    _readWriteRegion.sizeInUse  = _readWriteRegion.bufferSize;
    // start read-only region
    if ( _archLayout->sharedRegionsAreDiscontiguous )
        addr = _archLayout->sharedMemoryStart + 0xA0000000;
    else
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
    _readOnlyRegion.buffer            = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
    _readOnlyRegion.bufferSize        = 0;
    _readOnlyRegion.sizeInUse         = 0;
    _readOnlyRegion.unslidLoadAddress = addr;
    _readOnlyRegion.cacheFileOffset   = _readWriteRegion.cacheFileOffset + _readWriteRegion.sizeInUse;
    // reserve space for kernel ASLR slide info at start of r/o region
    if ( _options.cacheSupportsASLR ) {
        size_t slideInfoSize = sizeof(dyld_cache_slide_info);
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info2));
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info3));
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info4));
        _slideInfoBufferSizeAllocated = align(slideInfoSize + (_readWriteRegion.sizeInUse/4096) * _archLayout->slideInfoBytesPerPage, _archLayout->sharedRegionAlignP2);
        _slideInfoFileOffset = _readOnlyRegion.cacheFileOffset;
        addr += _slideInfoBufferSizeAllocated;
    }
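    // Worst-case reservation: the largest slide-info header any format version might need, plus
    // a fixed per-page budget (slideInfoBytesPerPage) for every 4 KB page of the RW region.
    // The actual size is computed later by writeSlideInfoV*() and recorded in the cache header.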
    // layout all read-only (but not LINKEDIT) segments
    if ( _archLayout->textAndDataMaxSize == 0 ) {
        for (DylibInfo& dylib : _sortedDylibs) {
            __block uint64_t textSegVmAddr = 0;
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                    textSegVmAddr = segInfo.vmAddr;
                if ( segInfo.protections != VM_PROT_READ )
                    return;
                if ( strcmp(segInfo.segName, "__LINKEDIT") == 0 )
                    return;

                // Keep segments 4K or more aligned
                addr = align(addr, std::max((int)segInfo.p2align, (int)12));
                uint64_t offsetInRegion = addr - _readOnlyRegion.unslidLoadAddress;
                SegmentMappingInfo loc;
                loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
                loc.segName               = segInfo.segName;
                loc.dstSegment            = _readOnlyRegion.buffer + offsetInRegion;
                loc.dstCacheUnslidAddress = addr;
                loc.dstCacheFileOffset    = (uint32_t)(_readOnlyRegion.cacheFileOffset + offsetInRegion);
                loc.dstCacheSegmentSize   = (uint32_t)align(segInfo.sizeOfSections, 12);
                loc.dstCacheFileSize      = (uint32_t)segInfo.sizeOfSections;
                loc.copySegmentSize       = (uint32_t)segInfo.sizeOfSections;
                loc.srcSegmentIndex       = segInfo.segIndex;
                dylib.cacheLocation.push_back(loc);
                addr += loc.dstCacheSegmentSize;
            });
        }
    }
    // layout all LINKEDIT segments (after other read-only segments), aligned to 16KB
    addr = align(addr, 14);
    _nonLinkEditReadOnlySize = addr - _readOnlyRegion.unslidLoadAddress;
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != VM_PROT_READ )
                return;
            if ( strcmp(segInfo.segName, "__LINKEDIT") != 0 )
                return;
            // Keep segments 4K or more aligned
            addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readOnlyRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName               = segInfo.segName;
            loc.dstSegment            = _readOnlyRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress = addr;
            loc.dstCacheFileOffset    = (uint32_t)(_readOnlyRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize   = (uint32_t)align(segInfo.sizeOfSections, 12);
            loc.dstCacheFileSize      = (uint32_t)copySize;
            loc.copySegmentSize       = (uint32_t)copySize;
            loc.srcSegmentIndex       = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }
    // align r/o region end
    uint64_t endReadOnlyAddress = align(addr, _archLayout->sharedRegionAlignP2);
    _readOnlyRegion.bufferSize = endReadOnlyAddress - _readOnlyRegion.unslidLoadAddress;
    _readOnlyRegion.sizeInUse  = _readOnlyRegion.bufferSize;

    //fprintf(stderr, "RX region=%p -> %p, logical addr=0x%llX\n", _readExecuteRegion.buffer, _readExecuteRegion.buffer+_readExecuteRegion.bufferSize, _readExecuteRegion.unslidLoadAddress);
    //fprintf(stderr, "RW region=%p -> %p, logical addr=0x%llX\n", _readWriteRegion.buffer, _readWriteRegion.buffer+_readWriteRegion.bufferSize, _readWriteRegion.unslidLoadAddress);
    //fprintf(stderr, "RO region=%p -> %p, logical addr=0x%llX\n", _readOnlyRegion.buffer, _readOnlyRegion.buffer+_readOnlyRegion.bufferSize, _readOnlyRegion.unslidLoadAddress);

    // sort SegmentMappingInfo for each image to be in the same order as original segments
    for (DylibInfo& dylib : _sortedDylibs) {
        std::sort(dylib.cacheLocation.begin(), dylib.cacheLocation.end(), [&](const SegmentMappingInfo& a, const SegmentMappingInfo& b) {
            return a.srcSegmentIndex < b.srcSegmentIndex;
        });
    }
}
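// Note: later passes index each dylib's cacheLocation[] by original segment index, so the
// vector is re-sorted back to load-command order after the region-by-region layout above
// scattered the entries.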
void CacheBuilder::markPaddingInaccessible()
{
    // region between RX and RW
    uint8_t* startPad1 = _readExecuteRegion.buffer+_readExecuteRegion.sizeInUse;
    uint8_t* endPad1   = _readWriteRegion.buffer;
    ::vm_protect(mach_task_self(), (vm_address_t)startPad1, endPad1-startPad1, false, 0);

    // region between RW and RO
    uint8_t* startPad2 = _readWriteRegion.buffer+_readWriteRegion.sizeInUse;
    uint8_t* endPad2   = _readOnlyRegion.buffer;
    ::vm_protect(mach_task_self(), (vm_address_t)startPad2, endPad2-startPad2, false, 0);
}
uint64_t CacheBuilder::pathHash(const char* path)
{
    uint64_t sum = 0;
    for (const char* s=path; *s != '\0'; ++s)
        sum += sum*4 + *s;
    return sum;
}
void CacheBuilder::findDylibAndSegment(const void* contentPtr, std::string& foundDylibName, std::string& foundSegName)
{
    foundDylibName = "???";
    foundSegName   = "???";
    uint64_t unslidVmAddr = ((uint8_t*)contentPtr - _readExecuteRegion.buffer) + _readExecuteRegion.unslidLoadAddress;
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        ((dyld3::MachOLoaded*)mh)->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
            if ( (unslidVmAddr >= info.vmAddr) && (unslidVmAddr < (info.vmAddr+info.vmSize)) ) {
                foundDylibName = installName;
                foundSegName   = info.segName;
                stop           = true;
            }
        });
    });
}
template <typename P>
bool CacheBuilder::makeRebaseChainV2(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyld_cache_slide_info2* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask  = (pint_t)(info->delta_mask);
    const pint_t   valueMask  = ~deltaMask;
    const pint_t   valueAdd   = (pint_t)(info->value_add);
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    const uint32_t maxDelta   = (uint32_t)(deltaMask >> deltaShift);

    pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        _diagnostics.error("rebase pointer (0x%0lX) does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                           (long)lastValue, lastLocationOffset, segName.c_str(), dylibName.c_str());
        return false;
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //fprintf(stderr, "  too big delta = %d, lastOffset=0x%03X, offset=0x%03X\n", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( value == 0 ) {
                // Steal values of 0 to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //warning("   no way to make non-rebase delta chain, terminate off=0x%03X, old value=0x%08lX, new value=0x%08lX", lastLocationOffset, (long)lastValue, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
    }

    // we can make chain.  go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t* prevLoc = (pint_t*)&pageContent[prevOffset];
    for (unsigned n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t* nLoc = (pint_t*)&pageContent[nOffset];
        uint32_t delta2 = nOffset - prevOffset;
        pint_t value = (pint_t)P::getP(*prevLoc);
        pint_t newValue;
        if ( value == 0 )
            newValue = (delta2 << deltaShift);
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc = nLoc;
    }
    uint32_t delta3 = offset - prevOffset;
    pint_t value = (pint_t)P::getP(*prevLoc);
    pint_t newValue;
    if ( value == 0 )
        newValue = (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}
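// Worked example of the v2 chain encoding above: with x86_64's delta_mask of
// 0x00FFFF0000000000, deltaShift = __builtin_ctzll(mask) - 2 = 38, so a pointer's top 16 bits
// hold (delta >> 2), i.e. the 4-byte-granular byte distance to the next fixup, and
// maxDelta = mask >> 38 = 0x3FFFC bytes.  The low bits hold the unslid target (minus value_add
// on arches that use it); a stored delta of 0 terminates the page's chain.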
template <typename P>
void CacheBuilder::addPageStartsV2(uint8_t* pageContent, const bool bitmap[], const dyld_cache_slide_info2* info,
                                   std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask = (pint_t)(info->delta_mask);
    const pint_t   valueMask = ~deltaMask;
    const uint32_t pageSize  = info->page_size;
    const pint_t   valueAdd  = (pint_t)(info->value_add);

    uint16_t startValue = DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;
    for (uint32_t i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChainV2<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras > 0x3FFF ) {
                        _diagnostics.error("rebase overflow in v2 page extras");
                        return;
                    }
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset];
        pint_t lastValue = (pint_t)P::getP(*lastLoc);
        pint_t newValue = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE_PAGE_ATTR_END;
    }
    pageStarts.push_back(startValue);
}
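// Each page_starts[] entry is either the word index of the page's first fixup, or (when one
// chain could not cover the whole page) an index into page_extras[] with the EXTRA attribute
// bit set; the extras run holds several chain-start indexes and its last entry carries the
// END attribute bit.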
template <typename P>
void CacheBuilder::writeSlideInfoV2(const bool bitmap[], unsigned dataPageCount)
{
    typedef typename P::uint_t pint_t;
    typedef typename P::E      E;
    const uint32_t pageSize = 4096;

    // fill in fixed info
    assert(_slideInfoFileOffset != 0);
    dyld_cache_slide_info2* info = (dyld_cache_slide_info2*)_readOnlyRegion.buffer;
    info->version    = 2;
    info->page_size  = pageSize;
    info->delta_mask = _archLayout->pointerDeltaMask;
    info->value_add  = _archLayout->useValueAdd ? _archLayout->sharedMemoryStart : 0;

    // set page starts and extras for each page
    std::vector<uint16_t> pageStarts;
    std::vector<uint16_t> pageExtras;
    pageStarts.reserve(dataPageCount);
    uint8_t* pageContent = _readWriteRegion.buffer;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < dataPageCount; ++i) {
        //warning("page[%d]", i);
        addPageStartsV2<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
        if ( _diagnostics.hasError() ) {
            return;
        }
        pageContent += pageSize;
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }

    // fill in computed info
    info->page_starts_offset = sizeof(dyld_cache_slide_info2);
    info->page_starts_count  = (unsigned)pageStarts.size();
    info->page_extras_offset = (unsigned)(sizeof(dyld_cache_slide_info2)+pageStarts.size()*sizeof(uint16_t));
    info->page_extras_count  = (unsigned)pageExtras.size();
    uint16_t* pageStartsBuffer = (uint16_t*)((char*)info + info->page_starts_offset);
    uint16_t* pageExtrasBuffer = (uint16_t*)((char*)info + info->page_extras_offset);
    for (unsigned i=0; i < pageStarts.size(); ++i)
        pageStartsBuffer[i] = pageStarts[i];
    for (unsigned i=0; i < pageExtras.size(); ++i)
        pageExtrasBuffer[i] = pageExtras[i];
    // update header with final size
    uint64_t slideInfoSize = align(info->page_extras_offset + pageExtras.size()*sizeof(uint16_t), _archLayout->sharedRegionAlignP2);
    if ( slideInfoSize > _slideInfoBufferSizeAllocated ) {
        _diagnostics.error("kernel slide info overflow buffer");
    }
    ((dyld_cache_header*)_readExecuteRegion.buffer)->slideInfoSize = slideInfoSize;
    //fprintf(stderr, "pageCount=%u, page_starts_count=%lu, page_extras_count=%lu\n", dataPageCount, pageStarts.size(), pageExtras.size());
}
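// On-disk layout produced above: a dyld_cache_slide_info2 header, immediately followed by
// uint16_t page_starts[page_starts_count], then uint16_t page_extras[page_extras_count].
// These tables are walked at page-in time to slide each DATA page independently.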
#if SUPPORT_ARCH_arm64_32 || SUPPORT_ARCH_armv7k
// fits into int16_t
static bool smallValue(uint64_t value)
{
    uint32_t high = (value & 0xFFFF8000);
    return (high == 0) || (high == 0xFFFF8000);
}
template <typename P>
bool CacheBuilder::makeRebaseChainV4(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyld_cache_slide_info4* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask  = (pint_t)(info->delta_mask);
    const pint_t   valueMask  = ~deltaMask;
    const pint_t   valueAdd   = (pint_t)(info->value_add);
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    const uint32_t maxDelta   = (uint32_t)(deltaMask >> deltaShift);

    pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        _diagnostics.error("rebase pointer does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                           lastLocationOffset, segName.c_str(), dylibName.c_str());
        return false;
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //fprintf(stderr, "  too big delta = %d, lastOffset=0x%03X, offset=0x%03X\n", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( smallValue(value) ) {
                // Steal values of 0 to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //fprintf(stderr, "   no way to make non-rebase delta chain, terminate off=0x%03X, old value=0x%08lX, new value=0x%08lX\n",
            //        lastLocationOffset, (long)lastValue, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
    }

    // we can make chain.  go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t* prevLoc = (pint_t*)&pageContent[prevOffset];
    for (unsigned n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t* nLoc = (pint_t*)&pageContent[nOffset];
        uint32_t delta2 = nOffset - prevOffset;
        pint_t value = (pint_t)P::getP(*prevLoc);
        pint_t newValue;
        if ( smallValue(value) )
            newValue = (value & valueMask) | (delta2 << deltaShift);
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc = nLoc;
    }
    uint32_t delta3 = offset - prevOffset;
    pint_t value = (pint_t)P::getP(*prevLoc);
    pint_t newValue;
    if ( smallValue(value) )
        newValue = (value & valueMask) | (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}
template <typename P>
void CacheBuilder::addPageStartsV4(uint8_t* pageContent, const bool bitmap[], const dyld_cache_slide_info4* info,
                                   std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask = (pint_t)(info->delta_mask);
    const pint_t   valueMask = ~deltaMask;
    const uint32_t pageSize  = info->page_size;
    const pint_t   valueAdd  = (pint_t)(info->value_add);

    uint16_t startValue = DYLD_CACHE_SLIDE4_PAGE_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;
    for (uint32_t i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChainV4<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras >= DYLD_CACHE_SLIDE4_PAGE_INDEX ) {
                        _diagnostics.error("rebase overflow in v4 page extras");
                        return;
                    }
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset];
        pint_t lastValue = (pint_t)P::getP(*lastLoc);
        pint_t newValue = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE4_PAGE_EXTRA_END;
    }
    pageStarts.push_back(startValue);
}
template <typename P>
void CacheBuilder::writeSlideInfoV4(const bool bitmap[], unsigned dataPageCount)
{
    typedef typename P::uint_t pint_t;
    typedef typename P::E      E;
    const uint32_t pageSize = 4096;

    // fill in fixed info
    assert(_slideInfoFileOffset != 0);
    dyld_cache_slide_info4* info = (dyld_cache_slide_info4*)_readOnlyRegion.buffer;
    info->version    = 4;
    info->page_size  = pageSize;
    info->delta_mask = _archLayout->pointerDeltaMask;
    info->value_add  = _archLayout->useValueAdd ? _archLayout->sharedMemoryStart : 0;

    // set page starts and extras for each page
    std::vector<uint16_t> pageStarts;
    std::vector<uint16_t> pageExtras;
    pageStarts.reserve(dataPageCount);
    uint8_t* pageContent = _readWriteRegion.buffer;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < dataPageCount; ++i) {
        addPageStartsV4<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
        if ( _diagnostics.hasError() ) {
            return;
        }
        pageContent += pageSize;
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }
    // fill in computed info
    info->page_starts_offset = sizeof(dyld_cache_slide_info4);
    info->page_starts_count  = (unsigned)pageStarts.size();
    info->page_extras_offset = (unsigned)(sizeof(dyld_cache_slide_info4)+pageStarts.size()*sizeof(uint16_t));
    info->page_extras_count  = (unsigned)pageExtras.size();
    uint16_t* pageStartsBuffer = (uint16_t*)((char*)info + info->page_starts_offset);
    uint16_t* pageExtrasBuffer = (uint16_t*)((char*)info + info->page_extras_offset);
    for (unsigned i=0; i < pageStarts.size(); ++i)
        pageStartsBuffer[i] = pageStarts[i];
    for (unsigned i=0; i < pageExtras.size(); ++i)
        pageExtrasBuffer[i] = pageExtras[i];
    // update header with final size
    uint64_t slideInfoSize = align(info->page_extras_offset + pageExtras.size()*sizeof(uint16_t), _archLayout->sharedRegionAlignP2);
    if ( slideInfoSize > _slideInfoBufferSizeAllocated ) {
        _diagnostics.error("kernel slide info v4 overflow buffer");
    }
    ((dyld_cache_header*)_readExecuteRegion.buffer)->slideInfoSize = slideInfoSize;
    //fprintf(stderr, "pageCount=%u, page_starts_count=%lu, page_extras_count=%lu\n", dataPageCount, pageStarts.size(), pageExtras.size());
}
#endif // SUPPORT_ARCH_arm64_32 || SUPPORT_ARCH_armv7k
#if 0
// Legacy V1 slide-info writer; it appears to predate this builder and references the old
// _buffer/regions API that CacheBuilder no longer has, so it is disabled here.
void CacheBuilder::writeSlideInfoV1()
{
    // build one 128-byte bitmap per page (4096) of DATA
    uint8_t* const dataStart = (uint8_t*)_buffer.get() + regions[1].fileOffset;
    uint8_t* const dataEnd   = dataStart + regions[1].size;
    const long bitmapSize = (dataEnd - dataStart)/(4*8);
    uint8_t* bitmap = (uint8_t*)calloc(bitmapSize, 1);
    for (void* p : _pointersForASLR) {
        if ( (p < dataStart) || ( p > dataEnd) )
            terminate("DATA pointer for sliding, out of range\n");
        long offset = (long)((uint8_t*)p - dataStart);
        if ( (offset % 4) != 0 )
            terminate("pointer not 4-byte aligned in DATA offset 0x%08lX\n", offset);
        long byteIndex = offset / (4*8);
        long bitInByte = (offset % 32) >> 2;
        bitmap[byteIndex] |= (1 << bitInByte);
    }

    // allocate worst case size block of all slide info
    const unsigned entry_size = 4096/(8*4); // 8 bits per byte, possible pointer every 4 bytes.
    const unsigned toc_count  = (unsigned)bitmapSize/entry_size;
    dyld_cache_slide_info* slideInfo = (dyld_cache_slide_info*)((uint8_t*)_buffer + _slideInfoFileOffset);
    slideInfo->version        = 1;
    slideInfo->toc_offset     = sizeof(dyld_cache_slide_info);
    slideInfo->toc_count      = toc_count;
    slideInfo->entries_offset = (slideInfo->toc_offset+2*toc_count+127)&(-128);
    slideInfo->entries_count  = 0;
    slideInfo->entries_size   = entry_size;
    // append each unique entry
    const dyldCacheSlideInfoEntry* bitmapAsEntries = (dyldCacheSlideInfoEntry*)bitmap;
    dyldCacheSlideInfoEntry* const entriesInSlidInfo = (dyldCacheSlideInfoEntry*)((char*)slideInfo+slideInfo->entries_offset());
    int entry_count = 0;
    for (int i=0; i < toc_count; ++i) {
        const dyldCacheSlideInfoEntry* thisEntry = &bitmapAsEntries[i];
        // see if it is same as one already added
        bool found = false;
        for (int j=0; j < entry_count; ++j) {
            if ( memcmp(thisEntry, &entriesInSlidInfo[j], entry_size) == 0 ) {
                slideInfo->set_toc(i, j);
                found = true;
                break;
            }
        }
        if ( !found ) {
            // append to end
            memcpy(&entriesInSlidInfo[entry_count], thisEntry, entry_size);
            slideInfo->set_toc(i, entry_count++);
        }
    }
    slideInfo->entries_count = entry_count;
    ::free((void*)bitmap);

    _buffer.header->slideInfoSize = align(slideInfo->entries_offset + entry_count*entry_size, _archLayout->sharedRegionAlignP2);
}
#endif
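// V1 stored a plain bitmap instead of chains: one bit per possible 4-byte pointer slot, so a
// 4096-byte page needs a 128-byte entry.  Identical page bitmaps are deduplicated through the
// toc[] indirection, which is why entries_count is usually far smaller than toc_count.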
uint16_t CacheBuilder::pageStartV3(uint8_t* pageContent, uint32_t pageSize, const bool bitmap[])
{
    const int maxPerPage = pageSize / 4;
    uint16_t result = DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE;
    dyld3::MachOLoaded::ChainedFixupPointerOnDisk* lastLoc = nullptr;
    for (int i=0; i < maxPerPage; ++i) {
        if ( bitmap[i] ) {
            if ( result == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE ) {
                // found first rebase location in page
                result = i * 4;
            }
            dyld3::MachOLoaded::ChainedFixupPointerOnDisk* loc = (dyld3::MachOLoaded::ChainedFixupPointerOnDisk*)(pageContent + i*4);
            if ( lastLoc != nullptr ) {
                // update chain (original chain may be wrong because of segment packing)
                lastLoc->arm64e.rebase.next = loc - lastLoc;
            }
            lastLoc = loc;
        }
    }
    if ( lastLoc != nullptr ) {
        // mark last one as end of chain
        lastLoc->arm64e.rebase.next = 0;
    }
    return result;
}
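// Note: ChainedFixupPointerOnDisk is an 8-byte union, so the pointer subtraction above yields
// the chain's 'next' field directly in its native 8-byte strides.  For v3, page_starts[] holds
// the byte offset of the first fixup in each page (or the NO_REBASE attribute).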
void CacheBuilder::writeSlideInfoV3(const bool bitmap[], unsigned dataPageCount)
{
    const uint32_t pageSize = 4096;

    // fill in fixed info
    assert(_slideInfoFileOffset != 0);
    dyld_cache_slide_info3* info = (dyld_cache_slide_info3*)_readOnlyRegion.buffer;
    info->version           = 3;
    info->page_size         = pageSize;
    info->page_starts_count = dataPageCount;
    info->auth_value_add    = _archLayout->sharedMemoryStart;

    // fill in per-page starts
    uint8_t* pageContent = _readWriteRegion.buffer;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < dataPageCount; ++i) {
        info->page_starts[i] = pageStartV3(pageContent, pageSize, bitmapForPage);
        pageContent += pageSize;
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }

    // update header with final size
    dyld_cache_header* dyldCacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
    dyldCacheHeader->slideInfoSize = align(__offsetof(dyld_cache_slide_info3, page_starts[dataPageCount]), _archLayout->sharedRegionAlignP2);
    if ( dyldCacheHeader->slideInfoSize > _slideInfoBufferSizeAllocated ) {
        _diagnostics.error("kernel slide info overflow buffer");
    }
}
void CacheBuilder::fipsSign()
{
    // find libcorecrypto.dylib in cache being built
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    __block const dyld3::MachOLoaded* ml = nullptr;
    dyldCache->forEachImage(^(const mach_header* mh, const char* installName) {
        if ( strcmp(installName, "/usr/lib/system/libcorecrypto.dylib") == 0 )
            ml = (dyld3::MachOLoaded*)mh;
    });
    if ( ml == nullptr ) {
        _diagnostics.warning("Could not find libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }

    // find location in libcorecrypto.dylib to store hash of __text section
    uint64_t hashStoreSize;
    const void* hashStoreLocation = ml->findSectionContent("__TEXT", "__fips_hmacs", hashStoreSize);
    if ( hashStoreLocation == nullptr ) {
        _diagnostics.warning("Could not find __TEXT/__fips_hmacs section in libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }
    if ( hashStoreSize != 32 ) {
        _diagnostics.warning("__TEXT/__fips_hmacs section in libcorecrypto.dylib is not 32 bytes in size, skipping FIPS sealing");
        return;
    }

    // compute hmac hash of __text section
    uint64_t textSize;
    const void* textLocation = ml->findSectionContent("__TEXT", "__text", textSize);
    if ( textLocation == nullptr ) {
        _diagnostics.warning("Could not find __TEXT/__text section in libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }
    unsigned char hmac_key = 0;
    CCHmac(kCCHmacAlgSHA256, &hmac_key, 1, textLocation, textSize, (void*)hashStoreLocation); // store hash directly into hashStoreLocation
}
void CacheBuilder::codeSign()
{
    uint8_t  dscHashType;
    uint8_t  dscHashSize;
    uint32_t dscDigestFormat;
    bool     agile = false;

    // select which codesigning hash
    switch (_options.codeSigningDigestMode) {
        case DyldSharedCache::Agile:
            agile = true;
            // Fall through to SHA1, because the main code directory remains SHA1 for compatibility.
            [[clang::fallthrough]];
        case DyldSharedCache::SHA1only:
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
            dscHashType     = CS_HASHTYPE_SHA1;
            dscHashSize     = CS_HASH_SIZE_SHA1;
            dscDigestFormat = kCCDigestSHA1;
#pragma clang diagnostic pop
            break;
        case DyldSharedCache::SHA256only:
            dscHashType     = CS_HASHTYPE_SHA256;
            dscHashSize     = CS_HASH_SIZE_SHA256;
            dscDigestFormat = kCCDigestSHA256;
            break;
        default:
            _diagnostics.error("codeSigningDigestMode has unknown, unexpected value %d, bailing out.",
                               _options.codeSigningDigestMode);
            return;
    }

    std::string cacheIdentifier = "com.apple.dyld.cache.";
    cacheIdentifier += _options.archs->name();
    if ( _options.dylibsRemovedDuringMastering ) {
        if ( _options.optimizeStubs )
            cacheIdentifier += ".release";
        else
            cacheIdentifier += ".development";
    }

    // get pointers into shared cache buffer
    size_t inBbufferSize = _readExecuteRegion.sizeInUse+_readWriteRegion.sizeInUse+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse;

    const uint16_t pageSize = _archLayout->csPageSize;

    // layout code signature contents
    uint32_t blobCount     = agile ? 4 : 3;
    size_t   idSize        = cacheIdentifier.size()+1; // +1 for terminating 0
    uint32_t slotCount     = (uint32_t)((inBbufferSize + pageSize - 1) / pageSize);
    uint32_t xSlotCount    = CSSLOT_REQUIREMENTS;
    size_t   idOffset      = offsetof(CS_CodeDirectory, end_withExecSeg);
    size_t   hashOffset    = idOffset+idSize + dscHashSize*xSlotCount;
    size_t   hash256Offset = idOffset+idSize + CS_HASH_SIZE_SHA256*xSlotCount;
    size_t   cdSize        = hashOffset + (slotCount * dscHashSize);
    size_t   cd256Size     = agile ? hash256Offset + (slotCount * CS_HASH_SIZE_SHA256) : 0;
    size_t   reqsSize      = 12;
    size_t   cmsSize       = sizeof(CS_Blob);
    size_t   cdOffset      = sizeof(CS_SuperBlob) + blobCount*sizeof(CS_BlobIndex);
    size_t   cd256Offset   = cdOffset + cdSize;
    size_t   reqsOffset    = cd256Offset + cd256Size; // equals cdOffset + cdSize if not agile
    size_t   cmsOffset     = reqsOffset + reqsSize;
    size_t   sbSize        = cmsOffset + cmsSize;
    size_t   sigSize       = align(sbSize, 14);       // keep whole cache 16KB aligned
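    // Resulting superblob layout (offsets computed above):
    //   CS_SuperBlob header + blob index
    //   CS_CodeDirectory (SHA-1 or SHA-256, per codeSigningDigestMode)
    //   [alternate SHA-256 CS_CodeDirectory, agile mode only]
    //   CS_RequirementsBlob (empty)
    //   CS_Blob (empty CMS wrapper for the ad-hoc signature)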
    // allocate space for blob
    vm_address_t codeSigAlloc;
    if ( vm_allocate(mach_task_self(), &codeSigAlloc, sigSize, VM_FLAGS_ANYWHERE) != 0 ) {
        _diagnostics.error("could not allocate code signature buffer");
        return;
    }
    _codeSignatureRegion.buffer     = (uint8_t*)codeSigAlloc;
    _codeSignatureRegion.bufferSize = sigSize;
    _codeSignatureRegion.sizeInUse  = sigSize;

    // create overall code signature which is a superblob
    CS_SuperBlob* sb = reinterpret_cast<CS_SuperBlob*>(_codeSignatureRegion.buffer);
    sb->magic           = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
    sb->length          = htonl(sbSize);
    sb->count           = htonl(blobCount);
    sb->index[0].type   = htonl(CSSLOT_CODEDIRECTORY);
    sb->index[0].offset = htonl(cdOffset);
    sb->index[1].type   = htonl(CSSLOT_REQUIREMENTS);
    sb->index[1].offset = htonl(reqsOffset);
    sb->index[2].type   = htonl(CSSLOT_CMS_SIGNATURE);
    sb->index[2].offset = htonl(cmsOffset);
    if ( agile ) {
        sb->index[3].type   = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES + 0);
        sb->index[3].offset = htonl(cd256Offset);
    }

    // fill in empty requirements
    CS_RequirementsBlob* reqs = (CS_RequirementsBlob*)(((char*)sb)+reqsOffset);
    reqs->magic  = htonl(CSMAGIC_REQUIREMENTS);
    reqs->length = htonl(sizeof(CS_RequirementsBlob));
    // initialize fixed fields of Code Directory
    CS_CodeDirectory* cd = (CS_CodeDirectory*)(((char*)sb)+cdOffset);
    cd->magic         = htonl(CSMAGIC_CODEDIRECTORY);
    cd->length        = htonl(cdSize);
    cd->version       = htonl(0x20400);               // supports exec segment
    cd->flags         = htonl(kSecCodeSignatureAdhoc);
    cd->hashOffset    = htonl(hashOffset);
    cd->identOffset   = htonl(idOffset);
    cd->nSpecialSlots = htonl(xSlotCount);
    cd->nCodeSlots    = htonl(slotCount);
    cd->codeLimit     = htonl(inBbufferSize);
    cd->hashSize      = dscHashSize;
    cd->hashType      = dscHashType;
    cd->platform      = 0;                            // not platform binary
    cd->pageSize      = __builtin_ctz(pageSize);      // log2(CS_PAGE_SIZE);
    cd->spare2        = 0;                            // unused (must be zero)
    cd->scatterOffset = 0;                            // not supported anymore
    cd->teamOffset    = 0;                            // no team ID
    cd->spare3        = 0;                            // unused (must be zero)
    cd->codeLimit64   = 0;                            // falls back to codeLimit

    // executable segment info
    cd->execSegBase  = htonll(_readExecuteRegion.cacheFileOffset); // base of TEXT segment
    cd->execSegLimit = htonll(_readExecuteRegion.sizeInUse);       // size of TEXT segment
    cd->execSegFlags = 0;                                          // not a main binary

    // initialize dynamic fields of Code Directory
    strcpy((char*)cd + idOffset, cacheIdentifier.c_str());

    // add special slot hashes
    uint8_t* hashSlot     = (uint8_t*)cd + hashOffset;
    uint8_t* reqsHashSlot = &hashSlot[-CSSLOT_REQUIREMENTS*dscHashSize];
    CCDigest(dscDigestFormat, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHashSlot);
    CS_CodeDirectory* cd256;
    uint8_t* hash256Slot;
    uint8_t* reqsHash256Slot;
    if ( agile ) {
        // Note that the assumption here is that the size up to the hashes is the same as for
        // sha1 code directory, and that they come last, after everything else.
        cd256 = (CS_CodeDirectory*)(((char*)sb)+cd256Offset);
        cd256->magic         = htonl(CSMAGIC_CODEDIRECTORY);
        cd256->length        = htonl(cd256Size);
        cd256->version       = htonl(0x20400);               // supports exec segment
        cd256->flags         = htonl(kSecCodeSignatureAdhoc);
        cd256->hashOffset    = htonl(hash256Offset);
        cd256->identOffset   = htonl(idOffset);
        cd256->nSpecialSlots = htonl(xSlotCount);
        cd256->nCodeSlots    = htonl(slotCount);
        cd256->codeLimit     = htonl(inBbufferSize);
        cd256->hashSize      = CS_HASH_SIZE_SHA256;
        cd256->hashType      = CS_HASHTYPE_SHA256;
        cd256->platform      = 0;                            // not platform binary
        cd256->pageSize      = __builtin_ctz(pageSize);      // log2(CS_PAGE_SIZE);
        cd256->spare2        = 0;                            // unused (must be zero)
        cd256->scatterOffset = 0;                            // not supported anymore
        cd256->teamOffset    = 0;                            // no team ID
        cd256->spare3        = 0;                            // unused (must be zero)
        cd256->codeLimit64   = 0;                            // falls back to codeLimit

        // executable segment info
        cd256->execSegBase  = cd->execSegBase;
        cd256->execSegLimit = cd->execSegLimit;
        cd256->execSegFlags = cd->execSegFlags;

        // initialize dynamic fields of Code Directory
        strcpy((char*)cd256 + idOffset, cacheIdentifier.c_str());

        // add special slot hashes
        hash256Slot     = (uint8_t*)cd256 + hash256Offset;
        reqsHash256Slot = &hash256Slot[-CSSLOT_REQUIREMENTS*CS_HASH_SIZE_SHA256];
        CCDigest(kCCDigestSHA256, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHash256Slot);
    }
    else {
        cd256           = NULL;
        hash256Slot     = NULL;
        reqsHash256Slot = NULL;
    }
    // fill in empty CMS blob for ad-hoc signing
    CS_Blob* cms = (CS_Blob*)(((char*)sb)+cmsOffset);
    cms->magic  = htonl(CSMAGIC_BLOBWRAPPER);
    cms->length = htonl(sizeof(CS_Blob));

    // alter header of cache to record size and location of code signature
    // do this *before* hashing each page
    dyld_cache_header* cache = (dyld_cache_header*)_readExecuteRegion.buffer;
    cache->codeSignatureOffset = inBbufferSize;
    cache->codeSignatureSize   = sigSize;

    const uint32_t rwSlotStart     = (uint32_t)(_readExecuteRegion.sizeInUse / pageSize);
    const uint32_t roSlotStart     = (uint32_t)(rwSlotStart + _readWriteRegion.sizeInUse / pageSize);
    const uint32_t localsSlotStart = (uint32_t)(roSlotStart + _readOnlyRegion.sizeInUse / pageSize);
    auto codeSignPage = ^(size_t i) {
        const uint8_t* code = nullptr;
        // move to correct region
        if ( i < rwSlotStart )
            code = _readExecuteRegion.buffer + (i * pageSize);
        else if ( i >= rwSlotStart && i < roSlotStart )
            code = _readWriteRegion.buffer + ((i - rwSlotStart) * pageSize);
        else if ( i >= roSlotStart && i < localsSlotStart )
            code = _readOnlyRegion.buffer + ((i - roSlotStart) * pageSize);
        else
            code = _localSymbolsRegion.buffer + ((i - localsSlotStart) * pageSize);

        CCDigest(dscDigestFormat, code, pageSize, hashSlot + (i * dscHashSize));

        if ( agile ) {
            CCDigest(kCCDigestSHA256, code, pageSize, hash256Slot + (i * CS_HASH_SIZE_SHA256));
        }
    };
    dispatch_apply(slotCount, DISPATCH_APPLY_AUTO, ^(size_t i) {
        codeSignPage(i);
    });

    // Now that we have a code signature, compute a cache UUID by hashing the code signature blob
    {
        uint8_t* uuidLoc = cache->uuid;
        assert(uuid_is_null(uuidLoc));
        static_assert(offsetof(dyld_cache_header, uuid) / CS_PAGE_SIZE_4K == 0, "uuid is expected in the first page of the cache");
        uint8_t fullDigest[CC_SHA256_DIGEST_LENGTH];
        CC_SHA256((const void*)cd, (unsigned)cdSize, fullDigest);
        memcpy(uuidLoc, fullDigest, 16);
        // <rdar://problem/6723729> uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
        uuidLoc[6] = ( uuidLoc[6] & 0x0F ) | ( 3 << 4 );
        uuidLoc[8] = ( uuidLoc[8] & 0x3F ) | 0x80;
    }
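    // The two byte fix-ups above stamp the RFC 4122 version nibble (byte 6) and variant bits
    // (byte 8, binary 10xxxxxx) onto the truncated SHA-256 digest, so the result is a valid
    // name-based UUID derived deterministically from the code directory.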
    // Now codesign page 0 again, because we modified it by setting uuid in header
    codeSignPage(0);

    // hash of entire code directory (cdHash) uses same hash as each page
    uint8_t fullCdHash[dscHashSize];
    CCDigest(dscDigestFormat, (const uint8_t*)cd, cdSize, fullCdHash);
    // Note: cdHash is defined as first 20 bytes of hash
    memcpy(_cdHashFirst, fullCdHash, 20);
    if ( agile ) {
        uint8_t fullCdHash256[CS_HASH_SIZE_SHA256];
        CCDigest(kCCDigestSHA256, (const uint8_t*)cd256, cd256Size, fullCdHash256);
        // Note: cdHash is defined as first 20 bytes of hash, even for sha256
        memcpy(_cdHashSecond, fullCdHash256, 20);
    }
    else {
        memset(_cdHashSecond, 0, 20);
    }
}
const bool CacheBuilder::agileSignature()
{
    return _options.codeSigningDigestMode == DyldSharedCache::Agile;
}
static const std::string cdHash(uint8_t hash[20])
{
    char buff[48];
    for (int i = 0; i < 20; ++i)
        sprintf(&buff[2*i], "%2.2x", hash[i]);
    return buff;
}
const std::string CacheBuilder::cdHashFirst()
{
    return cdHash(_cdHashFirst);
}

const std::string CacheBuilder::cdHashSecond()
{
    return cdHash(_cdHashSecond);
}

const std::string CacheBuilder::uuid() const
{
    dyld_cache_header* cache = (dyld_cache_header*)_readExecuteRegion.buffer;
    uuid_string_t uuidStr;
    uuid_unparse(cache->uuid, uuidStr);
    return uuidStr;
}
static dyld_cache_patchable_location makePatchLocation(size_t cacheOff, uint64_t ad) {
    int64_t signedAddend = (int64_t)ad;
    assert(((signedAddend << 52) >> 52) == signedAddend);
    dyld_cache_patchable_location patch;
    patch.cacheOffset          = cacheOff;
    patch.addend               = ad;
    patch.authenticated        = 0;
    patch.usesAddressDiversity = 0;
    patch.key                  = 0;
    patch.discriminator        = 0;
    return patch;
}

static dyld_cache_patchable_location makePatchLocation(size_t cacheOff, uint64_t ad,
                                                       dyld3::MachOLoaded::ChainedFixupPointerOnDisk authInfo) {
    int64_t signedAddend = (int64_t)ad;
    assert(((signedAddend << 52) >> 52) == signedAddend);
    dyld_cache_patchable_location patch;
    patch.cacheOffset          = cacheOff;
    patch.addend               = ad;
    patch.authenticated        = authInfo.arm64e.authBind.auth;
    patch.usesAddressDiversity = authInfo.arm64e.authBind.addrDiv;
    patch.key                  = authInfo.arm64e.authBind.key;
    patch.discriminator        = authInfo.arm64e.authBind.diversity;
    return patch;
}
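// The shift-pair assert in both overloads ((a << 52) >> 52 == a) verifies the addend survives
// truncation to a sign-extended 12-bit value, which is presumably the width of the addend
// bitfield in dyld_cache_patchable_location.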
void CacheBuilder::buildImageArray(std::vector<DyldSharedCache::FileAlias>& aliases)
{
    typedef dyld3::closure::ClosureBuilder::CachedDylibInfo CachedDylibInfo;

    // convert STL data structures to simple arrays to pass to makeDyldCacheImageArray()
    __block std::vector<CachedDylibInfo> dylibInfos;
    __block std::unordered_map<dyld3::closure::ImageNum, const dyld3::MachOLoaded*> imageNumToML;
    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        uint64_t mtime;
        uint64_t inode;
        cache->getIndexedImageEntry((uint32_t)dylibInfos.size(), mtime, inode);
        CachedDylibInfo entry;
        entry.fileInfo.fileContent = mh;
        entry.fileInfo.path        = installName;
        entry.fileInfo.sliceOffset = 0;
        entry.fileInfo.inode       = inode;
        entry.fileInfo.mtime       = mtime;
        dylibInfos.push_back(entry);
        imageNumToML[(dyld3::closure::ImageNum)(dylibInfos.size())] = (dyld3::MachOLoaded*)mh;
    });

    // Convert symlinks from STL to simple char pointers.
    std::vector<dyld3::closure::ClosureBuilder::CachedDylibAlias> dylibAliases;
    dylibAliases.reserve(aliases.size());
    for (const auto& alias : aliases)
        dylibAliases.push_back({ alias.realPath.c_str(), alias.aliasPath.c_str() });
2824 dyld3::closure::ClosureBuilder::CacheDylibsBindingHandlers handlers
;
2826 handlers
.chainedBind
= ^(dyld3::closure::ImageNum
, const dyld3::MachOLoaded
* imageLoadAddress
,
2827 const dyld_chained_starts_in_image
* starts
,
2828 const dyld3::Array
<dyld3::closure::Image::ResolvedSymbolTarget
>& targets
,
2829 const dyld3::Array
<dyld3::closure::ClosureBuilder::ResolvedTargetInfo
>& targetInfos
) {
2830 imageLoadAddress
->forEachFixupInAllChains(_diagnostics
, starts
, false, ^(dyld3::MachOLoaded::ChainedFixupPointerOnDisk
* fixupLoc
, const dyld_chained_starts_in_segment
* segInfo
, bool& stop
) {
2831 uint64_t offsetInCache
;
2832 dyld3::closure::Image::ResolvedSymbolTarget target
;
2833 const dyld3::closure::ClosureBuilder::ResolvedTargetInfo
* targetInfo
;
2834 switch (segInfo
->pointer_format
) {
2835 case DYLD_CHAINED_PTR_ARM64E
:
2836 if ( fixupLoc
->arm64e
.bind
.bind
) {
2837 target
= targets
[fixupLoc
->arm64e
.bind
.ordinal
];
2838 targetInfo
= &targetInfos
[fixupLoc
->arm64e
.bind
.ordinal
];
2839 switch ( target
.sharedCache
.kind
) {
2840 case dyld3::closure::Image::ResolvedSymbolTarget::kindSharedCache
:
2841 offsetInCache
= target
.sharedCache
.offset
- targetInfo
->addend
;
2842 _dylibToItsExports
[targetInfo
->foundInDylib
].insert(offsetInCache
);
2843 _exportsToName
[offsetInCache
] = targetInfo
->foundSymbolName
;
2844 if ( fixupLoc
->arm64e
.authBind
.auth
) {
2845 // turn this auth bind into an auth rebase into the cache
2846 fixupLoc
->arm64e
.authRebase
.bind
= 0;
2847 fixupLoc
->arm64e
.authRebase
.target
= target
.sharedCache
.offset
;
2848 _exportsToUses
[offsetInCache
].push_back(makePatchLocation((uint8_t*)fixupLoc
- _readExecuteRegion
.buffer
, targetInfo
->addend
, *fixupLoc
));
2851 // turn this plain bind into an plain rebase into the cache
2852 fixupLoc
->arm64e
.rebase
.bind
= 0;
2853 fixupLoc
->arm64e
.rebase
.target
= _archLayout
->sharedMemoryStart
+ target
.sharedCache
.offset
;
2854 _exportsToUses
[offsetInCache
].push_back(makePatchLocation((uint8_t*)fixupLoc
- _readExecuteRegion
.buffer
, targetInfo
->addend
));
2856 _aslrTracker
.add(fixupLoc
);
2858 case dyld3::closure::Image::ResolvedSymbolTarget::kindAbsolute
:
2859 fixupLoc
->raw64
= target
.absolute
.value
;
2860 // don't record absolute targets for ASLR
2861 // HACK: Split seg may have added a target. Remove it
2862 _aslrTracker
.remove(fixupLoc
);
2863 if ( (targetInfo
->libOrdinal
> 0) && (targetInfo
->libOrdinal
<= (int)(imageLoadAddress
->dependentDylibCount())) ) {
2864 _missingWeakImports
[fixupLoc
] = imageLoadAddress
->dependentDylibLoadPath(targetInfo
->libOrdinal
- 1);
2868 assert(0 && "unsupported ResolvedSymbolTarget kind in dyld cache");
2871 _aslrTracker
.add(fixupLoc
);
2874 case DYLD_CHAINED_PTR_64
:
2875 if ( fixupLoc
->generic64
.bind
.bind
) {
2876 target
= targets
[fixupLoc
->generic64
.bind
.ordinal
];
2877 targetInfo
= &targetInfos
[fixupLoc
->generic64
.bind
.ordinal
];
2878 switch ( target
.sharedCache
.kind
) {
2879 case dyld3::closure::Image::ResolvedSymbolTarget::kindSharedCache
:
2880 offsetInCache
= target
.sharedCache
.offset
- targetInfo
->addend
;
2881 _dylibToItsExports
[targetInfo
->foundInDylib
].insert(offsetInCache
);
2882 _exportsToName
[offsetInCache
] = targetInfo
->foundSymbolName
;
2883 // turn this bind into a rebase into the cache
2884 fixupLoc
->generic64
.rebase
.bind
= 0;
2885 fixupLoc
->generic64
.rebase
.next
= 0; // rechained later
2886 fixupLoc
->generic64
.rebase
.reserved
= 0;
2887 fixupLoc
->generic64
.rebase
.high8
= 0;
2888 fixupLoc
->generic64
.rebase
.target
= target
.sharedCache
.offset
;
2889 _exportsToUses
[offsetInCache
].push_back(makePatchLocation((uint8_t*)fixupLoc
- _readExecuteRegion
.buffer
, targetInfo
->addend
));
2890 _aslrTracker
.add(fixupLoc
);
2892 case dyld3::closure::Image::ResolvedSymbolTarget::kindAbsolute
:
2893 fixupLoc
->raw64
= target
.absolute
.value
;
2894 // don't record absolute targets for ASLR
2895 if ( (targetInfo
->libOrdinal
> 0) && (targetInfo
->libOrdinal
<= (int)(imageLoadAddress
->dependentDylibCount())) ) {
2896 _missingWeakImports
[fixupLoc
] = imageLoadAddress
->dependentDylibLoadPath(targetInfo
->libOrdinal
- 1);
2900 assert(0 && "unsupported ResolvedSymbolTarget kind in dyld cache");
                case DYLD_CHAINED_PTR_32:
                    if ( fixupLoc->generic32.bind.bind ) {
                        target     = targets[fixupLoc->generic32.bind.ordinal];
                        targetInfo = &targetInfos[fixupLoc->generic32.bind.ordinal];
                        switch ( target.sharedCache.kind ) {
                            case dyld3::closure::Image::ResolvedSymbolTarget::kindSharedCache:
                                offsetInCache = target.sharedCache.offset - targetInfo->addend;
                                _dylibToItsExports[targetInfo->foundInDylib].insert(offsetInCache);
                                _exportsToName[offsetInCache] = targetInfo->foundSymbolName;
                                // turn this bind into a rebase into the cache
                                fixupLoc->cache32.next   = 0; // rechained later
                                fixupLoc->cache32.target = (uint32_t)(target.sharedCache.offset);
                                _exportsToUses[offsetInCache].push_back(makePatchLocation((uint8_t*)fixupLoc - _readExecuteRegion.buffer, targetInfo->addend));
                                _aslrTracker.add(fixupLoc);
                                break;
                            case dyld3::closure::Image::ResolvedSymbolTarget::kindAbsolute:
                                fixupLoc->raw32 = (uint32_t)target.absolute.value;
                                // don't record absolute targets for ASLR
                                if ( (targetInfo->libOrdinal > 0) && (targetInfo->libOrdinal <= (int)(imageLoadAddress->dependentDylibCount())) ) {
                                    _missingWeakImports[fixupLoc] = imageLoadAddress->dependentDylibLoadPath(targetInfo->libOrdinal - 1);
                                }
                                break;
                            default:
                                assert(0 && "unsupported ResolvedSymbolTarget kind in dyld cache");
                        }
                    }
                    break;
                default:
                    assert(0 && "unsupported chained bind type");
            }
        });
    };
    handlers.rebase = ^(dyld3::closure::ImageNum imageNum, const dyld3::MachOLoaded* imageToFix, uint32_t runtimeOffset) {
        // record location in aslr tracker so kernel can slide this on page-in
        uint8_t* fixupLoc = (uint8_t*)imageToFix+runtimeOffset;
        _aslrTracker.add(fixupLoc);
    };
    handlers.bind = ^(dyld3::closure::ImageNum imageNum, const dyld3::MachOLoaded* mh,
                      uint32_t runtimeOffset, dyld3::closure::Image::ResolvedSymbolTarget target,
                      const dyld3::closure::ClosureBuilder::ResolvedTargetInfo& targetInfo) {
        uint8_t* fixupLoc = (uint8_t*)mh+runtimeOffset;

        // binder is called a second time for weak_bind info, which we ignore when building cache
        const bool weakDefUseAlreadySet = targetInfo.weakBindCoalese && _aslrTracker.has(fixupLoc);

        // do actual bind that sets pointer in image to unslid target address
        uint64_t offsetInCache;
        switch ( target.sharedCache.kind ) {
            case dyld3::closure::Image::ResolvedSymbolTarget::kindSharedCache:
                offsetInCache = target.sharedCache.offset - targetInfo.addend;
                _dylibToItsExports[targetInfo.foundInDylib].insert(offsetInCache);
                if (targetInfo.isWeakDef)
                    _dylibWeakExports.insert({ targetInfo.foundInDylib, offsetInCache });
                _exportsToUses[offsetInCache].push_back(makePatchLocation(fixupLoc - _readExecuteRegion.buffer, targetInfo.addend));
                _exportsToName[offsetInCache] = targetInfo.foundSymbolName;
                if ( !weakDefUseAlreadySet ) {
                    if ( _archLayout->is64 )
                        *((uint64_t*)fixupLoc) = _archLayout->sharedMemoryStart + target.sharedCache.offset;
                    else
                        *((uint32_t*)fixupLoc) = (uint32_t)(_archLayout->sharedMemoryStart + target.sharedCache.offset);
                    // record location in aslr tracker so kernel can slide this on page-in
                    _aslrTracker.add(fixupLoc);
                }
                break;
            case dyld3::closure::Image::ResolvedSymbolTarget::kindAbsolute:
                if ( _archLayout->is64 )
                    *((uint64_t*)fixupLoc) = target.absolute.value;
                else
                    *((uint32_t*)fixupLoc) = (uint32_t)(target.absolute.value);
                // don't record absolute targets for ASLR
                // HACK: Split seg may have added a target. Remove it
                _aslrTracker.remove(fixupLoc);
                if ( (targetInfo.libOrdinal > 0) && (targetInfo.libOrdinal <= (int)(mh->dependentDylibCount())) ) {
                    _missingWeakImports[fixupLoc] = mh->dependentDylibLoadPath(targetInfo.libOrdinal - 1);
                }
                break;
            default:
                assert(0 && "unsupported ResolvedSymbolTarget kind in dyld cache");
        }
    };
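    // When a weak-def location was already bound (and recorded in _aslrTracker) by an
    // earlier pass, the handler above leaves the pointer untouched and only updates
    // the patch-table bookkeeping, so the first binding wins.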
    // build ImageArray for all dylibs in dyld cache
    dyld3::closure::PathOverrides pathOverrides;
    dyld3::closure::ClosureBuilder cb(dyld3::closure::kFirstDyldCacheImageNum, _fileSystem, cache, false, *_options.archs, pathOverrides,
                                      dyld3::closure::ClosureBuilder::AtPath::none, false, nullptr, _options.platform, &handlers);
    dyld3::Array<CachedDylibInfo> dylibs(&dylibInfos[0], dylibInfos.size(), dylibInfos.size());
    const dyld3::Array<dyld3::closure::ClosureBuilder::CachedDylibAlias> aliasesArray(dylibAliases.data(), dylibAliases.size(), dylibAliases.size());
    _imageArray = cb.makeDyldCacheImageArray(_options.optimizeStubs, dylibs, aliasesArray);
    if ( cb.diagnostics().hasError() ) {
        _diagnostics.error("%s", cb.diagnostics().errorMessage().c_str());
        return;
    }
}
static bool operator==(const dyld_cache_patchable_location& a, const dyld_cache_patchable_location& b) {
    return a.cacheOffset == b.cacheOffset;
}
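// Two patchable locations count as duplicates when they share a cacheOffset;
// addImageArray() below relies on this operator via std::unique() to dedupe each
// export's use list.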
void CacheBuilder::addImageArray()
{
    // build trie of dylib paths
    __block std::vector<DylibIndexTrie::Entry> dylibEntrys;
    _imageArray->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        dylibEntrys.push_back(DylibIndexTrie::Entry(image->path(), DylibIndex(image->imageNum()-1)));
        image->forEachAlias(^(const char* aliasPath, bool& innerStop) {
            dylibEntrys.push_back(DylibIndexTrie::Entry(aliasPath, DylibIndex(image->imageNum()-1)));
        });
    });
    DylibIndexTrie dylibsTrie(dylibEntrys);
    std::vector<uint8_t> trieBytes;
    dylibsTrie.emit(trieBytes);
    while ( (trieBytes.size() % 4) != 0 )
        trieBytes.push_back(0);

    // build set of functions to never stub-eliminate because tools may need to override them
    std::unordered_set<std::string> alwaysGeneratePatch;
    for (const char* const* p=_s_neverStubEliminateSymbols; *p != nullptr; ++p)
        alwaysGeneratePatch.insert(*p);
    // Add the patches for the image array.
    __block uint64_t numPatchImages          = _imageArray->size();
    __block uint64_t numPatchExports         = 0;
    __block uint64_t numPatchLocations       = 0;
    __block uint64_t numPatchExportNameBytes = 0;

    auto needsPatch = [&](bool dylibNeedsPatching, const dyld3::MachOLoaded* mh,
                          CacheOffset offset) -> bool {
        if (dylibNeedsPatching)
            return true;
        if (_dylibWeakExports.find({ mh, offset }) != _dylibWeakExports.end())
            return true;
        const std::string& exportName = _exportsToName[offset];
        return alwaysGeneratePatch.find(exportName) != alwaysGeneratePatch.end();
    };

    std::set<std::string> alwaysPatchDylibs;
    for (const char* const* d = _s_neverStubEliminateDylibs; *d != nullptr; ++d)
        alwaysPatchDylibs.insert(*d);
    // First calculate how much space we need
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        const dyld3::MachOLoaded* ml = (const dyld3::MachOLoaded*)mh;
        const std::set<CacheOffset>& dylibExports = _dylibToItsExports[ml];

        // On a customer cache, only store patch locations for interposable dylibs and weak binding
        bool dylibNeedsPatching = !_options.optimizeStubs || alwaysPatchDylibs.count(installName);

        uint64_t numDylibExports = 0;
        for (CacheOffset exportCacheOffset : dylibExports) {
            if (!needsPatch(dylibNeedsPatching, ml, exportCacheOffset))
                continue;
            std::vector<dyld_cache_patchable_location>& uses = _exportsToUses[exportCacheOffset];
            uses.erase(std::unique(uses.begin(), uses.end()), uses.end());
            numPatchLocations += uses.size();

            std::string exportName = _exportsToName[exportCacheOffset];
            numPatchExportNameBytes += exportName.size() + 1;
            ++numDylibExports;
        }
        numPatchExports += numDylibExports;
    });
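    // Two passes over the same data: the pass above only sizes the tables so the
    // vectors below can be reserved up front; the second pass fills them in.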
    // Now reserve the space
    __block std::vector<dyld_cache_image_patches>      patchImages;
    __block std::vector<dyld_cache_patchable_export>   patchExports;
    __block std::vector<dyld_cache_patchable_location> patchLocations;
    __block std::vector<char>                          patchExportNames;

    patchImages.reserve(numPatchImages);
    patchExports.reserve(numPatchExports);
    patchLocations.reserve(numPatchLocations);
    patchExportNames.reserve(numPatchExportNameBytes);
    // And now fill it with the patch data
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        const dyld3::MachOLoaded* ml = (const dyld3::MachOLoaded*)mh;
        const std::set<CacheOffset>& dylibExports = _dylibToItsExports[ml];

        // On a customer cache, only store patch locations for interposable dylibs and weak binding
        bool dylibNeedsPatching = !_options.optimizeStubs || alwaysPatchDylibs.count(installName);

        // Add the patch image which points in to the exports
        dyld_cache_image_patches patchImage;
        patchImage.patchExportsStartIndex = (uint32_t)patchExports.size();
        patchImage.patchExportsCount      = 0;

        // Then add each export which points to a list of locations and a name
        for (CacheOffset exportCacheOffset : dylibExports) {
            if (!needsPatch(dylibNeedsPatching, ml, exportCacheOffset))
                continue;
            ++patchImage.patchExportsCount;
            std::vector<dyld_cache_patchable_location>& uses = _exportsToUses[exportCacheOffset];

            dyld_cache_patchable_export cacheExport;
            cacheExport.cacheOffsetOfImpl        = (uint32_t)exportCacheOffset;
            cacheExport.patchLocationsStartIndex = (uint32_t)patchLocations.size();
            cacheExport.patchLocationsCount      = (uint32_t)uses.size();
            cacheExport.exportNameOffset         = (uint32_t)patchExportNames.size();
            patchExports.push_back(cacheExport);

            // Now add the list of locations.
            patchLocations.insert(patchLocations.end(), uses.begin(), uses.end());

            // And add the export name
            const std::string& exportName = _exportsToName[exportCacheOffset];
            patchExportNames.insert(patchExportNames.end(), &exportName[0], &exportName[0] + exportName.size() + 1);
        }
        patchImages.push_back(patchImage);
    });

    while ( (patchExportNames.size() % 4) != 0 )
        patchExportNames.push_back('\0');
    uint64_t patchInfoSize = sizeof(dyld_cache_patch_info);
    patchInfoSize += sizeof(dyld_cache_image_patches)      * patchImages.size();
    patchInfoSize += sizeof(dyld_cache_patchable_export)   * patchExports.size();
    patchInfoSize += sizeof(dyld_cache_patchable_location) * patchLocations.size();
    patchInfoSize += patchExportNames.size();
    uint64_t imageArraySize = _imageArray->size();
    size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    if ( (imageArraySize+trieBytes.size()+patchInfoSize) > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold ImageArray and Trie (buffer size=%lldMB, imageArray size=%lldMB, trie size=%luKB, patch size=%lluKB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, imageArraySize/1024/1024, trieBytes.size()/1024, patchInfoSize/1024, freeSpace/1024/1024);
        return;
    }
    // copy into cache and update header
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    dyldCache->header.dylibsImageArrayAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    dyldCache->header.dylibsImageArraySize = imageArraySize;
    dyldCache->header.dylibsTrieAddr       = dyldCache->header.dylibsImageArrayAddr + imageArraySize;
    dyldCache->header.dylibsTrieSize       = trieBytes.size();
    ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, _imageArray, imageArraySize);
    ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse + imageArraySize, &trieBytes[0], trieBytes.size());
    // Also write out the patch info
    dyldCache->header.patchInfoAddr = dyldCache->header.dylibsTrieAddr + dyldCache->header.dylibsTrieSize;
    dyldCache->header.patchInfoSize = patchInfoSize;
    dyld_cache_patch_info patchInfo;
    patchInfo.patchTableArrayAddr     = dyldCache->header.patchInfoAddr + sizeof(dyld_cache_patch_info);
    patchInfo.patchTableArrayCount    = patchImages.size();
    patchInfo.patchExportArrayAddr    = patchInfo.patchTableArrayAddr + (patchInfo.patchTableArrayCount * sizeof(dyld_cache_image_patches));
    patchInfo.patchExportArrayCount   = patchExports.size();
    patchInfo.patchLocationArrayAddr  = patchInfo.patchExportArrayAddr + (patchInfo.patchExportArrayCount * sizeof(dyld_cache_patchable_export));
    patchInfo.patchLocationArrayCount = patchLocations.size();
    patchInfo.patchExportNamesAddr    = patchInfo.patchLocationArrayAddr + (patchInfo.patchLocationArrayCount * sizeof(dyld_cache_patchable_location));
    patchInfo.patchExportNamesSize    = patchExportNames.size();
    ::memcpy(_readOnlyRegion.buffer + dyldCache->header.patchInfoAddr - _readOnlyRegion.unslidLoadAddress,
             &patchInfo, sizeof(dyld_cache_patch_info));
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchTableArrayAddr - _readOnlyRegion.unslidLoadAddress,
             &patchImages[0], sizeof(patchImages[0]) * patchImages.size());
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchExportArrayAddr - _readOnlyRegion.unslidLoadAddress,
             &patchExports[0], sizeof(patchExports[0]) * patchExports.size());
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchLocationArrayAddr - _readOnlyRegion.unslidLoadAddress,
             &patchLocations[0], sizeof(patchLocations[0]) * patchLocations.size());
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchExportNamesAddr - _readOnlyRegion.unslidLoadAddress,
             &patchExportNames[0], patchExportNames.size());

    _readOnlyRegion.sizeInUse += align(imageArraySize+trieBytes.size()+patchInfoSize,14);

    // Free the underlying image array buffer
    _imageArray->deallocate();
}
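
// Illustrative sketch only (not part of the builder): how a consumer could walk the
// patch tables addImageArray() just serialized. The helper name and the rebasing
// lambda are assumptions for illustration; the field names match the structures
// populated above.
#if 0
static void forEachPatchLocationExample(const uint8_t* cacheBuffer, uint64_t unslidLoadAddress,
                                        const dyld_cache_patch_info& info, uint32_t imageIndex)
{
    // translate an unslid cache address into a pointer within a mapped buffer
    auto atAddr = [&](uint64_t addr) { return cacheBuffer + (addr - unslidLoadAddress); };
    const auto* images    = (const dyld_cache_image_patches*)atAddr(info.patchTableArrayAddr);
    const auto* exports   = (const dyld_cache_patchable_export*)atAddr(info.patchExportArrayAddr);
    const auto* locations = (const dyld_cache_patchable_location*)atAddr(info.patchLocationArrayAddr);
    const char* names     = (const char*)atAddr(info.patchExportNamesAddr);
    const dyld_cache_image_patches& image = images[imageIndex];
    for (uint32_t e = 0; e != image.patchExportsCount; ++e) {
        const dyld_cache_patchable_export& exp = exports[image.patchExportsStartIndex + e];
        const char* symbolName = names + exp.exportNameOffset;
        for (uint32_t l = 0; l != exp.patchLocationsCount; ++l) {
            // each location's cacheOffset identifies one pointer in the cache that
            // binds to this export
            const dyld_cache_patchable_location& use = locations[exp.patchLocationsStartIndex + l];
            (void)symbolName; (void)use;
        }
    }
}
#endif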
void CacheBuilder::addOtherImageArray(const std::vector<LoadedMachO>& otherDylibsAndBundles, std::vector<const LoadedMachO*>& overflowDylibs)
{
    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    dyld3::closure::PathOverrides pathOverrides;
    dyld3::closure::FileSystemNull nullFileSystem;
    dyld3::closure::ClosureBuilder cb(dyld3::closure::kFirstOtherOSImageNum, nullFileSystem, cache, false, *_options.archs, pathOverrides,
                                      dyld3::closure::ClosureBuilder::AtPath::none, false, nullptr, _options.platform);
    // make ImageArray for other dylibs and bundles
    STACK_ALLOC_ARRAY(dyld3::closure::LoadedFileInfo, others, otherDylibsAndBundles.size() + overflowDylibs.size());
    for (const LoadedMachO& other : otherDylibsAndBundles) {
        if ( !contains(other.loadedFileInfo.path, "staged_system_apps/") )
            others.push_back(other.loadedFileInfo);
    }

    for (const LoadedMachO* dylib : overflowDylibs) {
        if (dylib->mappedFile.mh->canHavePrecomputedDlopenClosure(dylib->mappedFile.runtimePath.c_str(), ^(const char*) {}) )
            others.push_back(dylib->loadedFileInfo);
    }

    // Sort the others array by name so that it is deterministic
    std::sort(others.begin(), others.end(),
              [](const dyld3::closure::LoadedFileInfo& a, const dyld3::closure::LoadedFileInfo& b) {
                  // Sort mac before iOSMac
                  bool isIOSMacA = strncmp(a.path, "/System/iOSSupport/", 19) == 0;
                  bool isIOSMacB = strncmp(b.path, "/System/iOSSupport/", 19) == 0;
                  if (isIOSMacA != isIOSMacB)
                      return !isIOSMacA;
                  return strcmp(a.path, b.path) < 0;
              });

    const dyld3::closure::ImageArray* otherImageArray = cb.makeOtherDylibsImageArray(others, (uint32_t)_sortedDylibs.size());
    // build trie of paths
    __block std::vector<DylibIndexTrie::Entry> otherEntrys;
    otherImageArray->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        if ( !image->isInvalid() )
            otherEntrys.push_back(DylibIndexTrie::Entry(image->path(), DylibIndex(image->imageNum())));
    });
    DylibIndexTrie dylibsTrie(otherEntrys);
    std::vector<uint8_t> trieBytes;
    dylibsTrie.emit(trieBytes);
    while ( (trieBytes.size() % 4) != 0 )
        trieBytes.push_back(0);

    uint64_t imageArraySize = otherImageArray->size();
    size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    if ( imageArraySize+trieBytes.size() > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold ImageArray and Trie (buffer size=%lldMB, imageArray size=%lldMB, trie size=%luKB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, imageArraySize/1024/1024, trieBytes.size()/1024, freeSpace/1024/1024);
        return;
    }
    // copy into cache and update header
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    dyldCache->header.otherImageArrayAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    dyldCache->header.otherImageArraySize = imageArraySize;
    dyldCache->header.otherTrieAddr       = dyldCache->header.otherImageArrayAddr + imageArraySize;
    dyldCache->header.otherTrieSize       = trieBytes.size();
    ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, otherImageArray, imageArraySize);
    ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse + imageArraySize, &trieBytes[0], trieBytes.size());
    _readOnlyRegion.sizeInUse += align(imageArraySize+trieBytes.size(),14);

    // Free the underlying buffer
    otherImageArray->deallocate();
}
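
// Note: align(size, 14) above rounds up to a 2^14 (16KB) boundary, presumably so the
// next blob written into the read-only region starts on a 16KB-page boundary.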
void CacheBuilder::addClosures(const std::vector<LoadedMachO>& osExecutables)
{
    const DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;

    __block std::vector<Diagnostics> osExecutablesDiags;
    __block std::vector<const dyld3::closure::LaunchClosure*> osExecutablesClosures;
    osExecutablesDiags.resize(osExecutables.size());
    osExecutablesClosures.resize(osExecutables.size());

    dispatch_apply(osExecutables.size(), DISPATCH_APPLY_AUTO, ^(size_t index) {
        const LoadedMachO& loadedMachO = osExecutables[index];
        // don't pre-build closures for staged apps into dyld cache, since they won't run from that location
        if ( startsWith(loadedMachO.mappedFile.runtimePath, "/private/var/staged_system_apps/") ) {
            return;
        }
        dyld3::closure::PathOverrides pathOverrides;
        dyld3::closure::ClosureBuilder builder(dyld3::closure::kFirstLaunchClosureImageNum, _fileSystem, dyldCache, false, *_options.archs, pathOverrides,
                                               dyld3::closure::ClosureBuilder::AtPath::all, false, nullptr, _options.platform, nullptr);
        bool issetuid = false;
        if ( this->_options.platform == dyld3::Platform::macOS || dyld3::MachOFile::isSimulatorPlatform(this->_options.platform) )
            _fileSystem.fileExists(loadedMachO.loadedFileInfo.path, nullptr, nullptr, &issetuid, nullptr);
        const dyld3::closure::LaunchClosure* mainClosure = builder.makeLaunchClosure(loadedMachO.loadedFileInfo, issetuid);
        if ( builder.diagnostics().hasError() ) {
            osExecutablesDiags[index].error("%s", builder.diagnostics().errorMessage().c_str());
        }
        else {
            assert(mainClosure != nullptr);
            osExecutablesClosures[index] = mainClosure;
        }
    });
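
    // The dispatch_apply above builds closures in parallel; the serial loop below
    // merges the per-index diagnostics and closures so the results stay deterministic.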
    std::map<std::string, const dyld3::closure::LaunchClosure*> closures;
    for (uint64_t i = 0, e = osExecutables.size(); i != e; ++i) {
        const LoadedMachO& loadedMachO = osExecutables[i];
        const Diagnostics& diag        = osExecutablesDiags[i];
        if (diag.hasError()) {
            if ( _options.verbose ) {
                _diagnostics.warning("building closure for '%s': %s", loadedMachO.mappedFile.runtimePath.c_str(), diag.errorMessage().c_str());
                for (const std::string& warn : diag.warnings() )
                    _diagnostics.warning("%s", warn.c_str());
            }
            if ( loadedMachO.inputFile && (loadedMachO.inputFile->mustBeIncluded()) ) {
                loadedMachO.inputFile->diag.error("%s", diag.errorMessage().c_str());
            }
        }
        // Note, a closure could be null here if it has a path we skip.
        if (osExecutablesClosures[i] != nullptr)
            closures[loadedMachO.mappedFile.runtimePath] = osExecutablesClosures[i];
    }

    osExecutablesDiags.clear();
    osExecutablesClosures.clear();
    // preflight space needed
    size_t closuresSpace = 0;
    for (const auto& entry : closures) {
        closuresSpace += entry.second->size();
    }
    size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    if ( closuresSpace > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold all closures (buffer size=%lldMB, closures size=%ldMB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, closuresSpace/1024/1024, freeSpace/1024/1024);
        return;
    }
    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->header.progClosuresAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    uint8_t* closuresBase = _readOnlyRegion.buffer + _readOnlyRegion.sizeInUse;
    std::vector<DylibIndexTrie::Entry> closureEntrys;
    uint32_t currentClosureOffset = 0;
    for (const auto& entry : closures) {
        const dyld3::closure::LaunchClosure* closure = entry.second;
        closureEntrys.push_back(DylibIndexTrie::Entry(entry.first, DylibIndex(currentClosureOffset)));
        size_t size = closure->size();
        assert((size % 4) == 0);
        memcpy(closuresBase+currentClosureOffset, closure, size);
        currentClosureOffset += size;
        closure->deallocate();
    }
    cache->header.progClosuresSize = currentClosureOffset;
    _readOnlyRegion.sizeInUse += currentClosureOffset;
    freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    // build trie of indexes into closures list
    DylibIndexTrie closureTrie(closureEntrys);
    std::vector<uint8_t> trieBytes;
    closureTrie.emit(trieBytes);
    while ( (trieBytes.size() % 8) != 0 )
        trieBytes.push_back(0);
    if ( trieBytes.size() > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold all closures trie (buffer size=%lldMB, trie size=%ldMB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, trieBytes.size()/1024/1024, freeSpace/1024/1024);
        return;
    }
    memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, &trieBytes[0], trieBytes.size());
    cache->header.progClosuresTrieAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    cache->header.progClosuresTrieSize = trieBytes.size();
    _readOnlyRegion.sizeInUse += trieBytes.size();
    _readOnlyRegion.sizeInUse = align(_readOnlyRegion.sizeInUse, 14);
}
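
// The trie above reuses DylibIndex to carry each closure's byte offset within the
// progClosures blob, so a path lookup at launch can yield the offset of that
// executable's prebuilt closure.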
bool CacheBuilder::writeCache(void (^cacheSizeCallback)(uint64_t size), bool (^copyCallback)(const uint8_t* src, uint64_t size, uint64_t dstOffset))
{
    const dyld_cache_header*       cacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
    const dyld_cache_mapping_info* mappings    = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + cacheHeader->mappingOffset);
    assert(_readExecuteRegion.sizeInUse       == mappings[0].size);
    assert(_readWriteRegion.sizeInUse         == mappings[1].size);
    assert(_readOnlyRegion.sizeInUse          == mappings[2].size);
    assert(_readExecuteRegion.cacheFileOffset == mappings[0].fileOffset);
    assert(_readWriteRegion.cacheFileOffset   == mappings[1].fileOffset);
    assert(_readOnlyRegion.cacheFileOffset    == mappings[2].fileOffset);
    assert(_codeSignatureRegion.sizeInUse     == cacheHeader->codeSignatureSize);
    assert(cacheHeader->codeSignatureOffset   == mappings[2].fileOffset+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse);
    cacheSizeCallback(_readExecuteRegion.sizeInUse+_readWriteRegion.sizeInUse+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse+_codeSignatureRegion.sizeInUse);
    bool fullyWritten = copyCallback(_readExecuteRegion.buffer, _readExecuteRegion.sizeInUse, mappings[0].fileOffset);
    fullyWritten &= copyCallback(_readWriteRegion.buffer, _readWriteRegion.sizeInUse, mappings[1].fileOffset);
    fullyWritten &= copyCallback(_readOnlyRegion.buffer, _readOnlyRegion.sizeInUse, mappings[2].fileOffset);
    if ( _localSymbolsRegion.sizeInUse != 0 ) {
        assert(cacheHeader->localSymbolsOffset == mappings[2].fileOffset+_readOnlyRegion.sizeInUse);
        fullyWritten &= copyCallback(_localSymbolsRegion.buffer, _localSymbolsRegion.sizeInUse, cacheHeader->localSymbolsOffset);
    }
    fullyWritten &= copyCallback(_codeSignatureRegion.buffer, _codeSignatureRegion.sizeInUse, cacheHeader->codeSignatureOffset);
    return fullyWritten;
}
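
// writeCache() is transport-agnostic: the two block parameters let writeFile()
// stream the regions to a file descriptor and writeBuffer() copy them into a
// malloc'd buffer, as seen below.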
void CacheBuilder::writeFile(const std::string& path)
{
    std::string pathTemplate = path + "-XXXXXX";
    size_t templateLen = strlen(pathTemplate.c_str())+2;
    BLOCK_ACCCESSIBLE_ARRAY(char, pathTemplateSpace, templateLen);
    strlcpy(pathTemplateSpace, pathTemplate.c_str(), templateLen);
    int fd = mkstemp(pathTemplateSpace);
    if ( fd != -1 ) {
        auto cacheSizeCallback = ^(uint64_t size) {
            // if making macOS dyld cache for current OS into standard location
            if ( (_options.platform == dyld3::Platform::macOS) && startsWith(path, MACOSX_DYLD_SHARED_CACHE_DIR) ) {
                // <rdar://48687550> pin cache file to SSD on fusion drives
                apfs_data_pin_location_t where = APFS_PIN_DATA_TO_MAIN;
                ::fsctl(pathTemplateSpace, APFSIOC_PIN_DATA, &where, 0);
            }
            // set final cache file size (may help defragment file)
            ::ftruncate(fd, size);
        };
        auto copyCallback = ^(const uint8_t* src, uint64_t size, uint64_t dstOffset) {
            uint64_t writtenSize = pwrite(fd, src, size, dstOffset);
            return writtenSize == size;
        };
        // <rdar://problem/55370916> TOCTOU: verify path is still a realpath (not changed)
        char tempPath[MAXPATHLEN];
        if ( ::fcntl(fd, F_GETPATH, tempPath) == 0 ) {
            size_t tempPathLen = strlen(tempPath);
            if ( tempPathLen > 7 )
                tempPath[tempPathLen-7] = '\0'; // remove trailing -xxxxxx
            if ( path != tempPath ) {
                _diagnostics.error("output file path changed from: '%s' to: '%s'", path.c_str(), tempPath);
                ::close(fd);
                return;
            }
        }
        else {
            _diagnostics.error("unable to fcntl(fd, F_GETPATH) on output file");
            ::close(fd);
            return;
        }
        bool fullyWritten = writeCache(cacheSizeCallback, copyCallback);
        if ( fullyWritten ) {
            ::fchmod(fd, S_IRUSR|S_IRGRP|S_IROTH); // mkstemp() makes file "rw-------", switch it to "r--r--r--"
            // <rdar://problem/55370916> TOCTOU: verify path is still a realpath (not changed)
            char resolvedPath[PATH_MAX];
            ::realpath(path.c_str(), resolvedPath);
            // Note: if the target cache file does not already exist, realpath() will return NULL, but still fill in the path buffer
            if ( path != resolvedPath ) {
                _diagnostics.error("output file path changed from: '%s' to: '%s'", path.c_str(), resolvedPath);
                ::close(fd);
                return;
            }
            if ( ::rename(pathTemplateSpace, path.c_str()) == 0 ) {
                ::close(fd);
                return;
            }
        }
        else {
            _diagnostics.error("could not write file %s", pathTemplateSpace);
        }
        ::close(fd);
        ::unlink(pathTemplateSpace);
    }
    else {
        _diagnostics.error("could not open file %s", pathTemplateSpace);
    }
}
void CacheBuilder::writeBuffer(uint8_t*& buffer, uint64_t& bufferSize) {
    auto cacheSizeCallback = ^(uint64_t size) {
        buffer     = (uint8_t*)malloc(size);
        bufferSize = size;
    };
    auto copyCallback = ^(const uint8_t* src, uint64_t size, uint64_t dstOffset) {
        memcpy(buffer + dstOffset, src, size);
        return true;
    };
    bool fullyWritten = writeCache(cacheSizeCallback, copyCallback);
    assert(fullyWritten);
}
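
// Ownership note: the malloc'd buffer is handed back through the out-parameters and
// is never released here, so the caller presumably frees it when done.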
void CacheBuilder::writeMapFile(const std::string& path)
{
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    std::string mapContent = cache->mapFile();
    safeSave(mapContent.c_str(), mapContent.size(), path);
}

std::string CacheBuilder::getMapFileBuffer(const std::string& cacheDisposition) const
{
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    return cache->generateJSONMap(cacheDisposition.c_str());
}

void CacheBuilder::forEachCacheDylib(void (^callback)(const std::string& path)) {
    for (const DylibInfo& dylibInfo : _sortedDylibs)
        callback(dylibInfo.runtimePath);
}
CacheBuilder::ASLR_Tracker::~ASLR_Tracker()
{
    if ( _bitmap != nullptr )
        ::free(_bitmap);
}

void CacheBuilder::ASLR_Tracker::setDataRegion(const void* rwRegionStart, size_t rwRegionSize)
{
    _pageCount   = (unsigned)(rwRegionSize+_pageSize-1)/_pageSize;
    _regionStart = (uint8_t*)rwRegionStart;
    _endStart    = (uint8_t*)rwRegionStart + rwRegionSize;
    _bitmap      = (bool*)calloc(_pageCount*(_pageSize/4)*sizeof(bool), 1);
}

void CacheBuilder::ASLR_Tracker::add(void* loc)
{
    if ( !_enabled )
        return;
    uint8_t* p = (uint8_t*)loc;
    assert(p >= _regionStart);
    assert(p < _endStart);
    _bitmap[(p-_regionStart)/4] = true;
}

void CacheBuilder::ASLR_Tracker::remove(void* loc)
{
    if ( !_enabled )
        return;
    uint8_t* p = (uint8_t*)loc;
    assert(p >= _regionStart);
    assert(p < _endStart);
    _bitmap[(p-_regionStart)/4] = false;
}

bool CacheBuilder::ASLR_Tracker::has(void* loc)
{
    if ( !_enabled )
        return true;
    uint8_t* p = (uint8_t*)loc;
    assert(p >= _regionStart);
    assert(p < _endStart);
    return _bitmap[(p-_regionStart)/4];
}
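
// The tracker keeps one bool per 4-byte slot of the read-write region
// (_pageCount * _pageSize/4 entries), so _bitmap[(p - _regionStart)/4] marks whether
// the value at p needs sliding; the 4-byte granularity presumably accommodates
// 32-bit caches, whose pointers are only 4 bytes wide.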
//////////////////////////// DylibTextCoalescer ////////////////////////////////////

bool CacheBuilder::DylibTextCoalescer::sectionWasCoalesced(std::string_view sectionName) const {
    if (sectionName.size() > 16)
        sectionName = sectionName.substr(0, 16);
    std::map<std::string_view, const DylibSectionOffsetToCacheSectionOffset*> supportedSections = {
        { "__objc_classname", &objcClassNames },
        { "__objc_methname", &objcMethNames },
        { "__objc_methtype", &objcMethTypes }
    };
    auto it = supportedSections.find(sectionName);
    if (it == supportedSections.end())
        return false;
    return !it->second->empty();
}

CacheBuilder::DylibTextCoalescer::DylibSectionOffsetToCacheSectionOffset& CacheBuilder::DylibTextCoalescer::getSectionCoalescer(std::string_view sectionName) {
    if (sectionName.size() > 16)
        sectionName = sectionName.substr(0, 16);
    std::map<std::string_view, DylibSectionOffsetToCacheSectionOffset*> supportedSections = {
        { "__objc_classname", &objcClassNames },
        { "__objc_methname", &objcMethNames },
        { "__objc_methtype", &objcMethTypes }
    };
    auto it = supportedSections.find(sectionName);
    assert(it != supportedSections.end());
    return *it->second;
}

const CacheBuilder::DylibTextCoalescer::DylibSectionOffsetToCacheSectionOffset& CacheBuilder::DylibTextCoalescer::getSectionCoalescer(std::string_view sectionName) const {
    if (sectionName.size() > 16)
        sectionName = sectionName.substr(0, 16);
    std::map<std::string_view, const DylibSectionOffsetToCacheSectionOffset*> supportedSections = {
        { "__objc_classname", &objcClassNames },
        { "__objc_methname", &objcMethNames },
        { "__objc_methtype", &objcMethTypes }
    };
    auto it = supportedSections.find(sectionName);
    assert(it != supportedSections.end());
    return *it->second;
}
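
// Section names are truncated to 16 characters before lookup because a Mach-O
// section header stores its name in a fixed 16-byte field, so a name like
// "__objc_classname" (exactly 16 chars) can arrive without a trailing NUL.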
//////////////////////////// CacheCoalescedText ////////////////////////////////////

const char* CacheBuilder::CacheCoalescedText::SupportedSections[] = {
    "__objc_classname",
    "__objc_methname",
    "__objc_methtype",
};

void CacheBuilder::CacheCoalescedText::parseCoalescableText(const dyld3::MachOAnalyzer* ma,
                                                            DylibTextCoalescer& textCoalescer) {
    static const bool log = false;

    // We can only remove sections if we know we have split seg v2 to point to it
    // Otherwise, a PC relative load in the __TEXT segment wouldn't know how to point to the new strings
    // which are no longer in the same segment
    uint32_t splitSegSize = 0;
    const void* splitSegStart = ma->getSplitSeg(splitSegSize);
    if (!splitSegStart)
        return;

    if ((*(const uint8_t*)splitSegStart) != DYLD_CACHE_ADJ_V2_FORMAT)
        return;
    // We can only remove sections from the end of a segment, so cache them all and walk backwards.
    __block std::vector<std::pair<std::string, dyld3::MachOAnalyzer::SectionInfo>> textSectionInfos;
    ma->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stop) {
        if (strcmp(sectInfo.segInfo.segName, "__TEXT") != 0)
            return;
        assert(!malformedSectionRange);
        textSectionInfos.push_back({ sectInfo.sectName, sectInfo });
    });

    const std::set<std::string_view> supportedSections(std::begin(SupportedSections), std::end(SupportedSections));
    int64_t slide = ma->getSlide();

    for (auto sectionInfoIt = textSectionInfos.rbegin(); sectionInfoIt != textSectionInfos.rend(); ++sectionInfoIt) {
        const std::string& sectionName = sectionInfoIt->first;
        const dyld3::MachOAnalyzer::SectionInfo& sectInfo = sectionInfoIt->second;

        // If we find a section we can't handle then stop here. Hopefully we coalesced some from the end.
        if (supportedSections.find(sectionName) == supportedSections.end())
            break;

        StringSection& cacheStringSection = getSectionData(sectionName);

        DylibTextCoalescer::DylibSectionOffsetToCacheSectionOffset& sectionStringData = textCoalescer.getSectionCoalescer(sectionName);

        // Walk the strings in this section
        const uint8_t* content = (uint8_t*)(sectInfo.sectAddr + slide);
        const char* s   = (char*)content;
        const char* end = s + sectInfo.sectSize;
        while ( s < end ) {
            std::string_view str = s;
            auto itAndInserted = cacheStringSection.stringsToOffsets.insert({ str, cacheStringSection.bufferSize });
            if (itAndInserted.second) {
                // If we inserted the string then we need to include it in the total
                cacheStringSection.bufferSize += str.size() + 1;
                if (log)
                    printf("Selector: %s -> %s\n", ma->installName(), s);
            } else {
                // Debugging only. If we didn't include the string then we saved that many bytes
                cacheStringSection.savedSpace += str.size() + 1;
            }

            // Now keep track of this offset in our source dylib as pointing to this offset
            uint32_t sourceSectionOffset = (uint32_t)((uint64_t)s - (uint64_t)content);
            uint32_t cacheSectionOffset  = itAndInserted.first->second;
            sectionStringData[sourceSectionOffset] = cacheSectionOffset;
            s += str.size() + 1;
        }
    }
}
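
// The per-dylib DylibSectionOffsetToCacheSectionOffset maps built above let later
// passes translate a string's old offset within its dylib section into its offset
// within the coalesced cache section (roughly: newOffset = map[oldOffset]).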
void CacheBuilder::CacheCoalescedText::clear() {
    *this = CacheBuilder::CacheCoalescedText();
}

CacheBuilder::CacheCoalescedText::StringSection& CacheBuilder::CacheCoalescedText::getSectionData(std::string_view sectionName) {
    if (sectionName.size() > 16)
        sectionName = sectionName.substr(0, 16);
    std::map<std::string_view, StringSection*> supportedSections = {
        { "__objc_classname", &objcClassNames },
        { "__objc_methname", &objcMethNames },
        { "__objc_methtype", &objcMethTypes }
    };
    auto it = supportedSections.find(sectionName);
    assert(it != supportedSections.end());
    return *it->second;
}

const CacheBuilder::CacheCoalescedText::StringSection& CacheBuilder::CacheCoalescedText::getSectionData(std::string_view sectionName) const {
    if (sectionName.size() > 16)
        sectionName = sectionName.substr(0, 16);
    std::map<std::string_view, const StringSection*> supportedSections = {
        { "__objc_classname", &objcClassNames },
        { "__objc_methname", &objcMethNames },
        { "__objc_methtype", &objcMethTypes }
    };
    auto it = supportedSections.find(sectionName);
    assert(it != supportedSections.end());
    return *it->second;
}