/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <sys/types.h>

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/mach_time.h>
#include <mach/shared_region.h>
#include <apfs/apfs_fsctl.h>

#include <CommonCrypto/CommonHMAC.h>
#include <CommonCrypto/CommonDigest.h>
#include <CommonCrypto/CommonDigestSPI.h>

#include "ClosureBuilder.h"
#include "ClosureFileSystemNull.h"
#include "CodeSigningTypes.h"
#include "MachOFileAbstraction.hpp"
#include "SharedCacheBuilder.h"
#include "FileUtils.h"
#include "StringUtils.h"

#if __has_include("dyld_cache_config.h")
    #include "dyld_cache_config.h"
#else
    #define ARM_SHARED_REGION_START      0x1A000000ULL
    #define ARM_SHARED_REGION_SIZE       0x26000000ULL
    #define ARM64_SHARED_REGION_START    0x180000000ULL
    #define ARM64_SHARED_REGION_SIZE     0x100000000ULL
#endif

#ifndef ARM64_32_SHARED_REGION_START
    #define ARM64_32_SHARED_REGION_START 0x1A000000ULL
    #define ARM64_32_SHARED_REGION_SIZE  0x26000000ULL
#endif

#if ARM_SHARED_REGION_SIZE > 0x26000000ULL
  #define ARMV7K_CHAIN_BITS  0xC0000000
  #define ARMV7K_MAX         0x0
#else
  #define ARMV7K_CHAIN_BITS  0xE0000000
  #define ARMV7K_MAX         0x20000000
#endif

const SharedCacheBuilder::ArchLayout SharedCacheBuilder::_s_archLayout[] = {
    { 0x7FFF20000000ULL,            0xEFE00000ULL,               0x0,        0x40000000, 0x00FFFF0000000000, "x86_64",   CS_PAGE_SIZE_4K,  12, 2, true,  true,  true  },
    { 0x7FFF20000000ULL,            0xEFE00000ULL,               0x0,        0x40000000, 0x00FFFF0000000000, "x86_64h",  CS_PAGE_SIZE_4K,  12, 2, true,  true,  true  },
    { SHARED_REGION_BASE_I386,      SHARED_REGION_SIZE_I386,     0x0,        0x00200000, 0x0,                "i386",     CS_PAGE_SIZE_4K,  12, 0, false, false, true  },
    { ARM64_SHARED_REGION_START,    ARM64_SHARED_REGION_SIZE,    0x0,        0x02000000, 0x00FFFF0000000000, "arm64",    CS_PAGE_SIZE_4K,  14, 2, false, true,  false },
#if SUPPORT_ARCH_arm64e
    { ARM64_SHARED_REGION_START,    ARM64_SHARED_REGION_SIZE,    0x0,        0x02000000, 0x00FFFF0000000000, "arm64e",   CS_PAGE_SIZE_16K, 14, 2, false, true,  false },
#endif
#if SUPPORT_ARCH_arm64_32
    { ARM64_32_SHARED_REGION_START, ARM64_32_SHARED_REGION_SIZE, 0x0,        0x02000000, 0xC0000000,         "arm64_32", CS_PAGE_SIZE_16K, 14, 6, false, false, true  },
#endif
    { ARM_SHARED_REGION_START,      ARM_SHARED_REGION_SIZE,      0x0,        0x02000000, 0xE0000000,         "armv7s",   CS_PAGE_SIZE_4K,  14, 4, false, false, true  },
    { ARM_SHARED_REGION_START,      ARM_SHARED_REGION_SIZE,      ARMV7K_MAX, 0x00400000, ARMV7K_CHAIN_BITS,  "armv7k",   CS_PAGE_SIZE_4K,  14, 4, false, false, true  },
    { 0x40000000,                   0x40000000,                  0x0,        0x02000000, 0x0,                "sim-x86",  CS_PAGE_SIZE_4K,  14, 0, false, false, true  }
};
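
// Column legend for _s_archLayout, inferred from how the fields are read later
// in this file (the authoritative declaration is ArchLayout in
// SharedCacheBuilder.h): shared-region start (sharedMemoryStart), region size
// (sharedMemorySize), TEXT+DATA cap used by armv7k (textAndDataMaxSize),
// inter-region padding (sharedRegionPadding), the pointer-delta mask consulted
// when choosing slide-info formats (pointerDeltaMask), arch name, code-sign
// page size, region alignment (sharedRegionAlignP2), then flags including
// sharedRegionsAreDiscontiguous and is64.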

// These are dylibs that may be interposed, so stubs calling into them should never be bypassed
const char* const SharedCacheBuilder::_s_neverStubEliminateDylibs[] = {
    "/usr/lib/system/libdispatch.dylib",
};

// These are functions that are interposed by Instruments.app or ASan
const char* const SharedCacheBuilder::_s_neverStubEliminateSymbols[] = {
    "__objc_autoreleasePoolPop",
    "_dispatch_barrier_async_f",
    "_dispatch_group_async",
    "_dispatch_group_async_f",
    "_dispatch_source_set_cancel_handler",
    "_dispatch_source_set_event_handler",
    "_malloc_create_zone",
    "_malloc_default_purgeable_zone",
    "_malloc_default_zone",
    "_malloc_make_nonpurgeable",
    "_malloc_make_purgeable",
    "_malloc_set_zone_name",
    "_objc_autoreleasePoolPop",
    "_objc_setProperty_atomic",
    "_objc_setProperty_atomic_copy",
    "_objc_setProperty_nonatomic",
    "_objc_setProperty_nonatomic_copy",
    "_pthread_attr_getdetachstate",
    "_pthread_attr_getguardsize",
    "_pthread_attr_getinheritsched",
    "_pthread_attr_getschedparam",
    "_pthread_attr_getschedpolicy",
    "_pthread_attr_getscope",
    "_pthread_attr_getstack",
    "_pthread_attr_getstacksize",
    "_pthread_condattr_getpshared",
    "_pthread_getschedparam",
    "_pthread_mutex_lock",
    "_pthread_mutex_unlock",
    "_pthread_mutexattr_getprioceiling",
    "_pthread_mutexattr_getprotocol",
    "_pthread_mutexattr_getpshared",
    "_pthread_mutexattr_gettype",
    "_pthread_rwlockattr_getpshared",
    // <rdar://problem/22050956> always use stubs for C++ symbols that can be overridden
};

inline uint32_t absolutetime_to_milliseconds(uint64_t abstime)
{
    return (uint32_t)(abstime/1000/1000);
}
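
// Note: the division above assumes mach_absolute_time() ticks are nanoseconds
// (timebase numer/denom == 1/1), which holds on the Intel machines this tool
// runs on. A host-independent sketch would scale by mach_timebase_info():
//
//     mach_timebase_info_data_t tb;
//     mach_timebase_info(&tb);
//     uint64_t ns = abstime * tb.numer / tb.denom;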

// Handles building a list of input files to the SharedCacheBuilder itself.
class CacheInputBuilder {
public:
    CacheInputBuilder(const dyld3::closure::FileSystem& fileSystem,
                      const dyld3::GradedArchs& archs, dyld3::Platform reqPlatform)
        : fileSystem(fileSystem), reqArchs(archs), reqPlatform(reqPlatform) { }

    // Loads and maps any MachOs in the given list of files.
    void loadMachOs(std::vector<CacheBuilder::InputFile>& inputFiles,
                    std::vector<CacheBuilder::LoadedMachO>& dylibsToCache,
                    std::vector<CacheBuilder::LoadedMachO>& otherDylibs,
                    std::vector<CacheBuilder::LoadedMachO>& executables,
                    std::vector<CacheBuilder::LoadedMachO>& couldNotLoadFiles) {
        std::map<std::string, uint64_t> dylibInstallNameMap;
        for (CacheBuilder::InputFile& inputFile : inputFiles) {
            char realerPath[MAXPATHLEN];
            dyld3::closure::LoadedFileInfo loadedFileInfo = dyld3::MachOAnalyzer::load(inputFile.diag, fileSystem, inputFile.path, reqArchs, reqPlatform, realerPath);
            const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)loadedFileInfo.fileContent;
            if (ma == nullptr) {
                couldNotLoadFiles.emplace_back((CacheBuilder::LoadedMachO){ DyldSharedCache::MappedMachO(), loadedFileInfo, &inputFile });
                continue;
            }

            DyldSharedCache::MappedMachO mappedFile(inputFile.path, ma, loadedFileInfo.sliceLen, false, false,
                                                    loadedFileInfo.sliceOffset, loadedFileInfo.mtime, loadedFileInfo.inode);

            // The file can be loaded with the given slice, but we may still want to exclude it from the cache.
            if (ma->isDylib()) {
                std::string installName = ma->installName();

                // Let the platform exclude the file before we do anything else.
                if (platformExcludesInstallName(installName)) {
                    inputFile.diag.verbose("Platform excluded file\n");
                    fileSystem.unloadFile(loadedFileInfo);
                    continue;
                }

                if (!ma->canBePlacedInDyldCache(inputFile.path, ^(const char* msg) {
                    inputFile.diag.warning("Dylib located at '%s' cannot be placed in cache because: %s", inputFile.path, msg);
                })) {
                    // TODO: Add exclusion lists here?
                    // Probably not as we already applied the dylib exclusion list.
                    if (!ma->canHavePrecomputedDlopenClosure(inputFile.path, ^(const char* msg) {
                        inputFile.diag.verbose("Dylib located at '%s' cannot prebuild dlopen closure in cache because: %s", inputFile.path, msg);
                    }) ) {
                        fileSystem.unloadFile(loadedFileInfo);
                        continue;
                    }
                    otherDylibs.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
                    continue;
                }

                // Otherwise see if we have another file with this install name
                auto iteratorAndInserted = dylibInstallNameMap.insert(std::make_pair(installName, dylibsToCache.size()));
                if (iteratorAndInserted.second) {
                    // We inserted the dylib so we haven't seen another with this name.
                    if (installName[0] != '@' && installName != inputFile.path) {
                        inputFile.diag.warning("Dylib located at '%s' has installname '%s'", inputFile.path, installName.c_str());
                    }

                    dylibsToCache.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
                } else {
                    // We didn't insert this one so we've seen it before.
                    CacheBuilder::LoadedMachO& previousLoadedMachO = dylibsToCache[iteratorAndInserted.first->second];
                    inputFile.diag.warning("Multiple dylibs claim installname '%s' ('%s' and '%s')", installName.c_str(), inputFile.path, previousLoadedMachO.mappedFile.runtimePath.c_str());

                    // This is the "Good" one, overwrite
                    if (inputFile.path == installName) {
                        // Unload the old one
                        fileSystem.unloadFile(previousLoadedMachO.loadedFileInfo);

                        // And replace with this one.
                        previousLoadedMachO.mappedFile = mappedFile;
                        previousLoadedMachO.loadedFileInfo = loadedFileInfo;
                    }
                }
            } else if (ma->isBundle()) {
                // TODO: Add exclusion lists here?
                if (!ma->canHavePrecomputedDlopenClosure(inputFile.path, ^(const char* msg) {
                    inputFile.diag.verbose("Dylib located at '%s' cannot prebuild dlopen closure in cache because: %s", inputFile.path, msg);
                }) ) {
                    fileSystem.unloadFile(loadedFileInfo);
                    continue;
                }
                otherDylibs.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
            } else if (ma->isDynamicExecutable()) {
                if (platformExcludesExecutablePath_macOS(inputFile.path)) {
                    inputFile.diag.verbose("Platform excluded file\n");
                    fileSystem.unloadFile(loadedFileInfo);
                    continue;
                }
                executables.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
            } else {
                inputFile.diag.verbose("Unsupported mach file type\n");
                fileSystem.unloadFile(loadedFileInfo);
            }
        }
    }

    static bool platformExcludesInstallName_macOS(const std::string& installName) {
        return false;
    }

    static bool platformExcludesInstallName_iOS(const std::string& installName) {
        if ( installName == "/System/Library/Caches/com.apple.xpc/sdk.dylib" )
            return true;
        if ( installName == "/System/Library/Caches/com.apple.xpcd/xpcd_cache.dylib" )
            return true;
        return false;
    }

    static bool platformExcludesInstallName_tvOS(const std::string& installName) {
        return platformExcludesInstallName_iOS(installName);
    }

    static bool platformExcludesInstallName_watchOS(const std::string& installName) {
        return platformExcludesInstallName_iOS(installName);
    }

    static bool platformExcludesInstallName_bridgeOS(const std::string& installName) {
        return platformExcludesInstallName_iOS(installName);
    }

    // Returns true if the current platform requires that this install name be excluded from the shared cache
    // Note that this overrides any exclusion from anywhere else.
    bool platformExcludesInstallName(const std::string& installName) {
        switch (reqPlatform) {
            case dyld3::Platform::unknown:
                return false;
            case dyld3::Platform::macOS:
                return platformExcludesInstallName_macOS(installName);
            case dyld3::Platform::iOS:
                return platformExcludesInstallName_iOS(installName);
            case dyld3::Platform::tvOS:
                return platformExcludesInstallName_tvOS(installName);
            case dyld3::Platform::watchOS:
                return platformExcludesInstallName_watchOS(installName);
            case dyld3::Platform::bridgeOS:
                return platformExcludesInstallName_bridgeOS(installName);
            case dyld3::Platform::iOSMac:
                return false;
            case dyld3::Platform::iOS_simulator:
                return false;
            case dyld3::Platform::tvOS_simulator:
                return false;
            case dyld3::Platform::watchOS_simulator:
                return false;
            case dyld3::Platform::driverKit:
                return false;
        }
    }

    static bool platformExcludesExecutablePath_macOS(const std::string& path) {
        return false;
    }

    static bool platformExcludesExecutablePath_iOS(const std::string& path) {
        // HACK: exclude all launchd and installd variants until we can do something about xpcd_cache.dylib and friends
        if (path == "/sbin/launchd"
            || path == "/usr/local/sbin/launchd.debug"
            || path == "/usr/local/sbin/launchd.development"
            || path == "/usr/libexec/installd") {
            return true;
        }
        return false;
    }

    static bool platformExcludesExecutablePath_tvOS(const std::string& path) {
        return platformExcludesExecutablePath_iOS(path);
    }

    static bool platformExcludesExecutablePath_watchOS(const std::string& path) {
        return platformExcludesExecutablePath_iOS(path);
    }

    static bool platformExcludesExecutablePath_bridgeOS(const std::string& path) {
        return platformExcludesExecutablePath_iOS(path);
    }

    // Returns true if the current platform requires that this path be excluded from the shared cache
    // Note that this overrides any exclusion from anywhere else.
    bool platformExcludesExecutablePath(const std::string& path) {
        switch (reqPlatform) {
            case dyld3::Platform::unknown:
                return false;
            case dyld3::Platform::macOS:
                return platformExcludesExecutablePath_macOS(path);
            case dyld3::Platform::iOS:
                return platformExcludesExecutablePath_iOS(path);
            case dyld3::Platform::tvOS:
                return platformExcludesExecutablePath_tvOS(path);
            case dyld3::Platform::watchOS:
                return platformExcludesExecutablePath_watchOS(path);
            case dyld3::Platform::bridgeOS:
                return platformExcludesExecutablePath_bridgeOS(path);
            case dyld3::Platform::iOSMac:
                return false;
            case dyld3::Platform::iOS_simulator:
                return false;
            case dyld3::Platform::tvOS_simulator:
                return false;
            case dyld3::Platform::watchOS_simulator:
                return false;
            case dyld3::Platform::driverKit:
                return false;
        }
    }

private:
    const dyld3::closure::FileSystem&   fileSystem;
    const dyld3::GradedArchs&           reqArchs;
    dyld3::Platform                     reqPlatform;
};

SharedCacheBuilder::SharedCacheBuilder(const DyldSharedCache::CreateOptions& options,
                                       const dyld3::closure::FileSystem& fileSystem)
    : CacheBuilder(options, fileSystem) {

    std::string targetArch = options.archs->name();
    if ( options.forSimulator && (options.archs == &dyld3::GradedArchs::i386) )
        targetArch = "sim-x86";

    for (const ArchLayout& layout : _s_archLayout) {
        if ( layout.archName == targetArch ) {
            _archLayout = &layout;
            _is64 = _archLayout->is64;
            break;
        }
    }

    if ( _archLayout == nullptr ) {
        _diagnostics.error("Tool was built without support for: '%s'", targetArch.c_str());
    }
}

static void verifySelfContained(std::vector<CacheBuilder::LoadedMachO>& dylibsToCache,
                                std::vector<CacheBuilder::LoadedMachO>& otherDylibs,
                                std::vector<CacheBuilder::LoadedMachO>& couldNotLoadFiles)
{
    // build map of dylibs
    __block std::map<std::string, const CacheBuilder::LoadedMachO*> knownDylibs;
    __block std::map<std::string, const CacheBuilder::LoadedMachO*> allDylibs;
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        knownDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        allDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        if (const char* installName = dylib.mappedFile.mh->installName()) {
            knownDylibs.insert({ installName, &dylib });
            allDylibs.insert({ installName, &dylib });
        }
    }
    for (const CacheBuilder::LoadedMachO& dylib : otherDylibs) {
        allDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        if (const char* installName = dylib.mappedFile.mh->installName())
            allDylibs.insert({ installName, &dylib });
    }
    for (const CacheBuilder::LoadedMachO& dylib : couldNotLoadFiles) {
        allDylibs.insert({ dylib.inputFile->path, &dylib });
    }

    // check all dependencies to assure every dylib in cache only depends on other dylibs in cache
    __block std::map<std::string, std::set<std::string>> badDylibs;
    __block bool doAgain = true;
    while ( doAgain ) {
        doAgain = false;
        // scan dylib list making sure all dependents are in dylib list
        for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
            if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                continue;
            dylib.mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                if ( knownDylibs.count(loadPath) == 0 ) {
                    badDylibs[dylib.mappedFile.runtimePath].insert(std::string("Could not find dependency '") + loadPath + "'");
                    knownDylibs.erase(dylib.mappedFile.runtimePath);
                    knownDylibs.erase(dylib.mappedFile.mh->installName());
                    doAgain = true;
                }
            });
        }
    }
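
    // Note: the doAgain fixed point above is needed because eviction cascades.
    // When a dylib is dropped from knownDylibs over a missing dependency, any
    // dylib that links against it is no longer self-contained either, so the
    // scan repeats until a full pass makes no new evictions.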

    // Now walk the dylibs which depend on missing dylibs and see if any of them are required binaries.
    for (auto badDylibsIterator : badDylibs) {
        const std::string& dylibRuntimePath = badDylibsIterator.first;
        auto requiredDylibIterator = allDylibs.find(dylibRuntimePath);
        if (requiredDylibIterator == allDylibs.end())
            continue;
        if (!requiredDylibIterator->second->inputFile->mustBeIncluded())
            continue;
        // This dylib is required so mark all dependencies as required too
        __block std::vector<const CacheBuilder::LoadedMachO*> worklist;
        worklist.push_back(requiredDylibIterator->second);
        while (!worklist.empty()) {
            const CacheBuilder::LoadedMachO* dylib = worklist.back();
            worklist.pop_back();
            if (!dylib->mappedFile.mh)
                continue;
            dylib->mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                auto dylibIterator = allDylibs.find(loadPath);
                if (dylibIterator != allDylibs.end()) {
                    if (dylibIterator->second->inputFile->state == CacheBuilder::InputFile::Unset) {
                        dylibIterator->second->inputFile->state = CacheBuilder::InputFile::MustBeIncludedForDependent;
                        worklist.push_back(dylibIterator->second);
                    }
                }
            });
        }
    }

    // FIXME: Make this an option we can pass in
    const bool evictLeafDylibs = true;
    if (evictLeafDylibs) {
        // build count of how many references there are to each dylib
        __block std::set<std::string> referencedDylibs;
        for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
            if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                continue;
            dylib.mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool &stop) {
                referencedDylibs.insert(loadPath);
            });
        }

        // find all dylibs not referenced
        for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
            if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                continue;
            const char* installName = dylib.mappedFile.mh->installName();
            if ( (referencedDylibs.count(installName) == 0) && (dylib.inputFile->state == CacheBuilder::InputFile::MustBeExcludedIfUnused) ) {
                badDylibs[dylib.mappedFile.runtimePath].insert(std::string("It has been explicitly excluded as it is unused"));
            }
        }
    }

    // Move bad dylibs from dylibs to cache to other dylibs.
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        auto i = badDylibs.find(dylib.mappedFile.runtimePath);
        if ( i != badDylibs.end()) {
            otherDylibs.push_back(dylib);
            for (const std::string& reason : i->second)
                otherDylibs.back().inputFile->diag.warning("Dylib located at '%s' not placed in shared cache because: %s", dylib.mappedFile.runtimePath.c_str(), reason.c_str());
        }
    }
    const auto& badDylibsLambdaRef = badDylibs;
    dylibsToCache.erase(std::remove_if(dylibsToCache.begin(), dylibsToCache.end(), [&](const CacheBuilder::LoadedMachO& dylib) {
        if (badDylibsLambdaRef.find(dylib.mappedFile.runtimePath) != badDylibsLambdaRef.end())
            return true;
        return false;
    }), dylibsToCache.end());
}

// This is the new build API which takes the raw files (which could be FAT) and tries to build a cache from them.
// We should remove the other build() method, or make it private so that this can wrap it.
void SharedCacheBuilder::build(std::vector<CacheBuilder::InputFile>& inputFiles,
                               std::vector<DyldSharedCache::FileAlias>& aliases) {
    // First filter down to files which are actually MachO's
    CacheInputBuilder cacheInputBuilder(_fileSystem, *_options.archs, _options.platform);

    std::vector<LoadedMachO> dylibsToCache;
    std::vector<LoadedMachO> otherDylibs;
    std::vector<LoadedMachO> executables;
    std::vector<LoadedMachO> couldNotLoadFiles;
    cacheInputBuilder.loadMachOs(inputFiles, dylibsToCache, otherDylibs, executables, couldNotLoadFiles);

    verifySelfContained(dylibsToCache, otherDylibs, couldNotLoadFiles);

    // Check for required binaries before we try to build the cache
    if (!_diagnostics.hasError()) {
        // If we succeeded in loading, then now see if there was a missing required file, and if so why it's missing.
        std::string errorString;
        for (const LoadedMachO& dylib : otherDylibs) {
            if (dylib.inputFile->mustBeIncluded()) {
                // An error loading a required file must be propagated up to the top level diagnostic handler.
                bool gotWarning = false;
                for (const std::string& warning : dylib.inputFile->diag.warnings()) {
                    gotWarning = true;
                    std::string message = warning;
                    if (message.back() == '\n')
                        message.pop_back();
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: " + message + "\n";
                }
                if (!gotWarning) {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: 'unknown error. Please report to dyld'\n";
                }
            }
        }

        for (const LoadedMachO& dylib : couldNotLoadFiles) {
            if (dylib.inputFile->mustBeIncluded()) {
                if (dylib.inputFile->diag.hasError()) {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: " + dylib.inputFile->diag.errorMessage() + "\n";
                } else {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: 'unknown error. Please report to dyld'\n";
                }
            }
        }
        if (!errorString.empty()) {
            _diagnostics.error("%s", errorString.c_str());
        }
    }

    if (!_diagnostics.hasError())
        build(dylibsToCache, otherDylibs, executables, aliases);

    if (!_diagnostics.hasError()) {
        // If we succeeded in building, then now see if there was a missing required file, and if so why it's missing.
        std::string errorString;
        for (CacheBuilder::InputFile& inputFile : inputFiles) {
            if (inputFile.mustBeIncluded() && inputFile.diag.hasError()) {
                // An error loading a required file must be propagated up to the top level diagnostic handler.
                std::string message = inputFile.diag.errorMessage();
                if (message.back() == '\n')
                    message.pop_back();
                errorString += "Required binary was not included in the shared cache '" + std::string(inputFile.path) + "' because: " + message + "\n";
            }
        }
        if (!errorString.empty()) {
            _diagnostics.error("%s", errorString.c_str());
        }
    }

    // Add all the warnings from the input files to the top level warnings on the main diagnostics object.
    for (CacheBuilder::InputFile& inputFile : inputFiles) {
        for (const std::string& warning : inputFile.diag.warnings())
            _diagnostics.warning("%s", warning.c_str());
    }

    // Clean up the loaded files
    for (LoadedMachO& loadedMachO : dylibsToCache)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
    for (LoadedMachO& loadedMachO : otherDylibs)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
    for (LoadedMachO& loadedMachO : executables)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
}

void SharedCacheBuilder::build(const std::vector<DyldSharedCache::MappedMachO>& dylibs,
                               const std::vector<DyldSharedCache::MappedMachO>& otherOsDylibsInput,
                               const std::vector<DyldSharedCache::MappedMachO>& osExecutables,
                               std::vector<DyldSharedCache::FileAlias>& aliases) {
    std::vector<LoadedMachO> dylibsToCache;
    std::vector<LoadedMachO> otherDylibs;
    std::vector<LoadedMachO> executables;

    for (const DyldSharedCache::MappedMachO& mappedMachO : dylibs) {
        dyld3::closure::LoadedFileInfo loadedFileInfo;
        loadedFileInfo.fileContent    = mappedMachO.mh;
        loadedFileInfo.fileContentLen = mappedMachO.length;
        loadedFileInfo.sliceOffset    = mappedMachO.sliceFileOffset;
        loadedFileInfo.sliceLen       = mappedMachO.length;
        loadedFileInfo.inode          = mappedMachO.inode;
        loadedFileInfo.mtime          = mappedMachO.modTime;
        loadedFileInfo.path           = mappedMachO.runtimePath.c_str();
        dylibsToCache.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
    }

    for (const DyldSharedCache::MappedMachO& mappedMachO : otherOsDylibsInput) {
        dyld3::closure::LoadedFileInfo loadedFileInfo;
        loadedFileInfo.fileContent    = mappedMachO.mh;
        loadedFileInfo.fileContentLen = mappedMachO.length;
        loadedFileInfo.sliceOffset    = mappedMachO.sliceFileOffset;
        loadedFileInfo.sliceLen       = mappedMachO.length;
        loadedFileInfo.inode          = mappedMachO.inode;
        loadedFileInfo.mtime          = mappedMachO.modTime;
        loadedFileInfo.path           = mappedMachO.runtimePath.c_str();
        otherDylibs.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
    }

    for (const DyldSharedCache::MappedMachO& mappedMachO : osExecutables) {
        dyld3::closure::LoadedFileInfo loadedFileInfo;
        loadedFileInfo.fileContent    = mappedMachO.mh;
        loadedFileInfo.fileContentLen = mappedMachO.length;
        loadedFileInfo.sliceOffset    = mappedMachO.sliceFileOffset;
        loadedFileInfo.sliceLen       = mappedMachO.length;
        loadedFileInfo.inode          = mappedMachO.inode;
        loadedFileInfo.mtime          = mappedMachO.modTime;
        loadedFileInfo.path           = mappedMachO.runtimePath.c_str();
        executables.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
    }

    build(dylibsToCache, otherDylibs, executables, aliases);
}

void SharedCacheBuilder::build(const std::vector<LoadedMachO>& dylibs,
                               const std::vector<LoadedMachO>& otherOsDylibsInput,
                               const std::vector<LoadedMachO>& osExecutables,
                               std::vector<DyldSharedCache::FileAlias>& aliases)
{
    // <rdar://problem/21317611> error out instead of crash if cache has no dylibs
    // FIXME: plist should specify required vs optional dylibs
    if ( dylibs.size() < 30 ) {
        _diagnostics.error("missing required minimum set of dylibs");
        return;
    }
= mach_absolute_time();
888 // make copy of dylib list and sort
889 makeSortedDylibs(dylibs
, _options
.dylibOrdering
);

    // allocate space used by largest possible cache plus room for LINKEDITs before optimization
    _allocatedBufferSize = _archLayout->sharedMemorySize * 1.50;
    if ( vm_allocate(mach_task_self(), &_fullAllocatedBuffer, _allocatedBufferSize, VM_FLAGS_ANYWHERE) != 0 ) {
        _diagnostics.error("could not allocate buffer");
        return;
    }
899 parseCoalescableSegments();
900 processSelectorStrings(osExecutables
);
901 assignSegmentAddresses();
902 std::vector
<const LoadedMachO
*> overflowDylibs
;
903 while ( cacheOverflowAmount() != 0 ) {
904 if ( !_options
.evictLeafDylibsOnOverflow
) {
905 _diagnostics
.error("cache overflow by %lluMB", cacheOverflowAmount() / 1024 / 1024);
908 size_t evictionCount
= evictLeafDylibs(cacheOverflowAmount(), overflowDylibs
);
910 for (DylibInfo
& dylib
: _sortedDylibs
)
911 dylib
.cacheLocation
.clear();
912 _coalescedText
.clear();
913 parseCoalescableSegments();
914 processSelectorStrings(osExecutables
);
915 assignSegmentAddresses();
917 _diagnostics
.verbose("cache overflow, evicted %lu leaf dylibs\n", evictionCount
);
919 markPaddingInaccessible();

    // copy all segments into cache
    uint64_t t2 = mach_absolute_time();
    writeCacheHeader();
    copyRawSegments();

    // rebase all dylibs for new location in cache
    uint64_t t3 = mach_absolute_time();
    _aslrTracker.setDataRegion(_readWriteRegion.buffer, _readWriteRegion.sizeInUse);
    if ( !_options.cacheSupportsASLR )
        _aslrTracker.disable();
    adjustAllImagesForNewSegmentLocations();
    if ( _diagnostics.hasError() )
        return;
936 uint64_t t4
= mach_absolute_time();
937 buildImageArray(aliases
);
938 if ( _diagnostics
.hasError() )
942 uint64_t t5
= mach_absolute_time();
943 DyldSharedCache
* dyldCache
= (DyldSharedCache
*)_readExecuteRegion
.buffer
;
944 if ( _options
.optimizeObjC
)
946 if ( _diagnostics
.hasError() )
950 // optimize away stubs
951 uint64_t t6
= mach_absolute_time();
952 if ( _options
.optimizeStubs
)
956 // FIPS seal corecrypto, This must be done after stub elimination (so that __TEXT,__text is not changed after sealing)
959 // merge and compact LINKEDIT segments
960 uint64_t t7
= mach_absolute_time();
963 // copy ImageArray to end of read-only region
965 if ( _diagnostics
.hasError() )
967 uint64_t t8
= mach_absolute_time();

    // don't add dyld3 closures to simulator cache
    if ( !dyld3::MachOFile::isSimulatorPlatform(_options.platform) ) {
        // compute and add dlopen closures for all other dylibs
        addOtherImageArray(otherOsDylibsInput, overflowDylibs);
        if ( _diagnostics.hasError() )
            return;

        // compute and add launch closures to end of read-only region
        addClosures(osExecutables);
        if ( _diagnostics.hasError() )
            return;
    }
983 dyld_cache_mapping_info
* mappings
= (dyld_cache_mapping_info
*)(_readExecuteRegion
.buffer
+ dyldCache
->header
.mappingOffset
);
984 mappings
[2].size
= _readOnlyRegion
.sizeInUse
;
985 if ( _options
.excludeLocalSymbols
)
986 dyldCache
->header
.localSymbolsOffset
= _readOnlyRegion
.cacheFileOffset
+ _readOnlyRegion
.sizeInUse
;

    // record max slide now that final size is established
    if ( _archLayout->sharedRegionsAreDiscontiguous ) {
        // special case x86_64 which has three non-contiguous chunks each in their own 1GB regions
        uint64_t maxSlide0 = 0x60000000 - _readExecuteRegion.sizeInUse; // TEXT chunk has a 1.5GB region
        uint64_t maxSlide1 = 0x40000000 - _readWriteRegion.sizeInUse;
        uint64_t maxSlide2 = 0x3FE00000 - _readOnlyRegion.sizeInUse;
        dyldCache->header.maxSlide = std::min(std::min(maxSlide0, maxSlide1), maxSlide2);
    }
    else {
        // <rdar://problem/49852839> branch predictor on arm64 currently only looks at low 32-bits, so don't slide cache more than 2GB
        if ( (_archLayout->sharedMemorySize == 0x100000000) && (_readExecuteRegion.sizeInUse < 0x80000000) )
            dyldCache->header.maxSlide = 0x80000000 - _readExecuteRegion.sizeInUse;
        else
            dyldCache->header.maxSlide = (_archLayout->sharedMemoryStart + _archLayout->sharedMemorySize) - (_readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse);
    }

    // mark if any input dylibs were built with chained fixups
    dyldCache->header.builtFromChainedFixups = _someDylibsUsedChainedFixups;

    uint64_t t9 = mach_absolute_time();

    // fill in slide info at start of region[2]
    // do this last because it modifies pointers in DATA segments
    if ( _options.cacheSupportsASLR ) {
#if SUPPORT_ARCH_arm64e
        if ( strcmp(_archLayout->archName, "arm64e") == 0 )
            writeSlideInfoV3(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
        else
#endif
        if ( _archLayout->is64 )
            writeSlideInfoV2<Pointer64<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
#if SUPPORT_ARCH_arm64_32 || SUPPORT_ARCH_armv7k
        else if ( _archLayout->pointerDeltaMask == 0xC0000000 )
            writeSlideInfoV4<Pointer32<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
#endif
        else
            writeSlideInfoV2<Pointer32<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
    }
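
    // Summary of the format selection above: V3 for arm64e, whose chained
    // fixups carry pointer-authentication state; V4 for the compact 32-bit
    // layouts whose pointerDeltaMask is 0xC0000000 (armv7k/arm64_32); V2 with
    // 64-bit or 32-bit pointers for everything else.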

    uint64_t t10 = mach_absolute_time();

    // last sanity check on size
    if ( cacheOverflowAmount() != 0 ) {
        _diagnostics.error("cache overflow after optimizations 0x%llX -> 0x%llX", _readExecuteRegion.unslidLoadAddress, _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse);
        return;
    }

    // codesignature is part of file, but is not mapped
    codeSign();
    if ( _diagnostics.hasError() )
        return;

    uint64_t t11 = mach_absolute_time();

    if ( _options.verbose ) {
        fprintf(stderr, "time to layout cache: %ums\n", absolutetime_to_milliseconds(t2-t1));
        fprintf(stderr, "time to copy cached dylibs into buffer: %ums\n", absolutetime_to_milliseconds(t3-t2));
        fprintf(stderr, "time to adjust segments for new split locations: %ums\n", absolutetime_to_milliseconds(t4-t3));
        fprintf(stderr, "time to bind all images: %ums\n", absolutetime_to_milliseconds(t5-t4));
        fprintf(stderr, "time to optimize Objective-C: %ums\n", absolutetime_to_milliseconds(t6-t5));
        fprintf(stderr, "time to do stub elimination: %ums\n", absolutetime_to_milliseconds(t7-t6));
        fprintf(stderr, "time to optimize LINKEDITs: %ums\n", absolutetime_to_milliseconds(t8-t7));
        fprintf(stderr, "time to build %lu closures: %ums\n", osExecutables.size(), absolutetime_to_milliseconds(t9-t8));
        fprintf(stderr, "time to compute slide info: %ums\n", absolutetime_to_milliseconds(t10-t9));
        fprintf(stderr, "time to compute UUID and codesign cache file: %ums\n", absolutetime_to_milliseconds(t11-t10));
    }
}

const std::set<std::string> SharedCacheBuilder::warnings()
{
    return _diagnostics.warnings();
}

const std::set<const dyld3::MachOAnalyzer*> SharedCacheBuilder::evictions()
{
    return _evictions;
}

void SharedCacheBuilder::deleteBuffer()
{
    // Cache buffer. Deallocate the full size passed to vm_allocate() above,
    // not just _archLayout->sharedMemorySize, so the 50% LINKEDIT headroom is
    // released as well.
    vm_deallocate(mach_task_self(), _fullAllocatedBuffer, _allocatedBufferSize);
    _fullAllocatedBuffer = 0;
    _allocatedBufferSize = 0;
    // Local symbols buffer
    if ( _localSymbolsRegion.bufferSize != 0 ) {
        vm_deallocate(mach_task_self(), (vm_address_t)_localSymbolsRegion.buffer, _localSymbolsRegion.bufferSize);
        _localSymbolsRegion.buffer = 0;
        _localSymbolsRegion.bufferSize = 0;
    }
    // Code signature buffer
    vm_deallocate(mach_task_self(), (vm_address_t)_codeSignatureRegion.buffer, _codeSignatureRegion.bufferSize);
    _codeSignatureRegion.buffer = 0;
    _codeSignatureRegion.bufferSize = 0;
}

void SharedCacheBuilder::makeSortedDylibs(const std::vector<LoadedMachO>& dylibs, const std::unordered_map<std::string, unsigned> sortOrder)
{
    for (const LoadedMachO& dylib : dylibs) {
        _sortedDylibs.push_back({ &dylib, dylib.mappedFile.runtimePath, {} });
    }

    std::sort(_sortedDylibs.begin(), _sortedDylibs.end(), [&](const DylibInfo& a, const DylibInfo& b) {
        const auto& orderA = sortOrder.find(a.input->mappedFile.runtimePath);
        const auto& orderB = sortOrder.find(b.input->mappedFile.runtimePath);
        bool foundA = (orderA != sortOrder.end());
        bool foundB = (orderB != sortOrder.end());

        // Order all __DATA_DIRTY segments specified in the order file first, in
        // the order specified in the file, followed by any other __DATA_DIRTY
        // segments in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;

        // Sort mac before iOSMac
        bool isIOSMacA = strncmp(a.input->mappedFile.runtimePath.c_str(), "/System/iOSSupport/", 19) == 0;
        bool isIOSMacB = strncmp(b.input->mappedFile.runtimePath.c_str(), "/System/iOSSupport/", 19) == 0;
        if (isIOSMacA != isIOSMacB)
            return !isIOSMacA;

        // Finally sort by path
        return a.input->mappedFile.runtimePath < b.input->mappedFile.runtimePath;
    });
}

struct DylibAndSize
{
    const CacheBuilder::LoadedMachO*    input;
    const char*                         installName;
    uint64_t                            size;
};

uint64_t SharedCacheBuilder::cacheOverflowAmount()
{
    if ( _archLayout->sharedRegionsAreDiscontiguous ) {
        // for macOS x86_64 cache, need to check each region for overflow
        if ( _readExecuteRegion.sizeInUse > 0x60000000 )
            return (_readExecuteRegion.sizeInUse - 0x60000000);

        if ( _readWriteRegion.sizeInUse > 0x40000000 )
            return (_readWriteRegion.sizeInUse - 0x40000000);

        if ( _readOnlyRegion.sizeInUse > 0x3FE00000 )
            return (_readOnlyRegion.sizeInUse - 0x3FE00000);
    }
    else if ( _archLayout->textAndDataMaxSize != 0 ) {
        // for armv7k, limit is 512MB of TEXT+DATA
        uint64_t totalTextAndData = _readWriteRegion.unslidLoadAddress + _readWriteRegion.sizeInUse - _readExecuteRegion.unslidLoadAddress;
        if ( totalTextAndData < _archLayout->textAndDataMaxSize )
            return 0;
        else
            return totalTextAndData - _archLayout->textAndDataMaxSize;
    }
    else {
        bool     alreadyOptimized = (_readOnlyRegion.sizeInUse != _readOnlyRegion.bufferSize);
        uint64_t vmSize = _readOnlyRegion.unslidLoadAddress - _readExecuteRegion.unslidLoadAddress;
        if ( alreadyOptimized )
            vmSize += _readOnlyRegion.sizeInUse;
        else if ( _options.excludeLocalSymbols )
            vmSize += (_readOnlyRegion.sizeInUse * 37/100); // assume locals removal and LINKEDIT optimization reduces LINKEDITs to 37% of original size
        else
            vmSize += (_readOnlyRegion.sizeInUse * 80/100); // assume LINKEDIT optimization reduces LINKEDITs to 80% of original size
        if ( vmSize > _archLayout->sharedMemorySize )
            return vmSize - _archLayout->sharedMemorySize;
    }
    // fits in shared region
    return 0;
}
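
// Worked example of the LINKEDIT estimate above (illustrative numbers): a
// 256MB un-optimized __LINKEDIT region is projected to occupy about 95MB
// (37%) when local symbols will be stripped, or about 205MB (80%) when only
// redundant LINKEDIT data is merged; both figures are heuristics, not
// guarantees.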

size_t SharedCacheBuilder::evictLeafDylibs(uint64_t reductionTarget, std::vector<const LoadedMachO*>& overflowDylibs)
{
    // build a reverse map of all dylib dependencies
    __block std::map<std::string, std::set<std::string>> references;
    std::map<std::string, std::set<std::string>>* referencesPtr = &references;
    for (const DylibInfo& dylib : _sortedDylibs) {
        // Ensure we have an entry (even if it is empty)
        if (references.count(dylib.input->mappedFile.mh->installName()) == 0) {
            references[dylib.input->mappedFile.mh->installName()] = std::set<std::string>();
        }
        dylib.input->mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool &stop) {
            references[loadPath].insert(dylib.input->mappedFile.mh->installName());
        });
    }

    // Find the sizes of all the dylibs
    std::vector<DylibAndSize> dylibsToSort;
    std::vector<DylibAndSize> sortedDylibs;
    for (const DylibInfo& dylib : _sortedDylibs) {
        const char* installName = dylib.input->mappedFile.mh->installName();
        __block uint64_t segsSize = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
            if ( strcmp(info.segName, "__LINKEDIT") != 0 )
                segsSize += info.vmSize;
        });
        dylibsToSort.push_back({ dylib.input, installName, segsSize });
    }

    // Build an ordered list of what to remove. At each step we do the following:
    // 1) Find all dylibs that nothing else depends on
    // 2a) If any of those dylibs are not in the order file, select the largest one of them
    // 2b) If all the leaf dylibs are in the order file, select the dylib that appears last in the order file
    // 3) Remove all entries for the removed file from the reverse dependency map
    // 4) Go back to 1) and repeat until there are no more evictable dylibs
    // This results in us always choosing the locally optimal selection, and then taking into account how that impacts
    // the dependency graph for subsequent selections
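
    // Small worked example with hypothetical dylibs: let the current leaves be
    // A (8MB, not in the order file) and B (2MB, at order 5). A receives order
    // UINT64_MAX, so it outranks B; a second unordered leaf C (12MB) would then
    // displace A via the size tie-break below. Erasing the pick from the
    // reverse map can turn its former parents into leaves for the next round.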

    bool candidateFound = true;
    while (candidateFound) {
        candidateFound = false;
        DylibAndSize candidate;
        uint64_t candidateOrder = 0;
        for (const auto& dylib : dylibsToSort) {
            const auto& i = referencesPtr->find(dylib.installName);
            assert(i != referencesPtr->end());
            if (!i->second.empty()) {
                continue;
            }
            const auto& j = _options.dylibOrdering.find(dylib.input->mappedFile.runtimePath);
            uint64_t order = 0;
            if (j != _options.dylibOrdering.end()) {
                order = j->second;
            } else {
                // Not in the order file, set order so it goes to the front of the list
                order = UINT64_MAX;
            }
            if (order > candidateOrder ||
                (order == UINT64_MAX && candidate.size < dylib.size)) {
                // The new file is either a lower priority in the order file
                // or the same priority as the candidate but larger
                candidate = dylib;
                candidateOrder = order;
                candidateFound = true;
            }
        }
        if (candidateFound) {
            sortedDylibs.push_back(candidate);
            referencesPtr->erase(candidate.installName);
            for (auto& dependent : references) {
                (void)dependent.second.erase(candidate.installName);
            }
            auto j = std::find_if(dylibsToSort.begin(), dylibsToSort.end(), [&candidate](const DylibAndSize& dylib) {
                return (strcmp(candidate.installName, dylib.installName) == 0);
            });
            if (j != dylibsToSort.end()) {
                dylibsToSort.erase(j);
            }
        }
    }

    // build set of dylibs that if removed will allow cache to build
    for (DylibAndSize& dylib : sortedDylibs) {
        if ( _options.verbose )
            _diagnostics.warning("to prevent cache overflow, not caching %s", dylib.installName);
        _evictions.insert(dylib.input->mappedFile.mh);
        // Track the evicted dylibs so we can try to build "other" dlopen closures for them.
        overflowDylibs.push_back(dylib.input);
        if ( dylib.size > reductionTarget )
            break;
        reductionTarget -= dylib.size;
    }

    // prune _sortedDylibs
    _sortedDylibs.erase(std::remove_if(_sortedDylibs.begin(), _sortedDylibs.end(), [&](const DylibInfo& dylib) {
        return (_evictions.count(dylib.input->mappedFile.mh) != 0);
    }), _sortedDylibs.end());

    return _evictions.size();
}

void SharedCacheBuilder::writeCacheHeader()
{
    // "dyld_v1" + spaces + archName(), with enough spaces to pad to 15 bytes
    std::string magic = "dyld_v1";
    magic.append(15 - magic.length() - strlen(_options.archs->name()), ' ');
    magic.append(_options.archs->name());
    assert(magic.length() == 15);
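
    // Example: for arm64e this yields "dyld_v1  arm64e" (7 characters, 2
    // padding spaces, 6-character arch name = 15), so the 16-byte memcpy
    // below also picks up the trailing NUL.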

    // fill in header
    dyld_cache_header* dyldCacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
    memcpy(dyldCacheHeader->magic, magic.c_str(), 16);
    dyldCacheHeader->mappingOffset              = sizeof(dyld_cache_header);
    dyldCacheHeader->mappingCount               = 3;
    dyldCacheHeader->imagesOffset               = (uint32_t)(dyldCacheHeader->mappingOffset + 3*sizeof(dyld_cache_mapping_info));
    dyldCacheHeader->imagesCount                = (uint32_t)_sortedDylibs.size() + _aliasCount;
    dyldCacheHeader->dyldBaseAddress            = 0;
    dyldCacheHeader->codeSignatureOffset        = 0;
    dyldCacheHeader->codeSignatureSize          = 0;
    dyldCacheHeader->slideInfoOffset            = _slideInfoFileOffset;
    dyldCacheHeader->slideInfoSize              = _slideInfoBufferSizeAllocated;
    dyldCacheHeader->localSymbolsOffset         = 0;
    dyldCacheHeader->localSymbolsSize           = 0;
    dyldCacheHeader->cacheType                  = _options.optimizeStubs ? kDyldSharedCacheTypeProduction : kDyldSharedCacheTypeDevelopment;
    dyldCacheHeader->accelerateInfoAddr         = 0;
    dyldCacheHeader->accelerateInfoSize         = 0;
    bzero(dyldCacheHeader->uuid, 16);           // overwritten later by recomputeCacheUUID()
    dyldCacheHeader->branchPoolsOffset          = 0;
    dyldCacheHeader->branchPoolsCount           = 0;
    dyldCacheHeader->imagesTextOffset           = dyldCacheHeader->imagesOffset + sizeof(dyld_cache_image_info)*dyldCacheHeader->imagesCount;
    dyldCacheHeader->imagesTextCount            = _sortedDylibs.size();
    dyldCacheHeader->patchInfoAddr              = 0;
    dyldCacheHeader->patchInfoSize              = 0;
    dyldCacheHeader->otherImageGroupAddrUnused  = 0;
    dyldCacheHeader->otherImageGroupSizeUnused  = 0;
    dyldCacheHeader->progClosuresAddr           = 0;
    dyldCacheHeader->progClosuresSize           = 0;
    dyldCacheHeader->progClosuresTrieAddr       = 0;
    dyldCacheHeader->progClosuresTrieSize       = 0;
    dyldCacheHeader->platform                   = (uint8_t)_options.platform;
    dyldCacheHeader->formatVersion              = dyld3::closure::kFormatVersion;
    dyldCacheHeader->dylibsExpectedOnDisk       = !_options.dylibsRemovedDuringMastering;
    dyldCacheHeader->simulator                  = _options.forSimulator;
    dyldCacheHeader->locallyBuiltCache          = _options.isLocallyBuiltCache;
    dyldCacheHeader->builtFromChainedFixups     = false;
    dyldCacheHeader->formatVersion              = dyld3::closure::kFormatVersion;
    dyldCacheHeader->sharedRegionStart          = _archLayout->sharedMemoryStart;
    dyldCacheHeader->sharedRegionSize           = _archLayout->sharedMemorySize;

    // fill in mappings
    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + dyldCacheHeader->mappingOffset);
    mappings[0].address    = _readExecuteRegion.unslidLoadAddress;
    mappings[0].fileOffset = 0;
    mappings[0].size       = _readExecuteRegion.sizeInUse;
    mappings[0].maxProt    = VM_PROT_READ | VM_PROT_EXECUTE;
    mappings[0].initProt   = VM_PROT_READ | VM_PROT_EXECUTE;
    mappings[1].address    = _readWriteRegion.unslidLoadAddress;
    mappings[1].fileOffset = _readExecuteRegion.sizeInUse;
    mappings[1].size       = _readWriteRegion.sizeInUse;
    mappings[1].maxProt    = VM_PROT_READ | VM_PROT_WRITE;
    mappings[1].initProt   = VM_PROT_READ | VM_PROT_WRITE;
    mappings[2].address    = _readOnlyRegion.unslidLoadAddress;
    mappings[2].fileOffset = _readExecuteRegion.sizeInUse + _readWriteRegion.sizeInUse;
    mappings[2].size       = _readOnlyRegion.sizeInUse;
    mappings[2].maxProt    = VM_PROT_READ;
    mappings[2].initProt   = VM_PROT_READ;
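
    // The three mappings are contiguous in the file: each fileOffset is the
    // running total of the preceding regions' sizeInUse, even when the vm
    // addresses are discontiguous as in the x86_64 layout.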

    // fill in image table
    dyld_cache_image_info* images = (dyld_cache_image_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesOffset);
    for (const DylibInfo& dylib : _sortedDylibs) {
        const char* installName = dylib.input->mappedFile.mh->installName();
        images->address = dylib.cacheLocation[0].dstCacheUnslidAddress;
        if ( _options.dylibsRemovedDuringMastering ) {
            images->modTime = 0;
            images->inode   = pathHash(installName);
        }
        else {
            images->modTime = dylib.input->mappedFile.modTime;
            images->inode   = dylib.input->mappedFile.inode;
        }
        uint32_t installNameOffsetInTEXT = (uint32_t)(installName - (char*)dylib.input->mappedFile.mh);
        images->pathFileOffset = (uint32_t)dylib.cacheLocation[0].dstCacheFileOffset + installNameOffsetInTEXT;
        ++images;
    }

    // append aliases image records and strings
    // (This block references members of an older implementation, e.g. _dylibs,
    // _manifest and _buffer, and is kept disabled.)
/*
    for (auto &dylib : _dylibs) {
        if (!dylib->installNameAliases.empty()) {
            for (const std::string& alias : dylib->installNameAliases) {
                images->set_address(_segmentMap[dylib][0].address);
                if (_manifest.platform() == "osx") {
                    images->modTime = dylib->lastModTime;
                    images->inode = dylib->inode;
                }
                else {
                    images->modTime = 0;
                    images->inode = pathHash(alias.c_str());
                }
                images->pathFileOffset = offset;
                //fprintf(stderr, "adding alias %s for %s\n", alias.c_str(), dylib->installName.c_str());
                ::strcpy((char*)&_buffer[offset], alias.c_str());
                offset += alias.size() + 1;
                ++images;
            }
        }
    }
*/

    // calculate start of text image array and trailing string pool
    dyld_cache_image_text_info* textImages = (dyld_cache_image_text_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesTextOffset);
    uint32_t stringOffset = (uint32_t)(dyldCacheHeader->imagesTextOffset + sizeof(dyld_cache_image_text_info) * _sortedDylibs.size());

    // write text image array and image names pool at same time
    for (const DylibInfo& dylib : _sortedDylibs) {
        dylib.input->mappedFile.mh->getUuid(textImages->uuid);
        textImages->loadAddress     = dylib.cacheLocation[0].dstCacheUnslidAddress;
        textImages->textSegmentSize = (uint32_t)dylib.cacheLocation[0].dstCacheSegmentSize;
        textImages->pathOffset      = stringOffset;
        const char* installName = dylib.input->mappedFile.mh->installName();
        ::strcpy((char*)_readExecuteRegion.buffer + stringOffset, installName);
        stringOffset += (uint32_t)strlen(installName)+1;
        ++textImages;
    }

    // make sure header did not overflow into first mapped image
    const dyld_cache_image_info* firstImage = (dyld_cache_image_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesOffset);
    assert(stringOffset <= (firstImage->address - mappings[0].address));
}

void SharedCacheBuilder::processSelectorStrings(const std::vector<LoadedMachO>& executables) {
    const bool log = false;

    _selectorStringsFromExecutables = 0;

    // Don't do this optimisation on watchOS where the shared cache is too small
    if (_options.platform == dyld3::Platform::watchOS)
        return;

    // Get the method name coalesced section as that is where we need to put these strings
    CacheBuilder::CacheCoalescedText::StringSection& cacheStringSection = _coalescedText.getSectionData("__objc_methname");
    for (const LoadedMachO& executable : executables) {
        const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)executable.loadedFileInfo.fileContent;

        uint64_t sizeBeforeProcessing = cacheStringSection.bufferSize;

        ma->forEachObjCMethodName(^(const char* methodName) {
            std::string_view str = methodName;
            auto itAndInserted = cacheStringSection.stringsToOffsets.insert({ str, cacheStringSection.bufferSize });
            if (itAndInserted.second) {
                // If we inserted the string then we need to include it in the total
                cacheStringSection.bufferSize += str.size() + 1;
                // if (log) printf("Selector: %s -> %s\n", ma->installName(), methodName);
                ++_selectorStringsFromExecutables;
            }
        });

        uint64_t sizeAfterProcessing = cacheStringSection.bufferSize;
        if ( log && (sizeBeforeProcessing != sizeAfterProcessing) ) {
            printf("Pulled in % 6lld bytes of selectors from %s\n",
                   sizeAfterProcessing - sizeBeforeProcessing, executable.loadedFileInfo.path);
        }
    }

    _diagnostics.verbose("Pulled in %lld selector strings from executables\n", _selectorStringsFromExecutables);
}

void SharedCacheBuilder::parseCoalescableSegments() {
    const bool log = false;

    for (DylibInfo& dylib : _sortedDylibs)
        _coalescedText.parseCoalescableText(dylib.input->mappedFile.mh, dylib.textCoalescer);

    if (log) {
        for (const char* section : CacheCoalescedText::SupportedSections) {
            CacheCoalescedText::StringSection& sectionData = _coalescedText.getSectionData(section);
            printf("Coalesced %s from % 10lld -> % 10d, saving % 10lld bytes\n", section,
                   sectionData.bufferSize + sectionData.savedSpace, sectionData.bufferSize, sectionData.savedSpace);
        }
    }
}

void SharedCacheBuilder::assignSegmentAddresses()
{
    // calculate size of header info and where first dylib's mach_header should start
    size_t startOffset = sizeof(dyld_cache_header) + 3*sizeof(dyld_cache_mapping_info);
    startOffset += sizeof(dyld_cache_image_info) * _sortedDylibs.size();
    startOffset += sizeof(dyld_cache_image_text_info) * _sortedDylibs.size();
    for (const DylibInfo& dylib : _sortedDylibs) {
        startOffset += (strlen(dylib.input->mappedFile.mh->installName()) + 1);
    }
    //fprintf(stderr, "%s total header size = 0x%08lX\n", _options.archName.c_str(), startOffset);
    startOffset = align(startOffset, 12);

    // HACK!: Rebase v4 assumes that values below 0x8000 are not pointers (encoding them as offsets from the cache header).
    // If using a minimal cache, we need to pad out the cache header to make sure a pointer doesn't fall within that range
#if SUPPORT_ARCH_arm64_32 || SUPPORT_ARCH_armv7k
    if ( _options.cacheSupportsASLR && !_archLayout->is64 ) {
        if ( _archLayout->pointerDeltaMask == 0xC0000000 )
            startOffset = std::max(startOffset, (size_t)0x8000);
    }
#endif
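
    // In the rebase v4 "value add" encoding referenced above, stored values
    // below 0x8000 are treated as non-pointers (offsets from the cache header),
    // so padding startOffset to at least 0x8000 keeps the first dylib's
    // mach_header, a legitimate rebase target, out of that reserved range.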

    // assign TEXT segment addresses
    _readExecuteRegion.buffer            = (uint8_t*)_fullAllocatedBuffer;
    _readExecuteRegion.bufferSize        = 0;
    _readExecuteRegion.sizeInUse         = 0;
    _readExecuteRegion.unslidLoadAddress = _archLayout->sharedMemoryStart;
    _readExecuteRegion.cacheFileOffset   = 0;
    __block uint64_t addr = _readExecuteRegion.unslidLoadAddress + startOffset; // header
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_EXECUTE) )
                return;
            // We may have coalesced the sections at the end of this segment. In that case, shrink the segment to remove them.
            __block size_t sizeOfSections = 0;
            __block bool foundCoalescedSection = false;
            dylib.input->mappedFile.mh->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stopSection) {
                if (strcmp(sectInfo.segInfo.segName, segInfo.segName) != 0)
                    return;
                if ( dylib.textCoalescer.sectionWasCoalesced(sectInfo.sectName)) {
                    foundCoalescedSection = true;
                } else {
                    sizeOfSections = sectInfo.sectAddr + sectInfo.sectSize - segInfo.vmAddr;
                }
            });
            if (!foundCoalescedSection)
                sizeOfSections = segInfo.sizeOfSections;

            // Keep __TEXT segments 4K or more aligned
            addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            uint64_t offsetInRegion = addr - _readExecuteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName               = segInfo.segName;
            loc.dstSegment            = _readExecuteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress = addr;
            loc.dstCacheFileOffset    = (uint32_t)offsetInRegion;
            loc.dstCacheSegmentSize   = (uint32_t)align(sizeOfSections, 12);
            loc.dstCacheFileSize      = (uint32_t)align(sizeOfSections, 12);
            loc.copySegmentSize       = (uint32_t)sizeOfSections;
            loc.srcSegmentIndex       = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }

    // move read-only segments to end of TEXT
    if ( _archLayout->textAndDataMaxSize != 0 ) {
        for (DylibInfo& dylib : _sortedDylibs) {
            __block uint64_t textSegVmAddr = 0;
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                    textSegVmAddr = segInfo.vmAddr;
                if ( segInfo.protections != VM_PROT_READ )
                    return;
                if ( strcmp(segInfo.segName, "__LINKEDIT") == 0 )
                    return;

                // Keep segments 4K or more aligned
                addr = align(addr, std::max((int)segInfo.p2align, (int)12));
                uint64_t offsetInRegion = addr - _readExecuteRegion.unslidLoadAddress;
                SegmentMappingInfo loc;
                loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
                loc.segName               = segInfo.segName;
                loc.dstSegment            = _readExecuteRegion.buffer + offsetInRegion;
                loc.dstCacheUnslidAddress = addr;
                loc.dstCacheFileOffset    = (uint32_t)(_readExecuteRegion.cacheFileOffset + offsetInRegion);
                loc.dstCacheSegmentSize   = (uint32_t)align(segInfo.sizeOfSections, 12);
                loc.dstCacheFileSize      = (uint32_t)segInfo.sizeOfSections;
                loc.copySegmentSize       = (uint32_t)segInfo.sizeOfSections;
                loc.srcSegmentIndex       = segInfo.segIndex;
                dylib.cacheLocation.push_back(loc);
                addr += loc.dstCacheSegmentSize;
            });
        }
    }

    // reserve space for objc optimization tables and deduped strings
    uint64_t objcReadOnlyBufferVMAddr = addr;
    _objcReadOnlyBuffer = _readExecuteRegion.buffer + (addr - _readExecuteRegion.unslidLoadAddress);

    // First the strings as we'll fill in the objc tables later in the optimizer
    for (const char* section : CacheCoalescedText::SupportedSections) {
        CacheCoalescedText::StringSection& cacheStringSection = _coalescedText.getSectionData(section);
        cacheStringSection.bufferAddr   = _readExecuteRegion.buffer + (addr - _readExecuteRegion.unslidLoadAddress);
        cacheStringSection.bufferVMAddr = addr;
        addr += cacheStringSection.bufferSize;
    }

    addr = align(addr, 14);
    _objcReadOnlyBufferSizeUsed = addr - objcReadOnlyBufferVMAddr;

    uint32_t totalSelectorRefCount = (uint32_t)_selectorStringsFromExecutables;
    uint32_t totalClassDefCount    = 0;
    uint32_t totalProtocolDefCount = 0;
    for (DylibInfo& dylib : _sortedDylibs) {
        dyld3::MachOAnalyzer::ObjCInfo info = dylib.input->mappedFile.mh->getObjCInfo();
        totalSelectorRefCount   += info.selRefCount;
        totalClassDefCount      += info.classDefCount;
        totalProtocolDefCount   += info.protocolDefCount;
    }

    // now that shared cache coalesces all selector strings, use that better count
    uint32_t coalescedSelectorCount = (uint32_t)_coalescedText.objcMethNames.stringsToOffsets.size();
    if ( coalescedSelectorCount > totalSelectorRefCount )
        totalSelectorRefCount = coalescedSelectorCount;
    addr += align(computeReadOnlyObjC(totalSelectorRefCount, totalClassDefCount, totalProtocolDefCount), 14);
    _objcReadOnlyBufferSizeAllocated = addr - objcReadOnlyBufferVMAddr;
1575 // align TEXT region end
1576 uint64_t endTextAddress
= align(addr
, _archLayout
->sharedRegionAlignP2
);
1577 _readExecuteRegion
.bufferSize
= endTextAddress
- _readExecuteRegion
.unslidLoadAddress
;
1578 _readExecuteRegion
.sizeInUse
= _readExecuteRegion
.bufferSize
;

    // assign __DATA* addresses
    if ( _archLayout->sharedRegionsAreDiscontiguous )
        addr = _archLayout->sharedMemoryStart + 0x60000000;
    else
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
    _readWriteRegion.buffer            = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
    _readWriteRegion.bufferSize        = 0;
    _readWriteRegion.sizeInUse         = 0;
    _readWriteRegion.unslidLoadAddress = addr;
    _readWriteRegion.cacheFileOffset   = _readExecuteRegion.sizeInUse;
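
    // Annotation (not in the original source): when sharedRegionsAreDiscontiguous
    // is set, the three mappings are pinned at fixed offsets from the region base:
    // RW at sharedMemoryStart + 0x60000000 here, and RO at + 0xA0000000 further
    // below. For all other layouts the regions are packed back to back, separated
    // only by sharedRegionPadding and rounded to sharedRegionAlignP2.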

    // layout all __DATA_CONST segments
    __block int dataConstSegmentCount = 0;
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( _options.platform == dyld3::Platform::watchOS_simulator )
                return;
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( strcmp(segInfo.segName, "__DATA_CONST") != 0 )
                return;
            ++dataConstSegmentCount;
            // Pack __DATA_CONST segments
            addr = align(addr, segInfo.p2align);
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readWriteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName               = segInfo.segName;
            loc.dstSegment            = _readWriteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress = addr;
            loc.dstCacheFileOffset    = (uint32_t)(_readWriteRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize   = (uint32_t)segInfo.sizeOfSections;
            loc.dstCacheFileSize      = (uint32_t)copySize;
            loc.copySegmentSize       = (uint32_t)copySize;
            loc.srcSegmentIndex       = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }

    // layout all __DATA segments (and other r/w non-dirty, non-const) segments
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( _options.platform != dyld3::Platform::watchOS_simulator ) {
                if ( strcmp(segInfo.segName, "__DATA_CONST") == 0 )
                    return;
                if ( strcmp(segInfo.segName, "__DATA_DIRTY") == 0 )
                    return;
            }
            bool forcePageAlignedData = false;
            if ( _options.platform == dyld3::Platform::macOS ) {
                forcePageAlignedData = dylib.input->mappedFile.mh->hasUnalignedPointerFixups();
                //if ( forcePageAlignedData )
                //    warning("unaligned pointer in %s\n", dylib.input->mappedFile.runtimePath.c_str());
            }
            if ( (dataConstSegmentCount > 10) && !forcePageAlignedData ) {
                // Pack __DATA segments only if we also have __DATA_CONST segments
                addr = align(addr, segInfo.p2align);
            }
            else {
                // Keep __DATA segments 4K or more aligned
                addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            }
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readWriteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName               = segInfo.segName;
            loc.dstSegment            = _readWriteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress = addr;
            loc.dstCacheFileOffset    = (uint32_t)(_readWriteRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize   = (uint32_t)segInfo.sizeOfSections;
            loc.dstCacheFileSize      = (uint32_t)copySize;
            loc.copySegmentSize       = (uint32_t)copySize;
            loc.srcSegmentIndex       = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }

    // layout all __DATA_DIRTY segments, sorted (FIXME)
    const size_t dylibCount = _sortedDylibs.size();
    uint32_t dirtyDataSortIndexes[dylibCount];
    for (size_t i=0; i < dylibCount; ++i)
        dirtyDataSortIndexes[i] = (uint32_t)i;
    std::sort(&dirtyDataSortIndexes[0], &dirtyDataSortIndexes[dylibCount], [&](const uint32_t& a, const uint32_t& b) {
        const auto& orderA = _options.dirtyDataSegmentOrdering.find(_sortedDylibs[a].input->mappedFile.runtimePath);
        const auto& orderB = _options.dirtyDataSegmentOrdering.find(_sortedDylibs[b].input->mappedFile.runtimePath);
        bool foundA = (orderA != _options.dirtyDataSegmentOrdering.end());
        bool foundB = (orderB != _options.dirtyDataSegmentOrdering.end());

        // Order all __DATA_DIRTY segments specified in the order file first, in the order specified in the file,
        // followed by any other __DATA_DIRTY segments in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;
        else
            return _sortedDylibs[a].input->mappedFile.runtimePath < _sortedDylibs[b].input->mappedFile.runtimePath;
    });
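
    // Annotation (not in the original source): a worked example of the comparator,
    // using a hypothetical ordering file containing only "/usr/lib/libobjc.A.dylib".
    // Given the dylibs { "/usr/lib/libz.dylib", "/usr/lib/libobjc.A.dylib" }, the
    // listed libobjc.A.dylib sorts first (order-file rank), and the unlisted
    // libz.dylib follows in lexicographic runtimePath order.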
    addr = align(addr, 12);
    for (size_t i=0; i < dylibCount; ++i) {
        DylibInfo& dylib = _sortedDylibs[dirtyDataSortIndexes[i]];
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( _options.platform == dyld3::Platform::watchOS_simulator )
                return;
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( strcmp(segInfo.segName, "__DATA_DIRTY") != 0 )
                return;
            // Pack __DATA_DIRTY segments
            addr = align(addr, segInfo.p2align);
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readWriteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName               = segInfo.segName;
            loc.dstSegment            = _readWriteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress = addr;
            loc.dstCacheFileOffset    = (uint32_t)(_readWriteRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize   = (uint32_t)segInfo.sizeOfSections;
            loc.dstCacheFileSize      = (uint32_t)copySize;
            loc.copySegmentSize       = (uint32_t)copySize;
            loc.srcSegmentIndex       = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }

    // reserve space for objc r/w optimization tables
    _objcReadWriteBufferSizeAllocated = align(computeReadWriteObjC((uint32_t)_sortedDylibs.size(), totalProtocolDefCount), 14);
    addr = align(addr, 4); // objc r/w section contains pointer and must be at least pointer align
    _objcReadWriteBuffer = _readWriteRegion.buffer + (addr - _readWriteRegion.unslidLoadAddress);
    addr += _objcReadWriteBufferSizeAllocated;

    // align DATA region end
    uint64_t endDataAddress = align(addr, _archLayout->sharedRegionAlignP2);
    _readWriteRegion.bufferSize = endDataAddress - _readWriteRegion.unslidLoadAddress;
    _readWriteRegion.sizeInUse  = _readWriteRegion.bufferSize;

    // start read-only region
    if ( _archLayout->sharedRegionsAreDiscontiguous )
        addr = _archLayout->sharedMemoryStart + 0xA0000000;
    else
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
    _readOnlyRegion.buffer            = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
    _readOnlyRegion.bufferSize        = 0;
    _readOnlyRegion.sizeInUse         = 0;
    _readOnlyRegion.unslidLoadAddress = addr;
    _readOnlyRegion.cacheFileOffset   = _readWriteRegion.cacheFileOffset + _readWriteRegion.sizeInUse;

    // reserve space for kernel ASLR slide info at start of r/o region
    if ( _options.cacheSupportsASLR ) {
        size_t slideInfoSize = sizeof(dyld_cache_slide_info);
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info2));
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info3));
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info4));
        _slideInfoBufferSizeAllocated = align(slideInfoSize + (_readWriteRegion.sizeInUse/4096) * _archLayout->slideInfoBytesPerPage + 0x4000, _archLayout->sharedRegionAlignP2);
        _slideInfoFileOffset = _readOnlyRegion.cacheFileOffset;
        addr += _slideInfoBufferSizeAllocated;
    }
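
    // Annotation (not in the original source): this slide-info reservation is an
    // upper bound, not an exact size. It budgets slideInfoBytesPerPage for every
    // 4KB page of the DATA region, plus the largest of the four slide-info header
    // formats and 16KB (0x4000) of headroom, all rounded to the region alignment.
    // The actual slide info is emitted later; any excess simply stays unused.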

    // layout all read-only (but not LINKEDIT) segments
    if ( _archLayout->textAndDataMaxSize == 0 ) {
        for (DylibInfo& dylib : _sortedDylibs) {
            __block uint64_t textSegVmAddr = 0;
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                    textSegVmAddr = segInfo.vmAddr;
                if ( segInfo.protections != VM_PROT_READ )
                    return;
                if ( strcmp(segInfo.segName, "__LINKEDIT") == 0 )
                    return;

                // Keep segments 4K or more aligned
                addr = align(addr, std::max((int)segInfo.p2align, (int)12));
                uint64_t offsetInRegion = addr - _readOnlyRegion.unslidLoadAddress;
                SegmentMappingInfo loc;
                loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
                loc.segName               = segInfo.segName;
                loc.dstSegment            = _readOnlyRegion.buffer + offsetInRegion;
                loc.dstCacheUnslidAddress = addr;
                loc.dstCacheFileOffset    = (uint32_t)(_readOnlyRegion.cacheFileOffset + offsetInRegion);
                loc.dstCacheSegmentSize   = (uint32_t)align(segInfo.sizeOfSections, 12);
                loc.dstCacheFileSize      = (uint32_t)segInfo.sizeOfSections;
                loc.copySegmentSize       = (uint32_t)segInfo.sizeOfSections;
                loc.srcSegmentIndex       = segInfo.segIndex;
                dylib.cacheLocation.push_back(loc);
                addr += loc.dstCacheSegmentSize;
            });
        }
    }

    // layout all LINKEDIT segments (after other read-only segments), aligned to 16KB
    addr = align(addr, 14);
    _nonLinkEditReadOnlySize = addr - _readOnlyRegion.unslidLoadAddress;
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != VM_PROT_READ )
                return;
            if ( strcmp(segInfo.segName, "__LINKEDIT") != 0 )
                return;
            // Keep segments 4K or more aligned
            addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readOnlyRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment            = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName               = segInfo.segName;
            loc.dstSegment            = _readOnlyRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress = addr;
            loc.dstCacheFileOffset    = (uint32_t)(_readOnlyRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize   = (uint32_t)align(segInfo.sizeOfSections, 12);
            loc.dstCacheFileSize      = (uint32_t)copySize;
            loc.copySegmentSize       = (uint32_t)copySize;
            loc.srcSegmentIndex       = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }

    // align r/o region end
    uint64_t endReadOnlyAddress = align(addr, _archLayout->sharedRegionAlignP2);
    _readOnlyRegion.bufferSize = endReadOnlyAddress - _readOnlyRegion.unslidLoadAddress;
    _readOnlyRegion.sizeInUse  = _readOnlyRegion.bufferSize;

    //fprintf(stderr, "RX region=%p -> %p, logical addr=0x%llX\n", _readExecuteRegion.buffer, _readExecuteRegion.buffer+_readExecuteRegion.bufferSize, _readExecuteRegion.unslidLoadAddress);
    //fprintf(stderr, "RW region=%p -> %p, logical addr=0x%llX\n", _readWriteRegion.buffer, _readWriteRegion.buffer+_readWriteRegion.bufferSize, _readWriteRegion.unslidLoadAddress);
    //fprintf(stderr, "RO region=%p -> %p, logical addr=0x%llX\n", _readOnlyRegion.buffer, _readOnlyRegion.buffer+_readOnlyRegion.bufferSize, _readOnlyRegion.unslidLoadAddress);

    // sort SegmentMappingInfo for each image to be in the same order as original segments
    for (DylibInfo& dylib : _sortedDylibs) {
        std::sort(dylib.cacheLocation.begin(), dylib.cacheLocation.end(), [&](const SegmentMappingInfo& a, const SegmentMappingInfo& b) {
            return a.srcSegmentIndex < b.srcSegmentIndex;
        });
    }
}

static dyld_cache_patchable_location makePatchLocation(size_t cacheOff, uint64_t ad) {
    int64_t signedAddend = (int64_t)ad;
    assert(((signedAddend << 52) >> 52) == signedAddend);
    dyld_cache_patchable_location patch;
    patch.cacheOffset          = cacheOff;
    patch.addend               = ad;
    patch.authenticated        = 0;
    patch.usesAddressDiversity = 0;
    patch.key                  = 0;
    patch.discriminator        = 0;
    return patch;
}

static dyld_cache_patchable_location makePatchLocation(size_t cacheOff, uint64_t ad,
                                                       dyld3::MachOLoaded::ChainedFixupPointerOnDisk authInfo) {
    int64_t signedAddend = (int64_t)ad;
    assert(((signedAddend << 52) >> 52) == signedAddend);
    dyld_cache_patchable_location patch;
    patch.cacheOffset          = cacheOff;
    patch.addend               = ad;
    patch.authenticated        = authInfo.arm64e.authBind.auth;
    patch.usesAddressDiversity = authInfo.arm64e.authBind.addrDiv;
    patch.key                  = authInfo.arm64e.authBind.key;
    patch.discriminator        = authInfo.arm64e.authBind.diversity;
    return patch;
}
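
// Annotation (not in the original source): the assert in both overloads checks
// that the addend survives truncation to a small signed bitfield. Shifting a
// 64-bit value left then arithmetically right by 52 sign-extends its low 12
// bits, so the comparison only holds when the addend fits in [-2048, 2047];
// this suggests dyld_cache_patchable_location stores the addend in 12 bits.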

void SharedCacheBuilder::buildImageArray(std::vector<DyldSharedCache::FileAlias>& aliases)
{
    typedef dyld3::closure::ClosureBuilder::CachedDylibInfo CachedDylibInfo;

    // convert STL data structures to simple arrays to pass to makeDyldCacheImageArray()
    __block std::vector<CachedDylibInfo> dylibInfos;
    __block std::unordered_map<dyld3::closure::ImageNum, const dyld3::MachOLoaded*> imageNumToML;
    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        uint64_t mtime;
        uint64_t inode;
        cache->getIndexedImageEntry((uint32_t)dylibInfos.size(), mtime, inode);
        CachedDylibInfo entry;
        entry.fileInfo.fileContent = mh;
        entry.fileInfo.path        = installName;
        entry.fileInfo.sliceOffset = 0;
        entry.fileInfo.inode       = inode;
        entry.fileInfo.mtime       = mtime;
        dylibInfos.push_back(entry);
        imageNumToML[(dyld3::closure::ImageNum)(dylibInfos.size())] = (dyld3::MachOLoaded*)mh;
    });

    // Convert symlinks from STL to simple char pointers.
    std::vector<dyld3::closure::ClosureBuilder::CachedDylibAlias> dylibAliases;
    dylibAliases.reserve(aliases.size());
    for (const auto& alias : aliases)
        dylibAliases.push_back({ alias.realPath.c_str(), alias.aliasPath.c_str() });
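
    // Annotation (not in the original source): the handlers installed below are
    // callbacks the ClosureBuilder invokes while it resolves each cached dylib's
    // fixups. chainedBind rewrites whole fixup chains in place: every chain entry
    // becomes the raw unslid target vmaddr, with pointer-auth and high8 bits
    // parked in the _aslrTracker side tables so the slide-info emitter can
    // reconstruct them later. The rebase/bind handlers further down cover the
    // classic (non-chained) fixup format.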

    dyld3::closure::ClosureBuilder::CacheDylibsBindingHandlers handlers;

    handlers.chainedBind = ^(dyld3::closure::ImageNum, const dyld3::MachOLoaded* imageLoadAddress,
                             const dyld_chained_starts_in_image* starts,
                             const dyld3::Array<dyld3::closure::Image::ResolvedSymbolTarget>& targets,
                             const dyld3::Array<dyld3::closure::ClosureBuilder::ResolvedTargetInfo>& targetInfos) {
        _someDylibsUsedChainedFixups = true;
        imageLoadAddress->forEachFixupInAllChains(_diagnostics, starts, true, ^(dyld3::MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc, const dyld_chained_starts_in_segment* segInfo, bool& stop) {
            uint64_t offsetInCache;
            dyld3::closure::Image::ResolvedSymbolTarget target;
            const dyld3::closure::ClosureBuilder::ResolvedTargetInfo* targetInfo;
            dyld3::MachOLoaded::ChainedFixupPointerOnDisk orgValue;
            uint64_t targetVMAddr;
            uint64_t addend;
            bool pointerIntoCache = true;
            switch (segInfo->pointer_format) {
                case DYLD_CHAINED_PTR_ARM64E:
                    orgValue = *fixupLoc;
                    if ( fixupLoc->arm64e.bind.bind ) {
                        target     = targets[fixupLoc->arm64e.bind.ordinal];
                        targetInfo = &targetInfos[fixupLoc->arm64e.bind.ordinal];
                        switch ( target.sharedCache.kind ) {
                            case dyld3::closure::Image::ResolvedSymbolTarget::kindSharedCache:
                                offsetInCache = target.sharedCache.offset - targetInfo->addend;
                                _dylibToItsExports[targetInfo->foundInDylib].insert(offsetInCache);
                                _exportsToName[offsetInCache] = targetInfo->foundSymbolName;
                                addend = targetInfo->addend;
                                {
                                    uint16_t top16 = addend >> 48;
                                    uint8_t  top8  = addend >> 56;
                                    // if top byte is non-zero and this is not a negative addend, pull off high8
                                    if ( (top16 != 0xFFFF) && (top8 != 0) ) {
                                        _aslrTracker.setHigh8(fixupLoc, top8);
                                        addend &= 0x00FFFFFFFFFFFFFFULL;
                                    }
                                }
                                targetVMAddr = _archLayout->sharedMemoryStart + target.sharedCache.offset;
                                if ( fixupLoc->arm64e.authBind.auth ) {
                                    // store auth data in side table
                                    _aslrTracker.setAuthData(fixupLoc, fixupLoc->arm64e.authBind.diversity, fixupLoc->arm64e.authBind.addrDiv, fixupLoc->arm64e.authBind.key);
                                    _exportsToUses[offsetInCache].push_back(makePatchLocation((uint8_t*)fixupLoc - _readExecuteRegion.buffer, addend, *fixupLoc));
                                }
                                else {
                                    _exportsToUses[offsetInCache].push_back(makePatchLocation((uint8_t*)fixupLoc - _readExecuteRegion.buffer, addend));
                                    // plain binds can have addend in chain
                                    targetVMAddr += fixupLoc->arm64e.bind.addend;
                                }
                                // change location from a chain ptr into a raw pointer to the target vmaddr
                                fixupLoc->raw64 = targetVMAddr;
                                _aslrTracker.add(fixupLoc);
                                break;
                            case dyld3::closure::Image::ResolvedSymbolTarget::kindAbsolute:
                                fixupLoc->raw64 = target.absolute.value;
                                pointerIntoCache = false;
                                // don't record absolute targets for ASLR
                                _aslrTracker.remove(fixupLoc);
                                if ( (targetInfo->libOrdinal > 0) && (targetInfo->libOrdinal <= (int)(imageLoadAddress->dependentDylibCount())) ) {
                                    _missingWeakImports[fixupLoc] = imageLoadAddress->dependentDylibLoadPath(targetInfo->libOrdinal - 1);
                                }
                                break;
                            default:
                                assert(0 && "unsupported ResolvedSymbolTarget kind in dyld cache");
                        }
                    }
                    else {
                        // convert rebase chain entry to raw pointer to target vmaddr
                        if ( fixupLoc->arm64e.rebase.auth ) {
                            // store auth data in side table
                            _aslrTracker.setAuthData(fixupLoc, fixupLoc->arm64e.authRebase.diversity, fixupLoc->arm64e.authRebase.addrDiv, fixupLoc->arm64e.authRebase.key);
                            targetVMAddr    = fixupLoc->arm64e.authRebase.target;
                            fixupLoc->raw64 = targetVMAddr;
                        }
                        else {
                            targetVMAddr = fixupLoc->arm64e.rebase.target;
                            if ( targetVMAddr == CacheBuilder::kRebaseTargetInSideTableArm64e ) {
                                // target was stored in side table
                                _aslrTracker.hasRebaseTarget64(fixupLoc, &targetVMAddr);
                            }
                            // store high8 in side table
                            if ( fixupLoc->arm64e.rebase.high8 )
                                _aslrTracker.setHigh8(fixupLoc, fixupLoc->arm64e.rebase.high8);
                            fixupLoc->raw64 = targetVMAddr;
                        }
                    }
                    if ( pointerIntoCache ) {
                        assert(fixupLoc->raw64 > _readExecuteRegion.unslidLoadAddress);
                        assert(fixupLoc->raw64 < _readOnlyRegion.unslidLoadAddress+_readOnlyRegion.sizeInUse);
                    }
                    break;
                case DYLD_CHAINED_PTR_64:
                    if ( fixupLoc->generic64.bind.bind ) {
                        target     = targets[fixupLoc->generic64.bind.ordinal];
                        targetInfo = &targetInfos[fixupLoc->generic64.bind.ordinal];
                        switch ( target.sharedCache.kind ) {
                            case dyld3::closure::Image::ResolvedSymbolTarget::kindSharedCache:
                                offsetInCache = target.sharedCache.offset - targetInfo->addend;
                                _dylibToItsExports[targetInfo->foundInDylib].insert(offsetInCache);
                                _exportsToName[offsetInCache] = targetInfo->foundSymbolName;
                                addend = targetInfo->addend + fixupLoc->generic64.bind.addend;
                                {
                                    uint16_t top16 = addend >> 48;
                                    uint8_t  top8  = addend >> 56;
                                    // if top byte is non-zero and this is not a negative addend, pull off high8
                                    if ( (top16 != 0xFFFF) && (top8 != 0) ) {
                                        _aslrTracker.setHigh8(fixupLoc, top8);
                                        addend &= 0x00FFFFFFFFFFFFFFULL;
                                    }
                                }
                                // turn this bind into a flat vmaddr
                                fixupLoc->raw64 = _archLayout->sharedMemoryStart + target.sharedCache.offset + fixupLoc->generic64.bind.addend;
                                _exportsToUses[offsetInCache].push_back(makePatchLocation((uint8_t*)fixupLoc - _readExecuteRegion.buffer, addend));
                                _aslrTracker.add(fixupLoc);
                                break;
                            case dyld3::closure::Image::ResolvedSymbolTarget::kindAbsolute:
                                fixupLoc->raw64 = target.absolute.value;
                                pointerIntoCache = false;
                                // don't record absolute targets for ASLR
                                _aslrTracker.remove(fixupLoc);
                                if ( (targetInfo->libOrdinal > 0) && (targetInfo->libOrdinal <= (int)(imageLoadAddress->dependentDylibCount())) ) {
                                    _missingWeakImports[fixupLoc] = imageLoadAddress->dependentDylibLoadPath(targetInfo->libOrdinal - 1);
                                }
                                break;
                            default:
                                assert(0 && "unsupported ResolvedSymbolTarget kind in dyld cache");
                        }
                    }
                    else {
                        // convert rebase chain entry to raw pointer to target vmaddr
                        targetVMAddr = fixupLoc->generic64.rebase.target;
                        if ( targetVMAddr == CacheBuilder::kRebaseTargetInSideTableArm64 ) {
                            // target was stored in side table
                            _aslrTracker.hasRebaseTarget64(fixupLoc, &targetVMAddr);
                        }
                        // store high8 in side table
                        if ( fixupLoc->generic64.rebase.high8 )
                            _aslrTracker.setHigh8(fixupLoc, fixupLoc->generic64.rebase.high8);
                        fixupLoc->raw64 = targetVMAddr;
                    }
                    if ( pointerIntoCache ) {
                        assert(fixupLoc->raw64 > _readExecuteRegion.unslidLoadAddress);
                        assert(fixupLoc->raw64 < _readOnlyRegion.unslidLoadAddress+_readOnlyRegion.sizeInUse);
                    }
                    break;
                case DYLD_CHAINED_PTR_32:
                    if ( fixupLoc->generic32.bind.bind ) {
                        // turn this bind into a flat vmaddr pointer
                        target     = targets[fixupLoc->generic32.bind.ordinal];
                        targetInfo = &targetInfos[fixupLoc->generic32.bind.ordinal];
                        switch ( target.sharedCache.kind ) {
                            case dyld3::closure::Image::ResolvedSymbolTarget::kindSharedCache:
                                offsetInCache = target.sharedCache.offset - targetInfo->addend;
                                _dylibToItsExports[targetInfo->foundInDylib].insert(offsetInCache);
                                _exportsToName[offsetInCache] = targetInfo->foundSymbolName;
                                fixupLoc->raw32 = (uint32_t)(_archLayout->sharedMemoryStart + target.sharedCache.offset + fixupLoc->generic32.bind.addend);
                                _exportsToUses[offsetInCache].push_back(makePatchLocation((uint8_t*)fixupLoc - _readExecuteRegion.buffer, targetInfo->addend+fixupLoc->generic32.bind.addend));
                                _aslrTracker.add(fixupLoc);
                                break;
                            case dyld3::closure::Image::ResolvedSymbolTarget::kindAbsolute:
                                fixupLoc->raw32 = (uint32_t)target.absolute.value;
                                pointerIntoCache = false;
                                // don't record absolute targets for ASLR
                                _aslrTracker.remove(fixupLoc);
                                if ( (targetInfo->libOrdinal > 0) && (targetInfo->libOrdinal <= (int)(imageLoadAddress->dependentDylibCount())) ) {
                                    _missingWeakImports[fixupLoc] = imageLoadAddress->dependentDylibLoadPath(targetInfo->libOrdinal - 1);
                                }
                                break;
                            default:
                                assert(0 && "unsupported ResolvedSymbolTarget kind in dyld cache");
                        }
                    }
                    else if ( fixupLoc->generic32.rebase.target == CacheBuilder::kRebaseTargetInSideTableGeneric32 ) {
                        uint32_t targetVmAddr;
                        if ( _aslrTracker.hasRebaseTarget32(fixupLoc, &targetVmAddr) )
                            fixupLoc->raw32 = targetVmAddr;
                        else
                            assert(0 && "missing target for rebase");
                    }
                    else if ( fixupLoc->generic32.rebase.target > segInfo->max_valid_pointer ) {
                        __block const char* badImagePath = nullptr;
                        cache->forEachImage(^(const mach_header* mh, const char* installName) {
                            if ( mh == imageLoadAddress )
                                badImagePath = installName;
                        });
                        _diagnostics.error("unexpected non-pointer in chain for image at %s", badImagePath);
                        stop = true;
                        pointerIntoCache = false;
                    }
                    if ( pointerIntoCache ) {
                        assert(fixupLoc->raw32 > _readExecuteRegion.unslidLoadAddress);
                        assert(fixupLoc->raw32 < _readOnlyRegion.unslidLoadAddress+_readOnlyRegion.sizeInUse);
                    }
                    break;
                case DYLD_CHAINED_PTR_64_OFFSET:
                case DYLD_CHAINED_PTR_ARM64E_OFFSET:
                    assert(0 && "unsupported chained bind type");
                    break;
                default:
                    assert(0 && "unsupported chained bind type");
                    break;
            }
        });
    };

    handlers.rebase = ^(dyld3::closure::ImageNum imageNum, const dyld3::MachOLoaded* imageToFix, uint32_t runtimeOffset) {
        // record location in aslr tracker so kernel can slide this on page-in
        uint8_t* fixupLoc = (uint8_t*)imageToFix+runtimeOffset;
        _aslrTracker.add(fixupLoc);
    };

    handlers.bind = ^(dyld3::closure::ImageNum imageNum, const dyld3::MachOLoaded* mh,
                      uint32_t runtimeOffset, dyld3::closure::Image::ResolvedSymbolTarget target,
                      const dyld3::closure::ClosureBuilder::ResolvedTargetInfo& targetInfo) {
        uint8_t* fixupLoc = (uint8_t*)mh+runtimeOffset;

        // binder is called a second time for weak_bind info, which we ignore when building cache
        const bool weakDefUseAlreadySet = targetInfo.weakBindCoalese && _aslrTracker.has(fixupLoc);

        // do actual bind that sets pointer in image to unslid target address
        uint64_t offsetInCache;
        switch ( target.sharedCache.kind ) {
            case dyld3::closure::Image::ResolvedSymbolTarget::kindSharedCache:
                offsetInCache = target.sharedCache.offset - targetInfo.addend;
                _dylibToItsExports[targetInfo.foundInDylib].insert(offsetInCache);
                if (targetInfo.isWeakDef)
                    _dylibWeakExports.insert({ targetInfo.foundInDylib, offsetInCache });
                _exportsToUses[offsetInCache].push_back(makePatchLocation(fixupLoc - _readExecuteRegion.buffer, targetInfo.addend));
                _exportsToName[offsetInCache] = targetInfo.foundSymbolName;
                if ( !weakDefUseAlreadySet ) {
                    if ( _archLayout->is64 )
                        *((uint64_t*)fixupLoc) = _archLayout->sharedMemoryStart + target.sharedCache.offset;
                    else
                        *((uint32_t*)fixupLoc) = (uint32_t)(_archLayout->sharedMemoryStart + target.sharedCache.offset);
                    // record location in aslr tracker so kernel can slide this on page-in
                    _aslrTracker.add(fixupLoc);
                }
                break;
            case dyld3::closure::Image::ResolvedSymbolTarget::kindAbsolute:
                if ( _archLayout->is64 )
                    *((uint64_t*)fixupLoc) = target.absolute.value;
                else
                    *((uint32_t*)fixupLoc) = (uint32_t)(target.absolute.value);
                // don't record absolute targets for ASLR
                // HACK: Split seg may have added a target.  Remove it
                _aslrTracker.remove(fixupLoc);
                if ( (targetInfo.libOrdinal > 0) && (targetInfo.libOrdinal <= (int)(mh->dependentDylibCount())) ) {
                    _missingWeakImports[fixupLoc] = mh->dependentDylibLoadPath(targetInfo.libOrdinal - 1);
                }
                break;
            default:
                assert(0 && "unsupported ResolvedSymbolTarget kind in dyld cache");
        }
    };

    // build ImageArray for all dylibs in dyld cache
    dyld3::closure::PathOverrides pathOverrides;
    dyld3::closure::ClosureBuilder cb(dyld3::closure::kFirstDyldCacheImageNum, _fileSystem, cache, false, *_options.archs, pathOverrides,
                                      dyld3::closure::ClosureBuilder::AtPath::none, false, nullptr, _options.platform, &handlers);
    dyld3::Array<CachedDylibInfo> dylibs(&dylibInfos[0], dylibInfos.size(), dylibInfos.size());
    const dyld3::Array<dyld3::closure::ClosureBuilder::CachedDylibAlias> aliasesArray(dylibAliases.data(), dylibAliases.size(), dylibAliases.size());
    _imageArray = cb.makeDyldCacheImageArray(_options.optimizeStubs, dylibs, aliasesArray);
    if ( cb.diagnostics().hasError() ) {
        _diagnostics.error("%s", cb.diagnostics().errorMessage().c_str());
        return;
    }
}

static bool operator==(const dyld_cache_patchable_location& a, const dyld_cache_patchable_location& b) {
    return a.cacheOffset == b.cacheOffset;
}
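
// Annotation (not in the original source): equality on cacheOffset alone is
// what addImageArray() below relies on when it runs std::unique over each
// export's use list -- adjacent records for the same cache offset are dropped
// as duplicates even if their addend or auth bits differ.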

void SharedCacheBuilder::addImageArray()
{
    // build trie of dylib paths
    __block std::vector<DylibIndexTrie::Entry> dylibEntrys;
    _imageArray->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        dylibEntrys.push_back(DylibIndexTrie::Entry(image->path(), DylibIndex(image->imageNum()-1)));
        image->forEachAlias(^(const char* aliasPath, bool& innerStop) {
            dylibEntrys.push_back(DylibIndexTrie::Entry(aliasPath, DylibIndex(image->imageNum()-1)));
        });
    });
    DylibIndexTrie dylibsTrie(dylibEntrys);
    std::vector<uint8_t> trieBytes;
    dylibsTrie.emit(trieBytes);
    while ( (trieBytes.size() % 4) != 0 )
        trieBytes.push_back(0);

    // build set of functions to never stub-eliminate because tools may need to override them
    std::unordered_set<std::string> alwaysGeneratePatch;
    for (const char* const* p=_s_neverStubEliminateSymbols; *p != nullptr; ++p)
        alwaysGeneratePatch.insert(*p);

    // Add the patches for the image array.
    __block uint64_t numPatchImages          = _imageArray->size();
    __block uint64_t numPatchExports         = 0;
    __block uint64_t numPatchLocations       = 0;
    __block uint64_t numPatchExportNameBytes = 0;

    auto needsPatch = [&](bool dylibNeedsPatching, const dyld3::MachOLoaded* mh,
                          CacheOffset offset) -> bool {
        if (dylibNeedsPatching)
            return true;
        if (_dylibWeakExports.find({ mh, offset }) != _dylibWeakExports.end())
            return true;
        const std::string& exportName = _exportsToName[offset];
        return alwaysGeneratePatch.find(exportName) != alwaysGeneratePatch.end();
    };

    std::set<std::string> alwaysPatchDylibs;
    for (const char* const* d = _s_neverStubEliminateDylibs; *d != nullptr; ++d)
        alwaysPatchDylibs.insert(*d);

    // First calculate how much space we need
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        const dyld3::MachOLoaded* ml = (const dyld3::MachOLoaded*)mh;
        const std::set<CacheOffset>& dylibExports = _dylibToItsExports[ml];

        // On a customer cache, only store patch locations for interposable dylibs and weak binding
        bool dylibNeedsPatching = !_options.optimizeStubs || alwaysPatchDylibs.count(installName);

        uint64_t numDylibExports = 0;
        for (CacheOffset exportCacheOffset : dylibExports) {
            if (!needsPatch(dylibNeedsPatching, ml, exportCacheOffset))
                continue;
            std::vector<dyld_cache_patchable_location>& uses = _exportsToUses[exportCacheOffset];
            uses.erase(std::unique(uses.begin(), uses.end()), uses.end());
            numPatchLocations += uses.size();
            ++numDylibExports;

            std::string exportName = _exportsToName[exportCacheOffset];
            numPatchExportNameBytes += exportName.size() + 1;
        }
        numPatchExports += numDylibExports;
    });

    // Now reserve the space
    __block std::vector<dyld_cache_image_patches>      patchImages;
    __block std::vector<dyld_cache_patchable_export>   patchExports;
    __block std::vector<dyld_cache_patchable_location> patchLocations;
    __block std::vector<char>                          patchExportNames;

    patchImages.reserve(numPatchImages);
    patchExports.reserve(numPatchExports);
    patchLocations.reserve(numPatchLocations);
    patchExportNames.reserve(numPatchExportNameBytes);

    // And now fill it with the patch data
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        const dyld3::MachOLoaded* ml = (const dyld3::MachOLoaded*)mh;
        const std::set<CacheOffset>& dylibExports = _dylibToItsExports[ml];

        // On a customer cache, only store patch locations for interposable dylibs and weak binding
        bool dylibNeedsPatching = !_options.optimizeStubs || alwaysPatchDylibs.count(installName);

        // Add the patch image which points in to the exports
        dyld_cache_image_patches patchImage;
        patchImage.patchExportsStartIndex = (uint32_t)patchExports.size();
        patchImage.patchExportsCount      = 0;

        // Then add each export which points to a list of locations and a name
        for (CacheOffset exportCacheOffset : dylibExports) {
            if (!needsPatch(dylibNeedsPatching, ml, exportCacheOffset))
                continue;
            ++patchImage.patchExportsCount;
            std::vector<dyld_cache_patchable_location>& uses = _exportsToUses[exportCacheOffset];

            dyld_cache_patchable_export cacheExport;
            cacheExport.cacheOffsetOfImpl        = (uint32_t)exportCacheOffset;
            cacheExport.patchLocationsStartIndex = (uint32_t)patchLocations.size();
            cacheExport.patchLocationsCount      = (uint32_t)uses.size();
            cacheExport.exportNameOffset         = (uint32_t)patchExportNames.size();
            patchExports.push_back(cacheExport);

            // Now add the list of locations.
            patchLocations.insert(patchLocations.end(), uses.begin(), uses.end());

            // And add the export name
            const std::string& exportName = _exportsToName[exportCacheOffset];
            patchExportNames.insert(patchExportNames.end(), &exportName[0], &exportName[0] + exportName.size() + 1);
        }
        patchImages.push_back(patchImage);
    });

    while ( (patchExportNames.size() % 4) != 0 )
        patchExportNames.push_back('\0');

    uint64_t patchInfoSize = sizeof(dyld_cache_patch_info);
    patchInfoSize += sizeof(dyld_cache_image_patches) * patchImages.size();
    patchInfoSize += sizeof(dyld_cache_patchable_export) * patchExports.size();
    patchInfoSize += sizeof(dyld_cache_patchable_location) * patchLocations.size();
    patchInfoSize += patchExportNames.size();

    // check for fit
    uint64_t imageArraySize = _imageArray->size();
    size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    if ( (imageArraySize+trieBytes.size()+patchInfoSize) > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold ImageArray and Trie (buffer size=%lldMB, imageArray size=%lldMB, trie size=%luKB, patch size=%lluKB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, imageArraySize/1024/1024, trieBytes.size()/1024, patchInfoSize/1024, freeSpace/1024/1024);
        return;
    }

    // copy into cache and update header
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    dyldCache->header.dylibsImageArrayAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    dyldCache->header.dylibsImageArraySize = imageArraySize;
    dyldCache->header.dylibsTrieAddr       = dyldCache->header.dylibsImageArrayAddr + imageArraySize;
    dyldCache->header.dylibsTrieSize       = trieBytes.size();
    ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, _imageArray, imageArraySize);
    ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse + imageArraySize, &trieBytes[0], trieBytes.size());

    // Also write out the patch info
    dyldCache->header.patchInfoAddr = dyldCache->header.dylibsTrieAddr + dyldCache->header.dylibsTrieSize;
    dyldCache->header.patchInfoSize = patchInfoSize;
    dyld_cache_patch_info patchInfo;
    patchInfo.patchTableArrayAddr     = dyldCache->header.patchInfoAddr + sizeof(dyld_cache_patch_info);
    patchInfo.patchTableArrayCount    = patchImages.size();
    patchInfo.patchExportArrayAddr    = patchInfo.patchTableArrayAddr + (patchInfo.patchTableArrayCount * sizeof(dyld_cache_image_patches));
    patchInfo.patchExportArrayCount   = patchExports.size();
    patchInfo.patchLocationArrayAddr  = patchInfo.patchExportArrayAddr + (patchInfo.patchExportArrayCount * sizeof(dyld_cache_patchable_export));
    patchInfo.patchLocationArrayCount = patchLocations.size();
    patchInfo.patchExportNamesAddr    = patchInfo.patchLocationArrayAddr + (patchInfo.patchLocationArrayCount * sizeof(dyld_cache_patchable_location));
    patchInfo.patchExportNamesSize    = patchExportNames.size();
    ::memcpy(_readOnlyRegion.buffer + dyldCache->header.patchInfoAddr - _readOnlyRegion.unslidLoadAddress,
             &patchInfo, sizeof(dyld_cache_patch_info));
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchTableArrayAddr - _readOnlyRegion.unslidLoadAddress,
             &patchImages[0], sizeof(patchImages[0]) * patchImages.size());
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchExportArrayAddr - _readOnlyRegion.unslidLoadAddress,
             &patchExports[0], sizeof(patchExports[0]) * patchExports.size());
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchLocationArrayAddr - _readOnlyRegion.unslidLoadAddress,
             &patchLocations[0], sizeof(patchLocations[0]) * patchLocations.size());
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchExportNamesAddr - _readOnlyRegion.unslidLoadAddress,
             &patchExportNames[0], patchExportNames.size());
    _readOnlyRegion.sizeInUse += align(imageArraySize+trieBytes.size()+patchInfoSize,14);

    // Free the underlying image array buffer
    _imageArray->deallocate();
}

void SharedCacheBuilder::addOtherImageArray(const std::vector<LoadedMachO>& otherDylibsAndBundles, std::vector<const LoadedMachO*>& overflowDylibs)
{
    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    dyld3::closure::PathOverrides pathOverrides;
    dyld3::closure::FileSystemNull nullFileSystem;
    dyld3::closure::ClosureBuilder cb(dyld3::closure::kFirstOtherOSImageNum, nullFileSystem, cache, false, *_options.archs, pathOverrides,
                                      dyld3::closure::ClosureBuilder::AtPath::none, false, nullptr, _options.platform);

    // make ImageArray for other dylibs and bundles
    STACK_ALLOC_ARRAY(dyld3::closure::LoadedFileInfo, others, otherDylibsAndBundles.size() + overflowDylibs.size());
    for (const LoadedMachO& other : otherDylibsAndBundles) {
        if ( !contains(other.loadedFileInfo.path, "staged_system_apps/") )
            others.push_back(other.loadedFileInfo);
    }

    for (const LoadedMachO* dylib : overflowDylibs) {
        if (dylib->mappedFile.mh->canHavePrecomputedDlopenClosure(dylib->mappedFile.runtimePath.c_str(), ^(const char*) {}) )
            others.push_back(dylib->loadedFileInfo);
    }

    // Sort the others array by name so that it is deterministic
    std::sort(others.begin(), others.end(),
              [](const dyld3::closure::LoadedFileInfo& a, const dyld3::closure::LoadedFileInfo& b) {
                  // Sort mac before iOSMac
                  bool isIOSMacA = strncmp(a.path, "/System/iOSSupport/", 19) == 0;
                  bool isIOSMacB = strncmp(b.path, "/System/iOSSupport/", 19) == 0;
                  if (isIOSMacA != isIOSMacB)
                      return !isIOSMacA;
                  return strcmp(a.path, b.path) < 0;
              });

    const dyld3::closure::ImageArray* otherImageArray = cb.makeOtherDylibsImageArray(others, (uint32_t)_sortedDylibs.size());

    // build trie of paths
    __block std::vector<DylibIndexTrie::Entry> otherEntrys;
    otherImageArray->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        if ( !image->isInvalid() )
            otherEntrys.push_back(DylibIndexTrie::Entry(image->path(), DylibIndex(image->imageNum())));
    });
    DylibIndexTrie dylibsTrie(otherEntrys);
    std::vector<uint8_t> trieBytes;
    dylibsTrie.emit(trieBytes);
    while ( (trieBytes.size() % 4) != 0 )
        trieBytes.push_back(0);

    // check for fit
    uint64_t imageArraySize = otherImageArray->size();
    size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    if ( imageArraySize+trieBytes.size() > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold ImageArray and Trie (buffer size=%lldMB, imageArray size=%lldMB, trie size=%luKB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, imageArraySize/1024/1024, trieBytes.size()/1024, freeSpace/1024/1024);
        return;
    }

    // copy into cache and update header
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    dyldCache->header.otherImageArrayAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    dyldCache->header.otherImageArraySize = imageArraySize;
    dyldCache->header.otherTrieAddr       = dyldCache->header.otherImageArrayAddr + imageArraySize;
    dyldCache->header.otherTrieSize       = trieBytes.size();
    ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, otherImageArray, imageArraySize);
    ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse + imageArraySize, &trieBytes[0], trieBytes.size());
    _readOnlyRegion.sizeInUse += align(imageArraySize+trieBytes.size(),14);

    // Free the underlying buffer
    otherImageArray->deallocate();
}

void SharedCacheBuilder::addClosures(const std::vector<LoadedMachO>& osExecutables)
{
    const DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;

    __block std::vector<Diagnostics> osExecutablesDiags;
    __block std::vector<const dyld3::closure::LaunchClosure*> osExecutablesClosures;
    osExecutablesDiags.resize(osExecutables.size());
    osExecutablesClosures.resize(osExecutables.size());

    dispatch_apply(osExecutables.size(), DISPATCH_APPLY_AUTO, ^(size_t index) {
        const LoadedMachO& loadedMachO = osExecutables[index];
        // don't pre-build closures for staged apps into dyld cache, since they won't run from that location
        if ( startsWith(loadedMachO.mappedFile.runtimePath, "/private/var/staged_system_apps/") ) {
            return;
        }
        dyld3::closure::PathOverrides pathOverrides;
        dyld3::closure::ClosureBuilder builder(dyld3::closure::kFirstLaunchClosureImageNum, _fileSystem, dyldCache, false, *_options.archs, pathOverrides,
                                               dyld3::closure::ClosureBuilder::AtPath::all, false, nullptr, _options.platform, nullptr);
        bool issetuid = false;
        if ( this->_options.platform == dyld3::Platform::macOS || dyld3::MachOFile::isSimulatorPlatform(this->_options.platform) )
            _fileSystem.fileExists(loadedMachO.loadedFileInfo.path, nullptr, nullptr, &issetuid, nullptr);
        const dyld3::closure::LaunchClosure* mainClosure = builder.makeLaunchClosure(loadedMachO.loadedFileInfo, issetuid);
        if ( builder.diagnostics().hasError() ) {
            osExecutablesDiags[index].error("%s", builder.diagnostics().errorMessage().c_str());
        }
        else {
            assert(mainClosure != nullptr);
            osExecutablesClosures[index] = mainClosure;
        }
    });

    std::map<std::string, const dyld3::closure::LaunchClosure*> closures;
    for (uint64_t i = 0, e = osExecutables.size(); i != e; ++i) {
        const LoadedMachO& loadedMachO = osExecutables[i];
        const Diagnostics& diag = osExecutablesDiags[i];
        if (diag.hasError()) {
            if ( _options.verbose ) {
                _diagnostics.warning("building closure for '%s': %s", loadedMachO.mappedFile.runtimePath.c_str(), diag.errorMessage().c_str());
                for (const std::string& warn : diag.warnings() )
                    _diagnostics.warning("%s", warn.c_str());
            }
            if ( loadedMachO.inputFile && (loadedMachO.inputFile->mustBeIncluded()) ) {
                loadedMachO.inputFile->diag.error("%s", diag.errorMessage().c_str());
            }
        } else {
            // Note, a closure could be null here if it has a path we skip.
            if (osExecutablesClosures[i] != nullptr)
                closures[loadedMachO.mappedFile.runtimePath] = osExecutablesClosures[i];
        }
    }

    osExecutablesDiags.clear();
    osExecutablesClosures.clear();

    // preflight space needed
    size_t closuresSpace = 0;
    for (const auto& entry : closures) {
        closuresSpace += entry.second->size();
    }
    size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    if ( closuresSpace > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold all closures (buffer size=%lldMB, closures size=%ldMB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, closuresSpace/1024/1024, freeSpace/1024/1024);
        return;
    }

    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->header.progClosuresAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    uint8_t* closuresBase = _readOnlyRegion.buffer + _readOnlyRegion.sizeInUse;
    std::vector<DylibIndexTrie::Entry> closureEntrys;
    uint32_t currentClosureOffset = 0;
    for (const auto& entry : closures) {
        const dyld3::closure::LaunchClosure* closure = entry.second;
        closureEntrys.push_back(DylibIndexTrie::Entry(entry.first, DylibIndex(currentClosureOffset)));
        size_t size = closure->size();
        assert((size % 4) == 0);
        memcpy(closuresBase+currentClosureOffset, closure, size);
        currentClosureOffset += size;
        closure->deallocate();
    }
    cache->header.progClosuresSize = currentClosureOffset;
    _readOnlyRegion.sizeInUse += currentClosureOffset;
    freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    // build trie of indexes into closures list
    DylibIndexTrie closureTrie(closureEntrys);
    std::vector<uint8_t> trieBytes;
    closureTrie.emit(trieBytes);
    while ( (trieBytes.size() % 8) != 0 )
        trieBytes.push_back(0);
    if ( trieBytes.size() > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold all closures trie (buffer size=%lldMB, trie size=%ldMB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, trieBytes.size()/1024/1024, freeSpace/1024/1024);
        return;
    }
    memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, &trieBytes[0], trieBytes.size());
    cache->header.progClosuresTrieAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    cache->header.progClosuresTrieSize = trieBytes.size();
    _readOnlyRegion.sizeInUse += trieBytes.size();
    _readOnlyRegion.sizeInUse = align(_readOnlyRegion.sizeInUse, 14);
}

bool SharedCacheBuilder::writeCache(void (^cacheSizeCallback)(uint64_t size), bool (^copyCallback)(const uint8_t* src, uint64_t size, uint64_t dstOffset))
{
    const dyld_cache_header*       cacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
    const dyld_cache_mapping_info* mappings    = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + cacheHeader->mappingOffset);
    assert(_readExecuteRegion.sizeInUse       == mappings[0].size);
    assert(_readWriteRegion.sizeInUse         == mappings[1].size);
    assert(_readOnlyRegion.sizeInUse          == mappings[2].size);
    assert(_readExecuteRegion.cacheFileOffset == mappings[0].fileOffset);
    assert(_readWriteRegion.cacheFileOffset   == mappings[1].fileOffset);
    assert(_readOnlyRegion.cacheFileOffset    == mappings[2].fileOffset);
    assert(_codeSignatureRegion.sizeInUse     == cacheHeader->codeSignatureSize);
    assert(cacheHeader->codeSignatureOffset   == mappings[2].fileOffset+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse);
    cacheSizeCallback(_readExecuteRegion.sizeInUse+_readWriteRegion.sizeInUse+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse+_codeSignatureRegion.sizeInUse);
    bool fullyWritten = copyCallback(_readExecuteRegion.buffer, _readExecuteRegion.sizeInUse, mappings[0].fileOffset);
    fullyWritten &= copyCallback(_readWriteRegion.buffer, _readWriteRegion.sizeInUse, mappings[1].fileOffset);
    fullyWritten &= copyCallback(_readOnlyRegion.buffer, _readOnlyRegion.sizeInUse, mappings[2].fileOffset);
    if ( _localSymbolsRegion.sizeInUse != 0 ) {
        assert(cacheHeader->localSymbolsOffset == mappings[2].fileOffset+_readOnlyRegion.sizeInUse);
        fullyWritten &= copyCallback(_localSymbolsRegion.buffer, _localSymbolsRegion.sizeInUse, cacheHeader->localSymbolsOffset);
    }
    fullyWritten &= copyCallback(_codeSignatureRegion.buffer, _codeSignatureRegion.sizeInUse, cacheHeader->codeSignatureOffset);
    return fullyWritten;
}
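
// Annotation (not in the original source): writeCache() is deliberately
// transport-agnostic. The first callback learns the total output size, the
// second copies one region at a time to a destination offset; writeFile()
// below backs them with ftruncate/pwrite while writeBuffer() backs them with
// malloc/memcpy, so the same assertions and region bookkeeping cover both
// output paths.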

void SharedCacheBuilder::writeFile(const std::string& path)
{
    std::string pathTemplate = path + "-XXXXXX";
    size_t templateLen = strlen(pathTemplate.c_str())+2;
    BLOCK_ACCCESSIBLE_ARRAY(char, pathTemplateSpace, templateLen);
    strlcpy(pathTemplateSpace, pathTemplate.c_str(), templateLen);
    int fd = mkstemp(pathTemplateSpace);
    if ( fd != -1 ) {
        auto cacheSizeCallback = ^(uint64_t size) {
            // if making macOS dyld cache for current OS into standard location
            if ( (_options.platform == dyld3::Platform::macOS) && startsWith(path, MACOSX_DYLD_SHARED_CACHE_DIR) ) {
                // <rdar://48687550> pin cache file to SSD on fusion drives
                apfs_data_pin_location_t where = APFS_PIN_DATA_TO_MAIN;
                ::fsctl(pathTemplateSpace, APFSIOC_PIN_DATA, &where, 0);
            }
            // set final cache file size (may help defragment file)
            ::ftruncate(fd, size);
        };
        auto copyCallback = ^(const uint8_t* src, uint64_t size, uint64_t dstOffset) {
            uint64_t writtenSize = pwrite(fd, src, size, dstOffset);
            return writtenSize == size;
        };
        // <rdar://problem/55370916> TOCTOU: verify path is still a realpath (not changed)
        char tempPath[MAXPATHLEN];
        if ( ::fcntl(fd, F_GETPATH, tempPath) == 0 ) {
            size_t tempPathLen = strlen(tempPath);
            if ( tempPathLen > 7 )
                tempPath[tempPathLen-7] = '\0'; // remove trailing -xxxxxx
            if ( path != tempPath ) {
                _diagnostics.error("output file path changed from: '%s' to: '%s'", path.c_str(), tempPath);
                ::close(fd);
                return;
            }
        }
        else {
            _diagnostics.error("unable to fcntl(fd, F_GETPATH) on output file");
            ::close(fd);
            return;
        }
        bool fullyWritten = writeCache(cacheSizeCallback, copyCallback);
        if ( fullyWritten ) {
            ::fchmod(fd, S_IRUSR|S_IRGRP|S_IROTH); // mkstemp() makes file "rw-------", switch it to "r--r--r--"
            // <rdar://problem/55370916> TOCTOU: verify path is still a realpath (not changed)
            char resolvedPath[PATH_MAX];
            ::realpath(path.c_str(), resolvedPath);
            // Note: if the target cache file does not already exist, realpath() will return NULL, but still fill in the path buffer
            if ( path != resolvedPath ) {
                _diagnostics.error("output file path changed from: '%s' to: '%s'", path.c_str(), resolvedPath);
                return;
            }
            if ( ::rename(pathTemplateSpace, path.c_str()) == 0) {
                ::close(fd);
                return; // success
            }
        }
        else {
            _diagnostics.error("could not write file %s", pathTemplateSpace);
        }
        ::close(fd);
        ::unlink(pathTemplateSpace);
    }
    else {
        _diagnostics.error("could not open file %s", pathTemplateSpace);
    }
}

void SharedCacheBuilder::writeBuffer(uint8_t*& buffer, uint64_t& bufferSize) {
    auto cacheSizeCallback = ^(uint64_t size) {
        buffer = (uint8_t*)malloc(size);
        bufferSize = size;
    };
    auto copyCallback = ^(const uint8_t* src, uint64_t size, uint64_t dstOffset) {
        memcpy(buffer + dstOffset, src, size);
        return true;
    };
    bool fullyWritten = writeCache(cacheSizeCallback, copyCallback);
    assert(fullyWritten);
}
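
// Annotation (not in the original source): a minimal usage sketch for
// writeBuffer(); 'builder' is hypothetical, and the caller owns (and must
// free) the malloc'd buffer:
//
//     uint8_t* buf     = nullptr;
//     uint64_t bufSize = 0;
//     builder.writeBuffer(buf, bufSize);
//     // ... hand buf/bufSize to a consumer ...
//     ::free(buf);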

void SharedCacheBuilder::writeMapFile(const std::string& path)
{
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    std::string mapContent = cache->mapFile();
    safeSave(mapContent.c_str(), mapContent.size(), path);
}

std::string SharedCacheBuilder::getMapFileBuffer(const std::string& cacheDisposition) const
{
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    return cache->generateJSONMap(cacheDisposition.c_str());
}

void SharedCacheBuilder::markPaddingInaccessible()
{
    // region between RX and RW
    uint8_t* startPad1 = _readExecuteRegion.buffer+_readExecuteRegion.sizeInUse;
    uint8_t* endPad1   = _readWriteRegion.buffer;
    ::vm_protect(mach_task_self(), (vm_address_t)startPad1, endPad1-startPad1, false, 0);

    // region between RW and RO
    uint8_t* startPad2 = _readWriteRegion.buffer+_readWriteRegion.sizeInUse;
    uint8_t* endPad2   = _readOnlyRegion.buffer;
    ::vm_protect(mach_task_self(), (vm_address_t)startPad2, endPad2-startPad2, false, 0);
}

void SharedCacheBuilder::forEachCacheDylib(void (^callback)(const std::string& path)) {
    for (const DylibInfo& dylibInfo : _sortedDylibs)
        callback(dylibInfo.runtimePath);
}

uint64_t SharedCacheBuilder::pathHash(const char* path)
{
    uint64_t sum = 0;
    for (const char* s=path; *s != '\0'; ++s)
        sum += sum*4 + *s;
    return sum;
}

void SharedCacheBuilder::findDylibAndSegment(const void* contentPtr, std::string& foundDylibName, std::string& foundSegName)
{
    foundDylibName = "???";
    foundSegName   = "???";
    uint64_t unslidVmAddr = ((uint8_t*)contentPtr - _readExecuteRegion.buffer) + _readExecuteRegion.unslidLoadAddress;
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        ((dyld3::MachOLoaded*)mh)->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
            if ( (unslidVmAddr >= info.vmAddr) && (unslidVmAddr < (info.vmAddr+info.vmSize)) ) {
                foundDylibName = installName;
                foundSegName   = info.segName;
                stop           = true;
            }
        });
    });
}

void SharedCacheBuilder::fipsSign()
{
    // find libcorecrypto.dylib in cache being built
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    __block const dyld3::MachOLoaded* ml = nullptr;
    dyldCache->forEachImage(^(const mach_header* mh, const char* installName) {
        if ( strcmp(installName, "/usr/lib/system/libcorecrypto.dylib") == 0 )
            ml = (dyld3::MachOLoaded*)mh;
    });
    if ( ml == nullptr ) {
        _diagnostics.warning("Could not find libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }

    // find location in libcorecrypto.dylib to store hash of __text section
    uint64_t hashStoreSize;
    const void* hashStoreLocation = ml->findSectionContent("__TEXT", "__fips_hmacs", hashStoreSize);
    if ( hashStoreLocation == nullptr ) {
        _diagnostics.warning("Could not find __TEXT/__fips_hmacs section in libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }
    if ( hashStoreSize != 32 ) {
        _diagnostics.warning("__TEXT/__fips_hmacs section in libcorecrypto.dylib is not 32 bytes in size, skipping FIPS sealing");
        return;
    }

    // compute hmac hash of __text section
    uint64_t textSize;
    const void* textLocation = ml->findSectionContent("__TEXT", "__text", textSize);
    if ( textLocation == nullptr ) {
        _diagnostics.warning("Could not find __TEXT/__text section in libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }
    unsigned char hmac_key = 0;
    CCHmac(kCCHmacAlgSHA256, &hmac_key, 1, textLocation, textSize, (void*)hashStoreLocation); // store hash directly into hashStoreLocation
}
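
// Annotation (not in the original source): the FIPS "seal" above is an
// HMAC-SHA256 of libcorecrypto's __text section, keyed with the fixed
// one-byte key { 0 }, written into the dylib's own __fips_hmacs section.
// corecrypto's power-on self-test can then recompute the HMAC over its
// mapped __text and compare it against this stored value to detect
// modification of the code in the cache.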
2701 void SharedCacheBuilder::codeSign()
2703 uint8_t dscHashType
;
2704 uint8_t dscHashSize
;
2705 uint32_t dscDigestFormat
;
2708 // select which codesigning hash
2709 switch (_options
.codeSigningDigestMode
) {
2710 case DyldSharedCache::Agile
:
2712 // Fall through to SHA1, because the main code directory remains SHA1 for compatibility.
2713 [[clang::fallthrough]];
2714 case DyldSharedCache::SHA1only
:
2715 #pragma clang diagnostic push
2716 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2717 dscHashType
= CS_HASHTYPE_SHA1
;
2718 dscHashSize
= CS_HASH_SIZE_SHA1
;
2719 dscDigestFormat
= kCCDigestSHA1
;
2720 #pragma clang diagnostic pop
2722 case DyldSharedCache::SHA256only
:
2723 dscHashType
= CS_HASHTYPE_SHA256
;
2724 dscHashSize
= CS_HASH_SIZE_SHA256
;
2725 dscDigestFormat
= kCCDigestSHA256
;
2728 _diagnostics
.error("codeSigningDigestMode has unknown, unexpected value %d, bailing out.",
2729 _options
.codeSigningDigestMode
);
    std::string cacheIdentifier = "com.apple.dyld.cache.";
    cacheIdentifier += _options.archs->name();
    if ( _options.dylibsRemovedDuringMastering ) {
        if ( _options.optimizeStubs )
            cacheIdentifier += ".release";
        else
            cacheIdentifier += ".development";
    }
    // get pointers into shared cache buffer
    size_t inBbufferSize = _readExecuteRegion.sizeInUse+_readWriteRegion.sizeInUse+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse;

    const uint16_t pageSize = _archLayout->csPageSize;

    // layout code signature contents
    uint32_t blobCount     = agile ? 4 : 3;
    size_t   idSize        = cacheIdentifier.size()+1; // +1 for terminating 0
    uint32_t slotCount     = (uint32_t)((inBbufferSize + pageSize - 1) / pageSize);
    uint32_t xSlotCount    = CSSLOT_REQUIREMENTS;
    size_t   idOffset      = offsetof(CS_CodeDirectory, end_withExecSeg);
    size_t   hashOffset    = idOffset+idSize + dscHashSize*xSlotCount;
    size_t   hash256Offset = idOffset+idSize + CS_HASH_SIZE_SHA256*xSlotCount;
    size_t   cdSize        = hashOffset + (slotCount * dscHashSize);
    size_t   cd256Size     = agile ? hash256Offset + (slotCount * CS_HASH_SIZE_SHA256) : 0;
    size_t   reqsSize      = 12;
    size_t   cmsSize       = sizeof(CS_Blob);
    size_t   cdOffset      = sizeof(CS_SuperBlob) + blobCount*sizeof(CS_BlobIndex);
    size_t   cd256Offset   = cdOffset + cdSize;
    size_t   reqsOffset    = cd256Offset + cd256Size; // equals cdOffset + cdSize if not agile
    size_t   cmsOffset     = reqsOffset + reqsSize;
    size_t   sbSize        = cmsOffset + cmsSize;
    size_t   sigSize       = align(sbSize, 14);       // keep whole cache 16KB aligned
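
    // Blob layout implied by the offsets above (sketch):
    //   CS_SuperBlob header | CS_BlobIndex[blobCount]
    //   cdOffset:    main CS_CodeDirectory (identifier string, special-slot
    //                hashes, then one hash per pageSize page of the cache)
    //   cd256Offset: second, SHA256 CS_CodeDirectory (agile mode only)
    //   reqsOffset:  empty CS_RequirementsBlob
    //   cmsOffset:   empty CS_Blob CMS wrapper
    // align(sbSize, 14) rounds the signature up to 2^14 = 16KB.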

    // allocate space for blob
    vm_address_t codeSigAlloc;
    if ( vm_allocate(mach_task_self(), &codeSigAlloc, sigSize, VM_FLAGS_ANYWHERE) != 0 ) {
        _diagnostics.error("could not allocate code signature buffer");
        return;
    }
    _codeSignatureRegion.buffer     = (uint8_t*)codeSigAlloc;
    _codeSignatureRegion.bufferSize = sigSize;
    _codeSignatureRegion.sizeInUse  = sigSize;

    // create overall code signature which is a superblob
    CS_SuperBlob* sb = reinterpret_cast<CS_SuperBlob*>(_codeSignatureRegion.buffer);
    sb->magic           = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
    sb->length          = htonl(sbSize);
    sb->count           = htonl(blobCount);
    sb->index[0].type   = htonl(CSSLOT_CODEDIRECTORY);
    sb->index[0].offset = htonl(cdOffset);
    sb->index[1].type   = htonl(CSSLOT_REQUIREMENTS);
    sb->index[1].offset = htonl(reqsOffset);
    sb->index[2].type   = htonl(CSSLOT_CMS_SIGNATURE);
    sb->index[2].offset = htonl(cmsOffset);
    if ( agile ) {
        sb->index[3].type   = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES + 0);
        sb->index[3].offset = htonl(cd256Offset);
    }

    // fill in empty requirements
    CS_RequirementsBlob* reqs = (CS_RequirementsBlob*)(((char*)sb)+reqsOffset);
    reqs->magic  = htonl(CSMAGIC_REQUIREMENTS);
    reqs->length = htonl(sizeof(CS_RequirementsBlob));
    reqs->data   = 0;

    // initialize fixed fields of Code Directory
    CS_CodeDirectory* cd = (CS_CodeDirectory*)(((char*)sb)+cdOffset);
    cd->magic         = htonl(CSMAGIC_CODEDIRECTORY);
    cd->length        = htonl(cdSize);
    cd->version       = htonl(0x20400);                // supports exec segment
    cd->flags         = htonl(kSecCodeSignatureAdhoc);
    cd->hashOffset    = htonl(hashOffset);
    cd->identOffset   = htonl(idOffset);
    cd->nSpecialSlots = htonl(xSlotCount);
    cd->nCodeSlots    = htonl(slotCount);
    cd->codeLimit     = htonl(inBbufferSize);
    cd->hashSize      = dscHashSize;
    cd->hashType      = dscHashType;
    cd->platform      = 0;                             // not platform binary
    cd->pageSize      = __builtin_ctz(pageSize);       // log2(CS_PAGE_SIZE);
    cd->spare2        = 0;                             // unused (must be zero)
    cd->scatterOffset = 0;                             // not supported anymore
    cd->teamOffset    = 0;                             // no team ID
    cd->spare3        = 0;                             // unused (must be zero)
    cd->codeLimit64   = 0;                             // falls back to codeLimit

    // executable segment info
    cd->execSegBase  = htonll(_readExecuteRegion.cacheFileOffset); // base of TEXT segment
    cd->execSegLimit = htonll(_readExecuteRegion.sizeInUse);       // size of TEXT segment
    cd->execSegFlags = 0;                                          // not a main binary

    // initialize dynamic fields of Code Directory
    strcpy((char*)cd + idOffset, cacheIdentifier.c_str());

    // add special slot hashes
    uint8_t* hashSlot     = (uint8_t*)cd + hashOffset;
    uint8_t* reqsHashSlot = &hashSlot[-CSSLOT_REQUIREMENTS*dscHashSize];
    CCDigest(dscDigestFormat, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHashSlot);
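    // Special slots are indexed negatively from hashOffset: slot
    // -CSSLOT_REQUIREMENTS holds the digest of the requirements blob, which is
    // why reqsHashSlot backs up CSSLOT_REQUIREMENTS*dscHashSize bytes from the
    // first code-slot hash.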

    CS_CodeDirectory* cd256;
    uint8_t*          hash256Slot;
    uint8_t*          reqsHash256Slot;
    if ( agile ) {
        // Note that the assumption here is that the size up to the hashes is the same as for
        // sha1 code directory, and that they come last, after everything else.

        cd256 = (CS_CodeDirectory*)(((char*)sb)+cd256Offset);
        cd256->magic         = htonl(CSMAGIC_CODEDIRECTORY);
        cd256->length        = htonl(cd256Size);
        cd256->version       = htonl(0x20400);                // supports exec segment
        cd256->flags         = htonl(kSecCodeSignatureAdhoc);
        cd256->hashOffset    = htonl(hash256Offset);
        cd256->identOffset   = htonl(idOffset);
        cd256->nSpecialSlots = htonl(xSlotCount);
        cd256->nCodeSlots    = htonl(slotCount);
        cd256->codeLimit     = htonl(inBbufferSize);
        cd256->hashSize      = CS_HASH_SIZE_SHA256;
        cd256->hashType      = CS_HASHTYPE_SHA256;
        cd256->platform      = 0;                             // not platform binary
        cd256->pageSize      = __builtin_ctz(pageSize);       // log2(CS_PAGE_SIZE);
        cd256->spare2        = 0;                             // unused (must be zero)
        cd256->scatterOffset = 0;                             // not supported anymore
        cd256->teamOffset    = 0;                             // no team ID
        cd256->spare3        = 0;                             // unused (must be zero)
        cd256->codeLimit64   = 0;                             // falls back to codeLimit

        // executable segment info
        cd256->execSegBase  = cd->execSegBase;
        cd256->execSegLimit = cd->execSegLimit;
        cd256->execSegFlags = cd->execSegFlags;

        // initialize dynamic fields of Code Directory
        strcpy((char*)cd256 + idOffset, cacheIdentifier.c_str());

        // add special slot hashes
        hash256Slot     = (uint8_t*)cd256 + hash256Offset;
        reqsHash256Slot = &hash256Slot[-CSSLOT_REQUIREMENTS*CS_HASH_SIZE_SHA256];
        CCDigest(kCCDigestSHA256, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHash256Slot);
    }
    else {
        cd256           = NULL;
        hash256Slot     = NULL;
        reqsHash256Slot = NULL;
    }

    // fill in empty CMS blob for ad-hoc signing
    CS_Blob* cms = (CS_Blob*)(((char*)sb)+cmsOffset);
    cms->magic  = htonl(CSMAGIC_BLOBWRAPPER);
    cms->length = htonl(sizeof(CS_Blob));

    // alter header of cache to record size and location of code signature
    // do this *before* hashing each page
    dyld_cache_header* cache = (dyld_cache_header*)_readExecuteRegion.buffer;
    cache->codeSignatureOffset = inBbufferSize;
    cache->codeSignatureSize   = sigSize;

    const uint32_t rwSlotStart     = (uint32_t)(_readExecuteRegion.sizeInUse / pageSize);
    const uint32_t roSlotStart     = (uint32_t)(rwSlotStart + _readWriteRegion.sizeInUse / pageSize);
    const uint32_t localsSlotStart = (uint32_t)(roSlotStart + _readOnlyRegion.sizeInUse / pageSize);
    auto codeSignPage = ^(size_t i) {
        const uint8_t* code = nullptr;
        // move to correct region
        if ( i < rwSlotStart )
            code = _readExecuteRegion.buffer + (i * pageSize);
        else if ( i >= rwSlotStart && i < roSlotStart )
            code = _readWriteRegion.buffer + ((i - rwSlotStart) * pageSize);
        else if ( i >= roSlotStart && i < localsSlotStart )
            code = _readOnlyRegion.buffer + ((i - roSlotStart) * pageSize);
        else
            code = _localSymbolsRegion.buffer + ((i - localsSlotStart) * pageSize);

        CCDigest(dscDigestFormat, code, pageSize, hashSlot + (i * dscHashSize));

        if ( agile ) {
            CCDigest(kCCDigestSHA256, code, pageSize, hash256Slot + (i * CS_HASH_SIZE_SHA256));
        }
    };

    // compute hashes
    dispatch_apply(slotCount, DISPATCH_APPLY_AUTO, ^(size_t i) {
        codeSignPage(i);
    });
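
    // Code-slot digests are independent of one another, so dispatch_apply can
    // hash pages of all four mapped regions concurrently; codeSignPage()
    // translates the flat slot index i back to a region-relative page.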

    // Now that we have a code signature, compute a cache UUID by hashing the code signature blob
    {
        uint8_t* uuidLoc = cache->uuid;
        assert(uuid_is_null(uuidLoc));
        static_assert(offsetof(dyld_cache_header, uuid) / CS_PAGE_SIZE_4K == 0, "uuid is expected in the first page of the cache");
        uint8_t fullDigest[CC_SHA256_DIGEST_LENGTH];
        CC_SHA256((const void*)cd, (unsigned)cdSize, fullDigest);
        memcpy(uuidLoc, fullDigest, 16);
        // <rdar://problem/6723729> uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
        uuidLoc[6] = ( uuidLoc[6] & 0x0F ) | ( 3 << 4 );
        uuidLoc[8] = ( uuidLoc[8] & 0x3F ) | 0x80;
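        // Per RFC 4122: byte 6's high nibble carries the UUID version number
        // (set to 3 here, the name-based form) and byte 8's top two bits must
        // be 10b for the IETF variant; the masks keep the remaining digest bits.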
        // Now codesign page 0 again, because we modified it by setting uuid in header
        codeSignPage(0);
    }

    // hash of entire code directory (cdHash) uses same hash as each page
    uint8_t fullCdHash[dscHashSize];
    CCDigest(dscDigestFormat, (const uint8_t*)cd, cdSize, fullCdHash);
    // Note: cdHash is defined as first 20 bytes of hash
    memcpy(_cdHashFirst, fullCdHash, 20);
    if ( agile ) {
        uint8_t fullCdHash256[CS_HASH_SIZE_SHA256];
        CCDigest(kCCDigestSHA256, (const uint8_t*)cd256, cd256Size, fullCdHash256);
        // Note: cdHash is defined as first 20 bytes of hash, even for sha256
        memcpy(_cdHashSecond, fullCdHash256, 20);
    }
    else {
        memset(_cdHashSecond, 0, 20);
    }
}

const bool SharedCacheBuilder::agileSignature()
{
    return _options.codeSigningDigestMode == DyldSharedCache::Agile;
}

static const std::string cdHash(uint8_t hash[20])
{
    char buff[48];
    for (int i = 0; i < 20; ++i)
        sprintf(&buff[2*i], "%2.2x", hash[i]);
    return buff;
}

const std::string SharedCacheBuilder::cdHashFirst()
{
    return cdHash(_cdHashFirst);
}

const std::string SharedCacheBuilder::cdHashSecond()
{
    return cdHash(_cdHashSecond);
}

const std::string SharedCacheBuilder::uuid() const
{
    dyld_cache_header* cache = (dyld_cache_header*)_readExecuteRegion.buffer;
    uuid_string_t uuidStr;
    uuid_unparse(cache->uuid, uuidStr);
    return uuidStr;
}

template <typename P>
bool SharedCacheBuilder::makeRebaseChainV2(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyld_cache_slide_info2* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask  = (pint_t)(info->delta_mask);
    const pint_t   valueMask  = ~deltaMask;
    const pint_t   valueAdd   = (pint_t)(info->value_add);
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    const uint32_t maxDelta   = (uint32_t)(deltaMask >> deltaShift);
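
    // Worked example (illustrative, assuming delta_mask == 0x00FFFF0000000000):
    // __builtin_ctzll gives 40, so deltaShift == 38. Deltas are 4-byte aligned,
    // so (delta << 38) drops the two always-zero low bits and lands the delta
    // exactly in the mask bits; maxDelta == deltaMask >> deltaShift == 0x3FFFC,
    // the largest byte distance one chain link can encode.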

    pint_t* lastLoc   = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t  lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        _diagnostics.error("rebase pointer (0x%0lX) does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                           (long)lastValue, lastLocationOffset, segName.c_str(), dylibName.c_str());
        return false;
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta        = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        uint8_t highByte;
        if ( _aslrTracker.hasHigh8(lastLoc, &highByte) ) {
            uint64_t tbi = (uint64_t)highByte << 56;
            newLastValue |= tbi;
        }
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //fprintf(stderr, "  too big delta = %d, lastOffset=0x%03X, offset=0x%03X\n", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( value == 0 ) {
                // Steal values of 0 to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //warning("   no way to make non-rebase delta chain, terminate off=0x%03X, old value=0x%08lX, new value=0x%08lX", lastLocationOffset, (long)value, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
    }

    // we can make chain. go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t*  prevLoc    = (pint_t*)&pageContent[prevOffset];
    for (unsigned n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t*  nLoc   = (pint_t*)&pageContent[nOffset];
        uint32_t delta2 = nOffset - prevOffset;
        pint_t   value  = (pint_t)P::getP(*prevLoc);
        pint_t   newValue;
        if ( value == 0 )
            newValue = (delta2 << deltaShift);
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc    = nLoc;
    }
    uint32_t delta3 = offset - prevOffset;
    pint_t   value  = (pint_t)P::getP(*prevLoc);
    pint_t   newValue;
    if ( value == 0 )
        newValue = (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}

template <typename P>
void SharedCacheBuilder::addPageStartsV2(uint8_t* pageContent, const bool bitmap[], const dyld_cache_slide_info2* info,
                                         std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask = (pint_t)(info->delta_mask);
    const pint_t   valueMask = ~deltaMask;
    const uint32_t pageSize  = info->page_size;
    const pint_t   valueAdd  = (pint_t)(info->value_add);

    uint16_t startValue         = DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;
    for (uint32_t i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChainV2<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras > 0x3FFF ) {
                        _diagnostics.error("rebase overflow in v2 page extras");
                        return;
                    }
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc   = (pint_t*)&pageContent[lastLocationOffset];
        pint_t  lastValue = (pint_t)P::getP(*lastLoc);
        pint_t  newValue  = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE_PAGE_ATTR_END;
    }
    pageStarts.push_back(startValue);
}
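
// A page's 16-bit start value is one of: DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE,
// the word index of the page's first rebase, or (when one chain cannot cover
// the page) an index into page_extras tagged with
// DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA; that extras run lists every chain start
// and its final entry carries DYLD_CACHE_SLIDE_PAGE_ATTR_END.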

template <typename P>
void SharedCacheBuilder::writeSlideInfoV2(const bool bitmap[], unsigned dataPageCount)
{
    typedef typename P::uint_t pint_t;
    typedef typename P::E      E;
    const uint32_t pageSize = 4096;

    // fill in fixed info
    assert(_slideInfoFileOffset != 0);
    dyld_cache_slide_info2* info = (dyld_cache_slide_info2*)_readOnlyRegion.buffer;
    info->version    = 2;
    info->page_size  = pageSize;
    info->delta_mask = _archLayout->pointerDeltaMask;
    info->value_add  = _archLayout->useValueAdd ? _archLayout->sharedMemoryStart : 0;

    // set page starts and extras for each page
    std::vector<uint16_t> pageStarts;
    std::vector<uint16_t> pageExtras;
    pageStarts.reserve(dataPageCount);
    uint8_t* pageContent = _readWriteRegion.buffer;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < dataPageCount; ++i) {
        //warning("page[%d]", i);
        addPageStartsV2<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
        if ( _diagnostics.hasError() ) {
            return;
        }
        pageContent += pageSize;
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }

    // fill in computed info
    info->page_starts_offset = sizeof(dyld_cache_slide_info2);
    info->page_starts_count  = (unsigned)pageStarts.size();
    info->page_extras_offset = (unsigned)(sizeof(dyld_cache_slide_info2)+pageStarts.size()*sizeof(uint16_t));
    info->page_extras_count  = (unsigned)pageExtras.size();
    uint16_t* pageStartsBuffer = (uint16_t*)((char*)info + info->page_starts_offset);
    uint16_t* pageExtrasBuffer = (uint16_t*)((char*)info + info->page_extras_offset);
    for (unsigned i=0; i < pageStarts.size(); ++i)
        pageStartsBuffer[i] = pageStarts[i];
    for (unsigned i=0; i < pageExtras.size(); ++i)
        pageExtrasBuffer[i] = pageExtras[i];
    // update header with final size
    uint64_t slideInfoSize = align(info->page_extras_offset + pageExtras.size()*sizeof(uint16_t), _archLayout->sharedRegionAlignP2);
    if ( slideInfoSize > _slideInfoBufferSizeAllocated ) {
        _diagnostics.error("kernel slide info overflow buffer");
    }
    ((dyld_cache_header*)_readExecuteRegion.buffer)->slideInfoSize = slideInfoSize;
    //fprintf(stderr, "pageCount=%u, page_starts_count=%lu, page_extras_count=%lu\n", dataPageCount, pageStarts.size(), pageExtras.size());
}
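
// On-disk v2 slide info is therefore: a dyld_cache_slide_info2 header, then
// page_starts[page_starts_count], then page_extras[page_extras_count], with
// the total rounded up to the shared-region alignment and checked against the
// space reserved at _slideInfoFileOffset.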

#if SUPPORT_ARCH_arm64_32 || SUPPORT_ARCH_armv7k
// fits into int16_t
static bool smallValue(uint64_t value)
{
    uint32_t high = (value & 0xFFFF8000);
    return (high == 0) || (high == 0xFFFF8000);
}

template <typename P>
bool SharedCacheBuilder::makeRebaseChainV4(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyld_cache_slide_info4* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask  = (pint_t)(info->delta_mask);
    const pint_t   valueMask  = ~deltaMask;
    const pint_t   valueAdd   = (pint_t)(info->value_add);
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    const uint32_t maxDelta   = (uint32_t)(deltaMask >> deltaShift);

    pint_t* lastLoc   = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t  lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        _diagnostics.error("rebase pointer does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                           lastLocationOffset, segName.c_str(), dylibName.c_str());
        return false;
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta        = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //fprintf(stderr, "  too big delta = %d, lastOffset=0x%03X, offset=0x%03X\n", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( smallValue(value) ) {
                // Steal values of 0 to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //fprintf(stderr, "   no way to make non-rebase delta chain, terminate off=0x%03X, old value=0x%08lX, new value=0x%08lX\n",
            //        lastLocationOffset, (long)lastValue, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
    }

    // we can make chain. go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t*  prevLoc    = (pint_t*)&pageContent[prevOffset];
    for (unsigned n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t*  nLoc   = (pint_t*)&pageContent[nOffset];
        uint32_t delta2 = nOffset - prevOffset;
        pint_t   value  = (pint_t)P::getP(*prevLoc);
        pint_t   newValue;
        if ( smallValue(value) )
            newValue = (value & valueMask) | (delta2 << deltaShift);
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc    = nLoc;
    }
    uint32_t delta3 = offset - prevOffset;
    pint_t   value  = (pint_t)P::getP(*prevLoc);
    pint_t   newValue;
    if ( smallValue(value) )
        newValue = (value & valueMask) | (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}

template <typename P>
void SharedCacheBuilder::addPageStartsV4(uint8_t* pageContent, const bool bitmap[], const dyld_cache_slide_info4* info,
                                         std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask = (pint_t)(info->delta_mask);
    const pint_t   valueMask = ~deltaMask;
    const uint32_t pageSize  = info->page_size;
    const pint_t   valueAdd  = (pint_t)(info->value_add);

    uint16_t startValue         = DYLD_CACHE_SLIDE4_PAGE_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;
    for (uint32_t i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChainV4<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras >= DYLD_CACHE_SLIDE4_PAGE_INDEX ) {
                        _diagnostics.error("rebase overflow in v4 page extras");
                        return;
                    }
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc   = (pint_t*)&pageContent[lastLocationOffset];
        pint_t  lastValue = (pint_t)P::getP(*lastLoc);
        pint_t  newValue  = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE4_PAGE_EXTRA_END;
    }
    pageStarts.push_back(startValue);
}
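
// V4 mirrors V2 but serves the 32-bit pointer caches (armv7k/arm64_32). The
// one twist is smallValue(): any word whose value fits in an int16_t can be
// drafted into a chain, and its payload is stored without the value_add bias
// (plain "value & valueMask" above) so the slide machinery can recognize it
// as a non-pointer and leave it unslid.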

template <typename P>
void SharedCacheBuilder::writeSlideInfoV4(const bool bitmap[], unsigned dataPageCount)
{
    typedef typename P::uint_t pint_t;
    typedef typename P::E      E;
    const uint32_t pageSize = 4096;

    // fill in fixed info
    assert(_slideInfoFileOffset != 0);
    dyld_cache_slide_info4* info = (dyld_cache_slide_info4*)_readOnlyRegion.buffer;
    info->version    = 4;
    info->page_size  = pageSize;
    info->delta_mask = _archLayout->pointerDeltaMask;
    info->value_add  = _archLayout->useValueAdd ? _archLayout->sharedMemoryStart : 0;

    // set page starts and extras for each page
    std::vector<uint16_t> pageStarts;
    std::vector<uint16_t> pageExtras;
    pageStarts.reserve(dataPageCount);
    uint8_t* pageContent = _readWriteRegion.buffer;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < dataPageCount; ++i) {
        addPageStartsV4<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
        if ( _diagnostics.hasError() ) {
            return;
        }
        pageContent += pageSize;
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }
    // fill in computed info
    info->page_starts_offset = sizeof(dyld_cache_slide_info4);
    info->page_starts_count  = (unsigned)pageStarts.size();
    info->page_extras_offset = (unsigned)(sizeof(dyld_cache_slide_info4)+pageStarts.size()*sizeof(uint16_t));
    info->page_extras_count  = (unsigned)pageExtras.size();
    uint16_t* pageStartsBuffer = (uint16_t*)((char*)info + info->page_starts_offset);
    uint16_t* pageExtrasBuffer = (uint16_t*)((char*)info + info->page_extras_offset);
    for (unsigned i=0; i < pageStarts.size(); ++i)
        pageStartsBuffer[i] = pageStarts[i];
    for (unsigned i=0; i < pageExtras.size(); ++i)
        pageExtrasBuffer[i] = pageExtras[i];
    // update header with final size
    uint64_t slideInfoSize = align(info->page_extras_offset + pageExtras.size()*sizeof(uint16_t), _archLayout->sharedRegionAlignP2);
    if ( slideInfoSize > _slideInfoBufferSizeAllocated ) {
        _diagnostics.error("kernel slide info v4 overflow buffer, need %lldKB, have room for %lldKB", slideInfoSize, _slideInfoBufferSizeAllocated);
        return;
    }
    ((dyld_cache_header*)_readExecuteRegion.buffer)->slideInfoSize = slideInfoSize;
    //fprintf(stderr, "pageCount=%u, page_starts_count=%lu, page_extras_count=%lu\n", dataPageCount, pageStarts.size(), pageExtras.size());
}
#endif

/*
void CacheBuilder::writeSlideInfoV1()
{
    // build one 128-byte bitmap per page (4096) of DATA
    uint8_t* const dataStart = (uint8_t*)_buffer.get() + regions[1].fileOffset;
    uint8_t* const dataEnd   = dataStart + regions[1].size;
    const long bitmapSize    = (dataEnd - dataStart)/(4*8);
    uint8_t* bitmap = (uint8_t*)calloc(bitmapSize, 1);
    for (void* p : _pointersForASLR) {
        if ( (p < dataStart) || ( p > dataEnd) )
            terminate("DATA pointer for sliding, out of range\n");
        long offset = (long)((uint8_t*)p - dataStart);
        if ( (offset % 4) != 0 )
            terminate("pointer not 4-byte aligned in DATA offset 0x%08lX\n", offset);
        long byteIndex = offset / (4*8);
        long bitInByte = (offset % 32) >> 2;
        bitmap[byteIndex] |= (1 << bitInByte);
    }

    // allocate worst case size block of all slide info
    const unsigned entry_size = 4096/(8*4); // 8 bits per byte, possible pointer every 4 bytes.
    const unsigned toc_count  = (unsigned)bitmapSize/entry_size;
    dyld_cache_slide_info* slideInfo = (dyld_cache_slide_info*)((uint8_t*)_buffer + _slideInfoFileOffset);
    slideInfo->version        = 1;
    slideInfo->toc_offset     = sizeof(dyld_cache_slide_info);
    slideInfo->toc_count      = toc_count;
    slideInfo->entries_offset = (slideInfo->toc_offset+2*toc_count+127)&(-128);
    slideInfo->entries_count  = 0;
    slideInfo->entries_size   = entry_size;
    // append each unique entry
    const dyldCacheSlideInfoEntry* bitmapAsEntries = (dyldCacheSlideInfoEntry*)bitmap;
    dyldCacheSlideInfoEntry* const entriesInSlidInfo = (dyldCacheSlideInfoEntry*)((char*)slideInfo+slideInfo->entries_offset());
    int entry_count = 0;
    for (int i=0; i < toc_count; ++i) {
        const dyldCacheSlideInfoEntry* thisEntry = &bitmapAsEntries[i];
        // see if it is same as one already added
        bool found = false;
        for (int j=0; j < entry_count; ++j) {
            if ( memcmp(thisEntry, &entriesInSlidInfo[j], entry_size) == 0 ) {
                slideInfo->set_toc(i, j);
                found = true;
                break;
            }
        }
        if ( !found ) {
            // append to end
            memcpy(&entriesInSlidInfo[entry_count], thisEntry, entry_size);
            slideInfo->set_toc(i, entry_count++);
        }
    }
    slideInfo->entries_count = entry_count;
    ::free((void*)bitmap);

    _buffer.header->slideInfoSize = align(slideInfo->entries_offset + entry_count*entry_size, _archLayout->sharedRegionAlignP2);
}
*/

void SharedCacheBuilder::setPointerContentV3(dyld3::MachOLoaded::ChainedFixupPointerOnDisk* loc, uint64_t targetVMAddr, size_t next)
{
    assert(targetVMAddr > _readExecuteRegion.unslidLoadAddress);
    assert(targetVMAddr < _readOnlyRegion.unslidLoadAddress+_readOnlyRegion.sizeInUse);
    dyld3::MachOLoaded::ChainedFixupPointerOnDisk tmp;
    uint16_t diversity;
    bool     hasAddrDiv;
    uint8_t  key;
    if ( _aslrTracker.hasAuthData(loc, &diversity, &hasAddrDiv, &key) ) {
        // if base cache address cannot fit into target, then use offset
        tmp.arm64e.authRebase.target = _readExecuteRegion.unslidLoadAddress;
        if ( tmp.arm64e.authRebase.target != _readExecuteRegion.unslidLoadAddress )
            targetVMAddr -= _readExecuteRegion.unslidLoadAddress;
        loc->arm64e.authRebase.target    = targetVMAddr;
        loc->arm64e.authRebase.diversity = diversity;
        loc->arm64e.authRebase.addrDiv   = hasAddrDiv;
        loc->arm64e.authRebase.key       = key;
        loc->arm64e.authRebase.next      = next;
        loc->arm64e.authRebase.bind      = 0;
        loc->arm64e.authRebase.auth      = 1;
        assert(loc->arm64e.authRebase.target == targetVMAddr && "target truncated");
        assert(loc->arm64e.authRebase.next == next && "next location truncated");
    }
    else {
        uint8_t highByte = 0;
        _aslrTracker.hasHigh8(loc, &highByte);
        // if base cache address cannot fit into target, then use offset
        tmp.arm64e.rebase.target = _readExecuteRegion.unslidLoadAddress;
        if ( tmp.arm64e.rebase.target != _readExecuteRegion.unslidLoadAddress )
            targetVMAddr -= _readExecuteRegion.unslidLoadAddress;
        loc->arm64e.rebase.target = targetVMAddr;
        loc->arm64e.rebase.high8  = highByte;
        loc->arm64e.rebase.next   = next;
        loc->arm64e.rebase.bind   = 0;
        loc->arm64e.rebase.auth   = 0;
        assert(loc->arm64e.rebase.target == targetVMAddr && "target truncated");
        assert(loc->arm64e.rebase.next == next && "next location truncated");
    }
}

uint16_t SharedCacheBuilder::pageStartV3(uint8_t* pageContent, uint32_t pageSize, const bool bitmap[])
{
    const int maxPerPage = pageSize / 4;
    uint16_t result = DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE;
    dyld3::MachOLoaded::ChainedFixupPointerOnDisk* lastLoc = nullptr;
    for (int i=0; i < maxPerPage; ++i) {
        if ( bitmap[i] ) {
            if ( result == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE ) {
                // found first rebase location in page
                result = i * 4;
            }
            dyld3::MachOLoaded::ChainedFixupPointerOnDisk* loc = (dyld3::MachOLoaded::ChainedFixupPointerOnDisk*)(pageContent + i*4);
            if ( lastLoc != nullptr ) {
                // convert vmaddr based pointers to arm64e dyld cache chains
                setPointerContentV3(lastLoc, lastLoc->raw64, loc - lastLoc);
            }
            lastLoc = loc;
        }
    }
    if ( lastLoc != nullptr ) {
        // convert vmaddr based pointers to arm64e dyld cache chain, and mark end of chain
        setPointerContentV3(lastLoc, lastLoc->raw64, 0);
    }
    return result;
}

void SharedCacheBuilder::writeSlideInfoV3(const bool bitmap[], unsigned dataPageCount)
{
    const uint32_t pageSize = 4096;

    // fill in fixed info
    assert(_slideInfoFileOffset != 0);
    dyld_cache_slide_info3* info = (dyld_cache_slide_info3*)_readOnlyRegion.buffer;
    info->version           = 3;
    info->page_size         = pageSize;
    info->page_starts_count = dataPageCount;
    info->auth_value_add    = _archLayout->sharedMemoryStart;

    // fill in per-page starts
    uint8_t* pageContent = _readWriteRegion.buffer;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < dataPageCount; ++i) {
        info->page_starts[i] = pageStartV3(pageContent, pageSize, bitmapForPage);
        pageContent += pageSize;
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }

    // update header with final size
    dyld_cache_header* dyldCacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
    dyldCacheHeader->slideInfoSize = align(__offsetof(dyld_cache_slide_info3, page_starts[dataPageCount]), _archLayout->sharedRegionAlignP2);
    if ( dyldCacheHeader->slideInfoSize > _slideInfoBufferSizeAllocated ) {
        _diagnostics.error("kernel slide info overflow buffer");