/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <sys/types.h>

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/mach_time.h>
#include <mach/shared_region.h>
#include <apfs/apfs_fsctl.h>

#include <CommonCrypto/CommonHMAC.h>
#include <CommonCrypto/CommonDigest.h>
#include <CommonCrypto/CommonDigestSPI.h>

#include "mach-o/dyld_priv.h"
#include "ClosureBuilder.h"
#include "ClosureFileSystemNull.h"
#include "CodeSigningTypes.h"
#include "MachOFileAbstraction.hpp"
#include "SharedCacheBuilder.h"
#include "RootsChecker.h"
#include "IMPCachesBuilder.hpp"

#include "FileUtils.h"
#include "StringUtils.h"
#if __has_include("dyld_cache_config.h")
    #include "dyld_cache_config.h"
#else
    #define ARM_SHARED_REGION_START      0x1A000000ULL
    #define ARM_SHARED_REGION_SIZE       0x26000000ULL
    #define ARM64_SHARED_REGION_START    0x180000000ULL
    #define ARM64_SHARED_REGION_SIZE     0x100000000ULL
#endif

#if ARM64_SHARED_REGION_START == 0x7FFF00000000
    #define ARM64_DELTA_MASK 0x00FF000000000000
#else
    #define ARM64_DELTA_MASK 0x00FFFF0000000000
#endif

#ifndef ARM64_32_SHARED_REGION_START
    #define ARM64_32_SHARED_REGION_START 0x1A000000ULL
    #define ARM64_32_SHARED_REGION_SIZE  0x26000000ULL
#endif

#define ARMV7K_CHAIN_BITS 0xC0000000

#if BUILDING_UPDATE_DYLD_CACHE_BUILDER
    #define DISCONTIGUOUS_RX   0x7FFF20000000ULL
#else
    #define DISCONTIGUOUS_RX   0x7FFF20000000ULL // size for MRM builder
#endif

#define DISCONTIGUOUS_RW        0x7FFF80000000ULL
#define DISCONTIGUOUS_RO        0x7FFFC0000000ULL
#define DISCONTIGUOUS_RX_SIZE   (DISCONTIGUOUS_RW - DISCONTIGUOUS_RX)
#define DISCONTIGUOUS_RW_SIZE   0x40000000
#define DISCONTIGUOUS_RO_SIZE   0x3FE00000
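
// A quick sanity check of the x86_64 layout these constants describe:
//   RX  0x7FFF20000000..0x7FFF80000000 -> DISCONTIGUOUS_RX_SIZE = 0x60000000 (1.5GB)
//   RW  0x7FFF80000000..0x7FFFC0000000 -> DISCONTIGUOUS_RW_SIZE = 0x40000000 (1GB)
//   RO  0x7FFFC0000000..+0x3FE00000    -> DISCONTIGUOUS_RO_SIZE (just under 1GB)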
const SharedCacheBuilder::ArchLayout SharedCacheBuilder::_s_archLayout[] = {
    { DISCONTIGUOUS_RX,             0xEFE00000ULL,               0x40000000, 0x00FFFF0000000000, "x86_64",   CS_PAGE_SIZE_4K,  14, 2, true,  true,  true  },
    { DISCONTIGUOUS_RX,             0xEFE00000ULL,               0x40000000, 0x00FFFF0000000000, "x86_64h",  CS_PAGE_SIZE_4K,  14, 2, true,  true,  true  },
    { SHARED_REGION_BASE_I386,      SHARED_REGION_SIZE_I386,     0x00200000, 0x0,                "i386",     CS_PAGE_SIZE_4K,  12, 0, false, false, true  },
    { ARM64_SHARED_REGION_START,    ARM64_SHARED_REGION_SIZE,    0x02000000, ARM64_DELTA_MASK,   "arm64",    CS_PAGE_SIZE_4K,  14, 2, false, true,  false },
#if SUPPORT_ARCH_arm64e
    { ARM64_SHARED_REGION_START,    ARM64_SHARED_REGION_SIZE,    0x02000000, ARM64_DELTA_MASK,   "arm64e",   CS_PAGE_SIZE_16K, 14, 2, false, true,  false },
#endif
#if SUPPORT_ARCH_arm64_32
    { ARM64_32_SHARED_REGION_START, ARM64_32_SHARED_REGION_SIZE, 0x02000000, 0xC0000000,         "arm64_32", CS_PAGE_SIZE_16K, 14, 6, false, false, true  },
#endif
    { ARM_SHARED_REGION_START,      ARM_SHARED_REGION_SIZE,      0x02000000, 0xE0000000,         "armv7s",   CS_PAGE_SIZE_4K,  14, 4, false, false, true  },
    { ARM_SHARED_REGION_START,      ARM_SHARED_REGION_SIZE,      0x00400000, ARMV7K_CHAIN_BITS,  "armv7k",   CS_PAGE_SIZE_4K,  14, 4, false, false, true  },
    { 0x40000000,                   0x40000000,                  0x02000000, 0x0,                "sim-x86",  CS_PAGE_SIZE_4K,  14, 0, false, false, true  }
};
// These are functions that are interposed by Instruments.app or ASan
const char* const SharedCacheBuilder::_s_neverStubEliminateSymbols[] = {
    "__objc_autoreleasePoolPop",
    "_dispatch_barrier_async_f",
    "_dispatch_group_async",
    "_dispatch_group_async_f",
    "_dispatch_source_set_cancel_handler",
    "_dispatch_source_set_event_handler",
    "_malloc_create_zone",
    "_malloc_default_purgeable_zone",
    "_malloc_default_zone",
    "_malloc_destroy_zone",
    "_malloc_make_nonpurgeable",
    "_malloc_make_purgeable",
    "_malloc_set_zone_name",
    "_malloc_zone_from_ptr",
    "_objc_autoreleasePoolPop",
    "_objc_setProperty_atomic",
    "_objc_setProperty_atomic_copy",
    "_objc_setProperty_nonatomic",
    "_objc_setProperty_nonatomic_copy",
    "_pthread_attr_getdetachstate",
    "_pthread_attr_getguardsize",
    "_pthread_attr_getinheritsched",
    "_pthread_attr_getschedparam",
    "_pthread_attr_getschedpolicy",
    "_pthread_attr_getscope",
    "_pthread_attr_getstack",
    "_pthread_attr_getstacksize",
    "_pthread_condattr_getpshared",
    "_pthread_getschedparam",
    "_pthread_mutex_lock",
    "_pthread_mutex_unlock",
    "_pthread_mutexattr_getprioceiling",
    "_pthread_mutexattr_getprotocol",
    "_pthread_mutexattr_getpshared",
    "_pthread_mutexattr_gettype",
    "_pthread_rwlockattr_getpshared",
    // <rdar://problem/22050956> always use stubs for C++ symbols that can be overridden
};
inline uint32_t absolutetime_to_milliseconds(uint64_t abstime)
{
    return (uint32_t)(abstime/1000/1000);
}
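
// Note: the conversion above assumes a 1:1 mach timebase (abstime counted in
// nanoseconds); a fully general version would first scale by the
// mach_timebase_info() numer/denom ratio.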
// Handles building a list of input files to the SharedCacheBuilder itself.
class CacheInputBuilder {
public:
    CacheInputBuilder(const dyld3::closure::FileSystem& fileSystem,
                      const dyld3::GradedArchs& archs, dyld3::Platform reqPlatform)
        : fileSystem(fileSystem), reqArchs(archs), reqPlatform(reqPlatform) { }
    // Loads and maps any MachOs in the given list of files.
    void loadMachOs(std::vector<CacheBuilder::InputFile>& inputFiles,
                    std::vector<CacheBuilder::LoadedMachO>& dylibsToCache,
                    std::vector<CacheBuilder::LoadedMachO>& otherDylibs,
                    std::vector<CacheBuilder::LoadedMachO>& executables,
                    std::vector<CacheBuilder::LoadedMachO>& couldNotLoadFiles) {

        std::map<std::string, uint64_t> dylibInstallNameMap;
        for (CacheBuilder::InputFile& inputFile : inputFiles) {
            char realerPath[MAXPATHLEN];
            dyld3::closure::LoadedFileInfo loadedFileInfo = dyld3::MachOAnalyzer::load(inputFile.diag, fileSystem, inputFile.path, reqArchs, reqPlatform, realerPath);
            if ( (reqPlatform == dyld3::Platform::macOS) && inputFile.diag.hasError() ) {
                // Try again with iOSMac
                inputFile.diag.clearError();
                loadedFileInfo = dyld3::MachOAnalyzer::load(inputFile.diag, fileSystem, inputFile.path, reqArchs, dyld3::Platform::iOSMac, realerPath);
            }
            const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)loadedFileInfo.fileContent;
            if (ma == nullptr) {
                couldNotLoadFiles.emplace_back((CacheBuilder::LoadedMachO){ DyldSharedCache::MappedMachO(), loadedFileInfo, &inputFile });
                continue;
            }

            DyldSharedCache::MappedMachO mappedFile(inputFile.path, ma, loadedFileInfo.sliceLen, false, false,
                                                    loadedFileInfo.sliceOffset, loadedFileInfo.mtime, loadedFileInfo.inode);
            // The file can be loaded with the given slice, but we may still want to exclude it from the cache.
            if (ma->isDylib()) {
                std::string installName = ma->installName();

                const char* dylibPath = inputFile.path;
                if ( (installName != inputFile.path) && (reqPlatform == dyld3::Platform::macOS) ) {
                    // We now typically require that install names and paths match.  However symlinks may allow us to bring in a path which
                    // doesn't match its install name.
                    // For example:
                    //   /usr/lib/libstdc++.6.0.9.dylib is a real file with install name /usr/lib/libstdc++.6.dylib
                    //   /usr/lib/libstdc++.6.dylib is a symlink to /usr/lib/libstdc++.6.0.9.dylib
                    // So long as we add both paths (with one as an alias) then this will work, even if dylibs are removed from disk
                    // but the symlink remains.
                    char resolvedSymlinkPath[PATH_MAX];
                    if ( fileSystem.getRealPath(installName.c_str(), resolvedSymlinkPath) ) {
                        if (!strcmp(resolvedSymlinkPath, inputFile.path)) {
                            // Symlink is the install name and points to the on-disk dylib
                            //fprintf(stderr, "Symlink works: %s == %s\n", inputFile.path, installName.c_str());
                            dylibPath = installName.c_str();
                        }
                    }
                }

                if (!ma->canBePlacedInDyldCache(dylibPath, ^(const char* msg) {
                    inputFile.diag.warning("Dylib located at '%s' cannot be placed in cache because: %s", inputFile.path, msg);
                })) {
                    if (!ma->canHavePrecomputedDlopenClosure(inputFile.path, ^(const char* msg) {
                        inputFile.diag.verbose("Dylib located at '%s' cannot prebuild dlopen closure in cache because: %s", inputFile.path, msg);
                    }) ) {
                        fileSystem.unloadFile(loadedFileInfo);
                        continue;
                    }
                    otherDylibs.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
                    continue;
                }
                // Otherwise see if we have another file with this install name
                auto iteratorAndInserted = dylibInstallNameMap.insert(std::make_pair(installName, dylibsToCache.size()));
                if (iteratorAndInserted.second) {
                    // We inserted the dylib so we haven't seen another with this name.
                    if (installName[0] != '@' && installName != inputFile.path) {
                        inputFile.diag.warning("Dylib located at '%s' has installname '%s'", inputFile.path, installName.c_str());
                    }

                    dylibsToCache.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
                } else {
                    // We didn't insert this one so we've seen it before.
                    CacheBuilder::LoadedMachO& previousLoadedMachO = dylibsToCache[iteratorAndInserted.first->second];
                    inputFile.diag.warning("Multiple dylibs claim installname '%s' ('%s' and '%s')", installName.c_str(), inputFile.path, previousLoadedMachO.mappedFile.runtimePath.c_str());

                    // This is the "Good" one, overwrite
                    if (inputFile.path == installName) {
                        // Unload the old one
                        fileSystem.unloadFile(previousLoadedMachO.loadedFileInfo);

                        // And replace with this one.
                        previousLoadedMachO.mappedFile     = mappedFile;
                        previousLoadedMachO.loadedFileInfo = loadedFileInfo;
                    }
                }
            } else if (ma->isBundle()) {
                if (!ma->canHavePrecomputedDlopenClosure(inputFile.path, ^(const char* msg) {
                    inputFile.diag.verbose("Dylib located at '%s' cannot prebuild dlopen closure in cache because: %s", inputFile.path, msg);
                }) ) {
                    fileSystem.unloadFile(loadedFileInfo);
                    continue;
                }
                otherDylibs.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
            } else if (ma->isDynamicExecutable()) {
                // Let the platform exclude the file before we do anything else.
                if (platformExcludesExecutablePath(inputFile.path)) {
                    inputFile.diag.verbose("Platform excluded file\n");
                    fileSystem.unloadFile(loadedFileInfo);
                    continue;
                }
                executables.emplace_back((CacheBuilder::LoadedMachO){ mappedFile, loadedFileInfo, &inputFile });
            } else {
                inputFile.diag.verbose("Unsupported mach file type\n");
                fileSystem.unloadFile(loadedFileInfo);
            }
        }
    }
private:
    static bool platformExcludesExecutablePath_macOS(const std::string& path) {
        // We no longer support ROSP, so skip all paths which start with the special prefix
        if ( startsWith(path, "/System/Library/Templates/Data/") )
            return true;

        static const char* sAllowedPrefixes[] = {
            "/Library/Apple/System/",
            "/Library/Apple/usr/",
            "/System/Applications/Safari.app/",
            "/Library/CoreMediaIO/Plug-Ins/DAL/" // temp until plugins moved or closured working
        };

        bool inSearchDir = false;
        for (const char* searchDir : sAllowedPrefixes) {
            if ( strncmp(searchDir, path.c_str(), strlen(searchDir)) == 0 ) {
                inSearchDir = true;
                break;
            }
        }

        return !inSearchDir;
    }
    // Returns true if the current platform requires that this path be excluded from the shared cache
    // Note that this overrides any exclusion from anywhere else.
    bool platformExcludesExecutablePath(const std::string& path) {
        switch (reqPlatform) {
            case dyld3::Platform::unknown:
                return false;
            case dyld3::Platform::macOS:
                return platformExcludesExecutablePath_macOS(path);
            case dyld3::Platform::iOS:
                return false;
            case dyld3::Platform::tvOS:
                return false;
            case dyld3::Platform::watchOS:
                return false;
            case dyld3::Platform::bridgeOS:
                return false;
            case dyld3::Platform::iOSMac:
                return platformExcludesExecutablePath_macOS(path);
            case dyld3::Platform::iOS_simulator:
                return false;
            case dyld3::Platform::tvOS_simulator:
                return false;
            case dyld3::Platform::watchOS_simulator:
                return false;
            case dyld3::Platform::driverKit:
                return false;
        }
    }
    const dyld3::closure::FileSystem& fileSystem;
    const dyld3::GradedArchs&         reqArchs;
    dyld3::Platform                   reqPlatform;
};
SharedCacheBuilder::SharedCacheBuilder(const DyldSharedCache::CreateOptions& options,
                                       const dyld3::closure::FileSystem& fileSystem)
    : CacheBuilder(options, fileSystem) {

    std::string targetArch = options.archs->name();
    if ( options.forSimulator && (options.archs == &dyld3::GradedArchs::i386) )
        targetArch = "sim-x86";

    for (const ArchLayout& layout : _s_archLayout) {
        if ( layout.archName == targetArch ) {
            _archLayout = &layout;
            _is64       = _archLayout->is64;
            break;
        }
    }

    if ( _archLayout == nullptr ) {
        _diagnostics.error("Tool was built without support for: '%s'", targetArch.c_str());
    }
}
static void verifySelfContained(const dyld3::closure::FileSystem& fileSystem,
                                std::vector<CacheBuilder::LoadedMachO>& dylibsToCache,
                                std::vector<CacheBuilder::LoadedMachO>& otherDylibs,
                                std::vector<CacheBuilder::LoadedMachO>& couldNotLoadFiles)
{
    // build map of dylibs
    __block std::map<std::string, const CacheBuilder::LoadedMachO*> knownDylibs;
    __block std::map<std::string, const CacheBuilder::LoadedMachO*> allDylibs;
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        knownDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        allDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        if (const char* installName = dylib.mappedFile.mh->installName()) {
            knownDylibs.insert({ installName, &dylib });
            allDylibs.insert({ installName, &dylib });
        }
    }
    for (const CacheBuilder::LoadedMachO& dylib : otherDylibs) {
        allDylibs.insert({ dylib.mappedFile.runtimePath, &dylib });
        if (const char* installName = dylib.mappedFile.mh->installName())
            allDylibs.insert({ installName, &dylib });
    }

    for (const CacheBuilder::LoadedMachO& dylib : couldNotLoadFiles) {
        allDylibs.insert({ dylib.inputFile->path, &dylib });
    }
    // Exclude bad unzippered twins.  These are where a zippered binary links
    // an unzippered twin
    std::unordered_map<std::string, std::string> macOSPathToTwinPath;
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        macOSPathToTwinPath[dylib.mappedFile.runtimePath] = "";
    }
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        if ( startsWith(dylib.mappedFile.runtimePath, "/System/iOSSupport/") ) {
            std::string tail = dylib.mappedFile.runtimePath.substr(18);
            if ( macOSPathToTwinPath.find(tail) != macOSPathToTwinPath.end() )
                macOSPathToTwinPath[tail] = dylib.mappedFile.runtimePath;
        }
    }
    __block std::map<std::string, std::set<std::string>> badDylibs;
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
            continue;
        if ( dylib.mappedFile.mh->isZippered() ) {
            dylib.mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                auto macOSAndTwinPath = macOSPathToTwinPath.find(loadPath);
                if ( macOSAndTwinPath != macOSPathToTwinPath.end() ) {
                    const std::string& twinPath = macOSAndTwinPath->second;
                    if ( badDylibs.count(twinPath) != 0 )
                        return;
                    knownDylibs.erase(twinPath);
                    badDylibs[twinPath].insert(std::string("evicting UIKitForMac binary as it is linked by zippered binary '") + dylib.mappedFile.runtimePath + "'");
                }
            });
        }
    }
    // HACK: Exclude some dylibs and transitive deps for now until we have project fixes
    __block std::set<std::string> badProjects;
    badProjects.insert("/System/Library/PrivateFrameworks/TuriCore.framework/Versions/A/TuriCore");
    badProjects.insert("/System/Library/PrivateFrameworks/UHASHelloExtensionPoint-macOS.framework/Versions/A/UHASHelloExtensionPoint-macOS");
    // check all dependencies to assure every dylib in cache only depends on other dylibs in cache
    __block bool doAgain = true;
    while ( doAgain ) {
        doAgain = false;
        // scan dylib list making sure all dependents are in dylib list
        for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
            if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                continue;
            if ( badProjects.count(dylib.mappedFile.runtimePath) != 0 )
                continue;
            dylib.mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                if ( badProjects.count(loadPath) != 0 ) {
                    // We depend on a bad dylib, so add this one to the list too
                    badProjects.insert(dylib.mappedFile.runtimePath);
                    badProjects.insert(dylib.mappedFile.mh->installName());
                    knownDylibs.erase(dylib.mappedFile.runtimePath);
                    knownDylibs.erase(dylib.mappedFile.mh->installName());
                    badDylibs[dylib.mappedFile.runtimePath].insert(std::string("Depends on bad project '") + loadPath + "'");
                    doAgain = true;
                    return;
                }
                char resolvedSymlinkPath[PATH_MAX];
                if ( knownDylibs.count(loadPath) == 0 ) {
                    // The loadPath was embedded when the dylib was built, but we may be in the process of moving
                    // a dylib with symlinks from old to new paths
                    // In this case, the realpath will tell us the new location
                    if ( fileSystem.getRealPath(loadPath, resolvedSymlinkPath) ) {
                        if ( strcmp(resolvedSymlinkPath, loadPath) != 0 ) {
                            loadPath = resolvedSymlinkPath;
                        }
                    }
                }
                if ( knownDylibs.count(loadPath) == 0 ) {
                    badDylibs[dylib.mappedFile.runtimePath].insert(std::string("Could not find dependency '") + loadPath + "'");
                    knownDylibs.erase(dylib.mappedFile.runtimePath);
                    knownDylibs.erase(dylib.mappedFile.mh->installName());
                    doAgain = true;
                }
            });
        }
    }
    // Now walk the dylibs which depend on missing dylibs and see if any of them are required binaries.
    for (auto badDylibsIterator : badDylibs) {
        const std::string& dylibRuntimePath = badDylibsIterator.first;
        auto requiredDylibIterator = allDylibs.find(dylibRuntimePath);
        if (requiredDylibIterator == allDylibs.end())
            continue;
        if (!requiredDylibIterator->second->inputFile->mustBeIncluded())
            continue;

        // This dylib is required so mark all dependencies as required too
        __block std::vector<const CacheBuilder::LoadedMachO*> worklist;
        worklist.push_back(requiredDylibIterator->second);
        while (!worklist.empty()) {
            const CacheBuilder::LoadedMachO* dylib = worklist.back();
            worklist.pop_back();
            if (!dylib->mappedFile.mh)
                continue;
            dylib->mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                auto dylibIterator = allDylibs.find(loadPath);
                if (dylibIterator != allDylibs.end()) {
                    if (dylibIterator->second->inputFile->state == CacheBuilder::InputFile::Unset) {
                        dylibIterator->second->inputFile->state = CacheBuilder::InputFile::MustBeIncludedForDependent;
                        worklist.push_back(dylibIterator->second);
                    }
                }
            });
        }
    }
    // FIXME: Make this an option we can pass in
    const bool evictLeafDylibs = true;
    if (evictLeafDylibs) {
        doAgain = true;
        while ( doAgain ) {
            doAgain = false;

            // build count of how many references there are to each dylib
            __block std::set<std::string> referencedDylibs;
            for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
                if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                    continue;
                dylib.mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool &stop) {
                    referencedDylibs.insert(loadPath);
                });
            }

            // find all dylibs not referenced
            for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
                if ( badDylibs.count(dylib.mappedFile.runtimePath) != 0 )
                    continue;
                const char* installName = dylib.mappedFile.mh->installName();
                if ( (referencedDylibs.count(installName) == 0) && (dylib.inputFile->state == CacheBuilder::InputFile::MustBeExcludedIfUnused) ) {
                    badDylibs[dylib.mappedFile.runtimePath].insert(std::string("It has been explicitly excluded as it is unused"));
                    doAgain = true;
                }
            }
        }
    }
    // Move bad dylibs from dylibs to cache to other dylibs.
    for (const CacheBuilder::LoadedMachO& dylib : dylibsToCache) {
        auto i = badDylibs.find(dylib.mappedFile.runtimePath);
        if ( i != badDylibs.end()) {
            otherDylibs.push_back(dylib);
            for (const std::string& reason : i->second )
                otherDylibs.back().inputFile->diag.warning("Dylib located at '%s' not placed in shared cache because: %s", dylib.mappedFile.runtimePath.c_str(), reason.c_str());
        }
    }
    const auto& badDylibsLambdaRef = badDylibs;
    dylibsToCache.erase(std::remove_if(dylibsToCache.begin(), dylibsToCache.end(), [&](const CacheBuilder::LoadedMachO& dylib) {
        if (badDylibsLambdaRef.find(dylib.mappedFile.runtimePath) != badDylibsLambdaRef.end())
            return true;
        return false;
    }), dylibsToCache.end());
}
// This is the new build API which takes the raw files (which could be FAT) and tries to build a cache from them.
// We should remove the other build() method, or make it private so that this can wrap it.
void SharedCacheBuilder::build(std::vector<CacheBuilder::InputFile>& inputFiles,
                               std::vector<DyldSharedCache::FileAlias>& aliases) {
    // First filter down to files which are actually MachO's
    CacheInputBuilder cacheInputBuilder(_fileSystem, *_options.archs, _options.platform);

    std::vector<LoadedMachO> dylibsToCache;
    std::vector<LoadedMachO> otherDylibs;
    std::vector<LoadedMachO> executables;
    std::vector<LoadedMachO> couldNotLoadFiles;
    cacheInputBuilder.loadMachOs(inputFiles, dylibsToCache, otherDylibs, executables, couldNotLoadFiles);

    verifySelfContained(_fileSystem, dylibsToCache, otherDylibs, couldNotLoadFiles);
    // Check for required binaries before we try to build the cache
    if (!_diagnostics.hasError()) {
        // If loading succeeded, now see if there was a missing required file, and if so why it's missing.
        std::string errorString;
        for (const LoadedMachO& dylib : otherDylibs) {
            if (dylib.inputFile->mustBeIncluded()) {
                // An error loading a required file must be propagated up to the top level diagnostic handler.
                bool gotWarning = false;
                for (const std::string& warning : dylib.inputFile->diag.warnings()) {
                    gotWarning = true;
                    std::string message = warning;
                    if (message.back() == '\n')
                        message.pop_back();
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: " + message + "\n";
                }
                if (!gotWarning) {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: 'unknown error. Please report to dyld'\n";
                }
            }
        }
        for (const LoadedMachO& dylib : couldNotLoadFiles) {
            if (dylib.inputFile->mustBeIncluded()) {
                if (dylib.inputFile->diag.hasError()) {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: " + dylib.inputFile->diag.errorMessage() + "\n";
                } else {
                    if (!errorString.empty())
                        errorString += "ERROR: ";
                    errorString += "Required binary was not included in the shared cache '" + std::string(dylib.inputFile->path) + "' because: 'unknown error. Please report to dyld'\n";
                }
            }
        }
        if (!errorString.empty()) {
            _diagnostics.error("%s", errorString.c_str());
        }
    }

    if (!_diagnostics.hasError())
        build(dylibsToCache, otherDylibs, executables, aliases);
    if (!_diagnostics.hasError()) {
        // If we succeeded in building, then now see if there was a missing required file, and if so why it's missing.
        std::string errorString;
        for (CacheBuilder::InputFile& inputFile : inputFiles) {
            if (inputFile.mustBeIncluded() && inputFile.diag.hasError()) {
                // An error loading a required file must be propagated up to the top level diagnostic handler.
                std::string message = inputFile.diag.errorMessage();
                if (message.back() == '\n')
                    message.pop_back();
                errorString += "Required binary was not included in the shared cache '" + std::string(inputFile.path) + "' because: " + message + "\n";
            }
        }
        if (!errorString.empty()) {
            _diagnostics.error("%s", errorString.c_str());
        }
    }
    // Add all the warnings from the input files to the top level warnings on the main diagnostics object.
    for (CacheBuilder::InputFile& inputFile : inputFiles) {
        for (const std::string& warning : inputFile.diag.warnings())
            _diagnostics.warning("%s", warning.c_str());
    }

    // Clean up the loaded files
    for (LoadedMachO& loadedMachO : dylibsToCache)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
    for (LoadedMachO& loadedMachO : otherDylibs)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
    for (LoadedMachO& loadedMachO : executables)
        _fileSystem.unloadFile(loadedMachO.loadedFileInfo);
}
void SharedCacheBuilder::build(const std::vector<DyldSharedCache::MappedMachO>& dylibs,
                               const std::vector<DyldSharedCache::MappedMachO>& otherOsDylibsInput,
                               const std::vector<DyldSharedCache::MappedMachO>& osExecutables,
                               std::vector<DyldSharedCache::FileAlias>& aliases) {

    std::vector<LoadedMachO> dylibsToCache;
    std::vector<LoadedMachO> otherDylibs;
    std::vector<LoadedMachO> executables;

    for (const DyldSharedCache::MappedMachO& mappedMachO : dylibs) {
        dyld3::closure::LoadedFileInfo loadedFileInfo;
        loadedFileInfo.fileContent    = mappedMachO.mh;
        loadedFileInfo.fileContentLen = mappedMachO.length;
        loadedFileInfo.sliceOffset    = mappedMachO.sliceFileOffset;
        loadedFileInfo.sliceLen       = mappedMachO.length;
        loadedFileInfo.inode          = mappedMachO.inode;
        loadedFileInfo.mtime          = mappedMachO.modTime;
        loadedFileInfo.path           = mappedMachO.runtimePath.c_str();
        dylibsToCache.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
    }

    for (const DyldSharedCache::MappedMachO& mappedMachO : otherOsDylibsInput) {
        dyld3::closure::LoadedFileInfo loadedFileInfo;
        loadedFileInfo.fileContent    = mappedMachO.mh;
        loadedFileInfo.fileContentLen = mappedMachO.length;
        loadedFileInfo.sliceOffset    = mappedMachO.sliceFileOffset;
        loadedFileInfo.sliceLen       = mappedMachO.length;
        loadedFileInfo.inode          = mappedMachO.inode;
        loadedFileInfo.mtime          = mappedMachO.modTime;
        loadedFileInfo.path           = mappedMachO.runtimePath.c_str();
        otherDylibs.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
    }

    for (const DyldSharedCache::MappedMachO& mappedMachO : osExecutables) {
        dyld3::closure::LoadedFileInfo loadedFileInfo;
        loadedFileInfo.fileContent    = mappedMachO.mh;
        loadedFileInfo.fileContentLen = mappedMachO.length;
        loadedFileInfo.sliceOffset    = mappedMachO.sliceFileOffset;
        loadedFileInfo.sliceLen       = mappedMachO.length;
        loadedFileInfo.inode          = mappedMachO.inode;
        loadedFileInfo.mtime          = mappedMachO.modTime;
        loadedFileInfo.path           = mappedMachO.runtimePath.c_str();
        executables.emplace_back((LoadedMachO){ mappedMachO, loadedFileInfo, nullptr });
    }

    build(dylibsToCache, otherDylibs, executables, aliases);
}
void SharedCacheBuilder::build(const std::vector<LoadedMachO>& dylibs,
                               const std::vector<LoadedMachO>& otherOsDylibsInput,
                               const std::vector<LoadedMachO>& osExecutables,
                               std::vector<DyldSharedCache::FileAlias>& aliases)
{
    // <rdar://problem/21317611> error out instead of crash if cache has no dylibs
    // FIXME: plist should specify required vs optional dylibs
    if ( dylibs.size() < 30 ) {
        _diagnostics.error("missing required minimum set of dylibs");
        return;
    }

    _timeRecorder.pushTimedSection();

    // make copy of dylib list and sort
    makeSortedDylibs(dylibs, _options.dylibOrdering);

    // allocate space used by largest possible cache plus room for LINKEDITS before optimization
    _allocatedBufferSize = _archLayout->sharedMemorySize * 1.50;
    if ( vm_allocate(mach_task_self(), &_fullAllocatedBuffer, _allocatedBufferSize, VM_FLAGS_ANYWHERE) != 0 ) {
        _diagnostics.error("could not allocate buffer");
        return;
    }
    _timeRecorder.recordTime("sort dylibs");

    bool impCachesSuccess = false;
    IMPCaches::HoleMap selectorAddressIntervals;
    _impCachesBuilder = new IMPCaches::IMPCachesBuilder(_sortedDylibs, _options.objcOptimizations, _diagnostics, _timeRecorder, _fileSystem);

    // Note, macOS allows install names and paths to mismatch.  This is currently not supported by
    // IMP caches as we use install names to look up the set of dylibs.
    if ( _archLayout->is64
        && (_options.platform != dyld3::Platform::macOS)
        && ((_impCachesBuilder->neededClasses.size() > 0) || (_impCachesBuilder->neededMetaclasses.size() > 0))) {
        // Build the class map across all dylibs (including cross-image superclass references)
        _impCachesBuilder->buildClassesMap(_diagnostics);

        // Determine which methods will end up in each class's IMP cache
        impCachesSuccess = _impCachesBuilder->parseDylibs(_diagnostics);

        // Compute perfect hash functions for IMP caches
        if (impCachesSuccess) _impCachesBuilder->buildPerfectHashes(selectorAddressIntervals, _diagnostics);
    }

    constexpr bool log = false;
    if (log) {
        for (const auto& p : _impCachesBuilder->selectors.map) {
            printf("0x%06x %s\n", p.second->offset, p.second->name);
        }
    }
    _timeRecorder.recordTime("compute IMP caches");

    IMPCaches::SelectorMap emptyMap;
    IMPCaches::SelectorMap& selectorMap = impCachesSuccess ? _impCachesBuilder->selectors : emptyMap;

    // assign addresses for each segment of each dylib in new cache
    parseCoalescableSegments(selectorMap, selectorAddressIntervals);
    processSelectorStrings(osExecutables, selectorAddressIntervals);

    assignSegmentAddresses();
    std::vector<const LoadedMachO*> overflowDylibs;
    while ( cacheOverflowAmount() != 0 ) {
        // IMP caches: we may need to recompute the selector addresses here to be slightly more compact
        // if we remove dylibs? This is probably overkill.

        if ( !_options.evictLeafDylibsOnOverflow ) {
            _diagnostics.error("cache overflow by %lluMB", cacheOverflowAmount() / 1024 / 1024);
            return;
        }
        size_t evictionCount = evictLeafDylibs(cacheOverflowAmount(), overflowDylibs);
        // re-layout cache
        for (DylibInfo& dylib : _sortedDylibs)
            dylib.cacheLocation.clear();
        _dataRegions.clear();
        _coalescedText.clear();

        // Re-generate the hole map to remove any cruft that was added when parsing the coalescable text the first time.
        // Always clear the hole map, even if IMP caches are off, as it is used by the text coalescer
        selectorAddressIntervals.clear();
        if (impCachesSuccess) _impCachesBuilder->computeLowBits(selectorAddressIntervals);

        parseCoalescableSegments(selectorMap, selectorAddressIntervals);
        processSelectorStrings(osExecutables, selectorAddressIntervals);
        assignSegmentAddresses();

        _diagnostics.verbose("cache overflow, evicted %lu leaf dylibs\n", evictionCount);
    }
    markPaddingInaccessible();

    // copy all segments into cache

    unsigned long wastedSelectorsSpace = selectorAddressIntervals.totalHoleSize();
    if (wastedSelectorsSpace > 0) {
        _diagnostics.verbose("Selector placement for IMP caches wasted %lu bytes\n", wastedSelectorsSpace);
        if (log) {
            std::cerr << selectorAddressIntervals << std::endl;
        }
    }
    _timeRecorder.recordTime("layout cache");

    writeCacheHeader();
    copyRawSegments();
    _timeRecorder.recordTime("copy cached dylibs into buffer");
    // rebase all dylibs for new location in cache

    _aslrTracker.setDataRegion(firstDataRegion()->buffer, dataRegionsTotalSize());
    if ( !_options.cacheSupportsASLR )
        _aslrTracker.disable();
    adjustAllImagesForNewSegmentLocations(_archLayout->sharedMemoryStart, _aslrTracker,
                                          &_lohTracker, &_coalescedText);
    if ( _diagnostics.hasError() )
        return;

    _timeRecorder.recordTime("adjust segments for new split locations");

    // build ImageArray for dyld3, which has side effect of binding all cached dylibs
    buildImageArray(aliases);
    if ( _diagnostics.hasError() )
        return;

    _timeRecorder.recordTime("bind all images");

    // optimize ObjC
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    optimizeObjC(impCachesSuccess, _impCachesBuilder->inlinedSelectors);

    delete _impCachesBuilder;
    _impCachesBuilder = nullptr;

    if ( _diagnostics.hasError() )
        return;

    _timeRecorder.recordTime("optimize Objective-C");
    if ( _options.optimizeStubs ) {
        __block std::vector<std::pair<const mach_header*, const char*>> images;
        dyldCache->forEachImage(^(const mach_header* mh, const char* installName) {
            images.push_back({ mh, installName });
        });

        int64_t cacheSlide = (long)dyldCache - dyldCache->unslidLoadAddress();
        uint64_t cacheUnslideAddr = dyldCache->unslidLoadAddress();
        optimizeAwayStubs(images, cacheSlide, cacheUnslideAddr,
                          dyldCache, _s_neverStubEliminateSymbols);
    }
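    // cacheSlide above is (builder's mapped address) - (unslid load address): the cache
    // is laid out at its final unslid addresses, so stub optimization translates through
    // the slide to read the in-memory buffer.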
    // FIPS seal corecrypto, This must be done after stub elimination (so that __TEXT,__text is not changed after sealing)
    fipsSign();

    _timeRecorder.recordTime("do stub elimination");

    // merge and compact LINKEDIT segments
    {
        // If we want to remove, not just unmap locals, then set the dylibs themselves to be stripped
        DylibStripMode dylibStripMode = DylibStripMode::stripNone;
        if ( _options.localSymbolMode == DyldSharedCache::LocalSymbolsMode::strip )
            dylibStripMode = CacheBuilder::DylibStripMode::stripLocals;

        __block std::vector<std::tuple<const mach_header*, const char*, DylibStripMode>> images;
        dyldCache->forEachImage(^(const mach_header* mh, const char* installName) {
            images.push_back({ mh, installName, dylibStripMode });
        });
        optimizeLinkedit(&_localSymbolsRegion, images);
    }
    // copy ImageArray to end of read-only region
    addImageArray();
    if ( _diagnostics.hasError() )
        return;

    _timeRecorder.recordTime("optimize LINKEDITs");

    // don't add dyld3 closures to simulator cache or the base system where size is more of an issue
    if ( _options.optimizeDyldDlopens ) {
        // compute and add dlopen closures for all other dylibs
        addOtherImageArray(otherOsDylibsInput, overflowDylibs);
        if ( _diagnostics.hasError() )
            return;
    }
    if ( _options.optimizeDyldLaunches ) {
        // compute and add launch closures to end of read-only region
        addClosures(osExecutables);
        if ( _diagnostics.hasError() )
            return;
    }

    // update final readOnly region size
    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + dyldCache->header.mappingOffset);
    mappings[dyldCache->header.mappingCount - 1].size = _readOnlyRegion.sizeInUse;
    dyld_cache_mapping_and_slide_info* slidableMappings = (dyld_cache_mapping_and_slide_info*)(_readExecuteRegion.buffer + dyldCache->header.mappingWithSlideOffset);
    slidableMappings[dyldCache->header.mappingCount - 1].size = _readOnlyRegion.sizeInUse;
    if ( _localSymbolsRegion.sizeInUse != 0 ) {
        dyldCache->header.localSymbolsOffset = _readOnlyRegion.cacheFileOffset + _readOnlyRegion.sizeInUse;
        dyldCache->header.localSymbolsSize   = _localSymbolsRegion.sizeInUse;
    }
    // record max slide now that final size is established
    if ( _archLayout->sharedRegionsAreDiscontiguous ) {
        // special case x86_64 which has three non-contiguous chunks each in their own 1GB regions
        uint64_t maxSlide0 = DISCONTIGUOUS_RX_SIZE - _readExecuteRegion.sizeInUse; // TEXT region has 1.5GB region
        uint64_t maxSlide1 = DISCONTIGUOUS_RW_SIZE - dataRegionsTotalSize();
        uint64_t maxSlide2 = DISCONTIGUOUS_RO_SIZE - _readOnlyRegion.sizeInUse;
        dyldCache->header.maxSlide = std::min(std::min(maxSlide0, maxSlide1), maxSlide2);
    }
    else {
        // <rdar://problem/49852839> branch predictor on arm64 currently only looks at low 32-bits, so don't slide cache more than 2GB
        if ( (_archLayout->sharedMemorySize == 0x100000000) && (_readExecuteRegion.sizeInUse < 0x80000000) )
            dyldCache->header.maxSlide = 0x80000000 - _readExecuteRegion.sizeInUse;
        else
            dyldCache->header.maxSlide = (_archLayout->sharedMemoryStart + _archLayout->sharedMemorySize) - (_readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse);
    }
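    // e.g. on arm64 with a hypothetical 0x60000000-byte TEXT region, maxSlide becomes
    // 0x80000000 - 0x60000000 = 0x20000000, keeping slid TEXT inside the low-2GB
    // window the branch-predictor note above requires.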
    // mark if any input dylibs were built with chained fixups
    dyldCache->header.builtFromChainedFixups = _someDylibsUsedChainedFixups;

    _timeRecorder.recordTime("build %lu closures", osExecutables.size());
    // Emit the CF strings without their ISAs being signed
    // This must be after addImageArray() as it depends on hasImageIndex().
    // It also has to be before emitting slide info as it adds ASLR entries.
    emitContantObjects();

    _timeRecorder.recordTime("emit constant objects");

    // fill in slide info at start of region[2]
    // do this last because it modifies pointers in DATA segments
    if ( _options.cacheSupportsASLR ) {
#if SUPPORT_ARCH_arm64e
        if ( strcmp(_archLayout->archName, "arm64e") == 0 )
            writeSlideInfoV3(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
        else
#endif
        if ( _archLayout->is64 )
            writeSlideInfoV2<Pointer64<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
#if SUPPORT_ARCH_arm64_32 || SUPPORT_ARCH_armv7k
        else if ( _archLayout->pointerDeltaMask == 0xC0000000 )
            writeSlideInfoV4<Pointer32<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
#endif
        else
            writeSlideInfoV2<Pointer32<LittleEndian>>(_aslrTracker.bitmap(), _aslrTracker.dataPageCount());
    }

    _timeRecorder.recordTime("compute slide info");

    // last sanity check on size
    if ( cacheOverflowAmount() != 0 ) {
        _diagnostics.error("cache overflow after optimizations 0x%llX -> 0x%llX", _readExecuteRegion.unslidLoadAddress, _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse);
        return;
    }

    // codesignature is part of file, but is not mapped
    codeSign();
    if ( _diagnostics.hasError() )
        return;

    _timeRecorder.recordTime("compute UUID and codesign cache file");

    if (_options.verbose) {
        _timeRecorder.logTimings();
    }
}
const std::set<std::string> SharedCacheBuilder::warnings()
{
    return _diagnostics.warnings();
}

const std::set<const dyld3::MachOAnalyzer*> SharedCacheBuilder::evictions()
{
    return _evictions;
}

void SharedCacheBuilder::deleteBuffer()
{
    // Cache buffer
    if ( _allocatedBufferSize != 0 ) {
        vm_deallocate(mach_task_self(), _fullAllocatedBuffer, _allocatedBufferSize);
        _fullAllocatedBuffer = 0;
        _allocatedBufferSize = 0;
    }
    // Local symbols buffer
    if ( _localSymbolsRegion.bufferSize != 0 ) {
        vm_deallocate(mach_task_self(), (vm_address_t)_localSymbolsRegion.buffer, _localSymbolsRegion.bufferSize);
        _localSymbolsRegion.buffer = 0;
        _localSymbolsRegion.bufferSize = 0;
    }
    // Code signature region
    if ( _codeSignatureRegion.bufferSize != 0 ) {
        vm_deallocate(mach_task_self(), (vm_address_t)_codeSignatureRegion.buffer, _codeSignatureRegion.bufferSize);
        _codeSignatureRegion.buffer = 0;
        _codeSignatureRegion.bufferSize = 0;
    }
}
void SharedCacheBuilder::makeSortedDylibs(const std::vector<LoadedMachO>& dylibs, const std::unordered_map<std::string, unsigned> sortOrder)
{
    for (const LoadedMachO& dylib : dylibs) {
        _sortedDylibs.push_back({ &dylib, dylib.mappedFile.runtimePath, {} });
    }

    std::sort(_sortedDylibs.begin(), _sortedDylibs.end(), [&](const DylibInfo& a, const DylibInfo& b) {
        const auto& orderA = sortOrder.find(a.input->mappedFile.runtimePath);
        const auto& orderB = sortOrder.find(b.input->mappedFile.runtimePath);
        bool foundA = (orderA != sortOrder.end());
        bool foundB = (orderB != sortOrder.end());

        // Order all __DATA_DIRTY segments specified in the order file first, in
        // the order specified in the file, followed by any other __DATA_DIRTY
        // segments in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;

        // Sort mac before iOSMac
        bool isIOSMacA = strncmp(a.input->mappedFile.runtimePath.c_str(), "/System/iOSSupport/", 19) == 0;
        bool isIOSMacB = strncmp(b.input->mappedFile.runtimePath.c_str(), "/System/iOSSupport/", 19) == 0;
        if (isIOSMacA != isIOSMacB)
            return !isIOSMacA;

        // Finally sort by path
        return a.input->mappedFile.runtimePath < b.input->mappedFile.runtimePath;
    });
}
struct DylibAndSize
{
    const CacheBuilder::LoadedMachO* input;
    const char*                      installName;
    uint64_t                         size;
};
uint64_t SharedCacheBuilder::cacheOverflowAmount()
{
    if ( _archLayout->sharedRegionsAreDiscontiguous ) {
        // for macOS x86_64 cache, need to check each region for overflow
        if ( _readExecuteRegion.sizeInUse > DISCONTIGUOUS_RX_SIZE )
            return (_readExecuteRegion.sizeInUse - DISCONTIGUOUS_RX_SIZE);

        uint64_t dataSize = dataRegionsTotalSize();
        if ( dataSize > DISCONTIGUOUS_RW_SIZE )
            return (dataSize - DISCONTIGUOUS_RW_SIZE);

        if ( _readOnlyRegion.sizeInUse > DISCONTIGUOUS_RO_SIZE )
            return (_readOnlyRegion.sizeInUse - DISCONTIGUOUS_RO_SIZE);
    }
    else {
        bool alreadyOptimized = (_readOnlyRegion.sizeInUse != _readOnlyRegion.bufferSize);
        uint64_t vmSize = _readOnlyRegion.unslidLoadAddress - _readExecuteRegion.unslidLoadAddress;
        if ( alreadyOptimized )
            vmSize += _readOnlyRegion.sizeInUse;
        else if ( _options.localSymbolMode == DyldSharedCache::LocalSymbolsMode::unmap )
            vmSize += (_readOnlyRegion.sizeInUse * 37/100); // assume locals removal and LINKEDIT optimization reduces LINKEDITs to 37% of original size
        else
            vmSize += (_readOnlyRegion.sizeInUse * 80/100); // assume LINKEDIT optimization reduces LINKEDITs to 80% of original size
        if ( vmSize > _archLayout->sharedMemorySize )
            return vmSize - _archLayout->sharedMemorySize;
    }
    // fits in shared region
    return 0;
}
size_t SharedCacheBuilder::evictLeafDylibs(uint64_t reductionTarget, std::vector<const LoadedMachO*>& overflowDylibs)
{
    // build a reverse map of all dylib dependencies
    __block std::map<std::string, std::set<std::string>> references;
    std::map<std::string, std::set<std::string>>* referencesPtr = &references;
    for (const DylibInfo& dylib : _sortedDylibs) {
        // Ensure we have an entry (even if it is empty)
        if (references.count(dylib.input->mappedFile.mh->installName()) == 0) {
            references[dylib.input->mappedFile.mh->installName()] = std::set<std::string>();
        }
        dylib.input->mappedFile.mh->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool &stop) {
            references[loadPath].insert(dylib.input->mappedFile.mh->installName());
        });
    }

    // Find the sizes of all the dylibs
    std::vector<DylibAndSize> dylibsToSort;
    std::vector<DylibAndSize> sortedDylibs;
    for (const DylibInfo& dylib : _sortedDylibs) {
        const char* installName = dylib.input->mappedFile.mh->installName();
        __block uint64_t segsSize = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
            if ( strcmp(info.segName, "__LINKEDIT") != 0 )
                segsSize += info.vmSize;
        });
        dylibsToSort.push_back({ dylib.input, installName, segsSize });
    }
    // Build an ordered list of what to remove. At each step we do the following:
    // 1) Find all dylibs that nothing else depends on
    // 2a) If any of those dylibs are not in the order file, select the largest of them
    // 2b) If all the leaf dylibs are in the order file, select the dylib that appears last in the order file
    // 3) Remove all references to the removed dylib from the reverse dependency map
    // 4) Go back to 1 and repeat until there are no more evictable dylibs
    // This results in us always choosing the locally optimal selection, and then taking into account how that impacts
    // the dependency graph for subsequent selections
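    //
    // Worked example (hypothetical): if libA and libB are both leaves and libB is absent
    // from the order file (order == UINT64_MAX) and larger than libA, libB is evicted
    // first; its edges are then erased from `references`, which may expose new leaves.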
    bool candidateFound = true;
    while (candidateFound) {
        candidateFound = false;
        DylibAndSize candidate;
        uint64_t candidateOrder = 0;
        for (const auto& dylib : dylibsToSort) {
            const auto& i = referencesPtr->find(dylib.installName);
            assert(i != referencesPtr->end());
            if (!i->second.empty()) {
                continue;
            }
            const auto& j = _options.dylibOrdering.find(dylib.input->mappedFile.runtimePath);
            uint64_t order = 0;
            if (j != _options.dylibOrdering.end()) {
                order = j->second;
            } else {
                // Not in the order file, set order so it goes to the front of the list
                order = UINT64_MAX;
            }
            if (order > candidateOrder ||
                (order == UINT64_MAX && candidate.size < dylib.size)) {
                // The new file is either a lower priority in the order file
                // or the same priority as the candidate but larger
                candidate = dylib;
                candidateOrder = order;
                candidateFound = true;
            }
        }
        if (candidateFound) {
            sortedDylibs.push_back(candidate);
            referencesPtr->erase(candidate.installName);
            for (auto& dependent : references) {
                (void)dependent.second.erase(candidate.installName);
            }
            auto j = std::find_if(dylibsToSort.begin(), dylibsToSort.end(), [&candidate](const DylibAndSize& dylib) {
                return (strcmp(candidate.installName, dylib.installName) == 0);
            });
            if (j != dylibsToSort.end()) {
                dylibsToSort.erase(j);
            }
        }
    }
    // build set of dylibs that if removed will allow cache to build
    for (DylibAndSize& dylib : sortedDylibs) {
        if ( _options.verbose )
            _diagnostics.warning("to prevent cache overflow, not caching %s", dylib.installName);
        _evictions.insert(dylib.input->mappedFile.mh);
        // Track the evicted dylibs so we can try to build "other" dlopen closures for them.
        overflowDylibs.push_back(dylib.input);
        if ( dylib.size > reductionTarget )
            break;
        reductionTarget -= dylib.size;
    }

    // prune _sortedDylibs
    _sortedDylibs.erase(std::remove_if(_sortedDylibs.begin(), _sortedDylibs.end(), [&](const DylibInfo& dylib) {
        return (_evictions.count(dylib.input->mappedFile.mh) != 0);
    }), _sortedDylibs.end());

    return _evictions.size();
}
void SharedCacheBuilder::writeCacheHeader()
{
    // "dyld_v1" + spaces + archName(), with enough spaces to pad to 15 bytes
    std::string magic = "dyld_v1";
    magic.append(15 - magic.length() - strlen(_options.archs->name()), ' ');
    magic.append(_options.archs->name());
    assert(magic.length() == 15);
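    // e.g. for "x86_64" this appends 15 - 7 - 6 = 2 spaces and then the arch name,
    // producing the 15-byte magic "dyld_v1  x86_64" (the memcpy below also copies the NUL).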
    // 1 __TEXT segment, n __DATA segments, and 1 __LINKEDIT segment
    const uint32_t mappingCount = 2 + (uint32_t)_dataRegions.size();
    assert(mappingCount <= DyldSharedCache::MaxMappings);
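    // e.g. with split data regions __DATA, __DATA_CONST and __DATA_DIRTY (a typical
    // set), mappingCount is 5: one TEXT, three DATA, and one LINKEDIT mapping.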
    // fill in header
    dyld_cache_header* dyldCacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
    memcpy(dyldCacheHeader->magic, magic.c_str(), 16);
    dyldCacheHeader->mappingOffset             = sizeof(dyld_cache_header);
    dyldCacheHeader->mappingCount              = mappingCount;
    dyldCacheHeader->mappingWithSlideOffset    = (uint32_t)(dyldCacheHeader->mappingOffset + mappingCount*sizeof(dyld_cache_mapping_and_slide_info));
    dyldCacheHeader->mappingWithSlideCount     = mappingCount;
    dyldCacheHeader->imagesOffset              = (uint32_t)(dyldCacheHeader->mappingWithSlideOffset + mappingCount*sizeof(dyld_cache_mapping_and_slide_info));
    dyldCacheHeader->imagesCount               = (uint32_t)_sortedDylibs.size() + _aliasCount;
    dyldCacheHeader->dyldBaseAddress           = 0;
    dyldCacheHeader->codeSignatureOffset       = 0;
    dyldCacheHeader->codeSignatureSize         = 0;
    dyldCacheHeader->slideInfoOffsetUnused     = 0;
    dyldCacheHeader->slideInfoSizeUnused       = 0;
    dyldCacheHeader->localSymbolsOffset        = 0;
    dyldCacheHeader->localSymbolsSize          = 0;
    dyldCacheHeader->cacheType                 = _options.optimizeStubs ? kDyldSharedCacheTypeProduction : kDyldSharedCacheTypeDevelopment;
    dyldCacheHeader->accelerateInfoAddr        = 0;
    dyldCacheHeader->accelerateInfoSize        = 0;
    bzero(dyldCacheHeader->uuid, 16); // overwritten later by recomputeCacheUUID()
    dyldCacheHeader->branchPoolsOffset         = 0;
    dyldCacheHeader->branchPoolsCount          = 0;
    dyldCacheHeader->imagesTextOffset          = dyldCacheHeader->imagesOffset + sizeof(dyld_cache_image_info)*dyldCacheHeader->imagesCount;
    dyldCacheHeader->imagesTextCount           = _sortedDylibs.size();
    dyldCacheHeader->patchInfoAddr             = 0;
    dyldCacheHeader->patchInfoSize             = 0;
    dyldCacheHeader->otherImageGroupAddrUnused = 0;
    dyldCacheHeader->otherImageGroupSizeUnused = 0;
    dyldCacheHeader->progClosuresAddr          = 0;
    dyldCacheHeader->progClosuresSize          = 0;
    dyldCacheHeader->progClosuresTrieAddr      = 0;
    dyldCacheHeader->progClosuresTrieSize      = 0;
    dyldCacheHeader->platform                  = (uint8_t)_options.platform;
    dyldCacheHeader->formatVersion             = dyld3::closure::kFormatVersion;
    dyldCacheHeader->dylibsExpectedOnDisk      = !_options.dylibsRemovedDuringMastering;
    dyldCacheHeader->simulator                 = _options.forSimulator;
    dyldCacheHeader->locallyBuiltCache         = _options.isLocallyBuiltCache;
    dyldCacheHeader->builtFromChainedFixups    = false;
    dyldCacheHeader->sharedRegionStart         = _archLayout->sharedMemoryStart;
    dyldCacheHeader->sharedRegionSize          = _archLayout->sharedMemorySize;
    // fill in mappings
    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + dyldCacheHeader->mappingOffset);
    assert(_readExecuteRegion.cacheFileOffset == 0);
    mappings[0].address    = _readExecuteRegion.unslidLoadAddress;
    mappings[0].fileOffset = _readExecuteRegion.cacheFileOffset;
    mappings[0].size       = _readExecuteRegion.sizeInUse;
    mappings[0].maxProt    = VM_PROT_READ | VM_PROT_EXECUTE;
    mappings[0].initProt   = VM_PROT_READ | VM_PROT_EXECUTE;
    for (uint32_t i = 0; i != _dataRegions.size(); ++i) {
        if ( i == 0 ) {
            assert(_dataRegions[i].cacheFileOffset == _readExecuteRegion.sizeInUse);
        }
        mappings[i + 1].address    = _dataRegions[i].unslidLoadAddress;
        mappings[i + 1].fileOffset = _dataRegions[i].cacheFileOffset;
        mappings[i + 1].size       = _dataRegions[i].sizeInUse;
        mappings[i + 1].maxProt    = VM_PROT_READ | VM_PROT_WRITE;
        mappings[i + 1].initProt   = VM_PROT_READ | VM_PROT_WRITE;
    }
    assert(_readOnlyRegion.cacheFileOffset == (_dataRegions.back().cacheFileOffset + _dataRegions.back().sizeInUse));
    mappings[mappingCount - 1].address    = _readOnlyRegion.unslidLoadAddress;
    mappings[mappingCount - 1].fileOffset = _readOnlyRegion.cacheFileOffset;
    mappings[mappingCount - 1].size       = _readOnlyRegion.sizeInUse;
    mappings[mappingCount - 1].maxProt    = VM_PROT_READ;
    mappings[mappingCount - 1].initProt   = VM_PROT_READ;
    // Add in the new mappings which also have slide info
    dyld_cache_mapping_and_slide_info* slidableMappings = (dyld_cache_mapping_and_slide_info*)(_readExecuteRegion.buffer + dyldCacheHeader->mappingWithSlideOffset);
    slidableMappings[0].address             = _readExecuteRegion.unslidLoadAddress;
    slidableMappings[0].fileOffset          = _readExecuteRegion.cacheFileOffset;
    slidableMappings[0].size                = _readExecuteRegion.sizeInUse;
    slidableMappings[0].maxProt             = VM_PROT_READ | VM_PROT_EXECUTE;
    slidableMappings[0].initProt            = VM_PROT_READ | VM_PROT_EXECUTE;
    slidableMappings[0].slideInfoFileOffset = 0;
    slidableMappings[0].slideInfoFileSize   = 0;
    slidableMappings[0].flags               = 0;
    for (uint32_t i = 0; i != _dataRegions.size(); ++i) {
        // Work out which flags this mapping has
        uint64_t flags = 0;
        if ( startsWith(_dataRegions[i].name, "__AUTH") )
            flags |= DYLD_CACHE_MAPPING_AUTH_DATA;
        if ( (_dataRegions[i].name == "__AUTH_DIRTY") || (_dataRegions[i].name == "__DATA_DIRTY") ) {
            flags |= DYLD_CACHE_MAPPING_DIRTY_DATA;
        } else if ( (_dataRegions[i].name == "__AUTH_CONST") || (_dataRegions[i].name == "__DATA_CONST") ) {
            flags |= DYLD_CACHE_MAPPING_CONST_DATA;
        }

        slidableMappings[i + 1].address             = _dataRegions[i].unslidLoadAddress;
        slidableMappings[i + 1].fileOffset          = _dataRegions[i].cacheFileOffset;
        slidableMappings[i + 1].size                = _dataRegions[i].sizeInUse;
        slidableMappings[i + 1].maxProt             = VM_PROT_READ | VM_PROT_WRITE;
        slidableMappings[i + 1].initProt            = VM_PROT_READ | VM_PROT_WRITE;
        slidableMappings[i + 1].slideInfoFileOffset = _dataRegions[i].slideInfoFileOffset;
        slidableMappings[i + 1].slideInfoFileSize   = _dataRegions[i].slideInfoFileSize;
        slidableMappings[i + 1].flags               = flags;
    }
    slidableMappings[mappingCount - 1].address             = _readOnlyRegion.unslidLoadAddress;
    slidableMappings[mappingCount - 1].fileOffset          = _readOnlyRegion.cacheFileOffset;
    slidableMappings[mappingCount - 1].size                = _readOnlyRegion.sizeInUse;
    slidableMappings[mappingCount - 1].maxProt             = VM_PROT_READ;
    slidableMappings[mappingCount - 1].initProt            = VM_PROT_READ;
    slidableMappings[mappingCount - 1].slideInfoFileOffset = 0;
    slidableMappings[mappingCount - 1].slideInfoFileSize   = 0;
    slidableMappings[mappingCount - 1].flags               = 0;
    // fill in image table
    dyld_cache_image_info* images = (dyld_cache_image_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesOffset);
    for (const DylibInfo& dylib : _sortedDylibs) {
        const char* installName = dylib.input->mappedFile.mh->installName();
        images->address = dylib.cacheLocation[0].dstCacheUnslidAddress;
        if ( _options.dylibsRemovedDuringMastering ) {
            images->modTime = 0;
            images->inode   = pathHash(installName);
        }
        else {
            images->modTime = dylib.input->mappedFile.modTime;
            images->inode   = dylib.input->mappedFile.inode;
        }
        uint32_t installNameOffsetInTEXT = (uint32_t)(installName - (char*)dylib.input->mappedFile.mh);
        images->pathFileOffset = (uint32_t)dylib.cacheLocation[0].dstCacheFileOffset + installNameOffsetInTEXT;
        ++images;
    }
    // append aliases image records and strings
/*
    for (auto &dylib : _dylibs) {
        if (!dylib->installNameAliases.empty()) {
            for (const std::string& alias : dylib->installNameAliases) {
                images->set_address(_segmentMap[dylib][0].address);
                if (_manifest.platform() == "osx") {
                    images->modTime = dylib->lastModTime;
                    images->inode = dylib->inode;
                }
                else {
                    images->modTime = 0;
                    images->inode = pathHash(alias.c_str());
                }
                images->pathFileOffset = offset;
                //fprintf(stderr, "adding alias %s for %s\n", alias.c_str(), dylib->installName.c_str());
                ::strcpy((char*)&_buffer[offset], alias.c_str());
                offset += alias.size() + 1;
                ++images;
            }
        }
    }
*/
    // calculate start of text image array and trailing string pool
    dyld_cache_image_text_info* textImages = (dyld_cache_image_text_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesTextOffset);
    uint32_t stringOffset = (uint32_t)(dyldCacheHeader->imagesTextOffset + sizeof(dyld_cache_image_text_info) * _sortedDylibs.size());

    // write text image array and image names pool at same time
    for (const DylibInfo& dylib : _sortedDylibs) {
        dylib.input->mappedFile.mh->getUuid(textImages->uuid);
        textImages->loadAddress     = dylib.cacheLocation[0].dstCacheUnslidAddress;
        textImages->textSegmentSize = (uint32_t)dylib.cacheLocation[0].dstCacheSegmentSize;
        textImages->pathOffset      = stringOffset;
        const char* installName = dylib.input->mappedFile.mh->installName();
        ::strcpy((char*)_readExecuteRegion.buffer + stringOffset, installName);
        stringOffset += (uint32_t)strlen(installName)+1;
        ++textImages;
    }

    // make sure header did not overflow into first mapped image
    const dyld_cache_image_info* firstImage = (dyld_cache_image_info*)(_readExecuteRegion.buffer + dyldCacheHeader->imagesOffset);
    assert(stringOffset <= (firstImage->address - mappings[0].address));
}
void SharedCacheBuilder::processSelectorStrings(const std::vector<LoadedMachO>& executables, IMPCaches::HoleMap& selectorsHoleMap) {
    const bool log = false;

    // We only do this optimisation to reduce the size of the shared cache executable closures
    // Skip this if those closures are not being built
    if ( !_options.optimizeDyldDlopens || !_options.optimizeDyldLaunches )
        return;

    _selectorStringsFromExecutables = 0;
    uint64_t totalBytesPulledIn = 0;

    // Don't do this optimisation on watchOS where the shared cache is too small
    if (_options.platform == dyld3::Platform::watchOS)
        return;

    // Get the method name coalesced section as that is where we need to put these strings
    CacheBuilder::CacheCoalescedText::StringSection& cacheStringSection = _coalescedText.getSectionData("__objc_methname");
    for (const LoadedMachO& executable : executables) {
        const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)executable.loadedFileInfo.fileContent;

        uint64_t sizeBeforeProcessing = cacheStringSection.bufferSize;

        ma->forEachObjCMethodName(^(const char* methodName) {
            std::string_view str = methodName;
            if (cacheStringSection.stringsToOffsets.find(str) == cacheStringSection.stringsToOffsets.end()) {
                int offset = selectorsHoleMap.addStringOfSize((unsigned)str.size() + 1);
                cacheStringSection.stringsToOffsets[str] = offset;

                // If we inserted the string past the end then we need to include it in the total
                int possibleNewEnd = offset + (int)str.size() + 1;
                if (cacheStringSection.bufferSize < (uint32_t)possibleNewEnd) {
                    cacheStringSection.bufferSize = (uint32_t)possibleNewEnd;
                }
                // if (log) printf("Selector: %s -> %s\n", ma->installName(), methodName);
                ++_selectorStringsFromExecutables;
            }
        });

        uint64_t sizeAfterProcessing = cacheStringSection.bufferSize;
        totalBytesPulledIn += (sizeAfterProcessing - sizeBeforeProcessing);
        if ( log && (sizeBeforeProcessing != sizeAfterProcessing) ) {
            printf("Pulled in % 6lld bytes of selectors from %s\n",
                   sizeAfterProcessing - sizeBeforeProcessing, executable.loadedFileInfo.path);
        }
    }

    _diagnostics.verbose("Pulled in %lld selector strings (%lld bytes) from executables\n",
                         _selectorStringsFromExecutables, totalBytesPulledIn);
}
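// Scans each cache dylib for coalescable text sections (e.g. __objc_methname) and,
// on arm64e, finds the single dylib exporting the CFString ISA so that CF constants
// can later be rewritten against it.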
void SharedCacheBuilder::parseCoalescableSegments(IMPCaches::SelectorMap& selectors, IMPCaches::HoleMap& selectorsHoleMap) {
    const bool log = false;

    for (DylibInfo& dylib : _sortedDylibs)
        _coalescedText.parseCoalescableText(dylib.input->mappedFile.mh, dylib.textCoalescer, selectors, selectorsHoleMap);

    if ( log ) {
        for (const char* section : CacheCoalescedText::SupportedSections) {
            CacheCoalescedText::StringSection& sectionData = _coalescedText.getSectionData(section);
            printf("Coalesced %s from % 10lld -> % 10d, saving % 10lld bytes\n", section,
                   sectionData.bufferSize + sectionData.savedSpace, sectionData.bufferSize, sectionData.savedSpace);
        }
    }
    // arm64e needs to convert CF constants to tagged pointers
    if ( !strcmp(_archLayout->archName, "arm64e") ) {
        // Find the dylib which exports the CFString ISA.  It's likely CoreFoundation but it could move
        CacheCoalescedText::CFSection& cfStrings = _coalescedText.cfStrings;
        for (DylibInfo& dylib : _sortedDylibs) {
            const dyld3::MachOAnalyzer* ma = dylib.input->mappedFile.mh;
            dyld3::MachOAnalyzer::FoundSymbol foundInfo;
            bool foundISASymbol = ma->findExportedSymbol(_diagnostics, cfStrings.isaClassName, false, foundInfo, nullptr);
            if ( foundISASymbol ) {
                // This dylib exports the ISA, so everyone else should look here for the ISA too.
                if ( cfStrings.isaInstallName != nullptr ) {
                    // Found a duplicate.  We can't do anything here
                    _diagnostics.verbose("Could not optimize CFString's due to duplicate ISA symbols");
                    cfStrings.isaInstallName = nullptr;
                    break;
                }
                cfStrings.isaInstallName = ma->installName();
                cfStrings.isaVMOffset    = foundInfo.value;
            }
        }
        if ( cfStrings.isaInstallName != nullptr ) {
            for (DylibInfo& dylib : _sortedDylibs) {
                _coalescedText.parseCFConstants(dylib.input->mappedFile.mh, dylib.textCoalescer);
            }
        }
    }
}
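// Sub-region ordering used by the layout below:
//   __DATA mapping: packed __DATA_CONST/__OBJC_CONST, coalesced CFStrings,
//                   __DATA_DIRTY (order-file order), remaining __DATA segments,
//                   then the objc r/w buffer when no __AUTH mapping exists.
//   __AUTH mapping: only created when authenticated fixups were found; repeats
//                   the same ordering for __AUTH_CONST and the authenticated
//                   __DATA* segments, and then hosts the objc r/w buffer.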
// This is the new method which puts all __DATA* mappings into their own mappings
void SharedCacheBuilder::assignMultipleDataSegmentAddresses(uint64_t& addr, uint32_t totalProtocolDefCount) {
    uint64_t nextRegionFileOffset = _readExecuteRegion.sizeInUse;

    const size_t dylibCount = _sortedDylibs.size();
    uint32_t dirtyDataSortIndexes[dylibCount];
    for (size_t i=0; i < dylibCount; ++i)
        dirtyDataSortIndexes[i] = (uint32_t)i;
    std::sort(&dirtyDataSortIndexes[0], &dirtyDataSortIndexes[dylibCount], [&](const uint32_t& a, const uint32_t& b) {
        const auto& orderA = _options.dirtyDataSegmentOrdering.find(_sortedDylibs[a].input->mappedFile.runtimePath);
        const auto& orderB = _options.dirtyDataSegmentOrdering.find(_sortedDylibs[b].input->mappedFile.runtimePath);
        bool foundA = (orderA != _options.dirtyDataSegmentOrdering.end());
        bool foundB = (orderB != _options.dirtyDataSegmentOrdering.end());

        // Order all __DATA_DIRTY segments specified in the order file first, in the order specified in the file,
        // followed by any other __DATA_DIRTY segments in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;
        else
            return _sortedDylibs[a].input->mappedFile.runtimePath < _sortedDylibs[b].input->mappedFile.runtimePath;
    });
    // Work out if we'll have __AUTH regions, as the objc RW has to go at the end of __AUTH if it exists, or
    // the end of __DATA if we have no __AUTH
    __block bool foundAuthenticatedFixups = false;

    // This tracks which segments contain authenticated data, even if their name isn't __AUTH*
    std::map<const DylibInfo*, std::set<uint32_t>> authenticatedSegments;

    if ( strcmp(_archLayout->archName, "arm64e") == 0 ) {
        for (DylibInfo& dylib : _sortedDylibs) {
            __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];

            // Put all __DATA_DIRTY segments in the __AUTH region first, then we don't need to walk their chains
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( strcmp(segInfo.segName, "__DATA_DIRTY") == 0 ) {
                    authSegmentIndices.insert(segInfo.segIndex);
                    foundAuthenticatedFixups = true;
                }
            });

            dylib.input->mappedFile.mh->withChainStarts(_diagnostics, 0,
                                                        ^(const dyld_chained_starts_in_image* starts) {
                dylib.input->mappedFile.mh->forEachFixupChainSegment(_diagnostics, starts,
                                                                     ^(const dyld_chained_starts_in_segment* segmentInfo, uint32_t segIndex, bool& stopSegment) {
                    // Skip walking segments we already know are __AUTH, ie, __DATA_DIRTY
                    if ( authSegmentIndices.count(segIndex) )
                        return;

                    dylib.input->mappedFile.mh->forEachFixupInSegmentChains(_diagnostics, segmentInfo, false,
                                                                            ^(dyld3::MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc, const dyld_chained_starts_in_segment* segInfo, bool& stopChain) {
                        uint16_t chainedFixupsFormat = segInfo->pointer_format;
                        assert( (chainedFixupsFormat == DYLD_CHAINED_PTR_ARM64E) || (chainedFixupsFormat == DYLD_CHAINED_PTR_ARM64E_USERLAND) || (chainedFixupsFormat == DYLD_CHAINED_PTR_ARM64E_USERLAND24) );

                        if ( fixupLoc->arm64e.authRebase.auth ) {
                            foundAuthenticatedFixups = true;
                            authSegmentIndices.insert(segIndex);
                            stopChain = true;
                        }
                    });
                });
            });
        }
    }

    {
        // First mapping: __DATA
        Region region;
        region.buffer               = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
        region.bufferSize           = 0;
        region.sizeInUse            = 0;
        region.unslidLoadAddress    = addr;
        region.cacheFileOffset      = nextRegionFileOffset;
        region.name                 = "__DATA";

        // layout all __DATA_CONST/__OBJC_CONST segments
        __block int dataConstSegmentCount = 0;
        for (DylibInfo& dylib : _sortedDylibs) {
            __block uint64_t textSegVmAddr = 0;
            __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( _options.platform == dyld3::Platform::watchOS_simulator && !_is64 )
                    return;
                if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                    textSegVmAddr = segInfo.vmAddr;
                if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                    return;
                if ( (strcmp(segInfo.segName, "__DATA_CONST") != 0) && (strcmp(segInfo.segName, "__OBJC_CONST") != 0) )
                    return;

                // We may have coalesced the sections at the end of this segment.  In that case, shrink the segment to remove them.
                __block size_t sizeOfSections = 0;
                __block bool foundCoalescedSection = false;
                dylib.input->mappedFile.mh->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stopSection) {
                    if (strcmp(sectInfo.segInfo.segName, segInfo.segName) != 0)
                        return;
                    if ( dylib.textCoalescer.sectionWasCoalesced(segInfo.segName, sectInfo.sectName)) {
                        foundCoalescedSection = true;
                    } else {
                        sizeOfSections = sectInfo.sectAddr + sectInfo.sectSize - segInfo.vmAddr;
                    }
                });
                if (!foundCoalescedSection)
                    sizeOfSections = segInfo.sizeOfSections;

                if ( authSegmentIndices.count(segInfo.segIndex) ) {
                    // Only move this segment to __AUTH if it had content we didn't coalesce away
                    if ( !foundCoalescedSection || (sizeOfSections != 0) ) {
                        // Don't put authenticated __DATA_CONST/__OBJC_CONST in the non-AUTH __DATA mapping
                        _diagnostics.verbose("%s: treating authenticated %s as __AUTH_CONST\n", dylib.dylibID.c_str(), segInfo.segName);
                        return;
                    }
                }

                ++dataConstSegmentCount;
                // Pack __DATA_CONST segments
                addr = align(addr, segInfo.p2align);
                size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)sizeOfSections);
                uint64_t offsetInRegion = addr - region.unslidLoadAddress;
                SegmentMappingInfo loc;
                loc.srcSegment              = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
                loc.segName                 = segInfo.segName;
                loc.dstSegment              = region.buffer + offsetInRegion;
                loc.dstCacheUnslidAddress   = addr;
                loc.dstCacheFileOffset      = (uint32_t)(region.cacheFileOffset + offsetInRegion);
                loc.dstCacheSegmentSize     = (uint32_t)sizeOfSections;
                loc.dstCacheFileSize        = (uint32_t)copySize;
                loc.copySegmentSize         = (uint32_t)copySize;
                loc.srcSegmentIndex         = segInfo.segIndex;
                dylib.cacheLocation.push_back(loc);
                addr += loc.dstCacheSegmentSize;
            });
        }

        // align __DATA_CONST region end
        addr = align(addr, _archLayout->sharedRegionAlignP2);

        // Make space for the cfstrings
        if ( _coalescedText.cfStrings.bufferSize != 0 ) {
            // Keep __DATA segments 4K or more aligned
            addr = align(addr, 12);
            uint64_t offsetInRegion = addr - region.unslidLoadAddress;

            CacheCoalescedText::CFSection& cacheSection = _coalescedText.cfStrings;
            cacheSection.bufferAddr         = region.buffer + offsetInRegion;
            cacheSection.bufferVMAddr       = addr;
            cacheSection.cacheFileOffset    = region.cacheFileOffset + offsetInRegion;
            addr += cacheSection.bufferSize;
        }
        // layout all __DATA_DIRTY segments, sorted (FIXME)
        for (size_t i=0; i < dylibCount; ++i) {
            DylibInfo& dylib = _sortedDylibs[dirtyDataSortIndexes[i]];
            __block uint64_t textSegVmAddr = 0;
            __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( _options.platform == dyld3::Platform::watchOS_simulator && !_is64 )
                    return;
                if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                    textSegVmAddr = segInfo.vmAddr;
                if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                    return;
                if ( strcmp(segInfo.segName, "__DATA_DIRTY") != 0 )
                    return;
                if ( authSegmentIndices.count(segInfo.segIndex) ) {
                    // Don't put authenticated __DATA_DIRTY in the non-AUTH __DATA mapping.
                    // This is going to be true for all arm64e __DATA_DIRTY as we move it all, regardless of auth fixups.
                    // Given that, don't issue a diagnostic as it's really not helpful
                    return;
                }

                // Pack __DATA_DIRTY segments
                addr = align(addr, segInfo.p2align);
                size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
                uint64_t offsetInRegion = addr - region.unslidLoadAddress;
                SegmentMappingInfo loc;
                loc.srcSegment              = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
                loc.segName                 = segInfo.segName;
                loc.dstSegment              = region.buffer + offsetInRegion;
                loc.dstCacheUnslidAddress   = addr;
                loc.dstCacheFileOffset      = (uint32_t)(region.cacheFileOffset + offsetInRegion);
                loc.dstCacheSegmentSize     = (uint32_t)segInfo.sizeOfSections;
                loc.dstCacheFileSize        = (uint32_t)copySize;
                loc.copySegmentSize         = (uint32_t)copySize;
                loc.srcSegmentIndex         = segInfo.segIndex;
                dylib.cacheLocation.push_back(loc);
                addr += loc.dstCacheSegmentSize;
            });
        }

        // align __DATA_DIRTY region end
        addr = align(addr, _archLayout->sharedRegionAlignP2);
        // layout all __DATA segments (and other r/w non-dirty, non-const, non-auth segments)
        for (DylibInfo& dylib : _sortedDylibs) {
            __block uint64_t textSegVmAddr = 0;
            __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                    textSegVmAddr = segInfo.vmAddr;
                if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                    return;
                if ( _options.platform != dyld3::Platform::watchOS_simulator || _is64 ) {
                    if ( strcmp(segInfo.segName, "__DATA_CONST") == 0 )
                        return;
                    if ( strcmp(segInfo.segName, "__DATA_DIRTY") == 0 )
                        return;
                    if ( strcmp(segInfo.segName, "__OBJC_CONST") == 0 )
                        return;
                }
                // Skip __AUTH* segments as they'll be handled elsewhere
                if ( strncmp(segInfo.segName, "__AUTH", 6) == 0 )
                    return;
                if ( authSegmentIndices.count(segInfo.segIndex) ) {
                    // Don't put authenticated __DATA in the non-AUTH __DATA mapping
                    _diagnostics.verbose("%s: treating authenticated __DATA as __AUTH\n", dylib.dylibID.c_str());
                    return;
                }

                bool forcePageAlignedData = false;
                if (_options.platform == dyld3::Platform::macOS) {
                    forcePageAlignedData = dylib.input->mappedFile.mh->hasUnalignedPointerFixups();
                    //if ( forcePageAlignedData )
                    //    warning("unaligned pointer in %s\n", dylib.input->mappedFile.runtimePath.c_str());
                }

                if ( (dataConstSegmentCount > 10) && !forcePageAlignedData ) {
                    // Pack __DATA segments only if we also have __DATA_CONST segments
                    addr = align(addr, segInfo.p2align);
                }
                else {
                    // Keep __DATA segments 4K or more aligned
                    addr = align(addr, std::max((int)segInfo.p2align, (int)12));
                }

                size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
                uint64_t offsetInRegion = addr - region.unslidLoadAddress;
                SegmentMappingInfo loc;
                loc.srcSegment              = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
                loc.segName                 = segInfo.segName;
                loc.dstSegment              = region.buffer + offsetInRegion;
                loc.dstCacheUnslidAddress   = addr;
                loc.dstCacheFileOffset      = (uint32_t)(region.cacheFileOffset + offsetInRegion);
                loc.dstCacheSegmentSize     = (uint32_t)segInfo.sizeOfSections;
                loc.dstCacheFileSize        = (uint32_t)copySize;
                loc.copySegmentSize         = (uint32_t)copySize;
                loc.srcSegmentIndex         = segInfo.segIndex;
                dylib.cacheLocation.push_back(loc);
                addr += loc.dstCacheSegmentSize;
            });
        }

        if ( !foundAuthenticatedFixups ) {
            // reserve space for objc r/w optimization tables
            _objcReadWriteBufferSizeAllocated = align(computeReadWriteObjC((uint32_t)_sortedDylibs.size(), totalProtocolDefCount), 14);
            addr = align(addr, 4); // objc r/w section contains pointer and must be at least pointer align
            _objcReadWriteBuffer = region.buffer + (addr - region.unslidLoadAddress);
            addr += _objcReadWriteBufferSizeAllocated;
        }

        // align DATA region end
        addr = align(addr, _archLayout->sharedRegionAlignP2);
        uint64_t endDataAddress = addr;
        region.bufferSize   = endDataAddress - region.unslidLoadAddress;
        region.sizeInUse    = region.bufferSize;

        _dataRegions.push_back(region);
        nextRegionFileOffset = region.cacheFileOffset + region.sizeInUse;
    }
    if ( foundAuthenticatedFixups ) {
        // Second mapping: __AUTH
        // align __AUTH region
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);

        Region region;
        region.buffer               = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
        region.bufferSize           = 0;
        region.sizeInUse            = 0;
        region.unslidLoadAddress    = addr;
        region.cacheFileOffset      = nextRegionFileOffset;
        region.name                 = "__AUTH";
        // layout all __AUTH_CONST segments
        __block int authConstSegmentCount = 0;
        for (DylibInfo& dylib : _sortedDylibs) {
            __block uint64_t textSegVmAddr = 0;
            __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( _options.platform == dyld3::Platform::watchOS_simulator && !_is64 )
                    return;
                if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                    textSegVmAddr = segInfo.vmAddr;
                if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                    return;

                // We may have coalesced the sections at the end of this segment.  In that case, shrink the segment to remove them.
                __block size_t sizeOfSections = 0;
                __block bool foundCoalescedSection = false;
                dylib.input->mappedFile.mh->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stopSection) {
                    if (strcmp(sectInfo.segInfo.segName, segInfo.segName) != 0)
                        return;
                    if ( dylib.textCoalescer.sectionWasCoalesced(segInfo.segName, sectInfo.sectName)) {
                        foundCoalescedSection = true;
                    } else {
                        sizeOfSections = sectInfo.sectAddr + sectInfo.sectSize - segInfo.vmAddr;
                    }
                });
                if (!foundCoalescedSection)
                    sizeOfSections = segInfo.sizeOfSections;

                if ( strcmp(segInfo.segName, "__AUTH_CONST") == 0 ) {
                    // We'll handle __AUTH_CONST here
                } else if ( (strcmp(segInfo.segName, "__DATA_CONST") == 0) || (strcmp(segInfo.segName, "__OBJC_CONST") == 0) ) {
                    // And we'll also handle __DATA_CONST/__OBJC_CONST which may contain authenticated pointers
                    if ( authSegmentIndices.count(segInfo.segIndex) == 0 ) {
                        // This __DATA_CONST doesn't contain authenticated pointers so was handled earlier
                        return;
                    }
                    // We only moved this segment to __AUTH if it had content we didn't coalesce away
                    if ( foundCoalescedSection && (sizeOfSections == 0) ) {
                        // This __DATA_CONST doesn't contain authenticated pointers so was handled earlier
                        return;
                    }
                } else {
                    // Not __AUTH_CONST or __DATA_CONST/__OBJC_CONST so skip this
                    return;
                }

                ++authConstSegmentCount;
                // Pack __AUTH_CONST segments
                addr = align(addr, segInfo.p2align);
                size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)sizeOfSections);
                uint64_t offsetInRegion = addr - region.unslidLoadAddress;
                SegmentMappingInfo loc;
                loc.srcSegment              = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
                loc.segName                 = segInfo.segName;
                loc.dstSegment              = region.buffer + offsetInRegion;
                loc.dstCacheUnslidAddress   = addr;
                loc.dstCacheFileOffset      = (uint32_t)(region.cacheFileOffset + offsetInRegion);
                loc.dstCacheSegmentSize     = (uint32_t)sizeOfSections;
                loc.dstCacheFileSize        = (uint32_t)copySize;
                loc.copySegmentSize         = (uint32_t)copySize;
                loc.srcSegmentIndex         = segInfo.segIndex;
                dylib.cacheLocation.push_back(loc);
                addr += loc.dstCacheSegmentSize;
            });
        }

        // align __AUTH_CONST region end
        addr = align(addr, _archLayout->sharedRegionAlignP2);
        // __AUTH_DIRTY.  Note this is really __DATA_DIRTY as we don't generate an __AUTH_DIRTY in ld64
        for (size_t i=0; i < dylibCount; ++i) {
            DylibInfo& dylib = _sortedDylibs[dirtyDataSortIndexes[i]];
            __block uint64_t textSegVmAddr = 0;
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( _options.platform == dyld3::Platform::watchOS_simulator && !_is64 )
                    return;
                if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                    textSegVmAddr = segInfo.vmAddr;
                if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                    return;
                if ( strcmp(segInfo.segName, "__DATA_DIRTY") != 0 )
                    return;

                // Pack __AUTH_DIRTY segments
                addr = align(addr, segInfo.p2align);
                size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
                uint64_t offsetInRegion = addr - region.unslidLoadAddress;
                SegmentMappingInfo loc;
                loc.srcSegment              = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
                loc.segName                 = segInfo.segName;
                loc.dstSegment              = region.buffer + offsetInRegion;
                loc.dstCacheUnslidAddress   = addr;
                loc.dstCacheFileOffset      = (uint32_t)(region.cacheFileOffset + offsetInRegion);
                loc.dstCacheSegmentSize     = (uint32_t)segInfo.sizeOfSections;
                loc.dstCacheFileSize        = (uint32_t)copySize;
                loc.copySegmentSize         = (uint32_t)copySize;
                loc.srcSegmentIndex         = segInfo.segIndex;
                dylib.cacheLocation.push_back(loc);
                addr += loc.dstCacheSegmentSize;
            });
        }

        // align __AUTH_DIRTY region end
        addr = align(addr, _archLayout->sharedRegionAlignP2);
        // layout all __AUTH segments (and other r/w non-dirty, non-const segments with auth fixups)
        for (DylibInfo& dylib : _sortedDylibs) {
            __block uint64_t textSegVmAddr = 0;
            __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
            dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
                if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                    textSegVmAddr = segInfo.vmAddr;
                if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
                    return;
                if ( _options.platform != dyld3::Platform::watchOS_simulator || _is64 ) {
                    if ( strcmp(segInfo.segName, "__AUTH_CONST") == 0 )
                        return;
                }
                if ( strncmp(segInfo.segName, "__AUTH", 6) == 0 ) {
                    // We'll handle __AUTH* here
                } else {
                    // And we'll also handle __DATA* which contains authenticated pointers
                    if ( authSegmentIndices.count(segInfo.segIndex) == 0 ) {
                        // This __DATA doesn't contain authenticated pointers so was handled earlier
                        return;
                    }
                }
                if ( _options.platform != dyld3::Platform::watchOS_simulator || _is64 ) {
                    if ( strcmp(segInfo.segName, "__DATA_CONST") == 0 )
                        return;
                    if ( strcmp(segInfo.segName, "__DATA_DIRTY") == 0 )
                        return;
                    if ( strcmp(segInfo.segName, "__OBJC_CONST") == 0 )
                        return;
                }

                bool forcePageAlignedData = false;
                if (_options.platform == dyld3::Platform::macOS) {
                    forcePageAlignedData = dylib.input->mappedFile.mh->hasUnalignedPointerFixups();
                    //if ( forcePageAlignedData )
                    //    warning("unaligned pointer in %s\n", dylib.input->mappedFile.runtimePath.c_str());
                }

                if ( (authConstSegmentCount > 10) && !forcePageAlignedData ) {
                    // Pack __AUTH segments only if we also have __AUTH_CONST segments
                    addr = align(addr, segInfo.p2align);
                }
                else {
                    // Keep __AUTH segments 4K or more aligned
                    addr = align(addr, std::max((int)segInfo.p2align, (int)12));
                }

                size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
                uint64_t offsetInRegion = addr - region.unslidLoadAddress;
                SegmentMappingInfo loc;
                loc.srcSegment              = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
                loc.segName                 = segInfo.segName;
                loc.dstSegment              = region.buffer + offsetInRegion;
                loc.dstCacheUnslidAddress   = addr;
                loc.dstCacheFileOffset      = (uint32_t)(region.cacheFileOffset + offsetInRegion);
                loc.dstCacheSegmentSize     = (uint32_t)segInfo.sizeOfSections;
                loc.dstCacheFileSize        = (uint32_t)copySize;
                loc.copySegmentSize         = (uint32_t)copySize;
                loc.srcSegmentIndex         = segInfo.segIndex;
                dylib.cacheLocation.push_back(loc);
                addr += loc.dstCacheSegmentSize;
            });
        }

        // reserve space for objc r/w optimization tables
        _objcReadWriteBufferSizeAllocated = align(computeReadWriteObjC((uint32_t)_sortedDylibs.size(), totalProtocolDefCount), 14);
        addr = align(addr, 4); // objc r/w section contains pointer and must be at least pointer align
        _objcReadWriteBuffer = region.buffer + (addr - region.unslidLoadAddress);
        addr += _objcReadWriteBufferSizeAllocated;

        // align DATA region end
        addr = align(addr, _archLayout->sharedRegionAlignP2);
        uint64_t endDataAddress = addr;
        region.bufferSize   = endDataAddress - region.unslidLoadAddress;
        region.sizeInUse    = region.bufferSize;

        _dataRegions.push_back(region);
        nextRegionFileOffset = region.cacheFileOffset + region.sizeInUse;
    }
    // Sanity check that we didn't put the same segment in 2 different ranges
    for (DylibInfo& dylib : _sortedDylibs) {
        __block std::unordered_set<uint64_t> seenSegmentIndices;
        for (SegmentMappingInfo& segmentInfo : dylib.cacheLocation) {
            if ( seenSegmentIndices.count(segmentInfo.srcSegmentIndex) != 0 ) {
                _diagnostics.error("%s segment %s was duplicated in layout",
                                   dylib.input->mappedFile.mh->installName(), segmentInfo.segName);
                return;
            }
            seenSegmentIndices.insert(segmentInfo.srcSegmentIndex);
        }
    }
}
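// Top-level address assignment: computes the cache header size, lays out the
// read-execute region (__TEXT segments plus coalesced strings, objc, and IMP-cache
// tables), then the writable __DATA*/__AUTH* mappings, and finally the read-only
// region (slide info, read-only segments, then LINKEDIT).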
void SharedCacheBuilder::assignSegmentAddresses()
{
    // calculate size of header info and where first dylib's mach_header should start
    size_t startOffset = sizeof(dyld_cache_header) + DyldSharedCache::MaxMappings * sizeof(dyld_cache_mapping_info);
    startOffset += DyldSharedCache::MaxMappings * sizeof(dyld_cache_mapping_and_slide_info);
    startOffset += sizeof(dyld_cache_image_info) * _sortedDylibs.size();
    startOffset += sizeof(dyld_cache_image_text_info) * _sortedDylibs.size();
    for (const DylibInfo& dylib : _sortedDylibs) {
        startOffset += (strlen(dylib.input->mappedFile.mh->installName()) + 1);
    }
    //fprintf(stderr, "%s total header size = 0x%08lX\n", _options.archName.c_str(), startOffset);
    startOffset = align(startOffset, 12);

    // HACK!: Rebase v4 assumes that values below 0x8000 are not pointers (encoding as offsets from the cache header).
    // If using a minimal cache, we need to pad out the cache header to make sure a pointer doesn't fall within that range
#if SUPPORT_ARCH_arm64_32 || SUPPORT_ARCH_armv7k
    if ( _options.cacheSupportsASLR && !_archLayout->is64 ) {
        if ( _archLayout->pointerDeltaMask == 0xC0000000 )
            startOffset = std::max(startOffset, (size_t)0x8000);
    }
#endif
    // assign TEXT segment addresses
    _readExecuteRegion.buffer               = (uint8_t*)_fullAllocatedBuffer;
    _readExecuteRegion.bufferSize           = 0;
    _readExecuteRegion.sizeInUse            = 0;
    _readExecuteRegion.unslidLoadAddress    = _archLayout->sharedMemoryStart;
    _readExecuteRegion.cacheFileOffset      = 0;
    __block uint64_t addr = _readExecuteRegion.unslidLoadAddress + startOffset; // header
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != (VM_PROT_READ | VM_PROT_EXECUTE) )
                return;

            // We may have coalesced the sections at the end of this segment.  In that case, shrink the segment to remove them.
            __block size_t sizeOfSections = 0;
            __block bool foundCoalescedSection = false;
            dylib.input->mappedFile.mh->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stopSection) {
                if (strcmp(sectInfo.segInfo.segName, segInfo.segName) != 0)
                    return;
                if ( dylib.textCoalescer.sectionWasCoalesced(segInfo.segName, sectInfo.sectName)) {
                    foundCoalescedSection = true;
                } else {
                    sizeOfSections = sectInfo.sectAddr + sectInfo.sectSize - segInfo.vmAddr;
                }
            });
            if (!foundCoalescedSection)
                sizeOfSections = segInfo.sizeOfSections;

            // Keep __TEXT segments 4K or more aligned
            addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            uint64_t offsetInRegion = addr - _readExecuteRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment              = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName                 = segInfo.segName;
            loc.dstSegment              = _readExecuteRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress   = addr;
            loc.dstCacheFileOffset      = (uint32_t)offsetInRegion;
            loc.dstCacheSegmentSize     = (uint32_t)align(sizeOfSections, 12);
            loc.dstCacheFileSize        = (uint32_t)align(sizeOfSections, 12);
            loc.copySegmentSize         = (uint32_t)sizeOfSections;
            loc.srcSegmentIndex         = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }
    // reserve space for objc optimization tables and deduped strings
    uint64_t objcReadOnlyBufferVMAddr = addr;
    _objcReadOnlyBuffer = _readExecuteRegion.buffer + (addr - _readExecuteRegion.unslidLoadAddress);

    // First the strings as we'll fill in the objc tables later in the optimizer
    for (const char* section : CacheCoalescedText::SupportedSections) {
        CacheCoalescedText::StringSection& cacheStringSection = _coalescedText.getSectionData(section);
        cacheStringSection.bufferAddr   = _readExecuteRegion.buffer + (addr - _readExecuteRegion.unslidLoadAddress);
        cacheStringSection.bufferVMAddr = addr;
        addr += cacheStringSection.bufferSize;
    }

    addr = align(addr, 14);
    _objcReadOnlyBufferSizeUsed = addr - objcReadOnlyBufferVMAddr;

    uint32_t totalSelectorRefCount = (uint32_t)_selectorStringsFromExecutables;
    uint32_t totalClassDefCount    = 0;
    uint32_t totalProtocolDefCount = 0;
    for (DylibInfo& dylib : _sortedDylibs) {
        dyld3::MachOAnalyzer::ObjCInfo info = dylib.input->mappedFile.mh->getObjCInfo();
        totalSelectorRefCount   += info.selRefCount;
        totalClassDefCount      += info.classDefCount;
        totalProtocolDefCount   += info.protocolDefCount;
    }

    // now that shared cache coalesces all selector strings, use that better count
    uint32_t coalescedSelectorCount = (uint32_t)_coalescedText.objcMethNames.stringsToOffsets.size();
    if ( coalescedSelectorCount > totalSelectorRefCount )
        totalSelectorRefCount = coalescedSelectorCount;
    addr += align(computeReadOnlyObjC(totalSelectorRefCount, totalClassDefCount, totalProtocolDefCount), 14);

    size_t impCachesSize = _impCachesBuilder->totalIMPCachesSize();
    size_t alignedImpCachesSize = align(impCachesSize, 14);
    _diagnostics.verbose("Reserving %zd bytes for IMP caches (aligned to %zd)\n", impCachesSize, alignedImpCachesSize);
    addr += alignedImpCachesSize;

    _objcReadOnlyBufferSizeAllocated = addr - objcReadOnlyBufferVMAddr;

    // align TEXT region end
    uint64_t endTextAddress = align(addr, _archLayout->sharedRegionAlignP2);
    _readExecuteRegion.bufferSize = endTextAddress - _readExecuteRegion.unslidLoadAddress;
    _readExecuteRegion.sizeInUse  = _readExecuteRegion.bufferSize;
    // assign __DATA* addresses
    if ( _archLayout->sharedRegionsAreDiscontiguous )
        addr = DISCONTIGUOUS_RW;
    else
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);

    assignMultipleDataSegmentAddresses(addr, totalProtocolDefCount);

    // start read-only region
    if ( _archLayout->sharedRegionsAreDiscontiguous )
        addr = DISCONTIGUOUS_RO;
    else
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
    _readOnlyRegion.buffer              = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
    _readOnlyRegion.bufferSize          = 0;
    _readOnlyRegion.sizeInUse           = 0;
    _readOnlyRegion.unslidLoadAddress   = addr;
    _readOnlyRegion.cacheFileOffset     = lastDataRegion()->cacheFileOffset + lastDataRegion()->sizeInUse;
    // reserve space for kernel ASLR slide info at start of r/o region
    if ( _options.cacheSupportsASLR ) {
        size_t slideInfoSize = sizeof(dyld_cache_slide_info);
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info2));
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info3));
        slideInfoSize = std::max(slideInfoSize, sizeof(dyld_cache_slide_info4));
        // We need one slide info header per data region, plus enough space for that region's pages.
        // Each region will also be padded to a page-size so that the kernel can wire it.
        for (Region& region : _dataRegions) {
            uint64_t offsetInRegion = addr - _readOnlyRegion.unslidLoadAddress;
            region.slideInfoBuffer = _readOnlyRegion.buffer + offsetInRegion;
            region.slideInfoBufferSizeAllocated = align(slideInfoSize + (region.sizeInUse/4096) * _archLayout->slideInfoBytesPerPage + 0x4000, _archLayout->sharedRegionAlignP2);
            region.slideInfoFileOffset = _readOnlyRegion.cacheFileOffset + offsetInRegion;
            addr += region.slideInfoBufferSizeAllocated;
        }
    }
    // layout all read-only (but not LINKEDIT) segments
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != VM_PROT_READ )
                return;
            if ( strcmp(segInfo.segName, "__LINKEDIT") == 0 )
                return;

            // Keep segments 4K or more aligned
            addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            uint64_t offsetInRegion = addr - _readOnlyRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment              = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName                 = segInfo.segName;
            loc.dstSegment              = _readOnlyRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress   = addr;
            loc.dstCacheFileOffset      = (uint32_t)(_readOnlyRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize     = (uint32_t)align(segInfo.sizeOfSections, 12);
            loc.dstCacheFileSize        = (uint32_t)segInfo.sizeOfSections;
            loc.copySegmentSize         = (uint32_t)segInfo.sizeOfSections;
            loc.srcSegmentIndex         = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }
    // layout all LINKEDIT segments (after other read-only segments), aligned to 16KB
    addr = align(addr, 14);
    _nonLinkEditReadOnlySize = addr - _readOnlyRegion.unslidLoadAddress;
    for (DylibInfo& dylib : _sortedDylibs) {
        __block uint64_t textSegVmAddr = 0;
        dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
            if ( strcmp(segInfo.segName, "__TEXT") == 0 )
                textSegVmAddr = segInfo.vmAddr;
            if ( segInfo.protections != VM_PROT_READ )
                return;
            if ( strcmp(segInfo.segName, "__LINKEDIT") != 0 )
                return;

            // Keep segments 4K or more aligned
            addr = align(addr, std::max((int)segInfo.p2align, (int)12));
            size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
            uint64_t offsetInRegion = addr - _readOnlyRegion.unslidLoadAddress;
            SegmentMappingInfo loc;
            loc.srcSegment              = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
            loc.segName                 = segInfo.segName;
            loc.dstSegment              = _readOnlyRegion.buffer + offsetInRegion;
            loc.dstCacheUnslidAddress   = addr;
            loc.dstCacheFileOffset      = (uint32_t)(_readOnlyRegion.cacheFileOffset + offsetInRegion);
            loc.dstCacheSegmentSize     = (uint32_t)align(segInfo.sizeOfSections, 12);
            loc.dstCacheFileSize        = (uint32_t)copySize;
            loc.copySegmentSize         = (uint32_t)copySize;
            loc.srcSegmentIndex         = segInfo.segIndex;
            dylib.cacheLocation.push_back(loc);
            addr += loc.dstCacheSegmentSize;
        });
    }
    // align r/o region end
    addr = align(addr, _archLayout->sharedRegionAlignP2);
    uint64_t endReadOnlyAddress = addr;
    _readOnlyRegion.bufferSize  = endReadOnlyAddress - _readOnlyRegion.unslidLoadAddress + 0x100000;
    _readOnlyRegion.sizeInUse   = _readOnlyRegion.bufferSize;

    //fprintf(stderr, "RX region=%p -> %p, logical addr=0x%llX\n", _readExecuteRegion.buffer, _readExecuteRegion.buffer+_readExecuteRegion.bufferSize, _readExecuteRegion.unslidLoadAddress);
    //fprintf(stderr, "RW region=%p -> %p, logical addr=0x%llX\n", readWriteRegion.buffer, readWriteRegion.buffer+readWriteRegion.bufferSize, readWriteRegion.unslidLoadAddress);
    //fprintf(stderr, "RO region=%p -> %p, logical addr=0x%llX\n", _readOnlyRegion.buffer, _readOnlyRegion.buffer+_readOnlyRegion.bufferSize, _readOnlyRegion.unslidLoadAddress);

    // sort SegmentMappingInfo for each image to be in the same order as original segments
    for (DylibInfo& dylib : _sortedDylibs) {
        std::sort(dylib.cacheLocation.begin(), dylib.cacheLocation.end(), [&](const SegmentMappingInfo& a, const SegmentMappingInfo& b) {
            return a.srcSegmentIndex < b.srcSegmentIndex;
        });
    }
}
// Return the total size of the data regions, including padding between them.
// Note this assumes they are contiguous, or that we don't care about including
// additional space between them.
uint64_t SharedCacheBuilder::dataRegionsTotalSize() const {
    const Region* firstRegion = nullptr;
    const Region* lastRegion  = nullptr;
    for (const Region& region : _dataRegions) {
        if ( (firstRegion == nullptr) || (region.buffer < firstRegion->buffer) )
            firstRegion = &region;
        if ( (lastRegion == nullptr) || (region.buffer > lastRegion->buffer) )
            lastRegion = &region;
    }
    return (lastRegion->buffer - firstRegion->buffer) + lastRegion->sizeInUse;
}
// Return the total size of the data regions, excluding padding between them
uint64_t SharedCacheBuilder::dataRegionsSizeInUse() const {
    uint64_t size = 0;
    for (const Region& dataRegion : _dataRegions)
        size += dataRegion.sizeInUse;
    return size;
}
// Return the earliest data region by address
const CacheBuilder::Region* SharedCacheBuilder::firstDataRegion() const {
    const Region* firstRegion = nullptr;
    for (const Region& region : _dataRegions) {
        if ( (firstRegion == nullptr) || (region.buffer < firstRegion->buffer) )
            firstRegion = &region;
    }
    return firstRegion;
}

// Return the latest data region by address
const CacheBuilder::Region* SharedCacheBuilder::lastDataRegion() const {
    const Region* lastRegion = nullptr;
    for (const Region& region : _dataRegions) {
        if ( (lastRegion == nullptr) || (region.buffer > lastRegion->buffer) )
            lastRegion = &region;
    }
    return lastRegion;
}
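// Packs a fixup location into the bit-field form stored in the cache's patch
// tables.  The trailing asserts catch values that do not survive the narrowing;
// note high8 is stored shifted down one bit, as high7.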
static dyld_cache_patchable_location makePatchLocation(size_t cacheOff, dyld3::MachOAnalyzerSet::PointerMetaData pmd, uint64_t addend) {
    dyld_cache_patchable_location patch;
    patch.cacheOffset           = cacheOff;
    patch.high7                 = pmd.high8 >> 1;
    patch.addend                = addend;
    patch.authenticated         = pmd.authenticated;
    patch.usesAddressDiversity  = pmd.usesAddrDiversity;
    patch.key                   = pmd.key;
    patch.discriminator         = pmd.diversity;
    // check for truncations
    assert(patch.cacheOffset == cacheOff);
    assert(patch.addend == addend);
    assert((patch.high7 << 1) == pmd.high8);
    return patch;
}
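// Builds the dyld3 ImageArray covering every dylib in the cache.  The fixup
// handler passed to the ClosureBuilder writes rebases/binds directly into the
// cache buffer and records each export's uses so addImageArray() can emit the
// patch tables afterwards.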
void SharedCacheBuilder::buildImageArray(std::vector<DyldSharedCache::FileAlias>& aliases)
{
    typedef dyld3::closure::ClosureBuilder::CachedDylibInfo CachedDylibInfo;

    // convert STL data structures to simple arrays to pass to makeDyldCacheImageArray()
    __block std::vector<CachedDylibInfo> dylibInfos;
    __block std::unordered_map<dyld3::closure::ImageNum, const dyld3::MachOLoaded*> imageNumToML;
    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        const dyld3::MachOLoaded* ml = (dyld3::MachOLoaded*)mh;
        if ( !_someDylibsUsedChainedFixups && ml->hasChainedFixups() )
            _someDylibsUsedChainedFixups = true;
        uint64_t mtime;
        uint64_t inode;
        cache->getIndexedImageEntry((uint32_t)dylibInfos.size(), mtime, inode);
        CachedDylibInfo entry;
        entry.fileInfo.fileContent  = mh;
        entry.fileInfo.path         = installName;
        entry.fileInfo.sliceOffset  = 0;
        entry.fileInfo.inode        = inode;
        entry.fileInfo.mtime        = mtime;
        dylibInfos.push_back(entry);
        imageNumToML[(dyld3::closure::ImageNum)(dylibInfos.size())] = ml;
    });

    // Convert symlinks from STL to simple char pointers.
    std::vector<dyld3::closure::ClosureBuilder::CachedDylibAlias> dylibAliases;
    dylibAliases.reserve(aliases.size());
    for (const auto& alias : aliases)
        dylibAliases.push_back({ alias.realPath.c_str(), alias.aliasPath.c_str() });

    typedef dyld3::MachOAnalyzerSet::FixupTarget     FixupTarget;
    typedef dyld3::MachOAnalyzerSet::PointerMetaData PointerMetaData;
    dyld3::closure::ClosureBuilder::DylibFixupHandler handler = ^(const dyld3::MachOLoaded* fixupIn, uint64_t fixupLocRuntimeOffset,
                                                                  PointerMetaData pmd, const FixupTarget& target) {
        uint8_t*  fixupLoc   = (uint8_t*)fixupIn + fixupLocRuntimeOffset;
        uint32_t* fixupLoc32 = (uint32_t*)fixupLoc;
        uint64_t* fixupLoc64 = (uint64_t*)fixupLoc;
        uint64_t  targetSymbolOffsetInCache;
        switch ( target.kind ) {
            case FixupTarget::Kind::rebase:
                // rebasing already done in AdjustDylibSegments, but if input dylib uses chained fixups, target might not fit
                if ( _archLayout->is64 ) {
                    if ( pmd.authenticated )
                        _aslrTracker.setAuthData(fixupLoc, pmd.diversity, pmd.usesAddrDiversity, pmd.key);
                    if ( pmd.high8 )
                        _aslrTracker.setHigh8(fixupLoc, pmd.high8);
                    uint64_t targetVmAddr;
                    if ( _aslrTracker.hasRebaseTarget64(fixupLoc, &targetVmAddr) )
                        *fixupLoc64 = targetVmAddr;
                    else
                        *fixupLoc64 = (uint8_t*)target.foundInImage._mh - _readExecuteRegion.buffer + target.offsetInImage + _readExecuteRegion.unslidLoadAddress;
                }
                else {
                    uint32_t targetVmAddr;
                    assert(_aslrTracker.hasRebaseTarget32(fixupLoc, &targetVmAddr) && "32-bit archs always store target in side table");
                    *fixupLoc32 = targetVmAddr;
                }
                break;
            case FixupTarget::Kind::bindAbsolute:
                if ( _archLayout->is64 )
                    *fixupLoc64 = target.offsetInImage;
                else
                    *fixupLoc32 = (uint32_t)(target.offsetInImage);
                // don't record absolute targets for ASLR
                _aslrTracker.remove(fixupLoc);
                break;
            case FixupTarget::Kind::bindToImage:
                targetSymbolOffsetInCache = (uint8_t*)target.foundInImage._mh - _readExecuteRegion.buffer + target.offsetInImage - target.addend;
                if ( !target.weakCoalesced || !_aslrTracker.has(fixupLoc) ) {
                    // this handler is called a second time for weak_bind info, which we ignore when building cache
                    _aslrTracker.add(fixupLoc);
                    if ( _archLayout->is64 ) {
                        if ( pmd.high8 )
                            _aslrTracker.setHigh8(fixupLoc, pmd.high8);
                        if ( pmd.authenticated )
                            _aslrTracker.setAuthData(fixupLoc, pmd.diversity, pmd.usesAddrDiversity, pmd.key);
                        *fixupLoc64 = _archLayout->sharedMemoryStart + targetSymbolOffsetInCache + target.addend;
                    }
                    else {
                        assert(targetSymbolOffsetInCache < (_readOnlyRegion.buffer - _readExecuteRegion.buffer) && "offset not into TEXT or DATA of cache file");
                        uint32_t targetVmAddr;
                        if ( _aslrTracker.hasRebaseTarget32(fixupLoc, &targetVmAddr) )
                            *fixupLoc32 = targetVmAddr;
                        else
                            *fixupLoc32 = (uint32_t)(_archLayout->sharedMemoryStart + targetSymbolOffsetInCache + target.addend);
                    }
                }
                _dylibToItsExports[target.foundInImage._mh].insert(targetSymbolOffsetInCache);
                if ( target.isWeakDef )
                    _dylibWeakExports.insert({ target.foundInImage._mh, targetSymbolOffsetInCache });
                _exportsToUses[targetSymbolOffsetInCache].push_back(makePatchLocation(fixupLoc - _readExecuteRegion.buffer, pmd, target.addend));
                _exportsToName[targetSymbolOffsetInCache] = target.foundSymbolName;
                break;
            case FixupTarget::Kind::bindMissingSymbol:
                // if there are missing symbols, makeDyldCacheImageArray() will error
                break;
        }
    };
    // build ImageArray for all dylibs in dyld cache
    dyld3::closure::PathOverrides pathOverrides;
    dyld3::RootsChecker rootsChecker;
    dyld3::closure::ClosureBuilder cb(dyld3::closure::kFirstDyldCacheImageNum, _fileSystem, rootsChecker, cache, false, *_options.archs, pathOverrides,
                                      dyld3::closure::ClosureBuilder::AtPath::none, false, nullptr, _options.platform, handler);
    dyld3::Array<CachedDylibInfo> dylibs(&dylibInfos[0], dylibInfos.size(), dylibInfos.size());
    const dyld3::Array<dyld3::closure::ClosureBuilder::CachedDylibAlias> aliasesArray(dylibAliases.data(), dylibAliases.size(), dylibAliases.size());
    _imageArray = cb.makeDyldCacheImageArray(dylibs, aliasesArray);
    if ( cb.diagnostics().hasError() ) {
        _diagnostics.error("%s", cb.diagnostics().errorMessage().c_str());
        return;
    }
}
& a
, const dyld_cache_patchable_location
& b
) {
2535 return a
.cacheOffset
== b
.cacheOffset
;
void SharedCacheBuilder::addImageArray()
{
    // build trie of dylib paths
    __block std::vector<DylibIndexTrie::Entry> dylibEntrys;
    _imageArray->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        dylibEntrys.push_back(DylibIndexTrie::Entry(image->path(), DylibIndex(image->imageNum()-1)));
        image->forEachAlias(^(const char* aliasPath, bool& innerStop) {
            dylibEntrys.push_back(DylibIndexTrie::Entry(aliasPath, DylibIndex(image->imageNum()-1)));
        });
    });
    DylibIndexTrie dylibsTrie(dylibEntrys);
    std::vector<uint8_t> trieBytes;
    dylibsTrie.emit(trieBytes);
    while ( (trieBytes.size() % 4) != 0 )
        trieBytes.push_back(0);

    // build set of functions to never stub-eliminate because tools may need to override them
    std::unordered_set<std::string> alwaysGeneratePatch;
    for (const char* const* p = _s_neverStubEliminateSymbols; *p != nullptr; ++p)
        alwaysGeneratePatch.insert(*p);
    // Add the patches for the image array.
    __block uint64_t numPatchImages          = _imageArray->size();
    __block uint64_t numPatchExports         = 0;
    __block uint64_t numPatchLocations       = 0;
    __block uint64_t numPatchExportNameBytes = 0;

    auto needsPatch = [&](bool dylibNeedsPatching, const dyld3::MachOLoaded* mh,
                          CacheOffset offset) -> bool {
        if (dylibNeedsPatching)
            return true;
        if (_dylibWeakExports.find({ mh, offset }) != _dylibWeakExports.end())
            return true;
        const std::string& exportName = _exportsToName[offset];
        return alwaysGeneratePatch.find(exportName) != alwaysGeneratePatch.end();
    };

    // First calculate how much space we need
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        const dyld3::MachOLoaded* ml = (const dyld3::MachOLoaded*)mh;
        const std::set<CacheOffset>& dylibExports = _dylibToItsExports[ml];

        // On a customer cache, only store patch locations for interposable dylibs and weak binding
        bool dylibNeedsPatching = cache->isOverridablePath(installName);

        uint64_t numDylibExports = 0;
        for (CacheOffset exportCacheOffset : dylibExports) {
            if (!needsPatch(dylibNeedsPatching, ml, exportCacheOffset))
                continue;
            std::vector<dyld_cache_patchable_location>& uses = _exportsToUses[exportCacheOffset];
            uses.erase(std::unique(uses.begin(), uses.end()), uses.end());
            numPatchLocations += uses.size();

            std::string exportName = _exportsToName[exportCacheOffset];
            numPatchExportNameBytes += exportName.size() + 1;
            ++numDylibExports;
        }
        numPatchExports += numDylibExports;
    });
    // Now reserve the space
    __block std::vector<dyld_cache_image_patches>       patchImages;
    __block std::vector<dyld_cache_patchable_export>    patchExports;
    __block std::vector<dyld_cache_patchable_location>  patchLocations;
    __block std::vector<char>                           patchExportNames;

    patchImages.reserve(numPatchImages);
    patchExports.reserve(numPatchExports);
    patchLocations.reserve(numPatchLocations);
    patchExportNames.reserve(numPatchExportNameBytes);

    // And now fill it with the patch data
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        const dyld3::MachOLoaded* ml = (const dyld3::MachOLoaded*)mh;
        const std::set<CacheOffset>& dylibExports = _dylibToItsExports[ml];

        // On a customer cache, only store patch locations for interposable dylibs and weak binding
        bool dylibNeedsPatching = cache->isOverridablePath(installName);

        // Add the patch image which points in to the exports
        dyld_cache_image_patches patchImage;
        patchImage.patchExportsStartIndex = (uint32_t)patchExports.size();
        patchImage.patchExportsCount      = 0;

        // Then add each export which points to a list of locations and a name
        for (CacheOffset exportCacheOffset : dylibExports) {
            if (!needsPatch(dylibNeedsPatching, ml, exportCacheOffset))
                continue;
            ++patchImage.patchExportsCount;
            std::vector<dyld_cache_patchable_location>& uses = _exportsToUses[exportCacheOffset];

            dyld_cache_patchable_export cacheExport;
            cacheExport.cacheOffsetOfImpl        = (uint32_t)exportCacheOffset;
            cacheExport.patchLocationsStartIndex = (uint32_t)patchLocations.size();
            cacheExport.patchLocationsCount      = (uint32_t)uses.size();
            cacheExport.exportNameOffset         = (uint32_t)patchExportNames.size();
            patchExports.push_back(cacheExport);

            // Now add the list of locations.
            patchLocations.insert(patchLocations.end(), uses.begin(), uses.end());

            // And add the export name
            const std::string& exportName = _exportsToName[exportCacheOffset];
            patchExportNames.insert(patchExportNames.end(), &exportName[0], &exportName[0] + exportName.size() + 1);
        }
        patchImages.push_back(patchImage);
    });

    while ( (patchExportNames.size() % 4) != 0 )
        patchExportNames.push_back('\0');
    uint64_t patchInfoSize = sizeof(dyld_cache_patch_info);
    patchInfoSize += sizeof(dyld_cache_image_patches) * patchImages.size();
    patchInfoSize += sizeof(dyld_cache_patchable_export) * patchExports.size();
    patchInfoSize += sizeof(dyld_cache_patchable_location) * patchLocations.size();
    patchInfoSize += patchExportNames.size();

    // check for fit
    uint64_t imageArraySize = _imageArray->size();
    size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    if ( (imageArraySize+trieBytes.size()+patchInfoSize) > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold ImageArray and Trie (buffer size=%lldMB, imageArray size=%lldMB, trie size=%luKB, patch size=%lluKB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, imageArraySize/1024/1024, trieBytes.size()/1024, patchInfoSize/1024, freeSpace/1024/1024);
        return;
    }

    // copy into cache and update header
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    dyldCache->header.dylibsImageArrayAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    dyldCache->header.dylibsImageArraySize = imageArraySize;
    dyldCache->header.dylibsTrieAddr       = dyldCache->header.dylibsImageArrayAddr + imageArraySize;
    dyldCache->header.dylibsTrieSize       = trieBytes.size();
    ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, _imageArray, imageArraySize);
    ::memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse + imageArraySize, &trieBytes[0], trieBytes.size());

    // Also write out the patch info
    dyldCache->header.patchInfoAddr = dyldCache->header.dylibsTrieAddr + dyldCache->header.dylibsTrieSize;
    dyldCache->header.patchInfoSize = patchInfoSize;
    dyld_cache_patch_info patchInfo;
    patchInfo.patchTableArrayAddr     = dyldCache->header.patchInfoAddr + sizeof(dyld_cache_patch_info);
    patchInfo.patchTableArrayCount    = patchImages.size();
    patchInfo.patchExportArrayAddr    = patchInfo.patchTableArrayAddr + (patchInfo.patchTableArrayCount * sizeof(dyld_cache_image_patches));
    patchInfo.patchExportArrayCount   = patchExports.size();
    patchInfo.patchLocationArrayAddr  = patchInfo.patchExportArrayAddr + (patchInfo.patchExportArrayCount * sizeof(dyld_cache_patchable_export));
    patchInfo.patchLocationArrayCount = patchLocations.size();
    patchInfo.patchExportNamesAddr    = patchInfo.patchLocationArrayAddr + (patchInfo.patchLocationArrayCount * sizeof(dyld_cache_patchable_location));
    patchInfo.patchExportNamesSize    = patchExportNames.size();
    ::memcpy(_readOnlyRegion.buffer + dyldCache->header.patchInfoAddr - _readOnlyRegion.unslidLoadAddress,
             &patchInfo, sizeof(dyld_cache_patch_info));
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchTableArrayAddr - _readOnlyRegion.unslidLoadAddress,
             &patchImages[0], sizeof(patchImages[0]) * patchImages.size());
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchExportArrayAddr - _readOnlyRegion.unslidLoadAddress,
             &patchExports[0], sizeof(patchExports[0]) * patchExports.size());
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchLocationArrayAddr - _readOnlyRegion.unslidLoadAddress,
             &patchLocations[0], sizeof(patchLocations[0]) * patchLocations.size());
    ::memcpy(_readOnlyRegion.buffer + patchInfo.patchExportNamesAddr - _readOnlyRegion.unslidLoadAddress,
             &patchExportNames[0], patchExportNames.size());

    _readOnlyRegion.sizeInUse += align(imageArraySize + trieBytes.size() + patchInfoSize, 14);

    // Free the underlying image array buffer
    _imageArray->deallocate();
    _imageArray = nullptr;
}
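// Builds a separate ImageArray and trie for dylibs and bundles that are not in
// the cache, including overflow dylibs that can still have a precomputed dlopen
// closure, so dyld can find prebuilt image info for them at launch.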
void SharedCacheBuilder::addOtherImageArray(const std::vector<LoadedMachO>& otherDylibsAndBundles, std::vector<const LoadedMachO*>& overflowDylibs)
{
    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    dyld3::closure::PathOverrides pathOverrides;
    dyld3::closure::FileSystemNull nullFileSystem;
    dyld3::RootsChecker rootsChecker;
    dyld3::closure::ClosureBuilder cb(dyld3::closure::kFirstOtherOSImageNum, nullFileSystem, rootsChecker, cache, false, *_options.archs, pathOverrides,
                                      dyld3::closure::ClosureBuilder::AtPath::none, false, nullptr, _options.platform);

    // make ImageArray for other dylibs and bundles
    STACK_ALLOC_ARRAY(dyld3::closure::LoadedFileInfo, others, otherDylibsAndBundles.size() + overflowDylibs.size());
    for (const LoadedMachO& other : otherDylibsAndBundles) {
        if ( !contains(other.loadedFileInfo.path, "staged_system_apps/") )
            others.push_back(other.loadedFileInfo);
    }

    for (const LoadedMachO* dylib : overflowDylibs) {
        if (dylib->mappedFile.mh->canHavePrecomputedDlopenClosure(dylib->mappedFile.runtimePath.c_str(), ^(const char*) {}) )
            others.push_back(dylib->loadedFileInfo);
    }

    // Sort the others array by name so that it is deterministic
    std::sort(others.begin(), others.end(),
              [](const dyld3::closure::LoadedFileInfo& a, const dyld3::closure::LoadedFileInfo& b) {
                  // Sort mac before iOSMac
                  bool isIOSMacA = strncmp(a.path, "/System/iOSSupport/", 19) == 0;
                  bool isIOSMacB = strncmp(b.path, "/System/iOSSupport/", 19) == 0;
                  if (isIOSMacA != isIOSMacB)
                      return !isIOSMacA;
                  return strcmp(a.path, b.path) < 0;
              });

    const dyld3::closure::ImageArray* otherImageArray = cb.makeOtherDylibsImageArray(others, (uint32_t)_sortedDylibs.size());
2737 // build trie of paths
2738 __block
std::vector
<DylibIndexTrie::Entry
> otherEntrys
;
2739 otherImageArray
->forEachImage(^(const dyld3::closure::Image
* image
, bool& stop
) {
2740 if ( !image
->isInvalid() )
2741 otherEntrys
.push_back(DylibIndexTrie::Entry(image
->path(), DylibIndex(image
->imageNum())));
2743 DylibIndexTrie
dylibsTrie(otherEntrys
);
2744 std::vector
<uint8_t> trieBytes
;
2745 dylibsTrie
.emit(trieBytes
);
2746 while ( (trieBytes
.size() % 4) != 0 )
2747 trieBytes
.push_back(0);
2750 uint64_t imageArraySize
= otherImageArray
->size();
2751 size_t freeSpace
= _readOnlyRegion
.bufferSize
- _readOnlyRegion
.sizeInUse
;
2752 if ( imageArraySize
+trieBytes
.size() > freeSpace
) {
2753 _diagnostics
.error("cache buffer too small to hold ImageArray and Trie (buffer size=%lldMB, imageArray size=%lldMB, trie size=%luKB, free space=%ldMB)",
2754 _allocatedBufferSize
/1024/1024, imageArraySize
/1024/1024, trieBytes
.size()/1024, freeSpace
/1024/1024);
2758 // copy into cache and update header
2759 DyldSharedCache
* dyldCache
= (DyldSharedCache
*)_readExecuteRegion
.buffer
;
2760 dyldCache
->header
.otherImageArrayAddr
= _readOnlyRegion
.unslidLoadAddress
+ _readOnlyRegion
.sizeInUse
;
2761 dyldCache
->header
.otherImageArraySize
= imageArraySize
;
2762 dyldCache
->header
.otherTrieAddr
= dyldCache
->header
.otherImageArrayAddr
+ imageArraySize
;
2763 dyldCache
->header
.otherTrieSize
= trieBytes
.size();
2764 ::memcpy(_readOnlyRegion
.buffer
+ _readOnlyRegion
.sizeInUse
, otherImageArray
, imageArraySize
);
2765 ::memcpy(_readOnlyRegion
.buffer
+ _readOnlyRegion
.sizeInUse
+ imageArraySize
, &trieBytes
[0], trieBytes
.size());
2766 _readOnlyRegion
.sizeInUse
+= align(imageArraySize
+trieBytes
.size(),14);
2768 // Free the underlying buffer
2769 otherImageArray
->deallocate();
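
// Sort-order note (added for clarity): the comparator above orders every
// non-iOSMac path ahead of /System/iOSSupport/ paths, then alphabetically.
// For example, given {"/System/iOSSupport/usr/lib/B.dylib", "/usr/lib/C.dylib",
// "/usr/lib/A.dylib"}, the deterministic order is A, C, then B.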

void SharedCacheBuilder::addClosures(const std::vector<LoadedMachO>& osExecutables)
{
    const DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;

    __block std::vector<Diagnostics> osExecutablesDiags;
    __block std::vector<const dyld3::closure::LaunchClosure*> osExecutablesClosures;
    osExecutablesDiags.resize(osExecutables.size());
    osExecutablesClosures.resize(osExecutables.size());

    dispatch_apply(osExecutables.size(), DISPATCH_APPLY_AUTO, ^(size_t index) {
        const LoadedMachO& loadedMachO = osExecutables[index];
        // don't pre-build closures for staged apps into dyld cache, since they won't run from that location
        if ( startsWith(loadedMachO.mappedFile.runtimePath, "/private/var/staged_system_apps/") ) {
            return;
        }

        // prebuilt closures use the cdhash of the dylib to verify that the dylib is still the same
        // at runtime as when the shared cache processed it.  We must have a code signature to record this information
        uint32_t codeSigFileOffset;
        uint32_t codeSigSize;
        if ( !loadedMachO.mappedFile.mh->hasCodeSignature(codeSigFileOffset, codeSigSize) ) {
            return;
        }

        dyld3::closure::PathOverrides pathOverrides;
        dyld3::RootsChecker rootsChecker;
        dyld3::closure::ClosureBuilder builder(dyld3::closure::kFirstLaunchClosureImageNum, _fileSystem, rootsChecker, dyldCache, false, *_options.archs, pathOverrides,
                                               dyld3::closure::ClosureBuilder::AtPath::all, false, nullptr, _options.platform, nullptr);
        bool issetuid = false;
        if ( this->_options.platform == dyld3::Platform::macOS || dyld3::MachOFile::isSimulatorPlatform(this->_options.platform) )
            _fileSystem.fileExists(loadedMachO.loadedFileInfo.path, nullptr, nullptr, &issetuid, nullptr);
        const dyld3::closure::LaunchClosure* mainClosure = builder.makeLaunchClosure(loadedMachO.loadedFileInfo, issetuid);
        if ( builder.diagnostics().hasError() ) {
            osExecutablesDiags[index].error("%s", builder.diagnostics().errorMessage().c_str());
        }
        else {
            assert(mainClosure != nullptr);
            osExecutablesClosures[index] = mainClosure;
        }
    });

    std::map<std::string, const dyld3::closure::LaunchClosure*> closures;
    for (uint64_t i = 0, e = osExecutables.size(); i != e; ++i) {
        const LoadedMachO& loadedMachO = osExecutables[i];
        const Diagnostics& diag = osExecutablesDiags[i];
        if ( diag.hasError() ) {
            if ( _options.verbose ) {
                _diagnostics.warning("building closure for '%s': %s", loadedMachO.mappedFile.runtimePath.c_str(), diag.errorMessage().c_str());
                for (const std::string& warn : diag.warnings() )
                    _diagnostics.warning("%s", warn.c_str());
            }
            if ( loadedMachO.inputFile && (loadedMachO.inputFile->mustBeIncluded()) ) {
                loadedMachO.inputFile->diag.error("%s", diag.errorMessage().c_str());
            }
        }
        // Note, a closure could be null here if it has a path we skip.
        if ( osExecutablesClosures[i] != nullptr )
            closures[loadedMachO.mappedFile.runtimePath] = osExecutablesClosures[i];
    }

    osExecutablesDiags.clear();
    osExecutablesClosures.clear();

    // preflight space needed
    size_t closuresSpace = 0;
    for (const auto& entry : closures) {
        closuresSpace += entry.second->size();
    }
    size_t freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    if ( closuresSpace > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold all closures (buffer size=%lldMB, closures size=%ldMB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, closuresSpace/1024/1024, freeSpace/1024/1024);
        return;
    }

    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->header.progClosuresAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    uint8_t* closuresBase = _readOnlyRegion.buffer + _readOnlyRegion.sizeInUse;
    std::vector<DylibIndexTrie::Entry> closureEntrys;
    uint32_t currentClosureOffset = 0;
    for (const auto& entry : closures) {
        const dyld3::closure::LaunchClosure* closure = entry.second;
        closureEntrys.push_back(DylibIndexTrie::Entry(entry.first, DylibIndex(currentClosureOffset)));
        size_t size = closure->size();
        assert((size % 4) == 0);
        memcpy(closuresBase+currentClosureOffset, closure, size);
        currentClosureOffset += size;
        closure->deallocate();
    }
    cache->header.progClosuresSize = currentClosureOffset;
    _readOnlyRegion.sizeInUse += currentClosureOffset;
    freeSpace = _readOnlyRegion.bufferSize - _readOnlyRegion.sizeInUse;
    // build trie of indexes into closures list
    DylibIndexTrie closureTrie(closureEntrys);
    std::vector<uint8_t> trieBytes;
    closureTrie.emit(trieBytes);
    while ( (trieBytes.size() % 8) != 0 )
        trieBytes.push_back(0);
    if ( trieBytes.size() > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold all closures trie (buffer size=%lldMB, trie size=%ldMB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, trieBytes.size()/1024/1024, freeSpace/1024/1024);
        return;
    }
    memcpy(_readOnlyRegion.buffer + _readOnlyRegion.sizeInUse, &trieBytes[0], trieBytes.size());
    cache->header.progClosuresTrieAddr = _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse;
    cache->header.progClosuresTrieSize = trieBytes.size();
    _readOnlyRegion.sizeInUse += trieBytes.size();
    _readOnlyRegion.sizeInUse = align(_readOnlyRegion.sizeInUse, 14);
}
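
// Trie note (added for clarity): closureEntrys maps each executable's runtime
// path to the byte offset of its closure within the closures blob.  With
// illustrative sizes (not from a real build): if /bin/ls has a 0x400-byte
// closure and /bin/ps follows it, the trie records {"/bin/ls" -> 0} and
// {"/bin/ps" -> 0x400}, so at launch dyld can walk the trie and seek straight
// to the right closure without scanning the whole blob.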

void SharedCacheBuilder::emitContantObjects() {
    if ( _coalescedText.cfStrings.bufferSize == 0 )
        return;

    assert(_coalescedText.cfStrings.isaInstallName != nullptr);
    DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    __block uint64_t targetSymbolOffsetInCache = 0;
    __block const dyld3::MachOAnalyzer* targetSymbolMA = nullptr;
    __block const dyld3::MachOAnalyzer* libdyldMA = nullptr;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)mh;

        if ( strcmp(installName, "/usr/lib/system/libdyld.dylib") == 0 ) {
            libdyldMA = ma;
        }

        if ( targetSymbolOffsetInCache != 0 )
            return;
        if ( strcmp(installName, _coalescedText.cfStrings.isaInstallName) != 0 )
            return;
        dyld3::MachOAnalyzer::FoundSymbol foundInfo;
        bool foundSymbol = ma->findExportedSymbol(_diagnostics, _coalescedText.cfStrings.isaClassName,
                                                  false, foundInfo, nullptr);
        if ( foundSymbol ) {
            targetSymbolOffsetInCache = (uint8_t*)ma - _readExecuteRegion.buffer + foundInfo.value;
            targetSymbolMA = ma;
        }
    });
    if ( targetSymbolOffsetInCache == 0 ) {
        _diagnostics.error("Could not find export of '%s' in '%s'", _coalescedText.cfStrings.isaClassName,
                           _coalescedText.cfStrings.isaInstallName);
        return;
    }
    if ( libdyldMA == nullptr ) {
        _diagnostics.error("Could not find libdyld.dylib in shared cache");
        return;
    }

    // If all binds to this symbol were via CF constants, then we'll never have seen the ISA patch export
    // so add it now just in case
    _dylibToItsExports[targetSymbolMA].insert(targetSymbolOffsetInCache);
    _exportsToName[targetSymbolOffsetInCache] = _coalescedText.cfStrings.isaClassName;

    // CFString's have so far just been memcpy'ed from the source dylib to the shared cache.
    // We now need to rewrite their ISAs to be rebases to the ___CFConstantStringClassReference class
    const uint64_t cfStringAtomSize = (uint64_t)DyldSharedCache::ConstantClasses::cfStringAtomSize;
    assert( (_coalescedText.cfStrings.bufferSize % cfStringAtomSize) == 0);
    for (uint64_t bufferOffset = 0; bufferOffset != _coalescedText.cfStrings.bufferSize; bufferOffset += cfStringAtomSize) {
        uint8_t* atomBuffer = _coalescedText.cfStrings.bufferAddr + bufferOffset;
        // The ISA fixup is at an offset of 0 in to the atom
        uint8_t* fixupLoc = atomBuffer;
        // We purposefully want to remove the pointer authentication from the ISA so
        // just use an empty pointer metadata
        dyld3::Loader::PointerMetaData pmd;
        uint64_t addend = 0;
        _exportsToUses[targetSymbolOffsetInCache].push_back(makePatchLocation(fixupLoc - _readExecuteRegion.buffer, pmd, addend));
        *(uint64_t*)fixupLoc = _archLayout->sharedMemoryStart + targetSymbolOffsetInCache;
        _aslrTracker.add(fixupLoc);
    }

    // Set the ranges in the libdyld in the shared cache.  At runtime we can use these to quickly check if a given address
    // is a valid constant
    typedef std::pair<const uint8_t*, const uint8_t*> ObjCConstantRange;
    std::pair<const void*, uint64_t> sharedCacheRanges = cache->getObjCConstantRange();
    uint64_t numRanges = sharedCacheRanges.second / sizeof(ObjCConstantRange);
    dyld3::Array<ObjCConstantRange> rangeArray((ObjCConstantRange*)sharedCacheRanges.first, numRanges, numRanges);

    if ( numRanges > dyld_objc_string_kind ) {
        rangeArray[dyld_objc_string_kind].first  = (const uint8_t*)_coalescedText.cfStrings.bufferVMAddr;
        rangeArray[dyld_objc_string_kind].second = rangeArray[dyld_objc_string_kind].first + _coalescedText.cfStrings.bufferSize;
        _aslrTracker.add(&rangeArray[dyld_objc_string_kind].first);
        _aslrTracker.add(&rangeArray[dyld_objc_string_kind].second);
    }

    // Update the __SHARED_CACHE range in libdyld to contain the cf/objc constants
    libdyldMA->forEachLoadCommand(_diagnostics, ^(const load_command* cmd, bool& stop) {
        // We don't handle 32-bit as this is only needed for pointer authentication
        assert(cmd->cmd != LC_SEGMENT);
        if ( cmd->cmd == LC_SEGMENT_64 ) {
            segment_command_64* seg = (segment_command_64*)cmd;
            if ( strcmp(seg->segname, "__SHARED_CACHE") == 0 ) {
                // Update the range of this segment, and any sections inside
                seg->vmaddr   = _coalescedText.cfStrings.bufferVMAddr;
                seg->vmsize   = _coalescedText.cfStrings.bufferSize;
                seg->fileoff  = _coalescedText.cfStrings.cacheFileOffset;
                seg->filesize = _coalescedText.cfStrings.bufferSize;
                section_64* const sectionsStart = (section_64*)((char*)seg + sizeof(struct segment_command_64));
                section_64* const sectionsEnd   = &sectionsStart[seg->nsects];
                for (section_64* sect=sectionsStart; sect < sectionsEnd; ++sect) {
                    if ( !strcmp(sect->sectname, "__cfstring") ) {
                        sect->addr   = _coalescedText.cfStrings.bufferVMAddr;
                        sect->size   = _coalescedText.cfStrings.bufferSize;
                        sect->offset = (uint32_t)_coalescedText.cfStrings.cacheFileOffset;
                    }
                }
            }
        }
    });
}

bool SharedCacheBuilder::writeCache(void (^cacheSizeCallback)(uint64_t size), bool (^copyCallback)(const uint8_t* src, uint64_t size, uint64_t dstOffset))
{
    const dyld_cache_header*       cacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
    const dyld_cache_mapping_info* mappings    = (dyld_cache_mapping_info*)(_readExecuteRegion.buffer + cacheHeader->mappingOffset);
    const uint32_t mappingsCount = cacheHeader->mappingCount;
    // Check the sizes of all the regions are correct
    assert(_readExecuteRegion.sizeInUse == mappings[0].size);
    for (uint32_t i = 0; i != _dataRegions.size(); ++i) {
        assert(_dataRegions[i].sizeInUse == mappings[i + 1].size);
    }
    assert(_readOnlyRegion.sizeInUse == mappings[mappingsCount - 1].size);

    // Check the file offsets of all the regions are correct
    assert(_readExecuteRegion.cacheFileOffset == mappings[0].fileOffset);
    for (uint32_t i = 0; i != _dataRegions.size(); ++i) {
        assert(_dataRegions[i].cacheFileOffset == mappings[i + 1].fileOffset);
    }
    assert(_readOnlyRegion.cacheFileOffset == mappings[mappingsCount - 1].fileOffset);
    assert(_codeSignatureRegion.sizeInUse == cacheHeader->codeSignatureSize);
    assert(cacheHeader->codeSignatureOffset == _readOnlyRegion.cacheFileOffset+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse);

    // Make sure the slidable mappings have the same ranges as the original mappings
    const dyld_cache_mapping_and_slide_info* slidableMappings = (dyld_cache_mapping_and_slide_info*)(_readExecuteRegion.buffer + cacheHeader->mappingWithSlideOffset);
    assert(cacheHeader->mappingCount == cacheHeader->mappingWithSlideCount);
    for (uint32_t i = 0; i != cacheHeader->mappingCount; ++i) {
        assert(mappings[i].address    == slidableMappings[i].address);
        assert(mappings[i].size       == slidableMappings[i].size);
        assert(mappings[i].fileOffset == slidableMappings[i].fileOffset);
        assert(mappings[i].maxProt    == slidableMappings[i].maxProt);
        assert(mappings[i].initProt   == slidableMappings[i].initProt);
    }

    // Now that we know everything is correct, actually copy the data
    cacheSizeCallback(_readExecuteRegion.sizeInUse+dataRegionsSizeInUse()+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse+_codeSignatureRegion.sizeInUse);
    bool fullyWritten = copyCallback(_readExecuteRegion.buffer, _readExecuteRegion.sizeInUse, mappings[0].fileOffset);
    for (uint32_t i = 0; i != _dataRegions.size(); ++i) {
        fullyWritten &= copyCallback(_dataRegions[i].buffer, _dataRegions[i].sizeInUse, mappings[i + 1].fileOffset);
    }
    fullyWritten &= copyCallback(_readOnlyRegion.buffer, _readOnlyRegion.sizeInUse, mappings[cacheHeader->mappingCount - 1].fileOffset);
    if ( _localSymbolsRegion.sizeInUse != 0 ) {
        assert(cacheHeader->localSymbolsOffset == mappings[cacheHeader->mappingCount - 1].fileOffset+_readOnlyRegion.sizeInUse);
        fullyWritten &= copyCallback(_localSymbolsRegion.buffer, _localSymbolsRegion.sizeInUse, cacheHeader->localSymbolsOffset);
    }
    fullyWritten &= copyCallback(_codeSignatureRegion.buffer, _codeSignatureRegion.sizeInUse, cacheHeader->codeSignatureOffset);
    return fullyWritten;
}
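
// Callback protocol (added for clarity): writeCache() first announces the
// total output size via cacheSizeCallback, then streams each region with
// copyCallback(src, size, dstOffset), which returns false on a short write.
// A minimal sketch of a consumer, assuming only these two callbacks:
//
//   __block std::vector<uint8_t> out;
//   writeCache(^(uint64_t size) { out.resize(size); },
//              ^(const uint8_t* src, uint64_t size, uint64_t dstOffset) {
//                  memcpy(&out[dstOffset], src, size);
//                  return true;
//              });
//
// writeFile() and writeBuffer() below are exactly this pattern, implemented
// over pwrite() and malloc()/memcpy() respectively.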

void SharedCacheBuilder::writeFile(const std::string& path)
{
    std::string pathTemplate = path + "-XXXXXX";
    size_t templateLen = strlen(pathTemplate.c_str())+2;
    BLOCK_ACCCESSIBLE_ARRAY(char, pathTemplateSpace, templateLen);
    strlcpy(pathTemplateSpace, pathTemplate.c_str(), templateLen);
    int fd = mkstemp(pathTemplateSpace);
    if ( fd != -1 ) {
        auto cacheSizeCallback = ^(uint64_t size) {
            // set final cache file size (may help defragment file)
            ::ftruncate(fd, size);
        };
        auto copyCallback = ^(const uint8_t* src, uint64_t size, uint64_t dstOffset) {
            uint64_t writtenSize = pwrite(fd, src, size, dstOffset);
            return writtenSize == size;
        };
        // <rdar://problem/55370916> TOCTOU: verify path is still a realpath (not changed)
        char tempPath[MAXPATHLEN];
        if ( ::fcntl(fd, F_GETPATH, tempPath) == 0 ) {
            size_t tempPathLen = strlen(tempPath);
            if ( tempPathLen > 7 )
                tempPath[tempPathLen-7] = '\0'; // remove trailing -xxxxxx
            if ( path != tempPath ) {
                _diagnostics.error("output file path changed from: '%s' to: '%s'", path.c_str(), tempPath);
                ::close(fd);
                return;
            }
        }
        else {
            _diagnostics.error("unable to fcntl(fd, F_GETPATH) on output file");
            ::close(fd);
            return;
        }
        bool fullyWritten = writeCache(cacheSizeCallback, copyCallback);
        if ( fullyWritten ) {
            ::fchmod(fd, S_IRUSR|S_IRGRP|S_IROTH); // mkstemp() makes file "rw-------", switch it to "r--r--r--"
            // <rdar://problem/55370916> TOCTOU: verify path is still a realpath (not changed)
            // For MRM bringup, dyld installs symlinks from:
            //   dyld_shared_cache_x86_64 -> ../../../../System/Library/dyld/dyld_shared_cache_x86_64
            //   dyld_shared_cache_x86_64h -> ../../../../System/Library/dyld/dyld_shared_cache_x86_64h
            // We don't want to follow that symlink when we install the cache, but instead write over it
            auto lastSlash = path.find_last_of("/");
            if ( lastSlash != std::string::npos ) {
                std::string directoryPath = path.substr(0, lastSlash);

                char resolvedPath[PATH_MAX];
                ::realpath(directoryPath.c_str(), resolvedPath);
                // Note: if the target cache file does not already exist, realpath() will return NULL, but still fill in the path buffer
                if ( directoryPath != resolvedPath ) {
                    _diagnostics.error("output directory file path changed from: '%s' to: '%s'", directoryPath.c_str(), resolvedPath);
                    return;
                }
            }
            if ( ::rename(pathTemplateSpace, path.c_str()) == 0 ) {
                ::close(fd);
                return; // success
            }
            else {
                _diagnostics.error("could not rename file '%s' to: '%s'", pathTemplateSpace, path.c_str());
            }
        }
        else {
            _diagnostics.error("could not write file %s", pathTemplateSpace);
        }
        ::close(fd);
        ::unlink(pathTemplateSpace);
    }
    else {
        _diagnostics.error("could not open file %s", pathTemplateSpace);
    }
}

void SharedCacheBuilder::writeBuffer(uint8_t*& buffer, uint64_t& bufferSize) {
    auto cacheSizeCallback = ^(uint64_t size) {
        buffer = (uint8_t*)malloc(size);
        bufferSize = size;
    };
    auto copyCallback = ^(const uint8_t* src, uint64_t size, uint64_t dstOffset) {
        memcpy(buffer + dstOffset, src, size);
        return true;
    };
    bool fullyWritten = writeCache(cacheSizeCallback, copyCallback);
    assert(fullyWritten);
}

void SharedCacheBuilder::writeMapFile(const std::string& path)
{
    std::string mapContent = getMapFileBuffer();
    safeSave(mapContent.c_str(), mapContent.size(), path);
}

std::string SharedCacheBuilder::getMapFileBuffer() const
{
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    return cache->mapFile();
}

std::string SharedCacheBuilder::getMapFileJSONBuffer(const std::string& cacheDisposition) const
{
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    return cache->generateJSONMap(cacheDisposition.c_str());
}

void SharedCacheBuilder::markPaddingInaccessible()
{
    // region between RX and RW
    uint8_t* startPad1 = _readExecuteRegion.buffer+_readExecuteRegion.sizeInUse;
    uint8_t* endPad1   = firstDataRegion()->buffer;
    ::vm_protect(mach_task_self(), (vm_address_t)startPad1, endPad1-startPad1, false, 0);

    // region between RW and RO
    const Region* lastRegion = lastDataRegion();
    uint8_t* startPad2 = lastRegion->buffer+lastRegion->sizeInUse;
    uint8_t* endPad2   = _readOnlyRegion.buffer;
    ::vm_protect(mach_task_self(), (vm_address_t)startPad2, endPad2-startPad2, false, 0);
}

void SharedCacheBuilder::forEachCacheDylib(void (^callback)(const std::string& path)) {
    for (const DylibInfo& dylibInfo : _sortedDylibs)
        callback(dylibInfo.dylibID);
}

void SharedCacheBuilder::forEachCacheSymlink(void (^callback)(const std::string& path))
{
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    const dyld3::closure::ImageArray* images = cache->cachedDylibsImageArray();
    if ( images == nullptr )
        return;

    // Aliases we folded in to the cache are in the cache dylib closures
    images->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        image->forEachAlias(^(const char* aliasPath, bool& stop) {
            callback(aliasPath);
        });
    });
}

uint64_t SharedCacheBuilder::pathHash(const char* path)
{
    uint64_t sum = 0;
    for (const char* s=path; *s != '\0'; ++s)
        sum += sum*4 + *s;
    return sum;
}
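
// Hash note (added for clarity): since sum += sum*4 + *s is sum = 5*sum + c,
// this is a base-5 polynomial hash over the path bytes.  Worked example for
// "ab": start at 0; 'a'(97) gives 5*0+97 = 97; 'b'(98) gives 5*97+98 = 583.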

void SharedCacheBuilder::findDylibAndSegment(const void* contentPtr, std::string& foundDylibName, std::string& foundSegName)
{
    foundDylibName = "???";
    foundSegName   = "???";
    uint64_t unslidVmAddr = ((uint8_t*)contentPtr - _readExecuteRegion.buffer) + _readExecuteRegion.unslidLoadAddress;
    const DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        ((dyld3::MachOLoaded*)mh)->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
            if ( (unslidVmAddr >= info.vmAddr) && (unslidVmAddr < (info.vmAddr+info.vmSize)) ) {
                foundDylibName = installName;
                foundSegName   = info.segName;
                stop           = true;
            }
        });
    });
}

void SharedCacheBuilder::fipsSign()
{
    // find libcorecrypto.dylib in cache being built
    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    __block const dyld3::MachOLoaded* ml = nullptr;
    dyldCache->forEachImage(^(const mach_header* mh, const char* installName) {
        if ( strcmp(installName, "/usr/lib/system/libcorecrypto.dylib") == 0 )
            ml = (dyld3::MachOLoaded*)mh;
    });
    if ( ml == nullptr ) {
        _diagnostics.warning("Could not find libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }

    // find location in libcorecrypto.dylib to store hash of __text section
    uint64_t hashStoreSize;
    const void* hashStoreLocation = ml->findSectionContent("__TEXT", "__fips_hmacs", hashStoreSize);
    if ( hashStoreLocation == nullptr ) {
        _diagnostics.warning("Could not find __TEXT/__fips_hmacs section in libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }
    if ( hashStoreSize != 32 ) {
        _diagnostics.warning("__TEXT/__fips_hmacs section in libcorecrypto.dylib is not 32 bytes in size, skipping FIPS sealing");
        return;
    }

    // compute hmac hash of __text section
    uint64_t textSize;
    const void* textLocation = ml->findSectionContent("__TEXT", "__text", textSize);
    if ( textLocation == nullptr ) {
        _diagnostics.warning("Could not find __TEXT/__text section in libcorecrypto.dylib, skipping FIPS sealing");
        return;
    }
    unsigned char hmac_key = 0;
    CCHmac(kCCHmacAlgSHA256, &hmac_key, 1, textLocation, textSize, (void*)hashStoreLocation); // store hash directly into hashStoreLocation
}

void SharedCacheBuilder::codeSign()
{
    uint8_t  dscHashType;
    uint8_t  dscHashSize;
    uint32_t dscDigestFormat;
    bool     agile = false;

    // select which codesigning hash
    switch (_options.codeSigningDigestMode) {
        case DyldSharedCache::Agile:
            agile = true;
            // Fall through to SHA1, because the main code directory remains SHA1 for compatibility.
            [[clang::fallthrough]];
        case DyldSharedCache::SHA1only:
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
            dscHashType     = CS_HASHTYPE_SHA1;
            dscHashSize     = CS_HASH_SIZE_SHA1;
            dscDigestFormat = kCCDigestSHA1;
#pragma clang diagnostic pop
            break;
        case DyldSharedCache::SHA256only:
            dscHashType     = CS_HASHTYPE_SHA256;
            dscHashSize     = CS_HASH_SIZE_SHA256;
            dscDigestFormat = kCCDigestSHA256;
            break;
        default:
            _diagnostics.error("codeSigningDigestMode has unknown, unexpected value %d, bailing out.",
                               _options.codeSigningDigestMode);
            return;
    }

    std::string cacheIdentifier = "com.apple.dyld.cache.";
    cacheIdentifier += _options.archs->name();
    if ( _options.dylibsRemovedDuringMastering ) {
        if ( _options.optimizeStubs )
            cacheIdentifier += ".release";
        else
            cacheIdentifier += ".development";
    }
    // get pointers into shared cache buffer
    size_t inBbufferSize = _readExecuteRegion.sizeInUse+dataRegionsSizeInUse()+_readOnlyRegion.sizeInUse+_localSymbolsRegion.sizeInUse;

    const uint16_t pageSize = _archLayout->csPageSize;

    // layout code signature contents
    uint32_t blobCount     = agile ? 4 : 3;
    size_t   idSize        = cacheIdentifier.size()+1; // +1 for terminating 0
    uint32_t slotCount     = (uint32_t)((inBbufferSize + pageSize - 1) / pageSize);
    uint32_t xSlotCount    = CSSLOT_REQUIREMENTS;
    size_t   idOffset      = offsetof(CS_CodeDirectory, end_withExecSeg);
    size_t   hashOffset    = idOffset+idSize + dscHashSize*xSlotCount;
    size_t   hash256Offset = idOffset+idSize + CS_HASH_SIZE_SHA256*xSlotCount;
    size_t   cdSize        = hashOffset + (slotCount * dscHashSize);
    size_t   cd256Size     = agile ? hash256Offset + (slotCount * CS_HASH_SIZE_SHA256) : 0;
    size_t   reqsSize      = 12;
    size_t   cmsSize       = sizeof(CS_Blob);
    size_t   cdOffset      = sizeof(CS_SuperBlob) + blobCount*sizeof(CS_BlobIndex);
    size_t   cd256Offset   = cdOffset + cdSize;
    size_t   reqsOffset    = cd256Offset + cd256Size; // equals cdOffset + cdSize if not agile
    size_t   cmsOffset     = reqsOffset + reqsSize;
    size_t   sbSize        = cmsOffset + cmsSize;
    size_t   sigSize       = align(sbSize, 14);       // keep whole cache 16KB aligned

    // allocate space for blob
    vm_address_t codeSigAlloc;
    if ( vm_allocate(mach_task_self(), &codeSigAlloc, sigSize, VM_FLAGS_ANYWHERE) != 0 ) {
        _diagnostics.error("could not allocate code signature buffer");
        return;
    }
    _codeSignatureRegion.buffer     = (uint8_t*)codeSigAlloc;
    _codeSignatureRegion.bufferSize = sigSize;
    _codeSignatureRegion.sizeInUse  = sigSize;

    // create overall code signature which is a superblob
    CS_SuperBlob* sb = reinterpret_cast<CS_SuperBlob*>(_codeSignatureRegion.buffer);
    sb->magic           = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
    sb->length          = htonl(sbSize);
    sb->count           = htonl(blobCount);
    sb->index[0].type   = htonl(CSSLOT_CODEDIRECTORY);
    sb->index[0].offset = htonl(cdOffset);
    sb->index[1].type   = htonl(CSSLOT_REQUIREMENTS);
    sb->index[1].offset = htonl(reqsOffset);
    sb->index[2].type   = htonl(CSSLOT_CMS_SIGNATURE);
    sb->index[2].offset = htonl(cmsOffset);
    if ( agile ) {
        sb->index[3].type   = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES + 0);
        sb->index[3].offset = htonl(cd256Offset);
    }

    // fill in empty requirements
    CS_RequirementsBlob* reqs = (CS_RequirementsBlob*)(((char*)sb)+reqsOffset);
    reqs->magic  = htonl(CSMAGIC_REQUIREMENTS);
    reqs->length = htonl(sizeof(CS_RequirementsBlob));
    reqs->data   = 0;

    // initialize fixed fields of Code Directory
    CS_CodeDirectory* cd = (CS_CodeDirectory*)(((char*)sb)+cdOffset);
    cd->magic           = htonl(CSMAGIC_CODEDIRECTORY);
    cd->length          = htonl(cdSize);
    cd->version         = htonl(0x20400);               // supports exec segment
    cd->flags           = htonl(kSecCodeSignatureAdhoc);
    cd->hashOffset      = htonl(hashOffset);
    cd->identOffset     = htonl(idOffset);
    cd->nSpecialSlots   = htonl(xSlotCount);
    cd->nCodeSlots      = htonl(slotCount);
    cd->codeLimit       = htonl(inBbufferSize);
    cd->hashSize        = dscHashSize;
    cd->hashType        = dscHashType;
    cd->platform        = 0;                            // not platform binary
    cd->pageSize        = __builtin_ctz(pageSize);      // log2(CS_PAGE_SIZE);
    cd->spare2          = 0;                            // unused (must be zero)
    cd->scatterOffset   = 0;                            // not supported anymore
    cd->teamOffset      = 0;                            // no team ID
    cd->spare3          = 0;                            // unused (must be zero)
    cd->codeLimit64     = 0;                            // falls back to codeLimit

    // executable segment info
    cd->execSegBase     = htonll(_readExecuteRegion.cacheFileOffset); // base of TEXT segment
    cd->execSegLimit    = htonll(_readExecuteRegion.sizeInUse);       // size of TEXT segment
    cd->execSegFlags    = 0;                                          // not a main binary

    // initialize dynamic fields of Code Directory
    strcpy((char*)cd + idOffset, cacheIdentifier.c_str());

    // add special slot hashes
    uint8_t* hashSlot     = (uint8_t*)cd + hashOffset;
    uint8_t* reqsHashSlot = &hashSlot[-CSSLOT_REQUIREMENTS*dscHashSize];
    CCDigest(dscDigestFormat, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHashSlot);

    CS_CodeDirectory* cd256;
    uint8_t* hash256Slot;
    uint8_t* reqsHash256Slot;
    if ( agile ) {
        // Note that the assumption here is that the size up to the hashes is the same as for
        // sha1 code directory, and that they come last, after everything else.

        cd256 = (CS_CodeDirectory*)(((char*)sb)+cd256Offset);
        cd256->magic           = htonl(CSMAGIC_CODEDIRECTORY);
        cd256->length          = htonl(cd256Size);
        cd256->version         = htonl(0x20400);               // supports exec segment
        cd256->flags           = htonl(kSecCodeSignatureAdhoc);
        cd256->hashOffset      = htonl(hash256Offset);
        cd256->identOffset     = htonl(idOffset);
        cd256->nSpecialSlots   = htonl(xSlotCount);
        cd256->nCodeSlots      = htonl(slotCount);
        cd256->codeLimit       = htonl(inBbufferSize);
        cd256->hashSize        = CS_HASH_SIZE_SHA256;
        cd256->hashType        = CS_HASHTYPE_SHA256;
        cd256->platform        = 0;                            // not platform binary
        cd256->pageSize        = __builtin_ctz(pageSize);      // log2(CS_PAGE_SIZE);
        cd256->spare2          = 0;                            // unused (must be zero)
        cd256->scatterOffset   = 0;                            // not supported anymore
        cd256->teamOffset      = 0;                            // no team ID
        cd256->spare3          = 0;                            // unused (must be zero)
        cd256->codeLimit64     = 0;                            // falls back to codeLimit

        // executable segment info
        cd256->execSegBase  = cd->execSegBase;
        cd256->execSegLimit = cd->execSegLimit;
        cd256->execSegFlags = cd->execSegFlags;

        // initialize dynamic fields of Code Directory
        strcpy((char*)cd256 + idOffset, cacheIdentifier.c_str());

        // add special slot hashes
        hash256Slot     = (uint8_t*)cd256 + hash256Offset;
        reqsHash256Slot = &hash256Slot[-CSSLOT_REQUIREMENTS*CS_HASH_SIZE_SHA256];
        CCDigest(kCCDigestSHA256, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHash256Slot);
    }
    else {
        cd256           = NULL;
        hash256Slot     = NULL;
        reqsHash256Slot = NULL;
    }

    // fill in empty CMS blob for ad-hoc signing
    CS_Blob* cms = (CS_Blob*)(((char*)sb)+cmsOffset);
    cms->magic  = htonl(CSMAGIC_BLOBWRAPPER);
    cms->length = htonl(sizeof(CS_Blob));

    // alter header of cache to record size and location of code signature
    // do this *before* hashing each page
    dyld_cache_header* cache = (dyld_cache_header*)_readExecuteRegion.buffer;
    cache->codeSignatureOffset = inBbufferSize;
    cache->codeSignatureSize   = sigSize;

    struct SlotRange {
        uint64_t       start  = 0;
        uint64_t       end    = 0;
        const uint8_t* buffer = nullptr;
    };
    std::vector<SlotRange> regionSlots;
    // __TEXT
    regionSlots.push_back({ 0, (_readExecuteRegion.sizeInUse / pageSize), _readExecuteRegion.buffer });
    // __DATA
    for (const Region& dataRegion : _dataRegions) {
        // The first data region starts at the end of __TEXT, and subsequent regions are
        // after the previous __DATA region.
        uint64_t previousEnd = regionSlots.back().end;
        uint64_t numSlots    = dataRegion.sizeInUse / pageSize;
        regionSlots.push_back({ previousEnd, previousEnd + numSlots, dataRegion.buffer });
    }
    // __LINKEDIT
    {
        uint64_t previousEnd = regionSlots.back().end;
        uint64_t numSlots    = _readOnlyRegion.sizeInUse / pageSize;
        regionSlots.push_back({ previousEnd, previousEnd + numSlots, _readOnlyRegion.buffer });
    }
    // local symbols
    if ( _localSymbolsRegion.sizeInUse != 0 ) {
        uint64_t previousEnd = regionSlots.back().end;
        uint64_t numSlots    = _localSymbolsRegion.sizeInUse / pageSize;
        regionSlots.push_back({ previousEnd, previousEnd + numSlots, _localSymbolsRegion.buffer });
    }

    auto codeSignPage = ^(size_t i) {
        // move to correct region
        for (const SlotRange& slotRange : regionSlots) {
            if ( (i >= slotRange.start) && (i < slotRange.end) ) {
                const uint8_t* code = slotRange.buffer + ((i - slotRange.start) * pageSize);

                CCDigest(dscDigestFormat, code, pageSize, hashSlot + (i * dscHashSize));

                if ( agile ) {
                    CCDigest(kCCDigestSHA256, code, pageSize, hash256Slot + (i * CS_HASH_SIZE_SHA256));
                }
                return;
            }
        }
        assert(0 && "Out of range slot");
    };

    // compute hashes
    dispatch_apply(slotCount, DISPATCH_APPLY_AUTO, ^(size_t i) {
        codeSignPage(i);
    });

    // Now that we have a code signature, compute a cache UUID by hashing the code signature blob
    {
        uint8_t* uuidLoc = cache->uuid;
        assert(uuid_is_null(uuidLoc));
        static_assert(offsetof(dyld_cache_header, uuid) / CS_PAGE_SIZE_4K == 0, "uuid is expected in the first page of the cache");
        uint8_t fullDigest[CC_SHA256_DIGEST_LENGTH];
        CC_SHA256((const void*)cd, (unsigned)cdSize, fullDigest);
        memcpy(uuidLoc, fullDigest, 16);
        // <rdar://problem/6723729> uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
        uuidLoc[6] = ( uuidLoc[6] & 0x0F ) | ( 3 << 4 );
        uuidLoc[8] = ( uuidLoc[8] & 0x3F ) | 0x80;

        // Now codesign page 0 again, because we modified it by setting uuid in header
        codeSignPage(0);
    }

    // hash of entire code directory (cdHash) uses same hash as each page
    uint8_t fullCdHash[dscHashSize];
    CCDigest(dscDigestFormat, (const uint8_t*)cd, cdSize, fullCdHash);
    // Note: cdHash is defined as first 20 bytes of hash
    memcpy(_cdHashFirst, fullCdHash, 20);
    if ( agile ) {
        uint8_t fullCdHash256[CS_HASH_SIZE_SHA256];
        CCDigest(kCCDigestSHA256, (const uint8_t*)cd256, cd256Size, fullCdHash256);
        // Note: cdHash is defined as first 20 bytes of hash, even for sha256
        memcpy(_cdHashSecond, fullCdHash256, 20);
    }
    else {
        memset(_cdHashSecond, 0, 20);
    }
}
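
// Signature layout (added for clarity): the ad-hoc signature built above is a
// CS_SuperBlob appended after the cache's local symbols.  File order in the
// agile case (offsets shown symbolically, as computed above):
//   [CS_SuperBlob + blobCount CS_BlobIndex entries]   offset 0
//   [CS_CodeDirectory, SHA-1]                         cdOffset
//   [CS_CodeDirectory, SHA-256]                       cd256Offset = cdOffset + cdSize
//   [CS_RequirementsBlob, empty]                      reqsOffset  = cd256Offset + cd256Size
//   [CS_Blob, empty CMS wrapper]                      cmsOffset   = reqsOffset + reqsSize
// Each code directory holds one hash per pageSize page of the cache file,
// preceded by xSlotCount special-slot hashes (here only the requirements blob).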

const bool SharedCacheBuilder::agileSignature()
{
    return _options.codeSigningDigestMode == DyldSharedCache::Agile;
}

static const std::string cdHash(uint8_t hash[20])
{
    char buff[48];
    for (int i = 0; i < 20; ++i)
        sprintf(&buff[2*i], "%2.2x", hash[i]);
    return buff;
}

const std::string SharedCacheBuilder::cdHashFirst()
{
    return cdHash(_cdHashFirst);
}

const std::string SharedCacheBuilder::cdHashSecond()
{
    return cdHash(_cdHashSecond);
}

const std::string SharedCacheBuilder::uuid() const
{
    dyld_cache_header* cache = (dyld_cache_header*)_readExecuteRegion.buffer;
    uuid_string_t uuidStr;
    uuid_unparse(cache->uuid, uuidStr);
    return uuidStr;
}
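
// Buffer-size note (added for clarity): cdHash() formats 20 bytes as 40 hex
// characters plus a terminating NUL, so 41 bytes are used of the 48-byte
// stack buffer.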

void SharedCacheBuilder::forEachDylibInfo(void (^callback)(const DylibInfo& dylib, Diagnostics& dylibDiag)) {
    for (const DylibInfo& dylibInfo : _sortedDylibs) {
        // The shared cache builder doesn't use per-dylib errors right now
        // so just share the global diagnostics
        callback(dylibInfo, _diagnostics);
    }
}

template <typename P>
bool SharedCacheBuilder::makeRebaseChainV2(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyld_cache_slide_info2* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask  = (pint_t)(info->delta_mask);
    const pint_t   valueMask  = ~deltaMask;
    const pint_t   valueAdd   = (pint_t)(info->value_add);
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    const uint32_t maxDelta   = (uint32_t)(deltaMask >> deltaShift);

    pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        _diagnostics.error("rebase pointer (0x%0lX) does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                           (long)lastValue, lastLocationOffset, segName.c_str(), dylibName.c_str());
        return false;
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        uint8_t highByte;
        if ( _aslrTracker.hasHigh8(lastLoc, &highByte) ) {
            uint64_t tbi = (uint64_t)highByte << 56;
            newLastValue |= tbi;
        }
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //fprintf(stderr, "  too big delta = %d, lastOffset=0x%03X, offset=0x%03X\n", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( value == 0 ) {
                // Steal values of 0 to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //warning("  no way to make non-rebase delta chain, terminate off=0x%03X, old value=0x%08lX, new value=0x%08lX", lastLocationOffset, (long)value, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
    }

    // we can make chain. go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t* prevLoc = (pint_t*)&pageContent[prevOffset];
    for (unsigned n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t* nLoc = (pint_t*)&pageContent[nOffset];
        pint_t delta2 = nOffset - prevOffset;
        pint_t value = (pint_t)P::getP(*prevLoc);
        pint_t newValue;
        if ( value == 0 )
            newValue = (delta2 << deltaShift);
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc = nLoc;
    }

    // link to final rebase location
    pint_t delta3 = offset - prevOffset;
    pint_t value = (pint_t)P::getP(*prevLoc);
    pint_t newValue;
    if ( value == 0 )
        newValue = (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}
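
// Worked example (added for clarity; mask chosen for illustration only): with
// delta_mask = 0x00FFFF0000000000, deltaShift = __builtin_ctzll(mask)-2 = 38,
// so maxDelta = mask >> 38 = 0x3FFFC bytes.  Linking a fixup to another one
// 16 bytes later stores ((value - value_add) & ~mask) | (16 << 38): the low
// bits keep the rebase target, the masked bits hold the scaled delta, and a
// zero delta terminates the page's chain.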

template <typename P>
void SharedCacheBuilder::addPageStartsV2(uint8_t* pageContent, const bool bitmap[], const dyld_cache_slide_info2* info,
                                         std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask = (pint_t)(info->delta_mask);
    const pint_t   valueMask = ~deltaMask;
    const uint32_t pageSize  = info->page_size;
    const pint_t   valueAdd  = (pint_t)(info->value_add);

    uint16_t startValue = DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;
    for (uint32_t i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChainV2<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras > 0x3FFF ) {
                        _diagnostics.error("rebase overflow in v2 page extras");
                        return;
                    }
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset];
        pint_t lastValue = (pint_t)P::getP(*lastLoc);
        pint_t newValue = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE_PAGE_ATTR_END;
    }
    pageStarts.push_back(startValue);
}
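
// Extras example (added for clarity): a page whose fixups fit one chain gets
// pageStarts[i] = word index of the first fixup.  If a page needs, say, two
// chains starting at words 4 and 300, the encoding becomes
//   pageExtras: ..., 4, 300|DYLD_CACHE_SLIDE_PAGE_ATTR_END
//   pageStarts[i] = firstExtraIndex | DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA
// so the kernel/runtime walks every chain listed for that page.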

template <typename P>
void SharedCacheBuilder::writeSlideInfoV2(const bool bitmapForAllDataRegions[], unsigned dataPageCountForAllDataRegions)
{
    typedef typename P::uint_t pint_t;
    typedef typename P::E      E;

    const uint32_t pageSize = _aslrTracker.pageSize();
    const uint8_t* firstDataRegionBuffer = firstDataRegion()->buffer;
    for (uint32_t dataRegionIndex = 0; dataRegionIndex != _dataRegions.size(); ++dataRegionIndex) {
        Region& dataRegion = _dataRegions[dataRegionIndex];

        // fill in fixed info
        assert(dataRegion.slideInfoFileOffset != 0);
        assert((dataRegion.sizeInUse % pageSize) == 0);
        unsigned dataPageCount = (uint32_t)dataRegion.sizeInUse / pageSize;
        dyld_cache_slide_info2* info = (dyld_cache_slide_info2*)dataRegion.slideInfoBuffer;
        info->version    = 2;
        info->page_size  = pageSize;
        info->delta_mask = _archLayout->pointerDeltaMask;
        info->value_add  = _archLayout->useValueAdd ? _archLayout->sharedMemoryStart : 0;

        // set page starts and extras for each page
        std::vector<uint16_t> pageStarts;
        std::vector<uint16_t> pageExtras;
        pageStarts.reserve(dataPageCount);

        const size_t bitmapEntriesPerPage = (sizeof(bool)*(pageSize/4));
        uint8_t* pageContent = dataRegion.buffer;
        unsigned numPagesFromFirstDataRegion = (uint32_t)(dataRegion.buffer - firstDataRegionBuffer) / pageSize;
        assert((numPagesFromFirstDataRegion + dataPageCount) <= dataPageCountForAllDataRegions);
        const bool* bitmapForRegion = (const bool*)bitmapForAllDataRegions + (bitmapEntriesPerPage * numPagesFromFirstDataRegion);
        const bool* bitmapForPage = bitmapForRegion;
        for (unsigned i=0; i < dataPageCount; ++i) {
            //warning("page[%d]", i);
            addPageStartsV2<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
            if ( _diagnostics.hasError() ) {
                return;
            }
            pageContent += pageSize;
            bitmapForPage += (sizeof(bool)*(pageSize/4));
        }

        // fill in computed info
        info->page_starts_offset = sizeof(dyld_cache_slide_info2);
        info->page_starts_count  = (unsigned)pageStarts.size();
        info->page_extras_offset = (unsigned)(sizeof(dyld_cache_slide_info2)+pageStarts.size()*sizeof(uint16_t));
        info->page_extras_count  = (unsigned)pageExtras.size();
        uint16_t* pageStartsBuffer = (uint16_t*)((char*)info + info->page_starts_offset);
        uint16_t* pageExtrasBuffer = (uint16_t*)((char*)info + info->page_extras_offset);
        for (unsigned i=0; i < pageStarts.size(); ++i)
            pageStartsBuffer[i] = pageStarts[i];
        for (unsigned i=0; i < pageExtras.size(); ++i)
            pageExtrasBuffer[i] = pageExtras[i];
        // update header with final size
        uint64_t slideInfoSize = align(info->page_extras_offset + pageExtras.size()*sizeof(uint16_t), _archLayout->sharedRegionAlignP2);
        dataRegion.slideInfoFileSize = slideInfoSize;
        if ( dataRegion.slideInfoFileSize > dataRegion.slideInfoBufferSizeAllocated ) {
            _diagnostics.error("kernel slide info overflow buffer");
        }
        // Update the mapping entry on the cache header
        const dyld_cache_header* cacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
        dyld_cache_mapping_and_slide_info* slidableMappings = (dyld_cache_mapping_and_slide_info*)(_readExecuteRegion.buffer + cacheHeader->mappingWithSlideOffset);
        slidableMappings[1 + dataRegionIndex].slideInfoFileSize = dataRegion.slideInfoFileSize;
        //fprintf(stderr, "pageCount=%u, page_starts_count=%lu, page_extras_count=%lu\n", dataPageCount, pageStarts.size(), pageExtras.size());
    }
}

#if SUPPORT_ARCH_arm64_32 || SUPPORT_ARCH_armv7k
// fits in to int16_t
static bool smallValue(uint64_t value)
{
    uint32_t high = (value & 0xFFFF8000);
    return (high == 0) || (high == 0xFFFF8000);
}
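
// Examples (added for clarity): smallValue(0x7FFF) -> masked high bits are
// zero, so true; smallValue(0xFFFFFFFFFFFF8000) -> masked bits are 0xFFFF8000,
// so true (it sign-extends from int16_t); smallValue(0x10000) -> false.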

template <typename P>
bool SharedCacheBuilder::makeRebaseChainV4(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyld_cache_slide_info4* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask  = (pint_t)(info->delta_mask);
    const pint_t   valueMask  = ~deltaMask;
    const pint_t   valueAdd   = (pint_t)(info->value_add);
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    const uint32_t maxDelta   = (uint32_t)(deltaMask >> deltaShift);

    pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        _diagnostics.error("rebase pointer does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                           lastLocationOffset, segName.c_str(), dylibName.c_str());
        return false;
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //fprintf(stderr, "  too big delta = %d, lastOffset=0x%03X, offset=0x%03X\n", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( smallValue(value) ) {
                // Steal values of 0 to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //fprintf(stderr, "  no way to make non-rebase delta chain, terminate off=0x%03X, old value=0x%08lX, new value=0x%08lX\n",
            //        lastLocationOffset, (long)lastValue, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
    }

    // we can make chain. go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t* prevLoc = (pint_t*)&pageContent[prevOffset];
    for (unsigned n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t* nLoc = (pint_t*)&pageContent[nOffset];
        uint32_t delta2 = nOffset - prevOffset;
        pint_t value = (pint_t)P::getP(*prevLoc);
        pint_t newValue;
        if ( smallValue(value) )
            newValue = (value & valueMask) | (delta2 << deltaShift);
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc = nLoc;
    }

    // link to final rebase location
    uint32_t delta3 = offset - prevOffset;
    pint_t value = (pint_t)P::getP(*prevLoc);
    pint_t newValue;
    if ( smallValue(value) )
        newValue = (value & valueMask) | (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}

template <typename P>
void SharedCacheBuilder::addPageStartsV4(uint8_t* pageContent, const bool bitmap[], const dyld_cache_slide_info4* info,
                                         std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask = (pint_t)(info->delta_mask);
    const pint_t   valueMask = ~deltaMask;
    const uint32_t pageSize  = info->page_size;
    const pint_t   valueAdd  = (pint_t)(info->value_add);

    uint16_t startValue = DYLD_CACHE_SLIDE4_PAGE_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;
    for (uint32_t i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChainV4<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras >= DYLD_CACHE_SLIDE4_PAGE_INDEX ) {
                        _diagnostics.error("rebase overflow in v4 page extras");
                        return;
                    }
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset];
        pint_t lastValue = (pint_t)P::getP(*lastLoc);
        pint_t newValue = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE4_PAGE_EXTRA_END;
    }
    pageStarts.push_back(startValue);
}

template <typename P>
void SharedCacheBuilder::writeSlideInfoV4(const bool bitmapForAllDataRegions[], unsigned dataPageCountForAllDataRegions)
{
    typedef typename P::uint_t pint_t;
    typedef typename P::E      E;

    const uint32_t pageSize = _aslrTracker.pageSize();
    const uint8_t* firstDataRegionBuffer = firstDataRegion()->buffer;
    for (uint32_t dataRegionIndex = 0; dataRegionIndex != _dataRegions.size(); ++dataRegionIndex) {
        Region& dataRegion = _dataRegions[dataRegionIndex];

        // fill in fixed info
        assert(dataRegion.slideInfoFileOffset != 0);
        assert((dataRegion.sizeInUse % pageSize) == 0);
        unsigned dataPageCount = (uint32_t)dataRegion.sizeInUse / pageSize;
        dyld_cache_slide_info4* info = (dyld_cache_slide_info4*)dataRegion.slideInfoBuffer;
        info->version    = 4;
        info->page_size  = pageSize;
        info->delta_mask = _archLayout->pointerDeltaMask;
        info->value_add  = _archLayout->useValueAdd ? _archLayout->sharedMemoryStart : 0;

        // set page starts and extras for each page
        std::vector<uint16_t> pageStarts;
        std::vector<uint16_t> pageExtras;
        pageStarts.reserve(dataPageCount);
        const size_t bitmapEntriesPerPage = (sizeof(bool)*(pageSize/4));
        uint8_t* pageContent = dataRegion.buffer;
        unsigned numPagesFromFirstDataRegion = (uint32_t)(dataRegion.buffer - firstDataRegionBuffer) / pageSize;
        assert((numPagesFromFirstDataRegion + dataPageCount) <= dataPageCountForAllDataRegions);
        const bool* bitmapForRegion = (const bool*)bitmapForAllDataRegions + (bitmapEntriesPerPage * numPagesFromFirstDataRegion);
        const bool* bitmapForPage = bitmapForRegion;
        for (unsigned i=0; i < dataPageCount; ++i) {
            addPageStartsV4<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
            if ( _diagnostics.hasError() ) {
                return;
            }
            pageContent += pageSize;
            bitmapForPage += (sizeof(bool)*(pageSize/4));
        }
        // fill in computed info
        info->page_starts_offset = sizeof(dyld_cache_slide_info4);
        info->page_starts_count  = (unsigned)pageStarts.size();
        info->page_extras_offset = (unsigned)(sizeof(dyld_cache_slide_info4)+pageStarts.size()*sizeof(uint16_t));
        info->page_extras_count  = (unsigned)pageExtras.size();
        uint16_t* pageStartsBuffer = (uint16_t*)((char*)info + info->page_starts_offset);
        uint16_t* pageExtrasBuffer = (uint16_t*)((char*)info + info->page_extras_offset);
        for (unsigned i=0; i < pageStarts.size(); ++i)
            pageStartsBuffer[i] = pageStarts[i];
        for (unsigned i=0; i < pageExtras.size(); ++i)
            pageExtrasBuffer[i] = pageExtras[i];
        // update header with final size
        uint64_t slideInfoSize = align(info->page_extras_offset + pageExtras.size()*sizeof(uint16_t), _archLayout->sharedRegionAlignP2);
        dataRegion.slideInfoFileSize = slideInfoSize;
        if ( dataRegion.slideInfoFileSize > dataRegion.slideInfoBufferSizeAllocated ) {
            _diagnostics.error("kernel slide info overflow buffer");
        }
        // Update the mapping entry on the cache header
        const dyld_cache_header* cacheHeader = (dyld_cache_header*)_readExecuteRegion.buffer;
        dyld_cache_mapping_and_slide_info* slidableMappings = (dyld_cache_mapping_and_slide_info*)(_readExecuteRegion.buffer + cacheHeader->mappingWithSlideOffset);
        slidableMappings[1 + dataRegionIndex].slideInfoFileSize = dataRegion.slideInfoFileSize;
        //fprintf(stderr, "pageCount=%u, page_starts_count=%lu, page_extras_count=%lu\n", dataPageCount, pageStarts.size(), pageExtras.size());
    }
}
#endif

void CacheBuilder::writeSlideInfoV1()
{
    // build one 128-byte bitmap per page (4096) of DATA
    uint8_t* const dataStart = (uint8_t*)_buffer.get() + regions[1].fileOffset;
    uint8_t* const dataEnd   = dataStart + regions[1].size;
    const long bitmapSize = (dataEnd - dataStart)/(4*8);
    uint8_t* bitmap = (uint8_t*)calloc(bitmapSize, 1);
    for (void* p : _pointersForASLR) {
        if ( (p < dataStart) || ( p > dataEnd) )
            terminate("DATA pointer for sliding, out of range\n");
        long offset = (long)((uint8_t*)p - dataStart);
        if ( (offset % 4) != 0 )
            terminate("pointer not 4-byte aligned in DATA offset 0x%08lX\n", offset);
        long byteIndex = offset / (4*8);
        long bitInByte = (offset % 32) >> 2;
        bitmap[byteIndex] |= (1 << bitInByte);
    }

    // allocate worst case size block of all slide info
    const unsigned entry_size = 4096/(8*4); // 8 bits per byte, possible pointer every 4 bytes.
    const unsigned toc_count = (unsigned)bitmapSize/entry_size;
    dyld_cache_slide_info* slideInfo = (dyld_cache_slide_info*)((uint8_t*)_buffer + _slideInfoFileOffset);
    slideInfo->version        = 1;
    slideInfo->toc_offset     = sizeof(dyld_cache_slide_info);
    slideInfo->toc_count      = toc_count;
    slideInfo->entries_offset = (slideInfo->toc_offset+2*toc_count+127)&(-128);
    slideInfo->entries_count  = 0;
    slideInfo->entries_size   = entry_size;
    // append each unique entry
    const dyldCacheSlideInfoEntry* bitmapAsEntries = (dyldCacheSlideInfoEntry*)bitmap;
    dyldCacheSlideInfoEntry* const entriesInSlidInfo = (dyldCacheSlideInfoEntry*)((char*)slideInfo+slideInfo->entries_offset());
    int entry_count = 0;
    for (int i=0; i < toc_count; ++i) {
        const dyldCacheSlideInfoEntry* thisEntry = &bitmapAsEntries[i];
        // see if it is same as one already added
        bool found = false;
        for (int j=0; j < entry_count; ++j) {
            if ( memcmp(thisEntry, &entriesInSlidInfo[j], entry_size) == 0 ) {
                slideInfo->set_toc(i, j);
                found = true;
                break;
            }
        }
        if ( !found ) {
            // append to end
            memcpy(&entriesInSlidInfo[entry_count], thisEntry, entry_size);
            slideInfo->set_toc(i, entry_count++);
        }
    }
    slideInfo->entries_count = entry_count;
    ::free((void*)bitmap);

    _buffer.header->slideInfoSize = align(slideInfo->entries_offset + entry_count*entry_size, _archLayout->sharedRegionAlignP2);
}
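
// Bitmap math example (added for clarity): one bit per possible pointer
// (every 4 bytes), so 32 data bytes per bitmap byte.  For a pointer at DATA
// offset 0x1234 (4660): byteIndex = 4660/32 = 145 and bitInByte =
// (4660 % 32) >> 2 = 20 >> 2 = 5, hence bitmap[145] |= (1 << 5).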
void SharedCacheBuilder::setPointerContentV3(dyld3::MachOLoaded::ChainedFixupPointerOnDisk* loc, uint64_t targetVMAddr, size_t next)
{
    assert(targetVMAddr > _readExecuteRegion.unslidLoadAddress);
    assert(targetVMAddr < _readOnlyRegion.unslidLoadAddress + _readOnlyRegion.sizeInUse);
    dyld3::MachOLoaded::ChainedFixupPointerOnDisk tmp;
    uint16_t diversity;
    bool     hasAddrDiv;
    uint8_t  key;
    if ( _aslrTracker.hasAuthData(loc, &diversity, &hasAddrDiv, &key) ) {
        // if base cache address cannot fit into target, then use offset
        tmp.arm64e.authRebase.target = _readExecuteRegion.unslidLoadAddress;
        if ( tmp.arm64e.authRebase.target != _readExecuteRegion.unslidLoadAddress )
            targetVMAddr -= _readExecuteRegion.unslidLoadAddress;
        loc->arm64e.authRebase.target    = targetVMAddr;
        loc->arm64e.authRebase.diversity = diversity;
        loc->arm64e.authRebase.addrDiv   = hasAddrDiv;
        loc->arm64e.authRebase.key       = key;
        loc->arm64e.authRebase.next      = next;
        loc->arm64e.authRebase.bind      = 0;
        loc->arm64e.authRebase.auth      = 1;
        assert(loc->arm64e.authRebase.target == targetVMAddr && "target truncated");
        assert(loc->arm64e.authRebase.next == next && "next location truncated");
    }
    else {
        uint8_t highByte = 0;
        _aslrTracker.hasHigh8(loc, &highByte);
        // if base cache address cannot fit into target, then use offset
        tmp.arm64e.rebase.target = _readExecuteRegion.unslidLoadAddress;
        if ( tmp.arm64e.rebase.target != _readExecuteRegion.unslidLoadAddress )
            targetVMAddr -= _readExecuteRegion.unslidLoadAddress;
        loc->arm64e.rebase.target = targetVMAddr;
        loc->arm64e.rebase.high8  = highByte;
        loc->arm64e.rebase.next   = next;
        loc->arm64e.rebase.bind   = 0;
        loc->arm64e.rebase.auth   = 0;
        assert(loc->arm64e.rebase.target == targetVMAddr && "target truncated");
        assert(loc->arm64e.rebase.next == next && "next location truncated");
    }
}
uint16_t SharedCacheBuilder::pageStartV3(uint8_t* pageContent, uint32_t pageSize, const bool bitmap[])
{
    const int maxPerPage = pageSize / 4;
    uint16_t result = DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE;
    dyld3::MachOLoaded::ChainedFixupPointerOnDisk* lastLoc = nullptr;
    for (int i=0; i < maxPerPage; ++i) {
        if ( bitmap[i] ) {
            if ( result == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE ) {
                // found first rebase location in page
                result = i * 4;
            }
            dyld3::MachOLoaded::ChainedFixupPointerOnDisk* loc = (dyld3::MachOLoaded::ChainedFixupPointerOnDisk*)(pageContent + i*4);
            if ( lastLoc != nullptr ) {
                // convert vmaddr based pointers to arm64e dyld cache chains
                setPointerContentV3(lastLoc, lastLoc->raw64, loc - lastLoc);
            }
            lastLoc = loc;
        }
    }
    if ( lastLoc != nullptr ) {
        // convert vmaddr based pointers to arm64e dyld cache chain, and mark end of chain
        setPointerContentV3(lastLoc, lastLoc->raw64, 0);
    }
    return result;
}
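
//
// Emits one dyld_cache_slide_info3 per DATA region. When updating the cache
// header below, slidableMappings is indexed with "1 + dataRegionIndex"
// because mapping 0 is the read-execute (__TEXT) mapping, so the slidable
// DATA mappings start at index 1 (assuming the mapping order written earlier
// by this builder).
//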
void SharedCacheBuilder::writeSlideInfoV3(const bool bitmapForAllDataRegions[], unsigned dataPageCountForAllDataRegions)
{
    const uint32_t pageSize = _aslrTracker.pageSize();
    const uint8_t* firstDataRegionBuffer = firstDataRegion()->buffer;
    for (uint32_t dataRegionIndex = 0; dataRegionIndex != _dataRegions.size(); ++dataRegionIndex) {
        Region& dataRegion = _dataRegions[dataRegionIndex];
        // fprintf(stderr, "writeSlideInfoV3: %s 0x%llx->0x%llx\n", dataRegion.name.c_str(), dataRegion.cacheFileOffset, dataRegion.cacheFileOffset + dataRegion.sizeInUse);
        // fill in fixed info
        assert(dataRegion.slideInfoFileOffset != 0);
        assert((dataRegion.sizeInUse % pageSize) == 0);
        unsigned dataPageCount = (uint32_t)dataRegion.sizeInUse / pageSize;
        dyld_cache_slide_info3* info = (dyld_cache_slide_info3*)dataRegion.slideInfoBuffer;
        info->version           = 3;
        info->page_size         = pageSize;
        info->page_starts_count = dataPageCount;
        info->auth_value_add    = _archLayout->sharedMemoryStart;

        // fill in per-page starts
        const size_t bitmapEntriesPerPage = (sizeof(bool)*(pageSize/4));
        uint8_t* pageContent = dataRegion.buffer;
        unsigned numPagesFromFirstDataRegion = (uint32_t)(dataRegion.buffer - firstDataRegionBuffer) / pageSize;
        assert((numPagesFromFirstDataRegion + dataPageCount) <= dataPageCountForAllDataRegions);
        const bool* bitmapForRegion = (const bool*)bitmapForAllDataRegions + (bitmapEntriesPerPage * numPagesFromFirstDataRegion);
        const bool* bitmapForPage   = bitmapForRegion;
        //for (unsigned i=0; i < dataPageCount; ++i) {
        dispatch_apply(dataPageCount, DISPATCH_APPLY_AUTO, ^(size_t i) {
            info->page_starts[i] = pageStartV3(pageContent + (i * pageSize), pageSize, bitmapForPage + (i * bitmapEntriesPerPage));
        });
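        // Each page's fixup chain is self-contained, which is what makes the
        // per-page dispatch_apply above safe; the commented-out loop is the
        // equivalent serial form.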

        // update region with final size
        dataRegion.slideInfoFileSize = align(__offsetof(dyld_cache_slide_info3, page_starts[dataPageCount]), _archLayout->sharedRegionAlignP2);
        if ( dataRegion.slideInfoFileSize > dataRegion.slideInfoBufferSizeAllocated ) {
            _diagnostics.error("kernel slide info overflow buffer");
        }
        // Update the mapping entry on the cache header
        const dyld_cache_header*           cacheHeader      = (dyld_cache_header*)_readExecuteRegion.buffer;
        dyld_cache_mapping_and_slide_info* slidableMappings = (dyld_cache_mapping_and_slide_info*)(_readExecuteRegion.buffer + cacheHeader->mappingWithSlideOffset);
        slidableMappings[1 + dataRegionIndex].slideInfoFileSize = dataRegion.slideInfoFileSize;