/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <mach/shared_region.h>
#include <CommonCrypto/CommonHMAC.h>
#include <CommonCrypto/CommonDigest.h>
#include <CommonCrypto/CommonDigestSPI.h>
#include <pthread/pthread.h>

#include <string>
#include <vector>
#include <map>
#include <set>
#include <algorithm>
#include <unordered_map>
#include <unordered_set>

#include "MachOParser.h"
#include "CodeSigningTypes.h"
#include "DyldSharedCache.h"
#include "CacheBuilder.h"
#include "FileAbstraction.hpp"
#include "LaunchCacheWriter.h"
#include "Diagnostics.h"
#include "ImageProxy.h"
#if __has_include("dyld_cache_config.h")
    #include "dyld_cache_config.h"
#else
    #define ARM_SHARED_REGION_START      0x1A000000ULL
    #define ARM_SHARED_REGION_SIZE       0x26000000ULL
    #define ARM64_SHARED_REGION_START   0x180000000ULL
    #define ARM64_SHARED_REGION_SIZE     0x40000000ULL
#endif
const CacheBuilder::ArchLayout CacheBuilder::_s_archLayout[] = {
    { 0x7FFF20000000ULL,         0xEFE00000ULL,            0x40000000, 0xFFFF000000000000, "x86_64",  0,          0,          0,          12, true,  true  },
    { 0x7FFF20000000ULL,         0xEFE00000ULL,            0x40000000, 0xFFFF000000000000, "x86_64h", 0,          0,          0,          12, true,  true  },
    { SHARED_REGION_BASE_I386,   SHARED_REGION_SIZE_I386,  0x00200000, 0x0,                "i386",    0,          0,          0,          12, false, false },
    { ARM64_SHARED_REGION_START, ARM64_SHARED_REGION_SIZE, 0x02000000, 0x00FFFF0000000000, "arm64",   0x0000C000, 0x00100000, 0x07F00000, 14, false, true  },
    { ARM64_SHARED_REGION_START, ARM64_SHARED_REGION_SIZE, 0x02000000, 0x00FFFF0000000000, "arm64e",  0x0000C000, 0x00100000, 0x07F00000, 14, false, true  },
    { ARM_SHARED_REGION_START,   ARM_SHARED_REGION_SIZE,   0x02000000, 0xE0000000,         "armv7s",  0,          0,          0,          14, false, false },
    { ARM_SHARED_REGION_START,   ARM_SHARED_REGION_SIZE,   0x00400000, 0xE0000000,         "armv7k",  0,          0,          0,          14, false, false },
    { 0x40000000,                0x40000000,               0x02000000, 0x0,                "sim-x86", 0,          0,          0,          14, false, false }
};
// These are dylibs that may be interposed, so stubs calling into them should never be bypassed
const char* const CacheBuilder::_s_neverStubEliminate[] = {
    "/usr/lib/system/libdispatch.dylib",
};
CacheBuilder::CacheBuilder(const DyldSharedCache::CreateOptions& options)
    : _options(options)
    , _diagnostics(options.loggingPrefix, options.verbose)
    , _archLayout(nullptr)
    , _aliasCount(0)
    , _slideInfoFileOffset(0)
    , _slideInfoBufferSizeAllocated(0)
    , _allocatedBufferSize(0)
    , _currentFileSize(0)
    , _vmSize(0)
    , _branchPoolsLinkEditStartAddr(0)
{
    std::string targetArch = options.archName;
    if ( options.forSimulator && (options.archName == "i386") )
        targetArch = "sim-x86";

    for (const ArchLayout& layout : _s_archLayout) {
        if ( layout.archName == targetArch ) {
            _archLayout = &layout;
            break;
        }
    }
}
std::string CacheBuilder::errorMessage()
{
    return _diagnostics.errorMessage();
}

const std::set<std::string> CacheBuilder::warnings()
{
    return _diagnostics.warnings();
}

const std::set<const mach_header*> CacheBuilder::evictions()
{
    return _evictions;
}

void CacheBuilder::deleteBuffer()
{
    vm_deallocate(mach_task_self(), (vm_address_t)_buffer, _allocatedBufferSize);
    _buffer = nullptr;
    _allocatedBufferSize = 0;
}
std::vector<DyldSharedCache::MappedMachO>
CacheBuilder::makeSortedDylibs(const std::vector<DyldSharedCache::MappedMachO>& dylibs, const std::unordered_map<std::string, unsigned> sortOrder)
{
    std::vector<DyldSharedCache::MappedMachO> sortedDylibs = dylibs;

    std::sort(sortedDylibs.begin(), sortedDylibs.end(), [&](const DyldSharedCache::MappedMachO& a, const DyldSharedCache::MappedMachO& b) {
        const auto& orderA = sortOrder.find(a.runtimePath);
        const auto& orderB = sortOrder.find(b.runtimePath);
        bool foundA = (orderA != sortOrder.end());
        bool foundB = (orderB != sortOrder.end());

        // Order all __DATA_DIRTY segments specified in the order file first, in
        // the order specified in the file, followed by any other __DATA_DIRTY
        // segments in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;
        else
            return a.runtimePath < b.runtimePath;
    });

    return sortedDylibs;
}
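
// Note: the conversion below assumes mach_absolute_time() ticks are
// nanoseconds, which holds where the mach timebase is 1:1 (Intel); a fully
// general version would first scale by mach_timebase_info() numer/denom.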
inline uint32_t absolutetime_to_milliseconds(uint64_t abstime)
{
    return (uint32_t)(abstime/1000/1000);
}
struct DylibAndSize
{
    const char* installName;
    uint64_t    size;
};
bool CacheBuilder::cacheOverflow(const dyld_cache_mapping_info regions[3])
{
    if ( _archLayout->sharedRegionsAreDiscontiguous ) {
        // for macOS x86_64 cache, need to check each region for overflow
        return ( (regions[0].size > 0x60000000) || (regions[1].size > 0x40000000) || (regions[2].size > 0x3FE00000) );
    }
    else {
        return (_vmSize > _archLayout->sharedMemorySize);
    }
}
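
// The x86_64 budgets above are the fixed sub-region sizes: 0x60000000 (1.5GB)
// for __TEXT, 0x40000000 (1GB) for __DATA, and 0x3FE00000 (1GB minus 2MB) for
// the read-only region; see the discontiguous layout in assignSegmentAddresses().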
void CacheBuilder::build(const std::vector<DyldSharedCache::MappedMachO>& dylibs,
                         const std::vector<DyldSharedCache::MappedMachO>& otherOsDylibsInput,
                         const std::vector<DyldSharedCache::MappedMachO>& osExecutables)
{
    // <rdar://problem/21317611> error out instead of crash if cache has no dylibs
    // FIXME: plist should specify required vs optional dylibs
    if ( dylibs.size() < 30 ) {
        _diagnostics.error("missing required minimum set of dylibs");
        return;
    }
    uint64_t t1 = mach_absolute_time();

    // make copy of dylib list and sort
    std::vector<DyldSharedCache::MappedMachO> sortedDylibs = makeSortedDylibs(dylibs, _options.dylibOrdering);
    std::vector<DyldSharedCache::MappedMachO> otherOsDylibs = otherOsDylibsInput;

    // assign addresses for each segment of each dylib in new cache
    dyld_cache_mapping_info regions[3];
    SegmentMapping segmentMapping = assignSegmentAddresses(sortedDylibs, regions);
    while ( cacheOverflow(regions) ) {
        if ( !_options.evictLeafDylibsOnOverflow ) {
            _diagnostics.error("cache overflow: %lluMB (max %lluMB)", _vmSize / 1024 / 1024, (_archLayout->sharedMemorySize) / 1024 / 1024);
            return;
        }
        // find all leaf (not referenced by anything else in cache) dylibs

        // build count of how many references there are to each dylib
        __block std::map<std::string, unsigned int> referenceCount;
        for (const DyldSharedCache::MappedMachO& dylib : sortedDylibs) {
            dyld3::MachOParser parser(dylib.mh);
            parser.forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
                referenceCount[loadPath] += 1;
            });
        }

        // find all dylibs not referenced
        std::vector<DylibAndSize> unreferencedDylibs;
        for (const DyldSharedCache::MappedMachO& dylib : sortedDylibs) {
            dyld3::MachOParser parser(dylib.mh);
            const char* installName = parser.installName();
            if ( referenceCount.count(installName) == 0 ) {
                // conservative: sum up all segments except LINKEDIT
                __block uint64_t segsSize = 0;
                parser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, bool& stop) {
                    if ( strcmp(segName, "__LINKEDIT") != 0 )
                        segsSize += vmSize;
                });
                unreferencedDylibs.push_back({installName, segsSize});
            }
        }
        // sort leaf dylibs by size
        std::sort(unreferencedDylibs.begin(), unreferencedDylibs.end(), [&](const DylibAndSize& a, const DylibAndSize& b) {
            return ( a.size > b.size );
        });

        // build set of dylibs that if removed will allow cache to build
        uint64_t reductionTarget = _vmSize - _archLayout->sharedMemorySize;
        std::set<std::string> toRemove;
        for (DylibAndSize& dylib : unreferencedDylibs) {
            if ( _options.verbose )
                _diagnostics.warning("to prevent cache overflow, not caching %s", dylib.installName);
            toRemove.insert(dylib.installName);
            if ( dylib.size > reductionTarget )
                break;
            reductionTarget -= dylib.size;
        }
        // transfer overflow dylibs from cached vector to other vector
        for (const std::string& installName : toRemove) {
            for (std::vector<DyldSharedCache::MappedMachO>::iterator it=sortedDylibs.begin(); it != sortedDylibs.end(); ++it) {
                dyld3::MachOParser parser(it->mh);
                if ( installName == parser.installName() ) {
                    _evictions.insert(parser.header());
                    otherOsDylibs.push_back(*it);
                    sortedDylibs.erase(it);
                    break;
                }
            }
        }
        // re-layout the cache and verify enough was evicted
        segmentMapping = assignSegmentAddresses(sortedDylibs, regions);
        if ( unreferencedDylibs.size() == 0 && cacheOverflow(regions) ) {
            _diagnostics.error("cache overflow, tried evicting %ld leaf dylibs, but still too big: %lluMB (max %lluMB)",
                               toRemove.size(), _vmSize / 1024 / 1024, (_archLayout->sharedMemorySize) / 1024 / 1024);
            return;
        }
    }
    // allocate buffer for new cache
    _allocatedBufferSize = std::max(_currentFileSize, (uint64_t)0x100000)*1.1; // add 10% to allocation to support large closures
    if ( vm_allocate(mach_task_self(), (vm_address_t*)&_buffer, _allocatedBufferSize, VM_FLAGS_ANYWHERE) != 0 ) {
        _diagnostics.error("could not allocate buffer");
        return;
    }
    _currentFileSize = _allocatedBufferSize;
    // write unoptimized cache
    writeCacheHeader(regions, sortedDylibs, segmentMapping);
    copyRawSegments(sortedDylibs, segmentMapping);
    adjustAllImagesForNewSegmentLocations(sortedDylibs, segmentMapping);
    if ( _diagnostics.hasError() )
        return;

    bindAllImagesInCacheFile(regions);
    if ( _diagnostics.hasError() )
        return;

    // optimize ObjC metadata
    if ( _options.optimizeObjC )
        optimizeObjC(_buffer, _archLayout->is64, _options.optimizeStubs, _pointersForASLR, _diagnostics);
    if ( _diagnostics.hasError() )
        return;

    // optimize away stubs
    std::vector<uint64_t> branchPoolOffsets;
    uint64_t cacheStartAddress = _archLayout->sharedMemoryStart;
    if ( _options.optimizeStubs ) {
        std::vector<uint64_t> branchPoolStartAddrs;
        const uint64_t* p = (uint64_t*)((uint8_t*)_buffer + _buffer->header.branchPoolsOffset);
        for (int i=0; i < _buffer->header.branchPoolsCount; ++i) {
            uint64_t poolAddr = p[i];
            branchPoolStartAddrs.push_back(poolAddr);
            branchPoolOffsets.push_back(poolAddr - cacheStartAddress);
        }
        bypassStubs(_buffer, branchPoolStartAddrs, _s_neverStubEliminate, _diagnostics);
    }
    uint64_t t2 = mach_absolute_time();

    // FIPS seal corecrypto. This must be done after stub elimination (so that
    // __TEXT,__text is not changed after sealing), but before LINKEDIT
    // optimization (so that we still have access to local symbols)
    fipsSign();

    // merge and compact LINKEDIT segments
    dyld_cache_local_symbols_info* localsInfo = nullptr;
    if ( dylibs.size() == 0 )
        _currentFileSize = 0x1000;
    else
        _currentFileSize = optimizeLinkedit(_buffer, _archLayout->is64, _options.excludeLocalSymbols, _options.optimizeStubs, branchPoolOffsets, _diagnostics, &localsInfo);

    uint64_t t3 = mach_absolute_time();
    // add ImageGroup for all dylibs in cache
    __block std::vector<DyldSharedCache::MappedMachO> cachedDylibs;
    std::unordered_map<std::string, const DyldSharedCache::MappedMachO*> mapIntoSortedDylibs;
    for (const DyldSharedCache::MappedMachO& entry : sortedDylibs) {
        mapIntoSortedDylibs[entry.runtimePath] = &entry;
    }
    _buffer->forEachImage(^(const mach_header* mh, const char* installName) {
        auto pos = mapIntoSortedDylibs.find(installName);
        if ( pos != mapIntoSortedDylibs.end() ) {
            DyldSharedCache::MappedMachO newEntry = *(pos->second);
            newEntry.mh = mh;
            cachedDylibs.push_back(newEntry);
        }
        else {
            bool found = false;
            for (const std::string& prefix : _options.pathPrefixes) {
                std::string fullPath = prefix + installName;
                char resolvedPath[PATH_MAX];
                if ( realpath(fullPath.c_str(), resolvedPath) != nullptr ) {
                    std::string resolvedUnPrefixed = &resolvedPath[prefix.size()];
                    pos = mapIntoSortedDylibs.find(resolvedUnPrefixed);
                    if ( pos != mapIntoSortedDylibs.end() ) {
                        DyldSharedCache::MappedMachO newEntry = *(pos->second);
                        newEntry.mh = mh;
                        cachedDylibs.push_back(newEntry);
                        found = true;
                    }
                }
            }
            if ( !found )
                fprintf(stderr, "missing mapping for %s\n", installName);
        }
    });
    dyld3::DyldCacheParser dyldCacheParser(_buffer, true);
    dyld3::ImageProxyGroup* dylibGroup = dyld3::ImageProxyGroup::makeDyldCacheDylibsGroup(_diagnostics, dyldCacheParser, cachedDylibs,
                                                                                          _options.pathPrefixes, _patchTable,
                                                                                          _options.optimizeStubs, !_options.dylibsRemovedDuringMastering);
    if ( _diagnostics.hasError() )
        return;
    addCachedDylibsImageGroup(dylibGroup);
    if ( _diagnostics.hasError() )
        return;

    uint64_t t4 = mach_absolute_time();
    // add ImageGroup for other OS dylibs and bundles
    dyld3::ImageProxyGroup* otherGroup = dyld3::ImageProxyGroup::makeOtherOsGroup(_diagnostics, dyldCacheParser, dylibGroup, otherOsDylibs,
                                                                                  _options.inodesAreSameAsRuntime, _options.pathPrefixes);
    if ( _diagnostics.hasError() )
        return;
    addCachedOtherDylibsImageGroup(otherGroup);
    if ( _diagnostics.hasError() )
        return;

    uint64_t t5 = mach_absolute_time();

    // compute and add launch closures
    std::map<std::string, const dyld3::launch_cache::binary_format::Closure*> closures;
    for (const DyldSharedCache::MappedMachO& mainProg : osExecutables) {
        Diagnostics clsDiag;
        const dyld3::launch_cache::binary_format::Closure* cls = dyld3::ImageProxyGroup::makeClosure(clsDiag, dyldCacheParser, dylibGroup, otherGroup, mainProg,
                                                                                                     _options.inodesAreSameAsRuntime, _options.pathPrefixes);
        if ( clsDiag.hasError() ) {
            // if closure cannot be built, silently skip it, unless in verbose mode
            if ( _options.verbose ) {
                _diagnostics.warning("building closure for '%s': %s", mainProg.runtimePath.c_str(), clsDiag.errorMessage().c_str());
                for (const std::string& warn : clsDiag.warnings() )
                    _diagnostics.warning("%s", warn.c_str());
            }
        }
        else {
            closures[mainProg.runtimePath] = cls;
        }
    }
    addClosures(closures);
    if ( _diagnostics.hasError() )
        return;

    uint64_t t6 = mach_absolute_time();
    // fill in slide info at start of region[2]
    // do this last because it modifies pointers in DATA segments
    if ( _options.cacheSupportsASLR ) {
        if ( _archLayout->is64 )
            writeSlideInfoV2<Pointer64<LittleEndian>>();
        else
            writeSlideInfoV2<Pointer32<LittleEndian>>();
    }

    uint64_t t7 = mach_absolute_time();

    // update last region size
    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)_buffer + _buffer->header.mappingOffset);
    _currentFileSize = align(_currentFileSize, _archLayout->sharedRegionAlignP2);
    mappings[2].size = _currentFileSize - mappings[2].fileOffset;

    // record cache bounds
    _buffer->header.sharedRegionStart = _archLayout->sharedMemoryStart;
    _buffer->header.sharedRegionSize  = _archLayout->sharedMemorySize;
    if ( _archLayout->sharedRegionsAreDiscontiguous ) {
        // special case x86_64 which has three non-contiguous chunks each in their own 1GB regions
        uint64_t maxSlide0 = 0x60000000 - mappings[0].size; // TEXT region has 1.5GB region
        uint64_t maxSlide1 = 0x40000000 - mappings[1].size;
        uint64_t maxSlide2 = 0x3FE00000 - mappings[2].size;
        _buffer->header.maxSlide = std::min(std::min(maxSlide0, maxSlide1), maxSlide2);
    }
    else {
        _buffer->header.maxSlide = (_archLayout->sharedMemoryStart + _archLayout->sharedMemorySize) - (mappings[2].address + mappings[2].size);
    }

    // append "unmapped" local symbols region
    if ( _options.excludeLocalSymbols ) {
        size_t localsInfoSize = align(localsInfo->stringsOffset + localsInfo->stringsSize, _archLayout->sharedRegionAlignP2);
        if ( _currentFileSize + localsInfoSize > _allocatedBufferSize ) {
            _diagnostics.warning("local symbols omitted because cache buffer overflow");
        }
        else {
            memcpy((char*)_buffer+_currentFileSize, localsInfo, localsInfoSize);
            _buffer->header.localSymbolsOffset = _currentFileSize;
            _buffer->header.localSymbolsSize   = localsInfoSize;
            _currentFileSize += localsInfoSize;
        }
    }
    free((void*)localsInfo);

    recomputeCacheUUID();

    // Calculate the VMSize of the resulting cache
    __block uint64_t endAddr = 0;
    _buffer->forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
        if (vmAddr+size > endAddr)
            endAddr = vmAddr+size;
    });
    _vmSize = endAddr - cacheStartAddress;

    // last sanity check on size
    if ( _vmSize > _archLayout->sharedMemorySize ) {
        _diagnostics.error("cache overflow after optimizations. %lluMB (max %lluMB)", _vmSize / 1024 / 1024, (_archLayout->sharedMemorySize) / 1024 / 1024);
        return;
    }

    // codesignature is part of file, but is not mapped
    codeSign();
    if ( _diagnostics.hasError() )
        return;

    uint64_t t8 = mach_absolute_time();

    if ( _options.verbose ) {
        fprintf(stderr, "time to copy and bind cached dylibs: %ums\n", absolutetime_to_milliseconds(t2-t1));
        fprintf(stderr, "time to optimize LINKEDITs: %ums\n", absolutetime_to_milliseconds(t3-t2));
        fprintf(stderr, "time to build ImageGroup of %lu cached dylibs: %ums\n", sortedDylibs.size(), absolutetime_to_milliseconds(t4-t3));
        fprintf(stderr, "time to build ImageGroup of %lu other dylibs: %ums\n", otherOsDylibs.size(), absolutetime_to_milliseconds(t5-t4));
        fprintf(stderr, "time to build %lu closures: %ums\n", osExecutables.size(), absolutetime_to_milliseconds(t6-t5));
        fprintf(stderr, "time to compute slide info: %ums\n", absolutetime_to_milliseconds(t7-t6));
        fprintf(stderr, "time to compute UUID and codesign cache file: %ums\n", absolutetime_to_milliseconds(t8-t7));
    }

    // trim over-allocated buffer
    if ( _allocatedBufferSize > _currentFileSize ) {
        uint8_t* startOfUnused = (uint8_t*)_buffer+_currentFileSize;
        size_t   unusedLen     = _allocatedBufferSize-_currentFileSize;
        vm_deallocate(mach_task_self(), (vm_address_t)startOfUnused, unusedLen);
        _allocatedBufferSize = _currentFileSize;
    }
}
void CacheBuilder::writeCacheHeader(const dyld_cache_mapping_info regions[3], const std::vector<DyldSharedCache::MappedMachO>& dylibs, const SegmentMapping& segmentMappings)
{
    // "dyld_v1" + spaces + archName(), with enough spaces to pad to 15 bytes
    std::string magic = "dyld_v1";
    magic.append(15 - magic.length() - _options.archName.length(), ' ');
    magic.append(_options.archName);
    assert(magic.length() == 15);
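
    // Worked example: for "arm64", 15 - 7 - 5 = 3 pad spaces, so the magic is
    // "dyld_v1   arm64"; the memcpy below copies 16 bytes, picking up the
    // std::string's trailing NUL as the final byte.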
    // fill in header
    memcpy(_buffer->header.magic, magic.c_str(), 16);
    _buffer->header.mappingOffset        = sizeof(dyld_cache_header);
    _buffer->header.mappingCount         = 3;
    _buffer->header.imagesOffset         = (uint32_t)(_buffer->header.mappingOffset + 3*sizeof(dyld_cache_mapping_info) + sizeof(uint64_t)*_branchPoolStarts.size());
    _buffer->header.imagesCount          = (uint32_t)dylibs.size() + _aliasCount;
    _buffer->header.dyldBaseAddress      = 0;
    _buffer->header.codeSignatureOffset  = 0;
    _buffer->header.codeSignatureSize    = 0;
    _buffer->header.slideInfoOffset      = _slideInfoFileOffset;
    _buffer->header.slideInfoSize        = _slideInfoBufferSizeAllocated;
    _buffer->header.localSymbolsOffset   = 0;
    _buffer->header.localSymbolsSize     = 0;
    _buffer->header.cacheType            = _options.optimizeStubs ? kDyldSharedCacheTypeProduction : kDyldSharedCacheTypeDevelopment;
    _buffer->header.accelerateInfoAddr   = 0;
    _buffer->header.accelerateInfoSize   = 0;
    bzero(_buffer->header.uuid, 16);     // overwritten later by recomputeCacheUUID()
    _buffer->header.branchPoolsOffset    = _buffer->header.mappingOffset + 3*sizeof(dyld_cache_mapping_info);
    _buffer->header.branchPoolsCount     = (uint32_t)_branchPoolStarts.size();
    _buffer->header.imagesTextOffset     = _buffer->header.imagesOffset + sizeof(dyld_cache_image_info)*_buffer->header.imagesCount;
    _buffer->header.imagesTextCount      = dylibs.size();
    _buffer->header.platform             = (uint8_t)_options.platform;
    _buffer->header.formatVersion        = dyld3::launch_cache::binary_format::kFormatVersion;
    _buffer->header.dylibsExpectedOnDisk = !_options.dylibsRemovedDuringMastering;
    _buffer->header.simulator            = _options.forSimulator;

    // fill in mappings
    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)_buffer + _buffer->header.mappingOffset);
    mappings[0] = regions[0];
    mappings[1] = regions[1];
    mappings[2] = regions[2];

    // fill in branch pool addresses
    uint64_t* p = (uint64_t*)((char*)_buffer + _buffer->header.branchPoolsOffset);
    for (uint64_t pool : _branchPoolStarts) {
        *p++ = pool;
    }

    // fill in image table
    dyld_cache_image_info* images = (dyld_cache_image_info*)((char*)_buffer + _buffer->header.imagesOffset);
    for (const DyldSharedCache::MappedMachO& dylib : dylibs) {
        const std::vector<SegmentMappingInfo>& segs = segmentMappings.at(dylib.mh);
        dyld3::MachOParser parser(dylib.mh);
        const char* installName = parser.installName();
        images->address = segs[0].dstCacheAddress;
        if ( _options.dylibsRemovedDuringMastering ) {
            images->modTime = 0;
            images->inode   = pathHash(installName);
        }
        else {
            images->modTime = dylib.modTime;
            images->inode   = dylib.inode;
        }
        uint32_t installNameOffsetInTEXT = (uint32_t)(installName - (char*)dylib.mh);
        images->pathFileOffset = (uint32_t)segs[0].dstCacheOffset + installNameOffsetInTEXT;
        ++images;
    }
    // append aliases image records and strings
    // (left disabled: it references _dylibs, _manifest, _segmentMap, and
    // offset, which are not defined in this builder)
    /*
    for (auto &dylib : _dylibs) {
        if (!dylib->installNameAliases.empty()) {
            for (const std::string& alias : dylib->installNameAliases) {
                images->set_address(_segmentMap[dylib][0].address);
                if (_manifest.platform() == "osx") {
                    images->modTime = dylib->lastModTime;
                    images->inode = dylib->inode;
                }
                else {
                    images->modTime = 0;
                    images->inode = pathHash(alias.c_str());
                }
                images->pathFileOffset = offset;
                //fprintf(stderr, "adding alias %s for %s\n", alias.c_str(), dylib->installName.c_str());
                ::strcpy((char*)&_buffer[offset], alias.c_str());
                offset += alias.size() + 1;
                ++images;
            }
        }
    }
    */
    // calculate start of text image array and trailing string pool
    dyld_cache_image_text_info* textImages = (dyld_cache_image_text_info*)((char*)_buffer + _buffer->header.imagesTextOffset);
    uint32_t stringOffset = (uint32_t)(_buffer->header.imagesTextOffset + sizeof(dyld_cache_image_text_info) * dylibs.size());

    // write text image array and image names pool at same time
    for (const DyldSharedCache::MappedMachO& dylib : dylibs) {
        const std::vector<SegmentMappingInfo>& segs = segmentMappings.at(dylib.mh);
        dyld3::MachOParser parser(dylib.mh);
        parser.getUuid(textImages->uuid);
        textImages->loadAddress     = segs[0].dstCacheAddress;
        textImages->textSegmentSize = (uint32_t)segs[0].dstCacheSegmentSize;
        textImages->pathOffset      = stringOffset;
        const char* installName = parser.installName();
        ::strcpy((char*)_buffer + stringOffset, installName);
        stringOffset += (uint32_t)strlen(installName)+1;
        ++textImages;
    }

    // make sure header did not overflow into first mapped image
    const dyld_cache_image_info* firstImage = (dyld_cache_image_info*)((char*)_buffer + _buffer->header.imagesOffset);
    assert(stringOffset <= (firstImage->address - mappings[0].address));
}
void CacheBuilder::copyRawSegments(const std::vector<DyldSharedCache::MappedMachO>& dylibs, const SegmentMapping& mapping)
{
    uint8_t* cacheBytes = (uint8_t*)_buffer;
    for (const DyldSharedCache::MappedMachO& dylib : dylibs) {
        auto pos = mapping.find(dylib.mh);
        assert(pos != mapping.end());
        for (const SegmentMappingInfo& info : pos->second) {
            //fprintf(stderr, "copy %s segment %s (0x%08X bytes) from %p to %p (logical addr 0x%llX) for %s\n", _options.archName.c_str(), info.segName, info.copySegmentSize, info.srcSegment, &cacheBytes[info.dstCacheOffset], info.dstCacheAddress, dylib.runtimePath.c_str());
            ::memcpy(&cacheBytes[info.dstCacheOffset], info.srcSegment, info.copySegmentSize);
        }
    }
}
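
// Note: only copySegmentSize bytes come from the source file; any remaining
// dstCacheSegmentSize tail stays zero from the vm_allocate()d buffer, which is
// how zero-fill (bss-like) space is preserved in the cache.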
void CacheBuilder::adjustAllImagesForNewSegmentLocations(const std::vector<DyldSharedCache::MappedMachO>& dylibs, const SegmentMapping& mapping)
{
    uint8_t* cacheBytes = (uint8_t*)_buffer;
    for (const DyldSharedCache::MappedMachO& dylib : dylibs) {
        auto pos = mapping.find(dylib.mh);
        assert(pos != mapping.end());
        mach_header* mhInCache = (mach_header*)&cacheBytes[pos->second[0].dstCacheOffset];
        adjustDylibSegments(_buffer, _archLayout->is64, mhInCache, pos->second, _pointersForASLR, _diagnostics);
        if ( _diagnostics.hasError() )
            break;
    }
}
struct Counts {
    unsigned long lazyCount    = 0;
    unsigned long nonLazyCount = 0;
};
void CacheBuilder::bindAllImagesInCacheFile(const dyld_cache_mapping_info regions[3])
{
    const bool log = false;
    __block std::unordered_map<std::string, Counts> useCounts;

    // build map of install names to mach_headers
    __block std::unordered_map<std::string, const mach_header*> installNameToMH;
    __block std::vector<const mach_header*> dylibMHs;
    _buffer->forEachImage(^(const mach_header* mh, const char* installName) {
        installNameToMH[installName] = mh;
        dylibMHs.push_back(mh);
    });

    __block Diagnostics parsingDiag;
    bool (^dylibFinder)(uint32_t, const char*, void*, const mach_header**, void**) = ^(uint32_t depIndex, const char* depLoadPath, void* extra, const mach_header** foundMH, void** foundExtra) {
        auto pos = installNameToMH.find(depLoadPath);
        if ( pos != installNameToMH.end() ) {
            *foundMH = pos->second;
            *foundExtra = nullptr;
            return true;
        }
        parsingDiag.error("dependent dylib %s not found", depLoadPath);
        return false;
    };
    if ( parsingDiag.hasError() ) {
        _diagnostics.error("%s", parsingDiag.errorMessage().c_str());
        return;
    }

    // bind every dylib in cache
    for (const mach_header* mh : dylibMHs) {
        dyld3::MachOParser parser(mh, true);
        bool is64 = parser.is64();
        const char* depPaths[256];
        const char** depPathsArray = depPaths;
        __block int depIndex = 1;   // bind ordinals are 1-based
        parser.forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
            depPathsArray[depIndex++] = loadPath;
        });
        uint8_t*  segCacheStarts[10];
        uint64_t  segCacheAddrs[10];
        uint8_t** segCacheStartsArray = segCacheStarts;
        uint64_t* segCacheAddrsArray  = segCacheAddrs;
        __block int segIndex = 0;
        parser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, bool& stop) {
            segCacheStartsArray[segIndex] = (segIndex == 0) ? (uint8_t*)mh : (uint8_t*)_buffer + fileOffset;
            segCacheAddrsArray[segIndex]  = vmAddr;
            ++segIndex;
        });
        __block Diagnostics bindingDiag;
        parser.forEachBind(bindingDiag, ^(uint32_t dataSegIndex, uint64_t dataSegOffset, uint8_t type, int libOrdinal, uint64_t addend, const char* symbolName, bool weakImport, bool lazy, bool& stop) {
            if ( log ) {
                if ( lazy )
                    useCounts[symbolName].lazyCount += 1;
                else
                    useCounts[symbolName].nonLazyCount += 1;
            }
            const mach_header* targetMH = nullptr;
            if ( libOrdinal == BIND_SPECIAL_DYLIB_SELF ) {
                targetMH = mh;
            }
            else if ( libOrdinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE ) {
                parsingDiag.error("bind ordinal BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE not supported in dylibs in dyld shared cache (found in %s)", parser.installName());
                stop = true;
                return;
            }
            else if ( libOrdinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP ) {
                parsingDiag.error("bind ordinal BIND_SPECIAL_DYLIB_FLAT_LOOKUP not supported in dylibs in dyld shared cache (found in %s)", parser.installName());
                stop = true;
                return;
            }
            else {
                const char* fromPath = depPathsArray[libOrdinal];
                auto pos = installNameToMH.find(fromPath);
                if (pos == installNameToMH.end()) {
                    _diagnostics.error("dependent dylib %s not found", fromPath);
                    stop = true;
                    return;
                }
                targetMH = pos->second;
            }
            dyld3::MachOParser targetParser(targetMH, true);
            dyld3::MachOParser::FoundSymbol foundInfo;
            uint64_t targetValue = 0;
            uint8_t* fixupLoc = segCacheStartsArray[dataSegIndex] + dataSegOffset;
            if ( targetParser.findExportedSymbol(parsingDiag, symbolName, nullptr, foundInfo, dylibFinder) ) {
                const mach_header* foundInMH = foundInfo.foundInDylib;
                dyld3::MachOParser foundInParser(foundInMH, true);
                uint64_t foundInBaseAddress = foundInParser.preferredLoadAddress();
                switch ( foundInfo.kind ) {
                    case dyld3::MachOParser::FoundSymbol::Kind::resolverOffset:
                        // Bind to the target stub for resolver based functions.
                        // There may be a later optimization to alter the client
                        // stubs to jump directly to the target stub's lazy pointer.
                    case dyld3::MachOParser::FoundSymbol::Kind::headerOffset:
                        targetValue = foundInBaseAddress + foundInfo.value + addend;
                        _pointersForASLR.push_back((void*)fixupLoc);
                        if ( foundInMH != mh ) {
                            uint32_t mhVmOffset = (uint32_t)((uint8_t*)foundInMH - (uint8_t*)_buffer);
                            uint32_t definitionCacheVmOffset = (uint32_t)(mhVmOffset + foundInfo.value);
                            uint32_t referenceCacheDataVmOffset = (uint32_t)(segCacheAddrsArray[dataSegIndex] + dataSegOffset - regions[1].address);
                            assert(referenceCacheDataVmOffset < (1<<30));
                            dyld3::launch_cache::binary_format::PatchOffset entry;
                            entry.last             = false;
                            entry.hasAddend        = (addend != 0);
                            entry.dataRegionOffset = referenceCacheDataVmOffset;
                            _patchTable[foundInMH][definitionCacheVmOffset].insert(*((uint32_t*)&entry));
                        }
                        break;
                    case dyld3::MachOParser::FoundSymbol::Kind::absolute:
                        // pointers set to absolute values are not slid
                        targetValue = foundInfo.value + addend;
                        break;
                }
            }
            else if ( weakImport ) {
                // weak pointers set to zero are not slid
                targetValue = 0;
            }
            else {
                parsingDiag.error("cannot find symbol %s, needed in dylib %s", symbolName, parser.installName());
                stop = true;
            }
            switch ( type ) {
                case BIND_TYPE_POINTER:
                    if ( is64 )
                        *((uint64_t*)fixupLoc) = targetValue;
                    else
                        *((uint32_t*)fixupLoc) = (uint32_t)targetValue;
                    break;
                case BIND_TYPE_TEXT_ABSOLUTE32:
                case BIND_TYPE_TEXT_PCREL32:
                    parsingDiag.error("text relocs not supported for shared cache binding in %s", parser.installName());
                    stop = true;
                    break;
                default:
                    parsingDiag.error("bad bind type (%d) in %s", type, parser.installName());
                    stop = true;
                    break;
            }
        });
        if ( bindingDiag.hasError() ) {
            parsingDiag.error("%s in dylib %s", bindingDiag.errorMessage().c_str(), parser.installName());
        }
        if ( parsingDiag.hasError() )
            break;
        // also need to add patch locations for weak-binds that point within same image, since they are not captured by binds above
        parser.forEachWeakDef(bindingDiag, ^(bool strongDef, uint32_t dataSegIndex, uint64_t dataSegOffset, uint64_t addend, const char* symbolName, bool& stop) {
            if ( strongDef )
                return;
            uint8_t* fixupLoc = segCacheStartsArray[dataSegIndex] + dataSegOffset;
            dyld3::MachOParser::FoundSymbol weakFoundInfo;
            Diagnostics weakLookupDiag;
            if ( parser.findExportedSymbol(weakLookupDiag, symbolName, nullptr, weakFoundInfo, nullptr) ) {
                // this is an interior pointing (rebased) pointer
                uint64_t targetValue;
                if ( is64 )
                    targetValue = *((uint64_t*)fixupLoc);
                else
                    targetValue = *((uint32_t*)fixupLoc);
                uint32_t definitionCacheVmOffset = (uint32_t)(targetValue - regions[0].address);
                uint32_t referenceCacheDataVmOffset = (uint32_t)(segCacheAddrsArray[dataSegIndex] + dataSegOffset - regions[1].address);
                assert(referenceCacheDataVmOffset < (1<<30));
                dyld3::launch_cache::binary_format::PatchOffset entry;
                entry.last             = false;
                entry.hasAddend        = (addend != 0);
                entry.dataRegionOffset = referenceCacheDataVmOffset;
                _patchTable[mh][definitionCacheVmOffset].insert(*((uint32_t*)&entry));
            }
        });
        if ( bindingDiag.hasError() ) {
            parsingDiag.error("%s in dylib %s", bindingDiag.errorMessage().c_str(), parser.installName());
        }
        if ( parsingDiag.hasError() )
            break;
    }

    if ( log ) {
        unsigned lazyCount = 0;
        unsigned nonLazyCount = 0;
        std::unordered_set<std::string> lazyTargets;
        for (auto entry : useCounts) {
            fprintf(stderr, "% 3ld % 3ld %s\n", entry.second.lazyCount, entry.second.nonLazyCount, entry.first.c_str());
            lazyCount += entry.second.lazyCount;
            nonLazyCount += entry.second.nonLazyCount;
            if ( entry.second.lazyCount != 0 )
                lazyTargets.insert(entry.first);
        }
        fprintf(stderr, "lazyCount = %d\n", lazyCount);
        fprintf(stderr, "nonLazyCount = %d\n", nonLazyCount);
        fprintf(stderr, "unique lazys = %ld\n", lazyTargets.size());
    }

    if ( parsingDiag.hasError() )
        _diagnostics.error("%s", parsingDiag.errorMessage().c_str());
}
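
// PatchOffset packs one DATA-region reference into 32 bits (the asserts above
// keep dataRegionOffset under 1<<30, leaving room for the 'last' and
// 'hasAddend' flags). _patchTable maps each exporting image and the cache-VM
// offset of an exported definition to every cached pointer that uses it, so
// dyld3 can re-point those uses later (e.g. when a cached dylib is overridden).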
void CacheBuilder::recomputeCacheUUID(void)
{
    // Clear existing UUID, then MD5 whole cache buffer.
    uint8_t* uuidLoc = _buffer->header.uuid;
    bzero(uuidLoc, 16);
    CC_MD5(_buffer, (unsigned)_currentFileSize, uuidLoc);
    // <rdar://problem/6723729> uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
    uuidLoc[6] = ( uuidLoc[6] & 0x0F ) | ( 3 << 4 );
    uuidLoc[8] = ( uuidLoc[8] & 0x3F ) | 0x80;
}
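
// The two fixups above stamp the raw MD5 into RFC 4122 shape: byte 6's high
// nibble becomes 3 (a name-based, MD5-derived UUID) and byte 8's top two bits
// become 10 (variant 1).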
CacheBuilder::SegmentMapping CacheBuilder::assignSegmentAddresses(const std::vector<DyldSharedCache::MappedMachO>& dylibs, dyld_cache_mapping_info regions[3])
{
    // calculate size of header info and where first dylib's mach_header should start
    size_t startOffset = sizeof(dyld_cache_header) + 3*sizeof(dyld_cache_mapping_info);
    size_t maxPoolCount = 0;
    if ( _archLayout->branchReach != 0 )
        maxPoolCount = (_archLayout->sharedMemorySize / _archLayout->branchReach);
    startOffset += maxPoolCount * sizeof(uint64_t);
    startOffset += sizeof(dyld_cache_image_info) * dylibs.size();
    startOffset += sizeof(dyld_cache_image_text_info) * dylibs.size();
    for (const DyldSharedCache::MappedMachO& dylib : dylibs) {
        dyld3::MachOParser parser(dylib.mh);
        startOffset += (strlen(parser.installName()) + 1);
    }
    //fprintf(stderr, "%s total header size = 0x%08lX\n", _options.archName.c_str(), startOffset);
    startOffset = align(startOffset, 12);

    _branchPoolStarts.clear();
    __block uint64_t addr = _archLayout->sharedMemoryStart;
    __block SegmentMapping result;

    // assign TEXT segment addresses
    regions[0].address    = addr;
    regions[0].fileOffset = 0;
    regions[0].initProt   = VM_PROT_READ | VM_PROT_EXECUTE;
    regions[0].maxProt    = VM_PROT_READ | VM_PROT_EXECUTE;
    addr += startOffset; // header

    __block uint64_t lastPoolAddress = addr;
    for (const DyldSharedCache::MappedMachO& dylib : dylibs) {
        dyld3::MachOParser parser(dylib.mh, true);
        parser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, uint32_t segIndex, uint64_t sizeOfSections, uint8_t p2align, bool& stop) {
            if ( protections != (VM_PROT_READ | VM_PROT_EXECUTE) )
                return;
            // Insert branch island pools every 128MB for arm64
            if ( (_archLayout->branchPoolTextSize != 0) && ((addr + vmSize - lastPoolAddress) > _archLayout->branchReach) ) {
                _branchPoolStarts.push_back(addr);
                _diagnostics.verbose("adding branch pool at 0x%llX\n", addr);
                lastPoolAddress = addr;
                addr += _archLayout->branchPoolTextSize;
            }
            // Keep __TEXT segments 4K or more aligned
            addr = align(addr, std::max(p2align, (uint8_t)12));
            SegmentMappingInfo info;
            info.srcSegment          = (uint8_t*)dylib.mh + fileOffset;
            info.segName             = segName;
            info.dstCacheAddress     = addr;
            info.dstCacheOffset      = (uint32_t)(addr - regions[0].address + regions[0].fileOffset);
            info.dstCacheSegmentSize = (uint32_t)align(sizeOfSections, 12);
            info.copySegmentSize     = (uint32_t)align(sizeOfSections, 12);
            info.srcSegmentIndex     = segIndex;
            result[dylib.mh].push_back(info);
            addr += info.dstCacheSegmentSize;
        });
    }
    // align TEXT region end
    uint64_t endTextAddress = align(addr, _archLayout->sharedRegionAlignP2);
    regions[0].size = endTextAddress - regions[0].address;

    // assign __DATA* addresses
    if ( _archLayout->sharedRegionsAreDiscontiguous )
        addr = _archLayout->sharedMemoryStart + 0x60000000;
    else
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
    regions[1].address    = addr;
    regions[1].fileOffset = regions[0].fileOffset + regions[0].size;
    regions[1].initProt   = VM_PROT_READ | VM_PROT_WRITE;
    regions[1].maxProt    = VM_PROT_READ | VM_PROT_WRITE;

    // layout all __DATA_CONST segments
    __block int dataConstSegmentCount = 0;
    for (const DyldSharedCache::MappedMachO& dylib : dylibs) {
        dyld3::MachOParser parser(dylib.mh, true);
        parser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, uint32_t segIndex, uint64_t sizeOfSections, uint8_t p2align, bool& stop) {
            if ( protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( strcmp(segName, "__DATA_CONST") != 0 )
                return;
            ++dataConstSegmentCount;
            // Pack __DATA_CONST segments
            addr = align(addr, p2align);
            size_t copySize = std::min((size_t)fileSize, (size_t)sizeOfSections);
            SegmentMappingInfo info;
            info.srcSegment          = (uint8_t*)dylib.mh + fileOffset;
            info.segName             = segName;
            info.dstCacheAddress     = addr;
            info.dstCacheOffset      = (uint32_t)(addr - regions[1].address + regions[1].fileOffset);
            info.dstCacheSegmentSize = (uint32_t)sizeOfSections;
            info.copySegmentSize     = (uint32_t)copySize;
            info.srcSegmentIndex     = segIndex;
            result[dylib.mh].push_back(info);
            addr += info.dstCacheSegmentSize;
        });
    }

    // layout all __DATA segments (and other r/w non-dirty, non-const) segments
    for (const DyldSharedCache::MappedMachO& dylib : dylibs) {
        dyld3::MachOParser parser(dylib.mh, true);
        parser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, uint32_t segIndex, uint64_t sizeOfSections, uint8_t p2align, bool& stop) {
            if ( protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( strcmp(segName, "__DATA_CONST") == 0 )
                return;
            if ( strcmp(segName, "__DATA_DIRTY") == 0 )
                return;
            if ( dataConstSegmentCount > 10 ) {
                // Pack __DATA segments only if we also have __DATA_CONST segments
                addr = align(addr, p2align);
            }
            else {
                // Keep __DATA segments 4K or more aligned
                addr = align(addr, std::max(p2align, (uint8_t)12));
            }
            size_t copySize = std::min((size_t)fileSize, (size_t)sizeOfSections);
            SegmentMappingInfo info;
            info.srcSegment          = (uint8_t*)dylib.mh + fileOffset;
            info.segName             = segName;
            info.dstCacheAddress     = addr;
            info.dstCacheOffset      = (uint32_t)(addr - regions[1].address + regions[1].fileOffset);
            info.dstCacheSegmentSize = (uint32_t)sizeOfSections;
            info.copySegmentSize     = (uint32_t)copySize;
            info.srcSegmentIndex     = segIndex;
            result[dylib.mh].push_back(info);
            addr += info.dstCacheSegmentSize;
        });
    }

    // layout all __DATA_DIRTY segments, sorted
    addr = align(addr, 12);
    std::vector<DyldSharedCache::MappedMachO> dirtyDataDylibs = makeSortedDylibs(dylibs, _options.dirtyDataSegmentOrdering);
    for (const DyldSharedCache::MappedMachO& dylib : dirtyDataDylibs) {
        dyld3::MachOParser parser(dylib.mh, true);
        parser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, uint32_t segIndex, uint64_t sizeOfSections, uint8_t p2align, bool& stop) {
            if ( protections != (VM_PROT_READ | VM_PROT_WRITE) )
                return;
            if ( strcmp(segName, "__DATA_DIRTY") != 0 )
                return;
            // Pack __DATA_DIRTY segments
            addr = align(addr, p2align);
            size_t copySize = std::min((size_t)fileSize, (size_t)sizeOfSections);
            SegmentMappingInfo info;
            info.srcSegment          = (uint8_t*)dylib.mh + fileOffset;
            info.segName             = segName;
            info.dstCacheAddress     = addr;
            info.dstCacheOffset      = (uint32_t)(addr - regions[1].address + regions[1].fileOffset);
            info.dstCacheSegmentSize = (uint32_t)sizeOfSections;
            info.copySegmentSize     = (uint32_t)copySize;
            info.srcSegmentIndex     = segIndex;
            result[dylib.mh].push_back(info);
            addr += info.dstCacheSegmentSize;
        });
    }

    // align DATA region end
    uint64_t endDataAddress = align(addr, _archLayout->sharedRegionAlignP2);
    regions[1].size = endDataAddress - regions[1].address;

    // start read-only region
    if ( _archLayout->sharedRegionsAreDiscontiguous )
        addr = _archLayout->sharedMemoryStart + 0xA0000000;
    else
        addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
    regions[2].address    = addr;
    regions[2].fileOffset = regions[1].fileOffset + regions[1].size;
    regions[2].maxProt    = VM_PROT_READ;
    regions[2].initProt   = VM_PROT_READ;

    // reserve space for kernel ASLR slide info at start of r/o region
    if ( _options.cacheSupportsASLR ) {
        _slideInfoBufferSizeAllocated = align((regions[1].size/4096) * 4, _archLayout->sharedRegionAlignP2); // only need 2 bytes per page
        _slideInfoFileOffset = regions[2].fileOffset;
        addr += _slideInfoBufferSizeAllocated;
    }

    // layout all read-only (but not LINKEDIT) segments
    for (const DyldSharedCache::MappedMachO& dylib : dylibs) {
        dyld3::MachOParser parser(dylib.mh, true);
        parser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, uint32_t segIndex, uint64_t sizeOfSections, uint8_t p2align, bool& stop) {
            if ( protections != VM_PROT_READ )
                return;
            if ( strcmp(segName, "__LINKEDIT") == 0 )
                return;
            // Keep segments 4K or more aligned
            addr = align(addr, std::max(p2align, (uint8_t)12));
            SegmentMappingInfo info;
            info.srcSegment          = (uint8_t*)dylib.mh + fileOffset;
            info.segName             = segName;
            info.dstCacheAddress     = addr;
            info.dstCacheOffset      = (uint32_t)(addr - regions[2].address + regions[2].fileOffset);
            info.dstCacheSegmentSize = (uint32_t)align(sizeOfSections, 12);
            info.copySegmentSize     = (uint32_t)sizeOfSections;
            info.srcSegmentIndex     = segIndex;
            result[dylib.mh].push_back(info);
            addr += info.dstCacheSegmentSize;
        });
    }

    // layout all LINKEDIT segments (after other read-only segments)
    for (const DyldSharedCache::MappedMachO& dylib : dylibs) {
        dyld3::MachOParser parser(dylib.mh, true);
        parser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, uint32_t segIndex, uint64_t sizeOfSections, uint8_t p2align, bool& stop) {
            if ( protections != VM_PROT_READ )
                return;
            if ( strcmp(segName, "__LINKEDIT") != 0 )
                return;
            // Keep segments 4K or more aligned
            addr = align(addr, std::max(p2align, (uint8_t)12));
            SegmentMappingInfo info;
            info.srcSegment          = (uint8_t*)dylib.mh + fileOffset;
            info.segName             = segName;
            info.dstCacheAddress     = addr;
            info.dstCacheOffset      = (uint32_t)(addr - regions[2].address + regions[2].fileOffset);
            info.dstCacheSegmentSize = (uint32_t)align(sizeOfSections, 12);
            info.copySegmentSize     = (uint32_t)align(fileSize, 12);
            info.srcSegmentIndex     = segIndex;
            result[dylib.mh].push_back(info);
            addr += info.dstCacheSegmentSize;
        });
    }

    // add room for branch pool linkedits
    _branchPoolsLinkEditStartAddr = addr;
    addr += (_branchPoolStarts.size() * _archLayout->branchPoolLinkEditSize);

    // align r/o region end
    uint64_t endReadOnlyAddress = align(addr, _archLayout->sharedRegionAlignP2);
    regions[2].size  = endReadOnlyAddress - regions[2].address;
    _currentFileSize = regions[2].fileOffset + regions[2].size;

    // FIXME: Confirm these numbers for all platform/arch combos
    // assume LINKEDIT optimization reduces LINKEDITs to 40% of original size
    if ( _options.excludeLocalSymbols ) {
        _vmSize = regions[2].address + (regions[2].size * 2 / 5) - regions[0].address;
    }
    else {
        _vmSize = regions[2].address + (regions[2].size * 9 / 10) - regions[0].address;
    }

    // sort SegmentMappingInfo for each image to be in the same order as original segments
    for (auto& entry : result) {
        std::vector<SegmentMappingInfo>& infos = entry.second;
        std::sort(infos.begin(), infos.end(), [&](const SegmentMappingInfo& a, const SegmentMappingInfo& b) {
            return a.srcSegmentIndex < b.srcSegmentIndex;
        });
    }
    return result;
}
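
// Resulting cache layout: region[0] holds the header plus all __TEXT (r-x,
// with arm64 branch-island pools interleaved), region[1] packs __DATA_CONST,
// then other writable segments, then __DATA_DIRTY (rw-), and region[2] holds
// slide info, other read-only segments, and finally every __LINKEDIT (r--).
// On x86_64 the three regions are pinned at +0, +0x60000000, and +0xA0000000
// from sharedMemoryStart rather than packed back-to-back.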
uint64_t CacheBuilder::pathHash(const char* path)
{
    uint64_t sum = 0;
    for (const char* s=path; *s != '\0'; ++s)
        sum += sum*4 + *s;
    return sum;
}
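
// A simple multiplicative string hash (each step computes sum = 5*sum + c).
// It only needs to be deterministic: the result stands in for an inode number
// in the image table when dylibs are removed during mastering.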
void CacheBuilder::findDylibAndSegment(const void* contentPtr, std::string& foundDylibName, std::string& foundSegName)
{
    foundDylibName = "???";
    foundSegName   = "???";
    uint32_t cacheOffset = (uint32_t)((uint8_t*)contentPtr - (uint8_t*)_buffer);
    _buffer->forEachImage(^(const mach_header* mh, const char* installName) {
        dyld3::MachOParser parser(mh, true);
        parser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, bool& stop) {
            if ( (cacheOffset > fileOffset) && (cacheOffset < (fileOffset+vmSize)) ) {
                foundDylibName = installName;
                foundSegName   = segName;
                stop           = true;
            }
        });
    });
}
template <typename P>
bool CacheBuilder::makeRebaseChain(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyld_cache_slide_info2* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask  = (pint_t)(info->delta_mask);
    const pint_t   valueMask  = ~deltaMask;
    const pint_t   valueAdd   = (pint_t)(info->value_add);
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    const uint32_t maxDelta   = (uint32_t)(deltaMask >> deltaShift);

    pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        _diagnostics.error("rebase pointer does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                           lastLocationOffset, segName.c_str(), dylibName.c_str());
        return false;
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //warning("  too big delta = %d, lastOffset=0x%03X, offset=0x%03X", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( value == 0 ) {
                // Steal values of 0 to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //warning("  no way to make non-rebase delta chain, terminate off=0x%03X, new value=0x%08lX", lastLocationOffset, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
    }

    // we can make chain. go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t* prevLoc = (pint_t*)&pageContent[prevOffset];
    for (int n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t* nLoc = (pint_t*)&pageContent[nOffset];
        uint32_t delta2 = nOffset - prevOffset;
        pint_t value = (pint_t)P::getP(*prevLoc);
        pint_t newValue;
        if ( value == 0 )
            newValue = (delta2 << deltaShift);
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc = nLoc;
    }
    uint32_t delta3 = offset - prevOffset;
    pint_t value = (pint_t)P::getP(*prevLoc);
    pint_t newValue;
    if ( value == 0 )
        newValue = (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}
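
// Illustrative sketch (not part of the builder): how a consumer of this v2
// format could walk one chain at slide time. The helper name slidePageChainV2
// is hypothetical; the encoding mirrors makeRebaseChain()/addPageStarts():
// the byte distance to the next fixup lives in the delta_mask bits (scaled so
// that zero terminates the chain), firstOffset is (page_starts[n] & 0x3FFF)*4,
// and value_add is re-added to the masked-off low bits.
//
//     static void slidePageChainV2(uint8_t* page, uint32_t firstOffset,
//                                  const dyld_cache_slide_info2* info, uint64_t slide)
//     {
//         const uint64_t deltaMask  = info->delta_mask;
//         const uint64_t valueMask  = ~deltaMask;
//         const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
//         uint32_t offset = firstOffset;
//         uint32_t delta  = 1;
//         while ( delta != 0 ) {
//             uint64_t* loc = (uint64_t*)(page + offset);
//             uint64_t  raw = *loc;
//             delta = (uint32_t)((raw & deltaMask) >> deltaShift); // bytes to next fixup; 0 ends chain
//             uint64_t target = (raw & valueMask);
//             if ( target != 0 )
//                 target += info->value_add + slide;               // stolen zeros stay zero
//             *loc = target;
//             offset += delta;
//         }
//     }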
template <typename P>
void CacheBuilder::addPageStarts(uint8_t* pageContent, const bool bitmap[], const dyld_cache_slide_info2* info,
                                 std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask = (pint_t)(info->delta_mask);
    const pint_t   valueMask = ~deltaMask;
    const uint32_t pageSize  = info->page_size;
    const pint_t   valueAdd  = (pint_t)(info->value_add);

    uint16_t startValue = DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;
    for(int i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChain<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras > 0x3FFF ) {
                        _diagnostics.error("rebase overflow in page extras");
                        return;
                    }
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset];
        pint_t lastValue = (pint_t)P::getP(*lastLoc);
        pint_t newValue = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE_PAGE_ATTR_END;
    }
    pageStarts.push_back(startValue);
}
template <typename P>
void CacheBuilder::writeSlideInfoV2()
{
    typedef typename P::uint_t pint_t;
    typedef typename P::E E;
    const uint32_t pageSize = 4096;

    // build one 1024/4096 bool bitmap per page (4KB/16KB) of DATA
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)_buffer + _buffer->header.mappingOffset);
    uint8_t* const dataStart = (uint8_t*)_buffer + mappings[1].fileOffset;
    uint8_t* const dataEnd   = dataStart + mappings[1].size;
    unsigned pageCount = (unsigned)(mappings[1].size+pageSize-1)/pageSize;
    const long bitmapSize = pageCount*(pageSize/4)*sizeof(bool);
    bool* bitmap = (bool*)calloc(bitmapSize, 1);
    for (void* p : _pointersForASLR) {
        if ( (p < dataStart) || ( p > dataEnd) ) {
            _diagnostics.error("DATA pointer for sliding, out of range\n");
            free((void*)bitmap);
            return;
        }
        long byteOffset = (long)((uint8_t*)p - dataStart);
        if ( (byteOffset % 4) != 0 ) {
            _diagnostics.error("pointer not 4-byte aligned in DATA offset 0x%08lX\n", byteOffset);
            free((void*)bitmap);
            return;
        }
        long boolIndex = byteOffset / 4;
        // work around <rdar://24941083> by ignoring pointers to be slid that are NULL on disk
        if ( *((pint_t*)p) == 0 ) {
            std::string dylibName;
            std::string segName;
            findDylibAndSegment(p, dylibName, segName);
            _diagnostics.warning("NULL pointer asked to be slid in %s at DATA region offset 0x%04lX of %s", segName.c_str(), byteOffset, dylibName.c_str());
            continue;
        }
        bitmap[boolIndex] = true;
    }

    // fill in fixed info
    assert(_slideInfoFileOffset != 0);
    dyld_cache_slide_info2* info = (dyld_cache_slide_info2*)((uint8_t*)_buffer + _slideInfoFileOffset);
    info->version    = 2;
    info->page_size  = pageSize;
    info->delta_mask = _archLayout->pointerDeltaMask;
    info->value_add  = (sizeof(pint_t) == 8) ? 0 : _archLayout->sharedMemoryStart; // only value_add for 32-bit archs

    // set page starts and extras for each page
    std::vector<uint16_t> pageStarts;
    std::vector<uint16_t> pageExtras;
    pageStarts.reserve(pageCount);
    uint8_t* pageContent = dataStart;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < pageCount; ++i) {
        //warning("page[%d]", i);
        addPageStarts<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
        if ( _diagnostics.hasError() ) {
            free((void*)bitmap);
            return;
        }
        pageContent += pageSize;
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }
    free((void*)bitmap);

    // fill in computed info
    info->page_starts_offset = sizeof(dyld_cache_slide_info2);
    info->page_starts_count  = (unsigned)pageStarts.size();
    info->page_extras_offset = (unsigned)(sizeof(dyld_cache_slide_info2)+pageStarts.size()*sizeof(uint16_t));
    info->page_extras_count  = (unsigned)pageExtras.size();
    uint16_t* pageStartsBuffer = (uint16_t*)((char*)info + info->page_starts_offset);
    uint16_t* pageExtrasBuffer = (uint16_t*)((char*)info + info->page_extras_offset);
    for (unsigned i=0; i < pageStarts.size(); ++i)
        pageStartsBuffer[i] = pageStarts[i];
    for (unsigned i=0; i < pageExtras.size(); ++i)
        pageExtrasBuffer[i] = pageExtras[i];
    // update header with final size
    _buffer->header.slideInfoSize = align(info->page_extras_offset + pageExtras.size()*sizeof(uint16_t), _archLayout->sharedRegionAlignP2);
    if ( _buffer->header.slideInfoSize > _slideInfoBufferSizeAllocated ) {
        _diagnostics.error("kernel slide info overflow buffer");
    }
    //warning("pageCount=%u, page_starts_count=%lu, page_extras_count=%lu", pageCount, pageStarts.size(), pageExtras.size());
}
void CacheBuilder::writeSlideInfoV1()
{
    // build one 128-byte bitmap per page (4096) of DATA
    uint8_t* const dataStart = (uint8_t*)_buffer.get() + regions[1].fileOffset;
    uint8_t* const dataEnd   = dataStart + regions[1].size;
    const long bitmapSize    = (dataEnd - dataStart)/(4*8);
    uint8_t* bitmap          = (uint8_t*)calloc(bitmapSize, 1);
    for (void* p : _pointersForASLR) {
        if ( (p < dataStart) || ( p > dataEnd) )
            terminate("DATA pointer for sliding, out of range\n");
        long offset = (long)((uint8_t*)p - dataStart);
        if ( (offset % 4) != 0 )
            terminate("pointer not 4-byte aligned in DATA offset 0x%08lX\n", offset);
        long byteIndex = offset / (4*8);
        long bitInByte = (offset % 32) >> 2;
        bitmap[byteIndex] |= (1 << bitInByte);
    }

    // allocate worst case size block of all slide info
    const unsigned entry_size = 4096/(8*4); // 8 bits per byte, possible pointer every 4 bytes.
    const unsigned toc_count  = (unsigned)bitmapSize/entry_size;
    dyld_cache_slide_info* slideInfo = (dyld_cache_slide_info*)((uint8_t*)_buffer + _slideInfoFileOffset);
    slideInfo->version        = 1;
    slideInfo->toc_offset     = sizeof(dyld_cache_slide_info);
    slideInfo->toc_count      = toc_count;
    slideInfo->entries_offset = (slideInfo->toc_offset+2*toc_count+127)&(-128);
    slideInfo->entries_count  = 0;
    slideInfo->entries_size   = entry_size;
    // append each unique entry
    const dyldCacheSlideInfoEntry* bitmapAsEntries = (dyldCacheSlideInfoEntry*)bitmap;
    dyldCacheSlideInfoEntry* const entriesInSlidInfo = (dyldCacheSlideInfoEntry*)((char*)slideInfo+slideInfo->entries_offset());
    int entry_count = 0;
    for (int i=0; i < toc_count; ++i) {
        const dyldCacheSlideInfoEntry* thisEntry = &bitmapAsEntries[i];
        // see if it is the same as one already added
        bool found = false;
        for (int j=0; j < entry_count; ++j) {
            if ( memcmp(thisEntry, &entriesInSlidInfo[j], entry_size) == 0 ) {
                slideInfo->set_toc(i, j);
                found = true;
                break;
            }
        }
        if ( !found ) {
            // append to end
            memcpy(&entriesInSlidInfo[entry_count], thisEntry, entry_size);
            slideInfo->set_toc(i, entry_count++);
        }
    }
    slideInfo->entries_count = entry_count;
    ::free((void*)bitmap);

    _buffer.header->slideInfoSize = align(slideInfo->entries_offset + entry_count*entry_size, _archLayout->sharedRegionAlignP2);
}
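
// fipsSign seals libcorecrypto for its FIPS power-on self test: it writes an
// HMAC-SHA256 of the dylib's __TEXT,__text section into the cache buffer at
// the dylib's _fipspost_precalc_hmac symbol, so the library can later verify
// its own text at runtime.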
void CacheBuilder::fipsSign() {
    __block bool found = false;
    _buffer->forEachImage(^(const mach_header* mh, const char* installName) {
        __block void* hash_location = nullptr;
        // Return if this is not corecrypto
        if (strcmp(installName, "/usr/lib/system/libcorecrypto.dylib") != 0) {
            return;
        }
        found = true;
        auto parser = dyld3::MachOParser(mh, true);
        parser.forEachLocalSymbol(_diagnostics, ^(const char* symbolName, uint64_t n_value, uint8_t n_type, uint8_t n_sect, uint16_t n_desc, bool& stop) {
            if (strcmp(symbolName, "_fipspost_precalc_hmac") != 0)
                return;
            hash_location = (void*)(n_value - _archLayout->sharedMemoryStart + (uintptr_t)_buffer);
            stop = true;
        });

        // Bail out if we did not find the symbol
        if (hash_location == nullptr) {
            _diagnostics.warning("Could not find _fipspost_precalc_hmac, skipping FIPS sealing");
            return;
        }

        parser.forEachSection(^(const char* segName, const char* sectionName, uint32_t flags, const void* content, size_t size, bool illegalSectionSize, bool& stop) {
            // FIXME: If we ever implement userspace __TEXT_EXEC this will need to be updated
            if ( (strcmp(segName, "__TEXT") != 0) || (strcmp(sectionName, "__text") != 0) ) {
                return;
            }

            if (illegalSectionSize) {
                _diagnostics.error("FIPS section %s/%s extends beyond the end of the segment", segName, sectionName);
                return;
            }

            // We have _fipspost_precalc_hmac and __TEXT,__text, seal it
            unsigned char hmac_key = 0;
            CCHmac(kCCHmacAlgSHA256, &hmac_key, 1, content, size, hash_location);
            stop = true;
        });
    });

    if (!found) {
        _diagnostics.warning("Could not find /usr/lib/system/libcorecrypto.dylib, skipping FIPS sealing");
    }
}
void CacheBuilder::codeSign()
{
    uint8_t  dscHashType;
    uint8_t  dscHashSize;
    uint32_t dscDigestFormat;
    bool     agile = false;

    // select which codesigning hash
    switch (_options.codeSigningDigestMode) {
        case DyldSharedCache::Agile:
            agile = true;
            // Fall through to SHA1, because the main code directory remains SHA1 for compatibility.
        case DyldSharedCache::SHA1only:
            dscHashType     = CS_HASHTYPE_SHA1;
            dscHashSize     = CS_HASH_SIZE_SHA1;
            dscDigestFormat = kCCDigestSHA1;
            break;
        case DyldSharedCache::SHA256only:
            dscHashType     = CS_HASHTYPE_SHA256;
            dscHashSize     = CS_HASH_SIZE_SHA256;
            dscDigestFormat = kCCDigestSHA256;
            break;
        default:
            _diagnostics.error("codeSigningDigestMode has unknown, unexpected value %d, bailing out.",
                               _options.codeSigningDigestMode);
            return;
    }
    std::string cacheIdentifier = "com.apple.dyld.cache." + _options.archName;
    if ( _options.dylibsRemovedDuringMastering ) {
        if ( _options.optimizeStubs )
            cacheIdentifier = "com.apple.dyld.cache." + _options.archName + ".release";
        else
            cacheIdentifier = "com.apple.dyld.cache." + _options.archName + ".development";
    }
    // get pointers into shared cache buffer
    size_t         inBbufferSize = _currentFileSize;
    const uint8_t* inBuffer      = (uint8_t*)_buffer;
    uint8_t*       csBuffer      = (uint8_t*)_buffer+inBbufferSize;

    // layout code signature contents
    uint32_t blobCount     = agile ? 4 : 3;
    size_t   idSize        = cacheIdentifier.size()+1; // +1 for terminating 0
    uint32_t slotCount     = (uint32_t)((inBbufferSize + CS_PAGE_SIZE - 1) / CS_PAGE_SIZE);
    uint32_t xSlotCount    = CSSLOT_REQUIREMENTS;
    size_t   idOffset      = offsetof(CS_CodeDirectory, end_withExecSeg);
    size_t   hashOffset    = idOffset+idSize + dscHashSize*xSlotCount;
    size_t   hash256Offset = idOffset+idSize + CS_HASH_SIZE_SHA256*xSlotCount;
    size_t   cdSize        = hashOffset + (slotCount * dscHashSize);
    size_t   cd256Size     = agile ? hash256Offset + (slotCount * CS_HASH_SIZE_SHA256) : 0;
    size_t   reqsSize      = 12;
    size_t   cmsSize       = sizeof(CS_Blob);
    size_t   cdOffset      = sizeof(CS_SuperBlob) + blobCount*sizeof(CS_BlobIndex);
    size_t   cd256Offset   = cdOffset + cdSize;
    size_t   reqsOffset    = cd256Offset + cd256Size; // equals cdOffset + cdSize if not agile
    size_t   cmsOffset     = reqsOffset + reqsSize;
    size_t   sbSize        = cmsOffset + cmsSize;
    size_t   sigSize       = align(sbSize, 14);       // keep whole cache 16KB aligned

    if ( _currentFileSize+sigSize > _allocatedBufferSize ) {
        _diagnostics.error("cache buffer too small to hold code signature (buffer size=%lldMB, signature size=%ldMB, free space=%lldMB)",
                           _allocatedBufferSize/1024/1024, sigSize/1024/1024, (_allocatedBufferSize-_currentFileSize)/1024/1024);
        return;
    }
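
    // Worked example of the layout math above (assuming CS_PAGE_SIZE is 4096):
    // a non-agile SHA-256 signature over a 1GB cache gives slotCount = 262144,
    // so the code directory carries 262144 * 32 = 8MB of page hashes, preceded
    // by the identifier string and CSSLOT_REQUIREMENTS special-slot hashes.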
    // create overall code signature which is a superblob
    CS_SuperBlob* sb = reinterpret_cast<CS_SuperBlob*>(csBuffer);
    sb->magic           = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
    sb->length          = htonl(sbSize);
    sb->count           = htonl(blobCount);
    sb->index[0].type   = htonl(CSSLOT_CODEDIRECTORY);
    sb->index[0].offset = htonl(cdOffset);
    sb->index[1].type   = htonl(CSSLOT_REQUIREMENTS);
    sb->index[1].offset = htonl(reqsOffset);
    sb->index[2].type   = htonl(CSSLOT_CMS_SIGNATURE);
    sb->index[2].offset = htonl(cmsOffset);
    if ( agile ) {
        sb->index[3].type   = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES + 0);
        sb->index[3].offset = htonl(cd256Offset);
    }
    // fill in empty requirements
    CS_RequirementsBlob* reqs = (CS_RequirementsBlob*)(((char*)sb)+reqsOffset);
    reqs->magic  = htonl(CSMAGIC_REQUIREMENTS);
    reqs->length = htonl(sizeof(CS_RequirementsBlob));
    reqs->data   = 0;
    // initialize fixed fields of Code Directory
    CS_CodeDirectory* cd = (CS_CodeDirectory*)(((char*)sb)+cdOffset);
    cd->magic           = htonl(CSMAGIC_CODEDIRECTORY);
    cd->length          = htonl(cdSize);
    cd->version         = htonl(0x20400);               // supports exec segment
    cd->flags           = htonl(kSecCodeSignatureAdhoc);
    cd->hashOffset      = htonl(hashOffset);
    cd->identOffset     = htonl(idOffset);
    cd->nSpecialSlots   = htonl(xSlotCount);
    cd->nCodeSlots      = htonl(slotCount);
    cd->codeLimit       = htonl(inBbufferSize);
    cd->hashSize        = dscHashSize;
    cd->hashType        = dscHashType;
    cd->platform        = 0;                            // not platform binary
    cd->pageSize        = __builtin_ctz(CS_PAGE_SIZE);  // log2(CS_PAGE_SIZE);
    cd->spare2          = 0;                            // unused (must be zero)
    cd->scatterOffset   = 0;                            // not supported anymore
    cd->teamOffset      = 0;                            // no team ID
    cd->spare3          = 0;                            // unused (must be zero)
    cd->codeLimit64     = 0;                            // falls back to codeLimit

    // executable segment info
    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)_buffer + _buffer->header.mappingOffset);
    cd->execSegBase     = htonll(mappings[0].fileOffset); // base of TEXT segment
    cd->execSegLimit    = htonll(mappings[0].size);       // size of TEXT segment
    cd->execSegFlags    = 0;                              // not a main binary

    // initialize dynamic fields of Code Directory
    strcpy((char*)cd + idOffset, cacheIdentifier.c_str());
    // add special slot hashes
    uint8_t* hashSlot     = (uint8_t*)cd + hashOffset;
    uint8_t* reqsHashSlot = &hashSlot[-CSSLOT_REQUIREMENTS*dscHashSize];
    CCDigest(dscDigestFormat, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHashSlot);

    CS_CodeDirectory* cd256;
    uint8_t*          hash256Slot;
    uint8_t*          reqsHash256Slot;
    if ( agile ) {
        // Note that the assumption here is that the size up to the hashes is the same as for
        // sha1 code directory, and that they come last, after everything else.
        cd256 = (CS_CodeDirectory*)(((char*)sb)+cd256Offset);
        cd256->magic           = htonl(CSMAGIC_CODEDIRECTORY);
        cd256->length          = htonl(cd256Size);
        cd256->version         = htonl(0x20400);               // supports exec segment
        cd256->flags           = htonl(kSecCodeSignatureAdhoc);
        cd256->hashOffset      = htonl(hash256Offset);
        cd256->identOffset     = htonl(idOffset);
        cd256->nSpecialSlots   = htonl(xSlotCount);
        cd256->nCodeSlots      = htonl(slotCount);
        cd256->codeLimit       = htonl(inBbufferSize);
        cd256->hashSize        = CS_HASH_SIZE_SHA256;
        cd256->hashType        = CS_HASHTYPE_SHA256;
        cd256->platform        = 0;                            // not platform binary
        cd256->pageSize        = __builtin_ctz(CS_PAGE_SIZE);  // log2(CS_PAGE_SIZE);
        cd256->spare2          = 0;                            // unused (must be zero)
        cd256->scatterOffset   = 0;                            // not supported anymore
        cd256->teamOffset      = 0;                            // no team ID
        cd256->spare3          = 0;                            // unused (must be zero)
        cd256->codeLimit64     = 0;                            // falls back to codeLimit

        // executable segment info
        cd256->execSegBase  = cd->execSegBase;
        cd256->execSegLimit = cd->execSegLimit;
        cd256->execSegFlags = cd->execSegFlags;

        // initialize dynamic fields of Code Directory
        strcpy((char*)cd256 + idOffset, cacheIdentifier.c_str());

        // add special slot hashes
        hash256Slot     = (uint8_t*)cd256 + hash256Offset;
        reqsHash256Slot = &hash256Slot[-CSSLOT_REQUIREMENTS*CS_HASH_SIZE_SHA256];
        CCDigest(kCCDigestSHA256, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHash256Slot);
    }
    else {
        cd256           = NULL;
        hash256Slot     = NULL;
        reqsHash256Slot = NULL;
    }
    // fill in empty CMS blob for ad-hoc signing
    CS_Blob* cms = (CS_Blob*)(((char*)sb)+cmsOffset);
    cms->magic  = htonl(CSMAGIC_BLOBWRAPPER);
    cms->length = htonl(sizeof(CS_Blob));

    // alter header of cache to record size and location of code signature
    // do this *before* hashing each page
    _buffer->header.codeSignatureOffset = inBbufferSize;
    _buffer->header.codeSignatureSize   = sigSize;
    // compute hashes
    const uint8_t* code = inBuffer;
    for (uint32_t i=0; i < slotCount; ++i) {
        CCDigest(dscDigestFormat, code, CS_PAGE_SIZE, hashSlot);
        hashSlot += dscHashSize;
        if ( agile ) {
            CCDigest(kCCDigestSHA256, code, CS_PAGE_SIZE, hash256Slot);
            hash256Slot += CS_HASH_SIZE_SHA256;
        }
        code += CS_PAGE_SIZE;
    }

    // hash of entire code directory (cdHash) uses same hash as each page
    uint8_t fullCdHash[dscHashSize];
    CCDigest(dscDigestFormat, (const uint8_t*)cd, cdSize, fullCdHash);
    // Note: cdHash is defined as first 20 bytes of hash
    memcpy(_cdHashFirst, fullCdHash, 20);
    if ( agile ) {
        uint8_t fullCdHash256[CS_HASH_SIZE_SHA256];
        CCDigest(kCCDigestSHA256, (const uint8_t*)cd256, cd256Size, fullCdHash256);
        // Note: cdHash is defined as first 20 bytes of hash, even for sha256
        memcpy(_cdHashSecond, fullCdHash256, 20);
    }
    else {
        memset(_cdHashSecond, 0, 20);
    }

    // increase file size to include newly appended code signature
    _currentFileSize += sigSize;
}
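
// Accessors for the computed cdHashes as lowercase hex strings; a cdHash is
// the first 20 bytes of the digest of a code directory, for both the SHA-1
// and SHA-256 directories.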
const bool CacheBuilder::agileSignature()
{
    return _options.codeSigningDigestMode == DyldSharedCache::Agile;
}

static const std::string cdHash(uint8_t hash[20])
{
    char buff[48];
    for (int i = 0; i < 20; ++i)
        sprintf(&buff[2*i], "%2.2x", hash[i]);
    return buff;
}

const std::string CacheBuilder::cdHashFirst()
{
    return cdHash(_cdHashFirst);
}

const std::string CacheBuilder::cdHashSecond()
{
    return cdHash(_cdHashSecond);
}
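
// The dyld3 ImageGroup blobs and program closures below are appended to the
// read-only region of the cache (mappings[2]); the cache header records each
// blob's address within that mapping plus its size so dyld can locate it.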
void CacheBuilder::addCachedDylibsImageGroup(dyld3::ImageProxyGroup* dylibGroup)
{
    const dyld3::launch_cache::binary_format::ImageGroup* groupBinary = dylibGroup->makeImageGroupBinary(_diagnostics, _s_neverStubEliminate);
    if ( _diagnostics.hasError() )
        return;

    dyld3::launch_cache::ImageGroup group(groupBinary);
    size_t groupSize = group.size();

    if ( _currentFileSize+groupSize > _allocatedBufferSize ) {
        _diagnostics.error("cache buffer too small to hold group[0] info (buffer size=%lldMB, group size=%ldMB, free space=%lldMB)",
                           _allocatedBufferSize/1024/1024, groupSize/1024/1024, (_allocatedBufferSize-_currentFileSize)/1024/1024);
        return;
    }

    // append ImageGroup data to read-only region of cache
    uint8_t* loc = (uint8_t*)_buffer + _currentFileSize;
    memcpy(loc, groupBinary, groupSize);
    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)_buffer + _buffer->header.mappingOffset);
    _buffer->header.dylibsImageGroupAddr = mappings[2].address + (_currentFileSize - mappings[2].fileOffset);
    _buffer->header.dylibsImageGroupSize = (uint32_t)groupSize;
    _currentFileSize += groupSize;
    free((void*)groupBinary);
}
void CacheBuilder::addCachedOtherDylibsImageGroup(dyld3::ImageProxyGroup* otherGroup)
{
    const dyld3::launch_cache::binary_format::ImageGroup* groupBinary = otherGroup->makeImageGroupBinary(_diagnostics);
    if ( _diagnostics.hasError() )
        return;

    dyld3::launch_cache::ImageGroup group(groupBinary);
    size_t groupSize = group.size();

    if ( _currentFileSize+groupSize > _allocatedBufferSize ) {
        _diagnostics.error("cache buffer too small to hold group[1] info (buffer size=%lldMB, group size=%ldMB, free space=%lldMB)",
                           _allocatedBufferSize/1024/1024, groupSize/1024/1024, (_allocatedBufferSize-_currentFileSize)/1024/1024);
        return;
    }

    // append ImageGroup data to read-only region of cache
    uint8_t* loc = (uint8_t*)_buffer + _currentFileSize;
    memcpy(loc, groupBinary, groupSize);
    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)_buffer + _buffer->header.mappingOffset);
    _buffer->header.otherImageGroupAddr = mappings[2].address + (_currentFileSize - mappings[2].fileOffset);
    _buffer->header.otherImageGroupSize = (uint32_t)groupSize;
    _currentFileSize += groupSize;
    free((void*)groupBinary);
}
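
// addClosures copies every pre-built launch closure into the read-only
// region, then emits a trie mapping each program's path to the offset of its
// closure, so dyld can find a program's closure with a single trie lookup.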
void CacheBuilder::addClosures(const std::map<std::string, const dyld3::launch_cache::binary_format::Closure*>& closures)
{
    // preflight space needed
    size_t closuresSpace = 0;
    for (const auto& entry : closures) {
        dyld3::launch_cache::Closure closure(entry.second);
        closuresSpace += closure.size();
    }
    size_t freeSpace = _allocatedBufferSize - _currentFileSize;
    if ( closuresSpace > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold all closures (buffer size=%lldMB, closures size=%ldMB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, closuresSpace/1024/1024, freeSpace/1024/1024);
        return;
    }

    dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)_buffer + _buffer->header.mappingOffset);
    _buffer->header.progClosuresAddr = mappings[2].address + (_currentFileSize - mappings[2].fileOffset);
    uint8_t* closuresBase = (uint8_t*)_buffer + _currentFileSize;
    std::vector<DylibIndexTrie::Entry> closureEntrys;
    uint32_t currentClosureOffset = 0;
    for (const auto& entry : closures) {
        const dyld3::launch_cache::binary_format::Closure* closBuf = entry.second;
        closureEntrys.push_back(DylibIndexTrie::Entry(entry.first, DylibIndex(currentClosureOffset)));
        dyld3::launch_cache::Closure closure(closBuf);
        size_t size = closure.size();
        assert((size % 4) == 0);
        memcpy(closuresBase+currentClosureOffset, closBuf, size);
        currentClosureOffset += size;
        free((void*)closBuf);
    }
    _buffer->header.progClosuresSize = currentClosureOffset;
    _currentFileSize += currentClosureOffset;
    freeSpace = _allocatedBufferSize - _currentFileSize;

    // build trie of indexes into closures list
    DylibIndexTrie closureTrie(closureEntrys);
    std::vector<uint8_t> trieBytes;
    closureTrie.emit(trieBytes);
    while ( (trieBytes.size() % 8) != 0 )
        trieBytes.push_back(0);
    if ( trieBytes.size() > freeSpace ) {
        _diagnostics.error("cache buffer too small to hold all closures trie (buffer size=%lldMB, trie size=%ldMB, free space=%ldMB)",
                           _allocatedBufferSize/1024/1024, trieBytes.size()/1024/1024, freeSpace/1024/1024);
        return;
    }
    memcpy((uint8_t*)_buffer + _currentFileSize, &trieBytes[0], trieBytes.size());
    _buffer->header.progClosuresTrieAddr = mappings[2].address + (_currentFileSize - mappings[2].fileOffset);
    _buffer->header.progClosuresTrieSize = trieBytes.size();
    _currentFileSize += trieBytes.size();
}