/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include "mega-dylib-utils.h"
#include "MachOFileAbstraction.hpp"
#include "FileAbstraction.hpp"

#include "dyld_cache_config.h"

#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <CommonCrypto/CommonDigest.h>
#include <CommonCrypto/CommonDigestSPI.h>

#include <algorithm>
#include <fstream>
#include <string>
#include <vector>
#include <unordered_map>
#include <unordered_set>

#include "MachOProxy.h"

#include "OptimizerBranches.h"

#include "CacheFileAbstraction.hpp"
#include "CodeSigningTypes.h"
uint64_t sharedRegionStartExecutableAddress(ArchPair arch)
{
    switch ( arch.arch ) {
        case CPU_TYPE_ARM:
            return ARM_SHARED_REGION_START;
        case CPU_TYPE_I386:
            return SHARED_REGION_BASE_I386;
        case CPU_TYPE_X86_64:
            return SHARED_REGION_BASE_X86_64;
        case CPU_TYPE_ARM64:
            return ARM64_SHARED_REGION_START;
        default:
            terminate("unsupported arch 0x%08X", arch.arch);
    }
}
uint64_t sharedRegionStartWriteableAddress(ArchPair arch, uint64_t textEndAddress)
{
    switch ( arch.arch ) {
        case CPU_TYPE_I386:
        case CPU_TYPE_X86_64:
            // more efficient if code and data never in same 2MB chunk
            return textEndAddress + 0x04000000;
        case CPU_TYPE_ARM:
            return textEndAddress;
        case CPU_TYPE_ARM64:
            return textEndAddress + 32*1024*1024; // <rdar://problem/18564532> Add 32MB padding before arm64 dyld shared cache R/W region
        default:
            terminate("unsupported arch 0x%08X", arch.arch);
    }
}
uint64_t sharedRegionStartReadOnlyAddress(ArchPair arch, uint64_t dataEndAddress, uint64_t textEndAddress)
{
    switch ( arch.arch ) {
        case CPU_TYPE_I386:
        case CPU_TYPE_X86_64:
            // more efficient if code and data never in same 2MB chunk
            return dataEndAddress + 0x04000000;
        case CPU_TYPE_ARM:
            return dataEndAddress;
        case CPU_TYPE_ARM64:
            return dataEndAddress + 32*1024*1024; // <rdar://problem/18564532> Add 32MB padding before arm64 dyld shared cache R/W region
        default:
            terminate("unsupported arch 0x%08X", arch.arch);
    }
}
uint8_t sharedRegionRegionAlignment(ArchPair arch) {
    switch ( arch.arch ) {
        case CPU_TYPE_ARM:
            return ARM_SHARED_REGION_SIZE;
        case CPU_TYPE_X86_64:
        default:
            terminate("unsupported arch 0x%08X", arch.arch);
    }
}
uint64_t sharedRegionRegionSize(ArchPair arch) {
    switch ( arch.arch ) {
        case CPU_TYPE_I386:
            return SHARED_REGION_SIZE_I386;
        case CPU_TYPE_X86_64:
            return SHARED_REGION_SIZE_X86_64;
        case CPU_TYPE_ARM:
            return ARM_SHARED_REGION_SIZE;
        case CPU_TYPE_ARM64:
            return ARM64_SHARED_REGION_SIZE;
        default:
            terminate("unsupported arch 0x%08X", arch.arch);
    }
}
static const std::tuple<const char* const, const char* const, const ArchPair> gArchitectures[] = {
    {"i386",    nullptr,  ArchPair( CPU_TYPE_I386,   CPU_SUBTYPE_I386_ALL )},
    {"x86_64",  nullptr,  ArchPair( CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_ALL )},
    {"x86_64h", "x86_64", ArchPair( CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_H )},
    {"armv4t",  nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V4T )},
    {"armv5",   nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V5TEJ )},
    {"armv6",   nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V6 )},
    {"armv7",   nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V7 )},
    {"armv7f",  nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V7F )},
    {"armv7k",  nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V7K )},
    {"armv7s",  "armv7",  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V7S )},
    {"arm64",   nullptr,  ArchPair( CPU_TYPE_ARM64,  CPU_SUBTYPE_ARM64_ALL )},
};
ArchPair archForString(const std::string& archStr) {
    for (auto& a : gArchitectures) {
        if ( std::get<0>( a ) == archStr ) return std::get<2>( a );
    }
    terminate("unknown architecture %s", archStr.c_str());
}
std::string stringForArch(ArchPair arch, bool allowUnknown) {
    for (auto& a : gArchitectures) {
        // FIXME LIB64 is set on some binaries and not others
        if ( std::get<2>( a ).arch == arch.arch && std::get<2>( a ).subtype == ( arch.subtype & ~CPU_SUBTYPE_MASK ) )
            return std::get<0>( a );
    }

    auto unknownString =
        "unrecognized cpu type " + std::to_string(arch.arch) +
        " subtype " + std::to_string(arch.subtype);
    if (allowUnknown) return unknownString;
    else terminate("%s", unknownString.c_str());
}
std::string fallbackArchStringForArchString( const std::string& archStr ) {
    for ( auto& a : gArchitectures ) {
        if ( std::get<0>( a ) == archStr && std::get<1>( a ) != nullptr ) {
            return std::get<1>( a );
        }
    }
    return "";
}
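
// Illustrative example (not part of the original source): how the table above is
// meant to be used. archForString("armv7s") yields ArchPair(CPU_TYPE_ARM,
// CPU_SUBTYPE_ARM_V7S); stringForArch() maps that pair back to "armv7s"; and
// fallbackArchStringForArchString("armv7s") returns "armv7", the second tuple
// element, so a cache build can fall back to a compatible slice when no exact
// match exists. Architectures whose fallback entry is nullptr have no fallback.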
SharedCache::SharedCache(Manifest& manifest,
                         const std::string& configuration, const std::string& architecture)
    : _manifest(manifest)
    , _arch(archForString(architecture))
    , _archManifest(manifest.configuration(configuration).architecture(architecture))
    , _slideInfoFileOffset(0)
    , _slideInfoBufferSize(0)
{
    auto maxCacheVMSize = sharedRegionRegionSize(_arch);

    for (auto& includedIdentifier : _archManifest.results.dylibs) {
        if (includedIdentifier.second.included) {
            //assert(manifest.dylibs.count(includedDylib.first) > 0);
            //assert(manifest.dylibs.find(includedDylib.first)->second.proxies.count(architecture) > 0);
            MachOProxy* proxy = MachOProxy::forIdentifier(includedIdentifier.first, architecture);
            assert(proxy != nullptr);
            assert(proxy->isDylib());
            _dylibs.push_back(proxy);
        }
    }

    // <rdar://problem/21317611> error out instead of crash if cache has no dylibs
    if ( _dylibs.size() < 30 ) // FIXME: plist should specify required vs optional dylibs
        terminate("missing required minimum set of dylibs");

    for (auto &dylib : _dylibs) {
        _segmentMap[dylib].reserve(dylib->segments.size());
        for (const auto& seg : dylib->segments)
            _segmentMap[dylib].push_back(&seg);
        _aliasCount += dylib->installNameAliases.size();
    }

    sortDylibs(_manifest.dylibOrderFile());
    if (!_manifest.dirtyDataOrderFile().empty())
        loadDirtyDataOrderFile(_manifest.dirtyDataOrderFile());

    assignSegmentAddresses();
    if ( _vmSize > maxCacheVMSize )
        verboseLog("%s cache overflow. %lluMB (max %lluMB)", archName().c_str(), _vmSize/1024/1024, maxCacheVMSize/1024/1024);
    while (_vmSize > maxCacheVMSize) {
        auto evictedDylib = manifest.removeLargestLeafDylib( configuration, architecture );
        _dylibs.erase( std::remove( _dylibs.begin(), _dylibs.end(), evictedDylib ), _dylibs.end() );
        _aliasCount -= evictedDylib->installNameAliases.size();
        assignSegmentAddresses();
    }
}
// There is an order file specifying the order in which dylibs are laid out in
// general, as well as an order file specifying the order in which __DATA_DIRTY
// segments are laid out in particular.
//
// The syntax is one dylib (install name) per line. Blank lines are ignored.
// Comments start with the # character.

static std::unordered_map<std::string, uint32_t> loadOrderFile(const std::string& orderFile) {
    std::unordered_map<std::string, uint32_t> order;

    std::ifstream myfile(orderFile);
    if ( myfile.is_open() ) {
        uint32_t count = 0;
        std::string line;
        while ( std::getline(myfile, line) ) {
            size_t pos = line.find('#');
            if ( pos != std::string::npos )
                line.resize(pos);
            while ( !line.empty() && isspace(line.back()) ) {
                line.pop_back();
            }
            if ( !line.empty() )
                order[line] = count++;
        }
        myfile.close();
    } else {
        warning("could not load orderfile '%s'", orderFile.c_str());
    }

    return order;
}
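
// Illustrative example (not part of the original source): an order file as parsed
// by loadOrderFile() is plain text, one install name per line, with '#' comments
// and blank lines ignored:
//
//     # dylibs most commonly loaded first
//     /usr/lib/libSystem.B.dylib
//     /usr/lib/libobjc.A.dylib
//
// For that input the returned map would be
//     { "/usr/lib/libSystem.B.dylib" -> 0, "/usr/lib/libobjc.A.dylib" -> 1 }
// and lower numbers sort earlier in sortDylibs() below.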
void SharedCache::loadDirtyDataOrderFile(const std::string& dirtyDataOrderFile) {
    _dataDirtySegsOrder = loadOrderFile(dirtyDataOrderFile);
}
void SharedCache::sortDylibs(const std::string& dylibOrderFile) {
    std::unordered_map<std::string, uint32_t> dylibOrder;
    if ( !dylibOrderFile.empty() )
        dylibOrder = loadOrderFile(dylibOrderFile);

    std::sort(_dylibs.begin(), _dylibs.end(), [&](const MachOProxy* a,
                                                  const MachOProxy* b) {
        const std::string& pathA = a->installName;
        const std::string& pathB = b->installName;

        const auto& orderA = dylibOrder.find(pathA);
        const auto& orderB = dylibOrder.find(pathB);
        bool foundA = (orderA != dylibOrder.end());
        bool foundB = (orderB != dylibOrder.end());

        // Order all dylibs specified in the order file first, in the order specified in
        // the file, followed by any other dylibs in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;
        else
            return pathA < pathB;
    });
}
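
// Illustrative example (not part of the original source): with a dylib order file
// containing only /usr/lib/libSystem.B.dylib, the dylibs
//     { "/usr/lib/libz.1.dylib", "/usr/lib/libSystem.B.dylib", "/usr/lib/libc++.1.dylib" }
// sort to
//     { "/usr/lib/libSystem.B.dylib", "/usr/lib/libc++.1.dylib", "/usr/lib/libz.1.dylib" }
// -- listed dylibs first in file order, then everything else lexicographically.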
void SharedCache::buildUnoptimizedCache(void) {
    _buffer = std::shared_ptr<void>(calloc(_fileSize, 1), free);
    writeCacheSegments();
}
template <typename P>
void SharedCache::buildForDevelopment(const std::string& cachePath) {
    typedef typename P::E E;
    std::vector<uint64_t> emptyBranchPoolOffsets;
    buildUnoptimizedCache();
    optimizeObjC(false/*not production*/);
    if (_manifest.platform() == "osx") {
        optimizeLinkedit(false, false, emptyBranchPoolOffsets);
    }
    else {
        optimizeLinkedit(true, false, emptyBranchPoolOffsets);
    }

    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_cacheType(kDyldSharedCacheTypeDevelopment);
    recomputeCacheUUID();

    // Calculate the VMSize of the resulting cache
    uint64_t endAddr = 0;

    forEachRegion([&] (void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
        if (vmAddr+size > endAddr)
            endAddr = vmAddr+size;
    });
    _vmSize = endAddr - sharedRegionStartExecutableAddress(_arch);

    if (_manifest.platform() == "osx") {
        appendCodeSignature("release");
    }
    else {
        appendCodeSignature("development");
    }
}
template <typename P>
void SharedCache::buildForProduction(const std::string& cachePath) {
    typedef typename P::E E;
    buildUnoptimizedCache();
    optimizeObjC(true/*production*/);
    uint64_t cacheStartAddress = sharedRegionStartExecutableAddress(_arch);

    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_cacheType(kDyldSharedCacheTypeProduction);

    // build vector of branch pool addresses
    std::vector<uint64_t> branchPoolStartAddrs;
    std::vector<uint64_t> branchPoolOffsets;
    const uint64_t* p = (uint64_t*)((uint8_t*)_buffer.get() + header->branchPoolsOffset());
    for (int i=0; i < header->branchPoolsCount(); ++i) {
        uint64_t poolAddr = LittleEndian::get64(p[i]);
        branchPoolStartAddrs.push_back(poolAddr);
        branchPoolOffsets.push_back(poolAddr - cacheStartAddress);
    }

    bypassStubs(branchPoolStartAddrs);
    optimizeLinkedit(true, true, branchPoolOffsets);

    recomputeCacheUUID();

    // Calculate the VMSize of the resulting cache
    uint64_t endAddr = 0;

    forEachRegion([&] (void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
        if (vmAddr+size > endAddr)
            endAddr = vmAddr+size;
    });
    _vmSize = endAddr - cacheStartAddress;

    appendCodeSignature("release");
}
bool SharedCache::writeCacheMapFile(const std::string& mapPath) {
    FILE* fmap = ::fopen(mapPath.c_str(), "w");
    if ( fmap == nullptr )
        return false;

    std::vector<uint64_t> regionStartAddresses;
    std::vector<uint64_t> regionSizes;
    std::vector<uint64_t> regionFileOffsets;

    forEachRegion([&] (void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
        regionStartAddresses.push_back(vmAddr);
        regionSizes.push_back(size);
        regionFileOffsets.push_back((uint8_t*)content - (uint8_t*)_buffer.get());
        const char* prot = "RW";
        if ( permissions == (VM_PROT_EXECUTE|VM_PROT_READ) )
            prot = "EX";
        else if ( permissions == VM_PROT_READ )
            prot = "RO";
        if ( size > 1024*1024 )
            fprintf(fmap, "mapping %s %4lluMB 0x%0llX -> 0x%0llX\n", prot, size/(1024*1024), vmAddr, vmAddr+size);
        else
            fprintf(fmap, "mapping %s %4lluKB 0x%0llX -> 0x%0llX\n", prot, size/1024, vmAddr, vmAddr+size);
    });

    // TODO: add linkedit breakdown
    fprintf(fmap, "\n\n");

    std::unordered_set<const void*> seenHeaders;
    forEachImage([&](const void* machHeader, const char* installName, time_t mtime,
                     ino_t inode, const std::vector<MachOProxySegment>& segments) {
        if ( !seenHeaders.count(machHeader) ) {
            seenHeaders.insert(machHeader);

            fprintf(fmap, "%s\n", installName);
            for (const auto& seg : segments) {
                uint64_t vmAddr = 0;
                for (int i=0; i < regionSizes.size(); ++i) {
                    if ( (seg.fileOffset >= regionFileOffsets[i]) && (seg.fileOffset < (regionFileOffsets[i]+regionSizes[i])) ) {
                        vmAddr = regionStartAddresses[i] + seg.fileOffset - regionFileOffsets[i];
                    }
                }
                fprintf(fmap, "\t%16s 0x%08llX -> 0x%08llX\n", seg.name.c_str(), vmAddr, vmAddr+seg.size);
            }
        }
    });

    ::fclose(fmap);
    return true;
}
template <typename P>
std::vector<MachOProxySegment> getSegments(const void* cacheBuffer, const void* machHeader)
{
    std::vector<MachOProxySegment> result;
    macho_header<P>* mh = (macho_header<P>*)machHeader;
    const uint32_t cmd_count = mh->ncmds();
    const macho_load_command<P>* cmd = (macho_load_command<P>*)((uint8_t*)mh + sizeof(macho_header<P>));
    for (uint32_t i = 0; i < cmd_count; ++i) {
        if ( cmd->cmd() != macho_segment_command<P>::CMD )
            continue;
        macho_segment_command<P>* segCmd = (macho_segment_command<P>*)cmd;
        MachOProxySegment seg;
        seg.name        = segCmd->segname();
        seg.size        = segCmd->vmsize();
        seg.vmaddr      = segCmd->vmaddr();
        seg.diskSize    = (uint32_t)segCmd->filesize();
        seg.fileOffset  = (uint32_t)segCmd->fileoff();
        seg.protection  = segCmd->initprot();
        // HACK until lldb fixed in <rdar://problem/20357466>
        if ( (seg.fileOffset == 0) && (strcmp(segCmd->segname(), "__TEXT") == 0) )
            seg.fileOffset = (uint32_t)((char*)machHeader - (char*)cacheBuffer);
        if ( segCmd->nsects() > 0 ) {
            seg.p2align = 0;
            const macho_section<P>* const sectionsStart = (macho_section<P>*)((uint8_t*)segCmd + sizeof(macho_segment_command<P>));
            const macho_section<P>* const sectionsEnd = &sectionsStart[segCmd->nsects()];
            for (const macho_section<P>* sect=sectionsStart; sect < sectionsEnd; ++sect) {
                if ( sect->align() > seg.p2align )
                    seg.p2align = sect->align();
            }
        }
        else {
            seg.p2align = 12;
        }
        result.push_back(seg);
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
    return result;
}
template <typename P>
void SharedCache::forEachImage(DylibHandler handler)
{
#if NEW_CACHE_FILE_FORMAT
    terminate("forEachImage() not implemented");
#else
    typedef typename P::E E;
    const dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    const dyldCacheImageInfo<E>* dylibs = (dyldCacheImageInfo<E>*)((char*)_buffer.get() + header->imagesOffset());
    const dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)((char*)_buffer.get() + header->mappingOffset());
    if ( mappings[0].file_offset() != 0 )
        terminate("malformed cache file");
    uint64_t firstImageOffset = 0;
    uint64_t firstRegionAddress = mappings[0].address();
    const void* cacheEnd = (char*)_buffer.get() + _fileSize;
    if ( (const void*)&dylibs[header->imagesCount()] > cacheEnd )
        terminate("malformed cache file");
    for (uint32_t i=0; i < header->imagesCount(); ++i) {
        const char* dylibPath = (char*)_buffer.get() + dylibs[i].pathFileOffset();
        if ( dylibPath > cacheEnd )
            terminate("malformed cache file");
        uint64_t offset = dylibs[i].address() - firstRegionAddress;
        if ( firstImageOffset == 0 )
            firstImageOffset = offset;
        // skip over aliases
        if ( dylibs[i].pathFileOffset() < firstImageOffset )
            continue;
        const void* mh = (char*)_buffer.get() + offset;
        ino_t  inode   = dylibs[i].inode();
        time_t modTime = dylibs[i].modTime();
        handler(mh, dylibPath, modTime, inode, getSegments<P>(_buffer.get(), mh));
    }
#endif
}
template <typename P>
void SharedCache::recomputeCacheUUID(void)
{
    uint8_t* uuidLoc = nullptr;
#if NEW_CACHE_FILE_FORMAT
    const macho_header<P>* mh = (macho_header<P>*)cacheBuffer;
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)mh + sizeof(macho_header<P>));
    const uint32_t cmd_count = mh->ncmds();
    const macho_load_command<P>* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        if ( cmd->cmd() == LC_UUID ) {
            const macho_uuid_command<P>* uuidCmd = (macho_uuid_command<P>*)cmd;
            uuidLoc = const_cast<uint8_t*>(uuidCmd->uuid());
            break;
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
#else
    dyldCacheHeader<P>* header = (dyldCacheHeader<P>*)_buffer.get();
    uuidLoc = const_cast<uint8_t*>(header->uuid());
#endif

    // Clear existing UUID, then MD5 whole cache buffer.
    bzero(uuidLoc, 16);
    CC_MD5(_buffer.get(), (unsigned)_fileSize, uuidLoc);
    // <rdar://problem/6723729> uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
    uuidLoc[6] = ( uuidLoc[6] & 0x0F ) | ( 3 << 4 );
    uuidLoc[8] = ( uuidLoc[8] & 0x3F ) | 0x80;
}
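
// Worked example (not part of the original source): if the MD5 of the cache left
// uuidLoc[6] == 0xA7 and uuidLoc[8] == 0x1F, the two lines above rewrite them as
//     uuidLoc[6] = (0xA7 & 0x0F) | 0x30  ->  0x37   (version nibble forced to 3)
//     uuidLoc[8] = (0x1F & 0x3F) | 0x80  ->  0x9F   (RFC 4122 variant bits 10xx)
// so the digest becomes a well-formed name-based UUID.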
template <typename P>
void SharedCache::setLinkeditsMappingEndFileOffset(uint64_t newFileSize)
{
#if NEW_CACHE_FILE_FORMAT
    terminate("setLinkeditsMappingEndFileOffset() not implemented");
#else
    typedef typename P::E E;
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)((char*)_buffer.get() + header->mappingOffset());
    uint64_t newReadOnlySize = newFileSize - mappings[2].file_offset();
    mappings[2].set_size(newReadOnlySize);
    header->set_codeSignatureOffset(newFileSize);
    _readOnlyRegion.size = (newReadOnlySize);
#endif
}
>
555 void SharedCache::setUnmappedLocalsRange(uint64_t localSymbolsOffset
, uint32_t unmappedSize
)
557 #if NEW_CACHE_FILE_FORMAT
558 terminate("setUnmappedLocalsRange() not implemented");
560 typedef typename
P::E E
;
561 dyldCacheHeader
<E
>* header
= (dyldCacheHeader
<E
>*)_buffer
.get();
562 header
->set_localSymbolsOffset(localSymbolsOffset
);
563 header
->set_localSymbolsSize(unmappedSize
);
564 // move start of code signature to new end of file
565 header
->set_codeSignatureOffset(localSymbolsOffset
+unmappedSize
);
template <typename P>
void SharedCache::setAcceleratorInfoRange(uint64_t accelInfoAddr, uint32_t accelInfoSize)
{
#if NEW_CACHE_FILE_FORMAT
    terminate("setAcceleratorInfoRange() not implemented");
#else
    typedef typename P::E E;
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_accelerateInfoAddr(accelInfoAddr);
    header->set_accelerateInfoSize(accelInfoSize);
#endif
}
template <typename P>
void SharedCache::forEachRegion(RegionHandler handler)
{
#if NEW_CACHE_FILE_FORMAT
    const macho_header<P>* mh = (macho_header<P>*)cacheBuffer;
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)mh + sizeof(macho_header<P>));
    const uint32_t cmd_count = mh->ncmds();
    const macho_load_command<P>* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        if ( cmd->cmd() == macho_segment_command<P>::CMD ) {
            const macho_segment_command<P>* segCmd = (macho_segment_command<P>*)cmd;
            handler((char*)cacheBuffer + segCmd->fileoff(), segCmd->vmaddr(), segCmd->vmsize(), segCmd->initprot());
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
#else
    typedef typename P::E E;
    const dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    const dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)((char*)_buffer.get() + header->mappingOffset());
    const dyldCacheFileMapping<E>* mappingsEnd = &mappings[header->mappingCount()];
    for (const dyldCacheFileMapping<E>* m=mappings; m < mappingsEnd; ++m) {
        handler((char*)_buffer.get() + m->file_offset(), m->address(), m->size(), m->init_prot());
    }
#endif
}
std::shared_ptr<void> SharedCache::buffer(void) const {
    return _buffer;
}

std::string SharedCache::archName() {
    return stringForArch(_arch);
}
void SharedCache::assignSegmentAddresses()
{
    _branchPoolStarts.clear();
    uint64_t addr = sharedRegionStartExecutableAddress(_arch);

    // assign TEXT segment addresses
    _textRegion.address    = addr;
    _textRegion.fileOffset = 0;
    _textRegion.prot       = VM_PROT_READ | VM_PROT_EXECUTE;
#if NEW_CACHE_FILE_FORMAT
    addr += 0x4000;  // header
#else
    addr += 0x28000; // header
#endif
    uint64_t brPoolTextSize     = branchPoolTextSize(_arch);
    uint64_t brPoolLinkEditSize = branchPoolLinkEditSize(_arch);
    uint64_t brRearch           = branchReach(_arch);
    uint64_t lastPoolAddress    = addr;
    for (auto& dylib : _dylibs) {
        for (auto& seg : _segmentMap[dylib]) {
            if ( seg.base->protection != (VM_PROT_READ | VM_PROT_EXECUTE) )
                continue;
            // Insert branch island pools every 128MB for arm64
            if ( (brPoolTextSize != 0) && ((addr + seg.base->size - lastPoolAddress) > brRearch) ) {
                _branchPoolStarts.push_back(addr);
                //verboseLog("adding branch pool at 0x%lX\n", addr);
                lastPoolAddress = addr;
                addr += brPoolTextSize;
            }
            // Keep __TEXT segments 4K or more aligned
            uint64_t startAlignPad = align(addr, std::max(seg.base->p2align, (uint8_t)12)) - addr;
            addr += startAlignPad;
            seg.address = addr;
            seg.cacheFileOffset = addr - _textRegion.address + _textRegion.fileOffset;
            seg.cacheSegSize = align(seg.base->sizeOfSections, 12);
            addr += align(seg.base->sizeOfSections, 12);
        }
    }
    // align TEXT region end
    uint64_t endTextAddress = align(addr, sharedRegionRegionAlignment(_arch));
    _textRegion.size = endTextAddress - _textRegion.address;

    std::unordered_map<const SegmentInfo*, std::string> dataDirtySegPaths;

    // co-locate similar __DATA* segments
    std::vector<SegmentInfo*> dataSegs;
    std::vector<SegmentInfo*> dataConstSegs;
    std::vector<SegmentInfo*> dataDirtySegs;
    for (auto& dylib : _dylibs) {
        for (auto& seg : _segmentMap[dylib]) {
            if ( seg.base->protection == (VM_PROT_READ | VM_PROT_WRITE) ) {
                if ( seg.base->name == "__DATA_CONST" ) {
                    dataConstSegs.push_back(&seg);
                }
                else if ( seg.base->name == "__DATA_DIRTY" ) {
                    dataDirtySegs.push_back(&seg);
                    dataDirtySegPaths[&seg] = dylib->installName;
                }
                else {
                    dataSegs.push_back(&seg);
                }
            }
        }
    }

    // assign __DATA* addresses
    addr = sharedRegionStartWriteableAddress(_arch, endTextAddress);
    _dataRegion.address    = addr;
    _dataRegion.fileOffset = _textRegion.fileOffset + _textRegion.size;
    _dataRegion.prot       = VM_PROT_READ | VM_PROT_WRITE;

    // layout all __DATA_CONST segments
    for (SegmentInfo* seg : dataConstSegs) {
        // Keep __DATA_CONST segments 4K or more aligned
        uint64_t startAlignPad = align(addr, std::max(seg->base->p2align, (uint8_t)12)) - addr;
        addr += startAlignPad;
        seg->address = addr;
        seg->cacheFileOffset = addr - _dataRegion.address + _dataRegion.fileOffset;
        seg->cacheSegSize = seg->base->sizeOfSections;
        addr += seg->base->sizeOfSections;
    }

    // layout all __DATA segments
    for (SegmentInfo* seg : dataSegs) {
        // Keep __DATA segments 4K or more aligned
        uint64_t startAlignPad = align(addr, std::max(seg->base->p2align, (uint8_t)12)) - addr;
        addr += startAlignPad;
        seg->address = addr;
        seg->cacheFileOffset = addr - _dataRegion.address + _dataRegion.fileOffset;
        seg->cacheSegSize = seg->base->sizeOfSections;
        addr += seg->base->sizeOfSections;
    }

    // layout all __DATA_DIRTY segments
    addr = align(addr, 12);
    std::sort(dataDirtySegs.begin(), dataDirtySegs.end(), [&](const SegmentInfo *a, const SegmentInfo *b) {
        const std::string& pathA = dataDirtySegPaths[a];
        const std::string& pathB = dataDirtySegPaths[b];

        const auto& orderA = _dataDirtySegsOrder.find(pathA);
        const auto& orderB = _dataDirtySegsOrder.find(pathB);
        bool foundA = (orderA != _dataDirtySegsOrder.end());
        bool foundB = (orderB != _dataDirtySegsOrder.end());

        // Order all __DATA_DIRTY segments specified in the order file first, in
        // the order specified in the file, followed by any other __DATA_DIRTY
        // segments in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;
        else
            return pathA < pathB;
    });
    for (SegmentInfo* seg : dataDirtySegs) {
        // Pack __DATA_DIRTY segments
        uint64_t startAlignPad = align(addr, seg->base->p2align) - addr;
        addr += startAlignPad;
        seg->address = addr;
        seg->cacheFileOffset = addr - _dataRegion.address + _dataRegion.fileOffset;
        seg->cacheSegSize = seg->base->sizeOfSections;
        addr += seg->base->sizeOfSections;
    }

    // align DATA region end
    uint64_t endDataAddress = align(addr, sharedRegionRegionAlignment(_arch));
    _dataRegion.size = endDataAddress - _dataRegion.address;

    // start read-only region
    addr = sharedRegionStartReadOnlyAddress(_arch, endDataAddress, endTextAddress);
    _readOnlyRegion.address    = addr;
    _readOnlyRegion.fileOffset = _dataRegion.fileOffset + _dataRegion.size;
    _readOnlyRegion.prot       = VM_PROT_READ;

    // reserve space for kernel ASLR slide info at start of r/o region
    _slideInfoBufferSize = align((_dataRegion.size/4096) * 130, 12); // bitmap entry + toc entry
    _slideInfoFileOffset = _readOnlyRegion.fileOffset;
    addr += _slideInfoBufferSize;

    // layout all read-only (but not LINKEDIT) segments
    for (auto& dylib : _dylibs) {
        for (auto& seg : _segmentMap[dylib]) {
            if ( seg.base->protection != VM_PROT_READ )
                continue;
            if ( seg.base->name == "__LINKEDIT" )
                continue;
            // Keep segments 4K or more aligned
            addr = align(addr, std::min(seg.base->p2align, (uint8_t)12));
            seg.address = addr;
            seg.cacheFileOffset = addr - _readOnlyRegion.address + _readOnlyRegion.fileOffset;
            seg.cacheSegSize = seg.base->size;
            addr += seg.base->size;
            //verboseLog("read-only offset=0x%08X, for path=%s\n", seg.cacheFileOffset, ex->proxy->installName.c_str());
        }
    }

    // layout all LINKEDIT segments (after other read-only segments)
    for (auto& dylib : _dylibs) {
        for (auto& seg : _segmentMap[dylib]) {
            if ( seg.base->protection != VM_PROT_READ )
                continue;
            if ( seg.base->name != "__LINKEDIT" )
                continue;
            // Keep LINKEDIT segments 4K aligned
            addr = align(addr, 12);
            seg.address = addr;
            seg.cacheFileOffset = addr - _readOnlyRegion.address + _readOnlyRegion.fileOffset;
            seg.cacheSegSize = seg.base->diskSize;
            addr += seg.base->size;
            //verboseLog("linkedit offset=0x%08X, for path=%s\n", seg.cacheFileOffset, ex->proxy->installName.c_str());
        }
    }

    // add room for branch pool linkedits
    _branchPoolsLinkEditStartAddr = addr;
    addr += (_branchPoolStarts.size() * brPoolLinkEditSize);

    // align r/o region end
    uint64_t endReadOnlyAddress = align(addr, sharedRegionRegionAlignment(_arch));
    _readOnlyRegion.size = endReadOnlyAddress - _readOnlyRegion.address;
    _fileSize = _readOnlyRegion.fileOffset + _readOnlyRegion.size;

    // FIXME: Confirm these numbers for all platform/arch combos
    // assume LINKEDIT optimization reduces LINKEDITs to 40% of original size
    if (_manifest.platform() == "osx") {
        _vmSize = _readOnlyRegion.address + (_readOnlyRegion.size * 9 / 10) - _textRegion.address;
    }
    else {
        _vmSize = _readOnlyRegion.address + (_readOnlyRegion.size * 2 / 5) - _textRegion.address;
    }
}
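
// Illustrative layout (not part of the original source), following the address
// helpers at the top of this file for an arm64 cache:
//
//     __TEXT region:  starts at ARM64_SHARED_REGION_START; the first 0x28000
//                     bytes are reserved for the cache header, then __TEXT
//                     segments are packed 4KB-aligned, with branch-island pools
//                     inserted so no segment lands farther than branchReach()
//                     from the previous pool.
//     __DATA region:  starts 32MB after the aligned end of __TEXT
//                     (__DATA_CONST, then __DATA, then packed __DATA_DIRTY).
//     R/O    region:  starts 32MB after the aligned end of __DATA; slide info
//                     first, then non-LINKEDIT read-only segments, then all
//                     __LINKEDIT segments, then branch-pool LINKEDIT space.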
uint64_t SharedCache::pathHash(const char* path)
{
    uint64_t sum = 0;
    for (const char* s=path; *s != '\0'; ++s)
        sum += sum*4 + *s;
    return sum;
}
void SharedCache::findDylibAndSegment(const void* contentPtr, std::string& dylibName, std::string& segName)
{
    uint64_t fileOffset = (uint8_t*)contentPtr - (uint8_t*)_buffer.get();
    for (const auto& entry : _segmentMap) {
        const MachOProxy* dylib = entry.first;
        for (const SegmentInfo& segInfo : entry.second) {
            //fprintf(stderr, "   cacheFileOffset=0x%08llX, end=0x%08llX\n", segInfo.cacheFileOffset, segInfo.cacheFileOffset+segInfo.base->size);
            if ( (segInfo.cacheFileOffset <= fileOffset) && (fileOffset < segInfo.cacheFileOffset+segInfo.base->size) ) {
                dylibName = dylib->installName;
                segName = segInfo.base->name;
                return;
            }
        }
    }
}
template <typename P>
bool SharedCache::makeRebaseChain(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyldCacheSlideInfo2<typename P::E>* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask  = (pint_t)(info->delta_mask());
    const pint_t   valueMask  = ~deltaMask;
    const pint_t   valueAdd   = (pint_t)(info->value_add());
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    const uint32_t maxDelta   = (uint32_t)(deltaMask >> deltaShift);

    pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        terminate("rebase pointer does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                  lastLocationOffset, segName.c_str(), dylibName.c_str());
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //warning("  too big delta = %d, lastOffset=0x%03X, offset=0x%03X", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( value == 0 ) {
                // Steal values of 0 to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //warning("   no way to make non-rebase delta chain, terminate off=0x%03X, old value=0x%08lX, new value=0x%08lX", lastLocationOffset, (long)value, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
    }

    // we can make chain. go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t* prevLoc = (pint_t*)&pageContent[prevOffset];
    for (int n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t* nLoc = (pint_t*)&pageContent[nOffset];
        uint32_t delta2 = nOffset - prevOffset;
        pint_t value = (pint_t)P::getP(*prevLoc);
        pint_t newValue;
        if ( value == 0 )
            newValue = (delta2 << deltaShift);
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc = nLoc;
    }

    // write delta to final location in chain
    uint32_t delta3 = offset - prevOffset;
    pint_t value = (pint_t)P::getP(*prevLoc);
    pint_t newValue;
    if ( value == 0 )
        newValue = (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}
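
// Worked example (not part of the original source): for a 32-bit cache built with
// delta_mask = 0xE0000000, __builtin_ctzll gives 29, so deltaShift = 27 and
// maxDelta = 0xE0000000 >> 27 = 28 bytes. Two rebase locations at page offsets
// 0x010 and 0x020 are 16 bytes apart, so the first pointer's top three bits are
// set to 16 >> 2 = 4 and the chain continues; locations more than 28 bytes apart
// fall into the zero-word-stealing path above, or split the page into a new chain.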
template <typename P>
void SharedCache::addPageStarts(uint8_t* pageContent, const bool bitmap[], const dyldCacheSlideInfo2<typename P::E>* info,
                                std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask = (pint_t)(info->delta_mask());
    const pint_t   valueMask = ~deltaMask;
    const uint32_t pageSize  = info->page_size();
    const pint_t   valueAdd  = (pint_t)(info->value_add());

    uint16_t startValue = DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;
    for(int i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChain<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras > 0x3FFF )
                        terminate("rebase overflow in page extras");
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset];
        pint_t lastValue = (pint_t)P::getP(*lastLoc);
        pint_t newValue = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE_PAGE_ATTR_END;
    }
    pageStarts.push_back(startValue);
}
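
// Illustrative encoding (not part of the original source): a 4KB page whose first
// rebase location is at byte offset 0x28 and whose remaining rebases all fit in a
// single chain gets pageStarts[page] = 0x28/4 = 10. A page that needs several
// chains instead pushes each chain's start onto pageExtras, marks the last one
// with DYLD_CACHE_SLIDE_PAGE_ATTR_END, and stores the index of its first extra
// OR'ed with DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA in pageStarts. A page with no
// rebases keeps DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE.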
template <typename P>
void SharedCache::writeSlideInfoV2(uint64_t deltaMask, uint64_t valueAdd)
{
    // i386 cache does not support sliding because stubs use absolute addressing (text relocs)
    if (_arch.arch == CPU_TYPE_I386) {
        dyldCacheHeader<LittleEndian>* header = (dyldCacheHeader<LittleEndian>*)_buffer.get();
        header->set_slideInfoSize(0);
        return;
    }

    typedef typename P::E E;
    const uint32_t pageSize = 4096;

    // build one 1024/4096 bool bitmap per page (4KB/16KB) of DATA
    uint8_t* const dataStart = (uint8_t*)_buffer.get() + _dataRegion.fileOffset;
    uint8_t* const dataEnd   = dataStart + _dataRegion.size;
    unsigned pageCount = (unsigned)(_dataRegion.size+pageSize-1)/pageSize;
    const long bitmapSize = pageCount*(pageSize/4)*sizeof(bool);
    bool* bitmap = (bool*)calloc(bitmapSize, 1);
    for (void* p : _pointersForASLR) {
        if ( (p < dataStart) || ( p > dataEnd) )
            terminate("DATA pointer for sliding, out of range\n");
        long byteOffset = (long)((uint8_t*)p - dataStart);
        if ( (byteOffset % 4) != 0 )
            terminate("pointer not 4-byte aligned in DATA offset 0x%08lX\n", byteOffset);
        long boolIndex = byteOffset / 4;
        // work around <rdar://24941083> by ignoring pointers to be slid that are NULL on disk
        if ( *(typename P::uint_t*)p == 0 ) {
            std::string dylibName;
            std::string segName;
            findDylibAndSegment(p, dylibName, segName);
            warning("NULL pointer asked to be slid in %s of %s", segName.c_str(), dylibName.c_str());
            continue;
        }
        bitmap[boolIndex] = true;
    }

    // fill in fixed info
    dyldCacheSlideInfo2<E>* info = (dyldCacheSlideInfo2<E>*)((uint8_t*)_buffer.get() + _slideInfoFileOffset);
    info->set_version(2);
    info->set_page_size(pageSize);
    info->set_delta_mask(deltaMask);
    info->set_value_add(valueAdd);

    // set page starts and extras for each page
    std::vector<uint16_t> pageStarts;
    std::vector<uint16_t> pageExtras;
    pageStarts.reserve(pageCount);
    uint8_t* pageContent = dataStart;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < pageCount; ++i) {
        //warning("page[%d]", i);
        addPageStarts<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
        pageContent += pageSize;
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }
    free((void*)bitmap);

    // fill in computed info
    info->set_page_starts_offset(sizeof(dyldCacheSlideInfo2<E>));
    info->set_page_starts_count((unsigned)pageStarts.size());
    info->set_page_extras_offset((unsigned)(sizeof(dyldCacheSlideInfo2<E>)+pageStarts.size()*sizeof(uint16_t)));
    info->set_page_extras_count((unsigned)pageExtras.size());
    for (unsigned i=0; i < pageStarts.size(); ++i)
        info->set_page_starts(i, pageStarts[i]);
    for (unsigned i=0; i < pageExtras.size(); ++i)
        info->set_page_extras(i, pageExtras[i]);
    //warning("pageCount=%u, page_starts_count=%lu, page_extras_count=%lu", pageCount, pageStarts.size(), pageExtras.size());
    _slideInfoBufferSize = align(info->page_extras_offset() + pageExtras.size()*sizeof(uint16_t), 12);

#if NEW_CACHE_FILE_FORMAT
#else
    unsigned long slideInfoPageSize = align(_slideInfoBufferSize, sharedRegionRegionAlignment(_arch));
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_slideInfoSize(slideInfoPageSize);
#endif
}
void SharedCache::writeSlideInfoV2(void)
{
    switch (_arch.arch) {
        case CPU_TYPE_ARM:
            // linked list based slide info needs high 3 bits of pointer, won't work with > 512MB of pointable content
            if ( (_textRegion.size + _dataRegion.size) > 512*1024*1024 ) {
                warning("cache TEXT+DATA > 512MB, using larger slide info format");
                writeSlideInfo<LittleEndian>();
            }
            else {
                writeSlideInfoV2<Pointer32<LittleEndian>>(0xE0000000, ARM_SHARED_REGION_START);
            }
            break;
        case CPU_TYPE_I386:
            writeSlideInfoV2<Pointer32<LittleEndian>>(0xE0000000, 0x90000000);
            break;
        case CPU_TYPE_X86_64:
            writeSlideInfoV2<Pointer64<LittleEndian>>(0xFFFF000000000000, 0);
            break;
        case CPU_TYPE_ARM64:
            writeSlideInfoV2<Pointer64<LittleEndian>>(0x00FFFF0000000000, 0);
            break;
        default:
            warning("unsupported arch 0x%08X", _arch.arch);
            break;
    }
}
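
// Note on the constants above (explanatory, not from the original source): the
// first argument is the delta_mask handed to the V2 writer -- 0xE0000000 keeps
// the chain links in the top 3 bits of a 32-bit pointer, 0x00FFFF0000000000
// keeps them in bits 40-55 of an arm64 pointer, and 0xFFFF000000000000 uses the
// top 16 bits on x86_64. The second argument is value_add, the base address that
// the 32-bit caches subtract out of each stored pointer before encoding it.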
template <typename E>
void SharedCache::writeSlideInfo(void)
{
    // i386 cache does not support sliding because stubs use absolute addressing (text relocs)
    if (_arch.arch == CPU_TYPE_I386) {
        dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
        header->set_slideInfoSize(0);
        return;
    }

    // build one 128-byte bitmap per page (4096) of DATA
    uint8_t* const dataStart = (uint8_t*)_buffer.get() + _dataRegion.fileOffset;
    uint8_t* const dataEnd   = dataStart + _dataRegion.size;
    const long bitmapSize = (dataEnd - dataStart)/(4*8);
    uint8_t* bitmap = (uint8_t*)calloc(bitmapSize, 1);
    for (void* p : _pointersForASLR) {
        if ( (p < dataStart) || ( p > dataEnd) )
            terminate("DATA pointer for sliding, out of range\n");
        long offset = (long)((uint8_t*)p - dataStart);
        if ( (offset % 4) != 0 )
            terminate("pointer not 4-byte aligned in DATA offset 0x%08lX\n", offset);
        long byteIndex = offset / (4*8);
        long bitInByte = (offset % 32) >> 2;
        bitmap[byteIndex] |= (1 << bitInByte);
    }

    // allocate worst case size block of all slide info
    const unsigned entry_size = 4096/(8*4); // 8 bits per byte, possible pointer every 4 bytes.
    const unsigned toc_count = (unsigned)bitmapSize/entry_size;
    dyldCacheSlideInfo<E>* slideInfo = (dyldCacheSlideInfo<E>*)((uint8_t*)_buffer.get() + _slideInfoFileOffset);
    slideInfo->set_version(1);
    slideInfo->set_toc_offset(sizeof(dyldCacheSlideInfo<E>));
    slideInfo->set_toc_count(toc_count);
    slideInfo->set_entries_offset((slideInfo->toc_offset()+2*toc_count+127)&(-128));
    slideInfo->set_entries_count(0);
    slideInfo->set_entries_size(entry_size);
    // append each unique entry
    const dyldCacheSlideInfoEntry* bitmapAsEntries = (dyldCacheSlideInfoEntry*)bitmap;
    dyldCacheSlideInfoEntry* const entriesInSlidInfo = (dyldCacheSlideInfoEntry*)((char*)slideInfo+slideInfo->entries_offset());
    int entry_count = 0;
    for (int i=0; i < toc_count; ++i) {
        const dyldCacheSlideInfoEntry* thisEntry = &bitmapAsEntries[i];
        // see if it is same as one already added
        bool found = false;
        for (int j=0; j < entry_count; ++j) {
            if ( memcmp(thisEntry, &entriesInSlidInfo[j], entry_size) == 0 ) {
                slideInfo->set_toc(i, j);
                found = true;
                break;
            }
        }
        if ( !found ) {
            // append to end
            memcpy(&entriesInSlidInfo[entry_count], thisEntry, entry_size);
            slideInfo->set_toc(i, entry_count++);
        }
    }
    slideInfo->set_entries_count(entry_count);
    ::free((void*)bitmap);

#if NEW_CACHE_FILE_FORMAT
#else
    unsigned long slideInfoPageSize = align(slideInfo->entries_offset() + entry_count*entry_size, sharedRegionRegionAlignment(_arch));
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_slideInfoSize(slideInfoPageSize);
#endif
}
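
// Illustrative sizing (not part of the original source): version 1 slide info is
// a table of contents with one 16-bit entry per 4KB DATA page plus a pool of
// unique 128-byte bitmaps (4096/4 possible pointers, one bit each). For a 64MB
// __DATA region that is 16384 toc entries; if only, say, 200 distinct page
// bitmaps actually occur, the entry pool holds just those 200 and every other
// page's toc entry points at a shared bitmap, which is what the dedup loop above
// achieves.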
template <typename P>
void SharedCache::writeCacheHeader(void)
{
#if NEW_CACHE_FILE_FORMAT
    macho_header<P>* mh = (macho_header<P>*)cacheBuffer;
    mh->set_magic((sizeof(typename P::uint_t) == 8) ? MH_MAGIC_64 : MH_MAGIC);
    mh->set_cputype(arch.arch);
    mh->set_cpusubtype(arch.subtype);
    mh->set_filetype(MH_DYLIB);
    mh->set_ncmds(0);
    mh->set_sizeofcmds(0);

    uint8_t* cmd = (uint8_t*)cacheBuffer + sizeof(macho_header<P>);

    // write LC_SEGMENT for each region
    macho_segment_command<P>* rxSegCmd = (macho_segment_command<P>*)cmd;
    rxSegCmd->set_cmd(macho_segment_command<P>::CMD);
    rxSegCmd->set_cmdsize(sizeof(macho_segment_command<P>));
    rxSegCmd->set_segname("R.X");
    rxSegCmd->set_vmaddr(_textRegion.address);
    rxSegCmd->set_vmsize(_textRegion.size);
    rxSegCmd->set_fileoff(_textRegion.fileOffset);
    rxSegCmd->set_filesize(_textRegion.size);
    rxSegCmd->set_maxprot(VM_PROT_READ | VM_PROT_EXECUTE);
    rxSegCmd->set_initprot(VM_PROT_READ | VM_PROT_EXECUTE);
    rxSegCmd->set_nsects(0);
    rxSegCmd->set_flags(0);
    mh->set_ncmds(mh->ncmds()+1);
    mh->set_sizeofcmds(mh->sizeofcmds()+rxSegCmd->cmdsize());
    cmd += rxSegCmd->cmdsize();

    macho_segment_command<P>* rwSegCmd = (macho_segment_command<P>*)cmd;
    rwSegCmd->set_cmd(macho_segment_command<P>::CMD);
    rwSegCmd->set_cmdsize(sizeof(macho_segment_command<P>));
    rwSegCmd->set_segname("RW.");
    rwSegCmd->set_vmaddr(_dataRegion.address);
    rwSegCmd->set_vmsize(_dataRegion.size);
    rwSegCmd->set_fileoff(_dataRegion.fileOffset);
    rwSegCmd->set_filesize(_dataRegion.size);
    rwSegCmd->set_maxprot(VM_PROT_READ | VM_PROT_WRITE);
    rwSegCmd->set_initprot(VM_PROT_READ | VM_PROT_WRITE);
    rwSegCmd->set_nsects(0);
    rwSegCmd->set_flags(0);
    mh->set_ncmds(mh->ncmds()+1);
    mh->set_sizeofcmds(mh->sizeofcmds()+rwSegCmd->cmdsize());
    cmd += rwSegCmd->cmdsize();

    macho_segment_command<P>* roSegCmd = (macho_segment_command<P>*)cmd;
    roSegCmd->set_cmd(macho_segment_command<P>::CMD);
    roSegCmd->set_cmdsize(sizeof(macho_segment_command<P>));
    roSegCmd->set_segname("R..");
    roSegCmd->set_vmaddr(_readOnlyRegion.address);
    roSegCmd->set_vmsize(_readOnlyRegion.size);
    roSegCmd->set_fileoff(_readOnlyRegion.fileOffset);
    roSegCmd->set_filesize(_readOnlyRegion.size);
    roSegCmd->set_maxprot(VM_PROT_READ);
    roSegCmd->set_initprot(VM_PROT_READ);
    roSegCmd->set_nsects(0);
    roSegCmd->set_flags(0);
    mh->set_ncmds(mh->ncmds()+1);
    mh->set_sizeofcmds(mh->sizeofcmds()+roSegCmd->cmdsize());
    cmd += roSegCmd->cmdsize();

    // add LC_ID_DYLIB
    macho_dylib_command<P>* dylibIdCmd = (macho_dylib_command<P>*)cmd;
    const char* installName = "/System/Library/Frameworks/OS.framework/OS"; // FIXME
    uint32_t sz = (uint32_t)align(sizeof(macho_dylib_command<P>) + strlen(installName) + 1, 3);
    dylibIdCmd->set_cmd(LC_ID_DYLIB);
    dylibIdCmd->set_cmdsize(sz);
    dylibIdCmd->set_name_offset();
    dylibIdCmd->set_timestamp(1);
    dylibIdCmd->set_current_version(0x10000);
    dylibIdCmd->set_compatibility_version(0x10000);
    strcpy((char*)&cmd[sizeof(macho_dylib_command<P>)], installName);
    mh->set_ncmds(mh->ncmds()+1);
    mh->set_sizeofcmds(mh->sizeofcmds()+sz);
    cmd += dylibIdCmd->cmdsize();

    // add LC_UUID (zeroed here, filled in later by recomputeCacheUUID())
    macho_uuid_command<P>* uuidCmd = (macho_uuid_command<P>*)cmd;
    uint8_t zeros[16] = { 0 };
    uuidCmd->set_cmd(LC_UUID);
    uuidCmd->set_cmdsize(sizeof(macho_uuid_command<P>));
    uuidCmd->set_uuid(zeros);
    cmd += uuidCmd->cmdsize();

    // build dylib trie
    std::vector<mach_o::trie::Entry> dylibTrieEntires;
    int pathLengths = 0;
    for (Extra* ex : _sortedDylibs) {
        mach_o::trie::Entry entry;
        entry.name = ex->proxy->installName.c_str();
        entry.address = ex->segments[0].address;
        entry.flags = 0;
        entry.other = 0;
        entry.importName = NULL;
        dylibTrieEntires.push_back(entry);
        pathLengths += (strlen(entry.name) + 1);
        for (const std::string& alias : ex->proxy->installNameAliases) {
            mach_o::trie::Entry aliasEntry;
            aliasEntry.name = alias.c_str();
            aliasEntry.address = ex->segments[0].address;
            aliasEntry.flags = 0;
            aliasEntry.other = 0;
            aliasEntry.importName = NULL;
            dylibTrieEntires.push_back(aliasEntry);
            pathLengths += (strlen(aliasEntry.name) + 1);
        }
    }
    std::vector<uint8_t> dylibTrieBytes;
    dylibTrieBytes.reserve(4096);
    mach_o::trie::makeTrie(dylibTrieEntires, dylibTrieBytes);
    fprintf(stderr, "dylib trie size = %lu bytes, for %lu entries, pathLength=%d\n", dylibTrieBytes.size(), dylibTrieEntires.size(), pathLengths);

    // Build SPI trie (optimized cache only)

    // add LC_CODE_SIGNATURE
    macho_linkedit_data_command<P>* codeSigCmd = (macho_linkedit_data_command<P>*)cmd;
    codeSigCmd->set_cmd(LC_CODE_SIGNATURE);
    codeSigCmd->set_cmdsize(sizeof(macho_linkedit_data_command<P>));
    codeSigCmd->set_dataoff((uint32_t)(_readOnlyRegion.fileOffset + _readOnlyRegion.size));
    codeSigCmd->set_datasize(0); // FIXME
    mh->set_ncmds(mh->ncmds()+1);
    mh->set_sizeofcmds(mh->sizeofcmds()+codeSigCmd->cmdsize());
    cmd += codeSigCmd->cmdsize();
#else
    typedef typename P::E E;

    uint8_t* buffer = (uint8_t*)_buffer.get();
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    // "dyld_v1" + spaces + archName(), with enough spaces to pad to 15 bytes
    std::string magic = "dyld_v1";
    magic.append(15 - magic.length() - archName().length(), ' ');
    magic.append(archName());
    assert(magic.length() == 15);
    header->set_magic(magic.c_str());
    header->set_mappingOffset(sizeof(dyldCacheHeader<E>));
    header->set_mappingCount(3);
    header->set_imagesOffset((uint32_t)(header->mappingOffset() + 3*sizeof(dyldCacheFileMapping<E>) + sizeof(uint64_t)*_branchPoolStarts.size()));
    header->set_imagesCount((uint32_t)_dylibs.size() + _aliasCount);
    header->set_dyldBaseAddress(0);
    header->set_codeSignatureOffset(_fileSize);
    header->set_codeSignatureSize(0);
    header->set_slideInfoOffset(_slideInfoFileOffset);
    header->set_slideInfoSize(_slideInfoBufferSize);
    header->set_localSymbolsOffset(0);
    header->set_localSymbolsSize(0);
    header->set_cacheType(kDyldSharedCacheTypeDevelopment);
    header->set_accelerateInfoAddr(0);
    header->set_accelerateInfoSize(0);
    static const uint8_t zero_uuid[16] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 };
    header->set_uuid(zero_uuid); // overwritten later by recomputeCacheUUID()
    header->set_branchPoolsOffset(header->mappingOffset() + 3*sizeof(dyldCacheFileMapping<E>));
    header->set_branchPoolsCount((uint32_t)_branchPoolStarts.size());
    header->set_imagesTextOffset(0);
    header->set_imagesTextCount(_dylibs.size());

    // fill in mappings
    dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)&buffer[header->mappingOffset()];
    mappings[0].set_address(_textRegion.address);
    mappings[0].set_size(_textRegion.size);
    mappings[0].set_file_offset(_textRegion.fileOffset);
    mappings[0].set_max_prot(_textRegion.prot);
    mappings[0].set_init_prot(_textRegion.prot);

    mappings[1].set_address(_dataRegion.address);
    mappings[1].set_size(_dataRegion.size);
    mappings[1].set_file_offset(_dataRegion.fileOffset);
    mappings[1].set_max_prot(_dataRegion.prot);
    mappings[1].set_init_prot(_dataRegion.prot);

    mappings[2].set_address(_readOnlyRegion.address);
    mappings[2].set_size(_readOnlyRegion.size);
    mappings[2].set_file_offset(_readOnlyRegion.fileOffset);
    mappings[2].set_max_prot(_readOnlyRegion.prot);
    mappings[2].set_init_prot(_readOnlyRegion.prot);

    // fill in branch pool addresses
    uint64_t* p = (uint64_t*)&buffer[header->branchPoolsOffset()];
    for (uint64_t pool : _branchPoolStarts) {
        E::set64(*p, pool);
        ++p;
    }

    // fill in image table
    dyldCacheImageInfo<E>* images = (dyldCacheImageInfo<E>*)&buffer[header->imagesOffset()];
    for (auto& dylib : _dylibs) {
        auto textSeg = _segmentMap[dylib][0];
        images->set_address(textSeg.address);
        if (_manifest.platform() == "osx") {
            images->set_modTime(dylib->lastModTime);
            images->set_inode(dylib->inode);
        }
        else {
            images->set_modTime(0);
            images->set_inode(pathHash(dylib->installName.c_str()));
        }
        images->set_pathFileOffset((uint32_t)textSeg.cacheFileOffset + dylib->installNameOffsetInTEXT);
        ++images;
    }

    // append aliases image records and strings
    uint32_t offset = header->imagesOffset() + header->imagesCount()*sizeof(dyld_cache_image_info);
    for (auto &dylib : _dylibs) {
        if (!dylib->installNameAliases.empty()) {
            for (const std::string& alias : dylib->installNameAliases) {
                images->set_address(_segmentMap[dylib][0].address);
                if (_manifest.platform() == "osx") {
                    images->set_modTime(dylib->lastModTime);
                    images->set_inode(dylib->inode);
                }
                else {
                    images->set_modTime(0);
                    images->set_inode(pathHash(alias.c_str()));
                }
                images->set_pathFileOffset(offset);
                ::strcpy((char*)&buffer[offset], alias.c_str());
                offset += alias.size() + 1;
                ++images;
            }
        }
    }

    // calculate start of text image array and trailing string pool
    offset = (offset + 15) & (-16);
    header->set_imagesTextOffset(offset);
    dyldCacheImageTextInfo<E>* textImages = (dyldCacheImageTextInfo<E>*)&buffer[header->imagesTextOffset()];
    uint32_t stringOffset = offset + (uint32_t)(sizeof(dyldCacheImageTextInfo<E>) * _dylibs.size());

    // write text image array and image names pool at same time
    for (auto& dylib : _dylibs) {
        textImages->set_uuid(dylib->uuid.get());
        textImages->set_loadAddress(_segmentMap[dylib][0].address);
        textImages->set_textSegmentSize((uint32_t)_segmentMap[dylib].front().cacheSegSize);
        textImages->set_pathOffset(stringOffset);
        ::strcpy((char*)&buffer[stringOffset], dylib->installName.c_str());
        stringOffset += dylib->installName.size()+1;
        ++textImages;
    }

    // make sure the header, tables, and string pool fit in the space reserved for them
    assert(stringOffset < 0x28000);
#endif
}
void SharedCache::rebase(MachOProxy* dylib)
{
    std::vector<uint64_t> segNewStartAddresses;
    std::vector<uint64_t> segCacheFileOffsets;
    std::vector<uint64_t> segCacheFileSizes;
    for (auto& seg : _segmentMap[dylib]) {
        segNewStartAddresses.push_back(seg.address);
        segCacheFileOffsets.push_back(seg.cacheFileOffset);
        segCacheFileSizes.push_back(seg.cacheSegSize);
    }
    adjustImageForNewSegmentLocations(segNewStartAddresses, segCacheFileOffsets, segCacheFileSizes, _pointersForASLR);
}

void SharedCache::rebaseAll(void)
{
    for (auto& dylib : _dylibs)
        rebase(dylib);
}

void SharedCache::bindAll(void)
{
    bindAllImagesInCache(_dylibs, _segmentMap, _pointersForASLR);
}
void SharedCache::writeCacheSegments(void)
{
    uint8_t* cacheBytes = (uint8_t*)_buffer.get();
    for (auto& dylib : _dylibs) {
        const uint8_t* srcDylib = dylib->getBuffer();

        for (auto& seg : _segmentMap[dylib]) {
            uint64_t copySize = std::min(seg.cacheSegSize, (uint64_t)seg.base->diskSize);
            verboseLog("copy segment %12s (0x%08llX bytes) to %p (logical addr 0x%llX) for %s", seg.base->name.c_str(), copySize, &cacheBytes[seg.cacheFileOffset], seg.address, dylib->installName.c_str());
            ::memcpy(&cacheBytes[seg.cacheFileOffset], &srcDylib[seg.base->fileOffset], copySize);
        }
    }
}
void SharedCache::appendCodeSignature(const std::string& suffix)
{
    // select which codesigning hash
    uint8_t  dscHashType     = CS_HASHTYPE_SHA1;
    uint8_t  dscHashSize     = CS_HASH_SIZE_SHA1;
    uint32_t dscDigestFormat = kCCDigestSHA1;
    if (_manifest.platform() == "osx") {
        dscHashType     = CS_HASHTYPE_SHA256;
        dscHashSize     = CS_HASH_SIZE_SHA256;
        dscDigestFormat = kCCDigestSHA256;
    }

    std::string cacheIdentifier = "com.apple.dyld.cache." + archName() + "." + suffix;
    // get pointers into shared cache buffer
    size_t inBbufferSize = _fileSize;
    const uint8_t* inBuffer = (uint8_t*)_buffer.get();
    uint8_t* csBuffer = (uint8_t*)_buffer.get()+inBbufferSize;

    // layout code signature contents
    size_t idSize = cacheIdentifier.size()+1; // +1 for terminating 0
    uint32_t slotCount = (uint32_t)((inBbufferSize + CS_PAGE_SIZE - 1) / CS_PAGE_SIZE);
    uint32_t xSlotCount = CSSLOT_REQUIREMENTS;
    size_t scatOffset = sizeof(CS_CodeDirectory);
    size_t scatSize = 4*sizeof(CS_Scatter);  // only 3 used??
    size_t idOffset = scatOffset+scatSize;
    size_t hashOffset = idOffset+idSize + dscHashSize*xSlotCount;
    size_t cdSize = hashOffset + (slotCount * dscHashSize);
    size_t reqsSize = 12;
    size_t cmsSize = sizeof(CS_Blob);
    size_t cdOffset = sizeof(CS_SuperBlob) + 3*sizeof(CS_BlobIndex);
    size_t reqsOffset = cdOffset + cdSize;
    size_t cmsOffset = reqsOffset + reqsSize;
    size_t sbSize = cmsOffset + cmsSize;
    size_t sigSize = align(sbSize, 14); // keep whole cache 16KB aligned

    // create overall code signature which is a superblob
    CS_SuperBlob* sb = reinterpret_cast<CS_SuperBlob*>(csBuffer);
    sb->magic           = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
    sb->length          = htonl(sbSize);
    sb->count           = htonl(3);
    sb->index[0].type   = htonl(CSSLOT_CODEDIRECTORY);
    sb->index[0].offset = htonl(cdOffset);
    sb->index[1].type   = htonl(CSSLOT_REQUIREMENTS);
    sb->index[1].offset = htonl(reqsOffset);
    sb->index[2].type   = htonl(CSSLOT_CMS_SIGNATURE);
    sb->index[2].offset = htonl(cmsOffset);

    // initialize fixed fields of Code Directory
    CS_CodeDirectory* cd = (CS_CodeDirectory*)(((char*)sb)+cdOffset);
    cd->magic           = htonl(CSMAGIC_CODEDIRECTORY);
    cd->length          = htonl(cdSize);
    cd->version         = htonl(0x20100);
    cd->flags           = htonl(kSecCodeSignatureAdhoc);
    cd->hashOffset      = htonl(hashOffset);
    cd->identOffset     = htonl(idOffset);
    cd->nSpecialSlots   = htonl(xSlotCount);
    cd->nCodeSlots      = htonl(slotCount);
    cd->codeLimit       = htonl(inBbufferSize);
    cd->hashSize        = dscHashSize;
    cd->hashType        = dscHashType;
    cd->platform        = 0;                            // not platform binary
    cd->pageSize        = __builtin_ctz(CS_PAGE_SIZE);  // log2(CS_PAGE_SIZE);
    cd->spare2          = 0;                            // unused (must be zero)
    cd->scatterOffset   = htonl(scatOffset);

    // initialize dynamic fields of Code Directory
    strcpy((char*)cd + idOffset, cacheIdentifier.c_str());

    // add scatter info, one entry per cache region
    CS_Scatter* scatter = reinterpret_cast<CS_Scatter*>((char*)cd+scatOffset);
    scatter[0].count        = htonl(_textRegion.size/CS_PAGE_SIZE);
    scatter[0].base         = htonl(_textRegion.fileOffset/CS_PAGE_SIZE);
    scatter[0].targetOffset = htonll(_textRegion.address);
    scatter[0].spare        = 0;
    scatter[1].count        = htonl(_dataRegion.size/CS_PAGE_SIZE);
    scatter[1].base         = htonl(_dataRegion.fileOffset/CS_PAGE_SIZE);
    scatter[1].targetOffset = htonll(_dataRegion.address);
    scatter[1].spare        = 0;
    scatter[2].count        = htonl(_readOnlyRegion.size/CS_PAGE_SIZE);
    scatter[2].base         = htonl(_readOnlyRegion.fileOffset/CS_PAGE_SIZE);
    scatter[2].targetOffset = htonll(_readOnlyRegion.address);
    scatter[2].spare        = 0;

    // fill in empty requirements
    CS_RequirementsBlob* reqs = (CS_RequirementsBlob*)(((char*)sb)+reqsOffset);
    reqs->magic  = htonl(CSMAGIC_REQUIREMENTS);
    reqs->length = htonl(sizeof(CS_RequirementsBlob));

    // fill in empty CMS blob for ad-hoc signing
    CS_Blob* cms = (CS_Blob*)(((char*)sb)+cmsOffset);
    cms->magic  = htonl(CSMAGIC_BLOBWRAPPER);
    cms->length = htonl(sizeof(CS_Blob));

    // add special slot hashes
    uint8_t* hashSlot = (uint8_t*)cd + hashOffset;
    uint8_t* reqsHashSlot = &hashSlot[-CSSLOT_REQUIREMENTS*dscHashSize];
    CCDigest(dscDigestFormat, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHashSlot);

    // alter header of cache to record size and location of code signature
    // do this *before* hashing each page
    dyldCacheHeader<LittleEndian>* header = (dyldCacheHeader<LittleEndian>*)inBuffer;
    header->set_codeSignatureOffset(inBbufferSize);
    header->set_codeSignatureSize(sigSize);

    // compute one hash per page of the cache file
    const uint8_t* code = inBuffer;
    for (uint32_t i=0; i < slotCount; ++i) {
        CCDigest(dscDigestFormat, code, CS_PAGE_SIZE, hashSlot);
        hashSlot += dscHashSize;
        code += CS_PAGE_SIZE;
    }

    // hash of entire code directory (cdHash) uses same hash as each page
    uint8_t fullCdHash[dscHashSize];
    CCDigest(dscDigestFormat, (const uint8_t*)cd, cdSize, fullCdHash);
    // Note: cdHash is defined as first 20 bytes of hash
    memcpy(_cdHash, fullCdHash, 20);

    // increase file size to include newly appended code signature
    _fileSize += sigSize;
}
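
// Blob layout produced above (summary comment, not from the original source):
//
//     CS_SuperBlob
//       index[0] -> CS_CodeDirectory at cdOffset
//                     scatter entries (one per cache region)
//                     identifier "com.apple.dyld.cache.<arch>.<suffix>"
//                     special slot hash for the requirements blob
//                     one hash per CS_PAGE_SIZE page of the cache file
//       index[1] -> CS_RequirementsBlob (empty) at reqsOffset
//       index[2] -> CS_Blob (empty CMS wrapper, ad-hoc signature) at cmsOffset
//
// The first 20 bytes of the CodeDirectory's own hash become _cdHash.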
std::string SharedCache::cdHashString()
{
    char buff[sizeof(_cdHash)*2 + 1];
    for (int i = 0; i < sizeof(_cdHash); ++i)
        sprintf(&buff[2*i], "%2.2x", _cdHash[i]);
    return buff;
}
#pragma mark Template dispatchers

#define TEMPLATE_DISPATCHER_BODY(method,...)                         \
    switch( _arch.arch ) {                                           \
        case CPU_TYPE_ARM:                                           \
        case CPU_TYPE_I386:                                          \
            method<Pointer32<LittleEndian>>(__VA_ARGS__);            \
            break;                                                   \
        case CPU_TYPE_X86_64:                                        \
        case CPU_TYPE_ARM64:                                         \
            method<Pointer64<LittleEndian>>(__VA_ARGS__);            \
            break;                                                   \
        default:                                                     \
            terminate("unsupported arch 0x%08X", _arch.arch);        \
    }

void SharedCache::writeCacheHeader() {
    TEMPLATE_DISPATCHER_BODY(writeCacheHeader)
}

void SharedCache::buildForDevelopment(const std::string& cachePath) {
    TEMPLATE_DISPATCHER_BODY(buildForDevelopment, cachePath)
}

void SharedCache::buildForProduction(const std::string& cachePath) {
    TEMPLATE_DISPATCHER_BODY(buildForProduction, cachePath)
}

void SharedCache::setLinkeditsMappingEndFileOffset(uint64_t newFileSize) {
    TEMPLATE_DISPATCHER_BODY(setLinkeditsMappingEndFileOffset, newFileSize)
}

void SharedCache::setUnmappedLocalsRange(uint64_t localSymbolsOffset, uint32_t unmappedSize) {
    TEMPLATE_DISPATCHER_BODY(setUnmappedLocalsRange, localSymbolsOffset, unmappedSize)
}

void SharedCache::setAcceleratorInfoRange(uint64_t accelInfoAddr, uint32_t accelInfoSize) {
    TEMPLATE_DISPATCHER_BODY(setAcceleratorInfoRange, accelInfoAddr, accelInfoSize)
}

void SharedCache::recomputeCacheUUID(void) {
    TEMPLATE_DISPATCHER_BODY(recomputeCacheUUID)
}

void SharedCache::forEachImage(DylibHandler handler) {
    TEMPLATE_DISPATCHER_BODY(forEachImage, handler)
}

void SharedCache::forEachRegion(RegionHandler handler) {
    TEMPLATE_DISPATCHER_BODY(forEachRegion, handler)
}
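
// Hypothetical usage sketch (not part of the original source; the Manifest setup
// and configuration name below are assumptions, only the SharedCache calls are
// taken from this file):
//
//     Manifest manifest = ...;                     // built elsewhere from a plist
//     SharedCache cache(manifest, "Release", "arm64");
//     cache.buildForProduction(cachePath);         // or buildForDevelopment()
//     cache.writeCacheMapFile(cachePath + ".map");
//
// The constructor already assigns segment addresses and evicts dylibs until the
// cache fits the shared region, so the build*() calls only fill and sign the buffer.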