/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include "mega-dylib-utils.h"
#include "MachOFileAbstraction.hpp"
#include "FileAbstraction.hpp"

#include "dyld_cache_config.h"

#include <assert.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <arpa/inet.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>

#include <CommonCrypto/CommonDigest.h>
#include <CommonCrypto/CommonDigestSPI.h>

#include <algorithm>
#include <fstream>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "OptimizerBranches.h"

#include "CacheFileAbstraction.hpp"
#include "CodeSigningTypes.h"
uint64_t sharedRegionStartExecutableAddress(ArchPair arch)
{
    switch ( arch.arch ) {
        case CPU_TYPE_ARM:
            return ARM_SHARED_REGION_START;
        case CPU_TYPE_I386:
            return SHARED_REGION_BASE_I386;
        case CPU_TYPE_X86_64:
            return SHARED_REGION_BASE_X86_64;
        case CPU_TYPE_ARM64:
            return ARM64_SHARED_REGION_START;
        default:
            terminate("unsupported arch 0x%08X", arch.arch);
    }
}
uint64_t sharedRegionStartWriteableAddress(ArchPair arch, uint64_t textEndAddress)
{
    switch ( arch.arch ) {
        case CPU_TYPE_I386:
        case CPU_TYPE_X86_64:
            // more efficient if code and data never in same 2MB chunk
            return textEndAddress + 0x04000000;
        case CPU_TYPE_ARM:
            return textEndAddress;
        case CPU_TYPE_ARM64:
            return textEndAddress + 32*1024*1024; // <rdar://problem/18564532> Add 32MB padding before arm64 dyld shared cache R/W region
        default:
            terminate("unsupported arch 0x%08X", arch.arch);
    }
}
uint64_t sharedRegionStartReadOnlyAddress(ArchPair arch, uint64_t dataEndAddress, uint64_t textEndAddress)
{
    switch ( arch.arch ) {
        case CPU_TYPE_I386:
        case CPU_TYPE_X86_64:
            // more efficient if code and data never in same 2MB chunk
            return dataEndAddress + 0x04000000;
        case CPU_TYPE_ARM:
            return dataEndAddress;
        case CPU_TYPE_ARM64:
            return dataEndAddress + 32*1024*1024; // <rdar://problem/18564532> Add 32MB padding before arm64 dyld shared cache R/W region
        default:
            terminate("unsupported arch 0x%08X", arch.arch);
    }
}
// returns the log2 alignment for region boundaries of the given arch
uint8_t sharedRegionRegionAlignment(ArchPair arch) {
    switch ( arch.arch ) {
        case CPU_TYPE_I386:
        case CPU_TYPE_X86_64:
            return 12; // 4KB pages
        case CPU_TYPE_ARM:
        case CPU_TYPE_ARM64:
            return 14; // 16KB pages
        default:
            terminate("unsupported arch 0x%08X", arch.arch);
    }
}
uint64_t sharedRegionRegionSize(ArchPair arch) {
    switch ( arch.arch ) {
        case CPU_TYPE_I386:
            return SHARED_REGION_SIZE_I386;
        case CPU_TYPE_X86_64:
            return SHARED_REGION_SIZE_X86_64;
        case CPU_TYPE_ARM:
            return ARM_SHARED_REGION_SIZE;
        case CPU_TYPE_ARM64:
            return ARM64_SHARED_REGION_SIZE;
        default:
            terminate("unsupported arch 0x%08X", arch.arch);
    }
}
static const std::tuple<const char* const, const char* const, const ArchPair> gArchitectures[] = {
    {"i386",    nullptr,  ArchPair( CPU_TYPE_I386,   CPU_SUBTYPE_I386_ALL )},
    {"x86_64",  nullptr,  ArchPair( CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_ALL )},
    {"x86_64h", "x86_64", ArchPair( CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_H )},
    {"armv4t",  nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V4T )},
    {"armv5",   nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V5TEJ )},
    {"armv6",   nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V6 )},
    {"armv7",   nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V7 )},
    {"armv7f",  nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V7F )},
    {"armv7k",  nullptr,  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V7K )},
    {"armv7s",  "armv7",  ArchPair( CPU_TYPE_ARM,    CPU_SUBTYPE_ARM_V7S )},
    {"arm64",   nullptr,  ArchPair( CPU_TYPE_ARM64,  CPU_SUBTYPE_ARM64_ALL )},
};
ArchPair archForString(const std::string& archStr) {
    for (auto& a : gArchitectures) {
        if ( std::get<0>( a ) == archStr )
            return std::get<2>( a );
    }
    terminate("unknown architecture %s", archStr.c_str());
}
std::string stringForArch(ArchPair arch, bool allowUnknown) {
    for (auto& a : gArchitectures) {
        // FIXME: LIB64 is set on some binaries and not others
        if ( std::get<2>( a ).arch == arch.arch
          && std::get<2>( a ).subtype == ( arch.subtype & ~CPU_SUBTYPE_MASK ) )
            return std::get<0>( a );
    }

    auto unknownString =
        "unrecognized cpu type " + std::to_string(arch.arch) +
        " subtype " + std::to_string(arch.subtype);
    if (allowUnknown) return unknownString;
    else terminate("%s", unknownString.c_str());
}
std::string fallbackArchStringForArchString( const std::string& archStr ) {
    for ( auto& a : gArchitectures ) {
        if ( std::get<0>( a ) == archStr && std::get<1>( a ) != nullptr ) {
            return std::get<1>( a );
        }
    }
    return "";
}
SharedCache::SharedCache(Manifest& manifest,
                         const std::string& configuration, const std::string& architecture) :
    _manifest(manifest), _arch(archForString(architecture)),
    _archManifest(manifest.configurations.find(configuration)->second.architectures.find(architecture)->second), _buffer(nullptr),
    _fileSize(0), _vmSize(0), _aliasCount(0), _slideInfoFileOffset(0), _slideInfoBufferSize(0) {
    auto maxCacheVMSize = sharedRegionRegionSize(_arch);

    for ( auto& includedDylib : _archManifest.results.dylibs ) {
        if (includedDylib.second.included) {
            //assert(manifest.dylibs.count(includedDylib.first) > 0);
            //assert(manifest.dylibs.find(includedDylib.first)->second.proxies.count(architecture) > 0);
            MachOProxy* proxy = _manifest.dylibProxy( includedDylib.first, architecture );
            assert(proxy != nullptr);
            _dylibs.push_back(proxy);
        }
    }

    // <rdar://problem/21317611> error out instead of crash if cache has no dylibs
    if ( _dylibs.size() < 30 ) // FIXME: plist should specify required vs optional dylibs
        terminate("missing required minimum set of dylibs");

    for (auto &dylib : _dylibs) {
        _segmentMap[dylib].reserve(dylib->segments.size());
        for (const auto& seg : dylib->segments)
            _segmentMap[dylib].push_back(&seg);
        _aliasCount += dylib->installNameAliases.size();
    }

    sortDylibs(_manifest.dylibOrderFile);
    if ( !_manifest.dirtyDataOrderFile.empty() )
        loadDirtyDataOrderFile(_manifest.dirtyDataOrderFile);

    assignSegmentAddresses();
    if ( _vmSize > maxCacheVMSize )
        verboseLog("%s cache overflow. %lluMB (max %lluMB)", archName().c_str(), _vmSize/1024/1024, maxCacheVMSize/1024/1024);
    while (_vmSize > maxCacheVMSize) {
        auto evictedDylib = manifest.removeLargestLeafDylib( configuration, architecture );
        _dylibs.erase( std::remove( _dylibs.begin(), _dylibs.end(), evictedDylib ), _dylibs.end() );
        _aliasCount -= evictedDylib->installNameAliases.size();
        assignSegmentAddresses();
    }
}
// There is an order file specifying the order in which dylibs are laid out in
// general, as well as an order file specifying the order in which __DATA_DIRTY
// segments are laid out in particular.
//
// The syntax is one dylib (install name) per line. Blank lines are ignored.
// Comments start with the # character.
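//
// For example (install names are illustrative):
//
//     # dylibs that should be laid out first
//     /usr/lib/libSystem.B.dylib
//     /usr/lib/libc++.1.dylib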
static std::unordered_map<std::string, uint32_t> loadOrderFile(const std::string& orderFile) {
    std::unordered_map<std::string, uint32_t> order;

    std::ifstream myfile(orderFile);
    if ( myfile.is_open() ) {
        uint32_t count = 0;
        std::string line;
        while ( std::getline(myfile, line) ) {
            // strip any comment
            size_t pos = line.find('#');
            if ( pos != std::string::npos )
                line.resize(pos);
            // strip trailing whitespace
            while ( !line.empty() && isspace(line.back()) ) {
                line.pop_back();
            }
            if ( !line.empty() )
                order[line] = count++;
        }
        myfile.close();
    }
    else {
        warning("could not load orderfile '%s'", orderFile.c_str());
    }

    return order;
}
void SharedCache::loadDirtyDataOrderFile(const std::string& dirtyDataOrderFile) {
    _dataDirtySegsOrder = loadOrderFile(dirtyDataOrderFile);
}
void SharedCache::sortDylibs(const std::string& dylibOrderFile) {
    std::unordered_map<std::string, uint32_t> dylibOrder;
    if ( !dylibOrderFile.empty() )
        dylibOrder = loadOrderFile(dylibOrderFile);

    std::sort(_dylibs.begin(), _dylibs.end(), [&](const MachOProxy* a,
                                                  const MachOProxy* b) {
        const std::string& pathA = a->installName;
        const std::string& pathB = b->installName;

        const auto& orderA = dylibOrder.find(pathA);
        const auto& orderB = dylibOrder.find(pathB);
        bool foundA = (orderA != dylibOrder.end());
        bool foundB = (orderB != dylibOrder.end());

        // Order all dylibs specified in the order file first, in the order specified in
        // the file, followed by any other dylibs in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;
        else
            return pathA < pathB;
    });
}
void SharedCache::buildUnoptimizedCache(void) {
    _buffer = std::shared_ptr<void>(calloc(_fileSize, 1), free);
    writeCacheHeader();
    writeCacheSegments();
    rebaseAll();
    bindAll();
}
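
// Both cache flavors share buildUnoptimizedCache(); the differences below are
// that a development cache keeps dylib stubs intact (so individual dylibs can
// be replaced at runtime), while a production cache routes calls through
// branch island pools via bypassStubs() and is stamped
// kDyldSharedCacheTypeProduction instead of kDyldSharedCacheTypeDevelopment.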
template <typename P>
void SharedCache::buildForDevelopment(const std::string& cachePath) {
    typedef typename P::E E;
    std::vector<uint64_t> emptyBranchPoolOffsets;
    buildUnoptimizedCache();
    optimizeObjC(false/*not production*/);
    if (_manifest.platform == "osx") {
        optimizeLinkedit(false, false, emptyBranchPoolOffsets);
    }
    else {
        optimizeLinkedit(true, false, emptyBranchPoolOffsets);
    }
    writeSlideInfoV2();

    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_cacheType(kDyldSharedCacheTypeDevelopment);
    recomputeCacheUUID();

    // Calculate the VMSize of the resulting cache
    uint64_t endAddr = 0;

    forEachRegion([&] (void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
        if (vmAddr+size > endAddr)
            endAddr = vmAddr+size;
    });
    _vmSize = endAddr - sharedRegionStartExecutableAddress(_arch);

    if (_manifest.platform == "osx") {
        appendCodeSignature("release");
    }
    else {
        appendCodeSignature("development");
    }
}
template <typename P>
void SharedCache::buildForProduction(const std::string& cachePath) {
    typedef typename P::E E;
    buildUnoptimizedCache();
    optimizeObjC(true/*production*/);
    uint64_t cacheStartAddress = sharedRegionStartExecutableAddress(_arch);

    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_cacheType(kDyldSharedCacheTypeProduction);

    // build vector of branch pool addresses
    std::vector<uint64_t> branchPoolStartAddrs;
    std::vector<uint64_t> branchPoolOffsets;
    const uint64_t* p = (uint64_t*)((uint8_t*)_buffer.get() + header->branchPoolsOffset());
    for (int i=0; i < header->branchPoolsCount(); ++i) {
        uint64_t poolAddr = LittleEndian::get64(p[i]);
        branchPoolStartAddrs.push_back(poolAddr);
        branchPoolOffsets.push_back(poolAddr - cacheStartAddress);
    }

    bypassStubs(branchPoolStartAddrs);
    optimizeLinkedit(true, true, branchPoolOffsets);
    writeSlideInfoV2();

    recomputeCacheUUID();

    // Calculate the VMSize of the resulting cache
    uint64_t endAddr = 0;

    forEachRegion([&] (void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
        if (vmAddr+size > endAddr)
            endAddr = vmAddr+size;
    });
    _vmSize = endAddr - cacheStartAddress;

    appendCodeSignature("release");
}
bool SharedCache::writeCacheMapFile(const std::string& mapPath) {
    FILE* fmap = ::fopen(mapPath.c_str(), "w");
    if ( fmap == nullptr )
        return false;

    std::vector<uint64_t> regionStartAddresses;
    std::vector<uint64_t> regionSizes;
    std::vector<uint64_t> regionFileOffsets;

    forEachRegion([&] (void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
        regionStartAddresses.push_back(vmAddr);
        regionSizes.push_back(size);
        regionFileOffsets.push_back((uint8_t*)content - (uint8_t*)_buffer.get());
        const char* prot = "RW";
        if ( permissions == (VM_PROT_EXECUTE|VM_PROT_READ) )
            prot = "EX";
        else if ( permissions == VM_PROT_READ )
            prot = "RO";
        if ( size > 1024*1024 )
            fprintf(fmap, "mapping  %s %4lluMB 0x%0llX -> 0x%0llX\n", prot, size/(1024*1024), vmAddr, vmAddr+size);
        else
            fprintf(fmap, "mapping  %s %4lluKB 0x%0llX -> 0x%0llX\n", prot, size/1024, vmAddr, vmAddr+size);
    });

    // TODO: add linkedit breakdown
    fprintf(fmap, "\n\n");

    std::unordered_set<const void*> seenHeaders;
    forEachImage([&](const void* machHeader, const char* installName, time_t mtime,
                     ino_t inode, const std::vector<MachOProxy::Segment>& segments) {
        if ( !seenHeaders.count(machHeader) ) {
            seenHeaders.insert(machHeader);

            fprintf(fmap, "%s\n", installName);
            for (const MachOProxy::Segment& seg : segments) {
                uint64_t vmAddr = 0;
                for (int i=0; i < regionSizes.size(); ++i) {
                    if ( (seg.fileOffset >= regionFileOffsets[i]) && (seg.fileOffset < (regionFileOffsets[i]+regionSizes[i])) ) {
                        vmAddr = regionStartAddresses[i] + seg.fileOffset - regionFileOffsets[i];
                    }
                }
                fprintf(fmap, "\t%16s 0x%08llX -> 0x%08llX\n", seg.name.c_str(), vmAddr, vmAddr+seg.size);
            }
        }
    });

    ::fclose(fmap);
    return true;
}
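
// Walk the load commands of one in-cache mach header and collect its segments.
// The same ncmds()/cmdsize() stepping pattern is reused by recomputeCacheUUID()
// and forEachRegion() below.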
template <typename P>
std::vector<MachOProxy::Segment> getSegments(const void* cacheBuffer, const void* machHeader)
{
    std::vector<MachOProxy::Segment> result;
    macho_header<P>* mh = (macho_header<P>*)machHeader;
    const uint32_t cmd_count = mh->ncmds();
    const macho_load_command<P>* cmd = (macho_load_command<P>*)((uint8_t*)mh + sizeof(macho_header<P>));
    for (uint32_t i = 0; i < cmd_count; ++i) {
        if ( cmd->cmd() == macho_segment_command<P>::CMD ) {
            macho_segment_command<P>* segCmd = (macho_segment_command<P>*)cmd;
            MachOProxy::Segment seg;
            seg.name = segCmd->segname();
            seg.size = segCmd->vmsize();
            seg.diskSize = (uint32_t)segCmd->filesize();
            seg.fileOffset = (uint32_t)segCmd->fileoff();
            seg.protection = segCmd->initprot();
            // HACK until lldb fixed in <rdar://problem/20357466>
            if ( (seg.fileOffset == 0) && (strcmp(segCmd->segname(), "__TEXT") == 0) )
                seg.fileOffset = (uint32_t)((char*)machHeader - (char*)cacheBuffer);
            if ( segCmd->nsects() > 0 ) {
                seg.p2align = 0;
                const macho_section<P>* const sectionsStart = (macho_section<P>*)((uint8_t*)segCmd + sizeof(macho_segment_command<P>));
                const macho_section<P>* const sectionsEnd = &sectionsStart[segCmd->nsects()];
                for (const macho_section<P>* sect=sectionsStart; sect < sectionsEnd; ++sect) {
                    if ( sect->align() > seg.p2align )
                        seg.p2align = sect->align();
                }
            }
            else {
                // no sections: assume page alignment
                seg.p2align = 12;
            }
            result.push_back(seg);
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
    return result;
}
template <typename P>
void SharedCache::forEachImage(DylibHandler handler)
{
#if NEW_CACHE_FILE_FORMAT
    terminate("forEachImage() not implemented");
#else
    typedef typename P::E E;
    const dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    const dyldCacheImageInfo<E>* dylibs = (dyldCacheImageInfo<E>*)((char*)_buffer.get() + header->imagesOffset());
    const dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)((char*)_buffer.get() + header->mappingOffset());
    if ( mappings[0].file_offset() != 0 )
        terminate("malformed cache file");
    uint64_t firstImageOffset = 0;
    uint64_t firstRegionAddress = mappings[0].address();
    const void* cacheEnd = (char*)_buffer.get() + _fileSize;
    if ( (const void*)&dylibs[header->imagesCount()] > cacheEnd )
        return;
    for (uint32_t i=0; i < header->imagesCount(); ++i) {
        const char* dylibPath = (char*)_buffer.get() + dylibs[i].pathFileOffset();
        if ( dylibPath > cacheEnd )
            return;
        uint64_t offset = dylibs[i].address() - firstRegionAddress;
        if ( firstImageOffset == 0 )
            firstImageOffset = offset;
        // skip over alias records, which point back into the main image table
        if ( dylibs[i].pathFileOffset() < firstImageOffset )
            continue;
        const void* mh = (char*)_buffer.get() + offset;
        time_t modTime = dylibs[i].modTime();
        ino_t inode = dylibs[i].inode();
        handler(mh, dylibPath, modTime, inode, getSegments<P>(_buffer.get(), mh));
    }
#endif
}
template <typename P>
void SharedCache::recomputeCacheUUID(void)
{
    uint8_t* uuidLoc = nullptr;
#if NEW_CACHE_FILE_FORMAT
    const macho_header<P>* mh = (macho_header<P>*)cacheBuffer;
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)mh + sizeof(macho_header<P>));
    const uint32_t cmd_count = mh->ncmds();
    const macho_load_command<P>* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        if ( cmd->cmd() == LC_UUID ) {
            const macho_uuid_command<P>* uuidCmd = (macho_uuid_command<P>*)cmd;
            uuidLoc = const_cast<uint8_t*>(uuidCmd->uuid());
            break;
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
#else
    dyldCacheHeader<P>* header = (dyldCacheHeader<P>*)_buffer.get();
    uuidLoc = const_cast<uint8_t*>(header->uuid());
#endif

    // Clear existing UUID, then MD5 whole cache buffer.
    bzero(uuidLoc, 16);
    CC_MD5(_buffer.get(), (unsigned)_fileSize, uuidLoc);
    // <rdar://problem/6723729> uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
    uuidLoc[6] = ( uuidLoc[6] & 0x0F ) | ( 3 << 4 );
    uuidLoc[8] = ( uuidLoc[8] & 0x3F ) | 0x80;
}
template <typename P>
void SharedCache::setLinkeditsMappingEndFileOffset(uint64_t newFileSize)
{
#if NEW_CACHE_FILE_FORMAT
    terminate("setLinkeditsMappingEndFileOffset() not implemented");
#else
    typedef typename P::E E;
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)((char*)_buffer.get() + header->mappingOffset());
    uint64_t newReadOnlySize = newFileSize - mappings[2].file_offset();
    mappings[2].set_size(newReadOnlySize);
    header->set_codeSignatureOffset(newFileSize);
    _readOnlyRegion.size = newReadOnlySize;
#endif
}
template <typename P>
void SharedCache::setUnmappedLocalsRange(uint64_t localSymbolsOffset, uint32_t unmappedSize)
{
#if NEW_CACHE_FILE_FORMAT
    terminate("setUnmappedLocalsRange() not implemented");
#else
    typedef typename P::E E;
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_localSymbolsOffset(localSymbolsOffset);
    header->set_localSymbolsSize(unmappedSize);
    // move start of code signature to new end of file
    header->set_codeSignatureOffset(localSymbolsOffset+unmappedSize);
#endif
}
template <typename P>
void SharedCache::setAcceleratorInfoRange(uint64_t accelInfoAddr, uint32_t accelInfoSize)
{
#if NEW_CACHE_FILE_FORMAT
    terminate("setAcceleratorInfoRange() not implemented");
#else
    typedef typename P::E E;
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_accelerateInfoAddr(accelInfoAddr);
    header->set_accelerateInfoSize(accelInfoSize);
#endif
}
template <typename P>
void SharedCache::forEachRegion(RegionHandler handler)
{
#if NEW_CACHE_FILE_FORMAT
    const macho_header<P>* mh = (macho_header<P>*)cacheBuffer;
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)mh + sizeof(macho_header<P>));
    const uint32_t cmd_count = mh->ncmds();
    const macho_load_command<P>* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        if ( cmd->cmd() == macho_segment_command<P>::CMD ) {
            const macho_segment_command<P>* segCmd = (const macho_segment_command<P>*)cmd;
            handler((char*)cacheBuffer + segCmd->fileoff(), segCmd->vmaddr(), segCmd->vmsize(), segCmd->initprot());
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
#else
    typedef typename P::E E;
    const dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    const dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)((char*)_buffer.get() + header->mappingOffset());
    const dyldCacheFileMapping<E>* mappingsEnd = &mappings[header->mappingCount()];
    for (const dyldCacheFileMapping<E>* m=mappings; m < mappingsEnd; ++m) {
        handler((char*)_buffer.get() + m->file_offset(), m->address(), m->size(), m->init_prot());
    }
#endif
}
std::shared_ptr<void> SharedCache::buffer(void) const {
    return _buffer;
}

std::string SharedCache::archName() {
    return stringForArch(_arch);
}
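
// Address-space layout produced below: one read/execute region holding every
// __TEXT segment (with arm64 branch island pools interleaved roughly every
// branchReach() bytes), one read/write region holding __DATA_CONST, __DATA,
// and packed __DATA_DIRTY segments, and one read-only region holding the
// slide info, the remaining read-only segments, and all __LINKEDIT.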
void SharedCache::assignSegmentAddresses()
{
    _branchPoolStarts.clear();
    uint64_t addr = sharedRegionStartExecutableAddress(_arch);

    // assign TEXT segment addresses
    _textRegion.address = addr;
    _textRegion.fileOffset = 0;
    _textRegion.prot = VM_PROT_READ | VM_PROT_EXECUTE;
#if NEW_CACHE_FILE_FORMAT
    addr += 0x4000;  // header
#else
    addr += 0x28000; // header
#endif
    uint64_t brPoolTextSize = branchPoolTextSize(_arch);
    uint64_t brPoolLinkEditSize = branchPoolLinkEditSize(_arch);
    uint64_t brReach = branchReach(_arch);
    uint64_t lastPoolAddress = addr;
    for (auto& dylib : _dylibs) {
        for (auto& seg : _segmentMap[dylib]) {
            if ( seg.base->protection != (VM_PROT_READ | VM_PROT_EXECUTE) )
                continue;
            // Insert branch island pools every 128MB for arm64
            if ( (brPoolTextSize != 0) && ((addr + seg.base->size - lastPoolAddress) > brReach) ) {
                _branchPoolStarts.push_back(addr);
                //verboseLog("adding branch pool at 0x%lX\n", addr);
                lastPoolAddress = addr;
                addr += brPoolTextSize;
            }
            // Keep __TEXT segments 4K or more aligned
            uint64_t startAlignPad = align(addr, std::max(seg.base->p2align, (uint8_t)12)) - addr;
            addr += startAlignPad;
            seg.address = addr;
            seg.cacheFileOffset = addr - _textRegion.address + _textRegion.fileOffset;
            seg.cacheSegSize = align(seg.base->sizeOfSections, 12);
            addr += align(seg.base->sizeOfSections, 12);
        }
    }
    // align TEXT region end
    uint64_t endTextAddress = align(addr, sharedRegionRegionAlignment(_arch));
    _textRegion.size = endTextAddress - _textRegion.address;

    std::unordered_map<const SegmentInfo*, std::string> dataDirtySegPaths;

    // co-locate similar __DATA* segments
    std::vector<SegmentInfo*> dataSegs;
    std::vector<SegmentInfo*> dataConstSegs;
    std::vector<SegmentInfo*> dataDirtySegs;
    for (auto& dylib : _dylibs) {
        for (auto& seg : _segmentMap[dylib]) {
            if ( seg.base->protection == (VM_PROT_READ | VM_PROT_WRITE) ) {
                if ( seg.base->name == "__DATA_CONST" ) {
                    dataConstSegs.push_back(&seg);
                }
                else if ( seg.base->name == "__DATA_DIRTY" ) {
                    dataDirtySegs.push_back(&seg);
                    dataDirtySegPaths[&seg] = dylib->installName;
                }
                else {
                    dataSegs.push_back(&seg);
                }
            }
        }
    }

    // assign __DATA* addresses
    addr = sharedRegionStartWriteableAddress(_arch, endTextAddress);
    _dataRegion.address = addr;
    _dataRegion.fileOffset = _textRegion.fileOffset + _textRegion.size;
    _dataRegion.prot = VM_PROT_READ | VM_PROT_WRITE;

    // layout all __DATA_CONST segments
    for (SegmentInfo* seg : dataConstSegs) {
        // Keep __DATA_CONST segments 4K or more aligned
        uint64_t startAlignPad = align(addr, std::max(seg->base->p2align, (uint8_t)12)) - addr;
        addr += startAlignPad;
        seg->address = addr;
        seg->cacheFileOffset = addr - _dataRegion.address + _dataRegion.fileOffset;
        seg->cacheSegSize = seg->base->sizeOfSections;
        addr += seg->base->sizeOfSections;
    }

    // layout all __DATA segments
    for (SegmentInfo* seg : dataSegs) {
        // Keep __DATA segments 4K or more aligned
        uint64_t startAlignPad = align(addr, std::max(seg->base->p2align, (uint8_t)12)) - addr;
        addr += startAlignPad;
        seg->address = addr;
        seg->cacheFileOffset = addr - _dataRegion.address + _dataRegion.fileOffset;
        seg->cacheSegSize = seg->base->sizeOfSections;
        addr += seg->base->sizeOfSections;
    }

    // layout all __DATA_DIRTY segments, sorted by the dirty-data order file
    addr = align(addr, 12);
    std::sort(dataDirtySegs.begin(), dataDirtySegs.end(), [&](const SegmentInfo *a, const SegmentInfo *b) {
        const std::string& pathA = dataDirtySegPaths[a];
        const std::string& pathB = dataDirtySegPaths[b];

        const auto& orderA = _dataDirtySegsOrder.find(pathA);
        const auto& orderB = _dataDirtySegsOrder.find(pathB);
        bool foundA = (orderA != _dataDirtySegsOrder.end());
        bool foundB = (orderB != _dataDirtySegsOrder.end());

        // Order all __DATA_DIRTY segments specified in the order file first, in
        // the order specified in the file, followed by any other __DATA_DIRTY
        // segments in lexicographic order.
        if ( foundA && foundB )
            return orderA->second < orderB->second;
        else if ( foundA )
            return true;
        else if ( foundB )
            return false;
        else
            return pathA < pathB;
    });
    for (SegmentInfo* seg : dataDirtySegs) {
        // Pack __DATA_DIRTY segments
        uint64_t startAlignPad = align(addr, seg->base->p2align) - addr;
        addr += startAlignPad;
        seg->address = addr;
        seg->cacheFileOffset = addr - _dataRegion.address + _dataRegion.fileOffset;
        seg->cacheSegSize = seg->base->sizeOfSections;
        addr += seg->base->sizeOfSections;
    }

    // align DATA region end
    uint64_t endDataAddress = align(addr, sharedRegionRegionAlignment(_arch));
    _dataRegion.size = endDataAddress - _dataRegion.address;

    // start read-only region
    addr = sharedRegionStartReadOnlyAddress(_arch, endDataAddress, endTextAddress);
    _readOnlyRegion.address = addr;
    _readOnlyRegion.fileOffset = _dataRegion.fileOffset + _dataRegion.size;
    _readOnlyRegion.prot = VM_PROT_READ;

    // reserve space for kernel ASLR slide info at start of r/o region
    _slideInfoBufferSize = align((_dataRegion.size/4096) * 130, 12); // bitmap entry + toc entry
    _slideInfoFileOffset = _readOnlyRegion.fileOffset;
    addr += _slideInfoBufferSize;

    // layout all read-only (but not LINKEDIT) segments
    for (auto& dylib : _dylibs) {
        for (auto& seg : _segmentMap[dylib]) {
            if ( seg.base->protection != VM_PROT_READ )
                continue;
            if ( seg.base->name == "__LINKEDIT" )
                continue;
            // Align segment (at most 4K alignment)
            addr = align(addr, std::min(seg.base->p2align, (uint8_t)12));
            seg.address = addr;
            seg.cacheFileOffset = addr - _readOnlyRegion.address + _readOnlyRegion.fileOffset;
            seg.cacheSegSize = seg.base->size;
            addr += seg.base->size;
            //verboseLog("read-only offset=0x%08X, for path=%s\n", seg.cacheFileOffset, ex->proxy->installName.c_str());
        }
    }

    // layout all LINKEDIT segments (after other read-only segments)
    for (auto& dylib : _dylibs) {
        for (auto& seg : _segmentMap[dylib]) {
            if ( seg.base->protection != VM_PROT_READ )
                continue;
            if ( seg.base->name != "__LINKEDIT" )
                continue;
            // Keep LINKEDIT segments 4K aligned
            addr = align(addr, 12);
            seg.address = addr;
            seg.cacheFileOffset = addr - _readOnlyRegion.address + _readOnlyRegion.fileOffset;
            seg.cacheSegSize = seg.base->diskSize;
            addr += seg.base->size;
            //verboseLog("linkedit offset=0x%08X, for path=%s\n", seg.cacheFileOffset, ex->proxy->installName.c_str());
        }
    }

    // add room for branch pool linkedits
    _branchPoolsLinkEditStartAddr = addr;
    addr += (_branchPoolStarts.size() * brPoolLinkEditSize);

    // align r/o region end
    uint64_t endReadOnlyAddress = align(addr, sharedRegionRegionAlignment(_arch));
    _readOnlyRegion.size = endReadOnlyAddress - _readOnlyRegion.address;
    _fileSize = _readOnlyRegion.fileOffset + _readOnlyRegion.size;

    // assume LINKEDIT optimization reduces LINKEDITs to 40% of original size
    _vmSize = _readOnlyRegion.address + (_readOnlyRegion.size * 2/5) - _textRegion.address;
}
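
// On non-osx platforms the image table has no meaningful inode/mtime, so a
// hash of the install name is stored in the inode field instead (see the
// image records written in writeCacheHeader()).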
uint64_t SharedCache::pathHash(const char* path)
{
    uint64_t sum = 0;
    for (const char* s=path; *s != '\0'; ++s)
        sum += sum*4 + *s;
    return sum;
}
void SharedCache::findDylibAndSegment(const void* contentPtr, std::string& dylibName, std::string& segName)
{
    uint64_t fileOffset = (uint8_t*)contentPtr - (uint8_t*)_buffer.get();
    for (const auto& entry : _segmentMap) {
        const MachOProxy* dylib = entry.first;
        for (const SegmentInfo& segInfo : entry.second) {
            //fprintf(stderr, "   cacheFileOffset=0x%08llX, end=0x%08llX\n", segInfo.cacheFileOffset, segInfo.cacheFileOffset+segInfo.base->size);
            if ( (segInfo.cacheFileOffset <= fileOffset) && (fileOffset < segInfo.cacheFileOffset+segInfo.base->size) ) {
                dylibName = dylib->installName;
                segName = segInfo.base->name;
                return;
            }
        }
    }
    dylibName = "???";
    segName = "???";
}
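
// Slide info v2 threads rebase locations into per-page linked lists ("chains")
// stored in the pointers themselves: the bits selected by delta_mask hold the
// distance (in 4-byte units) to the next rebase location in the page, and
// value_add is subtracted from each pointer value before storing. A delta of
// zero ends the chain. makeRebaseChain() links `offset` onto the chain ending
// at `lastLocationOffset`, stealing zero words as intermediate links when two
// rebase locations are farther apart than the delta field can express.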
template <typename P>
bool SharedCache::makeRebaseChain(uint8_t* pageContent, uint16_t lastLocationOffset, uint16_t offset, const dyldCacheSlideInfo2<typename P::E>* info)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask  = (pint_t)(info->delta_mask());
    const pint_t   valueMask  = ~deltaMask;
    const pint_t   valueAdd   = (pint_t)(info->value_add());
    const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
    const uint32_t maxDelta   = (uint32_t)(deltaMask >> deltaShift);

    pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset+0];
    pint_t lastValue = (pint_t)P::getP(*lastLoc);
    if ( (lastValue - valueAdd) & deltaMask ) {
        std::string dylibName;
        std::string segName;
        findDylibAndSegment((void*)pageContent, dylibName, segName);
        terminate("rebase pointer does not point within cache. lastOffset=0x%04X, seg=%s, dylib=%s\n",
                  lastLocationOffset, segName.c_str(), dylibName.c_str());
    }
    if ( offset <= (lastLocationOffset+maxDelta) ) {
        // previous location in range, make link from it
        // encode this location into last value
        pint_t delta = offset - lastLocationOffset;
        pint_t newLastValue = ((lastValue - valueAdd) & valueMask) | (delta << deltaShift);
        //warning("  add chain: delta = %d, lastOffset=0x%03X, offset=0x%03X, org value=0x%08lX, new value=0x%08lX",
        //        offset - lastLocationOffset, lastLocationOffset, offset, (long)lastValue, (long)newLastValue);
        P::setP(*lastLoc, newLastValue);
        return true;
    }
    //warning("  too big delta = %d, lastOffset=0x%03X, offset=0x%03X", offset - lastLocationOffset, lastLocationOffset, offset);

    // distance between rebase locations is too far
    // see if we can make a chain from non-rebase locations
    uint16_t nonRebaseLocationOffsets[1024];
    unsigned nrIndex = 0;
    for (uint16_t i = lastLocationOffset; i < offset-maxDelta; ) {
        nonRebaseLocationOffsets[nrIndex] = 0;
        for (int j=maxDelta; j > 0; j -= 4) {
            pint_t value = (pint_t)P::getP(*(pint_t*)&pageContent[i+j]);
            if ( value == 0 ) {
                // Steal values of 0 to be used in the rebase chain
                nonRebaseLocationOffsets[nrIndex] = i+j;
                break;
            }
        }
        if ( nonRebaseLocationOffsets[nrIndex] == 0 ) {
            lastValue = (pint_t)P::getP(*lastLoc);
            pint_t newValue = ((lastValue - valueAdd) & valueMask);
            //warning("   no way to make non-rebase delta chain, terminate off=0x%03X, old value=0x%08lX, new value=0x%08lX", lastLocationOffset, (long)lastValue, (long)newValue);
            P::setP(*lastLoc, newValue);
            return false;
        }
        i = nonRebaseLocationOffsets[nrIndex];
        ++nrIndex;
    }

    // we can make chain. go back and add each non-rebase location to chain
    uint16_t prevOffset = lastLocationOffset;
    pint_t* prevLoc = (pint_t*)&pageContent[prevOffset];
    for (int n=0; n < nrIndex; ++n) {
        uint16_t nOffset = nonRebaseLocationOffsets[n];
        assert(nOffset != 0);
        pint_t* nLoc = (pint_t*)&pageContent[nOffset];
        uint32_t delta2 = nOffset - prevOffset;
        pint_t value = (pint_t)P::getP(*prevLoc);
        pint_t newValue;
        if ( value == 0 )
            newValue = (delta2 << deltaShift);
        else
            newValue = ((value - valueAdd) & valueMask) | (delta2 << deltaShift);
        //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta2, nOffset, (long)value, (long)newValue);
        P::setP(*prevLoc, newValue);
        prevOffset = nOffset;
        prevLoc = nLoc;
    }

    uint32_t delta3 = offset - prevOffset;
    pint_t value = (pint_t)P::getP(*prevLoc);
    pint_t newValue;
    if ( value == 0 )
        newValue = (delta3 << deltaShift);
    else
        newValue = ((value - valueAdd) & valueMask) | (delta3 << deltaShift);
    //warning("    non-rebase delta = %d, to off=0x%03X, old value=0x%08lX, new value=0x%08lX", delta3, offset, (long)value, (long)newValue);
    P::setP(*prevLoc, newValue);

    return true;
}
template <typename P>
void SharedCache::addPageStarts(uint8_t* pageContent, const bool bitmap[], const dyldCacheSlideInfo2<typename P::E>* info,
                                std::vector<uint16_t>& pageStarts, std::vector<uint16_t>& pageExtras)
{
    typedef typename P::uint_t pint_t;

    const pint_t   deltaMask = (pint_t)(info->delta_mask());
    const pint_t   valueMask = ~deltaMask;
    const uint32_t pageSize  = info->page_size();
    const pint_t   valueAdd  = (pint_t)(info->value_add());

    uint16_t startValue = DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE;
    uint16_t lastLocationOffset = 0xFFFF;
    for(int i=0; i < pageSize/4; ++i) {
        unsigned offset = i*4;
        if ( bitmap[i] ) {
            if ( startValue == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE ) {
                // found first rebase location in page
                startValue = i;
            }
            else if ( !makeRebaseChain<P>(pageContent, lastLocationOffset, offset, info) ) {
                // can't record all rebasings in one chain
                if ( (startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) == 0 ) {
                    // switch page_start to "extras" which is a list of chain starts
                    unsigned indexInExtras = (unsigned)pageExtras.size();
                    if ( indexInExtras > 0x3FFF )
                        terminate("rebase overflow in page extras");
                    pageExtras.push_back(startValue);
                    startValue = indexInExtras | DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA;
                }
                pageExtras.push_back(i);
            }
            lastLocationOffset = offset;
        }
    }
    if ( lastLocationOffset != 0xFFFF ) {
        // mark end of chain
        pint_t* lastLoc = (pint_t*)&pageContent[lastLocationOffset];
        pint_t lastValue = (pint_t)P::getP(*lastLoc);
        pint_t newValue = ((lastValue - valueAdd) & valueMask);
        P::setP(*lastLoc, newValue);
    }
    if ( startValue & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
        // add end bit to extras
        pageExtras.back() |= DYLD_CACHE_SLIDE_PAGE_ATTR_END;
    }
    pageStarts.push_back(startValue);
}
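
// A page_starts entry is either the word index of the page's single chain
// start, or (when one chain cannot cover the page) an index into page_extras
// flagged with DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA; the run of extra chain starts
// is terminated by DYLD_CACHE_SLIDE_PAGE_ATTR_END.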
template <typename P>
void SharedCache::writeSlideInfoV2(uint64_t deltaMask, uint64_t valueAdd)
{
    // i386 cache does not support sliding because stubs use absolute addressing (text relocs)
    if (_arch.arch == CPU_TYPE_I386) {
        dyldCacheHeader<LittleEndian>* header = (dyldCacheHeader<LittleEndian>*)_buffer.get();
        header->set_slideInfoSize(0);
        return;
    }

    typedef typename P::E E;
    const uint32_t pageSize = 4096;

    // build one 1024/4096 bool bitmap per page (4KB/16KB) of DATA
    uint8_t* const dataStart = (uint8_t*)_buffer.get() + _dataRegion.fileOffset;
    uint8_t* const dataEnd   = dataStart + _dataRegion.size;
    unsigned pageCount = (unsigned)(_dataRegion.size+pageSize-1)/pageSize;
    const long bitmapSize = pageCount*(pageSize/4)*sizeof(bool);
    bool* bitmap = (bool*)calloc(bitmapSize, 1);
    for (void* p : _pointersForASLR) {
        if ( (p < dataStart) || ( p > dataEnd) )
            terminate("DATA pointer for sliding, out of range\n");
        long byteOffset = (long)((uint8_t*)p - dataStart);
        if ( (byteOffset % 4) != 0 )
            terminate("pointer not 4-byte aligned in DATA offset 0x%08lX\n", byteOffset);
        long boolIndex = byteOffset / 4;
        // work around <rdar://24941083> by ignoring pointers to be slid that are NULL on disk
        if ( *(typename P::uint_t*)p == 0 ) {
            std::string dylibName;
            std::string segName;
            findDylibAndSegment(p, dylibName, segName);
            warning("NULL pointer asked to be slid in %s of %s", segName.c_str(), dylibName.c_str());
            continue;
        }
        bitmap[boolIndex] = true;
    }

    // fill in fixed info
    dyldCacheSlideInfo2<E>* info = (dyldCacheSlideInfo2<E>*)((uint8_t*)_buffer.get() + _slideInfoFileOffset);
    info->set_version(2);
    info->set_page_size(pageSize);
    info->set_delta_mask(deltaMask);
    info->set_value_add(valueAdd);

    // set page starts and extras for each page
    std::vector<uint16_t> pageStarts;
    std::vector<uint16_t> pageExtras;
    pageStarts.reserve(pageCount);
    uint8_t* pageContent = dataStart;
    const bool* bitmapForPage = bitmap;
    for (unsigned i=0; i < pageCount; ++i) {
        //warning("page[%d]", i);
        addPageStarts<P>(pageContent, bitmapForPage, info, pageStarts, pageExtras);
        pageContent += pageSize;
        bitmapForPage += (sizeof(bool)*(pageSize/4));
    }
    free((void*)bitmap);

    // fill in computed info
    info->set_page_starts_offset(sizeof(dyldCacheSlideInfo2<E>));
    info->set_page_starts_count((unsigned)pageStarts.size());
    info->set_page_extras_offset((unsigned)(sizeof(dyldCacheSlideInfo2<E>)+pageStarts.size()*sizeof(uint16_t)));
    info->set_page_extras_count((unsigned)pageExtras.size());
    for (unsigned i=0; i < pageStarts.size(); ++i)
        info->set_page_starts(i, pageStarts[i]);
    for (unsigned i=0; i < pageExtras.size(); ++i)
        info->set_page_extras(i, pageExtras[i]);
    //warning("pageCount=%u, page_starts_count=%lu, page_extras_count=%lu", pageCount, pageStarts.size(), pageExtras.size());
    _slideInfoBufferSize = align(info->page_extras_offset() + pageExtras.size()*sizeof(uint16_t), 12);

#if NEW_CACHE_FILE_FORMAT
#else
    unsigned long slideInfoPageSize = align(_slideInfoBufferSize, sharedRegionRegionAlignment(_arch));
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_slideInfoSize(slideInfoPageSize);
#endif
}
void SharedCache::writeSlideInfoV2(void)
{
    switch (_arch.arch) {
        case CPU_TYPE_ARM:
            // linked list based slide info needs high 3 bits of pointer, won't work with > 512MB of pointable content
            if ( (_textRegion.size + _dataRegion.size) > 512*1024*1024 ) {
                warning("cache TEXT+DATA > 512MB, using larger slide info format");
                writeSlideInfo<LittleEndian>();
            }
            else {
                writeSlideInfoV2<Pointer32<LittleEndian>>(0xE0000000, ARM_SHARED_REGION_START);
            }
            break;
        case CPU_TYPE_I386:
            writeSlideInfoV2<Pointer32<LittleEndian>>(0xE0000000, 0x90000000);
            break;
        case CPU_TYPE_X86_64:
            writeSlideInfoV2<Pointer64<LittleEndian>>(0xFFFF000000000000, 0);
            break;
        case CPU_TYPE_ARM64:
            writeSlideInfoV2<Pointer64<LittleEndian>>(0x00FFFF0000000000, 0);
            break;
        default:
            warning("unsupported arch 0x%08X", _arch.arch);
    }
}
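
// Slide info v1 (the fallback format used above): a table of contents maps
// each 4KB DATA page to one of a set of unique 128-byte bitmap entries, in
// which each bit marks a 4-byte word of the page that must be slid.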
template <typename E>
void SharedCache::writeSlideInfo(void)
{
    // i386 cache does not support sliding because stubs use absolute addressing (text relocs)
    if (_arch.arch == CPU_TYPE_I386) {
        dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
        header->set_slideInfoSize(0);
        return;
    }

    // build one 128-byte bitmap per page (4096) of DATA
    uint8_t* const dataStart = (uint8_t*)_buffer.get() + _dataRegion.fileOffset;
    uint8_t* const dataEnd   = dataStart + _dataRegion.size;
    const long bitmapSize = (dataEnd - dataStart)/(4*8);
    uint8_t* bitmap = (uint8_t*)calloc(bitmapSize, 1);
    for (void* p : _pointersForASLR) {
        if ( (p < dataStart) || ( p > dataEnd) )
            terminate("DATA pointer for sliding, out of range\n");
        long offset = (long)((uint8_t*)p - dataStart);
        if ( (offset % 4) != 0 )
            terminate("pointer not 4-byte aligned in DATA offset 0x%08lX\n", offset);
        long byteIndex = offset / (4*8);
        long bitInByte = (offset % 32) >> 2;
        bitmap[byteIndex] |= (1 << bitInByte);
    }

    // allocate worst case size block of all slide info
    const unsigned entry_size = 4096/(8*4); // 8 bits per byte, possible pointer every 4 bytes.
    const unsigned toc_count = (unsigned)bitmapSize/entry_size;
    dyldCacheSlideInfo<E>* slideInfo = (dyldCacheSlideInfo<E>*)((uint8_t*)_buffer.get() + _slideInfoFileOffset);
    slideInfo->set_version(1);
    slideInfo->set_toc_offset(sizeof(dyldCacheSlideInfo<E>));
    slideInfo->set_toc_count(toc_count);
    slideInfo->set_entries_offset((slideInfo->toc_offset()+2*toc_count+127)&(-128));
    slideInfo->set_entries_count(0);
    slideInfo->set_entries_size(entry_size);
    // append each unique entry
    const dyldCacheSlideInfoEntry* bitmapAsEntries = (dyldCacheSlideInfoEntry*)bitmap;
    dyldCacheSlideInfoEntry* const entriesInSlidInfo = (dyldCacheSlideInfoEntry*)((char*)slideInfo+slideInfo->entries_offset());
    int entry_count = 0;
    for (int i=0; i < toc_count; ++i) {
        const dyldCacheSlideInfoEntry* thisEntry = &bitmapAsEntries[i];
        // see if it is same as one already added
        bool found = false;
        for (int j=0; j < entry_count; ++j) {
            if ( memcmp(thisEntry, &entriesInSlidInfo[j], entry_size) == 0 ) {
                slideInfo->set_toc(i, j);
                found = true;
                break;
            }
        }
        if ( !found ) {
            // append to end
            memcpy(&entriesInSlidInfo[entry_count], thisEntry, entry_size);
            slideInfo->set_toc(i, entry_count++);
        }
    }
    slideInfo->set_entries_count(entry_count);
    ::free((void*)bitmap);

#if NEW_CACHE_FILE_FORMAT
#else
    unsigned long slideInfoPageSize = align(slideInfo->entries_offset() + entry_count*entry_size, sharedRegionRegionAlignment(_arch));
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    header->set_slideInfoSize(slideInfoPageSize);
#endif
}
template <typename P>
void SharedCache::writeCacheHeader(void)
{
#if NEW_CACHE_FILE_FORMAT
    macho_header<P>* mh = (macho_header<P>*)cacheBuffer;
    mh->set_magic((sizeof(typename P::uint_t) == 8) ? MH_MAGIC_64 : MH_MAGIC);
    mh->set_cputype(arch.arch);
    mh->set_cpusubtype(arch.subtype);
    mh->set_filetype(MH_DYLIB);
    mh->set_ncmds(0);
    mh->set_sizeofcmds(0);
    mh->set_flags(0);

    uint8_t* cmd = (uint8_t*)cacheBuffer + sizeof(macho_header<P>);

    // write LC_SEGMENT for each region
    macho_segment_command<P>* rxSegCmd = (macho_segment_command<P>*)cmd;
    rxSegCmd->set_cmd(macho_segment_command<P>::CMD);
    rxSegCmd->set_cmdsize(sizeof(macho_segment_command<P>));
    rxSegCmd->set_segname("R.X");
    rxSegCmd->set_vmaddr(_textRegion.address);
    rxSegCmd->set_vmsize(_textRegion.size);
    rxSegCmd->set_fileoff(_textRegion.fileOffset);
    rxSegCmd->set_filesize(_textRegion.size);
    rxSegCmd->set_maxprot(VM_PROT_READ | VM_PROT_EXECUTE);
    rxSegCmd->set_initprot(VM_PROT_READ | VM_PROT_EXECUTE);
    rxSegCmd->set_nsects(0);
    rxSegCmd->set_flags(0);
    mh->set_ncmds(mh->ncmds()+1);
    mh->set_sizeofcmds(mh->sizeofcmds()+rxSegCmd->cmdsize());
    cmd += rxSegCmd->cmdsize();

    macho_segment_command<P>* rwSegCmd = (macho_segment_command<P>*)cmd;
    rwSegCmd->set_cmd(macho_segment_command<P>::CMD);
    rwSegCmd->set_cmdsize(sizeof(macho_segment_command<P>));
    rwSegCmd->set_segname("RW.");
    rwSegCmd->set_vmaddr(_dataRegion.address);
    rwSegCmd->set_vmsize(_dataRegion.size);
    rwSegCmd->set_fileoff(_dataRegion.fileOffset);
    rwSegCmd->set_filesize(_dataRegion.size);
    rwSegCmd->set_maxprot(VM_PROT_READ | VM_PROT_WRITE);
    rwSegCmd->set_initprot(VM_PROT_READ | VM_PROT_WRITE);
    rwSegCmd->set_nsects(0);
    rwSegCmd->set_flags(0);
    mh->set_ncmds(mh->ncmds()+1);
    mh->set_sizeofcmds(mh->sizeofcmds()+rwSegCmd->cmdsize());
    cmd += rwSegCmd->cmdsize();

    macho_segment_command<P>* roSegCmd = (macho_segment_command<P>*)cmd;
    roSegCmd->set_cmd(macho_segment_command<P>::CMD);
    roSegCmd->set_cmdsize(sizeof(macho_segment_command<P>));
    roSegCmd->set_segname("R..");
    roSegCmd->set_vmaddr(_readOnlyRegion.address);
    roSegCmd->set_vmsize(_readOnlyRegion.size);
    roSegCmd->set_fileoff(_readOnlyRegion.fileOffset);
    roSegCmd->set_filesize(_readOnlyRegion.size);
    roSegCmd->set_maxprot(VM_PROT_READ);
    roSegCmd->set_initprot(VM_PROT_READ);
    roSegCmd->set_nsects(0);
    roSegCmd->set_flags(0);
    mh->set_ncmds(mh->ncmds()+1);
    mh->set_sizeofcmds(mh->sizeofcmds()+roSegCmd->cmdsize());
    cmd += roSegCmd->cmdsize();

    // add LC_ID_DYLIB
    macho_dylib_command<P>* dylibIdCmd = (macho_dylib_command<P>*)cmd;
    const char* installName = "/System/Library/Frameworks/OS.framework/OS"; // FIXME
    uint32_t sz = (uint32_t)align(sizeof(macho_dylib_command<P>) + strlen(installName) + 1, 3);
    dylibIdCmd->set_cmd(LC_ID_DYLIB);
    dylibIdCmd->set_cmdsize(sz);
    dylibIdCmd->set_name_offset();
    dylibIdCmd->set_timestamp(1);
    dylibIdCmd->set_current_version(0x10000);
    dylibIdCmd->set_compatibility_version(0x10000);
    strcpy((char*)&cmd[sizeof(macho_dylib_command<P>)], installName);
    mh->set_ncmds(mh->ncmds()+1);
    mh->set_sizeofcmds(mh->sizeofcmds()+sz);
    cmd += dylibIdCmd->cmdsize();

    // add LC_UUID (rewritten later by recomputeCacheUUID())
    macho_uuid_command<P>* uuidCmd = (macho_uuid_command<P>*)cmd;
    uint8_t zeros[16] = { 0 };
    uuidCmd->set_cmd(LC_UUID);
    uuidCmd->set_cmdsize(sizeof(macho_uuid_command<P>));
    uuidCmd->set_uuid(zeros);
    cmd += uuidCmd->cmdsize();

    // build trie of dylib install names and aliases
    std::vector<mach_o::trie::Entry> dylibTrieEntries;
    int pathLengths = 0;
    for (Extra* ex : _sortedDylibs) {
        mach_o::trie::Entry entry;
        entry.name = ex->proxy->installName.c_str();
        entry.address = ex->segments[0].address;
        entry.flags = 0;
        entry.other = 0;
        entry.importName = NULL;
        dylibTrieEntries.push_back(entry);
        pathLengths += (strlen(entry.name) + 1);
        for (const std::string& alias : ex->proxy->installNameAliases) {
            mach_o::trie::Entry aliasEntry;
            aliasEntry.name = alias.c_str();
            aliasEntry.address = ex->segments[0].address;
            aliasEntry.flags = 0;
            aliasEntry.other = 0;
            aliasEntry.importName = NULL;
            dylibTrieEntries.push_back(aliasEntry);
            pathLengths += (strlen(aliasEntry.name) + 1);
        }
    }
    std::vector<uint8_t> dylibTrieBytes;
    dylibTrieBytes.reserve(4096);
    mach_o::trie::makeTrie(dylibTrieEntries, dylibTrieBytes);
    fprintf(stderr, "dylib trie size = %lu bytes, for %lu entries, pathLength=%d\n", dylibTrieBytes.size(), dylibTrieEntries.size(), pathLengths);

    // Build SPI trie (optimized cache only)

    // add LC_CODE_SIGNATURE
    macho_linkedit_data_command<P>* codeSigCmd = (macho_linkedit_data_command<P>*)cmd;
    codeSigCmd->set_cmd(LC_CODE_SIGNATURE);
    codeSigCmd->set_cmdsize(sizeof(macho_linkedit_data_command<P>));
    codeSigCmd->set_dataoff((uint32_t)(_readOnlyRegion.fileOffset + _readOnlyRegion.size));
    codeSigCmd->set_datasize(0); // FIXME
    mh->set_ncmds(mh->ncmds()+1);
    mh->set_sizeofcmds(mh->sizeofcmds()+codeSigCmd->cmdsize());
    cmd += codeSigCmd->cmdsize();
#else
    typedef typename P::E E;

    uint8_t* buffer = (uint8_t*)_buffer.get();
    dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)_buffer.get();
    // "dyld_v1" + spaces + archName(), with enough spaces to pad to 15 bytes
    std::string magic = "dyld_v1";
    magic.append(15 - magic.length() - archName().length(), ' ');
    magic.append(archName());
    assert(magic.length() == 15);
    header->set_magic(magic.c_str());
    header->set_mappingOffset(sizeof(dyldCacheHeader<E>));
    header->set_mappingCount(3);
    header->set_imagesOffset((uint32_t)(header->mappingOffset() + 3*sizeof(dyldCacheFileMapping<E>) + sizeof(uint64_t)*_branchPoolStarts.size()));
    header->set_imagesCount((uint32_t)_dylibs.size() + _aliasCount);
    header->set_dyldBaseAddress(0);
    header->set_codeSignatureOffset(_fileSize);
    header->set_codeSignatureSize(0);
    header->set_slideInfoOffset(_slideInfoFileOffset);
    header->set_slideInfoSize(_slideInfoBufferSize);
    header->set_localSymbolsOffset(0);
    header->set_localSymbolsSize(0);
    header->set_cacheType(kDyldSharedCacheTypeDevelopment);
    header->set_accelerateInfoAddr(0);
    header->set_accelerateInfoSize(0);
    static const uint8_t zero_uuid[16] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 };
    header->set_uuid(zero_uuid); // overwritten later by recomputeCacheUUID()
    header->set_branchPoolsOffset(header->mappingOffset() + 3*sizeof(dyldCacheFileMapping<E>));
    header->set_branchPoolsCount((uint32_t)_branchPoolStarts.size());
    header->set_imagesTextOffset(0);
    header->set_imagesTextCount(_dylibs.size());

    // fill in mappings
    dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)&buffer[header->mappingOffset()];
    mappings[0].set_address(_textRegion.address);
    mappings[0].set_size(_textRegion.size);
    mappings[0].set_file_offset(_textRegion.fileOffset);
    mappings[0].set_max_prot(_textRegion.prot);
    mappings[0].set_init_prot(_textRegion.prot);

    mappings[1].set_address(_dataRegion.address);
    mappings[1].set_size(_dataRegion.size);
    mappings[1].set_file_offset(_dataRegion.fileOffset);
    mappings[1].set_max_prot(_dataRegion.prot);
    mappings[1].set_init_prot(_dataRegion.prot);

    mappings[2].set_address(_readOnlyRegion.address);
    mappings[2].set_size(_readOnlyRegion.size);
    mappings[2].set_file_offset(_readOnlyRegion.fileOffset);
    mappings[2].set_max_prot(_readOnlyRegion.prot);
    mappings[2].set_init_prot(_readOnlyRegion.prot);

    // fill in branch pool addresses
    uint64_t* p = (uint64_t*)&buffer[header->branchPoolsOffset()];
    for (uint64_t pool : _branchPoolStarts) {
        E::set64(*p, pool);
        ++p;
    }

    // fill in image table
    dyldCacheImageInfo<E>* images = (dyldCacheImageInfo<E>*)&buffer[header->imagesOffset()];
    for (auto& dylib : _dylibs) {
        auto textSeg = _segmentMap[dylib][0];
        images->set_address(textSeg.address);
        if (_manifest.platform == "osx") {
            images->set_modTime(dylib->lastModTime);
            images->set_inode(dylib->inode);
        }
        else {
            images->set_modTime(0);
            images->set_inode(pathHash(dylib->installName.c_str()));
        }
        images->set_pathFileOffset((uint32_t)textSeg.cacheFileOffset + dylib->installNameOffsetInTEXT);
        ++images;
    }

    // append aliases image records and strings
    uint32_t offset = header->imagesOffset() + header->imagesCount()*sizeof(dyld_cache_image_info);
    for (auto &dylib : _dylibs) {
        if (!dylib->installNameAliases.empty()) {
            for (const std::string& alias : dylib->installNameAliases) {
                images->set_address(_segmentMap[dylib][0].address);
                if (_manifest.platform == "osx") {
                    images->set_modTime(dylib->lastModTime);
                    images->set_inode(dylib->inode);
                }
                else {
                    images->set_modTime(0);
                    images->set_inode(pathHash(alias.c_str()));
                }
                images->set_pathFileOffset(offset);
                ::strcpy((char*)&buffer[offset], alias.c_str());
                offset += alias.size() + 1;
                ++images;
            }
        }
    }

    // calculate start of text image array and trailing string pool
    offset = (offset + 15) & (-16);
    header->set_imagesTextOffset(offset);
    dyldCacheImageTextInfo<E>* textImages = (dyldCacheImageTextInfo<E>*)&buffer[header->imagesTextOffset()];
    uint32_t stringOffset = offset + (uint32_t)(sizeof(dyldCacheImageTextInfo<E>) * _dylibs.size());

    // write text image array and image names pool at same time
    for (auto& dylib : _dylibs) {
        textImages->set_uuid(dylib->uuid);
        textImages->set_loadAddress(_segmentMap[dylib][0].address);
        textImages->set_textSegmentSize((uint32_t)dylib->segments[0].size);
        textImages->set_pathOffset(stringOffset);
        ::strcpy((char*)&buffer[stringOffset], dylib->installName.c_str());
        stringOffset += dylib->installName.size()+1;
        ++textImages;
    }

    // entire header must fit in the space reserved before the first dylib
    assert(stringOffset < 0x28000);
#endif
}
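
// Rebase/bind pipeline: rebase() adjusts one dylib in place for its new
// segment addresses, and bindAll() resolves inter-dylib references directly.
// Both record every data pointer they touch in _pointersForASLR so that
// writeSlideInfoV2() can later encode them for kernel ASLR sliding.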
void SharedCache::rebase(MachOProxy* dylib)
{
    std::vector<uint64_t> segNewStartAddresses;
    std::vector<uint64_t> segCacheFileOffsets;
    std::vector<uint64_t> segCacheFileSizes;
    for (auto& seg : _segmentMap[dylib]) {
        segNewStartAddresses.push_back(seg.address);
        segCacheFileOffsets.push_back(seg.cacheFileOffset);
        segCacheFileSizes.push_back(seg.cacheSegSize);
    }
    adjustImageForNewSegmentLocations(segNewStartAddresses, segCacheFileOffsets, segCacheFileSizes, _pointersForASLR);
}
void SharedCache::rebaseAll(void)
{
    for (auto& dylib : _dylibs)
        rebase(dylib);
}
void SharedCache::bindAll(void)
{
    std::unordered_map<std::string, void*> dylibPathToMachHeader;
    for (auto& dylib : _dylibs) {
        void* mh = (uint8_t*)_buffer.get() + _segmentMap[dylib][0].cacheFileOffset;
        dylibPathToMachHeader[dylib->installName] = mh;
        for (const std::string& path : dylib->installNameAliases) {
            if (path != dylib->installName) {
                dylibPathToMachHeader[path] = mh;
            }
        }
    }

    bindAllImagesInCache(dylibPathToMachHeader, _pointersForASLR);
}
void SharedCache::writeCacheSegments(void)
{
    uint8_t* cacheBytes = (uint8_t*)_buffer.get();
    for (auto& dylib : _dylibs) {
        struct stat stat_buf;
        const uint8_t* srcDylib;
        bool rootless;

        std::tie(srcDylib, stat_buf, rootless) = fileCache.cacheLoad(dylib->path);
        for (auto& seg : _segmentMap[dylib]) {
            uint32_t segFileOffset = dylib->fatFileOffset + seg.base->fileOffset;
            uint64_t copySize = std::min(seg.cacheSegSize, (uint64_t)seg.base->diskSize);
            verboseLog("copy segment %12s (0x%08llX bytes) to %p (logical addr 0x%llX) for %s", seg.base->name.c_str(), copySize, &cacheBytes[seg.cacheFileOffset], seg.address, dylib->installName.c_str());
            ::memcpy(&cacheBytes[seg.cacheFileOffset], &srcDylib[segFileOffset], copySize);
        }
    }
}
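
// The code signature appended below is a CS_SuperBlob holding three blobs: a
// CS_CodeDirectory (one hash per CS_PAGE_SIZE page plus a special slot for the
// requirements blob), an empty CS_RequirementsBlob, and an empty CS_Blob CMS
// wrapper, since the cache is ad-hoc signed. A CS_Scatter table maps the
// hashed pages back onto the three mapped regions.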
void SharedCache::appendCodeSignature(const std::string& suffix)
{
    // select which codesigning hash
    uint8_t  dscHashType     = CS_HASHTYPE_SHA1;
    uint8_t  dscHashSize     = CS_HASH_SIZE_SHA1;
    uint32_t dscDigestFormat = kCCDigestSHA1;
    if ( _manifest.platform == "osx" ) {
        dscHashType     = CS_HASHTYPE_SHA256;
        dscHashSize     = CS_HASH_SIZE_SHA256;
        dscDigestFormat = kCCDigestSHA256;
    }

    std::string cacheIdentifier = "com.apple.dyld.cache." + archName() + "." + suffix;
    // get pointers into shared cache buffer
    size_t inBufferSize = _fileSize;
    const uint8_t* inBuffer = (uint8_t*)_buffer.get();
    uint8_t* csBuffer = (uint8_t*)_buffer.get()+inBufferSize;

    // layout code signature contents
    size_t idSize = cacheIdentifier.size()+1; // +1 for terminating 0
    uint32_t slotCount = (uint32_t)((inBufferSize + CS_PAGE_SIZE - 1) / CS_PAGE_SIZE);
    uint32_t xSlotCount = CSSLOT_REQUIREMENTS;
    size_t scatOffset = sizeof(CS_CodeDirectory);
    size_t scatSize = 4*sizeof(CS_Scatter); // only 3 used??
    size_t idOffset = scatOffset+scatSize;
    size_t hashOffset = idOffset+idSize + dscHashSize*xSlotCount;
    size_t cdSize = hashOffset + (slotCount * dscHashSize);
    size_t reqsSize = 12;
    size_t cmsSize = sizeof(CS_Blob);
    size_t cdOffset = sizeof(CS_SuperBlob) + 3*sizeof(CS_BlobIndex);
    size_t reqsOffset = cdOffset + cdSize;
    size_t cmsOffset = reqsOffset + reqsSize;
    size_t sbSize = cmsOffset + cmsSize;
    size_t sigSize = align(sbSize, 14); // keep whole cache 16KB aligned

    // create overall code signature which is a superblob
    CS_SuperBlob* sb = reinterpret_cast<CS_SuperBlob*>(csBuffer);
    sb->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
    sb->length = htonl(sbSize);
    sb->count = htonl(3);
    sb->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
    sb->index[0].offset = htonl(cdOffset);
    sb->index[1].type = htonl(CSSLOT_REQUIREMENTS);
    sb->index[1].offset = htonl(reqsOffset);
    sb->index[2].type = htonl(CSSLOT_CMS_SIGNATURE);
    sb->index[2].offset = htonl(cmsOffset);

    // initialize fixed fields of Code Directory
    CS_CodeDirectory* cd = (CS_CodeDirectory*)(((char*)sb)+cdOffset);
    cd->magic = htonl(CSMAGIC_CODEDIRECTORY);
    cd->length = htonl(cdSize);
    cd->version = htonl(0x20100);
    cd->flags = htonl(kSecCodeSignatureAdhoc);
    cd->hashOffset = htonl(hashOffset);
    cd->identOffset = htonl(idOffset);
    cd->nSpecialSlots = htonl(xSlotCount);
    cd->nCodeSlots = htonl(slotCount);
    cd->codeLimit = htonl(inBufferSize);
    cd->hashSize = dscHashSize;
    cd->hashType = dscHashType;
    cd->platform = 0;                           // not platform binary
    cd->pageSize = __builtin_ctz(CS_PAGE_SIZE); // log2(CS_PAGE_SIZE);
    cd->spare2 = 0;                             // unused (must be zero)
    cd->scatterOffset = htonl(scatOffset);

    // initialize dynamic fields of Code Directory
    strcpy((char*)cd + idOffset, cacheIdentifier.c_str());

    // add scatter info
    CS_Scatter* scatter = reinterpret_cast<CS_Scatter*>((char*)cd+scatOffset);
    scatter[0].count = htonl(_textRegion.size/CS_PAGE_SIZE);
    scatter[0].base = htonl(_textRegion.fileOffset/CS_PAGE_SIZE);
    scatter[0].targetOffset = htonll(_textRegion.address);
    scatter[0].spare = 0;
    scatter[1].count = htonl(_dataRegion.size/CS_PAGE_SIZE);
    scatter[1].base = htonl(_dataRegion.fileOffset/CS_PAGE_SIZE);
    scatter[1].targetOffset = htonll(_dataRegion.address);
    scatter[1].spare = 0;
    scatter[2].count = htonl(_readOnlyRegion.size/CS_PAGE_SIZE);
    scatter[2].base = htonl(_readOnlyRegion.fileOffset/CS_PAGE_SIZE);
    scatter[2].targetOffset = htonll(_readOnlyRegion.address);
    scatter[2].spare = 0;

    // fill in empty requirements
    CS_RequirementsBlob* reqs = (CS_RequirementsBlob*)(((char*)sb)+reqsOffset);
    reqs->magic = htonl(CSMAGIC_REQUIREMENTS);
    reqs->length = htonl(sizeof(CS_RequirementsBlob));
    reqs->data = 0;

    // fill in empty CMS blob for ad-hoc signing
    CS_Blob* cms = (CS_Blob*)(((char*)sb)+cmsOffset);
    cms->magic = htonl(CSMAGIC_BLOBWRAPPER);
    cms->length = htonl(sizeof(CS_Blob));

    // add special slot hashes
    uint8_t* hashSlot = (uint8_t*)cd + hashOffset;
    uint8_t* reqsHashSlot = &hashSlot[-CSSLOT_REQUIREMENTS*dscHashSize];
    CCDigest(dscDigestFormat, (uint8_t*)reqs, sizeof(CS_RequirementsBlob), reqsHashSlot);

    // alter header of cache to record size and location of code signature
    // do this *before* hashing each page
    dyldCacheHeader<LittleEndian>* header = (dyldCacheHeader<LittleEndian>*)inBuffer;
    header->set_codeSignatureOffset(inBufferSize);
    header->set_codeSignatureSize(sigSize);

    // compute hash of each page
    const uint8_t* code = inBuffer;
    for (uint32_t i=0; i < slotCount; ++i) {
        CCDigest(dscDigestFormat, code, CS_PAGE_SIZE, hashSlot);
        hashSlot += dscHashSize;
        code += CS_PAGE_SIZE;
    }

    // hash of entire code directory (cdHash) uses same hash as each page
    uint8_t fullCdHash[dscHashSize];
    CCDigest(dscDigestFormat, (const uint8_t*)cd, cdSize, fullCdHash);
    // Note: cdHash is defined as first 20 bytes of hash
    memcpy(_cdHash, fullCdHash, 20);

    // increase file size to include newly appended code signature
    _fileSize += sigSize;
}
std::string SharedCache::cdHashString()
{
    char buff[sizeof(_cdHash)*2 + 1];
    for (int i = 0; i < sizeof(_cdHash); ++i)
        sprintf(&buff[2*i], "%2.2x", _cdHash[i]);
    return buff;
}
#pragma mark Template dispatchers

#define TEMPLATE_DISPATCHER_BODY(method,...)                      \
    switch( _arch.arch ) {                                        \
        case CPU_TYPE_ARM:                                        \
        case CPU_TYPE_I386:                                       \
            method<Pointer32<LittleEndian>>(__VA_ARGS__);         \
            break;                                                \
        case CPU_TYPE_X86_64:                                     \
        case CPU_TYPE_ARM64:                                      \
            method<Pointer64<LittleEndian>>(__VA_ARGS__);         \
            break;                                                \
        default:                                                  \
            terminate("unsupported arch 0x%08X", _arch.arch);     \
    }

void SharedCache::writeCacheHeader() {
    TEMPLATE_DISPATCHER_BODY(writeCacheHeader)
}

void SharedCache::buildForDevelopment(const std::string& cachePath) {
    TEMPLATE_DISPATCHER_BODY(buildForDevelopment, cachePath)
}

void SharedCache::buildForProduction(const std::string& cachePath) {
    TEMPLATE_DISPATCHER_BODY(buildForProduction, cachePath)
}

void SharedCache::setLinkeditsMappingEndFileOffset(uint64_t newFileSize) {
    TEMPLATE_DISPATCHER_BODY(setLinkeditsMappingEndFileOffset, newFileSize)
}

void SharedCache::setUnmappedLocalsRange(uint64_t localSymbolsOffset, uint32_t unmappedSize) {
    TEMPLATE_DISPATCHER_BODY(setUnmappedLocalsRange, localSymbolsOffset, unmappedSize)
}

void SharedCache::setAcceleratorInfoRange(uint64_t accelInfoAddr, uint32_t accelInfoSize) {
    TEMPLATE_DISPATCHER_BODY(setAcceleratorInfoRange, accelInfoAddr, accelInfoSize)
}

void SharedCache::recomputeCacheUUID(void) {
    TEMPLATE_DISPATCHER_BODY(recomputeCacheUUID)
}

void SharedCache::forEachImage(DylibHandler handler) {
    TEMPLATE_DISPATCHER_BODY(forEachImage, handler)
}

void SharedCache::forEachRegion(RegionHandler handler) {
    TEMPLATE_DISPATCHER_BODY(forEachRegion, handler)
}