]> git.saurik.com Git - apple/dyld.git/blob - dyld3/shared-cache/AdjustDylibSegments.cpp
c01f2c9b13a2bcec8bf525f3e74233c2577cfb98
[apple/dyld.git] / dyld3 / shared-cache / AdjustDylibSegments.cpp
1 /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
2 *
3 * Copyright (c) 2014 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <dirent.h>
27 #include <sys/errno.h>
28 #include <sys/fcntl.h>
29 #include <mach-o/loader.h>
30 #include <mach-o/fat.h>
31 #include <assert.h>
32
33 #include <fstream>
34 #include <string>
35 #include <algorithm>
36 #include <unordered_map>
37 #include <unordered_set>
38
39 #include "CacheBuilder.h"
40 #include "Diagnostics.h"
41 #include "DyldSharedCache.h"
42 #include "Trie.hpp"
43 #include "MachOFileAbstraction.hpp"
44 #include "MachOLoaded.h"
45 #include "MachOAnalyzer.h"
46
47
48 #ifndef EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE
49 #define EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE 0x02
50 #endif
51
52 namespace {
53
// Adjustor rewrites one dylib image in place so it is valid at its new
// location inside the shared cache: data pointers, instruction immediates,
// the symbol table, chained fixups, and the LINKEDIT blobs are all fixed up
// using the per-segment slides computed from _mappingInfo.
// P is the pointer-size/endianness abstraction from MachOFileAbstraction.hpp.
template <typename P>
class Adjustor {
public:
                    Adjustor(DyldSharedCache* cacheBuffer, macho_header<P>* mh, const std::vector<CacheBuilder::SegmentMappingInfo>& mappingInfo, Diagnostics& diag);
    // Top-level entry point: applies all adjustment passes for this image,
    // recording any failures in the Diagnostics passed to the constructor.
    void            adjustImageForNewSegmentLocations(CacheBuilder::ASLR_Tracker& aslrTracker,
                                                      CacheBuilder::LOH_Tracker& lohTracker,
                                                      const CacheBuilder::CacheCoalescedText& coalescedText,
                                                      const CacheBuilder::DylibTextCoalescer& textCoalescer);

private:
    void            adjustReferencesUsingInfoV2(CacheBuilder::ASLR_Tracker& aslrTracker, CacheBuilder::LOH_Tracker& lohTracker,
                                                const CacheBuilder::CacheCoalescedText& coalescedText,
                                                const CacheBuilder::DylibTextCoalescer& textCoalescer);
    // Applies a single DYLD_CACHE_ADJ_V2_* fixup at mappedAddr.  The last*
    // out-parameters carry state between consecutive calls (movw/movt pairs).
    void            adjustReference(uint32_t kind, uint8_t* mappedAddr, uint64_t fromNewAddress, uint64_t toNewAddress, int64_t adjust, int64_t targetSlide,
                                    uint64_t imageStartAddress, uint64_t imageEndAddress,
                                    CacheBuilder::ASLR_Tracker& aslrTracker, CacheBuilder::LOH_Tracker* lohTracker,
                                    uint32_t*& lastMappedAddr32, uint32_t& lastKind, uint64_t& lastToNewAddress);
    void            adjustDataPointers(CacheBuilder::ASLR_Tracker& aslrTracker);
    void            slidePointer(int segIndex, uint64_t segOffset, uint8_t type, CacheBuilder::ASLR_Tracker& aslrTracker);
    void            adjustSymbolTable();
    void            adjustChainedFixups();
    void            adjustExportsTrie(std::vector<uint8_t>& newTrieBytes);
    void            rebuildLinkEdit();
    void            adjustCode();
    void            adjustInstruction(uint8_t kind, uint8_t* textLoc, uint64_t codeToDataDelta);
    void            rebuildLinkEditAndLoadCommands(const CacheBuilder::DylibTextCoalescer& textCoalescer);
    // Returns the slide for the segment containing the original (pre-cache)
    // address 'addr'; records a diagnostic error and returns 0 if unknown.
    uint64_t        slideForOrigAddress(uint64_t addr);

    typedef typename P::uint_t pint_t;
    typedef typename P::E E;

    DyldSharedCache*                                        _cacheBuffer;
    macho_header<P>*                                        _mh;                        // dylib's mach header, already in the cache buffer
    Diagnostics&                                            _diagnostics;
    const uint8_t*                                          _linkeditBias       = nullptr;  // so _linkeditBias[fileOffset] addresses LINKEDIT data in the cache
    unsigned                                                _linkeditSegIndex   = 0;
    bool                                                    _maskPointers       = false;    // arm64/arm64_32: strip tag bits from high nibble
    bool                                                    _splitSegInfoV2     = false;    // true when split-seg info uses the V2 format
    const char*                                             _installName        = nullptr;
    macho_symtab_command<P>*                                _symTabCmd          = nullptr;
    macho_dysymtab_command<P>*                              _dynSymTabCmd       = nullptr;
    macho_dyld_info_command<P>*                             _dyldInfo           = nullptr;
    macho_linkedit_data_command<P>*                         _splitSegInfoCmd    = nullptr;
    macho_linkedit_data_command<P>*                         _functionStartsCmd  = nullptr;
    macho_linkedit_data_command<P>*                         _dataInCodeCmd      = nullptr;
    macho_linkedit_data_command<P>*                         _exportTrieCmd      = nullptr;
    macho_linkedit_data_command<P>*                         _chainedFixupsCmd   = nullptr;
    std::vector<uint64_t>                                   _segOrigStartAddresses;     // original vmaddr of each segment, in load-command order
    std::vector<uint64_t>                                   _segSlides;                 // new cache address minus original vmaddr, per segment
    std::vector<macho_segment_command<P>*>                  _segCmds;
    const std::vector<CacheBuilder::SegmentMappingInfo>&    _mappingInfo;
};
106
// One-pass scan of the load commands: caches pointers to the commands the
// adjustment passes need, records each segment's original start address and
// its slide (new unslid cache address minus original vmaddr), and locates the
// LINKEDIT content inside the cache buffer.  Reports an error through 'diag'
// if the dylib has no LC_SEGMENT_SPLIT_INFO (required to adjust it).
template <typename P>
Adjustor<P>::Adjustor(DyldSharedCache* cacheBuffer, macho_header<P>* mh, const std::vector<CacheBuilder::SegmentMappingInfo>& mappingInfo, Diagnostics& diag)
    : _cacheBuffer(cacheBuffer), _mh(mh), _diagnostics(diag), _mappingInfo(mappingInfo)
{
    assert((mh->magic() == MH_MAGIC) || (mh->magic() == MH_MAGIC_64));
    macho_segment_command<P>* segCmd;
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)mh + sizeof(macho_header<P>));
    const uint32_t cmd_count = mh->ncmds();
    const macho_load_command<P>* cmd = cmds;
    unsigned segIndex = 0;      // runs parallel to _mappingInfo: one entry per LC_SEGMENT in order
    for (uint32_t i = 0; i < cmd_count; ++i) {
        switch (cmd->cmd()) {
            case LC_ID_DYLIB:
                _installName = ((macho_dylib_command<P>*)cmd)->name();
                break;
            case LC_SYMTAB:
                _symTabCmd = (macho_symtab_command<P>*)cmd;
                break;
            case LC_DYSYMTAB:
                _dynSymTabCmd = (macho_dysymtab_command<P>*)cmd;
                break;
            case LC_DYLD_INFO:
            case LC_DYLD_INFO_ONLY:
                _dyldInfo = (macho_dyld_info_command<P>*)cmd;
                break;
            case LC_SEGMENT_SPLIT_INFO:
                _splitSegInfoCmd = (macho_linkedit_data_command<P>*)cmd;
                break;
            case LC_FUNCTION_STARTS:
                _functionStartsCmd = (macho_linkedit_data_command<P>*)cmd;
                break;
            case LC_DATA_IN_CODE:
                _dataInCodeCmd = (macho_linkedit_data_command<P>*)cmd;
                break;
            case LC_DYLD_CHAINED_FIXUPS:
                _chainedFixupsCmd = (macho_linkedit_data_command<P>*)cmd;
                break;
            case LC_DYLD_EXPORTS_TRIE:
                _exportTrieCmd = (macho_linkedit_data_command<P>*)cmd;
                break;
            case macho_segment_command<P>::CMD:
                segCmd = (macho_segment_command<P>*)cmd;
                _segCmds.push_back(segCmd);
                _segOrigStartAddresses.push_back(segCmd->vmaddr());
                // slide = where the segment landed in the cache minus where it was linked
                _segSlides.push_back(_mappingInfo[segIndex].dstCacheUnslidAddress - segCmd->vmaddr());
                if ( strcmp(segCmd->segname(), "__LINKEDIT") == 0 ) {
                    // bias chosen so that _linkeditBias[fileOffset] addresses LINKEDIT bytes in the cache
                    _linkeditBias = (uint8_t*)_mappingInfo[segIndex].dstSegment - segCmd->fileoff();
                    _linkeditSegIndex = segIndex;
                }
                ++segIndex;
                break;
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
    // arm64 family pointers can carry extra (tag) bits in the top nibble
    _maskPointers = (P::E::get32(mh->cputype()) == CPU_TYPE_ARM64) || (P::E::get32(mh->cputype()) == CPU_TYPE_ARM64_32);
    if ( _splitSegInfoCmd != NULL ) {
        // first byte of the split-seg payload identifies the V2 format
        const uint8_t* infoStart = &_linkeditBias[_splitSegInfoCmd->dataoff()];
        _splitSegInfoV2 = (*infoStart == DYLD_CACHE_ADJ_V2_FORMAT);
    }
    else {
        _diagnostics.error("missing LC_SEGMENT_SPLIT_INFO in %s", _installName);
    }
}
170
// Top-level adjustment pass for one dylib.  Uses the split-seg V2 path
// (reference-kind fixups) when the image has V2 info, otherwise the older
// pointer+code path; then fixes the symbol table and chained fixups and
// rebuilds LINKEDIT.  Each pass short-circuits as soon as a previous one
// has recorded an error in _diagnostics.
template <typename P>
void Adjustor<P>::adjustImageForNewSegmentLocations(CacheBuilder::ASLR_Tracker& aslrTracker,
                                                    CacheBuilder::LOH_Tracker& lohTracker,
                                                    const CacheBuilder::CacheCoalescedText& coalescedText,
                                                    const CacheBuilder::DylibTextCoalescer& textCoalescer)
{
    if ( _diagnostics.hasError() )
        return;
    if ( _splitSegInfoV2 ) {
        adjustReferencesUsingInfoV2(aslrTracker, lohTracker, coalescedText, textCoalescer);
    }
    else {
        // old split-seg format: separate passes for data pointers and code
        adjustDataPointers(aslrTracker);
        adjustCode();
    }
    if ( _diagnostics.hasError() )
        return;
    adjustSymbolTable();
    if ( _diagnostics.hasError() )
        return;
    adjustChainedFixups();
    if ( _diagnostics.hasError() )
        return;
    rebuildLinkEditAndLoadCommands(textCoalescer);

#if DEBUG
    // debug builds: re-validate the adjusted image and report (but do not fail on) problems
    Diagnostics diag;
    ((dyld3::MachOAnalyzer*)_mh)->validateDyldCacheDylib(diag, _installName);
    if ( diag.hasError() ) {
        fprintf(stderr, "%s\n", diag.errorMessage().c_str());
    }
#endif
}
204
205 template <typename P>
206 uint64_t Adjustor<P>::slideForOrigAddress(uint64_t addr)
207 {
208 for (unsigned i=0; i < _segOrigStartAddresses.size(); ++i) {
209 if ( (_segOrigStartAddresses[i] <= addr) && (addr < (_segOrigStartAddresses[i]+_segCmds[i]->vmsize())) )
210 return _segSlides[i];
211 }
212 // On arm64, high nibble of pointers can have extra bits
213 if ( _maskPointers && (addr & 0xF000000000000000) ) {
214 return slideForOrigAddress(addr & 0x0FFFFFFFFFFFFFFF);
215 }
216 _diagnostics.error("slide not known for dylib address 0x%llX in %s", addr, _installName);
217 return 0;
218 }
219
// Repacks this dylib's LINKEDIT into a single dense layout (dropping rebase
// info, code signature, and code-sign DRs) and rewrites every load command
// to reflect the new segment addresses and the new LINKEDIT file offsets.
// Removed load commands (LC_RPATH, LC_CODE_SIGNATURE, LC_DYLIB_CODE_SIGN_DRS)
// are compacted out of the load-command area and the header updated.
template <typename P>
void Adjustor<P>::rebuildLinkEditAndLoadCommands(const CacheBuilder::DylibTextCoalescer& textCoalescer)
{
    // Exports trie is only data structure in LINKEDIT that might grow
    std::vector<uint8_t> newTrieBytes;
    adjustExportsTrie(newTrieBytes);

    // Remove: code signature, rebase info, code-sign-dirs, split seg info
    // Lay out the new LINKEDIT: each blob starts where the previous one ends.
    uint32_t chainedFixupsOffset = 0;
    uint32_t chainedFixupsSize   = _chainedFixupsCmd ? _chainedFixupsCmd->datasize() : 0;
    uint32_t bindOffset          = chainedFixupsOffset + chainedFixupsSize;
    uint32_t bindSize            = _dyldInfo ? _dyldInfo->bind_size() : 0;
    uint32_t weakBindOffset      = bindOffset + bindSize;
    uint32_t weakBindSize        = _dyldInfo ? _dyldInfo->weak_bind_size() : 0;
    uint32_t lazyBindOffset      = weakBindOffset + weakBindSize;
    uint32_t lazyBindSize        = _dyldInfo ? _dyldInfo->lazy_bind_size() : 0;
    uint32_t exportOffset        = lazyBindOffset + lazyBindSize;
    uint32_t exportSize          = (uint32_t)newTrieBytes.size();
    uint32_t splitSegInfoOffset  = exportOffset + exportSize;
    uint32_t splitSegInfosSize   = (_splitSegInfoCmd ? _splitSegInfoCmd->datasize() : 0);
    uint32_t funcStartsOffset    = splitSegInfoOffset + splitSegInfosSize;
    uint32_t funcStartsSize      = (_functionStartsCmd ? _functionStartsCmd->datasize() : 0);
    uint32_t dataInCodeOffset    = funcStartsOffset + funcStartsSize;
    uint32_t dataInCodeSize      = (_dataInCodeCmd ? _dataInCodeCmd->datasize() : 0);
    uint32_t symbolTableOffset   = dataInCodeOffset + dataInCodeSize;
    uint32_t symbolTableSize     = _symTabCmd->nsyms() * sizeof(macho_nlist<P>);
    uint32_t indirectTableOffset = symbolTableOffset + symbolTableSize;
    uint32_t indirectTableSize   = _dynSymTabCmd->nindirectsyms() * sizeof(uint32_t);
    uint32_t symbolStringsOffset = indirectTableOffset + indirectTableSize;
    uint32_t symbolStringsSize   = _symTabCmd->strsize();
    uint32_t newLinkEditSize     = symbolStringsOffset + symbolStringsSize;

    // presumably align() rounds up to 2^12 (a 4KB page) — helper defined elsewhere
    size_t linkeditBufferSize = align(_segCmds[_linkeditSegIndex]->vmsize(), 12);
    if ( linkeditBufferSize < newLinkEditSize ) {
        _diagnostics.error("LINKEDIT overflow in %s", _installName);
        return;
    }

    // Build the new LINKEDIT in a temporary buffer by copying each surviving blob.
    uint8_t* newLinkeditBufer = (uint8_t*)::calloc(linkeditBufferSize, 1);
    if ( chainedFixupsSize )
        memcpy(&newLinkeditBufer[chainedFixupsOffset], &_linkeditBias[_chainedFixupsCmd->dataoff()], chainedFixupsSize);
    if ( bindSize )
        memcpy(&newLinkeditBufer[bindOffset], &_linkeditBias[_dyldInfo->bind_off()], bindSize);
    if ( lazyBindSize )
        memcpy(&newLinkeditBufer[lazyBindOffset], &_linkeditBias[_dyldInfo->lazy_bind_off()], lazyBindSize);
    if ( weakBindSize )
        memcpy(&newLinkeditBufer[weakBindOffset], &_linkeditBias[_dyldInfo->weak_bind_off()], weakBindSize);
    if ( exportSize )
        memcpy(&newLinkeditBufer[exportOffset], &newTrieBytes[0], exportSize);
    if ( splitSegInfosSize )
        memcpy(&newLinkeditBufer[splitSegInfoOffset], &_linkeditBias[_splitSegInfoCmd->dataoff()], splitSegInfosSize);
    if ( funcStartsSize )
        memcpy(&newLinkeditBufer[funcStartsOffset], &_linkeditBias[_functionStartsCmd->dataoff()], funcStartsSize);
    if ( dataInCodeSize )
        memcpy(&newLinkeditBufer[dataInCodeOffset], &_linkeditBias[_dataInCodeCmd->dataoff()], dataInCodeSize);
    if ( symbolTableSize )
        memcpy(&newLinkeditBufer[symbolTableOffset], &_linkeditBias[_symTabCmd->symoff()], symbolTableSize);
    if ( indirectTableSize )
        memcpy(&newLinkeditBufer[indirectTableOffset], &_linkeditBias[_dynSymTabCmd->indirectsymoff()], indirectTableSize);
    if ( symbolStringsSize )
        memcpy(&newLinkeditBufer[symbolStringsOffset], &_linkeditBias[_symTabCmd->stroff()], symbolStringsSize);

    // Install the new LINKEDIT into the cache segment and zero the tail.
    memcpy(_mappingInfo[_linkeditSegIndex].dstSegment, newLinkeditBufer, newLinkEditSize);
    ::bzero(((uint8_t*)_mappingInfo[_linkeditSegIndex].dstSegment)+newLinkEditSize, linkeditBufferSize-newLinkEditSize);
    ::free(newLinkeditBufer);
    uint32_t linkeditStartOffset = (uint32_t)_mappingInfo[_linkeditSegIndex].dstCacheFileOffset;

    // updates load commands and removed ones no longer needed
    macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)_mh + sizeof(macho_header<P>));
    uint32_t cmd_count = _mh->ncmds();
    const macho_load_command<P>* cmd = cmds;
    const unsigned origLoadCommandsSize = _mh->sizeofcmds();
    unsigned bytesRemaining = origLoadCommandsSize;
    unsigned removedCount = 0;
    unsigned segIndex = 0;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        macho_symtab_command<P>*           symTabCmd;
        macho_dysymtab_command<P>*         dynSymTabCmd;
        macho_dyld_info_command<P>*        dyldInfo;
        macho_linkedit_data_command<P>*    functionStartsCmd;
        macho_linkedit_data_command<P>*    dataInCodeCmd;
        macho_linkedit_data_command<P>*    chainedFixupsCmd;
        macho_linkedit_data_command<P>*    exportTrieCmd;
        macho_linkedit_data_command<P>*    splitSegInfoCmd;
        macho_segment_command<P>*          segCmd;
        macho_routines_command<P>*         routinesCmd;
        macho_dylib_command<P>*            dylibIDCmd;
        uint32_t cmdSize = cmd->cmdsize();
        int32_t segFileOffsetDelta;
        bool remove = false;
        switch ( cmd->cmd() ) {
            case LC_ID_DYLIB:
                dylibIDCmd = (macho_dylib_command<P>*)cmd;
                dylibIDCmd->set_timestamp(2); // match what static linker sets in LC_LOAD_DYLIB
                break;
            case LC_SYMTAB:
                symTabCmd = (macho_symtab_command<P>*)cmd;
                symTabCmd->set_symoff(linkeditStartOffset+symbolTableOffset);
                symTabCmd->set_stroff(linkeditStartOffset+symbolStringsOffset);
                break;
            case LC_DYSYMTAB:
                dynSymTabCmd = (macho_dysymtab_command<P>*)cmd;
                dynSymTabCmd->set_indirectsymoff(linkeditStartOffset+indirectTableOffset);
                break;
            case LC_DYLD_INFO:
            case LC_DYLD_INFO_ONLY:
                // rebase info is dropped entirely; all other blobs get their new offsets
                dyldInfo = (macho_dyld_info_command<P>*)cmd;
                dyldInfo->set_rebase_off(0);
                dyldInfo->set_rebase_size(0);
                dyldInfo->set_bind_off(bindSize ? linkeditStartOffset+bindOffset : 0);
                dyldInfo->set_bind_size(bindSize);
                dyldInfo->set_weak_bind_off(weakBindSize ? linkeditStartOffset+weakBindOffset : 0);
                dyldInfo->set_weak_bind_size(weakBindSize);
                dyldInfo->set_lazy_bind_off(lazyBindSize ? linkeditStartOffset+lazyBindOffset : 0);
                dyldInfo->set_lazy_bind_size(lazyBindSize);
                dyldInfo->set_export_off(exportSize ? linkeditStartOffset+exportOffset : 0);
                dyldInfo->set_export_size(exportSize);
                break;
            case LC_FUNCTION_STARTS:
                functionStartsCmd = (macho_linkedit_data_command<P>*)cmd;
                functionStartsCmd->set_dataoff(linkeditStartOffset+funcStartsOffset);
                break;
            case LC_DATA_IN_CODE:
                dataInCodeCmd = (macho_linkedit_data_command<P>*)cmd;
                dataInCodeCmd->set_dataoff(linkeditStartOffset+dataInCodeOffset);
                break;
            case LC_DYLD_CHAINED_FIXUPS:
                chainedFixupsCmd = (macho_linkedit_data_command<P>*)cmd;
                chainedFixupsCmd->set_dataoff(chainedFixupsSize ? linkeditStartOffset+chainedFixupsOffset : 0);
                chainedFixupsCmd->set_datasize(chainedFixupsSize);
                break;
            case LC_DYLD_EXPORTS_TRIE:
                exportTrieCmd = (macho_linkedit_data_command<P>*)cmd;
                exportTrieCmd->set_dataoff(exportSize ? linkeditStartOffset+exportOffset : 0);
                exportTrieCmd->set_datasize(exportSize);
                break;
            case macho_routines_command<P>::CMD:
                routinesCmd = (macho_routines_command<P>*)cmd;
                routinesCmd->set_init_address(routinesCmd->init_address()+slideForOrigAddress(routinesCmd->init_address()));
                break;
            case macho_segment_command<P>::CMD:
                // rewrite segment to its cache location; sections move by the same slide
                segCmd = (macho_segment_command<P>*)cmd;
                segFileOffsetDelta = (int32_t)(_mappingInfo[segIndex].dstCacheFileOffset - segCmd->fileoff());
                segCmd->set_vmaddr(_mappingInfo[segIndex].dstCacheUnslidAddress);
                segCmd->set_vmsize(_mappingInfo[segIndex].dstCacheSegmentSize);
                segCmd->set_fileoff(_mappingInfo[segIndex].dstCacheFileOffset);
                segCmd->set_filesize(_mappingInfo[segIndex].dstCacheFileSize);
                if ( strcmp(segCmd->segname(), "__LINKEDIT") == 0 )
                    segCmd->set_vmsize(linkeditBufferSize);
                if ( segCmd->nsects() > 0 ) {
                    macho_section<P>* const sectionsStart = (macho_section<P>*)((uint8_t*)segCmd + sizeof(macho_segment_command<P>));
                    macho_section<P>* const sectionsEnd = &sectionsStart[segCmd->nsects()];

                    for (macho_section<P>* sect=sectionsStart; sect < sectionsEnd; ++sect) {
                        if ( (strcmp(segCmd->segname(), "__TEXT") == 0) && textCoalescer.sectionWasCoalesced(sect->sectname())) {
                            // Put coalesced sections at the end of the segment
                            sect->set_addr(segCmd->vmaddr() + segCmd->filesize());
                            sect->set_offset(0);
                            sect->set_size(0);
                        } else {
                            sect->set_addr(sect->addr() + _segSlides[segIndex]);
                            if ( sect->offset() != 0 )
                                sect->set_offset(sect->offset() + segFileOffsetDelta);
                        }
                    }
                }
                ++segIndex;
                break;
            case LC_RPATH:
                _diagnostics.warning("dyld shared cache does not support LC_RPATH found in %s", _installName);
                remove = true;
                break;
            case LC_SEGMENT_SPLIT_INFO:
                splitSegInfoCmd = (macho_linkedit_data_command<P>*)cmd;
                splitSegInfoCmd->set_dataoff(linkeditStartOffset+splitSegInfoOffset);
                break;
            case LC_CODE_SIGNATURE:
            case LC_DYLIB_CODE_SIGN_DRS:
                remove = true;
                break;
            default:
                break;
        }
        macho_load_command<P>* nextCmd = (macho_load_command<P>*)(((uint8_t*)cmd)+cmdSize);
        if ( remove ) {
            // Compact the remaining commands down over this one.
            // NOTE(review): bytesRemaining still includes this command's size here,
            // so the copy reads cmdSize bytes past the end of the original
            // load-command area; the tail is re-zeroed below — confirm intended.
            ::memmove((void*)cmd, (void*)nextCmd, bytesRemaining);
            ++removedCount;
        }
        else {
            bytesRemaining -= cmdSize;
            cmd = nextCmd;
        }
    }
    // zero out stuff removed
    ::bzero((void*)cmd, bytesRemaining);
    // update header
    _mh->set_ncmds(cmd_count-removedCount);
    _mh->set_sizeofcmds(origLoadCommandsSize-bytesRemaining);
    _mh->set_flags(_mh->flags() | 0x80000000);  // 0x80000000 == MH_DYLIB_IN_CACHE (mach-o/loader.h)
}
420
421
422 template <typename P>
423 void Adjustor<P>::adjustSymbolTable()
424 {
425 macho_nlist<P>* symbolTable = (macho_nlist<P>*)&_linkeditBias[_symTabCmd->symoff()];
426
427 // adjust global symbol table entries
428 macho_nlist<P>* lastExport = &symbolTable[_dynSymTabCmd->iextdefsym()+_dynSymTabCmd->nextdefsym()];
429 for (macho_nlist<P>* entry = &symbolTable[_dynSymTabCmd->iextdefsym()]; entry < lastExport; ++entry) {
430 if ( (entry->n_type() & N_TYPE) == N_SECT )
431 entry->set_n_value(entry->n_value() + slideForOrigAddress(entry->n_value()));
432 }
433
434 // adjust local symbol table entries
435 macho_nlist<P>* lastLocal = &symbolTable[_dynSymTabCmd->ilocalsym()+_dynSymTabCmd->nlocalsym()];
436 for (macho_nlist<P>* entry = &symbolTable[_dynSymTabCmd->ilocalsym()]; entry < lastLocal; ++entry) {
437 if ( (entry->n_sect() != NO_SECT) && ((entry->n_type() & N_STAB) == 0) )
438 entry->set_n_value(entry->n_value() + slideForOrigAddress(entry->n_value()));
439 }
440 }
441
442
// Rewrites the chained-fixups metadata so that each
// dyld_chained_starts_in_segment records the segment's offset at its new
// location in the cache (relative to this image's mach header).
template <typename P>
void Adjustor<P>::adjustChainedFixups()
{
    if ( _chainedFixupsCmd == nullptr )
        return;

    // Pass a start hint in to withChainStarts which takes account of the LINKEDIT shifting but we haven't
    // yet updated that LC_SEGMENT to point to the new data
    const dyld_chained_fixups_header* header = (dyld_chained_fixups_header*)&_linkeditBias[_chainedFixupsCmd->dataoff()];
    uint64_t startsOffset = ((uint64_t)header + header->starts_offset) - (uint64_t)_mh;

    // segment_offset in dyld_chained_starts_in_segment is wrong. We need to move it to the new segment offset
    ((dyld3::MachOAnalyzer*)_mh)->withChainStarts(_diagnostics, startsOffset, ^(const dyld_chained_starts_in_image* starts) {
        for (uint32_t segIndex=0; segIndex < starts->seg_count; ++segIndex) {
            if ( starts->seg_info_offset[segIndex] == 0 )
                continue;   // segment has no fixup chains
            dyld_chained_starts_in_segment* segInfo = (dyld_chained_starts_in_segment*)((uint8_t*)starts + starts->seg_info_offset[segIndex]);
            segInfo->segment_offset = (uint64_t)_mappingInfo[segIndex].dstSegment - (uint64_t)_mh;
        }
    });
}
464
465 template <typename P>
466 void Adjustor<P>::slidePointer(int segIndex, uint64_t segOffset, uint8_t type, CacheBuilder::ASLR_Tracker& aslrTracker)
467 {
468 pint_t* mappedAddrP = (pint_t*)((uint8_t*)_mappingInfo[segIndex].dstSegment + segOffset);
469 uint32_t* mappedAddr32 = (uint32_t*)mappedAddrP;
470 pint_t valueP;
471 uint32_t value32;
472 switch ( type ) {
473 case REBASE_TYPE_POINTER:
474 valueP = (pint_t)P::getP(*mappedAddrP);
475 P::setP(*mappedAddrP, valueP + slideForOrigAddress(valueP));
476 aslrTracker.add(mappedAddrP);
477 break;
478
479 case REBASE_TYPE_TEXT_ABSOLUTE32:
480 value32 = P::E::get32(*mappedAddr32);
481 P::E::set32(*mappedAddr32, value32 + (uint32_t)slideForOrigAddress(value32));
482 break;
483
484 case REBASE_TYPE_TEXT_PCREL32:
485 // general text relocs not support
486 default:
487 _diagnostics.error("unknown rebase type 0x%02X in %s", type, _installName);
488 }
489 }
490
491
// True if 'instruction' is a Thumb2 MOVW, viewed as the two 16-bit
// halfwords read together as one little-endian 32-bit value.
static bool isThumbMovw(uint32_t instruction)
{
    const uint32_t opcodeBits = instruction & 0x8000FBF0;
    return opcodeBits == 0x0000F240;
}
496
// True if 'instruction' is a Thumb2 MOVT, viewed as the two 16-bit
// halfwords read together as one little-endian 32-bit value.
static bool isThumbMovt(uint32_t instruction)
{
    const uint32_t opcodeBits = instruction & 0x8000FBF0;
    return opcodeBits == 0x0000F2C0;
}
501
// Extracts the 16-bit immediate encoded in a Thumb2 MOVW/MOVT.  Because the
// instruction is read as one little-endian 32-bit value, the fields sit in
// swapped-halfword positions: imm4 in bits 0-3, i in bit 10, imm3 in bits
// 28-30, imm8 in bits 16-23.
static uint16_t getThumbWord(uint32_t instruction)
{
    const uint32_t imm4 = instruction & 0x0000000F;
    const uint32_t i    = (instruction >> 10) & 0x1;
    const uint32_t imm3 = (instruction >> 28) & 0x7;
    const uint32_t imm8 = (instruction >> 16) & 0xFF;
    return (uint16_t)((imm4 << 12) | (i << 11) | (imm3 << 8) | imm8);
}
510
// Re-encodes the 16-bit immediate 'word' into a Thumb2 MOVW/MOVT
// 'instruction' (stored as swapped halfwords), preserving all other bits.
// Inverse of getThumbWord().
static uint32_t setThumbWord(uint32_t instruction, uint16_t word)
{
    const uint32_t imm4 = (word >> 12) & 0xF;
    const uint32_t i    = (word >> 11) & 0x1;
    const uint32_t imm3 = (word >> 8) & 0x7;
    const uint32_t imm8 = word & 0xFF;
    const uint32_t keptBits = instruction & 0x8F00FBF0;
    return keptBits | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
}
518
// True if 'instruction' is an ARM (A32) MOVW immediate.
static bool isArmMovw(uint32_t instruction)
{
    const uint32_t opcodeBits = instruction & 0x0FF00000;
    return opcodeBits == 0x03000000;
}
523
// True if 'instruction' is an ARM (A32) MOVT immediate.
static bool isArmMovt(uint32_t instruction)
{
    const uint32_t opcodeBits = instruction & 0x0FF00000;
    return opcodeBits == 0x03400000;
}
528
// Extracts the 16-bit immediate from an ARM MOVW/MOVT:
// imm4 in bits 16-19, imm12 in bits 0-11.
static uint16_t getArmWord(uint32_t instruction)
{
    const uint32_t imm4  = (instruction >> 16) & 0xF;
    const uint32_t imm12 = instruction & 0x00000FFF;
    return (uint16_t)((imm4 << 12) | imm12);
}
535
// Re-encodes the 16-bit immediate 'word' into an ARM MOVW/MOVT
// 'instruction', preserving all other bits.  Inverse of getArmWord().
static uint32_t setArmWord(uint32_t instruction, uint16_t word)
{
    const uint32_t imm4  = (word >> 12) & 0xF;
    const uint32_t imm12 = word & 0x0FFF;
    const uint32_t keptBits = instruction & 0xFFF0F000;
    return keptBits | (imm4 << 16) | imm12;
}
541
542 template <typename P>
543 void Adjustor<P>::adjustReference(uint32_t kind, uint8_t* mappedAddr, uint64_t fromNewAddress, uint64_t toNewAddress,
544 int64_t adjust, int64_t targetSlide, uint64_t imageStartAddress, uint64_t imageEndAddress,
545 CacheBuilder::ASLR_Tracker& aslrTracker, CacheBuilder::LOH_Tracker* lohTracker,
546 uint32_t*& lastMappedAddr32, uint32_t& lastKind, uint64_t& lastToNewAddress)
547 {
548 uint64_t value64;
549 uint64_t* mappedAddr64 = 0;
550 uint32_t value32;
551 uint32_t* mappedAddr32 = 0;
552 uint32_t instruction;
553 dyld3::MachOLoaded::ChainedFixupPointerOnDisk chainPtr;
554 int64_t offsetAdjust;
555 int64_t delta;
556 switch ( kind ) {
557 case DYLD_CACHE_ADJ_V2_DELTA_32:
558 mappedAddr32 = (uint32_t*)mappedAddr;
559 value32 = P::E::get32(*mappedAddr32);
560 delta = (int32_t)value32;
561 delta += adjust;
562 if ( (delta > 0x80000000) || (-delta > 0x80000000) ) {
563 _diagnostics.error("DYLD_CACHE_ADJ_V2_DELTA_32 can't be adjust by 0x%016llX in %s", adjust, _installName);
564 return;
565 }
566 P::E::set32(*mappedAddr32, (int32_t)delta);
567 break;
568 case DYLD_CACHE_ADJ_V2_POINTER_32:
569 mappedAddr32 = (uint32_t*)mappedAddr;
570 if ( toNewAddress != (uint64_t)(E::get32(*mappedAddr32) + targetSlide) ) {
571 _diagnostics.error("bad DYLD_CACHE_ADJ_V2_POINTER_32 value not as expected at address 0x%llX in %s", fromNewAddress, _installName);
572 return;
573 }
574 E::set32(*mappedAddr32, (uint32_t)toNewAddress);
575 aslrTracker.add(mappedAddr32);
576 break;
577 case DYLD_CACHE_ADJ_V2_POINTER_64:
578 mappedAddr64 = (uint64_t*)mappedAddr;
579 if ( toNewAddress != (E::get64(*mappedAddr64) + targetSlide) ) {
580 _diagnostics.error("bad DYLD_CACHE_ADJ_V2_POINTER_64 value not as expected at address 0x%llX in %s", fromNewAddress, _installName);
581 return;
582 }
583 E::set64(*mappedAddr64, toNewAddress);
584 aslrTracker.add(mappedAddr64);
585 break;
586 case DYLD_CACHE_ADJ_V2_THREADED_POINTER_64:
587 mappedAddr64 = (uint64_t*)mappedAddr;
588 chainPtr.raw64 = E::get64(*mappedAddr64);
589 // ignore binds, fix up rebases to have new targets
590 if ( chainPtr.arm64e.authRebase.bind == 0 ) {
591 if ( chainPtr.arm64e.authRebase.auth ) {
592 // auth pointer target is offset in dyld cache
593 chainPtr.arm64e.authRebase.target += (((dyld3::MachOAnalyzer*)_mh)->preferredLoadAddress() + targetSlide - _cacheBuffer->header.sharedRegionStart);
594 }
595 else {
596 // plain pointer target is unslid address of target
597 chainPtr.arm64e.rebase.target += targetSlide;
598 }
599 // Note, the pointer remains a chain with just the target of the rebase adjusted to the new target location
600 E::set64(*mappedAddr64, chainPtr.raw64);
601 }
602 break;
603 case DYLD_CACHE_ADJ_V2_DELTA_64:
604 mappedAddr64 = (uint64_t*)mappedAddr;
605 value64 = P::E::get64(*mappedAddr64);
606 E::set64(*mappedAddr64, value64 + adjust);
607 break;
608 case DYLD_CACHE_ADJ_V2_IMAGE_OFF_32:
609 if ( adjust == 0 )
610 break;
611 mappedAddr32 = (uint32_t*)mappedAddr;
612 value32 = P::E::get32(*mappedAddr32);
613 value64 = toNewAddress - imageStartAddress;
614 if ( value64 > imageEndAddress ) {
615 _diagnostics.error("DYLD_CACHE_ADJ_V2_IMAGE_OFF_32 can't be adjust to 0x%016llX in %s", toNewAddress, _installName);
616 return;
617 }
618 P::E::set32(*mappedAddr32, (uint32_t)value64);
619 break;
620 case DYLD_CACHE_ADJ_V2_ARM64_ADRP:
621 mappedAddr32 = (uint32_t*)mappedAddr;
622 if (lohTracker)
623 (*lohTracker)[toNewAddress].insert(mappedAddr);
624 instruction = P::E::get32(*mappedAddr32);
625 if ( (instruction & 0x9F000000) == 0x90000000 ) {
626 int64_t pageDistance = ((toNewAddress & ~0xFFF) - (fromNewAddress & ~0xFFF));
627 int64_t newPage21 = pageDistance >> 12;
628 if ( (newPage21 > 2097151) || (newPage21 < -2097151) ) {
629 _diagnostics.error("DYLD_CACHE_ADJ_V2_ARM64_ADRP can't be adjusted that far in %s", _installName);
630 return;
631 }
632 instruction = (instruction & 0x9F00001F) | ((newPage21 << 29) & 0x60000000) | ((newPage21 << 3) & 0x00FFFFE0);
633 P::E::set32(*mappedAddr32, instruction);
634 }
635 else {
636 // ADRP instructions are sometimes optimized to other instructions (e.g. ADR) after the split-seg-info is generated
637 }
638 break;
639 case DYLD_CACHE_ADJ_V2_ARM64_OFF12:
640 mappedAddr32 = (uint32_t*)mappedAddr;
641 if (lohTracker)
642 (*lohTracker)[toNewAddress].insert(mappedAddr);
643 instruction = P::E::get32(*mappedAddr32);
644 offsetAdjust = (adjust & 0xFFF);
645 if ( offsetAdjust == 0 )
646 break;
647 if ( (instruction & 0x3B000000) == 0x39000000 ) {
648 // LDR/STR imm12
649 if ( offsetAdjust != 0 ) {
650 uint32_t encodedAddend = ((instruction & 0x003FFC00) >> 10);
651 uint32_t newAddend = 0;
652 switch ( instruction & 0xC0000000 ) {
653 case 0x00000000:
654 if ( (instruction & 0x04800000) == 0x04800000 ) {
655 if ( offsetAdjust & 0xF ) {
656 _diagnostics.error("can't adjust off12 scale=16 instruction by %lld bytes at mapped address=%p in %s", offsetAdjust, mappedAddr, _installName);
657 return;
658 }
659 if ( encodedAddend*16 >= 4096 ) {
660 _diagnostics.error("off12 scale=16 instruction points outside its page at mapped address=%p in %s", mappedAddr, _installName);
661 }
662 newAddend = (encodedAddend + offsetAdjust/16) % 256;
663 }
664 else {
665 // scale=1
666 newAddend = (encodedAddend + (int32_t)offsetAdjust) % 4096;
667 }
668 break;
669 case 0x40000000:
670 if ( offsetAdjust & 1 ) {
671 _diagnostics.error("can't adjust off12 scale=2 instruction by %lld bytes at mapped address=%p in %s", offsetAdjust, mappedAddr, _installName);
672 return;
673 }
674 if ( encodedAddend*2 >= 4096 ) {
675 _diagnostics.error("off12 scale=2 instruction points outside its page at mapped address=%p in %s", mappedAddr, _installName);
676 return;
677 }
678 newAddend = (encodedAddend + offsetAdjust/2) % 2048;
679 break;
680 case 0x80000000:
681 if ( offsetAdjust & 3 ) {
682 _diagnostics.error("can't adjust off12 scale=4 instruction by %lld bytes at mapped address=%p in %s", offsetAdjust, mappedAddr, _installName);
683 return;
684 }
685 if ( encodedAddend*4 >= 4096 ) {
686 _diagnostics.error("off12 scale=4 instruction points outside its page at mapped address=%p in %s", mappedAddr, _installName);
687 return;
688 }
689 newAddend = (encodedAddend + offsetAdjust/4) % 1024;
690 break;
691 case 0xC0000000:
692 if ( offsetAdjust & 7 ) {
693 _diagnostics.error("can't adjust off12 scale=8 instruction by %lld bytes at mapped address=%p in %s", offsetAdjust, mappedAddr, _installName);
694 return;
695 }
696 if ( encodedAddend*8 >= 4096 ) {
697 _diagnostics.error("off12 scale=8 instruction points outside its page at mapped address=%p in %s", mappedAddr, _installName);
698 return;
699 }
700 newAddend = (encodedAddend + offsetAdjust/8) % 512;
701 break;
702 }
703 uint32_t newInstruction = (instruction & 0xFFC003FF) | (newAddend << 10);
704 P::E::set32(*mappedAddr32, newInstruction);
705 }
706 }
707 else if ( (instruction & 0xFFC00000) == 0x91000000 ) {
708 // ADD imm12
709 if ( instruction & 0x00C00000 ) {
710 _diagnostics.error("ADD off12 uses shift at mapped address=%p in %s", mappedAddr, _installName);
711 return;
712 }
713 uint32_t encodedAddend = ((instruction & 0x003FFC00) >> 10);
714 uint32_t newAddend = (encodedAddend + offsetAdjust) & 0xFFF;
715 uint32_t newInstruction = (instruction & 0xFFC003FF) | (newAddend << 10);
716 P::E::set32(*mappedAddr32, newInstruction);
717 }
718 else if ( instruction != 0xD503201F ) {
719 // ignore imm12 instructions optimized into a NOP, but warn about others
720 _diagnostics.error("unknown off12 instruction 0x%08X at 0x%0llX in %s", instruction, fromNewAddress, _installName);
721 return;
722 }
723 break;
724 case DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT:
725 mappedAddr32 = (uint32_t*)mappedAddr;
726 // to update a movw/movt pair we need to extract the 32-bit they will make,
727 // add the adjust and write back the new movw/movt pair.
728 if ( lastKind == kind ) {
729 if ( lastToNewAddress == toNewAddress ) {
730 uint32_t instruction1 = P::E::get32(*lastMappedAddr32);
731 uint32_t instruction2 = P::E::get32(*mappedAddr32);
732 if ( isThumbMovw(instruction1) && isThumbMovt(instruction2) ) {
733 uint16_t high = getThumbWord(instruction2);
734 uint16_t low = getThumbWord(instruction1);
735 uint32_t full = high << 16 | low;
736 full += adjust;
737 instruction1 = setThumbWord(instruction1, full & 0xFFFF);
738 instruction2 = setThumbWord(instruction2, full >> 16);
739 }
740 else if ( isThumbMovt(instruction1) && isThumbMovw(instruction2) ) {
741 uint16_t high = getThumbWord(instruction1);
742 uint16_t low = getThumbWord(instruction2);
743 uint32_t full = high << 16 | low;
744 full += adjust;
745 instruction2 = setThumbWord(instruction2, full & 0xFFFF);
746 instruction1 = setThumbWord(instruction1, full >> 16);
747 }
748 else {
749 _diagnostics.error("two DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT in a row but not paried in %s", _installName);
750 return;
751 }
752 P::E::set32(*lastMappedAddr32, instruction1);
753 P::E::set32(*mappedAddr32, instruction2);
754 kind = 0;
755 }
756 else {
757 _diagnostics.error("two DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT in a row but target different addresses in %s", _installName);
758 return;
759 }
760 }
761 break;
762 case DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT:
763 mappedAddr32 = (uint32_t*)mappedAddr;
764 // to update a movw/movt pair we need to extract the 32-bit they will make,
765 // add the adjust and write back the new movw/movt pair.
766 if ( lastKind == kind ) {
767 if ( lastToNewAddress == toNewAddress ) {
768 uint32_t instruction1 = P::E::get32(*lastMappedAddr32);
769 uint32_t instruction2 = P::E::get32(*mappedAddr32);
770 if ( isArmMovw(instruction1) && isArmMovt(instruction2) ) {
771 uint16_t high = getArmWord(instruction2);
772 uint16_t low = getArmWord(instruction1);
773 uint32_t full = high << 16 | low;
774 full += adjust;
775 instruction1 = setArmWord(instruction1, full & 0xFFFF);
776 instruction2 = setArmWord(instruction2, full >> 16);
777 }
778 else if ( isArmMovt(instruction1) && isArmMovw(instruction2) ) {
779 uint16_t high = getArmWord(instruction1);
780 uint16_t low = getArmWord(instruction2);
781 uint32_t full = high << 16 | low;
782 full += adjust;
783 instruction2 = setArmWord(instruction2, full & 0xFFFF);
784 instruction1 = setArmWord(instruction1, full >> 16);
785 }
786 else {
787 _diagnostics.error("two DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT in a row but not paired in %s", _installName);
788 return;
789 }
790 P::E::set32(*lastMappedAddr32, instruction1);
791 P::E::set32(*mappedAddr32, instruction2);
792 kind = 0;
793 }
794 else {
795 _diagnostics.error("two DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT in a row but target different addresses in %s", _installName);
796 return;
797 }
798 }
799 break;
800 case DYLD_CACHE_ADJ_V2_ARM64_BR26:
801 case DYLD_CACHE_ADJ_V2_THUMB_BR22:
802 case DYLD_CACHE_ADJ_V2_ARM_BR24:
803 // nothing to do with calls to stubs
804 break;
805 default:
806 _diagnostics.error("unknown split seg kind=%d in %s", kind, _installName);
807 return;
808 }
809 lastKind = kind;
810 lastToNewAddress = toNewAddress;
811 lastMappedAddr32 = mappedAddr32;
812 }
813
// Applies the compressed DYLD_CACHE_ADJ_V2 split-seg info to a dylib that has
// been copied into the shared cache.  The info records, per
// (from-section, to-section) pair, the offsets of cross-section references and
// the fixup kind of each.  For every recorded reference this computes the
// delta between the target's old and new addresses and calls adjustReference()
// to rewrite the pointer/instruction in place, registering slid pointers with
// 'aslrTracker' and (for references into __objc_selrefs) with 'lohTracker'.
// Errors are reported through _diagnostics; the method returns on first error.
template <typename P>
void Adjustor<P>::adjustReferencesUsingInfoV2(CacheBuilder::ASLR_Tracker& aslrTracker,
                                              CacheBuilder::LOH_Tracker& lohTracker,
                                              const CacheBuilder::CacheCoalescedText& coalescedText,
                                              const CacheBuilder::DylibTextCoalescer& textCoalescer)
{
    static const bool log = false;

    // Split-seg info lives in LINKEDIT at the offset recorded in the load command.
    const uint8_t* infoStart = &_linkeditBias[_splitSegInfoCmd->dataoff()];
    const uint8_t* infoEnd = &infoStart[_splitSegInfoCmd->datasize()];
    // First byte is the format tag; this method only understands V2.
    if ( *infoStart++ != DYLD_CACHE_ADJ_V2_FORMAT ) {
        _diagnostics.error("malformed split seg info in %s", _installName);
        return;
    }
    // build section arrays of slide and mapped address for each section
    // (indexed by the 1-based section ordinal used in the split-seg stream)
    std::vector<uint64_t> sectionSlides;
    std::vector<uint64_t> sectionNewAddress;
    std::vector<uint8_t*> sectionMappedAddress;
    sectionSlides.reserve(16);
    sectionNewAddress.reserve(16);
    sectionMappedAddress.reserve(16);
    // section index 0 refers to mach_header
    sectionMappedAddress.push_back((uint8_t*)_mappingInfo[0].dstSegment);
    sectionSlides.push_back(_segSlides[0]);
    sectionNewAddress.push_back(_mappingInfo[0].dstCacheUnslidAddress);
    // section 1 and later refer to real sections
    unsigned sectionIndex = 0;
    unsigned objcSelRefsSectionIndex = ~0U;     // ~0U == "not found"
    std::map<uint64_t, std::string_view> coalescedSectionNames;
    std::map<uint64_t, uint64_t> coalescedSectionOriginalVMAddrs;
    for (unsigned segmentIndex=0; segmentIndex < _segCmds.size(); ++segmentIndex) {
        macho_segment_command<P>* segCmd = _segCmds[segmentIndex];
        // Sections immediately follow their segment load command in memory.
        macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)segCmd + sizeof(macho_segment_command<P>));
        macho_section<P>* const sectionsEnd = &sectionsStart[segCmd->nsects()];

        for(macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
            if ( (strcmp(segCmd->segname(), "__TEXT") == 0) && textCoalescer.sectionWasCoalesced(sect->sectname())) {
                // If we coalesced the segment then the sections aren't really there to be fixed up
                // Push placeholder entries so the ordinal numbering stays aligned.
                sectionMappedAddress.push_back(nullptr);
                sectionSlides.push_back(0);
                sectionNewAddress.push_back(0);
                if (log) {
                    fprintf(stderr, "  %s/%s, sectIndex=%d, mapped at=%p\n",
                            sect->segname(), sect->sectname(), sectionIndex, sectionMappedAddress.back());
                }
                // NOTE: the increment happens first so the map key matches the
                // 1-based ordinal of the entry just pushed (index 0 is the mach_header).
                ++sectionIndex;
                // sectname() is a fixed 16-byte field that need not be NUL
                // terminated; clamp the view to 16 chars to avoid over-reading.
                std::string_view sectionName = sect->sectname();
                if (sectionName.size() > 16)
                    sectionName = sectionName.substr(0, 16);
                coalescedSectionNames[sectionIndex] = sectionName;
                coalescedSectionOriginalVMAddrs[sectionIndex] = sect->addr();
            } else {
                // Regular section: record where it now lives in the cache buffer,
                // how far its segment slid, and its new unslid VM address.
                sectionMappedAddress.push_back((uint8_t*)_mappingInfo[segmentIndex].dstSegment + sect->addr() - segCmd->vmaddr());
                sectionSlides.push_back(_segSlides[segmentIndex]);
                sectionNewAddress.push_back(_mappingInfo[segmentIndex].dstCacheUnslidAddress + sect->addr() - segCmd->vmaddr());
                if (log) {
                    fprintf(stderr, "  %s/%s, sectIndex=%d, mapped at=%p\n",
                            sect->segname(), sect->sectname(), sectionIndex, sectionMappedAddress.back());
                }
                ++sectionIndex;
                // Remember the ordinal of __DATA/__objc_selrefs so references into
                // it can also be fed to the LOH tracker.
                if (!strcmp(sect->segname(), "__DATA") && !strcmp(sect->sectname(), "__objc_selrefs"))
                    objcSelRefsSectionIndex = sectionIndex;
            }
        }
    }

    // Grammar of the V2 stream (all integers are uleb128, offsets are deltas):
    // Whole         :== <count> FromToSection+
    // FromToSection :== <from-sect-index> <to-sect-index> <count> ToOffset+
    // ToOffset      :== <to-sect-offset-delta> <count> FromOffset+
    // FromOffset    :== <kind> <count> <from-sect-offset-delta>
    const uint8_t* p = infoStart;
    uint64_t sectionCount = read_uleb128(p, infoEnd);
    for (uint64_t i=0; i < sectionCount; ++i) {
        // Pair-fixup state (movw/movt halves) is tracked per FromToSection run.
        uint32_t* lastMappedAddr32 = NULL;
        uint32_t lastKind = 0;
        uint64_t lastToNewAddress = 0;
        uint64_t fromSectionIndex = read_uleb128(p, infoEnd);
        uint64_t toSectionIndex = read_uleb128(p, infoEnd);
        uint64_t toOffsetCount = read_uleb128(p, infoEnd);
        uint64_t fromSectionSlide = sectionSlides[fromSectionIndex];
        uint64_t fromSectionNewAddress = sectionNewAddress[fromSectionIndex];
        uint8_t* fromSectionMappedAddress = sectionMappedAddress[fromSectionIndex];
        uint64_t toSectionSlide = sectionSlides[toSectionIndex];
        uint64_t toSectionNewAddress = sectionNewAddress[toSectionIndex];
        // Only track LOH candidates for references into __objc_selrefs.
        CacheBuilder::LOH_Tracker* lohTrackerPtr = (toSectionIndex == objcSelRefsSectionIndex) ? &lohTracker : nullptr;
        if (log) printf(" from sect=%lld (mapped=%p), to sect=%lld (new addr=0x%llX):\n", fromSectionIndex, fromSectionMappedAddress, toSectionIndex, toSectionNewAddress);
        uint64_t toSectionOffset = 0;

        // We don't support updating split seg from a coalesced segment
        // (its mapped address entry above is nullptr, so there is nothing to patch)
        if (coalescedSectionNames.find(fromSectionIndex) != coalescedSectionNames.end()) {
            _diagnostics.error("split seg from coalesced segment in %s", _installName);
            return;
        }
        for (uint64_t j=0; j < toOffsetCount; ++j) {
            uint64_t toSectionDelta = read_uleb128(p, infoEnd);
            uint64_t fromOffsetCount = read_uleb128(p, infoEnd);
            toSectionOffset += toSectionDelta;
            for (uint64_t k=0; k < fromOffsetCount; ++k) {
                uint64_t kind = read_uleb128(p, infoEnd);
                // DYLD_CACHE_ADJ_V2 kinds are 0..13; anything larger is malformed.
                if ( kind > 13 ) {
                    _diagnostics.error("unknown split seg info v2 kind value (%llu) in %s", kind, _installName);
                    return;
                }
                uint64_t fromSectDeltaCount = read_uleb128(p, infoEnd);
                uint64_t fromSectionOffset = 0;
                for (uint64_t l=0; l < fromSectDeltaCount; ++l) {
                    uint64_t delta = read_uleb128(p, infoEnd);
                    fromSectionOffset += delta;
                    //if (log) printf("   kind=%lld, from offset=0x%0llX, to offset=0x%0llX, adjust=0x%llX, targetSlide=0x%llX\n", kind, fromSectionOffset, toSectionOffset, deltaAdjust, toSectionSlide);

                    uint8_t* fromMappedAddr = fromSectionMappedAddress + fromSectionOffset;
                    uint64_t toNewAddress = toSectionNewAddress + toSectionOffset;
                    uint64_t fromNewAddress = fromSectionNewAddress + fromSectionOffset;
                    uint64_t imageStartAddress = sectionNewAddress.front();
                    uint64_t imageEndAddress = sectionNewAddress.back();
                    // 255 appears to be a sentinel "to" ordinal whose entries need
                    // no patching and are skipped — TODO(review): confirm against
                    // the ld64 split-seg emitter.
                    if ( toSectionIndex != 255 ) {
                        auto textCoalIt = coalescedSectionNames.find(toSectionIndex);
                        if (textCoalIt != coalescedSectionNames.end() ) {
                            //printf("Section name: %s\n", textCoalIt->second.data());
                            // Target lives in a coalesced __TEXT section: map its
                            // original section offset to its new cache offset.
                            const CacheBuilder::DylibTextCoalescer::DylibSectionOffsetToCacheSectionOffset& offsetMap = textCoalescer.getSectionCoalescer(textCoalIt->second);
                            auto offsetIt = offsetMap.find((uint32_t)toSectionOffset);
                            assert(offsetIt != offsetMap.end());
                            uint64_t baseVMAddr = coalescedText.getSectionData(textCoalIt->second).bufferVMAddr;
                            toNewAddress = baseVMAddr + offsetIt->second;

                            // The 'to' section is gone, but we still need the 'to' slide.  Instead of a section slide, compute the slide
                            // for this individual atom
                            uint64_t toAtomOriginalVMAddr = coalescedSectionOriginalVMAddrs[toSectionIndex] + toSectionOffset;
                            uint64_t toAtomSlide = toNewAddress - toAtomOriginalVMAddr;
                            int64_t deltaAdjust = toAtomSlide - fromSectionSlide;
                            adjustReference((uint32_t)kind, fromMappedAddr, fromNewAddress, toNewAddress, deltaAdjust, toAtomSlide,
                                            imageStartAddress, imageEndAddress, aslrTracker, lohTrackerPtr, lastMappedAddr32, lastKind, lastToNewAddress);

                        } else {
                            // Normal case: adjust by the difference between target
                            // and source segment slides.
                            int64_t deltaAdjust = toSectionSlide - fromSectionSlide;
                            adjustReference((uint32_t)kind, fromMappedAddr, fromNewAddress, toNewAddress, deltaAdjust, toSectionSlide,
                                            imageStartAddress, imageEndAddress, aslrTracker, lohTrackerPtr, lastMappedAddr32, lastKind, lastToNewAddress);
                        }
                    }
                    // adjustReference() reports failures via _diagnostics; stop at the first one.
                    if ( _diagnostics.hasError() )
                        return;
                }
            }
        }
    }

}
961
962 template <typename P>
963 void Adjustor<P>::adjustDataPointers(CacheBuilder::ASLR_Tracker& aslrTracker)
964 {
965 const uint8_t* p = &_linkeditBias[_dyldInfo->rebase_off()];
966 const uint8_t* end = &p[_dyldInfo->rebase_size()];
967
968 uint8_t type = 0;
969 int segIndex = 0;
970 uint64_t segOffset = 0;
971 uint64_t count;
972 uint64_t skip;
973 bool done = false;
974 while ( !done && (p < end) ) {
975 uint8_t immediate = *p & REBASE_IMMEDIATE_MASK;
976 uint8_t opcode = *p & REBASE_OPCODE_MASK;
977 ++p;
978 switch (opcode) {
979 case REBASE_OPCODE_DONE:
980 done = true;
981 break;
982 case REBASE_OPCODE_SET_TYPE_IMM:
983 type = immediate;
984 break;
985 case REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
986 segIndex = immediate;
987 segOffset = read_uleb128(p, end);
988 break;
989 case REBASE_OPCODE_ADD_ADDR_ULEB:
990 segOffset += read_uleb128(p, end);
991 break;
992 case REBASE_OPCODE_ADD_ADDR_IMM_SCALED:
993 segOffset += immediate*sizeof(pint_t);
994 break;
995 case REBASE_OPCODE_DO_REBASE_IMM_TIMES:
996 for (int i=0; i < immediate; ++i) {
997 slidePointer(segIndex, segOffset, type, aslrTracker);
998 segOffset += sizeof(pint_t);
999 }
1000 break;
1001 case REBASE_OPCODE_DO_REBASE_ULEB_TIMES:
1002 count = read_uleb128(p, end);
1003 for (uint32_t i=0; i < count; ++i) {
1004 slidePointer(segIndex, segOffset, type, aslrTracker);
1005 segOffset += sizeof(pint_t);
1006 }
1007 break;
1008 case REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB:
1009 slidePointer(segIndex, segOffset, type, aslrTracker);
1010 segOffset += read_uleb128(p, end) + sizeof(pint_t);
1011 break;
1012 case REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB:
1013 count = read_uleb128(p, end);
1014 skip = read_uleb128(p, end);
1015 for (uint32_t i=0; i < count; ++i) {
1016 slidePointer(segIndex, segOffset, type, aslrTracker);
1017 segOffset += skip + sizeof(pint_t);
1018 }
1019 break;
1020 default:
1021 _diagnostics.error("unknown rebase opcode 0x%02X in %s", opcode, _installName);
1022 done = true;
1023 break;
1024 }
1025 }
1026 }
1027
1028
// Patches one code location (split-seg V1 style) at 'textLoc' for a uniform
// code-to-data displacement 'codeToDataDelta'.  'kind' selects the fixup
// encoding: plain 32/64-bit pointers, thumb2/ARM movw-movt halves (where the
// low nibble of kind carries the paired half's high 4 bits), or arm64 ADRP.
// Unknown kinds are silently ignored (the default case is a no-op).
template <typename P>
void Adjustor<P>::adjustInstruction(uint8_t kind, uint8_t* textLoc, uint64_t codeToDataDelta)
{
    uint32_t* fixupLoc32 = (uint32_t*)textLoc;
    uint64_t* fixupLoc64 = (uint64_t*)textLoc;
    uint32_t instruction;
    uint32_t value32;
    uint64_t value64;

    switch (kind) {
        case 1: // 32-bit pointer (including x86_64 RIP-rel)
            value32 = P::E::get32(*fixupLoc32);
            value32 += codeToDataDelta;
            P::E::set32(*fixupLoc32, value32);
            break;
        case 2: // 64-bit pointer
            value64 =  P::E::get64(*fixupLoc64);
            value64 += codeToDataDelta;
            P::E::set64(*fixupLoc64, value64);
            break;
        case 4: // only used for i386, a reference to something in the IMPORT segment
            break;
        case 5: // used by thumb2 movw
            instruction = P::E::get32(*fixupLoc32);
            // slide is always a multiple of 4096, so only top 4 bits of lo16 will ever need adjusting
            value32 = (instruction & 0x0000000F) + ((uint32_t)codeToDataDelta >> 12);
            instruction = (instruction & 0xFFFFFFF0) | (value32 & 0x0000000F);
            P::E::set32(*fixupLoc32, instruction);
            break;
        case 6: // used by ARM movw
            instruction = P::E::get32(*fixupLoc32);
            // slide is always a multiple of 4096, so only top 4 bits of lo16 will ever need adjusting
            value32 = ((instruction & 0x000F0000) >> 16) + ((uint32_t)codeToDataDelta >> 12);
            instruction = (instruction & 0xFFF0FFFF) | ((value32 <<16) & 0x000F0000);
            P::E::set32(*fixupLoc32, instruction);
            break;
        case 0x10:
        case 0x11:
        case 0x12:
        case 0x13:
        case 0x14:
        case 0x15:
        case 0x16:
        case 0x17:
        case 0x18:
        case 0x19:
        case 0x1A:
        case 0x1B:
        case 0x1C:
        case 0x1D:
        case 0x1E:
        case 0x1F:
            // used by thumb2 movt (low nibble of kind is high 4-bits of paired movw)
            {
                instruction = P::E::get32(*fixupLoc32);
                // sanity check this is a thumb2 movt (T1 encoding, halfword-swapped in memory)
                assert((instruction & 0x8000FBF0) == 0x0000F2C0);
                // extract 16-bit value from instruction
                // (imm4:i:imm3:imm8 scattered across the two halfwords)
                uint32_t i     = ((instruction & 0x00000400) >> 10);
                uint32_t imm4  =  (instruction & 0x0000000F);
                uint32_t imm3  = ((instruction & 0x70000000) >> 28);
                uint32_t imm8  = ((instruction & 0x00FF0000) >> 16);
                uint32_t imm16 = (imm4 << 12) | (i << 11) | (imm3 << 8) | imm8;
                // combine with codeToDataDelta and kind nibble
                // (imm16 is the target's high half; the kind nibble restores the
                // top 4 bits of the paired movw's low half so carries propagate)
                uint32_t targetValue = (imm16 << 16) | ((kind & 0xF) << 12);
                uint32_t newTargetValue = targetValue + (uint32_t)codeToDataDelta;
                // construct new bits slices
                uint32_t imm4_ = (newTargetValue & 0xF0000000) >> 28;
                uint32_t i_    = (newTargetValue & 0x08000000) >> 27;
                uint32_t imm3_ = (newTargetValue & 0x07000000) >> 24;
                uint32_t imm8_ = (newTargetValue & 0x00FF0000) >> 16;
                // update instruction to match codeToDataDelta
                uint32_t newInstruction = (instruction & 0x8F00FBF0) | imm4_ | (i_ << 10) | (imm3_ << 28) | (imm8_ << 16);
                P::E::set32(*fixupLoc32, newInstruction);
            }
            break;
        case 0x20:
        case 0x21:
        case 0x22:
        case 0x23:
        case 0x24:
        case 0x25:
        case 0x26:
        case 0x27:
        case 0x28:
        case 0x29:
        case 0x2A:
        case 0x2B:
        case 0x2C:
        case 0x2D:
        case 0x2E:
        case 0x2F:
            // used by arm movt (low nibble of kind is high 4-bits of paired movw)
            {
                instruction = P::E::get32(*fixupLoc32);
                // extract 16-bit value from instruction (imm4:imm12)
                uint32_t imm4  = ((instruction & 0x000F0000) >> 16);
                uint32_t imm12 =  (instruction & 0x00000FFF);
                uint32_t imm16 = (imm4 << 12) | imm12;
                // combine with codeToDataDelta and kind nibble
                // (same trick as the thumb2 case: kind nibble supplies the movw's
                // top 4 bits so the add carries correctly into the high half)
                uint32_t targetValue = (imm16 << 16) | ((kind & 0xF) << 12);
                uint32_t newTargetValue = targetValue + (uint32_t)codeToDataDelta;
                // construct new bits slices
                uint32_t imm4_  = (newTargetValue & 0xF0000000) >> 28;
                uint32_t imm12_ = (newTargetValue & 0x0FFF0000) >> 16;
                // update instruction to match codeToDataDelta
                uint32_t newInstruction = (instruction & 0xFFF0F000) | (imm4_ << 16) | imm12_;
                P::E::set32(*fixupLoc32, newInstruction);
            }
            break;
        case 3: // used for arm64 ADRP
            instruction = P::E::get32(*fixupLoc32);
            if ( (instruction & 0x9F000000) == 0x90000000 ) {
                // extract the page displacement in bytes (immlo from bits 30:29,
                // immhi from bits 23:5; together they form imm << 12), add the
                // delta (always a multiple of 4096), and scatter the bits back.
                value64 = ((instruction & 0x60000000) >> 17) | ((instruction & 0x00FFFFE0) << 9);
                value64 += codeToDataDelta;
                instruction = (instruction & 0x9F00001F) | ((value64 << 17) & 0x60000000) | ((value64 >> 9) & 0x00FFFFE0);
                P::E::set32(*fixupLoc32, instruction);
            }
            break;
        default:
            break;
    }
}
1152
1153 template <typename P>
1154 void Adjustor<P>::adjustCode()
1155 {
1156 // find compressed info on how code needs to be updated
1157 const uint8_t* infoStart = &_linkeditBias[_splitSegInfoCmd->dataoff()];
1158 const uint8_t* infoEnd = &infoStart[_splitSegInfoCmd->datasize()];;
1159
1160 // This encoding only works if all data segments slide by the same amount
1161 uint64_t codeToDataDelta = _segSlides[1] - _segSlides[0];
1162
1163 // compressed data is: [ <kind> [uleb128-delta]+ <0> ] + <0>
1164 for (const uint8_t* p = infoStart; (*p != 0) && (p < infoEnd);) {
1165 uint8_t kind = *p++;
1166 uint8_t* textLoc = (uint8_t*)_mappingInfo[0].dstSegment;
1167 while (uint64_t delta = read_uleb128(p, infoEnd)) {
1168 textLoc += delta;
1169 adjustInstruction(kind, textLoc, codeToDataDelta);
1170 }
1171 }
1172 }
1173
1174
1175 template <typename P>
1176 void Adjustor<P>::adjustExportsTrie(std::vector<uint8_t>& newTrieBytes)
1177 {
1178 // if no export info, nothing to adjust
1179 uint32_t exportOffset = 0;
1180 uint32_t exportSize = 0;
1181 if ( _dyldInfo != nullptr ) {
1182 exportOffset = _dyldInfo->export_off();
1183 exportSize = _dyldInfo->export_size();
1184 } else {
1185 exportOffset = _exportTrieCmd->dataoff();
1186 exportSize = _exportTrieCmd->datasize();
1187 }
1188
1189 if ( exportSize == 0 )
1190 return;
1191
1192 // since export info addresses are offsets from mach_header, everything in __TEXT is fine
1193 // only __DATA addresses need to be updated
1194 const uint8_t* start = &_linkeditBias[exportOffset];
1195 const uint8_t* end = &start[exportSize];
1196 std::vector<ExportInfoTrie::Entry> originalExports;
1197 if ( !ExportInfoTrie::parseTrie(start, end, originalExports) ) {
1198 _diagnostics.error("malformed exports trie in %s", _installName);
1199 return;
1200 }
1201
1202 std::vector<ExportInfoTrie::Entry> newExports;
1203 newExports.reserve(originalExports.size());
1204 uint64_t baseAddress = _segOrigStartAddresses[0];
1205 uint64_t baseAddressSlide = slideForOrigAddress(baseAddress);
1206 for (auto& entry: originalExports) {
1207 // remove symbols used by the static linker only
1208 if ( (strncmp(entry.name.c_str(), "$ld$", 4) == 0)
1209 || (strncmp(entry.name.c_str(), ".objc_class_name",16) == 0)
1210 || (strncmp(entry.name.c_str(), ".objc_category_name",19) == 0) ) {
1211 continue;
1212 }
1213 // adjust symbols in slid segments
1214 if ( (entry.info.flags & EXPORT_SYMBOL_FLAGS_KIND_MASK) != EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE )
1215 entry.info.address += (slideForOrigAddress(entry.info.address + baseAddress) - baseAddressSlide);
1216 newExports.push_back(entry);
1217 }
1218
1219 // rebuild export trie
1220 newTrieBytes.reserve(exportSize);
1221
1222 ExportInfoTrie(newExports).emit(newTrieBytes);
1223 // align
1224 while ( (newTrieBytes.size() % sizeof(pint_t)) != 0 )
1225 newTrieBytes.push_back(0);
1226 }
1227
1228
1229 } // anonymous namespace
1230
1231 void CacheBuilder::adjustDylibSegments(const DylibInfo& dylib, Diagnostics& diag) const
1232 {
1233 DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
1234 if ( _archLayout->is64 ) {
1235 Adjustor<Pointer64<LittleEndian>> adjustor64(cache, (macho_header<Pointer64<LittleEndian>>*)dylib.cacheLocation[0].dstSegment, dylib.cacheLocation, diag);
1236 adjustor64.adjustImageForNewSegmentLocations(_aslrTracker, _lohTracker, _coalescedText, dylib.textCoalescer);
1237 }
1238 else {
1239 Adjustor<Pointer32<LittleEndian>> adjustor32(cache, (macho_header<Pointer32<LittleEndian>>*)dylib.cacheLocation[0].dstSegment, dylib.cacheLocation, diag);
1240 adjustor32.adjustImageForNewSegmentLocations(_aslrTracker, _lohTracker, _coalescedText, dylib.textCoalescer);
1241 }
1242 }
1243
1244
1245
1246
1247
1248