1 /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
2 *
3 * Copyright (c) 2014 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <dirent.h>
27 #include <sys/errno.h>
28 #include <sys/fcntl.h>
29 #include <mach-o/loader.h>
30 #include <mach-o/fat.h>
31 #include <assert.h>
32
33 #include <fstream>
34 #include <string>
35 #include <algorithm>
36 #include <unordered_map>
37 #include <unordered_set>
38
39 #include "CacheBuilder.h"
40 #include "Diagnostics.h"
41 #include "DyldSharedCache.h"
42 #include "Trie.hpp"
43 #include "MachOFileAbstraction.hpp"
44 #include "MachOLoaded.h"
45 #include "MachOAnalyzer.h"
46
47
48 #ifndef EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE
49 #define EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE 0x02
50 #endif
51
52 namespace {
53
54 template <typename P>
55 class Adjustor {
56 public:
57 Adjustor(DyldSharedCache* cacheBuffer, macho_header<P>* mh, const std::vector<CacheBuilder::SegmentMappingInfo>& mappingInfo, Diagnostics& diag);
58 void adjustImageForNewSegmentLocations(CacheBuilder::ASLR_Tracker& aslrTracker,
59 CacheBuilder::LOH_Tracker& lohTracker);
60
61 private:
62 void adjustReferencesUsingInfoV2(CacheBuilder::ASLR_Tracker& aslrTracker, CacheBuilder::LOH_Tracker& lohTracker);
63 void adjustReference(uint32_t kind, uint8_t* mappedAddr, uint64_t fromNewAddress, uint64_t toNewAddress, int64_t adjust, int64_t targetSlide,
64 uint64_t imageStartAddress, uint64_t imageEndAddress,
65 CacheBuilder::ASLR_Tracker& aslrTracker, CacheBuilder::LOH_Tracker* lohTracker,
66 uint32_t*& lastMappedAddr32, uint32_t& lastKind, uint64_t& lastToNewAddress);
67 void adjustDataPointers(CacheBuilder::ASLR_Tracker& aslrTracker);
68 void slidePointer(int segIndex, uint64_t segOffset, uint8_t type, CacheBuilder::ASLR_Tracker& aslrTracker);
69 void adjustSymbolTable();
70 void adjustExportsTrie(std::vector<uint8_t>& newTrieBytes);
71 void rebuildLinkEdit();
72 void adjustCode();
73 void adjustInstruction(uint8_t kind, uint8_t* textLoc, uint64_t codeToDataDelta);
74 void rebuildLinkEditAndLoadCommands();
75 uint64_t slideForOrigAddress(uint64_t addr);
76
77 typedef typename P::uint_t pint_t;
78 typedef typename P::E E;
79
80 DyldSharedCache* _cacheBuffer;
81 macho_header<P>* _mh;
82 Diagnostics& _diagnostics;
83 const uint8_t* _linkeditBias = nullptr;
84 unsigned _linkeditSegIndex = 0;
85 bool _maskPointers = false;
86 bool _splitSegInfoV2 = false;
87 const char* _installName = nullptr;
88 macho_symtab_command<P>* _symTabCmd = nullptr;
89 macho_dysymtab_command<P>* _dynSymTabCmd = nullptr;
90 macho_dyld_info_command<P>* _dyldInfo = nullptr;
91 macho_linkedit_data_command<P>* _splitSegInfoCmd = nullptr;
92 macho_linkedit_data_command<P>* _functionStartsCmd = nullptr;
93 macho_linkedit_data_command<P>* _dataInCodeCmd = nullptr;
94 std::vector<uint64_t> _segOrigStartAddresses;
95 std::vector<uint64_t> _segSlides;
96 std::vector<macho_segment_command<P>*> _segCmds;
97 const std::vector<CacheBuilder::SegmentMappingInfo>& _mappingInfo;
98 };
99
100 template <typename P>
101 Adjustor<P>::Adjustor(DyldSharedCache* cacheBuffer, macho_header<P>* mh, const std::vector<CacheBuilder::SegmentMappingInfo>& mappingInfo, Diagnostics& diag)
102 : _cacheBuffer(cacheBuffer), _mh(mh), _diagnostics(diag), _mappingInfo(mappingInfo)
103 {
104 assert((mh->magic() == MH_MAGIC) || (mh->magic() == MH_MAGIC_64));
105 macho_segment_command<P>* segCmd;
106 const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)mh + sizeof(macho_header<P>));
107 const uint32_t cmd_count = mh->ncmds();
108 const macho_load_command<P>* cmd = cmds;
109 unsigned segIndex = 0;
110 for (uint32_t i = 0; i < cmd_count; ++i) {
111 switch (cmd->cmd()) {
112 case LC_ID_DYLIB:
113 _installName = ((macho_dylib_command<P>*)cmd)->name();
114 break;
115 case LC_SYMTAB:
116 _symTabCmd = (macho_symtab_command<P>*)cmd;
117 break;
118 case LC_DYSYMTAB:
119 _dynSymTabCmd = (macho_dysymtab_command<P>*)cmd;
120 break;
121 case LC_DYLD_INFO:
122 case LC_DYLD_INFO_ONLY:
123 _dyldInfo = (macho_dyld_info_command<P>*)cmd;
124 break;
125 case LC_SEGMENT_SPLIT_INFO:
126 _splitSegInfoCmd = (macho_linkedit_data_command<P>*)cmd;
127 break;
128 case LC_FUNCTION_STARTS:
129 _functionStartsCmd = (macho_linkedit_data_command<P>*)cmd;
130 break;
131 case LC_DATA_IN_CODE:
132 _dataInCodeCmd = (macho_linkedit_data_command<P>*)cmd;
133 break;
134 case macho_segment_command<P>::CMD:
135 segCmd = (macho_segment_command<P>*)cmd;
136 _segCmds.push_back(segCmd);
137 _segOrigStartAddresses.push_back(segCmd->vmaddr());
138 _segSlides.push_back(_mappingInfo[segIndex].dstCacheUnslidAddress - segCmd->vmaddr());
139 if ( strcmp(segCmd->segname(), "__LINKEDIT") == 0 ) {
140 _linkeditBias = (uint8_t*)_mappingInfo[segIndex].dstSegment - segCmd->fileoff();
141 _linkeditSegIndex = segIndex;
142 }
143 ++segIndex;
144 break;
145 }
146 cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
147 }
148 _maskPointers = (P::E::get32(mh->cputype()) == CPU_TYPE_ARM64) || (P::E::get32(mh->cputype()) == CPU_TYPE_ARM64_32);
149 if ( _splitSegInfoCmd != NULL ) {
150 const uint8_t* infoStart = &_linkeditBias[_splitSegInfoCmd->dataoff()];
151 _splitSegInfoV2 = (*infoStart == DYLD_CACHE_ADJ_V2_FORMAT);
152 }
153 else {
154 _diagnostics.error("missing LC_SEGMENT_SPLIT_INFO in %s", _installName);
155 }
156 }
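// A hypothetical example of what the constructor records (made-up numbers): if a dylib's
// __DATA segment was linked at vmaddr 0x1000A0000 and the cache builder places it at the
// unslid cache address 0x1D20A4000, then for that segment
//     _segOrigStartAddresses[i] = 0x1000A0000;
//     _segSlides[i]             = 0x1D20A4000 - 0x1000A0000;   // = 0xD2004000
// Every reference that originally pointed into the segment is later moved by that slide.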
157
158 template <typename P>
159 void Adjustor<P>::adjustImageForNewSegmentLocations(CacheBuilder::ASLR_Tracker& aslrTracker,
160 CacheBuilder::LOH_Tracker& lohTracker)
161 {
162 if ( _diagnostics.hasError() )
163 return;
164 if ( _splitSegInfoV2 ) {
165 adjustReferencesUsingInfoV2(aslrTracker, lohTracker);
166 }
167 else {
168 adjustDataPointers(aslrTracker);
169 adjustCode();
170 }
171 if ( _diagnostics.hasError() )
172 return;
173 adjustSymbolTable();
174 if ( _diagnostics.hasError() )
175 return;
176 rebuildLinkEditAndLoadCommands();
177
178 #if DEBUG
179 Diagnostics diag;
180 ((dyld3::MachOAnalyzer*)_mh)->validateDyldCacheDylib(diag, _installName);
181 if ( diag.hasError() ) {
182 fprintf(stderr, "%s\n", diag.errorMessage().c_str());
183 }
184 #endif
185 }
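// Two adjustment strategies are used above: dylibs built with the v2 split-seg format carry
// explicit (kind, from, to) records, so adjustReferencesUsingInfoV2() can rewrite each
// reference individually; older dylibs only have rebase opcodes plus the old split-seg
// format, so data pointers and code references are patched in two separate passes instead.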
186
187 template <typename P>
188 uint64_t Adjustor<P>::slideForOrigAddress(uint64_t addr)
189 {
190 for (unsigned i=0; i < _segOrigStartAddresses.size(); ++i) {
191 if ( (_segOrigStartAddresses[i] <= addr) && (addr < (_segOrigStartAddresses[i]+_segCmds[i]->vmsize())) )
192 return _segSlides[i];
193 }
194 // On arm64, high nibble of pointers can have extra bits
195 if ( _maskPointers && (addr & 0xF000000000000000) ) {
196 return slideForOrigAddress(addr & 0x0FFFFFFFFFFFFFFF);
197 }
198 _diagnostics.error("slide not known for dylib address 0x%llX in %s", addr, _installName);
199 return 0;
200 }
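// Hypothetical example of the masking above: a stored arm64 pointer value of
// 0xF000000180004000 carries extra bits in its top nibble; masking with 0x0FFFFFFFFFFFFFFF
// yields 0x0000000180004000, which is then matched against the recorded segment ranges.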
201
202 template <typename P>
203 void Adjustor<P>::rebuildLinkEditAndLoadCommands()
204 {
205     // Exports trie is the only data structure in LINKEDIT that might grow
206 std::vector<uint8_t> newTrieBytes;
207 adjustExportsTrie(newTrieBytes);
208
209     // Remove: code signature, rebase info, code-sign-dirs.  Everything else is repacked into the new LINKEDIT below.
210 uint32_t bindOffset = 0;
211 uint32_t bindSize = _dyldInfo->bind_size();
212 uint32_t weakBindOffset = bindOffset + bindSize;
213 uint32_t weakBindSize = _dyldInfo->weak_bind_size();
214 uint32_t lazyBindOffset = weakBindOffset + weakBindSize;
215 uint32_t lazyBindSize = _dyldInfo->lazy_bind_size();
216 uint32_t exportOffset = lazyBindOffset + lazyBindSize;
217 uint32_t exportSize = (uint32_t)newTrieBytes.size();
218 uint32_t splitSegInfoOffset = exportOffset + exportSize;
219 uint32_t splitSegInfosSize = (_splitSegInfoCmd ? _splitSegInfoCmd->datasize() : 0);
220 uint32_t funcStartsOffset = splitSegInfoOffset + splitSegInfosSize;
221 uint32_t funcStartsSize = (_functionStartsCmd ? _functionStartsCmd->datasize() : 0);
222 uint32_t dataInCodeOffset = funcStartsOffset + funcStartsSize;
223 uint32_t dataInCodeSize = (_dataInCodeCmd ? _dataInCodeCmd->datasize() : 0);
224 uint32_t symbolTableOffset = dataInCodeOffset + dataInCodeSize;
225 uint32_t symbolTableSize = _symTabCmd->nsyms() * sizeof(macho_nlist<P>);
226 uint32_t indirectTableOffset = symbolTableOffset + symbolTableSize;
227 uint32_t indirectTableSize = _dynSymTabCmd->nindirectsyms() * sizeof(uint32_t);
228 uint32_t symbolStringsOffset = indirectTableOffset + indirectTableSize;
229 uint32_t symbolStringsSize = _symTabCmd->strsize();
230 uint32_t newLinkEditSize = symbolStringsOffset + symbolStringsSize;
231
232     size_t linkeditBufferSize = align(_segCmds[_linkeditSegIndex]->vmsize(), 12);   // page-align (2^12 = 4096)
233 if ( linkeditBufferSize < newLinkEditSize ) {
234 _diagnostics.error("LINKEDIT overflow in %s", _installName);
235 return;
236 }
237
238     uint8_t* newLinkeditBuffer = (uint8_t*)::calloc(linkeditBufferSize, 1);
239     if ( bindSize )
240         memcpy(&newLinkeditBuffer[bindOffset], &_linkeditBias[_dyldInfo->bind_off()], bindSize);
241     if ( lazyBindSize )
242         memcpy(&newLinkeditBuffer[lazyBindOffset], &_linkeditBias[_dyldInfo->lazy_bind_off()], lazyBindSize);
243     if ( weakBindSize )
244         memcpy(&newLinkeditBuffer[weakBindOffset], &_linkeditBias[_dyldInfo->weak_bind_off()], weakBindSize);
245     if ( exportSize )
246         memcpy(&newLinkeditBuffer[exportOffset], &newTrieBytes[0], exportSize);
247     if ( splitSegInfosSize )
248         memcpy(&newLinkeditBuffer[splitSegInfoOffset], &_linkeditBias[_splitSegInfoCmd->dataoff()], splitSegInfosSize);
249     if ( funcStartsSize )
250         memcpy(&newLinkeditBuffer[funcStartsOffset], &_linkeditBias[_functionStartsCmd->dataoff()], funcStartsSize);
251     if ( dataInCodeSize )
252         memcpy(&newLinkeditBuffer[dataInCodeOffset], &_linkeditBias[_dataInCodeCmd->dataoff()], dataInCodeSize);
253     if ( symbolTableSize )
254         memcpy(&newLinkeditBuffer[symbolTableOffset], &_linkeditBias[_symTabCmd->symoff()], symbolTableSize);
255     if ( indirectTableSize )
256         memcpy(&newLinkeditBuffer[indirectTableOffset], &_linkeditBias[_dynSymTabCmd->indirectsymoff()], indirectTableSize);
257     if ( symbolStringsSize )
258         memcpy(&newLinkeditBuffer[symbolStringsOffset], &_linkeditBias[_symTabCmd->stroff()], symbolStringsSize);
259
260     memcpy(_mappingInfo[_linkeditSegIndex].dstSegment, newLinkeditBuffer, newLinkEditSize);
261     ::bzero(((uint8_t*)_mappingInfo[_linkeditSegIndex].dstSegment)+newLinkEditSize, linkeditBufferSize-newLinkEditSize);
262     ::free(newLinkeditBuffer);
263 uint32_t linkeditStartOffset = (uint32_t)_mappingInfo[_linkeditSegIndex].dstCacheFileOffset;
264
265     // update load commands and remove ones no longer needed
266 macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)_mh + sizeof(macho_header<P>));
267 uint32_t cmd_count = _mh->ncmds();
268 const macho_load_command<P>* cmd = cmds;
269 const unsigned origLoadCommandsSize = _mh->sizeofcmds();
270 unsigned bytesRemaining = origLoadCommandsSize;
271 unsigned removedCount = 0;
272 unsigned segIndex = 0;
273 for (uint32_t i = 0; i < cmd_count; ++i) {
274 macho_symtab_command<P>* symTabCmd;
275 macho_dysymtab_command<P>* dynSymTabCmd;
276 macho_dyld_info_command<P>* dyldInfo;
277 macho_linkedit_data_command<P>* functionStartsCmd;
278 macho_linkedit_data_command<P>* dataInCodeCmd;
279 macho_linkedit_data_command<P>* splitSegInfoCmd;
280 macho_segment_command<P>* segCmd;
281 macho_routines_command<P>* routinesCmd;
282 macho_dylib_command<P>* dylibIDCmd;
283 uint32_t cmdSize = cmd->cmdsize();
284 int32_t segFileOffsetDelta;
285 bool remove = false;
286 switch ( cmd->cmd() ) {
287 case LC_ID_DYLIB:
288 dylibIDCmd = (macho_dylib_command<P>*)cmd;
289 dylibIDCmd->set_timestamp(2); // match what static linker sets in LC_LOAD_DYLIB
290 break;
291 case LC_SYMTAB:
292 symTabCmd = (macho_symtab_command<P>*)cmd;
293 symTabCmd->set_symoff(linkeditStartOffset+symbolTableOffset);
294 symTabCmd->set_stroff(linkeditStartOffset+symbolStringsOffset);
295 break;
296 case LC_DYSYMTAB:
297 dynSymTabCmd = (macho_dysymtab_command<P>*)cmd;
298 dynSymTabCmd->set_indirectsymoff(linkeditStartOffset+indirectTableOffset);
299 break;
300 case LC_DYLD_INFO:
301 case LC_DYLD_INFO_ONLY:
302 dyldInfo = (macho_dyld_info_command<P>*)cmd;
303 dyldInfo->set_rebase_off(0);
304 dyldInfo->set_rebase_size(0);
305 dyldInfo->set_bind_off(bindSize ? linkeditStartOffset+bindOffset : 0);
306 dyldInfo->set_bind_size(bindSize);
307 dyldInfo->set_weak_bind_off(weakBindSize ? linkeditStartOffset+weakBindOffset : 0);
308 dyldInfo->set_weak_bind_size(weakBindSize);
309 dyldInfo->set_lazy_bind_off(lazyBindSize ? linkeditStartOffset+lazyBindOffset : 0);
310 dyldInfo->set_lazy_bind_size(lazyBindSize);
311 dyldInfo->set_export_off(exportSize ? linkeditStartOffset+exportOffset : 0);
312 dyldInfo->set_export_size(exportSize);
313 break;
314 case LC_FUNCTION_STARTS:
315 functionStartsCmd = (macho_linkedit_data_command<P>*)cmd;
316 functionStartsCmd->set_dataoff(linkeditStartOffset+funcStartsOffset);
317 break;
318 case LC_DATA_IN_CODE:
319 dataInCodeCmd = (macho_linkedit_data_command<P>*)cmd;
320 dataInCodeCmd->set_dataoff(linkeditStartOffset+dataInCodeOffset);
321 break;
322 case macho_routines_command<P>::CMD:
323 routinesCmd = (macho_routines_command<P>*)cmd;
324 routinesCmd->set_init_address(routinesCmd->init_address()+slideForOrigAddress(routinesCmd->init_address()));
325 break;
326 case macho_segment_command<P>::CMD:
327 segCmd = (macho_segment_command<P>*)cmd;
328 segFileOffsetDelta = (int32_t)(_mappingInfo[segIndex].dstCacheFileOffset - segCmd->fileoff());
329 segCmd->set_vmaddr(_mappingInfo[segIndex].dstCacheUnslidAddress);
330 segCmd->set_vmsize(_mappingInfo[segIndex].dstCacheSegmentSize);
331 segCmd->set_fileoff(_mappingInfo[segIndex].dstCacheFileOffset);
332 segCmd->set_filesize(_mappingInfo[segIndex].copySegmentSize);
333 if ( strcmp(segCmd->segname(), "__LINKEDIT") == 0 )
334 segCmd->set_vmsize(linkeditBufferSize);
335 if ( segCmd->nsects() > 0 ) {
336 macho_section<P>* const sectionsStart = (macho_section<P>*)((uint8_t*)segCmd + sizeof(macho_segment_command<P>));
337 macho_section<P>* const sectionsEnd = &sectionsStart[segCmd->nsects()];
338 for (macho_section<P>* sect=sectionsStart; sect < sectionsEnd; ++sect) {
339 sect->set_addr(sect->addr() + _segSlides[segIndex]);
340 if ( sect->offset() != 0 )
341 sect->set_offset(sect->offset() + segFileOffsetDelta);
342 }
343 }
344 ++segIndex;
345 break;
346 case LC_RPATH:
347 _diagnostics.warning("dyld shared cache does not support LC_RPATH found in %s", _installName);
348 remove = true;
349 break;
350 case LC_SEGMENT_SPLIT_INFO:
351 splitSegInfoCmd = (macho_linkedit_data_command<P>*)cmd;
352 splitSegInfoCmd->set_dataoff(linkeditStartOffset+splitSegInfoOffset);
353 break;
354 case LC_CODE_SIGNATURE:
355 case LC_DYLIB_CODE_SIGN_DRS:
356 remove = true;
357 break;
358 default:
359 break;
360 }
361 macho_load_command<P>* nextCmd = (macho_load_command<P>*)(((uint8_t*)cmd)+cmdSize);
362 if ( remove ) {
363 ::memmove((void*)cmd, (void*)nextCmd, bytesRemaining);
364 ++removedCount;
365 }
366 else {
367 bytesRemaining -= cmdSize;
368 cmd = nextCmd;
369 }
370 }
371 // zero out stuff removed
372 ::bzero((void*)cmd, bytesRemaining);
373 // update header
374 _mh->set_ncmds(cmd_count-removedCount);
375 _mh->set_sizeofcmds(origLoadCommandsSize-bytesRemaining);
376     _mh->set_flags(_mh->flags() | 0x80000000);          // MH_DYLIB_IN_CACHE
377 }
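// The repacked LINKEDIT produced above is laid out contiguously in this order (each piece
// present only if its size is non-zero):
//
//   bind info | weak binds | lazy binds | exports trie | split seg info | function starts |
//   data-in-code | symbol table | indirect symbol table | string pool
//
// Rebase info is dropped (rebase_off/rebase_size are zeroed above); pointer locations are
// instead recorded in the cache-wide ASLR_Tracker so they can be covered by slide info.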
378
379
380 template <typename P>
381 void Adjustor<P>::adjustSymbolTable()
382 {
383 macho_nlist<P>* symbolTable = (macho_nlist<P>*)&_linkeditBias[_symTabCmd->symoff()];
384
385 // adjust global symbol table entries
386 macho_nlist<P>* lastExport = &symbolTable[_dynSymTabCmd->iextdefsym()+_dynSymTabCmd->nextdefsym()];
387 for (macho_nlist<P>* entry = &symbolTable[_dynSymTabCmd->iextdefsym()]; entry < lastExport; ++entry) {
388 if ( (entry->n_type() & N_TYPE) == N_SECT )
389 entry->set_n_value(entry->n_value() + slideForOrigAddress(entry->n_value()));
390 }
391
392 // adjust local symbol table entries
393 macho_nlist<P>* lastLocal = &symbolTable[_dynSymTabCmd->ilocalsym()+_dynSymTabCmd->nlocalsym()];
394 for (macho_nlist<P>* entry = &symbolTable[_dynSymTabCmd->ilocalsym()]; entry < lastLocal; ++entry) {
395 if ( (entry->n_sect() != NO_SECT) && ((entry->n_type() & N_STAB) == 0) )
396 entry->set_n_value(entry->n_value() + slideForOrigAddress(entry->n_value()));
397 }
398 }
399
400 template <typename P>
401 void Adjustor<P>::slidePointer(int segIndex, uint64_t segOffset, uint8_t type, CacheBuilder::ASLR_Tracker& aslrTracker)
402 {
403 pint_t* mappedAddrP = (pint_t*)((uint8_t*)_mappingInfo[segIndex].dstSegment + segOffset);
404 uint32_t* mappedAddr32 = (uint32_t*)mappedAddrP;
405 pint_t valueP;
406 uint32_t value32;
407 switch ( type ) {
408 case REBASE_TYPE_POINTER:
409 valueP = (pint_t)P::getP(*mappedAddrP);
410 P::setP(*mappedAddrP, valueP + slideForOrigAddress(valueP));
411 aslrTracker.add(mappedAddrP);
412 break;
413
414 case REBASE_TYPE_TEXT_ABSOLUTE32:
415 value32 = P::E::get32(*mappedAddr32);
416 P::E::set32(*mappedAddr32, value32 + (uint32_t)slideForOrigAddress(value32));
417 break;
418
419 case REBASE_TYPE_TEXT_PCREL32:
420             // general text relocs not supported
421 default:
422 _diagnostics.error("unknown rebase type 0x%02X in %s", type, _installName);
423 }
424 }
425
426
427 static bool isThumbMovw(uint32_t instruction)
428 {
429 return ( (instruction & 0x8000FBF0) == 0x0000F240 );
430 }
431
432 static bool isThumbMovt(uint32_t instruction)
433 {
434 return ( (instruction & 0x8000FBF0) == 0x0000F2C0 );
435 }
436
437 static uint16_t getThumbWord(uint32_t instruction)
438 {
439 uint32_t i = ((instruction & 0x00000400) >> 10);
440 uint32_t imm4 = (instruction & 0x0000000F);
441 uint32_t imm3 = ((instruction & 0x70000000) >> 28);
442 uint32_t imm8 = ((instruction & 0x00FF0000) >> 16);
443 return ((imm4 << 12) | (i << 11) | (imm3 << 8) | imm8);
444 }
445
446 static uint32_t setThumbWord(uint32_t instruction, uint16_t word) {
447 uint32_t imm4 = (word & 0xF000) >> 12;
448 uint32_t i = (word & 0x0800) >> 11;
449 uint32_t imm3 = (word & 0x0700) >> 8;
450 uint32_t imm8 = word & 0x00FF;
451 return (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
452 }
453
454 static bool isArmMovw(uint32_t instruction)
455 {
456 return (instruction & 0x0FF00000) == 0x03000000;
457 }
458
459 static bool isArmMovt(uint32_t instruction)
460 {
461 return (instruction & 0x0FF00000) == 0x03400000;
462 }
463
464 static uint16_t getArmWord(uint32_t instruction)
465 {
466 uint32_t imm4 = ((instruction & 0x000F0000) >> 16);
467 uint32_t imm12 = (instruction & 0x00000FFF);
468 return (imm4 << 12) | imm12;
469 }
470
471 static uint32_t setArmWord(uint32_t instruction, uint16_t word) {
472 uint32_t imm4 = (word & 0xF000) >> 12;
473 uint32_t imm12 = word & 0x0FFF;
474 return (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
475 }
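// Worked example (hypothetical value) of how the helpers above are used when a movw/movt
// pair is rewritten: a 32-bit target of 0x3F0120A4 is carried as the low half 0x20A4 in the
// movw and the high half 0x3F01 in the movt. adjustReference() below re-forms the value,
// adds the adjustment, and re-encodes both halves, essentially:
//     uint32_t full = ((uint32_t)getArmWord(movtInstr) << 16) | getArmWord(movwInstr);
//     full += adjust;                                     // illustrative variable names
//     movwInstr = setArmWord(movwInstr, full & 0xFFFF);
//     movtInstr = setArmWord(movtInstr, full >> 16);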
476
477 template <typename P>
478 void Adjustor<P>::adjustReference(uint32_t kind, uint8_t* mappedAddr, uint64_t fromNewAddress, uint64_t toNewAddress,
479 int64_t adjust, int64_t targetSlide, uint64_t imageStartAddress, uint64_t imageEndAddress,
480 CacheBuilder::ASLR_Tracker& aslrTracker, CacheBuilder::LOH_Tracker* lohTracker,
481 uint32_t*& lastMappedAddr32, uint32_t& lastKind, uint64_t& lastToNewAddress)
482 {
483 uint64_t value64;
484 uint64_t* mappedAddr64 = 0;
485 uint32_t value32;
486 uint32_t* mappedAddr32 = 0;
487 uint32_t instruction;
488 dyld3::MachOLoaded::ChainedFixupPointerOnDisk chainPtr;
489 int64_t offsetAdjust;
490 int64_t delta;
491 switch ( kind ) {
492 case DYLD_CACHE_ADJ_V2_DELTA_32:
493 mappedAddr32 = (uint32_t*)mappedAddr;
494 value32 = P::E::get32(*mappedAddr32);
495 delta = (int32_t)value32;
496 delta += adjust;
497 if ( (delta > 0x80000000) || (-delta > 0x80000000) ) {
498                 _diagnostics.error("DYLD_CACHE_ADJ_V2_DELTA_32 can't be adjusted by 0x%016llX in %s", adjust, _installName);
499 return;
500 }
501 P::E::set32(*mappedAddr32, (int32_t)delta);
502 break;
503 case DYLD_CACHE_ADJ_V2_POINTER_32:
504 mappedAddr32 = (uint32_t*)mappedAddr;
505 if ( toNewAddress != (uint64_t)(E::get32(*mappedAddr32) + targetSlide) ) {
506 _diagnostics.error("bad DYLD_CACHE_ADJ_V2_POINTER_32 value not as expected at address 0x%llX in %s", fromNewAddress, _installName);
507 return;
508 }
509 E::set32(*mappedAddr32, (uint32_t)toNewAddress);
510 aslrTracker.add(mappedAddr32);
511 break;
512 case DYLD_CACHE_ADJ_V2_POINTER_64:
513 mappedAddr64 = (uint64_t*)mappedAddr;
514 if ( toNewAddress != (E::get64(*mappedAddr64) + targetSlide) ) {
515 _diagnostics.error("bad DYLD_CACHE_ADJ_V2_POINTER_64 value not as expected at address 0x%llX in %s", fromNewAddress, _installName);
516 return;
517 }
518 E::set64(*mappedAddr64, toNewAddress);
519 aslrTracker.add(mappedAddr64);
520 break;
521 case DYLD_CACHE_ADJ_V2_THREADED_POINTER_64:
522 mappedAddr64 = (uint64_t*)mappedAddr;
523 chainPtr.raw = E::get64(*mappedAddr64);
524 // ignore binds, fix up rebases to have new targets
525 if ( chainPtr.authRebase.bind == 0 ) {
526 if ( chainPtr.authRebase.auth ) {
527 // auth pointer target is offset in dyld cache
528 chainPtr.authRebase.target += (((dyld3::MachOAnalyzer*)_mh)->preferredLoadAddress() + targetSlide - _cacheBuffer->header.sharedRegionStart);
529 }
530 else {
531 // plain pointer target is unslid address of target
532 chainPtr.plainRebase.target += targetSlide;
533 }
534 // Note, the pointer remains a chain with just the target of the rebase adjusted to the new target location
535 E::set64(*mappedAddr64, chainPtr.raw);
536 }
537 break;
538 case DYLD_CACHE_ADJ_V2_DELTA_64:
539 mappedAddr64 = (uint64_t*)mappedAddr;
540 value64 = P::E::get64(*mappedAddr64);
541 E::set64(*mappedAddr64, value64 + adjust);
542 break;
543 case DYLD_CACHE_ADJ_V2_IMAGE_OFF_32:
544 if ( adjust == 0 )
545 break;
546 mappedAddr32 = (uint32_t*)mappedAddr;
547 value32 = P::E::get32(*mappedAddr32);
548 value64 = toNewAddress - imageStartAddress;
549 if ( value64 > imageEndAddress ) {
550                 _diagnostics.error("DYLD_CACHE_ADJ_V2_IMAGE_OFF_32 can't be adjusted to 0x%016llX in %s", toNewAddress, _installName);
551 return;
552 }
553 P::E::set32(*mappedAddr32, (uint32_t)value64);
554 break;
555 case DYLD_CACHE_ADJ_V2_ARM64_ADRP:
556 mappedAddr32 = (uint32_t*)mappedAddr;
557 if (lohTracker)
558 (*lohTracker)[toNewAddress].insert(mappedAddr);
559 instruction = P::E::get32(*mappedAddr32);
560 if ( (instruction & 0x9F000000) == 0x90000000 ) {
561 int64_t pageDistance = ((toNewAddress & ~0xFFF) - (fromNewAddress & ~0xFFF));
562 int64_t newPage21 = pageDistance >> 12;
563 if ( (newPage21 > 2097151) || (newPage21 < -2097151) ) {
564 _diagnostics.error("DYLD_CACHE_ADJ_V2_ARM64_ADRP can't be adjusted that far in %s", _installName);
565 return;
566 }
567 instruction = (instruction & 0x9F00001F) | ((newPage21 << 29) & 0x60000000) | ((newPage21 << 3) & 0x00FFFFE0);
568 P::E::set32(*mappedAddr32, instruction);
569 }
570 else {
571 // ADRP instructions are sometimes optimized to other instructions (e.g. ADR) after the split-seg-info is generated
572 }
573 break;
574 case DYLD_CACHE_ADJ_V2_ARM64_OFF12:
575 mappedAddr32 = (uint32_t*)mappedAddr;
576 if (lohTracker)
577 (*lohTracker)[toNewAddress].insert(mappedAddr);
578 instruction = P::E::get32(*mappedAddr32);
579 offsetAdjust = (adjust & 0xFFF);
580 if ( offsetAdjust == 0 )
581 break;
582 if ( (instruction & 0x3B000000) == 0x39000000 ) {
583 // LDR/STR imm12
584 if ( offsetAdjust != 0 ) {
585 uint32_t encodedAddend = ((instruction & 0x003FFC00) >> 10);
586 uint32_t newAddend = 0;
587 switch ( instruction & 0xC0000000 ) {
588 case 0x00000000:
589 if ( (instruction & 0x04800000) == 0x04800000 ) {
590 if ( offsetAdjust & 0xF ) {
591 _diagnostics.error("can't adjust off12 scale=16 instruction by %lld bytes at mapped address=%p in %s", offsetAdjust, mappedAddr, _installName);
592 return;
593 }
594                             if ( encodedAddend*16 >= 4096 ) {
595                                 _diagnostics.error("off12 scale=16 instruction points outside its page at mapped address=%p in %s", mappedAddr, _installName);
                                    return;
596                             }
597 newAddend = (encodedAddend + offsetAdjust/16) % 256;
598 }
599 else {
600 // scale=1
601 newAddend = (encodedAddend + (int32_t)offsetAdjust) % 4096;
602 }
603 break;
604 case 0x40000000:
605 if ( offsetAdjust & 1 ) {
606 _diagnostics.error("can't adjust off12 scale=2 instruction by %lld bytes at mapped address=%p in %s", offsetAdjust, mappedAddr, _installName);
607 return;
608 }
609 if ( encodedAddend*2 >= 4096 ) {
610 _diagnostics.error("off12 scale=2 instruction points outside its page at mapped address=%p in %s", mappedAddr, _installName);
611 return;
612 }
613 newAddend = (encodedAddend + offsetAdjust/2) % 2048;
614 break;
615 case 0x80000000:
616 if ( offsetAdjust & 3 ) {
617 _diagnostics.error("can't adjust off12 scale=4 instruction by %lld bytes at mapped address=%p in %s", offsetAdjust, mappedAddr, _installName);
618 return;
619 }
620 if ( encodedAddend*4 >= 4096 ) {
621 _diagnostics.error("off12 scale=4 instruction points outside its page at mapped address=%p in %s", mappedAddr, _installName);
622 return;
623 }
624 newAddend = (encodedAddend + offsetAdjust/4) % 1024;
625 break;
626 case 0xC0000000:
627 if ( offsetAdjust & 7 ) {
628 _diagnostics.error("can't adjust off12 scale=8 instruction by %lld bytes at mapped address=%p in %s", offsetAdjust, mappedAddr, _installName);
629 return;
630 }
631 if ( encodedAddend*8 >= 4096 ) {
632 _diagnostics.error("off12 scale=8 instruction points outside its page at mapped address=%p in %s", mappedAddr, _installName);
633 return;
634 }
635 newAddend = (encodedAddend + offsetAdjust/8) % 512;
636 break;
637 }
638 uint32_t newInstruction = (instruction & 0xFFC003FF) | (newAddend << 10);
639 P::E::set32(*mappedAddr32, newInstruction);
640 }
641 }
642 else if ( (instruction & 0xFFC00000) == 0x91000000 ) {
643 // ADD imm12
644 if ( instruction & 0x00C00000 ) {
645 _diagnostics.error("ADD off12 uses shift at mapped address=%p in %s", mappedAddr, _installName);
646 return;
647 }
648 uint32_t encodedAddend = ((instruction & 0x003FFC00) >> 10);
649 uint32_t newAddend = (encodedAddend + offsetAdjust) & 0xFFF;
650 uint32_t newInstruction = (instruction & 0xFFC003FF) | (newAddend << 10);
651 P::E::set32(*mappedAddr32, newInstruction);
652 }
653 else if ( instruction != 0xD503201F ) {
654             // ignore imm12 instructions optimized into a NOP, but error out on others
655 _diagnostics.error("unknown off12 instruction 0x%08X at 0x%0llX in %s", instruction, fromNewAddress, _installName);
656 return;
657 }
658 break;
659 case DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT:
660 mappedAddr32 = (uint32_t*)mappedAddr;
661             // to update a movw/movt pair we need to extract the 32-bit value they form,
662 // add the adjust and write back the new movw/movt pair.
663 if ( lastKind == kind ) {
664 if ( lastToNewAddress == toNewAddress ) {
665 uint32_t instruction1 = P::E::get32(*lastMappedAddr32);
666 uint32_t instruction2 = P::E::get32(*mappedAddr32);
667 if ( isThumbMovw(instruction1) && isThumbMovt(instruction2) ) {
668 uint16_t high = getThumbWord(instruction2);
669 uint16_t low = getThumbWord(instruction1);
670 uint32_t full = high << 16 | low;
671 full += adjust;
672 instruction1 = setThumbWord(instruction1, full & 0xFFFF);
673 instruction2 = setThumbWord(instruction2, full >> 16);
674 }
675 else if ( isThumbMovt(instruction1) && isThumbMovw(instruction2) ) {
676 uint16_t high = getThumbWord(instruction1);
677 uint16_t low = getThumbWord(instruction2);
678 uint32_t full = high << 16 | low;
679 full += adjust;
680 instruction2 = setThumbWord(instruction2, full & 0xFFFF);
681 instruction1 = setThumbWord(instruction1, full >> 16);
682 }
683 else {
684                         _diagnostics.error("two DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT in a row but not paired in %s", _installName);
685 return;
686 }
687 P::E::set32(*lastMappedAddr32, instruction1);
688 P::E::set32(*mappedAddr32, instruction2);
689 kind = 0;
690 }
691 else {
692 _diagnostics.error("two DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT in a row but target different addresses in %s", _installName);
693 return;
694 }
695 }
696 break;
697 case DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT:
698 mappedAddr32 = (uint32_t*)mappedAddr;
699             // to update a movw/movt pair we need to extract the 32-bit value they form,
700 // add the adjust and write back the new movw/movt pair.
701 if ( lastKind == kind ) {
702 if ( lastToNewAddress == toNewAddress ) {
703 uint32_t instruction1 = P::E::get32(*lastMappedAddr32);
704 uint32_t instruction2 = P::E::get32(*mappedAddr32);
705 if ( isArmMovw(instruction1) && isArmMovt(instruction2) ) {
706 uint16_t high = getArmWord(instruction2);
707 uint16_t low = getArmWord(instruction1);
708 uint32_t full = high << 16 | low;
709 full += adjust;
710 instruction1 = setArmWord(instruction1, full & 0xFFFF);
711 instruction2 = setArmWord(instruction2, full >> 16);
712 }
713 else if ( isArmMovt(instruction1) && isArmMovw(instruction2) ) {
714 uint16_t high = getArmWord(instruction1);
715 uint16_t low = getArmWord(instruction2);
716 uint32_t full = high << 16 | low;
717 full += adjust;
718 instruction2 = setArmWord(instruction2, full & 0xFFFF);
719 instruction1 = setArmWord(instruction1, full >> 16);
720 }
721 else {
722 _diagnostics.error("two DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT in a row but not paired in %s", _installName);
723 return;
724 }
725 P::E::set32(*lastMappedAddr32, instruction1);
726 P::E::set32(*mappedAddr32, instruction2);
727 kind = 0;
728 }
729 else {
730 _diagnostics.error("two DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT in a row but target different addresses in %s", _installName);
731 return;
732 }
733 }
734 break;
735 case DYLD_CACHE_ADJ_V2_ARM64_BR26:
736 case DYLD_CACHE_ADJ_V2_THUMB_BR22:
737 case DYLD_CACHE_ADJ_V2_ARM_BR24:
738             // nothing to do: these are branches to stubs, and branch and stub slide together
739 break;
740 default:
741 _diagnostics.error("unknown split seg kind=%d in %s", kind, _installName);
742 return;
743 }
744 lastKind = kind;
745 lastToNewAddress = toNewAddress;
746 lastMappedAddr32 = mappedAddr32;
747 }
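// Hypothetical walk-through of the DYLD_CACHE_ADJ_V2_POINTER_64 case above: suppose the
// dylib stored the raw value 0x1000C0000 (the link-time address of the target) and the
// target's section slides by targetSlide = 0xD2004000. The expected new target is
// 0x1000C0000 + 0xD2004000 = 0x1D20C4000; adjustReference() verifies this equals
// toNewAddress, writes 0x1D20C4000 in place, and registers the location with the
// ASLR tracker so the cache's slide info covers it.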
748
749 template <typename P>
750 void Adjustor<P>::adjustReferencesUsingInfoV2(CacheBuilder::ASLR_Tracker& aslrTracker,
751 CacheBuilder::LOH_Tracker& lohTracker)
752 {
753 static const bool log = false;
754
755 const uint8_t* infoStart = &_linkeditBias[_splitSegInfoCmd->dataoff()];
756 const uint8_t* infoEnd = &infoStart[_splitSegInfoCmd->datasize()];
757 if ( *infoStart++ != DYLD_CACHE_ADJ_V2_FORMAT ) {
758 _diagnostics.error("malformed split seg info in %s", _installName);
759 return;
760 }
761 // build section arrays of slide and mapped address for each section
762 std::vector<uint64_t> sectionSlides;
763 std::vector<uint64_t> sectionNewAddress;
764 std::vector<uint8_t*> sectionMappedAddress;
765 sectionSlides.reserve(16);
766 sectionNewAddress.reserve(16);
767 sectionMappedAddress.reserve(16);
768 // section index 0 refers to mach_header
769 sectionMappedAddress.push_back((uint8_t*)_mappingInfo[0].dstSegment);
770 sectionSlides.push_back(_segSlides[0]);
771 sectionNewAddress.push_back(_mappingInfo[0].dstCacheUnslidAddress);
772 // section 1 and later refer to real sections
773 unsigned sectionIndex = 0;
774 unsigned objcSelRefsSectionIndex = ~0U;
775 for (unsigned segmentIndex=0; segmentIndex < _segCmds.size(); ++segmentIndex) {
776 macho_segment_command<P>* segCmd = _segCmds[segmentIndex];
777 macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)segCmd + sizeof(macho_segment_command<P>));
778 macho_section<P>* const sectionsEnd = &sectionsStart[segCmd->nsects()];
779 for(macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
780 sectionMappedAddress.push_back((uint8_t*)_mappingInfo[segmentIndex].dstSegment + sect->addr() - segCmd->vmaddr());
781 sectionSlides.push_back(_segSlides[segmentIndex]);
782 sectionNewAddress.push_back(_mappingInfo[segmentIndex].dstCacheUnslidAddress + sect->addr() - segCmd->vmaddr());
783 if (log) {
784 fprintf(stderr, " %s/%s, sectIndex=%d, mapped at=%p\n",
785 sect->segname(), sect->sectname(), sectionIndex, sectionMappedAddress.back());
786 }
787 ++sectionIndex;
788 if (!strcmp(sect->segname(), "__DATA") && !strcmp(sect->sectname(), "__objc_selrefs"))
789 objcSelRefsSectionIndex = sectionIndex;
790 }
791 }
792
793 // Whole :== <count> FromToSection+
794 // FromToSection :== <from-sect-index> <to-sect-index> <count> ToOffset+
795 // ToOffset :== <to-sect-offset-delta> <count> FromOffset+
796     // FromOffset :== <kind> <count> <from-sect-offset-delta>+
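    // For illustration, a hypothetical decoded stream (symbolic values, not real bytes):
    //   sectionCount = 1
    //     from-sect-index = 2, to-sect-index = 1, toOffsetCount = 1
    //       to-sect-offset-delta = 0x40, fromOffsetCount = 1
    //         kind = DYLD_CACHE_ADJ_V2_POINTER_64, count = 1, from-sect-offset-delta = 0x18
    // meaning: the 64-bit pointer at offset 0x18 of section 2 targets offset 0x40 of
    // section 1 and is rewritten below to that target's new address in the cache.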
797 const uint8_t* p = infoStart;
798 uint64_t sectionCount = read_uleb128(p, infoEnd);
799 for (uint64_t i=0; i < sectionCount; ++i) {
800 uint32_t* lastMappedAddr32 = NULL;
801 uint32_t lastKind = 0;
802 uint64_t lastToNewAddress = 0;
803 uint64_t fromSectionIndex = read_uleb128(p, infoEnd);
804 uint64_t toSectionIndex = read_uleb128(p, infoEnd);
805 uint64_t toOffsetCount = read_uleb128(p, infoEnd);
806 uint64_t fromSectionSlide = sectionSlides[fromSectionIndex];
807 uint64_t fromSectionNewAddress = sectionNewAddress[fromSectionIndex];
808 uint8_t* fromSectionMappedAddress = sectionMappedAddress[fromSectionIndex];
809 uint64_t toSectionSlide = sectionSlides[toSectionIndex];
810 uint64_t toSectionNewAddress = sectionNewAddress[toSectionIndex];
811 CacheBuilder::LOH_Tracker* lohTrackerPtr = (toSectionIndex == objcSelRefsSectionIndex) ? &lohTracker : nullptr;
812 if (log) printf(" from sect=%lld (mapped=%p), to sect=%lld (new addr=0x%llX):\n", fromSectionIndex, fromSectionMappedAddress, toSectionIndex, toSectionNewAddress);
813 uint64_t toSectionOffset = 0;
814 for (uint64_t j=0; j < toOffsetCount; ++j) {
815 uint64_t toSectionDelta = read_uleb128(p, infoEnd);
816 uint64_t fromOffsetCount = read_uleb128(p, infoEnd);
817 toSectionOffset += toSectionDelta;
818 for (uint64_t k=0; k < fromOffsetCount; ++k) {
819 uint64_t kind = read_uleb128(p, infoEnd);
820 if ( kind > 13 ) {
821 _diagnostics.error("unknown split seg info v2 kind value (%llu) in %s", kind, _installName);
822 return;
823 }
824 uint64_t fromSectDeltaCount = read_uleb128(p, infoEnd);
825 uint64_t fromSectionOffset = 0;
826 for (uint64_t l=0; l < fromSectDeltaCount; ++l) {
827 uint64_t delta = read_uleb128(p, infoEnd);
828 fromSectionOffset += delta;
829 int64_t deltaAdjust = toSectionSlide - fromSectionSlide;
830 //if (log) printf(" kind=%lld, from offset=0x%0llX, to offset=0x%0llX, adjust=0x%llX, targetSlide=0x%llX\n", kind, fromSectionOffset, toSectionOffset, deltaAdjust, toSectionSlide);
831 uint8_t* fromMappedAddr = fromSectionMappedAddress + fromSectionOffset;
832 uint64_t toNewAddress = toSectionNewAddress + toSectionOffset;
833 uint64_t fromNewAddress = fromSectionNewAddress + fromSectionOffset;
834 uint64_t imageStartAddress = sectionNewAddress.front();
835 uint64_t imageEndAddress = sectionNewAddress.back();
836 if ( toSectionIndex != 255 ) {
837 adjustReference((uint32_t)kind, fromMappedAddr, fromNewAddress, toNewAddress, deltaAdjust, toSectionSlide, imageStartAddress, imageEndAddress, aslrTracker, lohTrackerPtr, lastMappedAddr32, lastKind, lastToNewAddress);
838 }
839 if ( _diagnostics.hasError() )
840 return;
841 }
842 }
843 }
844 }
845
846 }
847
848 template <typename P>
849 void Adjustor<P>::adjustDataPointers(CacheBuilder::ASLR_Tracker& aslrTracker)
850 {
851 const uint8_t* p = &_linkeditBias[_dyldInfo->rebase_off()];
852 const uint8_t* end = &p[_dyldInfo->rebase_size()];
853
854 uint8_t type = 0;
855 int segIndex = 0;
856 uint64_t segOffset = 0;
857 uint64_t count;
858 uint64_t skip;
859 bool done = false;
860 while ( !done && (p < end) ) {
861 uint8_t immediate = *p & REBASE_IMMEDIATE_MASK;
862 uint8_t opcode = *p & REBASE_OPCODE_MASK;
863 ++p;
864 switch (opcode) {
865 case REBASE_OPCODE_DONE:
866 done = true;
867 break;
868 case REBASE_OPCODE_SET_TYPE_IMM:
869 type = immediate;
870 break;
871 case REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
872 segIndex = immediate;
873 segOffset = read_uleb128(p, end);
874 break;
875 case REBASE_OPCODE_ADD_ADDR_ULEB:
876 segOffset += read_uleb128(p, end);
877 break;
878 case REBASE_OPCODE_ADD_ADDR_IMM_SCALED:
879 segOffset += immediate*sizeof(pint_t);
880 break;
881 case REBASE_OPCODE_DO_REBASE_IMM_TIMES:
882 for (int i=0; i < immediate; ++i) {
883 slidePointer(segIndex, segOffset, type, aslrTracker);
884 segOffset += sizeof(pint_t);
885 }
886 break;
887 case REBASE_OPCODE_DO_REBASE_ULEB_TIMES:
888 count = read_uleb128(p, end);
889 for (uint32_t i=0; i < count; ++i) {
890 slidePointer(segIndex, segOffset, type, aslrTracker);
891 segOffset += sizeof(pint_t);
892 }
893 break;
894 case REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB:
895 slidePointer(segIndex, segOffset, type, aslrTracker);
896 segOffset += read_uleb128(p, end) + sizeof(pint_t);
897 break;
898 case REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB:
899 count = read_uleb128(p, end);
900 skip = read_uleb128(p, end);
901 for (uint32_t i=0; i < count; ++i) {
902 slidePointer(segIndex, segOffset, type, aslrTracker);
903 segOffset += skip + sizeof(pint_t);
904 }
905 break;
906 default:
907 _diagnostics.error("unknown rebase opcode 0x%02X in %s", opcode, _installName);
908 done = true;
909 break;
910 }
911 }
912 }
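// For illustration, a hypothetical rebase stream handled by the loop above:
//   REBASE_OPCODE_SET_TYPE_IMM(REBASE_TYPE_POINTER)
//   REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB(segIndex=3, segOffset=0x2000)
//   REBASE_OPCODE_DO_REBASE_IMM_TIMES(4)
//   REBASE_OPCODE_DONE
// slides the four consecutive pointers at offsets 0x2000, 0x2008, 0x2010 and 0x2018 of
// segment 3 (assuming 8-byte pointers) and records each location with the ASLR tracker.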
913
914
915 template <typename P>
916 void Adjustor<P>::adjustInstruction(uint8_t kind, uint8_t* textLoc, uint64_t codeToDataDelta)
917 {
918 uint32_t* fixupLoc32 = (uint32_t*)textLoc;
919 uint64_t* fixupLoc64 = (uint64_t*)textLoc;
920 uint32_t instruction;
921 uint32_t value32;
922 uint64_t value64;
923
924 switch (kind) {
925 case 1: // 32-bit pointer (including x86_64 RIP-rel)
926 value32 = P::E::get32(*fixupLoc32);
927 value32 += codeToDataDelta;
928 P::E::set32(*fixupLoc32, value32);
929 break;
930 case 2: // 64-bit pointer
931 value64 = P::E::get64(*fixupLoc64);
932 value64 += codeToDataDelta;
933 P::E::set64(*fixupLoc64, value64);
934 break;
935 case 4: // only used for i386, a reference to something in the IMPORT segment
936 break;
937 case 5: // used by thumb2 movw
938 instruction = P::E::get32(*fixupLoc32);
939 // slide is always a multiple of 4096, so only top 4 bits of lo16 will ever need adjusting
940 value32 = (instruction & 0x0000000F) + ((uint32_t)codeToDataDelta >> 12);
941 instruction = (instruction & 0xFFFFFFF0) | (value32 & 0x0000000F);
942 P::E::set32(*fixupLoc32, instruction);
943 break;
944 case 6: // used by ARM movw
945 instruction = P::E::get32(*fixupLoc32);
946 // slide is always a multiple of 4096, so only top 4 bits of lo16 will ever need adjusting
947 value32 = ((instruction & 0x000F0000) >> 16) + ((uint32_t)codeToDataDelta >> 12);
948 instruction = (instruction & 0xFFF0FFFF) | ((value32 <<16) & 0x000F0000);
949 P::E::set32(*fixupLoc32, instruction);
950 break;
951 case 0x10:
952 case 0x11:
953 case 0x12:
954 case 0x13:
955 case 0x14:
956 case 0x15:
957 case 0x16:
958 case 0x17:
959 case 0x18:
960 case 0x19:
961 case 0x1A:
962 case 0x1B:
963 case 0x1C:
964 case 0x1D:
965 case 0x1E:
966 case 0x1F:
967 // used by thumb2 movt (low nibble of kind is high 4-bits of paired movw)
968 {
969 instruction = P::E::get32(*fixupLoc32);
970 assert((instruction & 0x8000FBF0) == 0x0000F2C0);
971 // extract 16-bit value from instruction
972 uint32_t i = ((instruction & 0x00000400) >> 10);
973 uint32_t imm4 = (instruction & 0x0000000F);
974 uint32_t imm3 = ((instruction & 0x70000000) >> 28);
975 uint32_t imm8 = ((instruction & 0x00FF0000) >> 16);
976 uint32_t imm16 = (imm4 << 12) | (i << 11) | (imm3 << 8) | imm8;
977 // combine with codeToDataDelta and kind nibble
978 uint32_t targetValue = (imm16 << 16) | ((kind & 0xF) << 12);
979 uint32_t newTargetValue = targetValue + (uint32_t)codeToDataDelta;
980 // construct new bits slices
981 uint32_t imm4_ = (newTargetValue & 0xF0000000) >> 28;
982 uint32_t i_ = (newTargetValue & 0x08000000) >> 27;
983 uint32_t imm3_ = (newTargetValue & 0x07000000) >> 24;
984 uint32_t imm8_ = (newTargetValue & 0x00FF0000) >> 16;
985 // update instruction to match codeToDataDelta
986 uint32_t newInstruction = (instruction & 0x8F00FBF0) | imm4_ | (i_ << 10) | (imm3_ << 28) | (imm8_ << 16);
987 P::E::set32(*fixupLoc32, newInstruction);
988 }
989 break;
990 case 0x20:
991 case 0x21:
992 case 0x22:
993 case 0x23:
994 case 0x24:
995 case 0x25:
996 case 0x26:
997 case 0x27:
998 case 0x28:
999 case 0x29:
1000 case 0x2A:
1001 case 0x2B:
1002 case 0x2C:
1003 case 0x2D:
1004 case 0x2E:
1005 case 0x2F:
1006 // used by arm movt (low nibble of kind is high 4-bits of paired movw)
1007 {
1008 instruction = P::E::get32(*fixupLoc32);
1009 // extract 16-bit value from instruction
1010 uint32_t imm4 = ((instruction & 0x000F0000) >> 16);
1011 uint32_t imm12 = (instruction & 0x00000FFF);
1012 uint32_t imm16 = (imm4 << 12) | imm12;
1013 // combine with codeToDataDelta and kind nibble
1014 uint32_t targetValue = (imm16 << 16) | ((kind & 0xF) << 12);
1015 uint32_t newTargetValue = targetValue + (uint32_t)codeToDataDelta;
1016 // construct new bits slices
1017 uint32_t imm4_ = (newTargetValue & 0xF0000000) >> 28;
1018 uint32_t imm12_ = (newTargetValue & 0x0FFF0000) >> 16;
1019 // update instruction to match codeToDataDelta
1020 uint32_t newInstruction = (instruction & 0xFFF0F000) | (imm4_ << 16) | imm12_;
1021 P::E::set32(*fixupLoc32, newInstruction);
1022 }
1023 break;
1024 case 3: // used for arm64 ADRP
1025 instruction = P::E::get32(*fixupLoc32);
1026 if ( (instruction & 0x9F000000) == 0x90000000 ) {
1027             // codeToDataDelta is always a multiple of 4096, so only the ADRP page field (immhi:immlo) needs adjusting
1028 value64 = ((instruction & 0x60000000) >> 17) | ((instruction & 0x00FFFFE0) << 9);
1029 value64 += codeToDataDelta;
1030 instruction = (instruction & 0x9F00001F) | ((value64 << 17) & 0x60000000) | ((value64 >> 9) & 0x00FFFFE0);
1031 P::E::set32(*fixupLoc32, instruction);
1032 }
1033 break;
1034 default:
1035 break;
1036 }
1037 }
1038
1039 template <typename P>
1040 void Adjustor<P>::adjustCode()
1041 {
1042 // find compressed info on how code needs to be updated
1043 const uint8_t* infoStart = &_linkeditBias[_splitSegInfoCmd->dataoff()];
1044     const uint8_t* infoEnd = &infoStart[_splitSegInfoCmd->datasize()];
1045
1046 // This encoding only works if all data segments slide by the same amount
1047 uint64_t codeToDataDelta = _segSlides[1] - _segSlides[0];
1048
1049 // compressed data is: [ <kind> [uleb128-delta]+ <0> ] + <0>
1050 for (const uint8_t* p = infoStart; (*p != 0) && (p < infoEnd);) {
1051 uint8_t kind = *p++;
1052 uint8_t* textLoc = (uint8_t*)_mappingInfo[0].dstSegment;
1053 while (uint64_t delta = read_uleb128(p, infoEnd)) {
1054 textLoc += delta;
1055 adjustInstruction(kind, textLoc, codeToDataDelta);
1056 }
1057 }
1058 }
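// For illustration, a hypothetical stream consumed by the loop above:
//   kind=2, deltas 0x1010, 0x28, 0x30, 0     (three 64-bit pointers to update)
//   kind=3, delta 0x2004, 0                  (one arm64 ADRP to update)
//   0                                        (end of stream)
// Deltas accumulate from the start of __TEXT (the mach_header), restarting for each kind,
// and every location reached is patched by adjustInstruction() with the same codeToDataDelta.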
1059
1060
1061 template <typename P>
1062 void Adjustor<P>::adjustExportsTrie(std::vector<uint8_t>& newTrieBytes)
1063 {
1064 // if no export info, nothing to adjust
1065 if ( _dyldInfo->export_size() == 0 )
1066 return;
1067
1068 // since export info addresses are offsets from mach_header, everything in __TEXT is fine
1069 // only __DATA addresses need to be updated
1070 const uint8_t* start = &_linkeditBias[_dyldInfo->export_off()];
1071 const uint8_t* end = &start[_dyldInfo->export_size()];
1072 std::vector<ExportInfoTrie::Entry> originalExports;
1073 if ( !ExportInfoTrie::parseTrie(start, end, originalExports) ) {
1074 _diagnostics.error("malformed exports trie in %s", _installName);
1075 return;
1076 }
1077
1078 std::vector<ExportInfoTrie::Entry> newExports;
1079 newExports.reserve(originalExports.size());
1080 uint64_t baseAddress = _segOrigStartAddresses[0];
1081 uint64_t baseAddressSlide = slideForOrigAddress(baseAddress);
1082 for (auto& entry: originalExports) {
1083 // remove symbols used by the static linker only
1084 if ( (strncmp(entry.name.c_str(), "$ld$", 4) == 0)
1085 || (strncmp(entry.name.c_str(), ".objc_class_name",16) == 0)
1086 || (strncmp(entry.name.c_str(), ".objc_category_name",19) == 0) ) {
1087 continue;
1088 }
1089 // adjust symbols in slid segments
1090 if ( (entry.info.flags & EXPORT_SYMBOL_FLAGS_KIND_MASK) != EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE )
1091 entry.info.address += (slideForOrigAddress(entry.info.address + baseAddress) - baseAddressSlide);
1092 newExports.push_back(entry);
1093 }
1094
1095 // rebuild export trie
1096 newTrieBytes.reserve(_dyldInfo->export_size());
1097
1098 ExportInfoTrie(newExports).emit(newTrieBytes);
1099 // align
1100 while ( (newTrieBytes.size() % sizeof(pint_t)) != 0 )
1101 newTrieBytes.push_back(0);
1102 }
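// Hypothetical example for the address fix-up above: an exported symbol recorded in the
// trie at offset 0x0000C100 from the original load address lives in __DATA. If __DATA
// slides by 0xD2004000 while the base segment slides by 0xD2000000, the entry's offset
// becomes 0x0000C100 + (0xD2004000 - 0xD2000000) = 0x00010100, so mach_header + offset
// still lands on the symbol inside the cache.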
1103
1104
1105 } // anonymous namespace
1106
1107 void CacheBuilder::adjustDylibSegments(const DylibInfo& dylib, Diagnostics& diag) const
1108 {
1109 DyldSharedCache* cache = (DyldSharedCache*)_readExecuteRegion.buffer;
1110 if ( _archLayout->is64 ) {
1111 Adjustor<Pointer64<LittleEndian>> adjustor64(cache, (macho_header<Pointer64<LittleEndian>>*)dylib.cacheLocation[0].dstSegment, dylib.cacheLocation, diag);
1112 adjustor64.adjustImageForNewSegmentLocations(_aslrTracker, _lohTracker);
1113 }
1114 else {
1115 Adjustor<Pointer32<LittleEndian>> adjustor32(cache, (macho_header<Pointer32<LittleEndian>>*)dylib.cacheLocation[0].dstSegment, dylib.cacheLocation, diag);
1116 adjustor32.adjustImageForNewSegmentLocations(_aslrTracker, _lohTracker);
1117 }
1118 }
1119
1120
1121
1122
1123
1124