[apple/dyld.git] dyld-851.27 / dyld3 / shared-cache / OptimizerBranches.cpp
1 /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
2 *
3 * Copyright (c) 2015 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <sys/types.h>
27 #include <sys/stat.h>
28 #include <sys/mman.h>
29 #include <limits.h>
30 #include <stdarg.h>
31 #include <stdio.h>
32 #include <unistd.h>
33 #include <CommonCrypto/CommonDigest.h>
34
35 #include <string>
36 #include <unordered_map>
37 #include <unordered_set>
38
39 #include "StringUtils.h"
40 #include "Trie.hpp"
41 #include "MachOFileAbstraction.hpp"
42 #include "MachOAnalyzer.h"
43 #include "Diagnostics.h"
44 #include "DyldSharedCache.h"
45 #include "CacheBuilder.h"
46
47 static const bool verbose = false;
48
49
50
51
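// StubOptimizer walks one dylib that has already been laid out in the shared
// cache.  buildStubMap() records every S_SYMBOL_STUBS entry together with the
// lazy pointer it loads, and every lazy/non-lazy pointer together with the
// target it currently holds.  optimizeCallSites() then uses the split-seg info
// to visit each branch from __text into the stub section: branches whose final
// target is within direct-branch range are rewritten to call the target
// directly, and stubs that remain in use are rewritten to branch to their
// target without loading the lazy pointer.  Symbols in neverStubEliminate
// (interposable symbols and exports of overridable dylibs) keep their stubs.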
52 template <typename P>
53 class StubOptimizer {
54 public:
55 StubOptimizer(int64_t cacheSlide, uint64_t cacheUnslidAddr,
56 const std::string& archName, macho_header<P>* mh,
57 const char* dylibID, Diagnostics& diags);
58 void buildStubMap(const std::unordered_set<std::string>& neverStubEliminate);
59 void optimizeStubs();
60 void optimizeCallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr);
61 const char* dylibID() { return _dylibID; }
62 const uint8_t* exportsTrie() {
63 if ( _dyldInfo != nullptr )
64 return &_linkeditBias[_dyldInfo->export_off()];
65 else
66 return &_linkeditBias[_exportTrie->dataoff()];
67 }
68 uint32_t exportsTrieSize() {
69 if ( _dyldInfo != nullptr )
70 return _dyldInfo->export_size();
71 else
72 return _exportTrie->datasize();
73 }
74
75 uint32_t _stubCount = 0;
76 uint32_t _stubOptimizedCount = 0;
77 uint32_t _stubsLeftInterposable = 0;
78 uint32_t _branchToStubCount = 0;
79 uint32_t _branchOptimizedToDirectCount = 0;
80 uint32_t _branchToOptimizedStubCount = 0;
81 uint32_t _branchToReUsedOptimizedStubCount = 0;
82
83 private:
84 Diagnostics& _diagnostics;
85
86 typedef std::function<bool(uint8_t callSiteKind, uint64_t callSiteAddr, uint64_t stubAddr, uint32_t& instruction)> CallSiteHandler;
87 typedef typename P::uint_t pint_t;
88 typedef typename P::E E;
89
90 void forEachCallSiteToAStub(CallSiteHandler);
91 void optimizeArm64CallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr);
92 void optimizeArm64Stubs();
93 #if SUPPORT_ARCH_arm64e
94 void optimizeArm64eStubs();
95 #endif
96 #if SUPPORT_ARCH_arm64_32
97 void optimizeArm64_32Stubs();
98 #endif
99 void optimizeArmCallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr);
100 void optimizeArmStubs();
101 uint64_t lazyPointerAddrFromArm64Stub(const uint8_t* stubInstructions, uint64_t stubVMAddr);
102 #if SUPPORT_ARCH_arm64e
103 uint64_t lazyPointerAddrFromArm64eStub(const uint8_t* stubInstructions, uint64_t stubVMAddr);
104 #endif
105 #if SUPPORT_ARCH_arm64_32
106 uint64_t lazyPointerAddrFromArm64_32Stub(const uint8_t* stubInstructions, uint64_t stubVMAddr);
107 #endif
108 uint32_t lazyPointerAddrFromArmStub(const uint8_t* stubInstructions, uint32_t stubVMAddr);
109 int32_t getDisplacementFromThumbBranch(uint32_t instruction, uint32_t instrAddr);
110 uint32_t setDisplacementInThumbBranch(uint32_t instruction, uint32_t instrAddr,
111 int32_t displacement, bool targetIsThumb);
112 uint32_t cpuSubtype() { return ((dyld3::MachOFile*)_mh)->maskedCpuSubtype(); }
113
114
115 struct AddressAndName { pint_t targetVMAddr; const char* targetName; };
116 typedef std::unordered_map<pint_t, AddressAndName> StubVMAddrToTarget;
117
118 static const int64_t b128MegLimit = 0x07FFFFFF;
119 static const int64_t b16MegLimit = 0x00FFFFFF;
120
121
122
123 macho_header<P>* _mh;
124 int64_t _cacheSlide = 0;
125 uint64_t _cacheUnslideAddr = 0;
126 uint32_t _linkeditSize = 0;
127 uint64_t _linkeditAddr = 0;
128 const uint8_t* _linkeditBias = nullptr;
129 const char* _dylibID = nullptr;
130 const macho_symtab_command<P>* _symTabCmd = nullptr;
131 const macho_dysymtab_command<P>* _dynSymTabCmd = nullptr;
132 const macho_dyld_info_command<P>* _dyldInfo = nullptr;
133 const macho_linkedit_data_command<P>* _exportTrie = nullptr;
134 macho_linkedit_data_command<P>* _splitSegInfoCmd = nullptr;
135 const macho_section<P>* _textSection = nullptr;
136 const macho_section<P>* _stubSection = nullptr;
137 uint32_t _textSectionIndex = 0;
138 uint32_t _stubSectionIndex = 0;
139 pint_t _textSegStartAddr = 0;
140 std::vector<macho_segment_command<P>*> _segCmds;
141 std::unordered_map<pint_t, pint_t> _stubAddrToLPAddr;
142 std::unordered_map<pint_t, pint_t> _lpAddrToTargetAddr;
143 std::unordered_map<pint_t, const char*> _targetAddrToName;
144 std::unordered_set<uint64_t> _stubsToOptimize;
145 };
146
147
148 template <typename P>
149 StubOptimizer<P>::StubOptimizer(int64_t cacheSlide, uint64_t cacheUnslidAddr,
150 const std::string& archName,
151 macho_header<P>* mh, const char* dylibID,
152 Diagnostics& diags)
153 : _mh(mh), _dylibID(dylibID),
154 _cacheSlide(cacheSlide), _cacheUnslideAddr(cacheUnslidAddr),
155 _diagnostics(diags)
156 {
157 const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)mh + sizeof(macho_header<P>));
158 const uint32_t cmd_count = mh->ncmds();
159 macho_segment_command<P>* segCmd;
160 uint32_t sectionIndex = 0;
161 const macho_load_command<P>* cmd = cmds;
162 for (uint32_t i = 0; i < cmd_count; ++i) {
163 switch (cmd->cmd()) {
164 case LC_SYMTAB:
165 _symTabCmd = (macho_symtab_command<P>*)cmd;
166 break;
167 case LC_DYSYMTAB:
168 _dynSymTabCmd = (macho_dysymtab_command<P>*)cmd;
169 break;
170 case LC_SEGMENT_SPLIT_INFO:
171 _splitSegInfoCmd = (macho_linkedit_data_command<P>*)cmd;
172 break;
173 case LC_DYLD_INFO:
174 case LC_DYLD_INFO_ONLY:
175 _dyldInfo = (macho_dyld_info_command<P>*)cmd;
176 break;
177 case LC_DYLD_EXPORTS_TRIE:
178 _exportTrie = (macho_linkedit_data_command<P>*)cmd;
179 break;
180 case macho_segment_command<P>::CMD:
181 segCmd = (macho_segment_command<P>*)cmd;
182 _segCmds.push_back(segCmd);
183 if ( strcmp(segCmd->segname(), "__LINKEDIT") == 0 ) {
184 _linkeditBias = (uint8_t*)(segCmd->vmaddr() + _cacheSlide - segCmd->fileoff());
185 _linkeditSize = (uint32_t)segCmd->vmsize();
186 _linkeditAddr = segCmd->vmaddr();
187 }
188 else if ( strcmp(segCmd->segname(), "__TEXT") == 0 ) {
189 _textSegStartAddr = (pint_t)segCmd->vmaddr();
190 const macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)segCmd + sizeof(macho_segment_command<P>));
191 const macho_section<P>* const sectionsEnd = &sectionsStart[segCmd->nsects()];
192 for (const macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
193 ++sectionIndex;
194 if ( strcmp(sect->sectname(), "__text") == 0 ) {
195 _textSection = sect;
196 _textSectionIndex = sectionIndex;
197 }
198 else if ( ((sect->flags() & SECTION_TYPE) == S_SYMBOL_STUBS) && (sect->size() != 0) ) {
199 _stubSection = sect;
200 _stubSectionIndex = sectionIndex;
201 }
202 }
203 }
204 break;
205 }
206 cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
207 }
208 }
209
210
211
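// The unoptimized arm stub decoded here has the form:
//     ldr  ip, [pc, #4]        ; loads the .long below (pc reads as stub+8)
//     add  ip, pc, ip          ; ip = stub+12 + data
//     ldr  pc, [ip]            ; jump through the lazy pointer
//     .long lp - (stub+12)
// so the lazy pointer address is stubVMAddr + 12 + stubData.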
212 template <typename P>
213 uint32_t StubOptimizer<P>::lazyPointerAddrFromArmStub(const uint8_t* stubInstructions, uint32_t stubVMAddr)
214 {
215 uint32_t stubInstr1 = E::get32(*(uint32_t*)stubInstructions);
216 uint32_t stubInstr2 = E::get32(*(uint32_t*)(stubInstructions+4));
217 uint32_t stubInstr3 = E::get32(*(uint32_t*)(stubInstructions+8));
218 int32_t stubData = E::get32(*(uint32_t*)(stubInstructions+12));
219 if ( stubInstr1 != 0xe59fc004 ) {
220 _diagnostics.warning("first instruction of stub (0x%08X) is not 'ldr ip, pc + 12' for stub at addr 0x%0llX in %s",
221 stubInstr1, (uint64_t)stubVMAddr, _dylibID);
222 return 0;
223 }
224 if ( stubInstr2 != 0xe08fc00c ) {
225 _diagnostics.warning("second instruction of stub (0x%08X) is not 'add ip, pc, ip' for stub at addr 0x%0llX in %s",
226 stubInstr2, (uint64_t)stubVMAddr, _dylibID);
227 return 0;
228 }
229 if ( stubInstr3 != 0xe59cf000 ) {
230 _diagnostics.warning("third instruction of stub (0x%08X) is not 'ldr pc, [ip]' for stub at addr 0x%0llX in %s",
231 stubInstr3, (uint64_t)stubVMAddr, _dylibID);
232 return 0;
233 }
234 return stubVMAddr + 12 + stubData;
235 }
236
237
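// The arm64 stub is: ADRP X16, lp@page ; LDR X16, [X16, lp@pageoff] ; BR X16.
// The ADRP immediate is reassembled from immlo (bits 30:29) and immhi
// (bits 23:5) and sign-extended to a page delta; the LDR imm12 is scaled by 8
// (64-bit load).  Illustrative example: a stub at 0x1800A3210 whose ADRP
// encodes a page delta of +5 and whose LDR imm12 is 0x123 yields
//     (0x1800A3210 & ~0xFFFULL) + 5*4096 + 0x123*8 = 0x1800A8918.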
238 template <typename P>
239 uint64_t StubOptimizer<P>::lazyPointerAddrFromArm64Stub(const uint8_t* stubInstructions, uint64_t stubVMAddr)
240 {
241 uint32_t stubInstr1 = E::get32(*(uint32_t*)stubInstructions);
242 if ( (stubInstr1 & 0x9F00001F) != 0x90000010 ) {
243 _diagnostics.warning("first instruction of stub (0x%08X) is not ADRP for stub at addr 0x%0llX in %s",
244 stubInstr1, (uint64_t)stubVMAddr, _dylibID);
245 return 0;
246 }
247 int32_t adrpValue = ((stubInstr1 & 0x00FFFFE0) >> 3) | ((stubInstr1 & 0x60000000) >> 29);
248 if ( stubInstr1 & 0x00800000 )
249 adrpValue |= 0xFFF00000;
250 uint32_t stubInstr2 = E::get32(*(uint32_t*)(stubInstructions + 4));
251 if ( (stubInstr2 & 0xFFC003FF) != 0xF9400210 ) {
252 _diagnostics.warning("second instruction of stub (0x%08X) is not LDR for stub at addr 0x%0llX in %s",
253 stubInstr2, (uint64_t)stubVMAddr, _dylibID);
254 return 0;
255 }
256 uint32_t ldrValue = ((stubInstr2 >> 10) & 0x00000FFF);
257 return (stubVMAddr & (-4096)) + adrpValue*4096 + ldrValue*8;
258 }
259
260 #if SUPPORT_ARCH_arm64_32
261 template <typename P>
262 uint64_t StubOptimizer<P>::lazyPointerAddrFromArm64_32Stub(const uint8_t* stubInstructions, uint64_t stubVMAddr)
263 {
264 uint32_t stubInstr1 = E::get32(*(uint32_t*)stubInstructions);
265 if ( (stubInstr1 & 0x9F00001F) != 0x90000010 ) {
266 _diagnostics.warning("first instruction of stub (0x%08X) is not ADRP for stub at addr 0x%0llX in %s",
267 stubInstr1, (uint64_t)stubVMAddr, _dylibID);
268 return 0;
269 }
270 int32_t adrpValue = ((stubInstr1 & 0x00FFFFE0) >> 3) | ((stubInstr1 & 0x60000000) >> 29);
271 if ( stubInstr1 & 0x00800000 )
272 adrpValue |= 0xFFF00000;
273 uint32_t stubInstr2 = E::get32(*(uint32_t*)(stubInstructions + 4));
274 if ( (stubInstr2 & 0xFFC003FF) != 0xB9400210 ) {
275 _diagnostics.warning("second instruction of stub (0x%08X) is not LDR for stub at addr 0x%0llX in %s",
276 stubInstr2, (uint64_t)stubVMAddr, _dylibID);
277 return 0;
278 }
279 uint32_t ldrValue = ((stubInstr2 >> 10) & 0x00000FFF);
280 return (stubVMAddr & (-4096)) + adrpValue*4096 + ldrValue*4; // LDR Wn has a scale factor of 4
281
282 }
283 #endif
284
285
286 #if SUPPORT_ARCH_arm64e
287 template <typename P>
288 uint64_t StubOptimizer<P>::lazyPointerAddrFromArm64eStub(const uint8_t* stubInstructions, uint64_t stubVMAddr)
289 {
290 uint32_t stubInstr1 = E::get32(*(uint32_t*)stubInstructions);
291 // ADRP X17, lp@page
292 if ( (stubInstr1 & 0x9F00001F) != 0x90000011 ) {
293 _diagnostics.warning("first instruction of stub (0x%08X) is not ADRP for stub at addr 0x%0llX in %s",
294 stubInstr1, (uint64_t)stubVMAddr, _dylibID);
295 return 0;
296 }
297 int32_t adrpValue = ((stubInstr1 & 0x00FFFFE0) >> 3) | ((stubInstr1 & 0x60000000) >> 29);
298 if ( stubInstr1 & 0x00800000 )
299 adrpValue |= 0xFFF00000;
300
301 // ADD X17, X17, lp@pageoff
302 uint32_t stubInstr2 = E::get32(*(uint32_t*)(stubInstructions + 4));
303 if ( (stubInstr2 & 0xFFC003FF) != 0x91000231 ) {
304 _diagnostics.warning("second instruction of stub (0x%08X) is not ADD for stub at addr 0x%0llX in %s",
305 stubInstr2, (uint64_t)stubVMAddr, _dylibID);
306 return 0;
307 }
308 uint32_t addValue = ((stubInstr2 & 0x003FFC00) >> 10);
309
310 // LDR X16, [X17]
311 uint32_t stubInstr3 = E::get32(*(uint32_t*)(stubInstructions + 8));
312 if ( stubInstr3 != 0xF9400230 ) {
313 _diagnostics.warning("third instruction of stub (0x%08X) is not LDR for stub at addr 0x%0llX in %s",
314 stubInstr3, (uint64_t)stubVMAddr, _dylibID);
315 return 0;
316 }
317 return (stubVMAddr & (-4096)) + adrpValue*4096 + addValue;
318 }
319 #endif
320
321
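// buildStubMap() walks every segment's sections.  For S_SYMBOL_STUBS sections
// it uses reserved1/reserved2 (indirect-table start index and stub size) to
// pair each stub with the lazy pointer its instructions reference, skipping
// stubs whose symbol is in neverStubEliminate.  For lazy/non-lazy pointer
// sections it records what each pointer currently holds, ignoring pointers
// that still point back into this dylib's own __TEXT and, on 64-bit, values
// that are not 4-byte aligned.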
322 template <typename P>
323 void StubOptimizer<P>::buildStubMap(const std::unordered_set<std::string>& neverStubEliminate)
324 {
325 // find all stubs and lazy pointers
326 const macho_nlist<P>* symbolTable = (const macho_nlist<P>*)(&_linkeditBias[_symTabCmd->symoff()]);
327 const char* symbolStrings = (char*)(&_linkeditBias[_symTabCmd->stroff()]);
328 const uint32_t* const indirectTable = (uint32_t*)(&_linkeditBias[_dynSymTabCmd->indirectsymoff()]);
329 const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)_mh + sizeof(macho_header<P>));
330 const uint32_t cmd_count = _mh->ncmds();
331 const macho_load_command<P>* cmd = cmds;
332 for (uint32_t i = 0; i < cmd_count; ++i) {
333 if ( cmd->cmd() == macho_segment_command<P>::CMD ) {
334 macho_segment_command<P>* seg = (macho_segment_command<P>*)cmd;
335 macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)seg + sizeof(macho_segment_command<P>));
336 macho_section<P>* const sectionsEnd = &sectionsStart[seg->nsects()];
337 for(macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
338 if ( sect->size() == 0 )
339 continue;
340 unsigned sectionType = (sect->flags() & SECTION_TYPE);
341 const uint32_t indirectTableOffset = sect->reserved1();
342 if ( sectionType == S_SYMBOL_STUBS ) {
343 const uint32_t stubSize = sect->reserved2();
344 _stubCount = (uint32_t)(sect->size() / stubSize);
345 pint_t stubVMAddr = (pint_t)sect->addr();
346 for (uint32_t j=0; j < _stubCount; ++j, stubVMAddr += stubSize) {
347 uint32_t symbolIndex = E::get32(indirectTable[indirectTableOffset + j]);
348 switch ( symbolIndex ) {
349 case INDIRECT_SYMBOL_ABS:
350 case INDIRECT_SYMBOL_LOCAL:
351 case INDIRECT_SYMBOL_ABS | INDIRECT_SYMBOL_LOCAL:
352 break;
353 default:
354 if ( symbolIndex >= _symTabCmd->nsyms() ) {
355 _diagnostics.warning("symbol index out of range (%d of %d) for stub at addr 0x%0llX in %s",
356 symbolIndex, _symTabCmd->nsyms(), (uint64_t)stubVMAddr, _dylibID);
357 continue;
358 }
359 const macho_nlist<P>* sym = &symbolTable[symbolIndex];
360 uint32_t stringOffset = sym->n_strx();
361 if ( stringOffset > _symTabCmd->strsize() ) {
362 _diagnostics.warning("symbol string offset out of range (%u of %u) for stub at addr 0x%0llX in %s",
363 stringOffset, _symTabCmd->strsize(), (uint64_t)stubVMAddr, _dylibID);
364 continue;
365 }
366 const char* symName = &symbolStrings[stringOffset];
367 if ( neverStubEliminate.count(symName) ) {
368 //fprintf(stderr, "stubVMAddr=0x%llX, not bypassing stub to %s in %s because target is interposable\n", (uint64_t)stubVMAddr, symName, _dylibID);
369 _stubsLeftInterposable++;
370 continue;
371 }
372 const uint8_t* stubInstrs = (uint8_t*)(long)stubVMAddr + _cacheSlide;
373 pint_t targetLPAddr = 0;
374 switch ( _mh->cputype() ) {
375 case CPU_TYPE_ARM64:
376 #if SUPPORT_ARCH_arm64e
377 if (cpuSubtype() == CPU_SUBTYPE_ARM64E)
378 targetLPAddr = (pint_t)lazyPointerAddrFromArm64eStub(stubInstrs, stubVMAddr);
379 else
380 #endif
381 targetLPAddr = (pint_t)lazyPointerAddrFromArm64Stub(stubInstrs, stubVMAddr);
382 break;
383 #if SUPPORT_ARCH_arm64_32
384 case CPU_TYPE_ARM64_32:
385 if (cpuSubtype() == CPU_SUBTYPE_ARM64_32_V8)
386 targetLPAddr = (pint_t)lazyPointerAddrFromArm64_32Stub(stubInstrs, stubVMAddr);
387 break;
388 #endif
389 case CPU_TYPE_ARM:
390 targetLPAddr = (pint_t)lazyPointerAddrFromArmStub(stubInstrs, (uint32_t)stubVMAddr);
391 break;
392 }
393 if ( targetLPAddr != 0 )
394 _stubAddrToLPAddr[stubVMAddr] = targetLPAddr;
395 break;
396 }
397 }
398 }
399 else if ( (sectionType == S_LAZY_SYMBOL_POINTERS) || (sectionType == S_NON_LAZY_SYMBOL_POINTERS) ) {
400 pint_t lpVMAddr;
401 pint_t* lpContent = (pint_t*)(sect->addr() + _cacheSlide);
402 uint32_t elementCount = (uint32_t)(sect->size() / sizeof(pint_t));
403 uint64_t textSegStartAddr = _segCmds[0]->vmaddr();
404 uint64_t textSegEndAddr = _segCmds[0]->vmaddr() + _segCmds[0]->vmsize();
405 pint_t lpValue;
406 for (uint32_t j=0; j < elementCount; ++j) {
407 uint32_t symbolIndex = E::get32(indirectTable[indirectTableOffset + j]);
408 switch ( symbolIndex ) {
409 case INDIRECT_SYMBOL_ABS:
410 case INDIRECT_SYMBOL_LOCAL:
411 case INDIRECT_SYMBOL_LOCAL|INDIRECT_SYMBOL_ABS:
412 break;
413 default:
414 lpValue = (pint_t)P::getP(lpContent[j]);
415 lpVMAddr = (pint_t)sect->addr() + j * sizeof(pint_t);
416 if ( symbolIndex >= _symTabCmd->nsyms() ) {
417 _diagnostics.warning("symbol index out of range (%d of %d) for lazy pointer at addr 0x%0llX in %s",
418 symbolIndex, _symTabCmd->nsyms(), (uint64_t)lpVMAddr, _dylibID);
419 continue;
420 }
421 const macho_nlist<P>* sym = &symbolTable[symbolIndex];
422 uint32_t stringOffset = sym->n_strx();
423 if ( stringOffset > _symTabCmd->strsize() ) {
424 _diagnostics.warning("symbol string offset out of range (%u of %u) for lazy pointer at addr 0x%0llX in %s",
425 stringOffset, _symTabCmd->strsize(), (uint64_t)lpVMAddr, _dylibID);
426 continue;
427 }
428 const char* symName = &symbolStrings[stringOffset];
429 if ( (lpValue > textSegStartAddr) && (lpValue < textSegEndAddr) ) {
430 //fprintf(stderr, "skipping lazy pointer at 0x%0lX to %s in %s because target is within dylib\n", (long)lpVMAddr, symName, _dylibID);
431 }
432 else if ( (sizeof(pint_t) == 8) && ((lpValue % 4) != 0) ) {
433 // Only warn on lazy pointers which correspond to call targets
434 if ( sectionType == S_LAZY_SYMBOL_POINTERS ) {
435 _diagnostics.warning("lazy pointer at 0x%0llX does not point to 4-byte aligned address (0x%0llX) for symbol '%s' in %s",
436 (uint64_t)lpVMAddr, (uint64_t)lpValue, symName, _dylibID);
437 }
438 }
439 else {
440 _lpAddrToTargetAddr[lpVMAddr] = lpValue;
441 _targetAddrToName[lpValue] = symName;
442 }
443 break;
444 }
445 }
446 }
447 }
448 }
449 cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
450 }
451 }
452
453
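// forEachCallSiteToAStub() parses the DYLD_CACHE_ADJ_V2 split-seg info (a
// uleb128-encoded list of from-section/to-section offset deltas, per the
// grammar below) and invokes the handler for every reference whose source is
// __text and whose target is the stub section.  If the handler returns true,
// the (possibly rewritten) instruction is stored back into the cache.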
454 template <typename P>
455 void StubOptimizer<P>::forEachCallSiteToAStub(CallSiteHandler handler)
456 {
457 if (_diagnostics.hasError())
458 return;
459 const uint8_t* infoStart = &_linkeditBias[_splitSegInfoCmd->dataoff()];
460 const uint8_t* infoEnd = &infoStart[_splitSegInfoCmd->datasize()];
461 if ( *infoStart++ != DYLD_CACHE_ADJ_V2_FORMAT ) {
462 _diagnostics.error("malformed split seg info in %s", _dylibID);
463 return;
464 }
465
466 uint8_t* textSectionContent = (uint8_t*)(_textSection->addr() + _cacheSlide);
467
468 // Whole :== <count> FromToSection+
469 // FromToSection :== <from-sect-index> <to-sect-index> <count> ToOffset+
470 // ToOffset :== <to-sect-offset-delta> <count> FromOffset+
471 // FromOffset :== <kind> <count> <from-sect-offset-delta>
472 const uint8_t* p = infoStart;
473 uint64_t sectionCount = read_uleb128(p, infoEnd);
474 for (uint64_t i=0; i < sectionCount; ++i) {
475 uint64_t fromSectionIndex = read_uleb128(p, infoEnd);
476 uint64_t toSectionIndex = read_uleb128(p, infoEnd);
477 uint64_t toOffsetCount = read_uleb128(p, infoEnd);
478 uint64_t toSectionOffset = 0;
479 for (uint64_t j=0; j < toOffsetCount; ++j) {
480 uint64_t toSectionDelta = read_uleb128(p, infoEnd);
481 uint64_t fromOffsetCount = read_uleb128(p, infoEnd);
482 toSectionOffset += toSectionDelta;
483 for (uint64_t k=0; k < fromOffsetCount; ++k) {
484 uint64_t kind = read_uleb128(p, infoEnd);
485 if ( kind > 13 ) {
486 _diagnostics.error("bad kind (%llu) value in %s\n", kind, _dylibID);
487 }
488 uint64_t fromSectDeltaCount = read_uleb128(p, infoEnd);
489 uint64_t fromSectionOffset = 0;
490 for (uint64_t l=0; l < fromSectDeltaCount; ++l) {
491 uint64_t delta = read_uleb128(p, infoEnd);
492 fromSectionOffset += delta;
493 if ( (fromSectionIndex == _textSectionIndex) && (toSectionIndex == _stubSectionIndex) ) {
494 uint32_t* instrPtr = (uint32_t*)(textSectionContent + fromSectionOffset);
495 uint64_t instrAddr = _textSection->addr() + fromSectionOffset;
496 uint64_t stubAddr = _stubSection->addr() + toSectionOffset;
497 uint32_t instruction = E::get32(*instrPtr);
498 _branchToStubCount++;
499 if ( handler(kind, instrAddr, stubAddr, instruction) ) {
500 E::set32(*instrPtr, instruction);
501 }
502 }
503 }
504 }
505 }
506 }
507 }
508
509
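// Note: in both thumb branch helpers below, 'instruction' holds the first
// Thumb halfword in its low 16 bits and the second halfword in its high
// 16 bits, which is why the b/bl/blx masks read as 0xD000F800 rather than
// 0xF800D000.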
510 /// Extract displacement from a thumb b/bl/blx instruction.
511 template <typename P>
512 int32_t StubOptimizer<P>::getDisplacementFromThumbBranch(uint32_t instruction, uint32_t instrAddr)
513 {
514 bool is_blx = ((instruction & 0xD000F800) == 0xC000F000);
515 uint32_t s = (instruction >> 10) & 0x1;
516 uint32_t j1 = (instruction >> 29) & 0x1;
517 uint32_t j2 = (instruction >> 27) & 0x1;
518 uint32_t imm10 = instruction & 0x3FF;
519 uint32_t imm11 = (instruction >> 16) & 0x7FF;
520 uint32_t i1 = (j1 == s);
521 uint32_t i2 = (j2 == s);
522 uint32_t dis = (s << 24) | (i1 << 23) | (i2 << 22) | (imm10 << 12) | (imm11 << 1);
523 int32_t sdis = dis;
524 int32_t result = s ? (sdis | 0xFE000000) : sdis;
525 if ( is_blx && (instrAddr & 0x2) ) {
526 // The thumb blx instruction always has low bit of imm11 as zero. The way
527 // a 2-byte aligned blx can branch to a 4-byte aligned ARM target is that
528 // the blx instruction always 4-byte aligns the pc before adding the
529 // displacement from the blx. We must emulate that when decoding this.
530 result -= 2;
531 }
532 return result;
533 }
534
535 /// Update a thumb b/bl/blx instruction, switching bl <-> blx as needed.
536 template <typename P>
537 uint32_t StubOptimizer<P>::setDisplacementInThumbBranch(uint32_t instruction, uint32_t instrAddr,
538 int32_t displacement, bool targetIsThumb) {
539 if ( (displacement > 16777214) || (displacement < (-16777216)) ) {
540 _diagnostics.error("thumb branch out of range at 0x%0X in %s", instrAddr, _dylibID);
541 return 0;
542 }
543 bool is_bl = ((instruction & 0xD000F800) == 0xD000F000);
544 bool is_blx = ((instruction & 0xD000F800) == 0xC000F000);
545 bool is_b = ((instruction & 0xD000F800) == 0x9000F000);
546 uint32_t newInstruction = (instruction & 0xD000F800);
547 if (is_bl || is_blx) {
548 if (targetIsThumb) {
549 newInstruction = 0xD000F000; // Use bl
550 }
551 else {
552 newInstruction = 0xC000F000; // Use blx
553 // See note in getDisplacementFromThumbBranch() about blx.
554 if (instrAddr & 0x2)
555 displacement += 2;
556 }
557 }
558 else if (is_b) {
559 if ( !targetIsThumb ) {
560 _diagnostics.error("no pc-rel thumb branch instruction that switches to arm mode at 0x%0X in %s", instrAddr, _dylibID);
561 return 0;
562 }
563 }
564 else {
565 _diagnostics.error("not b/bl/blx at 0x%0X in %s", instrAddr, _dylibID);
566 return 0;
567 }
568 uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
569 uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
570 uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
571 uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
572 uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
573 uint32_t j1 = (i1 == s);
574 uint32_t j2 = (i2 == s);
575 uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
576 uint32_t firstDisp = (s << 10) | imm10;
577 newInstruction |= (nextDisp << 16) | firstDisp;
578 return newInstruction;
579 }
580
581
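// optimizeArmCallSites() handles DYLD_CACHE_ADJ_V2_THUMB_BR22 references.
// A thumb-2 bl/blx reaches only +/-16MB (b16MegLimit), so branches whose
// final target is out of range fall back to a nearby already-optimized stub,
// or keep their own stub and mark it for optimization.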
582 template <typename P>
583 void StubOptimizer<P>::optimizeArmCallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr)
584 {
585 forEachCallSiteToAStub([&](uint8_t kind, uint64_t callSiteAddr, uint64_t stubAddr, uint32_t& instruction) -> bool {
586 if ( kind == DYLD_CACHE_ADJ_V2_THUMB_BR22 ) {
587 bool is_bl = ((instruction & 0xD000F800) == 0xD000F000);
588 bool is_blx = ((instruction & 0xD000F800) == 0xC000F000);
589 bool is_b = ((instruction & 0xD000F800) == 0x9000F000);
590 if ( !is_bl && !is_blx && !is_b ){
591 _diagnostics.warning("non-branch instruction at 0x%0llX in %s", callSiteAddr, _dylibID);
592 return false;
593 }
594 int32_t brDelta = getDisplacementFromThumbBranch(instruction, (uint32_t)callSiteAddr);
595 pint_t targetAddr = (pint_t)callSiteAddr + 4 + brDelta;
596 if ( targetAddr != stubAddr ) {
597 _diagnostics.warning("stub target mismatch at callsite 0x%0llX in %s", callSiteAddr, _dylibID);
598 return false;
599 }
600 // ignore branch if not to a known stub
601 const auto& pos = _stubAddrToLPAddr.find(targetAddr);
602 if ( pos == _stubAddrToLPAddr.end() )
603 return false;
604
605 // ignore branch if lazy pointer is not known (resolver or interposable)
606 uint64_t lpAddr = pos->second;
607 const auto& pos2 = _lpAddrToTargetAddr.find((pint_t)lpAddr);
608 if ( pos2 == _lpAddrToTargetAddr.end() )
609 return false;
610
611 uint64_t finalTargetAddr = pos2->second;
612 int64_t deltaToFinalTarget = finalTargetAddr - (callSiteAddr + 4);
613 // if final target within range, change to branch there directly
614 if ( (deltaToFinalTarget > -b16MegLimit) && (deltaToFinalTarget < b16MegLimit) ) {
615 bool targetIsThumb = (finalTargetAddr & 1);
616 instruction = setDisplacementInThumbBranch(instruction, (uint32_t)callSiteAddr, (int32_t)deltaToFinalTarget, targetIsThumb);
617 if (_diagnostics.hasError())
618 return false;
619 _branchOptimizedToDirectCount++;
620 return true;
621 }
622
623 // try to re-use an existing optimized stub
624 const auto& pos3 = targetAddrToOptStubAddr.find(finalTargetAddr);
625 if ( pos3 != targetAddrToOptStubAddr.end() ) {
626 uint64_t existingStub = pos3->second;
627 if ( existingStub != stubAddr ) {
628 int64_t deltaToOptStub = existingStub - (callSiteAddr + 4);
629 if ( (deltaToOptStub > -b16MegLimit) && (deltaToOptStub < b16MegLimit) ) {
630 bool targetIsThumb = (existingStub & 1);
631 instruction = setDisplacementInThumbBranch(instruction, (uint32_t)callSiteAddr, (int32_t)deltaToOptStub, targetIsThumb);
632 if (_diagnostics.hasError())
633 return false;
634 _branchToReUsedOptimizedStubCount++;
635 return true;
636 }
637 }
638 }
639
640 // leave as BL to stub, but optimize the stub
641 _stubsToOptimize.insert(stubAddr);
642 targetAddrToOptStubAddr[finalTargetAddr] = stubAddr;
643 _branchToOptimizedStubCount++;
644 return false;
645 }
646 else if ( kind == DYLD_CACHE_ADJ_V2_ARM_BR24 ) {
647 // too few of these to be worth trying to optimize
648 }
649
650 return false;
651 });
652 if (_diagnostics.hasError())
653 return;
654 }
655
656
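// optimizeArmStubs() rewrites each arm stub so it no longer loads the lazy
// pointer.  The replacement sequence is pc-relative:
//     ldr  ip, L0              ; at stub+0, loads the .long at stub+8
//     add  pc, pc, ip          ; at stub+4, pc reads as stub+12
//     L0: .long target - (stub+12)
// which is why 'delta' below is computed against stubVMAddr + 12.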
657 template <typename P>
658 void StubOptimizer<P>::optimizeArmStubs()
659 {
660 for (const auto& stubEntry : _stubAddrToLPAddr) {
661 pint_t stubVMAddr = stubEntry.first;
662 pint_t lpVMAddr = stubEntry.second;
663 const auto& pos = _lpAddrToTargetAddr.find(lpVMAddr);
664 if ( pos == _lpAddrToTargetAddr.end() )
665 return;
666 pint_t targetVMAddr = pos->second;
667
668 int32_t delta = (int32_t)(targetVMAddr - (stubVMAddr + 12));
669 uint32_t* stubInstructions = (uint32_t*)((uint8_t*)(long)stubVMAddr + _cacheSlide);
670 assert(stubInstructions[0] == 0xe59fc004);
671 stubInstructions[0] = 0xe59fc000; // ldr ip, L0
672 stubInstructions[1] = 0xe08ff00c; // add pc, pc, ip
673 stubInstructions[2] = delta; // L0: .long xxxx
674 stubInstructions[3] = 0xe7ffdefe; // trap
675 _stubOptimizedCount++;
676 }
677 }
678
679
680
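// optimizeArm64Stubs() rewrites each stub marked in _stubsToOptimize from
//     ADRP X16, lp@page ; LDR X16, [X16, lp@pageoff] ; BR X16
// to
//     ADRP X16, target@page ; ADD X16, X16, target@pageoff ; BR X16
// removing the load through the lazy pointer.  ADRP+ADD spans at most +/-4GB;
// no further range check is done here.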
681 template <typename P>
682 void StubOptimizer<P>::optimizeArm64Stubs()
683 {
684 for (const uint64_t stubVMAddr : _stubsToOptimize ) {
685 pint_t lpVMAddr = _stubAddrToLPAddr[(pint_t)stubVMAddr];
686 const auto& pos = _lpAddrToTargetAddr.find(lpVMAddr);
687 if ( pos == _lpAddrToTargetAddr.end() )
688 return;
689 pint_t targetVMAddr = pos->second;
690
691 int64_t adrpDelta = (targetVMAddr & -4096) - (stubVMAddr & -4096);
692 // Note: ADRP/ADD can only span +/-4GB
693 uint32_t* stubInstructions = (uint32_t*)((uint8_t*)(long)stubVMAddr + _cacheSlide);
694 bool rightInstr1 = ((stubInstructions[0] & 0x9F00001F) == 0x90000010); // ADRP X16, lp@page
695 bool rightInstr2 = ((stubInstructions[1] & 0xFFC003FF) == 0xF9400210); // LDR X16, [X16, lp@pageoff]
696 bool rightInstr3 = (stubInstructions[2] == 0xD61F0200); // BR X16
697
698 if ( rightInstr1 && rightInstr2 && rightInstr3 ) {
699 uint32_t immhi = (adrpDelta >> 9) & (0x00FFFFE0);
700 uint32_t immlo = (adrpDelta << 17) & (0x60000000);
701 uint32_t newADRP = (0x90000010) | immlo | immhi;
702 uint32_t off12 = (targetVMAddr & 0xFFF);
703 uint32_t newADD = (0x91000210) | (off12 << 10);
704
705 stubInstructions[0] = newADRP; // ADRP X16, target@page
706 stubInstructions[1] = newADD; // ADD X16, X16, target@pageoff
707 stubInstructions[2] = 0xD61F0200; // BR X16
708 _stubOptimizedCount++;
709 }
710 }
711 }
712
713 #if SUPPORT_ARCH_arm64e
714 template <typename P>
715 void StubOptimizer<P>::optimizeArm64eStubs()
716 {
717 for (const uint64_t stubVMAddr : _stubsToOptimize ) {
718 pint_t lpVMAddr = _stubAddrToLPAddr[(pint_t)stubVMAddr];
719 const auto& pos = _lpAddrToTargetAddr.find(lpVMAddr);
720 if ( pos == _lpAddrToTargetAddr.end() )
721 return;
722 pint_t targetVMAddr = pos->second;
723
724 int64_t adrpDelta = (targetVMAddr & -4096) - (stubVMAddr & -4096);
725 // Note: ADRP/ADD can only span +/-4GB
726 uint32_t* stubInstructions = (uint32_t*)((uint8_t*)(long)stubVMAddr + _cacheSlide);
727 bool rightInstr1 = ((stubInstructions[0] & 0x9F00001F) == 0x90000011); // ADRP X17, lp@page
728 bool rightInstr2 = ((stubInstructions[1] & 0xFFC003FF) == 0x91000231); // ADD X17, X17, lp@pageoff
729 bool rightInstr3 = (stubInstructions[2] == 0xF9400230); // LDR X16, [X17]
730 bool rightInstr4 = (stubInstructions[3] == 0xD71F0A11); // BRAA X16, X17
731
732 if ( rightInstr1 && rightInstr2 && rightInstr3 && rightInstr4) {
733 uint32_t immhi = (adrpDelta >> 9) & (0x00FFFFE0);
734 uint32_t immlo = (adrpDelta << 17) & (0x60000000);
735 uint32_t newADRP = (0x90000010) | immlo | immhi;
736 uint32_t off12 = (targetVMAddr & 0xFFF);
737 uint32_t newADD = (0x91000210) | (off12 << 10);
738
739 stubInstructions[0] = newADRP; // ADRP X16, target@page
740 stubInstructions[1] = newADD; // ADD X16, X16, target@pageoff
741 stubInstructions[2] = 0xD61F0200; // BR X16
742 stubInstructions[3] = 0xD4200020; // TRAP
743 _stubOptimizedCount++;
744 }
745 }
746 }
747 #endif
748
749 #if SUPPORT_ARCH_arm64_32
750 template <typename P>
751 void StubOptimizer<P>::optimizeArm64_32Stubs()
752 {
753 for (const uint64_t stubVMAddr : _stubsToOptimize ) {
754 pint_t lpVMAddr = _stubAddrToLPAddr[(pint_t)stubVMAddr];
755 const auto& pos = _lpAddrToTargetAddr.find(lpVMAddr);
756 if ( pos == _lpAddrToTargetAddr.end() )
757 return;
758 pint_t targetVMAddr = pos->second;
759
760 int64_t adrpDelta = (targetVMAddr & -4096) - (stubVMAddr & -4096);
761 uint32_t* stubInstructions = (uint32_t*)((uint8_t*)(long)stubVMAddr + _cacheSlide);
762 bool rightInstr1 = ((stubInstructions[0] & 0x9F00001F) == 0x90000010); // ADRP X16, lp@page
763 bool rightInstr2 = ((stubInstructions[1] & 0xFFC003FF) == 0xB9400210); // LDR W16, [X16, lp@pageoff]
764 bool rightInstr3 = (stubInstructions[2] == 0xD61F0200); // BR X16
765
766 if ( rightInstr1 && rightInstr2 && rightInstr3 ) {
767 uint32_t immhi = (adrpDelta >> 9) & (0x00FFFFE0);
768 uint32_t immlo = (adrpDelta << 17) & (0x60000000);
769 uint32_t newADRP = (0x90000010) | immlo | immhi;
770 uint32_t off12 = (targetVMAddr & 0xFFF);
771 uint32_t newADD = (0x91000210) | (off12 << 10);
772
773 stubInstructions[0] = newADRP; // ADRP X16, target@page
774 stubInstructions[1] = newADD; // ADD X16, X16, target@pageoff
775 stubInstructions[2] = 0xD61F0200; // BR X16
776 _stubOptimizedCount++;
777 }
778 }
779 }
780 #endif
781
782
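// optimizeArm64CallSites() handles DYLD_CACHE_ADJ_V2_ARM64_BR26 references.
// An arm64 B/BL has a 26-bit word displacement (+/-128MB, b128MegLimit): if
// the final target is in range the imm26 field is rewritten in place;
// otherwise the branch is redirected to an already-optimized stub for the
// same target when one is close enough, or its own stub is queued for
// optimization.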
783 template <typename P>
784 void StubOptimizer<P>::optimizeArm64CallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr)
785 {
786 forEachCallSiteToAStub([&](uint8_t kind, uint64_t callSiteAddr, uint64_t stubAddr, uint32_t& instruction) -> bool {
787 if ( kind != DYLD_CACHE_ADJ_V2_ARM64_BR26 )
788 return false;
789 // skip all but BL or B
790 if ( (instruction & 0x7C000000) != 0x14000000 )
791 return false;
792 // compute target of branch instruction
793 int32_t brDelta = (instruction & 0x03FFFFFF) << 2;
794 if ( brDelta & 0x08000000 )
795 brDelta |= 0xF0000000;
796 uint64_t targetAddr = callSiteAddr + (int64_t)brDelta;
797 if ( targetAddr != stubAddr ) {
798 _diagnostics.warning("stub target mismatch at callsite 0x%0llX in %s", callSiteAddr, _dylibID);
799 return false;
800 }
801 // ignore branch if not to a known stub
802 const auto& pos = _stubAddrToLPAddr.find((pint_t)targetAddr);
803 if ( pos == _stubAddrToLPAddr.end() )
804 return false;
805
806 // ignore branch if lazy pointer is not known (resolver or interposable)
807 uint64_t lpAddr = pos->second;
808 const auto& pos2 = _lpAddrToTargetAddr.find((pint_t)lpAddr);
809 if ( pos2 == _lpAddrToTargetAddr.end() )
810 return false;
811
812 uint64_t finalTargetAddr = pos2->second;
813 int64_t deltaToFinalTarget = finalTargetAddr - callSiteAddr;
814 // if final target within range, change to branch there directly
815 if ( (deltaToFinalTarget > -b128MegLimit) && (deltaToFinalTarget < b128MegLimit) ) {
816 instruction = (instruction & 0xFC000000) | ((deltaToFinalTarget >> 2) & 0x03FFFFFF);
817 _branchOptimizedToDirectCount++;
818 return true;
819 }
820
821 // try to re-use an existing optimized stub
822 const auto& pos3 = targetAddrToOptStubAddr.find((pint_t)finalTargetAddr);
823 if ( pos3 != targetAddrToOptStubAddr.end() ) {
824 uint64_t existingStub = pos3->second;
825 if ( existingStub != stubAddr ) {
826 int64_t deltaToOptStub = existingStub - callSiteAddr;
827 if ( (deltaToOptStub > -b128MegLimit) && (deltaToOptStub < b128MegLimit) ) {
828 instruction = (instruction & 0xFC000000) | ((deltaToOptStub >> 2) & 0x03FFFFFF);
829 _branchToReUsedOptimizedStubCount++;
830 return true;
831 }
832 }
833 }
834
835 // leave as BL to stub, but optimize the stub
836 _stubsToOptimize.insert(stubAddr);
837 targetAddrToOptStubAddr[(pint_t)finalTargetAddr] = (pint_t)stubAddr;
838 _branchToOptimizedStubCount++;
839 return false;
840 });
841 if (_diagnostics.hasError())
842 return;
843 }
844
845
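// optimizeCallSites() is the per-dylib entry point: it dispatches to the
// architecture-specific call-site and stub rewriters based on cputype and
// subtype.  CPU types not listed in the switch are left untouched.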
846 template <typename P>
847 void StubOptimizer<P>::optimizeCallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr)
848 {
849 if ( _textSection == NULL )
850 return;
851 if ( _stubSection == NULL )
852 return;
853
854
855 switch ( _mh->cputype() ) {
856 case CPU_TYPE_ARM64:
857 optimizeArm64CallSites(targetAddrToOptStubAddr);
858 #if SUPPORT_ARCH_arm64e
859 if (cpuSubtype() == CPU_SUBTYPE_ARM64E)
860 optimizeArm64eStubs();
861 else
862 #endif
863 optimizeArm64Stubs();
864 break;
865 #if SUPPORT_ARCH_arm64_32
866 case CPU_TYPE_ARM64_32:
867 optimizeArm64CallSites(targetAddrToOptStubAddr);
868 optimizeArm64_32Stubs();
869 break;
870 #endif
871 case CPU_TYPE_ARM:
872 optimizeArmCallSites(targetAddrToOptStubAddr);
873 optimizeArmStubs();
874 break;
875 }
876 if ( verbose ) {
877 _diagnostics.verbose("dylib has %6u BLs to %4u stubs. Changed %5u, %5u, %5u BLs to use direct branch, optimized stub, neighbor's optimized stub. "
878 "%5u stubs left interposable, %4u stubs optimized. path=%s\n",
879 _branchToStubCount, _stubCount, _branchOptimizedToDirectCount, _branchToOptimizedStubCount, _branchToReUsedOptimizedStubCount,
880 _stubsLeftInterposable, _stubOptimizedCount, _dylibID);
881 }
882
883 }
884
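// bypassStubs() drives the optimization for all dylibs of one pointer size.
// targetAddrToOptStubAddr is shared across dylibs, so a call site in one
// dylib can reuse an optimized stub created for the same target in a
// neighboring dylib.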
885 template <typename P>
886 void bypassStubs(std::vector<std::pair<const mach_header*, const char*>> images,
887 const std::string& archName,
888 int64_t cacheSlide, uint64_t cacheUnslidAddr,
889 const DyldSharedCache* dyldCache,
890 const char* const neverStubEliminateSymbols[],
891 Diagnostics& diags)
892 {
893 std::unordered_map<uint64_t, uint64_t> targetAddrToOptStubAddr;
894 diags.verbose("Stub elimination optimization:\n");
895
896 // construct a StubOptimizer for each image
897 __block std::vector<StubOptimizer<P>*> optimizers;
898 for (std::pair<const mach_header*, const char*> image : images) {
899 optimizers.push_back(new StubOptimizer<P>(cacheSlide, cacheUnslidAddr, archName,
900 (macho_header<P>*)image.first, image.second,
901 diags));
902 }
903
904 // build set of functions to never stub-eliminate because tools may need to override them
905 std::unordered_set<std::string> neverStubEliminate;
906 for (const char* const* p=neverStubEliminateSymbols; *p != nullptr; ++p) {
907 neverStubEliminate.insert(*p);
908 }
909
910 #if !BUILDING_APP_CACHE_UTIL
911 // Customer shared caches support overriding libdispatch
912 if ( dyldCache != nullptr ) {
913 for (StubOptimizer<P>* op : optimizers) {
914 if ( dyldCache->isOverridablePath(op->dylibID()) ) {
915 // add all exports
916 const uint8_t* exportsStart = op->exportsTrie();
917 const uint8_t* exportsEnd = exportsStart + op->exportsTrieSize();
918 std::vector<ExportInfoTrie::Entry> exports;
919 if ( !ExportInfoTrie::parseTrie(exportsStart, exportsEnd, exports) ) {
920 diags.error("malformed exports trie in %s", op->dylibID());
921 return;
922 }
923 for(const ExportInfoTrie::Entry& entry : exports) {
924 neverStubEliminate.insert(entry.name);
925 }
926 }
927 }
928 }
929 #endif
930
931 // build maps of stubs-to-lp and lp-to-target
932 for (StubOptimizer<P>* op : optimizers)
933 op->buildStubMap(neverStubEliminate);
934
935 // optimize call sites to bypass stubs or jump through an optimized stub
936 for (StubOptimizer<P>* op : optimizers)
937 op->optimizeCallSites(targetAddrToOptStubAddr);
938
939 // write total optimization info
940 uint32_t callSiteCount = 0;
941 uint32_t callSiteDirectOptCount = 0;
942 for (StubOptimizer<P>* op : optimizers) {
943 callSiteCount += op->_branchToStubCount;
944 callSiteDirectOptCount += op->_branchOptimizedToDirectCount;
945 }
946 diags.verbose(" cache contains %u call sites of which %u were directly bound\n", callSiteCount, callSiteDirectOptCount);
947
948 // clean up
949 for (StubOptimizer<P>* op : optimizers)
950 delete op;
951 }
952
953 void CacheBuilder::optimizeAwayStubs(const std::vector<std::pair<const mach_header*, const char*>>& images,
954 int64_t cacheSlide, uint64_t cacheUnslidAddr,
955 const DyldSharedCache* dyldCache,
956 const char* const neverStubEliminateSymbols[])
957 {
958 std::unordered_map<uint64_t, uint64_t> targetAddrToOptStubAddr;
959 std::string archName = _options.archs->name();
960 #if SUPPORT_ARCH_arm64_32
961 if ( startsWith(archName, "arm64_32") ) {
962 bypassStubs<Pointer32<LittleEndian> >(images, archName, cacheSlide, cacheUnslidAddr,
963 dyldCache, neverStubEliminateSymbols,
964 _diagnostics);
965 return;
966 }
967 #endif
968 if ( startsWith(archName, "arm64") ) {
969 bypassStubs<Pointer64<LittleEndian> >(images, archName, cacheSlide, cacheUnslidAddr,
970 dyldCache, neverStubEliminateSymbols,
971 _diagnostics);
972 return;
973 }
974 if ( archName == "armv7k" ) {
975 bypassStubs<Pointer32<LittleEndian> >(images, archName, cacheSlide, cacheUnslidAddr,
976 dyldCache, neverStubEliminateSymbols,
977 _diagnostics);
978 return;
979 }
980 // no stub optimization done for other arches
981 }