/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/types.h>

#include <CommonCrypto/CommonDigest.h>

#include <unordered_map>
#include <unordered_set>

#include "StringUtils.h"
#include "MachOFileAbstraction.hpp"
#include "MachOAnalyzer.h"
#include "Diagnostics.h"
#include "DyldSharedCache.h"
#include "SharedCacheBuilder.h"
static const bool verbose = false;
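
// Each StubOptimizer instance works on one dylib in the shared cache: it records
// where that dylib's stubs and lazy pointers live, then rewrites call sites (and,
// when a direct branch cannot reach, the stubs themselves) so calls no longer go
// through the lazy-binding indirection.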
template <typename P>
class StubOptimizer {
public:
                            StubOptimizer(const DyldSharedCache* cache, macho_header<P>* mh, Diagnostics& diags);
    void                    buildStubMap(const std::unordered_set<std::string>& neverStubEliminate);
    void                    optimizeCallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr);
    const char*             installName() { return _installName; }
    const uint8_t*          exportsTrie() {
        if ( _dyldInfo != nullptr )
            return &_linkeditBias[_dyldInfo->export_off()];
        else
            return &_linkeditBias[_exportTrie->dataoff()];
    }
    uint32_t                exportsTrieSize() {
        if ( _dyldInfo != nullptr )
            return _dyldInfo->export_size();
        else
            return _exportTrie->datasize();
    }

    uint32_t                _stubCount                        = 0;
    uint32_t                _stubOptimizedCount               = 0;
    uint32_t                _stubsLeftInterposable            = 0;
    uint32_t                _branchToStubCount                = 0;
    uint32_t                _branchOptimizedToDirectCount     = 0;
    uint32_t                _branchToOptimizedStubCount       = 0;
    uint32_t                _branchToReUsedOptimizedStubCount = 0;

private:
    Diagnostics             _diagnostics;

    typedef std::function<bool(uint8_t callSiteKind, uint64_t callSiteAddr, uint64_t stubAddr, uint32_t& instruction)> CallSiteHandler;
    typedef typename P::uint_t pint_t;
    typedef typename P::E E;

    void                    forEachCallSiteToAStub(CallSiteHandler);
    void                    optimizeArm64CallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr);
    void                    optimizeArm64Stubs();
#if SUPPORT_ARCH_arm64e
    void                    optimizeArm64eStubs();
#endif
#if SUPPORT_ARCH_arm64_32
    void                    optimizeArm64_32Stubs();
#endif
    void                    optimizeArmCallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr);
    void                    optimizeArmStubs();
    uint64_t                lazyPointerAddrFromArm64Stub(const uint8_t* stubInstructions, uint64_t stubVMAddr);
#if SUPPORT_ARCH_arm64e
    uint64_t                lazyPointerAddrFromArm64eStub(const uint8_t* stubInstructions, uint64_t stubVMAddr);
#endif
#if SUPPORT_ARCH_arm64_32
    uint64_t                lazyPointerAddrFromArm64_32Stub(const uint8_t* stubInstructions, uint64_t stubVMAddr);
#endif
    uint32_t                lazyPointerAddrFromArmStub(const uint8_t* stubInstructions, uint32_t stubVMAddr);
    int32_t                 getDisplacementFromThumbBranch(uint32_t instruction, uint32_t instrAddr);
    uint32_t                setDisplacementInThumbBranch(uint32_t instruction, uint32_t instrAddr,
                                                         int32_t displacement, bool targetIsThumb);

    struct AddressAndName { pint_t targetVMAddr; const char* targetName; };
    typedef std::unordered_map<pint_t, AddressAndName> StubVMAddrToTarget;

    static const int64_t b128MegLimit = 0x07FFFFFF;
    static const int64_t b16MegLimit  = 0x00FFFFFF;
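
    // Note: b128MegLimit is the reach of an arm64 B/BL (+/-128MB); b16MegLimit is
    // the reach of a Thumb2 BL/B.W (+/-16MB).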
    macho_header<P>*                            _mh;
    int64_t                                     _cacheSlide         = 0;
    uint64_t                                    _cacheUnslideAddr   = 0;
    uint32_t                                    _linkeditSize       = 0;
    uint64_t                                    _linkeditAddr       = 0;
    const uint8_t*                              _linkeditBias       = nullptr;
    const char*                                 _installName        = nullptr;
    const macho_symtab_command<P>*              _symTabCmd          = nullptr;
    const macho_dysymtab_command<P>*            _dynSymTabCmd       = nullptr;
    const macho_dyld_info_command<P>*           _dyldInfo           = nullptr;
    const macho_linkedit_data_command<P>*       _exportTrie         = nullptr;
    macho_linkedit_data_command<P>*             _splitSegInfoCmd    = nullptr;
    const macho_section<P>*                     _textSection        = nullptr;
    const macho_section<P>*                     _stubSection        = nullptr;
    uint32_t                                    _textSectionIndex   = 0;
    uint32_t                                    _stubSectionIndex   = 0;
    pint_t                                      _textSegStartAddr   = 0;
    std::vector<macho_segment_command<P>*>      _segCmds;
    std::unordered_map<pint_t, pint_t>          _stubAddrToLPAddr;
    std::unordered_map<pint_t, pint_t>          _lpAddrToTargetAddr;
    std::unordered_map<pint_t, const char*>     _targetAddrToName;
    std::unordered_set<uint64_t>                _stubsToOptimize;
};
template <typename P>
StubOptimizer<P>::StubOptimizer(const DyldSharedCache* cache, macho_header<P>* mh, Diagnostics& diags)
    : _mh(mh), _diagnostics(diags)
{
    _cacheSlide = (long)cache - cache->unslidLoadAddress();
    _cacheUnslideAddr = cache->unslidLoadAddress();
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)mh + sizeof(macho_header<P>));
    const uint32_t cmd_count = mh->ncmds();
    macho_segment_command<P>* segCmd;
    uint32_t sectionIndex = 0;
    const macho_load_command<P>* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        switch (cmd->cmd()) {
            case LC_ID_DYLIB:
                _installName = ((macho_dylib_command<P>*)cmd)->name();
                break;
            case LC_SYMTAB:
                _symTabCmd = (macho_symtab_command<P>*)cmd;
                break;
            case LC_DYSYMTAB:
                _dynSymTabCmd = (macho_dysymtab_command<P>*)cmd;
                break;
            case LC_SEGMENT_SPLIT_INFO:
                _splitSegInfoCmd = (macho_linkedit_data_command<P>*)cmd;
                break;
            case LC_DYLD_INFO:
            case LC_DYLD_INFO_ONLY:
                _dyldInfo = (macho_dyld_info_command<P>*)cmd;
                break;
            case LC_DYLD_EXPORTS_TRIE:
                _exportTrie = (macho_linkedit_data_command<P>*)cmd;
                break;
            case macho_segment_command<P>::CMD:
                segCmd = (macho_segment_command<P>*)cmd;
                _segCmds.push_back(segCmd);
                if ( strcmp(segCmd->segname(), "__LINKEDIT") == 0 ) {
                    _linkeditBias = (uint8_t*)(segCmd->vmaddr() + _cacheSlide - segCmd->fileoff());
                    _linkeditSize = (uint32_t)segCmd->vmsize();
                    _linkeditAddr = segCmd->vmaddr();
                }
                else if ( strcmp(segCmd->segname(), "__TEXT") == 0 ) {
                    _textSegStartAddr = (pint_t)segCmd->vmaddr();
                    const macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)segCmd + sizeof(macho_segment_command<P>));
                    const macho_section<P>* const sectionsEnd = &sectionsStart[segCmd->nsects()];
                    for (const macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
                        ++sectionIndex;
                        if ( strcmp(sect->sectname(), "__text") == 0 ) {
                            _textSection = sect;
                            _textSectionIndex = sectionIndex;
                        }
                        else if ( ((sect->flags() & SECTION_TYPE) == S_SYMBOL_STUBS) && (sect->size() != 0) ) {
                            _stubSection = sect;
                            _stubSectionIndex = sectionIndex;
                        }
                    }
                }
                break;
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
}
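
// An unoptimized ARM stub is four words:
//     ldr  ip, [pc, #4]        ; 0xe59fc004  load L0 (pc reads as stub+8)
//     add  ip, pc, ip          ; 0xe08fc00c  ip = stub+12 + offset
//     ldr  pc, [ip]            ; 0xe59cf000  jump through the lazy pointer
// L0: .long lazyPointer - (stub + 12)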
template <typename P>
uint32_t StubOptimizer<P>::lazyPointerAddrFromArmStub(const uint8_t* stubInstructions, uint32_t stubVMAddr)
{
    uint32_t stubInstr1 = E::get32(*(uint32_t*)stubInstructions);
    uint32_t stubInstr2 = E::get32(*(uint32_t*)(stubInstructions+4));
    uint32_t stubInstr3 = E::get32(*(uint32_t*)(stubInstructions+8));
    int32_t  stubData   = E::get32(*(uint32_t*)(stubInstructions+12));
    if ( stubInstr1 != 0xe59fc004 ) {
        _diagnostics.warning("first instruction of stub (0x%08X) is not 'ldr ip, pc + 12' for stub at addr 0x%0llX in %s",
                stubInstr1, (uint64_t)stubVMAddr, _installName);
        return 0;
    }
    if ( stubInstr2 != 0xe08fc00c ) {
        _diagnostics.warning("second instruction of stub (0x%08X) is not 'add ip, pc, ip' for stub at addr 0x%0llX in %s",
                stubInstr2, (uint64_t)stubVMAddr, _installName);
        return 0;
    }
    if ( stubInstr3 != 0xe59cf000 ) {
        _diagnostics.warning("third instruction of stub (0x%08X) is not 'ldr pc, [ip]' for stub at addr 0x%0llX in %s",
                stubInstr3, (uint64_t)stubVMAddr, _installName);
        return 0;
    }
    return stubVMAddr + 12 + stubData;
}
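
// An unoptimized arm64 stub is:
//     adrp x16, lazyPointer@page
//     ldr  x16, [x16, lazyPointer@pageoff]
//     br   x16
// The ADRP page delta is reassembled below from immlo (instruction bits 30:29) and
// immhi (bits 23:5), then sign-extended; it counts 4KB pages relative to the page
// containing the stub.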
template <typename P>
uint64_t StubOptimizer<P>::lazyPointerAddrFromArm64Stub(const uint8_t* stubInstructions, uint64_t stubVMAddr)
{
    uint32_t stubInstr1 = E::get32(*(uint32_t*)stubInstructions);
    if ( (stubInstr1 & 0x9F00001F) != 0x90000010 ) {
        _diagnostics.warning("first instruction of stub (0x%08X) is not ADRP for stub at addr 0x%0llX in %s",
                stubInstr1, (uint64_t)stubVMAddr, _installName);
        return 0;
    }
    int32_t adrpValue = ((stubInstr1 & 0x00FFFFE0) >> 3) | ((stubInstr1 & 0x60000000) >> 29);
    if ( stubInstr1 & 0x00800000 )
        adrpValue |= 0xFFF00000;
    uint32_t stubInstr2 = E::get32(*(uint32_t*)(stubInstructions + 4));
    if ( (stubInstr2 & 0xFFC003FF) != 0xF9400210 ) {
        _diagnostics.warning("second instruction of stub (0x%08X) is not LDR for stub at addr 0x%0llX in %s",
                stubInstr2, (uint64_t)stubVMAddr, _installName);
        return 0;
    }
    uint32_t ldrValue = ((stubInstr2 >> 10) & 0x00000FFF);
    return (stubVMAddr & (-4096)) + adrpValue*4096 + ldrValue*8;
}
#if SUPPORT_ARCH_arm64_32
template <typename P>
uint64_t StubOptimizer<P>::lazyPointerAddrFromArm64_32Stub(const uint8_t* stubInstructions, uint64_t stubVMAddr)
{
    uint32_t stubInstr1 = E::get32(*(uint32_t*)stubInstructions);
    if ( (stubInstr1 & 0x9F00001F) != 0x90000010 ) {
        _diagnostics.warning("first instruction of stub (0x%08X) is not ADRP for stub at addr 0x%0llX in %s",
                stubInstr1, (uint64_t)stubVMAddr, _installName);
        return 0;
    }
    int32_t adrpValue = ((stubInstr1 & 0x00FFFFE0) >> 3) | ((stubInstr1 & 0x60000000) >> 29);
    if ( stubInstr1 & 0x00800000 )
        adrpValue |= 0xFFF00000;
    uint32_t stubInstr2 = E::get32(*(uint32_t*)(stubInstructions + 4));
    if ( (stubInstr2 & 0xFFC003FF) != 0xB9400210 ) {
        _diagnostics.warning("second instruction of stub (0x%08X) is not LDR for stub at addr 0x%0llX in %s",
                stubInstr2, (uint64_t)stubVMAddr, _installName);
        return 0;
    }
    uint32_t ldrValue = ((stubInstr2 >> 10) & 0x00000FFF);
    return (stubVMAddr & (-4096)) + adrpValue*4096 + ldrValue*4; // LDR Wn has a scale factor of 4
}
#endif
#if SUPPORT_ARCH_arm64e
template <typename P>
uint64_t StubOptimizer<P>::lazyPointerAddrFromArm64eStub(const uint8_t* stubInstructions, uint64_t stubVMAddr)
{
    uint32_t stubInstr1 = E::get32(*(uint32_t*)stubInstructions);
    // ADRP X17, lazyPointer@page
    if ( (stubInstr1 & 0x9F00001F) != 0x90000011 ) {
        _diagnostics.warning("first instruction of stub (0x%08X) is not ADRP for stub at addr 0x%0llX in %s",
                stubInstr1, (uint64_t)stubVMAddr, _installName);
        return 0;
    }
    int32_t adrpValue = ((stubInstr1 & 0x00FFFFE0) >> 3) | ((stubInstr1 & 0x60000000) >> 29);
    if ( stubInstr1 & 0x00800000 )
        adrpValue |= 0xFFF00000;

    // ADD X17, X17, lazyPointer@pageoff
    uint32_t stubInstr2 = E::get32(*(uint32_t*)(stubInstructions + 4));
    if ( (stubInstr2 & 0xFFC003FF) != 0x91000231 ) {
        _diagnostics.warning("second instruction of stub (0x%08X) is not ADD for stub at addr 0x%0llX in %s",
                stubInstr2, (uint64_t)stubVMAddr, _installName);
        return 0;
    }
    uint32_t addValue = ((stubInstr2 & 0x003FFC00) >> 10);

    // LDR X16, [X17]
    uint32_t stubInstr3 = E::get32(*(uint32_t*)(stubInstructions + 8));
    if ( stubInstr3 != 0xF9400230 ) {
        _diagnostics.warning("third instruction of stub (0x%08X) is not LDR for stub at addr 0x%0llX in %s",
                stubInstr3, (uint64_t)stubVMAddr, _installName);
        return 0;
    }
    return (stubVMAddr & (-4096)) + adrpValue*4096 + addValue;
}
#endif
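
// Scan the dylib and record, for every symbol stub, which lazy pointer it loads
// (_stubAddrToLPAddr) and, for every lazy/non-lazy pointer, which target address
// it currently holds (_lpAddrToTargetAddr). Symbols listed in neverStubEliminate
// are skipped so tools can still interpose them.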
template <typename P>
void StubOptimizer<P>::buildStubMap(const std::unordered_set<std::string>& neverStubEliminate)
{
    // find all stubs and lazy pointers
    const macho_nlist<P>* symbolTable = (const macho_nlist<P>*)(&_linkeditBias[_symTabCmd->symoff()]);
    const char* symbolStrings = (char*)(&_linkeditBias[_symTabCmd->stroff()]);
    const uint32_t* const indirectTable = (uint32_t*)(&_linkeditBias[_dynSymTabCmd->indirectsymoff()]);
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)_mh + sizeof(macho_header<P>));
    const uint32_t cmd_count = _mh->ncmds();
    const macho_load_command<P>* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        if ( cmd->cmd() == macho_segment_command<P>::CMD ) {
            macho_segment_command<P>* seg = (macho_segment_command<P>*)cmd;
            macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)seg + sizeof(macho_segment_command<P>));
            macho_section<P>* const sectionsEnd = &sectionsStart[seg->nsects()];
            for (macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
                if ( sect->size() == 0 )
                    continue;
                unsigned sectionType = (sect->flags() & SECTION_TYPE);
                const uint32_t indirectTableOffset = sect->reserved1();
                if ( sectionType == S_SYMBOL_STUBS ) {
                    const uint32_t stubSize = sect->reserved2();
                    _stubCount = (uint32_t)(sect->size() / stubSize);
                    pint_t stubVMAddr = (pint_t)sect->addr();
                    for (uint32_t j = 0; j < _stubCount; ++j, stubVMAddr += stubSize) {
                        uint32_t symbolIndex = E::get32(indirectTable[indirectTableOffset + j]);
                        switch ( symbolIndex ) {
                            case INDIRECT_SYMBOL_ABS:
                            case INDIRECT_SYMBOL_LOCAL:
                            case INDIRECT_SYMBOL_ABS | INDIRECT_SYMBOL_LOCAL:
                                continue;
                        }
                        if ( symbolIndex >= _symTabCmd->nsyms() ) {
                            _diagnostics.warning("symbol index out of range (%d of %d) for stub at addr 0x%0llX in %s",
                                    symbolIndex, _symTabCmd->nsyms(), (uint64_t)stubVMAddr, _installName);
                            continue;
                        }
                        const macho_nlist<P>* sym = &symbolTable[symbolIndex];
                        uint32_t stringOffset = sym->n_strx();
                        if ( stringOffset > _symTabCmd->strsize() ) {
                            _diagnostics.warning("symbol string offset out of range (%u of %u) for stub at addr 0x%0llX in %s",
                                    stringOffset, _symTabCmd->strsize(), (uint64_t)stubVMAddr, _installName);
                            continue;
                        }
                        const char* symName = &symbolStrings[stringOffset];
                        if ( neverStubEliminate.count(symName) ) {
                            //fprintf(stderr, "stubVMAddr=0x%llX, not bypassing stub to %s in %s because target is interposable\n", (uint64_t)stubVMAddr, symName, _installName);
                            _stubsLeftInterposable++;
                            continue;
                        }
                        const uint8_t* stubInstrs = (uint8_t*)(long)stubVMAddr + _cacheSlide;
                        pint_t targetLPAddr = 0;
                        switch ( _mh->cputype() ) {
                            case CPU_TYPE_ARM64:
                            case CPU_TYPE_ARM64_32:
#if SUPPORT_ARCH_arm64e
                                if (_mh->cpusubtype() == CPU_SUBTYPE_ARM64E)
                                    targetLPAddr = (pint_t)lazyPointerAddrFromArm64eStub(stubInstrs, stubVMAddr);
                                else
#endif
#if SUPPORT_ARCH_arm64_32
                                if (_mh->cputype() == CPU_TYPE_ARM64_32)
                                    targetLPAddr = (pint_t)lazyPointerAddrFromArm64_32Stub(stubInstrs, stubVMAddr);
                                else
#endif
                                    targetLPAddr = (pint_t)lazyPointerAddrFromArm64Stub(stubInstrs, stubVMAddr);
                                break;
                            case CPU_TYPE_ARM:
                                targetLPAddr = (pint_t)lazyPointerAddrFromArmStub(stubInstrs, (uint32_t)stubVMAddr);
                                break;
                        }
                        if ( targetLPAddr != 0 )
                            _stubAddrToLPAddr[stubVMAddr] = targetLPAddr;
                    }
                }
                else if ( (sectionType == S_LAZY_SYMBOL_POINTERS) || (sectionType == S_NON_LAZY_SYMBOL_POINTERS) ) {
                    pint_t lpValue;
                    pint_t lpVMAddr;
                    pint_t* lpContent = (pint_t*)(sect->addr() + _cacheSlide);
                    uint32_t elementCount = (uint32_t)(sect->size() / sizeof(pint_t));
                    uint64_t textSegStartAddr = _segCmds[0]->vmaddr();
                    uint64_t textSegEndAddr   = _segCmds[0]->vmaddr() + _segCmds[0]->vmsize();
                    for (uint32_t j = 0; j < elementCount; ++j) {
                        uint32_t symbolIndex = E::get32(indirectTable[indirectTableOffset + j]);
                        switch ( symbolIndex ) {
                            case INDIRECT_SYMBOL_ABS:
                            case INDIRECT_SYMBOL_LOCAL:
                            case INDIRECT_SYMBOL_LOCAL | INDIRECT_SYMBOL_ABS:
                                continue;
                        }
                        lpValue  = (pint_t)P::getP(lpContent[j]);
                        lpVMAddr = (pint_t)sect->addr() + j * sizeof(pint_t);
                        if ( symbolIndex >= _symTabCmd->nsyms() ) {
                            _diagnostics.warning("symbol index out of range (%d of %d) for lazy pointer at addr 0x%0llX in %s",
                                    symbolIndex, _symTabCmd->nsyms(), (uint64_t)lpVMAddr, _installName);
                            continue;
                        }
                        const macho_nlist<P>* sym = &symbolTable[symbolIndex];
                        uint32_t stringOffset = sym->n_strx();
                        if ( stringOffset > _symTabCmd->strsize() ) {
                            _diagnostics.warning("symbol string offset out of range (%u of %u) for lazy pointer at addr 0x%0llX in %s",
                                    stringOffset, _symTabCmd->strsize(), (uint64_t)lpVMAddr, _installName);
                            continue;
                        }
                        const char* symName = &symbolStrings[stringOffset];
                        if ( (lpValue > textSegStartAddr) && (lpValue < textSegEndAddr) ) {
                            //fprintf(stderr, "skipping lazy pointer at 0x%0lX to %s in %s because target is within dylib\n", (long)lpVMAddr, symName, _installName);
                            continue;
                        }
                        else if ( (sizeof(pint_t) == 8) && ((lpValue % 4) != 0) ) {
                            _diagnostics.warning("lazy pointer at 0x%0llX does not point to 4-byte aligned address(0x%0llX) in %s",
                                    (uint64_t)lpVMAddr, (uint64_t)lpValue, _installName);
                            continue;
                        }
                        _lpAddrToTargetAddr[lpVMAddr] = lpValue;
                        _targetAddrToName[lpValue]    = symName;
                    }
                }
            }
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
}
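
// Walk the LC_SEGMENT_SPLIT_INFO v2 tables to find every branch in __text that
// targets the stub section, and hand each one to the handler. If the handler
// returns true, the (possibly modified) instruction is written back.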
template <typename P>
void StubOptimizer<P>::forEachCallSiteToAStub(CallSiteHandler handler)
{
    if (_diagnostics.hasError())
        return;
    const uint8_t* infoStart = &_linkeditBias[_splitSegInfoCmd->dataoff()];
    const uint8_t* infoEnd = &infoStart[_splitSegInfoCmd->datasize()];
    if ( *infoStart++ != DYLD_CACHE_ADJ_V2_FORMAT ) {
        _diagnostics.error("malformed split seg info in %s", _installName);
        return;
    }

    uint8_t* textSectionContent = (uint8_t*)(_textSection->addr() + _cacheSlide);

    // Whole         :== <count> FromToSection+
    // FromToSection :== <from-sect-index> <to-sect-index> <count> ToOffset+
    // ToOffset      :== <to-sect-offset-delta> <count> FromOffset+
    // FromOffset    :== <kind> <count> <from-sect-offset-delta>
    const uint8_t* p = infoStart;
    uint64_t sectionCount = read_uleb128(p, infoEnd);
    for (uint64_t i = 0; i < sectionCount; ++i) {
        uint64_t fromSectionIndex = read_uleb128(p, infoEnd);
        uint64_t toSectionIndex = read_uleb128(p, infoEnd);
        uint64_t toOffsetCount = read_uleb128(p, infoEnd);
        uint64_t toSectionOffset = 0;
        for (uint64_t j = 0; j < toOffsetCount; ++j) {
            uint64_t toSectionDelta = read_uleb128(p, infoEnd);
            uint64_t fromOffsetCount = read_uleb128(p, infoEnd);
            toSectionOffset += toSectionDelta;
            for (uint64_t k = 0; k < fromOffsetCount; ++k) {
                uint64_t kind = read_uleb128(p, infoEnd);
                if ( kind > 13 ) { // larger than any defined DYLD_CACHE_ADJ_V2_* kind
                    _diagnostics.error("bad kind (%llu) value in %s\n", kind, _installName);
                }
                uint64_t fromSectDeltaCount = read_uleb128(p, infoEnd);
                uint64_t fromSectionOffset = 0;
                for (uint64_t l = 0; l < fromSectDeltaCount; ++l) {
                    uint64_t delta = read_uleb128(p, infoEnd);
                    fromSectionOffset += delta;
                    if ( (fromSectionIndex == _textSectionIndex) && (toSectionIndex == _stubSectionIndex) ) {
                        uint32_t* instrPtr = (uint32_t*)(textSectionContent + fromSectionOffset);
                        uint64_t instrAddr = _textSection->addr() + fromSectionOffset;
                        uint64_t stubAddr = _stubSection->addr() + toSectionOffset;
                        uint32_t instruction = E::get32(*instrPtr);
                        _branchToStubCount++;
                        if ( handler(kind, instrAddr, stubAddr, instruction) ) {
                            E::set32(*instrPtr, instruction);
                        }
                    }
                }
            }
        }
    }
}
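
// Thumb2 BL/B.W encode a 25-bit displacement split across two halfwords as
// S:I1:I2:imm10:imm11:0, where I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S).
// The two helpers below unpack and repack that form.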
/// Extract displacement from a thumb b/bl/blx instruction.
template <typename P>
int32_t StubOptimizer<P>::getDisplacementFromThumbBranch(uint32_t instruction, uint32_t instrAddr)
{
    bool is_blx    = ((instruction & 0xD000F800) == 0xC000F000);
    uint32_t s     = (instruction >> 10) & 0x1;
    uint32_t j1    = (instruction >> 29) & 0x1;
    uint32_t j2    = (instruction >> 27) & 0x1;
    uint32_t imm10 = instruction & 0x3FF;
    uint32_t imm11 = (instruction >> 16) & 0x7FF;
    uint32_t i1    = (j1 == s);
    uint32_t i2    = (j2 == s);
    uint32_t dis   = (s << 24) | (i1 << 23) | (i2 << 22) | (imm10 << 12) | (imm11 << 1);
    int32_t  sdis  = dis;
    int32_t result = s ? (sdis | 0xFE000000) : sdis;
    if ( is_blx && (instrAddr & 0x2) ) {
        // The thumb blx instruction always has low bit of imm11 as zero. The way
        // a 2-byte aligned blx can branch to a 4-byte aligned ARM target is that
        // the blx instruction always 4-byte aligns the pc before adding the
        // displacement from the blx. We must emulate that when decoding this.
        result -= 2;
    }
    return result;
}
/// Update a thumb b/bl/blx instruction, switching bl <-> blx as needed.
template <typename P>
uint32_t StubOptimizer<P>::setDisplacementInThumbBranch(uint32_t instruction, uint32_t instrAddr,
                                                        int32_t displacement, bool targetIsThumb) {
    if ( (displacement > 16777214) || (displacement < (-16777216)) ) {
        _diagnostics.error("thumb branch out of range at 0x%0X in %s", instrAddr, _installName);
        return 0;
    }
    bool is_bl  = ((instruction & 0xD000F800) == 0xD000F000);
    bool is_blx = ((instruction & 0xD000F800) == 0xC000F000);
    bool is_b   = ((instruction & 0xD000F800) == 0x9000F000);
    uint32_t newInstruction = (instruction & 0xD000F800);
    if (is_bl || is_blx) {
        if (targetIsThumb) {
            newInstruction = 0xD000F000; // Use bl
        }
        else {
            newInstruction = 0xC000F000; // Use blx
            // See note in getDisplacementFromThumbBranch() about blx.
            if (instrAddr & 0x2)
                displacement += 2;
        }
    }
    else if (is_b) {
        if ( !targetIsThumb ) {
            _diagnostics.error("no pc-rel thumb branch instruction that switches to arm mode at 0x%0X in %s", instrAddr, _installName);
            return 0;
        }
    }
    else {
        _diagnostics.error("not b/bl/blx at 0x%0X in %s", instrAddr, _installName);
        return 0;
    }
    uint32_t s  = (uint32_t)(displacement >> 24) & 0x1;
    uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
    uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
    uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
    uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
    uint32_t j1 = (i1 == s);
    uint32_t j2 = (i2 == s);
    uint32_t nextDisp  = (j1 << 13) | (j2 << 11) | imm11;
    uint32_t firstDisp = (s << 10) | imm10;
    newInstruction |= (nextDisp << 16) | firstDisp;
    return newInstruction;
}
template <typename P>
void StubOptimizer<P>::optimizeArmCallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr)
{
    forEachCallSiteToAStub([&](uint8_t kind, uint64_t callSiteAddr, uint64_t stubAddr, uint32_t& instruction) -> bool {
        if ( kind == DYLD_CACHE_ADJ_V2_THUMB_BR22 ) {
            bool is_bl  = ((instruction & 0xD000F800) == 0xD000F000);
            bool is_blx = ((instruction & 0xD000F800) == 0xC000F000);
            bool is_b   = ((instruction & 0xD000F800) == 0x9000F000);
            if ( !is_bl && !is_blx && !is_b ) {
                _diagnostics.warning("non-branch instruction at 0x%0llX in %s", callSiteAddr, _installName);
                return false;
            }
            int32_t brDelta = getDisplacementFromThumbBranch(instruction, (uint32_t)callSiteAddr);
            pint_t targetAddr = (pint_t)callSiteAddr + 4 + brDelta;
            if ( targetAddr != stubAddr ) {
                _diagnostics.warning("stub target mismatch at callsite 0x%0llX in %s", callSiteAddr, _installName);
                return false;
            }
            // ignore branch if not to a known stub
            const auto& pos = _stubAddrToLPAddr.find(targetAddr);
            if ( pos == _stubAddrToLPAddr.end() )
                return false;

            // ignore branch if lazy pointer is not known (resolver or interposable)
            uint64_t lpAddr = pos->second;
            const auto& pos2 = _lpAddrToTargetAddr.find((pint_t)lpAddr);
            if ( pos2 == _lpAddrToTargetAddr.end() )
                return false;

            uint64_t finalTargetAddr = pos2->second;
            int64_t deltaToFinalTarget = finalTargetAddr - (callSiteAddr + 4);
            // if final target within range, change to branch there directly
            if ( (deltaToFinalTarget > -b16MegLimit) && (deltaToFinalTarget < b16MegLimit) ) {
                bool targetIsThumb = (finalTargetAddr & 1);
                instruction = setDisplacementInThumbBranch(instruction, (uint32_t)callSiteAddr, (int32_t)deltaToFinalTarget, targetIsThumb);
                if (_diagnostics.hasError())
                    return false;
                _branchOptimizedToDirectCount++;
                return true;
            }

            // try to re-use an existing optimized stub
            const auto& pos3 = targetAddrToOptStubAddr.find(finalTargetAddr);
            if ( pos3 != targetAddrToOptStubAddr.end() ) {
                uint64_t existingStub = pos3->second;
                if ( existingStub != stubAddr ) {
                    int64_t deltaToOptStub = existingStub - (callSiteAddr + 4);
                    if ( (deltaToOptStub > -b16MegLimit) && (deltaToOptStub < b16MegLimit) ) {
                        bool targetIsThumb = (existingStub & 1);
                        instruction = setDisplacementInThumbBranch(instruction, (uint32_t)callSiteAddr, (int32_t)deltaToOptStub, targetIsThumb);
                        if (_diagnostics.hasError())
                            return false;
                        _branchToReUsedOptimizedStubCount++;
                        return true;
                    }
                }
            }

            // leave as BL to stub, but optimize the stub
            _stubsToOptimize.insert(stubAddr);
            targetAddrToOptStubAddr[finalTargetAddr] = stubAddr;
            _branchToOptimizedStubCount++;
        }
        else if ( kind == DYLD_CACHE_ADJ_V2_ARM_BR24 ) {
            // too few of these to be worth trying to optimize
        }
        return false;
    });
    if (_diagnostics.hasError())
        return;
}
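
// A call site that cannot reach its final target directly is left branching to
// its stub, and the stub is rewritten below to jump straight to the target; the
// stub in effect becomes a branch island.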
template <typename P>
void StubOptimizer<P>::optimizeArmStubs()
{
    for (const auto& stubEntry : _stubAddrToLPAddr) {
        pint_t stubVMAddr = stubEntry.first;
        pint_t lpVMAddr   = stubEntry.second;
        const auto& pos = _lpAddrToTargetAddr.find(lpVMAddr);
        if ( pos == _lpAddrToTargetAddr.end() )
            continue;
        pint_t targetVMAddr = pos->second;

        int32_t delta = (int32_t)(targetVMAddr - (stubVMAddr + 12));
        uint32_t* stubInstructions = (uint32_t*)((uint8_t*)(long)stubVMAddr + _cacheSlide);
        assert(stubInstructions[0] == 0xe59fc004);
        stubInstructions[0] = 0xe59fc000; //     ldr  ip, L0
        stubInstructions[1] = 0xe08ff00c; //     add  pc, pc, ip
        stubInstructions[2] = delta;      // L0: .long xxxx
        stubInstructions[3] = 0xe7ffdefe; //     trap
        _stubOptimizedCount++;
    }
}
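
// Optimized arm64-family stubs drop the lazy-pointer load entirely: ADRP/ADD
// materialize the target address and BR jumps to it. ADRP reaches +/-4GB, which
// covers any target within the shared cache.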
template <typename P>
void StubOptimizer<P>::optimizeArm64Stubs()
{
    for (const uint64_t stubVMAddr : _stubsToOptimize) {
        pint_t lpVMAddr = _stubAddrToLPAddr[(pint_t)stubVMAddr];
        const auto& pos = _lpAddrToTargetAddr.find(lpVMAddr);
        if ( pos == _lpAddrToTargetAddr.end() )
            continue;
        pint_t targetVMAddr = pos->second;

        int64_t adrpDelta = (targetVMAddr & -4096) - (stubVMAddr & -4096);
        // Note: ADRP/ADD can only span +/-4GB
        uint32_t* stubInstructions = (uint32_t*)((uint8_t*)(long)stubVMAddr + _cacheSlide);
        bool rightInstr1 = ((stubInstructions[0] & 0x9F00001F) == 0x90000010);   // ADRP X16, lp@page
        bool rightInstr2 = ((stubInstructions[1] & 0xFFC003FF) == 0xF9400210);   // LDR  X16, [X16, lp@pageoff]
        bool rightInstr3 = ( stubInstructions[2] == 0xD61F0200);                 // BR   X16

        if ( rightInstr1 && rightInstr2 && rightInstr3 ) {
            uint32_t immhi   = (adrpDelta >> 9) & (0x00FFFFE0);
            uint32_t immlo   = (adrpDelta << 17) & (0x60000000);
            uint32_t newADRP = (0x90000010) | immlo | immhi;
            uint32_t off12   = (targetVMAddr & 0xFFF);
            uint32_t newADD  = (0x91000210) | (off12 << 10);

            stubInstructions[0] = newADRP;    // ADRP X16, target@page
            stubInstructions[1] = newADD;     // ADD  X16, X16, target@pageoff
            stubInstructions[2] = 0xD61F0200; // BR   X16
            _stubOptimizedCount++;
        }
    }
}
#if SUPPORT_ARCH_arm64e
template <typename P>
void StubOptimizer<P>::optimizeArm64eStubs()
{
    for (const uint64_t stubVMAddr : _stubsToOptimize) {
        pint_t lpVMAddr = _stubAddrToLPAddr[(pint_t)stubVMAddr];
        const auto& pos = _lpAddrToTargetAddr.find(lpVMAddr);
        if ( pos == _lpAddrToTargetAddr.end() )
            continue;
        pint_t targetVMAddr = pos->second;

        int64_t adrpDelta = (targetVMAddr & -4096) - (stubVMAddr & -4096);
        // Note: ADRP/ADD can only span +/-4GB
        uint32_t* stubInstructions = (uint32_t*)((uint8_t*)(long)stubVMAddr + _cacheSlide);
        bool rightInstr1 = ((stubInstructions[0] & 0x9F00001F) == 0x90000011);   // ADRP X17, lp@page
        bool rightInstr2 = ((stubInstructions[1] & 0xFFC003FF) == 0x91000231);   // ADD  X17, X17, lp@pageoff
        bool rightInstr3 = ( stubInstructions[2] == 0xF9400230);                 // LDR  X16, [X17]
        bool rightInstr4 = ( stubInstructions[3] == 0xD71F0A11);                 // BRAA X16, X17

        if ( rightInstr1 && rightInstr2 && rightInstr3 && rightInstr4 ) {
            uint32_t immhi   = (adrpDelta >> 9) & (0x00FFFFE0);
            uint32_t immlo   = (adrpDelta << 17) & (0x60000000);
            uint32_t newADRP = (0x90000010) | immlo | immhi;
            uint32_t off12   = (targetVMAddr & 0xFFF);
            uint32_t newADD  = (0x91000210) | (off12 << 10);

            stubInstructions[0] = newADRP;    // ADRP X16, target@page
            stubInstructions[1] = newADD;     // ADD  X16, X16, target@pageoff
            stubInstructions[2] = 0xD61F0200; // BR   X16
            stubInstructions[3] = 0xD4200020; // TRAP
            _stubOptimizedCount++;
        }
    }
}
#endif
#if SUPPORT_ARCH_arm64_32
template <typename P>
void StubOptimizer<P>::optimizeArm64_32Stubs()
{
    for (const uint64_t stubVMAddr : _stubsToOptimize) {
        pint_t lpVMAddr = _stubAddrToLPAddr[(pint_t)stubVMAddr];
        const auto& pos = _lpAddrToTargetAddr.find(lpVMAddr);
        if ( pos == _lpAddrToTargetAddr.end() )
            continue;
        pint_t targetVMAddr = pos->second;

        int64_t adrpDelta = (targetVMAddr & -4096) - (stubVMAddr & -4096);
        uint32_t* stubInstructions = (uint32_t*)((uint8_t*)(long)stubVMAddr + _cacheSlide);
        bool rightInstr1 = ((stubInstructions[0] & 0x9F00001F) == 0x90000010);   // ADRP X16, lp@page
        bool rightInstr2 = ((stubInstructions[1] & 0xFFC003FF) == 0xB9400210);   // LDR  W16, [X16, lp@pageoff]
        bool rightInstr3 = ( stubInstructions[2] == 0xD61F0200);                 // BR   X16

        if ( rightInstr1 && rightInstr2 && rightInstr3 ) {
            uint32_t immhi   = (adrpDelta >> 9) & (0x00FFFFE0);
            uint32_t immlo   = (adrpDelta << 17) & (0x60000000);
            uint32_t newADRP = (0x90000010) | immlo | immhi;
            uint32_t off12   = (targetVMAddr & 0xFFF);
            uint32_t newADD  = (0x91000210) | (off12 << 10);

            stubInstructions[0] = newADRP;    // ADRP X16, target@page
            stubInstructions[1] = newADD;     // ADD  X16, X16, target@pageoff
            stubInstructions[2] = 0xD61F0200; // BR   X16
            _stubOptimizedCount++;
        }
    }
}
#endif
template <typename P>
void StubOptimizer<P>::optimizeArm64CallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr)
{
    forEachCallSiteToAStub([&](uint8_t kind, uint64_t callSiteAddr, uint64_t stubAddr, uint32_t& instruction) -> bool {
        if ( kind != DYLD_CACHE_ADJ_V2_ARM64_BR26 )
            return false;
        // skip all but BL or B
        if ( (instruction & 0x7C000000) != 0x14000000 )
            return false;
        // compute target of branch instruction
        int32_t brDelta = (instruction & 0x03FFFFFF) << 2;
        if ( brDelta & 0x08000000 )
            brDelta |= 0xF0000000;
        uint64_t targetAddr = callSiteAddr + (int64_t)brDelta;
        if ( targetAddr != stubAddr ) {
            _diagnostics.warning("stub target mismatch");
            return false;
        }
        // ignore branch if not to a known stub
        const auto& pos = _stubAddrToLPAddr.find((pint_t)targetAddr);
        if ( pos == _stubAddrToLPAddr.end() )
            return false;

        // ignore branch if lazy pointer is not known (resolver or interposable)
        uint64_t lpAddr = pos->second;
        const auto& pos2 = _lpAddrToTargetAddr.find((pint_t)lpAddr);
        if ( pos2 == _lpAddrToTargetAddr.end() )
            return false;

        uint64_t finalTargetAddr = pos2->second;
        int64_t deltaToFinalTarget = finalTargetAddr - callSiteAddr;
        // if final target within range, change to branch there directly
        if ( (deltaToFinalTarget > -b128MegLimit) && (deltaToFinalTarget < b128MegLimit) ) {
            instruction = (instruction & 0xFC000000) | ((deltaToFinalTarget >> 2) & 0x03FFFFFF);
            _branchOptimizedToDirectCount++;
            return true;
        }

        // try to re-use an existing optimized stub
        const auto& pos3 = targetAddrToOptStubAddr.find((pint_t)finalTargetAddr);
        if ( pos3 != targetAddrToOptStubAddr.end() ) {
            uint64_t existingStub = pos3->second;
            if ( existingStub != stubAddr ) {
                int64_t deltaToOptStub = existingStub - callSiteAddr;
                if ( (deltaToOptStub > -b128MegLimit) && (deltaToOptStub < b128MegLimit) ) {
                    instruction = (instruction & 0xFC000000) | ((deltaToOptStub >> 2) & 0x03FFFFFF);
                    _branchToReUsedOptimizedStubCount++;
                    return true;
                }
            }
        }

        // leave as BL to stub, but optimize the stub
        _stubsToOptimize.insert(stubAddr);
        targetAddrToOptStubAddr[(pint_t)finalTargetAddr] = (pint_t)stubAddr;
        _branchToOptimizedStubCount++;
        return false;
    });
    if (_diagnostics.hasError())
        return;
}
template <typename P>
void StubOptimizer<P>::optimizeCallSites(std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr)
{
    if ( _textSection == NULL )
        return;
    if ( _stubSection == NULL )
        return;

    switch ( _mh->cputype() ) {
        case CPU_TYPE_ARM64:
            optimizeArm64CallSites(targetAddrToOptStubAddr);
#if SUPPORT_ARCH_arm64e
            if (_mh->cpusubtype() == CPU_SUBTYPE_ARM64E)
                optimizeArm64eStubs();
            else
#endif
                optimizeArm64Stubs();
            break;
#if SUPPORT_ARCH_arm64_32
        case CPU_TYPE_ARM64_32:
            optimizeArm64CallSites(targetAddrToOptStubAddr);
            optimizeArm64_32Stubs();
            break;
#endif
        case CPU_TYPE_ARM:
            optimizeArmCallSites(targetAddrToOptStubAddr);
            optimizeArmStubs();
            break;
    }
    if ( verbose ) {
        _diagnostics.verbose("dylib has %6u BLs to %4u stubs. Changed %5u, %5u, %5u BLs to use direct branch, optimized stub, neighbor's optimized stub. "
                             "%5u stubs left interposable, %4u stubs optimized. path=%s\n",
                             _branchToStubCount, _stubCount, _branchOptimizedToDirectCount, _branchToOptimizedStubCount, _branchToReUsedOptimizedStubCount,
                             _stubsLeftInterposable, _stubOptimizedCount, _installName);
    }
}
template <typename P>
void bypassStubs(DyldSharedCache* cache, const std::string& archName, std::unordered_map<uint64_t, uint64_t>& targetAddrToOptStubAddr,
                 const char* const neverStubEliminateDylibs[], const char* const neverStubEliminateSymbols[],
                 Diagnostics& diags)
{
    diags.verbose("Stub elimination optimization:\n");

    // construct a StubOptimizer for each image
    __block std::vector<StubOptimizer<P>*> optimizers;
    cache->forEachImage(^(const mach_header* mh, const char* installName) {
        optimizers.push_back(new StubOptimizer<P>(cache, (macho_header<P>*)mh, diags));
    });

    // build set of functions to never stub-eliminate because tools may need to override them
    std::unordered_set<std::string> neverStubEliminate;
    for (const char* const* p = neverStubEliminateSymbols; *p != nullptr; ++p) {
        neverStubEliminate.insert(*p);
    }
    for (const char* const* d = neverStubEliminateDylibs; *d != nullptr; ++d) {
        for (StubOptimizer<P>* op : optimizers) {
            if ( strcmp(op->installName(), *d) == 0 ) {
                const uint8_t* exportsStart = op->exportsTrie();
                const uint8_t* exportsEnd = exportsStart + op->exportsTrieSize();
                std::vector<ExportInfoTrie::Entry> exports;
                if ( !ExportInfoTrie::parseTrie(exportsStart, exportsEnd, exports) ) {
                    diags.error("malformed exports trie in %s", *d);
                    return;
                }
                for (const ExportInfoTrie::Entry& entry : exports) {
                    neverStubEliminate.insert(entry.name);
                }
            }
        }
    }

    // build maps of stubs-to-lp and lp-to-target
    for (StubOptimizer<P>* op : optimizers)
        op->buildStubMap(neverStubEliminate);

    // optimize call sites to by-pass stubs or jump through island
    for (StubOptimizer<P>* op : optimizers)
        op->optimizeCallSites(targetAddrToOptStubAddr);

    // write total optimization info
    uint32_t callSiteCount = 0;
    uint32_t callSiteDirectOptCount = 0;
    for (StubOptimizer<P>* op : optimizers) {
        callSiteCount          += op->_branchToStubCount;
        callSiteDirectOptCount += op->_branchOptimizedToDirectCount;
    }
    diags.verbose(" cache contains %u call sites of which %u were direct bound\n", callSiteCount, callSiteDirectOptCount);

    // clean up
    for (StubOptimizer<P>* op : optimizers)
        delete op;
}
void SharedCacheBuilder::optimizeAwayStubs()
{
    std::unordered_map<uint64_t, uint64_t> targetAddrToOptStubAddr;

    DyldSharedCache* dyldCache = (DyldSharedCache*)_readExecuteRegion.buffer;
    std::string archName = dyldCache->archName();
#if SUPPORT_ARCH_arm64_32
    if ( startsWith(archName, "arm64_32") )
        bypassStubs<Pointer32<LittleEndian>>(dyldCache, archName, targetAddrToOptStubAddr, _s_neverStubEliminateDylibs, _s_neverStubEliminateSymbols, _diagnostics);
    else
#endif
    if ( startsWith(archName, "arm64") )
        bypassStubs<Pointer64<LittleEndian>>(dyldCache, archName, targetAddrToOptStubAddr, _s_neverStubEliminateDylibs, _s_neverStubEliminateSymbols, _diagnostics);
    else if ( archName == "armv7k" )
        bypassStubs<Pointer32<LittleEndian>>(dyldCache, archName, targetAddrToOptStubAddr, _s_neverStubEliminateDylibs, _s_neverStubEliminateSymbols, _diagnostics);
    // no stub optimization done for other arches
}