1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-*
3 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
27 #include <sys/types.h>
30 #include <sys/sysctl.h>
31 #include <sys/param.h>
32 #include <sys/mount.h>
37 #include <mach/mach_time.h>
38 #include <mach/vm_statistics.h>
39 #include <mach/mach_init.h>
40 #include <mach/mach_host.h>
41 #include <uuid/uuid.h>
43 #include <mach-o/dyld.h>
44 #include <mach-o/fat.h>
53 #include <unordered_set>
56 #include <CommonCrypto/CommonDigest.h>
57 #include <AvailabilityMacros.h>
59 #include "MachOTrie.hpp"
63 #include "OutputFile.h"
64 #include "Architectures.hpp"
65 #include "HeaderAndLoadCommands.hpp"
66 #include "LinkEdit.hpp"
67 #include "LinkEditClassic.hpp"
// Instrumentation counters for the ARM64 ADRP optimization pass: how many
// adrp instructions were rewritten to NOPs vs. left alone.
// NOTE(review): presumably dumped for diagnostics elsewhere in this file — confirm.
uint32_t sAdrpNoped = 0;
uint32_t sAdrpNotNoped = 0;
77 OutputFile::OutputFile(const Options
& opts
)
79 usesWeakExternalSymbols(false), overridesWeakExternalSymbols(false),
80 _noReExportedDylibs(false), pieDisabled(false), hasDataInCode(false),
81 headerAndLoadCommandsSection(NULL
),
82 rebaseSection(NULL
), bindingSection(NULL
), weakBindingSection(NULL
),
83 lazyBindingSection(NULL
), exportSection(NULL
),
84 splitSegInfoSection(NULL
), functionStartsSection(NULL
),
85 dataInCodeSection(NULL
), optimizationHintsSection(NULL
),
86 symbolTableSection(NULL
), stringPoolSection(NULL
),
87 localRelocationsSection(NULL
), externalRelocationsSection(NULL
),
88 sectionRelocationsSection(NULL
),
89 indirectSymbolTableSection(NULL
),
91 _hasDyldInfo(opts
.makeCompressedDyldInfo()),
92 _hasSymbolTable(true),
93 _hasSectionRelocations(opts
.outputKind() == Options::kObjectFile
),
94 _hasSplitSegInfo(opts
.sharedRegionEligible()),
95 _hasFunctionStartsInfo(opts
.addFunctionStarts()),
96 _hasDataInCodeInfo(opts
.addDataInCodeInfo()),
97 _hasDynamicSymbolTable(true),
98 _hasLocalRelocations(!opts
.makeCompressedDyldInfo()),
99 _hasExternalRelocations(!opts
.makeCompressedDyldInfo()),
100 _hasOptimizationHints(opts
.outputKind() == Options::kObjectFile
),
101 _encryptedTEXTstartOffset(0),
102 _encryptedTEXTendOffset(0),
103 _localSymbolsStartIndex(0),
104 _localSymbolsCount(0),
105 _globalSymbolsStartIndex(0),
106 _globalSymbolsCount(0),
107 _importSymbolsStartIndex(0),
108 _importSymbolsCount(0),
109 _sectionsRelocationsAtom(NULL
),
110 _localRelocsAtom(NULL
),
111 _externalRelocsAtom(NULL
),
112 _symbolTableAtom(NULL
),
113 _indirectSymbolTableAtom(NULL
),
114 _rebasingInfoAtom(NULL
),
115 _bindingInfoAtom(NULL
),
116 _lazyBindingInfoAtom(NULL
),
117 _weakBindingInfoAtom(NULL
),
118 _exportInfoAtom(NULL
),
119 _splitSegInfoAtom(NULL
),
120 _functionStartsAtom(NULL
),
121 _dataInCodeAtom(NULL
),
122 _optimizationHintsAtom(NULL
)
126 void OutputFile::dumpAtomsBySection(ld::Internal
& state
, bool printAtoms
)
128 fprintf(stderr
, "SORTED:\n");
129 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
130 fprintf(stderr
, "final section %p %s/%s %s start addr=0x%08llX, size=0x%08llX, alignment=%02d, fileOffset=0x%08llX\n",
131 (*it
), (*it
)->segmentName(), (*it
)->sectionName(), (*it
)->isSectionHidden() ? "(hidden)" : "",
132 (*it
)->address
, (*it
)->size
, (*it
)->alignment
, (*it
)->fileOffset
);
134 std::vector
<const ld::Atom
*>& atoms
= (*it
)->atoms
;
135 for (std::vector
<const ld::Atom
*>::iterator ait
= atoms
.begin(); ait
!= atoms
.end(); ++ait
) {
136 fprintf(stderr
, " %p (0x%04llX) %s\n", *ait
, (*ait
)->size(), (*ait
)->name());
140 fprintf(stderr
, "DYLIBS:\n");
141 for (std::vector
<ld::dylib::File
*>::iterator it
=state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
)
142 fprintf(stderr
, " %s\n", (*it
)->installPath());
145 void OutputFile::write(ld::Internal
& state
)
147 this->buildDylibOrdinalMapping(state
);
148 this->addLoadCommands(state
);
149 this->addLinkEdit(state
);
150 state
.setSectionSizesAndAlignments();
151 this->setLoadCommandsPadding(state
);
152 _fileSize
= state
.assignFileOffsets();
153 this->assignAtomAddresses(state
);
154 this->synthesizeDebugNotes(state
);
155 this->buildSymbolTable(state
);
156 this->generateLinkEditInfo(state
);
157 if ( _options
.sharedRegionEncodingV2() )
158 this->makeSplitSegInfoV2(state
);
160 this->makeSplitSegInfo(state
);
161 this->updateLINKEDITAddresses(state
);
162 //this->dumpAtomsBySection(state, false);
163 this->writeOutputFile(state
);
164 this->writeMapFile(state
);
167 bool OutputFile::findSegment(ld::Internal
& state
, uint64_t addr
, uint64_t* start
, uint64_t* end
, uint32_t* index
)
169 uint32_t segIndex
= 0;
170 ld::Internal::FinalSection
* segFirstSection
= NULL
;
171 ld::Internal::FinalSection
* lastSection
= NULL
;
172 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
173 ld::Internal::FinalSection
* sect
= *it
;
174 if ( (segFirstSection
== NULL
) || strcmp(segFirstSection
->segmentName(), sect
->segmentName()) != 0 ) {
175 if ( segFirstSection
!= NULL
) {
176 //fprintf(stderr, "findSegment(0x%llX) seg changed to %s\n", addr, sect->segmentName());
177 if ( (addr
>= segFirstSection
->address
) && (addr
< lastSection
->address
+lastSection
->size
) ) {
178 *start
= segFirstSection
->address
;
179 *end
= lastSection
->address
+lastSection
->size
;
185 segFirstSection
= sect
;
193 void OutputFile::assignAtomAddresses(ld::Internal
& state
)
195 const bool log
= false;
196 if ( log
) fprintf(stderr
, "assignAtomAddresses()\n");
197 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
198 ld::Internal::FinalSection
* sect
= *sit
;
199 if ( log
) fprintf(stderr
, " section=%s/%s\n", sect
->segmentName(), sect
->sectionName());
200 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
201 const ld::Atom
* atom
= *ait
;
202 switch ( sect
-> type() ) {
203 case ld::Section::typeImportProxies
:
204 // want finalAddress() of all proxy atoms to be zero
205 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(0);
207 case ld::Section::typeAbsoluteSymbols
:
208 // want finalAddress() of all absolute atoms to be value of abs symbol
209 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(0);
211 case ld::Section::typeLinkEdit
:
212 // linkedit layout is assigned later
215 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(sect
->address
);
216 if ( log
) fprintf(stderr
, " atom=%p, addr=0x%08llX, name=%s\n", atom
, atom
->finalAddress(), atom
->name());
223 void OutputFile::updateLINKEDITAddresses(ld::Internal
& state
)
225 if ( _options
.makeCompressedDyldInfo() ) {
226 // build dylb rebasing info
227 assert(_rebasingInfoAtom
!= NULL
);
228 _rebasingInfoAtom
->encode();
230 // build dyld binding info
231 assert(_bindingInfoAtom
!= NULL
);
232 _bindingInfoAtom
->encode();
234 // build dyld lazy binding info
235 assert(_lazyBindingInfoAtom
!= NULL
);
236 _lazyBindingInfoAtom
->encode();
238 // build dyld weak binding info
239 assert(_weakBindingInfoAtom
!= NULL
);
240 _weakBindingInfoAtom
->encode();
242 // build dyld export info
243 assert(_exportInfoAtom
!= NULL
);
244 _exportInfoAtom
->encode();
247 if ( _options
.sharedRegionEligible() ) {
248 // build split seg info
249 assert(_splitSegInfoAtom
!= NULL
);
250 _splitSegInfoAtom
->encode();
253 if ( _options
.addFunctionStarts() ) {
254 // build function starts info
255 assert(_functionStartsAtom
!= NULL
);
256 _functionStartsAtom
->encode();
259 if ( _options
.addDataInCodeInfo() ) {
260 // build data-in-code info
261 assert(_dataInCodeAtom
!= NULL
);
262 _dataInCodeAtom
->encode();
265 if ( _hasOptimizationHints
) {
266 // build linker-optimization-hint info
267 assert(_optimizationHintsAtom
!= NULL
);
268 _optimizationHintsAtom
->encode();
271 // build classic symbol table
272 assert(_symbolTableAtom
!= NULL
);
273 _symbolTableAtom
->encode();
274 assert(_indirectSymbolTableAtom
!= NULL
);
275 _indirectSymbolTableAtom
->encode();
277 // add relocations to .o files
278 if ( _options
.outputKind() == Options::kObjectFile
) {
279 assert(_sectionsRelocationsAtom
!= NULL
);
280 _sectionsRelocationsAtom
->encode();
283 if ( ! _options
.makeCompressedDyldInfo() ) {
284 // build external relocations
285 assert(_externalRelocsAtom
!= NULL
);
286 _externalRelocsAtom
->encode();
287 // build local relocations
288 assert(_localRelocsAtom
!= NULL
);
289 _localRelocsAtom
->encode();
292 // update address and file offsets now that linkedit content has been generated
293 uint64_t curLinkEditAddress
= 0;
294 uint64_t curLinkEditfileOffset
= 0;
295 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
296 ld::Internal::FinalSection
* sect
= *sit
;
297 if ( sect
->type() != ld::Section::typeLinkEdit
)
299 if ( curLinkEditAddress
== 0 ) {
300 curLinkEditAddress
= sect
->address
;
301 curLinkEditfileOffset
= sect
->fileOffset
;
303 uint16_t maxAlignment
= 0;
305 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
306 const ld::Atom
* atom
= *ait
;
307 //fprintf(stderr, "setting linkedit atom offset for %s\n", atom->name());
308 if ( atom
->alignment().powerOf2
> maxAlignment
)
309 maxAlignment
= atom
->alignment().powerOf2
;
310 // calculate section offset for this atom
311 uint64_t alignment
= 1 << atom
->alignment().powerOf2
;
312 uint64_t currentModulus
= (offset
% alignment
);
313 uint64_t requiredModulus
= atom
->alignment().modulus
;
314 if ( currentModulus
!= requiredModulus
) {
315 if ( requiredModulus
> currentModulus
)
316 offset
+= requiredModulus
-currentModulus
;
318 offset
+= requiredModulus
+alignment
-currentModulus
;
320 (const_cast<ld::Atom
*>(atom
))->setSectionOffset(offset
);
321 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(curLinkEditAddress
);
322 offset
+= atom
->size();
325 // section alignment is that of a contained atom with the greatest alignment
326 sect
->alignment
= maxAlignment
;
327 sect
->address
= curLinkEditAddress
;
328 sect
->fileOffset
= curLinkEditfileOffset
;
329 curLinkEditAddress
+= sect
->size
;
330 curLinkEditfileOffset
+= sect
->size
;
333 _fileSize
= state
.sections
.back()->fileOffset
+ state
.sections
.back()->size
;
337 void OutputFile::setLoadCommandsPadding(ld::Internal
& state
)
339 // In other sections, any extra space is put and end of segment.
340 // In __TEXT segment, any extra space is put after load commands to allow post-processing of load commands
341 // Do a reverse layout of __TEXT segment to determine padding size and adjust section size
342 uint64_t paddingSize
= 0;
343 switch ( _options
.outputKind() ) {
345 // dyld itself has special padding requirements. We want the beginning __text section to start at a stable address
346 assert(strcmp(state
.sections
[1]->sectionName(),"__text") == 0);
347 state
.sections
[1]->alignment
= 12; // page align __text
349 case Options::kObjectFile
:
350 // mach-o .o files need no padding between load commands and first section
351 // but leave enough room that the object file could be signed
354 case Options::kPreload
:
355 // mach-o MH_PRELOAD files need no padding between load commands and first section
358 // work backwards from end of segment and lay out sections so that extra room goes to padding atom
360 uint64_t textSegPageSize
= _options
.segPageSize("__TEXT");
361 if ( _options
.sharedRegionEligible() && (_options
.iOSVersionMin() >= ld::iOS_8_0
) && (textSegPageSize
== 0x4000) )
362 textSegPageSize
= 0x1000;
363 for (std::vector
<ld::Internal::FinalSection
*>::reverse_iterator it
= state
.sections
.rbegin(); it
!= state
.sections
.rend(); ++it
) {
364 ld::Internal::FinalSection
* sect
= *it
;
365 if ( strcmp(sect
->segmentName(), "__TEXT") != 0 )
367 if ( sect
== headerAndLoadCommandsSection
) {
368 addr
-= headerAndLoadCommandsSection
->size
;
369 paddingSize
= addr
% textSegPageSize
;
373 addr
= addr
& (0 - (1 << sect
->alignment
));
376 // if command line requires more padding than this
377 uint32_t minPad
= _options
.minimumHeaderPad();
378 if ( _options
.maxMminimumHeaderPad() ) {
379 // -headerpad_max_install_names means there should be room for every path load command to grow to 1204 bytes
380 uint32_t altMin
= _dylibsToLoad
.size() * MAXPATHLEN
;
381 if ( _options
.outputKind() == Options::kDynamicLibrary
)
382 altMin
+= MAXPATHLEN
;
383 if ( altMin
> minPad
)
386 if ( paddingSize
< minPad
) {
387 int extraPages
= (minPad
- paddingSize
+ _options
.segmentAlignment() - 1)/_options
.segmentAlignment();
388 paddingSize
+= extraPages
* _options
.segmentAlignment();
391 if ( _options
.makeEncryptable() ) {
392 // load commands must be on a separate non-encrypted page
393 int loadCommandsPage
= (headerAndLoadCommandsSection
->size
+ minPad
)/_options
.segmentAlignment();
394 int textPage
= (headerAndLoadCommandsSection
->size
+ paddingSize
)/_options
.segmentAlignment();
395 if ( loadCommandsPage
== textPage
) {
396 paddingSize
+= _options
.segmentAlignment();
399 // remember start for later use by load command
400 _encryptedTEXTstartOffset
= textPage
*_options
.segmentAlignment();
404 // add padding to size of section
405 headerAndLoadCommandsSection
->size
+= paddingSize
;
409 uint64_t OutputFile::pageAlign(uint64_t addr
)
411 const uint64_t alignment
= _options
.segmentAlignment();
412 return ((addr
+alignment
-1) & (-alignment
));
415 uint64_t OutputFile::pageAlign(uint64_t addr
, uint64_t pageSize
)
417 return ((addr
+pageSize
-1) & (-pageSize
));
420 static const char* makeName(const ld::Atom
& atom
)
422 static char buffer
[4096];
423 switch ( atom
.symbolTableInclusion() ) {
424 case ld::Atom::symbolTableNotIn
:
425 case ld::Atom::symbolTableNotInFinalLinkedImages
:
426 sprintf(buffer
, "%s@0x%08llX", atom
.name(), atom
.objectAddress());
428 case ld::Atom::symbolTableIn
:
429 case ld::Atom::symbolTableInAndNeverStrip
:
430 case ld::Atom::symbolTableInAsAbsolute
:
431 case ld::Atom::symbolTableInWithRandomAutoStripLabel
:
432 strlcpy(buffer
, atom
.name(), 4096);
438 static const char* referenceTargetAtomName(ld::Internal
& state
, const ld::Fixup
* ref
)
440 switch ( ref
->binding
) {
441 case ld::Fixup::bindingNone
:
443 case ld::Fixup::bindingByNameUnbound
:
444 return (char*)(ref
->u
.target
);
445 case ld::Fixup::bindingByContentBound
:
446 case ld::Fixup::bindingDirectlyBound
:
447 return makeName(*((ld::Atom
*)(ref
->u
.target
)));
448 case ld::Fixup::bindingsIndirectlyBound
:
449 return makeName(*state
.indirectBindingTable
[ref
->u
.bindingIndex
]);
451 return "BAD BINDING";
454 bool OutputFile::targetIsThumb(ld::Internal
& state
, const ld::Fixup
* fixup
)
456 switch ( fixup
->binding
) {
457 case ld::Fixup::bindingByContentBound
:
458 case ld::Fixup::bindingDirectlyBound
:
459 return fixup
->u
.target
->isThumb();
460 case ld::Fixup::bindingsIndirectlyBound
:
461 return state
.indirectBindingTable
[fixup
->u
.bindingIndex
]->isThumb();
465 throw "unexpected binding";
468 uint64_t OutputFile::addressOf(const ld::Internal
& state
, const ld::Fixup
* fixup
, const ld::Atom
** target
)
470 if ( !_options
.makeCompressedDyldInfo() ) {
471 // For external relocations the classic mach-o format
472 // has addend only stored in the content. That means
473 // that the address of the target is not used.
474 if ( fixup
->contentAddendOnly
)
477 switch ( fixup
->binding
) {
478 case ld::Fixup::bindingNone
:
479 throw "unexpected bindingNone";
480 case ld::Fixup::bindingByNameUnbound
:
481 throw "unexpected bindingByNameUnbound";
482 case ld::Fixup::bindingByContentBound
:
483 case ld::Fixup::bindingDirectlyBound
:
484 *target
= fixup
->u
.target
;
485 return (*target
)->finalAddress();
486 case ld::Fixup::bindingsIndirectlyBound
:
487 *target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
489 if ( ! (*target
)->finalAddressMode() ) {
490 throwf("reference to symbol (which has not been assigned an address) %s", (*target
)->name());
493 return (*target
)->finalAddress();
495 throw "unexpected binding";
498 uint64_t OutputFile::sectionOffsetOf(const ld::Internal
& state
, const ld::Fixup
* fixup
)
500 const ld::Atom
* target
= NULL
;
501 switch ( fixup
->binding
) {
502 case ld::Fixup::bindingNone
:
503 throw "unexpected bindingNone";
504 case ld::Fixup::bindingByNameUnbound
:
505 throw "unexpected bindingByNameUnbound";
506 case ld::Fixup::bindingByContentBound
:
507 case ld::Fixup::bindingDirectlyBound
:
508 target
= fixup
->u
.target
;
510 case ld::Fixup::bindingsIndirectlyBound
:
511 target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
514 assert(target
!= NULL
);
516 uint64_t targetAddress
= target
->finalAddress();
517 for (std::vector
<ld::Internal::FinalSection
*>::const_iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
518 const ld::Internal::FinalSection
* sect
= *it
;
519 if ( (sect
->address
<= targetAddress
) && (targetAddress
< (sect
->address
+sect
->size
)) )
520 return targetAddress
- sect
->address
;
522 throw "section not found for section offset";
527 uint64_t OutputFile::tlvTemplateOffsetOf(const ld::Internal
& state
, const ld::Fixup
* fixup
)
529 const ld::Atom
* target
= NULL
;
530 switch ( fixup
->binding
) {
531 case ld::Fixup::bindingNone
:
532 throw "unexpected bindingNone";
533 case ld::Fixup::bindingByNameUnbound
:
534 throw "unexpected bindingByNameUnbound";
535 case ld::Fixup::bindingByContentBound
:
536 case ld::Fixup::bindingDirectlyBound
:
537 target
= fixup
->u
.target
;
539 case ld::Fixup::bindingsIndirectlyBound
:
540 target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
543 assert(target
!= NULL
);
545 for (std::vector
<ld::Internal::FinalSection
*>::const_iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
546 const ld::Internal::FinalSection
* sect
= *it
;
547 switch ( sect
->type() ) {
548 case ld::Section::typeTLVInitialValues
:
549 case ld::Section::typeTLVZeroFill
:
550 return target
->finalAddress() - sect
->address
;
555 throw "section not found for tlvTemplateOffsetOf";
558 void OutputFile::printSectionLayout(ld::Internal
& state
)
560 // show layout of final image
561 fprintf(stderr
, "final section layout:\n");
562 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
563 if ( (*it
)->isSectionHidden() )
565 fprintf(stderr
, " %s/%s addr=0x%08llX, size=0x%08llX, fileOffset=0x%08llX, type=%d\n",
566 (*it
)->segmentName(), (*it
)->sectionName(),
567 (*it
)->address
, (*it
)->size
, (*it
)->fileOffset
, (*it
)->type());
572 void OutputFile::rangeCheck8(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
574 if ( (displacement
> 127) || (displacement
< -128) ) {
575 // show layout of final image
576 printSectionLayout(state
);
578 const ld::Atom
* target
;
579 throwf("8-bit reference out of range (%lld max is +/-127B): from %s (0x%08llX) to %s (0x%08llX)",
580 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
581 addressOf(state
, fixup
, &target
));
585 void OutputFile::rangeCheck16(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
587 const int64_t thirtyTwoKLimit
= 0x00007FFF;
588 if ( (displacement
> thirtyTwoKLimit
) || (displacement
< (-thirtyTwoKLimit
)) ) {
589 // show layout of final image
590 printSectionLayout(state
);
592 const ld::Atom
* target
;
593 throwf("16-bit reference out of range (%lld max is +/-32KB): from %s (0x%08llX) to %s (0x%08llX)",
594 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
595 addressOf(state
, fixup
, &target
));
599 void OutputFile::rangeCheckBranch32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
601 const int64_t twoGigLimit
= 0x7FFFFFFF;
602 if ( (displacement
> twoGigLimit
) || (displacement
< (-twoGigLimit
)) ) {
603 // show layout of final image
604 printSectionLayout(state
);
606 const ld::Atom
* target
;
607 throwf("32-bit branch out of range (%lld max is +/-2GB): from %s (0x%08llX) to %s (0x%08llX)",
608 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
609 addressOf(state
, fixup
, &target
));
614 void OutputFile::rangeCheckAbsolute32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
616 const int64_t fourGigLimit
= 0xFFFFFFFF;
617 if ( displacement
> fourGigLimit
) {
618 // <rdar://problem/9610466> cannot enforce 32-bit range checks on 32-bit archs because assembler loses sign information
619 // .long _foo - 0xC0000000
620 // is encoded in mach-o the same as:
621 // .long _foo + 0x40000000
622 // so if _foo lays out to 0xC0000100, the first is ok, but the second is not.
623 if ( (_options
.architecture() == CPU_TYPE_ARM
) || (_options
.architecture() == CPU_TYPE_I386
) ) {
624 // Unlikely userland code does funky stuff like this, so warn for them, but not warn for -preload or -static
625 if ( (_options
.outputKind() != Options::kPreload
) && (_options
.outputKind() != Options::kStaticExecutable
) ) {
626 warning("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
627 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), displacement
);
631 // show layout of final image
632 printSectionLayout(state
);
634 const ld::Atom
* target
;
635 if ( fixup
->binding
== ld::Fixup::bindingNone
)
636 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
637 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), displacement
);
639 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to %s (0x%08llX)",
640 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
641 addressOf(state
, fixup
, &target
));
646 void OutputFile::rangeCheckRIP32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
648 const int64_t twoGigLimit
= 0x7FFFFFFF;
649 if ( (displacement
> twoGigLimit
) || (displacement
< (-twoGigLimit
)) ) {
650 // show layout of final image
651 printSectionLayout(state
);
653 const ld::Atom
* target
;
654 throwf("32-bit RIP relative reference out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
655 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
656 addressOf(state
, fixup
, &target
));
660 void OutputFile::rangeCheckARM12(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
662 if ( (displacement
> 4092LL) || (displacement
< (-4092LL)) ) {
663 // show layout of final image
664 printSectionLayout(state
);
666 const ld::Atom
* target
;
667 throwf("ARM ldr 12-bit displacement out of range (%lld max is +/-4096B): from %s (0x%08llX) to %s (0x%08llX)",
668 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
669 addressOf(state
, fixup
, &target
));
673 bool OutputFile::checkArmBranch24Displacement(int64_t displacement
)
675 return ( (displacement
< 33554428LL) && (displacement
> (-33554432LL)) );
678 void OutputFile::rangeCheckARMBranch24(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
680 if ( checkArmBranch24Displacement(displacement
) )
683 // show layout of final image
684 printSectionLayout(state
);
686 const ld::Atom
* target
;
687 throwf("b/bl/blx ARM branch out of range (%lld max is +/-32MB): from %s (0x%08llX) to %s (0x%08llX)",
688 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
689 addressOf(state
, fixup
, &target
));
692 bool OutputFile::checkThumbBranch22Displacement(int64_t displacement
)
694 // thumb2 supports +/- 16MB displacement
695 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
696 if ( (displacement
> 16777214LL) || (displacement
< (-16777216LL)) ) {
701 // thumb1 supports +/- 4MB displacement
702 if ( (displacement
> 4194302LL) || (displacement
< (-4194304LL)) ) {
709 void OutputFile::rangeCheckThumbBranch22(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
711 if ( checkThumbBranch22Displacement(displacement
) )
714 // show layout of final image
715 printSectionLayout(state
);
717 const ld::Atom
* target
;
718 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
719 throwf("b/bl/blx thumb2 branch out of range (%lld max is +/-16MB): from %s (0x%08llX) to %s (0x%08llX)",
720 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
721 addressOf(state
, fixup
, &target
));
724 throwf("b/bl/blx thumb1 branch out of range (%lld max is +/-4MB): from %s (0x%08llX) to %s (0x%08llX)",
725 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
726 addressOf(state
, fixup
, &target
));
731 void OutputFile::rangeCheckARM64Branch26(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
733 const int64_t bl_128MegLimit
= 0x07FFFFFF;
734 if ( (displacement
> bl_128MegLimit
) || (displacement
< (-bl_128MegLimit
)) ) {
735 // show layout of final image
736 printSectionLayout(state
);
738 const ld::Atom
* target
;
739 throwf("b(l) ARM64 branch out of range (%lld max is +/-128MB): from %s (0x%08llX) to %s (0x%08llX)",
740 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
741 addressOf(state
, fixup
, &target
));
745 void OutputFile::rangeCheckARM64Page21(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
747 const int64_t adrp_4GigLimit
= 0x100000000ULL
;
748 if ( (displacement
> adrp_4GigLimit
) || (displacement
< (-adrp_4GigLimit
)) ) {
749 // show layout of final image
750 printSectionLayout(state
);
752 const ld::Atom
* target
;
753 throwf("ARM64 ADRP out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
754 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
755 addressOf(state
, fixup
, &target
));
760 uint16_t OutputFile::get16LE(uint8_t* loc
) { return LittleEndian::get16(*(uint16_t*)loc
); }
761 void OutputFile::set16LE(uint8_t* loc
, uint16_t value
) { LittleEndian::set16(*(uint16_t*)loc
, value
); }
763 uint32_t OutputFile::get32LE(uint8_t* loc
) { return LittleEndian::get32(*(uint32_t*)loc
); }
764 void OutputFile::set32LE(uint8_t* loc
, uint32_t value
) { LittleEndian::set32(*(uint32_t*)loc
, value
); }
766 uint64_t OutputFile::get64LE(uint8_t* loc
) { return LittleEndian::get64(*(uint64_t*)loc
); }
767 void OutputFile::set64LE(uint8_t* loc
, uint64_t value
) { LittleEndian::set64(*(uint64_t*)loc
, value
); }
769 uint16_t OutputFile::get16BE(uint8_t* loc
) { return BigEndian::get16(*(uint16_t*)loc
); }
770 void OutputFile::set16BE(uint8_t* loc
, uint16_t value
) { BigEndian::set16(*(uint16_t*)loc
, value
); }
772 uint32_t OutputFile::get32BE(uint8_t* loc
) { return BigEndian::get32(*(uint32_t*)loc
); }
773 void OutputFile::set32BE(uint8_t* loc
, uint32_t value
) { BigEndian::set32(*(uint32_t*)loc
, value
); }
775 uint64_t OutputFile::get64BE(uint8_t* loc
) { return BigEndian::get64(*(uint64_t*)loc
); }
776 void OutputFile::set64BE(uint8_t* loc
, uint64_t value
) { BigEndian::set64(*(uint64_t*)loc
, value
); }
778 #if SUPPORT_ARCH_arm64
780 static uint32_t makeNOP() {
// Sign-extension mode for a load: none, extend to 32 bits, or to 64 bits.
enum SignExtension { signedNot, signed32, signed64 };

// Decoded description of an ARM64 load/store instruction, shared by the
// make*/parse* instruction helpers below.  The reg/baseReg/isStore fields
// are restored from dropped lines; they are referenced by those helpers.
struct LoadStoreInfo {
	uint32_t		reg;		// data register
	uint32_t		baseReg;	// base address register
	uint32_t		offset;		// after scaling
	uint32_t		size;		// 1,2,4,8, or 16
	bool			isStore;
	bool			isFloat;	// if destReg is FP/SIMD
	SignExtension	signEx;		// if load is sign extended
};
795 static uint32_t makeLDR_literal(const LoadStoreInfo
& info
, uint64_t targetAddress
, uint64_t instructionAddress
)
797 int64_t delta
= targetAddress
- instructionAddress
;
798 assert(delta
< 1024*1024);
799 assert(delta
> -1024*1024);
800 assert((info
.reg
& 0xFFFFFFE0) == 0);
801 assert((targetAddress
& 0x3) == 0);
802 assert((instructionAddress
& 0x3) == 0);
803 assert(!info
.isStore
);
804 uint32_t imm19
= (delta
<< 3) & 0x00FFFFE0;
805 uint32_t instruction
= 0;
806 switch ( info
.size
) {
808 if ( info
.isFloat
) {
809 assert(info
.signEx
== signedNot
);
810 instruction
= 0x1C000000;
813 if ( info
.signEx
== signed64
)
814 instruction
= 0x98000000;
816 instruction
= 0x18000000;
820 assert(info
.signEx
== signedNot
);
821 instruction
= info
.isFloat
? 0x5C000000 : 0x58000000;
824 assert(info
.signEx
== signedNot
);
825 instruction
= 0x9C000000;
828 assert(0 && "invalid load size for literal");
830 return (instruction
| imm19
| info
.reg
);
// Build an A64 ADR instruction loading the PC-relative address of
// targetAddress into destReg.  Reach is +/-1MB (asserted).
static uint32_t makeADR(uint32_t destReg, uint64_t targetAddress, uint64_t instructionAddress)
{
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((instructionAddress & 0x3) == 0);
	uint32_t instruction = 0x10000000;
	int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	uint32_t immhi = (delta & 0x001FFFFC) << 3;		// delta[20:2] -> bits [23:5]
	uint32_t immlo = (delta & 0x00000003) << 29;	// delta[1:0]  -> bits [30:29]
	return (instruction | immhi | immlo | destReg);
}
846 static uint32_t makeLoadOrStore(const LoadStoreInfo
& info
)
848 uint32_t instruction
= 0x39000000;
850 instruction
|= 0x04000000;
851 instruction
|= info
.reg
;
852 instruction
|= (info
.baseReg
<< 5);
853 uint32_t sizeBits
= 0;
854 uint32_t opcBits
= 0;
855 uint32_t imm12Bits
= 0;
856 switch ( info
.size
) {
859 imm12Bits
= info
.offset
;
860 if ( info
.isStore
) {
864 switch ( info
.signEx
) {
879 assert((info
.offset
% 2) == 0);
880 imm12Bits
= info
.offset
/2;
881 if ( info
.isStore
) {
885 switch ( info
.signEx
) {
900 assert((info
.offset
% 4) == 0);
901 imm12Bits
= info
.offset
/4;
902 if ( info
.isStore
) {
906 switch ( info
.signEx
) {
911 assert(0 && "cannot use signed32 with 32-bit load/store");
921 assert((info
.offset
% 8) == 0);
922 imm12Bits
= info
.offset
/8;
923 if ( info
.isStore
) {
928 assert(info
.signEx
== signedNot
);
933 assert((info
.offset
% 16) == 0);
934 imm12Bits
= info
.offset
/16;
935 assert(info
.isFloat
);
936 if ( info
.isStore
) {
944 assert(0 && "bad load/store size");
947 assert(imm12Bits
< 4096);
948 return (instruction
| (sizeBits
<< 30) | (opcBits
<< 22) | (imm12Bits
<< 10));
// ---------------------------------------------------------------------------
// NOTE(review): corrupted listing -- see the note above makeLoadOrStore.
// The embedded integers are the original file's line numbers; the dropped
// lines here include the opening brace, the early "return false", every
// case label of the switch on (instruction & 0xC0C00000) together with the
// info.size assignments, and the final "return true".
//
// parseLoadOrStore(): decodes an ARM64 load/store register-immediate
// instruction (class 0x39000000 under mask 0x3B000000) into a LoadStoreInfo:
// isFloat from the V bit (0x04000000), reg from bits [4:0], baseReg from
// bits [9:5], then isStore/signEx/size from the size+V+opc bit combination,
// and finally the byte offset = imm12 (bits [21:10]) scaled by info.size.
// ---------------------------------------------------------------------------
951 static bool parseLoadOrStore(uint32_t instruction
, LoadStoreInfo
& info
)
// reject anything outside the load/store-immediate class
953 if ( (instruction
& 0x3B000000) != 0x39000000 )
955 info
.isFloat
= ( (instruction
& 0x04000000) != 0 );
956 info
.reg
= (instruction
& 0x1F);
957 info
.baseReg
= ((instruction
>>5) & 0x1F);
// 0xC0C00000 selects the size bits [31:30] and opc bits [23:22]; each of
// the (dropped) case labels below corresponds to one size/opc combination
// and records the matching isStore / signEx (and, in dropped lines, size).
958 switch (instruction
& 0xC0C00000) {
962 info
.signEx
= signedNot
;
966 info
.isStore
= false;
967 info
.signEx
= signedNot
;
970 if ( info
.isFloat
) {
973 info
.signEx
= signedNot
;
977 info
.isStore
= false;
978 info
.signEx
= signed64
;
982 if ( info
.isFloat
) {
984 info
.isStore
= false;
985 info
.signEx
= signedNot
;
989 info
.isStore
= false;
990 info
.signEx
= signed32
;
996 info
.signEx
= signedNot
;
1000 info
.isStore
= false;
1001 info
.signEx
= signedNot
;
1005 info
.isStore
= false;
1006 info
.signEx
= signed64
;
1010 info
.isStore
= false;
1011 info
.signEx
= signed32
;
1015 info
.isStore
= true;
1016 info
.signEx
= signedNot
;
1020 info
.isStore
= false;
1021 info
.signEx
= signedNot
;
1025 info
.isStore
= false;
1026 info
.signEx
= signed64
;
1030 info
.isStore
= true;
1031 info
.signEx
= signedNot
;
1035 info
.isStore
= false;
1036 info
.signEx
= signedNot
;
// byte offset: the unsigned imm12 field scaled by the access size
1041 info
.offset
= ((instruction
>> 10) & 0x0FFF) * info
.size
;
// ---------------------------------------------------------------------------
// NOTE(review): corrupted listing (see notes above) -- the opening brace,
// the early "return false", and the tail of this function (original lines
// 1054-1061) were dropped by the extraction.
//
// parseADRP(): recognizes an ARM64 ADRP instruction (pattern 0x90000000
// under mask 0x9F000000) and records its destination register (bits [4:0])
// into the AdrpInfo.
// ---------------------------------------------------------------------------
1049 static bool parseADRP(uint32_t instruction
, AdrpInfo
& info
)
1051 if ( (instruction
& 0x9F000000) != 0x90000000 )
1053 info
.destReg
= (instruction
& 0x1F);
1063 static bool parseADD(uint32_t instruction
, AddInfo
& info
)
1065 if ( (instruction
& 0xFFC00000) != 0x91000000 )
1067 info
.destReg
= (instruction
& 0x1F);
1068 info
.srcReg
= ((instruction
>>5) & 0x1F);
1069 info
.addend
= ((instruction
>>10) & 0xFFF);
// ---------------------------------------------------------------------------
// NOTE(review): corrupted listing (see notes above).  The bodies of both
// switches (original lines 1085-1094 and 1098-1115: the case labels and the
// sizeBits/opcBits assignments) were dropped by the extraction.
//
// makeLDR_scaledOffset(): encodes an ARM64 LDR with an unsigned scaled
// immediate offset (class opcode 0x39000000) from a LoadStoreInfo.  The
// byte offset must be a multiple of the access size (assert at 1118) and
// the scaled offset must fit the 12-bit field (assert at 1120).  Layout of
// the final word: sizeBits<<30 | vBit<<26 | opcBits<<22 | scaledOffset<<10
// | baseReg<<5 | reg.
// NOTE(review): the assert at 1080 reads "!isFloat || signEx != signedNot",
// i.e. float loads must carry a sign-extension marker -- surprising; the
// dropped neighbouring lines may change its meaning.  Confirm upstream.
// ---------------------------------------------------------------------------
1076 static uint32_t makeLDR_scaledOffset(const LoadStoreInfo
& info
)
// registers are 5-bit fields
1078 assert((info
.reg
& 0xFFFFFFE0) == 0);
1079 assert((info
.baseReg
& 0xFFFFFFE0) == 0);
1080 assert(!info
.isFloat
|| (info
.signEx
!= signedNot
));
1081 uint32_t sizeBits
= 0;
1082 uint32_t opcBits
= 1;
1083 uint32_t vBit
= info
.isFloat
;
// (dropped) cases map each SignExtension value to opcBits/sizeBits
1084 switch ( info
.signEx
) {
1095 assert(0 && "bad SignExtension runtime value");
// (dropped) cases map each access size to sizeBits/vBit adjustments
1097 switch ( info
.size
) {
1116 assert(0 && "invalid load size for literal");
1118 assert((info
.offset
% info
.size
) == 0);
1119 uint32_t scaledOffset
= info
.offset
/info
.size
;
1120 assert(scaledOffset
< 4096);
1121 return (0x39000000 | (sizeBits
<<30) | (vBit
<<26) | (opcBits
<<22) | (scaledOffset
<<10) | (info
.baseReg
<<5) | info
.reg
);
// Encodes an ARM64 "LDR (literal)" instruction: a pc-relative load of
// loadSize bytes into destReg from targetAddress.  The displacement must be
// within +/-1MB and word-aligned; imm19 occupies bits [23:5] with the low
// two bits of the displacement implied zero.
// NOTE(review): reconstructed from a corrupted listing -- the opening brace,
// case labels and 'break's were dropped by extraction; the visible opcodes
// (0x18/0x1C for 32-bit int/FP, 0x58/0x5C for 64-bit, 0x9C for 128-bit)
// fix the mapping to load sizes 4, 8 and 16.  Verify against upstream ld64.
static uint32_t makeLDR_literal(uint32_t destReg, uint32_t loadSize, bool isFloat, uint64_t targetAddress, uint64_t instructionAddress)
{
	int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	assert((destReg & 0xFFFFFFE0) == 0);	// destReg is a 5-bit register number
	assert((targetAddress & 0x3) == 0);		// literal loads are word-aligned
	assert((instructionAddress & 0x3) == 0);
	uint32_t imm19 = (delta << 3) & 0x00FFFFE0;
	uint32_t instruction = 0;
	switch ( loadSize ) {
		case 4:
			instruction = isFloat ? 0x1C000000 : 0x18000000;	// LDR S/W (literal)
			break;
		case 8:
			instruction = isFloat ? 0x5C000000 : 0x58000000;	// LDR D/X (literal)
			break;
		case 16:
			instruction = 0x9C000000;							// LDR Q (literal)
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	return (instruction | imm19 | destReg);
}
// Decodes an ARM64 LDR (immediate, unsigned scaled offset): reports the
// access size in bytes, destination register, whether it is a vector/FP
// load (V bit 0x04000000), and the byte offset (imm12 scaled by the size).
// Returns true only when the word really is an LDR of that class
// ((instruction & 0x3B400000) == 0x39400000); outputs are written
// unconditionally either way.
// NOTE(review): reconstructed from a corrupted listing -- the case labels
// and '*size = ...' lines were dropped by extraction; the visible
// 'imm12 * {1,16,2,4,8}' scalings fix the mapping.  Verify against
// upstream ld64.
static bool ldrInfo(uint32_t instruction, uint8_t* size, uint8_t* destReg, bool* v, uint32_t* scaledOffset)
{
	*v = ( (instruction & 0x04000000) != 0 );
	*destReg = (instruction & 0x1F);
	uint32_t imm12 = ((instruction >> 10) & 0x00000FFF);
	switch ( (instruction & 0xC0000000) >> 30 ) {
		case 0:
			// vector and byte LDR have same "size" bits, need to check other bits to differenciate
			if ( (instruction & 0x00800000) == 0 ) {
				*size = 1;
				*scaledOffset = imm12;
			}
			else {
				*size = 16;
				*scaledOffset = imm12 * 16;
			}
			break;
		case 1:
			*size = 2;
			*scaledOffset = imm12 * 2;
			break;
		case 2:
			*size = 4;
			*scaledOffset = imm12 * 4;
			break;
		case 3:
			*size = 8;
			*scaledOffset = imm12 * 8;
			break;
	}
	return ((instruction & 0x3B400000) == 0x39400000);
}
// Returns true iff addr2 is within (-1MB, +1MB) of addr1 -- the reach of
// ARM64 ADR / LDR-literal pc-relative addressing used by the LOH optimizer.
// NOTE(review): the closing brace (original line 1188) was dropped by the
// extraction; restored here.
static bool withinOneMeg(uint64_t addr1, uint64_t addr2) {
	// unsigned subtraction reinterpreted as a signed displacement
	int64_t delta = (addr2 - addr1);
	return ( (delta < 1024*1024) && (delta > -1024*1024) );
}
1189 #endif // SUPPORT_ARCH_arm64
1191 void OutputFile::setInfo(ld::Internal
& state
, const ld::Atom
* atom
, uint8_t* buffer
, const std::map
<uint32_t, const Fixup
*>& usedByHints
,
1192 uint32_t offsetInAtom
, uint32_t delta
, InstructionInfo
* info
)
1194 info
->offsetInAtom
= offsetInAtom
+ delta
;
1195 std::map
<uint32_t, const Fixup
*>::const_iterator pos
= usedByHints
.find(info
->offsetInAtom
);
1196 if ( (pos
!= usedByHints
.end()) && (pos
->second
!= NULL
) ) {
1197 info
->fixup
= pos
->second
;
1198 info
->targetAddress
= addressOf(state
, info
->fixup
, &info
->target
);
1199 if ( info
->fixup
->clusterSize
!= ld::Fixup::k1of1
) {
1200 assert(info
->fixup
->firstInCluster());
1201 const ld::Fixup
* nextFixup
= info
->fixup
+ 1;
1202 if ( nextFixup
->kind
== ld::Fixup::kindAddAddend
) {
1203 info
->targetAddress
+= nextFixup
->u
.addend
;
1206 assert(0 && "expected addend");
1212 info
->targetAddress
= 0;
1213 info
->target
= NULL
;
1215 info
->instructionContent
= &buffer
[info
->offsetInAtom
];
1216 info
->instructionAddress
= atom
->finalAddress() + info
->offsetInAtom
;
1217 info
->instruction
= get32LE(info
->instructionContent
);
1220 #if SUPPORT_ARCH_arm64
1221 static bool isPageKind(const ld::Fixup
* fixup
, bool mustBeGOT
=false)
1223 if ( fixup
== NULL
)
1226 switch ( fixup
->kind
) {
1227 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
1229 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
1230 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
1231 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
1232 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
1234 case ld::Fixup::kindSetTargetAddress
:
1238 } while ( ! f
->lastInCluster() );
1240 case ld::Fixup::kindStoreARM64Page21
:
1242 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
1243 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
1244 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
1245 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
1257 static bool isPageOffsetKind(const ld::Fixup
* fixup
, bool mustBeGOT
=false)
1259 if ( fixup
== NULL
)
1262 switch ( fixup
->kind
) {
1263 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
1265 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
1266 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
1267 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
1268 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
1270 case ld::Fixup::kindSetTargetAddress
:
1274 } while ( ! f
->lastInCluster() );
1276 case ld::Fixup::kindStoreARM64PageOff12
:
1278 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
1279 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
1280 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12
:
1281 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
1292 #endif // SUPPORT_ARCH_arm64
// NOTE(review): corrupted listing -- only two lines of this multi-line macro
// survive; the dropped continuation lines (original 1296, 1298-1300)
// presumably test 'cond' and bail out of the current hint after warning.
// LOH_ASSERT(cond): used by the linker-optimization-hint processing below to
// downgrade a failed precondition to a warning instead of aborting the link.
// "optimzation" is a typo in the original warning string; it is runtime text
// and is left byte-identical here.
1295 #define LOH_ASSERT(cond) \
1297 warning("ignoring linker optimzation hint at %s+0x%X because " #cond, atom->name(), fit->offsetInAtom); \
1301 void OutputFile::applyFixUps(ld::Internal
& state
, uint64_t mhAddress
, const ld::Atom
* atom
, uint8_t* buffer
)
1303 //fprintf(stderr, "applyFixUps() on %s\n", atom->name());
1304 int64_t accumulator
= 0;
1305 const ld::Atom
* toTarget
= NULL
;
1306 const ld::Atom
* fromTarget
;
1308 uint32_t instruction
;
1309 uint32_t newInstruction
;
1313 bool thumbTarget
= false;
1314 std::map
<uint32_t, const Fixup
*> usedByHints
;
1315 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
1316 uint8_t* fixUpLocation
= &buffer
[fit
->offsetInAtom
];
1317 ld::Fixup::LOH_arm64 lohExtra
;
1318 switch ( (ld::Fixup::Kind
)(fit
->kind
) ) {
1319 case ld::Fixup::kindNone
:
1320 case ld::Fixup::kindNoneFollowOn
:
1321 case ld::Fixup::kindNoneGroupSubordinate
:
1322 case ld::Fixup::kindNoneGroupSubordinateFDE
:
1323 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
1324 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
1326 case ld::Fixup::kindSetTargetAddress
:
1327 accumulator
= addressOf(state
, fit
, &toTarget
);
1328 thumbTarget
= targetIsThumb(state
, fit
);
1331 if ( fit
->contentAddendOnly
|| fit
->contentDetlaToAddendOnly
)
1334 case ld::Fixup::kindSubtractTargetAddress
:
1335 delta
= addressOf(state
, fit
, &fromTarget
);
1336 if ( ! fit
->contentAddendOnly
)
1337 accumulator
-= delta
;
1339 case ld::Fixup::kindAddAddend
:
1340 if ( ! fit
->contentIgnoresAddend
) {
1341 // <rdar://problem/8342028> ARM main executables main contain .long constants pointing
1342 // into themselves such as jump tables. These .long should not have thumb bit set
1343 // even though the target is a thumb instruction. We can tell it is an interior pointer
1344 // because we are processing an addend.
1345 if ( thumbTarget
&& (toTarget
== atom
) && ((int32_t)fit
->u
.addend
> 0) ) {
1346 accumulator
&= (-2);
1347 //warning("removing thumb bit from intra-atom pointer in %s %s+0x%0X",
1348 // atom->section().sectionName(), atom->name(), fit->offsetInAtom);
1350 accumulator
+= fit
->u
.addend
;
1353 case ld::Fixup::kindSubtractAddend
:
1354 accumulator
-= fit
->u
.addend
;
1356 case ld::Fixup::kindSetTargetImageOffset
:
1357 accumulator
= addressOf(state
, fit
, &toTarget
) - mhAddress
;
1358 thumbTarget
= targetIsThumb(state
, fit
);
1362 case ld::Fixup::kindSetTargetSectionOffset
:
1363 accumulator
= sectionOffsetOf(state
, fit
);
1365 case ld::Fixup::kindSetTargetTLVTemplateOffset
:
1366 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1368 case ld::Fixup::kindStore8
:
1369 *fixUpLocation
+= accumulator
;
1371 case ld::Fixup::kindStoreLittleEndian16
:
1372 set16LE(fixUpLocation
, accumulator
);
1374 case ld::Fixup::kindStoreLittleEndianLow24of32
:
1375 set32LE(fixUpLocation
, (get32LE(fixUpLocation
) & 0xFF000000) | (accumulator
& 0x00FFFFFF) );
1377 case ld::Fixup::kindStoreLittleEndian32
:
1378 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1379 set32LE(fixUpLocation
, accumulator
);
1381 case ld::Fixup::kindStoreLittleEndian64
:
1382 set64LE(fixUpLocation
, accumulator
);
1384 case ld::Fixup::kindStoreBigEndian16
:
1385 set16BE(fixUpLocation
, accumulator
);
1387 case ld::Fixup::kindStoreBigEndianLow24of32
:
1388 set32BE(fixUpLocation
, (get32BE(fixUpLocation
) & 0xFF000000) | (accumulator
& 0x00FFFFFF) );
1390 case ld::Fixup::kindStoreBigEndian32
:
1391 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1392 set32BE(fixUpLocation
, accumulator
);
1394 case ld::Fixup::kindStoreBigEndian64
:
1395 set64BE(fixUpLocation
, accumulator
);
1397 case ld::Fixup::kindStoreX86PCRel8
:
1398 case ld::Fixup::kindStoreX86BranchPCRel8
:
1399 if ( fit
->contentAddendOnly
)
1400 delta
= accumulator
;
1402 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 1);
1403 rangeCheck8(delta
, state
, atom
, fit
);
1404 *fixUpLocation
= delta
;
1406 case ld::Fixup::kindStoreX86PCRel16
:
1407 if ( fit
->contentAddendOnly
)
1408 delta
= accumulator
;
1410 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 2);
1411 rangeCheck16(delta
, state
, atom
, fit
);
1412 set16LE(fixUpLocation
, delta
);
1414 case ld::Fixup::kindStoreX86BranchPCRel32
:
1415 if ( fit
->contentAddendOnly
)
1416 delta
= accumulator
;
1418 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1419 rangeCheckBranch32(delta
, state
, atom
, fit
);
1420 set32LE(fixUpLocation
, delta
);
1422 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
1423 case ld::Fixup::kindStoreX86PCRel32GOT
:
1424 case ld::Fixup::kindStoreX86PCRel32
:
1425 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
1426 if ( fit
->contentAddendOnly
)
1427 delta
= accumulator
;
1429 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1430 rangeCheckRIP32(delta
, state
, atom
, fit
);
1431 set32LE(fixUpLocation
, delta
);
1433 case ld::Fixup::kindStoreX86PCRel32_1
:
1434 if ( fit
->contentAddendOnly
)
1435 delta
= accumulator
- 1;
1437 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 5);
1438 rangeCheckRIP32(delta
, state
, atom
, fit
);
1439 set32LE(fixUpLocation
, delta
);
1441 case ld::Fixup::kindStoreX86PCRel32_2
:
1442 if ( fit
->contentAddendOnly
)
1443 delta
= accumulator
- 2;
1445 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 6);
1446 rangeCheckRIP32(delta
, state
, atom
, fit
);
1447 set32LE(fixUpLocation
, delta
);
1449 case ld::Fixup::kindStoreX86PCRel32_4
:
1450 if ( fit
->contentAddendOnly
)
1451 delta
= accumulator
- 4;
1453 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1454 rangeCheckRIP32(delta
, state
, atom
, fit
);
1455 set32LE(fixUpLocation
, delta
);
1457 case ld::Fixup::kindStoreX86Abs32TLVLoad
:
1458 set32LE(fixUpLocation
, accumulator
);
1460 case ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA
:
1461 assert(_options
.outputKind() != Options::kObjectFile
);
1462 // TLV entry was optimized away, change movl instruction to a leal
1463 if ( fixUpLocation
[-1] != 0xA1 )
1464 throw "TLV load reloc does not point to a movl instruction";
1465 fixUpLocation
[-1] = 0xB8;
1466 set32LE(fixUpLocation
, accumulator
);
1468 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
1469 assert(_options
.outputKind() != Options::kObjectFile
);
1470 // GOT entry was optimized away, change movq instruction to a leaq
1471 if ( fixUpLocation
[-2] != 0x8B )
1472 throw "GOT load reloc does not point to a movq instruction";
1473 fixUpLocation
[-2] = 0x8D;
1474 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1475 rangeCheckRIP32(delta
, state
, atom
, fit
);
1476 set32LE(fixUpLocation
, delta
);
1478 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
1479 assert(_options
.outputKind() != Options::kObjectFile
);
1480 // TLV entry was optimized away, change movq instruction to a leaq
1481 if ( fixUpLocation
[-2] != 0x8B )
1482 throw "TLV load reloc does not point to a movq instruction";
1483 fixUpLocation
[-2] = 0x8D;
1484 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1485 rangeCheckRIP32(delta
, state
, atom
, fit
);
1486 set32LE(fixUpLocation
, delta
);
1488 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
1489 accumulator
= addressOf(state
, fit
, &toTarget
);
1490 // fall into kindStoreARMLoad12 case
1491 case ld::Fixup::kindStoreARMLoad12
:
1492 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1493 rangeCheckARM12(delta
, state
, atom
, fit
);
1494 instruction
= get32LE(fixUpLocation
);
1496 newInstruction
= instruction
& 0xFFFFF000;
1497 newInstruction
|= ((uint32_t)delta
& 0xFFF);
1500 newInstruction
= instruction
& 0xFF7FF000;
1501 newInstruction
|= ((uint32_t)(-delta
) & 0xFFF);
1503 set32LE(fixUpLocation
, newInstruction
);
1505 case ld::Fixup::kindDtraceExtra
:
1507 case ld::Fixup::kindStoreX86DtraceCallSiteNop
:
1508 if ( _options
.outputKind() != Options::kObjectFile
) {
1509 // change call site to a NOP
1510 fixUpLocation
[-1] = 0x90; // 1-byte nop
1511 fixUpLocation
[0] = 0x0F; // 4-byte nop
1512 fixUpLocation
[1] = 0x1F;
1513 fixUpLocation
[2] = 0x40;
1514 fixUpLocation
[3] = 0x00;
1517 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear
:
1518 if ( _options
.outputKind() != Options::kObjectFile
) {
1519 // change call site to a clear eax
1520 fixUpLocation
[-1] = 0x33; // xorl eax,eax
1521 fixUpLocation
[0] = 0xC0;
1522 fixUpLocation
[1] = 0x90; // 1-byte nop
1523 fixUpLocation
[2] = 0x90; // 1-byte nop
1524 fixUpLocation
[3] = 0x90; // 1-byte nop
1527 case ld::Fixup::kindStoreARMDtraceCallSiteNop
:
1528 if ( _options
.outputKind() != Options::kObjectFile
) {
1529 // change call site to a NOP
1530 set32LE(fixUpLocation
, 0xE1A00000);
1533 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear
:
1534 if ( _options
.outputKind() != Options::kObjectFile
) {
1535 // change call site to 'eor r0, r0, r0'
1536 set32LE(fixUpLocation
, 0xE0200000);
1539 case ld::Fixup::kindStoreThumbDtraceCallSiteNop
:
1540 if ( _options
.outputKind() != Options::kObjectFile
) {
1541 // change 32-bit blx call site to two thumb NOPs
1542 set32LE(fixUpLocation
, 0x46C046C0);
1545 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear
:
1546 if ( _options
.outputKind() != Options::kObjectFile
) {
1547 // change 32-bit blx call site to 'nop', 'eor r0, r0'
1548 set32LE(fixUpLocation
, 0x46C04040);
1551 case ld::Fixup::kindStoreARM64DtraceCallSiteNop
:
1552 if ( _options
.outputKind() != Options::kObjectFile
) {
1553 // change call site to a NOP
1554 set32LE(fixUpLocation
, 0xD503201F);
1557 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear
:
1558 if ( _options
.outputKind() != Options::kObjectFile
) {
1559 // change call site to 'MOVZ X0,0'
1560 set32LE(fixUpLocation
, 0xD2800000);
1563 case ld::Fixup::kindLazyTarget
:
1564 case ld::Fixup::kindIslandTarget
:
1566 case ld::Fixup::kindSetLazyOffset
:
1567 assert(fit
->binding
== ld::Fixup::bindingDirectlyBound
);
1568 accumulator
= this->lazyBindingInfoOffsetForLazyPointerAddress(fit
->u
.target
->finalAddress());
1570 case ld::Fixup::kindDataInCodeStartData
:
1571 case ld::Fixup::kindDataInCodeStartJT8
:
1572 case ld::Fixup::kindDataInCodeStartJT16
:
1573 case ld::Fixup::kindDataInCodeStartJT32
:
1574 case ld::Fixup::kindDataInCodeStartJTA32
:
1575 case ld::Fixup::kindDataInCodeEnd
:
1577 case ld::Fixup::kindLinkerOptimizationHint
:
1578 // expand table of address/offsets used by hints
1579 lohExtra
.addend
= fit
->u
.addend
;
1580 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta1
<< 2)] = NULL
;
1581 if ( lohExtra
.info
.count
> 0 )
1582 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta2
<< 2)] = NULL
;
1583 if ( lohExtra
.info
.count
> 1 )
1584 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta3
<< 2)] = NULL
;
1585 if ( lohExtra
.info
.count
> 2 )
1586 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta4
<< 2)] = NULL
;
1588 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
1589 accumulator
= addressOf(state
, fit
, &toTarget
);
1590 thumbTarget
= targetIsThumb(state
, fit
);
1593 if ( fit
->contentAddendOnly
)
1595 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1596 set32LE(fixUpLocation
, accumulator
);
1598 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
1599 accumulator
= addressOf(state
, fit
, &toTarget
);
1600 if ( fit
->contentAddendOnly
)
1602 set64LE(fixUpLocation
, accumulator
);
1604 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
1605 accumulator
= addressOf(state
, fit
, &toTarget
);
1606 if ( fit
->contentAddendOnly
)
1608 set32BE(fixUpLocation
, accumulator
);
1610 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
1611 accumulator
= addressOf(state
, fit
, &toTarget
);
1612 if ( fit
->contentAddendOnly
)
1614 set64BE(fixUpLocation
, accumulator
);
1616 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32
:
1617 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1618 set32LE(fixUpLocation
, accumulator
);
1620 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64
:
1621 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1622 set64LE(fixUpLocation
, accumulator
);
1624 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
1625 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
1626 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
1627 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
1628 accumulator
= addressOf(state
, fit
, &toTarget
);
1629 if ( fit
->contentDetlaToAddendOnly
)
1631 if ( fit
->contentAddendOnly
)
1634 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1635 rangeCheckRIP32(delta
, state
, atom
, fit
);
1636 set32LE(fixUpLocation
, delta
);
1638 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad
:
1639 set32LE(fixUpLocation
, accumulator
);
1641 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA
:
1642 // TLV entry was optimized away, change movl instruction to a leal
1643 if ( fixUpLocation
[-1] != 0xA1 )
1644 throw "TLV load reloc does not point to a movl <abs-address>,<reg> instruction";
1645 fixUpLocation
[-1] = 0xB8;
1646 accumulator
= addressOf(state
, fit
, &toTarget
);
1647 set32LE(fixUpLocation
, accumulator
);
1649 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
1650 // GOT entry was optimized away, change movq instruction to a leaq
1651 if ( fixUpLocation
[-2] != 0x8B )
1652 throw "GOT load reloc does not point to a movq instruction";
1653 fixUpLocation
[-2] = 0x8D;
1654 accumulator
= addressOf(state
, fit
, &toTarget
);
1655 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1656 rangeCheckRIP32(delta
, state
, atom
, fit
);
1657 set32LE(fixUpLocation
, delta
);
1659 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
1660 // TLV entry was optimized away, change movq instruction to a leaq
1661 if ( fixUpLocation
[-2] != 0x8B )
1662 throw "TLV load reloc does not point to a movq instruction";
1663 fixUpLocation
[-2] = 0x8D;
1664 accumulator
= addressOf(state
, fit
, &toTarget
);
1665 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1666 rangeCheckRIP32(delta
, state
, atom
, fit
);
1667 set32LE(fixUpLocation
, delta
);
1669 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
1670 accumulator
= addressOf(state
, fit
, &toTarget
);
1671 thumbTarget
= targetIsThumb(state
, fit
);
1672 if ( toTarget
->contentType() == ld::Atom::typeBranchIsland
) {
1673 // Branching to island. If ultimate target is in range, branch there directly.
1674 for (ld::Fixup::iterator islandfit
= toTarget
->fixupsBegin(), end
=toTarget
->fixupsEnd(); islandfit
!= end
; ++islandfit
) {
1675 if ( islandfit
->kind
== ld::Fixup::kindIslandTarget
) {
1676 const ld::Atom
* islandTarget
= NULL
;
1677 uint64_t islandTargetAddress
= addressOf(state
, islandfit
, &islandTarget
);
1678 delta
= islandTargetAddress
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1679 if ( checkArmBranch24Displacement(delta
) ) {
1680 toTarget
= islandTarget
;
1681 accumulator
= islandTargetAddress
;
1682 thumbTarget
= targetIsThumb(state
, islandfit
);
1690 if ( fit
->contentDetlaToAddendOnly
)
1692 // fall into kindStoreARMBranch24 case
1693 case ld::Fixup::kindStoreARMBranch24
:
1694 // The pc added will be +8 from the pc
1695 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1696 rangeCheckARMBranch24(delta
, state
, atom
, fit
);
1697 instruction
= get32LE(fixUpLocation
);
1698 // Make sure we are calling arm with bl, thumb with blx
1699 is_bl
= ((instruction
& 0xFF000000) == 0xEB000000);
1700 is_blx
= ((instruction
& 0xFE000000) == 0xFA000000);
1701 is_b
= !is_blx
&& ((instruction
& 0x0F000000) == 0x0A000000);
1702 if ( (is_bl
| is_blx
) && thumbTarget
) {
1703 uint32_t opcode
= 0xFA000000; // force to be blx
1704 uint32_t disp
= (uint32_t)(delta
>> 2) & 0x00FFFFFF;
1705 uint32_t h_bit
= (uint32_t)(delta
<< 23) & 0x01000000;
1706 newInstruction
= opcode
| h_bit
| disp
;
1708 else if ( (is_bl
| is_blx
) && !thumbTarget
) {
1709 uint32_t opcode
= 0xEB000000; // force to be bl
1710 uint32_t disp
= (uint32_t)(delta
>> 2) & 0x00FFFFFF;
1711 newInstruction
= opcode
| disp
;
1713 else if ( is_b
&& thumbTarget
) {
1714 if ( fit
->contentDetlaToAddendOnly
)
1715 newInstruction
= (instruction
& 0xFF000000) | ((uint32_t)(delta
>> 2) & 0x00FFFFFF);
1717 throwf("no pc-rel bx arm instruction. Can't fix up branch to %s in %s",
1718 referenceTargetAtomName(state
, fit
), atom
->name());
1720 else if ( !is_bl
&& !is_blx
&& thumbTarget
) {
1721 throwf("don't know how to convert instruction %x referencing %s to thumb",
1722 instruction
, referenceTargetAtomName(state
, fit
));
1725 newInstruction
= (instruction
& 0xFF000000) | ((uint32_t)(delta
>> 2) & 0x00FFFFFF);
1727 set32LE(fixUpLocation
, newInstruction
);
1729 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
1730 accumulator
= addressOf(state
, fit
, &toTarget
);
1731 thumbTarget
= targetIsThumb(state
, fit
);
1732 if ( toTarget
->contentType() == ld::Atom::typeBranchIsland
) {
1733 // branching to island, so see if ultimate target is in range
1734 // and if so branch to ultimate target instead.
1735 for (ld::Fixup::iterator islandfit
= toTarget
->fixupsBegin(), end
=toTarget
->fixupsEnd(); islandfit
!= end
; ++islandfit
) {
1736 if ( islandfit
->kind
== ld::Fixup::kindIslandTarget
) {
1737 const ld::Atom
* islandTarget
= NULL
;
1738 uint64_t islandTargetAddress
= addressOf(state
, islandfit
, &islandTarget
);
1739 if ( !fit
->contentDetlaToAddendOnly
) {
1740 if ( targetIsThumb(state
, islandfit
) ) {
1741 // Thumb to thumb branch, we will be generating a bl instruction.
1742 // Delta is always even, so mask out thumb bit in target.
1743 islandTargetAddress
&= -2ULL;
1746 // Target is not thumb, we will be generating a blx instruction
1747 // Since blx cannot have the low bit set, set bit[1] of the target to
1748 // bit[1] of the base address, so that the difference is a multiple of
1750 islandTargetAddress
&= -3ULL;
1751 islandTargetAddress
|= ((atom
->finalAddress() + fit
->offsetInAtom
) & 2LL);
1754 delta
= islandTargetAddress
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1755 if ( checkThumbBranch22Displacement(delta
) ) {
1756 toTarget
= islandTarget
;
1757 accumulator
= islandTargetAddress
;
1758 thumbTarget
= targetIsThumb(state
, islandfit
);
1766 if ( fit
->contentDetlaToAddendOnly
)
1768 // fall into kindStoreThumbBranch22 case
1769 case ld::Fixup::kindStoreThumbBranch22
:
1770 instruction
= get32LE(fixUpLocation
);
1771 is_bl
= ((instruction
& 0xD000F800) == 0xD000F000);
1772 is_blx
= ((instruction
& 0xD000F800) == 0xC000F000);
1773 is_b
= ((instruction
& 0xD000F800) == 0x9000F000);
1774 if ( !fit
->contentDetlaToAddendOnly
) {
1775 if ( thumbTarget
) {
1776 // Thumb to thumb branch, we will be generating a bl instruction.
1777 // Delta is always even, so mask out thumb bit in target.
1778 accumulator
&= -2ULL;
1781 // Target is not thumb, we will be generating a blx instruction
1782 // Since blx cannot have the low bit set, set bit[1] of the target to
1783 // bit[1] of the base address, so that the difference is a multiple of
1785 accumulator
&= -3ULL;
1786 accumulator
|= ((atom
->finalAddress() + fit
->offsetInAtom
) & 2LL);
1789 // The pc added will be +4 from the pc
1790 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1791 // <rdar://problem/16652542> support bl in very large .o files
1792 if ( fit
->contentDetlaToAddendOnly
) {
1793 while ( delta
< (-16777216LL) )
1796 rangeCheckThumbBranch22(delta
, state
, atom
, fit
);
1797 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
1798 // The instruction is really two instructions:
1799 // The lower 16 bits are the first instruction, which contains the high
1800 // 11 bits of the displacement.
1801 // The upper 16 bits are the second instruction, which contains the low
1802 // 11 bits of the displacement, as well as differentiating bl and blx.
1803 uint32_t s
= (uint32_t)(delta
>> 24) & 0x1;
1804 uint32_t i1
= (uint32_t)(delta
>> 23) & 0x1;
1805 uint32_t i2
= (uint32_t)(delta
>> 22) & 0x1;
1806 uint32_t imm10
= (uint32_t)(delta
>> 12) & 0x3FF;
1807 uint32_t imm11
= (uint32_t)(delta
>> 1) & 0x7FF;
1808 uint32_t j1
= (i1
== s
);
1809 uint32_t j2
= (i2
== s
);
1812 instruction
= 0xD000F000; // keep bl
1814 instruction
= 0xC000F000; // change to blx
1816 else if ( is_blx
) {
1818 instruction
= 0xD000F000; // change to bl
1820 instruction
= 0xC000F000; // keep blx
1823 instruction
= 0x9000F000; // keep b
1824 if ( !thumbTarget
&& !fit
->contentDetlaToAddendOnly
) {
1825 throwf("armv7 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1826 referenceTargetAtomName(state
, fit
), atom
->name());
1831 throwf("don't know how to convert branch instruction %x referencing %s to bx",
1832 instruction
, referenceTargetAtomName(state
, fit
));
1833 instruction
= 0x9000F000; // keep b
1835 uint32_t nextDisp
= (j1
<< 13) | (j2
<< 11) | imm11
;
1836 uint32_t firstDisp
= (s
<< 10) | imm10
;
1837 newInstruction
= instruction
| (nextDisp
<< 16) | firstDisp
;
1838 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, instruction=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
1839 // s, j1, j2, imm10, imm11, instruction, firstDisp, nextDisp, newInstruction, delta, atom->name(), toTarget->name());
1840 set32LE(fixUpLocation
, newInstruction
);
1843 // The instruction is really two instructions:
1844 // The lower 16 bits are the first instruction, which contains the high
1845 // 11 bits of the displacement.
1846 // The upper 16 bits are the second instruction, which contains the low
1847 // 11 bits of the displacement, as well as differentiating bl and blx.
1848 uint32_t firstDisp
= (uint32_t)(delta
>> 12) & 0x7FF;
1849 uint32_t nextDisp
= (uint32_t)(delta
>> 1) & 0x7FF;
1850 if ( is_bl
&& !thumbTarget
) {
1851 instruction
= 0xE800F000;
1853 else if ( is_blx
&& thumbTarget
) {
1854 instruction
= 0xF800F000;
1857 instruction
= 0x9000F000; // keep b
1858 if ( !thumbTarget
&& !fit
->contentDetlaToAddendOnly
) {
1859 throwf("armv6 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1860 referenceTargetAtomName(state
, fit
), atom
->name());
1864 instruction
= instruction
& 0xF800F800;
1866 newInstruction
= instruction
| (nextDisp
<< 16) | firstDisp
;
1867 set32LE(fixUpLocation
, newInstruction
);
1870 case ld::Fixup::kindStoreARMLow16
:
1872 uint32_t imm4
= (accumulator
& 0x0000F000) >> 12;
1873 uint32_t imm12
= accumulator
& 0x00000FFF;
1874 instruction
= get32LE(fixUpLocation
);
1875 newInstruction
= (instruction
& 0xFFF0F000) | (imm4
<< 16) | imm12
;
1876 set32LE(fixUpLocation
, newInstruction
);
1879 case ld::Fixup::kindStoreARMHigh16
:
1881 uint32_t imm4
= (accumulator
& 0xF0000000) >> 28;
1882 uint32_t imm12
= (accumulator
& 0x0FFF0000) >> 16;
1883 instruction
= get32LE(fixUpLocation
);
1884 newInstruction
= (instruction
& 0xFFF0F000) | (imm4
<< 16) | imm12
;
1885 set32LE(fixUpLocation
, newInstruction
);
1888 case ld::Fixup::kindStoreThumbLow16
:
1890 uint32_t imm4
= (accumulator
& 0x0000F000) >> 12;
1891 uint32_t i
= (accumulator
& 0x00000800) >> 11;
1892 uint32_t imm3
= (accumulator
& 0x00000700) >> 8;
1893 uint32_t imm8
= accumulator
& 0x000000FF;
1894 instruction
= get32LE(fixUpLocation
);
1895 newInstruction
= (instruction
& 0x8F00FBF0) | imm4
| (i
<< 10) | (imm3
<< 28) | (imm8
<< 16);
1896 set32LE(fixUpLocation
, newInstruction
);
1899 case ld::Fixup::kindStoreThumbHigh16
:
1901 uint32_t imm4
= (accumulator
& 0xF0000000) >> 28;
1902 uint32_t i
= (accumulator
& 0x08000000) >> 27;
1903 uint32_t imm3
= (accumulator
& 0x07000000) >> 24;
1904 uint32_t imm8
= (accumulator
& 0x00FF0000) >> 16;
1905 instruction
= get32LE(fixUpLocation
);
1906 newInstruction
= (instruction
& 0x8F00FBF0) | imm4
| (i
<< 10) | (imm3
<< 28) | (imm8
<< 16);
1907 set32LE(fixUpLocation
, newInstruction
);
1910 #if SUPPORT_ARCH_arm64
1911 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
1912 accumulator
= addressOf(state
, fit
, &toTarget
);
1913 // fall into kindStoreARM64Branch26 case
1914 case ld::Fixup::kindStoreARM64Branch26
:
1915 if ( fit
->contentAddendOnly
)
1916 delta
= accumulator
;
1918 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
);
1919 rangeCheckARM64Branch26(delta
, state
, atom
, fit
);
1920 instruction
= get32LE(fixUpLocation
);
1921 newInstruction
= (instruction
& 0xFC000000) | ((uint32_t)(delta
>> 2) & 0x03FFFFFF);
1922 set32LE(fixUpLocation
, newInstruction
);
1924 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
1925 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
1926 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
1927 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
1928 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
1929 accumulator
= addressOf(state
, fit
, &toTarget
);
1930 // fall into kindStoreARM64Branch26 case
1931 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
1932 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
1933 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
1934 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
1935 case ld::Fixup::kindStoreARM64Page21
:
1937 // the ADRP instruction adds the imm << 12 to the page that the pc is on
1938 if ( fit
->contentAddendOnly
)
1941 delta
= (accumulator
& (-4096)) - ((atom
->finalAddress() + fit
->offsetInAtom
) & (-4096));
1942 rangeCheckARM64Page21(delta
, state
, atom
, fit
);
1943 instruction
= get32LE(fixUpLocation
);
1944 uint32_t immhi
= (delta
>> 9) & (0x00FFFFE0);
1945 uint32_t immlo
= (delta
<< 17) & (0x60000000);
1946 newInstruction
= (instruction
& 0x9F00001F) | immlo
| immhi
;
1947 set32LE(fixUpLocation
, newInstruction
);
1950 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
1951 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
1952 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
1953 accumulator
= addressOf(state
, fit
, &toTarget
);
1954 // fall into kindAddressARM64PageOff12 case
1955 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12
:
1956 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
1957 case ld::Fixup::kindStoreARM64PageOff12
:
1959 uint32_t offset
= accumulator
& 0x00000FFF;
1960 instruction
= get32LE(fixUpLocation
);
1961 // LDR/STR instruction have implicit scale factor, need to compensate for that
1962 if ( instruction
& 0x08000000 ) {
1963 uint32_t implictShift
= ((instruction
>> 30) & 0x3);
1964 switch ( implictShift
) {
1966 if ( (instruction
& 0x04800000) == 0x04800000 ) {
1967 // vector and byte LDR/STR have same "size" bits, need to check other bits to differenciate
1969 if ( (offset
& 0xF) != 0 ) {
1970 throwf("128-bit LDR/STR not 16-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1971 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
1972 addressOf(state
, fit
, &toTarget
));
1977 if ( (offset
& 0x1) != 0 ) {
1978 throwf("16-bit LDR/STR not 2-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1979 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
1980 addressOf(state
, fit
, &toTarget
));
1984 if ( (offset
& 0x3) != 0 ) {
1985 throwf("32-bit LDR/STR not 4-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1986 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
1987 addressOf(state
, fit
, &toTarget
));
1991 if ( (offset
& 0x7) != 0 ) {
1992 throwf("64-bit LDR/STR not 8-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1993 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
1994 addressOf(state
, fit
, &toTarget
));
1998 // compensate for implicit scale
1999 offset
>>= implictShift
;
2001 if ( fit
->contentAddendOnly
)
2003 uint32_t imm12
= offset
<< 10;
2004 newInstruction
= (instruction
& 0xFFC003FF) | imm12
;
2005 set32LE(fixUpLocation
, newInstruction
);
2008 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
2009 accumulator
= addressOf(state
, fit
, &toTarget
);
2010 // fall into kindStoreARM64GOTLoadPage21 case
2011 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
2013 // GOT entry was optimized away, change LDR instruction to a ADD
2014 instruction
= get32LE(fixUpLocation
);
2015 if ( (instruction
& 0xFFC00000) != 0xF9400000 )
2016 throwf("GOT load reloc does not point to a LDR instruction in %s", atom
->name());
2017 uint32_t offset
= accumulator
& 0x00000FFF;
2018 uint32_t imm12
= offset
<< 10;
2019 newInstruction
= 0x91000000 | imm12
| (instruction
& 0x000003FF);
2020 set32LE(fixUpLocation
, newInstruction
);
2023 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
2024 accumulator
= addressOf(state
, fit
, &toTarget
);
2025 // fall into kindStoreARM64TLVPLeaPageOff12 case
2026 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
2028 // TLV thunk in same linkage unit, so LEA it directly, changing LDR instruction to a ADD
2029 instruction
= get32LE(fixUpLocation
);
2030 if ( (instruction
& 0xFFC00000) != 0xF9400000 )
2031 throwf("TLV load reloc does not point to a LDR instruction in %s", atom
->name());
2032 uint32_t offset
= accumulator
& 0x00000FFF;
2033 uint32_t imm12
= offset
<< 10;
2034 newInstruction
= 0x91000000 | imm12
| (instruction
& 0x000003FF);
2035 set32LE(fixUpLocation
, newInstruction
);
2038 case ld::Fixup::kindStoreARM64PointerToGOT
:
2039 set64LE(fixUpLocation
, accumulator
);
2041 case ld::Fixup::kindStoreARM64PCRelToGOT
:
2042 if ( fit
->contentAddendOnly
)
2043 delta
= accumulator
;
2045 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
);
2046 set32LE(fixUpLocation
, delta
);
2052 #if SUPPORT_ARCH_arm64
2053 // after all fixups are done on atom, if there are potential optimizations, do those
2054 if ( (usedByHints
.size() != 0) && (_options
.outputKind() != Options::kObjectFile
) && !_options
.ignoreOptimizationHints() ) {
2055 // fill in second part of usedByHints map, so we can see the target of fixups that might be optimized
2056 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2057 switch ( fit
->kind
) {
2058 case ld::Fixup::kindLinkerOptimizationHint
:
2059 case ld::Fixup::kindNoneFollowOn
:
2060 case ld::Fixup::kindNoneGroupSubordinate
:
2061 case ld::Fixup::kindNoneGroupSubordinateFDE
:
2062 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
2063 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
2066 if ( fit
->firstInCluster() ) {
2067 std::map
<uint32_t, const Fixup
*>::iterator pos
= usedByHints
.find(fit
->offsetInAtom
);
2068 if ( pos
!= usedByHints
.end() ) {
2069 assert(pos
->second
== NULL
&& "two fixups in same hint location");
2071 //fprintf(stderr, "setting %s usedByHints[0x%04X], kind = %d\n", atom->name(), fit->offsetInAtom, fit->kind);
2077 // apply hints pass 1
2078 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2079 if ( fit
->kind
!= ld::Fixup::kindLinkerOptimizationHint
)
2081 InstructionInfo infoA
;
2082 InstructionInfo infoB
;
2083 InstructionInfo infoC
;
2084 InstructionInfo infoD
;
2085 LoadStoreInfo ldrInfoB
, ldrInfoC
;
2089 bool targetFourByteAligned
;
2090 bool literalableSize
, isADRP
, isADD
, isLDR
, isSTR
;
2091 //uint8_t loadSize, destReg;
2092 //uint32_t scaledOffset;
2094 ld::Fixup::LOH_arm64 alt
;
2095 alt
.addend
= fit
->u
.addend
;
2096 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta1
<< 2), &infoA
);
2097 if ( alt
.info
.count
> 0 )
2098 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta2
<< 2), &infoB
);
2099 if ( alt
.info
.count
> 1 )
2100 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta3
<< 2), &infoC
);
2101 if ( alt
.info
.count
> 2 )
2102 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta4
<< 2), &infoD
);
2104 if ( _options
.sharedRegionEligible() ) {
2105 if ( _options
.sharedRegionEncodingV2() ) {
2106 // In v2 format, all references might be move at dyld shared cache creation time
2107 usableSegment
= false;
2110 // In v1 format, only references to something in __TEXT segment could be optimized
2111 usableSegment
= (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0);
2115 // main executables can optimize any reference
2116 usableSegment
= true;
2119 switch ( alt
.info
.kind
) {
2120 case LOH_ARM64_ADRP_ADRP
:
2121 // processed in pass 2 because some ADRP may have been removed
2123 case LOH_ARM64_ADRP_LDR
:
2124 LOH_ASSERT(alt
.info
.count
== 1);
2125 LOH_ASSERT(isPageKind(infoA
.fixup
));
2126 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2127 LOH_ASSERT(infoA
.target
== infoB
.target
);
2128 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2129 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2131 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2132 // silently ignore LDRs transformed to ADD by TLV pass
2133 if ( !isLDR
&& infoB
.fixup
->kind
== ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
)
2136 LOH_ASSERT(ldrInfoB
.baseReg
== adrpInfoA
.destReg
);
2137 LOH_ASSERT(ldrInfoB
.offset
== (infoA
.targetAddress
& 0x00000FFF));
2138 literalableSize
= ( (ldrInfoB
.size
!= 1) && (ldrInfoB
.size
!= 2) );
2139 targetFourByteAligned
= ( (infoA
.targetAddress
& 0x3) == 0 );
2140 if ( literalableSize
&& usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2141 set32LE(infoA
.instructionContent
, makeNOP());
2142 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2143 if ( _options
.verboseOptimizationHints() )
2144 fprintf(stderr
, "adrp-ldr at 0x%08llX transformed to LDR literal, usableSegment=%d usableSegment\n", infoB
.instructionAddress
, usableSegment
);
2147 if ( _options
.verboseOptimizationHints() )
2148 fprintf(stderr
, "adrp-ldr at 0x%08llX not transformed, isLDR=%d, literalableSize=%d, inRange=%d, usableSegment=%d, scaledOffset=%d\n",
2149 infoB
.instructionAddress
, isLDR
, literalableSize
, withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
), usableSegment
, ldrInfoB
.offset
);
2152 case LOH_ARM64_ADRP_ADD_LDR
:
2153 LOH_ASSERT(alt
.info
.count
== 2);
2154 LOH_ASSERT(isPageKind(infoA
.fixup
));
2155 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2156 LOH_ASSERT(infoC
.fixup
== NULL
);
2157 LOH_ASSERT(infoA
.target
== infoB
.target
);
2158 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2159 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2161 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2163 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2164 isLDR
= parseLoadOrStore(infoC
.instruction
, ldrInfoC
);
2166 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2167 targetFourByteAligned
= ( ((infoB
.targetAddress
+ldrInfoC
.offset
) & 0x3) == 0 );
2168 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2169 if ( literalableSize
&& usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2170 // can do T1 transformation to LDR literal
2171 set32LE(infoA
.instructionContent
, makeNOP());
2172 set32LE(infoB
.instructionContent
, makeNOP());
2173 set32LE(infoC
.instructionContent
, makeLDR_literal(ldrInfoC
, infoA
.targetAddress
+ldrInfoC
.offset
, infoC
.instructionAddress
));
2174 if ( _options
.verboseOptimizationHints() ) {
2175 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC
.instructionAddress
);
2178 else if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2179 // can to T4 transformation and turn ADRP/ADD into ADR
2180 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
+ldrInfoC
.offset
, infoA
.instructionAddress
));
2181 set32LE(infoB
.instructionContent
, makeNOP());
2182 ldrInfoC
.offset
= 0; // offset is now in ADR instead of ADD or LDR
2183 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2184 set32LE(infoC
.instructionContent
, infoC
.instruction
& 0xFFC003FF);
2185 if ( _options
.verboseOptimizationHints() )
2186 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoB
.instructionAddress
);
2188 else if ( ((infoB
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2189 // can do T2 transformation by merging ADD into LD
2191 set32LE(infoB
.instructionContent
, makeNOP());
2192 ldrInfoC
.offset
+= addInfoB
.addend
;
2193 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2194 if ( _options
.verboseOptimizationHints() )
2195 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T2 transformed to ADRP/LDR \n", infoC
.instructionAddress
);
2198 if ( _options
.verboseOptimizationHints() )
2199 fprintf(stderr
, "adrp-add-ldr at 0x%08llX could not be transformed, loadSize=%d, literalableSize=%d, inRange=%d, usableSegment=%d, targetFourByteAligned=%d, imm12=%d\n",
2200 infoC
.instructionAddress
, ldrInfoC
.size
, literalableSize
, withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
), usableSegment
, targetFourByteAligned
, ldrInfoC
.offset
);
2203 case LOH_ARM64_ADRP_ADD
:
2204 LOH_ASSERT(alt
.info
.count
== 1);
2205 LOH_ASSERT(isPageKind(infoA
.fixup
));
2206 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2207 LOH_ASSERT(infoA
.target
== infoB
.target
);
2208 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2209 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2211 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2213 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2214 if ( usableSegment
&& withinOneMeg(infoA
.targetAddress
, infoA
.instructionAddress
) ) {
2215 // can do T4 transformation and use ADR
2216 set32LE(infoA
.instructionContent
, makeADR(addInfoB
.destReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2217 set32LE(infoB
.instructionContent
, makeNOP());
2218 if ( _options
.verboseOptimizationHints() )
2219 fprintf(stderr
, "adrp-add at 0x%08llX transformed to ADR\n", infoB
.instructionAddress
);
2222 if ( _options
.verboseOptimizationHints() )
2223 fprintf(stderr
, "adrp-add at 0x%08llX not transformed, isAdd=%d, inRange=%d, usableSegment=%d\n",
2224 infoB
.instructionAddress
, isADD
, withinOneMeg(infoA
.targetAddress
, infoA
.instructionAddress
), usableSegment
);
2227 case LOH_ARM64_ADRP_LDR_GOT_LDR
:
2228 LOH_ASSERT(alt
.info
.count
== 2);
2229 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2230 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2231 LOH_ASSERT(infoC
.fixup
== NULL
);
2232 LOH_ASSERT(infoA
.target
== infoB
.target
);
2233 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2234 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2236 isLDR
= parseLoadOrStore(infoC
.instruction
, ldrInfoC
);
2238 LOH_ASSERT(ldrInfoC
.offset
== 0);
2239 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2240 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2242 // target of GOT is external
2243 LOH_ASSERT(ldrInfoB
.size
== 8);
2244 LOH_ASSERT(!ldrInfoB
.isFloat
);
2245 LOH_ASSERT(ldrInfoC
.baseReg
== ldrInfoB
.reg
);
2246 //fprintf(stderr, "infoA.target=%p, %s, infoA.targetAddress=0x%08llX\n", infoA.target, infoA.target->name(), infoA.targetAddress);
2247 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2248 if ( usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2249 // can do T5 transform
2250 set32LE(infoA
.instructionContent
, makeNOP());
2251 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2252 if ( _options
.verboseOptimizationHints() ) {
2253 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T5 transformed to LDR literal of GOT plus LDR\n", infoC
.instructionAddress
);
2257 if ( _options
.verboseOptimizationHints() )
2258 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX no optimization done\n", infoC
.instructionAddress
);
2262 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2263 LOH_ASSERT(addInfoB
.srcReg
== adrpInfoA
.destReg
);
2264 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2265 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2266 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2267 if ( usableSegment
&& literalableSize
&& targetFourByteAligned
&& withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
) ) {
2268 // can do T1 transform
2269 set32LE(infoA
.instructionContent
, makeNOP());
2270 set32LE(infoB
.instructionContent
, makeNOP());
2271 set32LE(infoC
.instructionContent
, makeLDR_literal(ldrInfoC
, infoA
.targetAddress
, infoC
.instructionAddress
));
2272 if ( _options
.verboseOptimizationHints() )
2273 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC
.instructionAddress
);
2275 else if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2276 // can do T4 transform
2277 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2278 set32LE(infoB
.instructionContent
, makeNOP());
2279 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2280 if ( _options
.verboseOptimizationHints() ) {
2281 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoC
.instructionAddress
);
2284 else if ( (infoA
.targetAddress
% ldrInfoC
.size
) == 0 ) {
2285 // can do T2 transform
2286 set32LE(infoB
.instructionContent
, makeNOP());
2287 ldrInfoC
.baseReg
= adrpInfoA
.destReg
;
2288 ldrInfoC
.offset
= addInfoB
.addend
;
2289 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2290 if ( _options
.verboseOptimizationHints() ) {
2291 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADRP/NOP/LDR\n", infoC
.instructionAddress
);
2295 // T3 transform already done by ld::passes:got:doPass()
2296 if ( _options
.verboseOptimizationHints() ) {
2297 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T3 transformed to ADRP/ADD/LDR\n", infoC
.instructionAddress
);
2302 if ( _options
.verboseOptimizationHints() )
2303 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX not ADD or LDR\n", infoC
.instructionAddress
);
2306 case LOH_ARM64_ADRP_ADD_STR
:
2307 LOH_ASSERT(alt
.info
.count
== 2);
2308 LOH_ASSERT(isPageKind(infoA
.fixup
));
2309 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2310 LOH_ASSERT(infoC
.fixup
== NULL
);
2311 LOH_ASSERT(infoA
.target
== infoB
.target
);
2312 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2313 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2315 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2317 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2318 isSTR
= (parseLoadOrStore(infoC
.instruction
, ldrInfoC
) && ldrInfoC
.isStore
);
2320 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2321 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2322 // can to T4 transformation and turn ADRP/ADD into ADR
2323 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
+ldrInfoC
.offset
, infoA
.instructionAddress
));
2324 set32LE(infoB
.instructionContent
, makeNOP());
2325 ldrInfoC
.offset
= 0; // offset is now in ADR instead of ADD or LDR
2326 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2327 set32LE(infoC
.instructionContent
, infoC
.instruction
& 0xFFC003FF);
2328 if ( _options
.verboseOptimizationHints() )
2329 fprintf(stderr
, "adrp-add-str at 0x%08llX T4 transformed to ADR/STR\n", infoB
.instructionAddress
);
2331 else if ( ((infoB
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2332 // can do T2 transformation by merging ADD into STR
2334 set32LE(infoB
.instructionContent
, makeNOP());
2335 ldrInfoC
.offset
+= addInfoB
.addend
;
2336 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2337 if ( _options
.verboseOptimizationHints() )
2338 fprintf(stderr
, "adrp-add-str at 0x%08llX T2 transformed to ADRP/STR \n", infoC
.instructionAddress
);
2341 if ( _options
.verboseOptimizationHints() )
2342 fprintf(stderr
, "adrp-add-str at 0x%08llX could not be transformed, loadSize=%d, inRange=%d, usableSegment=%d, imm12=%d\n",
2343 infoC
.instructionAddress
, ldrInfoC
.size
, withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
), usableSegment
, ldrInfoC
.offset
);
2346 case LOH_ARM64_ADRP_LDR_GOT_STR
:
2347 LOH_ASSERT(alt
.info
.count
== 2);
2348 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2349 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2350 LOH_ASSERT(infoC
.fixup
== NULL
);
2351 LOH_ASSERT(infoA
.target
== infoB
.target
);
2352 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2353 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2355 isSTR
= (parseLoadOrStore(infoC
.instruction
, ldrInfoC
) && ldrInfoC
.isStore
);
2357 LOH_ASSERT(ldrInfoC
.offset
== 0);
2358 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2359 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2361 // target of GOT is external
2362 LOH_ASSERT(ldrInfoB
.size
== 8);
2363 LOH_ASSERT(!ldrInfoB
.isFloat
);
2364 LOH_ASSERT(ldrInfoC
.baseReg
== ldrInfoB
.reg
);
2365 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2366 if ( usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2367 // can do T5 transform
2368 set32LE(infoA
.instructionContent
, makeNOP());
2369 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2370 if ( _options
.verboseOptimizationHints() ) {
2371 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T5 transformed to LDR literal of GOT plus STR\n", infoC
.instructionAddress
);
2375 if ( _options
.verboseOptimizationHints() )
2376 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX no optimization done\n", infoC
.instructionAddress
);
2380 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2381 LOH_ASSERT(addInfoB
.srcReg
== adrpInfoA
.destReg
);
2382 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2383 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2384 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2385 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2386 // can do T4 transform
2387 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2388 set32LE(infoB
.instructionContent
, makeNOP());
2389 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2390 if ( _options
.verboseOptimizationHints() ) {
2391 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADR/STR\n", infoC
.instructionAddress
);
2394 else if ( ((infoA
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2395 // can do T2 transform
2396 set32LE(infoB
.instructionContent
, makeNOP());
2397 ldrInfoC
.baseReg
= adrpInfoA
.destReg
;
2398 ldrInfoC
.offset
= addInfoB
.addend
;
2399 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2400 if ( _options
.verboseOptimizationHints() ) {
2401 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADRP/NOP/STR\n", infoC
.instructionAddress
);
2405 // T3 transform already done by ld::passes:got:doPass()
2406 if ( _options
.verboseOptimizationHints() ) {
2407 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T3 transformed to ADRP/ADD/STR\n", infoC
.instructionAddress
);
2412 if ( _options
.verboseOptimizationHints() )
2413 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX not ADD or LDR\n", infoC
.instructionAddress
);
2416 case LOH_ARM64_ADRP_LDR_GOT
:
2417 LOH_ASSERT(alt
.info
.count
== 1);
2418 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2419 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2420 LOH_ASSERT(infoA
.target
== infoB
.target
);
2421 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2422 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2423 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2424 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2427 if ( usableSegment
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2428 // can do T5 transform (LDR literal load of GOT)
2429 set32LE(infoA
.instructionContent
, makeNOP());
2430 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2431 if ( _options
.verboseOptimizationHints() ) {
2432 fprintf(stderr
, "adrp-ldr-got at 0x%08llX T5 transformed to NOP/LDR\n", infoC
.instructionAddress
);
2437 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2438 // can do T4 transform (ADR to compute local address)
2439 set32LE(infoA
.instructionContent
, makeADR(addInfoB
.destReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2440 set32LE(infoB
.instructionContent
, makeNOP());
2441 if ( _options
.verboseOptimizationHints() ) {
2442 fprintf(stderr
, "adrp-ldr-got at 0x%08llX T4 transformed to ADR/STR\n", infoC
.instructionAddress
);
2447 if ( _options
.verboseOptimizationHints() )
2448 fprintf(stderr
, "adrp-ldr-got at 0x%08llX not LDR or ADD\n", infoB
.instructionAddress
);
2452 if ( _options
.verboseOptimizationHints() )
2453 fprintf(stderr
, "adrp-ldr-got at 0x%08llX not ADRP\n", infoA
.instructionAddress
);
2457 if ( _options
.verboseOptimizationHints() )
2458 fprintf(stderr
, "unknown hint kind %d alt.info.kind at 0x%08llX\n", alt
.info
.kind
, infoA
.instructionAddress
);
2462 // apply hints pass 2
2463 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2464 if ( fit
->kind
!= ld::Fixup::kindLinkerOptimizationHint
)
2466 InstructionInfo infoA
;
2467 InstructionInfo infoB
;
2468 ld::Fixup::LOH_arm64 alt
;
2469 alt
.addend
= fit
->u
.addend
;
2470 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta1
<< 2), &infoA
);
2471 if ( alt
.info
.count
> 0 )
2472 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta2
<< 2), &infoB
);
2474 switch ( alt
.info
.kind
) {
2475 case LOH_ARM64_ADRP_ADRP
:
2476 LOH_ASSERT(isPageKind(infoA
.fixup
));
2477 LOH_ASSERT(isPageKind(infoB
.fixup
));
2478 if ( (infoA
.instruction
& 0x9F000000) != 0x90000000 ) {
2479 if ( _options
.verboseOptimizationHints() )
2480 fprintf(stderr
, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoA
.instructionAddress
, infoA
.instruction
);
2484 if ( (infoB
.instruction
& 0x9F000000) != 0x90000000 ) {
2485 if ( _options
.verboseOptimizationHints() )
2486 fprintf(stderr
, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoB
.instructionAddress
, infoA
.instruction
);
2490 if ( (infoA
.targetAddress
& (-4096)) == (infoB
.targetAddress
& (-4096)) ) {
2491 set32LE(infoB
.instructionContent
, 0xD503201F);
2501 #endif // SUPPORT_ARCH_arm64
2505 void OutputFile::copyNoOps(uint8_t* from
, uint8_t* to
, bool thumb
)
2507 switch ( _options
.architecture() ) {
2509 case CPU_TYPE_X86_64
:
2510 for (uint8_t* p
=from
; p
< to
; ++p
)
2515 for (uint8_t* p
=from
; p
< to
; p
+= 2)
2516 OSWriteLittleInt16((uint16_t*)p
, 0, 0x46c0);
2519 for (uint8_t* p
=from
; p
< to
; p
+= 4)
2520 OSWriteLittleInt32((uint32_t*)p
, 0, 0xe1a00000);
2524 for (uint8_t* p
=from
; p
< to
; ++p
)
2530 bool OutputFile::takesNoDiskSpace(const ld::Section
* sect
)
2532 switch ( sect
->type() ) {
2533 case ld::Section::typeZeroFill
:
2534 case ld::Section::typeTLVZeroFill
:
2535 return _options
.optimizeZeroFill();
2536 case ld::Section::typePageZero
:
2537 case ld::Section::typeStack
:
2538 case ld::Section::typeAbsoluteSymbols
:
2539 case ld::Section::typeTentativeDefs
:
2547 bool OutputFile::hasZeroForFileOffset(const ld::Section
* sect
)
2549 switch ( sect
->type() ) {
2550 case ld::Section::typeZeroFill
:
2551 case ld::Section::typeTLVZeroFill
:
2552 return _options
.optimizeZeroFill();
2553 case ld::Section::typePageZero
:
2554 case ld::Section::typeStack
:
2555 case ld::Section::typeTentativeDefs
:
2563 void OutputFile::writeAtoms(ld::Internal
& state
, uint8_t* wholeBuffer
)
2565 // have each atom write itself
2566 uint64_t fileOffsetOfEndOfLastAtom
= 0;
2567 uint64_t mhAddress
= 0;
2568 bool lastAtomUsesNoOps
= false;
2569 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2570 ld::Internal::FinalSection
* sect
= *sit
;
2571 if ( sect
->type() == ld::Section::typeMachHeader
)
2572 mhAddress
= sect
->address
;
2573 if ( takesNoDiskSpace(sect
) )
2575 const bool sectionUsesNops
= (sect
->type() == ld::Section::typeCode
);
2576 //fprintf(stderr, "file offset=0x%08llX, section %s\n", sect->fileOffset, sect->sectionName());
2577 std::vector
<const ld::Atom
*>& atoms
= sect
->atoms
;
2578 bool lastAtomWasThumb
= false;
2579 for (std::vector
<const ld::Atom
*>::iterator ait
= atoms
.begin(); ait
!= atoms
.end(); ++ait
) {
2580 const ld::Atom
* atom
= *ait
;
2581 if ( atom
->definition() == ld::Atom::definitionProxy
)
2584 uint64_t fileOffset
= atom
->finalAddress() - sect
->address
+ sect
->fileOffset
;
2585 // check for alignment padding between atoms
2586 if ( (fileOffset
!= fileOffsetOfEndOfLastAtom
) && lastAtomUsesNoOps
) {
2587 this->copyNoOps(&wholeBuffer
[fileOffsetOfEndOfLastAtom
], &wholeBuffer
[fileOffset
], lastAtomWasThumb
);
2589 // copy atom content
2590 atom
->copyRawContent(&wholeBuffer
[fileOffset
]);
2592 this->applyFixUps(state
, mhAddress
, atom
, &wholeBuffer
[fileOffset
]);
2593 fileOffsetOfEndOfLastAtom
= fileOffset
+atom
->size();
2594 lastAtomUsesNoOps
= sectionUsesNops
;
2595 lastAtomWasThumb
= atom
->isThumb();
2597 catch (const char* msg
) {
2598 if ( atom
->file() != NULL
)
2599 throwf("%s in '%s' from %s", msg
, atom
->name(), atom
->file()->path());
2601 throwf("%s in '%s'", msg
, atom
->name());
2606 if ( _options
.verboseOptimizationHints() ) {
2607 //fprintf(stderr, "ADRP optimized away: %d\n", sAdrpNA);
2608 //fprintf(stderr, "ADRPs changed to NOPs: %d\n", sAdrpNoped);
2609 //fprintf(stderr, "ADRPs unchanged: %d\n", sAdrpNotNoped);
2613 void OutputFile::computeContentUUID(ld::Internal
& state
, uint8_t* wholeBuffer
)
2615 const bool log
= false;
2616 if ( (_options
.outputKind() != Options::kObjectFile
) || state
.someObjectFileHasDwarf
) {
2617 uint8_t digest
[CC_MD5_DIGEST_LENGTH
];
2618 std::vector
<std::pair
<uint64_t, uint64_t>> excludeRegions
;
2619 uint64_t bitcodeCmdOffset
;
2620 uint64_t bitcodeCmdEnd
;
2621 uint64_t bitcodeSectOffset
;
2622 uint64_t bitcodePaddingEnd
;
2623 if ( _headersAndLoadCommandAtom
->bitcodeBundleCommand(bitcodeCmdOffset
, bitcodeCmdEnd
,
2624 bitcodeSectOffset
, bitcodePaddingEnd
) ) {
2625 // Exclude embedded bitcode bundle section which contains timestamps in XAR header
2626 // Note the timestamp is in the compressed XML header which means it might change the size of
2627 // bitcode section. The load command which include the size of the section and the padding after
2628 // the bitcode section should also be excluded in the UUID computation.
2629 // Bitcode section should appears before LINKEDIT
2630 // Exclude section cmd
2631 if ( log
) fprintf(stderr
, "bundle cmd start=0x%08llX, bundle cmd end=0x%08llX\n",
2632 bitcodeCmdOffset
, bitcodeCmdEnd
);
2633 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(bitcodeCmdOffset
, bitcodeCmdEnd
));
2634 // Exclude section content
2635 if ( log
) fprintf(stderr
, "bundle start=0x%08llX, bundle end=0x%08llX\n",
2636 bitcodeSectOffset
, bitcodePaddingEnd
);
2637 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(bitcodeSectOffset
, bitcodePaddingEnd
));
2639 uint32_t stabsStringsOffsetStart
;
2640 uint32_t tabsStringsOffsetEnd
;
2641 uint32_t stabsOffsetStart
;
2642 uint32_t stabsOffsetEnd
;
2643 if ( _symbolTableAtom
->hasStabs(stabsStringsOffsetStart
, tabsStringsOffsetEnd
, stabsOffsetStart
, stabsOffsetEnd
) ) {
2644 // find two areas of file that are stabs info and should not contribute to checksum
2645 uint64_t stringPoolFileOffset
= 0;
2646 uint64_t symbolTableFileOffset
= 0;
2647 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2648 ld::Internal::FinalSection
* sect
= *sit
;
2649 if ( sect
->type() == ld::Section::typeLinkEdit
) {
2650 if ( strcmp(sect
->sectionName(), "__string_pool") == 0 )
2651 stringPoolFileOffset
= sect
->fileOffset
;
2652 else if ( strcmp(sect
->sectionName(), "__symbol_table") == 0 )
2653 symbolTableFileOffset
= sect
->fileOffset
;
2656 uint64_t firstStabNlistFileOffset
= symbolTableFileOffset
+ stabsOffsetStart
;
2657 uint64_t lastStabNlistFileOffset
= symbolTableFileOffset
+ stabsOffsetEnd
;
2658 uint64_t firstStabStringFileOffset
= stringPoolFileOffset
+ stabsStringsOffsetStart
;
2659 uint64_t lastStabStringFileOffset
= stringPoolFileOffset
+ tabsStringsOffsetEnd
;
2660 if ( log
) fprintf(stderr
, "firstStabNlistFileOffset=0x%08llX\n", firstStabNlistFileOffset
);
2661 if ( log
) fprintf(stderr
, "lastStabNlistFileOffset=0x%08llX\n", lastStabNlistFileOffset
);
2662 if ( log
) fprintf(stderr
, "firstStabStringFileOffset=0x%08llX\n", firstStabStringFileOffset
);
2663 if ( log
) fprintf(stderr
, "lastStabStringFileOffset=0x%08llX\n", lastStabStringFileOffset
);
2664 assert(firstStabNlistFileOffset
<= firstStabStringFileOffset
);
2665 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(firstStabNlistFileOffset
, lastStabNlistFileOffset
));
2666 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(firstStabStringFileOffset
, lastStabStringFileOffset
));
2668 if ( !excludeRegions
.empty() ) {
2669 CC_MD5_CTX md5state
;
2670 CC_MD5_Init(&md5state
);
2671 // rdar://problem/19487042 include the output leaf file name in the hash
2672 const char* lastSlash
= strrchr(_options
.outputFilePath(), '/');
2673 if ( lastSlash
!= NULL
) {
2674 CC_MD5_Update(&md5state
, lastSlash
, strlen(lastSlash
));
2676 uint64_t checksumStart
= 0;
2677 for ( auto& region
: excludeRegions
) {
2678 uint64_t regionStart
= region
.first
;
2679 uint64_t regionEnd
= region
.second
;
2680 assert(checksumStart
<= regionStart
&& regionStart
<= regionEnd
&& "Region overlapped");
2681 if ( log
) fprintf(stderr
, "checksum 0x%08llX -> 0x%08llX\n", checksumStart
, regionStart
);
2682 CC_MD5_Update(&md5state
, &wholeBuffer
[checksumStart
], regionStart
- checksumStart
);
2683 checksumStart
= regionEnd
;
2685 if ( log
) fprintf(stderr
, "checksum 0x%08llX -> 0x%08llX\n", checksumStart
, _fileSize
);
2686 CC_MD5_Update(&md5state
, &wholeBuffer
[checksumStart
], _fileSize
-checksumStart
);
2687 CC_MD5_Final(digest
, &md5state
);
2688 if ( log
) fprintf(stderr
, "uuid=%02X, %02X, %02X, %02X, %02X, %02X, %02X, %02X\n", digest
[0], digest
[1], digest
[2],
2689 digest
[3], digest
[4], digest
[5], digest
[6], digest
[7]);
2692 CC_MD5(wholeBuffer
, _fileSize
, digest
);
2694 // <rdar://problem/6723729> LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
2695 digest
[6] = ( digest
[6] & 0x0F ) | ( 3 << 4 );
2696 digest
[8] = ( digest
[8] & 0x3F ) | 0x80;
2697 // update buffer with new UUID
2698 _headersAndLoadCommandAtom
->setUUID(digest
);
2699 _headersAndLoadCommandAtom
->recopyUUIDCommand();
2703 static int sDescriptorOfPathToRemove
= -1;
2704 static void removePathAndExit(int sig
)
2706 if ( sDescriptorOfPathToRemove
!= -1 ) {
2707 char path
[MAXPATHLEN
];
2708 if ( ::fcntl(sDescriptorOfPathToRemove
, F_GETPATH
, path
) == 0 )
2711 fprintf(stderr
, "ld: interrupted\n");
// Write the fully laid-out image to disk.
// Strategy: when the output is a regular file on an HFS volume, write into an
// mmap'ed temporary file ({outputFilePath}.ld_XXXXXX) and rename() it into
// place; otherwise build the image in a calloc'ed buffer and write() it out.
void OutputFile::writeOutputFile(ld::Internal& state)
{
	// for UNIX conformance, error if file exists and is not writable
	if ( (access(_options.outputFilePath(), F_OK) == 0) && (access(_options.outputFilePath(), W_OK) == -1) )
		throwf("can't write output file: %s", _options.outputFilePath());

	mode_t permissions = 0777;
	if ( _options.outputKind() == Options::kObjectFile )
		permissions = 0666;
	mode_t umask = ::umask(0);
	::umask(umask); // put back the original umask
	permissions &= ~umask;

	// Calling unlink first assures the file is gone so that open creates it with correct permissions
	// It also handles the case where __options.outputFilePath() file is not writable but its directory is
	// And it means we don't have to truncate the file when done writing (in case new is smaller than old)
	// Lastly, only delete existing file if it is a normal file (e.g. not /dev/null).
	struct stat stat_buf;
	bool outputIsRegularFile = false;
	bool outputIsMappableFile = false;
	if ( stat(_options.outputFilePath(), &stat_buf) != -1 ) {
		if (stat_buf.st_mode & S_IFREG) {
			outputIsRegularFile = true;
			// <rdar://problem/12264302> Don't use mmap on non-hfs volumes
			struct statfs fsInfo;
			if ( statfs(_options.outputFilePath(), &fsInfo) != -1 ) {
				if ( strcmp(fsInfo.f_fstypename, "hfs") == 0) {
					(void)unlink(_options.outputFilePath());
					outputIsMappableFile = true;
				}
			}
			else {
				outputIsMappableFile = false;
			}
		}
		else {
			outputIsRegularFile = false;
		}
	}
	else {
		// special files (pipes, devices, etc) must already exist
		outputIsRegularFile = true;
		// output file does not exist yet; check the containing directory's volume
		char dirPath[PATH_MAX];
		strcpy(dirPath, _options.outputFilePath());
		char* end = strrchr(dirPath, '/');
		if ( end != NULL ) {
			end[1] = '\0';		// truncate to the directory portion
			struct statfs fsInfo;
			if ( statfs(dirPath, &fsInfo) != -1 ) {
				if ( strcmp(fsInfo.f_fstypename, "hfs") == 0) {
					outputIsMappableFile = true;
				}
			}
		}
	}

	//fprintf(stderr, "outputIsMappableFile=%d, outputIsRegularFile=%d, path=%s\n", outputIsMappableFile, outputIsRegularFile, _options.outputFilePath());

	int fd;
	// Construct a temporary path of the form {outputFilePath}.ld_XXXXXX
	const char filenameTemplate[] = ".ld_XXXXXX";
	char tmpOutput[PATH_MAX];
	uint8_t *wholeBuffer;
	if ( outputIsRegularFile && outputIsMappableFile ) {
		// <rdar://problem/20959031> ld64 should clean up temporary files on SIGINT
		::signal(SIGINT, removePathAndExit);

		strcpy(tmpOutput, _options.outputFilePath());
		// If the path is too long to add a suffix for a temporary name then
		// just fall back to using the output path.
		if (strlen(tmpOutput)+strlen(filenameTemplate) < PATH_MAX) {
			strcat(tmpOutput, filenameTemplate);
			fd = mkstemp(tmpOutput);
			sDescriptorOfPathToRemove = fd;
		}
		else {
			fd = open(tmpOutput, O_RDWR|O_CREAT, permissions);
		}
		if ( fd == -1 )
			throwf("can't open output file for writing '%s', errno=%d", tmpOutput, errno);
		if ( ftruncate(fd, _fileSize) == -1 ) {
			int err = errno;
			unlink(tmpOutput);
			if ( err == ENOSPC )
				throwf("not enough disk space for writing '%s'", _options.outputFilePath());
			else
				throwf("can't grow file for writing '%s', errno=%d", _options.outputFilePath(), err);
		}

		wholeBuffer = (uint8_t *)mmap(NULL, _fileSize, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0);
		if ( wholeBuffer == MAP_FAILED )
			throwf("can't create buffer of %llu bytes for output", _fileSize);
	}
	else {
		if ( outputIsRegularFile )
			fd = open(_options.outputFilePath(), O_RDWR|O_CREAT, permissions);
		else
			fd = open(_options.outputFilePath(), O_WRONLY);
		if ( fd == -1 )
			throwf("can't open output file for writing: %s, errno=%d", _options.outputFilePath(), errno);
		// try to allocate buffer for entire output file content
		wholeBuffer = (uint8_t*)calloc(_fileSize, 1);
		if ( wholeBuffer == NULL )
			throwf("can't create buffer of %llu bytes for output", _fileSize);
	}

	if ( _options.UUIDMode() == Options::kUUIDRandom ) {
		uint8_t bits[16];
		::uuid_generate_random(bits);
		_headersAndLoadCommandAtom->setUUID(bits);
	}

	// write the image content into the buffer
	writeAtoms(state, wholeBuffer);

	// compute content-based UUID (must run after all atoms are written)
	if ( _options.UUIDMode() == Options::kUUIDContent )
		computeContentUUID(state, wholeBuffer);

	if ( outputIsRegularFile && outputIsMappableFile ) {
		if ( ::chmod(tmpOutput, permissions) == -1 ) {
			unlink(tmpOutput);
			throwf("can't set permissions on output file: %s, errno=%d", tmpOutput, errno);
		}
		if ( ::rename(tmpOutput, _options.outputFilePath()) == -1 && strcmp(tmpOutput, _options.outputFilePath()) != 0) {
			unlink(tmpOutput);
			throwf("can't move output file in place, errno=%d", errno);
		}
		// Fix: once the file has been renamed into its final place, a SIGINT
		// must no longer delete it through sDescriptorOfPathToRemove.
		sDescriptorOfPathToRemove = -1;
	}
	else {
		if ( ::write(fd, wholeBuffer, _fileSize) == -1 ) {
			throwf("can't write to output file: %s, errno=%d", _options.outputFilePath(), errno);
		}
		sDescriptorOfPathToRemove = -1;
		::close(fd);
		// <rdar://problem/13118223> NFS: iOS incremental builds in Xcode 4.6 fail with codesign error
		// NFS seems to pad the end of the file sometimes. Calling trunc seems to correct it...
		::truncate(_options.outputFilePath(), _fileSize);
	}

	// Rename symbol map file if needed
	if ( _options.renameReverseSymbolMap() ) {
		assert(_options.hideSymbols() && _options.reverseSymbolMapPath() != NULL && "Must hide symbol and specify a path");
		uuid_string_t UUIDString;
		const uint8_t* rawUUID = _headersAndLoadCommandAtom->getUUID();
		uuid_unparse_upper(rawUUID, UUIDString);
		char outputMapPath[PATH_MAX];
		sprintf(outputMapPath, "%s/%s.bcsymbolmap", _options.reverseSymbolMapPath(), UUIDString);
		if ( ::rename(_options.reverseMapTempPath().c_str(), outputMapPath) != 0 )
			throwf("could not create bcsymbolmap file: %s", outputMapPath);
	}
}
2867 struct AtomByNameSorter
2869 bool operator()(const ld::Atom
* left
, const ld::Atom
* right
)
2871 return (strcmp(left
->name(), right
->name()) < 0);
2878 NotInSet(const std::set
<const ld::Atom
*>& theSet
) : _set(theSet
) {}
2880 bool operator()(const ld::Atom
* atom
) const {
2881 return ( _set
.count(atom
) == 0 );
2884 const std::set
<const ld::Atom
*>& _set
;
2888 void OutputFile::buildSymbolTable(ld::Internal
& state
)
2890 unsigned int machoSectionIndex
= 0;
2891 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2892 ld::Internal::FinalSection
* sect
= *sit
;
2893 bool setMachoSectionIndex
= !sect
->isSectionHidden() && (sect
->type() != ld::Section::typeTentativeDefs
);
2894 if ( setMachoSectionIndex
)
2895 ++machoSectionIndex
;
2896 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
2897 const ld::Atom
* atom
= *ait
;
2898 if ( setMachoSectionIndex
)
2899 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
);
2900 else if ( sect
->type() == ld::Section::typeMachHeader
)
2901 (const_cast<ld::Atom
*>(atom
))->setMachoSection(1); // __mh_execute_header is not in any section by needs n_sect==1
2902 else if ( sect
->type() == ld::Section::typeLastSection
)
2903 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
); // use section index of previous section
2904 else if ( sect
->type() == ld::Section::typeFirstSection
)
2905 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
+1); // use section index of next section
2907 // in -r mode, clarify symbolTableNotInFinalLinkedImages
2908 if ( _options
.outputKind() == Options::kObjectFile
) {
2909 if ( (_options
.architecture() == CPU_TYPE_X86_64
) || (_options
.architecture() == CPU_TYPE_ARM64
) ) {
2910 // x86_64 .o files need labels on anonymous literal strings
2911 if ( (sect
->type() == ld::Section::typeCString
) && (atom
->combine() == ld::Atom::combineByNameAndContent
) ) {
2912 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2913 _localAtoms
.push_back(atom
);
2917 if ( sect
->type() == ld::Section::typeCFI
) {
2918 if ( _options
.removeEHLabels() )
2919 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
2921 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2923 else if ( sect
->type() == ld::Section::typeTempAlias
) {
2924 assert(_options
.outputKind() == Options::kObjectFile
);
2925 _importedAtoms
.push_back(atom
);
2928 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
)
2929 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2932 // TEMP work around until <rdar://problem/7702923> goes in
2933 if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip
)
2934 && (atom
->scope() == ld::Atom::scopeLinkageUnit
)
2935 && (_options
.outputKind() == Options::kDynamicLibrary
) ) {
2936 (const_cast<ld::Atom
*>(atom
))->setScope(ld::Atom::scopeGlobal
);
2939 // <rdar://problem/6783167> support auto hidden weak symbols: .weak_def_can_be_hidden
2940 if ( atom
->autoHide() && (_options
.outputKind() != Options::kObjectFile
) ) {
2941 // adding auto-hide symbol to .exp file should keep it global
2942 if ( !_options
.hasExportMaskList() || !_options
.shouldExport(atom
->name()) )
2943 (const_cast<ld::Atom
*>(atom
))->setScope(ld::Atom::scopeLinkageUnit
);
2946 // <rdar://problem/8626058> ld should consistently warn when resolvers are not exported
2947 if ( (atom
->contentType() == ld::Atom::typeResolver
) && (atom
->scope() == ld::Atom::scopeLinkageUnit
) )
2948 warning("resolver functions should be external, but '%s' is hidden", atom
->name());
2950 if ( sect
->type() == ld::Section::typeImportProxies
) {
2951 if ( atom
->combine() == ld::Atom::combineByName
)
2952 this->usesWeakExternalSymbols
= true;
2953 // alias proxy is a re-export with a name change, don't import changed name
2954 if ( ! atom
->isAlias() )
2955 _importedAtoms
.push_back(atom
);
2956 // scope of proxies are usually linkage unit, so done
2957 // if scope is global, we need to re-export it too
2958 if ( atom
->scope() == ld::Atom::scopeGlobal
)
2959 _exportedAtoms
.push_back(atom
);
2962 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
) {
2963 assert(_options
.outputKind() != Options::kObjectFile
);
2964 continue; // don't add to symbol table
2966 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotIn
) {
2967 continue; // don't add to symbol table
2969 if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel
)
2970 && (_options
.outputKind() != Options::kObjectFile
) ) {
2971 continue; // don't add to symbol table
2974 if ( (atom
->definition() == ld::Atom::definitionTentative
) && (_options
.outputKind() == Options::kObjectFile
) ) {
2975 if ( _options
.makeTentativeDefinitionsReal() ) {
2976 // -r -d turns tentative defintions into real def
2977 _exportedAtoms
.push_back(atom
);
2980 // in mach-o object files tentative defintions are stored like undefined symbols
2981 _importedAtoms
.push_back(atom
);
2986 switch ( atom
->scope() ) {
2987 case ld::Atom::scopeTranslationUnit
:
2988 if ( _options
.keepLocalSymbol(atom
->name()) ) {
2989 _localAtoms
.push_back(atom
);
2992 if ( _options
.outputKind() == Options::kObjectFile
) {
2993 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel
);
2994 _localAtoms
.push_back(atom
);
2997 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
3000 case ld::Atom::scopeGlobal
:
3001 _exportedAtoms
.push_back(atom
);
3003 case ld::Atom::scopeLinkageUnit
:
3004 if ( _options
.outputKind() == Options::kObjectFile
) {
3005 if ( _options
.keepPrivateExterns() ) {
3006 _exportedAtoms
.push_back(atom
);
3008 else if ( _options
.keepLocalSymbol(atom
->name()) ) {
3009 _localAtoms
.push_back(atom
);
3012 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel
);
3013 _localAtoms
.push_back(atom
);
3017 if ( _options
.keepLocalSymbol(atom
->name()) )
3018 _localAtoms
.push_back(atom
);
3019 // <rdar://problem/5804214> ld should never have a symbol in the non-lazy indirect symbol table with index 0
3020 // this works by making __mh_execute_header be a local symbol which takes symbol index 0
3021 else if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip
) && !_options
.makeCompressedDyldInfo() )
3022 _localAtoms
.push_back(atom
);
3024 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
3031 // <rdar://problem/6978069> ld adds undefined symbol from .exp file to binary
3032 if ( (_options
.outputKind() == Options::kKextBundle
) && _options
.hasExportRestrictList() ) {
3033 // search for referenced undefines
3034 std::set
<const ld::Atom
*> referencedProxyAtoms
;
3035 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
=state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
3036 ld::Internal::FinalSection
* sect
= *sit
;
3037 for (std::vector
<const ld::Atom
*>::iterator ait
=sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
3038 const ld::Atom
* atom
= *ait
;
3039 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
3040 switch ( fit
->binding
) {
3041 case ld::Fixup::bindingsIndirectlyBound
:
3042 referencedProxyAtoms
.insert(state
.indirectBindingTable
[fit
->u
.bindingIndex
]);
3044 case ld::Fixup::bindingDirectlyBound
:
3045 referencedProxyAtoms
.insert(fit
->u
.target
);
3053 // remove any unreferenced _importedAtoms
3054 _importedAtoms
.erase(std::remove_if(_importedAtoms
.begin(), _importedAtoms
.end(), NotInSet(referencedProxyAtoms
)), _importedAtoms
.end());
3058 std::sort(_exportedAtoms
.begin(), _exportedAtoms
.end(), AtomByNameSorter());
3059 std::sort(_importedAtoms
.begin(), _importedAtoms
.end(), AtomByNameSorter());
3062 void OutputFile::addPreloadLinkEdit(ld::Internal
& state
)
3064 switch ( _options
.architecture() ) {
3065 #if SUPPORT_ARCH_i386
3067 if ( _hasLocalRelocations
) {
3068 _localRelocsAtom
= new LocalRelocationsAtom
<x86
>(_options
, state
, *this);
3069 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3071 if ( _hasExternalRelocations
) {
3072 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86
>(_options
, state
, *this);
3073 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3075 if ( _hasSymbolTable
) {
3076 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86
>(_options
, state
, *this);
3077 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3078 _symbolTableAtom
= new SymbolTableAtom
<x86
>(_options
, state
, *this);
3079 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3080 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3081 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3085 #if SUPPORT_ARCH_x86_64
3086 case CPU_TYPE_X86_64
:
3087 if ( _hasLocalRelocations
) {
3088 _localRelocsAtom
= new LocalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3089 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3091 if ( _hasExternalRelocations
) {
3092 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3093 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3095 if ( _hasSymbolTable
) {
3096 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86_64
>(_options
, state
, *this);
3097 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3098 _symbolTableAtom
= new SymbolTableAtom
<x86_64
>(_options
, state
, *this);
3099 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3100 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3101 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3105 #if SUPPORT_ARCH_arm_any
3107 if ( _hasLocalRelocations
) {
3108 _localRelocsAtom
= new LocalRelocationsAtom
<arm
>(_options
, state
, *this);
3109 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3111 if ( _hasExternalRelocations
) {
3112 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm
>(_options
, state
, *this);
3113 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3115 if ( _hasSymbolTable
) {
3116 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm
>(_options
, state
, *this);
3117 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3118 _symbolTableAtom
= new SymbolTableAtom
<arm
>(_options
, state
, *this);
3119 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3120 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3121 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3125 #if SUPPORT_ARCH_arm64
3126 case CPU_TYPE_ARM64
:
3127 if ( _hasLocalRelocations
) {
3128 _localRelocsAtom
= new LocalRelocationsAtom
<arm64
>(_options
, state
, *this);
3129 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3131 if ( _hasExternalRelocations
) {
3132 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm64
>(_options
, state
, *this);
3133 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3135 if ( _hasSymbolTable
) {
3136 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm64
>(_options
, state
, *this);
3137 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3138 _symbolTableAtom
= new SymbolTableAtom
<arm64
>(_options
, state
, *this);
3139 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3140 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3141 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3146 throw "-preload not supported";
3152 void OutputFile::addLinkEdit(ld::Internal
& state
)
3154 // for historical reasons, -preload orders LINKEDIT content differently
3155 if ( _options
.outputKind() == Options::kPreload
)
3156 return addPreloadLinkEdit(state
);
3158 switch ( _options
.architecture() ) {
3159 #if SUPPORT_ARCH_i386
3161 if ( _hasSectionRelocations
) {
3162 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<x86
>(_options
, state
, *this);
3163 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3165 if ( _hasDyldInfo
) {
3166 _rebasingInfoAtom
= new RebaseInfoAtom
<x86
>(_options
, state
, *this);
3167 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3169 _bindingInfoAtom
= new BindingInfoAtom
<x86
>(_options
, state
, *this);
3170 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3172 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<x86
>(_options
, state
, *this);
3173 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3175 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<x86
>(_options
, state
, *this);
3176 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3178 _exportInfoAtom
= new ExportInfoAtom
<x86
>(_options
, state
, *this);
3179 exportSection
= state
.addAtom(*_exportInfoAtom
);
3181 if ( _hasLocalRelocations
) {
3182 _localRelocsAtom
= new LocalRelocationsAtom
<x86
>(_options
, state
, *this);
3183 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3185 if ( _hasSplitSegInfo
) {
3186 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<x86
>(_options
, state
, *this);
3187 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3189 if ( _hasFunctionStartsInfo
) {
3190 _functionStartsAtom
= new FunctionStartsAtom
<x86
>(_options
, state
, *this);
3191 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3193 if ( _hasDataInCodeInfo
) {
3194 _dataInCodeAtom
= new DataInCodeAtom
<x86
>(_options
, state
, *this);
3195 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3197 if ( _hasOptimizationHints
) {
3198 _optimizationHintsAtom
= new OptimizationHintsAtom
<x86
>(_options
, state
, *this);
3199 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3201 if ( _hasSymbolTable
) {
3202 _symbolTableAtom
= new SymbolTableAtom
<x86
>(_options
, state
, *this);
3203 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3205 if ( _hasExternalRelocations
) {
3206 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86
>(_options
, state
, *this);
3207 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3209 if ( _hasSymbolTable
) {
3210 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86
>(_options
, state
, *this);
3211 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3212 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3213 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3217 #if SUPPORT_ARCH_x86_64
3218 case CPU_TYPE_X86_64
:
3219 if ( _hasSectionRelocations
) {
3220 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<x86_64
>(_options
, state
, *this);
3221 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3223 if ( _hasDyldInfo
) {
3224 _rebasingInfoAtom
= new RebaseInfoAtom
<x86_64
>(_options
, state
, *this);
3225 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3227 _bindingInfoAtom
= new BindingInfoAtom
<x86_64
>(_options
, state
, *this);
3228 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3230 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<x86_64
>(_options
, state
, *this);
3231 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3233 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<x86_64
>(_options
, state
, *this);
3234 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3236 _exportInfoAtom
= new ExportInfoAtom
<x86_64
>(_options
, state
, *this);
3237 exportSection
= state
.addAtom(*_exportInfoAtom
);
3239 if ( _hasLocalRelocations
) {
3240 _localRelocsAtom
= new LocalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3241 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3243 if ( _hasSplitSegInfo
) {
3244 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<x86_64
>(_options
, state
, *this);
3245 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3247 if ( _hasFunctionStartsInfo
) {
3248 _functionStartsAtom
= new FunctionStartsAtom
<x86_64
>(_options
, state
, *this);
3249 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3251 if ( _hasDataInCodeInfo
) {
3252 _dataInCodeAtom
= new DataInCodeAtom
<x86_64
>(_options
, state
, *this);
3253 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3255 if ( _hasOptimizationHints
) {
3256 _optimizationHintsAtom
= new OptimizationHintsAtom
<x86_64
>(_options
, state
, *this);
3257 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3259 if ( _hasSymbolTable
) {
3260 _symbolTableAtom
= new SymbolTableAtom
<x86_64
>(_options
, state
, *this);
3261 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3263 if ( _hasExternalRelocations
) {
3264 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3265 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3267 if ( _hasSymbolTable
) {
3268 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86_64
>(_options
, state
, *this);
3269 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3270 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 8);
3271 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3275 #if SUPPORT_ARCH_arm_any
3277 if ( _hasSectionRelocations
) {
3278 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<arm
>(_options
, state
, *this);
3279 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3281 if ( _hasDyldInfo
) {
3282 _rebasingInfoAtom
= new RebaseInfoAtom
<arm
>(_options
, state
, *this);
3283 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3285 _bindingInfoAtom
= new BindingInfoAtom
<arm
>(_options
, state
, *this);
3286 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3288 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<arm
>(_options
, state
, *this);
3289 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3291 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<arm
>(_options
, state
, *this);
3292 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3294 _exportInfoAtom
= new ExportInfoAtom
<arm
>(_options
, state
, *this);
3295 exportSection
= state
.addAtom(*_exportInfoAtom
);
3297 if ( _hasLocalRelocations
) {
3298 _localRelocsAtom
= new LocalRelocationsAtom
<arm
>(_options
, state
, *this);
3299 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3301 if ( _hasSplitSegInfo
) {
3302 if ( _options
.sharedRegionEncodingV2() )
3303 _splitSegInfoAtom
= new SplitSegInfoV2Atom
<arm
>(_options
, state
, *this);
3305 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<arm
>(_options
, state
, *this);
3306 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3308 if ( _hasFunctionStartsInfo
) {
3309 _functionStartsAtom
= new FunctionStartsAtom
<arm
>(_options
, state
, *this);
3310 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3312 if ( _hasDataInCodeInfo
) {
3313 _dataInCodeAtom
= new DataInCodeAtom
<arm
>(_options
, state
, *this);
3314 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3316 if ( _hasOptimizationHints
) {
3317 _optimizationHintsAtom
= new OptimizationHintsAtom
<arm
>(_options
, state
, *this);
3318 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3320 if ( _hasSymbolTable
) {
3321 _symbolTableAtom
= new SymbolTableAtom
<arm
>(_options
, state
, *this);
3322 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3324 if ( _hasExternalRelocations
) {
3325 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm
>(_options
, state
, *this);
3326 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3328 if ( _hasSymbolTable
) {
3329 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm
>(_options
, state
, *this);
3330 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3331 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3332 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3336 #if SUPPORT_ARCH_arm64
3337 case CPU_TYPE_ARM64
:
3338 if ( _hasSectionRelocations
) {
3339 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<arm64
>(_options
, state
, *this);
3340 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3342 if ( _hasDyldInfo
) {
3343 _rebasingInfoAtom
= new RebaseInfoAtom
<arm64
>(_options
, state
, *this);
3344 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3346 _bindingInfoAtom
= new BindingInfoAtom
<arm64
>(_options
, state
, *this);
3347 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3349 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<arm64
>(_options
, state
, *this);
3350 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3352 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<arm64
>(_options
, state
, *this);
3353 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3355 _exportInfoAtom
= new ExportInfoAtom
<arm64
>(_options
, state
, *this);
3356 exportSection
= state
.addAtom(*_exportInfoAtom
);
3358 if ( _hasLocalRelocations
) {
3359 _localRelocsAtom
= new LocalRelocationsAtom
<arm64
>(_options
, state
, *this);
3360 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3362 if ( _hasSplitSegInfo
) {
3363 if ( _options
.sharedRegionEncodingV2() )
3364 _splitSegInfoAtom
= new SplitSegInfoV2Atom
<arm64
>(_options
, state
, *this);
3366 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<arm64
>(_options
, state
, *this);
3367 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3369 if ( _hasFunctionStartsInfo
) {
3370 _functionStartsAtom
= new FunctionStartsAtom
<arm64
>(_options
, state
, *this);
3371 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3373 if ( _hasDataInCodeInfo
) {
3374 _dataInCodeAtom
= new DataInCodeAtom
<arm64
>(_options
, state
, *this);
3375 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3377 if ( _hasOptimizationHints
) {
3378 _optimizationHintsAtom
= new OptimizationHintsAtom
<arm64
>(_options
, state
, *this);
3379 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3381 if ( _hasSymbolTable
) {
3382 _symbolTableAtom
= new SymbolTableAtom
<arm64
>(_options
, state
, *this);
3383 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3385 if ( _hasExternalRelocations
) {
3386 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm64
>(_options
, state
, *this);
3387 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3389 if ( _hasSymbolTable
) {
3390 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm64
>(_options
, state
, *this);
3391 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3392 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3393 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3398 throw "unknown architecture";
3402 void OutputFile::addLoadCommands(ld::Internal
& state
)
3404 switch ( _options
.architecture() ) {
3405 #if SUPPORT_ARCH_x86_64
3406 case CPU_TYPE_X86_64
:
3407 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<x86_64
>(_options
, state
, *this);
3408 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3411 #if SUPPORT_ARCH_arm_any
3413 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<arm
>(_options
, state
, *this);
3414 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3417 #if SUPPORT_ARCH_arm64
3418 case CPU_TYPE_ARM64
:
3419 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<arm64
>(_options
, state
, *this);
3420 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3423 #if SUPPORT_ARCH_i386
3425 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<x86
>(_options
, state
, *this);
3426 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3430 throw "unknown architecture";
3434 uint32_t OutputFile::dylibCount()
3436 return _dylibsToLoad
.size();
3439 const ld::dylib::File
* OutputFile::dylibByOrdinal(unsigned int ordinal
)
3441 assert( ordinal
> 0 );
3442 assert( ordinal
<= _dylibsToLoad
.size() );
3443 return _dylibsToLoad
[ordinal
-1];
3446 bool OutputFile::hasOrdinalForInstallPath(const char* path
, int* ordinal
)
3448 for (std::map
<const ld::dylib::File
*, int>::const_iterator it
= _dylibToOrdinal
.begin(); it
!= _dylibToOrdinal
.end(); ++it
) {
3449 const char* installPath
= it
->first
->installPath();
3450 if ( (installPath
!= NULL
) && (strcmp(path
, installPath
) == 0) ) {
3451 *ordinal
= it
->second
;
3458 uint32_t OutputFile::dylibToOrdinal(const ld::dylib::File
* dylib
)
3460 return _dylibToOrdinal
[dylib
];
3464 void OutputFile::buildDylibOrdinalMapping(ld::Internal
& state
)
3466 // count non-public re-exported dylibs
3467 unsigned int nonPublicReExportCount
= 0;
3468 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3469 ld::dylib::File
* aDylib
= *it
;
3470 if ( aDylib
->willBeReExported() && ! aDylib
->hasPublicInstallName() )
3471 ++nonPublicReExportCount
;
3474 // look at each dylib supplied in state
3475 bool hasReExports
= false;
3476 bool haveLazyDylibs
= false;
3477 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3478 ld::dylib::File
* aDylib
= *it
;
3480 if ( aDylib
== state
.bundleLoader
) {
3481 _dylibToOrdinal
[aDylib
] = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE
;
3483 else if ( this->hasOrdinalForInstallPath(aDylib
->installPath(), &ordinal
) ) {
3484 // already have a dylib with that install path, map all uses to that ordinal
3485 _dylibToOrdinal
[aDylib
] = ordinal
;
3487 else if ( aDylib
->willBeLazyLoadedDylib() ) {
3488 // all lazy dylib need to be at end of ordinals
3489 haveLazyDylibs
= true;
3491 else if ( aDylib
->willBeReExported() && ! aDylib
->hasPublicInstallName() && (nonPublicReExportCount
>= 2) ) {
3492 _dylibsToLoad
.push_back(aDylib
);
3493 _dylibToOrdinal
[aDylib
] = BIND_SPECIAL_DYLIB_SELF
;
3496 // first time this install path seen, create new ordinal
3497 _dylibsToLoad
.push_back(aDylib
);
3498 _dylibToOrdinal
[aDylib
] = _dylibsToLoad
.size();
3500 if ( aDylib
->explicitlyLinked() && aDylib
->willBeReExported() )
3501 hasReExports
= true;
3503 if ( haveLazyDylibs
) {
3504 // second pass to determine ordinals for lazy loaded dylibs
3505 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3506 ld::dylib::File
* aDylib
= *it
;
3507 if ( aDylib
->willBeLazyLoadedDylib() ) {
3509 if ( this->hasOrdinalForInstallPath(aDylib
->installPath(), &ordinal
) ) {
3510 // already have a dylib with that install path, map all uses to that ordinal
3511 _dylibToOrdinal
[aDylib
] = ordinal
;
3514 // first time this install path seen, create new ordinal
3515 _dylibsToLoad
.push_back(aDylib
);
3516 _dylibToOrdinal
[aDylib
] = _dylibsToLoad
.size();
3521 _noReExportedDylibs
= !hasReExports
;
3522 //fprintf(stderr, "dylibs:\n");
3523 //for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3524 // fprintf(stderr, " %p ord=%u, install_name=%s\n",it->first, it->second, it->first->installPath());
3528 uint32_t OutputFile::lazyBindingInfoOffsetForLazyPointerAddress(uint64_t lpAddress
)
3530 return _lazyPointerAddressToInfoOffset
[lpAddress
];
3533 void OutputFile::setLazyBindingInfoOffset(uint64_t lpAddress
, uint32_t lpInfoOffset
)
3535 _lazyPointerAddressToInfoOffset
[lpAddress
] = lpInfoOffset
;
3538 int OutputFile::compressedOrdinalForAtom(const ld::Atom
* target
)
3540 // flat namespace images use zero for all ordinals
3541 if ( _options
.nameSpace() != Options::kTwoLevelNameSpace
)
3542 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3544 // handle -interposable
3545 if ( target
->definition() == ld::Atom::definitionRegular
)
3546 return BIND_SPECIAL_DYLIB_SELF
;
3549 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
3550 if ( dylib
!= NULL
) {
3551 std::map
<const ld::dylib::File
*, int>::iterator pos
= _dylibToOrdinal
.find(dylib
);
3552 if ( pos
!= _dylibToOrdinal
.end() )
3554 assert(0 && "dylib not assigned ordinal");
3557 // handle undefined dynamic_lookup
3558 if ( _options
.undefinedTreatment() == Options::kUndefinedDynamicLookup
)
3559 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3562 if ( _options
.allowedUndefined(target
->name()) )
3563 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3565 throw "can't find ordinal for imported symbol";
3569 bool OutputFile::isPcRelStore(ld::Fixup::Kind kind
)
3572 case ld::Fixup::kindStoreX86BranchPCRel8
:
3573 case ld::Fixup::kindStoreX86BranchPCRel32
:
3574 case ld::Fixup::kindStoreX86PCRel8
:
3575 case ld::Fixup::kindStoreX86PCRel16
:
3576 case ld::Fixup::kindStoreX86PCRel32
:
3577 case ld::Fixup::kindStoreX86PCRel32_1
:
3578 case ld::Fixup::kindStoreX86PCRel32_2
:
3579 case ld::Fixup::kindStoreX86PCRel32_4
:
3580 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
3581 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
3582 case ld::Fixup::kindStoreX86PCRel32GOT
:
3583 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
3584 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
3585 case ld::Fixup::kindStoreARMBranch24
:
3586 case ld::Fixup::kindStoreThumbBranch22
:
3587 case ld::Fixup::kindStoreARMLoad12
:
3588 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
3589 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
3590 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
3591 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
3592 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
3593 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
3594 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
3595 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
3596 #if SUPPORT_ARCH_arm64
3597 case ld::Fixup::kindStoreARM64Page21
:
3598 case ld::Fixup::kindStoreARM64PageOff12
:
3599 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
3600 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
3601 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
3602 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
3603 case ld::Fixup::kindStoreARM64PCRelToGOT
:
3604 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
3605 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
3606 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
3607 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
3608 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
3609 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
3612 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
3613 #if SUPPORT_ARCH_arm64
3614 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
3616 return (_options
.outputKind() != Options::kKextBundle
);
3623 bool OutputFile::isStore(ld::Fixup::Kind kind
)
3626 case ld::Fixup::kindNone
:
3627 case ld::Fixup::kindNoneFollowOn
:
3628 case ld::Fixup::kindNoneGroupSubordinate
:
3629 case ld::Fixup::kindNoneGroupSubordinateFDE
:
3630 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
3631 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
3632 case ld::Fixup::kindSetTargetAddress
:
3633 case ld::Fixup::kindSubtractTargetAddress
:
3634 case ld::Fixup::kindAddAddend
:
3635 case ld::Fixup::kindSubtractAddend
:
3636 case ld::Fixup::kindSetTargetImageOffset
:
3637 case ld::Fixup::kindSetTargetSectionOffset
:
3646 bool OutputFile::setsTarget(ld::Fixup::Kind kind
)
3649 case ld::Fixup::kindSetTargetAddress
:
3650 case ld::Fixup::kindLazyTarget
:
3651 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
3652 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
3653 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
3654 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
3655 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
3656 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
3657 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
3658 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
3659 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
3660 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
3661 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad
:
3662 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
3663 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
3664 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
3665 #if SUPPORT_ARCH_arm64
3666 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
3667 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
3668 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
3669 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
3670 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
3671 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
3672 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
3675 case ld::Fixup::kindStoreX86DtraceCallSiteNop
:
3676 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear
:
3677 case ld::Fixup::kindStoreARMDtraceCallSiteNop
:
3678 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear
:
3679 case ld::Fixup::kindStoreARM64DtraceCallSiteNop
:
3680 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear
:
3681 case ld::Fixup::kindStoreThumbDtraceCallSiteNop
:
3682 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear
:
3683 return (_options
.outputKind() == Options::kObjectFile
);
3690 bool OutputFile::isPointerToTarget(ld::Fixup::Kind kind
)
3693 case ld::Fixup::kindSetTargetAddress
:
3694 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
3695 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
3696 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
3697 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
3698 case ld::Fixup::kindLazyTarget
:
3705 bool OutputFile::isPointerFromTarget(ld::Fixup::Kind kind
)
3708 case ld::Fixup::kindSubtractTargetAddress
:
3717 uint64_t OutputFile::lookBackAddend(ld::Fixup::iterator fit
)
3719 uint64_t addend
= 0;
3720 switch ( fit
->clusterSize
) {
3721 case ld::Fixup::k1of1
:
3722 case ld::Fixup::k1of2
:
3723 case ld::Fixup::k2of2
:
3725 case ld::Fixup::k2of3
:
3727 switch ( fit
->kind
) {
3728 case ld::Fixup::kindAddAddend
:
3729 addend
+= fit
->u
.addend
;
3731 case ld::Fixup::kindSubtractAddend
:
3732 addend
-= fit
->u
.addend
;
3735 throw "unexpected fixup kind for binding";
3738 case ld::Fixup::k1of3
:
3740 switch ( fit
->kind
) {
3741 case ld::Fixup::kindAddAddend
:
3742 addend
+= fit
->u
.addend
;
3744 case ld::Fixup::kindSubtractAddend
:
3745 addend
-= fit
->u
.addend
;
3748 throw "unexpected fixup kind for binding";
3752 throw "unexpected fixup cluster size for binding";
3758 void OutputFile::generateLinkEditInfo(ld::Internal
& state
)
3760 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
3761 ld::Internal::FinalSection
* sect
= *sit
;
3762 // record end of last __TEXT section encrypted iPhoneOS apps.
3763 if ( _options
.makeEncryptable() && (strcmp(sect
->segmentName(), "__TEXT") == 0) ) {
3764 _encryptedTEXTendOffset
= pageAlign(sect
->fileOffset
+ sect
->size
);
3766 bool objc1ClassRefSection
= ( (sect
->type() == ld::Section::typeCStringPointer
)
3767 && (strcmp(sect
->sectionName(), "__cls_refs") == 0)
3768 && (strcmp(sect
->segmentName(), "__OBJC") == 0) );
3769 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
3770 const ld::Atom
* atom
= *ait
;
3772 // Record regular atoms that override a dylib's weak definitions
3773 if ( (atom
->scope() == ld::Atom::scopeGlobal
) && atom
->overridesDylibsWeakDef() ) {
3774 if ( _options
.makeCompressedDyldInfo() ) {
3775 uint8_t wtype
= BIND_TYPE_OVERRIDE_OF_WEAKDEF_IN_DYLIB
;
3776 bool nonWeakDef
= (atom
->combine() == ld::Atom::combineNever
);
3777 _weakBindingInfo
.push_back(BindingInfo(wtype
, atom
->name(), nonWeakDef
, atom
->finalAddress(), 0));
3779 this->overridesWeakExternalSymbols
= true;
3780 if ( _options
.warnWeakExports() )
3781 warning("overrides weak external symbol: %s", atom
->name());
3784 ld::Fixup
* fixupWithTarget
= NULL
;
3785 ld::Fixup
* fixupWithMinusTarget
= NULL
;
3786 ld::Fixup
* fixupWithStore
= NULL
;
3787 ld::Fixup
* fixupWithAddend
= NULL
;
3788 const ld::Atom
* target
= NULL
;
3789 const ld::Atom
* minusTarget
= NULL
;
3790 uint64_t targetAddend
= 0;
3791 uint64_t minusTargetAddend
= 0;
3792 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
3793 if ( fit
->firstInCluster() ) {
3794 fixupWithTarget
= NULL
;
3795 fixupWithMinusTarget
= NULL
;
3796 fixupWithStore
= NULL
;
3800 minusTargetAddend
= 0;
3802 if ( this->setsTarget(fit
->kind
) ) {
3803 switch ( fit
->binding
) {
3804 case ld::Fixup::bindingNone
:
3805 case ld::Fixup::bindingByNameUnbound
:
3807 case ld::Fixup::bindingByContentBound
:
3808 case ld::Fixup::bindingDirectlyBound
:
3809 fixupWithTarget
= fit
;
3810 target
= fit
->u
.target
;
3812 case ld::Fixup::bindingsIndirectlyBound
:
3813 fixupWithTarget
= fit
;
3814 target
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
3817 assert(target
!= NULL
);
3819 switch ( fit
->kind
) {
3820 case ld::Fixup::kindAddAddend
:
3821 targetAddend
= fit
->u
.addend
;
3822 fixupWithAddend
= fit
;
3824 case ld::Fixup::kindSubtractAddend
:
3825 minusTargetAddend
= fit
->u
.addend
;
3826 fixupWithAddend
= fit
;
3828 case ld::Fixup::kindSubtractTargetAddress
:
3829 switch ( fit
->binding
) {
3830 case ld::Fixup::bindingNone
:
3831 case ld::Fixup::bindingByNameUnbound
:
3833 case ld::Fixup::bindingByContentBound
:
3834 case ld::Fixup::bindingDirectlyBound
:
3835 fixupWithMinusTarget
= fit
;
3836 minusTarget
= fit
->u
.target
;
3838 case ld::Fixup::bindingsIndirectlyBound
:
3839 fixupWithMinusTarget
= fit
;
3840 minusTarget
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
3843 assert(minusTarget
!= NULL
);
3845 case ld::Fixup::kindDataInCodeStartData
:
3846 case ld::Fixup::kindDataInCodeStartJT8
:
3847 case ld::Fixup::kindDataInCodeStartJT16
:
3848 case ld::Fixup::kindDataInCodeStartJT32
:
3849 case ld::Fixup::kindDataInCodeStartJTA32
:
3850 case ld::Fixup::kindDataInCodeEnd
:
3851 hasDataInCode
= true;
3856 if ( this->isStore(fit
->kind
) ) {
3857 fixupWithStore
= fit
;
3859 if ( fit
->lastInCluster() ) {
3860 if ( (fixupWithStore
!= NULL
) && (target
!= NULL
) ) {
3861 if ( _options
.outputKind() == Options::kObjectFile
) {
3862 this->addSectionRelocs(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithAddend
, fixupWithStore
,
3863 target
, minusTarget
, targetAddend
, minusTargetAddend
);
3866 if ( _options
.makeCompressedDyldInfo() ) {
3867 this->addDyldInfo(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithStore
,
3868 target
, minusTarget
, targetAddend
, minusTargetAddend
);
3871 this->addClassicRelocs(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithStore
,
3872 target
, minusTarget
, targetAddend
, minusTargetAddend
);
3876 else if ( objc1ClassRefSection
&& (target
!= NULL
) && (fixupWithStore
== NULL
) ) {
3877 // check for class refs to lazy loaded dylibs
3878 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
3879 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
3880 throwf("illegal class reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
3889 void OutputFile::noteTextReloc(const ld::Atom
* atom
, const ld::Atom
* target
)
3891 if ( (atom
->contentType() == ld::Atom::typeStub
) || (atom
->contentType() == ld::Atom::typeStubHelper
) ) {
3892 // silently let stubs (synthesized by linker) use text relocs
3894 else if ( _options
.allowTextRelocs() ) {
3895 if ( _options
.warnAboutTextRelocs() )
3896 warning("text reloc in %s to %s", atom
->name(), target
->name());
3898 else if ( _options
.positionIndependentExecutable() && (_options
.outputKind() == Options::kDynamicExecutable
)
3899 && ((_options
.iOSVersionMin() >= ld::iOS_4_3
) || (_options
.macosxVersionMin() >= ld::mac10_7
)) ) {
3900 if ( ! this->pieDisabled
) {
3901 #if SUPPORT_ARCH_arm64
3902 if ( _options
.architecture() == CPU_TYPE_ARM64
) {
3903 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
3904 throwf("Absolute addressing not allowed in arm64 code but used in '%s' referencing '%s'", demangledName
, _options
.demangleSymbol(target
->name()));
3909 warning("PIE disabled. Absolute addressing (perhaps -mdynamic-no-pic) not allowed in code signed PIE, "
3910 "but used in %s from %s. "
3911 "To fix this warning, don't compile with -mdynamic-no-pic or link with -Wl,-no_pie",
3912 atom
->name(), atom
->file()->path());
3915 this->pieDisabled
= true;
3917 else if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) ) {
3918 throwf("illegal text-relocoation (direct reference) to (global,weak) %s in %s from %s in %s", target
->name(), target
->file()->path(), atom
->name(), atom
->file()->path());
3921 if ( (target
->file() != NULL
) && (atom
->file() != NULL
) )
3922 throwf("illegal text-relocation to '%s' in %s from '%s' in %s", target
->name(), target
->file()->path(), atom
->name(), atom
->file()->path());
3924 throwf("illegal text reloc in '%s' to '%s'", atom
->name(), target
->name());
3928 void OutputFile::addDyldInfo(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
3929 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
, ld::Fixup
* fixupWithStore
,
3930 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
3931 uint64_t targetAddend
, uint64_t minusTargetAddend
)
3933 if ( sect
->isSectionHidden() )
3936 // no need to rebase or bind PCRel stores
3937 if ( this->isPcRelStore(fixupWithStore
->kind
) ) {
3938 // as long as target is in same linkage unit
3939 if ( (target
== NULL
) || (target
->definition() != ld::Atom::definitionProxy
) ) {
3940 // make sure target is not global and weak
3941 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
)) {
3942 if ( (atom
->section().type() == ld::Section::typeCFI
)
3943 || (atom
->section().type() == ld::Section::typeDtraceDOF
)
3944 || (atom
->section().type() == ld::Section::typeUnwindInfo
) ) {
3945 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
3948 // <rdar://problem/13700961> spurious warning when weak function has reference to itself
3949 if ( fixupWithTarget
->binding
== ld::Fixup::bindingDirectlyBound
) {
3950 // ok to ignore pc-rel references within a weak function to itself
3953 // Have direct reference to weak-global. This should be an indrect reference
3954 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
3955 warning("direct access in %s to global weak symbol %s means the weak symbol cannot be overridden at runtime. "
3956 "This was likely caused by different translation units being compiled with different visibility settings.",
3957 demangledName
, _options
.demangleSymbol(target
->name()));
3963 // no need to rebase or bind PIC internal pointer diff
3964 if ( minusTarget
!= NULL
) {
3965 // with pointer diffs, both need to be in same linkage unit
3966 assert(minusTarget
->definition() != ld::Atom::definitionProxy
);
3967 assert(target
!= NULL
);
3968 assert(target
->definition() != ld::Atom::definitionProxy
);
3969 if ( target
== minusTarget
) {
3970 // This is a compile time constant and could have been optimized away by compiler
3974 // check if target of pointer-diff is global and weak
3975 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) ) {
3976 if ( (atom
->section().type() == ld::Section::typeCFI
)
3977 || (atom
->section().type() == ld::Section::typeDtraceDOF
)
3978 || (atom
->section().type() == ld::Section::typeUnwindInfo
) ) {
3979 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
3982 // Have direct reference to weak-global. This should be an indrect reference
3983 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
3984 warning("direct access in %s to global weak symbol %s means the weak symbol cannot be overridden at runtime. "
3985 "This was likely caused by different translation units being compiled with different visibility settings.",
3986 demangledName
, _options
.demangleSymbol(target
->name()));
3991 // no need to rebase or bind an atom's references to itself if the output is not slidable
3992 if ( (atom
== target
) && !_options
.outputSlidable() )
3995 // cluster has no target, so needs no rebasing or binding
3996 if ( target
== NULL
)
3999 bool inReadOnlySeg
= ((_options
.initialSegProtection(sect
->segmentName()) & VM_PROT_WRITE
) == 0);
4000 bool needsRebase
= false;
4001 bool needsBinding
= false;
4002 bool needsLazyBinding
= false;
4003 bool needsWeakBinding
= false;
4005 uint8_t rebaseType
= REBASE_TYPE_POINTER
;
4006 uint8_t type
= BIND_TYPE_POINTER
;
4007 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
4008 bool weak_import
= (fixupWithTarget
->weakImport
|| ((dylib
!= NULL
) && dylib
->forcedWeakLinked()));
4009 uint64_t address
= atom
->finalAddress() + fixupWithTarget
->offsetInAtom
;
4010 uint64_t addend
= targetAddend
- minusTargetAddend
;
4012 // special case lazy pointers
4013 if ( fixupWithTarget
->kind
== ld::Fixup::kindLazyTarget
) {
4014 assert(fixupWithTarget
->u
.target
== target
);
4015 assert(addend
== 0);
4016 // lazy dylib lazy pointers do not have any dyld info
4017 if ( atom
->section().type() == ld::Section::typeLazyDylibPointer
)
4019 // lazy binding to weak definitions are done differently
4020 // they are directly bound to target, then have a weak bind in case of a collision
4021 if ( target
->combine() == ld::Atom::combineByName
) {
4022 if ( target
->definition() == ld::Atom::definitionProxy
) {
4023 // weak def exported from another dylib
4024 // must non-lazy bind to it plus have weak binding info in case of collision
4025 needsBinding
= true;
4026 needsWeakBinding
= true;
4029 // weak def in this linkage unit.
4030 // just rebase, plus have weak binding info in case of collision
4031 // this will be done by other cluster on lazy pointer atom
4034 else if ( target
->contentType() == ld::Atom::typeResolver
) {
4035 // <rdar://problem/8553647> Hidden resolver functions should not have lazy binding info
4036 // <rdar://problem/12629331> Resolver function run before initializers when overriding the dyld shared cache
4037 // The lazy pointers used by stubs used when non-lazy binding to a resolver are not normal lazy pointers
4038 // and should not be in lazy binding info.
4039 needsLazyBinding
= false;
4042 // normal case of a pointer to non-weak-def symbol, so can lazily bind
4043 needsLazyBinding
= true;
4047 // everything except lazy pointers
4048 switch ( target
->definition() ) {
4049 case ld::Atom::definitionProxy
:
4050 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
4051 throwf("illegal data reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
4052 if ( target
->contentType() == ld::Atom::typeTLV
) {
4053 if ( sect
->type() != ld::Section::typeTLVPointers
)
4054 throwf("illegal data reference in %s to thread local variable %s in dylib %s",
4055 atom
->name(), target
->name(), dylib
->path());
4057 if ( inReadOnlySeg
)
4058 type
= BIND_TYPE_TEXT_ABSOLUTE32
;
4059 needsBinding
= true;
4060 if ( target
->combine() == ld::Atom::combineByName
)
4061 needsWeakBinding
= true;
4063 case ld::Atom::definitionRegular
:
4064 case ld::Atom::definitionTentative
:
4065 // only slideable images need rebasing info
4066 if ( _options
.outputSlidable() ) {
4069 // references to internal symbol never need binding
4070 if ( target
->scope() != ld::Atom::scopeGlobal
)
4072 // reference to global weak def needs weak binding
4073 if ( (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) )
4074 needsWeakBinding
= true;
4075 else if ( _options
.outputKind() == Options::kDynamicExecutable
) {
4076 // in main executables, the only way regular symbols are indirected is if -interposable is used
4077 if ( _options
.interposable(target
->name()) ) {
4078 needsRebase
= false;
4079 needsBinding
= true;
4083 // for flat-namespace or interposable two-level-namespace
4084 // all references to exported symbols get indirected
4085 if ( (_options
.nameSpace() != Options::kTwoLevelNameSpace
) || _options
.interposable(target
->name()) ) {
4086 // <rdar://problem/5254468> no external relocs for flat objc classes
4087 if ( strncmp(target
->name(), ".objc_class_", 12) == 0 )
4089 // no rebase info for references to global symbols that will have binding info
4090 needsRebase
= false;
4091 needsBinding
= true;
4093 else if ( _options
.forceCoalesce(target
->name()) ) {
4094 needsWeakBinding
= true;
4098 case ld::Atom::definitionAbsolute
:
4103 // <rdar://problem/13828711> if target is an import alias, use base of alias
4104 if ( target
->isAlias() && (target
->definition() == ld::Atom::definitionProxy
) ) {
4105 for (ld::Fixup::iterator fit
= target
->fixupsBegin(), end
=target
->fixupsEnd(); fit
!= end
; ++fit
) {
4106 if ( fit
->firstInCluster() ) {
4107 if ( fit
->kind
== ld::Fixup::kindNoneFollowOn
) {
4108 if ( fit
->binding
== ld::Fixup::bindingDirectlyBound
) {
4109 //fprintf(stderr, "switching import of %s to import of %s\n", target->name(), fit->u.target->name());
4110 target
= fit
->u
.target
;
4117 // record dyld info for this cluster
4118 if ( needsRebase
) {
4119 if ( inReadOnlySeg
) {
4120 noteTextReloc(atom
, target
);
4121 sect
->hasLocalRelocs
= true; // so dyld knows to change permissions on __TEXT segment
4122 rebaseType
= REBASE_TYPE_TEXT_ABSOLUTE32
;
4124 if ( _options
.sharedRegionEligible() ) {
4125 // <rdar://problem/13287063> when range checking, ignore high byte of arm64 addends
4126 uint64_t checkAddend
= addend
;
4127 if ( _options
.architecture() == CPU_TYPE_ARM64
)
4128 checkAddend
&= 0x0FFFFFFFFFFFFFFFULL
;
4129 if ( checkAddend
!= 0 ) {
4130 // make sure the addend does not cause the pointer to point outside the target's segment
4131 // if it does, update_dyld_shared_cache will not be able to put this dylib into the shared cache
4132 uint64_t targetAddress
= target
->finalAddress();
4133 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4134 ld::Internal::FinalSection
* sct
= *sit
;
4135 uint64_t sctEnd
= (sct
->address
+sct
->size
);
4136 if ( (sct
->address
<= targetAddress
) && (targetAddress
< sctEnd
) ) {
4137 if ( (targetAddress
+checkAddend
) > sctEnd
) {
4138 warning("data symbol %s from %s has pointer to %s + 0x%08llX. "
4139 "That large of an addend may disable %s from being put in the dyld shared cache.",
4140 atom
->name(), atom
->file()->path(), target
->name(), addend
, _options
.installPath() );
4146 _rebaseInfo
.push_back(RebaseInfo(rebaseType
, address
));
4148 if ( needsBinding
) {
4149 if ( inReadOnlySeg
) {
4150 noteTextReloc(atom
, target
);
4151 sect
->hasExternalRelocs
= true; // so dyld knows to change permissions on __TEXT segment
4153 _bindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4155 if ( needsLazyBinding
) {
4156 if ( _options
.bindAtLoad() )
4157 _bindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4159 _lazyBindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4161 if ( needsWeakBinding
)
4162 _weakBindingInfo
.push_back(BindingInfo(type
, 0, target
->name(), false, address
, addend
));
4166 void OutputFile::addClassicRelocs(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
4167 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
, ld::Fixup
* fixupWithStore
,
4168 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
4169 uint64_t targetAddend
, uint64_t minusTargetAddend
)
4171 if ( sect
->isSectionHidden() )
4174 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4175 if ( sect
->type() == ld::Section::typeNonLazyPointer
) {
4176 // except kexts and static pie which *do* use relocations
4177 switch (_options
.outputKind()) {
4178 case Options::kKextBundle
:
4180 case Options::kStaticExecutable
:
4181 if ( _options
.positionIndependentExecutable() )
4183 // else fall into default case
4185 assert(target
!= NULL
);
4186 assert(fixupWithTarget
!= NULL
);
4191 // no need to rebase or bind PCRel stores
4192 if ( this->isPcRelStore(fixupWithStore
->kind
) ) {
4193 // as long as target is in same linkage unit
4194 if ( (target
== NULL
) || (target
->definition() != ld::Atom::definitionProxy
) )
4198 // no need to rebase or bind PIC internal pointer diff
4199 if ( minusTarget
!= NULL
) {
4200 // with pointer diffs, both need to be in same linkage unit
4201 assert(minusTarget
->definition() != ld::Atom::definitionProxy
);
4202 assert(target
!= NULL
);
4203 assert(target
->definition() != ld::Atom::definitionProxy
);
4204 // make sure target is not global and weak
4205 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
)
4206 && (atom
->section().type() != ld::Section::typeCFI
)
4207 && (atom
->section().type() != ld::Section::typeDtraceDOF
)
4208 && (atom
->section().type() != ld::Section::typeUnwindInfo
)
4209 && (minusTarget
!= target
) ) {
4210 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4211 throwf("bad codegen, pointer diff in %s to global weak symbol %s", atom
->name(), target
->name());
4216 // cluster has no target, so needs no rebasing or binding
4217 if ( target
== NULL
)
4220 assert(_localRelocsAtom
!= NULL
);
4221 uint64_t relocAddress
= atom
->finalAddress() + fixupWithTarget
->offsetInAtom
- _localRelocsAtom
->relocBaseAddress(state
);
4223 bool inReadOnlySeg
= ( strcmp(sect
->segmentName(), "__TEXT") == 0 );
4224 bool needsLocalReloc
= false;
4225 bool needsExternReloc
= false;
4227 switch ( fixupWithStore
->kind
) {
4228 case ld::Fixup::kindLazyTarget
:
4229 // lazy pointers don't need relocs
4231 case ld::Fixup::kindStoreLittleEndian32
:
4232 case ld::Fixup::kindStoreLittleEndian64
:
4233 case ld::Fixup::kindStoreBigEndian32
:
4234 case ld::Fixup::kindStoreBigEndian64
:
4235 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4236 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4237 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
4238 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
4240 switch ( target
->definition() ) {
4241 case ld::Atom::definitionProxy
:
4242 needsExternReloc
= true;
4244 case ld::Atom::definitionRegular
:
4245 case ld::Atom::definitionTentative
:
4246 // only slideable images need local relocs
4247 if ( _options
.outputSlidable() )
4248 needsLocalReloc
= true;
4249 // references to internal symbol never need binding
4250 if ( target
->scope() != ld::Atom::scopeGlobal
)
4252 // reference to global weak def needs weak binding in dynamic images
4253 if ( (target
->combine() == ld::Atom::combineByName
)
4254 && (target
->definition() == ld::Atom::definitionRegular
)
4255 && (_options
.outputKind() != Options::kStaticExecutable
)
4256 && (_options
.outputKind() != Options::kPreload
)
4257 && (atom
!= target
) ) {
4258 needsExternReloc
= true;
4260 else if ( _options
.outputKind() == Options::kDynamicExecutable
) {
4261 // in main executables, the only way regular symbols are indirected is if -interposable is used
4262 if ( _options
.interposable(target
->name()) )
4263 needsExternReloc
= true;
4266 // for flat-namespace or interposable two-level-namespace
4267 // all references to exported symbols get indirected
4268 if ( (_options
.nameSpace() != Options::kTwoLevelNameSpace
) || _options
.interposable(target
->name()) ) {
4269 // <rdar://problem/5254468> no external relocs for flat objc classes
4270 if ( strncmp(target
->name(), ".objc_class_", 12) == 0 )
4272 // no rebase info for references to global symbols that will have binding info
4273 needsExternReloc
= true;
4276 if ( needsExternReloc
)
4277 needsLocalReloc
= false;
4279 case ld::Atom::definitionAbsolute
:
4282 if ( needsExternReloc
) {
4283 if ( inReadOnlySeg
)
4284 noteTextReloc(atom
, target
);
4285 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
4286 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
4287 throwf("illegal data reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
4288 _externalRelocsAtom
->addExternalPointerReloc(relocAddress
, target
);
4289 sect
->hasExternalRelocs
= true;
4290 fixupWithTarget
->contentAddendOnly
= true;
4292 else if ( needsLocalReloc
) {
4293 assert(target
!= NULL
);
4294 if ( inReadOnlySeg
)
4295 noteTextReloc(atom
, target
);
4296 _localRelocsAtom
->addPointerReloc(relocAddress
, target
->machoSection());
4297 sect
->hasLocalRelocs
= true;
4300 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
4301 #if SUPPORT_ARCH_arm64
4302 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
4304 if ( _options
.outputKind() == Options::kKextBundle
) {
4305 assert(target
!= NULL
);
4306 if ( target
->definition() == ld::Atom::definitionProxy
) {
4307 _externalRelocsAtom
->addExternalCallSiteReloc(relocAddress
, target
);
4308 fixupWithStore
->contentAddendOnly
= true;
4313 case ld::Fixup::kindStoreARMLow16
:
4314 case ld::Fixup::kindStoreThumbLow16
:
4315 // no way to encode rebasing of binding for these instructions
4316 if ( _options
.outputSlidable() || (target
->definition() == ld::Atom::definitionProxy
) )
4317 throwf("no supported runtime lo16 relocation in %s from %s to %s", atom
->name(), atom
->file()->path(), target
->name());
4320 case ld::Fixup::kindStoreARMHigh16
:
4321 case ld::Fixup::kindStoreThumbHigh16
:
4322 // no way to encode rebasing of binding for these instructions
4323 if ( _options
.outputSlidable() || (target
->definition() == ld::Atom::definitionProxy
) )
4324 throwf("no supported runtime hi16 relocation in %s from %s to %s", atom
->name(), atom
->file()->path(), target
->name());
4333 bool OutputFile::useExternalSectionReloc(const ld::Atom
* atom
, const ld::Atom
* target
, ld::Fixup
* fixupWithTarget
)
4335 if ( (_options
.architecture() == CPU_TYPE_X86_64
) || (_options
.architecture() == CPU_TYPE_ARM64
) ) {
4336 // x86_64 and ARM64 use external relocations for everthing that has a symbol
4337 return ( target
->symbolTableInclusion() != ld::Atom::symbolTableNotIn
);
4340 // <rdar://problem/9513487> support arm branch interworking in -r mode
4341 if ( (_options
.architecture() == CPU_TYPE_ARM
) && (_options
.outputKind() == Options::kObjectFile
) ) {
4342 if ( atom
->isThumb() != target
->isThumb() ) {
4343 switch ( fixupWithTarget
->kind
) {
4344 // have branch that switches mode, then might be 'b' not 'bl'
4345 // Force external relocation, since no way to do local reloc for 'b'
4346 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
4347 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
4355 if ( (_options
.architecture() == CPU_TYPE_I386
) && (_options
.outputKind() == Options::kObjectFile
) ) {
4356 if ( target
->contentType() == ld::Atom::typeTLV
)
4360 // most architectures use external relocations only for references
4361 // to a symbol in another translation unit or for references to "weak symbols" or tentative definitions
4362 assert(target
!= NULL
);
4363 if ( target
->definition() == ld::Atom::definitionProxy
)
4365 if ( (target
->definition() == ld::Atom::definitionTentative
) && ! _options
.makeTentativeDefinitionsReal() )
4367 if ( target
->scope() != ld::Atom::scopeGlobal
)
4369 if ( (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) )
4374 bool OutputFile::useSectionRelocAddend(ld::Fixup
* fixupWithTarget
)
4376 #if SUPPORT_ARCH_arm64
4377 if ( _options
.architecture() == CPU_TYPE_ARM64
) {
4378 switch ( fixupWithTarget
->kind
) {
4379 case ld::Fixup::kindStoreARM64Branch26
:
4380 case ld::Fixup::kindStoreARM64Page21
:
4381 case ld::Fixup::kindStoreARM64PageOff12
:
4394 void OutputFile::addSectionRelocs(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
4395 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
,
4396 ld::Fixup
* fixupWithAddend
, ld::Fixup
* fixupWithStore
,
4397 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
4398 uint64_t targetAddend
, uint64_t minusTargetAddend
)
4400 if ( sect
->isSectionHidden() )
4403 // in -r mode where there will be no labels on __eh_frame section, there is no need for relocations
4404 if ( (sect
->type() == ld::Section::typeCFI
) && _options
.removeEHLabels() )
4407 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4408 if ( sect
->type() == ld::Section::typeNonLazyPointer
)
4411 // tentative defs don't have any relocations
4412 if ( sect
->type() == ld::Section::typeTentativeDefs
)
4415 assert(target
!= NULL
);
4416 assert(fixupWithTarget
!= NULL
);
4417 bool targetUsesExternalReloc
= this->useExternalSectionReloc(atom
, target
, fixupWithTarget
);
4418 bool minusTargetUsesExternalReloc
= (minusTarget
!= NULL
) && this->useExternalSectionReloc(atom
, minusTarget
, fixupWithMinusTarget
);
4420 // in x86_64 and arm64 .o files an external reloc means the content contains just the addend
4421 if ( (_options
.architecture() == CPU_TYPE_X86_64
) ||(_options
.architecture() == CPU_TYPE_ARM64
) ) {
4422 if ( targetUsesExternalReloc
) {
4423 fixupWithTarget
->contentAddendOnly
= true;
4424 fixupWithStore
->contentAddendOnly
= true;
4425 if ( this->useSectionRelocAddend(fixupWithStore
) && (fixupWithAddend
!= NULL
) )
4426 fixupWithAddend
->contentIgnoresAddend
= true;
4428 if ( minusTargetUsesExternalReloc
)
4429 fixupWithMinusTarget
->contentAddendOnly
= true;
4432 // for other archs, content is addend only with (non pc-rel) pointers
4433 // pc-rel instructions are funny. If the target is _foo+8 and _foo is
4434 // external, then the pc-rel instruction *evalutates* to the address 8.
4435 if ( targetUsesExternalReloc
) {
4436 // TLV support for i386 acts like RIP relative addressing
4437 // The addend is the offset from the PICBase to the end of the instruction
4438 if ( (_options
.architecture() == CPU_TYPE_I386
)
4439 && (_options
.outputKind() == Options::kObjectFile
)
4440 && (fixupWithStore
->kind
== ld::Fixup::kindStoreX86PCRel32TLVLoad
) ) {
4441 fixupWithTarget
->contentAddendOnly
= true;
4442 fixupWithStore
->contentAddendOnly
= true;
4444 else if ( isPcRelStore(fixupWithStore
->kind
) ) {
4445 fixupWithTarget
->contentDetlaToAddendOnly
= true;
4446 fixupWithStore
->contentDetlaToAddendOnly
= true;
4448 else if ( minusTarget
== NULL
){
4449 fixupWithTarget
->contentAddendOnly
= true;
4450 fixupWithStore
->contentAddendOnly
= true;
4455 if ( fixupWithStore
!= NULL
) {
4456 _sectionsRelocationsAtom
->addSectionReloc(sect
, fixupWithStore
->kind
, atom
, fixupWithStore
->offsetInAtom
,
4457 targetUsesExternalReloc
, minusTargetUsesExternalReloc
,
4458 target
, targetAddend
, minusTarget
, minusTargetAddend
);
4463 void OutputFile::makeSplitSegInfo(ld::Internal
& state
)
4465 if ( !_options
.sharedRegionEligible() )
4468 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4469 ld::Internal::FinalSection
* sect
= *sit
;
4470 if ( sect
->isSectionHidden() )
4472 if ( strcmp(sect
->segmentName(), "__TEXT") != 0 )
4474 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4475 const ld::Atom
* atom
= *ait
;
4476 const ld::Atom
* target
= NULL
;
4477 const ld::Atom
* fromTarget
= NULL
;
4478 uint64_t accumulator
= 0;
4480 bool hadSubtract
= false;
4481 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
4482 if ( fit
->firstInCluster() )
4484 if ( this->setsTarget(fit
->kind
) ) {
4485 accumulator
= addressOf(state
, fit
, &target
);
4486 thumbTarget
= targetIsThumb(state
, fit
);
4490 switch ( fit
->kind
) {
4491 case ld::Fixup::kindSubtractTargetAddress
:
4492 accumulator
-= addressOf(state
, fit
, &fromTarget
);
4495 case ld::Fixup::kindAddAddend
:
4496 accumulator
+= fit
->u
.addend
;
4498 case ld::Fixup::kindSubtractAddend
:
4499 accumulator
-= fit
->u
.addend
;
4501 case ld::Fixup::kindStoreBigEndian32
:
4502 case ld::Fixup::kindStoreLittleEndian32
:
4503 case ld::Fixup::kindStoreLittleEndian64
:
4504 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4505 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4506 // if no subtract, then this is an absolute pointer which means
4507 // there is also a text reloc which update_dyld_shared_cache will use.
4508 if ( ! hadSubtract
)
4511 case ld::Fixup::kindStoreX86PCRel32
:
4512 case ld::Fixup::kindStoreX86PCRel32_1
:
4513 case ld::Fixup::kindStoreX86PCRel32_2
:
4514 case ld::Fixup::kindStoreX86PCRel32_4
:
4515 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
4516 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
4517 case ld::Fixup::kindStoreX86PCRel32GOT
:
4518 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
4519 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
4520 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
4521 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
4522 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
4523 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
4524 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
4525 case ld::Fixup::kindStoreARMLow16
:
4526 case ld::Fixup::kindStoreThumbLow16
:
4527 #if SUPPORT_ARCH_arm64
4528 case ld::Fixup::kindStoreARM64Page21
:
4529 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
4530 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
4531 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
4532 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
4533 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
4534 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
4535 case ld::Fixup::kindStoreARM64PCRelToGOT
:
4537 assert(target
!= NULL
);
4538 if ( strcmp(sect
->segmentName(), target
->section().segmentName()) != 0 ) {
4539 _splitSegInfos
.push_back(SplitSegInfoEntry(atom
->finalAddress()+fit
->offsetInAtom
,fit
->kind
));
4542 case ld::Fixup::kindStoreARMHigh16
:
4543 case ld::Fixup::kindStoreThumbHigh16
:
4544 assert(target
!= NULL
);
4545 if ( strcmp(sect
->segmentName(), target
->section().segmentName()) != 0 ) {
4546 // hi16 needs to know upper 4-bits of low16 to compute carry
4547 uint32_t extra
= (accumulator
>> 12) & 0xF;
4548 _splitSegInfos
.push_back(SplitSegInfoEntry(atom
->finalAddress()+fit
->offsetInAtom
,fit
->kind
, extra
));
4551 case ld::Fixup::kindSetTargetImageOffset
:
4552 accumulator
= addressOf(state
, fit
, &target
);
4553 assert(target
!= NULL
);
4564 void OutputFile::makeSplitSegInfoV2(ld::Internal
& state
)
4566 static const bool log
= false;
4567 if ( !_options
.sharedRegionEligible() )
4570 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4571 ld::Internal::FinalSection
* sect
= *sit
;
4572 if ( sect
->isSectionHidden() )
4574 bool codeSection
= (sect
->type() == ld::Section::typeCode
);
4575 if (log
) fprintf(stderr
, "sect: %s, address=0x%llX\n", sect
->sectionName(), sect
->address
);
4576 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4577 const ld::Atom
* atom
= *ait
;
4578 const ld::Atom
* target
= NULL
;
4579 const ld::Atom
* fromTarget
= NULL
;
4580 uint32_t picBase
= 0;
4581 uint64_t accumulator
= 0;
4583 bool hadSubtract
= false;
4584 uint8_t fromSectionIndex
= atom
->machoSection();
4585 uint8_t toSectionIndex
;
4587 uint64_t fromOffset
= 0;
4588 uint64_t toOffset
= 0;
4589 uint64_t addend
= 0;
4590 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
4591 if ( fit
->firstInCluster() ) {
4596 toSectionIndex
= 255;
4597 fromOffset
= atom
->finalAddress() + fit
->offsetInAtom
- sect
->address
;
4599 if ( this->setsTarget(fit
->kind
) ) {
4600 accumulator
= addressOf(state
, fit
, &target
);
4601 thumbTarget
= targetIsThumb(state
, fit
);
4604 toOffset
= accumulator
- state
.atomToSection
[target
]->address
;
4605 if ( target
->definition() != ld::Atom::definitionProxy
) {
4606 if ( target
->section().type() == ld::Section::typeMachHeader
)
4609 toSectionIndex
= target
->machoSection();
4612 switch ( fit
->kind
) {
4613 case ld::Fixup::kindSubtractTargetAddress
:
4614 accumulator
-= addressOf(state
, fit
, &fromTarget
);
4617 case ld::Fixup::kindAddAddend
:
4618 accumulator
+= fit
->u
.addend
;
4619 addend
= fit
->u
.addend
;
4621 case ld::Fixup::kindSubtractAddend
:
4622 accumulator
-= fit
->u
.addend
;
4623 picBase
= fit
->u
.addend
;
4625 case ld::Fixup::kindSetLazyOffset
:
4627 case ld::Fixup::kindStoreBigEndian32
:
4628 case ld::Fixup::kindStoreLittleEndian32
:
4629 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4630 if ( kind
!= DYLD_CACHE_ADJ_V2_IMAGE_OFF_32
) {
4632 kind
= DYLD_CACHE_ADJ_V2_DELTA_32
;
4634 kind
= DYLD_CACHE_ADJ_V2_POINTER_32
;
4637 case ld::Fixup::kindStoreLittleEndian64
:
4638 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4640 kind
= DYLD_CACHE_ADJ_V2_DELTA_64
;
4642 kind
= DYLD_CACHE_ADJ_V2_POINTER_64
;
4644 case ld::Fixup::kindStoreX86PCRel32
:
4645 case ld::Fixup::kindStoreX86PCRel32_1
:
4646 case ld::Fixup::kindStoreX86PCRel32_2
:
4647 case ld::Fixup::kindStoreX86PCRel32_4
:
4648 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
4649 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
4650 case ld::Fixup::kindStoreX86PCRel32GOT
:
4651 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
4652 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
4653 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
4654 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
4655 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
4656 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
4657 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
4658 #if SUPPORT_ARCH_arm64
4659 case ld::Fixup::kindStoreARM64PCRelToGOT
:
4661 if ( (fromSectionIndex
!= toSectionIndex
) || !codeSection
)
4662 kind
= DYLD_CACHE_ADJ_V2_DELTA_32
;
4664 #if SUPPORT_ARCH_arm64
4665 case ld::Fixup::kindStoreARM64Page21
:
4666 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
4667 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
4668 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
4669 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
4670 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
4671 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
4672 if ( fromSectionIndex
!= toSectionIndex
)
4673 kind
= DYLD_CACHE_ADJ_V2_ARM64_ADRP
;
4675 case ld::Fixup::kindStoreARM64PageOff12
:
4676 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
4677 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
4678 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
4679 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
4680 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
4681 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
4682 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
4683 if ( fromSectionIndex
!= toSectionIndex
)
4684 kind
= DYLD_CACHE_ADJ_V2_ARM64_OFF12
;
4686 case ld::Fixup::kindStoreARM64Branch26
:
4687 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
4688 if ( fromSectionIndex
!= toSectionIndex
)
4689 kind
= DYLD_CACHE_ADJ_V2_ARM64_BR26
;
4692 case ld::Fixup::kindStoreARMHigh16
:
4693 case ld::Fixup::kindStoreARMLow16
:
4694 if ( (fromSectionIndex
!= toSectionIndex
) && (fromTarget
== atom
) ) {
4695 kind
= DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT
;
4698 case ld::Fixup::kindStoreARMBranch24
:
4699 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
4700 if ( fromSectionIndex
!= toSectionIndex
)
4701 kind
= DYLD_CACHE_ADJ_V2_ARM_BR24
;
4703 case ld::Fixup::kindStoreThumbLow16
:
4704 case ld::Fixup::kindStoreThumbHigh16
:
4705 if ( (fromSectionIndex
!= toSectionIndex
) && (fromTarget
== atom
) ) {
4706 kind
= DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT
;
4709 case ld::Fixup::kindStoreThumbBranch22
:
4710 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
4711 if ( fromSectionIndex
!= toSectionIndex
)
4712 kind
= DYLD_CACHE_ADJ_V2_THUMB_BR22
;
4714 case ld::Fixup::kindSetTargetImageOffset
:
4715 kind
= DYLD_CACHE_ADJ_V2_IMAGE_OFF_32
;
4716 accumulator
= addressOf(state
, fit
, &target
);
4717 assert(target
!= NULL
);
4718 toSectionIndex
= target
->machoSection();
4719 toOffset
= accumulator
- state
.atomToSection
[target
]->address
;
4725 if ( fit
->lastInCluster() ) {
4726 if ( (kind
!= 0) && (target
!= NULL
) && (target
->definition() != ld::Atom::definitionProxy
) ) {
4727 if ( !hadSubtract
&& addend
)
4729 assert(toSectionIndex
!= 255);
4730 if (log
) fprintf(stderr
, "from (%d.%s + 0x%llX) to (%d.%s + 0x%llX), kind=%d, atomAddr=0x%llX, sectAddr=0x%llx\n",
4731 fromSectionIndex
, sect
->sectionName(), fromOffset
, toSectionIndex
, state
.atomToSection
[target
]->sectionName(),
4732 toOffset
, kind
, atom
->finalAddress(), sect
->address
);
4733 _splitSegV2Infos
.push_back(SplitSegInfoV2Entry(fromSectionIndex
, fromOffset
, toSectionIndex
, toOffset
, kind
));
4742 void OutputFile::writeMapFile(ld::Internal
& state
)
4744 if ( _options
.generatedMapPath() != NULL
) {
4745 FILE* mapFile
= fopen(_options
.generatedMapPath(), "w");
4746 if ( mapFile
!= NULL
) {
4747 // write output path
4748 fprintf(mapFile
, "# Path: %s\n", _options
.outputFilePath());
4749 // write output architecure
4750 fprintf(mapFile
, "# Arch: %s\n", _options
.architectureName());
4752 //if ( fUUIDAtom != NULL ) {
4753 // const uint8_t* uuid = fUUIDAtom->getUUID();
4754 // fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
4755 // uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
4756 // uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
4758 // write table of object files
4759 std::map
<const ld::File
*, ld::File::Ordinal
> readerToOrdinal
;
4760 std::map
<ld::File::Ordinal
, const ld::File
*> ordinalToReader
;
4761 std::map
<const ld::File
*, uint32_t> readerToFileOrdinal
;
4762 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4763 ld::Internal::FinalSection
* sect
= *sit
;
4764 if ( sect
->isSectionHidden() )
4766 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4767 const ld::Atom
* atom
= *ait
;
4768 const ld::File
* reader
= atom
->file();
4769 if ( reader
== NULL
)
4771 ld::File::Ordinal readerOrdinal
= reader
->ordinal();
4772 std::map
<const ld::File
*, ld::File::Ordinal
>::iterator pos
= readerToOrdinal
.find(reader
);
4773 if ( pos
== readerToOrdinal
.end() ) {
4774 readerToOrdinal
[reader
] = readerOrdinal
;
4775 ordinalToReader
[readerOrdinal
] = reader
;
4779 fprintf(mapFile
, "# Object files:\n");
4780 fprintf(mapFile
, "[%3u] %s\n", 0, "linker synthesized");
4781 uint32_t fileIndex
= 1;
4782 for(std::map
<ld::File::Ordinal
, const ld::File
*>::iterator it
= ordinalToReader
.begin(); it
!= ordinalToReader
.end(); ++it
) {
4783 fprintf(mapFile
, "[%3u] %s\n", fileIndex
, it
->second
->path());
4784 readerToFileOrdinal
[it
->second
] = fileIndex
++;
4786 // write table of sections
4787 fprintf(mapFile
, "# Sections:\n");
4788 fprintf(mapFile
, "# Address\tSize \tSegment\tSection\n");
4789 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4790 ld::Internal::FinalSection
* sect
= *sit
;
4791 if ( sect
->isSectionHidden() )
4793 fprintf(mapFile
, "0x%08llX\t0x%08llX\t%s\t%s\n", sect
->address
, sect
->size
,
4794 sect
->segmentName(), sect
->sectionName());
4796 // write table of symbols
4797 fprintf(mapFile
, "# Symbols:\n");
4798 fprintf(mapFile
, "# Address\tSize \tFile Name\n");
4799 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4800 ld::Internal::FinalSection
* sect
= *sit
;
4801 if ( sect
->isSectionHidden() )
4803 //bool isCstring = (sect->type() == ld::Section::typeCString);
4804 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4806 const ld::Atom
* atom
= *ait
;
4807 const char* name
= atom
->name();
4808 // don't add auto-stripped aliases to .map file
4809 if ( (atom
->size() == 0) && (atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
) )
4811 if ( atom
->contentType() == ld::Atom::typeCString
) {
4812 strcpy(buffer
, "literal string: ");
4813 strlcat(buffer
, (char*)atom
->rawContentPointer(), 4096);
4816 else if ( (atom
->contentType() == ld::Atom::typeCFI
) && (strcmp(name
, "FDE") == 0) ) {
4817 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
4818 if ( (fit
->kind
== ld::Fixup::kindSetTargetAddress
) && (fit
->clusterSize
== ld::Fixup::k1of4
) ) {
4819 if ( (fit
->binding
== ld::Fixup::bindingDirectlyBound
)
4820 && (fit
->u
.target
->section().type() == ld::Section::typeCode
) ) {
4821 strcpy(buffer
, "FDE for: ");
4822 strlcat(buffer
, fit
->u
.target
->name(), 4096);
4828 else if ( atom
->contentType() == ld::Atom::typeNonLazyPointer
) {
4829 strcpy(buffer
, "non-lazy-pointer");
4830 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
4831 if ( fit
->binding
== ld::Fixup::bindingsIndirectlyBound
) {
4832 strcpy(buffer
, "non-lazy-pointer-to: ");
4833 strlcat(buffer
, state
.indirectBindingTable
[fit
->u
.bindingIndex
]->name(), 4096);
4836 else if ( fit
->binding
== ld::Fixup::bindingDirectlyBound
) {
4837 strcpy(buffer
, "non-lazy-pointer-to-local: ");
4838 strlcat(buffer
, fit
->u
.target
->name(), 4096);
4844 fprintf(mapFile
, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom
->finalAddress(), atom
->size(),
4845 readerToFileOrdinal
[atom
->file()], name
);
4851 warning("could not write map file: %s\n", _options
.generatedMapPath());
4856 // used to sort atoms with debug notes
4857 class DebugNoteSorter
4860 bool operator()(const ld::Atom
* left
, const ld::Atom
* right
) const
4862 // first sort by reader
4863 ld::File::Ordinal leftFileOrdinal
= left
->file()->ordinal();
4864 ld::File::Ordinal rightFileOrdinal
= right
->file()->ordinal();
4865 if ( leftFileOrdinal
!= rightFileOrdinal
)
4866 return (leftFileOrdinal
< rightFileOrdinal
);
4868 // then sort by atom objectAddress
4869 uint64_t leftAddr
= left
->finalAddress();
4870 uint64_t rightAddr
= right
->finalAddress();
4871 return leftAddr
< rightAddr
;
4876 const char* OutputFile::assureFullPath(const char* path
)
4878 if ( path
[0] == '/' )
4880 char cwdbuff
[MAXPATHLEN
];
4881 if ( getcwd(cwdbuff
, MAXPATHLEN
) != NULL
) {
4883 asprintf(&result
, "%s/%s", cwdbuff
, path
);
4884 if ( result
!= NULL
)
// Returns the last-modification time of 'path', or 0 if the file cannot
// be stat'ed (missing file, permission error, etc.).
static time_t fileModTime(const char* path) {
	struct stat statBuffer;
	if ( stat(path, &statBuffer) == 0 ) {
		return statBuffer.st_mtime;
	}
	return 0;
}
4899 void OutputFile::synthesizeDebugNotes(ld::Internal
& state
)
4901 // -S means don't synthesize debug map
4902 if ( _options
.debugInfoStripping() == Options::kDebugInfoNone
)
4904 // make a vector of atoms that come from files compiled with dwarf debug info
4905 std::vector
<const ld::Atom
*> atomsNeedingDebugNotes
;
4906 std::set
<const ld::Atom
*> atomsWithStabs
;
4907 atomsNeedingDebugNotes
.reserve(1024);
4908 const ld::relocatable::File
* objFile
= NULL
;
4909 bool objFileHasDwarf
= false;
4910 bool objFileHasStabs
= false;
4911 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4912 ld::Internal::FinalSection
* sect
= *sit
;
4913 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4914 const ld::Atom
* atom
= *ait
;
4915 // no stabs for atoms that would not be in the symbol table
4916 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotIn
)
4918 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
)
4920 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel
)
4922 // no stabs for absolute symbols
4923 if ( atom
->definition() == ld::Atom::definitionAbsolute
)
4925 // no stabs for .eh atoms
4926 if ( atom
->contentType() == ld::Atom::typeCFI
)
4928 // no stabs for string literal atoms
4929 if ( atom
->contentType() == ld::Atom::typeCString
)
4931 // no stabs for kernel dtrace probes
4932 if ( (_options
.outputKind() == Options::kStaticExecutable
) && (strncmp(atom
->name(), "__dtrace_probe$", 15) == 0) )
4934 const ld::File
* file
= atom
->file();
4935 if ( file
!= NULL
) {
4936 if ( file
!= objFile
) {
4937 objFileHasDwarf
= false;
4938 objFileHasStabs
= false;
4939 objFile
= dynamic_cast<const ld::relocatable::File
*>(file
);
4940 if ( objFile
!= NULL
) {
4941 switch ( objFile
->debugInfo() ) {
4942 case ld::relocatable::File::kDebugInfoNone
:
4944 case ld::relocatable::File::kDebugInfoDwarf
:
4945 objFileHasDwarf
= true;
4947 case ld::relocatable::File::kDebugInfoStabs
:
4948 case ld::relocatable::File::kDebugInfoStabsUUID
:
4949 objFileHasStabs
= true;
4954 if ( objFileHasDwarf
)
4955 atomsNeedingDebugNotes
.push_back(atom
);
4956 if ( objFileHasStabs
)
4957 atomsWithStabs
.insert(atom
);
4962 // sort by file ordinal then atom ordinal
4963 std::sort(atomsNeedingDebugNotes
.begin(), atomsNeedingDebugNotes
.end(), DebugNoteSorter());
4965 // <rdar://problem/17689030> Add -add_ast_path option to linker which add N_AST stab entry to output
4966 const std::vector
<const char*>& astPaths
= _options
.astFilePaths();
4967 for (std::vector
<const char*>::const_iterator it
=astPaths
.begin(); it
!= astPaths
.end(); it
++) {
4968 const char* path
= *it
;
4970 ld::relocatable::File::Stab astStab
;
4971 astStab
.atom
= NULL
;
4972 astStab
.type
= N_AST
;
4975 astStab
.value
= fileModTime(path
);
4976 astStab
.string
= path
;
4977 state
.stabs
.push_back(astStab
);
4980 // synthesize "debug notes" and add them to master stabs vector
4981 const char* dirPath
= NULL
;
4982 const char* filename
= NULL
;
4983 bool wroteStartSO
= false;
4984 state
.stabs
.reserve(atomsNeedingDebugNotes
.size()*4);
4985 std::unordered_set
<const char*, CStringHash
, CStringEquals
> seenFiles
;
4986 for (std::vector
<const ld::Atom
*>::iterator it
=atomsNeedingDebugNotes
.begin(); it
!= atomsNeedingDebugNotes
.end(); it
++) {
4987 const ld::Atom
* atom
= *it
;
4988 const ld::File
* atomFile
= atom
->file();
4989 const ld::relocatable::File
* atomObjFile
= dynamic_cast<const ld::relocatable::File
*>(atomFile
);
4990 //fprintf(stderr, "debug note for %s\n", atom->name());
4991 const char* newPath
= atom
->translationUnitSource();
4992 if ( newPath
!= NULL
) {
4993 const char* newDirPath
;
4994 const char* newFilename
;
4995 const char* lastSlash
= strrchr(newPath
, '/');
4996 if ( lastSlash
== NULL
)
4998 newFilename
= lastSlash
+1;
4999 char* temp
= strdup(newPath
);
5001 // gdb like directory SO's to end in '/', but dwarf DW_AT_comp_dir usually does not have trailing '/'
5002 temp
[lastSlash
-newPath
+1] = '\0';
5003 // need SO's whenever the translation unit source file changes
5004 if ( (filename
== NULL
) || (strcmp(newFilename
,filename
) != 0) || (strcmp(newDirPath
,dirPath
) != 0)) {
5005 if ( filename
!= NULL
) {
5006 // translation unit change, emit ending SO
5007 ld::relocatable::File::Stab endFileStab
;
5008 endFileStab
.atom
= NULL
;
5009 endFileStab
.type
= N_SO
;
5010 endFileStab
.other
= 1;
5011 endFileStab
.desc
= 0;
5012 endFileStab
.value
= 0;
5013 endFileStab
.string
= "";
5014 state
.stabs
.push_back(endFileStab
);
5016 // new translation unit, emit start SO's
5017 ld::relocatable::File::Stab dirPathStab
;
5018 dirPathStab
.atom
= NULL
;
5019 dirPathStab
.type
= N_SO
;
5020 dirPathStab
.other
= 0;
5021 dirPathStab
.desc
= 0;
5022 dirPathStab
.value
= 0;
5023 dirPathStab
.string
= newDirPath
;
5024 state
.stabs
.push_back(dirPathStab
);
5025 ld::relocatable::File::Stab fileStab
;
5026 fileStab
.atom
= NULL
;
5027 fileStab
.type
= N_SO
;
5031 fileStab
.string
= newFilename
;
5032 state
.stabs
.push_back(fileStab
);
5033 // Synthesize OSO for start of file
5034 ld::relocatable::File::Stab objStab
;
5035 objStab
.atom
= NULL
;
5036 objStab
.type
= N_OSO
;
5037 // <rdar://problem/6337329> linker should put cpusubtype in n_sect field of nlist entry for N_OSO debug note entries
5038 objStab
.other
= atomFile
->cpuSubType();
5040 if ( atomObjFile
!= NULL
) {
5041 objStab
.string
= assureFullPath(atomObjFile
->debugInfoPath());
5042 objStab
.value
= atomObjFile
->debugInfoModificationTime();
5045 objStab
.string
= assureFullPath(atomFile
->path());
5046 objStab
.value
= atomFile
->modificationTime();
5048 state
.stabs
.push_back(objStab
);
5049 wroteStartSO
= true;
5050 // add the source file path to seenFiles so it does not show up in SOLs
5051 seenFiles
.insert(newFilename
);
5053 asprintf(&fullFilePath
, "%s%s", newDirPath
, newFilename
);
5054 // add both leaf path and full path
5055 seenFiles
.insert(fullFilePath
);
5057 filename
= newFilename
;
5058 dirPath
= newDirPath
;
5059 if ( atom
->section().type() == ld::Section::typeCode
) {
5060 // Synthesize BNSYM and start FUN stabs
5061 ld::relocatable::File::Stab beginSym
;
5062 beginSym
.atom
= atom
;
5063 beginSym
.type
= N_BNSYM
;
5067 beginSym
.string
= "";
5068 state
.stabs
.push_back(beginSym
);
5069 ld::relocatable::File::Stab startFun
;
5070 startFun
.atom
= atom
;
5071 startFun
.type
= N_FUN
;
5075 startFun
.string
= atom
->name();
5076 state
.stabs
.push_back(startFun
);
5077 // Synthesize any SOL stabs needed
5078 const char* curFile
= NULL
;
5079 for (ld::Atom::LineInfo::iterator lit
= atom
->beginLineInfo(); lit
!= atom
->endLineInfo(); ++lit
) {
5080 if ( lit
->fileName
!= curFile
) {
5081 if ( seenFiles
.count(lit
->fileName
) == 0 ) {
5082 seenFiles
.insert(lit
->fileName
);
5083 ld::relocatable::File::Stab sol
;
5089 sol
.string
= lit
->fileName
;
5090 state
.stabs
.push_back(sol
);
5092 curFile
= lit
->fileName
;
5095 // Synthesize end FUN and ENSYM stabs
5096 ld::relocatable::File::Stab endFun
;
5098 endFun
.type
= N_FUN
;
5103 state
.stabs
.push_back(endFun
);
5104 ld::relocatable::File::Stab endSym
;
5106 endSym
.type
= N_ENSYM
;
5111 state
.stabs
.push_back(endSym
);
5114 ld::relocatable::File::Stab globalsStab
;
5115 const char* name
= atom
->name();
5116 if ( atom
->scope() == ld::Atom::scopeTranslationUnit
) {
5117 // Synthesize STSYM stab for statics
5118 globalsStab
.atom
= atom
;
5119 globalsStab
.type
= N_STSYM
;
5120 globalsStab
.other
= 1;
5121 globalsStab
.desc
= 0;
5122 globalsStab
.value
= 0;
5123 globalsStab
.string
= name
;
5124 state
.stabs
.push_back(globalsStab
);
5127 // Synthesize GSYM stab for other globals
5128 globalsStab
.atom
= atom
;
5129 globalsStab
.type
= N_GSYM
;
5130 globalsStab
.other
= 1;
5131 globalsStab
.desc
= 0;
5132 globalsStab
.value
= 0;
5133 globalsStab
.string
= name
;
5134 state
.stabs
.push_back(globalsStab
);
5140 if ( wroteStartSO
) {
5142 ld::relocatable::File::Stab endFileStab
;
5143 endFileStab
.atom
= NULL
;
5144 endFileStab
.type
= N_SO
;
5145 endFileStab
.other
= 1;
5146 endFileStab
.desc
= 0;
5147 endFileStab
.value
= 0;
5148 endFileStab
.string
= "";
5149 state
.stabs
.push_back(endFileStab
);
5152 // copy any stabs from .o file
5153 std::set
<const ld::File
*> filesSeenWithStabs
;
5154 for (std::set
<const ld::Atom
*>::iterator it
=atomsWithStabs
.begin(); it
!= atomsWithStabs
.end(); it
++) {
5155 const ld::Atom
* atom
= *it
;
5156 objFile
= dynamic_cast<const ld::relocatable::File
*>(atom
->file());
5157 if ( objFile
!= NULL
) {
5158 if ( filesSeenWithStabs
.count(objFile
) == 0 ) {
5159 filesSeenWithStabs
.insert(objFile
);
5160 const std::vector
<ld::relocatable::File::Stab
>* stabs
= objFile
->stabs();
5161 if ( stabs
!= NULL
) {
5162 for(std::vector
<ld::relocatable::File::Stab
>::const_iterator sit
= stabs
->begin(); sit
!= stabs
->end(); ++sit
) {
5163 ld::relocatable::File::Stab stab
= *sit
;
5164 // ignore stabs associated with atoms that were dead stripped or coalesced away
5165 if ( (sit
->atom
!= NULL
) && (atomsWithStabs
.count(sit
->atom
) == 0) )
5167 // <rdar://problem/8284718> Value of N_SO stabs should be address of first atom from translation unit
5168 if ( (stab
.type
== N_SO
) && (stab
.string
!= NULL
) && (stab
.string
[0] != '\0') ) {
5171 state
.stabs
.push_back(stab
);