1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-*
3 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
27 #include <sys/types.h>
30 #include <sys/sysctl.h>
31 #include <sys/param.h>
32 #include <sys/mount.h>
37 #include <mach/mach_time.h>
38 #include <mach/vm_statistics.h>
39 #include <mach/mach_init.h>
40 #include <mach/mach_host.h>
41 #include <uuid/uuid.h>
43 #include <mach-o/dyld.h>
44 #include <mach-o/fat.h>
53 #include <unordered_set>
55 #include <CommonCrypto/CommonDigest.h>
56 #include <AvailabilityMacros.h>
58 #include "MachOTrie.hpp"
62 #include "OutputFile.h"
63 #include "Architectures.hpp"
64 #include "HeaderAndLoadCommands.hpp"
65 #include "LinkEdit.hpp"
66 #include "LinkEditClassic.hpp"
// Counters updated during ARM64 linker-optimization-hint processing.
// NOTE(review): based on their names these presumably count ADRP instructions
// that were / were not rewritten to NOPs — confirm at the use sites.
uint32_t sAdrpNoped = 0;
uint32_t sAdrpNotNoped = 0;
77 OutputFile::OutputFile(const Options
& opts
)
79 usesWeakExternalSymbols(false), overridesWeakExternalSymbols(false),
80 _noReExportedDylibs(false), pieDisabled(false), hasDataInCode(false),
81 headerAndLoadCommandsSection(NULL
),
82 rebaseSection(NULL
), bindingSection(NULL
), weakBindingSection(NULL
),
83 lazyBindingSection(NULL
), exportSection(NULL
),
84 splitSegInfoSection(NULL
), functionStartsSection(NULL
),
85 dataInCodeSection(NULL
), optimizationHintsSection(NULL
), dependentDRsSection(NULL
),
86 symbolTableSection(NULL
), stringPoolSection(NULL
),
87 localRelocationsSection(NULL
), externalRelocationsSection(NULL
),
88 sectionRelocationsSection(NULL
),
89 indirectSymbolTableSection(NULL
),
91 _hasDyldInfo(opts
.makeCompressedDyldInfo()),
92 _hasSymbolTable(true),
93 _hasSectionRelocations(opts
.outputKind() == Options::kObjectFile
),
94 _hasSplitSegInfo(opts
.sharedRegionEligible()),
95 _hasFunctionStartsInfo(opts
.addFunctionStarts()),
96 _hasDataInCodeInfo(opts
.addDataInCodeInfo()),
97 _hasDependentDRInfo(opts
.needsDependentDRInfo()),
98 _hasDynamicSymbolTable(true),
99 _hasLocalRelocations(!opts
.makeCompressedDyldInfo()),
100 _hasExternalRelocations(!opts
.makeCompressedDyldInfo()),
101 _hasOptimizationHints(opts
.outputKind() == Options::kObjectFile
),
102 _encryptedTEXTstartOffset(0),
103 _encryptedTEXTendOffset(0),
104 _localSymbolsStartIndex(0),
105 _localSymbolsCount(0),
106 _globalSymbolsStartIndex(0),
107 _globalSymbolsCount(0),
108 _importSymbolsStartIndex(0),
109 _importSymbolsCount(0),
110 _sectionsRelocationsAtom(NULL
),
111 _localRelocsAtom(NULL
),
112 _externalRelocsAtom(NULL
),
113 _symbolTableAtom(NULL
),
114 _indirectSymbolTableAtom(NULL
),
115 _rebasingInfoAtom(NULL
),
116 _bindingInfoAtom(NULL
),
117 _lazyBindingInfoAtom(NULL
),
118 _weakBindingInfoAtom(NULL
),
119 _exportInfoAtom(NULL
),
120 _splitSegInfoAtom(NULL
),
121 _functionStartsAtom(NULL
),
122 _dataInCodeAtom(NULL
),
123 _dependentDRInfoAtom(NULL
),
124 _optimizationHintsAtom(NULL
)
128 void OutputFile::dumpAtomsBySection(ld::Internal
& state
, bool printAtoms
)
130 fprintf(stderr
, "SORTED:\n");
131 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
132 fprintf(stderr
, "final section %p %s/%s %s start addr=0x%08llX, size=0x%08llX, alignment=%02d, fileOffset=0x%08llX\n",
133 (*it
), (*it
)->segmentName(), (*it
)->sectionName(), (*it
)->isSectionHidden() ? "(hidden)" : "",
134 (*it
)->address
, (*it
)->size
, (*it
)->alignment
, (*it
)->fileOffset
);
136 std::vector
<const ld::Atom
*>& atoms
= (*it
)->atoms
;
137 for (std::vector
<const ld::Atom
*>::iterator ait
= atoms
.begin(); ait
!= atoms
.end(); ++ait
) {
138 fprintf(stderr
, " %p (0x%04llX) %s\n", *ait
, (*ait
)->size(), (*ait
)->name());
142 fprintf(stderr
, "DYLIBS:\n");
143 for (std::vector
<ld::dylib::File
*>::iterator it
=state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
)
144 fprintf(stderr
, " %s\n", (*it
)->installPath());
147 void OutputFile::write(ld::Internal
& state
)
149 this->buildDylibOrdinalMapping(state
);
150 this->addLoadCommands(state
);
151 this->addLinkEdit(state
);
152 state
.setSectionSizesAndAlignments();
153 this->setLoadCommandsPadding(state
);
154 _fileSize
= state
.assignFileOffsets();
155 this->assignAtomAddresses(state
);
156 this->synthesizeDebugNotes(state
);
157 this->buildSymbolTable(state
);
158 this->generateLinkEditInfo(state
);
159 this->makeSplitSegInfo(state
);
160 this->updateLINKEDITAddresses(state
);
161 //this->dumpAtomsBySection(state, false);
162 this->writeOutputFile(state
);
163 this->writeMapFile(state
);
166 bool OutputFile::findSegment(ld::Internal
& state
, uint64_t addr
, uint64_t* start
, uint64_t* end
, uint32_t* index
)
168 uint32_t segIndex
= 0;
169 ld::Internal::FinalSection
* segFirstSection
= NULL
;
170 ld::Internal::FinalSection
* lastSection
= NULL
;
171 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
172 ld::Internal::FinalSection
* sect
= *it
;
173 if ( (segFirstSection
== NULL
) || strcmp(segFirstSection
->segmentName(), sect
->segmentName()) != 0 ) {
174 if ( segFirstSection
!= NULL
) {
175 //fprintf(stderr, "findSegment(0x%llX) seg changed to %s\n", addr, sect->segmentName());
176 if ( (addr
>= segFirstSection
->address
) && (addr
< lastSection
->address
+lastSection
->size
) ) {
177 *start
= segFirstSection
->address
;
178 *end
= lastSection
->address
+lastSection
->size
;
184 segFirstSection
= sect
;
192 void OutputFile::assignAtomAddresses(ld::Internal
& state
)
194 const bool log
= false;
195 if ( log
) fprintf(stderr
, "assignAtomAddresses()\n");
196 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
197 ld::Internal::FinalSection
* sect
= *sit
;
198 if ( log
) fprintf(stderr
, " section=%s/%s\n", sect
->segmentName(), sect
->sectionName());
199 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
200 const ld::Atom
* atom
= *ait
;
201 switch ( sect
-> type() ) {
202 case ld::Section::typeImportProxies
:
203 // want finalAddress() of all proxy atoms to be zero
204 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(0);
206 case ld::Section::typeAbsoluteSymbols
:
207 // want finalAddress() of all absolute atoms to be value of abs symbol
208 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(0);
210 case ld::Section::typeLinkEdit
:
211 // linkedit layout is assigned later
214 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(sect
->address
);
215 if ( log
) fprintf(stderr
, " atom=%p, addr=0x%08llX, name=%s\n", atom
, atom
->finalAddress(), atom
->name());
222 void OutputFile::updateLINKEDITAddresses(ld::Internal
& state
)
224 if ( _options
.makeCompressedDyldInfo() ) {
225 // build dylb rebasing info
226 assert(_rebasingInfoAtom
!= NULL
);
227 _rebasingInfoAtom
->encode();
229 // build dyld binding info
230 assert(_bindingInfoAtom
!= NULL
);
231 _bindingInfoAtom
->encode();
233 // build dyld lazy binding info
234 assert(_lazyBindingInfoAtom
!= NULL
);
235 _lazyBindingInfoAtom
->encode();
237 // build dyld weak binding info
238 assert(_weakBindingInfoAtom
!= NULL
);
239 _weakBindingInfoAtom
->encode();
241 // build dyld export info
242 assert(_exportInfoAtom
!= NULL
);
243 _exportInfoAtom
->encode();
246 if ( _options
.sharedRegionEligible() ) {
247 // build split seg info
248 assert(_splitSegInfoAtom
!= NULL
);
249 _splitSegInfoAtom
->encode();
252 if ( _options
.addFunctionStarts() ) {
253 // build function starts info
254 assert(_functionStartsAtom
!= NULL
);
255 _functionStartsAtom
->encode();
258 if ( _options
.addDataInCodeInfo() ) {
259 // build data-in-code info
260 assert(_dataInCodeAtom
!= NULL
);
261 _dataInCodeAtom
->encode();
264 if ( _hasOptimizationHints
) {
265 // build linker-optimization-hint info
266 assert(_optimizationHintsAtom
!= NULL
);
267 _optimizationHintsAtom
->encode();
270 if ( _options
.needsDependentDRInfo() ) {
271 // build dependent dylib DR info
272 assert(_dependentDRInfoAtom
!= NULL
);
273 _dependentDRInfoAtom
->encode();
276 // build classic symbol table
277 assert(_symbolTableAtom
!= NULL
);
278 _symbolTableAtom
->encode();
279 assert(_indirectSymbolTableAtom
!= NULL
);
280 _indirectSymbolTableAtom
->encode();
282 // add relocations to .o files
283 if ( _options
.outputKind() == Options::kObjectFile
) {
284 assert(_sectionsRelocationsAtom
!= NULL
);
285 _sectionsRelocationsAtom
->encode();
288 if ( ! _options
.makeCompressedDyldInfo() ) {
289 // build external relocations
290 assert(_externalRelocsAtom
!= NULL
);
291 _externalRelocsAtom
->encode();
292 // build local relocations
293 assert(_localRelocsAtom
!= NULL
);
294 _localRelocsAtom
->encode();
297 // update address and file offsets now that linkedit content has been generated
298 uint64_t curLinkEditAddress
= 0;
299 uint64_t curLinkEditfileOffset
= 0;
300 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
301 ld::Internal::FinalSection
* sect
= *sit
;
302 if ( sect
->type() != ld::Section::typeLinkEdit
)
304 if ( curLinkEditAddress
== 0 ) {
305 curLinkEditAddress
= sect
->address
;
306 curLinkEditfileOffset
= sect
->fileOffset
;
308 uint16_t maxAlignment
= 0;
310 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
311 const ld::Atom
* atom
= *ait
;
312 //fprintf(stderr, "setting linkedit atom offset for %s\n", atom->name());
313 if ( atom
->alignment().powerOf2
> maxAlignment
)
314 maxAlignment
= atom
->alignment().powerOf2
;
315 // calculate section offset for this atom
316 uint64_t alignment
= 1 << atom
->alignment().powerOf2
;
317 uint64_t currentModulus
= (offset
% alignment
);
318 uint64_t requiredModulus
= atom
->alignment().modulus
;
319 if ( currentModulus
!= requiredModulus
) {
320 if ( requiredModulus
> currentModulus
)
321 offset
+= requiredModulus
-currentModulus
;
323 offset
+= requiredModulus
+alignment
-currentModulus
;
325 (const_cast<ld::Atom
*>(atom
))->setSectionOffset(offset
);
326 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(curLinkEditAddress
);
327 offset
+= atom
->size();
330 // section alignment is that of a contained atom with the greatest alignment
331 sect
->alignment
= maxAlignment
;
332 sect
->address
= curLinkEditAddress
;
333 sect
->fileOffset
= curLinkEditfileOffset
;
334 curLinkEditAddress
+= sect
->size
;
335 curLinkEditfileOffset
+= sect
->size
;
338 _fileSize
= state
.sections
.back()->fileOffset
+ state
.sections
.back()->size
;
342 void OutputFile::setLoadCommandsPadding(ld::Internal
& state
)
344 // In other sections, any extra space is put and end of segment.
345 // In __TEXT segment, any extra space is put after load commands to allow post-processing of load commands
346 // Do a reverse layout of __TEXT segment to determine padding size and adjust section size
347 uint64_t paddingSize
= 0;
348 switch ( _options
.outputKind() ) {
350 // dyld itself has special padding requirements. We want the beginning __text section to start at a stable address
351 assert(strcmp(state
.sections
[1]->sectionName(),"__text") == 0);
352 state
.sections
[1]->alignment
= 12; // page align __text
354 case Options::kObjectFile
:
355 // mach-o .o files need no padding between load commands and first section
356 // but leave enough room that the object file could be signed
359 case Options::kPreload
:
360 // mach-o MH_PRELOAD files need no padding between load commands and first section
363 // work backwards from end of segment and lay out sections so that extra room goes to padding atom
365 uint64_t textSegPageSize
= _options
.segPageSize("__TEXT");
366 if ( _options
.sharedRegionEligible() && (_options
.iOSVersionMin() >= ld::iOS_8_0
) && (textSegPageSize
== 0x4000) )
367 textSegPageSize
= 0x1000;
368 for (std::vector
<ld::Internal::FinalSection
*>::reverse_iterator it
= state
.sections
.rbegin(); it
!= state
.sections
.rend(); ++it
) {
369 ld::Internal::FinalSection
* sect
= *it
;
370 if ( strcmp(sect
->segmentName(), "__TEXT") != 0 )
372 if ( sect
== headerAndLoadCommandsSection
) {
373 addr
-= headerAndLoadCommandsSection
->size
;
374 paddingSize
= addr
% textSegPageSize
;
378 addr
= addr
& (0 - (1 << sect
->alignment
));
381 // if command line requires more padding than this
382 uint32_t minPad
= _options
.minimumHeaderPad();
383 if ( _options
.maxMminimumHeaderPad() ) {
384 // -headerpad_max_install_names means there should be room for every path load command to grow to 1204 bytes
385 uint32_t altMin
= _dylibsToLoad
.size() * MAXPATHLEN
;
386 if ( _options
.outputKind() == Options::kDynamicLibrary
)
387 altMin
+= MAXPATHLEN
;
388 if ( altMin
> minPad
)
391 if ( paddingSize
< minPad
) {
392 int extraPages
= (minPad
- paddingSize
+ _options
.segmentAlignment() - 1)/_options
.segmentAlignment();
393 paddingSize
+= extraPages
* _options
.segmentAlignment();
396 if ( _options
.makeEncryptable() ) {
397 // load commands must be on a separate non-encrypted page
398 int loadCommandsPage
= (headerAndLoadCommandsSection
->size
+ minPad
)/_options
.segmentAlignment();
399 int textPage
= (headerAndLoadCommandsSection
->size
+ paddingSize
)/_options
.segmentAlignment();
400 if ( loadCommandsPage
== textPage
) {
401 paddingSize
+= _options
.segmentAlignment();
404 // remember start for later use by load command
405 _encryptedTEXTstartOffset
= textPage
*_options
.segmentAlignment();
409 // add padding to size of section
410 headerAndLoadCommandsSection
->size
+= paddingSize
;
414 uint64_t OutputFile::pageAlign(uint64_t addr
)
416 const uint64_t alignment
= _options
.segmentAlignment();
417 return ((addr
+alignment
-1) & (-alignment
));
420 uint64_t OutputFile::pageAlign(uint64_t addr
, uint64_t pageSize
)
422 return ((addr
+pageSize
-1) & (-pageSize
));
425 static const char* makeName(const ld::Atom
& atom
)
427 static char buffer
[4096];
428 switch ( atom
.symbolTableInclusion() ) {
429 case ld::Atom::symbolTableNotIn
:
430 case ld::Atom::symbolTableNotInFinalLinkedImages
:
431 sprintf(buffer
, "%s@0x%08llX", atom
.name(), atom
.objectAddress());
433 case ld::Atom::symbolTableIn
:
434 case ld::Atom::symbolTableInAndNeverStrip
:
435 case ld::Atom::symbolTableInAsAbsolute
:
436 case ld::Atom::symbolTableInWithRandomAutoStripLabel
:
437 strlcpy(buffer
, atom
.name(), 4096);
443 static const char* referenceTargetAtomName(ld::Internal
& state
, const ld::Fixup
* ref
)
445 switch ( ref
->binding
) {
446 case ld::Fixup::bindingNone
:
448 case ld::Fixup::bindingByNameUnbound
:
449 return (char*)(ref
->u
.target
);
450 case ld::Fixup::bindingByContentBound
:
451 case ld::Fixup::bindingDirectlyBound
:
452 return makeName(*((ld::Atom
*)(ref
->u
.target
)));
453 case ld::Fixup::bindingsIndirectlyBound
:
454 return makeName(*state
.indirectBindingTable
[ref
->u
.bindingIndex
]);
456 return "BAD BINDING";
459 bool OutputFile::targetIsThumb(ld::Internal
& state
, const ld::Fixup
* fixup
)
461 switch ( fixup
->binding
) {
462 case ld::Fixup::bindingByContentBound
:
463 case ld::Fixup::bindingDirectlyBound
:
464 return fixup
->u
.target
->isThumb();
465 case ld::Fixup::bindingsIndirectlyBound
:
466 return state
.indirectBindingTable
[fixup
->u
.bindingIndex
]->isThumb();
470 throw "unexpected binding";
473 uint64_t OutputFile::addressOf(const ld::Internal
& state
, const ld::Fixup
* fixup
, const ld::Atom
** target
)
475 if ( !_options
.makeCompressedDyldInfo() ) {
476 // For external relocations the classic mach-o format
477 // has addend only stored in the content. That means
478 // that the address of the target is not used.
479 if ( fixup
->contentAddendOnly
)
482 switch ( fixup
->binding
) {
483 case ld::Fixup::bindingNone
:
484 throw "unexpected bindingNone";
485 case ld::Fixup::bindingByNameUnbound
:
486 throw "unexpected bindingByNameUnbound";
487 case ld::Fixup::bindingByContentBound
:
488 case ld::Fixup::bindingDirectlyBound
:
489 *target
= fixup
->u
.target
;
490 return (*target
)->finalAddress();
491 case ld::Fixup::bindingsIndirectlyBound
:
492 *target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
494 if ( ! (*target
)->finalAddressMode() ) {
495 throwf("reference to symbol (which has not been assigned an address) %s", (*target
)->name());
498 return (*target
)->finalAddress();
500 throw "unexpected binding";
503 uint64_t OutputFile::sectionOffsetOf(const ld::Internal
& state
, const ld::Fixup
* fixup
)
505 const ld::Atom
* target
= NULL
;
506 switch ( fixup
->binding
) {
507 case ld::Fixup::bindingNone
:
508 throw "unexpected bindingNone";
509 case ld::Fixup::bindingByNameUnbound
:
510 throw "unexpected bindingByNameUnbound";
511 case ld::Fixup::bindingByContentBound
:
512 case ld::Fixup::bindingDirectlyBound
:
513 target
= fixup
->u
.target
;
515 case ld::Fixup::bindingsIndirectlyBound
:
516 target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
519 assert(target
!= NULL
);
521 uint64_t targetAddress
= target
->finalAddress();
522 for (std::vector
<ld::Internal::FinalSection
*>::const_iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
523 const ld::Internal::FinalSection
* sect
= *it
;
524 if ( (sect
->address
<= targetAddress
) && (targetAddress
< (sect
->address
+sect
->size
)) )
525 return targetAddress
- sect
->address
;
527 throw "section not found for section offset";
532 uint64_t OutputFile::tlvTemplateOffsetOf(const ld::Internal
& state
, const ld::Fixup
* fixup
)
534 const ld::Atom
* target
= NULL
;
535 switch ( fixup
->binding
) {
536 case ld::Fixup::bindingNone
:
537 throw "unexpected bindingNone";
538 case ld::Fixup::bindingByNameUnbound
:
539 throw "unexpected bindingByNameUnbound";
540 case ld::Fixup::bindingByContentBound
:
541 case ld::Fixup::bindingDirectlyBound
:
542 target
= fixup
->u
.target
;
544 case ld::Fixup::bindingsIndirectlyBound
:
545 target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
548 assert(target
!= NULL
);
550 for (std::vector
<ld::Internal::FinalSection
*>::const_iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
551 const ld::Internal::FinalSection
* sect
= *it
;
552 switch ( sect
->type() ) {
553 case ld::Section::typeTLVInitialValues
:
554 case ld::Section::typeTLVZeroFill
:
555 return target
->finalAddress() - sect
->address
;
560 throw "section not found for tlvTemplateOffsetOf";
563 void OutputFile::printSectionLayout(ld::Internal
& state
)
565 // show layout of final image
566 fprintf(stderr
, "final section layout:\n");
567 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
568 if ( (*it
)->isSectionHidden() )
570 fprintf(stderr
, " %s/%s addr=0x%08llX, size=0x%08llX, fileOffset=0x%08llX, type=%d\n",
571 (*it
)->segmentName(), (*it
)->sectionName(),
572 (*it
)->address
, (*it
)->size
, (*it
)->fileOffset
, (*it
)->type());
577 void OutputFile::rangeCheck8(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
579 if ( (displacement
> 127) || (displacement
< -128) ) {
580 // show layout of final image
581 printSectionLayout(state
);
583 const ld::Atom
* target
;
584 throwf("8-bit reference out of range (%lld max is +/-127B): from %s (0x%08llX) to %s (0x%08llX)",
585 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
586 addressOf(state
, fixup
, &target
));
590 void OutputFile::rangeCheck16(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
592 const int64_t thirtyTwoKLimit
= 0x00007FFF;
593 if ( (displacement
> thirtyTwoKLimit
) || (displacement
< (-thirtyTwoKLimit
)) ) {
594 // show layout of final image
595 printSectionLayout(state
);
597 const ld::Atom
* target
;
598 throwf("16-bit reference out of range (%lld max is +/-32KB): from %s (0x%08llX) to %s (0x%08llX)",
599 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
600 addressOf(state
, fixup
, &target
));
604 void OutputFile::rangeCheckBranch32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
606 const int64_t twoGigLimit
= 0x7FFFFFFF;
607 if ( (displacement
> twoGigLimit
) || (displacement
< (-twoGigLimit
)) ) {
608 // show layout of final image
609 printSectionLayout(state
);
611 const ld::Atom
* target
;
612 throwf("32-bit branch out of range (%lld max is +/-2GB): from %s (0x%08llX) to %s (0x%08llX)",
613 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
614 addressOf(state
, fixup
, &target
));
619 void OutputFile::rangeCheckAbsolute32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
621 const int64_t fourGigLimit
= 0xFFFFFFFF;
622 if ( displacement
> fourGigLimit
) {
623 // <rdar://problem/9610466> cannot enforce 32-bit range checks on 32-bit archs because assembler loses sign information
624 // .long _foo - 0xC0000000
625 // is encoded in mach-o the same as:
626 // .long _foo + 0x40000000
627 // so if _foo lays out to 0xC0000100, the first is ok, but the second is not.
628 if ( (_options
.architecture() == CPU_TYPE_ARM
) || (_options
.architecture() == CPU_TYPE_I386
) ) {
629 // Unlikely userland code does funky stuff like this, so warn for them, but not warn for -preload or -static
630 if ( (_options
.outputKind() != Options::kPreload
) && (_options
.outputKind() != Options::kStaticExecutable
) ) {
631 warning("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
632 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), displacement
);
636 // show layout of final image
637 printSectionLayout(state
);
639 const ld::Atom
* target
;
640 if ( fixup
->binding
== ld::Fixup::bindingNone
)
641 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
642 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), displacement
);
644 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to %s (0x%08llX)",
645 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
646 addressOf(state
, fixup
, &target
));
651 void OutputFile::rangeCheckRIP32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
653 const int64_t twoGigLimit
= 0x7FFFFFFF;
654 if ( (displacement
> twoGigLimit
) || (displacement
< (-twoGigLimit
)) ) {
655 // show layout of final image
656 printSectionLayout(state
);
658 const ld::Atom
* target
;
659 throwf("32-bit RIP relative reference out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
660 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
661 addressOf(state
, fixup
, &target
));
665 void OutputFile::rangeCheckARM12(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
667 if ( (displacement
> 4092LL) || (displacement
< (-4092LL)) ) {
668 // show layout of final image
669 printSectionLayout(state
);
671 const ld::Atom
* target
;
672 throwf("ARM ldr 12-bit displacement out of range (%lld max is +/-4096B): from %s (0x%08llX) to %s (0x%08llX)",
673 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
674 addressOf(state
, fixup
, &target
));
678 bool OutputFile::checkArmBranch24Displacement(int64_t displacement
)
680 return ( (displacement
< 33554428LL) && (displacement
> (-33554432LL)) );
683 void OutputFile::rangeCheckARMBranch24(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
685 if ( checkArmBranch24Displacement(displacement
) )
688 // show layout of final image
689 printSectionLayout(state
);
691 const ld::Atom
* target
;
692 throwf("b/bl/blx ARM branch out of range (%lld max is +/-32MB): from %s (0x%08llX) to %s (0x%08llX)",
693 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
694 addressOf(state
, fixup
, &target
));
697 bool OutputFile::checkThumbBranch22Displacement(int64_t displacement
)
699 // thumb2 supports +/- 16MB displacement
700 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
701 if ( (displacement
> 16777214LL) || (displacement
< (-16777216LL)) ) {
706 // thumb1 supports +/- 4MB displacement
707 if ( (displacement
> 4194302LL) || (displacement
< (-4194304LL)) ) {
714 void OutputFile::rangeCheckThumbBranch22(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
716 if ( checkThumbBranch22Displacement(displacement
) )
719 // show layout of final image
720 printSectionLayout(state
);
722 const ld::Atom
* target
;
723 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
724 throwf("b/bl/blx thumb2 branch out of range (%lld max is +/-16MB): from %s (0x%08llX) to %s (0x%08llX)",
725 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
726 addressOf(state
, fixup
, &target
));
729 throwf("b/bl/blx thumb1 branch out of range (%lld max is +/-4MB): from %s (0x%08llX) to %s (0x%08llX)",
730 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
731 addressOf(state
, fixup
, &target
));
736 void OutputFile::rangeCheckARM64Branch26(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
738 const int64_t bl_128MegLimit
= 0x07FFFFFF;
739 if ( (displacement
> bl_128MegLimit
) || (displacement
< (-bl_128MegLimit
)) ) {
740 // show layout of final image
741 printSectionLayout(state
);
743 const ld::Atom
* target
;
744 throwf("b(l) ARM64 branch out of range (%lld max is +/-128MB): from %s (0x%08llX) to %s (0x%08llX)",
745 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
746 addressOf(state
, fixup
, &target
));
750 void OutputFile::rangeCheckARM64Page21(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
752 const int64_t adrp_4GigLimit
= 0x100000000ULL
;
753 if ( (displacement
> adrp_4GigLimit
) || (displacement
< (-adrp_4GigLimit
)) ) {
754 // show layout of final image
755 printSectionLayout(state
);
757 const ld::Atom
* target
;
758 throwf("ARM64 ADRP out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
759 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
760 addressOf(state
, fixup
, &target
));
765 uint16_t OutputFile::get16LE(uint8_t* loc
) { return LittleEndian::get16(*(uint16_t*)loc
); }
766 void OutputFile::set16LE(uint8_t* loc
, uint16_t value
) { LittleEndian::set16(*(uint16_t*)loc
, value
); }
768 uint32_t OutputFile::get32LE(uint8_t* loc
) { return LittleEndian::get32(*(uint32_t*)loc
); }
769 void OutputFile::set32LE(uint8_t* loc
, uint32_t value
) { LittleEndian::set32(*(uint32_t*)loc
, value
); }
771 uint64_t OutputFile::get64LE(uint8_t* loc
) { return LittleEndian::get64(*(uint64_t*)loc
); }
772 void OutputFile::set64LE(uint8_t* loc
, uint64_t value
) { LittleEndian::set64(*(uint64_t*)loc
, value
); }
774 uint16_t OutputFile::get16BE(uint8_t* loc
) { return BigEndian::get16(*(uint16_t*)loc
); }
775 void OutputFile::set16BE(uint8_t* loc
, uint16_t value
) { BigEndian::set16(*(uint16_t*)loc
, value
); }
777 uint32_t OutputFile::get32BE(uint8_t* loc
) { return BigEndian::get32(*(uint32_t*)loc
); }
778 void OutputFile::set32BE(uint8_t* loc
, uint32_t value
) { BigEndian::set32(*(uint32_t*)loc
, value
); }
780 uint64_t OutputFile::get64BE(uint8_t* loc
) { return BigEndian::get64(*(uint64_t*)loc
); }
781 void OutputFile::set64BE(uint8_t* loc
, uint64_t value
) { BigEndian::set64(*(uint64_t*)loc
, value
); }
783 #if SUPPORT_ARCH_arm64
// Returns the ARM64 NOP instruction encoding (used when an LOH lets an
// instruction be removed).
static uint32_t makeNOP() {
	return 0xD503201F;
}
// How a load sign-extends its result (not at all, to 32 bits, or to 64 bits).
enum SignExtension { signedNot, signed32, signed64 };
790 struct LoadStoreInfo
{
793 uint32_t offset
; // after scaling
794 uint32_t size
; // 1,2,4,8, or 16
796 bool isFloat
; // if destReg is FP/SIMD
797 SignExtension signEx
; // if load is sign extended
800 static uint32_t makeLDR_literal(const LoadStoreInfo
& info
, uint64_t targetAddress
, uint64_t instructionAddress
)
802 int64_t delta
= targetAddress
- instructionAddress
;
803 assert(delta
< 1024*1024);
804 assert(delta
> -1024*1024);
805 assert((info
.reg
& 0xFFFFFFE0) == 0);
806 assert((targetAddress
& 0x3) == 0);
807 assert((instructionAddress
& 0x3) == 0);
808 assert(!info
.isStore
);
809 uint32_t imm19
= (delta
<< 3) & 0x00FFFFE0;
810 uint32_t instruction
= 0;
811 switch ( info
.size
) {
813 if ( info
.isFloat
) {
814 assert(info
.signEx
== signedNot
);
815 instruction
= 0x1C000000;
818 if ( info
.signEx
== signed64
)
819 instruction
= 0x98000000;
821 instruction
= 0x18000000;
825 assert(info
.signEx
== signedNot
);
826 instruction
= info
.isFloat
? 0x5C000000 : 0x58000000;
829 assert(info
.signEx
== signedNot
);
830 instruction
= 0x9C000000;
833 assert(0 && "invalid load size for literal");
835 return (instruction
| imm19
| info
.reg
);
// Builds an ARM64 ADR encoding for destReg, asserting the +/-1MB range.
static uint32_t makeADR(uint32_t destReg, uint64_t targetAddress, uint64_t instructionAddress)
{
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((instructionAddress & 0x3) == 0);
	uint32_t instruction = 0x10000000;
	int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	// ADR splits the immediate into a 2-bit low part and a 19-bit high part
	uint32_t immhi = (delta & 0x001FFFFC) << 3;
	uint32_t immlo = (delta & 0x00000003) << 29;
	return (instruction | immhi | immlo | destReg);
}
851 static uint32_t makeLoadOrStore(const LoadStoreInfo
& info
)
853 uint32_t instruction
= 0x39000000;
855 instruction
|= 0x04000000;
856 instruction
|= info
.reg
;
857 instruction
|= (info
.baseReg
<< 5);
858 uint32_t sizeBits
= 0;
859 uint32_t opcBits
= 0;
860 uint32_t imm12Bits
= 0;
861 switch ( info
.size
) {
864 imm12Bits
= info
.offset
;
865 if ( info
.isStore
) {
869 switch ( info
.signEx
) {
884 assert((info
.offset
% 2) == 0);
885 imm12Bits
= info
.offset
/2;
886 if ( info
.isStore
) {
890 switch ( info
.signEx
) {
905 assert((info
.offset
% 4) == 0);
906 imm12Bits
= info
.offset
/4;
907 if ( info
.isStore
) {
911 switch ( info
.signEx
) {
916 assert(0 && "cannot use signed32 with 32-bit load/store");
926 assert((info
.offset
% 8) == 0);
927 imm12Bits
= info
.offset
/8;
928 if ( info
.isStore
) {
933 assert(info
.signEx
== signedNot
);
938 assert((info
.offset
% 16) == 0);
939 imm12Bits
= info
.offset
/16;
940 assert(info
.isFloat
);
941 if ( info
.isStore
) {
949 assert(0 && "bad load/store size");
952 assert(imm12Bits
< 4096);
953 return (instruction
| (sizeBits
<< 30) | (opcBits
<< 22) | (imm12Bits
<< 10));
// Decodes an AArch64 load/store (unsigned-immediate) instruction into 'info'.
// Returns false when 'instruction' is not in that encoding class.
// NOTE(review): the case labels of the size/opc switch were elided from this
// view (original lines missing) — each group below corresponds to one
// size/opc combination; confirm label values against the complete source.
956 static bool parseLoadOrStore(uint32_t instruction
, LoadStoreInfo
& info
)
// Class check: bits 29:27,24 must match the load/store-unsigned-imm pattern
958 if ( (instruction
& 0x3B000000) != 0x39000000 )
// V bit (bit 26) means an FP/SIMD register access
960 info
.isFloat
= ( (instruction
& 0x04000000) != 0 );
// Rt in bits 4:0, Rn (base) in bits 9:5
961 info
.reg
= (instruction
& 0x1F);
962 info
.baseReg
= ((instruction
>>5) & 0x1F);
// Dispatch on the size (bits 31:30) and opc (bits 23:22) fields together
963 switch (instruction
& 0xC0C00000) {
967 info
.signEx
= signedNot
;
971 info
.isStore
= false;
972 info
.signEx
= signedNot
;
975 if ( info
.isFloat
) {
978 info
.signEx
= signedNot
;
982 info
.isStore
= false;
983 info
.signEx
= signed64
;
987 if ( info
.isFloat
) {
989 info
.isStore
= false;
990 info
.signEx
= signedNot
;
994 info
.isStore
= false;
995 info
.signEx
= signed32
;
1000 info
.isStore
= true;
1001 info
.signEx
= signedNot
;
1005 info
.isStore
= false;
1006 info
.signEx
= signedNot
;
1010 info
.isStore
= false;
1011 info
.signEx
= signed64
;
1015 info
.isStore
= false;
1016 info
.signEx
= signed32
;
1020 info
.isStore
= true;
1021 info
.signEx
= signedNot
;
1025 info
.isStore
= false;
1026 info
.signEx
= signedNot
;
1030 info
.isStore
= false;
1031 info
.signEx
= signed64
;
1035 info
.isStore
= true;
1036 info
.signEx
= signedNot
;
1040 info
.isStore
= false;
1041 info
.signEx
= signedNot
;
// The 12-bit immediate is scaled by the access size to get the byte offset
1046 info
.offset
= ((instruction
>> 10) & 0x0FFF) * info
.size
;
// Decodes an ADRP instruction into 'info'; returns false if 'instruction'
// does not match the ADRP encoding (op=1, opcode bits 0x90000000).
// NOTE(review): the remainder of this function (addend extraction / return)
// was elided from this view — verify against the complete source.
1054 static bool parseADRP(uint32_t instruction
, AdrpInfo
& info
)
1056 if ( (instruction
& 0x9F000000) != 0x90000000 )
// Destination register Rd is in bits 4:0
1058 info
.destReg
= (instruction
& 0x1F);
// Decodes a 64-bit ADD-immediate instruction (unshifted) into 'info';
// returns false if 'instruction' does not match the 0x91000000 pattern.
// NOTE(review): the return statements were elided from this view.
1068 static bool parseADD(uint32_t instruction
, AddInfo
& info
)
1070 if ( (instruction
& 0xFFC00000) != 0x91000000 )
// Rd in bits 4:0, Rn in bits 9:5, imm12 in bits 21:10
1072 info
.destReg
= (instruction
& 0x1F);
1073 info
.srcReg
= ((instruction
>>5) & 0x1F);
1074 info
.addend
= ((instruction
>>10) & 0xFFF);
// Encodes an AArch64 LDR (unsigned scaled immediate) from 'info'.
// NOTE(review): the bodies of both switches (SignExtension and size cases)
// were elided from this view — only the defaults/asserts remain visible.
1081 static uint32_t makeLDR_scaledOffset(const LoadStoreInfo
& info
)
1083 assert((info
.reg
& 0xFFFFFFE0) == 0);
1084 assert((info
.baseReg
& 0xFFFFFFE0) == 0);
// NOTE(review): asserts FP loads never use signedNot — confirm this polarity
// against the full source; it reads inverted relative to makeLoadOrStore.
1085 assert(!info
.isFloat
|| (info
.signEx
!= signedNot
));
1086 uint32_t sizeBits
= 0;
// opc defaults to 1 (load)
1087 uint32_t opcBits
= 1;
// V bit selects the FP/SIMD register file
1088 uint32_t vBit
= info
.isFloat
;
1089 switch ( info
.signEx
) {
1100 assert(0 && "bad SignExtension runtime value");
1102 switch ( info
.size
) {
1121 assert(0 && "invalid load size for literal");
// Byte offset must be a multiple of the access size; the immediate is scaled
1123 assert((info
.offset
% info
.size
) == 0);
1124 uint32_t scaledOffset
= info
.offset
/info
.size
;
1125 assert(scaledOffset
< 4096);
// size@31:30, V@26, opc@23:22, imm12@21:10, Rn@9:5, Rt@4:0
1126 return (0x39000000 | (sizeBits
<<30) | (vBit
<<26) | (opcBits
<<22) | (scaledOffset
<<10) | (info
.baseReg
<<5) | info
.reg
);
// Encodes an AArch64 LDR (literal): a pc-relative load of 'loadSize' bytes
// into 'destReg'. The displacement must be word-aligned and within +/-1MB
// (signed 19-bit word immediate at bits 23:5).
// NOTE(review): the switch case labels (load sizes) were elided from this view.
1129 static uint32_t makeLDR_literal(uint32_t destReg
, uint32_t loadSize
, bool isFloat
, uint64_t targetAddress
, uint64_t instructionAddress
)
1131 int64_t delta
= targetAddress
- instructionAddress
;
1132 assert(delta
< 1024*1024);
1133 assert(delta
> -1024*1024);
1134 assert((destReg
& 0xFFFFFFE0) == 0);
1135 assert((targetAddress
& 0x3) == 0);
1136 assert((instructionAddress
& 0x3) == 0);
// delta/4 placed at bits 23:5 (the <<3 combines >>2 scaling with <<5 placement)
1137 uint32_t imm19
= (delta
<< 3) & 0x00FFFFE0;
1138 uint32_t instruction
= 0;
1139 switch ( loadSize
) {
// 0x18000000 = LDR Wt (32-bit GPR); 0x1C000000 = LDR St (32-bit FP)
1141 instruction
= isFloat
? 0x1C000000 : 0x18000000;
// 0x58000000 = LDR Xt (64-bit GPR); 0x5C000000 = LDR Dt (64-bit FP)
1144 instruction
= isFloat
? 0x5C000000 : 0x58000000;
// 0x9C000000 = LDR Qt (128-bit, FP/SIMD only)
1147 instruction
= 0x9C000000;
1150 assert(0 && "invalid load size for literal");
1152 return (instruction
| imm19
| destReg
);
// Decodes an LDR (unsigned-immediate) instruction, producing the destination
// register, FP/SIMD flag and the byte-scaled offset via out-parameters.
// Returns whether 'instruction' actually matches the LDR encoding class.
// NOTE(review): the case labels of the size switch were elided from this view.
1156 static bool ldrInfo(uint32_t instruction
, uint8_t* size
, uint8_t* destReg
, bool* v
, uint32_t* scaledOffset
)
// V bit (bit 26) marks an FP/SIMD register load
1158 *v
= ( (instruction
& 0x04000000) != 0 );
1159 *destReg
= (instruction
& 0x1F);
1160 uint32_t imm12
= ((instruction
>> 10) & 0x00000FFF);
// Dispatch on the size field (bits 31:30) to scale imm12 into a byte offset
1161 switch ( (instruction
& 0xC0000000) >> 30 ) {
1163 // vector and byte LDR have same "size" bits, need to check other bits to differentiate
1164 if ( (instruction
& 0x00800000) == 0 ) {
// byte access: offset is unscaled
1166 *scaledOffset
= imm12
;
// 128-bit vector access: offset scaled by 16
1170 *scaledOffset
= imm12
* 16;
1175 *scaledOffset
= imm12
* 2;
1179 *scaledOffset
= imm12
* 4;
1183 *scaledOffset
= imm12
* 8;
// Final class check: must be a load (L bit set) in the unsigned-imm class
1186 return ((instruction
& 0x3B400000) == 0x39400000);
// Whether the signed displacement from 'addr1' to 'addr2' fits within +/-1MB
// (exclusive bounds) — the reach of AArch64 ADR / LDR-literal instructions.
static bool withinOneMeg(uint64_t addr1, uint64_t addr2) {
	const int64_t displacement = addr2 - addr1;
	return (displacement > -1024*1024) && (displacement < 1024*1024);
}
1194 #endif // SUPPORT_ARCH_arm64
// Fills 'info' describing the instruction at offsetInAtom+delta within 'atom':
// resolves any linker-optimization-hint fixup registered at that offset in
// 'usedByHints' (folding in a following kindAddAddend when the fixup is part
// of a cluster), then records a pointer into 'buffer', the instruction's final
// address, and its 32-bit little-endian encoding.
// NOTE(review): the else-branch lines between 1208 and 1217 were elided from
// this view — verify the no-fixup path against the complete source.
1196 void OutputFile::setInfo(ld::Internal
& state
, const ld::Atom
* atom
, uint8_t* buffer
, const std::map
<uint32_t, const Fixup
*>& usedByHints
,
1197 uint32_t offsetInAtom
, uint32_t delta
, InstructionInfo
* info
)
1199 info
->offsetInAtom
= offsetInAtom
+ delta
;
// Look for a hint-referenced fixup registered at this offset
1200 std::map
<uint32_t, const Fixup
*>::const_iterator pos
= usedByHints
.find(info
->offsetInAtom
);
1201 if ( (pos
!= usedByHints
.end()) && (pos
->second
!= NULL
) ) {
1202 info
->fixup
= pos
->second
;
// Resolve the fixup's target atom and its final address
1203 info
->targetAddress
= addressOf(state
, info
->fixup
, &info
->target
);
1204 if ( info
->fixup
->clusterSize
!= ld::Fixup::k1of1
) {
// Multi-fixup cluster: the next fixup must carry the addend to apply
1205 assert(info
->fixup
->firstInCluster());
1206 const ld::Fixup
* nextFixup
= info
->fixup
+ 1;
1207 if ( nextFixup
->kind
== ld::Fixup::kindAddAddend
) {
1208 info
->targetAddress
+= nextFixup
->u
.addend
;
1211 assert(0 && "expected addend");
// No fixup at this offset: clear target info
1217 info
->targetAddress
= 0;
1218 info
->target
= NULL
;
// Record where the instruction lives in the output buffer and in memory
1220 info
->instructionContent
= &buffer
[info
->offsetInAtom
];
1221 info
->instructionAddress
= atom
->finalAddress() + info
->offsetInAtom
;
// Cache the current 32-bit little-endian instruction word
1222 info
->instruction
= get32LE(info
->instructionContent
);
1225 #if SUPPORT_ARCH_arm64
// Returns whether 'fixup' stores an ARM64 Page21 (ADRP page) kind; when
// 'mustBeGOT' is set, presumably only the GOT Page21 kinds qualify.
// NOTE(review): the return statements and the do/while cluster-walk body
// were elided from this view — verify control flow against the full source.
1226 static bool isPageKind(const ld::Fixup
* fixup
, bool mustBeGOT
=false)
1228 if ( fixup
== NULL
)
1231 switch ( fixup
->kind
) {
1232 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
1234 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
1235 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
// kindSetTargetAddress starts a fixup cluster: walk to the last fixup
// in the cluster to find the store kind
1237 case ld::Fixup::kindSetTargetAddress
:
1241 } while ( ! f
->lastInCluster() );
1243 case ld::Fixup::kindStoreARM64Page21
:
1245 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
1246 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
// Returns whether 'fixup' stores an ARM64 PageOff12 (low 12 bits of a page
// offset) kind; when 'mustBeGOT' is set, presumably only GOT kinds qualify.
// NOTE(review): the return statements and the do/while cluster-walk body
// were elided from this view — verify control flow against the full source.
1258 static bool isPageOffsetKind(const ld::Fixup
* fixup
, bool mustBeGOT
=false)
1260 if ( fixup
== NULL
)
1263 switch ( fixup
->kind
) {
1264 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
1266 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
1267 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
// kindSetTargetAddress starts a fixup cluster: walk to the last fixup
// in the cluster to find the store kind
1269 case ld::Fixup::kindSetTargetAddress
:
1273 } while ( ! f
->lastInCluster() );
1275 case ld::Fixup::kindStoreARM64PageOff12
:
1277 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
1278 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
1289 #endif // SUPPORT_ARCH_arm64
1292 #define LOH_ASSERT(cond) \
1294 warning("ignoring linker optimzation hint at %s+0x%X because " #cond, atom->name(), fit->offsetInAtom); \
1299 void OutputFile::applyFixUps(ld::Internal
& state
, uint64_t mhAddress
, const ld::Atom
* atom
, uint8_t* buffer
)
1301 //fprintf(stderr, "applyFixUps() on %s\n", atom->name());
1302 int64_t accumulator
= 0;
1303 const ld::Atom
* toTarget
= NULL
;
1304 const ld::Atom
* fromTarget
;
1306 uint32_t instruction
;
1307 uint32_t newInstruction
;
1311 bool thumbTarget
= false;
1312 std::map
<uint32_t, const Fixup
*> usedByHints
;
1313 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
1314 uint8_t* fixUpLocation
= &buffer
[fit
->offsetInAtom
];
1315 ld::Fixup::LOH_arm64 lohExtra
;
1316 switch ( (ld::Fixup::Kind
)(fit
->kind
) ) {
1317 case ld::Fixup::kindNone
:
1318 case ld::Fixup::kindNoneFollowOn
:
1319 case ld::Fixup::kindNoneGroupSubordinate
:
1320 case ld::Fixup::kindNoneGroupSubordinateFDE
:
1321 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
1322 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
1324 case ld::Fixup::kindSetTargetAddress
:
1325 accumulator
= addressOf(state
, fit
, &toTarget
);
1326 thumbTarget
= targetIsThumb(state
, fit
);
1329 if ( fit
->contentAddendOnly
|| fit
->contentDetlaToAddendOnly
)
1332 case ld::Fixup::kindSubtractTargetAddress
:
1333 delta
= addressOf(state
, fit
, &fromTarget
);
1334 if ( ! fit
->contentAddendOnly
)
1335 accumulator
-= delta
;
1337 case ld::Fixup::kindAddAddend
:
1338 if ( ! fit
->contentIgnoresAddend
) {
1339 // <rdar://problem/8342028> ARM main executables main contain .long constants pointing
1340 // into themselves such as jump tables. These .long should not have thumb bit set
1341 // even though the target is a thumb instruction. We can tell it is an interior pointer
1342 // because we are processing an addend.
1343 if ( thumbTarget
&& (toTarget
== atom
) && ((int32_t)fit
->u
.addend
> 0) ) {
1344 accumulator
&= (-2);
1345 //warning("removing thumb bit from intra-atom pointer in %s %s+0x%0X",
1346 // atom->section().sectionName(), atom->name(), fit->offsetInAtom);
1348 accumulator
+= fit
->u
.addend
;
1351 case ld::Fixup::kindSubtractAddend
:
1352 accumulator
-= fit
->u
.addend
;
1354 case ld::Fixup::kindSetTargetImageOffset
:
1355 accumulator
= addressOf(state
, fit
, &toTarget
) - mhAddress
;
1356 thumbTarget
= targetIsThumb(state
, fit
);
1360 case ld::Fixup::kindSetTargetSectionOffset
:
1361 accumulator
= sectionOffsetOf(state
, fit
);
1363 case ld::Fixup::kindSetTargetTLVTemplateOffset
:
1364 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1366 case ld::Fixup::kindStore8
:
1367 *fixUpLocation
+= accumulator
;
1369 case ld::Fixup::kindStoreLittleEndian16
:
1370 set16LE(fixUpLocation
, accumulator
);
1372 case ld::Fixup::kindStoreLittleEndianLow24of32
:
1373 set32LE(fixUpLocation
, (get32LE(fixUpLocation
) & 0xFF000000) | (accumulator
& 0x00FFFFFF) );
1375 case ld::Fixup::kindStoreLittleEndian32
:
1376 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1377 set32LE(fixUpLocation
, accumulator
);
1379 case ld::Fixup::kindStoreLittleEndian64
:
1380 set64LE(fixUpLocation
, accumulator
);
1382 case ld::Fixup::kindStoreBigEndian16
:
1383 set16BE(fixUpLocation
, accumulator
);
1385 case ld::Fixup::kindStoreBigEndianLow24of32
:
1386 set32BE(fixUpLocation
, (get32BE(fixUpLocation
) & 0xFF000000) | (accumulator
& 0x00FFFFFF) );
1388 case ld::Fixup::kindStoreBigEndian32
:
1389 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1390 set32BE(fixUpLocation
, accumulator
);
1392 case ld::Fixup::kindStoreBigEndian64
:
1393 set64BE(fixUpLocation
, accumulator
);
1395 case ld::Fixup::kindStoreX86PCRel8
:
1396 case ld::Fixup::kindStoreX86BranchPCRel8
:
1397 if ( fit
->contentAddendOnly
)
1398 delta
= accumulator
;
1400 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 1);
1401 rangeCheck8(delta
, state
, atom
, fit
);
1402 *fixUpLocation
= delta
;
1404 case ld::Fixup::kindStoreX86PCRel16
:
1405 if ( fit
->contentAddendOnly
)
1406 delta
= accumulator
;
1408 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 2);
1409 rangeCheck16(delta
, state
, atom
, fit
);
1410 set16LE(fixUpLocation
, delta
);
1412 case ld::Fixup::kindStoreX86BranchPCRel32
:
1413 if ( fit
->contentAddendOnly
)
1414 delta
= accumulator
;
1416 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1417 rangeCheckBranch32(delta
, state
, atom
, fit
);
1418 set32LE(fixUpLocation
, delta
);
1420 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
1421 case ld::Fixup::kindStoreX86PCRel32GOT
:
1422 case ld::Fixup::kindStoreX86PCRel32
:
1423 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
1424 if ( fit
->contentAddendOnly
)
1425 delta
= accumulator
;
1427 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1428 rangeCheckRIP32(delta
, state
, atom
, fit
);
1429 set32LE(fixUpLocation
, delta
);
1431 case ld::Fixup::kindStoreX86PCRel32_1
:
1432 if ( fit
->contentAddendOnly
)
1433 delta
= accumulator
- 1;
1435 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 5);
1436 rangeCheckRIP32(delta
, state
, atom
, fit
);
1437 set32LE(fixUpLocation
, delta
);
1439 case ld::Fixup::kindStoreX86PCRel32_2
:
1440 if ( fit
->contentAddendOnly
)
1441 delta
= accumulator
- 2;
1443 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 6);
1444 rangeCheckRIP32(delta
, state
, atom
, fit
);
1445 set32LE(fixUpLocation
, delta
);
1447 case ld::Fixup::kindStoreX86PCRel32_4
:
1448 if ( fit
->contentAddendOnly
)
1449 delta
= accumulator
- 4;
1451 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1452 rangeCheckRIP32(delta
, state
, atom
, fit
);
1453 set32LE(fixUpLocation
, delta
);
1455 case ld::Fixup::kindStoreX86Abs32TLVLoad
:
1456 set32LE(fixUpLocation
, accumulator
);
1458 case ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA
:
1459 assert(_options
.outputKind() != Options::kObjectFile
);
1460 // TLV entry was optimized away, change movl instruction to a leal
1461 if ( fixUpLocation
[-1] != 0xA1 )
1462 throw "TLV load reloc does not point to a movl instruction";
1463 fixUpLocation
[-1] = 0xB8;
1464 set32LE(fixUpLocation
, accumulator
);
1466 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
1467 assert(_options
.outputKind() != Options::kObjectFile
);
1468 // GOT entry was optimized away, change movq instruction to a leaq
1469 if ( fixUpLocation
[-2] != 0x8B )
1470 throw "GOT load reloc does not point to a movq instruction";
1471 fixUpLocation
[-2] = 0x8D;
1472 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1473 rangeCheckRIP32(delta
, state
, atom
, fit
);
1474 set32LE(fixUpLocation
, delta
);
1476 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
1477 assert(_options
.outputKind() != Options::kObjectFile
);
1478 // TLV entry was optimized away, change movq instruction to a leaq
1479 if ( fixUpLocation
[-2] != 0x8B )
1480 throw "TLV load reloc does not point to a movq instruction";
1481 fixUpLocation
[-2] = 0x8D;
1482 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1483 rangeCheckRIP32(delta
, state
, atom
, fit
);
1484 set32LE(fixUpLocation
, delta
);
1486 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
1487 accumulator
= addressOf(state
, fit
, &toTarget
);
1488 // fall into kindStoreARMLoad12 case
1489 case ld::Fixup::kindStoreARMLoad12
:
1490 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1491 rangeCheckARM12(delta
, state
, atom
, fit
);
1492 instruction
= get32LE(fixUpLocation
);
1494 newInstruction
= instruction
& 0xFFFFF000;
1495 newInstruction
|= ((uint32_t)delta
& 0xFFF);
1498 newInstruction
= instruction
& 0xFF7FF000;
1499 newInstruction
|= ((uint32_t)(-delta
) & 0xFFF);
1501 set32LE(fixUpLocation
, newInstruction
);
1503 case ld::Fixup::kindDtraceExtra
:
1505 case ld::Fixup::kindStoreX86DtraceCallSiteNop
:
1506 if ( _options
.outputKind() != Options::kObjectFile
) {
1507 // change call site to a NOP
1508 fixUpLocation
[-1] = 0x90; // 1-byte nop
1509 fixUpLocation
[0] = 0x0F; // 4-byte nop
1510 fixUpLocation
[1] = 0x1F;
1511 fixUpLocation
[2] = 0x40;
1512 fixUpLocation
[3] = 0x00;
1515 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear
:
1516 if ( _options
.outputKind() != Options::kObjectFile
) {
1517 // change call site to a clear eax
1518 fixUpLocation
[-1] = 0x33; // xorl eax,eax
1519 fixUpLocation
[0] = 0xC0;
1520 fixUpLocation
[1] = 0x90; // 1-byte nop
1521 fixUpLocation
[2] = 0x90; // 1-byte nop
1522 fixUpLocation
[3] = 0x90; // 1-byte nop
1525 case ld::Fixup::kindStoreARMDtraceCallSiteNop
:
1526 if ( _options
.outputKind() != Options::kObjectFile
) {
1527 // change call site to a NOP
1528 set32LE(fixUpLocation
, 0xE1A00000);
1531 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear
:
1532 if ( _options
.outputKind() != Options::kObjectFile
) {
1533 // change call site to 'eor r0, r0, r0'
1534 set32LE(fixUpLocation
, 0xE0200000);
1537 case ld::Fixup::kindStoreThumbDtraceCallSiteNop
:
1538 if ( _options
.outputKind() != Options::kObjectFile
) {
1539 // change 32-bit blx call site to two thumb NOPs
1540 set32LE(fixUpLocation
, 0x46C046C0);
1543 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear
:
1544 if ( _options
.outputKind() != Options::kObjectFile
) {
1545 // change 32-bit blx call site to 'nop', 'eor r0, r0'
1546 set32LE(fixUpLocation
, 0x46C04040);
1549 case ld::Fixup::kindStoreARM64DtraceCallSiteNop
:
1550 if ( _options
.outputKind() != Options::kObjectFile
) {
1551 // change call site to a NOP
1552 set32LE(fixUpLocation
, 0xD503201F);
1555 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear
:
1556 if ( _options
.outputKind() != Options::kObjectFile
) {
1557 // change call site to 'MOVZ X0,0'
1558 set32LE(fixUpLocation
, 0xD2800000);
1561 case ld::Fixup::kindLazyTarget
:
1562 case ld::Fixup::kindIslandTarget
:
1564 case ld::Fixup::kindSetLazyOffset
:
1565 assert(fit
->binding
== ld::Fixup::bindingDirectlyBound
);
1566 accumulator
= this->lazyBindingInfoOffsetForLazyPointerAddress(fit
->u
.target
->finalAddress());
1568 case ld::Fixup::kindDataInCodeStartData
:
1569 case ld::Fixup::kindDataInCodeStartJT8
:
1570 case ld::Fixup::kindDataInCodeStartJT16
:
1571 case ld::Fixup::kindDataInCodeStartJT32
:
1572 case ld::Fixup::kindDataInCodeStartJTA32
:
1573 case ld::Fixup::kindDataInCodeEnd
:
1575 case ld::Fixup::kindLinkerOptimizationHint
:
1576 // expand table of address/offsets used by hints
1577 lohExtra
.addend
= fit
->u
.addend
;
1578 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta1
<< 2)] = NULL
;
1579 if ( lohExtra
.info
.count
> 0 )
1580 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta2
<< 2)] = NULL
;
1581 if ( lohExtra
.info
.count
> 1 )
1582 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta3
<< 2)] = NULL
;
1583 if ( lohExtra
.info
.count
> 2 )
1584 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta4
<< 2)] = NULL
;
1586 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
1587 accumulator
= addressOf(state
, fit
, &toTarget
);
1588 thumbTarget
= targetIsThumb(state
, fit
);
1591 if ( fit
->contentAddendOnly
)
1593 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1594 set32LE(fixUpLocation
, accumulator
);
1596 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
1597 accumulator
= addressOf(state
, fit
, &toTarget
);
1598 if ( fit
->contentAddendOnly
)
1600 set64LE(fixUpLocation
, accumulator
);
1602 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
1603 accumulator
= addressOf(state
, fit
, &toTarget
);
1604 if ( fit
->contentAddendOnly
)
1606 set32BE(fixUpLocation
, accumulator
);
1608 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
1609 accumulator
= addressOf(state
, fit
, &toTarget
);
1610 if ( fit
->contentAddendOnly
)
1612 set64BE(fixUpLocation
, accumulator
);
1614 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32
:
1615 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1616 set32LE(fixUpLocation
, accumulator
);
1618 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64
:
1619 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1620 set64LE(fixUpLocation
, accumulator
);
1622 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
1623 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
1624 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
1625 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
1626 accumulator
= addressOf(state
, fit
, &toTarget
);
1627 if ( fit
->contentDetlaToAddendOnly
)
1629 if ( fit
->contentAddendOnly
)
1632 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1633 rangeCheckRIP32(delta
, state
, atom
, fit
);
1634 set32LE(fixUpLocation
, delta
);
1636 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad
:
1637 set32LE(fixUpLocation
, accumulator
);
1639 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA
:
1640 // TLV entry was optimized away, change movl instruction to a leal
1641 if ( fixUpLocation
[-1] != 0xA1 )
1642 throw "TLV load reloc does not point to a movl <abs-address>,<reg> instruction";
1643 fixUpLocation
[-1] = 0xB8;
1644 accumulator
= addressOf(state
, fit
, &toTarget
);
1645 set32LE(fixUpLocation
, accumulator
);
1647 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
1648 // GOT entry was optimized away, change movq instruction to a leaq
1649 if ( fixUpLocation
[-2] != 0x8B )
1650 throw "GOT load reloc does not point to a movq instruction";
1651 fixUpLocation
[-2] = 0x8D;
1652 accumulator
= addressOf(state
, fit
, &toTarget
);
1653 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1654 rangeCheckRIP32(delta
, state
, atom
, fit
);
1655 set32LE(fixUpLocation
, delta
);
1657 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
1658 // TLV entry was optimized away, change movq instruction to a leaq
1659 if ( fixUpLocation
[-2] != 0x8B )
1660 throw "TLV load reloc does not point to a movq instruction";
1661 fixUpLocation
[-2] = 0x8D;
1662 accumulator
= addressOf(state
, fit
, &toTarget
);
1663 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1664 rangeCheckRIP32(delta
, state
, atom
, fit
);
1665 set32LE(fixUpLocation
, delta
);
1667 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
1668 accumulator
= addressOf(state
, fit
, &toTarget
);
1669 thumbTarget
= targetIsThumb(state
, fit
);
1670 if ( toTarget
->contentType() == ld::Atom::typeBranchIsland
) {
1671 // Branching to island. If ultimate target is in range, branch there directly.
1672 for (ld::Fixup::iterator islandfit
= toTarget
->fixupsBegin(), end
=toTarget
->fixupsEnd(); islandfit
!= end
; ++islandfit
) {
1673 if ( islandfit
->kind
== ld::Fixup::kindIslandTarget
) {
1674 const ld::Atom
* islandTarget
= NULL
;
1675 uint64_t islandTargetAddress
= addressOf(state
, islandfit
, &islandTarget
);
1676 delta
= islandTargetAddress
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1677 if ( checkArmBranch24Displacement(delta
) ) {
1678 toTarget
= islandTarget
;
1679 accumulator
= islandTargetAddress
;
1680 thumbTarget
= targetIsThumb(state
, islandfit
);
1688 if ( fit
->contentDetlaToAddendOnly
)
1690 // fall into kindStoreARMBranch24 case
1691 case ld::Fixup::kindStoreARMBranch24
:
1692 // The pc added will be +8 from the pc
1693 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1694 rangeCheckARMBranch24(delta
, state
, atom
, fit
);
1695 instruction
= get32LE(fixUpLocation
);
1696 // Make sure we are calling arm with bl, thumb with blx
1697 is_bl
= ((instruction
& 0xFF000000) == 0xEB000000);
1698 is_blx
= ((instruction
& 0xFE000000) == 0xFA000000);
1699 is_b
= !is_blx
&& ((instruction
& 0x0F000000) == 0x0A000000);
1700 if ( (is_bl
| is_blx
) && thumbTarget
) {
1701 uint32_t opcode
= 0xFA000000; // force to be blx
1702 uint32_t disp
= (uint32_t)(delta
>> 2) & 0x00FFFFFF;
1703 uint32_t h_bit
= (uint32_t)(delta
<< 23) & 0x01000000;
1704 newInstruction
= opcode
| h_bit
| disp
;
1706 else if ( (is_bl
| is_blx
) && !thumbTarget
) {
1707 uint32_t opcode
= 0xEB000000; // force to be bl
1708 uint32_t disp
= (uint32_t)(delta
>> 2) & 0x00FFFFFF;
1709 newInstruction
= opcode
| disp
;
1711 else if ( is_b
&& thumbTarget
) {
1712 if ( fit
->contentDetlaToAddendOnly
)
1713 newInstruction
= (instruction
& 0xFF000000) | ((uint32_t)(delta
>> 2) & 0x00FFFFFF);
1715 throwf("no pc-rel bx arm instruction. Can't fix up branch to %s in %s",
1716 referenceTargetAtomName(state
, fit
), atom
->name());
1718 else if ( !is_bl
&& !is_blx
&& thumbTarget
) {
1719 throwf("don't know how to convert instruction %x referencing %s to thumb",
1720 instruction
, referenceTargetAtomName(state
, fit
));
1723 newInstruction
= (instruction
& 0xFF000000) | ((uint32_t)(delta
>> 2) & 0x00FFFFFF);
1725 set32LE(fixUpLocation
, newInstruction
);
1727 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
1728 accumulator
= addressOf(state
, fit
, &toTarget
);
1729 thumbTarget
= targetIsThumb(state
, fit
);
1730 if ( toTarget
->contentType() == ld::Atom::typeBranchIsland
) {
1731 // branching to island, so see if ultimate target is in range
1732 // and if so branch to ultimate target instead.
1733 for (ld::Fixup::iterator islandfit
= toTarget
->fixupsBegin(), end
=toTarget
->fixupsEnd(); islandfit
!= end
; ++islandfit
) {
1734 if ( islandfit
->kind
== ld::Fixup::kindIslandTarget
) {
1735 const ld::Atom
* islandTarget
= NULL
;
1736 uint64_t islandTargetAddress
= addressOf(state
, islandfit
, &islandTarget
);
1737 if ( !fit
->contentDetlaToAddendOnly
) {
1738 if ( targetIsThumb(state
, islandfit
) ) {
1739 // Thumb to thumb branch, we will be generating a bl instruction.
1740 // Delta is always even, so mask out thumb bit in target.
1741 islandTargetAddress
&= -2ULL;
1744 // Target is not thumb, we will be generating a blx instruction
1745 // Since blx cannot have the low bit set, set bit[1] of the target to
1746 // bit[1] of the base address, so that the difference is a multiple of
1748 islandTargetAddress
&= -3ULL;
1749 islandTargetAddress
|= ((atom
->finalAddress() + fit
->offsetInAtom
) & 2LL);
1752 delta
= islandTargetAddress
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1753 if ( checkThumbBranch22Displacement(delta
) ) {
1754 toTarget
= islandTarget
;
1755 accumulator
= islandTargetAddress
;
1756 thumbTarget
= targetIsThumb(state
, islandfit
);
1764 if ( fit
->contentDetlaToAddendOnly
)
1766 // fall into kindStoreThumbBranch22 case
1767 case ld::Fixup::kindStoreThumbBranch22
:
1768 instruction
= get32LE(fixUpLocation
);
1769 is_bl
= ((instruction
& 0xD000F800) == 0xD000F000);
1770 is_blx
= ((instruction
& 0xD000F800) == 0xC000F000);
1771 is_b
= ((instruction
& 0xD000F800) == 0x9000F000);
1772 if ( !fit
->contentDetlaToAddendOnly
) {
1773 if ( thumbTarget
) {
1774 // Thumb to thumb branch, we will be generating a bl instruction.
1775 // Delta is always even, so mask out thumb bit in target.
1776 accumulator
&= -2ULL;
1779 // Target is not thumb, we will be generating a blx instruction
1780 // Since blx cannot have the low bit set, set bit[1] of the target to
1781 // bit[1] of the base address, so that the difference is a multiple of
1783 accumulator
&= -3ULL;
1784 accumulator
|= ((atom
->finalAddress() + fit
->offsetInAtom
) & 2LL);
1787 // The pc added will be +4 from the pc
1788 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1789 // <rdar://problem/16652542> support bl in very large .o files
1790 if ( fit
->contentDetlaToAddendOnly
) {
1791 while ( delta
< (-16777216LL) )
1794 rangeCheckThumbBranch22(delta
, state
, atom
, fit
);
1795 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
1796 // The instruction is really two instructions:
1797 // The lower 16 bits are the first instruction, which contains the high
1798 // 11 bits of the displacement.
1799 // The upper 16 bits are the second instruction, which contains the low
1800 // 11 bits of the displacement, as well as differentiating bl and blx.
1801 uint32_t s
= (uint32_t)(delta
>> 24) & 0x1;
1802 uint32_t i1
= (uint32_t)(delta
>> 23) & 0x1;
1803 uint32_t i2
= (uint32_t)(delta
>> 22) & 0x1;
1804 uint32_t imm10
= (uint32_t)(delta
>> 12) & 0x3FF;
1805 uint32_t imm11
= (uint32_t)(delta
>> 1) & 0x7FF;
1806 uint32_t j1
= (i1
== s
);
1807 uint32_t j2
= (i2
== s
);
1810 instruction
= 0xD000F000; // keep bl
1812 instruction
= 0xC000F000; // change to blx
1814 else if ( is_blx
) {
1816 instruction
= 0xD000F000; // change to bl
1818 instruction
= 0xC000F000; // keep blx
1821 instruction
= 0x9000F000; // keep b
1822 if ( !thumbTarget
&& !fit
->contentDetlaToAddendOnly
) {
1823 throwf("armv7 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1824 referenceTargetAtomName(state
, fit
), atom
->name());
1829 throwf("don't know how to convert branch instruction %x referencing %s to bx",
1830 instruction
, referenceTargetAtomName(state
, fit
));
1831 instruction
= 0x9000F000; // keep b
1833 uint32_t nextDisp
= (j1
<< 13) | (j2
<< 11) | imm11
;
1834 uint32_t firstDisp
= (s
<< 10) | imm10
;
1835 newInstruction
= instruction
| (nextDisp
<< 16) | firstDisp
;
1836 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, instruction=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
1837 // s, j1, j2, imm10, imm11, instruction, firstDisp, nextDisp, newInstruction, delta, atom->name(), toTarget->name());
1838 set32LE(fixUpLocation
, newInstruction
);
1841 // The instruction is really two instructions:
1842 // The lower 16 bits are the first instruction, which contains the high
1843 // 11 bits of the displacement.
1844 // The upper 16 bits are the second instruction, which contains the low
1845 // 11 bits of the displacement, as well as differentiating bl and blx.
1846 uint32_t firstDisp
= (uint32_t)(delta
>> 12) & 0x7FF;
1847 uint32_t nextDisp
= (uint32_t)(delta
>> 1) & 0x7FF;
1848 if ( is_bl
&& !thumbTarget
) {
1849 instruction
= 0xE800F000;
1851 else if ( is_blx
&& thumbTarget
) {
1852 instruction
= 0xF800F000;
1855 instruction
= 0x9000F000; // keep b
1856 if ( !thumbTarget
&& !fit
->contentDetlaToAddendOnly
) {
1857 throwf("armv6 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1858 referenceTargetAtomName(state
, fit
), atom
->name());
1862 instruction
= instruction
& 0xF800F800;
1864 newInstruction
= instruction
| (nextDisp
<< 16) | firstDisp
;
1865 set32LE(fixUpLocation
, newInstruction
);
1868 case ld::Fixup::kindStoreARMLow16
:
1870 uint32_t imm4
= (accumulator
& 0x0000F000) >> 12;
1871 uint32_t imm12
= accumulator
& 0x00000FFF;
1872 instruction
= get32LE(fixUpLocation
);
1873 newInstruction
= (instruction
& 0xFFF0F000) | (imm4
<< 16) | imm12
;
1874 set32LE(fixUpLocation
, newInstruction
);
1877 case ld::Fixup::kindStoreARMHigh16
:
1879 uint32_t imm4
= (accumulator
& 0xF0000000) >> 28;
1880 uint32_t imm12
= (accumulator
& 0x0FFF0000) >> 16;
1881 instruction
= get32LE(fixUpLocation
);
1882 newInstruction
= (instruction
& 0xFFF0F000) | (imm4
<< 16) | imm12
;
1883 set32LE(fixUpLocation
, newInstruction
);
1886 case ld::Fixup::kindStoreThumbLow16
:
1888 uint32_t imm4
= (accumulator
& 0x0000F000) >> 12;
1889 uint32_t i
= (accumulator
& 0x00000800) >> 11;
1890 uint32_t imm3
= (accumulator
& 0x00000700) >> 8;
1891 uint32_t imm8
= accumulator
& 0x000000FF;
1892 instruction
= get32LE(fixUpLocation
);
1893 newInstruction
= (instruction
& 0x8F00FBF0) | imm4
| (i
<< 10) | (imm3
<< 28) | (imm8
<< 16);
1894 set32LE(fixUpLocation
, newInstruction
);
1897 case ld::Fixup::kindStoreThumbHigh16
:
1899 uint32_t imm4
= (accumulator
& 0xF0000000) >> 28;
1900 uint32_t i
= (accumulator
& 0x08000000) >> 27;
1901 uint32_t imm3
= (accumulator
& 0x07000000) >> 24;
1902 uint32_t imm8
= (accumulator
& 0x00FF0000) >> 16;
1903 instruction
= get32LE(fixUpLocation
);
1904 newInstruction
= (instruction
& 0x8F00FBF0) | imm4
| (i
<< 10) | (imm3
<< 28) | (imm8
<< 16);
1905 set32LE(fixUpLocation
, newInstruction
);
1908 #if SUPPORT_ARCH_arm64
1909 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
1910 accumulator
= addressOf(state
, fit
, &toTarget
);
1911 // fall into kindStoreARM64Branch26 case
1912 case ld::Fixup::kindStoreARM64Branch26
:
1913 if ( fit
->contentAddendOnly
)
1914 delta
= accumulator
;
1916 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
);
1917 rangeCheckARM64Branch26(delta
, state
, atom
, fit
);
1918 instruction
= get32LE(fixUpLocation
);
1919 newInstruction
= (instruction
& 0xFC000000) | ((uint32_t)(delta
>> 2) & 0x03FFFFFF);
1920 set32LE(fixUpLocation
, newInstruction
);
1922 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
1923 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
1924 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
1925 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
1926 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
1927 accumulator
= addressOf(state
, fit
, &toTarget
);
1928 // fall into kindStoreARM64Branch26 case
1929 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
1930 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
1931 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
1932 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
1933 case ld::Fixup::kindStoreARM64Page21
:
1935 // the ADRP instruction adds the imm << 12 to the page that the pc is on
1936 if ( fit
->contentAddendOnly
)
1939 delta
= (accumulator
& (-4096)) - ((atom
->finalAddress() + fit
->offsetInAtom
) & (-4096));
1940 rangeCheckARM64Page21(delta
, state
, atom
, fit
);
1941 instruction
= get32LE(fixUpLocation
);
1942 uint32_t immhi
= (delta
>> 9) & (0x00FFFFE0);
1943 uint32_t immlo
= (delta
<< 17) & (0x60000000);
1944 newInstruction
= (instruction
& 0x9F00001F) | immlo
| immhi
;
1945 set32LE(fixUpLocation
, newInstruction
);
1948 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
1949 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
1950 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
1951 accumulator
= addressOf(state
, fit
, &toTarget
);
1952 // fall into kindAddressARM64PageOff12 case
1953 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12
:
1954 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
1955 case ld::Fixup::kindStoreARM64PageOff12
:
1957 uint32_t offset
= accumulator
& 0x00000FFF;
1958 instruction
= get32LE(fixUpLocation
);
1959 // LDR/STR instruction have implicit scale factor, need to compensate for that
1960 if ( instruction
& 0x08000000 ) {
1961 uint32_t implictShift
= ((instruction
>> 30) & 0x3);
1962 switch ( implictShift
) {
1964 if ( (instruction
& 0x04800000) == 0x04800000 ) {
1965 // vector and byte LDR/STR have same "size" bits, need to check other bits to differenciate
1967 if ( (offset
& 0xF) != 0 ) {
1968 throwf("128-bit LDR/STR not 16-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1969 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
1970 addressOf(state
, fit
, &toTarget
));
1975 if ( (offset
& 0x1) != 0 ) {
1976 throwf("16-bit LDR/STR not 2-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1977 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
1978 addressOf(state
, fit
, &toTarget
));
1982 if ( (offset
& 0x3) != 0 ) {
1983 throwf("32-bit LDR/STR not 4-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1984 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
1985 addressOf(state
, fit
, &toTarget
));
1989 if ( (offset
& 0x7) != 0 ) {
1990 throwf("64-bit LDR/STR not 8-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1991 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
1992 addressOf(state
, fit
, &toTarget
));
1996 // compensate for implicit scale
1997 offset
>>= implictShift
;
1999 if ( fit
->contentAddendOnly
)
2001 uint32_t imm12
= offset
<< 10;
2002 newInstruction
= (instruction
& 0xFFC003FF) | imm12
;
2003 set32LE(fixUpLocation
, newInstruction
);
2006 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
2007 accumulator
= addressOf(state
, fit
, &toTarget
);
2008 // fall into kindStoreARM64GOTLoadPage21 case
2009 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
2011 // GOT entry was optimized away, change LDR instruction to a ADD
2012 instruction
= get32LE(fixUpLocation
);
2013 if ( (instruction
& 0xFFC00000) != 0xF9400000 )
2014 throwf("GOT load reloc does not point to a LDR instruction in %s", atom
->name());
2015 uint32_t offset
= accumulator
& 0x00000FFF;
2016 uint32_t imm12
= offset
<< 10;
2017 newInstruction
= 0x91000000 | imm12
| (instruction
& 0x000003FF);
2018 set32LE(fixUpLocation
, newInstruction
);
2021 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
2022 accumulator
= addressOf(state
, fit
, &toTarget
);
2023 // fall into kindStoreARM64TLVPLeaPageOff12 case
2024 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
2026 // TLV thunk in same linkage unit, so LEA it directly, changing LDR instruction to a ADD
2027 instruction
= get32LE(fixUpLocation
);
2028 if ( (instruction
& 0xFFC00000) != 0xF9400000 )
2029 throwf("TLV load reloc does not point to a LDR instruction in %s", atom
->name());
2030 uint32_t offset
= accumulator
& 0x00000FFF;
2031 uint32_t imm12
= offset
<< 10;
2032 newInstruction
= 0x91000000 | imm12
| (instruction
& 0x000003FF);
2033 set32LE(fixUpLocation
, newInstruction
);
2036 case ld::Fixup::kindStoreARM64PointerToGOT
:
2037 set64LE(fixUpLocation
, accumulator
);
2039 case ld::Fixup::kindStoreARM64PCRelToGOT
:
2040 if ( fit
->contentAddendOnly
)
2041 delta
= accumulator
;
2043 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
);
2044 set32LE(fixUpLocation
, delta
);
2050 #if SUPPORT_ARCH_arm64
2051 // after all fixups are done on atom, if there are potential optimizations, do those
2052 if ( (usedByHints
.size() != 0) && (_options
.outputKind() != Options::kObjectFile
) && !_options
.ignoreOptimizationHints() ) {
2053 // fill in second part of usedByHints map, so we can see the target of fixups that might be optimized
2054 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2055 switch ( fit
->kind
) {
2056 case ld::Fixup::kindLinkerOptimizationHint
:
2057 case ld::Fixup::kindNoneFollowOn
:
2058 case ld::Fixup::kindNoneGroupSubordinate
:
2059 case ld::Fixup::kindNoneGroupSubordinateFDE
:
2060 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
2061 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
2064 if ( fit
->firstInCluster() ) {
2065 std::map
<uint32_t, const Fixup
*>::iterator pos
= usedByHints
.find(fit
->offsetInAtom
);
2066 if ( pos
!= usedByHints
.end() ) {
2067 assert(pos
->second
== NULL
&& "two fixups in same hint location");
2069 //fprintf(stderr, "setting %s usedByHints[0x%04X], kind = %d\n", atom->name(), fit->offsetInAtom, fit->kind);
2075 // apply hints pass 1
2076 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2077 if ( fit
->kind
!= ld::Fixup::kindLinkerOptimizationHint
)
2079 InstructionInfo infoA
;
2080 InstructionInfo infoB
;
2081 InstructionInfo infoC
;
2082 InstructionInfo infoD
;
2083 LoadStoreInfo ldrInfoB
, ldrInfoC
;
2087 bool targetFourByteAligned
;
2088 bool literalableSize
, isADRP
, isADD
, isLDR
, isSTR
;
2089 //uint8_t loadSize, destReg;
2090 //uint32_t scaledOffset;
2092 ld::Fixup::LOH_arm64 alt
;
2093 alt
.addend
= fit
->u
.addend
;
2094 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta1
<< 2), &infoA
);
2095 if ( alt
.info
.count
> 0 )
2096 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta2
<< 2), &infoB
);
2097 if ( alt
.info
.count
> 1 )
2098 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta3
<< 2), &infoC
);
2099 if ( alt
.info
.count
> 2 )
2100 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta4
<< 2), &infoD
);
2102 switch ( alt
.info
.kind
) {
2103 case LOH_ARM64_ADRP_ADRP
:
2104 // processed in pass 2 beacuse some ADRP may have been removed
2106 case LOH_ARM64_ADRP_LDR
:
2107 LOH_ASSERT(alt
.info
.count
== 1);
2108 LOH_ASSERT(isPageKind(infoA
.fixup
));
2109 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2110 LOH_ASSERT(infoA
.target
== infoB
.target
);
2111 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2112 usableSegment
= ( !_options
.sharedRegionEligible() || (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0) );
2113 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2115 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2117 LOH_ASSERT(ldrInfoB
.baseReg
== adrpInfoA
.destReg
);
2118 LOH_ASSERT(ldrInfoB
.offset
== (infoA
.targetAddress
& 0x00000FFF));
2119 literalableSize
= ( (ldrInfoB
.size
!= 1) && (ldrInfoB
.size
!= 2) );
2120 targetFourByteAligned
= ( (infoA
.targetAddress
& 0x3) == 0 );
2121 if ( literalableSize
&& usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2122 set32LE(infoA
.instructionContent
, makeNOP());
2123 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2124 if ( _options
.verboseOptimizationHints() )
2125 fprintf(stderr
, "adrp-ldr at 0x%08llX transformed to LDR literal\n", infoB
.instructionAddress
);
2128 if ( _options
.verboseOptimizationHints() )
2129 fprintf(stderr
, "adrp-ldr at 0x%08llX not transformed, isLDR=%d, literalableSize=%d, inRange=%d, usableSegment=%d, scaledOffset=%d\n",
2130 infoB
.instructionAddress
, isLDR
, literalableSize
, withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
), usableSegment
, ldrInfoB
.offset
);
2133 case LOH_ARM64_ADRP_ADD_LDR
:
2134 LOH_ASSERT(alt
.info
.count
== 2);
2135 LOH_ASSERT(isPageKind(infoA
.fixup
));
2136 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2137 LOH_ASSERT(infoC
.fixup
== NULL
);
2138 LOH_ASSERT(infoA
.target
== infoB
.target
);
2139 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2140 usableSegment
= ( !_options
.sharedRegionEligible() || (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0) );
2141 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2143 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2145 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2146 isLDR
= parseLoadOrStore(infoC
.instruction
, ldrInfoC
);
2148 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2149 targetFourByteAligned
= ( ((infoB
.targetAddress
+ldrInfoC
.offset
) & 0x3) == 0 );
2150 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2151 if ( literalableSize
&& usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2152 // can do T1 transformation to LDR literal
2153 set32LE(infoA
.instructionContent
, makeNOP());
2154 set32LE(infoB
.instructionContent
, makeNOP());
2155 set32LE(infoC
.instructionContent
, makeLDR_literal(ldrInfoC
, infoA
.targetAddress
+ldrInfoC
.offset
, infoC
.instructionAddress
));
2156 if ( _options
.verboseOptimizationHints() ) {
2157 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC
.instructionAddress
);
2160 else if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2161 // can to T4 transformation and turn ADRP/ADD into ADR
2162 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
+ldrInfoC
.offset
, infoA
.instructionAddress
));
2163 set32LE(infoB
.instructionContent
, makeNOP());
2164 ldrInfoC
.offset
= 0; // offset is now in ADR instead of ADD or LDR
2165 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2166 set32LE(infoC
.instructionContent
, infoC
.instruction
& 0xFFC003FF);
2167 if ( _options
.verboseOptimizationHints() )
2168 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoB
.instructionAddress
);
2170 else if ( ((infoB
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2171 // can do T2 transformation by merging ADD into LD
2173 set32LE(infoB
.instructionContent
, makeNOP());
2174 ldrInfoC
.offset
+= addInfoB
.addend
;
2175 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2176 if ( _options
.verboseOptimizationHints() )
2177 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T2 transformed to ADRP/LDR \n", infoC
.instructionAddress
);
2180 if ( _options
.verboseOptimizationHints() )
2181 fprintf(stderr
, "adrp-add-ldr at 0x%08llX could not be transformed, loadSize=%d, literalableSize=%d, inRange=%d, usableSegment=%d, targetFourByteAligned=%d, imm12=%d\n",
2182 infoC
.instructionAddress
, ldrInfoC
.size
, literalableSize
, withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
), usableSegment
, targetFourByteAligned
, ldrInfoC
.offset
);
2185 case LOH_ARM64_ADRP_ADD
:
2186 LOH_ASSERT(alt
.info
.count
== 1);
2187 LOH_ASSERT(isPageKind(infoA
.fixup
));
2188 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2189 LOH_ASSERT(infoA
.target
== infoB
.target
);
2190 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2191 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2193 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2195 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2196 usableSegment
= ( !_options
.sharedRegionEligible() || (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0) );
2197 if ( usableSegment
&& withinOneMeg(infoA
.targetAddress
, infoA
.instructionAddress
) ) {
2198 // can do T4 transformation and use ADR
2199 set32LE(infoA
.instructionContent
, makeADR(addInfoB
.destReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2200 set32LE(infoB
.instructionContent
, makeNOP());
2201 if ( _options
.verboseOptimizationHints() )
2202 fprintf(stderr
, "adrp-add at 0x%08llX transformed to ADR\n", infoB
.instructionAddress
);
2205 if ( _options
.verboseOptimizationHints() )
2206 fprintf(stderr
, "adrp-add at 0x%08llX not transformed, isAdd=%d, inRange=%d, usableSegment=%d\n",
2207 infoB
.instructionAddress
, isADD
, withinOneMeg(infoA
.targetAddress
, infoA
.instructionAddress
), usableSegment
);
2210 case LOH_ARM64_ADRP_LDR_GOT_LDR
:
2211 LOH_ASSERT(alt
.info
.count
== 2);
2212 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2213 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2214 LOH_ASSERT(infoC
.fixup
== NULL
);
2215 LOH_ASSERT(infoA
.target
== infoB
.target
);
2216 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2217 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2219 isLDR
= parseLoadOrStore(infoC
.instruction
, ldrInfoC
);
2221 LOH_ASSERT(ldrInfoC
.offset
== 0);
2222 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2223 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2225 // target of GOT is external
2226 LOH_ASSERT(ldrInfoB
.size
== 8);
2227 LOH_ASSERT(!ldrInfoB
.isFloat
);
2228 LOH_ASSERT(ldrInfoC
.baseReg
== ldrInfoB
.reg
);
2229 //fprintf(stderr, "infoA.target=%p, %s, infoA.targetAddress=0x%08llX\n", infoA.target, infoA.target->name(), infoA.targetAddress);
2230 usableSegment
= ( !_options
.sharedRegionEligible() || (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0) );
2231 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2232 if ( usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2233 // can do T5 transform
2234 set32LE(infoA
.instructionContent
, makeNOP());
2235 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2236 if ( _options
.verboseOptimizationHints() ) {
2237 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T5 transformed to LDR literal of GOT plus LDR\n", infoC
.instructionAddress
);
2241 if ( _options
.verboseOptimizationHints() )
2242 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX no optimization done\n", infoC
.instructionAddress
);
2246 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2247 LOH_ASSERT(addInfoB
.srcReg
== adrpInfoA
.destReg
);
2248 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2249 usableSegment
= ( !_options
.sharedRegionEligible() || (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0) );
2250 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2251 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2252 if ( usableSegment
&& literalableSize
&& targetFourByteAligned
&& withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
) ) {
2253 // can do T1 transform
2254 set32LE(infoA
.instructionContent
, makeNOP());
2255 set32LE(infoB
.instructionContent
, makeNOP());
2256 set32LE(infoC
.instructionContent
, makeLDR_literal(ldrInfoC
, infoA
.targetAddress
, infoC
.instructionAddress
));
2257 if ( _options
.verboseOptimizationHints() )
2258 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC
.instructionAddress
);
2260 else if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2261 // can do T4 transform
2262 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2263 set32LE(infoB
.instructionContent
, makeNOP());
2264 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2265 if ( _options
.verboseOptimizationHints() ) {
2266 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoC
.instructionAddress
);
2269 else if ( (infoA
.targetAddress
% ldrInfoC
.size
) == 0 ) {
2270 // can do T2 transform
2271 set32LE(infoB
.instructionContent
, makeNOP());
2272 ldrInfoC
.baseReg
= adrpInfoA
.destReg
;
2273 ldrInfoC
.offset
= addInfoB
.addend
;
2274 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2275 if ( _options
.verboseOptimizationHints() ) {
2276 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADRP/NOP/LDR\n", infoC
.instructionAddress
);
2280 // T3 transform already done by ld::passes:got:doPass()
2281 if ( _options
.verboseOptimizationHints() ) {
2282 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T3 transformed to ADRP/ADD/LDR\n", infoC
.instructionAddress
);
2287 if ( _options
.verboseOptimizationHints() )
2288 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX not ADD or LDR\n", infoC
.instructionAddress
);
2291 case LOH_ARM64_ADRP_ADD_STR
:
2292 LOH_ASSERT(alt
.info
.count
== 2);
2293 LOH_ASSERT(isPageKind(infoA
.fixup
));
2294 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2295 LOH_ASSERT(infoC
.fixup
== NULL
);
2296 LOH_ASSERT(infoA
.target
== infoB
.target
);
2297 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2298 usableSegment
= ( !_options
.sharedRegionEligible() || (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0) );
2299 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2301 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2303 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2304 isSTR
= (parseLoadOrStore(infoC
.instruction
, ldrInfoC
) && ldrInfoC
.isStore
);
2306 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2307 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2308 // can to T4 transformation and turn ADRP/ADD into ADR
2309 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
+ldrInfoC
.offset
, infoA
.instructionAddress
));
2310 set32LE(infoB
.instructionContent
, makeNOP());
2311 ldrInfoC
.offset
= 0; // offset is now in ADR instead of ADD or LDR
2312 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2313 set32LE(infoC
.instructionContent
, infoC
.instruction
& 0xFFC003FF);
2314 if ( _options
.verboseOptimizationHints() )
2315 fprintf(stderr
, "adrp-add-str at 0x%08llX T4 transformed to ADR/STR\n", infoB
.instructionAddress
);
2317 else if ( ((infoB
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2318 // can do T2 transformation by merging ADD into STR
2320 set32LE(infoB
.instructionContent
, makeNOP());
2321 ldrInfoC
.offset
+= addInfoB
.addend
;
2322 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2323 if ( _options
.verboseOptimizationHints() )
2324 fprintf(stderr
, "adrp-add-str at 0x%08llX T2 transformed to ADRP/STR \n", infoC
.instructionAddress
);
2327 if ( _options
.verboseOptimizationHints() )
2328 fprintf(stderr
, "adrp-add-str at 0x%08llX could not be transformed, loadSize=%d, inRange=%d, usableSegment=%d, imm12=%d\n",
2329 infoC
.instructionAddress
, ldrInfoC
.size
, withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
), usableSegment
, ldrInfoC
.offset
);
2332 case LOH_ARM64_ADRP_LDR_GOT_STR
:
2333 LOH_ASSERT(alt
.info
.count
== 2);
2334 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2335 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2336 LOH_ASSERT(infoC
.fixup
== NULL
);
2337 LOH_ASSERT(infoA
.target
== infoB
.target
);
2338 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2339 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2341 isSTR
= (parseLoadOrStore(infoC
.instruction
, ldrInfoC
) && ldrInfoC
.isStore
);
2343 LOH_ASSERT(ldrInfoC
.offset
== 0);
2344 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2345 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2347 // target of GOT is external
2348 LOH_ASSERT(ldrInfoB
.size
== 8);
2349 LOH_ASSERT(!ldrInfoB
.isFloat
);
2350 LOH_ASSERT(ldrInfoC
.baseReg
== ldrInfoB
.reg
);
2351 usableSegment
= ( !_options
.sharedRegionEligible() || (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0) );
2352 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2353 if ( usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2354 // can do T5 transform
2355 set32LE(infoA
.instructionContent
, makeNOP());
2356 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2357 if ( _options
.verboseOptimizationHints() ) {
2358 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T5 transformed to LDR literal of GOT plus STR\n", infoC
.instructionAddress
);
2362 if ( _options
.verboseOptimizationHints() )
2363 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX no optimization done\n", infoC
.instructionAddress
);
2367 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2368 LOH_ASSERT(addInfoB
.srcReg
== adrpInfoA
.destReg
);
2369 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2370 usableSegment
= ( !_options
.sharedRegionEligible() || (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0) );
2371 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2372 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2373 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2374 // can do T4 transform
2375 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2376 set32LE(infoB
.instructionContent
, makeNOP());
2377 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2378 if ( _options
.verboseOptimizationHints() ) {
2379 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADR/STR\n", infoC
.instructionAddress
);
2382 else if ( ((infoA
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2383 // can do T2 transform
2384 set32LE(infoB
.instructionContent
, makeNOP());
2385 ldrInfoC
.baseReg
= adrpInfoA
.destReg
;
2386 ldrInfoC
.offset
= addInfoB
.addend
;
2387 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2388 if ( _options
.verboseOptimizationHints() ) {
2389 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADRP/NOP/STR\n", infoC
.instructionAddress
);
2393 // T3 transform already done by ld::passes:got:doPass()
2394 if ( _options
.verboseOptimizationHints() ) {
2395 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T3 transformed to ADRP/ADD/STR\n", infoC
.instructionAddress
);
2400 if ( _options
.verboseOptimizationHints() )
2401 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX not ADD or LDR\n", infoC
.instructionAddress
);
2404 case LOH_ARM64_ADRP_LDR_GOT
:
2405 LOH_ASSERT(alt
.info
.count
== 1);
2406 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2407 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2408 LOH_ASSERT(infoA
.target
== infoB
.target
);
2409 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2410 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2411 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2412 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2413 usableSegment
= ( !_options
.sharedRegionEligible() || (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0) );
2416 if ( usableSegment
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2417 // can do T5 transform (LDR literal load of GOT)
2418 set32LE(infoA
.instructionContent
, makeNOP());
2419 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2420 if ( _options
.verboseOptimizationHints() ) {
2421 fprintf(stderr
, "adrp-ldr-got at 0x%08llX T5 transformed to NOP/LDR\n", infoC
.instructionAddress
);
2426 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2427 // can do T4 transform (ADR to compute local address)
2428 set32LE(infoA
.instructionContent
, makeADR(addInfoB
.destReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2429 set32LE(infoB
.instructionContent
, makeNOP());
2430 if ( _options
.verboseOptimizationHints() ) {
2431 fprintf(stderr
, "adrp-ldr-got at 0x%08llX T4 transformed to ADR/STR\n", infoC
.instructionAddress
);
2436 if ( _options
.verboseOptimizationHints() )
2437 fprintf(stderr
, "adrp-ldr-got at 0x%08llX not LDR or ADD\n", infoB
.instructionAddress
);
2441 if ( _options
.verboseOptimizationHints() )
2442 fprintf(stderr
, "adrp-ldr-got at 0x%08llX not ADRP\n", infoA
.instructionAddress
);
2446 if ( _options
.verboseOptimizationHints() )
2447 fprintf(stderr
, "unknown hint kind %d alt.info.kind at 0x%08llX\n", alt
.info
.kind
, infoA
.instructionAddress
);
2451 // apply hints pass 2
2452 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2453 if ( fit
->kind
!= ld::Fixup::kindLinkerOptimizationHint
)
2455 InstructionInfo infoA
;
2456 InstructionInfo infoB
;
2457 ld::Fixup::LOH_arm64 alt
;
2458 alt
.addend
= fit
->u
.addend
;
2459 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta1
<< 2), &infoA
);
2460 if ( alt
.info
.count
> 0 )
2461 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta2
<< 2), &infoB
);
2463 switch ( alt
.info
.kind
) {
2464 case LOH_ARM64_ADRP_ADRP
:
2465 LOH_ASSERT(isPageKind(infoA
.fixup
));
2466 LOH_ASSERT(isPageKind(infoB
.fixup
));
2467 if ( (infoA
.instruction
& 0x9F000000) != 0x90000000 ) {
2468 if ( _options
.verboseOptimizationHints() )
2469 fprintf(stderr
, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoA
.instructionAddress
, infoA
.instruction
);
2473 if ( (infoB
.instruction
& 0x9F000000) != 0x90000000 ) {
2474 if ( _options
.verboseOptimizationHints() )
2475 fprintf(stderr
, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoB
.instructionAddress
, infoA
.instruction
);
2479 if ( (infoA
.targetAddress
& (-4096)) == (infoB
.targetAddress
& (-4096)) ) {
2480 set32LE(infoB
.instructionContent
, 0xD503201F);
2490 #endif // SUPPORT_ARCH_arm64
// Fill the byte range [from, to) with architecture-appropriate no-op
// instructions.  Used by writeAtoms() to pad alignment gaps between
// atoms in code sections so the padding disassembles cleanly.
//   from  - first byte of the gap to fill
//   to    - one past the last byte of the gap
//   thumb - NOTE(review): presumably selects Thumb vs ARM NOPs for the
//           32-bit ARM case — the ARM case labels are not visible in
//           this extract, so confirm against the full source.
2494 void OutputFile::copyNoOps(uint8_t* from
, uint8_t* to
, bool thumb
)
2496 switch ( _options
.architecture() ) {
// x86_64: pad one byte at a time (the per-byte NOP store is on a
// source line not visible in this extract).
2498 case CPU_TYPE_X86_64
:
2499 for (uint8_t* p
=from
; p
< to
; ++p
)
// Thumb padding: 16-bit little-endian stores of 0x46c0
// ("mov r8, r8", the canonical Thumb NOP), stepping 2 bytes.
2504 for (uint8_t* p
=from
; p
< to
; p
+= 2)
2505 OSWriteLittleInt16((uint16_t*)p
, 0, 0x46c0);
// ARM padding: 32-bit little-endian stores of 0xe1a00000
// ("mov r0, r0", the canonical ARM NOP), stepping 4 bytes.
2508 for (uint8_t* p
=from
; p
< to
; p
+= 4)
2509 OSWriteLittleInt32((uint32_t*)p
, 0, 0xe1a00000);
// Default architectures: pad one byte at a time (the stored byte
// value is on a source line not visible in this extract).
2513 for (uint8_t* p
=from
; p
< to
; ++p
)
// Returns whether the given section occupies no bytes in the output
// file (it contributes address space only, not file content).
// Zero-fill and TLV zero-fill sections take no disk space only when
// the zero-fill optimization is enabled in the linker options.
2519 bool OutputFile::takesNoDiskSpace(const ld::Section
* sect
)
2521 switch ( sect
->type() ) {
2522 case ld::Section::typeZeroFill
:
2523 case ld::Section::typeTLVZeroFill
:
2524 return _options
.optimizeZeroFill();
// __PAGEZERO, stack, absolute-symbol, and tentative-definition
// sections never have file content.  (Their return statement is on a
// source line not visible in this extract.)
2525 case ld::Section::typePageZero
:
2526 case ld::Section::typeStack
:
2527 case ld::Section::typeAbsoluteSymbols
:
2528 case ld::Section::typeTentativeDefs
:
// Returns whether the section's content at its file offset is known
// to be all zeroes.  Mirrors takesNoDiskSpace(): zero-fill and TLV
// zero-fill sections answer true only when the zero-fill optimization
// is enabled.
2536 bool OutputFile::hasZeroForFileOffset(const ld::Section
* sect
)
2538 switch ( sect
->type() ) {
2539 case ld::Section::typeZeroFill
:
2540 case ld::Section::typeTLVZeroFill
:
2541 return _options
.optimizeZeroFill();
// __PAGEZERO, stack, and tentative-definition sections.  (Their
// return statement is on a source line not visible in this extract.)
2542 case ld::Section::typePageZero
:
2543 case ld::Section::typeStack
:
2544 case ld::Section::typeTentativeDefs
:
// Write every atom's content into the output image and apply fixups.
//   state       - the linker's internal state (final sections/atoms)
//   wholeBuffer - the whole output file image being assembled
// Walks each final section, copies each atom's raw content to its
// computed file offset, applies fixups, and fills alignment gaps
// between atoms in code sections with NOPs via copyNoOps().
2552 void OutputFile::writeAtoms(ld::Internal
& state
, uint8_t* wholeBuffer
)
2554 // have each atom write itself
2555 uint64_t fileOffsetOfEndOfLastAtom
= 0;
2556 uint64_t mhAddress
= 0;
2557 bool lastAtomUsesNoOps
= false;
2558 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2559 ld::Internal::FinalSection
* sect
= *sit
;
// Remember the mach header address; it is passed to applyFixUps below.
2560 if ( sect
->type() == ld::Section::typeMachHeader
)
2561 mhAddress
= sect
->address
;
// Sections with no file content (zero-fill etc.) have nothing to write.
2562 if ( takesNoDiskSpace(sect
) )
// Only code sections get NOP padding between atoms.
2564 const bool sectionUsesNops
= (sect
->type() == ld::Section::typeCode
);
2565 //fprintf(stderr, "file offset=0x%08llX, section %s\n", sect->fileOffset, sect->sectionName());
2566 std::vector
<const ld::Atom
*>& atoms
= sect
->atoms
;
2567 bool lastAtomWasThumb
= false;
2568 for (std::vector
<const ld::Atom
*>::iterator ait
= atoms
.begin(); ait
!= atoms
.end(); ++ait
) {
2569 const ld::Atom
* atom
= *ait
;
// Proxy atoms (defined in another linkage unit) have no content here.
2570 if ( atom
->definition() == ld::Atom::definitionProxy
)
// File offset of this atom = its final address relative to the
// section start, plus the section's file offset.
2573 uint64_t fileOffset
= atom
->finalAddress() - sect
->address
+ sect
->fileOffset
;
2574 // check for alignment padding between atoms
2575 if ( (fileOffset
!= fileOffsetOfEndOfLastAtom
) && lastAtomUsesNoOps
) {
2576 this->copyNoOps(&wholeBuffer
[fileOffsetOfEndOfLastAtom
], &wholeBuffer
[fileOffset
], lastAtomWasThumb
);
2578 // copy atom content
2579 atom
->copyRawContent(&wholeBuffer
[fileOffset
]);
// Resolve this atom's fixups directly in the output buffer.
2581 this->applyFixUps(state
, mhAddress
, atom
, &wholeBuffer
[fileOffset
]);
2582 fileOffsetOfEndOfLastAtom
= fileOffset
+atom
->size();
2583 lastAtomUsesNoOps
= sectionUsesNops
;
2584 lastAtomWasThumb
= atom
->isThumb();
// Re-throw low-level errors with the atom name (and source file path
// when known) added for diagnostics.
2586 catch (const char* msg
) {
2587 if ( atom
->file() != NULL
)
2588 throwf("%s in '%s' from %s", msg
, atom
->name(), atom
->file()->path());
2590 throwf("%s in '%s'", msg
, atom
->name());
// Optional summary statistics for linker-optimization-hint processing.
2595 if ( _options
.verboseOptimizationHints() ) {
2596 //fprintf(stderr, "ADRP optimized away: %d\n", sAdrpNA);
2597 //fprintf(stderr, "ADRPs changed to NOPs: %d\n", sAdrpNoped);
2598 //fprintf(stderr, "ADRPs unchanged: %d\n", sAdrpNotNoped);
// Compute the LC_UUID value from an MD5 digest of the output file's
// content and store it into the mach header load commands.
//   state       - linker internal state (used to locate linkedit sections)
//   wholeBuffer - the fully written output file image
// When the output has stabs debug info, the two stabs ranges (nlist
// entries in __symbol_table and their strings in __string_pool) are
// excluded from the digest so the UUID is stable across debug-info
// differences.
2603 void OutputFile::computeContentUUID(ld::Internal
& state
, uint8_t* wholeBuffer
)
2605 const bool log
= false;
2606 if ( (_options
.outputKind() != Options::kObjectFile
) || state
.someObjectFileHasDwarf
) {
2607 uint8_t digest
[CC_MD5_DIGEST_LENGTH
];
// NOTE(review): "tabsStringsOffsetEnd" looks like a typo for
// "stabsStringsOffsetEnd"; it is used consistently below, so it is
// harmless, but the name is misleading.
2608 uint32_t stabsStringsOffsetStart
;
2609 uint32_t tabsStringsOffsetEnd
;
2610 uint32_t stabsOffsetStart
;
2611 uint32_t stabsOffsetEnd
;
2612 if ( _symbolTableAtom
->hasStabs(stabsStringsOffsetStart
, tabsStringsOffsetEnd
, stabsOffsetStart
, stabsOffsetEnd
) ) {
2613 // find two areas of file that are stabs info and should not contribute to checksum
2614 uint64_t stringPoolFileOffset
= 0;
2615 uint64_t symbolTableFileOffset
= 0;
2616 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2617 ld::Internal::FinalSection
* sect
= *sit
;
2618 if ( sect
->type() == ld::Section::typeLinkEdit
) {
2619 if ( strcmp(sect
->sectionName(), "__string_pool") == 0 )
2620 stringPoolFileOffset
= sect
->fileOffset
;
2621 else if ( strcmp(sect
->sectionName(), "__symbol_table") == 0 )
2622 symbolTableFileOffset
= sect
->fileOffset
;
// Convert the stabs offsets (relative to their sections) into
// absolute file offsets delimiting the two excluded ranges.
2625 uint64_t firstStabNlistFileOffset
= symbolTableFileOffset
+ stabsOffsetStart
;
2626 uint64_t lastStabNlistFileOffset
= symbolTableFileOffset
+ stabsOffsetEnd
;
2627 uint64_t firstStabStringFileOffset
= stringPoolFileOffset
+ stabsStringsOffsetStart
;
2628 uint64_t lastStabStringFileOffset
= stringPoolFileOffset
+ tabsStringsOffsetEnd
;
2629 if ( log
) fprintf(stderr
, "firstStabNlistFileOffset=0x%08llX\n", firstStabNlistFileOffset
);
2630 if ( log
) fprintf(stderr
, "lastStabNlistFileOffset=0x%08llX\n", lastStabNlistFileOffset
);
2631 if ( log
) fprintf(stderr
, "firstStabStringFileOffset=0x%08llX\n", firstStabStringFileOffset
);
2632 if ( log
) fprintf(stderr
, "lastStabStringFileOffset=0x%08llX\n", lastStabStringFileOffset
);
// The digest below assumes the nlist range precedes the string range.
2633 assert(firstStabNlistFileOffset
<= firstStabStringFileOffset
);
2635 CC_MD5_CTX md5state
;
2636 CC_MD5_Init(&md5state
);
2637 // checksum everything up to first stabs nlist
2638 if ( log
) fprintf(stderr
, "checksum 0x%08X -> 0x%08llX\n", 0, firstStabNlistFileOffset
);
2639 CC_MD5_Update(&md5state
, &wholeBuffer
[0], firstStabNlistFileOffset
);
2640 // checksum everything after last stabs nlist and up to first stabs string
2641 if ( log
) fprintf(stderr
, "checksum 0x%08llX -> 0x%08llX\n", lastStabNlistFileOffset
, firstStabStringFileOffset
);
2642 CC_MD5_Update(&md5state
, &wholeBuffer
[lastStabNlistFileOffset
], firstStabStringFileOffset
-lastStabNlistFileOffset
);
2643 // checksum everything after last stabs string to end of file
2644 if ( log
) fprintf(stderr
, "checksum 0x%08llX -> 0x%08llX\n", lastStabStringFileOffset
, _fileSize
);
2645 CC_MD5_Update(&md5state
, &wholeBuffer
[lastStabStringFileOffset
], _fileSize
-lastStabStringFileOffset
);
2646 CC_MD5_Final(digest
, &md5state
);
2647 if ( log
) fprintf(stderr
, "uuid=%02X, %02X, %02X, %02X, %02X, %02X, %02X, %02X\n", digest
[0], digest
[1], digest
[2],
2648 digest
[3], digest
[4], digest
[5], digest
[6], digest
[7]);
// No stabs: digest the whole file in one call.
2651 CC_MD5(wholeBuffer
, _fileSize
, digest
);
2653 // <rdar://problem/6723729> LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
// Force the RFC 4122 version nibble (byte 6 high nibble = 3, i.e.
// name-based MD5) and the variant bits (byte 8 top bits = 10).
2654 digest
[6] = ( digest
[6] & 0x0F ) | ( 3 << 4 );
2655 digest
[8] = ( digest
[8] & 0x3F ) | 0x80;
2656 // update buffer with new UUID
2657 _headersAndLoadCommandAtom
->setUUID(digest
);
2658 _headersAndLoadCommandAtom
->recopyUUIDCommand();
// Write the fully-laid-out image to disk.
// Strategy: if the destination is a regular file on an HFS volume, write through an
// mmap'ed temporary file ({path}.ld_XXXXXX) and atomically rename() it into place;
// otherwise (special files, non-mappable volumes) build the image in a heap buffer
// and write() it out.  Throws (via throwf) on any I/O failure.
// NOTE(review): this block was reconstructed from a garbled extraction; dropped
// structural lines (fd declaration, unlink-on-error paths, else branches) were
// restored — verify against upstream ld64 OutputFile.cpp.
void OutputFile::writeOutputFile(ld::Internal& state)
{
	// for UNIX conformance, error if file exists and is not writable
	if ( (access(_options.outputFilePath(), F_OK) == 0) && (access(_options.outputFilePath(), W_OK) == -1) )
		throwf("can't write output file: %s", _options.outputFilePath());

	mode_t permissions = 0777;
	if ( _options.outputKind() == Options::kObjectFile )
		permissions = 0666;	// .o files are not executable
	mode_t umask = ::umask(0);
	::umask(umask); // put back the original umask
	permissions &= ~umask;
	// Calling unlink first assures the file is gone so that open creates it with correct permissions
	// It also handles the case where _options.outputFilePath() file is not writable but its directory is
	// And it means we don't have to truncate the file when done writing (in case new is smaller than old)
	// Lastly, only delete existing file if it is a normal file (e.g. not /dev/null).
	struct stat stat_buf;
	bool outputIsRegularFile = false;
	bool outputIsMappableFile = false;
	if ( stat(_options.outputFilePath(), &stat_buf) != -1 ) {
		if ( stat_buf.st_mode & S_IFREG ) {
			outputIsRegularFile = true;
			// <rdar://problem/12264302> Don't use mmap on non-hfs volumes
			struct statfs fsInfo;
			if ( statfs(_options.outputFilePath(), &fsInfo) != -1 ) {
				if ( strcmp(fsInfo.f_fstypename, "hfs") == 0 ) {
					(void)unlink(_options.outputFilePath());
					outputIsMappableFile = true;
				}
			}
			else {
				outputIsMappableFile = false;
			}
		}
		else {
			// special files (pipes, devices, etc) must already exist
			outputIsRegularFile = false;
		}
	}
	else {
		// output file does not exist yet; it will be created as a regular file
		outputIsRegularFile = true;
		// check the volume the containing directory lives on
		char dirPath[PATH_MAX];
		strcpy(dirPath, _options.outputFilePath());
		char* end = strrchr(dirPath, '/');
		if ( end != NULL ) {
			end[1] = '\0';
			struct statfs fsInfo;
			if ( statfs(dirPath, &fsInfo) != -1 ) {
				if ( strcmp(fsInfo.f_fstypename, "hfs") == 0 ) {
					outputIsMappableFile = true;
				}
			}
		}
	}

	//fprintf(stderr, "outputIsMappableFile=%d, outputIsRegularFile=%d, path=%s\n", outputIsMappableFile, outputIsRegularFile, _options.outputFilePath());

	int fd;
	// Construct a temporary path of the form {outputFilePath}.ld_XXXXXX
	const char filenameTemplate[] = ".ld_XXXXXX";
	char tmpOutput[PATH_MAX];
	uint8_t* wholeBuffer;
	if ( outputIsRegularFile && outputIsMappableFile ) {
		strcpy(tmpOutput, _options.outputFilePath());
		// If the path is too long to add a suffix for a temporary name then
		// just fall back to using the output path.
		if ( strlen(tmpOutput)+strlen(filenameTemplate) < PATH_MAX ) {
			strcat(tmpOutput, filenameTemplate);
			fd = mkstemp(tmpOutput);
		}
		else {
			fd = open(tmpOutput, O_RDWR|O_CREAT, permissions);
		}
		if ( fd == -1 )
			throwf("can't open output file for writing '%s', errno=%d", tmpOutput, errno);
		if ( ftruncate(fd, _fileSize) == -1 ) {
			int err = errno;
			unlink(tmpOutput);	// don't leave a partial temp file behind
			if ( err == ENOSPC )
				throwf("not enough disk space for writing '%s'", _options.outputFilePath());
			else
				throwf("can't grow file for writing '%s', errno=%d", _options.outputFilePath(), err);
		}
		wholeBuffer = (uint8_t*)mmap(NULL, _fileSize, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0);
		if ( wholeBuffer == MAP_FAILED )
			throwf("can't create buffer of %llu bytes for output", _fileSize);
	}
	else {
		if ( outputIsRegularFile )
			fd = open(_options.outputFilePath(), O_RDWR|O_CREAT, permissions);
		else
			fd = open(_options.outputFilePath(), O_WRONLY);
		if ( fd == -1 )
			throwf("can't open output file for writing: %s, errno=%d", _options.outputFilePath(), errno);
		// try to allocate buffer for entire output file content
		wholeBuffer = (uint8_t*)calloc(_fileSize, 1);
		if ( wholeBuffer == NULL )
			throwf("can't create buffer of %llu bytes for output", _fileSize);
	}

	if ( _options.UUIDMode() == Options::kUUIDRandom ) {
		uint8_t bits[16];
		::uuid_generate_random(bits);
		_headersAndLoadCommandAtom->setUUID(bits);
	}

	writeAtoms(state, wholeBuffer);

	// content-based UUID must be computed after the image bytes are final
	if ( _options.UUIDMode() == Options::kUUIDContent )
		computeContentUUID(state, wholeBuffer);

	if ( outputIsRegularFile && outputIsMappableFile ) {
		::close(fd);
		if ( ::chmod(tmpOutput, permissions) == -1 ) {
			unlink(tmpOutput);
			throwf("can't set permissions on output file: %s, errno=%d", tmpOutput, errno);
		}
		// atomic replace; the strcmp guards the long-path fallback where tmpOutput == outputFilePath
		if ( ::rename(tmpOutput, _options.outputFilePath()) == -1 && strcmp(tmpOutput, _options.outputFilePath()) != 0 ) {
			unlink(tmpOutput);
			throwf("can't move output file in place, errno=%d", errno);
		}
	}
	else {
		if ( ::write(fd, wholeBuffer, _fileSize) == -1 ) {
			throwf("can't write to output file: %s, errno=%d", _options.outputFilePath(), errno);
		}
		::close(fd);
		::free(wholeBuffer);
		// <rdar://problem/13118223> NFS: iOS incremental builds in Xcode 4.6 fail with codesign error
		// NFS seems to pad the end of the file sometimes. Calling trunc seems to correct it...
		::truncate(_options.outputFilePath(), _fileSize);
	}
}
2798 struct AtomByNameSorter
2800 bool operator()(const ld::Atom
* left
, const ld::Atom
* right
)
2802 return (strcmp(left
->name(), right
->name()) < 0);
2809 NotInSet(const std::set
<const ld::Atom
*>& theSet
) : _set(theSet
) {}
2811 bool operator()(const ld::Atom
* atom
) const {
2812 return ( _set
.count(atom
) == 0 );
2815 const std::set
<const ld::Atom
*>& _set
;
2819 void OutputFile::buildSymbolTable(ld::Internal
& state
)
2821 unsigned int machoSectionIndex
= 0;
2822 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2823 ld::Internal::FinalSection
* sect
= *sit
;
2824 bool setMachoSectionIndex
= !sect
->isSectionHidden() && (sect
->type() != ld::Section::typeTentativeDefs
);
2825 if ( setMachoSectionIndex
)
2826 ++machoSectionIndex
;
2827 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
2828 const ld::Atom
* atom
= *ait
;
2829 if ( setMachoSectionIndex
)
2830 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
);
2831 else if ( sect
->type() == ld::Section::typeMachHeader
)
2832 (const_cast<ld::Atom
*>(atom
))->setMachoSection(1); // __mh_execute_header is not in any section by needs n_sect==1
2833 else if ( sect
->type() == ld::Section::typeLastSection
)
2834 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
); // use section index of previous section
2835 else if ( sect
->type() == ld::Section::typeFirstSection
)
2836 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
+1); // use section index of next section
2838 // in -r mode, clarify symbolTableNotInFinalLinkedImages
2839 if ( _options
.outputKind() == Options::kObjectFile
) {
2840 if ( (_options
.architecture() == CPU_TYPE_X86_64
) || (_options
.architecture() == CPU_TYPE_ARM64
) ) {
2841 // x86_64 .o files need labels on anonymous literal strings
2842 if ( (sect
->type() == ld::Section::typeCString
) && (atom
->combine() == ld::Atom::combineByNameAndContent
) ) {
2843 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2844 _localAtoms
.push_back(atom
);
2848 if ( sect
->type() == ld::Section::typeCFI
) {
2849 if ( _options
.removeEHLabels() )
2850 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
2852 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2854 else if ( sect
->type() == ld::Section::typeTempAlias
) {
2855 assert(_options
.outputKind() == Options::kObjectFile
);
2856 _importedAtoms
.push_back(atom
);
2859 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
)
2860 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2863 // TEMP work around until <rdar://problem/7702923> goes in
2864 if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip
)
2865 && (atom
->scope() == ld::Atom::scopeLinkageUnit
)
2866 && (_options
.outputKind() == Options::kDynamicLibrary
) ) {
2867 (const_cast<ld::Atom
*>(atom
))->setScope(ld::Atom::scopeGlobal
);
2870 // <rdar://problem/6783167> support auto hidden weak symbols: .weak_def_can_be_hidden
2871 if ( atom
->autoHide() && (_options
.outputKind() != Options::kObjectFile
) ) {
2872 // adding auto-hide symbol to .exp file should keep it global
2873 if ( !_options
.hasExportMaskList() || !_options
.shouldExport(atom
->name()) )
2874 (const_cast<ld::Atom
*>(atom
))->setScope(ld::Atom::scopeLinkageUnit
);
2877 // <rdar://problem/8626058> ld should consistently warn when resolvers are not exported
2878 if ( (atom
->contentType() == ld::Atom::typeResolver
) && (atom
->scope() == ld::Atom::scopeLinkageUnit
) )
2879 warning("resolver functions should be external, but '%s' is hidden", atom
->name());
2881 if ( sect
->type() == ld::Section::typeImportProxies
) {
2882 if ( atom
->combine() == ld::Atom::combineByName
)
2883 this->usesWeakExternalSymbols
= true;
2884 // alias proxy is a re-export with a name change, don't import changed name
2885 if ( ! atom
->isAlias() )
2886 _importedAtoms
.push_back(atom
);
2887 // scope of proxies are usually linkage unit, so done
2888 // if scope is global, we need to re-export it too
2889 if ( atom
->scope() == ld::Atom::scopeGlobal
)
2890 _exportedAtoms
.push_back(atom
);
2893 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
) {
2894 assert(_options
.outputKind() != Options::kObjectFile
);
2895 continue; // don't add to symbol table
2897 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotIn
) {
2898 continue; // don't add to symbol table
2900 if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel
)
2901 && (_options
.outputKind() != Options::kObjectFile
) ) {
2902 continue; // don't add to symbol table
2905 if ( (atom
->definition() == ld::Atom::definitionTentative
) && (_options
.outputKind() == Options::kObjectFile
) ) {
2906 if ( _options
.makeTentativeDefinitionsReal() ) {
2907 // -r -d turns tentative defintions into real def
2908 _exportedAtoms
.push_back(atom
);
2911 // in mach-o object files tentative defintions are stored like undefined symbols
2912 _importedAtoms
.push_back(atom
);
2917 switch ( atom
->scope() ) {
2918 case ld::Atom::scopeTranslationUnit
:
2919 if ( _options
.keepLocalSymbol(atom
->name()) ) {
2920 _localAtoms
.push_back(atom
);
2923 if ( _options
.outputKind() == Options::kObjectFile
) {
2924 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel
);
2925 _localAtoms
.push_back(atom
);
2928 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
2931 case ld::Atom::scopeGlobal
:
2932 _exportedAtoms
.push_back(atom
);
2934 case ld::Atom::scopeLinkageUnit
:
2935 if ( _options
.outputKind() == Options::kObjectFile
) {
2936 if ( _options
.keepPrivateExterns() ) {
2937 _exportedAtoms
.push_back(atom
);
2939 else if ( _options
.keepLocalSymbol(atom
->name()) ) {
2940 _localAtoms
.push_back(atom
);
2943 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel
);
2944 _localAtoms
.push_back(atom
);
2948 if ( _options
.keepLocalSymbol(atom
->name()) )
2949 _localAtoms
.push_back(atom
);
2950 // <rdar://problem/5804214> ld should never have a symbol in the non-lazy indirect symbol table with index 0
2951 // this works by making __mh_execute_header be a local symbol which takes symbol index 0
2952 else if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip
) && !_options
.makeCompressedDyldInfo() )
2953 _localAtoms
.push_back(atom
);
2955 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
2962 // <rdar://problem/6978069> ld adds undefined symbol from .exp file to binary
2963 if ( (_options
.outputKind() == Options::kKextBundle
) && _options
.hasExportRestrictList() ) {
2964 // search for referenced undefines
2965 std::set
<const ld::Atom
*> referencedProxyAtoms
;
2966 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
=state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2967 ld::Internal::FinalSection
* sect
= *sit
;
2968 for (std::vector
<const ld::Atom
*>::iterator ait
=sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
2969 const ld::Atom
* atom
= *ait
;
2970 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2971 switch ( fit
->binding
) {
2972 case ld::Fixup::bindingsIndirectlyBound
:
2973 referencedProxyAtoms
.insert(state
.indirectBindingTable
[fit
->u
.bindingIndex
]);
2975 case ld::Fixup::bindingDirectlyBound
:
2976 referencedProxyAtoms
.insert(fit
->u
.target
);
2984 // remove any unreferenced _importedAtoms
2985 _importedAtoms
.erase(std::remove_if(_importedAtoms
.begin(), _importedAtoms
.end(), NotInSet(referencedProxyAtoms
)), _importedAtoms
.end());
2989 std::sort(_exportedAtoms
.begin(), _exportedAtoms
.end(), AtomByNameSorter());
2990 std::sort(_importedAtoms
.begin(), _importedAtoms
.end(), AtomByNameSorter());
2993 void OutputFile::addPreloadLinkEdit(ld::Internal
& state
)
2995 switch ( _options
.architecture() ) {
2996 #if SUPPORT_ARCH_i386
2998 if ( _hasLocalRelocations
) {
2999 _localRelocsAtom
= new LocalRelocationsAtom
<x86
>(_options
, state
, *this);
3000 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3002 if ( _hasExternalRelocations
) {
3003 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86
>(_options
, state
, *this);
3004 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3006 if ( _hasSymbolTable
) {
3007 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86
>(_options
, state
, *this);
3008 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3009 _symbolTableAtom
= new SymbolTableAtom
<x86
>(_options
, state
, *this);
3010 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3011 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3012 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3016 #if SUPPORT_ARCH_x86_64
3017 case CPU_TYPE_X86_64
:
3018 if ( _hasLocalRelocations
) {
3019 _localRelocsAtom
= new LocalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3020 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3022 if ( _hasExternalRelocations
) {
3023 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3024 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3026 if ( _hasSymbolTable
) {
3027 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86_64
>(_options
, state
, *this);
3028 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3029 _symbolTableAtom
= new SymbolTableAtom
<x86_64
>(_options
, state
, *this);
3030 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3031 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3032 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3036 #if SUPPORT_ARCH_arm_any
3038 if ( _hasLocalRelocations
) {
3039 _localRelocsAtom
= new LocalRelocationsAtom
<arm
>(_options
, state
, *this);
3040 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3042 if ( _hasExternalRelocations
) {
3043 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm
>(_options
, state
, *this);
3044 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3046 if ( _hasSymbolTable
) {
3047 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm
>(_options
, state
, *this);
3048 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3049 _symbolTableAtom
= new SymbolTableAtom
<arm
>(_options
, state
, *this);
3050 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3051 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3052 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3056 #if SUPPORT_ARCH_arm64
3057 case CPU_TYPE_ARM64
:
3058 if ( _hasLocalRelocations
) {
3059 _localRelocsAtom
= new LocalRelocationsAtom
<arm64
>(_options
, state
, *this);
3060 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3062 if ( _hasExternalRelocations
) {
3063 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm64
>(_options
, state
, *this);
3064 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3066 if ( _hasSymbolTable
) {
3067 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm64
>(_options
, state
, *this);
3068 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3069 _symbolTableAtom
= new SymbolTableAtom
<arm64
>(_options
, state
, *this);
3070 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3071 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3072 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3077 throw "-preload not supported";
3083 void OutputFile::addLinkEdit(ld::Internal
& state
)
3085 // for historical reasons, -preload orders LINKEDIT content differently
3086 if ( _options
.outputKind() == Options::kPreload
)
3087 return addPreloadLinkEdit(state
);
3089 switch ( _options
.architecture() ) {
3090 #if SUPPORT_ARCH_i386
3092 if ( _hasSectionRelocations
) {
3093 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<x86
>(_options
, state
, *this);
3094 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3096 if ( _hasDyldInfo
) {
3097 _rebasingInfoAtom
= new RebaseInfoAtom
<x86
>(_options
, state
, *this);
3098 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3100 _bindingInfoAtom
= new BindingInfoAtom
<x86
>(_options
, state
, *this);
3101 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3103 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<x86
>(_options
, state
, *this);
3104 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3106 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<x86
>(_options
, state
, *this);
3107 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3109 _exportInfoAtom
= new ExportInfoAtom
<x86
>(_options
, state
, *this);
3110 exportSection
= state
.addAtom(*_exportInfoAtom
);
3112 if ( _hasLocalRelocations
) {
3113 _localRelocsAtom
= new LocalRelocationsAtom
<x86
>(_options
, state
, *this);
3114 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3116 if ( _hasSplitSegInfo
) {
3117 _splitSegInfoAtom
= new SplitSegInfoAtom
<x86
>(_options
, state
, *this);
3118 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3120 if ( _hasFunctionStartsInfo
) {
3121 _functionStartsAtom
= new FunctionStartsAtom
<x86
>(_options
, state
, *this);
3122 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3124 if ( _hasDataInCodeInfo
) {
3125 _dataInCodeAtom
= new DataInCodeAtom
<x86
>(_options
, state
, *this);
3126 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3128 if ( _hasOptimizationHints
) {
3129 _optimizationHintsAtom
= new OptimizationHintsAtom
<x86
>(_options
, state
, *this);
3130 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3132 if ( _hasDependentDRInfo
) {
3133 _dependentDRInfoAtom
= new DependentDRAtom
<x86
>(_options
, state
, *this);
3134 dependentDRsSection
= state
.addAtom(*_dependentDRInfoAtom
);
3136 if ( _hasSymbolTable
) {
3137 _symbolTableAtom
= new SymbolTableAtom
<x86
>(_options
, state
, *this);
3138 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3140 if ( _hasExternalRelocations
) {
3141 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86
>(_options
, state
, *this);
3142 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3144 if ( _hasSymbolTable
) {
3145 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86
>(_options
, state
, *this);
3146 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3147 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3148 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3152 #if SUPPORT_ARCH_x86_64
3153 case CPU_TYPE_X86_64
:
3154 if ( _hasSectionRelocations
) {
3155 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<x86_64
>(_options
, state
, *this);
3156 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3158 if ( _hasDyldInfo
) {
3159 _rebasingInfoAtom
= new RebaseInfoAtom
<x86_64
>(_options
, state
, *this);
3160 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3162 _bindingInfoAtom
= new BindingInfoAtom
<x86_64
>(_options
, state
, *this);
3163 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3165 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<x86_64
>(_options
, state
, *this);
3166 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3168 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<x86_64
>(_options
, state
, *this);
3169 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3171 _exportInfoAtom
= new ExportInfoAtom
<x86_64
>(_options
, state
, *this);
3172 exportSection
= state
.addAtom(*_exportInfoAtom
);
3174 if ( _hasLocalRelocations
) {
3175 _localRelocsAtom
= new LocalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3176 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3178 if ( _hasSplitSegInfo
) {
3179 _splitSegInfoAtom
= new SplitSegInfoAtom
<x86_64
>(_options
, state
, *this);
3180 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3182 if ( _hasFunctionStartsInfo
) {
3183 _functionStartsAtom
= new FunctionStartsAtom
<x86_64
>(_options
, state
, *this);
3184 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3186 if ( _hasDataInCodeInfo
) {
3187 _dataInCodeAtom
= new DataInCodeAtom
<x86_64
>(_options
, state
, *this);
3188 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3190 if ( _hasOptimizationHints
) {
3191 _optimizationHintsAtom
= new OptimizationHintsAtom
<x86_64
>(_options
, state
, *this);
3192 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3194 if ( _hasDependentDRInfo
) {
3195 _dependentDRInfoAtom
= new DependentDRAtom
<x86_64
>(_options
, state
, *this);
3196 dependentDRsSection
= state
.addAtom(*_dependentDRInfoAtom
);
3198 if ( _hasSymbolTable
) {
3199 _symbolTableAtom
= new SymbolTableAtom
<x86_64
>(_options
, state
, *this);
3200 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3202 if ( _hasExternalRelocations
) {
3203 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3204 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3206 if ( _hasSymbolTable
) {
3207 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86_64
>(_options
, state
, *this);
3208 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3209 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 8);
3210 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3214 #if SUPPORT_ARCH_arm_any
3216 if ( _hasSectionRelocations
) {
3217 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<arm
>(_options
, state
, *this);
3218 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3220 if ( _hasDyldInfo
) {
3221 _rebasingInfoAtom
= new RebaseInfoAtom
<arm
>(_options
, state
, *this);
3222 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3224 _bindingInfoAtom
= new BindingInfoAtom
<arm
>(_options
, state
, *this);
3225 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3227 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<arm
>(_options
, state
, *this);
3228 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3230 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<arm
>(_options
, state
, *this);
3231 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3233 _exportInfoAtom
= new ExportInfoAtom
<arm
>(_options
, state
, *this);
3234 exportSection
= state
.addAtom(*_exportInfoAtom
);
3236 if ( _hasLocalRelocations
) {
3237 _localRelocsAtom
= new LocalRelocationsAtom
<arm
>(_options
, state
, *this);
3238 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3240 if ( _hasSplitSegInfo
) {
3241 _splitSegInfoAtom
= new SplitSegInfoAtom
<arm
>(_options
, state
, *this);
3242 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3244 if ( _hasFunctionStartsInfo
) {
3245 _functionStartsAtom
= new FunctionStartsAtom
<arm
>(_options
, state
, *this);
3246 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3248 if ( _hasDataInCodeInfo
) {
3249 _dataInCodeAtom
= new DataInCodeAtom
<arm
>(_options
, state
, *this);
3250 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3252 if ( _hasOptimizationHints
) {
3253 _optimizationHintsAtom
= new OptimizationHintsAtom
<arm
>(_options
, state
, *this);
3254 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3256 if ( _hasDependentDRInfo
) {
3257 _dependentDRInfoAtom
= new DependentDRAtom
<arm
>(_options
, state
, *this);
3258 dependentDRsSection
= state
.addAtom(*_dependentDRInfoAtom
);
3260 if ( _hasSymbolTable
) {
3261 _symbolTableAtom
= new SymbolTableAtom
<arm
>(_options
, state
, *this);
3262 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3264 if ( _hasExternalRelocations
) {
3265 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm
>(_options
, state
, *this);
3266 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3268 if ( _hasSymbolTable
) {
3269 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm
>(_options
, state
, *this);
3270 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3271 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3272 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3276 #if SUPPORT_ARCH_arm64
3277 case CPU_TYPE_ARM64
:
3278 if ( _hasSectionRelocations
) {
3279 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<arm64
>(_options
, state
, *this);
3280 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3282 if ( _hasDyldInfo
) {
3283 _rebasingInfoAtom
= new RebaseInfoAtom
<arm64
>(_options
, state
, *this);
3284 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3286 _bindingInfoAtom
= new BindingInfoAtom
<arm64
>(_options
, state
, *this);
3287 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3289 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<arm64
>(_options
, state
, *this);
3290 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3292 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<arm64
>(_options
, state
, *this);
3293 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3295 _exportInfoAtom
= new ExportInfoAtom
<arm64
>(_options
, state
, *this);
3296 exportSection
= state
.addAtom(*_exportInfoAtom
);
3298 if ( _hasLocalRelocations
) {
3299 _localRelocsAtom
= new LocalRelocationsAtom
<arm64
>(_options
, state
, *this);
3300 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3302 if ( _hasSplitSegInfo
) {
3303 _splitSegInfoAtom
= new SplitSegInfoAtom
<arm64
>(_options
, state
, *this);
3304 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3306 if ( _hasFunctionStartsInfo
) {
3307 _functionStartsAtom
= new FunctionStartsAtom
<arm64
>(_options
, state
, *this);
3308 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3310 if ( _hasDataInCodeInfo
) {
3311 _dataInCodeAtom
= new DataInCodeAtom
<arm64
>(_options
, state
, *this);
3312 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3314 if ( _hasOptimizationHints
) {
3315 _optimizationHintsAtom
= new OptimizationHintsAtom
<arm64
>(_options
, state
, *this);
3316 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3318 if ( _hasDependentDRInfo
) {
3319 _dependentDRInfoAtom
= new DependentDRAtom
<arm64
>(_options
, state
, *this);
3320 dependentDRsSection
= state
.addAtom(*_dependentDRInfoAtom
);
3322 if ( _hasSymbolTable
) {
3323 _symbolTableAtom
= new SymbolTableAtom
<arm64
>(_options
, state
, *this);
3324 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3326 if ( _hasExternalRelocations
) {
3327 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm64
>(_options
, state
, *this);
3328 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3330 if ( _hasSymbolTable
) {
3331 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm64
>(_options
, state
, *this);
3332 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3333 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3334 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3339 throw "unknown architecture";
3343 void OutputFile::addLoadCommands(ld::Internal
& state
)
3345 switch ( _options
.architecture() ) {
3346 #if SUPPORT_ARCH_x86_64
3347 case CPU_TYPE_X86_64
:
3348 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<x86_64
>(_options
, state
, *this);
3349 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3352 #if SUPPORT_ARCH_arm_any
3354 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<arm
>(_options
, state
, *this);
3355 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3358 #if SUPPORT_ARCH_arm64
3359 case CPU_TYPE_ARM64
:
3360 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<arm64
>(_options
, state
, *this);
3361 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3364 #if SUPPORT_ARCH_i386
3366 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<x86
>(_options
, state
, *this);
3367 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3371 throw "unknown architecture";
3375 uint32_t OutputFile::dylibCount()
3377 return _dylibsToLoad
.size();
3380 const ld::dylib::File
* OutputFile::dylibByOrdinal(unsigned int ordinal
)
3382 assert( ordinal
> 0 );
3383 assert( ordinal
<= _dylibsToLoad
.size() );
3384 return _dylibsToLoad
[ordinal
-1];
3387 bool OutputFile::hasOrdinalForInstallPath(const char* path
, int* ordinal
)
3389 for (std::map
<const ld::dylib::File
*, int>::const_iterator it
= _dylibToOrdinal
.begin(); it
!= _dylibToOrdinal
.end(); ++it
) {
3390 const char* installPath
= it
->first
->installPath();
3391 if ( (installPath
!= NULL
) && (strcmp(path
, installPath
) == 0) ) {
3392 *ordinal
= it
->second
;
3399 uint32_t OutputFile::dylibToOrdinal(const ld::dylib::File
* dylib
)
3401 return _dylibToOrdinal
[dylib
];
3405 void OutputFile::buildDylibOrdinalMapping(ld::Internal
& state
)
3407 // count non-public re-exported dylibs
3408 unsigned int nonPublicReExportCount
= 0;
3409 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3410 ld::dylib::File
* aDylib
= *it
;
3411 if ( aDylib
->willBeReExported() && ! aDylib
->hasPublicInstallName() )
3412 ++nonPublicReExportCount
;
3415 // look at each dylib supplied in state
3416 bool hasReExports
= false;
3417 bool haveLazyDylibs
= false;
3418 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3419 ld::dylib::File
* aDylib
= *it
;
3421 if ( aDylib
== state
.bundleLoader
) {
3422 _dylibToOrdinal
[aDylib
] = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE
;
3424 else if ( this->hasOrdinalForInstallPath(aDylib
->installPath(), &ordinal
) ) {
3425 // already have a dylib with that install path, map all uses to that ordinal
3426 _dylibToOrdinal
[aDylib
] = ordinal
;
3428 else if ( aDylib
->willBeLazyLoadedDylib() ) {
3429 // all lazy dylib need to be at end of ordinals
3430 haveLazyDylibs
= true;
3432 else if ( aDylib
->willBeReExported() && ! aDylib
->hasPublicInstallName() && (nonPublicReExportCount
>= 2) ) {
3433 _dylibsToLoad
.push_back(aDylib
);
3434 _dylibToOrdinal
[aDylib
] = BIND_SPECIAL_DYLIB_SELF
;
3437 // first time this install path seen, create new ordinal
3438 _dylibsToLoad
.push_back(aDylib
);
3439 _dylibToOrdinal
[aDylib
] = _dylibsToLoad
.size();
3441 if ( aDylib
->explicitlyLinked() && aDylib
->willBeReExported() )
3442 hasReExports
= true;
3444 if ( haveLazyDylibs
) {
3445 // second pass to determine ordinals for lazy loaded dylibs
3446 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3447 ld::dylib::File
* aDylib
= *it
;
3448 if ( aDylib
->willBeLazyLoadedDylib() ) {
3450 if ( this->hasOrdinalForInstallPath(aDylib
->installPath(), &ordinal
) ) {
3451 // already have a dylib with that install path, map all uses to that ordinal
3452 _dylibToOrdinal
[aDylib
] = ordinal
;
3455 // first time this install path seen, create new ordinal
3456 _dylibsToLoad
.push_back(aDylib
);
3457 _dylibToOrdinal
[aDylib
] = _dylibsToLoad
.size();
3462 _noReExportedDylibs
= !hasReExports
;
3463 //fprintf(stderr, "dylibs:\n");
3464 //for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3465 // fprintf(stderr, " %p ord=%u, install_name=%s\n",it->first, it->second, it->first->installPath());
3469 uint32_t OutputFile::lazyBindingInfoOffsetForLazyPointerAddress(uint64_t lpAddress
)
3471 return _lazyPointerAddressToInfoOffset
[lpAddress
];
3474 void OutputFile::setLazyBindingInfoOffset(uint64_t lpAddress
, uint32_t lpInfoOffset
)
3476 _lazyPointerAddressToInfoOffset
[lpAddress
] = lpInfoOffset
;
3479 int OutputFile::compressedOrdinalForAtom(const ld::Atom
* target
)
3481 // flat namespace images use zero for all ordinals
3482 if ( _options
.nameSpace() != Options::kTwoLevelNameSpace
)
3483 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3485 // handle -interposable
3486 if ( target
->definition() == ld::Atom::definitionRegular
)
3487 return BIND_SPECIAL_DYLIB_SELF
;
3490 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
3491 if ( dylib
!= NULL
) {
3492 std::map
<const ld::dylib::File
*, int>::iterator pos
= _dylibToOrdinal
.find(dylib
);
3493 if ( pos
!= _dylibToOrdinal
.end() )
3495 assert(0 && "dylib not assigned ordinal");
3498 // handle undefined dynamic_lookup
3499 if ( _options
.undefinedTreatment() == Options::kUndefinedDynamicLookup
)
3500 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3503 if ( _options
.allowedUndefined(target
->name()) )
3504 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3506 throw "can't find ordinal for imported symbol";
3510 bool OutputFile::isPcRelStore(ld::Fixup::Kind kind
)
3513 case ld::Fixup::kindStoreX86BranchPCRel8
:
3514 case ld::Fixup::kindStoreX86BranchPCRel32
:
3515 case ld::Fixup::kindStoreX86PCRel8
:
3516 case ld::Fixup::kindStoreX86PCRel16
:
3517 case ld::Fixup::kindStoreX86PCRel32
:
3518 case ld::Fixup::kindStoreX86PCRel32_1
:
3519 case ld::Fixup::kindStoreX86PCRel32_2
:
3520 case ld::Fixup::kindStoreX86PCRel32_4
:
3521 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
3522 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
3523 case ld::Fixup::kindStoreX86PCRel32GOT
:
3524 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
3525 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
3526 case ld::Fixup::kindStoreARMBranch24
:
3527 case ld::Fixup::kindStoreThumbBranch22
:
3528 case ld::Fixup::kindStoreARMLoad12
:
3529 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
3530 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
3531 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
3532 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
3533 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
3534 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
3535 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
3536 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
3537 #if SUPPORT_ARCH_arm64
3538 case ld::Fixup::kindStoreARM64Page21
:
3539 case ld::Fixup::kindStoreARM64PageOff12
:
3540 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
3541 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
3542 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
3543 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
3544 case ld::Fixup::kindStoreARM64PCRelToGOT
:
3545 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
3546 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
3547 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
3548 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
3549 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
3550 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
3553 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
3554 #if SUPPORT_ARCH_arm64
3555 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
3557 return (_options
.outputKind() != Options::kKextBundle
);
3564 bool OutputFile::isStore(ld::Fixup::Kind kind
)
3567 case ld::Fixup::kindNone
:
3568 case ld::Fixup::kindNoneFollowOn
:
3569 case ld::Fixup::kindNoneGroupSubordinate
:
3570 case ld::Fixup::kindNoneGroupSubordinateFDE
:
3571 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
3572 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
3573 case ld::Fixup::kindSetTargetAddress
:
3574 case ld::Fixup::kindSubtractTargetAddress
:
3575 case ld::Fixup::kindAddAddend
:
3576 case ld::Fixup::kindSubtractAddend
:
3577 case ld::Fixup::kindSetTargetImageOffset
:
3578 case ld::Fixup::kindSetTargetSectionOffset
:
3587 bool OutputFile::setsTarget(ld::Fixup::Kind kind
)
3590 case ld::Fixup::kindSetTargetAddress
:
3591 case ld::Fixup::kindLazyTarget
:
3592 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
3593 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
3594 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
3595 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
3596 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
3597 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
3598 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
3599 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
3600 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
3601 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
3602 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad
:
3603 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
3604 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
3605 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
3606 #if SUPPORT_ARCH_arm64
3607 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
3608 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
3609 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
3610 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
3611 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
3612 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
3613 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
3616 case ld::Fixup::kindStoreX86DtraceCallSiteNop
:
3617 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear
:
3618 case ld::Fixup::kindStoreARMDtraceCallSiteNop
:
3619 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear
:
3620 case ld::Fixup::kindStoreARM64DtraceCallSiteNop
:
3621 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear
:
3622 case ld::Fixup::kindStoreThumbDtraceCallSiteNop
:
3623 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear
:
3624 return (_options
.outputKind() == Options::kObjectFile
);
3631 bool OutputFile::isPointerToTarget(ld::Fixup::Kind kind
)
3634 case ld::Fixup::kindSetTargetAddress
:
3635 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
3636 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
3637 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
3638 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
3639 case ld::Fixup::kindLazyTarget
:
3646 bool OutputFile::isPointerFromTarget(ld::Fixup::Kind kind
)
3649 case ld::Fixup::kindSubtractTargetAddress
:
3658 uint64_t OutputFile::lookBackAddend(ld::Fixup::iterator fit
)
3660 uint64_t addend
= 0;
3661 switch ( fit
->clusterSize
) {
3662 case ld::Fixup::k1of1
:
3663 case ld::Fixup::k1of2
:
3664 case ld::Fixup::k2of2
:
3666 case ld::Fixup::k2of3
:
3668 switch ( fit
->kind
) {
3669 case ld::Fixup::kindAddAddend
:
3670 addend
+= fit
->u
.addend
;
3672 case ld::Fixup::kindSubtractAddend
:
3673 addend
-= fit
->u
.addend
;
3676 throw "unexpected fixup kind for binding";
3679 case ld::Fixup::k1of3
:
3681 switch ( fit
->kind
) {
3682 case ld::Fixup::kindAddAddend
:
3683 addend
+= fit
->u
.addend
;
3685 case ld::Fixup::kindSubtractAddend
:
3686 addend
-= fit
->u
.addend
;
3689 throw "unexpected fixup kind for binding";
3693 throw "unexpected fixup cluster size for binding";
3699 void OutputFile::generateLinkEditInfo(ld::Internal
& state
)
3701 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
3702 ld::Internal::FinalSection
* sect
= *sit
;
3703 // record end of last __TEXT section encrypted iPhoneOS apps.
3704 if ( _options
.makeEncryptable() && (strcmp(sect
->segmentName(), "__TEXT") == 0) ) {
3705 _encryptedTEXTendOffset
= pageAlign(sect
->fileOffset
+ sect
->size
);
3707 bool objc1ClassRefSection
= ( (sect
->type() == ld::Section::typeCStringPointer
)
3708 && (strcmp(sect
->sectionName(), "__cls_refs") == 0)
3709 && (strcmp(sect
->segmentName(), "__OBJC") == 0) );
3710 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
3711 const ld::Atom
* atom
= *ait
;
3713 // Record regular atoms that override a dylib's weak definitions
3714 if ( (atom
->scope() == ld::Atom::scopeGlobal
) && atom
->overridesDylibsWeakDef() ) {
3715 if ( _options
.makeCompressedDyldInfo() ) {
3716 uint8_t wtype
= BIND_TYPE_OVERRIDE_OF_WEAKDEF_IN_DYLIB
;
3717 bool nonWeakDef
= (atom
->combine() == ld::Atom::combineNever
);
3718 _weakBindingInfo
.push_back(BindingInfo(wtype
, atom
->name(), nonWeakDef
, atom
->finalAddress(), 0));
3720 this->overridesWeakExternalSymbols
= true;
3721 if ( _options
.warnWeakExports() )
3722 warning("overrides weak external symbol: %s", atom
->name());
3725 ld::Fixup
* fixupWithTarget
= NULL
;
3726 ld::Fixup
* fixupWithMinusTarget
= NULL
;
3727 ld::Fixup
* fixupWithStore
= NULL
;
3728 ld::Fixup
* fixupWithAddend
= NULL
;
3729 const ld::Atom
* target
= NULL
;
3730 const ld::Atom
* minusTarget
= NULL
;
3731 uint64_t targetAddend
= 0;
3732 uint64_t minusTargetAddend
= 0;
3733 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
3734 if ( fit
->firstInCluster() ) {
3735 fixupWithTarget
= NULL
;
3736 fixupWithMinusTarget
= NULL
;
3737 fixupWithStore
= NULL
;
3741 minusTargetAddend
= 0;
3743 if ( this->setsTarget(fit
->kind
) ) {
3744 switch ( fit
->binding
) {
3745 case ld::Fixup::bindingNone
:
3746 case ld::Fixup::bindingByNameUnbound
:
3748 case ld::Fixup::bindingByContentBound
:
3749 case ld::Fixup::bindingDirectlyBound
:
3750 fixupWithTarget
= fit
;
3751 target
= fit
->u
.target
;
3753 case ld::Fixup::bindingsIndirectlyBound
:
3754 fixupWithTarget
= fit
;
3755 target
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
3758 assert(target
!= NULL
);
3760 switch ( fit
->kind
) {
3761 case ld::Fixup::kindAddAddend
:
3762 targetAddend
= fit
->u
.addend
;
3763 fixupWithAddend
= fit
;
3765 case ld::Fixup::kindSubtractAddend
:
3766 minusTargetAddend
= fit
->u
.addend
;
3767 fixupWithAddend
= fit
;
3769 case ld::Fixup::kindSubtractTargetAddress
:
3770 switch ( fit
->binding
) {
3771 case ld::Fixup::bindingNone
:
3772 case ld::Fixup::bindingByNameUnbound
:
3774 case ld::Fixup::bindingByContentBound
:
3775 case ld::Fixup::bindingDirectlyBound
:
3776 fixupWithMinusTarget
= fit
;
3777 minusTarget
= fit
->u
.target
;
3779 case ld::Fixup::bindingsIndirectlyBound
:
3780 fixupWithMinusTarget
= fit
;
3781 minusTarget
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
3784 assert(minusTarget
!= NULL
);
3786 case ld::Fixup::kindDataInCodeStartData
:
3787 case ld::Fixup::kindDataInCodeStartJT8
:
3788 case ld::Fixup::kindDataInCodeStartJT16
:
3789 case ld::Fixup::kindDataInCodeStartJT32
:
3790 case ld::Fixup::kindDataInCodeStartJTA32
:
3791 case ld::Fixup::kindDataInCodeEnd
:
3792 hasDataInCode
= true;
3797 if ( this->isStore(fit
->kind
) ) {
3798 fixupWithStore
= fit
;
3800 if ( fit
->lastInCluster() ) {
3801 if ( (fixupWithStore
!= NULL
) && (target
!= NULL
) ) {
3802 if ( _options
.outputKind() == Options::kObjectFile
) {
3803 this->addSectionRelocs(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithAddend
, fixupWithStore
,
3804 target
, minusTarget
, targetAddend
, minusTargetAddend
);
3807 if ( _options
.makeCompressedDyldInfo() ) {
3808 this->addDyldInfo(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithStore
,
3809 target
, minusTarget
, targetAddend
, minusTargetAddend
);
3812 this->addClassicRelocs(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithStore
,
3813 target
, minusTarget
, targetAddend
, minusTargetAddend
);
3817 else if ( objc1ClassRefSection
&& (target
!= NULL
) && (fixupWithStore
== NULL
) ) {
3818 // check for class refs to lazy loaded dylibs
3819 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
3820 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
3821 throwf("illegal class reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
3830 void OutputFile::noteTextReloc(const ld::Atom
* atom
, const ld::Atom
* target
)
3832 if ( (atom
->contentType() == ld::Atom::typeStub
) || (atom
->contentType() == ld::Atom::typeStubHelper
) ) {
3833 // silently let stubs (synthesized by linker) use text relocs
3835 else if ( _options
.allowTextRelocs() ) {
3836 if ( _options
.warnAboutTextRelocs() )
3837 warning("text reloc in %s to %s", atom
->name(), target
->name());
3839 else if ( _options
.positionIndependentExecutable() && (_options
.outputKind() == Options::kDynamicExecutable
)
3840 && ((_options
.iOSVersionMin() >= ld::iOS_4_3
) || (_options
.macosxVersionMin() >= ld::mac10_7
)) ) {
3841 if ( ! this->pieDisabled
) {
3842 #if SUPPORT_ARCH_arm64
3843 if ( _options
.architecture() == CPU_TYPE_ARM64
) {
3844 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
3845 throwf("Absolute addressing not allowed in arm64 code but used in '%s' referencing '%s'", demangledName
, _options
.demangleSymbol(target
->name()));
3850 warning("PIE disabled. Absolute addressing (perhaps -mdynamic-no-pic) not allowed in code signed PIE, "
3851 "but used in %s from %s. "
3852 "To fix this warning, don't compile with -mdynamic-no-pic or link with -Wl,-no_pie",
3853 atom
->name(), atom
->file()->path());
3856 this->pieDisabled
= true;
3858 else if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) ) {
3859 throwf("illegal text-relocoation (direct reference) to (global,weak) %s in %s from %s in %s", target
->name(), target
->file()->path(), atom
->name(), atom
->file()->path());
3862 if ( (target
->file() != NULL
) && (atom
->file() != NULL
) )
3863 throwf("illegal text-relocation to '%s' in %s from '%s' in %s", target
->name(), target
->file()->path(), atom
->name(), atom
->file()->path());
3865 throwf("illegal text reloc in '%s' to '%s'", atom
->name(), target
->name());
3869 void OutputFile::addDyldInfo(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
3870 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
, ld::Fixup
* fixupWithStore
,
3871 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
3872 uint64_t targetAddend
, uint64_t minusTargetAddend
)
3874 if ( sect
->isSectionHidden() )
3877 // no need to rebase or bind PCRel stores
3878 if ( this->isPcRelStore(fixupWithStore
->kind
) ) {
3879 // as long as target is in same linkage unit
3880 if ( (target
== NULL
) || (target
->definition() != ld::Atom::definitionProxy
) ) {
3881 // make sure target is not global and weak
3882 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
)) {
3883 if ( (atom
->section().type() == ld::Section::typeCFI
)
3884 || (atom
->section().type() == ld::Section::typeDtraceDOF
)
3885 || (atom
->section().type() == ld::Section::typeUnwindInfo
) ) {
3886 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
3889 // <rdar://problem/13700961> spurious warning when weak function has reference to itself
3890 if ( fixupWithTarget
->binding
== ld::Fixup::bindingDirectlyBound
) {
3891 // ok to ignore pc-rel references within a weak function to itself
3894 // Have direct reference to weak-global. This should be an indrect reference
3895 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
3896 warning("direct access in %s to global weak symbol %s means the weak symbol cannot be overridden at runtime. "
3897 "This was likely caused by different translation units being compiled with different visibility settings.",
3898 demangledName
, _options
.demangleSymbol(target
->name()));
3904 // no need to rebase or bind PIC internal pointer diff
3905 if ( minusTarget
!= NULL
) {
3906 // with pointer diffs, both need to be in same linkage unit
3907 assert(minusTarget
->definition() != ld::Atom::definitionProxy
);
3908 assert(target
!= NULL
);
3909 assert(target
->definition() != ld::Atom::definitionProxy
);
3910 if ( target
== minusTarget
) {
3911 // This is a compile time constant and could have been optimized away by compiler
3915 // check if target of pointer-diff is global and weak
3916 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) ) {
3917 if ( (atom
->section().type() == ld::Section::typeCFI
)
3918 || (atom
->section().type() == ld::Section::typeDtraceDOF
)
3919 || (atom
->section().type() == ld::Section::typeUnwindInfo
) ) {
3920 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
3923 // Have direct reference to weak-global. This should be an indrect reference
3924 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
3925 warning("direct access in %s to global weak symbol %s means the weak symbol cannot be overridden at runtime. "
3926 "This was likely caused by different translation units being compiled with different visibility settings.",
3927 demangledName
, _options
.demangleSymbol(target
->name()));
3932 // no need to rebase or bind an atom's references to itself if the output is not slidable
3933 if ( (atom
== target
) && !_options
.outputSlidable() )
3936 // cluster has no target, so needs no rebasing or binding
3937 if ( target
== NULL
)
3940 bool inReadOnlySeg
= ((_options
.initialSegProtection(sect
->segmentName()) & VM_PROT_WRITE
) == 0);
3941 bool needsRebase
= false;
3942 bool needsBinding
= false;
3943 bool needsLazyBinding
= false;
3944 bool needsWeakBinding
= false;
3946 uint8_t rebaseType
= REBASE_TYPE_POINTER
;
3947 uint8_t type
= BIND_TYPE_POINTER
;
3948 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
3949 bool weak_import
= (fixupWithTarget
->weakImport
|| ((dylib
!= NULL
) && dylib
->forcedWeakLinked()));
3950 uint64_t address
= atom
->finalAddress() + fixupWithTarget
->offsetInAtom
;
3951 uint64_t addend
= targetAddend
- minusTargetAddend
;
3953 // special case lazy pointers
3954 if ( fixupWithTarget
->kind
== ld::Fixup::kindLazyTarget
) {
3955 assert(fixupWithTarget
->u
.target
== target
);
3956 assert(addend
== 0);
3957 // lazy dylib lazy pointers do not have any dyld info
3958 if ( atom
->section().type() == ld::Section::typeLazyDylibPointer
)
3960 // lazy binding to weak definitions are done differently
3961 // they are directly bound to target, then have a weak bind in case of a collision
3962 if ( target
->combine() == ld::Atom::combineByName
) {
3963 if ( target
->definition() == ld::Atom::definitionProxy
) {
3964 // weak def exported from another dylib
3965 // must non-lazy bind to it plus have weak binding info in case of collision
3966 needsBinding
= true;
3967 needsWeakBinding
= true;
3970 // weak def in this linkage unit.
3971 // just rebase, plus have weak binding info in case of collision
3972 // this will be done by other cluster on lazy pointer atom
3975 else if ( target
->contentType() == ld::Atom::typeResolver
) {
3976 // <rdar://problem/8553647> Hidden resolver functions should not have lazy binding info
3977 // <rdar://problem/12629331> Resolver function run before initializers when overriding the dyld shared cache
3978 // The lazy pointers used by stubs used when non-lazy binding to a resolver are not normal lazy pointers
3979 // and should not be in lazy binding info.
3980 needsLazyBinding
= false;
3983 // normal case of a pointer to non-weak-def symbol, so can lazily bind
3984 needsLazyBinding
= true;
3988 // everything except lazy pointers
3989 switch ( target
->definition() ) {
3990 case ld::Atom::definitionProxy
:
3991 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
3992 throwf("illegal data reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
3993 if ( target
->contentType() == ld::Atom::typeTLV
) {
3994 if ( sect
->type() != ld::Section::typeTLVPointers
)
3995 throwf("illegal data reference in %s to thread local variable %s in dylib %s",
3996 atom
->name(), target
->name(), dylib
->path());
3998 if ( inReadOnlySeg
)
3999 type
= BIND_TYPE_TEXT_ABSOLUTE32
;
4000 needsBinding
= true;
4001 if ( target
->combine() == ld::Atom::combineByName
)
4002 needsWeakBinding
= true;
4004 case ld::Atom::definitionRegular
:
4005 case ld::Atom::definitionTentative
:
4006 // only slideable images need rebasing info
4007 if ( _options
.outputSlidable() ) {
4010 // references to internal symbol never need binding
4011 if ( target
->scope() != ld::Atom::scopeGlobal
)
4013 // reference to global weak def needs weak binding
4014 if ( (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) )
4015 needsWeakBinding
= true;
4016 else if ( _options
.outputKind() == Options::kDynamicExecutable
) {
4017 // in main executables, the only way regular symbols are indirected is if -interposable is used
4018 if ( _options
.interposable(target
->name()) ) {
4019 needsRebase
= false;
4020 needsBinding
= true;
4024 // for flat-namespace or interposable two-level-namespace
4025 // all references to exported symbols get indirected
4026 if ( (_options
.nameSpace() != Options::kTwoLevelNameSpace
) || _options
.interposable(target
->name()) ) {
4027 // <rdar://problem/5254468> no external relocs for flat objc classes
4028 if ( strncmp(target
->name(), ".objc_class_", 12) == 0 )
4030 // no rebase info for references to global symbols that will have binding info
4031 needsRebase
= false;
4032 needsBinding
= true;
4034 else if ( _options
.forceCoalesce(target
->name()) ) {
4035 needsWeakBinding
= true;
4039 case ld::Atom::definitionAbsolute
:
4044 // <rdar://problem/13828711> if target is an import alias, use base of alias
4045 if ( target
->isAlias() && (target
->definition() == ld::Atom::definitionProxy
) ) {
4046 for (ld::Fixup::iterator fit
= target
->fixupsBegin(), end
=target
->fixupsEnd(); fit
!= end
; ++fit
) {
4047 if ( fit
->firstInCluster() ) {
4048 if ( fit
->kind
== ld::Fixup::kindNoneFollowOn
) {
4049 if ( fit
->binding
== ld::Fixup::bindingDirectlyBound
) {
4050 //fprintf(stderr, "switching import of %s to import of %s\n", target->name(), fit->u.target->name());
4051 target
= fit
->u
.target
;
4058 // record dyld info for this cluster
4059 if ( needsRebase
) {
4060 if ( inReadOnlySeg
) {
4061 noteTextReloc(atom
, target
);
4062 sect
->hasLocalRelocs
= true; // so dyld knows to change permissions on __TEXT segment
4063 rebaseType
= REBASE_TYPE_TEXT_ABSOLUTE32
;
4065 if ( _options
.sharedRegionEligible() ) {
4066 // <rdar://problem/13287063> when range checking, ignore high byte of arm64 addends
4067 uint64_t checkAddend
= addend
;
4068 if ( _options
.architecture() == CPU_TYPE_ARM64
)
4069 checkAddend
&= 0x0FFFFFFFFFFFFFFFULL
;
4070 if ( checkAddend
!= 0 ) {
4071 // make sure the addend does not cause the pointer to point outside the target's segment
4072 // if it does, update_dyld_shared_cache will not be able to put this dylib into the shared cache
4073 uint64_t targetAddress
= target
->finalAddress();
4074 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4075 ld::Internal::FinalSection
* sct
= *sit
;
4076 uint64_t sctEnd
= (sct
->address
+sct
->size
);
4077 if ( (sct
->address
<= targetAddress
) && (targetAddress
< sctEnd
) ) {
4078 if ( (targetAddress
+checkAddend
) > sctEnd
) {
4079 warning("data symbol %s from %s has pointer to %s + 0x%08llX. "
4080 "That large of an addend may disable %s from being put in the dyld shared cache.",
4081 atom
->name(), atom
->file()->path(), target
->name(), addend
, _options
.installPath() );
4087 _rebaseInfo
.push_back(RebaseInfo(rebaseType
, address
));
4089 if ( needsBinding
) {
4090 if ( inReadOnlySeg
) {
4091 noteTextReloc(atom
, target
);
4092 sect
->hasExternalRelocs
= true; // so dyld knows to change permissions on __TEXT segment
4094 _bindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4096 if ( needsLazyBinding
) {
4097 if ( _options
.bindAtLoad() )
4098 _bindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4100 _lazyBindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4102 if ( needsWeakBinding
)
4103 _weakBindingInfo
.push_back(BindingInfo(type
, 0, target
->name(), false, address
, addend
));
4107 void OutputFile::addClassicRelocs(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
4108 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
, ld::Fixup
* fixupWithStore
,
4109 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
4110 uint64_t targetAddend
, uint64_t minusTargetAddend
)
4112 if ( sect
->isSectionHidden() )
4115 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4116 if ( sect
->type() == ld::Section::typeNonLazyPointer
) {
4117 // except kexts and static pie which *do* use relocations
4118 switch (_options
.outputKind()) {
4119 case Options::kKextBundle
:
4121 case Options::kStaticExecutable
:
4122 if ( _options
.positionIndependentExecutable() )
4124 // else fall into default case
4126 assert(target
!= NULL
);
4127 assert(fixupWithTarget
!= NULL
);
4132 // no need to rebase or bind PCRel stores
4133 if ( this->isPcRelStore(fixupWithStore
->kind
) ) {
4134 // as long as target is in same linkage unit
4135 if ( (target
== NULL
) || (target
->definition() != ld::Atom::definitionProxy
) )
4139 // no need to rebase or bind PIC internal pointer diff
4140 if ( minusTarget
!= NULL
) {
4141 // with pointer diffs, both need to be in same linkage unit
4142 assert(minusTarget
->definition() != ld::Atom::definitionProxy
);
4143 assert(target
!= NULL
);
4144 assert(target
->definition() != ld::Atom::definitionProxy
);
4145 // make sure target is not global and weak
4146 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
)
4147 && (atom
->section().type() != ld::Section::typeCFI
)
4148 && (atom
->section().type() != ld::Section::typeDtraceDOF
)
4149 && (atom
->section().type() != ld::Section::typeUnwindInfo
)
4150 && (minusTarget
!= target
) ) {
4151 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4152 throwf("bad codegen, pointer diff in %s to global weak symbol %s", atom
->name(), target
->name());
4157 // cluster has no target, so needs no rebasing or binding
4158 if ( target
== NULL
)
4161 assert(_localRelocsAtom
!= NULL
);
4162 uint64_t relocAddress
= atom
->finalAddress() + fixupWithTarget
->offsetInAtom
- _localRelocsAtom
->relocBaseAddress(state
);
4164 bool inReadOnlySeg
= ( strcmp(sect
->segmentName(), "__TEXT") == 0 );
4165 bool needsLocalReloc
= false;
4166 bool needsExternReloc
= false;
4168 switch ( fixupWithStore
->kind
) {
4169 case ld::Fixup::kindLazyTarget
:
4170 // lazy pointers don't need relocs
4172 case ld::Fixup::kindStoreLittleEndian32
:
4173 case ld::Fixup::kindStoreLittleEndian64
:
4174 case ld::Fixup::kindStoreBigEndian32
:
4175 case ld::Fixup::kindStoreBigEndian64
:
4176 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4177 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4178 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
4179 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
4181 switch ( target
->definition() ) {
4182 case ld::Atom::definitionProxy
:
4183 needsExternReloc
= true;
4185 case ld::Atom::definitionRegular
:
4186 case ld::Atom::definitionTentative
:
4187 // only slideable images need local relocs
4188 if ( _options
.outputSlidable() )
4189 needsLocalReloc
= true;
4190 // references to internal symbol never need binding
4191 if ( target
->scope() != ld::Atom::scopeGlobal
)
4193 // reference to global weak def needs weak binding in dynamic images
4194 if ( (target
->combine() == ld::Atom::combineByName
)
4195 && (target
->definition() == ld::Atom::definitionRegular
)
4196 && (_options
.outputKind() != Options::kStaticExecutable
)
4197 && (_options
.outputKind() != Options::kPreload
)
4198 && (atom
!= target
) ) {
4199 needsExternReloc
= true;
4201 else if ( _options
.outputKind() == Options::kDynamicExecutable
) {
4202 // in main executables, the only way regular symbols are indirected is if -interposable is used
4203 if ( _options
.interposable(target
->name()) )
4204 needsExternReloc
= true;
4207 // for flat-namespace or interposable two-level-namespace
4208 // all references to exported symbols get indirected
4209 if ( (_options
.nameSpace() != Options::kTwoLevelNameSpace
) || _options
.interposable(target
->name()) ) {
4210 // <rdar://problem/5254468> no external relocs for flat objc classes
4211 if ( strncmp(target
->name(), ".objc_class_", 12) == 0 )
4213 // no rebase info for references to global symbols that will have binding info
4214 needsExternReloc
= true;
4217 if ( needsExternReloc
)
4218 needsLocalReloc
= false;
4220 case ld::Atom::definitionAbsolute
:
4223 if ( needsExternReloc
) {
4224 if ( inReadOnlySeg
)
4225 noteTextReloc(atom
, target
);
4226 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
4227 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
4228 throwf("illegal data reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
4229 _externalRelocsAtom
->addExternalPointerReloc(relocAddress
, target
);
4230 sect
->hasExternalRelocs
= true;
4231 fixupWithTarget
->contentAddendOnly
= true;
4233 else if ( needsLocalReloc
) {
4234 assert(target
!= NULL
);
4235 if ( inReadOnlySeg
)
4236 noteTextReloc(atom
, target
);
4237 _localRelocsAtom
->addPointerReloc(relocAddress
, target
->machoSection());
4238 sect
->hasLocalRelocs
= true;
4241 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
4242 #if SUPPORT_ARCH_arm64
4243 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
4245 if ( _options
.outputKind() == Options::kKextBundle
) {
4246 assert(target
!= NULL
);
4247 if ( target
->definition() == ld::Atom::definitionProxy
) {
4248 _externalRelocsAtom
->addExternalCallSiteReloc(relocAddress
, target
);
4249 fixupWithStore
->contentAddendOnly
= true;
4254 case ld::Fixup::kindStoreARMLow16
:
4255 case ld::Fixup::kindStoreThumbLow16
:
4256 // no way to encode rebasing of binding for these instructions
4257 if ( _options
.outputSlidable() || (target
->definition() == ld::Atom::definitionProxy
) )
4258 throwf("no supported runtime lo16 relocation in %s from %s to %s", atom
->name(), atom
->file()->path(), target
->name());
4261 case ld::Fixup::kindStoreARMHigh16
:
4262 case ld::Fixup::kindStoreThumbHigh16
:
4263 // no way to encode rebasing of binding for these instructions
4264 if ( _options
.outputSlidable() || (target
->definition() == ld::Atom::definitionProxy
) )
4265 throwf("no supported runtime hi16 relocation in %s from %s to %s", atom
->name(), atom
->file()->path(), target
->name());
4274 bool OutputFile::useExternalSectionReloc(const ld::Atom
* atom
, const ld::Atom
* target
, ld::Fixup
* fixupWithTarget
)
4276 if ( (_options
.architecture() == CPU_TYPE_X86_64
) || (_options
.architecture() == CPU_TYPE_ARM64
) ) {
4277 // x86_64 and ARM64 use external relocations for everthing that has a symbol
4278 return ( target
->symbolTableInclusion() != ld::Atom::symbolTableNotIn
);
4281 // <rdar://problem/9513487> support arm branch interworking in -r mode
4282 if ( (_options
.architecture() == CPU_TYPE_ARM
) && (_options
.outputKind() == Options::kObjectFile
) ) {
4283 if ( atom
->isThumb() != target
->isThumb() ) {
4284 switch ( fixupWithTarget
->kind
) {
4285 // have branch that switches mode, then might be 'b' not 'bl'
4286 // Force external relocation, since no way to do local reloc for 'b'
4287 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
4288 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
4296 if ( (_options
.architecture() == CPU_TYPE_I386
) && (_options
.outputKind() == Options::kObjectFile
) ) {
4297 if ( target
->contentType() == ld::Atom::typeTLV
)
4301 // most architectures use external relocations only for references
4302 // to a symbol in another translation unit or for references to "weak symbols" or tentative definitions
4303 assert(target
!= NULL
);
4304 if ( target
->definition() == ld::Atom::definitionProxy
)
4306 if ( (target
->definition() == ld::Atom::definitionTentative
) && ! _options
.makeTentativeDefinitionsReal() )
4308 if ( target
->scope() != ld::Atom::scopeGlobal
)
4310 if ( (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) )
4315 bool OutputFile::useSectionRelocAddend(ld::Fixup
* fixupWithTarget
)
4317 #if SUPPORT_ARCH_arm64
4318 if ( _options
.architecture() == CPU_TYPE_ARM64
) {
4319 switch ( fixupWithTarget
->kind
) {
4320 case ld::Fixup::kindStoreARM64Branch26
:
4321 case ld::Fixup::kindStoreARM64Page21
:
4322 case ld::Fixup::kindStoreARM64PageOff12
:
4335 void OutputFile::addSectionRelocs(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
4336 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
,
4337 ld::Fixup
* fixupWithAddend
, ld::Fixup
* fixupWithStore
,
4338 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
4339 uint64_t targetAddend
, uint64_t minusTargetAddend
)
4341 if ( sect
->isSectionHidden() )
4344 // in -r mode where there will be no labels on __eh_frame section, there is no need for relocations
4345 if ( (sect
->type() == ld::Section::typeCFI
) && _options
.removeEHLabels() )
4348 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4349 if ( sect
->type() == ld::Section::typeNonLazyPointer
)
4352 // tentative defs don't have any relocations
4353 if ( sect
->type() == ld::Section::typeTentativeDefs
)
4356 assert(target
!= NULL
);
4357 assert(fixupWithTarget
!= NULL
);
4358 bool targetUsesExternalReloc
= this->useExternalSectionReloc(atom
, target
, fixupWithTarget
);
4359 bool minusTargetUsesExternalReloc
= (minusTarget
!= NULL
) && this->useExternalSectionReloc(atom
, minusTarget
, fixupWithMinusTarget
);
4361 // in x86_64 and arm64 .o files an external reloc means the content contains just the addend
4362 if ( (_options
.architecture() == CPU_TYPE_X86_64
) ||(_options
.architecture() == CPU_TYPE_ARM64
) ) {
4363 if ( targetUsesExternalReloc
) {
4364 fixupWithTarget
->contentAddendOnly
= true;
4365 fixupWithStore
->contentAddendOnly
= true;
4366 if ( this->useSectionRelocAddend(fixupWithStore
) && (fixupWithAddend
!= NULL
) )
4367 fixupWithAddend
->contentIgnoresAddend
= true;
4369 if ( minusTargetUsesExternalReloc
)
4370 fixupWithMinusTarget
->contentAddendOnly
= true;
4373 // for other archs, content is addend only with (non pc-rel) pointers
4374 // pc-rel instructions are funny. If the target is _foo+8 and _foo is
4375 // external, then the pc-rel instruction *evalutates* to the address 8.
4376 if ( targetUsesExternalReloc
) {
4377 // TLV support for i386 acts like RIP relative addressing
4378 // The addend is the offset from the PICBase to the end of the instruction
4379 if ( (_options
.architecture() == CPU_TYPE_I386
)
4380 && (_options
.outputKind() == Options::kObjectFile
)
4381 && (fixupWithStore
->kind
== ld::Fixup::kindStoreX86PCRel32TLVLoad
) ) {
4382 fixupWithTarget
->contentAddendOnly
= true;
4383 fixupWithStore
->contentAddendOnly
= true;
4385 else if ( isPcRelStore(fixupWithStore
->kind
) ) {
4386 fixupWithTarget
->contentDetlaToAddendOnly
= true;
4387 fixupWithStore
->contentDetlaToAddendOnly
= true;
4389 else if ( minusTarget
== NULL
){
4390 fixupWithTarget
->contentAddendOnly
= true;
4391 fixupWithStore
->contentAddendOnly
= true;
4396 if ( fixupWithStore
!= NULL
) {
4397 _sectionsRelocationsAtom
->addSectionReloc(sect
, fixupWithStore
->kind
, atom
, fixupWithStore
->offsetInAtom
,
4398 targetUsesExternalReloc
, minusTargetUsesExternalReloc
,
4399 target
, targetAddend
, minusTarget
, minusTargetAddend
);
4405 void OutputFile::makeSplitSegInfo(ld::Internal
& state
)
4407 if ( !_options
.sharedRegionEligible() )
4410 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4411 ld::Internal::FinalSection
* sect
= *sit
;
4412 if ( sect
->isSectionHidden() )
4414 if ( strcmp(sect
->segmentName(), "__TEXT") != 0 )
4416 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4417 const ld::Atom
* atom
= *ait
;
4418 const ld::Atom
* target
= NULL
;
4419 const ld::Atom
* fromTarget
= NULL
;
4420 uint64_t accumulator
= 0;
4422 bool hadSubtract
= false;
4423 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
4424 if ( fit
->firstInCluster() )
4426 if ( this->setsTarget(fit
->kind
) ) {
4427 accumulator
= addressOf(state
, fit
, &target
);
4428 thumbTarget
= targetIsThumb(state
, fit
);
4432 switch ( fit
->kind
) {
4433 case ld::Fixup::kindSubtractTargetAddress
:
4434 accumulator
-= addressOf(state
, fit
, &fromTarget
);
4437 case ld::Fixup::kindAddAddend
:
4438 accumulator
+= fit
->u
.addend
;
4440 case ld::Fixup::kindSubtractAddend
:
4441 accumulator
-= fit
->u
.addend
;
4443 case ld::Fixup::kindStoreBigEndian32
:
4444 case ld::Fixup::kindStoreLittleEndian32
:
4445 case ld::Fixup::kindStoreLittleEndian64
:
4446 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4447 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4448 // if no subtract, then this is an absolute pointer which means
4449 // there is also a text reloc which update_dyld_shared_cache will use.
4450 if ( ! hadSubtract
)
4453 case ld::Fixup::kindStoreX86PCRel32
:
4454 case ld::Fixup::kindStoreX86PCRel32_1
:
4455 case ld::Fixup::kindStoreX86PCRel32_2
:
4456 case ld::Fixup::kindStoreX86PCRel32_4
:
4457 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
4458 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
4459 case ld::Fixup::kindStoreX86PCRel32GOT
:
4460 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
4461 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
4462 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
4463 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
4464 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
4465 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
4466 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
4467 case ld::Fixup::kindStoreARMLow16
:
4468 case ld::Fixup::kindStoreThumbLow16
:
4469 #if SUPPORT_ARCH_arm64
4470 case ld::Fixup::kindStoreARM64Page21
:
4471 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
4472 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
4473 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
4474 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
4475 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
4476 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
4477 case ld::Fixup::kindStoreARM64PCRelToGOT
:
4479 assert(target
!= NULL
);
4480 if ( strcmp(sect
->segmentName(), target
->section().segmentName()) != 0 ) {
4481 _splitSegInfos
.push_back(SplitSegInfoEntry(atom
->finalAddress()+fit
->offsetInAtom
,fit
->kind
));
4484 case ld::Fixup::kindStoreARMHigh16
:
4485 case ld::Fixup::kindStoreThumbHigh16
:
4486 assert(target
!= NULL
);
4487 if ( strcmp(sect
->segmentName(), target
->section().segmentName()) != 0 ) {
4488 // hi16 needs to know upper 4-bits of low16 to compute carry
4489 uint32_t extra
= (accumulator
>> 12) & 0xF;
4490 _splitSegInfos
.push_back(SplitSegInfoEntry(atom
->finalAddress()+fit
->offsetInAtom
,fit
->kind
, extra
));
4493 case ld::Fixup::kindSetTargetImageOffset
:
4494 accumulator
= addressOf(state
, fit
, &target
);
4495 assert(target
!= NULL
);
4507 void OutputFile::writeMapFile(ld::Internal
& state
)
4509 if ( _options
.generatedMapPath() != NULL
) {
4510 FILE* mapFile
= fopen(_options
.generatedMapPath(), "w");
4511 if ( mapFile
!= NULL
) {
4512 // write output path
4513 fprintf(mapFile
, "# Path: %s\n", _options
.outputFilePath());
4514 // write output architecure
4515 fprintf(mapFile
, "# Arch: %s\n", _options
.architectureName());
4517 //if ( fUUIDAtom != NULL ) {
4518 // const uint8_t* uuid = fUUIDAtom->getUUID();
4519 // fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
4520 // uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
4521 // uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
4523 // write table of object files
4524 std::map
<const ld::File
*, ld::File::Ordinal
> readerToOrdinal
;
4525 std::map
<ld::File::Ordinal
, const ld::File
*> ordinalToReader
;
4526 std::map
<const ld::File
*, uint32_t> readerToFileOrdinal
;
4527 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4528 ld::Internal::FinalSection
* sect
= *sit
;
4529 if ( sect
->isSectionHidden() )
4531 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4532 const ld::Atom
* atom
= *ait
;
4533 const ld::File
* reader
= atom
->file();
4534 if ( reader
== NULL
)
4536 ld::File::Ordinal readerOrdinal
= reader
->ordinal();
4537 std::map
<const ld::File
*, ld::File::Ordinal
>::iterator pos
= readerToOrdinal
.find(reader
);
4538 if ( pos
== readerToOrdinal
.end() ) {
4539 readerToOrdinal
[reader
] = readerOrdinal
;
4540 ordinalToReader
[readerOrdinal
] = reader
;
4544 fprintf(mapFile
, "# Object files:\n");
4545 fprintf(mapFile
, "[%3u] %s\n", 0, "linker synthesized");
4546 uint32_t fileIndex
= 1;
4547 for(std::map
<ld::File::Ordinal
, const ld::File
*>::iterator it
= ordinalToReader
.begin(); it
!= ordinalToReader
.end(); ++it
) {
4548 fprintf(mapFile
, "[%3u] %s\n", fileIndex
, it
->second
->path());
4549 readerToFileOrdinal
[it
->second
] = fileIndex
++;
4551 // write table of sections
4552 fprintf(mapFile
, "# Sections:\n");
4553 fprintf(mapFile
, "# Address\tSize \tSegment\tSection\n");
4554 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4555 ld::Internal::FinalSection
* sect
= *sit
;
4556 if ( sect
->isSectionHidden() )
4558 fprintf(mapFile
, "0x%08llX\t0x%08llX\t%s\t%s\n", sect
->address
, sect
->size
,
4559 sect
->segmentName(), sect
->sectionName());
4561 // write table of symbols
4562 fprintf(mapFile
, "# Symbols:\n");
4563 fprintf(mapFile
, "# Address\tSize \tFile Name\n");
4564 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4565 ld::Internal::FinalSection
* sect
= *sit
;
4566 if ( sect
->isSectionHidden() )
4568 //bool isCstring = (sect->type() == ld::Section::typeCString);
4569 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4571 const ld::Atom
* atom
= *ait
;
4572 const char* name
= atom
->name();
4573 // don't add auto-stripped aliases to .map file
4574 if ( (atom
->size() == 0) && (atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
) )
4576 if ( atom
->contentType() == ld::Atom::typeCString
) {
4577 strcpy(buffer
, "literal string: ");
4578 strlcat(buffer
, (char*)atom
->rawContentPointer(), 4096);
4581 else if ( (atom
->contentType() == ld::Atom::typeCFI
) && (strcmp(name
, "FDE") == 0) ) {
4582 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
4583 if ( (fit
->kind
== ld::Fixup::kindSetTargetAddress
) && (fit
->clusterSize
== ld::Fixup::k1of4
) ) {
4584 if ( (fit
->binding
== ld::Fixup::bindingDirectlyBound
)
4585 && (fit
->u
.target
->section().type() == ld::Section::typeCode
) ) {
4586 strcpy(buffer
, "FDE for: ");
4587 strlcat(buffer
, fit
->u
.target
->name(), 4096);
4593 else if ( atom
->contentType() == ld::Atom::typeNonLazyPointer
) {
4594 strcpy(buffer
, "non-lazy-pointer");
4595 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
4596 if ( fit
->binding
== ld::Fixup::bindingsIndirectlyBound
) {
4597 strcpy(buffer
, "non-lazy-pointer-to: ");
4598 strlcat(buffer
, state
.indirectBindingTable
[fit
->u
.bindingIndex
]->name(), 4096);
4601 else if ( fit
->binding
== ld::Fixup::bindingDirectlyBound
) {
4602 strcpy(buffer
, "non-lazy-pointer-to-local: ");
4603 strlcat(buffer
, fit
->u
.target
->name(), 4096);
4609 fprintf(mapFile
, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom
->finalAddress(), atom
->size(),
4610 readerToFileOrdinal
[atom
->file()], name
);
4616 warning("could not write map file: %s\n", _options
.generatedMapPath());
4622 // used to sort atoms with debug notes
4623 class DebugNoteSorter
4626 bool operator()(const ld::Atom
* left
, const ld::Atom
* right
) const
4628 // first sort by reader
4629 ld::File::Ordinal leftFileOrdinal
= left
->file()->ordinal();
4630 ld::File::Ordinal rightFileOrdinal
= right
->file()->ordinal();
4631 if ( leftFileOrdinal
!= rightFileOrdinal
)
4632 return (leftFileOrdinal
< rightFileOrdinal
);
4634 // then sort by atom objectAddress
4635 uint64_t leftAddr
= left
->finalAddress();
4636 uint64_t rightAddr
= right
->finalAddress();
4637 return leftAddr
< rightAddr
;
4642 const char* OutputFile::assureFullPath(const char* path
)
4644 if ( path
[0] == '/' )
4646 char cwdbuff
[MAXPATHLEN
];
4647 if ( getcwd(cwdbuff
, MAXPATHLEN
) != NULL
) {
4649 asprintf(&result
, "%s/%s", cwdbuff
, path
);
4650 if ( result
!= NULL
)
// Returns the last-modification time of 'path', or 0 if the file cannot be
// stat'ed (e.g. it does not exist).  Used when synthesizing N_AST stab values.
static time_t fileModTime(const char* path) {
	struct stat statBuffer;
	if ( stat(path, &statBuffer) == 0 ) {
		return statBuffer.st_mtime;
	}
	return 0;
}
4665 void OutputFile::synthesizeDebugNotes(ld::Internal
& state
)
4667 // -S means don't synthesize debug map
4668 if ( _options
.debugInfoStripping() == Options::kDebugInfoNone
)
4670 // make a vector of atoms that come from files compiled with dwarf debug info
4671 std::vector
<const ld::Atom
*> atomsNeedingDebugNotes
;
4672 std::set
<const ld::Atom
*> atomsWithStabs
;
4673 atomsNeedingDebugNotes
.reserve(1024);
4674 const ld::relocatable::File
* objFile
= NULL
;
4675 bool objFileHasDwarf
= false;
4676 bool objFileHasStabs
= false;
4677 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4678 ld::Internal::FinalSection
* sect
= *sit
;
4679 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4680 const ld::Atom
* atom
= *ait
;
4681 // no stabs for atoms that would not be in the symbol table
4682 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotIn
)
4684 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
)
4686 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel
)
4688 // no stabs for absolute symbols
4689 if ( atom
->definition() == ld::Atom::definitionAbsolute
)
4691 // no stabs for .eh atoms
4692 if ( atom
->contentType() == ld::Atom::typeCFI
)
4694 // no stabs for string literal atoms
4695 if ( atom
->contentType() == ld::Atom::typeCString
)
4697 // no stabs for kernel dtrace probes
4698 if ( (_options
.outputKind() == Options::kStaticExecutable
) && (strncmp(atom
->name(), "__dtrace_probe$", 15) == 0) )
4700 const ld::File
* file
= atom
->file();
4701 if ( file
!= NULL
) {
4702 if ( file
!= objFile
) {
4703 objFileHasDwarf
= false;
4704 objFileHasStabs
= false;
4705 objFile
= dynamic_cast<const ld::relocatable::File
*>(file
);
4706 if ( objFile
!= NULL
) {
4707 switch ( objFile
->debugInfo() ) {
4708 case ld::relocatable::File::kDebugInfoNone
:
4710 case ld::relocatable::File::kDebugInfoDwarf
:
4711 objFileHasDwarf
= true;
4713 case ld::relocatable::File::kDebugInfoStabs
:
4714 case ld::relocatable::File::kDebugInfoStabsUUID
:
4715 objFileHasStabs
= true;
4720 if ( objFileHasDwarf
)
4721 atomsNeedingDebugNotes
.push_back(atom
);
4722 if ( objFileHasStabs
)
4723 atomsWithStabs
.insert(atom
);
4728 // sort by file ordinal then atom ordinal
4729 std::sort(atomsNeedingDebugNotes
.begin(), atomsNeedingDebugNotes
.end(), DebugNoteSorter());
4731 // <rdar://problem/17689030> Add -add_ast_path option to linker which add N_AST stab entry to output
4732 const std::vector
<const char*>& astPaths
= _options
.astFilePaths();
4733 for (std::vector
<const char*>::const_iterator it
=astPaths
.begin(); it
!= astPaths
.end(); it
++) {
4734 const char* path
= *it
;
4736 ld::relocatable::File::Stab astStab
;
4737 astStab
.atom
= NULL
;
4738 astStab
.type
= N_AST
;
4741 astStab
.value
= fileModTime(path
);
4742 astStab
.string
= path
;
4743 state
.stabs
.push_back(astStab
);
4746 // synthesize "debug notes" and add them to master stabs vector
4747 const char* dirPath
= NULL
;
4748 const char* filename
= NULL
;
4749 bool wroteStartSO
= false;
4750 state
.stabs
.reserve(atomsNeedingDebugNotes
.size()*4);
4751 std::unordered_set
<const char*, CStringHash
, CStringEquals
> seenFiles
;
4752 for (std::vector
<const ld::Atom
*>::iterator it
=atomsNeedingDebugNotes
.begin(); it
!= atomsNeedingDebugNotes
.end(); it
++) {
4753 const ld::Atom
* atom
= *it
;
4754 const ld::File
* atomFile
= atom
->file();
4755 const ld::relocatable::File
* atomObjFile
= dynamic_cast<const ld::relocatable::File
*>(atomFile
);
4756 //fprintf(stderr, "debug note for %s\n", atom->name());
4757 const char* newPath
= atom
->translationUnitSource();
4758 if ( newPath
!= NULL
) {
4759 const char* newDirPath
;
4760 const char* newFilename
;
4761 const char* lastSlash
= strrchr(newPath
, '/');
4762 if ( lastSlash
== NULL
)
4764 newFilename
= lastSlash
+1;
4765 char* temp
= strdup(newPath
);
4767 // gdb like directory SO's to end in '/', but dwarf DW_AT_comp_dir usually does not have trailing '/'
4768 temp
[lastSlash
-newPath
+1] = '\0';
4769 // need SO's whenever the translation unit source file changes
4770 if ( (filename
== NULL
) || (strcmp(newFilename
,filename
) != 0) || (strcmp(newDirPath
,dirPath
) != 0)) {
4771 if ( filename
!= NULL
) {
4772 // translation unit change, emit ending SO
4773 ld::relocatable::File::Stab endFileStab
;
4774 endFileStab
.atom
= NULL
;
4775 endFileStab
.type
= N_SO
;
4776 endFileStab
.other
= 1;
4777 endFileStab
.desc
= 0;
4778 endFileStab
.value
= 0;
4779 endFileStab
.string
= "";
4780 state
.stabs
.push_back(endFileStab
);
4782 // new translation unit, emit start SO's
4783 ld::relocatable::File::Stab dirPathStab
;
4784 dirPathStab
.atom
= NULL
;
4785 dirPathStab
.type
= N_SO
;
4786 dirPathStab
.other
= 0;
4787 dirPathStab
.desc
= 0;
4788 dirPathStab
.value
= 0;
4789 dirPathStab
.string
= newDirPath
;
4790 state
.stabs
.push_back(dirPathStab
);
4791 ld::relocatable::File::Stab fileStab
;
4792 fileStab
.atom
= NULL
;
4793 fileStab
.type
= N_SO
;
4797 fileStab
.string
= newFilename
;
4798 state
.stabs
.push_back(fileStab
);
4799 // Synthesize OSO for start of file
4800 ld::relocatable::File::Stab objStab
;
4801 objStab
.atom
= NULL
;
4802 objStab
.type
= N_OSO
;
4803 // <rdar://problem/6337329> linker should put cpusubtype in n_sect field of nlist entry for N_OSO debug note entries
4804 objStab
.other
= atomFile
->cpuSubType();
4806 if ( atomObjFile
!= NULL
) {
4807 objStab
.string
= assureFullPath(atomObjFile
->debugInfoPath());
4808 objStab
.value
= atomObjFile
->debugInfoModificationTime();
4811 objStab
.string
= assureFullPath(atomFile
->path());
4812 objStab
.value
= atomFile
->modificationTime();
4814 state
.stabs
.push_back(objStab
);
4815 wroteStartSO
= true;
4816 // add the source file path to seenFiles so it does not show up in SOLs
4817 seenFiles
.insert(newFilename
);
4819 asprintf(&fullFilePath
, "%s%s", newDirPath
, newFilename
);
4820 // add both leaf path and full path
4821 seenFiles
.insert(fullFilePath
);
4823 filename
= newFilename
;
4824 dirPath
= newDirPath
;
4825 if ( atom
->section().type() == ld::Section::typeCode
) {
4826 // Synthesize BNSYM and start FUN stabs
4827 ld::relocatable::File::Stab beginSym
;
4828 beginSym
.atom
= atom
;
4829 beginSym
.type
= N_BNSYM
;
4833 beginSym
.string
= "";
4834 state
.stabs
.push_back(beginSym
);
4835 ld::relocatable::File::Stab startFun
;
4836 startFun
.atom
= atom
;
4837 startFun
.type
= N_FUN
;
4841 startFun
.string
= atom
->name();
4842 state
.stabs
.push_back(startFun
);
4843 // Synthesize any SOL stabs needed
4844 const char* curFile
= NULL
;
4845 for (ld::Atom::LineInfo::iterator lit
= atom
->beginLineInfo(); lit
!= atom
->endLineInfo(); ++lit
) {
4846 if ( lit
->fileName
!= curFile
) {
4847 if ( seenFiles
.count(lit
->fileName
) == 0 ) {
4848 seenFiles
.insert(lit
->fileName
);
4849 ld::relocatable::File::Stab sol
;
4855 sol
.string
= lit
->fileName
;
4856 state
.stabs
.push_back(sol
);
4858 curFile
= lit
->fileName
;
4861 // Synthesize end FUN and ENSYM stabs
4862 ld::relocatable::File::Stab endFun
;
4864 endFun
.type
= N_FUN
;
4869 state
.stabs
.push_back(endFun
);
4870 ld::relocatable::File::Stab endSym
;
4872 endSym
.type
= N_ENSYM
;
4877 state
.stabs
.push_back(endSym
);
4880 ld::relocatable::File::Stab globalsStab
;
4881 const char* name
= atom
->name();
4882 if ( atom
->scope() == ld::Atom::scopeTranslationUnit
) {
4883 // Synthesize STSYM stab for statics
4884 globalsStab
.atom
= atom
;
4885 globalsStab
.type
= N_STSYM
;
4886 globalsStab
.other
= 1;
4887 globalsStab
.desc
= 0;
4888 globalsStab
.value
= 0;
4889 globalsStab
.string
= name
;
4890 state
.stabs
.push_back(globalsStab
);
4893 // Synthesize GSYM stab for other globals
4894 globalsStab
.atom
= atom
;
4895 globalsStab
.type
= N_GSYM
;
4896 globalsStab
.other
= 1;
4897 globalsStab
.desc
= 0;
4898 globalsStab
.value
= 0;
4899 globalsStab
.string
= name
;
4900 state
.stabs
.push_back(globalsStab
);
4906 if ( wroteStartSO
) {
4908 ld::relocatable::File::Stab endFileStab
;
4909 endFileStab
.atom
= NULL
;
4910 endFileStab
.type
= N_SO
;
4911 endFileStab
.other
= 1;
4912 endFileStab
.desc
= 0;
4913 endFileStab
.value
= 0;
4914 endFileStab
.string
= "";
4915 state
.stabs
.push_back(endFileStab
);
4918 // copy any stabs from .o file
4919 std::set
<const ld::File
*> filesSeenWithStabs
;
4920 for (std::set
<const ld::Atom
*>::iterator it
=atomsWithStabs
.begin(); it
!= atomsWithStabs
.end(); it
++) {
4921 const ld::Atom
* atom
= *it
;
4922 objFile
= dynamic_cast<const ld::relocatable::File
*>(atom
->file());
4923 if ( objFile
!= NULL
) {
4924 if ( filesSeenWithStabs
.count(objFile
) == 0 ) {
4925 filesSeenWithStabs
.insert(objFile
);
4926 const std::vector
<ld::relocatable::File::Stab
>* stabs
= objFile
->stabs();
4927 if ( stabs
!= NULL
) {
4928 for(std::vector
<ld::relocatable::File::Stab
>::const_iterator sit
= stabs
->begin(); sit
!= stabs
->end(); ++sit
) {
4929 ld::relocatable::File::Stab stab
= *sit
;
4930 // ignore stabs associated with atoms that were dead stripped or coalesced away
4931 if ( (sit
->atom
!= NULL
) && (atomsWithStabs
.count(sit
->atom
) == 0) )
4933 // <rdar://problem/8284718> Value of N_SO stabs should be address of first atom from translation unit
4934 if ( (stab
.type
== N_SO
) && (stab
.string
!= NULL
) && (stab
.string
[0] != '\0') ) {
4937 state
.stabs
.push_back(stab
);