1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-*
3 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
27 #include <sys/types.h>
30 #include <sys/sysctl.h>
31 #include <sys/param.h>
32 #include <sys/mount.h>
37 #include <mach/mach_time.h>
38 #include <mach/vm_statistics.h>
39 #include <mach/mach_init.h>
40 #include <mach/mach_host.h>
41 #include <uuid/uuid.h>
43 #include <mach-o/dyld.h>
44 #include <mach-o/fat.h>
53 #include <unordered_set>
58 #include <CommonCrypto/CommonDigest.h>
59 #include <AvailabilityMacros.h>
61 #include "MachOTrie.hpp"
65 #include "OutputFile.h"
66 #include "Architectures.hpp"
67 #include "HeaderAndLoadCommands.hpp"
68 #include "LinkEdit.hpp"
69 #include "LinkEditClassic.hpp"
// Counters tallying ADRP instructions that were / were not rewritten to NOPs.
// NOTE(review): presumably updated by the ARM64 linker-optimization-hint
// pass — confirm against the code that increments them (not in this view).
uint32_t sAdrpNoped = 0;
uint32_t sAdrpNotNoped = 0;
// Constructor: records which LINKEDIT pieces (dyld info, symbol tables,
// relocations, split-seg info, function starts, data-in-code, optimization
// hints) this output file will need, based on the command-line Options.
// All section and atom pointers start NULL and are filled in later when the
// corresponding LINKEDIT content is created.
// NOTE(review): this listing is line-garbled; the initializer-list ':' and
// at least one member init (original lines 80 and 92, likely including
// _options(opts)) are missing from this view — restore from upstream source.
79 OutputFile::OutputFile(const Options
& opts
)
81 usesWeakExternalSymbols(false), overridesWeakExternalSymbols(false),
82 _noReExportedDylibs(false), pieDisabled(false), hasDataInCode(false),
// Section pointers for each LINKEDIT component, created on demand.
83 headerAndLoadCommandsSection(NULL
),
84 rebaseSection(NULL
), bindingSection(NULL
), weakBindingSection(NULL
),
85 lazyBindingSection(NULL
), exportSection(NULL
),
86 splitSegInfoSection(NULL
), functionStartsSection(NULL
),
87 dataInCodeSection(NULL
), optimizationHintsSection(NULL
),
88 symbolTableSection(NULL
), stringPoolSection(NULL
),
89 localRelocationsSection(NULL
), externalRelocationsSection(NULL
),
90 sectionRelocationsSection(NULL
),
91 indirectSymbolTableSection(NULL
),
// Which LINKEDIT blobs are needed: compressed dyld info replaces classic
// local/external relocations (see _hasLocalRelocations/_hasExternalRelocations
// below, which are the negation of makeCompressedDyldInfo()).
93 _hasDyldInfo(opts
.makeCompressedDyldInfo()),
94 _hasSymbolTable(true),
95 _hasSectionRelocations(opts
.outputKind() == Options::kObjectFile
),
96 _hasSplitSegInfo(opts
.sharedRegionEligible()),
97 _hasFunctionStartsInfo(opts
.addFunctionStarts()),
98 _hasDataInCodeInfo(opts
.addDataInCodeInfo()),
99 _hasDynamicSymbolTable(true),
100 _hasLocalRelocations(!opts
.makeCompressedDyldInfo()),
101 _hasExternalRelocations(!opts
.makeCompressedDyldInfo()),
// Linker optimization hints are only carried through in object files.
102 _hasOptimizationHints(opts
.outputKind() == Options::kObjectFile
),
// Encryption range and symbol-table partition counters are computed later.
103 _encryptedTEXTstartOffset(0),
104 _encryptedTEXTendOffset(0),
105 _localSymbolsStartIndex(0),
106 _localSymbolsCount(0),
107 _globalSymbolsStartIndex(0),
108 _globalSymbolsCount(0),
109 _importSymbolsStartIndex(0),
110 _importSymbolsCount(0),
// Atoms that generate each LINKEDIT blob; created in addLinkEdit().
111 _sectionsRelocationsAtom(NULL
),
112 _localRelocsAtom(NULL
),
113 _externalRelocsAtom(NULL
),
114 _symbolTableAtom(NULL
),
115 _indirectSymbolTableAtom(NULL
),
116 _rebasingInfoAtom(NULL
),
117 _bindingInfoAtom(NULL
),
118 _lazyBindingInfoAtom(NULL
),
119 _weakBindingInfoAtom(NULL
),
120 _exportInfoAtom(NULL
),
121 _splitSegInfoAtom(NULL
),
122 _functionStartsAtom(NULL
),
123 _dataInCodeAtom(NULL
),
124 _optimizationHintsAtom(NULL
)
128 void OutputFile::dumpAtomsBySection(ld::Internal
& state
, bool printAtoms
)
130 fprintf(stderr
, "SORTED:\n");
131 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
132 fprintf(stderr
, "final section %p %s/%s %s start addr=0x%08llX, size=0x%08llX, alignment=%02d, fileOffset=0x%08llX\n",
133 (*it
), (*it
)->segmentName(), (*it
)->sectionName(), (*it
)->isSectionHidden() ? "(hidden)" : "",
134 (*it
)->address
, (*it
)->size
, (*it
)->alignment
, (*it
)->fileOffset
);
136 std::vector
<const ld::Atom
*>& atoms
= (*it
)->atoms
;
137 for (std::vector
<const ld::Atom
*>::iterator ait
= atoms
.begin(); ait
!= atoms
.end(); ++ait
) {
138 fprintf(stderr
, " %p (0x%04llX) %s\n", *ait
, (*ait
)->size(), (*ait
)->name());
142 fprintf(stderr
, "DYLIBS:\n");
143 for (std::vector
<ld::dylib::File
*>::iterator it
=state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
)
144 fprintf(stderr
, " %s\n", (*it
)->installPath());
147 void OutputFile::write(ld::Internal
& state
)
149 this->buildDylibOrdinalMapping(state
);
150 this->addLoadCommands(state
);
151 this->addLinkEdit(state
);
152 state
.setSectionSizesAndAlignments();
153 this->setLoadCommandsPadding(state
);
154 _fileSize
= state
.assignFileOffsets();
155 this->assignAtomAddresses(state
);
156 this->synthesizeDebugNotes(state
);
157 this->buildSymbolTable(state
);
158 this->generateLinkEditInfo(state
);
159 if ( _options
.sharedRegionEncodingV2() )
160 this->makeSplitSegInfoV2(state
);
162 this->makeSplitSegInfo(state
);
163 this->updateLINKEDITAddresses(state
);
164 //this->dumpAtomsBySection(state, false);
165 this->writeOutputFile(state
);
166 this->writeMapFile(state
);
167 this->writeJSONEntry(state
);
// Scans the final sections (grouped by segment name) looking for the segment
// containing 'addr'; on success reports the segment's start/end addresses
// (and, per the signature, its index) through the out parameters.
// NOTE(review): this listing is garbled — the lines that assign *index,
// return true on a hit, advance segIndex, and update lastSection are missing
// (original lines 183-195); restore from upstream source before editing.
170 bool OutputFile::findSegment(ld::Internal
& state
, uint64_t addr
, uint64_t* start
, uint64_t* end
, uint32_t* index
)
172 uint32_t segIndex
= 0;
// First section of the segment currently being walked, and the most recently
// seen section (the segment's extent is [first->address, last->address+size)).
173 ld::Internal::FinalSection
* segFirstSection
= NULL
;
174 ld::Internal::FinalSection
* lastSection
= NULL
;
175 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
176 ld::Internal::FinalSection
* sect
= *it
;
// A segment-name change means the previous segment is complete; test it.
177 if ( (segFirstSection
== NULL
) || strcmp(segFirstSection
->segmentName(), sect
->segmentName()) != 0 ) {
178 if ( segFirstSection
!= NULL
) {
179 //fprintf(stderr, "findSegment(0x%llX) seg changed to %s\n", addr, sect->segmentName());
180 if ( (addr
>= segFirstSection
->address
) && (addr
< lastSection
->address
+lastSection
->size
) ) {
181 *start
= segFirstSection
->address
;
182 *end
= lastSection
->address
+lastSection
->size
;
188 segFirstSection
= sect
;
196 void OutputFile::assignAtomAddresses(ld::Internal
& state
)
198 const bool log
= false;
199 if ( log
) fprintf(stderr
, "assignAtomAddresses()\n");
200 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
201 ld::Internal::FinalSection
* sect
= *sit
;
202 if ( log
) fprintf(stderr
, " section=%s/%s\n", sect
->segmentName(), sect
->sectionName());
203 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
204 const ld::Atom
* atom
= *ait
;
205 switch ( sect
-> type() ) {
206 case ld::Section::typeImportProxies
:
207 // want finalAddress() of all proxy atoms to be zero
208 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(0);
210 case ld::Section::typeAbsoluteSymbols
:
211 // want finalAddress() of all absolute atoms to be value of abs symbol
212 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(0);
214 case ld::Section::typeLinkEdit
:
215 // linkedit layout is assigned later
218 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(sect
->address
);
219 if ( log
) fprintf(stderr
, " atom=%p, addr=0x%08llX, name=%s\n", atom
, atom
->finalAddress(), atom
->name());
226 void OutputFile::updateLINKEDITAddresses(ld::Internal
& state
)
228 if ( _options
.makeCompressedDyldInfo() ) {
229 // build dylb rebasing info
230 assert(_rebasingInfoAtom
!= NULL
);
231 _rebasingInfoAtom
->encode();
233 // build dyld binding info
234 assert(_bindingInfoAtom
!= NULL
);
235 _bindingInfoAtom
->encode();
237 // build dyld lazy binding info
238 assert(_lazyBindingInfoAtom
!= NULL
);
239 _lazyBindingInfoAtom
->encode();
241 // build dyld weak binding info
242 assert(_weakBindingInfoAtom
!= NULL
);
243 _weakBindingInfoAtom
->encode();
245 // build dyld export info
246 assert(_exportInfoAtom
!= NULL
);
247 _exportInfoAtom
->encode();
250 if ( _options
.sharedRegionEligible() ) {
251 // build split seg info
252 assert(_splitSegInfoAtom
!= NULL
);
253 _splitSegInfoAtom
->encode();
256 if ( _options
.addFunctionStarts() ) {
257 // build function starts info
258 assert(_functionStartsAtom
!= NULL
);
259 _functionStartsAtom
->encode();
262 if ( _options
.addDataInCodeInfo() ) {
263 // build data-in-code info
264 assert(_dataInCodeAtom
!= NULL
);
265 _dataInCodeAtom
->encode();
268 if ( _hasOptimizationHints
) {
269 // build linker-optimization-hint info
270 assert(_optimizationHintsAtom
!= NULL
);
271 _optimizationHintsAtom
->encode();
274 // build classic symbol table
275 assert(_symbolTableAtom
!= NULL
);
276 _symbolTableAtom
->encode();
277 assert(_indirectSymbolTableAtom
!= NULL
);
278 _indirectSymbolTableAtom
->encode();
280 // add relocations to .o files
281 if ( _options
.outputKind() == Options::kObjectFile
) {
282 assert(_sectionsRelocationsAtom
!= NULL
);
283 _sectionsRelocationsAtom
->encode();
286 if ( ! _options
.makeCompressedDyldInfo() ) {
287 // build external relocations
288 assert(_externalRelocsAtom
!= NULL
);
289 _externalRelocsAtom
->encode();
290 // build local relocations
291 assert(_localRelocsAtom
!= NULL
);
292 _localRelocsAtom
->encode();
295 // update address and file offsets now that linkedit content has been generated
296 uint64_t curLinkEditAddress
= 0;
297 uint64_t curLinkEditfileOffset
= 0;
298 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
299 ld::Internal::FinalSection
* sect
= *sit
;
300 if ( sect
->type() != ld::Section::typeLinkEdit
)
302 if ( curLinkEditAddress
== 0 ) {
303 curLinkEditAddress
= sect
->address
;
304 curLinkEditfileOffset
= sect
->fileOffset
;
306 uint16_t maxAlignment
= 0;
308 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
309 const ld::Atom
* atom
= *ait
;
310 //fprintf(stderr, "setting linkedit atom offset for %s\n", atom->name());
311 if ( atom
->alignment().powerOf2
> maxAlignment
)
312 maxAlignment
= atom
->alignment().powerOf2
;
313 // calculate section offset for this atom
314 uint64_t alignment
= 1 << atom
->alignment().powerOf2
;
315 uint64_t currentModulus
= (offset
% alignment
);
316 uint64_t requiredModulus
= atom
->alignment().modulus
;
317 if ( currentModulus
!= requiredModulus
) {
318 if ( requiredModulus
> currentModulus
)
319 offset
+= requiredModulus
-currentModulus
;
321 offset
+= requiredModulus
+alignment
-currentModulus
;
323 (const_cast<ld::Atom
*>(atom
))->setSectionOffset(offset
);
324 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(curLinkEditAddress
);
325 offset
+= atom
->size();
328 // section alignment is that of a contained atom with the greatest alignment
329 sect
->alignment
= maxAlignment
;
330 sect
->address
= curLinkEditAddress
;
331 sect
->fileOffset
= curLinkEditfileOffset
;
332 curLinkEditAddress
+= sect
->size
;
333 curLinkEditfileOffset
+= sect
->size
;
336 _fileSize
= state
.sections
.back()->fileOffset
+ state
.sections
.back()->size
;
// Computes how much padding to insert between the load commands and the first
// __TEXT section (so load commands can grow in post-processing, e.g.
// install_name_tool), then grows headerAndLoadCommandsSection by that amount.
// NOTE(review): this listing is garbled — several 'case' labels of the
// outputKind() switch (e.g. the kDyld label before original line 348), the
// 'addr' declaration, and break statements are missing; restore from
// upstream source before modifying control flow.
340 void OutputFile::setLoadCommandsPadding(ld::Internal
& state
)
342 // In other sections, any extra space is put and end of segment.
343 // In __TEXT segment, any extra space is put after load commands to allow post-processing of load commands
344 // Do a reverse layout of __TEXT segment to determine padding size and adjust section size
345 uint64_t paddingSize
= 0;
346 switch ( _options
.outputKind() ) {
348 // dyld itself has special padding requirements. We want the beginning __text section to start at a stable address
349 assert(strcmp(state
.sections
[1]->sectionName(),"__text") == 0);
350 state
.sections
[1]->alignment
= 12; // page align __text
352 case Options::kObjectFile
:
353 // mach-o .o files need no padding between load commands and first section
354 // but leave enough room that the object file could be signed
357 case Options::kPreload
:
358 // mach-o MH_PRELOAD files need no padding between load commands and first section
360 case Options::kKextBundle
:
361 if ( _options
.useTextExecSegment() ) {
365 // else fall into default case
367 // work backwards from end of segment and lay out sections so that extra room goes to padding atom
// Shared-region-eligible iOS 8+ images are laid out on 4KB boundaries even
// when the nominal __TEXT page size is 16KB.
369 uint64_t textSegPageSize
= _options
.segPageSize("__TEXT");
370 if ( _options
.sharedRegionEligible() && (_options
.iOSVersionMin() >= ld::iOS_8_0
) && (textSegPageSize
== 0x4000) )
371 textSegPageSize
= 0x1000;
// Reverse walk of __TEXT: subtract each section (aligned), and when the
// header/load-commands section is reached the residue mod page size is the
// natural padding.
372 for (std::vector
<ld::Internal::FinalSection
*>::reverse_iterator it
= state
.sections
.rbegin(); it
!= state
.sections
.rend(); ++it
) {
373 ld::Internal::FinalSection
* sect
= *it
;
374 if ( strcmp(sect
->segmentName(), "__TEXT") != 0 )
376 if ( sect
== headerAndLoadCommandsSection
) {
377 addr
-= headerAndLoadCommandsSection
->size
;
378 paddingSize
= addr
% textSegPageSize
;
382 addr
= addr
& (0 - (1 << sect
->alignment
));
385 // if command line requires more padding than this
386 uint32_t minPad
= _options
.minimumHeaderPad();
387 if ( _options
.maxMminimumHeaderPad() ) {
388 // -headerpad_max_install_names means there should be room for every path load command to grow to 1204 bytes
389 uint32_t altMin
= _dylibsToLoad
.size() * MAXPATHLEN
;
390 if ( _options
.outputKind() == Options::kDynamicLibrary
)
391 altMin
+= MAXPATHLEN
;
392 if ( altMin
> minPad
)
// Round the shortfall up to whole pages of extra padding.
395 if ( paddingSize
< minPad
) {
396 int extraPages
= (minPad
- paddingSize
+ _options
.segmentAlignment() - 1)/_options
.segmentAlignment();
397 paddingSize
+= extraPages
* _options
.segmentAlignment();
400 if ( _options
.makeEncryptable() ) {
401 // load commands must be on a separate non-encrypted page
402 int loadCommandsPage
= (headerAndLoadCommandsSection
->size
+ minPad
)/_options
.segmentAlignment();
403 int textPage
= (headerAndLoadCommandsSection
->size
+ paddingSize
)/_options
.segmentAlignment();
404 if ( loadCommandsPage
== textPage
) {
405 paddingSize
+= _options
.segmentAlignment();
408 // remember start for later use by load command
409 _encryptedTEXTstartOffset
= textPage
*_options
.segmentAlignment();
413 // add padding to size of section
414 headerAndLoadCommandsSection
->size
+= paddingSize
;
418 uint64_t OutputFile::pageAlign(uint64_t addr
)
420 const uint64_t alignment
= _options
.segmentAlignment();
421 return ((addr
+alignment
-1) & (-alignment
));
424 uint64_t OutputFile::pageAlign(uint64_t addr
, uint64_t pageSize
)
426 return ((addr
+pageSize
-1) & (-pageSize
));
429 static const char* makeName(const ld::Atom
& atom
)
431 static char buffer
[4096];
432 switch ( atom
.symbolTableInclusion() ) {
433 case ld::Atom::symbolTableNotIn
:
434 case ld::Atom::symbolTableNotInFinalLinkedImages
:
435 sprintf(buffer
, "%s@0x%08llX", atom
.name(), atom
.objectAddress());
437 case ld::Atom::symbolTableIn
:
438 case ld::Atom::symbolTableInAndNeverStrip
:
439 case ld::Atom::symbolTableInAsAbsolute
:
440 case ld::Atom::symbolTableInWithRandomAutoStripLabel
:
441 strlcpy(buffer
, atom
.name(), 4096);
// Returns a human-readable name for the target of a fixup, for diagnostics.
// Unbound by-name fixups store the name directly in u.target; bound fixups
// go through makeName() (shared static buffer — not reentrant).
// NOTE(review): the return value for the bindingNone case (original line 451)
// is missing from this garbled listing; the trailing "BAD BINDING" is the
// fall-through for unknown binding values.
447 static const char* referenceTargetAtomName(ld::Internal
& state
, const ld::Fixup
* ref
)
449 switch ( ref
->binding
) {
450 case ld::Fixup::bindingNone
:
// For by-name-unbound fixups, u.target actually holds the symbol name string.
452 case ld::Fixup::bindingByNameUnbound
:
453 return (char*)(ref
->u
.target
);
454 case ld::Fixup::bindingByContentBound
:
455 case ld::Fixup::bindingDirectlyBound
:
456 return makeName(*((ld::Atom
*)(ref
->u
.target
)));
// Indirectly-bound fixups resolve through the indirect binding table.
457 case ld::Fixup::bindingsIndirectlyBound
:
458 return makeName(*state
.indirectBindingTable
[ref
->u
.bindingIndex
]);
460 return "BAD BINDING";
463 bool OutputFile::targetIsThumb(ld::Internal
& state
, const ld::Fixup
* fixup
)
465 switch ( fixup
->binding
) {
466 case ld::Fixup::bindingByContentBound
:
467 case ld::Fixup::bindingDirectlyBound
:
468 return fixup
->u
.target
->isThumb();
469 case ld::Fixup::bindingsIndirectlyBound
:
470 return state
.indirectBindingTable
[fixup
->u
.bindingIndex
]->isThumb();
474 throw "unexpected binding";
477 uint64_t OutputFile::addressOf(const ld::Internal
& state
, const ld::Fixup
* fixup
, const ld::Atom
** target
)
479 if ( !_options
.makeCompressedDyldInfo() ) {
480 // For external relocations the classic mach-o format
481 // has addend only stored in the content. That means
482 // that the address of the target is not used.
483 if ( fixup
->contentAddendOnly
)
486 switch ( fixup
->binding
) {
487 case ld::Fixup::bindingNone
:
488 throw "unexpected bindingNone";
489 case ld::Fixup::bindingByNameUnbound
:
490 throw "unexpected bindingByNameUnbound";
491 case ld::Fixup::bindingByContentBound
:
492 case ld::Fixup::bindingDirectlyBound
:
493 *target
= fixup
->u
.target
;
494 return (*target
)->finalAddress();
495 case ld::Fixup::bindingsIndirectlyBound
:
496 *target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
498 if ( ! (*target
)->finalAddressMode() ) {
499 throwf("reference to symbol (which has not been assigned an address) %s", (*target
)->name());
502 return (*target
)->finalAddress();
504 throw "unexpected binding";
507 uint64_t OutputFile::addressAndTarget(const ld::Internal
& state
, const ld::Fixup
* fixup
, const ld::Atom
** target
)
509 switch ( fixup
->binding
) {
510 case ld::Fixup::bindingNone
:
511 throw "unexpected bindingNone";
512 case ld::Fixup::bindingByNameUnbound
:
513 throw "unexpected bindingByNameUnbound";
514 case ld::Fixup::bindingByContentBound
:
515 case ld::Fixup::bindingDirectlyBound
:
516 *target
= fixup
->u
.target
;
517 return (*target
)->finalAddress();
518 case ld::Fixup::bindingsIndirectlyBound
:
519 *target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
521 if ( ! (*target
)->finalAddressMode() ) {
522 throwf("reference to symbol (which has not been assigned an address) %s", (*target
)->name());
525 return (*target
)->finalAddress();
527 throw "unexpected binding";
531 uint64_t OutputFile::sectionOffsetOf(const ld::Internal
& state
, const ld::Fixup
* fixup
)
533 const ld::Atom
* target
= NULL
;
534 switch ( fixup
->binding
) {
535 case ld::Fixup::bindingNone
:
536 throw "unexpected bindingNone";
537 case ld::Fixup::bindingByNameUnbound
:
538 throw "unexpected bindingByNameUnbound";
539 case ld::Fixup::bindingByContentBound
:
540 case ld::Fixup::bindingDirectlyBound
:
541 target
= fixup
->u
.target
;
543 case ld::Fixup::bindingsIndirectlyBound
:
544 target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
547 assert(target
!= NULL
);
549 uint64_t targetAddress
= target
->finalAddress();
550 for (std::vector
<ld::Internal::FinalSection
*>::const_iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
551 const ld::Internal::FinalSection
* sect
= *it
;
552 if ( (sect
->address
<= targetAddress
) && (targetAddress
< (sect
->address
+sect
->size
)) )
553 return targetAddress
- sect
->address
;
555 throw "section not found for section offset";
560 uint64_t OutputFile::tlvTemplateOffsetOf(const ld::Internal
& state
, const ld::Fixup
* fixup
)
562 const ld::Atom
* target
= NULL
;
563 switch ( fixup
->binding
) {
564 case ld::Fixup::bindingNone
:
565 throw "unexpected bindingNone";
566 case ld::Fixup::bindingByNameUnbound
:
567 throw "unexpected bindingByNameUnbound";
568 case ld::Fixup::bindingByContentBound
:
569 case ld::Fixup::bindingDirectlyBound
:
570 target
= fixup
->u
.target
;
572 case ld::Fixup::bindingsIndirectlyBound
:
573 target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
576 assert(target
!= NULL
);
578 for (std::vector
<ld::Internal::FinalSection
*>::const_iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
579 const ld::Internal::FinalSection
* sect
= *it
;
580 switch ( sect
->type() ) {
581 case ld::Section::typeTLVInitialValues
:
582 case ld::Section::typeTLVZeroFill
:
583 return target
->finalAddress() - sect
->address
;
588 throw "section not found for tlvTemplateOffsetOf";
591 void OutputFile::printSectionLayout(ld::Internal
& state
)
593 // show layout of final image
594 fprintf(stderr
, "final section layout:\n");
595 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
596 if ( (*it
)->isSectionHidden() )
598 fprintf(stderr
, " %s/%s addr=0x%08llX, size=0x%08llX, fileOffset=0x%08llX, type=%d\n",
599 (*it
)->segmentName(), (*it
)->sectionName(),
600 (*it
)->address
, (*it
)->size
, (*it
)->fileOffset
, (*it
)->type());
605 void OutputFile::rangeCheck8(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
607 if ( (displacement
> 127) || (displacement
< -128) ) {
608 // show layout of final image
609 printSectionLayout(state
);
611 const ld::Atom
* target
;
612 throwf("8-bit reference out of range (%lld max is +/-127B): from %s (0x%08llX) to %s (0x%08llX)",
613 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
614 addressOf(state
, fixup
, &target
));
618 void OutputFile::rangeCheck16(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
620 const int64_t thirtyTwoKLimit
= 0x00007FFF;
621 if ( (displacement
> thirtyTwoKLimit
) || (displacement
< (-thirtyTwoKLimit
)) ) {
622 // show layout of final image
623 printSectionLayout(state
);
625 const ld::Atom
* target
;
626 throwf("16-bit reference out of range (%lld max is +/-32KB): from %s (0x%08llX) to %s (0x%08llX)",
627 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
628 addressOf(state
, fixup
, &target
));
632 void OutputFile::rangeCheckBranch32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
634 const int64_t twoGigLimit
= 0x7FFFFFFF;
635 if ( (displacement
> twoGigLimit
) || (displacement
< (-twoGigLimit
)) ) {
636 // show layout of final image
637 printSectionLayout(state
);
639 const ld::Atom
* target
;
640 throwf("32-bit branch out of range (%lld max is +/-2GB): from %s (0x%08llX) to %s (0x%08llX)",
641 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
642 addressOf(state
, fixup
, &target
));
647 void OutputFile::rangeCheckAbsolute32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
649 const int64_t fourGigLimit
= 0xFFFFFFFF;
650 if ( displacement
> fourGigLimit
) {
651 // <rdar://problem/9610466> cannot enforce 32-bit range checks on 32-bit archs because assembler loses sign information
652 // .long _foo - 0xC0000000
653 // is encoded in mach-o the same as:
654 // .long _foo + 0x40000000
655 // so if _foo lays out to 0xC0000100, the first is ok, but the second is not.
656 if ( (_options
.architecture() == CPU_TYPE_ARM
) || (_options
.architecture() == CPU_TYPE_I386
) ) {
657 // Unlikely userland code does funky stuff like this, so warn for them, but not warn for -preload or -static
658 if ( (_options
.outputKind() != Options::kPreload
) && (_options
.outputKind() != Options::kStaticExecutable
) ) {
659 warning("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
660 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), displacement
);
664 // show layout of final image
665 printSectionLayout(state
);
667 const ld::Atom
* target
;
668 if ( fixup
->binding
== ld::Fixup::bindingNone
)
669 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
670 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), displacement
);
672 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to %s (0x%08llX)",
673 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
674 addressOf(state
, fixup
, &target
));
679 void OutputFile::rangeCheckRIP32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
681 const int64_t twoGigLimit
= 0x7FFFFFFF;
682 if ( (displacement
> twoGigLimit
) || (displacement
< (-twoGigLimit
)) ) {
683 // show layout of final image
684 printSectionLayout(state
);
686 const ld::Atom
* target
;
687 throwf("32-bit RIP relative reference out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
688 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
689 addressOf(state
, fixup
, &target
));
693 void OutputFile::rangeCheckARM12(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
695 if ( (displacement
> 4092LL) || (displacement
< (-4092LL)) ) {
696 // show layout of final image
697 printSectionLayout(state
);
699 const ld::Atom
* target
;
700 throwf("ARM ldr 12-bit displacement out of range (%lld max is +/-4096B): from %s (0x%08llX) to %s (0x%08llX)",
701 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
702 addressOf(state
, fixup
, &target
));
706 bool OutputFile::checkArmBranch24Displacement(int64_t displacement
)
708 return ( (displacement
< 33554428LL) && (displacement
> (-33554432LL)) );
711 void OutputFile::rangeCheckARMBranch24(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
713 if ( checkArmBranch24Displacement(displacement
) )
716 // show layout of final image
717 printSectionLayout(state
);
719 const ld::Atom
* target
;
720 throwf("b/bl/blx ARM branch out of range (%lld max is +/-32MB): from %s (0x%08llX) to %s (0x%08llX)",
721 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
722 addressOf(state
, fixup
, &target
));
725 bool OutputFile::checkThumbBranch22Displacement(int64_t displacement
)
727 // thumb2 supports +/- 16MB displacement
728 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
729 if ( (displacement
> 16777214LL) || (displacement
< (-16777216LL)) ) {
734 // thumb1 supports +/- 4MB displacement
735 if ( (displacement
> 4194302LL) || (displacement
< (-4194304LL)) ) {
742 void OutputFile::rangeCheckThumbBranch22(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
744 if ( checkThumbBranch22Displacement(displacement
) )
747 // show layout of final image
748 printSectionLayout(state
);
750 const ld::Atom
* target
;
751 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
752 throwf("b/bl/blx thumb2 branch out of range (%lld max is +/-16MB): from %s (0x%08llX) to %s (0x%08llX)",
753 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
754 addressOf(state
, fixup
, &target
));
757 throwf("b/bl/blx thumb1 branch out of range (%lld max is +/-4MB): from %s (0x%08llX) to %s (0x%08llX)",
758 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
759 addressOf(state
, fixup
, &target
));
764 void OutputFile::rangeCheckARM64Branch26(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
766 const int64_t bl_128MegLimit
= 0x07FFFFFF;
767 if ( (displacement
> bl_128MegLimit
) || (displacement
< (-bl_128MegLimit
)) ) {
768 // show layout of final image
769 printSectionLayout(state
);
771 const ld::Atom
* target
;
772 throwf("b(l) ARM64 branch out of range (%lld max is +/-128MB): from %s (0x%08llX) to %s (0x%08llX)",
773 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
774 addressOf(state
, fixup
, &target
));
778 void OutputFile::rangeCheckARM64Page21(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
780 const int64_t adrp_4GigLimit
= 0x100000000ULL
;
781 if ( (displacement
> adrp_4GigLimit
) || (displacement
< (-adrp_4GigLimit
)) ) {
782 // show layout of final image
783 printSectionLayout(state
);
785 const ld::Atom
* target
;
786 throwf("ARM64 ADRP out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
787 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
788 addressOf(state
, fixup
, &target
));
793 uint16_t OutputFile::get16LE(uint8_t* loc
) { return LittleEndian::get16(*(uint16_t*)loc
); }
794 void OutputFile::set16LE(uint8_t* loc
, uint16_t value
) { LittleEndian::set16(*(uint16_t*)loc
, value
); }
796 uint32_t OutputFile::get32LE(uint8_t* loc
) { return LittleEndian::get32(*(uint32_t*)loc
); }
797 void OutputFile::set32LE(uint8_t* loc
, uint32_t value
) { LittleEndian::set32(*(uint32_t*)loc
, value
); }
799 uint64_t OutputFile::get64LE(uint8_t* loc
) { return LittleEndian::get64(*(uint64_t*)loc
); }
800 void OutputFile::set64LE(uint8_t* loc
, uint64_t value
) { LittleEndian::set64(*(uint64_t*)loc
, value
); }
802 uint16_t OutputFile::get16BE(uint8_t* loc
) { return BigEndian::get16(*(uint16_t*)loc
); }
803 void OutputFile::set16BE(uint8_t* loc
, uint16_t value
) { BigEndian::set16(*(uint16_t*)loc
, value
); }
805 uint32_t OutputFile::get32BE(uint8_t* loc
) { return BigEndian::get32(*(uint32_t*)loc
); }
806 void OutputFile::set32BE(uint8_t* loc
, uint32_t value
) { BigEndian::set32(*(uint32_t*)loc
, value
); }
808 uint64_t OutputFile::get64BE(uint8_t* loc
) { return BigEndian::get64(*(uint64_t*)loc
); }
809 void OutputFile::set64BE(uint8_t* loc
, uint64_t value
) { BigEndian::set64(*(uint64_t*)loc
, value
); }
// ARM64-only instruction encode/parse helpers used by the
// linker-optimization-hint (LOH) pass.
// NOTE(review): this listing is garbled — makeNOP()'s body and several
// LoadStoreInfo fields (original lines 819-820, 823: likely reg/baseReg/
// isStore, which later functions reference) are missing from this view.
811 #if SUPPORT_ARCH_arm64
813 static uint32_t makeNOP() {
// Sign-extension mode of a load: none, 32-bit, or 64-bit sign extend.
817 enum SignExtension
{ signedNot
, signed32
, signed64
};
// Decoded form of an ARM64 load/store used when rewriting instructions.
818 struct LoadStoreInfo
{
821 uint32_t offset
; // after scaling
822 uint32_t size
; // 1,2,4,8, or 16
824 bool isFloat
; // if destReg is FP/SIMD
825 SignExtension signEx
; // if load is sign extended
// Encodes an ARM64 PC-relative LDR (literal) that loads from 'targetAddress'
// into info.reg, given the instruction's own address.  The +/-1MB reach,
// register number, 4-byte alignment, and load-only use are asserted.
// NOTE(review): garbled listing — the 'case 4/8/16:' labels and 'break's of
// the size switch (and its closing braces) are missing from this view.
828 static uint32_t makeLDR_literal(const LoadStoreInfo
& info
, uint64_t targetAddress
, uint64_t instructionAddress
)
830 int64_t delta
= targetAddress
- instructionAddress
;
831 assert(delta
< 1024*1024);
832 assert(delta
> -1024*1024);
833 assert((info
.reg
& 0xFFFFFFE0) == 0);
834 assert((targetAddress
& 0x3) == 0);
835 assert((instructionAddress
& 0x3) == 0);
836 assert(!info
.isStore
);
// imm19 field: word-scaled delta placed at bits [23:5].
837 uint32_t imm19
= (delta
<< 3) & 0x00FFFFE0;
838 uint32_t instruction
= 0;
// Select the base opcode by access size, FP/SIMD flag, and sign extension.
839 switch ( info
.size
) {
841 if ( info
.isFloat
) {
842 assert(info
.signEx
== signedNot
);
843 instruction
= 0x1C000000;
846 if ( info
.signEx
== signed64
)
847 instruction
= 0x98000000;
849 instruction
= 0x18000000;
853 assert(info
.signEx
== signedNot
);
854 instruction
= info
.isFloat
? 0x5C000000 : 0x58000000;
857 assert(info
.signEx
== signedNot
);
858 instruction
= 0x9C000000;
861 assert(0 && "invalid load size for literal");
863 return (instruction
| imm19
| info
.reg
);
// Encodes an ARM64 ADR instruction that materializes 'targetAddress' into
// register 'destReg', given the instruction's own address.  The PC-relative
// delta must fit in +/-1MB (asserted), and the 21-bit immediate is split
// into immlo (bits [30:29]) and immhi (bits [23:5]).
static uint32_t makeADR(uint32_t destReg, uint64_t targetAddress, uint64_t instructionAddress)
{
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((instructionAddress & 0x3) == 0);
	const uint32_t opcode = 0x10000000;   // ADR base encoding
	int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	uint32_t hiBits = (delta & 0x001FFFFC) << 3;
	uint32_t loBits = (delta & 0x00000003) << 29;
	return opcode | hiBits | loBits | destReg;
}
// Encodes an ARM64 register+unsigned-immediate load/store from a decoded
// LoadStoreInfo: base opcode 0x39000000, FP/SIMD bit 0x04000000, dest reg in
// [4:0], base reg in [9:5], with size/opc/imm12 fields filled per info.size,
// info.isStore, info.isFloat and info.signEx.  The immediate is the byte
// offset scaled down by the access size (asserted to divide evenly and to
// fit in 12 bits).
// NOTE(review): garbled listing — the 'case' labels of both switches and the
// sizeBits/opcBits assignments they select are missing from this view;
// restore from upstream source before modifying.
879 static uint32_t makeLoadOrStore(const LoadStoreInfo
& info
)
881 uint32_t instruction
= 0x39000000;
883 instruction
|= 0x04000000;
884 instruction
|= info
.reg
;
885 instruction
|= (info
.baseReg
<< 5);
886 uint32_t sizeBits
= 0;
887 uint32_t opcBits
= 0;
888 uint32_t imm12Bits
= 0;
// Scale the byte offset into imm12 by access size; pick size/opc bits.
889 switch ( info
.size
) {
892 imm12Bits
= info
.offset
;
893 if ( info
.isStore
) {
897 switch ( info
.signEx
) {
912 assert((info
.offset
% 2) == 0);
913 imm12Bits
= info
.offset
/2;
914 if ( info
.isStore
) {
918 switch ( info
.signEx
) {
933 assert((info
.offset
% 4) == 0);
934 imm12Bits
= info
.offset
/4;
935 if ( info
.isStore
) {
939 switch ( info
.signEx
) {
944 assert(0 && "cannot use signed32 with 32-bit load/store");
954 assert((info
.offset
% 8) == 0);
955 imm12Bits
= info
.offset
/8;
956 if ( info
.isStore
) {
961 assert(info
.signEx
== signedNot
);
966 assert((info
.offset
% 16) == 0);
967 imm12Bits
= info
.offset
/16;
968 assert(info
.isFloat
);
969 if ( info
.isStore
) {
977 assert(0 && "bad load/store size");
980 assert(imm12Bits
< 4096);
981 return (instruction
| (sizeBits
<< 30) | (opcBits
<< 22) | (imm12Bits
<< 10));
// parseLoadOrStore(): decodes an AArch64 load/store register (unsigned
// immediate) instruction (mask 0x3B000000 must equal 0x39000000) into `info`:
// extracts the vector/float bit, target register (bits 0-4), and base
// register (bits 5-9), then classifies size/isStore/signEx from the combined
// size+opc bits (instruction & 0xC0C00000).  Finally, the byte offset is the
// scaled imm12 field (bits 10-21) multiplied by info.size.
// NOTE(review): the case labels and some size assignments of the big switch
// are elided in this extraction; verify against the complete file before
// changing any arm.
984 static bool parseLoadOrStore(uint32_t instruction
, LoadStoreInfo
& info
)
986 if ( (instruction
& 0x3B000000) != 0x39000000 )
988 info
.isFloat
= ( (instruction
& 0x04000000) != 0 );
989 info
.reg
= (instruction
& 0x1F);
990 info
.baseReg
= ((instruction
>>5) & 0x1F);
// classify by size (bits 30-31) and opc (bits 22-23)
991 switch (instruction
& 0xC0C00000) {
995 info
.signEx
= signedNot
;
999 info
.isStore
= false;
1000 info
.signEx
= signedNot
;
// byte opc=2: vector 16-byte store vs sign-extending byte load to 64-bit
1003 if ( info
.isFloat
) {
1005 info
.isStore
= true;
1006 info
.signEx
= signedNot
;
1010 info
.isStore
= false;
1011 info
.signEx
= signed64
;
// byte opc=3: vector 16-byte load vs sign-extending byte load to 32-bit
1015 if ( info
.isFloat
) {
1017 info
.isStore
= false;
1018 info
.signEx
= signedNot
;
1022 info
.isStore
= false;
1023 info
.signEx
= signed32
;
1028 info
.isStore
= true;
1029 info
.signEx
= signedNot
;
1033 info
.isStore
= false;
1034 info
.signEx
= signedNot
;
1038 info
.isStore
= false;
1039 info
.signEx
= signed64
;
1043 info
.isStore
= false;
1044 info
.signEx
= signed32
;
1048 info
.isStore
= true;
1049 info
.signEx
= signedNot
;
1053 info
.isStore
= false;
1054 info
.signEx
= signedNot
;
1058 info
.isStore
= false;
1059 info
.signEx
= signed64
;
1063 info
.isStore
= true;
1064 info
.signEx
= signedNot
;
1068 info
.isStore
= false;
1069 info
.signEx
= signedNot
;
// imm12 is scaled by the access size to give the byte offset
1074 info
.offset
= ((instruction
>> 10) & 0x0FFF) * info
.size
;
// parseADRP(): returns whether `instruction` is an ADRP (mask 0x9F000000
// must equal 0x90000000) and extracts its destination register (bits 0-4)
// into info.destReg.
// NOTE(review): the remainder of this function (the page-displacement
// extraction and the return statements) is elided in this extraction.
1082 static bool parseADRP(uint32_t instruction
, AdrpInfo
& info
)
1084 if ( (instruction
& 0x9F000000) != 0x90000000 )
1086 info
.destReg
= (instruction
& 0x1F);
1096 static bool parseADD(uint32_t instruction
, AddInfo
& info
)
1098 if ( (instruction
& 0xFFC00000) != 0x91000000 )
1100 info
.destReg
= (instruction
& 0x1F);
1101 info
.srcReg
= ((instruction
>>5) & 0x1F);
1102 info
.addend
= ((instruction
>>10) & 0xFFF);
// makeLDR_scaledOffset(): encodes an AArch64 LDR (base register + unsigned
// scaled immediate) instruction from `info`: base opcode 0x39000000 with
// size (bits 30-31), vector bit (bit 26), opc (bits 22-23), scaled offset
// (bits 10-21), base register (bits 5-9), and target register (bits 0-4).
// Asserts both registers are in 0-31, that info.offset is an exact multiple
// of info.size, and that the scaled offset fits the 12-bit field (< 4096).
// NOTE(review): the bodies of the signEx and size switches are elided in
// this extraction.  Also, the assert below reads "isFloat implies
// signEx != signedNot", which looks inverted relative to the literal-load
// helpers in this file -- confirm against the complete source.
1109 static uint32_t makeLDR_scaledOffset(const LoadStoreInfo
& info
)
1111 assert((info
.reg
& 0xFFFFFFE0) == 0);
1112 assert((info
.baseReg
& 0xFFFFFFE0) == 0);
1113 assert(!info
.isFloat
|| (info
.signEx
!= signedNot
));
1114 uint32_t sizeBits
= 0;
1115 uint32_t opcBits
= 1;
1116 uint32_t vBit
= info
.isFloat
;
1117 switch ( info
.signEx
) {
1128 assert(0 && "bad SignExtension runtime value");
1130 switch ( info
.size
) {
1149 assert(0 && "invalid load size for literal");
// offset must scale exactly into the 12-bit immediate
1151 assert((info
.offset
% info
.size
) == 0);
1152 uint32_t scaledOffset
= info
.offset
/info
.size
;
1153 assert(scaledOffset
< 4096);
1154 return (0x39000000 | (sizeBits
<<30) | (vBit
<<26) | (opcBits
<<22) | (scaledOffset
<<10) | (info
.baseReg
<<5) | info
.reg
);
// Encode an AArch64 LDR (literal) instruction loading `loadSize` bytes
// (4, 8, or 16) into destReg from a PC-relative address.  isFloat selects
// the SIMD&FP register file for the 4- and 8-byte forms; the 16-byte form
// (0x9C000000) is always a vector load.  The word displacement is placed in
// the 19-bit imm19 field (bits 5-23), giving a +/-1MB reach.
static uint32_t makeLDR_literal(uint32_t destReg, uint32_t loadSize, bool isFloat, uint64_t targetAddress, uint64_t instructionAddress)
{
	const int64_t displacement = targetAddress - instructionAddress;
	assert(displacement < 1024*1024);
	assert(displacement > -1024*1024);
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((targetAddress & 0x3) == 0);
	assert((instructionAddress & 0x3) == 0);
	const uint32_t imm19Field = (displacement << 3) & 0x00FFFFE0;
	uint32_t opcode = 0;
	switch ( loadSize ) {
		case 4:
			opcode = isFloat ? 0x1C000000 : 0x18000000;
			break;
		case 8:
			opcode = isFloat ? 0x5C000000 : 0x58000000;
			break;
		case 16:
			opcode = 0x9C000000;
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	return opcode | imm19Field | destReg;
}
// ldrInfo(): decodes an LDR (immediate, unsigned offset) instruction,
// reporting the vector bit (*v), the destination register (*destReg), and
// the byte offset (*scaledOffset), which is the imm12 field multiplied by
// the access size implied by bits 30-31 (with bit 23 distinguishing the
// byte and 16-byte-vector forms that share size bits).  Returns whether the
// instruction really is a load (mask 0x3B400000 == 0x39400000).
// NOTE(review): several case labels/arms are elided in this extraction, and
// the `size` out-parameter assignments are among the missing lines.
1184 static bool ldrInfo(uint32_t instruction
, uint8_t* size
, uint8_t* destReg
, bool* v
, uint32_t* scaledOffset
)
1186 *v
= ( (instruction
& 0x04000000) != 0 );
1187 *destReg
= (instruction
& 0x1F);
1188 uint32_t imm12
= ((instruction
>> 10) & 0x00000FFF);
1189 switch ( (instruction
& 0xC0000000) >> 30 ) {
1191 // vector and byte LDR have same "size" bits, need to check other bits to differentiate
1192 if ( (instruction
& 0x00800000) == 0 ) {
1194 *scaledOffset
= imm12
;
1198 *scaledOffset
= imm12
* 16;
1203 *scaledOffset
= imm12
* 2;
1207 *scaledOffset
= imm12
* 4;
1211 *scaledOffset
= imm12
* 8;
1214 return ((instruction
& 0x3B400000) == 0x39400000);
// True when the two addresses are strictly less than 1MB apart in either
// direction (the reach of ADR / LDR-literal on arm64).
static bool withinOneMeg(uint64_t addr1, uint64_t addr2) {
	const int64_t displacement = (int64_t)(addr2 - addr1);
	return (displacement > -1024*1024) && (displacement < 1024*1024);
}
1222 #endif // SUPPORT_ARCH_arm64
// setInfo(): fills in an InstructionInfo describing the instruction at
// offsetInAtom+delta within `atom`.  If a linker-optimization-hint slot is
// registered at that offset in usedByHints (with a non-NULL fixup), the
// fixup and its resolved target address are recorded -- folding in the
// addend of a following kindAddAddend fixup when the cluster is larger than
// k1of1; otherwise the target fields are zeroed.  In all cases the raw
// instruction bytes, their final address, and the little-endian instruction
// word are captured.
// NOTE(review): a few lines (the else-branch opening and the
// info->fixup = NULL reset) are elided in this extraction.
1224 void OutputFile::setInfo(ld::Internal
& state
, const ld::Atom
* atom
, uint8_t* buffer
, const std::map
<uint32_t, const Fixup
*>& usedByHints
,
1225 uint32_t offsetInAtom
, uint32_t delta
, InstructionInfo
* info
)
1227 info
->offsetInAtom
= offsetInAtom
+ delta
;
// is there a hint fixup registered at this instruction's offset?
1228 std::map
<uint32_t, const Fixup
*>::const_iterator pos
= usedByHints
.find(info
->offsetInAtom
);
1229 if ( (pos
!= usedByHints
.end()) && (pos
->second
!= NULL
) ) {
1230 info
->fixup
= pos
->second
;
1231 info
->targetAddress
= addressOf(state
, info
->fixup
, &info
->target
);
// multi-fixup cluster: expect an addend fixup immediately after
1232 if ( info
->fixup
->clusterSize
!= ld::Fixup::k1of1
) {
1233 assert(info
->fixup
->firstInCluster());
1234 const ld::Fixup
* nextFixup
= info
->fixup
+ 1;
1235 if ( nextFixup
->kind
== ld::Fixup::kindAddAddend
) {
1236 info
->targetAddress
+= nextFixup
->u
.addend
;
1239 assert(0 && "expected addend");
1245 info
->targetAddress
= 0;
1246 info
->target
= NULL
;
// always capture the instruction bytes, address, and LE word
1248 info
->instructionContent
= &buffer
[info
->offsetInAtom
];
1249 info
->instructionAddress
= atom
->finalAddress() + info
->offsetInAtom
;
1250 info
->instruction
= get32LE(info
->instructionContent
);
1253 #if SUPPORT_ARCH_arm64
// isPageKind(): whether `fixup` is an ARM64 Page21-class fixup (the ADRP
// half of an adrp/add or adrp/ldr pair).  With mustBeGOT set, presumably
// only the GOT-loading Page21 kinds qualify -- the discriminating code is
// elided here, so confirm against the complete file.  kindSetTargetAddress
// walks the fixup cluster (the do/while) to find the store kind.
// NOTE(review): the return statements and the do/while body are among the
// lines missing from this extraction.
1254 static bool isPageKind(const ld::Fixup
* fixup
, bool mustBeGOT
=false)
1256 if ( fixup
== NULL
)
1259 switch ( fixup
->kind
) {
1260 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
1262 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
1263 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
1264 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
1265 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
1267 case ld::Fixup::kindSetTargetAddress
:
1271 } while ( ! f
->lastInCluster() );
1273 case ld::Fixup::kindStoreARM64Page21
:
1275 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
1276 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
1277 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
1278 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
// isPageOffsetKind(): whether `fixup` is an ARM64 PageOff12-class fixup
// (the add/ldr half of an adrp pair, carrying the low 12 bits).  With
// mustBeGOT set, presumably only the GOT-loading PageOff12 kinds qualify --
// the discriminating code is elided here, so confirm against the complete
// file.  kindSetTargetAddress walks the fixup cluster (the do/while) to
// find the store kind.
// NOTE(review): the return statements and the do/while body are among the
// lines missing from this extraction.
1290 static bool isPageOffsetKind(const ld::Fixup
* fixup
, bool mustBeGOT
=false)
1292 if ( fixup
== NULL
)
1295 switch ( fixup
->kind
) {
1296 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
1298 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
1299 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
1300 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
1301 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
1303 case ld::Fixup::kindSetTargetAddress
:
1307 } while ( ! f
->lastInCluster() );
1309 case ld::Fixup::kindStoreARM64PageOff12
:
1311 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
1312 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
1313 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12
:
1314 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
1325 #endif // SUPPORT_ARCH_arm64
1328 #define LOH_ASSERT(cond) \
1330 warning("ignoring linker optimization hint at %s+0x%X because " #cond, atom->name(), fit->offsetInAtom); \
1334 void OutputFile::applyFixUps(ld::Internal
& state
, uint64_t mhAddress
, const ld::Atom
* atom
, uint8_t* buffer
)
1336 //fprintf(stderr, "applyFixUps() on %s\n", atom->name());
1337 int64_t accumulator
= 0;
1338 const ld::Atom
* toTarget
= NULL
;
1339 const ld::Atom
* fromTarget
;
1341 uint32_t instruction
;
1342 uint32_t newInstruction
;
1346 bool thumbTarget
= false;
1347 std::map
<uint32_t, const Fixup
*> usedByHints
;
1348 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
1349 uint8_t* fixUpLocation
= &buffer
[fit
->offsetInAtom
];
1350 ld::Fixup::LOH_arm64 lohExtra
;
1351 switch ( (ld::Fixup::Kind
)(fit
->kind
) ) {
1352 case ld::Fixup::kindNone
:
1353 case ld::Fixup::kindNoneFollowOn
:
1354 case ld::Fixup::kindNoneGroupSubordinate
:
1355 case ld::Fixup::kindNoneGroupSubordinateFDE
:
1356 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
1357 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
1359 case ld::Fixup::kindSetTargetAddress
:
1360 accumulator
= addressOf(state
, fit
, &toTarget
);
1361 thumbTarget
= targetIsThumb(state
, fit
);
1364 if ( fit
->contentAddendOnly
|| fit
->contentDetlaToAddendOnly
)
1367 case ld::Fixup::kindSubtractTargetAddress
:
1368 delta
= addressOf(state
, fit
, &fromTarget
);
1369 if ( ! fit
->contentAddendOnly
)
1370 accumulator
-= delta
;
1372 case ld::Fixup::kindAddAddend
:
1373 if ( ! fit
->contentIgnoresAddend
) {
1374 // <rdar://problem/8342028> ARM main executables main contain .long constants pointing
1375 // into themselves such as jump tables. These .long should not have thumb bit set
1376 // even though the target is a thumb instruction. We can tell it is an interior pointer
1377 // because we are processing an addend.
1378 if ( thumbTarget
&& (toTarget
== atom
) && ((int32_t)fit
->u
.addend
> 0) ) {
1379 accumulator
&= (-2);
1380 //warning("removing thumb bit from intra-atom pointer in %s %s+0x%0X",
1381 // atom->section().sectionName(), atom->name(), fit->offsetInAtom);
1383 accumulator
+= fit
->u
.addend
;
1386 case ld::Fixup::kindSubtractAddend
:
1387 accumulator
-= fit
->u
.addend
;
1389 case ld::Fixup::kindSetTargetImageOffset
:
1390 accumulator
= addressOf(state
, fit
, &toTarget
) - mhAddress
;
1391 thumbTarget
= targetIsThumb(state
, fit
);
1395 case ld::Fixup::kindSetTargetSectionOffset
:
1396 accumulator
= sectionOffsetOf(state
, fit
);
1398 case ld::Fixup::kindSetTargetTLVTemplateOffset
:
1399 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1401 case ld::Fixup::kindStore8
:
1402 *fixUpLocation
+= accumulator
;
1404 case ld::Fixup::kindStoreLittleEndian16
:
1405 set16LE(fixUpLocation
, accumulator
);
1407 case ld::Fixup::kindStoreLittleEndianLow24of32
:
1408 set32LE(fixUpLocation
, (get32LE(fixUpLocation
) & 0xFF000000) | (accumulator
& 0x00FFFFFF) );
1410 case ld::Fixup::kindStoreLittleEndian32
:
1411 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1412 set32LE(fixUpLocation
, accumulator
);
1414 case ld::Fixup::kindStoreLittleEndian64
:
1415 set64LE(fixUpLocation
, accumulator
);
1417 case ld::Fixup::kindStoreBigEndian16
:
1418 set16BE(fixUpLocation
, accumulator
);
1420 case ld::Fixup::kindStoreBigEndianLow24of32
:
1421 set32BE(fixUpLocation
, (get32BE(fixUpLocation
) & 0xFF000000) | (accumulator
& 0x00FFFFFF) );
1423 case ld::Fixup::kindStoreBigEndian32
:
1424 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1425 set32BE(fixUpLocation
, accumulator
);
1427 case ld::Fixup::kindStoreBigEndian64
:
1428 set64BE(fixUpLocation
, accumulator
);
1430 case ld::Fixup::kindStoreX86PCRel8
:
1431 case ld::Fixup::kindStoreX86BranchPCRel8
:
1432 if ( fit
->contentAddendOnly
)
1433 delta
= accumulator
;
1435 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 1);
1436 rangeCheck8(delta
, state
, atom
, fit
);
1437 *fixUpLocation
= delta
;
1439 case ld::Fixup::kindStoreX86PCRel16
:
1440 if ( fit
->contentAddendOnly
)
1441 delta
= accumulator
;
1443 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 2);
1444 rangeCheck16(delta
, state
, atom
, fit
);
1445 set16LE(fixUpLocation
, delta
);
1447 case ld::Fixup::kindStoreX86BranchPCRel32
:
1448 if ( fit
->contentAddendOnly
)
1449 delta
= accumulator
;
1451 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1452 rangeCheckBranch32(delta
, state
, atom
, fit
);
1453 set32LE(fixUpLocation
, delta
);
1455 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
1456 case ld::Fixup::kindStoreX86PCRel32GOT
:
1457 case ld::Fixup::kindStoreX86PCRel32
:
1458 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
1459 if ( fit
->contentAddendOnly
)
1460 delta
= accumulator
;
1462 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1463 rangeCheckRIP32(delta
, state
, atom
, fit
);
1464 set32LE(fixUpLocation
, delta
);
1466 case ld::Fixup::kindStoreX86PCRel32_1
:
1467 if ( fit
->contentAddendOnly
)
1468 delta
= accumulator
- 1;
1470 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 5);
1471 rangeCheckRIP32(delta
, state
, atom
, fit
);
1472 set32LE(fixUpLocation
, delta
);
1474 case ld::Fixup::kindStoreX86PCRel32_2
:
1475 if ( fit
->contentAddendOnly
)
1476 delta
= accumulator
- 2;
1478 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 6);
1479 rangeCheckRIP32(delta
, state
, atom
, fit
);
1480 set32LE(fixUpLocation
, delta
);
1482 case ld::Fixup::kindStoreX86PCRel32_4
:
1483 if ( fit
->contentAddendOnly
)
1484 delta
= accumulator
- 4;
1486 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1487 rangeCheckRIP32(delta
, state
, atom
, fit
);
1488 set32LE(fixUpLocation
, delta
);
1490 case ld::Fixup::kindStoreX86Abs32TLVLoad
:
1491 set32LE(fixUpLocation
, accumulator
);
1493 case ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA
:
1494 assert(_options
.outputKind() != Options::kObjectFile
);
1495 // TLV entry was optimized away, change movl instruction to a leal
1496 if ( fixUpLocation
[-1] != 0xA1 )
1497 throw "TLV load reloc does not point to a movl instruction";
1498 fixUpLocation
[-1] = 0xB8;
1499 set32LE(fixUpLocation
, accumulator
);
1501 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
1502 assert(_options
.outputKind() != Options::kObjectFile
);
1503 // GOT entry was optimized away, change movq instruction to a leaq
1504 if ( fixUpLocation
[-2] != 0x8B )
1505 throw "GOT load reloc does not point to a movq instruction";
1506 fixUpLocation
[-2] = 0x8D;
1507 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1508 rangeCheckRIP32(delta
, state
, atom
, fit
);
1509 set32LE(fixUpLocation
, delta
);
1511 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
1512 assert(_options
.outputKind() != Options::kObjectFile
);
1513 // TLV entry was optimized away, change movq instruction to a leaq
1514 if ( fixUpLocation
[-2] != 0x8B )
1515 throw "TLV load reloc does not point to a movq instruction";
1516 fixUpLocation
[-2] = 0x8D;
1517 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1518 rangeCheckRIP32(delta
, state
, atom
, fit
);
1519 set32LE(fixUpLocation
, delta
);
1521 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
1522 accumulator
= addressOf(state
, fit
, &toTarget
);
1523 // fall into kindStoreARMLoad12 case
1524 case ld::Fixup::kindStoreARMLoad12
:
1525 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1526 rangeCheckARM12(delta
, state
, atom
, fit
);
1527 instruction
= get32LE(fixUpLocation
);
1529 newInstruction
= instruction
& 0xFFFFF000;
1530 newInstruction
|= ((uint32_t)delta
& 0xFFF);
1533 newInstruction
= instruction
& 0xFF7FF000;
1534 newInstruction
|= ((uint32_t)(-delta
) & 0xFFF);
1536 set32LE(fixUpLocation
, newInstruction
);
1538 case ld::Fixup::kindDtraceExtra
:
1540 case ld::Fixup::kindStoreX86DtraceCallSiteNop
:
1541 if ( _options
.outputKind() != Options::kObjectFile
) {
1542 // change call site to a NOP
1543 fixUpLocation
[-1] = 0x90; // 1-byte nop
1544 fixUpLocation
[0] = 0x0F; // 4-byte nop
1545 fixUpLocation
[1] = 0x1F;
1546 fixUpLocation
[2] = 0x40;
1547 fixUpLocation
[3] = 0x00;
1550 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear
:
1551 if ( _options
.outputKind() != Options::kObjectFile
) {
1552 // change call site to a clear eax
1553 fixUpLocation
[-1] = 0x33; // xorl eax,eax
1554 fixUpLocation
[0] = 0xC0;
1555 fixUpLocation
[1] = 0x90; // 1-byte nop
1556 fixUpLocation
[2] = 0x90; // 1-byte nop
1557 fixUpLocation
[3] = 0x90; // 1-byte nop
1560 case ld::Fixup::kindStoreARMDtraceCallSiteNop
:
1561 if ( _options
.outputKind() != Options::kObjectFile
) {
1562 // change call site to a NOP
1563 set32LE(fixUpLocation
, 0xE1A00000);
1566 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear
:
1567 if ( _options
.outputKind() != Options::kObjectFile
) {
1568 // change call site to 'eor r0, r0, r0'
1569 set32LE(fixUpLocation
, 0xE0200000);
1572 case ld::Fixup::kindStoreThumbDtraceCallSiteNop
:
1573 if ( _options
.outputKind() != Options::kObjectFile
) {
1574 // change 32-bit blx call site to two thumb NOPs
1575 set32LE(fixUpLocation
, 0x46C046C0);
1578 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear
:
1579 if ( _options
.outputKind() != Options::kObjectFile
) {
1580 // change 32-bit blx call site to 'nop', 'eor r0, r0'
1581 set32LE(fixUpLocation
, 0x46C04040);
1584 case ld::Fixup::kindStoreARM64DtraceCallSiteNop
:
1585 if ( _options
.outputKind() != Options::kObjectFile
) {
1586 // change call site to a NOP
1587 set32LE(fixUpLocation
, 0xD503201F);
1590 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear
:
1591 if ( _options
.outputKind() != Options::kObjectFile
) {
1592 // change call site to 'MOVZ X0,0'
1593 set32LE(fixUpLocation
, 0xD2800000);
1596 case ld::Fixup::kindLazyTarget
:
1597 case ld::Fixup::kindIslandTarget
:
1599 case ld::Fixup::kindSetLazyOffset
:
1600 assert(fit
->binding
== ld::Fixup::bindingDirectlyBound
);
1601 accumulator
= this->lazyBindingInfoOffsetForLazyPointerAddress(fit
->u
.target
->finalAddress());
1603 case ld::Fixup::kindDataInCodeStartData
:
1604 case ld::Fixup::kindDataInCodeStartJT8
:
1605 case ld::Fixup::kindDataInCodeStartJT16
:
1606 case ld::Fixup::kindDataInCodeStartJT32
:
1607 case ld::Fixup::kindDataInCodeStartJTA32
:
1608 case ld::Fixup::kindDataInCodeEnd
:
1610 case ld::Fixup::kindLinkerOptimizationHint
:
1611 // expand table of address/offsets used by hints
1612 lohExtra
.addend
= fit
->u
.addend
;
1613 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta1
<< 2)] = NULL
;
1614 if ( lohExtra
.info
.count
> 0 )
1615 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta2
<< 2)] = NULL
;
1616 if ( lohExtra
.info
.count
> 1 )
1617 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta3
<< 2)] = NULL
;
1618 if ( lohExtra
.info
.count
> 2 )
1619 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta4
<< 2)] = NULL
;
1621 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
1622 accumulator
= addressOf(state
, fit
, &toTarget
);
1623 thumbTarget
= targetIsThumb(state
, fit
);
1626 if ( fit
->contentAddendOnly
)
1628 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1629 set32LE(fixUpLocation
, accumulator
);
1631 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
1632 accumulator
= addressOf(state
, fit
, &toTarget
);
1633 if ( fit
->contentAddendOnly
)
1635 set64LE(fixUpLocation
, accumulator
);
1637 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
1638 accumulator
= addressOf(state
, fit
, &toTarget
);
1639 if ( fit
->contentAddendOnly
)
1641 set32BE(fixUpLocation
, accumulator
);
1643 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
1644 accumulator
= addressOf(state
, fit
, &toTarget
);
1645 if ( fit
->contentAddendOnly
)
1647 set64BE(fixUpLocation
, accumulator
);
1649 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32
:
1650 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1651 set32LE(fixUpLocation
, accumulator
);
1653 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64
:
1654 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1655 set64LE(fixUpLocation
, accumulator
);
1657 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
1658 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
1659 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
1660 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
1661 accumulator
= addressOf(state
, fit
, &toTarget
);
1662 if ( fit
->contentDetlaToAddendOnly
)
1664 if ( fit
->contentAddendOnly
)
1667 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1668 rangeCheckRIP32(delta
, state
, atom
, fit
);
1669 set32LE(fixUpLocation
, delta
);
1671 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad
:
1672 set32LE(fixUpLocation
, accumulator
);
1674 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA
:
1675 // TLV entry was optimized away, change movl instruction to a leal
1676 if ( fixUpLocation
[-1] != 0xA1 )
1677 throw "TLV load reloc does not point to a movl <abs-address>,<reg> instruction";
1678 fixUpLocation
[-1] = 0xB8;
1679 accumulator
= addressOf(state
, fit
, &toTarget
);
1680 set32LE(fixUpLocation
, accumulator
);
1682 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
1683 // GOT entry was optimized away, change movq instruction to a leaq
1684 if ( fixUpLocation
[-2] != 0x8B )
1685 throw "GOT load reloc does not point to a movq instruction";
1686 fixUpLocation
[-2] = 0x8D;
1687 accumulator
= addressOf(state
, fit
, &toTarget
);
1688 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1689 rangeCheckRIP32(delta
, state
, atom
, fit
);
1690 set32LE(fixUpLocation
, delta
);
1692 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
1693 // TLV entry was optimized away, change movq instruction to a leaq
1694 if ( fixUpLocation
[-2] != 0x8B )
1695 throw "TLV load reloc does not point to a movq instruction";
1696 fixUpLocation
[-2] = 0x8D;
1697 accumulator
= addressOf(state
, fit
, &toTarget
);
1698 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1699 rangeCheckRIP32(delta
, state
, atom
, fit
);
1700 set32LE(fixUpLocation
, delta
);
1702 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
1703 accumulator
= addressOf(state
, fit
, &toTarget
);
1704 thumbTarget
= targetIsThumb(state
, fit
);
1705 if ( toTarget
->contentType() == ld::Atom::typeBranchIsland
) {
1706 // Branching to island. If ultimate target is in range, branch there directly.
1707 for (ld::Fixup::iterator islandfit
= toTarget
->fixupsBegin(), end
=toTarget
->fixupsEnd(); islandfit
!= end
; ++islandfit
) {
1708 if ( islandfit
->kind
== ld::Fixup::kindIslandTarget
) {
1709 const ld::Atom
* islandTarget
= NULL
;
1710 uint64_t islandTargetAddress
= addressOf(state
, islandfit
, &islandTarget
);
1711 delta
= islandTargetAddress
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1712 if ( checkArmBranch24Displacement(delta
) ) {
1713 toTarget
= islandTarget
;
1714 accumulator
= islandTargetAddress
;
1715 thumbTarget
= targetIsThumb(state
, islandfit
);
1723 if ( fit
->contentDetlaToAddendOnly
)
1725 // fall into kindStoreARMBranch24 case
1726 case ld::Fixup::kindStoreARMBranch24
:
1727 // The pc added will be +8 from the pc
1728 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1729 rangeCheckARMBranch24(delta
, state
, atom
, fit
);
1730 instruction
= get32LE(fixUpLocation
);
1731 // Make sure we are calling arm with bl, thumb with blx
1732 is_bl
= ((instruction
& 0xFF000000) == 0xEB000000);
1733 is_blx
= ((instruction
& 0xFE000000) == 0xFA000000);
1734 is_b
= !is_blx
&& ((instruction
& 0x0F000000) == 0x0A000000);
1735 if ( (is_bl
| is_blx
) && thumbTarget
) {
1736 uint32_t opcode
= 0xFA000000; // force to be blx
1737 uint32_t disp
= (uint32_t)(delta
>> 2) & 0x00FFFFFF;
1738 uint32_t h_bit
= (uint32_t)(delta
<< 23) & 0x01000000;
1739 newInstruction
= opcode
| h_bit
| disp
;
1741 else if ( (is_bl
| is_blx
) && !thumbTarget
) {
1742 uint32_t opcode
= 0xEB000000; // force to be bl
1743 uint32_t disp
= (uint32_t)(delta
>> 2) & 0x00FFFFFF;
1744 newInstruction
= opcode
| disp
;
1746 else if ( is_b
&& thumbTarget
) {
1747 if ( fit
->contentDetlaToAddendOnly
)
1748 newInstruction
= (instruction
& 0xFF000000) | ((uint32_t)(delta
>> 2) & 0x00FFFFFF);
1750 throwf("no pc-rel bx arm instruction. Can't fix up branch to %s in %s",
1751 referenceTargetAtomName(state
, fit
), atom
->name());
1753 else if ( !is_bl
&& !is_blx
&& thumbTarget
) {
1754 throwf("don't know how to convert instruction %x referencing %s to thumb",
1755 instruction
, referenceTargetAtomName(state
, fit
));
1758 newInstruction
= (instruction
& 0xFF000000) | ((uint32_t)(delta
>> 2) & 0x00FFFFFF);
1760 set32LE(fixUpLocation
, newInstruction
);
1762 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
1763 accumulator
= addressOf(state
, fit
, &toTarget
);
1764 thumbTarget
= targetIsThumb(state
, fit
);
1765 if ( toTarget
->contentType() == ld::Atom::typeBranchIsland
) {
1766 // branching to island, so see if ultimate target is in range
1767 // and if so branch to ultimate target instead.
1768 for (ld::Fixup::iterator islandfit
= toTarget
->fixupsBegin(), end
=toTarget
->fixupsEnd(); islandfit
!= end
; ++islandfit
) {
1769 if ( islandfit
->kind
== ld::Fixup::kindIslandTarget
) {
1770 const ld::Atom
* islandTarget
= NULL
;
1771 uint64_t islandTargetAddress
= addressOf(state
, islandfit
, &islandTarget
);
1772 if ( !fit
->contentDetlaToAddendOnly
) {
1773 if ( targetIsThumb(state
, islandfit
) ) {
1774 // Thumb to thumb branch, we will be generating a bl instruction.
1775 // Delta is always even, so mask out thumb bit in target.
1776 islandTargetAddress
&= -2ULL;
1779 // Target is not thumb, we will be generating a blx instruction
1780 // Since blx cannot have the low bit set, set bit[1] of the target to
1781 // bit[1] of the base address, so that the difference is a multiple of
1783 islandTargetAddress
&= -3ULL;
1784 islandTargetAddress
|= ((atom
->finalAddress() + fit
->offsetInAtom
) & 2LL);
1787 delta
= islandTargetAddress
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1788 if ( checkThumbBranch22Displacement(delta
) ) {
1789 toTarget
= islandTarget
;
1790 accumulator
= islandTargetAddress
;
1791 thumbTarget
= targetIsThumb(state
, islandfit
);
1799 if ( fit
->contentDetlaToAddendOnly
)
1801 // fall into kindStoreThumbBranch22 case
1802 case ld::Fixup::kindStoreThumbBranch22
:
1803 instruction
= get32LE(fixUpLocation
);
1804 is_bl
= ((instruction
& 0xD000F800) == 0xD000F000);
1805 is_blx
= ((instruction
& 0xD000F800) == 0xC000F000);
1806 is_b
= ((instruction
& 0xD000F800) == 0x9000F000);
1807 if ( !fit
->contentDetlaToAddendOnly
) {
1808 if ( thumbTarget
) {
1809 // Thumb to thumb branch, we will be generating a bl instruction.
1810 // Delta is always even, so mask out thumb bit in target.
1811 accumulator
&= -2ULL;
1814 // Target is not thumb, we will be generating a blx instruction
1815 // Since blx cannot have the low bit set, set bit[1] of the target to
1816 // bit[1] of the base address, so that the difference is a multiple of
1818 accumulator
&= -3ULL;
1819 accumulator
|= ((atom
->finalAddress() + fit
->offsetInAtom
) & 2LL);
1822 // The pc added will be +4 from the pc
1823 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1824 // <rdar://problem/16652542> support bl in very large .o files
1825 if ( fit
->contentDetlaToAddendOnly
) {
1826 while ( delta
< (-16777216LL) )
1829 rangeCheckThumbBranch22(delta
, state
, atom
, fit
);
1830 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
1831 // The instruction is really two instructions:
1832 // The lower 16 bits are the first instruction, which contains the high
1833 // 11 bits of the displacement.
1834 // The upper 16 bits are the second instruction, which contains the low
1835 // 11 bits of the displacement, as well as differentiating bl and blx.
1836 uint32_t s
= (uint32_t)(delta
>> 24) & 0x1;
1837 uint32_t i1
= (uint32_t)(delta
>> 23) & 0x1;
1838 uint32_t i2
= (uint32_t)(delta
>> 22) & 0x1;
1839 uint32_t imm10
= (uint32_t)(delta
>> 12) & 0x3FF;
1840 uint32_t imm11
= (uint32_t)(delta
>> 1) & 0x7FF;
1841 uint32_t j1
= (i1
== s
);
1842 uint32_t j2
= (i2
== s
);
1845 instruction
= 0xD000F000; // keep bl
1847 instruction
= 0xC000F000; // change to blx
1849 else if ( is_blx
) {
1851 instruction
= 0xD000F000; // change to bl
1853 instruction
= 0xC000F000; // keep blx
1856 instruction
= 0x9000F000; // keep b
1857 if ( !thumbTarget
&& !fit
->contentDetlaToAddendOnly
) {
1858 throwf("armv7 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1859 referenceTargetAtomName(state
, fit
), atom
->name());
1864 throwf("don't know how to convert branch instruction %x referencing %s to bx",
1865 instruction
, referenceTargetAtomName(state
, fit
));
1866 instruction
= 0x9000F000; // keep b
1868 uint32_t nextDisp
= (j1
<< 13) | (j2
<< 11) | imm11
;
1869 uint32_t firstDisp
= (s
<< 10) | imm10
;
1870 newInstruction
= instruction
| (nextDisp
<< 16) | firstDisp
;
1871 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, instruction=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
1872 // s, j1, j2, imm10, imm11, instruction, firstDisp, nextDisp, newInstruction, delta, atom->name(), toTarget->name());
1873 set32LE(fixUpLocation
, newInstruction
);
1876 // The instruction is really two instructions:
1877 // The lower 16 bits are the first instruction, which contains the high
1878 // 11 bits of the displacement.
1879 // The upper 16 bits are the second instruction, which contains the low
1880 // 11 bits of the displacement, as well as differentiating bl and blx.
1881 uint32_t firstDisp
= (uint32_t)(delta
>> 12) & 0x7FF;
1882 uint32_t nextDisp
= (uint32_t)(delta
>> 1) & 0x7FF;
1883 if ( is_bl
&& !thumbTarget
) {
1884 instruction
= 0xE800F000;
1886 else if ( is_blx
&& thumbTarget
) {
1887 instruction
= 0xF800F000;
1890 instruction
= 0x9000F000; // keep b
1891 if ( !thumbTarget
&& !fit
->contentDetlaToAddendOnly
) {
1892 throwf("armv6 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1893 referenceTargetAtomName(state
, fit
), atom
->name());
1897 instruction
= instruction
& 0xF800F800;
1899 newInstruction
= instruction
| (nextDisp
<< 16) | firstDisp
;
1900 set32LE(fixUpLocation
, newInstruction
);
1903 case ld::Fixup::kindStoreARMLow16
:
1905 uint32_t imm4
= (accumulator
& 0x0000F000) >> 12;
1906 uint32_t imm12
= accumulator
& 0x00000FFF;
1907 instruction
= get32LE(fixUpLocation
);
1908 newInstruction
= (instruction
& 0xFFF0F000) | (imm4
<< 16) | imm12
;
1909 set32LE(fixUpLocation
, newInstruction
);
1912 case ld::Fixup::kindStoreARMHigh16
:
1914 uint32_t imm4
= (accumulator
& 0xF0000000) >> 28;
1915 uint32_t imm12
= (accumulator
& 0x0FFF0000) >> 16;
1916 instruction
= get32LE(fixUpLocation
);
1917 newInstruction
= (instruction
& 0xFFF0F000) | (imm4
<< 16) | imm12
;
1918 set32LE(fixUpLocation
, newInstruction
);
1921 case ld::Fixup::kindStoreThumbLow16
:
1923 uint32_t imm4
= (accumulator
& 0x0000F000) >> 12;
1924 uint32_t i
= (accumulator
& 0x00000800) >> 11;
1925 uint32_t imm3
= (accumulator
& 0x00000700) >> 8;
1926 uint32_t imm8
= accumulator
& 0x000000FF;
1927 instruction
= get32LE(fixUpLocation
);
1928 newInstruction
= (instruction
& 0x8F00FBF0) | imm4
| (i
<< 10) | (imm3
<< 28) | (imm8
<< 16);
1929 set32LE(fixUpLocation
, newInstruction
);
1932 case ld::Fixup::kindStoreThumbHigh16
:
1934 uint32_t imm4
= (accumulator
& 0xF0000000) >> 28;
1935 uint32_t i
= (accumulator
& 0x08000000) >> 27;
1936 uint32_t imm3
= (accumulator
& 0x07000000) >> 24;
1937 uint32_t imm8
= (accumulator
& 0x00FF0000) >> 16;
1938 instruction
= get32LE(fixUpLocation
);
1939 newInstruction
= (instruction
& 0x8F00FBF0) | imm4
| (i
<< 10) | (imm3
<< 28) | (imm8
<< 16);
1940 set32LE(fixUpLocation
, newInstruction
);
1943 #if SUPPORT_ARCH_arm64
1944 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
1945 accumulator
= addressOf(state
, fit
, &toTarget
);
1946 // fall into kindStoreARM64Branch26 case
1947 case ld::Fixup::kindStoreARM64Branch26
:
1948 if ( fit
->contentAddendOnly
)
1949 delta
= accumulator
;
1951 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
);
1952 rangeCheckARM64Branch26(delta
, state
, atom
, fit
);
1953 instruction
= get32LE(fixUpLocation
);
1954 newInstruction
= (instruction
& 0xFC000000) | ((uint32_t)(delta
>> 2) & 0x03FFFFFF);
1955 set32LE(fixUpLocation
, newInstruction
);
1957 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
1958 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
1959 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
1960 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
1961 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
1962 accumulator
= addressOf(state
, fit
, &toTarget
);
1963 // fall into kindStoreARM64Branch26 case
1964 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
1965 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
1966 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
1967 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
1968 case ld::Fixup::kindStoreARM64Page21
:
1970 // the ADRP instruction adds the imm << 12 to the page that the pc is on
1971 if ( fit
->contentAddendOnly
)
1974 delta
= (accumulator
& (-4096)) - ((atom
->finalAddress() + fit
->offsetInAtom
) & (-4096));
1975 rangeCheckARM64Page21(delta
, state
, atom
, fit
);
1976 instruction
= get32LE(fixUpLocation
);
1977 uint32_t immhi
= (delta
>> 9) & (0x00FFFFE0);
1978 uint32_t immlo
= (delta
<< 17) & (0x60000000);
1979 newInstruction
= (instruction
& 0x9F00001F) | immlo
| immhi
;
1980 set32LE(fixUpLocation
, newInstruction
);
1983 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
1984 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
1985 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
1986 accumulator
= addressOf(state
, fit
, &toTarget
);
1987 // fall into kindAddressARM64PageOff12 case
1988 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12
:
1989 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
1990 case ld::Fixup::kindStoreARM64PageOff12
:
1992 uint32_t offset
= accumulator
& 0x00000FFF;
1993 instruction
= get32LE(fixUpLocation
);
1994 // LDR/STR instruction have implicit scale factor, need to compensate for that
1995 if ( instruction
& 0x08000000 ) {
1996 uint32_t implictShift
= ((instruction
>> 30) & 0x3);
1997 switch ( implictShift
) {
1999 if ( (instruction
& 0x04800000) == 0x04800000 ) {
2000 // vector and byte LDR/STR have same "size" bits, need to check other bits to differenciate
2002 if ( (offset
& 0xF) != 0 ) {
2003 throwf("128-bit LDR/STR not 16-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2004 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
2005 addressOf(state
, fit
, &toTarget
));
2010 if ( (offset
& 0x1) != 0 ) {
2011 throwf("16-bit LDR/STR not 2-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2012 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
2013 addressOf(state
, fit
, &toTarget
));
2017 if ( (offset
& 0x3) != 0 ) {
2018 throwf("32-bit LDR/STR not 4-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2019 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
2020 addressOf(state
, fit
, &toTarget
));
2024 if ( (offset
& 0x7) != 0 ) {
2025 throwf("64-bit LDR/STR not 8-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2026 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
2027 addressOf(state
, fit
, &toTarget
));
2031 // compensate for implicit scale
2032 offset
>>= implictShift
;
2034 if ( fit
->contentAddendOnly
)
2036 uint32_t imm12
= offset
<< 10;
2037 newInstruction
= (instruction
& 0xFFC003FF) | imm12
;
2038 set32LE(fixUpLocation
, newInstruction
);
2041 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
2042 accumulator
= addressOf(state
, fit
, &toTarget
);
2043 // fall into kindStoreARM64GOTLoadPage21 case
2044 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
2046 // GOT entry was optimized away, change LDR instruction to a ADD
2047 instruction
= get32LE(fixUpLocation
);
2048 if ( (instruction
& 0xBFC00000) != 0xB9400000 )
2049 throwf("GOT load reloc does not point to a LDR instruction in %s", atom
->name());
2050 uint32_t offset
= accumulator
& 0x00000FFF;
2051 uint32_t imm12
= offset
<< 10;
2052 newInstruction
= 0x91000000 | imm12
| (instruction
& 0x000003FF);
2053 set32LE(fixUpLocation
, newInstruction
);
2056 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
2057 accumulator
= addressOf(state
, fit
, &toTarget
);
2058 // fall into kindStoreARM64TLVPLeaPageOff12 case
2059 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
2061 // TLV thunk in same linkage unit, so LEA it directly, changing LDR instruction to a ADD
2062 instruction
= get32LE(fixUpLocation
);
2063 if ( (instruction
& 0xBFC00000) != 0xB9400000 )
2064 throwf("TLV load reloc does not point to a LDR instruction in %s", atom
->name());
2065 uint32_t offset
= accumulator
& 0x00000FFF;
2066 uint32_t imm12
= offset
<< 10;
2067 newInstruction
= 0x91000000 | imm12
| (instruction
& 0x000003FF);
2068 set32LE(fixUpLocation
, newInstruction
);
2071 case ld::Fixup::kindStoreARM64PointerToGOT
:
2072 set64LE(fixUpLocation
, accumulator
);
2074 case ld::Fixup::kindStoreARM64PCRelToGOT
:
2075 if ( fit
->contentAddendOnly
)
2076 delta
= accumulator
;
2078 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
);
2079 set32LE(fixUpLocation
, delta
);
2085 #if SUPPORT_ARCH_arm64
2086 // after all fixups are done on atom, if there are potential optimizations, do those
2087 if ( (usedByHints
.size() != 0) && (_options
.outputKind() != Options::kObjectFile
) && !_options
.ignoreOptimizationHints() ) {
2088 // fill in second part of usedByHints map, so we can see the target of fixups that might be optimized
2089 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2090 switch ( fit
->kind
) {
2091 case ld::Fixup::kindLinkerOptimizationHint
:
2092 case ld::Fixup::kindNoneFollowOn
:
2093 case ld::Fixup::kindNoneGroupSubordinate
:
2094 case ld::Fixup::kindNoneGroupSubordinateFDE
:
2095 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
2096 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
2099 if ( fit
->firstInCluster() ) {
2100 std::map
<uint32_t, const Fixup
*>::iterator pos
= usedByHints
.find(fit
->offsetInAtom
);
2101 if ( pos
!= usedByHints
.end() ) {
2102 assert(pos
->second
== NULL
&& "two fixups in same hint location");
2104 //fprintf(stderr, "setting %s usedByHints[0x%04X], kind = %d\n", atom->name(), fit->offsetInAtom, fit->kind);
2110 // apply hints pass 1
2111 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2112 if ( fit
->kind
!= ld::Fixup::kindLinkerOptimizationHint
)
2114 InstructionInfo infoA
;
2115 InstructionInfo infoB
;
2116 InstructionInfo infoC
;
2117 InstructionInfo infoD
;
2118 LoadStoreInfo ldrInfoB
, ldrInfoC
;
2122 bool targetFourByteAligned
;
2123 bool literalableSize
, isADRP
, isADD
, isLDR
, isSTR
;
2124 //uint8_t loadSize, destReg;
2125 //uint32_t scaledOffset;
2127 ld::Fixup::LOH_arm64 alt
;
2128 alt
.addend
= fit
->u
.addend
;
2129 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta1
<< 2), &infoA
);
2130 if ( alt
.info
.count
> 0 )
2131 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta2
<< 2), &infoB
);
2132 if ( alt
.info
.count
> 1 )
2133 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta3
<< 2), &infoC
);
2134 if ( alt
.info
.count
> 2 )
2135 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta4
<< 2), &infoD
);
2137 if ( _options
.sharedRegionEligible() ) {
2138 if ( _options
.sharedRegionEncodingV2() ) {
2139 // In v2 format, all references might be move at dyld shared cache creation time
2140 usableSegment
= false;
2143 // In v1 format, only references to something in __TEXT segment could be optimized
2144 usableSegment
= (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0);
2148 // main executables can optimize any reference
2149 usableSegment
= true;
2152 switch ( alt
.info
.kind
) {
2153 case LOH_ARM64_ADRP_ADRP
:
2154 // processed in pass 2 because some ADRP may have been removed
2156 case LOH_ARM64_ADRP_LDR
:
2157 LOH_ASSERT(alt
.info
.count
== 1);
2158 LOH_ASSERT(isPageKind(infoA
.fixup
));
2159 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2160 LOH_ASSERT(infoA
.target
== infoB
.target
);
2161 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2162 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2164 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2165 // silently ignore LDRs transformed to ADD by TLV pass
2166 if ( !isLDR
&& infoB
.fixup
->kind
== ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
)
2169 LOH_ASSERT(ldrInfoB
.baseReg
== adrpInfoA
.destReg
);
2170 LOH_ASSERT(ldrInfoB
.offset
== (infoA
.targetAddress
& 0x00000FFF));
2171 literalableSize
= ( (ldrInfoB
.size
!= 1) && (ldrInfoB
.size
!= 2) );
2172 targetFourByteAligned
= ( (infoA
.targetAddress
& 0x3) == 0 );
2173 if ( literalableSize
&& usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2174 set32LE(infoA
.instructionContent
, makeNOP());
2175 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2176 if ( _options
.verboseOptimizationHints() )
2177 fprintf(stderr
, "adrp-ldr at 0x%08llX transformed to LDR literal, usableSegment=%d usableSegment\n", infoB
.instructionAddress
, usableSegment
);
2180 if ( _options
.verboseOptimizationHints() )
2181 fprintf(stderr
, "adrp-ldr at 0x%08llX not transformed, isLDR=%d, literalableSize=%d, inRange=%d, usableSegment=%d, scaledOffset=%d\n",
2182 infoB
.instructionAddress
, isLDR
, literalableSize
, withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
), usableSegment
, ldrInfoB
.offset
);
2185 case LOH_ARM64_ADRP_ADD_LDR
:
2186 LOH_ASSERT(alt
.info
.count
== 2);
2187 LOH_ASSERT(isPageKind(infoA
.fixup
));
2188 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2189 LOH_ASSERT(infoC
.fixup
== NULL
);
2190 LOH_ASSERT(infoA
.target
== infoB
.target
);
2191 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2192 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2194 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2196 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2197 isLDR
= parseLoadOrStore(infoC
.instruction
, ldrInfoC
);
2199 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2200 targetFourByteAligned
= ( ((infoB
.targetAddress
+ldrInfoC
.offset
) & 0x3) == 0 );
2201 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2202 if ( literalableSize
&& usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2203 // can do T1 transformation to LDR literal
2204 set32LE(infoA
.instructionContent
, makeNOP());
2205 set32LE(infoB
.instructionContent
, makeNOP());
2206 set32LE(infoC
.instructionContent
, makeLDR_literal(ldrInfoC
, infoA
.targetAddress
+ldrInfoC
.offset
, infoC
.instructionAddress
));
2207 if ( _options
.verboseOptimizationHints() ) {
2208 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC
.instructionAddress
);
2211 else if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2212 // can to T4 transformation and turn ADRP/ADD into ADR
2213 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
+ldrInfoC
.offset
, infoA
.instructionAddress
));
2214 set32LE(infoB
.instructionContent
, makeNOP());
2215 ldrInfoC
.offset
= 0; // offset is now in ADR instead of ADD or LDR
2216 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2217 set32LE(infoC
.instructionContent
, infoC
.instruction
& 0xFFC003FF);
2218 if ( _options
.verboseOptimizationHints() )
2219 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoB
.instructionAddress
);
2221 else if ( ((infoB
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2222 // can do T2 transformation by merging ADD into LD
2224 set32LE(infoB
.instructionContent
, makeNOP());
2225 ldrInfoC
.offset
+= addInfoB
.addend
;
2226 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2227 if ( _options
.verboseOptimizationHints() )
2228 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T2 transformed to ADRP/LDR \n", infoC
.instructionAddress
);
2231 if ( _options
.verboseOptimizationHints() )
2232 fprintf(stderr
, "adrp-add-ldr at 0x%08llX could not be transformed, loadSize=%d, literalableSize=%d, inRange=%d, usableSegment=%d, targetFourByteAligned=%d, imm12=%d\n",
2233 infoC
.instructionAddress
, ldrInfoC
.size
, literalableSize
, withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
), usableSegment
, targetFourByteAligned
, ldrInfoC
.offset
);
2236 case LOH_ARM64_ADRP_ADD
:
2237 LOH_ASSERT(alt
.info
.count
== 1);
2238 LOH_ASSERT(isPageKind(infoA
.fixup
));
2239 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2240 LOH_ASSERT(infoA
.target
== infoB
.target
);
2241 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2242 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2244 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2246 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2247 if ( usableSegment
&& withinOneMeg(infoA
.targetAddress
, infoA
.instructionAddress
) ) {
2248 // can do T4 transformation and use ADR
2249 set32LE(infoA
.instructionContent
, makeADR(addInfoB
.destReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2250 set32LE(infoB
.instructionContent
, makeNOP());
2251 if ( _options
.verboseOptimizationHints() )
2252 fprintf(stderr
, "adrp-add at 0x%08llX transformed to ADR\n", infoB
.instructionAddress
);
2255 if ( _options
.verboseOptimizationHints() )
2256 fprintf(stderr
, "adrp-add at 0x%08llX not transformed, isAdd=%d, inRange=%d, usableSegment=%d\n",
2257 infoB
.instructionAddress
, isADD
, withinOneMeg(infoA
.targetAddress
, infoA
.instructionAddress
), usableSegment
);
2260 case LOH_ARM64_ADRP_LDR_GOT_LDR
:
2261 LOH_ASSERT(alt
.info
.count
== 2);
2262 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2263 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2264 LOH_ASSERT(infoC
.fixup
== NULL
);
2265 LOH_ASSERT(infoA
.target
== infoB
.target
);
2266 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2267 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2269 isLDR
= parseLoadOrStore(infoC
.instruction
, ldrInfoC
);
2271 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2272 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2274 // target of GOT is external
2275 LOH_ASSERT(ldrInfoB
.size
== 8);
2276 LOH_ASSERT(!ldrInfoB
.isFloat
);
2277 LOH_ASSERT(ldrInfoC
.baseReg
== ldrInfoB
.reg
);
2278 //fprintf(stderr, "infoA.target=%p, %s, infoA.targetAddress=0x%08llX\n", infoA.target, infoA.target->name(), infoA.targetAddress);
2279 targetFourByteAligned
= ( ((infoA
.targetAddress
+ ldrInfoC
.offset
) & 0x3) == 0 );
2280 if ( usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
+ ldrInfoC
.offset
) ) {
2281 // can do T5 transform
2282 set32LE(infoA
.instructionContent
, makeNOP());
2283 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2284 if ( _options
.verboseOptimizationHints() ) {
2285 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T5 transformed to LDR literal of GOT plus LDR\n", infoC
.instructionAddress
);
2289 if ( _options
.verboseOptimizationHints() )
2290 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX no optimization done\n", infoC
.instructionAddress
);
2294 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2295 LOH_ASSERT(addInfoB
.srcReg
== adrpInfoA
.destReg
);
2296 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2297 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2298 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2299 if ( usableSegment
&& literalableSize
&& targetFourByteAligned
&& withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ ldrInfoC
.offset
) ) {
2300 // can do T1 transform
2301 set32LE(infoA
.instructionContent
, makeNOP());
2302 set32LE(infoB
.instructionContent
, makeNOP());
2303 set32LE(infoC
.instructionContent
, makeLDR_literal(ldrInfoC
, infoA
.targetAddress
+ ldrInfoC
.offset
, infoC
.instructionAddress
));
2304 if ( _options
.verboseOptimizationHints() )
2305 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC
.instructionAddress
);
2307 else if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2308 // can do T4 transform
2309 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2310 set32LE(infoB
.instructionContent
, makeNOP());
2311 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2312 if ( _options
.verboseOptimizationHints() ) {
2313 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoC
.instructionAddress
);
2316 else if ( ((infoA
.targetAddress
% ldrInfoC
.size
) == 0) && ((addInfoB
.addend
+ ldrInfoC
.offset
) < 4096) ) {
2317 // can do T2 transform
2318 set32LE(infoB
.instructionContent
, makeNOP());
2319 ldrInfoC
.baseReg
= adrpInfoA
.destReg
;
2320 ldrInfoC
.offset
+= addInfoB
.addend
;
2321 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2322 if ( _options
.verboseOptimizationHints() ) {
2323 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T2 transformed to ADRP/NOP/LDR\n", infoC
.instructionAddress
);
2327 // T3 transform already done by ld::passes:got:doPass()
2328 if ( _options
.verboseOptimizationHints() ) {
2329 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T3 transformed to ADRP/ADD/LDR\n", infoC
.instructionAddress
);
2334 if ( _options
.verboseOptimizationHints() )
2335 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX not ADD or LDR\n", infoC
.instructionAddress
);
2338 case LOH_ARM64_ADRP_ADD_STR
:
2339 LOH_ASSERT(alt
.info
.count
== 2);
2340 LOH_ASSERT(isPageKind(infoA
.fixup
));
2341 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2342 LOH_ASSERT(infoC
.fixup
== NULL
);
2343 LOH_ASSERT(infoA
.target
== infoB
.target
);
2344 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2345 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2347 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2349 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2350 isSTR
= (parseLoadOrStore(infoC
.instruction
, ldrInfoC
) && ldrInfoC
.isStore
);
2352 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2353 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2354 // can to T4 transformation and turn ADRP/ADD into ADR
2355 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
+ldrInfoC
.offset
, infoA
.instructionAddress
));
2356 set32LE(infoB
.instructionContent
, makeNOP());
2357 ldrInfoC
.offset
= 0; // offset is now in ADR instead of ADD or LDR
2358 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2359 set32LE(infoC
.instructionContent
, infoC
.instruction
& 0xFFC003FF);
2360 if ( _options
.verboseOptimizationHints() )
2361 fprintf(stderr
, "adrp-add-str at 0x%08llX T4 transformed to ADR/STR\n", infoB
.instructionAddress
);
2363 else if ( ((infoB
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2364 // can do T2 transformation by merging ADD into STR
2366 set32LE(infoB
.instructionContent
, makeNOP());
2367 ldrInfoC
.offset
+= addInfoB
.addend
;
2368 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2369 if ( _options
.verboseOptimizationHints() )
2370 fprintf(stderr
, "adrp-add-str at 0x%08llX T2 transformed to ADRP/STR \n", infoC
.instructionAddress
);
2373 if ( _options
.verboseOptimizationHints() )
2374 fprintf(stderr
, "adrp-add-str at 0x%08llX could not be transformed, loadSize=%d, inRange=%d, usableSegment=%d, imm12=%d\n",
2375 infoC
.instructionAddress
, ldrInfoC
.size
, withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
), usableSegment
, ldrInfoC
.offset
);
2378 case LOH_ARM64_ADRP_LDR_GOT_STR
:
2379 LOH_ASSERT(alt
.info
.count
== 2);
2380 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2381 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2382 LOH_ASSERT(infoC
.fixup
== NULL
);
2383 LOH_ASSERT(infoA
.target
== infoB
.target
);
2384 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2385 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2387 isSTR
= (parseLoadOrStore(infoC
.instruction
, ldrInfoC
) && ldrInfoC
.isStore
);
2389 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2390 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2392 // target of GOT is external
2393 LOH_ASSERT(ldrInfoB
.size
== 8);
2394 LOH_ASSERT(!ldrInfoB
.isFloat
);
2395 LOH_ASSERT(ldrInfoC
.baseReg
== ldrInfoB
.reg
);
2396 targetFourByteAligned
= ( ((infoA
.targetAddress
+ ldrInfoC
.offset
) & 0x3) == 0 );
2397 if ( usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
+ ldrInfoC
.offset
) ) {
2398 // can do T5 transform
2399 set32LE(infoA
.instructionContent
, makeNOP());
2400 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2401 if ( _options
.verboseOptimizationHints() ) {
2402 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T5 transformed to LDR literal of GOT plus STR\n", infoC
.instructionAddress
);
2406 if ( _options
.verboseOptimizationHints() )
2407 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX no optimization done\n", infoC
.instructionAddress
);
2411 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2412 LOH_ASSERT(addInfoB
.srcReg
== adrpInfoA
.destReg
);
2413 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2414 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2415 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2416 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2417 // can do T4 transform
2418 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2419 set32LE(infoB
.instructionContent
, makeNOP());
2420 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2421 if ( _options
.verboseOptimizationHints() ) {
2422 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADR/STR\n", infoC
.instructionAddress
);
2425 else if ( ((infoA
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2426 // can do T2 transform
2427 set32LE(infoB
.instructionContent
, makeNOP());
2428 ldrInfoC
.baseReg
= adrpInfoA
.destReg
;
2429 ldrInfoC
.offset
+= addInfoB
.addend
;
2430 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2431 if ( _options
.verboseOptimizationHints() ) {
2432 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADRP/NOP/STR\n", infoC
.instructionAddress
);
2436 // T3 transform already done by ld::passes:got:doPass()
2437 if ( _options
.verboseOptimizationHints() ) {
2438 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T3 transformed to ADRP/ADD/STR\n", infoC
.instructionAddress
);
2443 if ( _options
.verboseOptimizationHints() )
2444 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX not ADD or LDR\n", infoC
.instructionAddress
);
2447 case LOH_ARM64_ADRP_LDR_GOT
:
2448 LOH_ASSERT(alt
.info
.count
== 1);
2449 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2450 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2451 LOH_ASSERT(infoA
.target
== infoB
.target
);
2452 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2453 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2454 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2455 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2458 if ( usableSegment
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2459 // can do T5 transform (LDR literal load of GOT)
2460 set32LE(infoA
.instructionContent
, makeNOP());
2461 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2462 if ( _options
.verboseOptimizationHints() ) {
2463 fprintf(stderr
, "adrp-ldr-got at 0x%08llX T5 transformed to NOP/LDR\n", infoC
.instructionAddress
);
2468 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2469 // can do T4 transform (ADR to compute local address)
2470 set32LE(infoA
.instructionContent
, makeADR(addInfoB
.destReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2471 set32LE(infoB
.instructionContent
, makeNOP());
2472 if ( _options
.verboseOptimizationHints() ) {
2473 fprintf(stderr
, "adrp-ldr-got at 0x%08llX T4 transformed to ADR/STR\n", infoC
.instructionAddress
);
2478 if ( _options
.verboseOptimizationHints() )
2479 fprintf(stderr
, "adrp-ldr-got at 0x%08llX not LDR or ADD\n", infoB
.instructionAddress
);
2483 if ( _options
.verboseOptimizationHints() )
2484 fprintf(stderr
, "adrp-ldr-got at 0x%08llX not ADRP\n", infoA
.instructionAddress
);
2488 if ( _options
.verboseOptimizationHints() )
2489 fprintf(stderr
, "unknown hint kind %d alt.info.kind at 0x%08llX\n", alt
.info
.kind
, infoA
.instructionAddress
);
2493 // apply hints pass 2
2494 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2495 if ( fit
->kind
!= ld::Fixup::kindLinkerOptimizationHint
)
2497 InstructionInfo infoA
;
2498 InstructionInfo infoB
;
2499 ld::Fixup::LOH_arm64 alt
;
2500 alt
.addend
= fit
->u
.addend
;
2501 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta1
<< 2), &infoA
);
2502 if ( alt
.info
.count
> 0 )
2503 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta2
<< 2), &infoB
);
2505 switch ( alt
.info
.kind
) {
2506 case LOH_ARM64_ADRP_ADRP
:
2507 LOH_ASSERT(isPageKind(infoA
.fixup
));
2508 LOH_ASSERT(isPageKind(infoB
.fixup
));
2509 if ( (infoA
.instruction
& 0x9F000000) != 0x90000000 ) {
2510 if ( _options
.verboseOptimizationHints() )
2511 fprintf(stderr
, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoA
.instructionAddress
, infoA
.instruction
);
2515 if ( (infoB
.instruction
& 0x9F000000) != 0x90000000 ) {
2516 if ( _options
.verboseOptimizationHints() )
2517 fprintf(stderr
, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoB
.instructionAddress
, infoA
.instruction
);
2521 if ( (infoA
.targetAddress
& (-4096)) == (infoB
.targetAddress
& (-4096)) ) {
2522 set32LE(infoB
.instructionContent
, 0xD503201F);
2532 #endif // SUPPORT_ARCH_arm64
2536 void OutputFile::copyNoOps(uint8_t* from
, uint8_t* to
, bool thumb
)
2538 switch ( _options
.architecture() ) {
2540 case CPU_TYPE_X86_64
:
2541 for (uint8_t* p
=from
; p
< to
; ++p
)
2546 for (uint8_t* p
=from
; p
< to
; p
+= 2)
2547 OSWriteLittleInt16((uint16_t*)p
, 0, 0x46c0);
2550 for (uint8_t* p
=from
; p
< to
; p
+= 4)
2551 OSWriteLittleInt32((uint32_t*)p
, 0, 0xe1a00000);
2555 for (uint8_t* p
=from
; p
< to
; ++p
)
2561 bool OutputFile::takesNoDiskSpace(const ld::Section
* sect
)
2563 switch ( sect
->type() ) {
2564 case ld::Section::typeZeroFill
:
2565 case ld::Section::typeTLVZeroFill
:
2566 return _options
.optimizeZeroFill();
2567 case ld::Section::typePageZero
:
2568 case ld::Section::typeStack
:
2569 case ld::Section::typeAbsoluteSymbols
:
2570 case ld::Section::typeTentativeDefs
:
2578 bool OutputFile::hasZeroForFileOffset(const ld::Section
* sect
)
2580 switch ( sect
->type() ) {
2581 case ld::Section::typeZeroFill
:
2582 case ld::Section::typeTLVZeroFill
:
2583 return _options
.optimizeZeroFill();
2584 case ld::Section::typePageZero
:
2585 case ld::Section::typeStack
:
2586 case ld::Section::typeTentativeDefs
:
2594 void OutputFile::writeAtoms(ld::Internal
& state
, uint8_t* wholeBuffer
)
2596 // have each atom write itself
2597 uint64_t fileOffsetOfEndOfLastAtom
= 0;
2598 uint64_t mhAddress
= 0;
2599 bool lastAtomUsesNoOps
= false;
2600 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2601 ld::Internal::FinalSection
* sect
= *sit
;
2602 if ( sect
->type() == ld::Section::typeMachHeader
)
2603 mhAddress
= sect
->address
;
2604 if ( takesNoDiskSpace(sect
) )
2606 const bool sectionUsesNops
= (sect
->type() == ld::Section::typeCode
);
2607 //fprintf(stderr, "file offset=0x%08llX, section %s\n", sect->fileOffset, sect->sectionName());
2608 std::vector
<const ld::Atom
*>& atoms
= sect
->atoms
;
2609 bool lastAtomWasThumb
= false;
2610 for (std::vector
<const ld::Atom
*>::iterator ait
= atoms
.begin(); ait
!= atoms
.end(); ++ait
) {
2611 const ld::Atom
* atom
= *ait
;
2612 if ( atom
->definition() == ld::Atom::definitionProxy
)
2615 uint64_t fileOffset
= atom
->finalAddress() - sect
->address
+ sect
->fileOffset
;
2616 // check for alignment padding between atoms
2617 if ( (fileOffset
!= fileOffsetOfEndOfLastAtom
) && lastAtomUsesNoOps
) {
2618 this->copyNoOps(&wholeBuffer
[fileOffsetOfEndOfLastAtom
], &wholeBuffer
[fileOffset
], lastAtomWasThumb
);
2620 // copy atom content
2621 atom
->copyRawContent(&wholeBuffer
[fileOffset
]);
2623 this->applyFixUps(state
, mhAddress
, atom
, &wholeBuffer
[fileOffset
]);
2624 fileOffsetOfEndOfLastAtom
= fileOffset
+atom
->size();
2625 lastAtomUsesNoOps
= sectionUsesNops
;
2626 lastAtomWasThumb
= atom
->isThumb();
2628 catch (const char* msg
) {
2629 if ( atom
->file() != NULL
)
2630 throwf("%s in '%s' from %s", msg
, atom
->name(), atom
->file()->path());
2632 throwf("%s in '%s'", msg
, atom
->name());
2637 if ( _options
.verboseOptimizationHints() ) {
2638 //fprintf(stderr, "ADRP optimized away: %d\n", sAdrpNA);
2639 //fprintf(stderr, "ADRPs changed to NOPs: %d\n", sAdrpNoped);
2640 //fprintf(stderr, "ADRPs unchanged: %d\n", sAdrpNotNoped);
2644 void OutputFile::computeContentUUID(ld::Internal
& state
, uint8_t* wholeBuffer
)
2646 const bool log
= false;
2647 if ( (_options
.outputKind() != Options::kObjectFile
) || state
.someObjectFileHasDwarf
) {
2648 uint8_t digest
[CC_MD5_DIGEST_LENGTH
];
2649 std::vector
<std::pair
<uint64_t, uint64_t>> excludeRegions
;
2650 uint64_t bitcodeCmdOffset
;
2651 uint64_t bitcodeCmdEnd
;
2652 uint64_t bitcodeSectOffset
;
2653 uint64_t bitcodePaddingEnd
;
2654 if ( _headersAndLoadCommandAtom
->bitcodeBundleCommand(bitcodeCmdOffset
, bitcodeCmdEnd
,
2655 bitcodeSectOffset
, bitcodePaddingEnd
) ) {
2656 // Exclude embedded bitcode bundle section which contains timestamps in XAR header
2657 // Note the timestamp is in the compressed XML header which means it might change the size of
2658 // bitcode section. The load command which include the size of the section and the padding after
2659 // the bitcode section should also be excluded in the UUID computation.
2660 // Bitcode section should appears before LINKEDIT
2661 // Exclude section cmd
2662 if ( log
) fprintf(stderr
, "bundle cmd start=0x%08llX, bundle cmd end=0x%08llX\n",
2663 bitcodeCmdOffset
, bitcodeCmdEnd
);
2664 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(bitcodeCmdOffset
, bitcodeCmdEnd
));
2665 // Exclude section content
2666 if ( log
) fprintf(stderr
, "bundle start=0x%08llX, bundle end=0x%08llX\n",
2667 bitcodeSectOffset
, bitcodePaddingEnd
);
2668 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(bitcodeSectOffset
, bitcodePaddingEnd
));
2670 uint32_t stabsStringsOffsetStart
;
2671 uint32_t tabsStringsOffsetEnd
;
2672 uint32_t stabsOffsetStart
;
2673 uint32_t stabsOffsetEnd
;
2674 if ( _symbolTableAtom
->hasStabs(stabsStringsOffsetStart
, tabsStringsOffsetEnd
, stabsOffsetStart
, stabsOffsetEnd
) ) {
2675 // find two areas of file that are stabs info and should not contribute to checksum
2676 uint64_t stringPoolFileOffset
= 0;
2677 uint64_t symbolTableFileOffset
= 0;
2678 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2679 ld::Internal::FinalSection
* sect
= *sit
;
2680 if ( sect
->type() == ld::Section::typeLinkEdit
) {
2681 if ( strcmp(sect
->sectionName(), "__string_pool") == 0 )
2682 stringPoolFileOffset
= sect
->fileOffset
;
2683 else if ( strcmp(sect
->sectionName(), "__symbol_table") == 0 )
2684 symbolTableFileOffset
= sect
->fileOffset
;
2687 uint64_t firstStabNlistFileOffset
= symbolTableFileOffset
+ stabsOffsetStart
;
2688 uint64_t lastStabNlistFileOffset
= symbolTableFileOffset
+ stabsOffsetEnd
;
2689 uint64_t firstStabStringFileOffset
= stringPoolFileOffset
+ stabsStringsOffsetStart
;
2690 uint64_t lastStabStringFileOffset
= stringPoolFileOffset
+ tabsStringsOffsetEnd
;
2691 if ( log
) fprintf(stderr
, "stabNlist offset=0x%08llX, size=0x%08llX\n", firstStabNlistFileOffset
, lastStabNlistFileOffset
-firstStabNlistFileOffset
);
2692 if ( log
) fprintf(stderr
, "stabString offset=0x%08llX, size=0x%08llX\n", firstStabStringFileOffset
, lastStabStringFileOffset
-firstStabStringFileOffset
);
2693 assert(firstStabNlistFileOffset
<= firstStabStringFileOffset
);
2694 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(firstStabNlistFileOffset
, lastStabNlistFileOffset
));
2695 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(firstStabStringFileOffset
, lastStabStringFileOffset
));
2696 // exclude LINKEDIT LC_SEGMENT (size field depends on stabs size)
2697 uint64_t linkeditSegCmdOffset
;
2698 uint64_t linkeditSegCmdSize
;
2699 _headersAndLoadCommandAtom
->linkeditCmdInfo(linkeditSegCmdOffset
, linkeditSegCmdSize
);
2700 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(linkeditSegCmdOffset
, linkeditSegCmdOffset
+linkeditSegCmdSize
));
2701 if ( log
) fprintf(stderr
, "linkedit SegCmdOffset=0x%08llX, size=0x%08llX\n", linkeditSegCmdOffset
, linkeditSegCmdSize
);
2702 uint64_t symbolTableCmdOffset
;
2703 uint64_t symbolTableCmdSize
;
2704 _headersAndLoadCommandAtom
->symbolTableCmdInfo(symbolTableCmdOffset
, symbolTableCmdSize
);
2705 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(symbolTableCmdOffset
, symbolTableCmdOffset
+symbolTableCmdSize
));
2706 if ( log
) fprintf(stderr
, "linkedit SegCmdOffset=0x%08llX, size=0x%08llX\n", symbolTableCmdOffset
, symbolTableCmdSize
);
2708 if ( !excludeRegions
.empty() ) {
2709 CC_MD5_CTX md5state
;
2710 CC_MD5_Init(&md5state
);
2711 // rdar://problem/19487042 include the output leaf file name in the hash
2712 const char* lastSlash
= strrchr(_options
.outputFilePath(), '/');
2713 if ( lastSlash
!= NULL
) {
2714 CC_MD5_Update(&md5state
, lastSlash
, strlen(lastSlash
));
2716 std::sort(excludeRegions
.begin(), excludeRegions
.end());
2717 uint64_t checksumStart
= 0;
2718 for ( auto& region
: excludeRegions
) {
2719 uint64_t regionStart
= region
.first
;
2720 uint64_t regionEnd
= region
.second
;
2721 assert(checksumStart
<= regionStart
&& regionStart
<= regionEnd
&& "Region overlapped");
2722 if ( log
) fprintf(stderr
, "checksum 0x%08llX -> 0x%08llX\n", checksumStart
, regionStart
);
2723 CC_MD5_Update(&md5state
, &wholeBuffer
[checksumStart
], regionStart
- checksumStart
);
2724 checksumStart
= regionEnd
;
2726 if ( log
) fprintf(stderr
, "checksum 0x%08llX -> 0x%08llX\n", checksumStart
, _fileSize
);
2727 CC_MD5_Update(&md5state
, &wholeBuffer
[checksumStart
], _fileSize
-checksumStart
);
2728 CC_MD5_Final(digest
, &md5state
);
2729 if ( log
) fprintf(stderr
, "uuid=%02X, %02X, %02X, %02X, %02X, %02X, %02X, %02X\n", digest
[0], digest
[1], digest
[2],
2730 digest
[3], digest
[4], digest
[5], digest
[6], digest
[7]);
2733 CC_MD5(wholeBuffer
, _fileSize
, digest
);
2735 // <rdar://problem/6723729> LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
2736 digest
[6] = ( digest
[6] & 0x0F ) | ( 3 << 4 );
2737 digest
[8] = ( digest
[8] & 0x3F ) | 0x80;
2738 // update buffer with new UUID
2739 _headersAndLoadCommandAtom
->setUUID(digest
);
2740 _headersAndLoadCommandAtom
->recopyUUIDCommand();
2744 static int sDescriptorOfPathToRemove
= -1;
2745 static void removePathAndExit(int sig
)
2747 if ( sDescriptorOfPathToRemove
!= -1 ) {
2748 char path
[MAXPATHLEN
];
2749 if ( ::fcntl(sDescriptorOfPathToRemove
, F_GETPATH
, path
) == 0 )
2752 fprintf(stderr
, "ld: interrupted\n");
// Writes the fully laid-out image to disk.
//
// Two strategies, chosen from what stat()/statfs() report about the output:
//  * regular file on an hfs volume: mmap a mkstemp() temp file, write atoms
//    through the mapping, then rename() it into place (atomic replace);
//  * anything else (non-hfs, pipes, devices): build the image in a calloc'd
//    buffer and flush it with a single write().
// Throws (via throwf) on any I/O failure.  Also stamps the UUID load command
// and, when requested, renames the reverse symbol map to its UUID-based name.
void OutputFile::writeOutputFile(ld::Internal& state)
{
	// for UNIX conformance, error if file exists and is not writable
	if ( (access(_options.outputFilePath(), F_OK) == 0) && (access(_options.outputFilePath(), W_OK) == -1) )
		throwf("can't write output file: %s", _options.outputFilePath());

	mode_t permissions = 0777;
	if ( _options.outputKind() == Options::kObjectFile )
		permissions = 0666;	// .o files are not executable
	mode_t umask = ::umask(0);
	::umask(umask); // put back the original umask
	permissions &= ~umask;
	// Calling unlink first assures the file is gone so that open creates it with correct permissions
	// It also handles the case where __options.outputFilePath() file is not writable but its directory is
	// And it means we don't have to truncate the file when done writing (in case new is smaller than old)
	// Lastly, only delete existing file if it is a normal file (e.g. not /dev/null).
	struct stat stat_buf;
	bool outputIsRegularFile = false;
	bool outputIsMappableFile = false;
	if ( stat(_options.outputFilePath(), &stat_buf) != -1 ) {
		if ( stat_buf.st_mode & S_IFREG ) {
			outputIsRegularFile = true;
			// <rdar://problem/12264302> Don't use mmap on non-hfs volumes
			struct statfs fsInfo;
			if ( statfs(_options.outputFilePath(), &fsInfo) != -1 ) {
				if ( strcmp(fsInfo.f_fstypename, "hfs") == 0 ) {
					(void)unlink(_options.outputFilePath());
					outputIsMappableFile = true;
				}
			}
			else {
				outputIsMappableFile = false;
			}
		}
		else {
			outputIsRegularFile = false;
		}
	}
	else {
		// special files (pipes, devices, etc) must already exist
		outputIsRegularFile = true;
		// output file does not exist yet; check the filesystem of its directory
		char dirPath[PATH_MAX];
		strcpy(dirPath, _options.outputFilePath());
		char* end = strrchr(dirPath, '/');
		if ( end != NULL ) {
			end[1] = '\0';	// truncate to containing directory -- TODO(review): confirm against upstream ld64
			struct statfs fsInfo;
			if ( statfs(dirPath, &fsInfo) != -1 ) {
				if ( strcmp(fsInfo.f_fstypename, "hfs") == 0 ) {
					outputIsMappableFile = true;
				}
			}
		}
	}

	//fprintf(stderr, "outputIsMappableFile=%d, outputIsRegularFile=%d, path=%s\n", outputIsMappableFile, outputIsRegularFile, _options.outputFilePath());

	int fd = -1;
	// Construct a temporary path of the form {outputFilePath}.ld_XXXXXX
	const char filenameTemplate[] = ".ld_XXXXXX";
	char tmpOutput[PATH_MAX];
	uint8_t* wholeBuffer;
	if ( outputIsRegularFile && outputIsMappableFile ) {
		// <rdar://problem/20959031> ld64 should clean up temporary files on SIGINT
		::signal(SIGINT, removePathAndExit);

		strcpy(tmpOutput, _options.outputFilePath());
		// If the path is too long to add a suffix for a temporary name then
		// just fall back to using the output path.
		if ( strlen(tmpOutput)+strlen(filenameTemplate) < PATH_MAX ) {
			strcat(tmpOutput, filenameTemplate);
			fd = mkstemp(tmpOutput);
			sDescriptorOfPathToRemove = fd;	// arm the SIGINT cleanup handler
		}
		else {
			fd = open(tmpOutput, O_RDWR|O_CREAT, permissions);
		}
		if ( fd == -1 )
			throwf("can't open output file for writing '%s', errno=%d", tmpOutput, errno);
		if ( ftruncate(fd, _fileSize) == -1 ) {
			int err = errno;
			if ( err == ENOSPC )
				throwf("not enough disk space for writing '%s'", _options.outputFilePath());
			else
				throwf("can't grow file for writing '%s', errno=%d", _options.outputFilePath(), err);
		}

		wholeBuffer = (uint8_t*)mmap(NULL, _fileSize, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0);
		if ( wholeBuffer == MAP_FAILED )
			throwf("can't create buffer of %llu bytes for output", _fileSize);
	}
	else {
		if ( outputIsRegularFile )
			fd = open(_options.outputFilePath(), O_RDWR|O_CREAT, permissions);
		else
			fd = open(_options.outputFilePath(), O_WRONLY);
		if ( fd == -1 )
			throwf("can't open output file for writing: %s, errno=%d", _options.outputFilePath(), errno);
		// try to allocate buffer for entire output file content
		wholeBuffer = (uint8_t*)calloc(_fileSize, 1);
		if ( wholeBuffer == NULL )
			throwf("can't create buffer of %llu bytes for output", _fileSize);
	}

	// random UUIDs are stamped before writing; content UUIDs after (below)
	if ( _options.UUIDMode() == Options::kUUIDRandom ) {
		uint8_t bits[16];
		::uuid_generate_random(bits);
		_headersAndLoadCommandAtom->setUUID(bits);
	}

	// lay down every atom's content into the buffer
	writeAtoms(state, wholeBuffer);

	// compute UUID, now that the whole file content is known
	if ( _options.UUIDMode() == Options::kUUIDContent )
		computeContentUUID(state, wholeBuffer);

	if ( outputIsRegularFile && outputIsMappableFile ) {
		// mkstemp created the file 0600; restore the intended permissions,
		// then atomically move the temp file onto the real output path
		if ( ::chmod(tmpOutput, permissions) == -1 ) {
			throwf("can't set permissions on output file: %s, errno=%d", tmpOutput, errno);
		}
		if ( ::rename(tmpOutput, _options.outputFilePath()) == -1 && strcmp(tmpOutput, _options.outputFilePath()) != 0 ) {
			throwf("can't move output file in place, errno=%d", errno);
		}
	}
	else {
		if ( ::write(fd, wholeBuffer, _fileSize) == -1 ) {
			throwf("can't write to output file: %s, errno=%d", _options.outputFilePath(), errno);
		}
		sDescriptorOfPathToRemove = -1;	// file is complete; disarm SIGINT cleanup
		// <rdar://problem/13118223> NFS: iOS incremental builds in Xcode 4.6 fail with codesign error
		// NFS seems to pad the end of the file sometimes. Calling trunc seems to correct it...
		::truncate(_options.outputFilePath(), _fileSize);
	}

	// Rename symbol map file if needed
	if ( _options.renameReverseSymbolMap() ) {
		assert(_options.hideSymbols() && _options.reverseSymbolMapPath() != NULL && "Must hide symbol and specify a path");
		uuid_string_t UUIDString;
		const uint8_t* rawUUID = _headersAndLoadCommandAtom->getUUID();
		uuid_unparse_upper(rawUUID, UUIDString);
		char outputMapPath[PATH_MAX];
		// snprintf (not sprintf): reverseSymbolMapPath() is caller-controlled
		// and could otherwise overflow the PATH_MAX buffer
		snprintf(outputMapPath, sizeof(outputMapPath), "%s/%s.bcsymbolmap", _options.reverseSymbolMapPath(), UUIDString);
		if ( ::rename(_options.reverseMapTempPath().c_str(), outputMapPath) != 0 )
			throwf("could not create bcsymbolmap file: %s", outputMapPath);
	}
}
2908 struct AtomByNameSorter
2910 bool operator()(const ld::Atom
* left
, const ld::Atom
* right
) const
2912 return (strcmp(left
->name(), right
->name()) < 0);
2915 bool operator()(const ld::Atom
* left
, const char* right
) const
2917 return (strcmp(left
->name(), right
) < 0);
2920 bool operator()(const char* left
, const ld::Atom
* right
) const
2922 return (strcmp(left
, right
->name()) < 0);
2930 NotInSet(const std::set
<const ld::Atom
*>& theSet
) : _set(theSet
) {}
2932 bool operator()(const ld::Atom
* atom
) const {
2933 return ( _set
.count(atom
) == 0 );
2936 const std::set
<const ld::Atom
*>& _set
;
2940 void OutputFile::buildSymbolTable(ld::Internal
& state
)
2942 unsigned int machoSectionIndex
= 0;
2943 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2944 ld::Internal::FinalSection
* sect
= *sit
;
2945 bool setMachoSectionIndex
= !sect
->isSectionHidden() && (sect
->type() != ld::Section::typeTentativeDefs
);
2946 if ( setMachoSectionIndex
)
2947 ++machoSectionIndex
;
2948 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
2949 const ld::Atom
* atom
= *ait
;
2950 if ( setMachoSectionIndex
)
2951 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
);
2952 else if ( sect
->type() == ld::Section::typeMachHeader
)
2953 (const_cast<ld::Atom
*>(atom
))->setMachoSection(1); // __mh_execute_header is not in any section by needs n_sect==1
2954 else if ( sect
->type() == ld::Section::typeLastSection
)
2955 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
); // use section index of previous section
2956 else if ( sect
->type() == ld::Section::typeFirstSection
)
2957 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
+1); // use section index of next section
2959 // in -r mode, clarify symbolTableNotInFinalLinkedImages
2960 if ( _options
.outputKind() == Options::kObjectFile
) {
2961 if ( (_options
.architecture() == CPU_TYPE_X86_64
)
2962 || (_options
.architecture() == CPU_TYPE_ARM64
)
2964 // x86_64 .o files need labels on anonymous literal strings
2965 if ( (sect
->type() == ld::Section::typeCString
) && (atom
->combine() == ld::Atom::combineByNameAndContent
) ) {
2966 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2967 _localAtoms
.push_back(atom
);
2971 if ( sect
->type() == ld::Section::typeCFI
) {
2972 if ( _options
.removeEHLabels() )
2973 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
2975 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2977 else if ( sect
->type() == ld::Section::typeTempAlias
) {
2978 assert(_options
.outputKind() == Options::kObjectFile
);
2979 _importedAtoms
.push_back(atom
);
2982 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
)
2983 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2986 // TEMP work around until <rdar://problem/7702923> goes in
2987 if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip
)
2988 && (atom
->scope() == ld::Atom::scopeLinkageUnit
)
2989 && (_options
.outputKind() == Options::kDynamicLibrary
) ) {
2990 (const_cast<ld::Atom
*>(atom
))->setScope(ld::Atom::scopeGlobal
);
2993 // <rdar://problem/6783167> support auto hidden weak symbols: .weak_def_can_be_hidden
2994 if ( atom
->autoHide() && (_options
.outputKind() != Options::kObjectFile
) ) {
2995 // adding auto-hide symbol to .exp file should keep it global
2996 if ( !_options
.hasExportMaskList() || !_options
.shouldExport(atom
->name()) )
2997 (const_cast<ld::Atom
*>(atom
))->setScope(ld::Atom::scopeLinkageUnit
);
3000 // <rdar://problem/8626058> ld should consistently warn when resolvers are not exported
3001 if ( (atom
->contentType() == ld::Atom::typeResolver
) && (atom
->scope() == ld::Atom::scopeLinkageUnit
) )
3002 warning("resolver functions should be external, but '%s' is hidden", atom
->name());
3004 if ( sect
->type() == ld::Section::typeImportProxies
) {
3005 if ( atom
->combine() == ld::Atom::combineByName
)
3006 this->usesWeakExternalSymbols
= true;
3007 // alias proxy is a re-export with a name change, don't import changed name
3008 if ( ! atom
->isAlias() )
3009 _importedAtoms
.push_back(atom
);
3010 // scope of proxies are usually linkage unit, so done
3011 // if scope is global, we need to re-export it too
3012 if ( atom
->scope() == ld::Atom::scopeGlobal
)
3013 _exportedAtoms
.push_back(atom
);
3016 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
) {
3017 assert(_options
.outputKind() != Options::kObjectFile
);
3018 continue; // don't add to symbol table
3020 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotIn
) {
3021 continue; // don't add to symbol table
3023 if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel
)
3024 && (_options
.outputKind() != Options::kObjectFile
) ) {
3025 continue; // don't add to symbol table
3028 if ( (atom
->definition() == ld::Atom::definitionTentative
) && (_options
.outputKind() == Options::kObjectFile
) ) {
3029 if ( _options
.makeTentativeDefinitionsReal() ) {
3030 // -r -d turns tentative defintions into real def
3031 _exportedAtoms
.push_back(atom
);
3034 // in mach-o object files tentative defintions are stored like undefined symbols
3035 _importedAtoms
.push_back(atom
);
3040 switch ( atom
->scope() ) {
3041 case ld::Atom::scopeTranslationUnit
:
3042 if ( _options
.keepLocalSymbol(atom
->name()) ) {
3043 _localAtoms
.push_back(atom
);
3046 if ( _options
.outputKind() == Options::kObjectFile
) {
3047 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel
);
3048 _localAtoms
.push_back(atom
);
3051 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
3054 case ld::Atom::scopeGlobal
:
3055 _exportedAtoms
.push_back(atom
);
3057 case ld::Atom::scopeLinkageUnit
:
3058 if ( _options
.outputKind() == Options::kObjectFile
) {
3059 if ( _options
.keepPrivateExterns() ) {
3060 _exportedAtoms
.push_back(atom
);
3062 else if ( _options
.keepLocalSymbol(atom
->name()) ) {
3063 _localAtoms
.push_back(atom
);
3066 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel
);
3067 _localAtoms
.push_back(atom
);
3071 if ( _options
.keepLocalSymbol(atom
->name()) )
3072 _localAtoms
.push_back(atom
);
3073 // <rdar://problem/5804214> ld should never have a symbol in the non-lazy indirect symbol table with index 0
3074 // this works by making __mh_execute_header be a local symbol which takes symbol index 0
3075 else if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip
) && !_options
.makeCompressedDyldInfo() )
3076 _localAtoms
.push_back(atom
);
3078 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
3085 // <rdar://problem/6978069> ld adds undefined symbol from .exp file to binary
3086 if ( (_options
.outputKind() == Options::kKextBundle
) && _options
.hasExportRestrictList() ) {
3087 // search for referenced undefines
3088 std::set
<const ld::Atom
*> referencedProxyAtoms
;
3089 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
=state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
3090 ld::Internal::FinalSection
* sect
= *sit
;
3091 for (std::vector
<const ld::Atom
*>::iterator ait
=sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
3092 const ld::Atom
* atom
= *ait
;
3093 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
3094 switch ( fit
->binding
) {
3095 case ld::Fixup::bindingsIndirectlyBound
:
3096 referencedProxyAtoms
.insert(state
.indirectBindingTable
[fit
->u
.bindingIndex
]);
3098 case ld::Fixup::bindingDirectlyBound
:
3099 referencedProxyAtoms
.insert(fit
->u
.target
);
3107 // remove any unreferenced _importedAtoms
3108 _importedAtoms
.erase(std::remove_if(_importedAtoms
.begin(), _importedAtoms
.end(), NotInSet(referencedProxyAtoms
)), _importedAtoms
.end());
3112 std::sort(_exportedAtoms
.begin(), _exportedAtoms
.end(), AtomByNameSorter());
3113 std::sort(_importedAtoms
.begin(), _importedAtoms
.end(), AtomByNameSorter());
3115 std::map
<std::string
, std::vector
<std::string
>> addedSymbols
;
3116 std::map
<std::string
, std::vector
<std::string
>> hiddenSymbols
;
3117 for (const auto *atom
: _exportedAtoms
) {
3118 // The exported symbols have already been sorted. Early exit the loop
3119 // once we see a symbol that is lexicographically past the special
3121 if (atom
->name()[0] > '$')
3124 std::string
name(atom
->name());
3125 if (name
.rfind("$ld$add$", 7) == 0) {
3126 auto pos
= name
.find_first_of('$', 10);
3127 if (pos
== std::string::npos
) {
3128 warning("bad special linker symbol '%s'", atom
->name());
3131 auto &&symbolName
= name
.substr(pos
+1);
3132 auto it
= addedSymbols
.emplace(symbolName
, std::initializer_list
<std::string
>{name
});
3134 it
.first
->second
.emplace_back(name
);
3135 } else if (name
.rfind("$ld$hide$", 8) == 0) {
3136 auto pos
= name
.find_first_of('$', 11);
3137 if (pos
== std::string::npos
) {
3138 warning("bad special linker symbol '%s'", atom
->name());
3141 auto &&symbolName
= name
.substr(pos
+1);
3142 auto it
= hiddenSymbols
.emplace(symbolName
, std::initializer_list
<std::string
>{name
});
3144 it
.first
->second
.emplace_back(name
);
3148 for (const auto &it
: addedSymbols
) {
3149 if (!std::binary_search(_exportedAtoms
.begin(), _exportedAtoms
.end(), it
.first
.c_str(), AtomByNameSorter()))
3151 for (const auto &symbol
: it
.second
)
3152 warning("linker symbol '%s' adds already existing symbol '%s'", symbol
.c_str(), it
.first
.c_str());
3155 auto it
= hiddenSymbols
.begin();
3156 while (it
!= hiddenSymbols
.end()) {
3157 if (std::binary_search(_exportedAtoms
.begin(), _exportedAtoms
.end(), it
->first
.c_str(), AtomByNameSorter()))
3158 it
= hiddenSymbols
.erase(it
);
3163 for (const auto &it
: hiddenSymbols
) {
3164 for (const auto &symbol
: it
.second
)
3165 warning("linker symbol '%s' hides a non-existent symbol '%s'", symbol
.c_str(), it
.first
.c_str());
3169 void OutputFile::addPreloadLinkEdit(ld::Internal
& state
)
3171 switch ( _options
.architecture() ) {
3172 #if SUPPORT_ARCH_i386
3174 if ( _hasLocalRelocations
) {
3175 _localRelocsAtom
= new LocalRelocationsAtom
<x86
>(_options
, state
, *this);
3176 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3178 if ( _hasExternalRelocations
) {
3179 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86
>(_options
, state
, *this);
3180 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3182 if ( _hasSymbolTable
) {
3183 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86
>(_options
, state
, *this);
3184 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3185 _symbolTableAtom
= new SymbolTableAtom
<x86
>(_options
, state
, *this);
3186 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3187 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3188 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3192 #if SUPPORT_ARCH_x86_64
3193 case CPU_TYPE_X86_64
:
3194 if ( _hasLocalRelocations
) {
3195 _localRelocsAtom
= new LocalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3196 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3198 if ( _hasExternalRelocations
) {
3199 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3200 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3202 if ( _hasSymbolTable
) {
3203 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86_64
>(_options
, state
, *this);
3204 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3205 _symbolTableAtom
= new SymbolTableAtom
<x86_64
>(_options
, state
, *this);
3206 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3207 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3208 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3212 #if SUPPORT_ARCH_arm_any
3214 if ( _hasLocalRelocations
) {
3215 _localRelocsAtom
= new LocalRelocationsAtom
<arm
>(_options
, state
, *this);
3216 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3218 if ( _hasExternalRelocations
) {
3219 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm
>(_options
, state
, *this);
3220 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3222 if ( _hasSymbolTable
) {
3223 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm
>(_options
, state
, *this);
3224 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3225 _symbolTableAtom
= new SymbolTableAtom
<arm
>(_options
, state
, *this);
3226 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3227 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3228 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3232 #if SUPPORT_ARCH_arm64
3233 case CPU_TYPE_ARM64
:
3234 if ( _hasLocalRelocations
) {
3235 _localRelocsAtom
= new LocalRelocationsAtom
<arm64
>(_options
, state
, *this);
3236 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3238 if ( _hasExternalRelocations
) {
3239 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm64
>(_options
, state
, *this);
3240 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3242 if ( _hasSymbolTable
) {
3243 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm64
>(_options
, state
, *this);
3244 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3245 _symbolTableAtom
= new SymbolTableAtom
<arm64
>(_options
, state
, *this);
3246 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3247 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3248 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3253 throw "-preload not supported";
3259 void OutputFile::addLinkEdit(ld::Internal
& state
)
3261 // for historical reasons, -preload orders LINKEDIT content differently
3262 if ( _options
.outputKind() == Options::kPreload
)
3263 return addPreloadLinkEdit(state
);
3265 switch ( _options
.architecture() ) {
3266 #if SUPPORT_ARCH_i386
3268 if ( _hasSectionRelocations
) {
3269 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<x86
>(_options
, state
, *this);
3270 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3272 if ( _hasDyldInfo
) {
3273 _rebasingInfoAtom
= new RebaseInfoAtom
<x86
>(_options
, state
, *this);
3274 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3276 _bindingInfoAtom
= new BindingInfoAtom
<x86
>(_options
, state
, *this);
3277 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3279 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<x86
>(_options
, state
, *this);
3280 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3282 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<x86
>(_options
, state
, *this);
3283 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3285 _exportInfoAtom
= new ExportInfoAtom
<x86
>(_options
, state
, *this);
3286 exportSection
= state
.addAtom(*_exportInfoAtom
);
3288 if ( _hasLocalRelocations
) {
3289 _localRelocsAtom
= new LocalRelocationsAtom
<x86
>(_options
, state
, *this);
3290 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3292 if ( _hasSplitSegInfo
) {
3293 if ( _options
.sharedRegionEncodingV2() )
3294 _splitSegInfoAtom
= new SplitSegInfoV2Atom
<x86
>(_options
, state
, *this);
3296 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<x86
>(_options
, state
, *this);
3297 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3299 if ( _hasFunctionStartsInfo
) {
3300 _functionStartsAtom
= new FunctionStartsAtom
<x86
>(_options
, state
, *this);
3301 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3303 if ( _hasDataInCodeInfo
) {
3304 _dataInCodeAtom
= new DataInCodeAtom
<x86
>(_options
, state
, *this);
3305 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3307 if ( _hasOptimizationHints
) {
3308 _optimizationHintsAtom
= new OptimizationHintsAtom
<x86
>(_options
, state
, *this);
3309 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3311 if ( _hasSymbolTable
) {
3312 _symbolTableAtom
= new SymbolTableAtom
<x86
>(_options
, state
, *this);
3313 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3315 if ( _hasExternalRelocations
) {
3316 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86
>(_options
, state
, *this);
3317 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3319 if ( _hasSymbolTable
) {
3320 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86
>(_options
, state
, *this);
3321 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3322 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3323 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3327 #if SUPPORT_ARCH_x86_64
3328 case CPU_TYPE_X86_64
:
3329 if ( _hasSectionRelocations
) {
3330 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<x86_64
>(_options
, state
, *this);
3331 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3333 if ( _hasDyldInfo
) {
3334 _rebasingInfoAtom
= new RebaseInfoAtom
<x86_64
>(_options
, state
, *this);
3335 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3337 _bindingInfoAtom
= new BindingInfoAtom
<x86_64
>(_options
, state
, *this);
3338 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3340 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<x86_64
>(_options
, state
, *this);
3341 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3343 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<x86_64
>(_options
, state
, *this);
3344 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3346 _exportInfoAtom
= new ExportInfoAtom
<x86_64
>(_options
, state
, *this);
3347 exportSection
= state
.addAtom(*_exportInfoAtom
);
3349 if ( _hasLocalRelocations
) {
3350 _localRelocsAtom
= new LocalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3351 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3353 if ( _hasSplitSegInfo
) {
3354 if ( _options
.sharedRegionEncodingV2() )
3355 _splitSegInfoAtom
= new SplitSegInfoV2Atom
<x86_64
>(_options
, state
, *this);
3357 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<x86_64
>(_options
, state
, *this);
3358 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3360 if ( _hasFunctionStartsInfo
) {
3361 _functionStartsAtom
= new FunctionStartsAtom
<x86_64
>(_options
, state
, *this);
3362 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3364 if ( _hasDataInCodeInfo
) {
3365 _dataInCodeAtom
= new DataInCodeAtom
<x86_64
>(_options
, state
, *this);
3366 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3368 if ( _hasOptimizationHints
) {
3369 _optimizationHintsAtom
= new OptimizationHintsAtom
<x86_64
>(_options
, state
, *this);
3370 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3372 if ( _hasSymbolTable
) {
3373 _symbolTableAtom
= new SymbolTableAtom
<x86_64
>(_options
, state
, *this);
3374 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3376 if ( _hasExternalRelocations
) {
3377 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3378 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3380 if ( _hasSymbolTable
) {
3381 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86_64
>(_options
, state
, *this);
3382 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3383 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 8);
3384 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3388 #if SUPPORT_ARCH_arm_any
3390 if ( _hasSectionRelocations
) {
3391 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<arm
>(_options
, state
, *this);
3392 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3394 if ( _hasDyldInfo
) {
3395 _rebasingInfoAtom
= new RebaseInfoAtom
<arm
>(_options
, state
, *this);
3396 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3398 _bindingInfoAtom
= new BindingInfoAtom
<arm
>(_options
, state
, *this);
3399 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3401 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<arm
>(_options
, state
, *this);
3402 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3404 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<arm
>(_options
, state
, *this);
3405 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3407 _exportInfoAtom
= new ExportInfoAtom
<arm
>(_options
, state
, *this);
3408 exportSection
= state
.addAtom(*_exportInfoAtom
);
3410 if ( _hasLocalRelocations
) {
3411 _localRelocsAtom
= new LocalRelocationsAtom
<arm
>(_options
, state
, *this);
3412 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3414 if ( _hasSplitSegInfo
) {
3415 if ( _options
.sharedRegionEncodingV2() )
3416 _splitSegInfoAtom
= new SplitSegInfoV2Atom
<arm
>(_options
, state
, *this);
3418 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<arm
>(_options
, state
, *this);
3419 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3421 if ( _hasFunctionStartsInfo
) {
3422 _functionStartsAtom
= new FunctionStartsAtom
<arm
>(_options
, state
, *this);
3423 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3425 if ( _hasDataInCodeInfo
) {
3426 _dataInCodeAtom
= new DataInCodeAtom
<arm
>(_options
, state
, *this);
3427 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3429 if ( _hasOptimizationHints
) {
3430 _optimizationHintsAtom
= new OptimizationHintsAtom
<arm
>(_options
, state
, *this);
3431 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3433 if ( _hasSymbolTable
) {
3434 _symbolTableAtom
= new SymbolTableAtom
<arm
>(_options
, state
, *this);
3435 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3437 if ( _hasExternalRelocations
) {
3438 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm
>(_options
, state
, *this);
3439 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3441 if ( _hasSymbolTable
) {
3442 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm
>(_options
, state
, *this);
3443 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3444 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3445 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3449 #if SUPPORT_ARCH_arm64
3450 case CPU_TYPE_ARM64
:
3451 if ( _hasSectionRelocations
) {
3452 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<arm64
>(_options
, state
, *this);
3453 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3455 if ( _hasDyldInfo
) {
3456 _rebasingInfoAtom
= new RebaseInfoAtom
<arm64
>(_options
, state
, *this);
3457 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3459 _bindingInfoAtom
= new BindingInfoAtom
<arm64
>(_options
, state
, *this);
3460 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3462 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<arm64
>(_options
, state
, *this);
3463 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3465 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<arm64
>(_options
, state
, *this);
3466 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3468 _exportInfoAtom
= new ExportInfoAtom
<arm64
>(_options
, state
, *this);
3469 exportSection
= state
.addAtom(*_exportInfoAtom
);
3471 if ( _hasLocalRelocations
) {
3472 _localRelocsAtom
= new LocalRelocationsAtom
<arm64
>(_options
, state
, *this);
3473 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3475 if ( _hasSplitSegInfo
) {
3476 if ( _options
.sharedRegionEncodingV2() )
3477 _splitSegInfoAtom
= new SplitSegInfoV2Atom
<arm64
>(_options
, state
, *this);
3479 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<arm64
>(_options
, state
, *this);
3480 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3482 if ( _hasFunctionStartsInfo
) {
3483 _functionStartsAtom
= new FunctionStartsAtom
<arm64
>(_options
, state
, *this);
3484 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3486 if ( _hasDataInCodeInfo
) {
3487 _dataInCodeAtom
= new DataInCodeAtom
<arm64
>(_options
, state
, *this);
3488 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3490 if ( _hasOptimizationHints
) {
3491 _optimizationHintsAtom
= new OptimizationHintsAtom
<arm64
>(_options
, state
, *this);
3492 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3494 if ( _hasSymbolTable
) {
3495 _symbolTableAtom
= new SymbolTableAtom
<arm64
>(_options
, state
, *this);
3496 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3498 if ( _hasExternalRelocations
) {
3499 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm64
>(_options
, state
, *this);
3500 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3502 if ( _hasSymbolTable
) {
3503 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm64
>(_options
, state
, *this);
3504 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3505 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3506 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3511 throw "unknown architecture";
3515 void OutputFile::addLoadCommands(ld::Internal
& state
)
3517 switch ( _options
.architecture() ) {
3518 #if SUPPORT_ARCH_x86_64
3519 case CPU_TYPE_X86_64
:
3520 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<x86_64
>(_options
, state
, *this);
3521 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3524 #if SUPPORT_ARCH_arm_any
3526 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<arm
>(_options
, state
, *this);
3527 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3530 #if SUPPORT_ARCH_arm64
3531 case CPU_TYPE_ARM64
:
3532 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<arm64
>(_options
, state
, *this);
3533 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3536 #if SUPPORT_ARCH_i386
3538 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<x86
>(_options
, state
, *this);
3539 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3543 throw "unknown architecture";
3547 uint32_t OutputFile::dylibCount()
3549 return _dylibsToLoad
.size();
3552 const ld::dylib::File
* OutputFile::dylibByOrdinal(unsigned int ordinal
)
3554 assert( ordinal
> 0 );
3555 assert( ordinal
<= _dylibsToLoad
.size() );
3556 return _dylibsToLoad
[ordinal
-1];
3559 bool OutputFile::hasOrdinalForInstallPath(const char* path
, int* ordinal
)
3561 for (std::map
<const ld::dylib::File
*, int>::const_iterator it
= _dylibToOrdinal
.begin(); it
!= _dylibToOrdinal
.end(); ++it
) {
3562 const char* installPath
= it
->first
->installPath();
3563 if ( (installPath
!= NULL
) && (strcmp(path
, installPath
) == 0) ) {
3564 *ordinal
= it
->second
;
3571 uint32_t OutputFile::dylibToOrdinal(const ld::dylib::File
* dylib
)
3573 return _dylibToOrdinal
[dylib
];
3577 void OutputFile::buildDylibOrdinalMapping(ld::Internal
& state
)
3579 // count non-public re-exported dylibs
3580 unsigned int nonPublicReExportCount
= 0;
3581 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3582 ld::dylib::File
* aDylib
= *it
;
3583 if ( aDylib
->willBeReExported() && ! aDylib
->hasPublicInstallName() )
3584 ++nonPublicReExportCount
;
3587 // look at each dylib supplied in state
3588 bool hasReExports
= false;
3589 bool haveLazyDylibs
= false;
3590 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3591 ld::dylib::File
* aDylib
= *it
;
3593 if ( aDylib
== state
.bundleLoader
) {
3594 _dylibToOrdinal
[aDylib
] = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE
;
3596 else if ( this->hasOrdinalForInstallPath(aDylib
->installPath(), &ordinal
) ) {
3597 // already have a dylib with that install path, map all uses to that ordinal
3598 _dylibToOrdinal
[aDylib
] = ordinal
;
3600 else if ( aDylib
->willBeLazyLoadedDylib() ) {
3601 // all lazy dylib need to be at end of ordinals
3602 haveLazyDylibs
= true;
3604 else if ( aDylib
->willBeReExported() && ! aDylib
->hasPublicInstallName() && (nonPublicReExportCount
>= 2) ) {
3605 _dylibsToLoad
.push_back(aDylib
);
3606 _dylibToOrdinal
[aDylib
] = BIND_SPECIAL_DYLIB_SELF
;
3609 // first time this install path seen, create new ordinal
3610 _dylibsToLoad
.push_back(aDylib
);
3611 _dylibToOrdinal
[aDylib
] = _dylibsToLoad
.size();
3613 if ( aDylib
->explicitlyLinked() && aDylib
->willBeReExported() )
3614 hasReExports
= true;
3616 if ( haveLazyDylibs
) {
3617 // second pass to determine ordinals for lazy loaded dylibs
3618 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3619 ld::dylib::File
* aDylib
= *it
;
3620 if ( aDylib
->willBeLazyLoadedDylib() ) {
3622 if ( this->hasOrdinalForInstallPath(aDylib
->installPath(), &ordinal
) ) {
3623 // already have a dylib with that install path, map all uses to that ordinal
3624 _dylibToOrdinal
[aDylib
] = ordinal
;
3627 // first time this install path seen, create new ordinal
3628 _dylibsToLoad
.push_back(aDylib
);
3629 _dylibToOrdinal
[aDylib
] = _dylibsToLoad
.size();
3634 _noReExportedDylibs
= !hasReExports
;
3635 //fprintf(stderr, "dylibs:\n");
3636 //for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3637 // fprintf(stderr, " %p ord=%u, install_name=%s\n",it->first, it->second, it->first->installPath());
3641 uint32_t OutputFile::lazyBindingInfoOffsetForLazyPointerAddress(uint64_t lpAddress
)
3643 return _lazyPointerAddressToInfoOffset
[lpAddress
];
3646 void OutputFile::setLazyBindingInfoOffset(uint64_t lpAddress
, uint32_t lpInfoOffset
)
3648 _lazyPointerAddressToInfoOffset
[lpAddress
] = lpInfoOffset
;
3651 int OutputFile::compressedOrdinalForAtom(const ld::Atom
* target
)
3653 // flat namespace images use zero for all ordinals
3654 if ( _options
.nameSpace() != Options::kTwoLevelNameSpace
)
3655 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3657 // handle -interposable
3658 if ( target
->definition() == ld::Atom::definitionRegular
)
3659 return BIND_SPECIAL_DYLIB_SELF
;
3662 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
3663 if ( dylib
!= NULL
) {
3664 std::map
<const ld::dylib::File
*, int>::iterator pos
= _dylibToOrdinal
.find(dylib
);
3665 if ( pos
!= _dylibToOrdinal
.end() )
3667 assert(0 && "dylib not assigned ordinal");
3670 // handle undefined dynamic_lookup
3671 if ( _options
.undefinedTreatment() == Options::kUndefinedDynamicLookup
)
3672 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3675 if ( _options
.allowedUndefined(target
->name()) )
3676 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3678 throw "can't find ordinal for imported symbol";
3682 bool OutputFile::isPcRelStore(ld::Fixup::Kind kind
)
3685 case ld::Fixup::kindStoreX86BranchPCRel8
:
3686 case ld::Fixup::kindStoreX86BranchPCRel32
:
3687 case ld::Fixup::kindStoreX86PCRel8
:
3688 case ld::Fixup::kindStoreX86PCRel16
:
3689 case ld::Fixup::kindStoreX86PCRel32
:
3690 case ld::Fixup::kindStoreX86PCRel32_1
:
3691 case ld::Fixup::kindStoreX86PCRel32_2
:
3692 case ld::Fixup::kindStoreX86PCRel32_4
:
3693 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
3694 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
3695 case ld::Fixup::kindStoreX86PCRel32GOT
:
3696 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
3697 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
3698 case ld::Fixup::kindStoreARMBranch24
:
3699 case ld::Fixup::kindStoreThumbBranch22
:
3700 case ld::Fixup::kindStoreARMLoad12
:
3701 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
3702 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
3703 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
3704 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
3705 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
3706 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
3707 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
3708 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
3709 #if SUPPORT_ARCH_arm64
3710 case ld::Fixup::kindStoreARM64Page21
:
3711 case ld::Fixup::kindStoreARM64PageOff12
:
3712 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
3713 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
3714 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
3715 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
3716 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
3717 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12
:
3718 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
3719 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
3720 case ld::Fixup::kindStoreARM64PCRelToGOT
:
3721 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
3722 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
3723 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
3724 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
3725 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
3726 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
3727 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
3728 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
3729 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
3730 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
3733 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
3734 #if SUPPORT_ARCH_arm64
3735 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
3737 return (_options
.outputKind() != Options::kKextBundle
);
3744 bool OutputFile::isStore(ld::Fixup::Kind kind
)
3747 case ld::Fixup::kindNone
:
3748 case ld::Fixup::kindNoneFollowOn
:
3749 case ld::Fixup::kindNoneGroupSubordinate
:
3750 case ld::Fixup::kindNoneGroupSubordinateFDE
:
3751 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
3752 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
3753 case ld::Fixup::kindSetTargetAddress
:
3754 case ld::Fixup::kindSubtractTargetAddress
:
3755 case ld::Fixup::kindAddAddend
:
3756 case ld::Fixup::kindSubtractAddend
:
3757 case ld::Fixup::kindSetTargetImageOffset
:
3758 case ld::Fixup::kindSetTargetSectionOffset
:
3767 bool OutputFile::setsTarget(ld::Fixup::Kind kind
)
3770 case ld::Fixup::kindSetTargetAddress
:
3771 case ld::Fixup::kindLazyTarget
:
3772 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
3773 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
3774 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
3775 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
3776 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
3777 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
3778 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
3779 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
3780 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
3781 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
3782 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad
:
3783 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
3784 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
3785 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
3786 #if SUPPORT_ARCH_arm64
3787 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
3788 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
3789 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
3790 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
3791 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
3792 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
3793 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
3794 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
3795 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
3796 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
3797 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
3800 case ld::Fixup::kindStoreX86DtraceCallSiteNop
:
3801 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear
:
3802 case ld::Fixup::kindStoreARMDtraceCallSiteNop
:
3803 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear
:
3804 case ld::Fixup::kindStoreARM64DtraceCallSiteNop
:
3805 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear
:
3806 case ld::Fixup::kindStoreThumbDtraceCallSiteNop
:
3807 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear
:
3808 return (_options
.outputKind() == Options::kObjectFile
);
3815 bool OutputFile::isPointerToTarget(ld::Fixup::Kind kind
)
3818 case ld::Fixup::kindSetTargetAddress
:
3819 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
3820 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
3821 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
3822 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
3823 case ld::Fixup::kindLazyTarget
:
3830 bool OutputFile::isPointerFromTarget(ld::Fixup::Kind kind
)
3833 case ld::Fixup::kindSubtractTargetAddress
:
3842 uint64_t OutputFile::lookBackAddend(ld::Fixup::iterator fit
)
3844 uint64_t addend
= 0;
3845 switch ( fit
->clusterSize
) {
3846 case ld::Fixup::k1of1
:
3847 case ld::Fixup::k1of2
:
3848 case ld::Fixup::k2of2
:
3850 case ld::Fixup::k2of3
:
3852 switch ( fit
->kind
) {
3853 case ld::Fixup::kindAddAddend
:
3854 addend
+= fit
->u
.addend
;
3856 case ld::Fixup::kindSubtractAddend
:
3857 addend
-= fit
->u
.addend
;
3860 throw "unexpected fixup kind for binding";
3863 case ld::Fixup::k1of3
:
3865 switch ( fit
->kind
) {
3866 case ld::Fixup::kindAddAddend
:
3867 addend
+= fit
->u
.addend
;
3869 case ld::Fixup::kindSubtractAddend
:
3870 addend
-= fit
->u
.addend
;
3873 throw "unexpected fixup kind for binding";
3877 throw "unexpected fixup cluster size for binding";
3883 void OutputFile::generateLinkEditInfo(ld::Internal
& state
)
3885 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
3886 ld::Internal::FinalSection
* sect
= *sit
;
3887 // record end of last __TEXT section encrypted iPhoneOS apps.
3888 if ( _options
.makeEncryptable() && (strcmp(sect
->segmentName(), "__TEXT") == 0) && (strcmp(sect
->sectionName(), "__oslogstring") != 0) ) {
3889 _encryptedTEXTendOffset
= pageAlign(sect
->fileOffset
+ sect
->size
);
3891 bool objc1ClassRefSection
= ( (sect
->type() == ld::Section::typeCStringPointer
)
3892 && (strcmp(sect
->sectionName(), "__cls_refs") == 0)
3893 && (strcmp(sect
->segmentName(), "__OBJC") == 0) );
3894 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
3895 const ld::Atom
* atom
= *ait
;
3897 // Record regular atoms that override a dylib's weak definitions
3898 if ( (atom
->scope() == ld::Atom::scopeGlobal
) && atom
->overridesDylibsWeakDef() ) {
3899 if ( _options
.makeCompressedDyldInfo() ) {
3900 uint8_t wtype
= BIND_TYPE_OVERRIDE_OF_WEAKDEF_IN_DYLIB
;
3901 bool nonWeakDef
= (atom
->combine() == ld::Atom::combineNever
);
3902 _weakBindingInfo
.push_back(BindingInfo(wtype
, atom
->name(), nonWeakDef
, atom
->finalAddress(), 0));
3904 this->overridesWeakExternalSymbols
= true;
3905 if ( _options
.warnWeakExports() )
3906 warning("overrides weak external symbol: %s", atom
->name());
3909 ld::Fixup
* fixupWithTarget
= NULL
;
3910 ld::Fixup
* fixupWithMinusTarget
= NULL
;
3911 ld::Fixup
* fixupWithStore
= NULL
;
3912 ld::Fixup
* fixupWithAddend
= NULL
;
3913 const ld::Atom
* target
= NULL
;
3914 const ld::Atom
* minusTarget
= NULL
;
3915 uint64_t targetAddend
= 0;
3916 uint64_t minusTargetAddend
= 0;
3917 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
3918 if ( fit
->firstInCluster() ) {
3919 fixupWithTarget
= NULL
;
3920 fixupWithMinusTarget
= NULL
;
3921 fixupWithStore
= NULL
;
3925 minusTargetAddend
= 0;
3927 if ( this->setsTarget(fit
->kind
) ) {
3928 switch ( fit
->binding
) {
3929 case ld::Fixup::bindingNone
:
3930 case ld::Fixup::bindingByNameUnbound
:
3932 case ld::Fixup::bindingByContentBound
:
3933 case ld::Fixup::bindingDirectlyBound
:
3934 fixupWithTarget
= fit
;
3935 target
= fit
->u
.target
;
3937 case ld::Fixup::bindingsIndirectlyBound
:
3938 fixupWithTarget
= fit
;
3939 target
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
3942 assert(target
!= NULL
);
3944 switch ( fit
->kind
) {
3945 case ld::Fixup::kindAddAddend
:
3946 targetAddend
= fit
->u
.addend
;
3947 fixupWithAddend
= fit
;
3949 case ld::Fixup::kindSubtractAddend
:
3950 minusTargetAddend
= fit
->u
.addend
;
3951 fixupWithAddend
= fit
;
3953 case ld::Fixup::kindSubtractTargetAddress
:
3954 switch ( fit
->binding
) {
3955 case ld::Fixup::bindingNone
:
3956 case ld::Fixup::bindingByNameUnbound
:
3958 case ld::Fixup::bindingByContentBound
:
3959 case ld::Fixup::bindingDirectlyBound
:
3960 fixupWithMinusTarget
= fit
;
3961 minusTarget
= fit
->u
.target
;
3963 case ld::Fixup::bindingsIndirectlyBound
:
3964 fixupWithMinusTarget
= fit
;
3965 minusTarget
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
3968 assert(minusTarget
!= NULL
);
3970 case ld::Fixup::kindDataInCodeStartData
:
3971 case ld::Fixup::kindDataInCodeStartJT8
:
3972 case ld::Fixup::kindDataInCodeStartJT16
:
3973 case ld::Fixup::kindDataInCodeStartJT32
:
3974 case ld::Fixup::kindDataInCodeStartJTA32
:
3975 case ld::Fixup::kindDataInCodeEnd
:
3976 hasDataInCode
= true;
3981 if ( this->isStore(fit
->kind
) ) {
3982 fixupWithStore
= fit
;
3984 if ( fit
->lastInCluster() ) {
3985 if ( (fixupWithStore
!= NULL
) && (target
!= NULL
) ) {
3986 if ( _options
.outputKind() == Options::kObjectFile
) {
3987 this->addSectionRelocs(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithAddend
, fixupWithStore
,
3988 target
, minusTarget
, targetAddend
, minusTargetAddend
);
3991 if ( _options
.makeCompressedDyldInfo() ) {
3992 this->addDyldInfo(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithStore
,
3993 target
, minusTarget
, targetAddend
, minusTargetAddend
);
3996 this->addClassicRelocs(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithStore
,
3997 target
, minusTarget
, targetAddend
, minusTargetAddend
);
4001 else if ( objc1ClassRefSection
&& (target
!= NULL
) && (fixupWithStore
== NULL
) ) {
4002 // check for class refs to lazy loaded dylibs
4003 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
4004 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
4005 throwf("illegal class reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
4014 void OutputFile::noteTextReloc(const ld::Atom
* atom
, const ld::Atom
* target
)
4016 if ( (atom
->contentType() == ld::Atom::typeStub
) || (atom
->contentType() == ld::Atom::typeStubHelper
) ) {
4017 // silently let stubs (synthesized by linker) use text relocs
4019 else if ( _options
.allowTextRelocs() ) {
4020 if ( _options
.warnAboutTextRelocs() )
4021 warning("text reloc in %s to %s", atom
->name(), target
->name());
4023 else if ( _options
.positionIndependentExecutable() && (_options
.outputKind() == Options::kDynamicExecutable
)
4024 && ((_options
.iOSVersionMin() >= ld::iOS_4_3
) || (_options
.macosxVersionMin() >= ld::mac10_7
)) ) {
4025 if ( ! this->pieDisabled
) {
4026 switch ( _options
.architecture()) {
4027 #if SUPPORT_ARCH_arm64
4028 case CPU_TYPE_ARM64
:
4030 #if SUPPORT_ARCH_arm64
4032 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
4033 throwf("Absolute addressing not allowed in arm64 code but used in '%s' referencing '%s'", demangledName
, _options
.demangleSymbol(target
->name()));
4037 warning("PIE disabled. Absolute addressing (perhaps -mdynamic-no-pic) not allowed in code signed PIE, "
4038 "but used in %s from %s. "
4039 "To fix this warning, don't compile with -mdynamic-no-pic or link with -Wl,-no_pie",
4040 atom
->name(), atom
->file()->path());
4043 this->pieDisabled
= true;
4045 else if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) ) {
4046 throwf("illegal text-relocoation (direct reference) to (global,weak) %s in %s from %s in %s", target
->name(), target
->file()->path(), atom
->name(), atom
->file()->path());
4049 if ( (target
->file() != NULL
) && (atom
->file() != NULL
) )
4050 throwf("illegal text-relocation to '%s' in %s from '%s' in %s", target
->name(), target
->file()->path(), atom
->name(), atom
->file()->path());
4052 throwf("illegal text reloc in '%s' to '%s'", atom
->name(), target
->name());
4056 void OutputFile::addDyldInfo(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
4057 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
, ld::Fixup
* fixupWithStore
,
4058 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
4059 uint64_t targetAddend
, uint64_t minusTargetAddend
)
4061 if ( sect
->isSectionHidden() )
4064 // no need to rebase or bind PCRel stores
4065 if ( this->isPcRelStore(fixupWithStore
->kind
) ) {
4066 // as long as target is in same linkage unit
4067 if ( (target
== NULL
) || (target
->definition() != ld::Atom::definitionProxy
) ) {
4068 // make sure target is not global and weak
4069 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
)) {
4070 if ( (atom
->section().type() == ld::Section::typeCFI
)
4071 || (atom
->section().type() == ld::Section::typeDtraceDOF
)
4072 || (atom
->section().type() == ld::Section::typeUnwindInfo
) ) {
4073 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4076 // <rdar://problem/13700961> spurious warning when weak function has reference to itself
4077 if ( fixupWithTarget
->binding
== ld::Fixup::bindingDirectlyBound
) {
4078 // ok to ignore pc-rel references within a weak function to itself
4081 // Have direct reference to weak-global. This should be an indrect reference
4082 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
4083 warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
4084 "This was likely caused by different translation units being compiled with different visibility settings.",
4085 demangledName
, atom
->file()->path(), _options
.demangleSymbol(target
->name()), target
->file()->path());
4091 // no need to rebase or bind PIC internal pointer diff
4092 if ( minusTarget
!= NULL
) {
4093 // with pointer diffs, both need to be in same linkage unit
4094 assert(minusTarget
->definition() != ld::Atom::definitionProxy
);
4095 assert(target
!= NULL
);
4096 assert(target
->definition() != ld::Atom::definitionProxy
);
4097 if ( target
== minusTarget
) {
4098 // This is a compile time constant and could have been optimized away by compiler
4102 // check if target of pointer-diff is global and weak
4103 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) ) {
4104 if ( (atom
->section().type() == ld::Section::typeCFI
)
4105 || (atom
->section().type() == ld::Section::typeDtraceDOF
)
4106 || (atom
->section().type() == ld::Section::typeUnwindInfo
) ) {
4107 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4110 // Have direct reference to weak-global. This should be an indrect reference
4111 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
4112 warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
4113 "This was likely caused by different translation units being compiled with different visibility settings.",
4114 demangledName
, atom
->file()->path(), _options
.demangleSymbol(target
->name()), target
->file()->path());
4119 // no need to rebase or bind an atom's references to itself if the output is not slidable
4120 if ( (atom
== target
) && !_options
.outputSlidable() )
4123 // cluster has no target, so needs no rebasing or binding
4124 if ( target
== NULL
)
4127 bool inReadOnlySeg
= ((_options
.initialSegProtection(sect
->segmentName()) & VM_PROT_WRITE
) == 0);
4128 bool needsRebase
= false;
4129 bool needsBinding
= false;
4130 bool needsLazyBinding
= false;
4131 bool needsWeakBinding
= false;
4133 uint8_t rebaseType
= REBASE_TYPE_POINTER
;
4134 uint8_t type
= BIND_TYPE_POINTER
;
4135 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
4136 bool weak_import
= (fixupWithTarget
->weakImport
|| ((dylib
!= NULL
) && dylib
->forcedWeakLinked()));
4137 uint64_t address
= atom
->finalAddress() + fixupWithTarget
->offsetInAtom
;
4138 uint64_t addend
= targetAddend
- minusTargetAddend
;
4140 // special case lazy pointers
4141 if ( fixupWithTarget
->kind
== ld::Fixup::kindLazyTarget
) {
4142 assert(fixupWithTarget
->u
.target
== target
);
4143 assert(addend
== 0);
4144 // lazy dylib lazy pointers do not have any dyld info
4145 if ( atom
->section().type() == ld::Section::typeLazyDylibPointer
)
4147 // lazy binding to weak definitions are done differently
4148 // they are directly bound to target, then have a weak bind in case of a collision
4149 if ( target
->combine() == ld::Atom::combineByName
) {
4150 if ( target
->definition() == ld::Atom::definitionProxy
) {
4151 // weak def exported from another dylib
4152 // must non-lazy bind to it plus have weak binding info in case of collision
4153 needsBinding
= true;
4154 needsWeakBinding
= true;
4157 // weak def in this linkage unit.
4158 // just rebase, plus have weak binding info in case of collision
4159 // this will be done by other cluster on lazy pointer atom
4162 else if ( target
->contentType() == ld::Atom::typeResolver
) {
4163 // <rdar://problem/8553647> Hidden resolver functions should not have lazy binding info
4164 // <rdar://problem/12629331> Resolver function run before initializers when overriding the dyld shared cache
4165 // The lazy pointers used by stubs used when non-lazy binding to a resolver are not normal lazy pointers
4166 // and should not be in lazy binding info.
4167 needsLazyBinding
= false;
4170 // normal case of a pointer to non-weak-def symbol, so can lazily bind
4171 needsLazyBinding
= true;
4175 // everything except lazy pointers
4176 switch ( target
->definition() ) {
4177 case ld::Atom::definitionProxy
:
4178 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
4179 throwf("illegal data reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
4180 if ( target
->contentType() == ld::Atom::typeTLV
) {
4181 if ( sect
->type() != ld::Section::typeTLVPointers
)
4182 throwf("illegal data reference in %s to thread local variable %s in dylib %s",
4183 atom
->name(), target
->name(), dylib
->path());
4185 if ( inReadOnlySeg
)
4186 type
= BIND_TYPE_TEXT_ABSOLUTE32
;
4187 needsBinding
= true;
4188 if ( target
->combine() == ld::Atom::combineByName
)
4189 needsWeakBinding
= true;
4191 case ld::Atom::definitionRegular
:
4192 case ld::Atom::definitionTentative
:
4193 // only slideable images need rebasing info
4194 if ( _options
.outputSlidable() ) {
4197 // references to internal symbol never need binding
4198 if ( target
->scope() != ld::Atom::scopeGlobal
)
4200 // reference to global weak def needs weak binding
4201 if ( (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) )
4202 needsWeakBinding
= true;
4203 else if ( _options
.outputKind() == Options::kDynamicExecutable
) {
4204 // in main executables, the only way regular symbols are indirected is if -interposable is used
4205 if ( _options
.interposable(target
->name()) ) {
4206 needsRebase
= false;
4207 needsBinding
= true;
4211 // for flat-namespace or interposable two-level-namespace
4212 // all references to exported symbols get indirected
4213 if ( (_options
.nameSpace() != Options::kTwoLevelNameSpace
) || _options
.interposable(target
->name()) ) {
4214 // <rdar://problem/5254468> no external relocs for flat objc classes
4215 if ( strncmp(target
->name(), ".objc_class_", 12) == 0 )
4217 // no rebase info for references to global symbols that will have binding info
4218 needsRebase
= false;
4219 needsBinding
= true;
4221 else if ( _options
.forceCoalesce(target
->name()) ) {
4222 needsWeakBinding
= true;
4226 case ld::Atom::definitionAbsolute
:
4231 // <rdar://problem/13828711> if target is an import alias, use base of alias
4232 if ( target
->isAlias() && (target
->definition() == ld::Atom::definitionProxy
) ) {
4233 for (ld::Fixup::iterator fit
= target
->fixupsBegin(), end
=target
->fixupsEnd(); fit
!= end
; ++fit
) {
4234 if ( fit
->firstInCluster() ) {
4235 if ( fit
->kind
== ld::Fixup::kindNoneFollowOn
) {
4236 if ( fit
->binding
== ld::Fixup::bindingDirectlyBound
) {
4237 //fprintf(stderr, "switching import of %s to import of %s\n", target->name(), fit->u.target->name());
4238 target
= fit
->u
.target
;
4245 // record dyld info for this cluster
4246 if ( needsRebase
) {
4247 if ( inReadOnlySeg
) {
4248 noteTextReloc(atom
, target
);
4249 sect
->hasLocalRelocs
= true; // so dyld knows to change permissions on __TEXT segment
4250 rebaseType
= REBASE_TYPE_TEXT_ABSOLUTE32
;
4252 if ( _options
.sharedRegionEligible() ) {
4253 // <rdar://problem/13287063> when range checking, ignore high byte of arm64 addends
4254 uint64_t checkAddend
= addend
;
4255 if ( (_options
.architecture() == CPU_TYPE_ARM64
)
4257 checkAddend
&= 0x0FFFFFFFFFFFFFFFULL
;
4258 if ( checkAddend
!= 0 ) {
4259 // make sure the addend does not cause the pointer to point outside the target's segment
4260 // if it does, update_dyld_shared_cache will not be able to put this dylib into the shared cache
4261 uint64_t targetAddress
= target
->finalAddress();
4262 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4263 ld::Internal::FinalSection
* sct
= *sit
;
4264 uint64_t sctEnd
= (sct
->address
+sct
->size
);
4265 if ( (sct
->address
<= targetAddress
) && (targetAddress
< sctEnd
) ) {
4266 if ( (targetAddress
+checkAddend
) > sctEnd
) {
4267 warning("data symbol %s from %s has pointer to %s + 0x%08llX. "
4268 "That large of an addend may disable %s from being put in the dyld shared cache.",
4269 atom
->name(), atom
->file()->path(), target
->name(), addend
, _options
.installPath() );
4275 _rebaseInfo
.push_back(RebaseInfo(rebaseType
, address
));
4277 if ( needsBinding
) {
4278 if ( inReadOnlySeg
) {
4279 noteTextReloc(atom
, target
);
4280 sect
->hasExternalRelocs
= true; // so dyld knows to change permissions on __TEXT segment
4282 _bindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4284 if ( needsLazyBinding
) {
4285 if ( _options
.bindAtLoad() )
4286 _bindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4288 _lazyBindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4290 if ( needsWeakBinding
)
4291 _weakBindingInfo
.push_back(BindingInfo(type
, 0, target
->name(), false, address
, addend
));
4295 void OutputFile::addClassicRelocs(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
4296 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
, ld::Fixup
* fixupWithStore
,
4297 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
4298 uint64_t targetAddend
, uint64_t minusTargetAddend
)
4300 if ( sect
->isSectionHidden() )
4303 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4304 if ( sect
->type() == ld::Section::typeNonLazyPointer
) {
4305 // except kexts and static pie which *do* use relocations
4306 switch (_options
.outputKind()) {
4307 case Options::kKextBundle
:
4309 case Options::kStaticExecutable
:
4310 if ( _options
.positionIndependentExecutable() )
4312 // else fall into default case
4314 assert(target
!= NULL
);
4315 assert(fixupWithTarget
!= NULL
);
4320 // no need to rebase or bind PCRel stores
4321 if ( this->isPcRelStore(fixupWithStore
->kind
) ) {
4322 // as long as target is in same linkage unit
4323 if ( (target
== NULL
) || (target
->definition() != ld::Atom::definitionProxy
) )
4327 // no need to rebase or bind PIC internal pointer diff
4328 if ( minusTarget
!= NULL
) {
4329 // with pointer diffs, both need to be in same linkage unit
4330 assert(minusTarget
->definition() != ld::Atom::definitionProxy
);
4331 assert(target
!= NULL
);
4332 assert(target
->definition() != ld::Atom::definitionProxy
);
4333 // make sure target is not global and weak
4334 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
)
4335 && (atom
->section().type() != ld::Section::typeCFI
)
4336 && (atom
->section().type() != ld::Section::typeDtraceDOF
)
4337 && (atom
->section().type() != ld::Section::typeUnwindInfo
)
4338 && (minusTarget
!= target
) ) {
4339 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4340 throwf("bad codegen, pointer diff in %s to global weak symbol %s", atom
->name(), target
->name());
4345 // cluster has no target, so needs no rebasing or binding
4346 if ( target
== NULL
)
4349 assert(_localRelocsAtom
!= NULL
);
4350 uint64_t relocAddress
= atom
->finalAddress() + fixupWithTarget
->offsetInAtom
- _localRelocsAtom
->relocBaseAddress(state
);
4352 bool inReadOnlySeg
= ( strcmp(sect
->segmentName(), "__TEXT") == 0 );
4353 bool needsLocalReloc
= false;
4354 bool needsExternReloc
= false;
4356 switch ( fixupWithStore
->kind
) {
4357 case ld::Fixup::kindLazyTarget
:
4358 // lazy pointers don't need relocs
4360 case ld::Fixup::kindStoreLittleEndian32
:
4361 case ld::Fixup::kindStoreLittleEndian64
:
4362 case ld::Fixup::kindStoreBigEndian32
:
4363 case ld::Fixup::kindStoreBigEndian64
:
4364 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4365 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4366 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
4367 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
4369 switch ( target
->definition() ) {
4370 case ld::Atom::definitionProxy
:
4371 needsExternReloc
= true;
4373 case ld::Atom::definitionRegular
:
4374 case ld::Atom::definitionTentative
:
4375 // only slideable images need local relocs
4376 if ( _options
.outputSlidable() )
4377 needsLocalReloc
= true;
4378 // references to internal symbol never need binding
4379 if ( target
->scope() != ld::Atom::scopeGlobal
)
4381 // reference to global weak def needs weak binding in dynamic images
4382 if ( (target
->combine() == ld::Atom::combineByName
)
4383 && (target
->definition() == ld::Atom::definitionRegular
)
4384 && (_options
.outputKind() != Options::kStaticExecutable
)
4385 && (_options
.outputKind() != Options::kPreload
)
4386 && (atom
!= target
) ) {
4387 needsExternReloc
= true;
4389 else if ( _options
.outputKind() == Options::kDynamicExecutable
) {
4390 // in main executables, the only way regular symbols are indirected is if -interposable is used
4391 if ( _options
.interposable(target
->name()) )
4392 needsExternReloc
= true;
4395 // for flat-namespace or interposable two-level-namespace
4396 // all references to exported symbols get indirected
4397 if ( (_options
.nameSpace() != Options::kTwoLevelNameSpace
) || _options
.interposable(target
->name()) ) {
4398 // <rdar://problem/5254468> no external relocs for flat objc classes
4399 if ( strncmp(target
->name(), ".objc_class_", 12) == 0 )
4401 // no rebase info for references to global symbols that will have binding info
4402 needsExternReloc
= true;
4405 if ( needsExternReloc
)
4406 needsLocalReloc
= false;
4408 case ld::Atom::definitionAbsolute
:
4411 if ( needsExternReloc
) {
4412 if ( inReadOnlySeg
)
4413 noteTextReloc(atom
, target
);
4414 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
4415 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
4416 throwf("illegal data reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
4417 _externalRelocsAtom
->addExternalPointerReloc(relocAddress
, target
);
4418 sect
->hasExternalRelocs
= true;
4419 fixupWithTarget
->contentAddendOnly
= true;
4421 else if ( needsLocalReloc
) {
4422 assert(target
!= NULL
);
4423 if ( inReadOnlySeg
)
4424 noteTextReloc(atom
, target
);
4425 _localRelocsAtom
->addPointerReloc(relocAddress
, target
->machoSection());
4426 sect
->hasLocalRelocs
= true;
4429 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
4430 #if SUPPORT_ARCH_arm64
4431 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
4433 if ( _options
.outputKind() == Options::kKextBundle
) {
4434 assert(target
!= NULL
);
4435 if ( target
->definition() == ld::Atom::definitionProxy
) {
4436 _externalRelocsAtom
->addExternalCallSiteReloc(relocAddress
, target
);
4437 fixupWithStore
->contentAddendOnly
= true;
4442 case ld::Fixup::kindStoreARMLow16
:
4443 case ld::Fixup::kindStoreThumbLow16
:
4444 // no way to encode rebasing of binding for these instructions
4445 if ( _options
.outputSlidable() || (target
->definition() == ld::Atom::definitionProxy
) )
4446 throwf("no supported runtime lo16 relocation in %s from %s to %s", atom
->name(), atom
->file()->path(), target
->name());
4449 case ld::Fixup::kindStoreARMHigh16
:
4450 case ld::Fixup::kindStoreThumbHigh16
:
4451 // no way to encode rebasing of binding for these instructions
4452 if ( _options
.outputSlidable() || (target
->definition() == ld::Atom::definitionProxy
) )
4453 throwf("no supported runtime hi16 relocation in %s from %s to %s", atom
->name(), atom
->file()->path(), target
->name());
4462 bool OutputFile::useExternalSectionReloc(const ld::Atom
* atom
, const ld::Atom
* target
, ld::Fixup
* fixupWithTarget
)
4464 if ( (_options
.architecture() == CPU_TYPE_X86_64
)
4465 || (_options
.architecture() == CPU_TYPE_ARM64
)
4467 // x86_64 and ARM64 use external relocations for everthing that has a symbol
4468 return ( target
->symbolTableInclusion() != ld::Atom::symbolTableNotIn
);
4471 // <rdar://problem/9513487> support arm branch interworking in -r mode
4472 if ( (_options
.architecture() == CPU_TYPE_ARM
) && (_options
.outputKind() == Options::kObjectFile
) ) {
4473 if ( atom
->isThumb() != target
->isThumb() ) {
4474 switch ( fixupWithTarget
->kind
) {
4475 // have branch that switches mode, then might be 'b' not 'bl'
4476 // Force external relocation, since no way to do local reloc for 'b'
4477 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
4478 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
4486 if ( (_options
.architecture() == CPU_TYPE_I386
) && (_options
.outputKind() == Options::kObjectFile
) ) {
4487 if ( target
->contentType() == ld::Atom::typeTLV
)
4491 // most architectures use external relocations only for references
4492 // to a symbol in another translation unit or for references to "weak symbols" or tentative definitions
4493 assert(target
!= NULL
);
4494 if ( target
->definition() == ld::Atom::definitionProxy
)
4496 if ( (target
->definition() == ld::Atom::definitionTentative
) && ! _options
.makeTentativeDefinitionsReal() )
4498 if ( target
->scope() != ld::Atom::scopeGlobal
)
4500 if ( (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) )
4505 bool OutputFile::useSectionRelocAddend(ld::Fixup
* fixupWithTarget
)
4507 #if SUPPORT_ARCH_arm64
4508 if ( _options
.architecture() == CPU_TYPE_ARM64
) {
4509 switch ( fixupWithTarget
->kind
) {
4510 case ld::Fixup::kindStoreARM64Branch26
:
4511 case ld::Fixup::kindStoreARM64Page21
:
4512 case ld::Fixup::kindStoreARM64PageOff12
:
4525 void OutputFile::addSectionRelocs(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
4526 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
,
4527 ld::Fixup
* fixupWithAddend
, ld::Fixup
* fixupWithStore
,
4528 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
4529 uint64_t targetAddend
, uint64_t minusTargetAddend
)
4531 if ( sect
->isSectionHidden() )
4534 // in -r mode where there will be no labels on __eh_frame section, there is no need for relocations
4535 if ( (sect
->type() == ld::Section::typeCFI
) && _options
.removeEHLabels() )
4538 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4539 if ( sect
->type() == ld::Section::typeNonLazyPointer
)
4542 // tentative defs don't have any relocations
4543 if ( sect
->type() == ld::Section::typeTentativeDefs
)
4546 assert(target
!= NULL
);
4547 assert(fixupWithTarget
!= NULL
);
4548 bool targetUsesExternalReloc
= this->useExternalSectionReloc(atom
, target
, fixupWithTarget
);
4549 bool minusTargetUsesExternalReloc
= (minusTarget
!= NULL
) && this->useExternalSectionReloc(atom
, minusTarget
, fixupWithMinusTarget
);
4551 // in x86_64 and arm64 .o files an external reloc means the content contains just the addend
4552 if ( (_options
.architecture() == CPU_TYPE_X86_64
)
4553 || (_options
.architecture() == CPU_TYPE_ARM64
)
4555 if ( targetUsesExternalReloc
) {
4556 fixupWithTarget
->contentAddendOnly
= true;
4557 fixupWithStore
->contentAddendOnly
= true;
4558 if ( this->useSectionRelocAddend(fixupWithStore
) && (fixupWithAddend
!= NULL
) )
4559 fixupWithAddend
->contentIgnoresAddend
= true;
4561 if ( minusTargetUsesExternalReloc
)
4562 fixupWithMinusTarget
->contentAddendOnly
= true;
4565 // for other archs, content is addend only with (non pc-rel) pointers
4566 // pc-rel instructions are funny. If the target is _foo+8 and _foo is
4567 // external, then the pc-rel instruction *evalutates* to the address 8.
4568 if ( targetUsesExternalReloc
) {
4569 // TLV support for i386 acts like RIP relative addressing
4570 // The addend is the offset from the PICBase to the end of the instruction
4571 if ( (_options
.architecture() == CPU_TYPE_I386
)
4572 && (_options
.outputKind() == Options::kObjectFile
)
4573 && (fixupWithStore
->kind
== ld::Fixup::kindStoreX86PCRel32TLVLoad
) ) {
4574 fixupWithTarget
->contentAddendOnly
= true;
4575 fixupWithStore
->contentAddendOnly
= true;
4577 else if ( isPcRelStore(fixupWithStore
->kind
) ) {
4578 fixupWithTarget
->contentDetlaToAddendOnly
= true;
4579 fixupWithStore
->contentDetlaToAddendOnly
= true;
4581 else if ( minusTarget
== NULL
){
4582 fixupWithTarget
->contentAddendOnly
= true;
4583 fixupWithStore
->contentAddendOnly
= true;
4588 if ( fixupWithStore
!= NULL
) {
4589 _sectionsRelocationsAtom
->addSectionReloc(sect
, fixupWithStore
->kind
, atom
, fixupWithStore
->offsetInAtom
,
4590 targetUsesExternalReloc
, minusTargetUsesExternalReloc
,
4591 target
, targetAddend
, minusTarget
, minusTargetAddend
);
4596 void OutputFile::makeSplitSegInfo(ld::Internal
& state
)
4598 if ( !_options
.sharedRegionEligible() )
4601 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4602 ld::Internal::FinalSection
* sect
= *sit
;
4603 if ( sect
->isSectionHidden() )
4605 if ( strcmp(sect
->segmentName(), "__TEXT") != 0 )
4607 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4608 const ld::Atom
* atom
= *ait
;
4609 const ld::Atom
* target
= NULL
;
4610 const ld::Atom
* fromTarget
= NULL
;
4611 uint64_t accumulator
= 0;
4613 bool hadSubtract
= false;
4614 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
4615 if ( fit
->firstInCluster() )
4617 if ( this->setsTarget(fit
->kind
) ) {
4618 accumulator
= addressOf(state
, fit
, &target
);
4619 thumbTarget
= targetIsThumb(state
, fit
);
4623 switch ( fit
->kind
) {
4624 case ld::Fixup::kindSubtractTargetAddress
:
4625 accumulator
-= addressOf(state
, fit
, &fromTarget
);
4628 case ld::Fixup::kindAddAddend
:
4629 accumulator
+= fit
->u
.addend
;
4631 case ld::Fixup::kindSubtractAddend
:
4632 accumulator
-= fit
->u
.addend
;
4634 case ld::Fixup::kindStoreBigEndian32
:
4635 case ld::Fixup::kindStoreLittleEndian32
:
4636 case ld::Fixup::kindStoreLittleEndian64
:
4637 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4638 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4639 // if no subtract, then this is an absolute pointer which means
4640 // there is also a text reloc which update_dyld_shared_cache will use.
4641 if ( ! hadSubtract
)
4644 case ld::Fixup::kindStoreX86PCRel32
:
4645 case ld::Fixup::kindStoreX86PCRel32_1
:
4646 case ld::Fixup::kindStoreX86PCRel32_2
:
4647 case ld::Fixup::kindStoreX86PCRel32_4
:
4648 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
4649 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
4650 case ld::Fixup::kindStoreX86PCRel32GOT
:
4651 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
4652 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
4653 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
4654 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
4655 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
4656 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
4657 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
4658 case ld::Fixup::kindStoreARMLow16
:
4659 case ld::Fixup::kindStoreThumbLow16
:
4660 #if SUPPORT_ARCH_arm64
4661 case ld::Fixup::kindStoreARM64Page21
:
4662 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
4663 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
4664 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
4665 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
4666 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
4667 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
4668 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
4669 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
4670 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
4671 case ld::Fixup::kindStoreARM64PCRelToGOT
:
4673 assert(target
!= NULL
);
4674 if ( strcmp(sect
->segmentName(), target
->section().segmentName()) != 0 ) {
4675 _splitSegInfos
.push_back(SplitSegInfoEntry(atom
->finalAddress()+fit
->offsetInAtom
,fit
->kind
));
4678 case ld::Fixup::kindStoreARMHigh16
:
4679 case ld::Fixup::kindStoreThumbHigh16
:
4680 assert(target
!= NULL
);
4681 if ( strcmp(sect
->segmentName(), target
->section().segmentName()) != 0 ) {
4682 // hi16 needs to know upper 4-bits of low16 to compute carry
4683 uint32_t extra
= (accumulator
>> 12) & 0xF;
4684 _splitSegInfos
.push_back(SplitSegInfoEntry(atom
->finalAddress()+fit
->offsetInAtom
,fit
->kind
, extra
));
4687 case ld::Fixup::kindSetTargetImageOffset
:
4688 accumulator
= addressOf(state
, fit
, &target
);
4689 assert(target
!= NULL
);
4700 void OutputFile::makeSplitSegInfoV2(ld::Internal
& state
)
4702 static const bool log
= false;
4703 if ( !_options
.sharedRegionEligible() )
4706 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4707 ld::Internal::FinalSection
* sect
= *sit
;
4708 if ( sect
->isSectionHidden() )
4710 bool codeSection
= (sect
->type() == ld::Section::typeCode
);
4711 if (log
) fprintf(stderr
, "sect: %s, address=0x%llX\n", sect
->sectionName(), sect
->address
);
4712 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4713 const ld::Atom
* atom
= *ait
;
4714 const ld::Atom
* target
= NULL
;
4715 const ld::Atom
* fromTarget
= NULL
;
4716 uint32_t picBase
= 0;
4717 uint64_t accumulator
= 0;
4719 bool hadSubtract
= false;
4720 uint8_t fromSectionIndex
= atom
->machoSection();
4721 uint8_t toSectionIndex
;
4723 uint64_t fromOffset
= 0;
4724 uint64_t toOffset
= 0;
4725 uint64_t addend
= 0;
4726 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
4727 if ( fit
->firstInCluster() ) {
4729 hadSubtract
= false;
4733 toSectionIndex
= 255;
4734 fromOffset
= atom
->finalAddress() + fit
->offsetInAtom
- sect
->address
;
4736 if ( this->setsTarget(fit
->kind
) ) {
4737 accumulator
= addressAndTarget(state
, fit
, &target
);
4738 thumbTarget
= targetIsThumb(state
, fit
);
4741 toOffset
= accumulator
- state
.atomToSection
[target
]->address
;
4742 if ( target
->definition() != ld::Atom::definitionProxy
) {
4743 if ( target
->section().type() == ld::Section::typeMachHeader
)
4746 toSectionIndex
= target
->machoSection();
4749 switch ( fit
->kind
) {
4750 case ld::Fixup::kindSubtractTargetAddress
:
4751 accumulator
-= addressAndTarget(state
, fit
, &fromTarget
);
4754 case ld::Fixup::kindAddAddend
:
4755 accumulator
+= fit
->u
.addend
;
4756 addend
= fit
->u
.addend
;
4758 case ld::Fixup::kindSubtractAddend
:
4759 accumulator
-= fit
->u
.addend
;
4760 picBase
= fit
->u
.addend
;
4762 case ld::Fixup::kindSetLazyOffset
:
4764 case ld::Fixup::kindStoreBigEndian32
:
4765 case ld::Fixup::kindStoreLittleEndian32
:
4766 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4767 if ( kind
!= DYLD_CACHE_ADJ_V2_IMAGE_OFF_32
) {
4769 kind
= DYLD_CACHE_ADJ_V2_DELTA_32
;
4771 kind
= DYLD_CACHE_ADJ_V2_POINTER_32
;
4774 case ld::Fixup::kindStoreLittleEndian64
:
4775 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4777 kind
= DYLD_CACHE_ADJ_V2_DELTA_64
;
4779 kind
= DYLD_CACHE_ADJ_V2_POINTER_64
;
4781 case ld::Fixup::kindStoreX86PCRel32
:
4782 case ld::Fixup::kindStoreX86PCRel32_1
:
4783 case ld::Fixup::kindStoreX86PCRel32_2
:
4784 case ld::Fixup::kindStoreX86PCRel32_4
:
4785 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
4786 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
4787 case ld::Fixup::kindStoreX86PCRel32GOT
:
4788 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
4789 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
4790 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
4791 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
4792 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
4793 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
4794 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
4795 #if SUPPORT_ARCH_arm64
4796 case ld::Fixup::kindStoreARM64PCRelToGOT
:
4798 if ( (fromSectionIndex
!= toSectionIndex
) || !codeSection
)
4799 kind
= DYLD_CACHE_ADJ_V2_DELTA_32
;
4801 #if SUPPORT_ARCH_arm64
4802 case ld::Fixup::kindStoreARM64Page21
:
4803 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
4804 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
4805 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
4806 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
4807 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
4808 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
4809 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
4810 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
4811 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
4812 if ( fromSectionIndex
!= toSectionIndex
)
4813 kind
= DYLD_CACHE_ADJ_V2_ARM64_ADRP
;
4815 case ld::Fixup::kindStoreARM64PageOff12
:
4816 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
4817 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
4818 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
4819 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
4820 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
4821 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
4822 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
4823 if ( fromSectionIndex
!= toSectionIndex
)
4824 kind
= DYLD_CACHE_ADJ_V2_ARM64_OFF12
;
4826 case ld::Fixup::kindStoreARM64Branch26
:
4827 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
4828 if ( fromSectionIndex
!= toSectionIndex
)
4829 kind
= DYLD_CACHE_ADJ_V2_ARM64_BR26
;
4832 case ld::Fixup::kindStoreARMHigh16
:
4833 case ld::Fixup::kindStoreARMLow16
:
4834 if ( (fromSectionIndex
!= toSectionIndex
) && (fromTarget
== atom
) ) {
4835 kind
= DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT
;
4838 case ld::Fixup::kindStoreARMBranch24
:
4839 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
4840 if ( fromSectionIndex
!= toSectionIndex
)
4841 kind
= DYLD_CACHE_ADJ_V2_ARM_BR24
;
4843 case ld::Fixup::kindStoreThumbLow16
:
4844 case ld::Fixup::kindStoreThumbHigh16
:
4845 if ( (fromSectionIndex
!= toSectionIndex
) && (fromTarget
== atom
) ) {
4846 kind
= DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT
;
4849 case ld::Fixup::kindStoreThumbBranch22
:
4850 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
4851 if ( fromSectionIndex
!= toSectionIndex
)
4852 kind
= DYLD_CACHE_ADJ_V2_THUMB_BR22
;
4854 case ld::Fixup::kindSetTargetImageOffset
:
4855 kind
= DYLD_CACHE_ADJ_V2_IMAGE_OFF_32
;
4856 accumulator
= addressAndTarget(state
, fit
, &target
);
4857 assert(target
!= NULL
);
4858 toSectionIndex
= target
->machoSection();
4859 toOffset
= accumulator
- state
.atomToSection
[target
]->address
;
4865 if ( fit
->lastInCluster() ) {
4866 if ( (kind
!= 0) && (target
!= NULL
) && (target
->definition() != ld::Atom::definitionProxy
) ) {
4867 if ( !hadSubtract
&& addend
)
4869 assert(toSectionIndex
!= 255);
4870 if (log
) fprintf(stderr
, "from (%d.%s + 0x%llX) to (%d.%s + 0x%llX), kind=%d, atomAddr=0x%llX, sectAddr=0x%llx\n",
4871 fromSectionIndex
, sect
->sectionName(), fromOffset
, toSectionIndex
, state
.atomToSection
[target
]->sectionName(),
4872 toOffset
, kind
, atom
->finalAddress(), sect
->address
);
4873 _splitSegV2Infos
.push_back(SplitSegInfoV2Entry(fromSectionIndex
, fromOffset
, toSectionIndex
, toOffset
, kind
));
4882 void OutputFile::writeMapFile(ld::Internal
& state
)
4884 if ( _options
.generatedMapPath() != NULL
) {
4885 FILE* mapFile
= fopen(_options
.generatedMapPath(), "w");
4886 if ( mapFile
!= NULL
) {
4887 // write output path
4888 fprintf(mapFile
, "# Path: %s\n", _options
.outputFilePath());
4889 // write output architecure
4890 fprintf(mapFile
, "# Arch: %s\n", _options
.architectureName());
4892 //if ( fUUIDAtom != NULL ) {
4893 // const uint8_t* uuid = fUUIDAtom->getUUID();
4894 // fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
4895 // uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
4896 // uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
4898 // write table of object files
4899 std::map
<const ld::File
*, ld::File::Ordinal
> readerToOrdinal
;
4900 std::map
<ld::File::Ordinal
, const ld::File
*> ordinalToReader
;
4901 std::map
<const ld::File
*, uint32_t> readerToFileOrdinal
;
4902 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4903 ld::Internal::FinalSection
* sect
= *sit
;
4904 if ( sect
->isSectionHidden() )
4906 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4907 const ld::Atom
* atom
= *ait
;
4908 const ld::File
* reader
= atom
->originalFile();
4909 if ( reader
== NULL
)
4911 ld::File::Ordinal readerOrdinal
= reader
->ordinal();
4912 std::map
<const ld::File
*, ld::File::Ordinal
>::iterator pos
= readerToOrdinal
.find(reader
);
4913 if ( pos
== readerToOrdinal
.end() ) {
4914 readerToOrdinal
[reader
] = readerOrdinal
;
4915 ordinalToReader
[readerOrdinal
] = reader
;
4919 for (const ld::Atom
* atom
: state
.deadAtoms
) {
4920 const ld::File
* reader
= atom
->originalFile();
4921 if ( reader
== NULL
)
4923 ld::File::Ordinal readerOrdinal
= reader
->ordinal();
4924 std::map
<const ld::File
*, ld::File::Ordinal
>::iterator pos
= readerToOrdinal
.find(reader
);
4925 if ( pos
== readerToOrdinal
.end() ) {
4926 readerToOrdinal
[reader
] = readerOrdinal
;
4927 ordinalToReader
[readerOrdinal
] = reader
;
4930 fprintf(mapFile
, "# Object files:\n");
4931 fprintf(mapFile
, "[%3u] %s\n", 0, "linker synthesized");
4932 uint32_t fileIndex
= 1;
4933 for(std::map
<ld::File::Ordinal
, const ld::File
*>::iterator it
= ordinalToReader
.begin(); it
!= ordinalToReader
.end(); ++it
) {
4934 fprintf(mapFile
, "[%3u] %s\n", fileIndex
, it
->second
->path());
4935 readerToFileOrdinal
[it
->second
] = fileIndex
++;
4937 // write table of sections
4938 fprintf(mapFile
, "# Sections:\n");
4939 fprintf(mapFile
, "# Address\tSize \tSegment\tSection\n");
4940 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4941 ld::Internal::FinalSection
* sect
= *sit
;
4942 if ( sect
->isSectionHidden() )
4944 fprintf(mapFile
, "0x%08llX\t0x%08llX\t%s\t%s\n", sect
->address
, sect
->size
,
4945 sect
->segmentName(), sect
->sectionName());
4947 // write table of symbols
4948 fprintf(mapFile
, "# Symbols:\n");
4949 fprintf(mapFile
, "# Address\tSize \tFile Name\n");
4950 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4951 ld::Internal::FinalSection
* sect
= *sit
;
4952 if ( sect
->isSectionHidden() )
4954 //bool isCstring = (sect->type() == ld::Section::typeCString);
4955 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4957 const ld::Atom
* atom
= *ait
;
4958 const char* name
= atom
->name();
4959 // don't add auto-stripped aliases to .map file
4960 if ( (atom
->size() == 0) && (atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
) )
4962 if ( atom
->contentType() == ld::Atom::typeCString
) {
4963 strcpy(buffer
, "literal string: ");
4964 const char* s
= (char*)atom
->rawContentPointer();
4965 char* e
= &buffer
[4094];
4966 for (char* b
= &buffer
[strlen(buffer
)]; b
< e
;) {
4978 buffer
[4095] = '\0';
4981 else if ( (atom
->contentType() == ld::Atom::typeCFI
) && (strcmp(name
, "FDE") == 0) ) {
4982 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
4983 if ( (fit
->kind
== ld::Fixup::kindSetTargetAddress
) && (fit
->clusterSize
== ld::Fixup::k1of4
) ) {
4984 if ( (fit
->binding
== ld::Fixup::bindingDirectlyBound
)
4985 && (fit
->u
.target
->section().type() == ld::Section::typeCode
) ) {
4986 strcpy(buffer
, "FDE for: ");
4987 strlcat(buffer
, fit
->u
.target
->name(), 4096);
4993 else if ( atom
->contentType() == ld::Atom::typeNonLazyPointer
) {
4994 strcpy(buffer
, "non-lazy-pointer");
4995 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
4996 if ( fit
->binding
== ld::Fixup::bindingsIndirectlyBound
) {
4997 strcpy(buffer
, "non-lazy-pointer-to: ");
4998 strlcat(buffer
, state
.indirectBindingTable
[fit
->u
.bindingIndex
]->name(), 4096);
5001 else if ( fit
->binding
== ld::Fixup::bindingDirectlyBound
) {
5002 strcpy(buffer
, "non-lazy-pointer-to-local: ");
5003 strlcat(buffer
, fit
->u
.target
->name(), 4096);
5009 fprintf(mapFile
, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom
->finalAddress(), atom
->size(),
5010 readerToFileOrdinal
[atom
->originalFile()], name
);
5013 // preload check is hack until 26613948 is fixed
5014 if ( _options
.deadCodeStrip() && (_options
.outputKind() != Options::kPreload
) ) {
5015 fprintf(mapFile
, "\n");
5016 fprintf(mapFile
, "# Dead Stripped Symbols:\n");
5017 fprintf(mapFile
, "# \tSize \tFile Name\n");
5018 for (const ld::Atom
* atom
: state
.deadAtoms
) {
5020 const char* name
= atom
->name();
5021 // don't add auto-stripped aliases to .map file
5022 if ( (atom
->size() == 0) && (atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
) )
5024 if ( atom
->contentType() == ld::Atom::typeCString
) {
5025 strcpy(buffer
, "literal string: ");
5026 const char* s
= (char*)atom
->rawContentPointer();
5027 char* e
= &buffer
[4094];
5028 for (char* b
= &buffer
[strlen(buffer
)]; b
< e
;) {
5040 buffer
[4095] = '\0';
5043 fprintf(mapFile
, "<<dead>> \t0x%08llX\t[%3u] %s\n", atom
->size(),
5044 readerToFileOrdinal
[atom
->originalFile()], name
);
5050 warning("could not write map file: %s\n", _options
.generatedMapPath());
5055 void OutputFile::writeJSONEntry(ld::Internal
& state
)
5057 if ( _options
.traceEmitJSON() && (_options
.UUIDMode() != Options::kUUIDNone
) && (_options
.traceOutputFile() != NULL
) ) {
5059 // Convert the UUID to a string.
5060 const uint8_t* uuid
= _headersAndLoadCommandAtom
->getUUID();
5061 uuid_string_t uuidString
;
5063 uuid_unparse(uuid
, uuidString
);
5065 // Enumerate the dylibs.
5066 std::vector
<const ld::dylib::File
*> dynamicList
;
5067 std::vector
<const ld::dylib::File
*> upwardList
;
5068 std::vector
<const ld::dylib::File
*> reexportList
;
5070 for (const ld::dylib::File
* dylib
: _dylibsToLoad
) {
5072 if (dylib
->willBeUpwardDylib()) {
5074 upwardList
.push_back(dylib
);
5075 } else if (dylib
->willBeReExported()) {
5077 reexportList
.push_back(dylib
);
5080 dynamicList
.push_back(dylib
);
5085 * Build the JSON entry.
5088 std::string jsonEntry
= "{";
5090 jsonEntry
+= "\"uuid\":\"" + std::string(uuidString
) + "\",";
5092 // installPath() returns -final_output for non-dylibs
5093 const char* lastNameSlash
= strrchr(_options
.installPath(), '/');
5094 const char* leafName
= (lastNameSlash
!= NULL
) ? lastNameSlash
+1 : _options
.outputFilePath();
5095 jsonEntry
+= "\"name\":\"" + std::string(leafName
) + "\",";
5097 jsonEntry
+= "\"arch\":\"" + std::string(_options
.architectureName()) + "\"";
5099 if (dynamicList
.size() > 0) {
5100 jsonEntry
+= ",\"dynamic\":[";
5101 for (const ld::dylib::File
* dylib
: dynamicList
) {
5102 jsonEntry
+= "\"" + std::string(dylib
->path()) + "\"";
5103 if ((dylib
!= dynamicList
.back())) {
5110 if (upwardList
.size() > 0) {
5111 jsonEntry
+= ",\"upward-dynamic\":[";
5112 for (const ld::dylib::File
* dylib
: upwardList
) {
5113 jsonEntry
+= "\"" + std::string(dylib
->path()) + "\"";
5114 if ((dylib
!= upwardList
.back())) {
5121 if (reexportList
.size() > 0) {
5122 jsonEntry
+= ",\"re-exports\":[";
5123 for (const ld::dylib::File
* dylib
: reexportList
) {
5124 jsonEntry
+= "\"" + std::string(dylib
->path()) + "\"";
5125 if ((dylib
!= reexportList
.back())) {
5132 if (state
.archivePaths
.size() > 0) {
5133 jsonEntry
+= ",\"archives\":[";
5134 for (const std::string
& archivePath
: state
.archivePaths
) {
5135 jsonEntry
+= "\"" + std::string(archivePath
) + "\"";
5136 if ((archivePath
!= state
.archivePaths
.back())) {
5144 // Write the JSON entry to the trace file.
5145 std::ofstream
out(_options
.traceOutputFile(), ios::app
);
5150 // used to sort atoms with debug notes
5151 class DebugNoteSorter
5154 bool operator()(const ld::Atom
* left
, const ld::Atom
* right
) const
5156 // first sort by reader
5157 ld::File::Ordinal leftFileOrdinal
= left
->file()->ordinal();
5158 ld::File::Ordinal rightFileOrdinal
= right
->file()->ordinal();
5159 if ( leftFileOrdinal
!= rightFileOrdinal
)
5160 return (leftFileOrdinal
< rightFileOrdinal
);
5162 // then sort by atom objectAddress
5163 uint64_t leftAddr
= left
->finalAddress();
5164 uint64_t rightAddr
= right
->finalAddress();
5165 return leftAddr
< rightAddr
;
5170 const char* OutputFile::assureFullPath(const char* path
)
5172 if ( path
[0] == '/' )
5174 char cwdbuff
[MAXPATHLEN
];
5175 if ( getcwd(cwdbuff
, MAXPATHLEN
) != NULL
) {
5177 asprintf(&result
, "%s/%s", cwdbuff
, path
);
5178 if ( result
!= NULL
)
// Return the last-modification time of 'path', or 0 if the file cannot be
// stat()ed (missing file, permission error, etc.).
static time_t fileModTime(const char* path) {
	struct stat statBuffer;
	if ( stat(path, &statBuffer) == 0 ) {
		return statBuffer.st_mtime;
	}
	return 0;
}
5193 void OutputFile::synthesizeDebugNotes(ld::Internal
& state
)
5195 // -S means don't synthesize debug map
5196 if ( _options
.debugInfoStripping() == Options::kDebugInfoNone
)
5198 // make a vector of atoms that come from files compiled with dwarf debug info
5199 std::vector
<const ld::Atom
*> atomsNeedingDebugNotes
;
5200 std::set
<const ld::Atom
*> atomsWithStabs
;
5201 atomsNeedingDebugNotes
.reserve(1024);
5202 const ld::relocatable::File
* objFile
= NULL
;
5203 bool objFileHasDwarf
= false;
5204 bool objFileHasStabs
= false;
5205 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
5206 ld::Internal::FinalSection
* sect
= *sit
;
5207 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
5208 const ld::Atom
* atom
= *ait
;
5209 // no stabs for atoms that would not be in the symbol table
5210 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotIn
)
5212 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
)
5214 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel
)
5216 // no stabs for absolute symbols
5217 if ( atom
->definition() == ld::Atom::definitionAbsolute
)
5219 // no stabs for .eh atoms
5220 if ( atom
->contentType() == ld::Atom::typeCFI
)
5222 // no stabs for string literal atoms
5223 if ( atom
->contentType() == ld::Atom::typeCString
)
5225 // no stabs for kernel dtrace probes
5226 if ( (_options
.outputKind() == Options::kStaticExecutable
) && (strncmp(atom
->name(), "__dtrace_probe$", 15) == 0) )
5228 const ld::File
* file
= atom
->file();
5229 if ( file
!= NULL
) {
5230 if ( file
!= objFile
) {
5231 objFileHasDwarf
= false;
5232 objFileHasStabs
= false;
5233 objFile
= dynamic_cast<const ld::relocatable::File
*>(file
);
5234 if ( objFile
!= NULL
) {
5235 switch ( objFile
->debugInfo() ) {
5236 case ld::relocatable::File::kDebugInfoNone
:
5238 case ld::relocatable::File::kDebugInfoDwarf
:
5239 objFileHasDwarf
= true;
5241 case ld::relocatable::File::kDebugInfoStabs
:
5242 case ld::relocatable::File::kDebugInfoStabsUUID
:
5243 objFileHasStabs
= true;
5248 if ( objFileHasDwarf
)
5249 atomsNeedingDebugNotes
.push_back(atom
);
5250 if ( objFileHasStabs
)
5251 atomsWithStabs
.insert(atom
);
5256 // sort by file ordinal then atom ordinal
5257 std::sort(atomsNeedingDebugNotes
.begin(), atomsNeedingDebugNotes
.end(), DebugNoteSorter());
5259 // <rdar://problem/17689030> Add -add_ast_path option to linker which add N_AST stab entry to output
5260 const std::vector
<const char*>& astPaths
= _options
.astFilePaths();
5261 for (std::vector
<const char*>::const_iterator it
=astPaths
.begin(); it
!= astPaths
.end(); it
++) {
5262 const char* path
= *it
;
5264 ld::relocatable::File::Stab astStab
;
5265 astStab
.atom
= NULL
;
5266 astStab
.type
= N_AST
;
5269 astStab
.value
= fileModTime(path
);
5270 astStab
.string
= path
;
5271 state
.stabs
.push_back(astStab
);
5274 // synthesize "debug notes" and add them to master stabs vector
5275 const char* dirPath
= NULL
;
5276 const char* filename
= NULL
;
5277 bool wroteStartSO
= false;
5278 state
.stabs
.reserve(atomsNeedingDebugNotes
.size()*4);
5279 std::unordered_set
<const char*, CStringHash
, CStringEquals
> seenFiles
;
5280 for (std::vector
<const ld::Atom
*>::iterator it
=atomsNeedingDebugNotes
.begin(); it
!= atomsNeedingDebugNotes
.end(); it
++) {
5281 const ld::Atom
* atom
= *it
;
5282 const ld::File
* atomFile
= atom
->file();
5283 const ld::relocatable::File
* atomObjFile
= dynamic_cast<const ld::relocatable::File
*>(atomFile
);
5284 //fprintf(stderr, "debug note for %s\n", atom->name());
5285 const char* newPath
= atom
->translationUnitSource();
5286 if ( newPath
!= NULL
) {
5287 const char* newDirPath
;
5288 const char* newFilename
;
5289 const char* lastSlash
= strrchr(newPath
, '/');
5290 if ( lastSlash
== NULL
)
5292 newFilename
= lastSlash
+1;
5293 char* temp
= strdup(newPath
);
5295 // gdb like directory SO's to end in '/', but dwarf DW_AT_comp_dir usually does not have trailing '/'
5296 temp
[lastSlash
-newPath
+1] = '\0';
5297 // need SO's whenever the translation unit source file changes
5298 if ( (filename
== NULL
) || (strcmp(newFilename
,filename
) != 0) || (strcmp(newDirPath
,dirPath
) != 0)) {
5299 if ( filename
!= NULL
) {
5300 // translation unit change, emit ending SO
5301 ld::relocatable::File::Stab endFileStab
;
5302 endFileStab
.atom
= NULL
;
5303 endFileStab
.type
= N_SO
;
5304 endFileStab
.other
= 1;
5305 endFileStab
.desc
= 0;
5306 endFileStab
.value
= 0;
5307 endFileStab
.string
= "";
5308 state
.stabs
.push_back(endFileStab
);
5310 // new translation unit, emit start SO's
5311 ld::relocatable::File::Stab dirPathStab
;
5312 dirPathStab
.atom
= NULL
;
5313 dirPathStab
.type
= N_SO
;
5314 dirPathStab
.other
= 0;
5315 dirPathStab
.desc
= 0;
5316 dirPathStab
.value
= 0;
5317 dirPathStab
.string
= newDirPath
;
5318 state
.stabs
.push_back(dirPathStab
);
5319 ld::relocatable::File::Stab fileStab
;
5320 fileStab
.atom
= NULL
;
5321 fileStab
.type
= N_SO
;
5325 fileStab
.string
= newFilename
;
5326 state
.stabs
.push_back(fileStab
);
5327 // Synthesize OSO for start of file
5328 ld::relocatable::File::Stab objStab
;
5329 objStab
.atom
= NULL
;
5330 objStab
.type
= N_OSO
;
5331 // <rdar://problem/6337329> linker should put cpusubtype in n_sect field of nlist entry for N_OSO debug note entries
5332 objStab
.other
= atomFile
->cpuSubType();
5334 if ( atomObjFile
!= NULL
) {
5335 objStab
.string
= assureFullPath(atomObjFile
->debugInfoPath());
5336 objStab
.value
= atomObjFile
->debugInfoModificationTime();
5339 objStab
.string
= assureFullPath(atomFile
->path());
5340 objStab
.value
= atomFile
->modificationTime();
5342 state
.stabs
.push_back(objStab
);
5343 wroteStartSO
= true;
5344 // add the source file path to seenFiles so it does not show up in SOLs
5345 seenFiles
.insert(newFilename
);
5347 asprintf(&fullFilePath
, "%s%s", newDirPath
, newFilename
);
5348 // add both leaf path and full path
5349 seenFiles
.insert(fullFilePath
);
5351 filename
= newFilename
;
5352 dirPath
= newDirPath
;
5353 if ( atom
->section().type() == ld::Section::typeCode
) {
5354 // Synthesize BNSYM and start FUN stabs
5355 ld::relocatable::File::Stab beginSym
;
5356 beginSym
.atom
= atom
;
5357 beginSym
.type
= N_BNSYM
;
5361 beginSym
.string
= "";
5362 state
.stabs
.push_back(beginSym
);
5363 ld::relocatable::File::Stab startFun
;
5364 startFun
.atom
= atom
;
5365 startFun
.type
= N_FUN
;
5369 startFun
.string
= atom
->name();
5370 state
.stabs
.push_back(startFun
);
5371 // Synthesize any SOL stabs needed
5372 const char* curFile
= NULL
;
5373 for (ld::Atom::LineInfo::iterator lit
= atom
->beginLineInfo(); lit
!= atom
->endLineInfo(); ++lit
) {
5374 if ( lit
->fileName
!= curFile
) {
5375 if ( seenFiles
.count(lit
->fileName
) == 0 ) {
5376 seenFiles
.insert(lit
->fileName
);
5377 ld::relocatable::File::Stab sol
;
5383 sol
.string
= lit
->fileName
;
5384 state
.stabs
.push_back(sol
);
5386 curFile
= lit
->fileName
;
5389 // Synthesize end FUN and ENSYM stabs
5390 ld::relocatable::File::Stab endFun
;
5392 endFun
.type
= N_FUN
;
5397 state
.stabs
.push_back(endFun
);
5398 ld::relocatable::File::Stab endSym
;
5400 endSym
.type
= N_ENSYM
;
5405 state
.stabs
.push_back(endSym
);
5408 ld::relocatable::File::Stab globalsStab
;
5409 const char* name
= atom
->name();
5410 if ( atom
->scope() == ld::Atom::scopeTranslationUnit
) {
5411 // Synthesize STSYM stab for statics
5412 globalsStab
.atom
= atom
;
5413 globalsStab
.type
= N_STSYM
;
5414 globalsStab
.other
= 1;
5415 globalsStab
.desc
= 0;
5416 globalsStab
.value
= 0;
5417 globalsStab
.string
= name
;
5418 state
.stabs
.push_back(globalsStab
);
5421 // Synthesize GSYM stab for other globals
5422 globalsStab
.atom
= atom
;
5423 globalsStab
.type
= N_GSYM
;
5424 globalsStab
.other
= 1;
5425 globalsStab
.desc
= 0;
5426 globalsStab
.value
= 0;
5427 globalsStab
.string
= name
;
5428 state
.stabs
.push_back(globalsStab
);
5434 if ( wroteStartSO
) {
5436 ld::relocatable::File::Stab endFileStab
;
5437 endFileStab
.atom
= NULL
;
5438 endFileStab
.type
= N_SO
;
5439 endFileStab
.other
= 1;
5440 endFileStab
.desc
= 0;
5441 endFileStab
.value
= 0;
5442 endFileStab
.string
= "";
5443 state
.stabs
.push_back(endFileStab
);
5446 // copy any stabs from .o file
5447 std::set
<const ld::File
*> filesSeenWithStabs
;
5448 for (std::set
<const ld::Atom
*>::iterator it
=atomsWithStabs
.begin(); it
!= atomsWithStabs
.end(); it
++) {
5449 const ld::Atom
* atom
= *it
;
5450 objFile
= dynamic_cast<const ld::relocatable::File
*>(atom
->file());
5451 if ( objFile
!= NULL
) {
5452 if ( filesSeenWithStabs
.count(objFile
) == 0 ) {
5453 filesSeenWithStabs
.insert(objFile
);
5454 const std::vector
<ld::relocatable::File::Stab
>* stabs
= objFile
->stabs();
5455 if ( stabs
!= NULL
) {
5456 for(std::vector
<ld::relocatable::File::Stab
>::const_iterator sit
= stabs
->begin(); sit
!= stabs
->end(); ++sit
) {
5457 ld::relocatable::File::Stab stab
= *sit
;
5458 // ignore stabs associated with atoms that were dead stripped or coalesced away
5459 if ( (sit
->atom
!= NULL
) && (atomsWithStabs
.count(sit
->atom
) == 0) )
5461 // <rdar://problem/8284718> Value of N_SO stabs should be address of first atom from translation unit
5462 if ( (stab
.type
== N_SO
) && (stab
.string
!= NULL
) && (stab
.string
[0] != '\0') ) {
5465 state
.stabs
.push_back(stab
);