1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-*
3 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
27 #include <sys/types.h>
30 #include <sys/sysctl.h>
31 #include <sys/param.h>
32 #include <sys/mount.h>
37 #include <mach/mach_time.h>
38 #include <mach/vm_statistics.h>
39 #include <mach/mach_init.h>
40 #include <mach/mach_host.h>
41 #include <uuid/uuid.h>
43 #include <mach-o/dyld.h>
44 #include <mach-o/fat.h>
53 #include <unordered_set>
58 #include <CommonCrypto/CommonDigest.h>
59 #include <AvailabilityMacros.h>
61 #include "MachOTrie.hpp"
65 #include "OutputFile.h"
66 #include "Architectures.hpp"
67 #include "HeaderAndLoadCommands.hpp"
68 #include "LinkEdit.hpp"
69 #include "LinkEditClassic.hpp"
// Statistics counters — apparently tallying ADRP instructions that the linker's
// ARM64 optimization pass turned into NOPs vs. left untouched (inferred from
// names only; TODO confirm against the code that updates/prints them).
uint32_t sAdrpNoped = 0;
uint32_t sAdrpNotNoped = 0;
79 OutputFile::OutputFile(const Options
& opts
)
81 usesWeakExternalSymbols(false), overridesWeakExternalSymbols(false),
82 _noReExportedDylibs(false), pieDisabled(false), hasDataInCode(false),
83 headerAndLoadCommandsSection(NULL
),
84 rebaseSection(NULL
), bindingSection(NULL
), weakBindingSection(NULL
),
85 lazyBindingSection(NULL
), exportSection(NULL
),
86 splitSegInfoSection(NULL
), functionStartsSection(NULL
),
87 dataInCodeSection(NULL
), optimizationHintsSection(NULL
),
88 symbolTableSection(NULL
), stringPoolSection(NULL
),
89 localRelocationsSection(NULL
), externalRelocationsSection(NULL
),
90 sectionRelocationsSection(NULL
),
91 indirectSymbolTableSection(NULL
),
93 _hasDyldInfo(opts
.makeCompressedDyldInfo()),
94 _hasSymbolTable(true),
95 _hasSectionRelocations(opts
.outputKind() == Options::kObjectFile
),
96 _hasSplitSegInfo(opts
.sharedRegionEligible()),
97 _hasFunctionStartsInfo(opts
.addFunctionStarts()),
98 _hasDataInCodeInfo(opts
.addDataInCodeInfo()),
99 _hasDynamicSymbolTable(true),
100 _hasLocalRelocations(!opts
.makeCompressedDyldInfo()),
101 _hasExternalRelocations(!opts
.makeCompressedDyldInfo()),
102 _hasOptimizationHints(opts
.outputKind() == Options::kObjectFile
),
103 _encryptedTEXTstartOffset(0),
104 _encryptedTEXTendOffset(0),
105 _localSymbolsStartIndex(0),
106 _localSymbolsCount(0),
107 _globalSymbolsStartIndex(0),
108 _globalSymbolsCount(0),
109 _importSymbolsStartIndex(0),
110 _importSymbolsCount(0),
111 _sectionsRelocationsAtom(NULL
),
112 _localRelocsAtom(NULL
),
113 _externalRelocsAtom(NULL
),
114 _symbolTableAtom(NULL
),
115 _indirectSymbolTableAtom(NULL
),
116 _rebasingInfoAtom(NULL
),
117 _bindingInfoAtom(NULL
),
118 _lazyBindingInfoAtom(NULL
),
119 _weakBindingInfoAtom(NULL
),
120 _exportInfoAtom(NULL
),
121 _splitSegInfoAtom(NULL
),
122 _functionStartsAtom(NULL
),
123 _dataInCodeAtom(NULL
),
124 _optimizationHintsAtom(NULL
)
128 void OutputFile::dumpAtomsBySection(ld::Internal
& state
, bool printAtoms
)
130 fprintf(stderr
, "SORTED:\n");
131 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
132 fprintf(stderr
, "final section %p %s/%s %s start addr=0x%08llX, size=0x%08llX, alignment=%02d, fileOffset=0x%08llX\n",
133 (*it
), (*it
)->segmentName(), (*it
)->sectionName(), (*it
)->isSectionHidden() ? "(hidden)" : "",
134 (*it
)->address
, (*it
)->size
, (*it
)->alignment
, (*it
)->fileOffset
);
136 std::vector
<const ld::Atom
*>& atoms
= (*it
)->atoms
;
137 for (std::vector
<const ld::Atom
*>::iterator ait
= atoms
.begin(); ait
!= atoms
.end(); ++ait
) {
138 fprintf(stderr
, " %p (0x%04llX) %s\n", *ait
, (*ait
)->size(), (*ait
)->name());
142 fprintf(stderr
, "DYLIBS:\n");
143 for (std::vector
<ld::dylib::File
*>::iterator it
=state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
)
144 fprintf(stderr
, " %s\n", (*it
)->installPath());
147 void OutputFile::write(ld::Internal
& state
)
149 this->buildDylibOrdinalMapping(state
);
150 this->addLoadCommands(state
);
151 this->addLinkEdit(state
);
152 state
.setSectionSizesAndAlignments();
153 this->setLoadCommandsPadding(state
);
154 _fileSize
= state
.assignFileOffsets();
155 this->assignAtomAddresses(state
);
156 this->synthesizeDebugNotes(state
);
157 this->buildSymbolTable(state
);
158 this->generateLinkEditInfo(state
);
159 if ( _options
.sharedRegionEncodingV2() )
160 this->makeSplitSegInfoV2(state
);
162 this->makeSplitSegInfo(state
);
163 this->updateLINKEDITAddresses(state
);
164 //this->dumpAtomsBySection(state, false);
165 this->writeOutputFile(state
);
166 this->writeMapFile(state
);
167 this->writeJSONEntry(state
);
170 bool OutputFile::findSegment(ld::Internal
& state
, uint64_t addr
, uint64_t* start
, uint64_t* end
, uint32_t* index
)
172 uint32_t segIndex
= 0;
173 ld::Internal::FinalSection
* segFirstSection
= NULL
;
174 ld::Internal::FinalSection
* lastSection
= NULL
;
175 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
176 ld::Internal::FinalSection
* sect
= *it
;
177 if ( (segFirstSection
== NULL
) || strcmp(segFirstSection
->segmentName(), sect
->segmentName()) != 0 ) {
178 if ( segFirstSection
!= NULL
) {
179 //fprintf(stderr, "findSegment(0x%llX) seg changed to %s\n", addr, sect->segmentName());
180 if ( (addr
>= segFirstSection
->address
) && (addr
< lastSection
->address
+lastSection
->size
) ) {
181 *start
= segFirstSection
->address
;
182 *end
= lastSection
->address
+lastSection
->size
;
188 segFirstSection
= sect
;
196 void OutputFile::assignAtomAddresses(ld::Internal
& state
)
198 const bool log
= false;
199 if ( log
) fprintf(stderr
, "assignAtomAddresses()\n");
200 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
201 ld::Internal::FinalSection
* sect
= *sit
;
202 if ( log
) fprintf(stderr
, " section=%s/%s\n", sect
->segmentName(), sect
->sectionName());
203 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
204 const ld::Atom
* atom
= *ait
;
205 switch ( sect
-> type() ) {
206 case ld::Section::typeImportProxies
:
207 // want finalAddress() of all proxy atoms to be zero
208 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(0);
210 case ld::Section::typeAbsoluteSymbols
:
211 // want finalAddress() of all absolute atoms to be value of abs symbol
212 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(0);
214 case ld::Section::typeLinkEdit
:
215 // linkedit layout is assigned later
218 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(sect
->address
);
219 if ( log
) fprintf(stderr
, " atom=%p, addr=0x%08llX, name=%s\n", atom
, atom
->finalAddress(), atom
->name());
226 void OutputFile::updateLINKEDITAddresses(ld::Internal
& state
)
228 if ( _options
.makeCompressedDyldInfo() ) {
229 // build dylb rebasing info
230 assert(_rebasingInfoAtom
!= NULL
);
231 _rebasingInfoAtom
->encode();
233 // build dyld binding info
234 assert(_bindingInfoAtom
!= NULL
);
235 _bindingInfoAtom
->encode();
237 // build dyld lazy binding info
238 assert(_lazyBindingInfoAtom
!= NULL
);
239 _lazyBindingInfoAtom
->encode();
241 // build dyld weak binding info
242 assert(_weakBindingInfoAtom
!= NULL
);
243 _weakBindingInfoAtom
->encode();
245 // build dyld export info
246 assert(_exportInfoAtom
!= NULL
);
247 _exportInfoAtom
->encode();
250 if ( _options
.sharedRegionEligible() ) {
251 // build split seg info
252 assert(_splitSegInfoAtom
!= NULL
);
253 _splitSegInfoAtom
->encode();
256 if ( _options
.addFunctionStarts() ) {
257 // build function starts info
258 assert(_functionStartsAtom
!= NULL
);
259 _functionStartsAtom
->encode();
262 if ( _options
.addDataInCodeInfo() ) {
263 // build data-in-code info
264 assert(_dataInCodeAtom
!= NULL
);
265 _dataInCodeAtom
->encode();
268 if ( _hasOptimizationHints
) {
269 // build linker-optimization-hint info
270 assert(_optimizationHintsAtom
!= NULL
);
271 _optimizationHintsAtom
->encode();
274 // build classic symbol table
275 assert(_symbolTableAtom
!= NULL
);
276 _symbolTableAtom
->encode();
277 assert(_indirectSymbolTableAtom
!= NULL
);
278 _indirectSymbolTableAtom
->encode();
280 // add relocations to .o files
281 if ( _options
.outputKind() == Options::kObjectFile
) {
282 assert(_sectionsRelocationsAtom
!= NULL
);
283 _sectionsRelocationsAtom
->encode();
286 if ( ! _options
.makeCompressedDyldInfo() ) {
287 // build external relocations
288 assert(_externalRelocsAtom
!= NULL
);
289 _externalRelocsAtom
->encode();
290 // build local relocations
291 assert(_localRelocsAtom
!= NULL
);
292 _localRelocsAtom
->encode();
295 // update address and file offsets now that linkedit content has been generated
296 uint64_t curLinkEditAddress
= 0;
297 uint64_t curLinkEditfileOffset
= 0;
298 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
299 ld::Internal::FinalSection
* sect
= *sit
;
300 if ( sect
->type() != ld::Section::typeLinkEdit
)
302 if ( curLinkEditAddress
== 0 ) {
303 curLinkEditAddress
= sect
->address
;
304 curLinkEditfileOffset
= sect
->fileOffset
;
306 uint16_t maxAlignment
= 0;
308 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
309 const ld::Atom
* atom
= *ait
;
310 //fprintf(stderr, "setting linkedit atom offset for %s\n", atom->name());
311 if ( atom
->alignment().powerOf2
> maxAlignment
)
312 maxAlignment
= atom
->alignment().powerOf2
;
313 // calculate section offset for this atom
314 uint64_t alignment
= 1 << atom
->alignment().powerOf2
;
315 uint64_t currentModulus
= (offset
% alignment
);
316 uint64_t requiredModulus
= atom
->alignment().modulus
;
317 if ( currentModulus
!= requiredModulus
) {
318 if ( requiredModulus
> currentModulus
)
319 offset
+= requiredModulus
-currentModulus
;
321 offset
+= requiredModulus
+alignment
-currentModulus
;
323 (const_cast<ld::Atom
*>(atom
))->setSectionOffset(offset
);
324 (const_cast<ld::Atom
*>(atom
))->setSectionStartAddress(curLinkEditAddress
);
325 offset
+= atom
->size();
328 // section alignment is that of a contained atom with the greatest alignment
329 sect
->alignment
= maxAlignment
;
330 sect
->address
= curLinkEditAddress
;
331 sect
->fileOffset
= curLinkEditfileOffset
;
332 curLinkEditAddress
+= sect
->size
;
333 curLinkEditfileOffset
+= sect
->size
;
336 _fileSize
= state
.sections
.back()->fileOffset
+ state
.sections
.back()->size
;
340 void OutputFile::setLoadCommandsPadding(ld::Internal
& state
)
342 // In other sections, any extra space is put and end of segment.
343 // In __TEXT segment, any extra space is put after load commands to allow post-processing of load commands
344 // Do a reverse layout of __TEXT segment to determine padding size and adjust section size
345 uint64_t paddingSize
= 0;
346 switch ( _options
.outputKind() ) {
348 // dyld itself has special padding requirements. We want the beginning __text section to start at a stable address
349 assert(strcmp(state
.sections
[1]->sectionName(),"__text") == 0);
350 state
.sections
[1]->alignment
= 12; // page align __text
352 case Options::kObjectFile
:
353 // mach-o .o files need no padding between load commands and first section
354 // but leave enough room that the object file could be signed
357 case Options::kPreload
:
358 // mach-o MH_PRELOAD files need no padding between load commands and first section
360 case Options::kKextBundle
:
361 if ( _options
.useTextExecSegment() ) {
365 // else fall into default case
367 // work backwards from end of segment and lay out sections so that extra room goes to padding atom
369 uint64_t textSegPageSize
= _options
.segPageSize("__TEXT");
370 if ( _options
.sharedRegionEligible() && (_options
.iOSVersionMin() >= ld::iOS_8_0
) && (textSegPageSize
== 0x4000) )
371 textSegPageSize
= 0x1000;
372 for (std::vector
<ld::Internal::FinalSection
*>::reverse_iterator it
= state
.sections
.rbegin(); it
!= state
.sections
.rend(); ++it
) {
373 ld::Internal::FinalSection
* sect
= *it
;
374 if ( strcmp(sect
->segmentName(), "__TEXT") != 0 )
376 if ( sect
== headerAndLoadCommandsSection
) {
377 addr
-= headerAndLoadCommandsSection
->size
;
378 paddingSize
= addr
% textSegPageSize
;
382 addr
= addr
& (0 - (1 << sect
->alignment
));
385 // if command line requires more padding than this
386 uint32_t minPad
= _options
.minimumHeaderPad();
387 if ( _options
.maxMminimumHeaderPad() ) {
388 // -headerpad_max_install_names means there should be room for every path load command to grow to 1204 bytes
389 uint32_t altMin
= _dylibsToLoad
.size() * MAXPATHLEN
;
390 if ( _options
.outputKind() == Options::kDynamicLibrary
)
391 altMin
+= MAXPATHLEN
;
392 if ( altMin
> minPad
)
395 if ( paddingSize
< minPad
) {
396 int extraPages
= (minPad
- paddingSize
+ _options
.segmentAlignment() - 1)/_options
.segmentAlignment();
397 paddingSize
+= extraPages
* _options
.segmentAlignment();
400 if ( _options
.makeEncryptable() ) {
401 // load commands must be on a separate non-encrypted page
402 int loadCommandsPage
= (headerAndLoadCommandsSection
->size
+ minPad
)/_options
.segmentAlignment();
403 int textPage
= (headerAndLoadCommandsSection
->size
+ paddingSize
)/_options
.segmentAlignment();
404 if ( loadCommandsPage
== textPage
) {
405 paddingSize
+= _options
.segmentAlignment();
408 // remember start for later use by load command
409 _encryptedTEXTstartOffset
= textPage
*_options
.segmentAlignment();
413 // add padding to size of section
414 headerAndLoadCommandsSection
->size
+= paddingSize
;
418 uint64_t OutputFile::pageAlign(uint64_t addr
)
420 const uint64_t alignment
= _options
.segmentAlignment();
421 return ((addr
+alignment
-1) & (-alignment
));
424 uint64_t OutputFile::pageAlign(uint64_t addr
, uint64_t pageSize
)
426 return ((addr
+pageSize
-1) & (-pageSize
));
429 static const char* makeName(const ld::Atom
& atom
)
431 static char buffer
[4096];
432 switch ( atom
.symbolTableInclusion() ) {
433 case ld::Atom::symbolTableNotIn
:
434 case ld::Atom::symbolTableNotInFinalLinkedImages
:
435 sprintf(buffer
, "%s@0x%08llX", atom
.name(), atom
.objectAddress());
437 case ld::Atom::symbolTableIn
:
438 case ld::Atom::symbolTableInAndNeverStrip
:
439 case ld::Atom::symbolTableInAsAbsolute
:
440 case ld::Atom::symbolTableInWithRandomAutoStripLabel
:
441 strlcpy(buffer
, atom
.name(), 4096);
447 static const char* referenceTargetAtomName(ld::Internal
& state
, const ld::Fixup
* ref
)
449 switch ( ref
->binding
) {
450 case ld::Fixup::bindingNone
:
452 case ld::Fixup::bindingByNameUnbound
:
453 return (char*)(ref
->u
.target
);
454 case ld::Fixup::bindingByContentBound
:
455 case ld::Fixup::bindingDirectlyBound
:
456 return makeName(*((ld::Atom
*)(ref
->u
.target
)));
457 case ld::Fixup::bindingsIndirectlyBound
:
458 return makeName(*state
.indirectBindingTable
[ref
->u
.bindingIndex
]);
460 return "BAD BINDING";
463 bool OutputFile::targetIsThumb(ld::Internal
& state
, const ld::Fixup
* fixup
)
465 switch ( fixup
->binding
) {
466 case ld::Fixup::bindingByContentBound
:
467 case ld::Fixup::bindingDirectlyBound
:
468 return fixup
->u
.target
->isThumb();
469 case ld::Fixup::bindingsIndirectlyBound
:
470 return state
.indirectBindingTable
[fixup
->u
.bindingIndex
]->isThumb();
474 throw "unexpected binding";
477 uint64_t OutputFile::addressOf(const ld::Internal
& state
, const ld::Fixup
* fixup
, const ld::Atom
** target
)
479 if ( !_options
.makeCompressedDyldInfo() ) {
480 // For external relocations the classic mach-o format
481 // has addend only stored in the content. That means
482 // that the address of the target is not used.
483 if ( fixup
->contentAddendOnly
)
486 switch ( fixup
->binding
) {
487 case ld::Fixup::bindingNone
:
488 throw "unexpected bindingNone";
489 case ld::Fixup::bindingByNameUnbound
:
490 throw "unexpected bindingByNameUnbound";
491 case ld::Fixup::bindingByContentBound
:
492 case ld::Fixup::bindingDirectlyBound
:
493 *target
= fixup
->u
.target
;
494 return (*target
)->finalAddress();
495 case ld::Fixup::bindingsIndirectlyBound
:
496 *target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
498 if ( ! (*target
)->finalAddressMode() ) {
499 throwf("reference to symbol (which has not been assigned an address) %s", (*target
)->name());
502 return (*target
)->finalAddress();
504 throw "unexpected binding";
507 uint64_t OutputFile::addressAndTarget(const ld::Internal
& state
, const ld::Fixup
* fixup
, const ld::Atom
** target
)
509 switch ( fixup
->binding
) {
510 case ld::Fixup::bindingNone
:
511 throw "unexpected bindingNone";
512 case ld::Fixup::bindingByNameUnbound
:
513 throw "unexpected bindingByNameUnbound";
514 case ld::Fixup::bindingByContentBound
:
515 case ld::Fixup::bindingDirectlyBound
:
516 *target
= fixup
->u
.target
;
517 return (*target
)->finalAddress();
518 case ld::Fixup::bindingsIndirectlyBound
:
519 *target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
521 if ( ! (*target
)->finalAddressMode() ) {
522 throwf("reference to symbol (which has not been assigned an address) %s", (*target
)->name());
525 return (*target
)->finalAddress();
527 throw "unexpected binding";
531 uint64_t OutputFile::sectionOffsetOf(const ld::Internal
& state
, const ld::Fixup
* fixup
)
533 const ld::Atom
* target
= NULL
;
534 switch ( fixup
->binding
) {
535 case ld::Fixup::bindingNone
:
536 throw "unexpected bindingNone";
537 case ld::Fixup::bindingByNameUnbound
:
538 throw "unexpected bindingByNameUnbound";
539 case ld::Fixup::bindingByContentBound
:
540 case ld::Fixup::bindingDirectlyBound
:
541 target
= fixup
->u
.target
;
543 case ld::Fixup::bindingsIndirectlyBound
:
544 target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
547 assert(target
!= NULL
);
549 uint64_t targetAddress
= target
->finalAddress();
550 for (std::vector
<ld::Internal::FinalSection
*>::const_iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
551 const ld::Internal::FinalSection
* sect
= *it
;
552 if ( (sect
->address
<= targetAddress
) && (targetAddress
< (sect
->address
+sect
->size
)) )
553 return targetAddress
- sect
->address
;
555 throw "section not found for section offset";
560 uint64_t OutputFile::tlvTemplateOffsetOf(const ld::Internal
& state
, const ld::Fixup
* fixup
)
562 const ld::Atom
* target
= NULL
;
563 switch ( fixup
->binding
) {
564 case ld::Fixup::bindingNone
:
565 throw "unexpected bindingNone";
566 case ld::Fixup::bindingByNameUnbound
:
567 throw "unexpected bindingByNameUnbound";
568 case ld::Fixup::bindingByContentBound
:
569 case ld::Fixup::bindingDirectlyBound
:
570 target
= fixup
->u
.target
;
572 case ld::Fixup::bindingsIndirectlyBound
:
573 target
= state
.indirectBindingTable
[fixup
->u
.bindingIndex
];
576 assert(target
!= NULL
);
578 for (std::vector
<ld::Internal::FinalSection
*>::const_iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
579 const ld::Internal::FinalSection
* sect
= *it
;
580 switch ( sect
->type() ) {
581 case ld::Section::typeTLVInitialValues
:
582 case ld::Section::typeTLVZeroFill
:
583 return target
->finalAddress() - sect
->address
;
588 throw "section not found for tlvTemplateOffsetOf";
591 void OutputFile::printSectionLayout(ld::Internal
& state
)
593 // show layout of final image
594 fprintf(stderr
, "final section layout:\n");
595 for (std::vector
<ld::Internal::FinalSection
*>::iterator it
= state
.sections
.begin(); it
!= state
.sections
.end(); ++it
) {
596 if ( (*it
)->isSectionHidden() )
598 fprintf(stderr
, " %s/%s addr=0x%08llX, size=0x%08llX, fileOffset=0x%08llX, type=%d\n",
599 (*it
)->segmentName(), (*it
)->sectionName(),
600 (*it
)->address
, (*it
)->size
, (*it
)->fileOffset
, (*it
)->type());
605 void OutputFile::rangeCheck8(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
607 if ( (displacement
> 127) || (displacement
< -128) ) {
608 // show layout of final image
609 printSectionLayout(state
);
611 const ld::Atom
* target
;
612 throwf("8-bit reference out of range (%lld max is +/-127B): from %s (0x%08llX) to %s (0x%08llX)",
613 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
614 addressOf(state
, fixup
, &target
));
618 void OutputFile::rangeCheck16(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
620 const int64_t thirtyTwoKLimit
= 0x00007FFF;
621 if ( (displacement
> thirtyTwoKLimit
) || (displacement
< (-thirtyTwoKLimit
)) ) {
622 // show layout of final image
623 printSectionLayout(state
);
625 const ld::Atom
* target
;
626 throwf("16-bit reference out of range (%lld max is +/-32KB): from %s (0x%08llX) to %s (0x%08llX)",
627 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
628 addressOf(state
, fixup
, &target
));
632 void OutputFile::rangeCheckBranch32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
634 const int64_t twoGigLimit
= 0x7FFFFFFF;
635 if ( (displacement
> twoGigLimit
) || (displacement
< (-twoGigLimit
)) ) {
636 // show layout of final image
637 printSectionLayout(state
);
639 const ld::Atom
* target
;
640 throwf("32-bit branch out of range (%lld max is +/-2GB): from %s (0x%08llX) to %s (0x%08llX)",
641 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
642 addressOf(state
, fixup
, &target
));
647 void OutputFile::rangeCheckAbsolute32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
649 const int64_t fourGigLimit
= 0xFFFFFFFF;
650 if ( displacement
> fourGigLimit
) {
651 // <rdar://problem/9610466> cannot enforce 32-bit range checks on 32-bit archs because assembler loses sign information
652 // .long _foo - 0xC0000000
653 // is encoded in mach-o the same as:
654 // .long _foo + 0x40000000
655 // so if _foo lays out to 0xC0000100, the first is ok, but the second is not.
656 if ( (_options
.architecture() == CPU_TYPE_ARM
) || (_options
.architecture() == CPU_TYPE_I386
) ) {
657 // Unlikely userland code does funky stuff like this, so warn for them, but not warn for -preload or -static
658 if ( (_options
.outputKind() != Options::kPreload
) && (_options
.outputKind() != Options::kStaticExecutable
) ) {
659 warning("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
660 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), displacement
);
664 // show layout of final image
665 printSectionLayout(state
);
667 const ld::Atom
* target
;
668 if ( fixup
->binding
== ld::Fixup::bindingNone
)
669 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
670 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), displacement
);
672 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to %s (0x%08llX)",
673 displacement
, atom
->name(), fixup
->offsetInAtom
, atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
674 addressOf(state
, fixup
, &target
));
679 void OutputFile::rangeCheckRIP32(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
681 const int64_t twoGigLimit
= 0x7FFFFFFF;
682 if ( (displacement
> twoGigLimit
) || (displacement
< (-twoGigLimit
)) ) {
683 // show layout of final image
684 printSectionLayout(state
);
686 const ld::Atom
* target
;
687 throwf("32-bit RIP relative reference out of range (%lld max is +/-2GB): from %s (0x%08llX) to %s (0x%08llX)",
688 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
689 addressOf(state
, fixup
, &target
));
693 void OutputFile::rangeCheckARM12(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
695 if ( (displacement
> 4092LL) || (displacement
< (-4092LL)) ) {
696 // show layout of final image
697 printSectionLayout(state
);
699 const ld::Atom
* target
;
700 throwf("ARM ldr 12-bit displacement out of range (%lld max is +/-4096B): from %s (0x%08llX) to %s (0x%08llX)",
701 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
702 addressOf(state
, fixup
, &target
));
706 bool OutputFile::checkArmBranch24Displacement(int64_t displacement
)
708 return ( (displacement
< 33554428LL) && (displacement
> (-33554432LL)) );
711 void OutputFile::rangeCheckARMBranch24(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
713 if ( checkArmBranch24Displacement(displacement
) )
716 // show layout of final image
717 printSectionLayout(state
);
719 const ld::Atom
* target
;
720 throwf("b/bl/blx ARM branch out of range (%lld max is +/-32MB): from %s (0x%08llX) to %s (0x%08llX)",
721 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
722 addressOf(state
, fixup
, &target
));
725 bool OutputFile::checkThumbBranch22Displacement(int64_t displacement
)
727 // thumb2 supports +/- 16MB displacement
728 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
729 if ( (displacement
> 16777214LL) || (displacement
< (-16777216LL)) ) {
734 // thumb1 supports +/- 4MB displacement
735 if ( (displacement
> 4194302LL) || (displacement
< (-4194304LL)) ) {
742 void OutputFile::rangeCheckThumbBranch22(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
744 if ( checkThumbBranch22Displacement(displacement
) )
747 // show layout of final image
748 printSectionLayout(state
);
750 const ld::Atom
* target
;
751 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
752 throwf("b/bl/blx thumb2 branch out of range (%lld max is +/-16MB): from %s (0x%08llX) to %s (0x%08llX)",
753 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
754 addressOf(state
, fixup
, &target
));
757 throwf("b/bl/blx thumb1 branch out of range (%lld max is +/-4MB): from %s (0x%08llX) to %s (0x%08llX)",
758 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
759 addressOf(state
, fixup
, &target
));
764 void OutputFile::rangeCheckARM64Branch26(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
766 const int64_t bl_128MegLimit
= 0x07FFFFFF;
767 if ( (displacement
> bl_128MegLimit
) || (displacement
< (-bl_128MegLimit
)) ) {
768 // show layout of final image
769 printSectionLayout(state
);
771 const ld::Atom
* target
;
772 throwf("b(l) ARM64 branch out of range (%lld max is +/-128MB): from %s (0x%08llX) to %s (0x%08llX)",
773 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
774 addressOf(state
, fixup
, &target
));
778 void OutputFile::rangeCheckARM64Page21(int64_t displacement
, ld::Internal
& state
, const ld::Atom
* atom
, const ld::Fixup
* fixup
)
780 const int64_t adrp_4GigLimit
= 0x100000000ULL
;
781 if ( (displacement
> adrp_4GigLimit
) || (displacement
< (-adrp_4GigLimit
)) ) {
782 // show layout of final image
783 printSectionLayout(state
);
785 const ld::Atom
* target
;
786 throwf("ARM64 ADRP out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
787 displacement
, atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fixup
),
788 addressOf(state
, fixup
, &target
));
793 uint16_t OutputFile::get16LE(uint8_t* loc
) { return LittleEndian::get16(*(uint16_t*)loc
); }
794 void OutputFile::set16LE(uint8_t* loc
, uint16_t value
) { LittleEndian::set16(*(uint16_t*)loc
, value
); }
796 uint32_t OutputFile::get32LE(uint8_t* loc
) { return LittleEndian::get32(*(uint32_t*)loc
); }
797 void OutputFile::set32LE(uint8_t* loc
, uint32_t value
) { LittleEndian::set32(*(uint32_t*)loc
, value
); }
799 uint64_t OutputFile::get64LE(uint8_t* loc
) { return LittleEndian::get64(*(uint64_t*)loc
); }
800 void OutputFile::set64LE(uint8_t* loc
, uint64_t value
) { LittleEndian::set64(*(uint64_t*)loc
, value
); }
802 uint16_t OutputFile::get16BE(uint8_t* loc
) { return BigEndian::get16(*(uint16_t*)loc
); }
803 void OutputFile::set16BE(uint8_t* loc
, uint16_t value
) { BigEndian::set16(*(uint16_t*)loc
, value
); }
805 uint32_t OutputFile::get32BE(uint8_t* loc
) { return BigEndian::get32(*(uint32_t*)loc
); }
806 void OutputFile::set32BE(uint8_t* loc
, uint32_t value
) { BigEndian::set32(*(uint32_t*)loc
, value
); }
808 uint64_t OutputFile::get64BE(uint8_t* loc
) { return BigEndian::get64(*(uint64_t*)loc
); }
809 void OutputFile::set64BE(uint8_t* loc
, uint64_t value
) { BigEndian::set64(*(uint64_t*)loc
, value
); }
811 #if SUPPORT_ARCH_arm64
813 static uint32_t makeNOP() {
// How a loaded value is extended into its destination register:
// not extended, sign-extended to 32 bits, or sign-extended to 64 bits.
enum SignExtension { signedNot, signed32, signed64 };
818 struct LoadStoreInfo
{
821 uint32_t offset
; // after scaling
822 uint32_t size
; // 1,2,4,8, or 16
824 bool isFloat
; // if destReg is FP/SIMD
825 SignExtension signEx
; // if load is sign extended
828 static uint32_t makeLDR_literal(const LoadStoreInfo
& info
, uint64_t targetAddress
, uint64_t instructionAddress
)
830 int64_t delta
= targetAddress
- instructionAddress
;
831 assert(delta
< 1024*1024);
832 assert(delta
> -1024*1024);
833 assert((info
.reg
& 0xFFFFFFE0) == 0);
834 assert((targetAddress
& 0x3) == 0);
835 assert((instructionAddress
& 0x3) == 0);
836 assert(!info
.isStore
);
837 uint32_t imm19
= (delta
<< 3) & 0x00FFFFE0;
838 uint32_t instruction
= 0;
839 switch ( info
.size
) {
841 if ( info
.isFloat
) {
842 assert(info
.signEx
== signedNot
);
843 instruction
= 0x1C000000;
846 if ( info
.signEx
== signed64
)
847 instruction
= 0x98000000;
849 instruction
= 0x18000000;
853 assert(info
.signEx
== signedNot
);
854 instruction
= info
.isFloat
? 0x5C000000 : 0x58000000;
857 assert(info
.signEx
== signedNot
);
858 instruction
= 0x9C000000;
861 assert(0 && "invalid load size for literal");
863 return (instruction
| imm19
| info
.reg
);
// Encode an ARM64 ADR instruction that computes 'targetAddress' into register
// 'destReg', given that the instruction itself sits at 'instructionAddress'.
// The signed +/-1MB byte displacement is split across the encoding's
// immhi (bits 23:5) and immlo (bits 30:29) fields.
static uint32_t makeADR(uint32_t destReg, uint64_t targetAddress, uint64_t instructionAddress)
{
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((instructionAddress & 0x3) == 0);
	const int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	const uint32_t hiField = (delta & 0x001FFFFC) << 3;	// delta[20:2] -> bits 23:5
	const uint32_t loField = (delta & 0x00000003) << 29;	// delta[1:0]  -> bits 30:29
	return 0x10000000 | hiField | loField | destReg;	// 0x10000000 = ADR opcode
}
879 static uint32_t makeLoadOrStore(const LoadStoreInfo
& info
)
881 uint32_t instruction
= 0x39000000;
883 instruction
|= 0x04000000;
884 instruction
|= info
.reg
;
885 instruction
|= (info
.baseReg
<< 5);
886 uint32_t sizeBits
= 0;
887 uint32_t opcBits
= 0;
888 uint32_t imm12Bits
= 0;
889 switch ( info
.size
) {
892 imm12Bits
= info
.offset
;
893 if ( info
.isStore
) {
897 switch ( info
.signEx
) {
912 assert((info
.offset
% 2) == 0);
913 imm12Bits
= info
.offset
/2;
914 if ( info
.isStore
) {
918 switch ( info
.signEx
) {
933 assert((info
.offset
% 4) == 0);
934 imm12Bits
= info
.offset
/4;
935 if ( info
.isStore
) {
939 switch ( info
.signEx
) {
944 assert(0 && "cannot use signed32 with 32-bit load/store");
954 assert((info
.offset
% 8) == 0);
955 imm12Bits
= info
.offset
/8;
956 if ( info
.isStore
) {
961 assert(info
.signEx
== signedNot
);
966 assert((info
.offset
% 16) == 0);
967 imm12Bits
= info
.offset
/16;
968 assert(info
.isFloat
);
969 if ( info
.isStore
) {
977 assert(0 && "bad load/store size");
980 assert(imm12Bits
< 4096);
981 return (instruction
| (sizeBits
<< 30) | (opcBits
<< 22) | (imm12Bits
<< 10));
984 static bool parseLoadOrStore(uint32_t instruction
, LoadStoreInfo
& info
)
986 if ( (instruction
& 0x3B000000) != 0x39000000 )
988 info
.isFloat
= ( (instruction
& 0x04000000) != 0 );
989 info
.reg
= (instruction
& 0x1F);
990 info
.baseReg
= ((instruction
>>5) & 0x1F);
991 switch (instruction
& 0xC0C00000) {
995 info
.signEx
= signedNot
;
999 info
.isStore
= false;
1000 info
.signEx
= signedNot
;
1003 if ( info
.isFloat
) {
1005 info
.isStore
= true;
1006 info
.signEx
= signedNot
;
1010 info
.isStore
= false;
1011 info
.signEx
= signed64
;
1015 if ( info
.isFloat
) {
1017 info
.isStore
= false;
1018 info
.signEx
= signedNot
;
1022 info
.isStore
= false;
1023 info
.signEx
= signed32
;
1028 info
.isStore
= true;
1029 info
.signEx
= signedNot
;
1033 info
.isStore
= false;
1034 info
.signEx
= signedNot
;
1038 info
.isStore
= false;
1039 info
.signEx
= signed64
;
1043 info
.isStore
= false;
1044 info
.signEx
= signed32
;
1048 info
.isStore
= true;
1049 info
.signEx
= signedNot
;
1053 info
.isStore
= false;
1054 info
.signEx
= signedNot
;
1058 info
.isStore
= false;
1059 info
.signEx
= signed64
;
1063 info
.isStore
= true;
1064 info
.signEx
= signedNot
;
1068 info
.isStore
= false;
1069 info
.signEx
= signedNot
;
1074 info
.offset
= ((instruction
>> 10) & 0x0FFF) * info
.size
;
1082 static bool parseADRP(uint32_t instruction
, AdrpInfo
& info
)
1084 if ( (instruction
& 0x9F000000) != 0x90000000 )
1086 info
.destReg
= (instruction
& 0x1F);
1096 static bool parseADD(uint32_t instruction
, AddInfo
& info
)
1098 if ( (instruction
& 0xFFC00000) != 0x91000000 )
1100 info
.destReg
= (instruction
& 0x1F);
1101 info
.srcReg
= ((instruction
>>5) & 0x1F);
1102 info
.addend
= ((instruction
>>10) & 0xFFF);
1109 static uint32_t makeLDR_scaledOffset(const LoadStoreInfo
& info
)
1111 assert((info
.reg
& 0xFFFFFFE0) == 0);
1112 assert((info
.baseReg
& 0xFFFFFFE0) == 0);
1113 assert(!info
.isFloat
|| (info
.signEx
!= signedNot
));
1114 uint32_t sizeBits
= 0;
1115 uint32_t opcBits
= 1;
1116 uint32_t vBit
= info
.isFloat
;
1117 switch ( info
.signEx
) {
1128 assert(0 && "bad SignExtension runtime value");
1130 switch ( info
.size
) {
1149 assert(0 && "invalid load size for literal");
1151 assert((info
.offset
% info
.size
) == 0);
1152 uint32_t scaledOffset
= info
.offset
/info
.size
;
1153 assert(scaledOffset
< 4096);
1154 return (0x39000000 | (sizeBits
<<30) | (vBit
<<26) | (opcBits
<<22) | (scaledOffset
<<10) | (info
.baseReg
<<5) | info
.reg
);
// Encode an A64 LDR-literal (pc-relative load) of destReg from
// targetAddress.  loadSize selects the 4-, 8-, or 16-byte form; isFloat
// selects the FP/SIMD variant for the 4- and 8-byte sizes.
static uint32_t makeLDR_literal(uint32_t destReg, uint32_t loadSize, bool isFloat, uint64_t targetAddress, uint64_t instructionAddress)
{
	const int64_t displacement = targetAddress - instructionAddress;
	assert(displacement < 1024*1024);			// literal loads reach +/- 1MB
	assert(displacement > -1024*1024);
	assert((destReg & 0xFFFFFFE0) == 0);		// register number must be 0..31
	assert((targetAddress & 0x3) == 0);			// literal must be 4-byte aligned
	assert((instructionAddress & 0x3) == 0);
	// word displacement goes into imm19 at bits 23:5
	const uint32_t imm19 = (uint32_t)((displacement << 3) & 0x00FFFFE0);
	uint32_t opcode = 0;
	switch ( loadSize ) {
		case 4:
			opcode = isFloat ? 0x1C000000 : 0x18000000;
			break;
		case 8:
			opcode = isFloat ? 0x5C000000 : 0x58000000;
			break;
		case 16:
			opcode = 0x9C000000;
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	return (opcode | imm19 | destReg);
}
// Decode the fields of an A64 unsigned-offset load/store instruction.
// On return: *size is the access size in bytes, *destReg the data register,
// *v whether the FP/SIMD register file is used, and *scaledOffset the byte
// offset (imm12 scaled back up by the access size).
// Returns true only when the instruction is a load with opc bit 22 set
// (pattern 0x39400000 under mask 0x3B400000).
static bool ldrInfo(uint32_t instruction, uint8_t* size, uint8_t* destReg, bool* v, uint32_t* scaledOffset)
{
	*v = ( (instruction & 0x04000000) != 0 );
	*destReg = (instruction & 0x1F);
	const uint32_t imm12 = (instruction >> 10) & 0x00000FFF;
	switch ( (instruction & 0xC0000000) >> 30 ) {
		case 0:
			// vector and byte LDR have same "size" bits, need to check other bits to differenciate
			if ( (instruction & 0x00800000) == 0 ) {
				*size = 1;
				*scaledOffset = imm12;
			}
			else {
				*size = 16;
				*scaledOffset = imm12 * 16;
			}
			break;
		case 1:
			*size = 2;
			*scaledOffset = imm12 * 2;
			break;
		case 2:
			*size = 4;
			*scaledOffset = imm12 * 4;
			break;
		case 3:
			*size = 8;
			*scaledOffset = imm12 * 8;
			break;
	}
	return ( (instruction & 0x3B400000) == 0x39400000 );
}
// Returns true when the two addresses are strictly within +/-1MB of each
// other (the reach of ADR and LDR-literal instructions).
static bool withinOneMeg(uint64_t addr1, uint64_t addr2) {
	const int64_t distance = addr2 - addr1;
	return (distance < 1024*1024) && (distance > -1024*1024);
}
1222 #endif // SUPPORT_ARCH_arm64
1224 void OutputFile::setInfo(ld::Internal
& state
, const ld::Atom
* atom
, uint8_t* buffer
, const std::map
<uint32_t, const Fixup
*>& usedByHints
,
1225 uint32_t offsetInAtom
, uint32_t delta
, InstructionInfo
* info
)
1227 info
->offsetInAtom
= offsetInAtom
+ delta
;
1228 std::map
<uint32_t, const Fixup
*>::const_iterator pos
= usedByHints
.find(info
->offsetInAtom
);
1229 if ( (pos
!= usedByHints
.end()) && (pos
->second
!= NULL
) ) {
1230 info
->fixup
= pos
->second
;
1231 info
->targetAddress
= addressOf(state
, info
->fixup
, &info
->target
);
1232 if ( info
->fixup
->clusterSize
!= ld::Fixup::k1of1
) {
1233 assert(info
->fixup
->firstInCluster());
1234 const ld::Fixup
* nextFixup
= info
->fixup
+ 1;
1235 if ( nextFixup
->kind
== ld::Fixup::kindAddAddend
) {
1236 info
->targetAddress
+= nextFixup
->u
.addend
;
1239 assert(0 && "expected addend");
1245 info
->targetAddress
= 0;
1246 info
->target
= NULL
;
1248 info
->instructionContent
= &buffer
[info
->offsetInAtom
];
1249 info
->instructionAddress
= atom
->finalAddress() + info
->offsetInAtom
;
1250 info
->instruction
= get32LE(info
->instructionContent
);
1253 #if SUPPORT_ARCH_arm64
1254 static bool isPageKind(const ld::Fixup
* fixup
, bool mustBeGOT
=false)
1256 if ( fixup
== NULL
)
1259 switch ( fixup
->kind
) {
1260 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
1262 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
1263 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
1264 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
1265 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
1267 case ld::Fixup::kindSetTargetAddress
:
1271 } while ( ! f
->lastInCluster() );
1273 case ld::Fixup::kindStoreARM64Page21
:
1275 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
1276 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
1277 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
1278 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
1290 static bool isPageOffsetKind(const ld::Fixup
* fixup
, bool mustBeGOT
=false)
1292 if ( fixup
== NULL
)
1295 switch ( fixup
->kind
) {
1296 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
1298 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
1299 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
1300 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
1301 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
1303 case ld::Fixup::kindSetTargetAddress
:
1307 } while ( ! f
->lastInCluster() );
1309 case ld::Fixup::kindStoreARM64PageOff12
:
1311 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
1312 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
1313 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12
:
1314 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
1325 #endif // SUPPORT_ARCH_arm64
1328 #define LOH_ASSERT(cond) \
1330 warning("ignoring linker optimization hint at %s+0x%X because " #cond, atom->name(), fit->offsetInAtom); \
1334 void OutputFile::applyFixUps(ld::Internal
& state
, uint64_t mhAddress
, const ld::Atom
* atom
, uint8_t* buffer
)
1336 //fprintf(stderr, "applyFixUps() on %s\n", atom->name());
1337 int64_t accumulator
= 0;
1338 const ld::Atom
* toTarget
= NULL
;
1339 const ld::Atom
* fromTarget
;
1341 uint32_t instruction
;
1342 uint32_t newInstruction
;
1346 bool thumbTarget
= false;
1347 std::map
<uint32_t, const Fixup
*> usedByHints
;
1348 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
1349 uint8_t* fixUpLocation
= &buffer
[fit
->offsetInAtom
];
1350 ld::Fixup::LOH_arm64 lohExtra
;
1351 switch ( (ld::Fixup::Kind
)(fit
->kind
) ) {
1352 case ld::Fixup::kindNone
:
1353 case ld::Fixup::kindNoneFollowOn
:
1354 case ld::Fixup::kindNoneGroupSubordinate
:
1355 case ld::Fixup::kindNoneGroupSubordinateFDE
:
1356 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
1357 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
1359 case ld::Fixup::kindSetTargetAddress
:
1360 accumulator
= addressOf(state
, fit
, &toTarget
);
1361 thumbTarget
= targetIsThumb(state
, fit
);
1364 if ( fit
->contentAddendOnly
|| fit
->contentDetlaToAddendOnly
)
1367 case ld::Fixup::kindSubtractTargetAddress
:
1368 delta
= addressOf(state
, fit
, &fromTarget
);
1369 if ( ! fit
->contentAddendOnly
)
1370 accumulator
-= delta
;
1372 case ld::Fixup::kindAddAddend
:
1373 if ( ! fit
->contentIgnoresAddend
) {
1374 // <rdar://problem/8342028> ARM main executables main contain .long constants pointing
1375 // into themselves such as jump tables. These .long should not have thumb bit set
1376 // even though the target is a thumb instruction. We can tell it is an interior pointer
1377 // because we are processing an addend.
1378 if ( thumbTarget
&& (toTarget
== atom
) && ((int32_t)fit
->u
.addend
> 0) ) {
1379 accumulator
&= (-2);
1380 //warning("removing thumb bit from intra-atom pointer in %s %s+0x%0X",
1381 // atom->section().sectionName(), atom->name(), fit->offsetInAtom);
1383 accumulator
+= fit
->u
.addend
;
1386 case ld::Fixup::kindSubtractAddend
:
1387 accumulator
-= fit
->u
.addend
;
1389 case ld::Fixup::kindSetTargetImageOffset
:
1390 accumulator
= addressOf(state
, fit
, &toTarget
) - mhAddress
;
1391 thumbTarget
= targetIsThumb(state
, fit
);
1395 case ld::Fixup::kindSetTargetSectionOffset
:
1396 accumulator
= sectionOffsetOf(state
, fit
);
1398 case ld::Fixup::kindSetTargetTLVTemplateOffset
:
1399 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1401 case ld::Fixup::kindStore8
:
1402 *fixUpLocation
+= accumulator
;
1404 case ld::Fixup::kindStoreLittleEndian16
:
1405 set16LE(fixUpLocation
, accumulator
);
1407 case ld::Fixup::kindStoreLittleEndianLow24of32
:
1408 set32LE(fixUpLocation
, (get32LE(fixUpLocation
) & 0xFF000000) | (accumulator
& 0x00FFFFFF) );
1410 case ld::Fixup::kindStoreLittleEndian32
:
1411 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1412 set32LE(fixUpLocation
, accumulator
);
1414 case ld::Fixup::kindStoreLittleEndian64
:
1415 set64LE(fixUpLocation
, accumulator
);
1417 case ld::Fixup::kindStoreBigEndian16
:
1418 set16BE(fixUpLocation
, accumulator
);
1420 case ld::Fixup::kindStoreBigEndianLow24of32
:
1421 set32BE(fixUpLocation
, (get32BE(fixUpLocation
) & 0xFF000000) | (accumulator
& 0x00FFFFFF) );
1423 case ld::Fixup::kindStoreBigEndian32
:
1424 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1425 set32BE(fixUpLocation
, accumulator
);
1427 case ld::Fixup::kindStoreBigEndian64
:
1428 set64BE(fixUpLocation
, accumulator
);
1430 case ld::Fixup::kindStoreX86PCRel8
:
1431 case ld::Fixup::kindStoreX86BranchPCRel8
:
1432 if ( fit
->contentAddendOnly
)
1433 delta
= accumulator
;
1435 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 1);
1436 rangeCheck8(delta
, state
, atom
, fit
);
1437 *fixUpLocation
= delta
;
1439 case ld::Fixup::kindStoreX86PCRel16
:
1440 if ( fit
->contentAddendOnly
)
1441 delta
= accumulator
;
1443 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 2);
1444 rangeCheck16(delta
, state
, atom
, fit
);
1445 set16LE(fixUpLocation
, delta
);
1447 case ld::Fixup::kindStoreX86BranchPCRel32
:
1448 if ( fit
->contentAddendOnly
)
1449 delta
= accumulator
;
1451 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1452 rangeCheckBranch32(delta
, state
, atom
, fit
);
1453 set32LE(fixUpLocation
, delta
);
1455 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
1456 case ld::Fixup::kindStoreX86PCRel32GOT
:
1457 case ld::Fixup::kindStoreX86PCRel32
:
1458 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
1459 if ( fit
->contentAddendOnly
)
1460 delta
= accumulator
;
1462 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1463 rangeCheckRIP32(delta
, state
, atom
, fit
);
1464 set32LE(fixUpLocation
, delta
);
1466 case ld::Fixup::kindStoreX86PCRel32_1
:
1467 if ( fit
->contentAddendOnly
)
1468 delta
= accumulator
- 1;
1470 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 5);
1471 rangeCheckRIP32(delta
, state
, atom
, fit
);
1472 set32LE(fixUpLocation
, delta
);
1474 case ld::Fixup::kindStoreX86PCRel32_2
:
1475 if ( fit
->contentAddendOnly
)
1476 delta
= accumulator
- 2;
1478 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 6);
1479 rangeCheckRIP32(delta
, state
, atom
, fit
);
1480 set32LE(fixUpLocation
, delta
);
1482 case ld::Fixup::kindStoreX86PCRel32_4
:
1483 if ( fit
->contentAddendOnly
)
1484 delta
= accumulator
- 4;
1486 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1487 rangeCheckRIP32(delta
, state
, atom
, fit
);
1488 set32LE(fixUpLocation
, delta
);
1490 case ld::Fixup::kindStoreX86Abs32TLVLoad
:
1491 set32LE(fixUpLocation
, accumulator
);
1493 case ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA
:
1494 assert(_options
.outputKind() != Options::kObjectFile
);
1495 // TLV entry was optimized away, change movl instruction to a leal
1496 if ( fixUpLocation
[-1] != 0xA1 )
1497 throw "TLV load reloc does not point to a movl instruction";
1498 fixUpLocation
[-1] = 0xB8;
1499 set32LE(fixUpLocation
, accumulator
);
1501 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
1502 assert(_options
.outputKind() != Options::kObjectFile
);
1503 // GOT entry was optimized away, change movq instruction to a leaq
1504 if ( fixUpLocation
[-2] != 0x8B )
1505 throw "GOT load reloc does not point to a movq instruction";
1506 fixUpLocation
[-2] = 0x8D;
1507 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1508 rangeCheckRIP32(delta
, state
, atom
, fit
);
1509 set32LE(fixUpLocation
, delta
);
1511 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
1512 assert(_options
.outputKind() != Options::kObjectFile
);
1513 // TLV entry was optimized away, change movq instruction to a leaq
1514 if ( fixUpLocation
[-2] != 0x8B )
1515 throw "TLV load reloc does not point to a movq instruction";
1516 fixUpLocation
[-2] = 0x8D;
1517 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1518 rangeCheckRIP32(delta
, state
, atom
, fit
);
1519 set32LE(fixUpLocation
, delta
);
1521 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
1522 accumulator
= addressOf(state
, fit
, &toTarget
);
1523 // fall into kindStoreARMLoad12 case
1524 case ld::Fixup::kindStoreARMLoad12
:
1525 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1526 rangeCheckARM12(delta
, state
, atom
, fit
);
1527 instruction
= get32LE(fixUpLocation
);
1529 newInstruction
= instruction
& 0xFFFFF000;
1530 newInstruction
|= ((uint32_t)delta
& 0xFFF);
1533 newInstruction
= instruction
& 0xFF7FF000;
1534 newInstruction
|= ((uint32_t)(-delta
) & 0xFFF);
1536 set32LE(fixUpLocation
, newInstruction
);
1538 case ld::Fixup::kindDtraceExtra
:
1540 case ld::Fixup::kindStoreX86DtraceCallSiteNop
:
1541 if ( _options
.outputKind() != Options::kObjectFile
) {
1542 // change call site to a NOP
1543 fixUpLocation
[-1] = 0x90; // 1-byte nop
1544 fixUpLocation
[0] = 0x0F; // 4-byte nop
1545 fixUpLocation
[1] = 0x1F;
1546 fixUpLocation
[2] = 0x40;
1547 fixUpLocation
[3] = 0x00;
1550 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear
:
1551 if ( _options
.outputKind() != Options::kObjectFile
) {
1552 // change call site to a clear eax
1553 fixUpLocation
[-1] = 0x33; // xorl eax,eax
1554 fixUpLocation
[0] = 0xC0;
1555 fixUpLocation
[1] = 0x90; // 1-byte nop
1556 fixUpLocation
[2] = 0x90; // 1-byte nop
1557 fixUpLocation
[3] = 0x90; // 1-byte nop
1560 case ld::Fixup::kindStoreARMDtraceCallSiteNop
:
1561 if ( _options
.outputKind() != Options::kObjectFile
) {
1562 // change call site to a NOP
1563 set32LE(fixUpLocation
, 0xE1A00000);
1566 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear
:
1567 if ( _options
.outputKind() != Options::kObjectFile
) {
1568 // change call site to 'eor r0, r0, r0'
1569 set32LE(fixUpLocation
, 0xE0200000);
1572 case ld::Fixup::kindStoreThumbDtraceCallSiteNop
:
1573 if ( _options
.outputKind() != Options::kObjectFile
) {
1574 // change 32-bit blx call site to two thumb NOPs
1575 set32LE(fixUpLocation
, 0x46C046C0);
1578 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear
:
1579 if ( _options
.outputKind() != Options::kObjectFile
) {
1580 // change 32-bit blx call site to 'nop', 'eor r0, r0'
1581 set32LE(fixUpLocation
, 0x46C04040);
1584 case ld::Fixup::kindStoreARM64DtraceCallSiteNop
:
1585 if ( _options
.outputKind() != Options::kObjectFile
) {
1586 // change call site to a NOP
1587 set32LE(fixUpLocation
, 0xD503201F);
1590 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear
:
1591 if ( _options
.outputKind() != Options::kObjectFile
) {
1592 // change call site to 'MOVZ X0,0'
1593 set32LE(fixUpLocation
, 0xD2800000);
1596 case ld::Fixup::kindLazyTarget
:
1597 case ld::Fixup::kindIslandTarget
:
1599 case ld::Fixup::kindSetLazyOffset
:
1600 assert(fit
->binding
== ld::Fixup::bindingDirectlyBound
);
1601 accumulator
= this->lazyBindingInfoOffsetForLazyPointerAddress(fit
->u
.target
->finalAddress());
1603 case ld::Fixup::kindDataInCodeStartData
:
1604 case ld::Fixup::kindDataInCodeStartJT8
:
1605 case ld::Fixup::kindDataInCodeStartJT16
:
1606 case ld::Fixup::kindDataInCodeStartJT32
:
1607 case ld::Fixup::kindDataInCodeStartJTA32
:
1608 case ld::Fixup::kindDataInCodeEnd
:
1610 case ld::Fixup::kindLinkerOptimizationHint
:
1611 // expand table of address/offsets used by hints
1612 lohExtra
.addend
= fit
->u
.addend
;
1613 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta1
<< 2)] = NULL
;
1614 if ( lohExtra
.info
.count
> 0 )
1615 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta2
<< 2)] = NULL
;
1616 if ( lohExtra
.info
.count
> 1 )
1617 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta3
<< 2)] = NULL
;
1618 if ( lohExtra
.info
.count
> 2 )
1619 usedByHints
[fit
->offsetInAtom
+ (lohExtra
.info
.delta4
<< 2)] = NULL
;
1621 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
1622 accumulator
= addressOf(state
, fit
, &toTarget
);
1623 thumbTarget
= targetIsThumb(state
, fit
);
1626 if ( fit
->contentAddendOnly
)
1628 rangeCheckAbsolute32(accumulator
, state
, atom
, fit
);
1629 set32LE(fixUpLocation
, accumulator
);
1631 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
1632 accumulator
= addressOf(state
, fit
, &toTarget
);
1633 if ( fit
->contentAddendOnly
)
1635 set64LE(fixUpLocation
, accumulator
);
1637 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
1638 accumulator
= addressOf(state
, fit
, &toTarget
);
1639 if ( fit
->contentAddendOnly
)
1641 set32BE(fixUpLocation
, accumulator
);
1643 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
1644 accumulator
= addressOf(state
, fit
, &toTarget
);
1645 if ( fit
->contentAddendOnly
)
1647 set64BE(fixUpLocation
, accumulator
);
1649 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32
:
1650 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1651 set32LE(fixUpLocation
, accumulator
);
1653 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64
:
1654 accumulator
= tlvTemplateOffsetOf(state
, fit
);
1655 set64LE(fixUpLocation
, accumulator
);
1657 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
1658 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
1659 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
1660 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
1661 accumulator
= addressOf(state
, fit
, &toTarget
);
1662 if ( fit
->contentDetlaToAddendOnly
)
1664 if ( fit
->contentAddendOnly
)
1667 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1668 rangeCheckRIP32(delta
, state
, atom
, fit
);
1669 set32LE(fixUpLocation
, delta
);
1671 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad
:
1672 set32LE(fixUpLocation
, accumulator
);
1674 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA
:
1675 // TLV entry was optimized away, change movl instruction to a leal
1676 if ( fixUpLocation
[-1] != 0xA1 )
1677 throw "TLV load reloc does not point to a movl <abs-address>,<reg> instruction";
1678 fixUpLocation
[-1] = 0xB8;
1679 accumulator
= addressOf(state
, fit
, &toTarget
);
1680 set32LE(fixUpLocation
, accumulator
);
1682 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
1683 // GOT entry was optimized away, change movq instruction to a leaq
1684 if ( fixUpLocation
[-2] != 0x8B )
1685 throw "GOT load reloc does not point to a movq instruction";
1686 fixUpLocation
[-2] = 0x8D;
1687 accumulator
= addressOf(state
, fit
, &toTarget
);
1688 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1689 rangeCheckRIP32(delta
, state
, atom
, fit
);
1690 set32LE(fixUpLocation
, delta
);
1692 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
1693 // TLV entry was optimized away, change movq instruction to a leaq
1694 if ( fixUpLocation
[-2] != 0x8B )
1695 throw "TLV load reloc does not point to a movq instruction";
1696 fixUpLocation
[-2] = 0x8D;
1697 accumulator
= addressOf(state
, fit
, &toTarget
);
1698 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1699 rangeCheckRIP32(delta
, state
, atom
, fit
);
1700 set32LE(fixUpLocation
, delta
);
1702 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
1703 accumulator
= addressOf(state
, fit
, &toTarget
);
1704 thumbTarget
= targetIsThumb(state
, fit
);
1705 if ( toTarget
->contentType() == ld::Atom::typeBranchIsland
) {
1706 // Branching to island. If ultimate target is in range, branch there directly.
1707 for (ld::Fixup::iterator islandfit
= toTarget
->fixupsBegin(), end
=toTarget
->fixupsEnd(); islandfit
!= end
; ++islandfit
) {
1708 if ( islandfit
->kind
== ld::Fixup::kindIslandTarget
) {
1709 const ld::Atom
* islandTarget
= NULL
;
1710 uint64_t islandTargetAddress
= addressOf(state
, islandfit
, &islandTarget
);
1711 delta
= islandTargetAddress
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1712 if ( checkArmBranch24Displacement(delta
) ) {
1713 toTarget
= islandTarget
;
1714 accumulator
= islandTargetAddress
;
1715 thumbTarget
= targetIsThumb(state
, islandfit
);
1723 if ( fit
->contentDetlaToAddendOnly
)
1725 // fall into kindStoreARMBranch24 case
1726 case ld::Fixup::kindStoreARMBranch24
:
1727 // The pc added will be +8 from the pc
1728 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 8);
1729 rangeCheckARMBranch24(delta
, state
, atom
, fit
);
1730 instruction
= get32LE(fixUpLocation
);
1731 // Make sure we are calling arm with bl, thumb with blx
1732 is_bl
= ((instruction
& 0xFF000000) == 0xEB000000);
1733 is_blx
= ((instruction
& 0xFE000000) == 0xFA000000);
1734 is_b
= !is_blx
&& ((instruction
& 0x0F000000) == 0x0A000000);
1735 if ( (is_bl
| is_blx
) && thumbTarget
) {
1736 uint32_t opcode
= 0xFA000000; // force to be blx
1737 uint32_t disp
= (uint32_t)(delta
>> 2) & 0x00FFFFFF;
1738 uint32_t h_bit
= (uint32_t)(delta
<< 23) & 0x01000000;
1739 newInstruction
= opcode
| h_bit
| disp
;
1741 else if ( (is_bl
| is_blx
) && !thumbTarget
) {
1742 uint32_t opcode
= 0xEB000000; // force to be bl
1743 uint32_t disp
= (uint32_t)(delta
>> 2) & 0x00FFFFFF;
1744 newInstruction
= opcode
| disp
;
1746 else if ( is_b
&& thumbTarget
) {
1747 if ( fit
->contentDetlaToAddendOnly
)
1748 newInstruction
= (instruction
& 0xFF000000) | ((uint32_t)(delta
>> 2) & 0x00FFFFFF);
1750 throwf("no pc-rel bx arm instruction. Can't fix up branch to %s in %s",
1751 referenceTargetAtomName(state
, fit
), atom
->name());
1753 else if ( !is_bl
&& !is_blx
&& thumbTarget
) {
1754 throwf("don't know how to convert instruction %x referencing %s to thumb",
1755 instruction
, referenceTargetAtomName(state
, fit
));
1758 newInstruction
= (instruction
& 0xFF000000) | ((uint32_t)(delta
>> 2) & 0x00FFFFFF);
1760 set32LE(fixUpLocation
, newInstruction
);
1762 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
1763 accumulator
= addressOf(state
, fit
, &toTarget
);
1764 thumbTarget
= targetIsThumb(state
, fit
);
1765 if ( toTarget
->contentType() == ld::Atom::typeBranchIsland
) {
1766 // branching to island, so see if ultimate target is in range
1767 // and if so branch to ultimate target instead.
1768 for (ld::Fixup::iterator islandfit
= toTarget
->fixupsBegin(), end
=toTarget
->fixupsEnd(); islandfit
!= end
; ++islandfit
) {
1769 if ( islandfit
->kind
== ld::Fixup::kindIslandTarget
) {
1770 const ld::Atom
* islandTarget
= NULL
;
1771 uint64_t islandTargetAddress
= addressOf(state
, islandfit
, &islandTarget
);
1772 if ( !fit
->contentDetlaToAddendOnly
) {
1773 if ( targetIsThumb(state
, islandfit
) ) {
1774 // Thumb to thumb branch, we will be generating a bl instruction.
1775 // Delta is always even, so mask out thumb bit in target.
1776 islandTargetAddress
&= -2ULL;
1779 // Target is not thumb, we will be generating a blx instruction
1780 // Since blx cannot have the low bit set, set bit[1] of the target to
1781 // bit[1] of the base address, so that the difference is a multiple of
1783 islandTargetAddress
&= -3ULL;
1784 islandTargetAddress
|= ((atom
->finalAddress() + fit
->offsetInAtom
) & 2LL);
1787 delta
= islandTargetAddress
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1788 if ( checkThumbBranch22Displacement(delta
) ) {
1789 toTarget
= islandTarget
;
1790 accumulator
= islandTargetAddress
;
1791 thumbTarget
= targetIsThumb(state
, islandfit
);
1799 if ( fit
->contentDetlaToAddendOnly
)
1801 // fall into kindStoreThumbBranch22 case
1802 case ld::Fixup::kindStoreThumbBranch22
:
1803 instruction
= get32LE(fixUpLocation
);
1804 is_bl
= ((instruction
& 0xD000F800) == 0xD000F000);
1805 is_blx
= ((instruction
& 0xD000F800) == 0xC000F000);
1806 is_b
= ((instruction
& 0xD000F800) == 0x9000F000);
1807 if ( !fit
->contentDetlaToAddendOnly
) {
1808 if ( thumbTarget
) {
1809 // Thumb to thumb branch, we will be generating a bl instruction.
1810 // Delta is always even, so mask out thumb bit in target.
1811 accumulator
&= -2ULL;
1814 // Target is not thumb, we will be generating a blx instruction
1815 // Since blx cannot have the low bit set, set bit[1] of the target to
1816 // bit[1] of the base address, so that the difference is a multiple of
1818 accumulator
&= -3ULL;
1819 accumulator
|= ((atom
->finalAddress() + fit
->offsetInAtom
) & 2LL);
1822 // The pc added will be +4 from the pc
1823 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
+ 4);
1824 // <rdar://problem/16652542> support bl in very large .o files
1825 if ( fit
->contentDetlaToAddendOnly
) {
1826 while ( delta
< (-16777216LL) )
1829 rangeCheckThumbBranch22(delta
, state
, atom
, fit
);
1830 if ( _options
.preferSubArchitecture() && _options
.archSupportsThumb2() ) {
1831 // The instruction is really two instructions:
1832 // The lower 16 bits are the first instruction, which contains the high
1833 // 11 bits of the displacement.
1834 // The upper 16 bits are the second instruction, which contains the low
1835 // 11 bits of the displacement, as well as differentiating bl and blx.
1836 uint32_t s
= (uint32_t)(delta
>> 24) & 0x1;
1837 uint32_t i1
= (uint32_t)(delta
>> 23) & 0x1;
1838 uint32_t i2
= (uint32_t)(delta
>> 22) & 0x1;
1839 uint32_t imm10
= (uint32_t)(delta
>> 12) & 0x3FF;
1840 uint32_t imm11
= (uint32_t)(delta
>> 1) & 0x7FF;
1841 uint32_t j1
= (i1
== s
);
1842 uint32_t j2
= (i2
== s
);
1845 instruction
= 0xD000F000; // keep bl
1847 instruction
= 0xC000F000; // change to blx
1849 else if ( is_blx
) {
1851 instruction
= 0xD000F000; // change to bl
1853 instruction
= 0xC000F000; // keep blx
1856 instruction
= 0x9000F000; // keep b
1857 if ( !thumbTarget
&& !fit
->contentDetlaToAddendOnly
) {
1858 throwf("armv7 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1859 referenceTargetAtomName(state
, fit
), atom
->name());
1864 throwf("don't know how to convert branch instruction %x referencing %s to bx",
1865 instruction
, referenceTargetAtomName(state
, fit
));
1866 instruction
= 0x9000F000; // keep b
1868 uint32_t nextDisp
= (j1
<< 13) | (j2
<< 11) | imm11
;
1869 uint32_t firstDisp
= (s
<< 10) | imm10
;
1870 newInstruction
= instruction
| (nextDisp
<< 16) | firstDisp
;
1871 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, instruction=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
1872 // s, j1, j2, imm10, imm11, instruction, firstDisp, nextDisp, newInstruction, delta, atom->name(), toTarget->name());
1873 set32LE(fixUpLocation
, newInstruction
);
1876 // The instruction is really two instructions:
1877 // The lower 16 bits are the first instruction, which contains the high
1878 // 11 bits of the displacement.
1879 // The upper 16 bits are the second instruction, which contains the low
1880 // 11 bits of the displacement, as well as differentiating bl and blx.
1881 uint32_t firstDisp
= (uint32_t)(delta
>> 12) & 0x7FF;
1882 uint32_t nextDisp
= (uint32_t)(delta
>> 1) & 0x7FF;
1883 if ( is_bl
&& !thumbTarget
) {
1884 instruction
= 0xE800F000;
1886 else if ( is_blx
&& thumbTarget
) {
1887 instruction
= 0xF800F000;
1890 instruction
= 0x9000F000; // keep b
1891 if ( !thumbTarget
&& !fit
->contentDetlaToAddendOnly
) {
1892 throwf("armv6 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1893 referenceTargetAtomName(state
, fit
), atom
->name());
1897 instruction
= instruction
& 0xF800F800;
1899 newInstruction
= instruction
| (nextDisp
<< 16) | firstDisp
;
1900 set32LE(fixUpLocation
, newInstruction
);
1903 case ld::Fixup::kindStoreARMLow16
:
1905 uint32_t imm4
= (accumulator
& 0x0000F000) >> 12;
1906 uint32_t imm12
= accumulator
& 0x00000FFF;
1907 instruction
= get32LE(fixUpLocation
);
1908 newInstruction
= (instruction
& 0xFFF0F000) | (imm4
<< 16) | imm12
;
1909 set32LE(fixUpLocation
, newInstruction
);
1912 case ld::Fixup::kindStoreARMHigh16
:
1914 uint32_t imm4
= (accumulator
& 0xF0000000) >> 28;
1915 uint32_t imm12
= (accumulator
& 0x0FFF0000) >> 16;
1916 instruction
= get32LE(fixUpLocation
);
1917 newInstruction
= (instruction
& 0xFFF0F000) | (imm4
<< 16) | imm12
;
1918 set32LE(fixUpLocation
, newInstruction
);
1921 case ld::Fixup::kindStoreThumbLow16
:
1923 uint32_t imm4
= (accumulator
& 0x0000F000) >> 12;
1924 uint32_t i
= (accumulator
& 0x00000800) >> 11;
1925 uint32_t imm3
= (accumulator
& 0x00000700) >> 8;
1926 uint32_t imm8
= accumulator
& 0x000000FF;
1927 instruction
= get32LE(fixUpLocation
);
1928 newInstruction
= (instruction
& 0x8F00FBF0) | imm4
| (i
<< 10) | (imm3
<< 28) | (imm8
<< 16);
1929 set32LE(fixUpLocation
, newInstruction
);
1932 case ld::Fixup::kindStoreThumbHigh16
:
1934 uint32_t imm4
= (accumulator
& 0xF0000000) >> 28;
1935 uint32_t i
= (accumulator
& 0x08000000) >> 27;
1936 uint32_t imm3
= (accumulator
& 0x07000000) >> 24;
1937 uint32_t imm8
= (accumulator
& 0x00FF0000) >> 16;
1938 instruction
= get32LE(fixUpLocation
);
1939 newInstruction
= (instruction
& 0x8F00FBF0) | imm4
| (i
<< 10) | (imm3
<< 28) | (imm8
<< 16);
1940 set32LE(fixUpLocation
, newInstruction
);
1943 #if SUPPORT_ARCH_arm64
1944 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
1945 accumulator
= addressOf(state
, fit
, &toTarget
);
1946 // fall into kindStoreARM64Branch26 case
1947 case ld::Fixup::kindStoreARM64Branch26
:
1948 if ( fit
->contentAddendOnly
)
1949 delta
= accumulator
;
1951 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
);
1952 rangeCheckARM64Branch26(delta
, state
, atom
, fit
);
1953 instruction
= get32LE(fixUpLocation
);
1954 newInstruction
= (instruction
& 0xFC000000) | ((uint32_t)(delta
>> 2) & 0x03FFFFFF);
1955 set32LE(fixUpLocation
, newInstruction
);
1957 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
1958 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
1959 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
1960 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
1961 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
1962 accumulator
= addressOf(state
, fit
, &toTarget
);
1963 // fall into kindStoreARM64Branch26 case
1964 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
1965 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
1966 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
1967 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
1968 case ld::Fixup::kindStoreARM64Page21
:
1970 // the ADRP instruction adds the imm << 12 to the page that the pc is on
1971 if ( fit
->contentAddendOnly
)
1974 delta
= (accumulator
& (-4096)) - ((atom
->finalAddress() + fit
->offsetInAtom
) & (-4096));
1975 rangeCheckARM64Page21(delta
, state
, atom
, fit
);
1976 instruction
= get32LE(fixUpLocation
);
1977 uint32_t immhi
= (delta
>> 9) & (0x00FFFFE0);
1978 uint32_t immlo
= (delta
<< 17) & (0x60000000);
1979 newInstruction
= (instruction
& 0x9F00001F) | immlo
| immhi
;
1980 set32LE(fixUpLocation
, newInstruction
);
1983 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
1984 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
1985 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
1986 accumulator
= addressOf(state
, fit
, &toTarget
);
1987 // fall into kindAddressARM64PageOff12 case
1988 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12
:
1989 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
1990 case ld::Fixup::kindStoreARM64PageOff12
:
1992 uint32_t offset
= accumulator
& 0x00000FFF;
1993 instruction
= get32LE(fixUpLocation
);
1994 // LDR/STR instruction have implicit scale factor, need to compensate for that
1995 if ( instruction
& 0x08000000 ) {
1996 uint32_t implictShift
= ((instruction
>> 30) & 0x3);
1997 switch ( implictShift
) {
1999 if ( (instruction
& 0x04800000) == 0x04800000 ) {
2000 // vector and byte LDR/STR have same "size" bits, need to check other bits to differenciate
2002 if ( (offset
& 0xF) != 0 ) {
2003 throwf("128-bit LDR/STR not 16-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2004 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
2005 addressOf(state
, fit
, &toTarget
));
2010 if ( (offset
& 0x1) != 0 ) {
2011 throwf("16-bit LDR/STR not 2-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2012 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
2013 addressOf(state
, fit
, &toTarget
));
2017 if ( (offset
& 0x3) != 0 ) {
2018 throwf("32-bit LDR/STR not 4-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2019 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
2020 addressOf(state
, fit
, &toTarget
));
2024 if ( (offset
& 0x7) != 0 ) {
2025 throwf("64-bit LDR/STR not 8-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2026 atom
->name(), atom
->finalAddress(), referenceTargetAtomName(state
, fit
),
2027 addressOf(state
, fit
, &toTarget
));
2031 // compensate for implicit scale
2032 offset
>>= implictShift
;
2034 if ( fit
->contentAddendOnly
)
2036 uint32_t imm12
= offset
<< 10;
2037 newInstruction
= (instruction
& 0xFFC003FF) | imm12
;
2038 set32LE(fixUpLocation
, newInstruction
);
2041 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
2042 accumulator
= addressOf(state
, fit
, &toTarget
);
2043 // fall into kindStoreARM64GOTLoadPage21 case
2044 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
2046 // GOT entry was optimized away, change LDR instruction to a ADD
2047 instruction
= get32LE(fixUpLocation
);
2048 if ( (instruction
& 0xBFC00000) != 0xB9400000 )
2049 throwf("GOT load reloc does not point to a LDR instruction in %s", atom
->name());
2050 uint32_t offset
= accumulator
& 0x00000FFF;
2051 uint32_t imm12
= offset
<< 10;
2052 newInstruction
= 0x91000000 | imm12
| (instruction
& 0x000003FF);
2053 set32LE(fixUpLocation
, newInstruction
);
2056 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
2057 accumulator
= addressOf(state
, fit
, &toTarget
);
2058 // fall into kindStoreARM64TLVPLeaPageOff12 case
2059 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
2061 // TLV thunk in same linkage unit, so LEA it directly, changing LDR instruction to a ADD
2062 instruction
= get32LE(fixUpLocation
);
2063 if ( (instruction
& 0xBFC00000) != 0xB9400000 )
2064 throwf("TLV load reloc does not point to a LDR instruction in %s", atom
->name());
2065 uint32_t offset
= accumulator
& 0x00000FFF;
2066 uint32_t imm12
= offset
<< 10;
2067 newInstruction
= 0x91000000 | imm12
| (instruction
& 0x000003FF);
2068 set32LE(fixUpLocation
, newInstruction
);
2071 case ld::Fixup::kindStoreARM64PointerToGOT
:
2072 set64LE(fixUpLocation
, accumulator
);
2074 case ld::Fixup::kindStoreARM64PCRelToGOT
:
2075 if ( fit
->contentAddendOnly
)
2076 delta
= accumulator
;
2078 delta
= accumulator
- (atom
->finalAddress() + fit
->offsetInAtom
);
2079 set32LE(fixUpLocation
, delta
);
2085 #if SUPPORT_ARCH_arm64
2086 // after all fixups are done on atom, if there are potential optimizations, do those
2087 if ( (usedByHints
.size() != 0) && (_options
.outputKind() != Options::kObjectFile
) && !_options
.ignoreOptimizationHints() ) {
2088 // fill in second part of usedByHints map, so we can see the target of fixups that might be optimized
2089 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2090 switch ( fit
->kind
) {
2091 case ld::Fixup::kindLinkerOptimizationHint
:
2092 case ld::Fixup::kindNoneFollowOn
:
2093 case ld::Fixup::kindNoneGroupSubordinate
:
2094 case ld::Fixup::kindNoneGroupSubordinateFDE
:
2095 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
2096 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
2099 if ( fit
->firstInCluster() ) {
2100 std::map
<uint32_t, const Fixup
*>::iterator pos
= usedByHints
.find(fit
->offsetInAtom
);
2101 if ( pos
!= usedByHints
.end() ) {
2102 assert(pos
->second
== NULL
&& "two fixups in same hint location");
2104 //fprintf(stderr, "setting %s usedByHints[0x%04X], kind = %d\n", atom->name(), fit->offsetInAtom, fit->kind);
2110 // apply hints pass 1
2111 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2112 if ( fit
->kind
!= ld::Fixup::kindLinkerOptimizationHint
)
2114 InstructionInfo infoA
;
2115 InstructionInfo infoB
;
2116 InstructionInfo infoC
;
2117 InstructionInfo infoD
;
2118 LoadStoreInfo ldrInfoB
, ldrInfoC
;
2122 bool targetFourByteAligned
;
2123 bool literalableSize
, isADRP
, isADD
, isLDR
, isSTR
;
2124 //uint8_t loadSize, destReg;
2125 //uint32_t scaledOffset;
2127 ld::Fixup::LOH_arm64 alt
;
2128 alt
.addend
= fit
->u
.addend
;
2129 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta1
<< 2), &infoA
);
2130 if ( alt
.info
.count
> 0 )
2131 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta2
<< 2), &infoB
);
2132 if ( alt
.info
.count
> 1 )
2133 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta3
<< 2), &infoC
);
2134 if ( alt
.info
.count
> 2 )
2135 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta4
<< 2), &infoD
);
2137 if ( _options
.sharedRegionEligible() ) {
2138 if ( _options
.sharedRegionEncodingV2() ) {
2139 // In v2 format, all references might be move at dyld shared cache creation time
2140 usableSegment
= false;
2143 // In v1 format, only references to something in __TEXT segment could be optimized
2144 usableSegment
= (strcmp(atom
->section().segmentName(), infoB
.target
->section().segmentName()) == 0);
2148 // main executables can optimize any reference
2149 usableSegment
= true;
2152 switch ( alt
.info
.kind
) {
2153 case LOH_ARM64_ADRP_ADRP
:
2154 // processed in pass 2 because some ADRP may have been removed
2156 case LOH_ARM64_ADRP_LDR
:
2157 LOH_ASSERT(alt
.info
.count
== 1);
2158 LOH_ASSERT(isPageKind(infoA
.fixup
));
2159 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2160 LOH_ASSERT(infoA
.target
== infoB
.target
);
2161 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2162 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2164 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2165 // silently ignore LDRs transformed to ADD by TLV pass
2166 if ( !isLDR
&& infoB
.fixup
->kind
== ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
)
2169 LOH_ASSERT(ldrInfoB
.baseReg
== adrpInfoA
.destReg
);
2170 LOH_ASSERT(ldrInfoB
.offset
== (infoA
.targetAddress
& 0x00000FFF));
2171 literalableSize
= ( (ldrInfoB
.size
!= 1) && (ldrInfoB
.size
!= 2) );
2172 targetFourByteAligned
= ( (infoA
.targetAddress
& 0x3) == 0 );
2173 if ( literalableSize
&& usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2174 set32LE(infoA
.instructionContent
, makeNOP());
2175 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2176 if ( _options
.verboseOptimizationHints() )
2177 fprintf(stderr
, "adrp-ldr at 0x%08llX transformed to LDR literal, usableSegment=%d usableSegment\n", infoB
.instructionAddress
, usableSegment
);
2180 if ( _options
.verboseOptimizationHints() )
2181 fprintf(stderr
, "adrp-ldr at 0x%08llX not transformed, isLDR=%d, literalableSize=%d, inRange=%d, usableSegment=%d, scaledOffset=%d\n",
2182 infoB
.instructionAddress
, isLDR
, literalableSize
, withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
), usableSegment
, ldrInfoB
.offset
);
2185 case LOH_ARM64_ADRP_ADD_LDR
:
2186 LOH_ASSERT(alt
.info
.count
== 2);
2187 LOH_ASSERT(isPageKind(infoA
.fixup
));
2188 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2189 LOH_ASSERT(infoC
.fixup
== NULL
);
2190 LOH_ASSERT(infoA
.target
== infoB
.target
);
2191 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2192 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2194 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2196 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2197 isLDR
= parseLoadOrStore(infoC
.instruction
, ldrInfoC
);
2199 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2200 targetFourByteAligned
= ( ((infoB
.targetAddress
+ldrInfoC
.offset
) & 0x3) == 0 );
2201 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2202 if ( literalableSize
&& usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2203 // can do T1 transformation to LDR literal
2204 set32LE(infoA
.instructionContent
, makeNOP());
2205 set32LE(infoB
.instructionContent
, makeNOP());
2206 set32LE(infoC
.instructionContent
, makeLDR_literal(ldrInfoC
, infoA
.targetAddress
+ldrInfoC
.offset
, infoC
.instructionAddress
));
2207 if ( _options
.verboseOptimizationHints() ) {
2208 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC
.instructionAddress
);
2211 else if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2212 // can to T4 transformation and turn ADRP/ADD into ADR
2213 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
+ldrInfoC
.offset
, infoA
.instructionAddress
));
2214 set32LE(infoB
.instructionContent
, makeNOP());
2215 ldrInfoC
.offset
= 0; // offset is now in ADR instead of ADD or LDR
2216 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2217 set32LE(infoC
.instructionContent
, infoC
.instruction
& 0xFFC003FF);
2218 if ( _options
.verboseOptimizationHints() )
2219 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoB
.instructionAddress
);
2221 else if ( ((infoB
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2222 // can do T2 transformation by merging ADD into LD
2224 set32LE(infoB
.instructionContent
, makeNOP());
2225 ldrInfoC
.offset
+= addInfoB
.addend
;
2226 ldrInfoC
.baseReg
= adrpInfoA
.destReg
;
2227 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2228 if ( _options
.verboseOptimizationHints() )
2229 fprintf(stderr
, "adrp-add-ldr at 0x%08llX T2 transformed to ADRP/LDR \n", infoC
.instructionAddress
);
2232 if ( _options
.verboseOptimizationHints() )
2233 fprintf(stderr
, "adrp-add-ldr at 0x%08llX could not be transformed, loadSize=%d, literalableSize=%d, inRange=%d, usableSegment=%d, targetFourByteAligned=%d, imm12=%d\n",
2234 infoC
.instructionAddress
, ldrInfoC
.size
, literalableSize
, withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
), usableSegment
, targetFourByteAligned
, ldrInfoC
.offset
);
2237 case LOH_ARM64_ADRP_ADD
:
2238 LOH_ASSERT(alt
.info
.count
== 1);
2239 LOH_ASSERT(isPageKind(infoA
.fixup
));
2240 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2241 LOH_ASSERT(infoA
.target
== infoB
.target
);
2242 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2243 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2245 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2247 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2248 if ( usableSegment
&& withinOneMeg(infoA
.targetAddress
, infoA
.instructionAddress
) ) {
2249 // can do T4 transformation and use ADR
2250 set32LE(infoA
.instructionContent
, makeADR(addInfoB
.destReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2251 set32LE(infoB
.instructionContent
, makeNOP());
2252 if ( _options
.verboseOptimizationHints() )
2253 fprintf(stderr
, "adrp-add at 0x%08llX transformed to ADR\n", infoB
.instructionAddress
);
2256 if ( _options
.verboseOptimizationHints() )
2257 fprintf(stderr
, "adrp-add at 0x%08llX not transformed, isAdd=%d, inRange=%d, usableSegment=%d\n",
2258 infoB
.instructionAddress
, isADD
, withinOneMeg(infoA
.targetAddress
, infoA
.instructionAddress
), usableSegment
);
2261 case LOH_ARM64_ADRP_LDR_GOT_LDR
:
2262 LOH_ASSERT(alt
.info
.count
== 2);
2263 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2264 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2265 LOH_ASSERT(infoC
.fixup
== NULL
);
2266 LOH_ASSERT(infoA
.target
== infoB
.target
);
2267 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2268 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2270 isLDR
= parseLoadOrStore(infoC
.instruction
, ldrInfoC
);
2272 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2273 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2275 // target of GOT is external
2276 LOH_ASSERT(ldrInfoB
.size
== 8);
2277 LOH_ASSERT(!ldrInfoB
.isFloat
);
2278 LOH_ASSERT(ldrInfoC
.baseReg
== ldrInfoB
.reg
);
2279 //fprintf(stderr, "infoA.target=%p, %s, infoA.targetAddress=0x%08llX\n", infoA.target, infoA.target->name(), infoA.targetAddress);
2280 targetFourByteAligned
= ( ((infoA
.targetAddress
+ ldrInfoC
.offset
) & 0x3) == 0 );
2281 if ( usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
+ ldrInfoC
.offset
) ) {
2282 // can do T5 transform
2283 set32LE(infoA
.instructionContent
, makeNOP());
2284 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2285 if ( _options
.verboseOptimizationHints() ) {
2286 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T5 transformed to LDR literal of GOT plus LDR\n", infoC
.instructionAddress
);
2290 if ( _options
.verboseOptimizationHints() )
2291 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX no optimization done\n", infoC
.instructionAddress
);
2295 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2296 LOH_ASSERT(addInfoB
.srcReg
== adrpInfoA
.destReg
);
2297 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2298 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2299 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2300 if ( usableSegment
&& literalableSize
&& targetFourByteAligned
&& withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ ldrInfoC
.offset
) ) {
2301 // can do T1 transform
2302 set32LE(infoA
.instructionContent
, makeNOP());
2303 set32LE(infoB
.instructionContent
, makeNOP());
2304 set32LE(infoC
.instructionContent
, makeLDR_literal(ldrInfoC
, infoA
.targetAddress
+ ldrInfoC
.offset
, infoC
.instructionAddress
));
2305 if ( _options
.verboseOptimizationHints() )
2306 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC
.instructionAddress
);
2308 else if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2309 // can do T4 transform
2310 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2311 set32LE(infoB
.instructionContent
, makeNOP());
2312 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2313 if ( _options
.verboseOptimizationHints() ) {
2314 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoC
.instructionAddress
);
2317 else if ( ((infoA
.targetAddress
% ldrInfoC
.size
) == 0) && ((addInfoB
.addend
+ ldrInfoC
.offset
) < 4096) ) {
2318 // can do T2 transform
2319 set32LE(infoB
.instructionContent
, makeNOP());
2320 ldrInfoC
.baseReg
= adrpInfoA
.destReg
;
2321 ldrInfoC
.offset
+= addInfoB
.addend
;
2322 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2323 if ( _options
.verboseOptimizationHints() ) {
2324 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T2 transformed to ADRP/NOP/LDR\n", infoC
.instructionAddress
);
2328 // T3 transform already done by ld::passes:got:doPass()
2329 if ( _options
.verboseOptimizationHints() ) {
2330 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX T3 transformed to ADRP/ADD/LDR\n", infoC
.instructionAddress
);
2335 if ( _options
.verboseOptimizationHints() )
2336 fprintf(stderr
, "adrp-ldr-got-ldr at 0x%08llX not ADD or LDR\n", infoC
.instructionAddress
);
2339 case LOH_ARM64_ADRP_ADD_STR
:
2340 LOH_ASSERT(alt
.info
.count
== 2);
2341 LOH_ASSERT(isPageKind(infoA
.fixup
));
2342 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
));
2343 LOH_ASSERT(infoC
.fixup
== NULL
);
2344 LOH_ASSERT(infoA
.target
== infoB
.target
);
2345 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2346 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2348 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2350 LOH_ASSERT(adrpInfoA
.destReg
== addInfoB
.srcReg
);
2351 isSTR
= (parseLoadOrStore(infoC
.instruction
, ldrInfoC
) && ldrInfoC
.isStore
);
2353 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2354 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
) ) {
2355 // can to T4 transformation and turn ADRP/ADD into ADR
2356 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
+ldrInfoC
.offset
, infoA
.instructionAddress
));
2357 set32LE(infoB
.instructionContent
, makeNOP());
2358 ldrInfoC
.offset
= 0; // offset is now in ADR instead of ADD or LDR
2359 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2360 set32LE(infoC
.instructionContent
, infoC
.instruction
& 0xFFC003FF);
2361 if ( _options
.verboseOptimizationHints() )
2362 fprintf(stderr
, "adrp-add-str at 0x%08llX T4 transformed to ADR/STR\n", infoB
.instructionAddress
);
2364 else if ( ((infoB
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2365 // can do T2 transformation by merging ADD into STR
2367 set32LE(infoB
.instructionContent
, makeNOP());
2368 ldrInfoC
.offset
+= addInfoB
.addend
;
2369 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2370 if ( _options
.verboseOptimizationHints() )
2371 fprintf(stderr
, "adrp-add-str at 0x%08llX T2 transformed to ADRP/STR \n", infoC
.instructionAddress
);
2374 if ( _options
.verboseOptimizationHints() )
2375 fprintf(stderr
, "adrp-add-str at 0x%08llX could not be transformed, loadSize=%d, inRange=%d, usableSegment=%d, imm12=%d\n",
2376 infoC
.instructionAddress
, ldrInfoC
.size
, withinOneMeg(infoC
.instructionAddress
, infoA
.targetAddress
+ldrInfoC
.offset
), usableSegment
, ldrInfoC
.offset
);
2379 case LOH_ARM64_ADRP_LDR_GOT_STR
:
2380 LOH_ASSERT(alt
.info
.count
== 2);
2381 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2382 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2383 LOH_ASSERT(infoC
.fixup
== NULL
);
2384 LOH_ASSERT(infoA
.target
== infoB
.target
);
2385 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2386 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2388 isSTR
= (parseLoadOrStore(infoC
.instruction
, ldrInfoC
) && ldrInfoC
.isStore
);
2390 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2391 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2393 // target of GOT is external
2394 LOH_ASSERT(ldrInfoB
.size
== 8);
2395 LOH_ASSERT(!ldrInfoB
.isFloat
);
2396 LOH_ASSERT(ldrInfoC
.baseReg
== ldrInfoB
.reg
);
2397 targetFourByteAligned
= ( ((infoA
.targetAddress
+ ldrInfoC
.offset
) & 0x3) == 0 );
2398 if ( usableSegment
&& targetFourByteAligned
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
+ ldrInfoC
.offset
) ) {
2399 // can do T5 transform
2400 set32LE(infoA
.instructionContent
, makeNOP());
2401 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2402 if ( _options
.verboseOptimizationHints() ) {
2403 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T5 transformed to LDR literal of GOT plus STR\n", infoC
.instructionAddress
);
2407 if ( _options
.verboseOptimizationHints() )
2408 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX no optimization done\n", infoC
.instructionAddress
);
2412 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2413 LOH_ASSERT(addInfoB
.srcReg
== adrpInfoA
.destReg
);
2414 LOH_ASSERT(addInfoB
.destReg
== ldrInfoC
.baseReg
);
2415 targetFourByteAligned
= ( ((infoA
.targetAddress
) & 0x3) == 0 );
2416 literalableSize
= ( (ldrInfoC
.size
!= 1) && (ldrInfoC
.size
!= 2) );
2417 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2418 // can do T4 transform
2419 set32LE(infoA
.instructionContent
, makeADR(ldrInfoC
.baseReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2420 set32LE(infoB
.instructionContent
, makeNOP());
2421 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2422 if ( _options
.verboseOptimizationHints() ) {
2423 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADR/STR\n", infoC
.instructionAddress
);
2426 else if ( ((infoA
.targetAddress
% ldrInfoC
.size
) == 0) && (ldrInfoC
.offset
== 0) ) {
2427 // can do T2 transform
2428 set32LE(infoB
.instructionContent
, makeNOP());
2429 ldrInfoC
.baseReg
= adrpInfoA
.destReg
;
2430 ldrInfoC
.offset
+= addInfoB
.addend
;
2431 set32LE(infoC
.instructionContent
, makeLoadOrStore(ldrInfoC
));
2432 if ( _options
.verboseOptimizationHints() ) {
2433 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADRP/NOP/STR\n", infoC
.instructionAddress
);
2437 // T3 transform already done by ld::passes:got:doPass()
2438 if ( _options
.verboseOptimizationHints() ) {
2439 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX T3 transformed to ADRP/ADD/STR\n", infoC
.instructionAddress
);
2444 if ( _options
.verboseOptimizationHints() )
2445 fprintf(stderr
, "adrp-ldr-got-str at 0x%08llX not ADD or LDR\n", infoC
.instructionAddress
);
2448 case LOH_ARM64_ADRP_LDR_GOT
:
2449 LOH_ASSERT(alt
.info
.count
== 1);
2450 LOH_ASSERT(isPageKind(infoA
.fixup
, true));
2451 LOH_ASSERT(isPageOffsetKind(infoB
.fixup
, true));
2452 LOH_ASSERT(infoA
.target
== infoB
.target
);
2453 LOH_ASSERT(infoA
.targetAddress
== infoB
.targetAddress
);
2454 isADRP
= parseADRP(infoA
.instruction
, adrpInfoA
);
2455 isADD
= parseADD(infoB
.instruction
, addInfoB
);
2456 isLDR
= parseLoadOrStore(infoB
.instruction
, ldrInfoB
);
2459 if ( usableSegment
&& withinOneMeg(infoB
.instructionAddress
, infoA
.targetAddress
) ) {
2460 // can do T5 transform (LDR literal load of GOT)
2461 set32LE(infoA
.instructionContent
, makeNOP());
2462 set32LE(infoB
.instructionContent
, makeLDR_literal(ldrInfoB
, infoA
.targetAddress
, infoB
.instructionAddress
));
2463 if ( _options
.verboseOptimizationHints() ) {
2464 fprintf(stderr
, "adrp-ldr-got at 0x%08llX T5 transformed to NOP/LDR\n", infoC
.instructionAddress
);
2469 if ( usableSegment
&& withinOneMeg(infoA
.instructionAddress
, infoA
.targetAddress
) ) {
2470 // can do T4 transform (ADR to compute local address)
2471 set32LE(infoA
.instructionContent
, makeADR(addInfoB
.destReg
, infoA
.targetAddress
, infoA
.instructionAddress
));
2472 set32LE(infoB
.instructionContent
, makeNOP());
2473 if ( _options
.verboseOptimizationHints() ) {
2474 fprintf(stderr
, "adrp-ldr-got at 0x%08llX T4 transformed to ADR/STR\n", infoC
.instructionAddress
);
2479 if ( _options
.verboseOptimizationHints() )
2480 fprintf(stderr
, "adrp-ldr-got at 0x%08llX not LDR or ADD\n", infoB
.instructionAddress
);
2484 if ( _options
.verboseOptimizationHints() )
2485 fprintf(stderr
, "adrp-ldr-got at 0x%08llX not ADRP\n", infoA
.instructionAddress
);
2489 if ( _options
.verboseOptimizationHints() )
2490 fprintf(stderr
, "unknown hint kind %d alt.info.kind at 0x%08llX\n", alt
.info
.kind
, infoA
.instructionAddress
);
2494 // apply hints pass 2
2495 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
2496 if ( fit
->kind
!= ld::Fixup::kindLinkerOptimizationHint
)
2498 InstructionInfo infoA
;
2499 InstructionInfo infoB
;
2500 ld::Fixup::LOH_arm64 alt
;
2501 alt
.addend
= fit
->u
.addend
;
2502 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta1
<< 2), &infoA
);
2503 if ( alt
.info
.count
> 0 )
2504 setInfo(state
, atom
, buffer
, usedByHints
, fit
->offsetInAtom
, (alt
.info
.delta2
<< 2), &infoB
);
2506 switch ( alt
.info
.kind
) {
2507 case LOH_ARM64_ADRP_ADRP
:
2508 LOH_ASSERT(isPageKind(infoA
.fixup
));
2509 LOH_ASSERT(isPageKind(infoB
.fixup
));
2510 if ( (infoA
.instruction
& 0x9F000000) != 0x90000000 ) {
2511 if ( _options
.verboseOptimizationHints() )
2512 fprintf(stderr
, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoA
.instructionAddress
, infoA
.instruction
);
2516 if ( (infoB
.instruction
& 0x9F000000) != 0x90000000 ) {
2517 if ( _options
.verboseOptimizationHints() )
2518 fprintf(stderr
, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoB
.instructionAddress
, infoA
.instruction
);
2522 if ( (infoA
.targetAddress
& (-4096)) == (infoB
.targetAddress
& (-4096)) ) {
2523 set32LE(infoB
.instructionContent
, 0xD503201F);
2533 #endif // SUPPORT_ARCH_arm64
2537 void OutputFile::copyNoOps(uint8_t* from
, uint8_t* to
, bool thumb
)
2539 switch ( _options
.architecture() ) {
2541 case CPU_TYPE_X86_64
:
2542 for (uint8_t* p
=from
; p
< to
; ++p
)
2547 for (uint8_t* p
=from
; p
< to
; p
+= 2)
2548 OSWriteLittleInt16((uint16_t*)p
, 0, 0x46c0);
2551 for (uint8_t* p
=from
; p
< to
; p
+= 4)
2552 OSWriteLittleInt32((uint32_t*)p
, 0, 0xe1a00000);
2556 for (uint8_t* p
=from
; p
< to
; ++p
)
2562 bool OutputFile::takesNoDiskSpace(const ld::Section
* sect
)
2564 switch ( sect
->type() ) {
2565 case ld::Section::typeZeroFill
:
2566 case ld::Section::typeTLVZeroFill
:
2567 return _options
.optimizeZeroFill();
2568 case ld::Section::typePageZero
:
2569 case ld::Section::typeStack
:
2570 case ld::Section::typeAbsoluteSymbols
:
2571 case ld::Section::typeTentativeDefs
:
2579 bool OutputFile::hasZeroForFileOffset(const ld::Section
* sect
)
2581 switch ( sect
->type() ) {
2582 case ld::Section::typeZeroFill
:
2583 case ld::Section::typeTLVZeroFill
:
2584 return _options
.optimizeZeroFill();
2585 case ld::Section::typePageZero
:
2586 case ld::Section::typeStack
:
2587 case ld::Section::typeTentativeDefs
:
2595 void OutputFile::writeAtoms(ld::Internal
& state
, uint8_t* wholeBuffer
)
2597 // have each atom write itself
2598 uint64_t fileOffsetOfEndOfLastAtom
= 0;
2599 uint64_t mhAddress
= 0;
2600 bool lastAtomUsesNoOps
= false;
2601 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2602 ld::Internal::FinalSection
* sect
= *sit
;
2603 if ( sect
->type() == ld::Section::typeMachHeader
)
2604 mhAddress
= sect
->address
;
2605 if ( takesNoDiskSpace(sect
) )
2607 const bool sectionUsesNops
= (sect
->type() == ld::Section::typeCode
);
2608 //fprintf(stderr, "file offset=0x%08llX, section %s\n", sect->fileOffset, sect->sectionName());
2609 std::vector
<const ld::Atom
*>& atoms
= sect
->atoms
;
2610 bool lastAtomWasThumb
= false;
2611 for (std::vector
<const ld::Atom
*>::iterator ait
= atoms
.begin(); ait
!= atoms
.end(); ++ait
) {
2612 const ld::Atom
* atom
= *ait
;
2613 if ( atom
->definition() == ld::Atom::definitionProxy
)
2616 uint64_t fileOffset
= atom
->finalAddress() - sect
->address
+ sect
->fileOffset
;
2617 // check for alignment padding between atoms
2618 if ( (fileOffset
!= fileOffsetOfEndOfLastAtom
) && lastAtomUsesNoOps
) {
2619 this->copyNoOps(&wholeBuffer
[fileOffsetOfEndOfLastAtom
], &wholeBuffer
[fileOffset
], lastAtomWasThumb
);
2621 // copy atom content
2622 atom
->copyRawContent(&wholeBuffer
[fileOffset
]);
2624 this->applyFixUps(state
, mhAddress
, atom
, &wholeBuffer
[fileOffset
]);
2625 fileOffsetOfEndOfLastAtom
= fileOffset
+atom
->size();
2626 lastAtomUsesNoOps
= sectionUsesNops
;
2627 lastAtomWasThumb
= atom
->isThumb();
2629 catch (const char* msg
) {
2630 if ( atom
->file() != NULL
)
2631 throwf("%s in '%s' from %s", msg
, atom
->name(), atom
->safeFilePath());
2633 throwf("%s in '%s'", msg
, atom
->name());
2638 if ( _options
.verboseOptimizationHints() ) {
2639 //fprintf(stderr, "ADRP optimized away: %d\n", sAdrpNA);
2640 //fprintf(stderr, "ADRPs changed to NOPs: %d\n", sAdrpNoped);
2641 //fprintf(stderr, "ADRPs unchanged: %d\n", sAdrpNotNoped);
2645 void OutputFile::computeContentUUID(ld::Internal
& state
, uint8_t* wholeBuffer
)
2647 const bool log
= false;
2648 if ( (_options
.outputKind() != Options::kObjectFile
) || state
.someObjectFileHasDwarf
) {
2649 uint8_t digest
[CC_MD5_DIGEST_LENGTH
];
2650 std::vector
<std::pair
<uint64_t, uint64_t>> excludeRegions
;
2651 uint64_t bitcodeCmdOffset
;
2652 uint64_t bitcodeCmdEnd
;
2653 uint64_t bitcodeSectOffset
;
2654 uint64_t bitcodePaddingEnd
;
2655 if ( _headersAndLoadCommandAtom
->bitcodeBundleCommand(bitcodeCmdOffset
, bitcodeCmdEnd
,
2656 bitcodeSectOffset
, bitcodePaddingEnd
) ) {
2657 // Exclude embedded bitcode bundle section which contains timestamps in XAR header
2658 // Note the timestamp is in the compressed XML header which means it might change the size of
2659 // bitcode section. The load command which include the size of the section and the padding after
2660 // the bitcode section should also be excluded in the UUID computation.
2661 // Bitcode section should appears before LINKEDIT
2662 // Exclude section cmd
2663 if ( log
) fprintf(stderr
, "bundle cmd start=0x%08llX, bundle cmd end=0x%08llX\n",
2664 bitcodeCmdOffset
, bitcodeCmdEnd
);
2665 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(bitcodeCmdOffset
, bitcodeCmdEnd
));
2666 // Exclude section content
2667 if ( log
) fprintf(stderr
, "bundle start=0x%08llX, bundle end=0x%08llX\n",
2668 bitcodeSectOffset
, bitcodePaddingEnd
);
2669 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(bitcodeSectOffset
, bitcodePaddingEnd
));
2671 uint32_t stabsStringsOffsetStart
;
2672 uint32_t tabsStringsOffsetEnd
;
2673 uint32_t stabsOffsetStart
;
2674 uint32_t stabsOffsetEnd
;
2675 if ( _symbolTableAtom
->hasStabs(stabsStringsOffsetStart
, tabsStringsOffsetEnd
, stabsOffsetStart
, stabsOffsetEnd
) ) {
2676 // find two areas of file that are stabs info and should not contribute to checksum
2677 uint64_t stringPoolFileOffset
= 0;
2678 uint64_t symbolTableFileOffset
= 0;
2679 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2680 ld::Internal::FinalSection
* sect
= *sit
;
2681 if ( sect
->type() == ld::Section::typeLinkEdit
) {
2682 if ( strcmp(sect
->sectionName(), "__string_pool") == 0 )
2683 stringPoolFileOffset
= sect
->fileOffset
;
2684 else if ( strcmp(sect
->sectionName(), "__symbol_table") == 0 )
2685 symbolTableFileOffset
= sect
->fileOffset
;
2688 uint64_t firstStabNlistFileOffset
= symbolTableFileOffset
+ stabsOffsetStart
;
2689 uint64_t lastStabNlistFileOffset
= symbolTableFileOffset
+ stabsOffsetEnd
;
2690 uint64_t firstStabStringFileOffset
= stringPoolFileOffset
+ stabsStringsOffsetStart
;
2691 uint64_t lastStabStringFileOffset
= stringPoolFileOffset
+ tabsStringsOffsetEnd
;
2692 if ( log
) fprintf(stderr
, "stabNlist offset=0x%08llX, size=0x%08llX\n", firstStabNlistFileOffset
, lastStabNlistFileOffset
-firstStabNlistFileOffset
);
2693 if ( log
) fprintf(stderr
, "stabString offset=0x%08llX, size=0x%08llX\n", firstStabStringFileOffset
, lastStabStringFileOffset
-firstStabStringFileOffset
);
2694 assert(firstStabNlistFileOffset
<= firstStabStringFileOffset
);
2695 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(firstStabNlistFileOffset
, lastStabNlistFileOffset
));
2696 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(firstStabStringFileOffset
, lastStabStringFileOffset
));
2697 // exclude LINKEDIT LC_SEGMENT (size field depends on stabs size)
2698 uint64_t linkeditSegCmdOffset
;
2699 uint64_t linkeditSegCmdSize
;
2700 _headersAndLoadCommandAtom
->linkeditCmdInfo(linkeditSegCmdOffset
, linkeditSegCmdSize
);
2701 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(linkeditSegCmdOffset
, linkeditSegCmdOffset
+linkeditSegCmdSize
));
2702 if ( log
) fprintf(stderr
, "linkedit SegCmdOffset=0x%08llX, size=0x%08llX\n", linkeditSegCmdOffset
, linkeditSegCmdSize
);
2703 uint64_t symbolTableCmdOffset
;
2704 uint64_t symbolTableCmdSize
;
2705 _headersAndLoadCommandAtom
->symbolTableCmdInfo(symbolTableCmdOffset
, symbolTableCmdSize
);
2706 excludeRegions
.emplace_back(std::pair
<uint64_t, uint64_t>(symbolTableCmdOffset
, symbolTableCmdOffset
+symbolTableCmdSize
));
2707 if ( log
) fprintf(stderr
, "linkedit SegCmdOffset=0x%08llX, size=0x%08llX\n", symbolTableCmdOffset
, symbolTableCmdSize
);
2709 if ( !excludeRegions
.empty() ) {
2710 CC_MD5_CTX md5state
;
2711 CC_MD5_Init(&md5state
);
2712 // rdar://problem/19487042 include the output leaf file name in the hash
2713 const char* lastSlash
= strrchr(_options
.outputFilePath(), '/');
2714 if ( lastSlash
!= NULL
) {
2715 CC_MD5_Update(&md5state
, lastSlash
, strlen(lastSlash
));
2717 std::sort(excludeRegions
.begin(), excludeRegions
.end());
2718 uint64_t checksumStart
= 0;
2719 for ( auto& region
: excludeRegions
) {
2720 uint64_t regionStart
= region
.first
;
2721 uint64_t regionEnd
= region
.second
;
2722 assert(checksumStart
<= regionStart
&& regionStart
<= regionEnd
&& "Region overlapped");
2723 if ( log
) fprintf(stderr
, "checksum 0x%08llX -> 0x%08llX\n", checksumStart
, regionStart
);
2724 CC_MD5_Update(&md5state
, &wholeBuffer
[checksumStart
], regionStart
- checksumStart
);
2725 checksumStart
= regionEnd
;
2727 if ( log
) fprintf(stderr
, "checksum 0x%08llX -> 0x%08llX\n", checksumStart
, _fileSize
);
2728 CC_MD5_Update(&md5state
, &wholeBuffer
[checksumStart
], _fileSize
-checksumStart
);
2729 CC_MD5_Final(digest
, &md5state
);
2730 if ( log
) fprintf(stderr
, "uuid=%02X, %02X, %02X, %02X, %02X, %02X, %02X, %02X\n", digest
[0], digest
[1], digest
[2],
2731 digest
[3], digest
[4], digest
[5], digest
[6], digest
[7]);
2734 CC_MD5(wholeBuffer
, _fileSize
, digest
);
2736 // <rdar://problem/6723729> LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
2737 digest
[6] = ( digest
[6] & 0x0F ) | ( 3 << 4 );
2738 digest
[8] = ( digest
[8] & 0x3F ) | 0x80;
2739 // update buffer with new UUID
2740 _headersAndLoadCommandAtom
->setUUID(digest
);
2741 _headersAndLoadCommandAtom
->recopyUUIDCommand();
// File descriptor of the partially-written temporary output file, recorded so
// the SIGINT handler below can look up its path and delete it. -1 means
// "nothing to clean up" (set back to -1 once the output is safely in place).
2745 static int sDescriptorOfPathToRemove
= -1;
// SIGINT handler installed by writeOutputFile(): if a temporary output file is
// currently being written (descriptor recorded above), recover its path via
// fcntl(F_GETPATH) so it can be removed before the process exits.
// NOTE(review): this extracted chunk is missing several original lines here
// (embedded numbering jumps 2750→2753) — presumably the ::unlink(path) call,
// closing braces, and the final exit; confirm against the complete file.
// NOTE(review): fprintf is not async-signal-safe per POSIX — acceptable here
// only because the process is about to terminate anyway.
2746 static void removePathAndExit(int sig
)
2748 if ( sDescriptorOfPathToRemove
!= -1 ) {
2749 char path
[MAXPATHLEN
];
// map the still-open descriptor back to a filesystem path (macOS-specific F_GETPATH)
2750 if ( ::fcntl(sDescriptorOfPathToRemove
, F_GETPATH
, path
) == 0 )
2753 fprintf(stderr
, "ld: interrupted\n");
// Writes the fully laid-out image to _options.outputFilePath().
// Strategy: when the destination is a regular file on an hfs/apfs volume,
// write through an mmap'ed temporary file ({path}.ld_XXXXXX) and rename() it
// into place atomically; otherwise build the image in a heap buffer and
// write() it directly to the destination.
// NOTE(review): this extracted chunk is missing a number of original lines
// (closing braces, else-branches, error checks — see the jumps in the embedded
// numbering, e.g. 2784→2788, 2830→2833). Code below is kept byte-for-byte as
// found; reconcile against the complete file before acting on it.
2757 void OutputFile::writeOutputFile(ld::Internal
& state
)
2759 // for UNIX conformance, error if file exists and is not writable
2760 if ( (access(_options
.outputFilePath(), F_OK
) == 0) && (access(_options
.outputFilePath(), W_OK
) == -1) )
2761 throwf("can't write output file: %s", _options
.outputFilePath());
// start from 0777 and remove the process umask bits below
2763 mode_t permissions
= 0777;
2764 if ( _options
.outputKind() == Options::kObjectFile
)
// query-and-restore trick: ::umask(0) is the only way to read the umask
2766 mode_t umask
= ::umask(0);
2767 ::umask(umask
); // put back the original umask
2768 permissions
&= ~umask
;
2769 // Calling unlink first assures the file is gone so that open creates it with correct permissions
2770 // It also handles the case where __options.outputFilePath() file is not writable but its directory is
2771 // And it means we don't have to truncate the file when done writing (in case new is smaller than old)
2772 // Lastly, only delete existing file if it is a normal file (e.g. not /dev/null).
2773 struct stat stat_buf
;
2774 bool outputIsRegularFile
= false;
2775 bool outputIsMappableFile
= false;
// Case 1: destination already exists — classify it.
2776 if ( stat(_options
.outputFilePath(), &stat_buf
) != -1 ) {
2777 if (stat_buf
.st_mode
& S_IFREG
) {
2778 outputIsRegularFile
= true;
2779 // <rdar://problem/12264302> Don't use mmap on non-hfs volumes
2780 struct statfs fsInfo
;
2781 if ( statfs(_options
.outputFilePath(), &fsInfo
) != -1 ) {
// only hfs and apfs volumes are trusted for mmap'ed output
2782 if ( (strcmp(fsInfo
.f_fstypename
, "hfs") == 0) || (strcmp(fsInfo
.f_fstypename
, "apfs") == 0) ) {
2783 (void)unlink(_options
.outputFilePath());
2784 outputIsMappableFile
= true;
// NOTE(review): else-branches for the non-mappable / statfs-failure paths
// (original lines 2785-2795) are partially missing from this extraction.
2788 outputIsMappableFile
= false;
2792 outputIsRegularFile
= false;
2796 // special files (pipes, devices, etc) must already exist
2797 outputIsRegularFile
= true;
// Case 2: destination does not exist — probe the parent directory's volume.
2798 // output file does not exist yet
2799 char dirPath
[PATH_MAX
];
2800 strcpy(dirPath
, _options
.outputFilePath());
2801 char* end
= strrchr(dirPath
, '/');
2802 if ( end
!= NULL
) {
2804 struct statfs fsInfo
;
2805 if ( statfs(dirPath
, &fsInfo
) != -1 ) {
2806 if ( (strcmp(fsInfo
.f_fstypename
, "hfs") == 0) || (strcmp(fsInfo
.f_fstypename
, "apfs") == 0) ) {
2807 outputIsMappableFile
= true;
2813 //fprintf(stderr, "outputIsMappableFile=%d, outputIsRegularFile=%d, path=%s\n", outputIsMappableFile, outputIsRegularFile, _options.outputFilePath());
2816 // Construct a temporary path of the form {outputFilePath}.ld_XXXXXX
2817 const char filenameTemplate
[] = ".ld_XXXXXX";
2818 char tmpOutput
[PATH_MAX
];
2819 uint8_t *wholeBuffer
;
// Mappable path: create temp file, grow it to _fileSize, mmap it.
2820 if ( outputIsRegularFile
&& outputIsMappableFile
) {
2821 // <rdar://problem/20959031> ld64 should clean up temporary files on SIGINT
2822 ::signal(SIGINT
, removePathAndExit
);
2824 strcpy(tmpOutput
, _options
.outputFilePath());
2825 // If the path is too long to add a suffix for a temporary name then
2826 // just fall back to using the output path.
2827 if (strlen(tmpOutput
)+strlen(filenameTemplate
) < PATH_MAX
) {
2828 strcat(tmpOutput
, filenameTemplate
);
2829 fd
= mkstemp(tmpOutput
);
// remember the temp file so the SIGINT handler can remove it
2830 sDescriptorOfPathToRemove
= fd
;
// fallback: open the real output path directly (temp name would overflow PATH_MAX)
2833 fd
= open(tmpOutput
, O_RDWR
|O_CREAT
, permissions
);
2836 throwf("can't open output file for writing '%s', errno=%d", tmpOutput
, errno
);
// pre-size the file so the mmap below covers the whole image
2837 if ( ftruncate(fd
, _fileSize
) == -1 ) {
2840 if ( err
== ENOSPC
)
2841 throwf("not enough disk space for writing '%s'", _options
.outputFilePath());
2843 throwf("can't grow file for writing '%s', errno=%d", _options
.outputFilePath(), err
);
2846 wholeBuffer
= (uint8_t *)mmap(NULL
, _fileSize
, PROT_WRITE
|PROT_READ
, MAP_SHARED
, fd
, 0);
2847 if ( wholeBuffer
== MAP_FAILED
)
2848 throwf("can't create buffer of %llu bytes for output", _fileSize
);
// Non-mappable path: open destination directly and stage the image in memory.
2851 if ( outputIsRegularFile
)
2852 fd
= open(_options
.outputFilePath(), O_RDWR
|O_CREAT
, permissions
);
// special files (pipes/devices) are opened write-only, never created
2854 fd
= open(_options
.outputFilePath(), O_WRONLY
);
2856 throwf("can't open output file for writing: %s, errno=%d", _options
.outputFilePath(), errno
);
2857 // try to allocate buffer for entire output file content
2858 wholeBuffer
= (uint8_t*)calloc(_fileSize
, 1);
2859 if ( wholeBuffer
== NULL
)
2860 throwf("can't create buffer of %llu bytes for output", _fileSize
);
// Random UUID must be set before atoms are written so it lands in the header.
2863 if ( _options
.UUIDMode() == Options::kUUIDRandom
) {
2865 ::uuid_generate_random(bits
);
2866 _headersAndLoadCommandAtom
->setUUID(bits
);
// Emit every atom into the buffer (mmap'ed file or heap staging area).
2869 writeAtoms(state
, wholeBuffer
);
// Content-based UUID is computed over the finished buffer, then patched in.
2872 if ( _options
.UUIDMode() == Options::kUUIDContent
)
2873 computeContentUUID(state
, wholeBuffer
);
// Finish: chmod + atomic rename for the temp-file path ...
2875 if ( outputIsRegularFile
&& outputIsMappableFile
) {
2876 if ( ::chmod(tmpOutput
, permissions
) == -1 ) {
2878 throwf("can't set permissions on output file: %s, errno=%d", tmpOutput
, errno
);
// rename can legitimately "fail" when temp and final path are the same
// (the PATH_MAX fallback above) — hence the strcmp guard
2880 if ( ::rename(tmpOutput
, _options
.outputFilePath()) == -1 && strcmp(tmpOutput
, _options
.outputFilePath()) != 0) {
2882 throwf("can't move output file in place, errno=%d", errno
);
// ... or a single write() of the staged buffer for the direct path.
2886 if ( ::write(fd
, wholeBuffer
, _fileSize
) == -1 ) {
2887 throwf("can't write to output file: %s, errno=%d", _options
.outputFilePath(), errno
);
// output is in place — disarm the SIGINT cleanup handler
2889 sDescriptorOfPathToRemove
= -1;
2891 // <rdar://problem/13118223> NFS: iOS incremental builds in Xcode 4.6 fail with codesign error
2892 // NFS seems to pad the end of the file sometimes. Calling trunc seems to correct it...
2893 ::truncate(_options
.outputFilePath(), _fileSize
);
2896 // Rename symbol map file if needed
2897 if ( _options
.renameReverseSymbolMap() ) {
2898 assert(_options
.hideSymbols() && _options
.reverseSymbolMapPath() != NULL
&& "Must hide symbol and specify a path");
// the bcsymbolmap is named after the image UUID so tools can pair them up
2899 uuid_string_t UUIDString
;
2900 const uint8_t* rawUUID
= _headersAndLoadCommandAtom
->getUUID();
2901 uuid_unparse_upper(rawUUID
, UUIDString
);
2902 char outputMapPath
[PATH_MAX
];
2903 sprintf(outputMapPath
, "%s/%s.bcsymbolmap", _options
.reverseSymbolMapPath(), UUIDString
);
2904 if ( ::rename(_options
.reverseMapTempPath().c_str(), outputMapPath
) != 0 )
2905 throwf("could not create bcsymbolmap file: %s", outputMapPath
);
2909 struct AtomByNameSorter
2911 bool operator()(const ld::Atom
* left
, const ld::Atom
* right
) const
2913 return (strcmp(left
->name(), right
->name()) < 0);
2916 bool operator()(const ld::Atom
* left
, const char* right
) const
2918 return (strcmp(left
->name(), right
) < 0);
2921 bool operator()(const char* left
, const ld::Atom
* right
) const
2923 return (strcmp(left
, right
->name()) < 0);
// Predicate functor: true when an atom is NOT a member of the captured set.
// Used with std::remove_if in buildSymbolTable to drop import proxies that no
// fixup actually references (kext export-restrict handling).
// NOTE(review): the enclosing class/struct header line (original ~2929-2930)
// is missing from this extracted chunk.
2931 NotInSet(const std::set
<const ld::Atom
*>& theSet
) : _set(theSet
) {}
// membership test: count()==0 on a std::set means "absent"
2933 bool operator()(const ld::Atom
* atom
) const {
2934 return ( _set
.count(atom
) == 0 );
// non-owning reference — the caller's set must outlive this functor
2937 const std::set
<const ld::Atom
*>& _set
;
// Partitions every atom into _localAtoms / _exportedAtoms / _importedAtoms,
// assigns mach-o section indexes, normalizes symbol-table inclusion and scope,
// then sorts the export/import lists by name and diagnoses $ld$add$ / $ld$hide$
// special linker symbols.
// NOTE(review): this extracted chunk is missing many original lines (closing
// braces, else-branches, break statements — see jumps in the embedded
// numbering, e.g. 3077→3079, 3100→3108). Code below is kept byte-for-byte as
// found; reconcile against the complete file before acting on it.
2941 void OutputFile::buildSymbolTable(ld::Internal
& state
)
// Pass 1: walk every section/atom.
2943 unsigned int machoSectionIndex
= 0;
2944 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
2945 ld::Internal::FinalSection
* sect
= *sit
;
// hidden sections and tentative-def sections do not consume a section number
2946 bool setMachoSectionIndex
= !sect
->isSectionHidden() && (sect
->type() != ld::Section::typeTentativeDefs
);
2947 if ( setMachoSectionIndex
)
2948 ++machoSectionIndex
;
2949 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
2950 const ld::Atom
* atom
= *ait
;
// record which mach-o section (n_sect) each atom ends up in
2951 if ( setMachoSectionIndex
)
2952 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
);
2953 else if ( sect
->type() == ld::Section::typeMachHeader
)
2954 (const_cast<ld::Atom
*>(atom
))->setMachoSection(1); // __mh_execute_header is not in any section by needs n_sect==1
2955 else if ( sect
->type() == ld::Section::typeLastSection
)
2956 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
); // use section index of previous section
2957 else if ( sect
->type() == ld::Section::typeFirstSection
)
2958 (const_cast<ld::Atom
*>(atom
))->setMachoSection(machoSectionIndex
+1); // use section index of next section
2960 // in -r mode, clarify symbolTableNotInFinalLinkedImages
2961 if ( _options
.outputKind() == Options::kObjectFile
) {
2962 if ( (_options
.architecture() == CPU_TYPE_X86_64
)
2963 || (_options
.architecture() == CPU_TYPE_ARM64
)
2965 // x86_64 .o files need labels on anonymous literal strings
2966 if ( (sect
->type() == ld::Section::typeCString
) && (atom
->combine() == ld::Atom::combineByNameAndContent
) ) {
2967 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2968 _localAtoms
.push_back(atom
);
// CFI (eh-frame) labels are kept or dropped per -no_eh_labels
2972 if ( sect
->type() == ld::Section::typeCFI
) {
2973 if ( _options
.removeEHLabels() )
2974 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
2976 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2978 else if ( sect
->type() == ld::Section::typeTempAlias
) {
// alias atoms only exist in -r output; they surface as undefined symbols
2979 assert(_options
.outputKind() == Options::kObjectFile
);
2980 _importedAtoms
.push_back(atom
);
2983 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
)
2984 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableIn
);
2987 // TEMP work around until <rdar://problem/7702923> goes in
2988 if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip
)
2989 && (atom
->scope() == ld::Atom::scopeLinkageUnit
)
2990 && (_options
.outputKind() == Options::kDynamicLibrary
) ) {
2991 (const_cast<ld::Atom
*>(atom
))->setScope(ld::Atom::scopeGlobal
);
2994 // <rdar://problem/6783167> support auto hidden weak symbols: .weak_def_can_be_hidden
2995 if ( atom
->autoHide() && (_options
.outputKind() != Options::kObjectFile
) ) {
2996 // adding auto-hide symbol to .exp file should keep it global
2997 if ( !_options
.hasExportMaskList() || !_options
.shouldExport(atom
->name()) )
2998 (const_cast<ld::Atom
*>(atom
))->setScope(ld::Atom::scopeLinkageUnit
);
3001 // <rdar://problem/8626058> ld should consistently warn when resolvers are not exported
3002 if ( (atom
->contentType() == ld::Atom::typeResolver
) && (atom
->scope() == ld::Atom::scopeLinkageUnit
) )
3003 warning("resolver functions should be external, but '%s' is hidden", atom
->name());
// Import proxies: record weak-external usage, and file under imports/exports.
3005 if ( sect
->type() == ld::Section::typeImportProxies
) {
3006 if ( atom
->combine() == ld::Atom::combineByName
)
3007 this->usesWeakExternalSymbols
= true;
3008 // alias proxy is a re-export with a name change, don't import changed name
3009 if ( ! atom
->isAlias() )
3010 _importedAtoms
.push_back(atom
);
3011 // scope of proxies are usually linkage unit, so done
3012 // if scope is global, we need to re-export it too
3013 if ( atom
->scope() == ld::Atom::scopeGlobal
)
3014 _exportedAtoms
.push_back(atom
);
// Atoms excluded from the symbol table are skipped here.
3017 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
) {
3018 assert(_options
.outputKind() != Options::kObjectFile
);
3019 continue; // don't add to symbol table
3021 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotIn
) {
3022 continue; // don't add to symbol table
3024 if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel
)
3025 && (_options
.outputKind() != Options::kObjectFile
) ) {
3026 continue; // don't add to symbol table
// Tentative definitions in -r output: real defs with -d, else undefined-like.
3029 if ( (atom
->definition() == ld::Atom::definitionTentative
) && (_options
.outputKind() == Options::kObjectFile
) ) {
3030 if ( _options
.makeTentativeDefinitionsReal() ) {
3031 // -r -d turns tentative defintions into real def
3032 _exportedAtoms
.push_back(atom
);
3035 // in mach-o object files tentative defintions are stored like undefined symbols
3036 _importedAtoms
.push_back(atom
);
// Final classification by scope.
3041 switch ( atom
->scope() ) {
3042 case ld::Atom::scopeTranslationUnit
:
3043 if ( _options
.keepLocalSymbol(atom
->name()) ) {
3044 _localAtoms
.push_back(atom
);
// stripped locals in -r output still need an auto-strip placeholder label
3047 if ( _options
.outputKind() == Options::kObjectFile
) {
3048 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel
);
3049 _localAtoms
.push_back(atom
);
3052 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
3055 case ld::Atom::scopeGlobal
:
3056 _exportedAtoms
.push_back(atom
);
3058 case ld::Atom::scopeLinkageUnit
:
// private-extern handling differs between -r output and final images
3059 if ( _options
.outputKind() == Options::kObjectFile
) {
3060 if ( _options
.keepPrivateExterns() ) {
3061 _exportedAtoms
.push_back(atom
);
3063 else if ( _options
.keepLocalSymbol(atom
->name()) ) {
3064 _localAtoms
.push_back(atom
);
3067 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel
);
3068 _localAtoms
.push_back(atom
);
3072 if ( _options
.keepLocalSymbol(atom
->name()) )
3073 _localAtoms
.push_back(atom
);
3074 // <rdar://problem/5804214> ld should never have a symbol in the non-lazy indirect symbol table with index 0
3075 // this works by making __mh_execute_header be a local symbol which takes symbol index 0
3076 else if ( (atom
->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip
) && !_options
.makeCompressedDyldInfo() )
3077 _localAtoms
.push_back(atom
);
3079 (const_cast<ld::Atom
*>(atom
))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn
);
3086 // <rdar://problem/6978069> ld adds undefined symbol from .exp file to binary
// Kext bundles with export restrictions: keep only imports some fixup uses.
3087 if ( (_options
.outputKind() == Options::kKextBundle
) && _options
.hasExportRestrictList() ) {
3088 // search for referenced undefines
3089 std::set
<const ld::Atom
*> referencedProxyAtoms
;
3090 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
=state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
3091 ld::Internal::FinalSection
* sect
= *sit
;
3092 for (std::vector
<const ld::Atom
*>::iterator ait
=sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
3093 const ld::Atom
* atom
= *ait
;
// collect every proxy atom any fixup binds to (direct or via indirect table)
3094 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
3095 switch ( fit
->binding
) {
3096 case ld::Fixup::bindingsIndirectlyBound
:
3097 referencedProxyAtoms
.insert(state
.indirectBindingTable
[fit
->u
.bindingIndex
]);
3099 case ld::Fixup::bindingDirectlyBound
:
3100 referencedProxyAtoms
.insert(fit
->u
.target
);
3108 // remove any unreferenced _importedAtoms
3109 _importedAtoms
.erase(std::remove_if(_importedAtoms
.begin(), _importedAtoms
.end(), NotInSet(referencedProxyAtoms
)), _importedAtoms
.end());
// Name-sorted order is required by the binary_search calls below.
3113 std::sort(_exportedAtoms
.begin(), _exportedAtoms
.end(), AtomByNameSorter());
3114 std::sort(_importedAtoms
.begin(), _importedAtoms
.end(), AtomByNameSorter());
// Collect $ld$add$/$ld$hide$ special symbols, keyed by the affected name.
3116 std::map
<std::string
, std::vector
<std::string
>> addedSymbols
;
3117 std::map
<std::string
, std::vector
<std::string
>> hiddenSymbols
;
3118 for (const auto *atom
: _exportedAtoms
) {
3119 // The exported symbols have already been sorted. Early exit the loop
3120 // once we see a symbol that is lexicographically past the special
3122 if (atom
->name()[0] > '$')
3125 std::string
name(atom
->name());
// "$ld$add$os10.x$_name" — the '$' after the version delimits the real name
3126 if (name
.rfind("$ld$add$", 7) == 0) {
3127 auto pos
= name
.find_first_of('$', 10);
3128 if (pos
== std::string::npos
) {
3129 warning("bad special linker symbol '%s'", atom
->name());
3132 auto &&symbolName
= name
.substr(pos
+1);
// emplace inserts {symbolName -> [name]} if absent ...
3133 auto it
= addedSymbols
.emplace(symbolName
, std::initializer_list
<std::string
>{name
});
// ... NOTE(review): this append runs on the existing entry too — as written
// the first inserter appears twice; confirm intent against the full source
3135 it
.first
->second
.emplace_back(name
);
3136 } else if (name
.rfind("$ld$hide$", 8) == 0) {
3137 auto pos
= name
.find_first_of('$', 11);
3138 if (pos
== std::string::npos
) {
3139 warning("bad special linker symbol '%s'", atom
->name());
3142 auto &&symbolName
= name
.substr(pos
+1);
3143 auto it
= hiddenSymbols
.emplace(symbolName
, std::initializer_list
<std::string
>{name
});
3145 it
.first
->second
.emplace_back(name
);
// Warn when $ld$add$ names collide with symbols that already exist.
3149 for (const auto &it
: addedSymbols
) {
3150 if (!std::binary_search(_exportedAtoms
.begin(), _exportedAtoms
.end(), it
.first
.c_str(), AtomByNameSorter()))
3152 for (const auto &symbol
: it
.second
)
3153 warning("linker symbol '%s' adds already existing symbol '%s'", symbol
.c_str(), it
.first
.c_str());
// Drop $ld$hide$ entries whose target actually exists; warn about the rest.
3156 auto it
= hiddenSymbols
.begin();
3157 while (it
!= hiddenSymbols
.end()) {
3158 if (std::binary_search(_exportedAtoms
.begin(), _exportedAtoms
.end(), it
->first
.c_str(), AtomByNameSorter()))
3159 it
= hiddenSymbols
.erase(it
);
3164 for (const auto &it
: hiddenSymbols
) {
3165 for (const auto &symbol
: it
.second
)
3166 warning("linker symbol '%s' hides a non-existent symbol '%s'", symbol
.c_str(), it
.first
.c_str());
// Creates the LINKEDIT atoms for -preload output, dispatching on the target
// architecture. Each arch arm creates the same trio of optional pieces:
// local relocations, external relocations, and (when a symbol table is
// present) indirect symbol table + symbol table + string pool.
// NOTE(review): this extracted chunk is missing lines (case labels for i386
// and arm, closing braces, break statements — see jumps in the embedded
// numbering, e.g. 3173→3175, 3189→3193). Code below is kept byte-for-byte as
// found; reconcile against the complete file.
3170 void OutputFile::addPreloadLinkEdit(ld::Internal
& state
)
3172 switch ( _options
.architecture() ) {
// --- i386 ---
3173 #if SUPPORT_ARCH_i386
3175 if ( _hasLocalRelocations
) {
3176 _localRelocsAtom
= new LocalRelocationsAtom
<x86
>(_options
, state
, *this);
3177 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3179 if ( _hasExternalRelocations
) {
3180 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86
>(_options
, state
, *this);
3181 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3183 if ( _hasSymbolTable
) {
3184 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86
>(_options
, state
, *this);
3185 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3186 _symbolTableAtom
= new SymbolTableAtom
<x86
>(_options
, state
, *this);
3187 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3188 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3189 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
// --- x86_64 ---
3193 #if SUPPORT_ARCH_x86_64
3194 case CPU_TYPE_X86_64
:
3195 if ( _hasLocalRelocations
) {
3196 _localRelocsAtom
= new LocalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3197 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3199 if ( _hasExternalRelocations
) {
3200 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3201 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3203 if ( _hasSymbolTable
) {
3204 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86_64
>(_options
, state
, *this);
3205 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3206 _symbolTableAtom
= new SymbolTableAtom
<x86_64
>(_options
, state
, *this);
3207 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
// NOTE(review): string pool alignment is 4 here even for x86_64 (addLinkEdit
// uses 8 for x86_64) — presumably intentional for -preload; confirm
3208 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3209 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
// --- arm ---
3213 #if SUPPORT_ARCH_arm_any
3215 if ( _hasLocalRelocations
) {
3216 _localRelocsAtom
= new LocalRelocationsAtom
<arm
>(_options
, state
, *this);
3217 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3219 if ( _hasExternalRelocations
) {
3220 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm
>(_options
, state
, *this);
3221 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3223 if ( _hasSymbolTable
) {
3224 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm
>(_options
, state
, *this);
3225 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3226 _symbolTableAtom
= new SymbolTableAtom
<arm
>(_options
, state
, *this);
3227 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3228 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3229 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
// --- arm64 ---
3233 #if SUPPORT_ARCH_arm64
3234 case CPU_TYPE_ARM64
:
3235 if ( _hasLocalRelocations
) {
3236 _localRelocsAtom
= new LocalRelocationsAtom
<arm64
>(_options
, state
, *this);
3237 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3239 if ( _hasExternalRelocations
) {
3240 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm64
>(_options
, state
, *this);
3241 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3243 if ( _hasSymbolTable
) {
3244 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm64
>(_options
, state
, *this);
3245 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3246 _symbolTableAtom
= new SymbolTableAtom
<arm64
>(_options
, state
, *this);
3247 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3248 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3249 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
// default: any other architecture cannot be linked with -preload
3254 throw "-preload not supported";
3260 void OutputFile::addLinkEdit(ld::Internal
& state
)
3262 // for historical reasons, -preload orders LINKEDIT content differently
3263 if ( _options
.outputKind() == Options::kPreload
)
3264 return addPreloadLinkEdit(state
);
3266 switch ( _options
.architecture() ) {
3267 #if SUPPORT_ARCH_i386
3269 if ( _hasSectionRelocations
) {
3270 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<x86
>(_options
, state
, *this);
3271 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3273 if ( _hasDyldInfo
) {
3274 _rebasingInfoAtom
= new RebaseInfoAtom
<x86
>(_options
, state
, *this);
3275 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3277 _bindingInfoAtom
= new BindingInfoAtom
<x86
>(_options
, state
, *this);
3278 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3280 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<x86
>(_options
, state
, *this);
3281 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3283 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<x86
>(_options
, state
, *this);
3284 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3286 _exportInfoAtom
= new ExportInfoAtom
<x86
>(_options
, state
, *this);
3287 exportSection
= state
.addAtom(*_exportInfoAtom
);
3289 if ( _hasLocalRelocations
) {
3290 _localRelocsAtom
= new LocalRelocationsAtom
<x86
>(_options
, state
, *this);
3291 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3293 if ( _hasSplitSegInfo
) {
3294 if ( _options
.sharedRegionEncodingV2() )
3295 _splitSegInfoAtom
= new SplitSegInfoV2Atom
<x86
>(_options
, state
, *this);
3297 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<x86
>(_options
, state
, *this);
3298 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3300 if ( _hasFunctionStartsInfo
) {
3301 _functionStartsAtom
= new FunctionStartsAtom
<x86
>(_options
, state
, *this);
3302 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3304 if ( _hasDataInCodeInfo
) {
3305 _dataInCodeAtom
= new DataInCodeAtom
<x86
>(_options
, state
, *this);
3306 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3308 if ( _hasOptimizationHints
) {
3309 _optimizationHintsAtom
= new OptimizationHintsAtom
<x86
>(_options
, state
, *this);
3310 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3312 if ( _hasSymbolTable
) {
3313 _symbolTableAtom
= new SymbolTableAtom
<x86
>(_options
, state
, *this);
3314 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3316 if ( _hasExternalRelocations
) {
3317 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86
>(_options
, state
, *this);
3318 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3320 if ( _hasSymbolTable
) {
3321 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86
>(_options
, state
, *this);
3322 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3323 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3324 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3328 #if SUPPORT_ARCH_x86_64
3329 case CPU_TYPE_X86_64
:
3330 if ( _hasSectionRelocations
) {
3331 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<x86_64
>(_options
, state
, *this);
3332 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3334 if ( _hasDyldInfo
) {
3335 _rebasingInfoAtom
= new RebaseInfoAtom
<x86_64
>(_options
, state
, *this);
3336 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3338 _bindingInfoAtom
= new BindingInfoAtom
<x86_64
>(_options
, state
, *this);
3339 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3341 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<x86_64
>(_options
, state
, *this);
3342 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3344 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<x86_64
>(_options
, state
, *this);
3345 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3347 _exportInfoAtom
= new ExportInfoAtom
<x86_64
>(_options
, state
, *this);
3348 exportSection
= state
.addAtom(*_exportInfoAtom
);
3350 if ( _hasLocalRelocations
) {
3351 _localRelocsAtom
= new LocalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3352 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3354 if ( _hasSplitSegInfo
) {
3355 if ( _options
.sharedRegionEncodingV2() )
3356 _splitSegInfoAtom
= new SplitSegInfoV2Atom
<x86_64
>(_options
, state
, *this);
3358 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<x86_64
>(_options
, state
, *this);
3359 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3361 if ( _hasFunctionStartsInfo
) {
3362 _functionStartsAtom
= new FunctionStartsAtom
<x86_64
>(_options
, state
, *this);
3363 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3365 if ( _hasDataInCodeInfo
) {
3366 _dataInCodeAtom
= new DataInCodeAtom
<x86_64
>(_options
, state
, *this);
3367 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3369 if ( _hasOptimizationHints
) {
3370 _optimizationHintsAtom
= new OptimizationHintsAtom
<x86_64
>(_options
, state
, *this);
3371 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3373 if ( _hasSymbolTable
) {
3374 _symbolTableAtom
= new SymbolTableAtom
<x86_64
>(_options
, state
, *this);
3375 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3377 if ( _hasExternalRelocations
) {
3378 _externalRelocsAtom
= new ExternalRelocationsAtom
<x86_64
>(_options
, state
, *this);
3379 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3381 if ( _hasSymbolTable
) {
3382 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<x86_64
>(_options
, state
, *this);
3383 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3384 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 8);
3385 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3389 #if SUPPORT_ARCH_arm_any
3391 if ( _hasSectionRelocations
) {
3392 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<arm
>(_options
, state
, *this);
3393 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3395 if ( _hasDyldInfo
) {
3396 _rebasingInfoAtom
= new RebaseInfoAtom
<arm
>(_options
, state
, *this);
3397 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3399 _bindingInfoAtom
= new BindingInfoAtom
<arm
>(_options
, state
, *this);
3400 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3402 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<arm
>(_options
, state
, *this);
3403 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3405 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<arm
>(_options
, state
, *this);
3406 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3408 _exportInfoAtom
= new ExportInfoAtom
<arm
>(_options
, state
, *this);
3409 exportSection
= state
.addAtom(*_exportInfoAtom
);
3411 if ( _hasLocalRelocations
) {
3412 _localRelocsAtom
= new LocalRelocationsAtom
<arm
>(_options
, state
, *this);
3413 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3415 if ( _hasSplitSegInfo
) {
3416 if ( _options
.sharedRegionEncodingV2() )
3417 _splitSegInfoAtom
= new SplitSegInfoV2Atom
<arm
>(_options
, state
, *this);
3419 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<arm
>(_options
, state
, *this);
3420 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3422 if ( _hasFunctionStartsInfo
) {
3423 _functionStartsAtom
= new FunctionStartsAtom
<arm
>(_options
, state
, *this);
3424 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3426 if ( _hasDataInCodeInfo
) {
3427 _dataInCodeAtom
= new DataInCodeAtom
<arm
>(_options
, state
, *this);
3428 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3430 if ( _hasOptimizationHints
) {
3431 _optimizationHintsAtom
= new OptimizationHintsAtom
<arm
>(_options
, state
, *this);
3432 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3434 if ( _hasSymbolTable
) {
3435 _symbolTableAtom
= new SymbolTableAtom
<arm
>(_options
, state
, *this);
3436 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3438 if ( _hasExternalRelocations
) {
3439 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm
>(_options
, state
, *this);
3440 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3442 if ( _hasSymbolTable
) {
3443 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm
>(_options
, state
, *this);
3444 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3445 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3446 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3450 #if SUPPORT_ARCH_arm64
3451 case CPU_TYPE_ARM64
:
3452 if ( _hasSectionRelocations
) {
3453 _sectionsRelocationsAtom
= new SectionRelocationsAtom
<arm64
>(_options
, state
, *this);
3454 sectionRelocationsSection
= state
.addAtom(*_sectionsRelocationsAtom
);
3456 if ( _hasDyldInfo
) {
3457 _rebasingInfoAtom
= new RebaseInfoAtom
<arm64
>(_options
, state
, *this);
3458 rebaseSection
= state
.addAtom(*_rebasingInfoAtom
);
3460 _bindingInfoAtom
= new BindingInfoAtom
<arm64
>(_options
, state
, *this);
3461 bindingSection
= state
.addAtom(*_bindingInfoAtom
);
3463 _weakBindingInfoAtom
= new WeakBindingInfoAtom
<arm64
>(_options
, state
, *this);
3464 weakBindingSection
= state
.addAtom(*_weakBindingInfoAtom
);
3466 _lazyBindingInfoAtom
= new LazyBindingInfoAtom
<arm64
>(_options
, state
, *this);
3467 lazyBindingSection
= state
.addAtom(*_lazyBindingInfoAtom
);
3469 _exportInfoAtom
= new ExportInfoAtom
<arm64
>(_options
, state
, *this);
3470 exportSection
= state
.addAtom(*_exportInfoAtom
);
3472 if ( _hasLocalRelocations
) {
3473 _localRelocsAtom
= new LocalRelocationsAtom
<arm64
>(_options
, state
, *this);
3474 localRelocationsSection
= state
.addAtom(*_localRelocsAtom
);
3476 if ( _hasSplitSegInfo
) {
3477 if ( _options
.sharedRegionEncodingV2() )
3478 _splitSegInfoAtom
= new SplitSegInfoV2Atom
<arm64
>(_options
, state
, *this);
3480 _splitSegInfoAtom
= new SplitSegInfoV1Atom
<arm64
>(_options
, state
, *this);
3481 splitSegInfoSection
= state
.addAtom(*_splitSegInfoAtom
);
3483 if ( _hasFunctionStartsInfo
) {
3484 _functionStartsAtom
= new FunctionStartsAtom
<arm64
>(_options
, state
, *this);
3485 functionStartsSection
= state
.addAtom(*_functionStartsAtom
);
3487 if ( _hasDataInCodeInfo
) {
3488 _dataInCodeAtom
= new DataInCodeAtom
<arm64
>(_options
, state
, *this);
3489 dataInCodeSection
= state
.addAtom(*_dataInCodeAtom
);
3491 if ( _hasOptimizationHints
) {
3492 _optimizationHintsAtom
= new OptimizationHintsAtom
<arm64
>(_options
, state
, *this);
3493 optimizationHintsSection
= state
.addAtom(*_optimizationHintsAtom
);
3495 if ( _hasSymbolTable
) {
3496 _symbolTableAtom
= new SymbolTableAtom
<arm64
>(_options
, state
, *this);
3497 symbolTableSection
= state
.addAtom(*_symbolTableAtom
);
3499 if ( _hasExternalRelocations
) {
3500 _externalRelocsAtom
= new ExternalRelocationsAtom
<arm64
>(_options
, state
, *this);
3501 externalRelocationsSection
= state
.addAtom(*_externalRelocsAtom
);
3503 if ( _hasSymbolTable
) {
3504 _indirectSymbolTableAtom
= new IndirectSymbolTableAtom
<arm64
>(_options
, state
, *this);
3505 indirectSymbolTableSection
= state
.addAtom(*_indirectSymbolTableAtom
);
3506 _stringPoolAtom
= new StringPoolAtom(_options
, state
, *this, 4);
3507 stringPoolSection
= state
.addAtom(*_stringPoolAtom
);
3512 throw "unknown architecture";
3516 void OutputFile::addLoadCommands(ld::Internal
& state
)
3518 switch ( _options
.architecture() ) {
3519 #if SUPPORT_ARCH_x86_64
3520 case CPU_TYPE_X86_64
:
3521 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<x86_64
>(_options
, state
, *this);
3522 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3525 #if SUPPORT_ARCH_arm_any
3527 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<arm
>(_options
, state
, *this);
3528 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3531 #if SUPPORT_ARCH_arm64
3532 case CPU_TYPE_ARM64
:
3533 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<arm64
>(_options
, state
, *this);
3534 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3537 #if SUPPORT_ARCH_i386
3539 _headersAndLoadCommandAtom
= new HeaderAndLoadCommandsAtom
<x86
>(_options
, state
, *this);
3540 headerAndLoadCommandsSection
= state
.addAtom(*_headersAndLoadCommandAtom
);
3544 throw "unknown architecture";
3548 uint32_t OutputFile::dylibCount()
3550 return _dylibsToLoad
.size();
3553 const ld::dylib::File
* OutputFile::dylibByOrdinal(unsigned int ordinal
)
3555 assert( ordinal
> 0 );
3556 assert( ordinal
<= _dylibsToLoad
.size() );
3557 return _dylibsToLoad
[ordinal
-1];
3560 bool OutputFile::hasOrdinalForInstallPath(const char* path
, int* ordinal
)
3562 for (std::map
<const ld::dylib::File
*, int>::const_iterator it
= _dylibToOrdinal
.begin(); it
!= _dylibToOrdinal
.end(); ++it
) {
3563 const char* installPath
= it
->first
->installPath();
3564 if ( (installPath
!= NULL
) && (strcmp(path
, installPath
) == 0) ) {
3565 *ordinal
= it
->second
;
3572 uint32_t OutputFile::dylibToOrdinal(const ld::dylib::File
* dylib
)
3574 return _dylibToOrdinal
[dylib
];
3578 void OutputFile::buildDylibOrdinalMapping(ld::Internal
& state
)
3580 // count non-public re-exported dylibs
3581 unsigned int nonPublicReExportCount
= 0;
3582 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3583 ld::dylib::File
* aDylib
= *it
;
3584 if ( aDylib
->willBeReExported() && ! aDylib
->hasPublicInstallName() )
3585 ++nonPublicReExportCount
;
3588 // look at each dylib supplied in state
3589 bool hasReExports
= false;
3590 bool haveLazyDylibs
= false;
3591 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3592 ld::dylib::File
* aDylib
= *it
;
3594 if ( aDylib
== state
.bundleLoader
) {
3595 _dylibToOrdinal
[aDylib
] = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE
;
3597 else if ( this->hasOrdinalForInstallPath(aDylib
->installPath(), &ordinal
) ) {
3598 // already have a dylib with that install path, map all uses to that ordinal
3599 _dylibToOrdinal
[aDylib
] = ordinal
;
3601 else if ( aDylib
->willBeLazyLoadedDylib() ) {
3602 // all lazy dylib need to be at end of ordinals
3603 haveLazyDylibs
= true;
3605 else if ( aDylib
->willBeReExported() && ! aDylib
->hasPublicInstallName() && (nonPublicReExportCount
>= 2) ) {
3606 _dylibsToLoad
.push_back(aDylib
);
3607 _dylibToOrdinal
[aDylib
] = BIND_SPECIAL_DYLIB_SELF
;
3610 // first time this install path seen, create new ordinal
3611 _dylibsToLoad
.push_back(aDylib
);
3612 _dylibToOrdinal
[aDylib
] = _dylibsToLoad
.size();
3614 if ( aDylib
->explicitlyLinked() && aDylib
->willBeReExported() )
3615 hasReExports
= true;
3617 if ( haveLazyDylibs
) {
3618 // second pass to determine ordinals for lazy loaded dylibs
3619 for (std::vector
<ld::dylib::File
*>::iterator it
= state
.dylibs
.begin(); it
!= state
.dylibs
.end(); ++it
) {
3620 ld::dylib::File
* aDylib
= *it
;
3621 if ( aDylib
->willBeLazyLoadedDylib() ) {
3623 if ( this->hasOrdinalForInstallPath(aDylib
->installPath(), &ordinal
) ) {
3624 // already have a dylib with that install path, map all uses to that ordinal
3625 _dylibToOrdinal
[aDylib
] = ordinal
;
3628 // first time this install path seen, create new ordinal
3629 _dylibsToLoad
.push_back(aDylib
);
3630 _dylibToOrdinal
[aDylib
] = _dylibsToLoad
.size();
3635 _noReExportedDylibs
= !hasReExports
;
3636 //fprintf(stderr, "dylibs:\n");
3637 //for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3638 // fprintf(stderr, " %p ord=%u, install_name=%s\n",it->first, it->second, it->first->installPath());
3642 uint32_t OutputFile::lazyBindingInfoOffsetForLazyPointerAddress(uint64_t lpAddress
)
3644 return _lazyPointerAddressToInfoOffset
[lpAddress
];
3647 void OutputFile::setLazyBindingInfoOffset(uint64_t lpAddress
, uint32_t lpInfoOffset
)
3649 _lazyPointerAddressToInfoOffset
[lpAddress
] = lpInfoOffset
;
3652 int OutputFile::compressedOrdinalForAtom(const ld::Atom
* target
)
3654 // flat namespace images use zero for all ordinals
3655 if ( _options
.nameSpace() != Options::kTwoLevelNameSpace
)
3656 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3658 // handle -interposable
3659 if ( target
->definition() == ld::Atom::definitionRegular
)
3660 return BIND_SPECIAL_DYLIB_SELF
;
3663 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
3664 if ( dylib
!= NULL
) {
3665 std::map
<const ld::dylib::File
*, int>::iterator pos
= _dylibToOrdinal
.find(dylib
);
3666 if ( pos
!= _dylibToOrdinal
.end() )
3668 assert(0 && "dylib not assigned ordinal");
3671 // handle undefined dynamic_lookup
3672 if ( _options
.undefinedTreatment() == Options::kUndefinedDynamicLookup
)
3673 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3676 if ( _options
.allowedUndefined(target
->name()) )
3677 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP
;
3679 throw "can't find ordinal for imported symbol";
3683 bool OutputFile::isPcRelStore(ld::Fixup::Kind kind
)
3686 case ld::Fixup::kindStoreX86BranchPCRel8
:
3687 case ld::Fixup::kindStoreX86BranchPCRel32
:
3688 case ld::Fixup::kindStoreX86PCRel8
:
3689 case ld::Fixup::kindStoreX86PCRel16
:
3690 case ld::Fixup::kindStoreX86PCRel32
:
3691 case ld::Fixup::kindStoreX86PCRel32_1
:
3692 case ld::Fixup::kindStoreX86PCRel32_2
:
3693 case ld::Fixup::kindStoreX86PCRel32_4
:
3694 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
3695 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
3696 case ld::Fixup::kindStoreX86PCRel32GOT
:
3697 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
3698 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
3699 case ld::Fixup::kindStoreARMBranch24
:
3700 case ld::Fixup::kindStoreThumbBranch22
:
3701 case ld::Fixup::kindStoreARMLoad12
:
3702 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
3703 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
3704 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
3705 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
3706 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
3707 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
3708 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
3709 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
3710 #if SUPPORT_ARCH_arm64
3711 case ld::Fixup::kindStoreARM64Page21
:
3712 case ld::Fixup::kindStoreARM64PageOff12
:
3713 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
3714 case ld::Fixup::kindStoreARM64GOTLoadPageOff12
:
3715 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
3716 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
3717 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
3718 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12
:
3719 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
3720 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
3721 case ld::Fixup::kindStoreARM64PCRelToGOT
:
3722 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
3723 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
3724 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
3725 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
3726 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
3727 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
3728 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
3729 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
3730 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
3731 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
3734 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
3735 #if SUPPORT_ARCH_arm64
3736 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
3738 return (_options
.outputKind() != Options::kKextBundle
);
3745 bool OutputFile::isStore(ld::Fixup::Kind kind
)
3748 case ld::Fixup::kindNone
:
3749 case ld::Fixup::kindNoneFollowOn
:
3750 case ld::Fixup::kindNoneGroupSubordinate
:
3751 case ld::Fixup::kindNoneGroupSubordinateFDE
:
3752 case ld::Fixup::kindNoneGroupSubordinateLSDA
:
3753 case ld::Fixup::kindNoneGroupSubordinatePersonality
:
3754 case ld::Fixup::kindSetTargetAddress
:
3755 case ld::Fixup::kindSubtractTargetAddress
:
3756 case ld::Fixup::kindAddAddend
:
3757 case ld::Fixup::kindSubtractAddend
:
3758 case ld::Fixup::kindSetTargetImageOffset
:
3759 case ld::Fixup::kindSetTargetSectionOffset
:
3768 bool OutputFile::setsTarget(ld::Fixup::Kind kind
)
3771 case ld::Fixup::kindSetTargetAddress
:
3772 case ld::Fixup::kindLazyTarget
:
3773 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
3774 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
3775 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
3776 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
3777 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
3778 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
3779 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
3780 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
3781 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
3782 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
3783 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad
:
3784 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
3785 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
3786 case ld::Fixup::kindStoreTargetAddressARMLoad12
:
3787 #if SUPPORT_ARCH_arm64
3788 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
3789 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
3790 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
3791 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
3792 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
3793 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
3794 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
3795 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
3796 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
3797 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
3798 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
3801 case ld::Fixup::kindStoreX86DtraceCallSiteNop
:
3802 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear
:
3803 case ld::Fixup::kindStoreARMDtraceCallSiteNop
:
3804 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear
:
3805 case ld::Fixup::kindStoreARM64DtraceCallSiteNop
:
3806 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear
:
3807 case ld::Fixup::kindStoreThumbDtraceCallSiteNop
:
3808 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear
:
3809 return (_options
.outputKind() == Options::kObjectFile
);
3816 bool OutputFile::isPointerToTarget(ld::Fixup::Kind kind
)
3819 case ld::Fixup::kindSetTargetAddress
:
3820 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
3821 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
3822 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
3823 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
3824 case ld::Fixup::kindLazyTarget
:
3831 bool OutputFile::isPointerFromTarget(ld::Fixup::Kind kind
)
3834 case ld::Fixup::kindSubtractTargetAddress
:
// Computes the net addend carried by the AddAddend/SubtractAddend fixup that
// neighbors 'fit' within its cluster; clusters of size 1of1/1of2/2of2 carry none.
// Throws (const char*) on an unexpected neighbor kind or cluster size.
// NOTE(review): this chunk is a lossy extraction — 'break;' statements, the
// iterator adjustment before each inner switch (presumably '--fit' / '++fit'),
// closing braces, and the final 'return addend;' appear to have been dropped.
// Confirm the exact statements against upstream ld64 OutputFile.cpp before use.
3843 uint64_t OutputFile::lookBackAddend(ld::Fixup::iterator fit
)
3845 uint64_t addend
= 0;
3846 switch ( fit
->clusterSize
) {
// single fixup or two-fixup clusters: no separate addend fixup to consult
3847 case ld::Fixup::k1of1
:
3848 case ld::Fixup::k1of2
:
3849 case ld::Fixup::k2of2
:
// k2of3: the addend fixup is the cluster neighbor of 'fit'
3851 case ld::Fixup::k2of3
:
3853 switch ( fit
->kind
) {
3854 case ld::Fixup::kindAddAddend
:
3855 addend
+= fit
->u
.addend
;
3857 case ld::Fixup::kindSubtractAddend
:
3858 addend
-= fit
->u
.addend
;
3861 throw "unexpected fixup kind for binding";
// k1of3: the addend fixup follows 'fit' in the cluster
3864 case ld::Fixup::k1of3
:
3866 switch ( fit
->kind
) {
3867 case ld::Fixup::kindAddAddend
:
3868 addend
+= fit
->u
.addend
;
3870 case ld::Fixup::kindSubtractAddend
:
3871 addend
-= fit
->u
.addend
;
3874 throw "unexpected fixup kind for binding";
3878 throw "unexpected fixup cluster size for binding";
3884 void OutputFile::generateLinkEditInfo(ld::Internal
& state
)
3886 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
3887 ld::Internal::FinalSection
* sect
= *sit
;
3888 // record end of last __TEXT section encrypted iPhoneOS apps.
3889 if ( _options
.makeEncryptable() && (strcmp(sect
->segmentName(), "__TEXT") == 0) && (strcmp(sect
->sectionName(), "__oslogstring") != 0) ) {
3890 _encryptedTEXTendOffset
= pageAlign(sect
->fileOffset
+ sect
->size
);
3892 bool objc1ClassRefSection
= ( (sect
->type() == ld::Section::typeCStringPointer
)
3893 && (strcmp(sect
->sectionName(), "__cls_refs") == 0)
3894 && (strcmp(sect
->segmentName(), "__OBJC") == 0) );
3895 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
3896 const ld::Atom
* atom
= *ait
;
3898 // Record regular atoms that override a dylib's weak definitions
3899 if ( (atom
->scope() == ld::Atom::scopeGlobal
) && atom
->overridesDylibsWeakDef() ) {
3900 if ( _options
.makeCompressedDyldInfo() ) {
3901 uint8_t wtype
= BIND_TYPE_OVERRIDE_OF_WEAKDEF_IN_DYLIB
;
3902 bool nonWeakDef
= (atom
->combine() == ld::Atom::combineNever
);
3903 _weakBindingInfo
.push_back(BindingInfo(wtype
, atom
->name(), nonWeakDef
, atom
->finalAddress(), 0));
3905 this->overridesWeakExternalSymbols
= true;
3906 if ( _options
.warnWeakExports() )
3907 warning("overrides weak external symbol: %s", atom
->name());
3910 ld::Fixup
* fixupWithTarget
= NULL
;
3911 ld::Fixup
* fixupWithMinusTarget
= NULL
;
3912 ld::Fixup
* fixupWithStore
= NULL
;
3913 ld::Fixup
* fixupWithAddend
= NULL
;
3914 const ld::Atom
* target
= NULL
;
3915 const ld::Atom
* minusTarget
= NULL
;
3916 uint64_t targetAddend
= 0;
3917 uint64_t minusTargetAddend
= 0;
3918 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
3919 if ( fit
->firstInCluster() ) {
3920 fixupWithTarget
= NULL
;
3921 fixupWithMinusTarget
= NULL
;
3922 fixupWithStore
= NULL
;
3926 minusTargetAddend
= 0;
3928 if ( this->setsTarget(fit
->kind
) ) {
3929 switch ( fit
->binding
) {
3930 case ld::Fixup::bindingNone
:
3931 case ld::Fixup::bindingByNameUnbound
:
3933 case ld::Fixup::bindingByContentBound
:
3934 case ld::Fixup::bindingDirectlyBound
:
3935 fixupWithTarget
= fit
;
3936 target
= fit
->u
.target
;
3938 case ld::Fixup::bindingsIndirectlyBound
:
3939 fixupWithTarget
= fit
;
3940 target
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
3943 assert(target
!= NULL
);
3945 switch ( fit
->kind
) {
3946 case ld::Fixup::kindAddAddend
:
3947 targetAddend
= fit
->u
.addend
;
3948 fixupWithAddend
= fit
;
3950 case ld::Fixup::kindSubtractAddend
:
3951 minusTargetAddend
= fit
->u
.addend
;
3952 fixupWithAddend
= fit
;
3954 case ld::Fixup::kindSubtractTargetAddress
:
3955 switch ( fit
->binding
) {
3956 case ld::Fixup::bindingNone
:
3957 case ld::Fixup::bindingByNameUnbound
:
3959 case ld::Fixup::bindingByContentBound
:
3960 case ld::Fixup::bindingDirectlyBound
:
3961 fixupWithMinusTarget
= fit
;
3962 minusTarget
= fit
->u
.target
;
3964 case ld::Fixup::bindingsIndirectlyBound
:
3965 fixupWithMinusTarget
= fit
;
3966 minusTarget
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
3969 assert(minusTarget
!= NULL
);
3971 case ld::Fixup::kindDataInCodeStartData
:
3972 case ld::Fixup::kindDataInCodeStartJT8
:
3973 case ld::Fixup::kindDataInCodeStartJT16
:
3974 case ld::Fixup::kindDataInCodeStartJT32
:
3975 case ld::Fixup::kindDataInCodeStartJTA32
:
3976 case ld::Fixup::kindDataInCodeEnd
:
3977 hasDataInCode
= true;
3982 if ( this->isStore(fit
->kind
) ) {
3983 fixupWithStore
= fit
;
3985 if ( fit
->lastInCluster() ) {
3986 if ( (fixupWithStore
!= NULL
) && (target
!= NULL
) ) {
3987 if ( _options
.outputKind() == Options::kObjectFile
) {
3988 this->addSectionRelocs(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithAddend
, fixupWithStore
,
3989 target
, minusTarget
, targetAddend
, minusTargetAddend
);
3992 if ( _options
.makeCompressedDyldInfo() ) {
3993 this->addDyldInfo(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithStore
,
3994 target
, minusTarget
, targetAddend
, minusTargetAddend
);
3997 this->addClassicRelocs(state
, sect
, atom
, fixupWithTarget
, fixupWithMinusTarget
, fixupWithStore
,
3998 target
, minusTarget
, targetAddend
, minusTargetAddend
);
4002 else if ( objc1ClassRefSection
&& (target
!= NULL
) && (fixupWithStore
== NULL
) ) {
4003 // check for class refs to lazy loaded dylibs
4004 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
4005 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
4006 throwf("illegal class reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
4015 void OutputFile::noteTextReloc(const ld::Atom
* atom
, const ld::Atom
* target
)
4017 if ( (atom
->contentType() == ld::Atom::typeStub
) || (atom
->contentType() == ld::Atom::typeStubHelper
) ) {
4018 // silently let stubs (synthesized by linker) use text relocs
4020 else if ( _options
.allowTextRelocs() ) {
4021 if ( _options
.warnAboutTextRelocs() )
4022 warning("text reloc in %s to %s", atom
->name(), target
->name());
4024 else if ( _options
.positionIndependentExecutable() && (_options
.outputKind() == Options::kDynamicExecutable
)
4025 && ((_options
.iOSVersionMin() >= ld::iOS_4_3
) || (_options
.macosxVersionMin() >= ld::mac10_7
)) ) {
4026 if ( ! this->pieDisabled
) {
4027 switch ( _options
.architecture()) {
4028 #if SUPPORT_ARCH_arm64
4029 case CPU_TYPE_ARM64
:
4031 #if SUPPORT_ARCH_arm64
4033 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
4034 throwf("Absolute addressing not allowed in arm64 code but used in '%s' referencing '%s'", demangledName
, _options
.demangleSymbol(target
->name()));
4038 warning("PIE disabled. Absolute addressing (perhaps -mdynamic-no-pic) not allowed in code signed PIE, "
4039 "but used in %s from %s. "
4040 "To fix this warning, don't compile with -mdynamic-no-pic or link with -Wl,-no_pie",
4041 atom
->name(), atom
->safeFilePath());
4044 this->pieDisabled
= true;
4046 else if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) ) {
4047 throwf("illegal text-relocoation (direct reference) to (global,weak) %s in %s from %s in %s", target
->name(), target
->safeFilePath(), atom
->name(), atom
->safeFilePath());
4050 if ( (target
->file() != NULL
) && (atom
->file() != NULL
) )
4051 throwf("illegal text-relocation to '%s' in %s from '%s' in %s", target
->name(), target
->safeFilePath(), atom
->name(), atom
->safeFilePath());
4053 throwf("illegal text reloc in '%s' to '%s'", atom
->name(), target
->name());
4057 void OutputFile::addDyldInfo(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
4058 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
, ld::Fixup
* fixupWithStore
,
4059 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
4060 uint64_t targetAddend
, uint64_t minusTargetAddend
)
4062 if ( sect
->isSectionHidden() )
4065 // no need to rebase or bind PCRel stores
4066 if ( this->isPcRelStore(fixupWithStore
->kind
) ) {
4067 // as long as target is in same linkage unit
4068 if ( (target
== NULL
) || (target
->definition() != ld::Atom::definitionProxy
) ) {
4069 // make sure target is not global and weak
4070 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
)) {
4071 if ( (atom
->section().type() == ld::Section::typeCFI
)
4072 || (atom
->section().type() == ld::Section::typeDtraceDOF
)
4073 || (atom
->section().type() == ld::Section::typeUnwindInfo
) ) {
4074 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4077 // <rdar://problem/13700961> spurious warning when weak function has reference to itself
4078 if ( fixupWithTarget
->binding
== ld::Fixup::bindingDirectlyBound
) {
4079 // ok to ignore pc-rel references within a weak function to itself
4082 // Have direct reference to weak-global. This should be an indrect reference
4083 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
4084 warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
4085 "This was likely caused by different translation units being compiled with different visibility settings.",
4086 demangledName
, atom
->safeFilePath(), _options
.demangleSymbol(target
->name()), target
->safeFilePath());
4092 // no need to rebase or bind PIC internal pointer diff
4093 if ( minusTarget
!= NULL
) {
4094 // with pointer diffs, both need to be in same linkage unit
4095 assert(minusTarget
->definition() != ld::Atom::definitionProxy
);
4096 assert(target
!= NULL
);
4097 assert(target
->definition() != ld::Atom::definitionProxy
);
4098 if ( target
== minusTarget
) {
4099 // This is a compile time constant and could have been optimized away by compiler
4103 // check if target of pointer-diff is global and weak
4104 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) ) {
4105 if ( (atom
->section().type() == ld::Section::typeCFI
)
4106 || (atom
->section().type() == ld::Section::typeDtraceDOF
)
4107 || (atom
->section().type() == ld::Section::typeUnwindInfo
) ) {
4108 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4111 // Have direct reference to weak-global. This should be an indrect reference
4112 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
4113 warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
4114 "This was likely caused by different translation units being compiled with different visibility settings.",
4115 demangledName
, atom
->safeFilePath(), _options
.demangleSymbol(target
->name()), target
->safeFilePath());
4120 // no need to rebase or bind an atom's references to itself if the output is not slidable
4121 if ( (atom
== target
) && !_options
.outputSlidable() )
4124 // cluster has no target, so needs no rebasing or binding
4125 if ( target
== NULL
)
4128 const uint64_t pointerSize
= (_options
.architecture() & CPU_ARCH_ABI64
) ? 8 : 4;
4129 bool inReadOnlySeg
= ((_options
.initialSegProtection(sect
->segmentName()) & VM_PROT_WRITE
) == 0);
4130 bool needsRebase
= false;
4131 bool needsBinding
= false;
4132 bool needsLazyBinding
= false;
4133 bool needsWeakBinding
= false;
4135 uint8_t rebaseType
= REBASE_TYPE_POINTER
;
4136 uint8_t type
= BIND_TYPE_POINTER
;
4137 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
4138 bool weak_import
= (fixupWithTarget
->weakImport
|| ((dylib
!= NULL
) && dylib
->forcedWeakLinked()));
4139 uint64_t address
= atom
->finalAddress() + fixupWithTarget
->offsetInAtom
;
4140 uint64_t addend
= targetAddend
- minusTargetAddend
;
4142 // special case lazy pointers
4143 if ( fixupWithTarget
->kind
== ld::Fixup::kindLazyTarget
) {
4144 assert(fixupWithTarget
->u
.target
== target
);
4145 assert(addend
== 0);
4146 // lazy dylib lazy pointers do not have any dyld info
4147 if ( atom
->section().type() == ld::Section::typeLazyDylibPointer
)
4149 // lazy binding to weak definitions are done differently
4150 // they are directly bound to target, then have a weak bind in case of a collision
4151 if ( target
->combine() == ld::Atom::combineByName
) {
4152 if ( target
->definition() == ld::Atom::definitionProxy
) {
4153 // weak def exported from another dylib
4154 // must non-lazy bind to it plus have weak binding info in case of collision
4155 needsBinding
= true;
4156 needsWeakBinding
= true;
4159 // weak def in this linkage unit.
4160 // just rebase, plus have weak binding info in case of collision
4161 // this will be done by other cluster on lazy pointer atom
4164 else if ( target
->contentType() == ld::Atom::typeResolver
) {
4165 // <rdar://problem/8553647> Hidden resolver functions should not have lazy binding info
4166 // <rdar://problem/12629331> Resolver function run before initializers when overriding the dyld shared cache
4167 // The lazy pointers used by stubs used when non-lazy binding to a resolver are not normal lazy pointers
4168 // and should not be in lazy binding info.
4169 needsLazyBinding
= false;
4172 // normal case of a pointer to non-weak-def symbol, so can lazily bind
4173 needsLazyBinding
= true;
4177 // everything except lazy pointers
4178 switch ( target
->definition() ) {
4179 case ld::Atom::definitionProxy
:
4180 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
4181 throwf("illegal data reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
4182 if ( target
->contentType() == ld::Atom::typeTLV
) {
4183 if ( sect
->type() != ld::Section::typeTLVPointers
)
4184 throwf("illegal data reference in %s to thread local variable %s in dylib %s",
4185 atom
->name(), target
->name(), dylib
->path());
4187 if ( inReadOnlySeg
)
4188 type
= BIND_TYPE_TEXT_ABSOLUTE32
;
4189 needsBinding
= true;
4190 if ( target
->combine() == ld::Atom::combineByName
)
4191 needsWeakBinding
= true;
4193 case ld::Atom::definitionRegular
:
4194 case ld::Atom::definitionTentative
:
4195 // only slideable images need rebasing info
4196 if ( _options
.outputSlidable() ) {
4199 // references to internal symbol never need binding
4200 if ( target
->scope() != ld::Atom::scopeGlobal
)
4202 // reference to global weak def needs weak binding
4203 if ( (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) )
4204 needsWeakBinding
= true;
4205 else if ( _options
.outputKind() == Options::kDynamicExecutable
) {
4206 // in main executables, the only way regular symbols are indirected is if -interposable is used
4207 if ( _options
.interposable(target
->name()) ) {
4208 needsRebase
= false;
4209 needsBinding
= true;
4213 // for flat-namespace or interposable two-level-namespace
4214 // all references to exported symbols get indirected
4215 if ( (_options
.nameSpace() != Options::kTwoLevelNameSpace
) || _options
.interposable(target
->name()) ) {
4216 // <rdar://problem/5254468> no external relocs for flat objc classes
4217 if ( strncmp(target
->name(), ".objc_class_", 12) == 0 )
4219 // no rebase info for references to global symbols that will have binding info
4220 needsRebase
= false;
4221 needsBinding
= true;
4223 else if ( _options
.forceCoalesce(target
->name()) ) {
4224 needsWeakBinding
= true;
4228 case ld::Atom::definitionAbsolute
:
4233 // <rdar://problem/13828711> if target is an import alias, use base of alias
4234 if ( target
->isAlias() && (target
->definition() == ld::Atom::definitionProxy
) ) {
4235 for (ld::Fixup::iterator fit
= target
->fixupsBegin(), end
=target
->fixupsEnd(); fit
!= end
; ++fit
) {
4236 if ( fit
->firstInCluster() ) {
4237 if ( fit
->kind
== ld::Fixup::kindNoneFollowOn
) {
4238 if ( fit
->binding
== ld::Fixup::bindingDirectlyBound
) {
4239 //fprintf(stderr, "switching import of %s to import of %s\n", target->name(), fit->u.target->name());
4240 target
= fit
->u
.target
;
4247 // record dyld info for this cluster
4248 if ( needsRebase
) {
4249 if ( inReadOnlySeg
) {
4250 noteTextReloc(atom
, target
);
4251 sect
->hasLocalRelocs
= true; // so dyld knows to change permissions on __TEXT segment
4252 rebaseType
= REBASE_TYPE_TEXT_ABSOLUTE32
;
4254 if ( _options
.sharedRegionEligible() ) {
4255 // <rdar://problem/13287063> when range checking, ignore high byte of arm64 addends
4256 uint64_t checkAddend
= addend
;
4257 if ( (_options
.architecture() == CPU_TYPE_ARM64
)
4259 checkAddend
&= 0x0FFFFFFFFFFFFFFFULL
;
4260 if ( checkAddend
!= 0 ) {
4261 // make sure the addend does not cause the pointer to point outside the target's segment
4262 // if it does, update_dyld_shared_cache will not be able to put this dylib into the shared cache
4263 uint64_t targetAddress
= target
->finalAddress();
4264 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4265 ld::Internal::FinalSection
* sct
= *sit
;
4266 uint64_t sctEnd
= (sct
->address
+sct
->size
);
4267 if ( (sct
->address
<= targetAddress
) && (targetAddress
< sctEnd
) ) {
4268 if ( (targetAddress
+checkAddend
) > sctEnd
) {
4269 warning("data symbol %s from %s has pointer to %s + 0x%08llX. "
4270 "That large of an addend may disable %s from being put in the dyld shared cache.",
4271 atom
->name(), atom
->safeFilePath(), target
->name(), addend
, _options
.installPath() );
4277 if ( ((address
& (pointerSize
-1)) != 0) && (rebaseType
== REBASE_TYPE_POINTER
) ) {
4278 switch ( _options
.unalignedPointerTreatment() ) {
4279 case Options::kUnalignedPointerError
:
4280 throwf("pointer not aligned at address 0x%llX (%s + %lld from %s)",
4281 address
, atom
->name(), (address
- atom
->finalAddress()), atom
->safeFilePath());
4283 case Options::kUnalignedPointerWarning
:
4284 warning("pointer not aligned at address 0x%llX (%s + %lld from %s)",
4285 address
, atom
->name(), (address
- atom
->finalAddress()), atom
->safeFilePath());
4287 case Options::kUnalignedPointerIgnore
:
4292 _rebaseInfo
.push_back(RebaseInfo(rebaseType
, address
));
4294 if ( needsBinding
) {
4295 if ( inReadOnlySeg
) {
4296 noteTextReloc(atom
, target
);
4297 sect
->hasExternalRelocs
= true; // so dyld knows to change permissions on __TEXT segment
4299 if ( ((address
& (pointerSize
-1)) != 0) && (type
== BIND_TYPE_POINTER
) ) {
4300 switch ( _options
.unalignedPointerTreatment() ) {
4301 case Options::kUnalignedPointerError
:
4302 throwf("pointer not aligned at address 0x%llX (%s + %lld from %s)",
4303 address
, atom
->name(), (address
- atom
->finalAddress()), atom
->safeFilePath());
4305 case Options::kUnalignedPointerWarning
:
4306 warning("pointer not aligned at address 0x%llX (%s + %lld from %s)",
4307 address
, atom
->name(), (address
- atom
->finalAddress()), atom
->safeFilePath());
4309 case Options::kUnalignedPointerIgnore
:
4314 _bindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4316 if ( needsLazyBinding
) {
4317 if ( _options
.bindAtLoad() )
4318 _bindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4320 _lazyBindingInfo
.push_back(BindingInfo(type
, this->compressedOrdinalForAtom(target
), target
->name(), weak_import
, address
, addend
));
4322 if ( needsWeakBinding
)
4323 _weakBindingInfo
.push_back(BindingInfo(type
, 0, target
->name(), false, address
, addend
));
4327 void OutputFile::addClassicRelocs(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
4328 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
, ld::Fixup
* fixupWithStore
,
4329 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
4330 uint64_t targetAddend
, uint64_t minusTargetAddend
)
4332 if ( sect
->isSectionHidden() )
4335 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4336 if ( sect
->type() == ld::Section::typeNonLazyPointer
) {
4337 // except kexts and static pie which *do* use relocations
4338 switch (_options
.outputKind()) {
4339 case Options::kKextBundle
:
4341 case Options::kStaticExecutable
:
4342 if ( _options
.positionIndependentExecutable() )
4344 // else fall into default case
4346 assert(target
!= NULL
);
4347 assert(fixupWithTarget
!= NULL
);
4352 // no need to rebase or bind PCRel stores
4353 if ( this->isPcRelStore(fixupWithStore
->kind
) ) {
4354 // as long as target is in same linkage unit
4355 if ( (target
== NULL
) || (target
->definition() != ld::Atom::definitionProxy
) )
4359 // no need to rebase or bind PIC internal pointer diff
4360 if ( minusTarget
!= NULL
) {
4361 // with pointer diffs, both need to be in same linkage unit
4362 assert(minusTarget
->definition() != ld::Atom::definitionProxy
);
4363 assert(target
!= NULL
);
4364 assert(target
->definition() != ld::Atom::definitionProxy
);
4365 // check if target of pointer-diff is global and weak
4366 if ( (target
->scope() == ld::Atom::scopeGlobal
) && (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) ) {
4367 if ( (atom
->section().type() == ld::Section::typeCFI
)
4368 || (atom
->section().type() == ld::Section::typeDtraceDOF
)
4369 || (atom
->section().type() == ld::Section::typeUnwindInfo
) ) {
4370 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4373 // Have direct reference to weak-global. This should be an indrect reference
4374 const char* demangledName
= strdup(_options
.demangleSymbol(atom
->name()));
4375 warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
4376 "This was likely caused by different translation units being compiled with different visibility settings.",
4377 demangledName
, atom
->safeFilePath(), _options
.demangleSymbol(target
->name()), target
->safeFilePath());
4382 // cluster has no target, so needs no rebasing or binding
4383 if ( target
== NULL
)
4386 assert(_localRelocsAtom
!= NULL
);
4387 uint64_t relocAddress
= atom
->finalAddress() + fixupWithTarget
->offsetInAtom
- _localRelocsAtom
->relocBaseAddress(state
);
4389 bool inReadOnlySeg
= ( strcmp(sect
->segmentName(), "__TEXT") == 0 );
4390 bool needsLocalReloc
= false;
4391 bool needsExternReloc
= false;
4393 switch ( fixupWithStore
->kind
) {
4394 case ld::Fixup::kindLazyTarget
:
4395 // lazy pointers don't need relocs
4397 case ld::Fixup::kindStoreLittleEndian32
:
4398 case ld::Fixup::kindStoreLittleEndian64
:
4399 case ld::Fixup::kindStoreBigEndian32
:
4400 case ld::Fixup::kindStoreBigEndian64
:
4401 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4402 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4403 case ld::Fixup::kindStoreTargetAddressBigEndian32
:
4404 case ld::Fixup::kindStoreTargetAddressBigEndian64
:
4406 switch ( target
->definition() ) {
4407 case ld::Atom::definitionProxy
:
4408 needsExternReloc
= true;
4410 case ld::Atom::definitionRegular
:
4411 case ld::Atom::definitionTentative
:
4412 // only slideable images need local relocs
4413 if ( _options
.outputSlidable() )
4414 needsLocalReloc
= true;
4415 // references to internal symbol never need binding
4416 if ( target
->scope() != ld::Atom::scopeGlobal
)
4418 // reference to global weak def needs weak binding in dynamic images
4419 if ( (target
->combine() == ld::Atom::combineByName
)
4420 && (target
->definition() == ld::Atom::definitionRegular
)
4421 && (_options
.outputKind() != Options::kStaticExecutable
)
4422 && (_options
.outputKind() != Options::kPreload
)
4423 && (atom
!= target
) ) {
4424 needsExternReloc
= true;
4426 else if ( _options
.outputKind() == Options::kDynamicExecutable
) {
4427 // in main executables, the only way regular symbols are indirected is if -interposable is used
4428 if ( _options
.interposable(target
->name()) )
4429 needsExternReloc
= true;
4432 // for flat-namespace or interposable two-level-namespace
4433 // all references to exported symbols get indirected
4434 if ( (_options
.nameSpace() != Options::kTwoLevelNameSpace
) || _options
.interposable(target
->name()) ) {
4435 // <rdar://problem/5254468> no external relocs for flat objc classes
4436 if ( strncmp(target
->name(), ".objc_class_", 12) == 0 )
4438 // no rebase info for references to global symbols that will have binding info
4439 needsExternReloc
= true;
4442 if ( needsExternReloc
)
4443 needsLocalReloc
= false;
4445 case ld::Atom::definitionAbsolute
:
4448 if ( needsExternReloc
) {
4449 if ( inReadOnlySeg
)
4450 noteTextReloc(atom
, target
);
4451 const ld::dylib::File
* dylib
= dynamic_cast<const ld::dylib::File
*>(target
->file());
4452 if ( (dylib
!= NULL
) && dylib
->willBeLazyLoadedDylib() )
4453 throwf("illegal data reference to %s in lazy loaded dylib %s", target
->name(), dylib
->path());
4454 _externalRelocsAtom
->addExternalPointerReloc(relocAddress
, target
);
4455 sect
->hasExternalRelocs
= true;
4456 fixupWithTarget
->contentAddendOnly
= true;
4458 else if ( needsLocalReloc
) {
4459 assert(target
!= NULL
);
4460 if ( inReadOnlySeg
)
4461 noteTextReloc(atom
, target
);
4462 _localRelocsAtom
->addPointerReloc(relocAddress
, target
->machoSection());
4463 sect
->hasLocalRelocs
= true;
4466 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32
:
4467 #if SUPPORT_ARCH_arm64
4468 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
4470 if ( _options
.outputKind() == Options::kKextBundle
) {
4471 assert(target
!= NULL
);
4472 if ( target
->definition() == ld::Atom::definitionProxy
) {
4473 _externalRelocsAtom
->addExternalCallSiteReloc(relocAddress
, target
);
4474 fixupWithStore
->contentAddendOnly
= true;
4479 case ld::Fixup::kindStoreARMLow16
:
4480 case ld::Fixup::kindStoreThumbLow16
:
4481 // no way to encode rebasing of binding for these instructions
4482 if ( _options
.outputSlidable() || (target
->definition() == ld::Atom::definitionProxy
) )
4483 throwf("no supported runtime lo16 relocation in %s from %s to %s", atom
->name(), atom
->safeFilePath(), target
->name());
4486 case ld::Fixup::kindStoreARMHigh16
:
4487 case ld::Fixup::kindStoreThumbHigh16
:
4488 // no way to encode rebasing of binding for these instructions
4489 if ( _options
.outputSlidable() || (target
->definition() == ld::Atom::definitionProxy
) )
4490 throwf("no supported runtime hi16 relocation in %s from %s to %s", atom
->name(), atom
->safeFilePath(), target
->name());
4499 bool OutputFile::useExternalSectionReloc(const ld::Atom
* atom
, const ld::Atom
* target
, ld::Fixup
* fixupWithTarget
)
4501 if ( (_options
.architecture() == CPU_TYPE_X86_64
)
4502 || (_options
.architecture() == CPU_TYPE_ARM64
)
4504 // x86_64 and ARM64 use external relocations for everthing that has a symbol
4505 return ( target
->symbolTableInclusion() != ld::Atom::symbolTableNotIn
);
4508 // <rdar://problem/9513487> support arm branch interworking in -r mode
4509 if ( (_options
.architecture() == CPU_TYPE_ARM
) && (_options
.outputKind() == Options::kObjectFile
) ) {
4510 if ( atom
->isThumb() != target
->isThumb() ) {
4511 switch ( fixupWithTarget
->kind
) {
4512 // have branch that switches mode, then might be 'b' not 'bl'
4513 // Force external relocation, since no way to do local reloc for 'b'
4514 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
4515 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
4523 if ( (_options
.architecture() == CPU_TYPE_I386
) && (_options
.outputKind() == Options::kObjectFile
) ) {
4524 if ( target
->contentType() == ld::Atom::typeTLV
)
4528 // most architectures use external relocations only for references
4529 // to a symbol in another translation unit or for references to "weak symbols" or tentative definitions
4530 assert(target
!= NULL
);
4531 if ( target
->definition() == ld::Atom::definitionProxy
)
4533 if ( (target
->definition() == ld::Atom::definitionTentative
) && ! _options
.makeTentativeDefinitionsReal() )
4535 if ( target
->scope() != ld::Atom::scopeGlobal
)
4537 if ( (target
->combine() == ld::Atom::combineByName
) && (target
->definition() == ld::Atom::definitionRegular
) )
4542 bool OutputFile::useSectionRelocAddend(ld::Fixup
* fixupWithTarget
)
4544 #if SUPPORT_ARCH_arm64
4545 if ( _options
.architecture() == CPU_TYPE_ARM64
) {
4546 switch ( fixupWithTarget
->kind
) {
4547 case ld::Fixup::kindStoreARM64Branch26
:
4548 case ld::Fixup::kindStoreARM64Page21
:
4549 case ld::Fixup::kindStoreARM64PageOff12
:
4562 void OutputFile::addSectionRelocs(ld::Internal
& state
, ld::Internal::FinalSection
* sect
, const ld::Atom
* atom
,
4563 ld::Fixup
* fixupWithTarget
, ld::Fixup
* fixupWithMinusTarget
,
4564 ld::Fixup
* fixupWithAddend
, ld::Fixup
* fixupWithStore
,
4565 const ld::Atom
* target
, const ld::Atom
* minusTarget
,
4566 uint64_t targetAddend
, uint64_t minusTargetAddend
)
4568 if ( sect
->isSectionHidden() )
4571 // in -r mode where there will be no labels on __eh_frame section, there is no need for relocations
4572 if ( (sect
->type() == ld::Section::typeCFI
) && _options
.removeEHLabels() )
4575 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4576 if ( sect
->type() == ld::Section::typeNonLazyPointer
)
4579 // tentative defs don't have any relocations
4580 if ( sect
->type() == ld::Section::typeTentativeDefs
)
4583 assert(target
!= NULL
);
4584 assert(fixupWithTarget
!= NULL
);
4585 bool targetUsesExternalReloc
= this->useExternalSectionReloc(atom
, target
, fixupWithTarget
);
4586 bool minusTargetUsesExternalReloc
= (minusTarget
!= NULL
) && this->useExternalSectionReloc(atom
, minusTarget
, fixupWithMinusTarget
);
4588 // in x86_64 and arm64 .o files an external reloc means the content contains just the addend
4589 if ( (_options
.architecture() == CPU_TYPE_X86_64
)
4590 || (_options
.architecture() == CPU_TYPE_ARM64
)
4592 if ( targetUsesExternalReloc
) {
4593 fixupWithTarget
->contentAddendOnly
= true;
4594 fixupWithStore
->contentAddendOnly
= true;
4595 if ( this->useSectionRelocAddend(fixupWithStore
) && (fixupWithAddend
!= NULL
) )
4596 fixupWithAddend
->contentIgnoresAddend
= true;
4598 if ( minusTargetUsesExternalReloc
)
4599 fixupWithMinusTarget
->contentAddendOnly
= true;
4602 // for other archs, content is addend only with (non pc-rel) pointers
4603 // pc-rel instructions are funny. If the target is _foo+8 and _foo is
4604 // external, then the pc-rel instruction *evalutates* to the address 8.
4605 if ( targetUsesExternalReloc
) {
4606 // TLV support for i386 acts like RIP relative addressing
4607 // The addend is the offset from the PICBase to the end of the instruction
4608 if ( (_options
.architecture() == CPU_TYPE_I386
)
4609 && (_options
.outputKind() == Options::kObjectFile
)
4610 && (fixupWithStore
->kind
== ld::Fixup::kindStoreX86PCRel32TLVLoad
) ) {
4611 fixupWithTarget
->contentAddendOnly
= true;
4612 fixupWithStore
->contentAddendOnly
= true;
4614 else if ( isPcRelStore(fixupWithStore
->kind
) ) {
4615 fixupWithTarget
->contentDetlaToAddendOnly
= true;
4616 fixupWithStore
->contentDetlaToAddendOnly
= true;
4618 else if ( minusTarget
== NULL
){
4619 fixupWithTarget
->contentAddendOnly
= true;
4620 fixupWithStore
->contentAddendOnly
= true;
4625 if ( fixupWithStore
!= NULL
) {
4626 _sectionsRelocationsAtom
->addSectionReloc(sect
, fixupWithStore
->kind
, atom
, fixupWithStore
->offsetInAtom
,
4627 targetUsesExternalReloc
, minusTargetUsesExternalReloc
,
4628 target
, targetAddend
, minusTarget
, minusTargetAddend
);
4633 void OutputFile::makeSplitSegInfo(ld::Internal
& state
)
4635 if ( !_options
.sharedRegionEligible() )
4638 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4639 ld::Internal::FinalSection
* sect
= *sit
;
4640 if ( sect
->isSectionHidden() )
4642 if ( strcmp(sect
->segmentName(), "__TEXT") != 0 )
4644 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4645 const ld::Atom
* atom
= *ait
;
4646 const ld::Atom
* target
= NULL
;
4647 const ld::Atom
* fromTarget
= NULL
;
4648 uint64_t accumulator
= 0;
4650 bool hadSubtract
= false;
4651 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
4652 if ( fit
->firstInCluster() )
4654 if ( this->setsTarget(fit
->kind
) ) {
4655 accumulator
= addressOf(state
, fit
, &target
);
4656 thumbTarget
= targetIsThumb(state
, fit
);
4660 switch ( fit
->kind
) {
4661 case ld::Fixup::kindSubtractTargetAddress
:
4662 accumulator
-= addressOf(state
, fit
, &fromTarget
);
4665 case ld::Fixup::kindAddAddend
:
4666 accumulator
+= fit
->u
.addend
;
4668 case ld::Fixup::kindSubtractAddend
:
4669 accumulator
-= fit
->u
.addend
;
4671 case ld::Fixup::kindStoreBigEndian32
:
4672 case ld::Fixup::kindStoreLittleEndian32
:
4673 case ld::Fixup::kindStoreLittleEndian64
:
4674 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4675 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4676 // if no subtract, then this is an absolute pointer which means
4677 // there is also a text reloc which update_dyld_shared_cache will use.
4678 if ( ! hadSubtract
)
4681 case ld::Fixup::kindStoreX86PCRel32
:
4682 case ld::Fixup::kindStoreX86PCRel32_1
:
4683 case ld::Fixup::kindStoreX86PCRel32_2
:
4684 case ld::Fixup::kindStoreX86PCRel32_4
:
4685 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
4686 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
4687 case ld::Fixup::kindStoreX86PCRel32GOT
:
4688 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
4689 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
4690 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
4691 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
4692 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
4693 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
4694 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
4695 case ld::Fixup::kindStoreARMLow16
:
4696 case ld::Fixup::kindStoreThumbLow16
:
4697 #if SUPPORT_ARCH_arm64
4698 case ld::Fixup::kindStoreARM64Page21
:
4699 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
4700 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
4701 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
4702 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
4703 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
4704 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
4705 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
4706 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
4707 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
4708 case ld::Fixup::kindStoreARM64PCRelToGOT
:
4710 assert(target
!= NULL
);
4711 if ( strcmp(sect
->segmentName(), target
->section().segmentName()) != 0 ) {
4712 _splitSegInfos
.push_back(SplitSegInfoEntry(atom
->finalAddress()+fit
->offsetInAtom
,fit
->kind
));
4715 case ld::Fixup::kindStoreARMHigh16
:
4716 case ld::Fixup::kindStoreThumbHigh16
:
4717 assert(target
!= NULL
);
4718 if ( strcmp(sect
->segmentName(), target
->section().segmentName()) != 0 ) {
4719 // hi16 needs to know upper 4-bits of low16 to compute carry
4720 uint32_t extra
= (accumulator
>> 12) & 0xF;
4721 _splitSegInfos
.push_back(SplitSegInfoEntry(atom
->finalAddress()+fit
->offsetInAtom
,fit
->kind
, extra
));
4724 case ld::Fixup::kindSetTargetImageOffset
:
4725 accumulator
= addressOf(state
, fit
, &target
);
4726 assert(target
!= NULL
);
4737 void OutputFile::makeSplitSegInfoV2(ld::Internal
& state
)
4739 static const bool log
= false;
4740 if ( !_options
.sharedRegionEligible() )
4743 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4744 ld::Internal::FinalSection
* sect
= *sit
;
4745 if ( sect
->isSectionHidden() )
4747 bool codeSection
= (sect
->type() == ld::Section::typeCode
);
4748 if (log
) fprintf(stderr
, "sect: %s, address=0x%llX\n", sect
->sectionName(), sect
->address
);
4749 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4750 const ld::Atom
* atom
= *ait
;
4751 const ld::Atom
* target
= NULL
;
4752 const ld::Atom
* fromTarget
= NULL
;
4753 uint32_t picBase
= 0;
4754 uint64_t accumulator
= 0;
4756 bool hadSubtract
= false;
4757 uint8_t fromSectionIndex
= atom
->machoSection();
4758 uint8_t toSectionIndex
;
4760 uint64_t fromOffset
= 0;
4761 uint64_t toOffset
= 0;
4762 uint64_t addend
= 0;
4763 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
4764 if ( fit
->firstInCluster() ) {
4766 hadSubtract
= false;
4770 toSectionIndex
= 255;
4771 fromOffset
= atom
->finalAddress() + fit
->offsetInAtom
- sect
->address
;
4773 if ( this->setsTarget(fit
->kind
) ) {
4774 accumulator
= addressAndTarget(state
, fit
, &target
);
4775 thumbTarget
= targetIsThumb(state
, fit
);
4778 toOffset
= accumulator
- state
.atomToSection
[target
]->address
;
4779 if ( target
->definition() != ld::Atom::definitionProxy
) {
4780 if ( target
->section().type() == ld::Section::typeMachHeader
)
4783 toSectionIndex
= target
->machoSection();
4786 switch ( fit
->kind
) {
4787 case ld::Fixup::kindSubtractTargetAddress
:
4788 accumulator
-= addressAndTarget(state
, fit
, &fromTarget
);
4791 case ld::Fixup::kindAddAddend
:
4792 accumulator
+= fit
->u
.addend
;
4793 addend
= fit
->u
.addend
;
4795 case ld::Fixup::kindSubtractAddend
:
4796 accumulator
-= fit
->u
.addend
;
4797 picBase
= fit
->u
.addend
;
4799 case ld::Fixup::kindSetLazyOffset
:
4801 case ld::Fixup::kindStoreBigEndian32
:
4802 case ld::Fixup::kindStoreLittleEndian32
:
4803 case ld::Fixup::kindStoreTargetAddressLittleEndian32
:
4804 if ( kind
!= DYLD_CACHE_ADJ_V2_IMAGE_OFF_32
) {
4806 kind
= DYLD_CACHE_ADJ_V2_DELTA_32
;
4808 kind
= DYLD_CACHE_ADJ_V2_POINTER_32
;
4811 case ld::Fixup::kindStoreLittleEndian64
:
4812 case ld::Fixup::kindStoreTargetAddressLittleEndian64
:
4814 kind
= DYLD_CACHE_ADJ_V2_DELTA_64
;
4816 kind
= DYLD_CACHE_ADJ_V2_POINTER_64
;
4818 case ld::Fixup::kindStoreX86PCRel32
:
4819 case ld::Fixup::kindStoreX86PCRel32_1
:
4820 case ld::Fixup::kindStoreX86PCRel32_2
:
4821 case ld::Fixup::kindStoreX86PCRel32_4
:
4822 case ld::Fixup::kindStoreX86PCRel32GOTLoad
:
4823 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA
:
4824 case ld::Fixup::kindStoreX86PCRel32GOT
:
4825 case ld::Fixup::kindStoreX86PCRel32TLVLoad
:
4826 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA
:
4827 case ld::Fixup::kindStoreTargetAddressX86PCRel32
:
4828 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad
:
4829 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA
:
4830 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad
:
4831 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA
:
4832 #if SUPPORT_ARCH_arm64
4833 case ld::Fixup::kindStoreARM64PCRelToGOT
:
4835 if ( (fromSectionIndex
!= toSectionIndex
) || !codeSection
)
4836 kind
= DYLD_CACHE_ADJ_V2_DELTA_32
;
4838 #if SUPPORT_ARCH_arm64
4839 case ld::Fixup::kindStoreARM64Page21
:
4840 case ld::Fixup::kindStoreARM64GOTLoadPage21
:
4841 case ld::Fixup::kindStoreARM64GOTLeaPage21
:
4842 case ld::Fixup::kindStoreARM64TLVPLoadPage21
:
4843 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21
:
4844 case ld::Fixup::kindStoreTargetAddressARM64Page21
:
4845 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21
:
4846 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21
:
4847 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21
:
4848 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21
:
4849 if ( fromSectionIndex
!= toSectionIndex
)
4850 kind
= DYLD_CACHE_ADJ_V2_ARM64_ADRP
;
4852 case ld::Fixup::kindStoreARM64PageOff12
:
4853 case ld::Fixup::kindStoreARM64GOTLeaPageOff12
:
4854 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12
:
4855 case ld::Fixup::kindStoreTargetAddressARM64PageOff12
:
4856 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12
:
4857 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12
:
4858 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12
:
4859 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12
:
4860 if ( fromSectionIndex
!= toSectionIndex
)
4861 kind
= DYLD_CACHE_ADJ_V2_ARM64_OFF12
;
4863 case ld::Fixup::kindStoreARM64Branch26
:
4864 case ld::Fixup::kindStoreTargetAddressARM64Branch26
:
4865 if ( fromSectionIndex
!= toSectionIndex
)
4866 kind
= DYLD_CACHE_ADJ_V2_ARM64_BR26
;
4869 case ld::Fixup::kindStoreARMHigh16
:
4870 case ld::Fixup::kindStoreARMLow16
:
4871 if ( (fromSectionIndex
!= toSectionIndex
) && (fromTarget
== atom
) ) {
4872 kind
= DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT
;
4875 case ld::Fixup::kindStoreARMBranch24
:
4876 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
4877 if ( fromSectionIndex
!= toSectionIndex
)
4878 kind
= DYLD_CACHE_ADJ_V2_ARM_BR24
;
4880 case ld::Fixup::kindStoreThumbLow16
:
4881 case ld::Fixup::kindStoreThumbHigh16
:
4882 if ( (fromSectionIndex
!= toSectionIndex
) && (fromTarget
== atom
) ) {
4883 kind
= DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT
;
4886 case ld::Fixup::kindStoreThumbBranch22
:
4887 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
4888 if ( fromSectionIndex
!= toSectionIndex
)
4889 kind
= DYLD_CACHE_ADJ_V2_THUMB_BR22
;
4891 case ld::Fixup::kindSetTargetImageOffset
:
4892 kind
= DYLD_CACHE_ADJ_V2_IMAGE_OFF_32
;
4893 accumulator
= addressAndTarget(state
, fit
, &target
);
4894 assert(target
!= NULL
);
4895 toSectionIndex
= target
->machoSection();
4896 toOffset
= accumulator
- state
.atomToSection
[target
]->address
;
4902 if ( fit
->lastInCluster() ) {
4903 if ( (kind
!= 0) && (target
!= NULL
) && (target
->definition() != ld::Atom::definitionProxy
) ) {
4904 if ( !hadSubtract
&& addend
)
4906 assert(toSectionIndex
!= 255);
4907 if (log
) fprintf(stderr
, "from (%d.%s + 0x%llX) to (%d.%s + 0x%llX), kind=%d, atomAddr=0x%llX, sectAddr=0x%llx\n",
4908 fromSectionIndex
, sect
->sectionName(), fromOffset
, toSectionIndex
, state
.atomToSection
[target
]->sectionName(),
4909 toOffset
, kind
, atom
->finalAddress(), sect
->address
);
4910 _splitSegV2Infos
.push_back(SplitSegInfoV2Entry(fromSectionIndex
, fromOffset
, toSectionIndex
, toOffset
, kind
));
4919 void OutputFile::writeMapFile(ld::Internal
& state
)
4921 if ( _options
.generatedMapPath() != NULL
) {
4922 FILE* mapFile
= fopen(_options
.generatedMapPath(), "w");
4923 if ( mapFile
!= NULL
) {
4924 // write output path
4925 fprintf(mapFile
, "# Path: %s\n", _options
.outputFilePath());
4926 // write output architecure
4927 fprintf(mapFile
, "# Arch: %s\n", _options
.architectureName());
4929 //if ( fUUIDAtom != NULL ) {
4930 // const uint8_t* uuid = fUUIDAtom->getUUID();
4931 // fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
4932 // uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
4933 // uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
4935 // write table of object files
4936 std::map
<const ld::File
*, ld::File::Ordinal
> readerToOrdinal
;
4937 std::map
<ld::File::Ordinal
, const ld::File
*> ordinalToReader
;
4938 std::map
<const ld::File
*, uint32_t> readerToFileOrdinal
;
4939 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4940 ld::Internal::FinalSection
* sect
= *sit
;
4941 if ( sect
->isSectionHidden() )
4943 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4944 const ld::Atom
* atom
= *ait
;
4945 const ld::File
* reader
= atom
->originalFile();
4946 if ( reader
== NULL
)
4948 ld::File::Ordinal readerOrdinal
= reader
->ordinal();
4949 std::map
<const ld::File
*, ld::File::Ordinal
>::iterator pos
= readerToOrdinal
.find(reader
);
4950 if ( pos
== readerToOrdinal
.end() ) {
4951 readerToOrdinal
[reader
] = readerOrdinal
;
4952 ordinalToReader
[readerOrdinal
] = reader
;
4956 for (const ld::Atom
* atom
: state
.deadAtoms
) {
4957 const ld::File
* reader
= atom
->originalFile();
4958 if ( reader
== NULL
)
4960 ld::File::Ordinal readerOrdinal
= reader
->ordinal();
4961 std::map
<const ld::File
*, ld::File::Ordinal
>::iterator pos
= readerToOrdinal
.find(reader
);
4962 if ( pos
== readerToOrdinal
.end() ) {
4963 readerToOrdinal
[reader
] = readerOrdinal
;
4964 ordinalToReader
[readerOrdinal
] = reader
;
4967 fprintf(mapFile
, "# Object files:\n");
4968 fprintf(mapFile
, "[%3u] %s\n", 0, "linker synthesized");
4969 uint32_t fileIndex
= 1;
4970 for(std::map
<ld::File::Ordinal
, const ld::File
*>::iterator it
= ordinalToReader
.begin(); it
!= ordinalToReader
.end(); ++it
) {
4971 fprintf(mapFile
, "[%3u] %s\n", fileIndex
, it
->second
->path());
4972 readerToFileOrdinal
[it
->second
] = fileIndex
++;
4974 // write table of sections
4975 fprintf(mapFile
, "# Sections:\n");
4976 fprintf(mapFile
, "# Address\tSize \tSegment\tSection\n");
4977 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4978 ld::Internal::FinalSection
* sect
= *sit
;
4979 if ( sect
->isSectionHidden() )
4981 fprintf(mapFile
, "0x%08llX\t0x%08llX\t%s\t%s\n", sect
->address
, sect
->size
,
4982 sect
->segmentName(), sect
->sectionName());
4984 // write table of symbols
4985 fprintf(mapFile
, "# Symbols:\n");
4986 fprintf(mapFile
, "# Address\tSize \tFile Name\n");
4987 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
4988 ld::Internal::FinalSection
* sect
= *sit
;
4989 if ( sect
->isSectionHidden() )
4991 //bool isCstring = (sect->type() == ld::Section::typeCString);
4992 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
4994 const ld::Atom
* atom
= *ait
;
4995 const char* name
= atom
->name();
4996 // don't add auto-stripped aliases to .map file
4997 if ( (atom
->size() == 0) && (atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
) )
4999 if ( atom
->contentType() == ld::Atom::typeCString
) {
5000 strcpy(buffer
, "literal string: ");
5001 const char* s
= (char*)atom
->rawContentPointer();
5002 char* e
= &buffer
[4094];
5003 for (char* b
= &buffer
[strlen(buffer
)]; b
< e
;) {
5015 buffer
[4095] = '\0';
5018 else if ( (atom
->contentType() == ld::Atom::typeCFI
) && (strcmp(name
, "FDE") == 0) ) {
5019 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
5020 if ( (fit
->kind
== ld::Fixup::kindSetTargetAddress
) && (fit
->clusterSize
== ld::Fixup::k1of4
) ) {
5021 if ( (fit
->binding
== ld::Fixup::bindingDirectlyBound
)
5022 && (fit
->u
.target
->section().type() == ld::Section::typeCode
) ) {
5023 strcpy(buffer
, "FDE for: ");
5024 strlcat(buffer
, fit
->u
.target
->name(), 4096);
5030 else if ( atom
->contentType() == ld::Atom::typeNonLazyPointer
) {
5031 strcpy(buffer
, "non-lazy-pointer");
5032 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(); fit
!= atom
->fixupsEnd(); ++fit
) {
5033 if ( fit
->binding
== ld::Fixup::bindingsIndirectlyBound
) {
5034 strcpy(buffer
, "non-lazy-pointer-to: ");
5035 strlcat(buffer
, state
.indirectBindingTable
[fit
->u
.bindingIndex
]->name(), 4096);
5038 else if ( fit
->binding
== ld::Fixup::bindingDirectlyBound
) {
5039 strcpy(buffer
, "non-lazy-pointer-to-local: ");
5040 strlcat(buffer
, fit
->u
.target
->name(), 4096);
5046 fprintf(mapFile
, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom
->finalAddress(), atom
->size(),
5047 readerToFileOrdinal
[atom
->originalFile()], name
);
5050 // preload check is hack until 26613948 is fixed
5051 if ( _options
.deadCodeStrip() && (_options
.outputKind() != Options::kPreload
) ) {
5052 fprintf(mapFile
, "\n");
5053 fprintf(mapFile
, "# Dead Stripped Symbols:\n");
5054 fprintf(mapFile
, "# \tSize \tFile Name\n");
5055 for (const ld::Atom
* atom
: state
.deadAtoms
) {
5057 const char* name
= atom
->name();
5058 // don't add auto-stripped aliases to .map file
5059 if ( (atom
->size() == 0) && (atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
) )
5061 if ( atom
->contentType() == ld::Atom::typeCString
) {
5062 strcpy(buffer
, "literal string: ");
5063 const char* s
= (char*)atom
->rawContentPointer();
5064 char* e
= &buffer
[4094];
5065 for (char* b
= &buffer
[strlen(buffer
)]; b
< e
;) {
5077 buffer
[4095] = '\0';
5080 fprintf(mapFile
, "<<dead>> \t0x%08llX\t[%3u] %s\n", atom
->size(),
5081 readerToFileOrdinal
[atom
->originalFile()], name
);
5087 warning("could not write map file: %s\n", _options
.generatedMapPath());
// Return the canonicalized absolute path for 'path' (symlinks and
// "."/".." components resolved), as a std::string.
// If realpath() fails (e.g. the file does not exist), the original
// path is returned unchanged so callers always get a usable string.
static std::string realPathString(const char* path)
{
	char realName[MAXPATHLEN];
	if ( realpath(path, realName) != NULL )
		return realName;
	return path;
}
// Emit one JSON trace record for this linked image (UUID, leaf name,
// architecture, and the dylibs/archives it links against) to the trace
// file configured in _options.  Only runs when JSON tracing is enabled,
// a UUID is being generated, and a trace output file was specified.
// NOTE(review): this region was garbled in extraction -- several
// structural lines (else-branch, "]" array terminators, "," separators,
// closing braces) are missing below; compare against upstream ld64
// OutputFile.cpp before editing the logic.
5100 void OutputFile::writeJSONEntry(ld::Internal
& state
)
5102 if ( _options
.traceEmitJSON() && (_options
.UUIDMode() != Options::kUUIDNone
) && (_options
.traceOutputFile() != NULL
) ) {
5104 // Convert the UUID to a string.
5105 const uint8_t* uuid
= _headersAndLoadCommandAtom
->getUUID();
5106 uuid_string_t uuidString
;
5108 uuid_unparse(uuid
, uuidString
);
5110 // Enumerate the dylibs.
// Partition the dylibs being loaded into three lists: upward links,
// re-exported dylibs, and ordinary dynamic links (the fall-through case).
5111 std::vector
<const ld::dylib::File
*> dynamicList
;
5112 std::vector
<const ld::dylib::File
*> upwardList
;
5113 std::vector
<const ld::dylib::File
*> reexportList
;
5115 for (const ld::dylib::File
* dylib
: _dylibsToLoad
) {
5117 if (dylib
->willBeUpwardDylib()) {
5119 upwardList
.push_back(dylib
);
5120 } else if (dylib
->willBeReExported()) {
5122 reexportList
.push_back(dylib
);
5125 dynamicList
.push_back(dylib
);
5130 * Build the JSON entry.
5133 std::string jsonEntry
= "{";
5135 jsonEntry
+= "\"uuid\":\"" + std::string(uuidString
) + "\",";
5137 // installPath() returns -final_output for non-dylibs
5138 const char* lastNameSlash
= strrchr(_options
.installPath(), '/');
5139 const char* leafName
= (lastNameSlash
!= NULL
) ? lastNameSlash
+1 : _options
.outputFilePath();
5140 jsonEntry
+= "\"name\":\"" + std::string(leafName
) + "\",";
5142 jsonEntry
+= "\"arch\":\"" + std::string(_options
.architectureName()) + "\"";
// Each non-empty list below becomes a JSON array keyed by link kind;
// entries are the realpath'd dylib paths.  The dylib != list.back()
// comparison decides whether a "," separator follows the entry.
5144 if (dynamicList
.size() > 0) {
5145 jsonEntry
+= ",\"dynamic\":[";
5146 for (const ld::dylib::File
* dylib
: dynamicList
) {
5147 jsonEntry
+= "\"" + realPathString(dylib
->path()) + "\"";
5148 if ((dylib
!= dynamicList
.back())) {
5155 if (upwardList
.size() > 0) {
5156 jsonEntry
+= ",\"upward-dynamic\":[";
5157 for (const ld::dylib::File
* dylib
: upwardList
) {
5158 jsonEntry
+= "\"" + realPathString(dylib
->path()) + "\"";
5159 if ((dylib
!= upwardList
.back())) {
5166 if (reexportList
.size() > 0) {
5167 jsonEntry
+= ",\"re-exports\":[";
5168 for (const ld::dylib::File
* dylib
: reexportList
) {
5169 jsonEntry
+= "\"" + realPathString(dylib
->path()) + "\"";
5170 if ((dylib
!= reexportList
.back())) {
// Static archives pulled into the link, if any.
5177 if (state
.archivePaths
.size() > 0) {
5178 jsonEntry
+= ",\"archives\":[";
5179 for (const std::string
& archivePath
: state
.archivePaths
) {
5180 jsonEntry
+= "\"" + realPathString(archivePath
.c_str()) + "\"";
5181 if ((archivePath
!= state
.archivePaths
.back())) {
// The -bundle_loader executable, if one was given.
5188 if (state
.bundleLoader
!= NULL
) {
5189 jsonEntry
+= ",\"bundle-loader\":";
5190 jsonEntry
+= "\"" + realPathString(state
.bundleLoader
->path()) + "\"";
5195 // Write the JSON entry to the trace file.
5196 _options
.writeToTraceFile(jsonEntry
.c_str(), jsonEntry
.size());
5200 // used to sort atoms with debug notes
5201 class DebugNoteSorter
5204 bool operator()(const ld::Atom
* left
, const ld::Atom
* right
) const
5206 // first sort by reader
5207 ld::File::Ordinal leftFileOrdinal
= left
->file()->ordinal();
5208 ld::File::Ordinal rightFileOrdinal
= right
->file()->ordinal();
5209 if ( leftFileOrdinal
!= rightFileOrdinal
)
5210 return (leftFileOrdinal
< rightFileOrdinal
);
5212 // then sort by atom objectAddress
5213 uint64_t leftAddr
= left
->finalAddress();
5214 uint64_t rightAddr
= right
->finalAddress();
5215 return leftAddr
< rightAddr
;
5220 const char* OutputFile::assureFullPath(const char* path
)
5222 if ( path
[0] == '/' )
5224 char cwdbuff
[MAXPATHLEN
];
5225 if ( getcwd(cwdbuff
, MAXPATHLEN
) != NULL
) {
5227 asprintf(&result
, "%s/%s", cwdbuff
, path
);
5228 if ( result
!= NULL
)
// Return the last-modification time of 'path', or 0 if the file
// cannot be stat'ed (e.g. it does not exist).  Used to timestamp
// N_AST stab entries for -add_ast_path files.
static time_t fileModTime(const char* path) {
	struct stat statBuffer;
	if ( stat(path, &statBuffer) == 0 ) {
		return statBuffer.st_mtime;
	}
	return 0;
}
5243 void OutputFile::synthesizeDebugNotes(ld::Internal
& state
)
5245 // -S means don't synthesize debug map
5246 if ( _options
.debugInfoStripping() == Options::kDebugInfoNone
)
5248 // make a vector of atoms that come from files compiled with dwarf debug info
5249 std::vector
<const ld::Atom
*> atomsNeedingDebugNotes
;
5250 std::set
<const ld::Atom
*> atomsWithStabs
;
5251 std::set
<const ld::relocatable::File
*> filesSeenWithStabs
;
5252 atomsNeedingDebugNotes
.reserve(1024);
5253 const ld::relocatable::File
* objFile
= NULL
;
5254 bool objFileHasDwarf
= false;
5255 bool objFileHasStabs
= false;
5256 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
5257 ld::Internal::FinalSection
* sect
= *sit
;
5258 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
5259 const ld::Atom
* atom
= *ait
;
5260 // no stabs for atoms that would not be in the symbol table
5261 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotIn
)
5263 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages
)
5265 if ( atom
->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel
)
5267 // no stabs for absolute symbols
5268 if ( atom
->definition() == ld::Atom::definitionAbsolute
)
5270 // no stabs for .eh atoms
5271 if ( atom
->contentType() == ld::Atom::typeCFI
)
5273 // no stabs for string literal atoms
5274 if ( atom
->contentType() == ld::Atom::typeCString
)
5276 // no stabs for kernel dtrace probes
5277 if ( (_options
.outputKind() == Options::kStaticExecutable
) && (strncmp(atom
->name(), "__dtrace_probe$", 15) == 0) )
5279 const ld::File
* file
= atom
->file();
5280 if ( file
!= NULL
) {
5281 if ( file
!= objFile
) {
5282 objFileHasDwarf
= false;
5283 objFileHasStabs
= false;
5284 objFile
= dynamic_cast<const ld::relocatable::File
*>(file
);
5285 if ( objFile
!= NULL
) {
5286 switch ( objFile
->debugInfo() ) {
5287 case ld::relocatable::File::kDebugInfoNone
:
5289 case ld::relocatable::File::kDebugInfoDwarf
:
5290 objFileHasDwarf
= true;
5292 case ld::relocatable::File::kDebugInfoStabs
:
5293 case ld::relocatable::File::kDebugInfoStabsUUID
:
5294 objFileHasStabs
= true;
5299 if ( objFileHasDwarf
)
5300 atomsNeedingDebugNotes
.push_back(atom
);
5301 if ( objFileHasStabs
) {
5302 atomsWithStabs
.insert(atom
);
5303 if ( objFile
!= NULL
)
5304 filesSeenWithStabs
.insert(objFile
);
5310 // sort by file ordinal then atom ordinal
5311 std::sort(atomsNeedingDebugNotes
.begin(), atomsNeedingDebugNotes
.end(), DebugNoteSorter());
5313 // <rdar://problem/17689030> Add -add_ast_path option to linker which add N_AST stab entry to output
5314 const std::vector
<const char*>& astPaths
= _options
.astFilePaths();
5315 for (std::vector
<const char*>::const_iterator it
=astPaths
.begin(); it
!= astPaths
.end(); it
++) {
5316 const char* path
= *it
;
5318 ld::relocatable::File::Stab astStab
;
5319 astStab
.atom
= NULL
;
5320 astStab
.type
= N_AST
;
5323 astStab
.value
= fileModTime(path
);
5324 astStab
.string
= path
;
5325 state
.stabs
.push_back(astStab
);
5328 // synthesize "debug notes" and add them to master stabs vector
5329 const char* dirPath
= NULL
;
5330 const char* filename
= NULL
;
5331 bool wroteStartSO
= false;
5332 state
.stabs
.reserve(atomsNeedingDebugNotes
.size()*4);
5333 std::unordered_set
<const char*, CStringHash
, CStringEquals
> seenFiles
;
5334 for (std::vector
<const ld::Atom
*>::iterator it
=atomsNeedingDebugNotes
.begin(); it
!= atomsNeedingDebugNotes
.end(); it
++) {
5335 const ld::Atom
* atom
= *it
;
5336 const ld::File
* atomFile
= atom
->file();
5337 const ld::relocatable::File
* atomObjFile
= dynamic_cast<const ld::relocatable::File
*>(atomFile
);
5338 //fprintf(stderr, "debug note for %s\n", atom->name());
5339 const char* newPath
= atom
->translationUnitSource();
5340 if ( newPath
!= NULL
) {
5341 const char* newDirPath
;
5342 const char* newFilename
;
5343 const char* lastSlash
= strrchr(newPath
, '/');
5344 if ( lastSlash
== NULL
)
5346 newFilename
= lastSlash
+1;
5347 char* temp
= strdup(newPath
);
5349 // gdb like directory SO's to end in '/', but dwarf DW_AT_comp_dir usually does not have trailing '/'
5350 temp
[lastSlash
-newPath
+1] = '\0';
5351 // need SO's whenever the translation unit source file changes
5352 if ( (filename
== NULL
) || (strcmp(newFilename
,filename
) != 0) || (strcmp(newDirPath
,dirPath
) != 0)) {
5353 if ( filename
!= NULL
) {
5354 // translation unit change, emit ending SO
5355 ld::relocatable::File::Stab endFileStab
;
5356 endFileStab
.atom
= NULL
;
5357 endFileStab
.type
= N_SO
;
5358 endFileStab
.other
= 1;
5359 endFileStab
.desc
= 0;
5360 endFileStab
.value
= 0;
5361 endFileStab
.string
= "";
5362 state
.stabs
.push_back(endFileStab
);
5364 // new translation unit, emit start SO's
5365 ld::relocatable::File::Stab dirPathStab
;
5366 dirPathStab
.atom
= NULL
;
5367 dirPathStab
.type
= N_SO
;
5368 dirPathStab
.other
= 0;
5369 dirPathStab
.desc
= 0;
5370 dirPathStab
.value
= 0;
5371 dirPathStab
.string
= newDirPath
;
5372 state
.stabs
.push_back(dirPathStab
);
5373 ld::relocatable::File::Stab fileStab
;
5374 fileStab
.atom
= NULL
;
5375 fileStab
.type
= N_SO
;
5379 fileStab
.string
= newFilename
;
5380 state
.stabs
.push_back(fileStab
);
5381 // Synthesize OSO for start of file
5382 ld::relocatable::File::Stab objStab
;
5383 objStab
.atom
= NULL
;
5384 objStab
.type
= N_OSO
;
5385 // <rdar://problem/6337329> linker should put cpusubtype in n_sect field of nlist entry for N_OSO debug note entries
5386 objStab
.other
= atomFile
->cpuSubType();
5388 if ( atomObjFile
!= NULL
) {
5389 objStab
.string
= assureFullPath(atomObjFile
->debugInfoPath());
5390 objStab
.value
= atomObjFile
->debugInfoModificationTime();
5393 objStab
.string
= assureFullPath(atomFile
->path());
5394 objStab
.value
= atomFile
->modificationTime();
5396 state
.stabs
.push_back(objStab
);
5397 wroteStartSO
= true;
5398 // add the source file path to seenFiles so it does not show up in SOLs
5399 seenFiles
.insert(newFilename
);
5401 asprintf(&fullFilePath
, "%s%s", newDirPath
, newFilename
);
5402 // add both leaf path and full path
5403 seenFiles
.insert(fullFilePath
);
5405 filename
= newFilename
;
5406 dirPath
= newDirPath
;
5407 if ( atom
->section().type() == ld::Section::typeCode
) {
5408 // Synthesize BNSYM and start FUN stabs
5409 ld::relocatable::File::Stab beginSym
;
5410 beginSym
.atom
= atom
;
5411 beginSym
.type
= N_BNSYM
;
5415 beginSym
.string
= "";
5416 state
.stabs
.push_back(beginSym
);
5417 ld::relocatable::File::Stab startFun
;
5418 startFun
.atom
= atom
;
5419 startFun
.type
= N_FUN
;
5423 startFun
.string
= atom
->name();
5424 state
.stabs
.push_back(startFun
);
5425 // Synthesize any SOL stabs needed
5426 const char* curFile
= NULL
;
5427 for (ld::Atom::LineInfo::iterator lit
= atom
->beginLineInfo(); lit
!= atom
->endLineInfo(); ++lit
) {
5428 if ( lit
->fileName
!= curFile
) {
5429 if ( seenFiles
.count(lit
->fileName
) == 0 ) {
5430 seenFiles
.insert(lit
->fileName
);
5431 ld::relocatable::File::Stab sol
;
5437 sol
.string
= lit
->fileName
;
5438 state
.stabs
.push_back(sol
);
5440 curFile
= lit
->fileName
;
5443 // Synthesize end FUN and ENSYM stabs
5444 ld::relocatable::File::Stab endFun
;
5446 endFun
.type
= N_FUN
;
5451 state
.stabs
.push_back(endFun
);
5452 ld::relocatable::File::Stab endSym
;
5454 endSym
.type
= N_ENSYM
;
5459 state
.stabs
.push_back(endSym
);
5462 ld::relocatable::File::Stab globalsStab
;
5463 const char* name
= atom
->name();
5464 if ( atom
->scope() == ld::Atom::scopeTranslationUnit
) {
5465 // Synthesize STSYM stab for statics
5466 globalsStab
.atom
= atom
;
5467 globalsStab
.type
= N_STSYM
;
5468 globalsStab
.other
= 1;
5469 globalsStab
.desc
= 0;
5470 globalsStab
.value
= 0;
5471 globalsStab
.string
= name
;
5472 state
.stabs
.push_back(globalsStab
);
5475 // Synthesize GSYM stab for other globals
5476 globalsStab
.atom
= atom
;
5477 globalsStab
.type
= N_GSYM
;
5478 globalsStab
.other
= 1;
5479 globalsStab
.desc
= 0;
5480 globalsStab
.value
= 0;
5481 globalsStab
.string
= name
;
5482 state
.stabs
.push_back(globalsStab
);
5488 if ( wroteStartSO
) {
5490 ld::relocatable::File::Stab endFileStab
;
5491 endFileStab
.atom
= NULL
;
5492 endFileStab
.type
= N_SO
;
5493 endFileStab
.other
= 1;
5494 endFileStab
.desc
= 0;
5495 endFileStab
.value
= 0;
5496 endFileStab
.string
= "";
5497 state
.stabs
.push_back(endFileStab
);
5500 // copy any stabs from .o files
5501 bool deadStripping
= _options
.deadCodeStrip();
5502 for (const ld::relocatable::File
* obj
: filesSeenWithStabs
) {
5503 const std::vector
<ld::relocatable::File::Stab
>* filesStabs
= obj
->stabs();
5504 if ( filesStabs
!= NULL
) {
5505 for (const ld::relocatable::File::Stab
& stab
: *filesStabs
) {
5506 // ignore stabs associated with atoms that were dead stripped or coalesced away
5507 if ( (stab
.atom
!= NULL
) && (atomsWithStabs
.count(stab
.atom
) == 0) )
5509 // <rdar://problem/8284718> Value of N_SO stabs should be address of first atom from translation unit
5510 if ( (stab
.type
== N_SO
) && (stab
.string
!= NULL
) && (stab
.string
[0] != '\0') ) {
5511 uint64_t lowestAtomAddress
= 0;
5512 const ld::Atom
* lowestAddressAtom
= NULL
;
5513 for (const ld::relocatable::File::Stab
& stab2
: *filesStabs
) {
5514 if ( stab2
.atom
== NULL
)
5516 // skip over atoms that were dead stripped
5517 if ( deadStripping
&& !stab2
.atom
->live() )
5519 if ( stab2
.atom
->coalescedAway() )
5521 uint64_t atomAddr
= stab2
.atom
->objectAddress();
5522 if ( (lowestAddressAtom
== NULL
) || (atomAddr
< lowestAtomAddress
) ) {
5523 lowestAddressAtom
= stab2
.atom
;
5524 lowestAtomAddress
= atomAddr
;
5527 ld::relocatable::File::Stab altStab
= stab
;
5528 altStab
.atom
= lowestAddressAtom
;
5529 state
.stabs
.push_back(altStab
);
5532 state
.stabs
.push_back(stab
);