// Source: apple/ld64.git — src/ld/OutputFile.cpp
// (mirrored from git.saurik.com, blob 1680ade089e8c8fa6221d187327a8924c9239cb8)
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-*
2 *
3 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <stdlib.h>
27 #include <sys/types.h>
28 #include <sys/stat.h>
29 #include <sys/mman.h>
30 #include <sys/sysctl.h>
31 #include <sys/param.h>
32 #include <sys/mount.h>
33 #include <fcntl.h>
34 #include <errno.h>
35 #include <limits.h>
36 #include <unistd.h>
37 #include <mach/mach_time.h>
38 #include <mach/vm_statistics.h>
39 #include <mach/mach_init.h>
40 #include <mach/mach_host.h>
41 #include <uuid/uuid.h>
42 #include <dlfcn.h>
43 #include <mach-o/dyld.h>
44 #include <mach-o/fat.h>
45
46 #include <string>
47 #include <map>
48 #include <set>
49 #include <string>
50 #include <vector>
51 #include <list>
52 #include <algorithm>
53 #include <unordered_set>
54
55 #include <CommonCrypto/CommonDigest.h>
56 #include <AvailabilityMacros.h>
57
58 #include "MachOTrie.hpp"
59
60 #include "Options.h"
61
62 #include "OutputFile.h"
63 #include "Architectures.hpp"
64 #include "HeaderAndLoadCommands.hpp"
65 #include "LinkEdit.hpp"
66 #include "LinkEditClassic.hpp"
67
68
69 namespace ld {
70 namespace tool {
71
// Counters for the ARM64 ADRP linker-optimization-hint pass.
// NOTE(review): meanings inferred from the names (not-applicable / replaced
// with NOP / left unchanged) — confirm against the LOH code that updates them.
uint32_t sAdrpNA = 0;
uint32_t sAdrpNoped = 0;
uint32_t sAdrpNotNoped = 0;
75
76
//
// Construct an OutputFile whose behavior is driven entirely by the
// command-line Options.  All section pointers and linkedit atom pointers
// start NULL; they are filled in by addLoadCommands()/addLinkEdit().
// The _has* flags record which linkedit pieces this output kind needs:
// compressed dyld info replaces classic local/external relocations,
// and section relocations / optimization hints exist only in .o files.
//
OutputFile::OutputFile(const Options& opts)
	:
		usesWeakExternalSymbols(false), overridesWeakExternalSymbols(false),
		_noReExportedDylibs(false), pieDisabled(false), hasDataInCode(false),
		// final sections (created later)
		headerAndLoadCommandsSection(NULL),
		rebaseSection(NULL), bindingSection(NULL), weakBindingSection(NULL),
		lazyBindingSection(NULL), exportSection(NULL),
		splitSegInfoSection(NULL), functionStartsSection(NULL),
		dataInCodeSection(NULL), optimizationHintsSection(NULL), dependentDRsSection(NULL),
		symbolTableSection(NULL), stringPoolSection(NULL),
		localRelocationsSection(NULL), externalRelocationsSection(NULL),
		sectionRelocationsSection(NULL),
		indirectSymbolTableSection(NULL),
		_options(opts),
		// which linkedit content this output needs
		_hasDyldInfo(opts.makeCompressedDyldInfo()),
		_hasSymbolTable(true),
		_hasSectionRelocations(opts.outputKind() == Options::kObjectFile),
		_hasSplitSegInfo(opts.sharedRegionEligible()),
		_hasFunctionStartsInfo(opts.addFunctionStarts()),
		_hasDataInCodeInfo(opts.addDataInCodeInfo()),
		_hasDependentDRInfo(opts.needsDependentDRInfo()),
		_hasDynamicSymbolTable(true),
		// classic relocations are used only when compressed dyld info is not
		_hasLocalRelocations(!opts.makeCompressedDyldInfo()),
		_hasExternalRelocations(!opts.makeCompressedDyldInfo()),
		_hasOptimizationHints(opts.outputKind() == Options::kObjectFile),
		// encryption range and symbol-table partition, filled in during layout
		_encryptedTEXTstartOffset(0),
		_encryptedTEXTendOffset(0),
		_localSymbolsStartIndex(0),
		_localSymbolsCount(0),
		_globalSymbolsStartIndex(0),
		_globalSymbolsCount(0),
		_importSymbolsStartIndex(0),
		_importSymbolsCount(0),
		// linkedit content atoms (created by addLinkEdit())
		_sectionsRelocationsAtom(NULL),
		_localRelocsAtom(NULL),
		_externalRelocsAtom(NULL),
		_symbolTableAtom(NULL),
		_indirectSymbolTableAtom(NULL),
		_rebasingInfoAtom(NULL),
		_bindingInfoAtom(NULL),
		_lazyBindingInfoAtom(NULL),
		_weakBindingInfoAtom(NULL),
		_exportInfoAtom(NULL),
		_splitSegInfoAtom(NULL),
		_functionStartsAtom(NULL),
		_dataInCodeAtom(NULL),
		_dependentDRInfoAtom(NULL),
		_optimizationHintsAtom(NULL)
{
}
127
128 void OutputFile::dumpAtomsBySection(ld::Internal& state, bool printAtoms)
129 {
130 fprintf(stderr, "SORTED:\n");
131 for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
132 fprintf(stderr, "final section %p %s/%s %s start addr=0x%08llX, size=0x%08llX, alignment=%02d, fileOffset=0x%08llX\n",
133 (*it), (*it)->segmentName(), (*it)->sectionName(), (*it)->isSectionHidden() ? "(hidden)" : "",
134 (*it)->address, (*it)->size, (*it)->alignment, (*it)->fileOffset);
135 if ( printAtoms ) {
136 std::vector<const ld::Atom*>& atoms = (*it)->atoms;
137 for (std::vector<const ld::Atom*>::iterator ait = atoms.begin(); ait != atoms.end(); ++ait) {
138 fprintf(stderr, " %p (0x%04llX) %s\n", *ait, (*ait)->size(), (*ait)->name());
139 }
140 }
141 }
142 fprintf(stderr, "DYLIBS:\n");
143 for (std::vector<ld::dylib::File*>::iterator it=state.dylibs.begin(); it != state.dylibs.end(); ++it )
144 fprintf(stderr, " %s\n", (*it)->installPath());
145 }
146
// Top-level driver that turns the resolved atom graph in 'state' into the
// output file on disk.  The steps are strictly ordered: sizes and file
// offsets must be final before atom addresses are assigned, and all
// linkedit content must be generated before bytes are written out.
void OutputFile::write(ld::Internal& state)
{
	this->buildDylibOrdinalMapping(state);
	this->addLoadCommands(state);
	this->addLinkEdit(state);
	state.setSectionSizesAndAlignments();
	this->setLoadCommandsPadding(state);
	_fileSize = state.assignFileOffsets();
	this->assignAtomAddresses(state);
	this->synthesizeDebugNotes(state);
	this->buildSymbolTable(state);
	this->generateLinkEditInfo(state);
	this->makeSplitSegInfo(state);
	// linkedit sizes are known only now, so recompute its addresses/offsets
	this->updateLINKEDITAddresses(state);
	//this->dumpAtomsBySection(state, false);
	this->writeOutputFile(state);
	this->writeMapFile(state);
}
165
// Locate the segment containing 'addr' by scanning the ordered section list
// (sections are grouped by segment).  On success fills in the segment's
// [*start, *end) address range and zero-based *index and returns true.
// NOTE(review): the range test only fires when iteration crosses into the
// *next* segment, so an address inside the final segment is never matched
// (returns false) — confirm callers only probe earlier segments.
bool OutputFile::findSegment(ld::Internal& state, uint64_t addr, uint64_t* start, uint64_t* end, uint32_t* index)
{
	uint32_t segIndex = 0;
	ld::Internal::FinalSection* segFirstSection = NULL;
	ld::Internal::FinalSection* lastSection = NULL;
	for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		ld::Internal::FinalSection* sect = *it;
		// a change in segment name marks the start of a new segment
		if ( (segFirstSection == NULL ) || strcmp(segFirstSection->segmentName(), sect->segmentName()) != 0 ) {
			if ( segFirstSection != NULL ) {
				//fprintf(stderr, "findSegment(0x%llX) seg changed to %s\n", addr, sect->segmentName());
				// test the segment just completed: first section start .. last section end
				if ( (addr >= segFirstSection->address) && (addr < lastSection->address+lastSection->size) ) {
					*start = segFirstSection->address;
					*end = lastSection->address+lastSection->size;
					*index = segIndex;
					return true;
				}
				++segIndex;
			}
			segFirstSection = sect;
		}
		lastSection = sect;
	}
	return false;
}
190
191
192 void OutputFile::assignAtomAddresses(ld::Internal& state)
193 {
194 const bool log = false;
195 if ( log ) fprintf(stderr, "assignAtomAddresses()\n");
196 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
197 ld::Internal::FinalSection* sect = *sit;
198 if ( log ) fprintf(stderr, " section=%s/%s\n", sect->segmentName(), sect->sectionName());
199 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
200 const ld::Atom* atom = *ait;
201 switch ( sect-> type() ) {
202 case ld::Section::typeImportProxies:
203 // want finalAddress() of all proxy atoms to be zero
204 (const_cast<ld::Atom*>(atom))->setSectionStartAddress(0);
205 break;
206 case ld::Section::typeAbsoluteSymbols:
207 // want finalAddress() of all absolute atoms to be value of abs symbol
208 (const_cast<ld::Atom*>(atom))->setSectionStartAddress(0);
209 break;
210 case ld::Section::typeLinkEdit:
211 // linkedit layout is assigned later
212 break;
213 default:
214 (const_cast<ld::Atom*>(atom))->setSectionStartAddress(sect->address);
215 if ( log ) fprintf(stderr, " atom=%p, addr=0x%08llX, name=%s\n", atom, atom->finalAddress(), atom->name());
216 break;
217 }
218 }
219 }
220 }
221
// Generate all linkedit content (dyld info, split-seg, function starts,
// data-in-code, symbol tables, relocations), then — now that each linkedit
// atom's size is known — lay out the linkedit sections contiguously,
// assigning section offsets/addresses/file offsets, and recompute _fileSize.
void OutputFile::updateLINKEDITAddresses(ld::Internal& state)
{
	if ( _options.makeCompressedDyldInfo() ) {
		// build dyld rebasing info
		assert(_rebasingInfoAtom != NULL);
		_rebasingInfoAtom->encode();

		// build dyld binding info
		assert(_bindingInfoAtom != NULL);
		_bindingInfoAtom->encode();

		// build dyld lazy binding info
		assert(_lazyBindingInfoAtom != NULL);
		_lazyBindingInfoAtom->encode();

		// build dyld weak binding info
		assert(_weakBindingInfoAtom != NULL);
		_weakBindingInfoAtom->encode();

		// build dyld export info
		assert(_exportInfoAtom != NULL);
		_exportInfoAtom->encode();
	}

	if ( _options.sharedRegionEligible() ) {
		// build split seg info
		assert(_splitSegInfoAtom != NULL);
		_splitSegInfoAtom->encode();
	}

	if ( _options.addFunctionStarts() ) {
		// build function starts info
		assert(_functionStartsAtom != NULL);
		_functionStartsAtom->encode();
	}

	if ( _options.addDataInCodeInfo() ) {
		// build data-in-code info
		assert(_dataInCodeAtom != NULL);
		_dataInCodeAtom->encode();
	}

	if ( _hasOptimizationHints ) {
		// build linker-optimization-hint info
		assert(_optimizationHintsAtom != NULL);
		_optimizationHintsAtom->encode();
	}

	if ( _options.needsDependentDRInfo() ) {
		// build dependent dylib DR info
		assert(_dependentDRInfoAtom != NULL);
		_dependentDRInfoAtom->encode();
	}

	// build classic symbol table
	assert(_symbolTableAtom != NULL);
	_symbolTableAtom->encode();
	assert(_indirectSymbolTableAtom != NULL);
	_indirectSymbolTableAtom->encode();

	// add relocations to .o files
	if ( _options.outputKind() == Options::kObjectFile ) {
		assert(_sectionsRelocationsAtom != NULL);
		_sectionsRelocationsAtom->encode();
	}

	if ( ! _options.makeCompressedDyldInfo() ) {
		// build external relocations
		assert(_externalRelocsAtom != NULL);
		_externalRelocsAtom->encode();
		// build local relocations
		assert(_localRelocsAtom != NULL);
		_localRelocsAtom->encode();
	}

	// update address and file offsets now that linkedit content has been generated
	uint64_t curLinkEditAddress = 0;
	uint64_t curLinkEditfileOffset = 0;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() != ld::Section::typeLinkEdit )
			continue;
		// first linkedit section seeds the running address/offset cursors
		if ( curLinkEditAddress == 0 ) {
			curLinkEditAddress = sect->address;
			curLinkEditfileOffset = sect->fileOffset;
		}
		uint16_t maxAlignment = 0;
		uint64_t offset = 0;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			//fprintf(stderr, "setting linkedit atom offset for %s\n", atom->name());
			if ( atom->alignment().powerOf2 > maxAlignment )
				maxAlignment = atom->alignment().powerOf2;
			// calculate section offset for this atom
			// advance 'offset' to satisfy the atom's (alignment, modulus) requirement
			uint64_t alignment = 1 << atom->alignment().powerOf2;
			uint64_t currentModulus = (offset % alignment);
			uint64_t requiredModulus = atom->alignment().modulus;
			if ( currentModulus != requiredModulus ) {
				if ( requiredModulus > currentModulus )
					offset += requiredModulus-currentModulus;
				else
					offset += requiredModulus+alignment-currentModulus;
			}
			(const_cast<ld::Atom*>(atom))->setSectionOffset(offset);
			(const_cast<ld::Atom*>(atom))->setSectionStartAddress(curLinkEditAddress);
			offset += atom->size();
		}
		sect->size = offset;
		// section alignment is that of a contained atom with the greatest alignment
		sect->alignment = maxAlignment;
		sect->address = curLinkEditAddress;
		sect->fileOffset = curLinkEditfileOffset;
		curLinkEditAddress += sect->size;
		curLinkEditfileOffset += sect->size;
	}

	// linkedit is last in the file, so total size ends at its last section
	_fileSize = state.sections.back()->fileOffset + state.sections.back()->size;
}
340
341
// Compute how much padding to place after the load commands, and grow the
// header/load-commands section by that amount.
void OutputFile::setLoadCommandsPadding(ld::Internal& state)
{
	// In other sections, any extra space is put and end of segment.
	// In __TEXT segment, any extra space is put after load commands to allow post-processing of load commands
	// Do a reverse layout of __TEXT segment to determine padding size and adjust section size
	uint64_t paddingSize = 0;
	switch ( _options.outputKind() ) {
		case Options::kDyld:
			// dyld itself has special padding requirements. We want the beginning __text section to start at a stable address
			assert(strcmp(state.sections[1]->sectionName(),"__text") == 0);
			state.sections[1]->alignment = 12; // page align __text
			break;
		case Options::kObjectFile:
			// mach-o .o files need no padding between load commands and first section
			// but leave enough room that the object file could be signed
			paddingSize = 32;
			break;
		case Options::kPreload:
			// mach-o MH_PRELOAD files need no padding between load commands and first section
			paddingSize = 0;
			// NOTE(review): there is no 'break' here, so control falls through into
			// the default case below, which recomputes paddingSize — confirm this
			// fall-through is intentional.
		default:
			// work backwards from end of segment and lay out sections so that extra room goes to padding atom
			uint64_t addr = 0;
			uint64_t textSegPageSize = _options.segPageSize("__TEXT");
			// shared-cache-eligible dylibs on iOS 8+ keep 4KB packing inside __TEXT
			if ( _options.sharedRegionEligible() && (_options.iOSVersionMin() >= ld::iOS_8_0) && (textSegPageSize == 0x4000) )
				textSegPageSize = 0x1000;
			for (std::vector<ld::Internal::FinalSection*>::reverse_iterator it = state.sections.rbegin(); it != state.sections.rend(); ++it) {
				ld::Internal::FinalSection* sect = *it;
				if ( strcmp(sect->segmentName(), "__TEXT") != 0 )
					continue;
				if ( sect == headerAndLoadCommandsSection ) {
					addr -= headerAndLoadCommandsSection->size;
					paddingSize = addr % textSegPageSize;
					break;
				}
				addr -= sect->size;
				addr = addr & (0 - (1 << sect->alignment));
			}

			// if command line requires more padding than this
			uint32_t minPad = _options.minimumHeaderPad();
			if ( _options.maxMminimumHeaderPad() ) {
				// -headerpad_max_install_names means there should be room for every path load command to grow to 1024 bytes (MAXPATHLEN)
				uint32_t altMin = _dylibsToLoad.size() * MAXPATHLEN;
				if ( _options.outputKind() == Options::kDynamicLibrary )
					altMin += MAXPATHLEN;	// room for this dylib's own install name to grow
				if ( altMin > minPad )
					minPad = altMin;
			}
			if ( paddingSize < minPad ) {
				// round the shortfall up to whole pages
				int extraPages = (minPad - paddingSize + _options.segmentAlignment() - 1)/_options.segmentAlignment();
				paddingSize += extraPages * _options.segmentAlignment();
			}

			if ( _options.makeEncryptable() ) {
				// load commands must be on a separate non-encrypted page
				int loadCommandsPage = (headerAndLoadCommandsSection->size + minPad)/_options.segmentAlignment();
				int textPage = (headerAndLoadCommandsSection->size + paddingSize)/_options.segmentAlignment();
				if ( loadCommandsPage == textPage ) {
					paddingSize += _options.segmentAlignment();
					textPage += 1;
				}
				// remember start for later use by load command
				_encryptedTEXTstartOffset = textPage*_options.segmentAlignment();
			}
			break;
	}
	// add padding to size of section
	headerAndLoadCommandsSection->size += paddingSize;
}
412
413
414 uint64_t OutputFile::pageAlign(uint64_t addr)
415 {
416 const uint64_t alignment = _options.segmentAlignment();
417 return ((addr+alignment-1) & (-alignment));
418 }
419
420 uint64_t OutputFile::pageAlign(uint64_t addr, uint64_t pageSize)
421 {
422 return ((addr+pageSize-1) & (-pageSize));
423 }
424
425 static const char* makeName(const ld::Atom& atom)
426 {
427 static char buffer[4096];
428 switch ( atom.symbolTableInclusion() ) {
429 case ld::Atom::symbolTableNotIn:
430 case ld::Atom::symbolTableNotInFinalLinkedImages:
431 sprintf(buffer, "%s@0x%08llX", atom.name(), atom.objectAddress());
432 break;
433 case ld::Atom::symbolTableIn:
434 case ld::Atom::symbolTableInAndNeverStrip:
435 case ld::Atom::symbolTableInAsAbsolute:
436 case ld::Atom::symbolTableInWithRandomAutoStripLabel:
437 strlcpy(buffer, atom.name(), 4096);
438 break;
439 }
440 return buffer;
441 }
442
443 static const char* referenceTargetAtomName(ld::Internal& state, const ld::Fixup* ref)
444 {
445 switch ( ref->binding ) {
446 case ld::Fixup::bindingNone:
447 return "NO BINDING";
448 case ld::Fixup::bindingByNameUnbound:
449 return (char*)(ref->u.target);
450 case ld::Fixup::bindingByContentBound:
451 case ld::Fixup::bindingDirectlyBound:
452 return makeName(*((ld::Atom*)(ref->u.target)));
453 case ld::Fixup::bindingsIndirectlyBound:
454 return makeName(*state.indirectBindingTable[ref->u.bindingIndex]);
455 }
456 return "BAD BINDING";
457 }
458
459 bool OutputFile::targetIsThumb(ld::Internal& state, const ld::Fixup* fixup)
460 {
461 switch ( fixup->binding ) {
462 case ld::Fixup::bindingByContentBound:
463 case ld::Fixup::bindingDirectlyBound:
464 return fixup->u.target->isThumb();
465 case ld::Fixup::bindingsIndirectlyBound:
466 return state.indirectBindingTable[fixup->u.bindingIndex]->isThumb();
467 default:
468 break;
469 }
470 throw "unexpected binding";
471 }
472
473 uint64_t OutputFile::addressOf(const ld::Internal& state, const ld::Fixup* fixup, const ld::Atom** target)
474 {
475 if ( !_options.makeCompressedDyldInfo() ) {
476 // For external relocations the classic mach-o format
477 // has addend only stored in the content. That means
478 // that the address of the target is not used.
479 if ( fixup->contentAddendOnly )
480 return 0;
481 }
482 switch ( fixup->binding ) {
483 case ld::Fixup::bindingNone:
484 throw "unexpected bindingNone";
485 case ld::Fixup::bindingByNameUnbound:
486 throw "unexpected bindingByNameUnbound";
487 case ld::Fixup::bindingByContentBound:
488 case ld::Fixup::bindingDirectlyBound:
489 *target = fixup->u.target;
490 return (*target)->finalAddress();
491 case ld::Fixup::bindingsIndirectlyBound:
492 *target = state.indirectBindingTable[fixup->u.bindingIndex];
493 #ifndef NDEBUG
494 if ( ! (*target)->finalAddressMode() ) {
495 throwf("reference to symbol (which has not been assigned an address) %s", (*target)->name());
496 }
497 #endif
498 return (*target)->finalAddress();
499 }
500 throw "unexpected binding";
501 }
502
503 uint64_t OutputFile::sectionOffsetOf(const ld::Internal& state, const ld::Fixup* fixup)
504 {
505 const ld::Atom* target = NULL;
506 switch ( fixup->binding ) {
507 case ld::Fixup::bindingNone:
508 throw "unexpected bindingNone";
509 case ld::Fixup::bindingByNameUnbound:
510 throw "unexpected bindingByNameUnbound";
511 case ld::Fixup::bindingByContentBound:
512 case ld::Fixup::bindingDirectlyBound:
513 target = fixup->u.target;
514 break;
515 case ld::Fixup::bindingsIndirectlyBound:
516 target = state.indirectBindingTable[fixup->u.bindingIndex];
517 break;
518 }
519 assert(target != NULL);
520
521 uint64_t targetAddress = target->finalAddress();
522 for (std::vector<ld::Internal::FinalSection*>::const_iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
523 const ld::Internal::FinalSection* sect = *it;
524 if ( (sect->address <= targetAddress) && (targetAddress < (sect->address+sect->size)) )
525 return targetAddress - sect->address;
526 }
527 throw "section not found for section offset";
528 }
529
530
531
532 uint64_t OutputFile::tlvTemplateOffsetOf(const ld::Internal& state, const ld::Fixup* fixup)
533 {
534 const ld::Atom* target = NULL;
535 switch ( fixup->binding ) {
536 case ld::Fixup::bindingNone:
537 throw "unexpected bindingNone";
538 case ld::Fixup::bindingByNameUnbound:
539 throw "unexpected bindingByNameUnbound";
540 case ld::Fixup::bindingByContentBound:
541 case ld::Fixup::bindingDirectlyBound:
542 target = fixup->u.target;
543 break;
544 case ld::Fixup::bindingsIndirectlyBound:
545 target = state.indirectBindingTable[fixup->u.bindingIndex];
546 break;
547 }
548 assert(target != NULL);
549
550 for (std::vector<ld::Internal::FinalSection*>::const_iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
551 const ld::Internal::FinalSection* sect = *it;
552 switch ( sect->type() ) {
553 case ld::Section::typeTLVInitialValues:
554 case ld::Section::typeTLVZeroFill:
555 return target->finalAddress() - sect->address;
556 default:
557 break;
558 }
559 }
560 throw "section not found for tlvTemplateOffsetOf";
561 }
562
563 void OutputFile::printSectionLayout(ld::Internal& state)
564 {
565 // show layout of final image
566 fprintf(stderr, "final section layout:\n");
567 for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
568 if ( (*it)->isSectionHidden() )
569 continue;
570 fprintf(stderr, " %s/%s addr=0x%08llX, size=0x%08llX, fileOffset=0x%08llX, type=%d\n",
571 (*it)->segmentName(), (*it)->sectionName(),
572 (*it)->address, (*it)->size, (*it)->fileOffset, (*it)->type());
573 }
574 }
575
576
577 void OutputFile::rangeCheck8(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
578 {
579 if ( (displacement > 127) || (displacement < -128) ) {
580 // show layout of final image
581 printSectionLayout(state);
582
583 const ld::Atom* target;
584 throwf("8-bit reference out of range (%lld max is +/-127B): from %s (0x%08llX) to %s (0x%08llX)",
585 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
586 addressOf(state, fixup, &target));
587 }
588 }
589
590 void OutputFile::rangeCheck16(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
591 {
592 const int64_t thirtyTwoKLimit = 0x00007FFF;
593 if ( (displacement > thirtyTwoKLimit) || (displacement < (-thirtyTwoKLimit)) ) {
594 // show layout of final image
595 printSectionLayout(state);
596
597 const ld::Atom* target;
598 throwf("16-bit reference out of range (%lld max is +/-32KB): from %s (0x%08llX) to %s (0x%08llX)",
599 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
600 addressOf(state, fixup, &target));
601 }
602 }
603
604 void OutputFile::rangeCheckBranch32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
605 {
606 const int64_t twoGigLimit = 0x7FFFFFFF;
607 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
608 // show layout of final image
609 printSectionLayout(state);
610
611 const ld::Atom* target;
612 throwf("32-bit branch out of range (%lld max is +/-2GB): from %s (0x%08llX) to %s (0x%08llX)",
613 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
614 addressOf(state, fixup, &target));
615 }
616 }
617
618
619 void OutputFile::rangeCheckAbsolute32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
620 {
621 const int64_t fourGigLimit = 0xFFFFFFFF;
622 if ( displacement > fourGigLimit ) {
623 // <rdar://problem/9610466> cannot enforce 32-bit range checks on 32-bit archs because assembler loses sign information
624 // .long _foo - 0xC0000000
625 // is encoded in mach-o the same as:
626 // .long _foo + 0x40000000
627 // so if _foo lays out to 0xC0000100, the first is ok, but the second is not.
628 if ( (_options.architecture() == CPU_TYPE_ARM) || (_options.architecture() == CPU_TYPE_I386) ) {
629 // Unlikely userland code does funky stuff like this, so warn for them, but not warn for -preload or -static
630 if ( (_options.outputKind() != Options::kPreload) && (_options.outputKind() != Options::kStaticExecutable) ) {
631 warning("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
632 displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), displacement);
633 }
634 return;
635 }
636 // show layout of final image
637 printSectionLayout(state);
638
639 const ld::Atom* target;
640 if ( fixup->binding == ld::Fixup::bindingNone )
641 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
642 displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), displacement);
643 else
644 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to %s (0x%08llX)",
645 displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), referenceTargetAtomName(state, fixup),
646 addressOf(state, fixup, &target));
647 }
648 }
649
650
651 void OutputFile::rangeCheckRIP32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
652 {
653 const int64_t twoGigLimit = 0x7FFFFFFF;
654 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
655 // show layout of final image
656 printSectionLayout(state);
657
658 const ld::Atom* target;
659 throwf("32-bit RIP relative reference out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
660 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
661 addressOf(state, fixup, &target));
662 }
663 }
664
665 void OutputFile::rangeCheckARM12(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
666 {
667 if ( (displacement > 4092LL) || (displacement < (-4092LL)) ) {
668 // show layout of final image
669 printSectionLayout(state);
670
671 const ld::Atom* target;
672 throwf("ARM ldr 12-bit displacement out of range (%lld max is +/-4096B): from %s (0x%08llX) to %s (0x%08llX)",
673 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
674 addressOf(state, fixup, &target));
675 }
676 }
677
678 bool OutputFile::checkArmBranch24Displacement(int64_t displacement)
679 {
680 return ( (displacement < 33554428LL) && (displacement > (-33554432LL)) );
681 }
682
683 void OutputFile::rangeCheckARMBranch24(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
684 {
685 if ( checkArmBranch24Displacement(displacement) )
686 return;
687
688 // show layout of final image
689 printSectionLayout(state);
690
691 const ld::Atom* target;
692 throwf("b/bl/blx ARM branch out of range (%lld max is +/-32MB): from %s (0x%08llX) to %s (0x%08llX)",
693 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
694 addressOf(state, fixup, &target));
695 }
696
697 bool OutputFile::checkThumbBranch22Displacement(int64_t displacement)
698 {
699 // thumb2 supports +/- 16MB displacement
700 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
701 if ( (displacement > 16777214LL) || (displacement < (-16777216LL)) ) {
702 return false;
703 }
704 }
705 else {
706 // thumb1 supports +/- 4MB displacement
707 if ( (displacement > 4194302LL) || (displacement < (-4194304LL)) ) {
708 return false;
709 }
710 }
711 return true;
712 }
713
714 void OutputFile::rangeCheckThumbBranch22(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
715 {
716 if ( checkThumbBranch22Displacement(displacement) )
717 return;
718
719 // show layout of final image
720 printSectionLayout(state);
721
722 const ld::Atom* target;
723 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
724 throwf("b/bl/blx thumb2 branch out of range (%lld max is +/-16MB): from %s (0x%08llX) to %s (0x%08llX)",
725 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
726 addressOf(state, fixup, &target));
727 }
728 else {
729 throwf("b/bl/blx thumb1 branch out of range (%lld max is +/-4MB): from %s (0x%08llX) to %s (0x%08llX)",
730 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
731 addressOf(state, fixup, &target));
732 }
733 }
734
735
736 void OutputFile::rangeCheckARM64Branch26(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
737 {
738 const int64_t bl_128MegLimit = 0x07FFFFFF;
739 if ( (displacement > bl_128MegLimit) || (displacement < (-bl_128MegLimit)) ) {
740 // show layout of final image
741 printSectionLayout(state);
742
743 const ld::Atom* target;
744 throwf("b(l) ARM64 branch out of range (%lld max is +/-128MB): from %s (0x%08llX) to %s (0x%08llX)",
745 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
746 addressOf(state, fixup, &target));
747 }
748 }
749
750 void OutputFile::rangeCheckARM64Page21(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
751 {
752 const int64_t adrp_4GigLimit = 0x100000000ULL;
753 if ( (displacement > adrp_4GigLimit) || (displacement < (-adrp_4GigLimit)) ) {
754 // show layout of final image
755 printSectionLayout(state);
756
757 const ld::Atom* target;
758 throwf("ARM64 ADRP out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
759 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
760 addressOf(state, fixup, &target));
761 }
762 }
763
764
// Endian-explicit scalar load/store helpers used when writing output content.
// They delegate to the LittleEndian/BigEndian template helpers.
// NOTE(review): 'loc' may not be naturally aligned for the access width —
// safety depends on the Endian helpers' implementation; confirm before use
// on strict-alignment targets.
uint16_t OutputFile::get16LE(uint8_t* loc) { return LittleEndian::get16(*(uint16_t*)loc); }
void     OutputFile::set16LE(uint8_t* loc, uint16_t value) { LittleEndian::set16(*(uint16_t*)loc, value); }

uint32_t OutputFile::get32LE(uint8_t* loc) { return LittleEndian::get32(*(uint32_t*)loc); }
void     OutputFile::set32LE(uint8_t* loc, uint32_t value) { LittleEndian::set32(*(uint32_t*)loc, value); }

uint64_t OutputFile::get64LE(uint8_t* loc) { return LittleEndian::get64(*(uint64_t*)loc); }
void     OutputFile::set64LE(uint8_t* loc, uint64_t value) { LittleEndian::set64(*(uint64_t*)loc, value); }

uint16_t OutputFile::get16BE(uint8_t* loc) { return BigEndian::get16(*(uint16_t*)loc); }
void     OutputFile::set16BE(uint8_t* loc, uint16_t value) { BigEndian::set16(*(uint16_t*)loc, value); }

uint32_t OutputFile::get32BE(uint8_t* loc) { return BigEndian::get32(*(uint32_t*)loc); }
void     OutputFile::set32BE(uint8_t* loc, uint32_t value) { BigEndian::set32(*(uint32_t*)loc, value); }

uint64_t OutputFile::get64BE(uint8_t* loc) { return BigEndian::get64(*(uint64_t*)loc); }
void     OutputFile::set64BE(uint8_t* loc, uint64_t value) { BigEndian::set64(*(uint64_t*)loc, value); }
782
783 #if SUPPORT_ARCH_arm64
784
// Encoding of the AArch64 NOP instruction (HINT #0).
static uint32_t makeNOP()
{
	const uint32_t aarch64Nop = 0xD503201F;
	return aarch64Nop;
}
788
// How a load's result is sign-extended into the destination register.
enum SignExtension { signedNot, signed32, signed64 };

// Decoded form of an AArch64 load/store instruction, used by the linker
// optimization hint pass to re-encode instructions.
struct LoadStoreInfo {
	uint32_t		reg;
	uint32_t		baseReg;
	uint32_t		offset;		// after scaling
	uint32_t		size;		// 1,2,4,8, or 16
	bool			isStore;
	bool			isFloat;	// if destReg is FP/SIMD
	SignExtension	signEx;		// if load is sign extended
};

// Encode an AArch64 LDR (literal) that loads from 'targetAddress' into
// info.reg, with the instruction located at 'instructionAddress'.
// The target must be word aligned and within +/-1MB of the instruction.
static uint32_t makeLDR_literal(const LoadStoreInfo& info, uint64_t targetAddress, uint64_t instructionAddress)
{
	const int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	assert((info.reg & 0xFFFFFFE0) == 0);
	assert((targetAddress & 0x3) == 0);
	assert((instructionAddress & 0x3) == 0);
	assert(!info.isStore);
	// imm19 field holds the word offset in bits [23:5]
	const uint32_t imm19 = (delta << 3) & 0x00FFFFE0;
	uint32_t opcode = 0;
	if ( info.size == 4 ) {
		if ( info.isFloat ) {
			assert(info.signEx == signedNot);
			opcode = 0x1C000000;	// LDR S-reg, literal
		}
		else {
			// LDRSW (sign-extend to 64) vs plain 32-bit LDR
			opcode = (info.signEx == signed64) ? 0x98000000 : 0x18000000;
		}
	}
	else if ( info.size == 8 ) {
		assert(info.signEx == signedNot);
		opcode = info.isFloat ? 0x5C000000 : 0x58000000;	// LDR D-reg vs LDR X-reg
	}
	else if ( info.size == 16 ) {
		assert(info.signEx == signedNot);
		opcode = 0x9C000000;	// LDR Q-reg, literal
	}
	else {
		assert(0 && "invalid load size for literal");
	}
	return (opcode | imm19 | info.reg);
}
837
// Encode an AArch64 ADR that computes 'targetAddress' into destReg, with the
// instruction located at 'instructionAddress'.  Target must be within +/-1MB.
static uint32_t makeADR(uint32_t destReg, uint64_t targetAddress, uint64_t instructionAddress)
{
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((instructionAddress & 0x3) == 0);
	const int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	// split the 21-bit signed offset: immlo = bits[1:0] -> [30:29], immhi = bits[20:2] -> [23:5]
	const uint32_t immhi = (delta & 0x001FFFFC) << 3;
	const uint32_t immlo = (delta & 0x00000003) << 29;
	return (0x10000000 | immhi | immlo | destReg);
}
850
// Encode an arm64 load/store using the unsigned, scaled 12-bit immediate
// addressing form (base encoding 0x39000000).  Fields assembled here:
//   bits 30-31: size    bit 26: V (FP/SIMD register file)
//   bits 22-23: opc     bits 10-21: imm12 = offset / access-size
//   bits 5-9: base register    bits 0-4: data register
// Asserts on offsets that are misaligned or (after scaling) too large,
// and on size/sign-extension combinations that have no encoding.
static uint32_t makeLoadOrStore(const LoadStoreInfo& info)
{
	uint32_t instruction = 0x39000000;
	if ( info.isFloat )
		instruction |= 0x04000000;		// V bit selects the FP/SIMD register file
	instruction |= info.reg;
	instruction |= (info.baseReg << 5);
	uint32_t sizeBits = 0;
	uint32_t opcBits = 0;
	uint32_t imm12Bits = 0;
	switch ( info.size ) {
		case 1:
			// byte access: the offset is used unscaled
			sizeBits = 0;
			imm12Bits = info.offset;
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				// opc selects a plain load (1), or a load sign-extended
				// into a 32-bit (3) or 64-bit (2) register
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						opcBits = 3;
						break;
					case signed64:
						opcBits = 2;
						break;
				}
			}
			break;
		case 2:
			// halfword access: offset scaled by 2
			sizeBits = 1;
			assert((info.offset % 2) == 0);
			imm12Bits = info.offset/2;
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						opcBits = 3;
						break;
					case signed64:
						opcBits = 2;
						break;
				}
			}
			break;
		case 4:
			// word access: offset scaled by 4
			sizeBits = 2;
			assert((info.offset % 4) == 0);
			imm12Bits = info.offset/4;
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						// a 32-bit value sign-extended to 32 bits is a no-op
						assert(0 && "cannot use signed32 with 32-bit load/store");
						break;
					case signed64:
						opcBits = 2;
						break;
				}
			}
			break;
		case 8:
			// doubleword access: offset scaled by 8; no sign-extend forms
			sizeBits = 3;
			assert((info.offset % 8) == 0);
			imm12Bits = info.offset/8;
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				opcBits = 1;
				assert(info.signEx == signedNot);
			}
			break;
		case 16:
			// 128-bit access is FP/SIMD only: size=00 with opc high bit set
			sizeBits = 0;
			assert((info.offset % 16) == 0);
			imm12Bits = info.offset/16;
			assert(info.isFloat);
			if ( info.isStore ) {
				opcBits = 2;
			}
			else {
				opcBits = 3;
			}
			break;
		default:
			assert(0 && "bad load/store size");
			break;
	}
	// imm12 is a 12-bit field; larger scaled offsets cannot be encoded
	assert(imm12Bits < 4096);
	return (instruction | (sizeBits << 30) | (opcBits << 22) | (imm12Bits << 10));
}
955
// Decode an arm64 load/store in the unsigned scaled 12-bit immediate form
// (instruction class 0x39000000) into 'info'.  Returns false for any other
// instruction, or for size/opc combinations that are not plain loads or
// stores.  The switch key combines the size field (bits 30-31) with the
// opc field (bits 22-23); info.isFloat (the V bit, bit 26) disambiguates
// the 128-bit FP/SIMD cases from the sign-extending byte loads.
static bool parseLoadOrStore(uint32_t instruction, LoadStoreInfo& info)
{
	if ( (instruction & 0x3B000000) != 0x39000000 )
		return false;
	info.isFloat = ( (instruction & 0x04000000) != 0 );
	info.reg = (instruction & 0x1F);
	info.baseReg = ((instruction>>5) & 0x1F);
	switch (instruction & 0xC0C00000) {
		case 0x00000000:
			// size=00 opc=00: byte store
			info.size = 1;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0x00400000:
			// size=00 opc=01: byte load
			info.size = 1;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		case 0x00800000:
			// size=00 opc=10: 128-bit FP/SIMD store if V set,
			// else byte load sign-extended to 64 bits
			if ( info.isFloat ) {
				info.size = 16;
				info.isStore = true;
				info.signEx = signedNot;
			}
			else {
				info.size = 1;
				info.isStore = false;
				info.signEx = signed64;
			}
			break;
		case 0x00C00000:
			// size=00 opc=11: 128-bit FP/SIMD load if V set,
			// else byte load sign-extended to 32 bits
			if ( info.isFloat ) {
				info.size = 16;
				info.isStore = false;
				info.signEx = signedNot;
			}
			else {
				info.size = 1;
				info.isStore = false;
				info.signEx = signed32;
			}
			break;
		case 0x40000000:
			// size=01 opc=00: halfword store
			info.size = 2;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0x40400000:
			// size=01 opc=01: halfword load
			info.size = 2;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		case 0x40800000:
			// size=01 opc=10: halfword load, sign-extended to 64 bits
			info.size = 2;
			info.isStore = false;
			info.signEx = signed64;
			break;
		case 0x40C00000:
			// size=01 opc=11: halfword load, sign-extended to 32 bits
			info.size = 2;
			info.isStore = false;
			info.signEx = signed32;
			break;
		case 0x80000000:
			// size=10 opc=00: word store
			info.size = 4;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0x80400000:
			// size=10 opc=01: word load
			info.size = 4;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		case 0x80800000:
			// size=10 opc=10: word load, sign-extended to 64 bits
			// NOTE(review): with V set this bit pattern has no FP meaning;
			// the parser still reports a signed64 word load — confirm such
			// encodings cannot reach here from valid object code.
			info.size = 4;
			info.isStore = false;
			info.signEx = signed64;
			break;
		case 0xC0000000:
			// size=11 opc=00: doubleword store
			info.size = 8;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0xC0400000:
			// size=11 opc=01: doubleword load
			info.size = 8;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		default:
			// remaining size/opc combinations are not plain loads/stores
			return false;
	}
	// un-scale the 12-bit immediate back into a byte offset
	info.offset = ((instruction >> 10) & 0x0FFF) * info.size;
	return true;
}
1049
// Destination register of a decoded ADRP instruction.
struct AdrpInfo {
	uint32_t destReg;
};

// Decode an arm64 ADRP into 'info'.  Returns false if 'instruction'
// is not an ADRP (any immediate value is accepted).
static bool parseADRP(uint32_t instruction, AdrpInfo& info)
{
	const bool isADRP = ( (instruction & 0x9F000000) == 0x90000000 );
	if ( !isADRP )
		return false;
	info.destReg = instruction & 0x1F;
	return true;
}
1061
// Operands of a decoded ADD-immediate instruction.
struct AddInfo {
	uint32_t destReg;
	uint32_t srcReg;
	uint32_t addend;
};

// Decode a 64-bit arm64 ADD with an unshifted 12-bit immediate
// (opcode 0x91000000).  Returns false for anything else, including
// ADDs using the shifted (LSL #12) immediate form.
static bool parseADD(uint32_t instruction, AddInfo& info)
{
	const uint32_t kOpcodeMask = 0xFFC00000;
	const uint32_t kAdd64Imm   = 0x91000000;
	if ( (instruction & kOpcodeMask) != kAdd64Imm )
		return false;
	info.destReg = instruction & 0x1F;
	info.srcReg  = (instruction >> 5) & 0x1F;
	info.addend  = (instruction >> 10) & 0xFFF;
	return true;
}
1077
1078
1079
#if 0
// Disabled alternative encode/decode helpers, kept for reference only.
// The live makeLDR_literal/makeLoadOrStore/parseLoadOrStore above are
// what the linker-optimization-hint processing actually uses.
static uint32_t makeLDR_scaledOffset(const LoadStoreInfo& info)
{
	assert((info.reg & 0xFFFFFFE0) == 0);
	assert((info.baseReg & 0xFFFFFFE0) == 0);
	assert(!info.isFloat || (info.signEx != signedNot));
	uint32_t sizeBits = 0;
	uint32_t opcBits = 1;
	uint32_t vBit = info.isFloat;
	switch ( info.signEx ) {
		case signedNot:
			opcBits = 1;
			break;
		case signed32:
			opcBits = 3;
			break;
		case signed64:
			opcBits = 2;
			break;
		default:
			assert(0 && "bad SignExtension runtime value");
	}
	switch ( info.size ) {
		case 1:
			sizeBits = 0;
			break;
		case 2:
			sizeBits = 1;
			break;
		case 4:
			sizeBits = 2;
			break;
		case 8:
			sizeBits = 3;
			break;
		case 16:
			sizeBits = 0;
			vBit = 1;
			opcBits = 3;
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	assert((info.offset % info.size) == 0);
	uint32_t scaledOffset = info.offset/info.size;
	assert(scaledOffset < 4096);
	return (0x39000000 | (sizeBits<<30) | (vBit<<26) | (opcBits<<22) | (scaledOffset<<10) | (info.baseReg<<5) | info.reg);
}

// Overload of makeLDR_literal taking raw operands instead of LoadStoreInfo.
static uint32_t makeLDR_literal(uint32_t destReg, uint32_t loadSize, bool isFloat, uint64_t targetAddress, uint64_t instructionAddress)
{
	int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((targetAddress & 0x3) == 0);
	assert((instructionAddress & 0x3) == 0);
	uint32_t imm19 = (delta << 3) & 0x00FFFFE0;
	uint32_t instruction = 0;
	switch ( loadSize ) {
		case 4:
			instruction = isFloat ? 0x1C000000 : 0x18000000;
			break;
		case 8:
			instruction = isFloat ? 0x5C000000 : 0x58000000;
			break;
		case 16:
			instruction = 0x9C000000;
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	return (instruction | imm19 | destReg);
}


static bool ldrInfo(uint32_t instruction, uint8_t* size, uint8_t* destReg, bool* v, uint32_t* scaledOffset)
{
	*v = ( (instruction & 0x04000000) != 0 );
	*destReg = (instruction & 0x1F);
	uint32_t imm12 = ((instruction >> 10) & 0x00000FFF);
	switch ( (instruction & 0xC0000000) >> 30 ) {
		case 0:
			// vector and byte LDR have same "size" bits, need to check other bits to differentiate
			if ( (instruction & 0x00800000) == 0 ) {
				*size = 1;
				*scaledOffset = imm12;
			}
			else {
				*size = 16;
				*scaledOffset = imm12 * 16;
			}
			break;
		case 1:
			*size = 2;
			*scaledOffset = imm12 * 2;
			break;
		case 2:
			*size = 4;
			*scaledOffset = imm12 * 4;
			break;
		case 3:
			*size = 8;
			*scaledOffset = imm12 * 8;
			break;
	}
	return ((instruction & 0x3B400000) == 0x39400000);
}
#endif
1189
// True when the signed distance from addr1 to addr2 is strictly inside
// +/-1MB (the reach of arm64 pc-relative ADR / load-literal forms).
static bool withinOneMeg(uint64_t addr1, uint64_t addr2) {
	const int64_t distance = addr2 - addr1;
	return ( (-1024*1024 < distance) && (distance < 1024*1024) );
}
1194 #endif // SUPPORT_ARCH_arm64
1195
1196 void OutputFile::setInfo(ld::Internal& state, const ld::Atom* atom, uint8_t* buffer, const std::map<uint32_t, const Fixup*>& usedByHints,
1197 uint32_t offsetInAtom, uint32_t delta, InstructionInfo* info)
1198 {
1199 info->offsetInAtom = offsetInAtom + delta;
1200 std::map<uint32_t, const Fixup*>::const_iterator pos = usedByHints.find(info->offsetInAtom);
1201 if ( (pos != usedByHints.end()) && (pos->second != NULL) ) {
1202 info->fixup = pos->second;
1203 info->targetAddress = addressOf(state, info->fixup, &info->target);
1204 if ( info->fixup->clusterSize != ld::Fixup::k1of1 ) {
1205 assert(info->fixup->firstInCluster());
1206 const ld::Fixup* nextFixup = info->fixup + 1;
1207 if ( nextFixup->kind == ld::Fixup::kindAddAddend ) {
1208 info->targetAddress += nextFixup->u.addend;
1209 }
1210 else {
1211 assert(0 && "expected addend");
1212 }
1213 }
1214 }
1215 else {
1216 info->fixup = NULL;
1217 info->targetAddress = 0;
1218 info->target = NULL;
1219 }
1220 info->instructionContent = &buffer[info->offsetInAtom];
1221 info->instructionAddress = atom->finalAddress() + info->offsetInAtom;
1222 info->instruction = get32LE(info->instructionContent);
1223 }
1224
1225 #if SUPPORT_ARCH_arm64
1226 static bool isPageKind(const ld::Fixup* fixup, bool mustBeGOT=false)
1227 {
1228 if ( fixup == NULL )
1229 return false;
1230 const ld::Fixup* f;
1231 switch ( fixup->kind ) {
1232 case ld::Fixup::kindStoreTargetAddressARM64Page21:
1233 return !mustBeGOT;
1234 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
1235 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
1236 return true;
1237 case ld::Fixup::kindSetTargetAddress:
1238 f = fixup;
1239 do {
1240 ++f;
1241 } while ( ! f->lastInCluster() );
1242 switch (f->kind ) {
1243 case ld::Fixup::kindStoreARM64Page21:
1244 return !mustBeGOT;
1245 case ld::Fixup::kindStoreARM64GOTLoadPage21:
1246 case ld::Fixup::kindStoreARM64GOTLeaPage21:
1247 return true;
1248 default:
1249 break;
1250 }
1251 break;
1252 default:
1253 break;
1254 }
1255 return false;
1256 }
1257
1258 static bool isPageOffsetKind(const ld::Fixup* fixup, bool mustBeGOT=false)
1259 {
1260 if ( fixup == NULL )
1261 return false;
1262 const ld::Fixup* f;
1263 switch ( fixup->kind ) {
1264 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
1265 return !mustBeGOT;
1266 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
1267 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
1268 return true;
1269 case ld::Fixup::kindSetTargetAddress:
1270 f = fixup;
1271 do {
1272 ++f;
1273 } while ( ! f->lastInCluster() );
1274 switch (f->kind ) {
1275 case ld::Fixup::kindStoreARM64PageOff12:
1276 return !mustBeGOT;
1277 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
1278 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
1279 return true;
1280 default:
1281 break;
1282 }
1283 break;
1284 default:
1285 break;
1286 }
1287 return false;
1288 }
1289 #endif // SUPPORT_ARCH_arm64
1290
1291
// Guard for linker-optimization-hint preconditions: when 'cond' fails,
// warn (stringizing the condition into the message) and 'break' out of
// the enclosing breakable scope so the hint is skipped, not applied.
// Relies on 'atom' and 'fit' being in scope at each expansion site.
// NOTE(review): "optimzation" typo is in the emitted warning text;
// left as-is since tools may match on it.
#define LOH_ASSERT(cond) \
	if ( !(cond) ) { \
		warning("ignoring linker optimzation hint at %s+0x%X because " #cond, atom->name(), fit->offsetInAtom); \
		break; \
	}
1297
1298
1299 void OutputFile::applyFixUps(ld::Internal& state, uint64_t mhAddress, const ld::Atom* atom, uint8_t* buffer)
1300 {
1301 //fprintf(stderr, "applyFixUps() on %s\n", atom->name());
1302 int64_t accumulator = 0;
1303 const ld::Atom* toTarget = NULL;
1304 const ld::Atom* fromTarget;
1305 int64_t delta;
1306 uint32_t instruction;
1307 uint32_t newInstruction;
1308 bool is_bl;
1309 bool is_blx;
1310 bool is_b;
1311 bool thumbTarget = false;
1312 std::map<uint32_t, const Fixup*> usedByHints;
1313 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
1314 uint8_t* fixUpLocation = &buffer[fit->offsetInAtom];
1315 ld::Fixup::LOH_arm64 lohExtra;
1316 switch ( (ld::Fixup::Kind)(fit->kind) ) {
1317 case ld::Fixup::kindNone:
1318 case ld::Fixup::kindNoneFollowOn:
1319 case ld::Fixup::kindNoneGroupSubordinate:
1320 case ld::Fixup::kindNoneGroupSubordinateFDE:
1321 case ld::Fixup::kindNoneGroupSubordinateLSDA:
1322 case ld::Fixup::kindNoneGroupSubordinatePersonality:
1323 break;
1324 case ld::Fixup::kindSetTargetAddress:
1325 accumulator = addressOf(state, fit, &toTarget);
1326 thumbTarget = targetIsThumb(state, fit);
1327 if ( thumbTarget )
1328 accumulator |= 1;
1329 if ( fit->contentAddendOnly || fit->contentDetlaToAddendOnly )
1330 accumulator = 0;
1331 break;
1332 case ld::Fixup::kindSubtractTargetAddress:
1333 delta = addressOf(state, fit, &fromTarget);
1334 if ( ! fit->contentAddendOnly )
1335 accumulator -= delta;
1336 break;
1337 case ld::Fixup::kindAddAddend:
1338 if ( ! fit->contentIgnoresAddend ) {
1339 // <rdar://problem/8342028> ARM main executables main contain .long constants pointing
1340 // into themselves such as jump tables. These .long should not have thumb bit set
1341 // even though the target is a thumb instruction. We can tell it is an interior pointer
1342 // because we are processing an addend.
1343 if ( thumbTarget && (toTarget == atom) && ((int32_t)fit->u.addend > 0) ) {
1344 accumulator &= (-2);
1345 //warning("removing thumb bit from intra-atom pointer in %s %s+0x%0X",
1346 // atom->section().sectionName(), atom->name(), fit->offsetInAtom);
1347 }
1348 accumulator += fit->u.addend;
1349 }
1350 break;
1351 case ld::Fixup::kindSubtractAddend:
1352 accumulator -= fit->u.addend;
1353 break;
1354 case ld::Fixup::kindSetTargetImageOffset:
1355 accumulator = addressOf(state, fit, &toTarget) - mhAddress;
1356 thumbTarget = targetIsThumb(state, fit);
1357 if ( thumbTarget )
1358 accumulator |= 1;
1359 break;
1360 case ld::Fixup::kindSetTargetSectionOffset:
1361 accumulator = sectionOffsetOf(state, fit);
1362 break;
1363 case ld::Fixup::kindSetTargetTLVTemplateOffset:
1364 accumulator = tlvTemplateOffsetOf(state, fit);
1365 break;
1366 case ld::Fixup::kindStore8:
1367 *fixUpLocation += accumulator;
1368 break;
1369 case ld::Fixup::kindStoreLittleEndian16:
1370 set16LE(fixUpLocation, accumulator);
1371 break;
1372 case ld::Fixup::kindStoreLittleEndianLow24of32:
1373 set32LE(fixUpLocation, (get32LE(fixUpLocation) & 0xFF000000) | (accumulator & 0x00FFFFFF) );
1374 break;
1375 case ld::Fixup::kindStoreLittleEndian32:
1376 rangeCheckAbsolute32(accumulator, state, atom, fit);
1377 set32LE(fixUpLocation, accumulator);
1378 break;
1379 case ld::Fixup::kindStoreLittleEndian64:
1380 set64LE(fixUpLocation, accumulator);
1381 break;
1382 case ld::Fixup::kindStoreBigEndian16:
1383 set16BE(fixUpLocation, accumulator);
1384 break;
1385 case ld::Fixup::kindStoreBigEndianLow24of32:
1386 set32BE(fixUpLocation, (get32BE(fixUpLocation) & 0xFF000000) | (accumulator & 0x00FFFFFF) );
1387 break;
1388 case ld::Fixup::kindStoreBigEndian32:
1389 rangeCheckAbsolute32(accumulator, state, atom, fit);
1390 set32BE(fixUpLocation, accumulator);
1391 break;
1392 case ld::Fixup::kindStoreBigEndian64:
1393 set64BE(fixUpLocation, accumulator);
1394 break;
1395 case ld::Fixup::kindStoreX86PCRel8:
1396 case ld::Fixup::kindStoreX86BranchPCRel8:
1397 if ( fit->contentAddendOnly )
1398 delta = accumulator;
1399 else
1400 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 1);
1401 rangeCheck8(delta, state, atom, fit);
1402 *fixUpLocation = delta;
1403 break;
1404 case ld::Fixup::kindStoreX86PCRel16:
1405 if ( fit->contentAddendOnly )
1406 delta = accumulator;
1407 else
1408 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 2);
1409 rangeCheck16(delta, state, atom, fit);
1410 set16LE(fixUpLocation, delta);
1411 break;
1412 case ld::Fixup::kindStoreX86BranchPCRel32:
1413 if ( fit->contentAddendOnly )
1414 delta = accumulator;
1415 else
1416 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1417 rangeCheckBranch32(delta, state, atom, fit);
1418 set32LE(fixUpLocation, delta);
1419 break;
1420 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
1421 case ld::Fixup::kindStoreX86PCRel32GOT:
1422 case ld::Fixup::kindStoreX86PCRel32:
1423 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
1424 if ( fit->contentAddendOnly )
1425 delta = accumulator;
1426 else
1427 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1428 rangeCheckRIP32(delta, state, atom, fit);
1429 set32LE(fixUpLocation, delta);
1430 break;
1431 case ld::Fixup::kindStoreX86PCRel32_1:
1432 if ( fit->contentAddendOnly )
1433 delta = accumulator - 1;
1434 else
1435 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 5);
1436 rangeCheckRIP32(delta, state, atom, fit);
1437 set32LE(fixUpLocation, delta);
1438 break;
1439 case ld::Fixup::kindStoreX86PCRel32_2:
1440 if ( fit->contentAddendOnly )
1441 delta = accumulator - 2;
1442 else
1443 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 6);
1444 rangeCheckRIP32(delta, state, atom, fit);
1445 set32LE(fixUpLocation, delta);
1446 break;
1447 case ld::Fixup::kindStoreX86PCRel32_4:
1448 if ( fit->contentAddendOnly )
1449 delta = accumulator - 4;
1450 else
1451 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1452 rangeCheckRIP32(delta, state, atom, fit);
1453 set32LE(fixUpLocation, delta);
1454 break;
1455 case ld::Fixup::kindStoreX86Abs32TLVLoad:
1456 set32LE(fixUpLocation, accumulator);
1457 break;
1458 case ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA:
1459 assert(_options.outputKind() != Options::kObjectFile);
1460 // TLV entry was optimized away, change movl instruction to a leal
1461 if ( fixUpLocation[-1] != 0xA1 )
1462 throw "TLV load reloc does not point to a movl instruction";
1463 fixUpLocation[-1] = 0xB8;
1464 set32LE(fixUpLocation, accumulator);
1465 break;
1466 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
1467 assert(_options.outputKind() != Options::kObjectFile);
1468 // GOT entry was optimized away, change movq instruction to a leaq
1469 if ( fixUpLocation[-2] != 0x8B )
1470 throw "GOT load reloc does not point to a movq instruction";
1471 fixUpLocation[-2] = 0x8D;
1472 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1473 rangeCheckRIP32(delta, state, atom, fit);
1474 set32LE(fixUpLocation, delta);
1475 break;
1476 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
1477 assert(_options.outputKind() != Options::kObjectFile);
1478 // TLV entry was optimized away, change movq instruction to a leaq
1479 if ( fixUpLocation[-2] != 0x8B )
1480 throw "TLV load reloc does not point to a movq instruction";
1481 fixUpLocation[-2] = 0x8D;
1482 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1483 rangeCheckRIP32(delta, state, atom, fit);
1484 set32LE(fixUpLocation, delta);
1485 break;
1486 case ld::Fixup::kindStoreTargetAddressARMLoad12:
1487 accumulator = addressOf(state, fit, &toTarget);
1488 // fall into kindStoreARMLoad12 case
1489 case ld::Fixup::kindStoreARMLoad12:
1490 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1491 rangeCheckARM12(delta, state, atom, fit);
1492 instruction = get32LE(fixUpLocation);
1493 if ( delta >= 0 ) {
1494 newInstruction = instruction & 0xFFFFF000;
1495 newInstruction |= ((uint32_t)delta & 0xFFF);
1496 }
1497 else {
1498 newInstruction = instruction & 0xFF7FF000;
1499 newInstruction |= ((uint32_t)(-delta) & 0xFFF);
1500 }
1501 set32LE(fixUpLocation, newInstruction);
1502 break;
1503 case ld::Fixup::kindDtraceExtra:
1504 break;
1505 case ld::Fixup::kindStoreX86DtraceCallSiteNop:
1506 if ( _options.outputKind() != Options::kObjectFile ) {
1507 // change call site to a NOP
1508 fixUpLocation[-1] = 0x90; // 1-byte nop
1509 fixUpLocation[0] = 0x0F; // 4-byte nop
1510 fixUpLocation[1] = 0x1F;
1511 fixUpLocation[2] = 0x40;
1512 fixUpLocation[3] = 0x00;
1513 }
1514 break;
1515 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear:
1516 if ( _options.outputKind() != Options::kObjectFile ) {
1517 // change call site to a clear eax
1518 fixUpLocation[-1] = 0x33; // xorl eax,eax
1519 fixUpLocation[0] = 0xC0;
1520 fixUpLocation[1] = 0x90; // 1-byte nop
1521 fixUpLocation[2] = 0x90; // 1-byte nop
1522 fixUpLocation[3] = 0x90; // 1-byte nop
1523 }
1524 break;
1525 case ld::Fixup::kindStoreARMDtraceCallSiteNop:
1526 if ( _options.outputKind() != Options::kObjectFile ) {
1527 // change call site to a NOP
1528 set32LE(fixUpLocation, 0xE1A00000);
1529 }
1530 break;
1531 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear:
1532 if ( _options.outputKind() != Options::kObjectFile ) {
1533 // change call site to 'eor r0, r0, r0'
1534 set32LE(fixUpLocation, 0xE0200000);
1535 }
1536 break;
1537 case ld::Fixup::kindStoreThumbDtraceCallSiteNop:
1538 if ( _options.outputKind() != Options::kObjectFile ) {
1539 // change 32-bit blx call site to two thumb NOPs
1540 set32LE(fixUpLocation, 0x46C046C0);
1541 }
1542 break;
1543 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear:
1544 if ( _options.outputKind() != Options::kObjectFile ) {
1545 // change 32-bit blx call site to 'nop', 'eor r0, r0'
1546 set32LE(fixUpLocation, 0x46C04040);
1547 }
1548 break;
1549 case ld::Fixup::kindStoreARM64DtraceCallSiteNop:
1550 if ( _options.outputKind() != Options::kObjectFile ) {
1551 // change call site to a NOP
1552 set32LE(fixUpLocation, 0xD503201F);
1553 }
1554 break;
1555 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear:
1556 if ( _options.outputKind() != Options::kObjectFile ) {
1557 // change call site to 'MOVZ X0,0'
1558 set32LE(fixUpLocation, 0xD2800000);
1559 }
1560 break;
1561 case ld::Fixup::kindLazyTarget:
1562 case ld::Fixup::kindIslandTarget:
1563 break;
1564 case ld::Fixup::kindSetLazyOffset:
1565 assert(fit->binding == ld::Fixup::bindingDirectlyBound);
1566 accumulator = this->lazyBindingInfoOffsetForLazyPointerAddress(fit->u.target->finalAddress());
1567 break;
1568 case ld::Fixup::kindDataInCodeStartData:
1569 case ld::Fixup::kindDataInCodeStartJT8:
1570 case ld::Fixup::kindDataInCodeStartJT16:
1571 case ld::Fixup::kindDataInCodeStartJT32:
1572 case ld::Fixup::kindDataInCodeStartJTA32:
1573 case ld::Fixup::kindDataInCodeEnd:
1574 break;
1575 case ld::Fixup::kindLinkerOptimizationHint:
1576 // expand table of address/offsets used by hints
1577 lohExtra.addend = fit->u.addend;
1578 usedByHints[fit->offsetInAtom + (lohExtra.info.delta1 << 2)] = NULL;
1579 if ( lohExtra.info.count > 0 )
1580 usedByHints[fit->offsetInAtom + (lohExtra.info.delta2 << 2)] = NULL;
1581 if ( lohExtra.info.count > 1 )
1582 usedByHints[fit->offsetInAtom + (lohExtra.info.delta3 << 2)] = NULL;
1583 if ( lohExtra.info.count > 2 )
1584 usedByHints[fit->offsetInAtom + (lohExtra.info.delta4 << 2)] = NULL;
1585 break;
1586 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
1587 accumulator = addressOf(state, fit, &toTarget);
1588 thumbTarget = targetIsThumb(state, fit);
1589 if ( thumbTarget )
1590 accumulator |= 1;
1591 if ( fit->contentAddendOnly )
1592 accumulator = 0;
1593 rangeCheckAbsolute32(accumulator, state, atom, fit);
1594 set32LE(fixUpLocation, accumulator);
1595 break;
1596 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
1597 accumulator = addressOf(state, fit, &toTarget);
1598 if ( fit->contentAddendOnly )
1599 accumulator = 0;
1600 set64LE(fixUpLocation, accumulator);
1601 break;
1602 case ld::Fixup::kindStoreTargetAddressBigEndian32:
1603 accumulator = addressOf(state, fit, &toTarget);
1604 if ( fit->contentAddendOnly )
1605 accumulator = 0;
1606 set32BE(fixUpLocation, accumulator);
1607 break;
1608 case ld::Fixup::kindStoreTargetAddressBigEndian64:
1609 accumulator = addressOf(state, fit, &toTarget);
1610 if ( fit->contentAddendOnly )
1611 accumulator = 0;
1612 set64BE(fixUpLocation, accumulator);
1613 break;
1614 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32:
1615 accumulator = tlvTemplateOffsetOf(state, fit);
1616 set32LE(fixUpLocation, accumulator);
1617 break;
1618 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64:
1619 accumulator = tlvTemplateOffsetOf(state, fit);
1620 set64LE(fixUpLocation, accumulator);
1621 break;
1622 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
1623 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
1624 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
1625 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
1626 accumulator = addressOf(state, fit, &toTarget);
1627 if ( fit->contentDetlaToAddendOnly )
1628 accumulator = 0;
1629 if ( fit->contentAddendOnly )
1630 delta = 0;
1631 else
1632 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1633 rangeCheckRIP32(delta, state, atom, fit);
1634 set32LE(fixUpLocation, delta);
1635 break;
1636 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
1637 set32LE(fixUpLocation, accumulator);
1638 break;
1639 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA:
1640 // TLV entry was optimized away, change movl instruction to a leal
1641 if ( fixUpLocation[-1] != 0xA1 )
1642 throw "TLV load reloc does not point to a movl <abs-address>,<reg> instruction";
1643 fixUpLocation[-1] = 0xB8;
1644 accumulator = addressOf(state, fit, &toTarget);
1645 set32LE(fixUpLocation, accumulator);
1646 break;
1647 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
1648 // GOT entry was optimized away, change movq instruction to a leaq
1649 if ( fixUpLocation[-2] != 0x8B )
1650 throw "GOT load reloc does not point to a movq instruction";
1651 fixUpLocation[-2] = 0x8D;
1652 accumulator = addressOf(state, fit, &toTarget);
1653 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1654 rangeCheckRIP32(delta, state, atom, fit);
1655 set32LE(fixUpLocation, delta);
1656 break;
1657 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
1658 // TLV entry was optimized away, change movq instruction to a leaq
1659 if ( fixUpLocation[-2] != 0x8B )
1660 throw "TLV load reloc does not point to a movq instruction";
1661 fixUpLocation[-2] = 0x8D;
1662 accumulator = addressOf(state, fit, &toTarget);
1663 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1664 rangeCheckRIP32(delta, state, atom, fit);
1665 set32LE(fixUpLocation, delta);
1666 break;
1667 case ld::Fixup::kindStoreTargetAddressARMBranch24:
1668 accumulator = addressOf(state, fit, &toTarget);
1669 thumbTarget = targetIsThumb(state, fit);
1670 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1671 // Branching to island. If ultimate target is in range, branch there directly.
1672 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd(); islandfit != end; ++islandfit) {
1673 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1674 const ld::Atom* islandTarget = NULL;
1675 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1676 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1677 if ( checkArmBranch24Displacement(delta) ) {
1678 toTarget = islandTarget;
1679 accumulator = islandTargetAddress;
1680 thumbTarget = targetIsThumb(state, islandfit);
1681 }
1682 break;
1683 }
1684 }
1685 }
1686 if ( thumbTarget )
1687 accumulator |= 1;
1688 if ( fit->contentDetlaToAddendOnly )
1689 accumulator = 0;
1690 // fall into kindStoreARMBranch24 case
1691 case ld::Fixup::kindStoreARMBranch24:
1692 // The pc added will be +8 from the pc
1693 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1694 rangeCheckARMBranch24(delta, state, atom, fit);
1695 instruction = get32LE(fixUpLocation);
1696 // Make sure we are calling arm with bl, thumb with blx
1697 is_bl = ((instruction & 0xFF000000) == 0xEB000000);
1698 is_blx = ((instruction & 0xFE000000) == 0xFA000000);
1699 is_b = !is_blx && ((instruction & 0x0F000000) == 0x0A000000);
1700 if ( (is_bl | is_blx) && thumbTarget ) {
1701 uint32_t opcode = 0xFA000000; // force to be blx
1702 uint32_t disp = (uint32_t)(delta >> 2) & 0x00FFFFFF;
1703 uint32_t h_bit = (uint32_t)(delta << 23) & 0x01000000;
1704 newInstruction = opcode | h_bit | disp;
1705 }
1706 else if ( (is_bl | is_blx) && !thumbTarget ) {
1707 uint32_t opcode = 0xEB000000; // force to be bl
1708 uint32_t disp = (uint32_t)(delta >> 2) & 0x00FFFFFF;
1709 newInstruction = opcode | disp;
1710 }
1711 else if ( is_b && thumbTarget ) {
1712 if ( fit->contentDetlaToAddendOnly )
1713 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(delta >> 2) & 0x00FFFFFF);
1714 else
1715 throwf("no pc-rel bx arm instruction. Can't fix up branch to %s in %s",
1716 referenceTargetAtomName(state, fit), atom->name());
1717 }
1718 else if ( !is_bl && !is_blx && thumbTarget ) {
1719 throwf("don't know how to convert instruction %x referencing %s to thumb",
1720 instruction, referenceTargetAtomName(state, fit));
1721 }
1722 else {
1723 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(delta >> 2) & 0x00FFFFFF);
1724 }
1725 set32LE(fixUpLocation, newInstruction);
1726 break;
1727 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
1728 accumulator = addressOf(state, fit, &toTarget);
1729 thumbTarget = targetIsThumb(state, fit);
1730 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1731 // branching to island, so see if ultimate target is in range
1732 // and if so branch to ultimate target instead.
1733 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd(); islandfit != end; ++islandfit) {
1734 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1735 const ld::Atom* islandTarget = NULL;
1736 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1737 if ( !fit->contentDetlaToAddendOnly ) {
1738 if ( targetIsThumb(state, islandfit) ) {
1739 // Thumb to thumb branch, we will be generating a bl instruction.
1740 // Delta is always even, so mask out thumb bit in target.
1741 islandTargetAddress &= -2ULL;
1742 }
1743 else {
1744 // Target is not thumb, we will be generating a blx instruction
1745 // Since blx cannot have the low bit set, set bit[1] of the target to
1746 // bit[1] of the base address, so that the difference is a multiple of
1747 // 4 bytes.
1748 islandTargetAddress &= -3ULL;
1749 islandTargetAddress |= ((atom->finalAddress() + fit->offsetInAtom ) & 2LL);
1750 }
1751 }
1752 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1753 if ( checkThumbBranch22Displacement(delta) ) {
1754 toTarget = islandTarget;
1755 accumulator = islandTargetAddress;
1756 thumbTarget = targetIsThumb(state, islandfit);
1757 }
1758 break;
1759 }
1760 }
1761 }
1762 if ( thumbTarget )
1763 accumulator |= 1;
1764 if ( fit->contentDetlaToAddendOnly )
1765 accumulator = 0;
1766 // fall into kindStoreThumbBranch22 case
1767 case ld::Fixup::kindStoreThumbBranch22:
1768 instruction = get32LE(fixUpLocation);
1769 is_bl = ((instruction & 0xD000F800) == 0xD000F000);
1770 is_blx = ((instruction & 0xD000F800) == 0xC000F000);
1771 is_b = ((instruction & 0xD000F800) == 0x9000F000);
1772 if ( !fit->contentDetlaToAddendOnly ) {
1773 if ( thumbTarget ) {
1774 // Thumb to thumb branch, we will be generating a bl instruction.
1775 // Delta is always even, so mask out thumb bit in target.
1776 accumulator &= -2ULL;
1777 }
1778 else {
1779 // Target is not thumb, we will be generating a blx instruction
1780 // Since blx cannot have the low bit set, set bit[1] of the target to
1781 // bit[1] of the base address, so that the difference is a multiple of
1782 // 4 bytes.
1783 accumulator &= -3ULL;
1784 accumulator |= ((atom->finalAddress() + fit->offsetInAtom ) & 2LL);
1785 }
1786 }
1787 // The pc added will be +4 from the pc
1788 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1789 // <rdar://problem/16652542> support bl in very large .o files
1790 if ( fit->contentDetlaToAddendOnly ) {
1791 while ( delta < (-16777216LL) )
1792 delta += 0x2000000;
1793 }
1794 rangeCheckThumbBranch22(delta, state, atom, fit);
1795 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
1796 // The instruction is really two instructions:
1797 // The lower 16 bits are the first instruction, which contains the high
1798 // 11 bits of the displacement.
1799 // The upper 16 bits are the second instruction, which contains the low
1800 // 11 bits of the displacement, as well as differentiating bl and blx.
1801 uint32_t s = (uint32_t)(delta >> 24) & 0x1;
1802 uint32_t i1 = (uint32_t)(delta >> 23) & 0x1;
1803 uint32_t i2 = (uint32_t)(delta >> 22) & 0x1;
1804 uint32_t imm10 = (uint32_t)(delta >> 12) & 0x3FF;
1805 uint32_t imm11 = (uint32_t)(delta >> 1) & 0x7FF;
1806 uint32_t j1 = (i1 == s);
1807 uint32_t j2 = (i2 == s);
1808 if ( is_bl ) {
1809 if ( thumbTarget )
1810 instruction = 0xD000F000; // keep bl
1811 else
1812 instruction = 0xC000F000; // change to blx
1813 }
1814 else if ( is_blx ) {
1815 if ( thumbTarget )
1816 instruction = 0xD000F000; // change to bl
1817 else
1818 instruction = 0xC000F000; // keep blx
1819 }
1820 else if ( is_b ) {
1821 instruction = 0x9000F000; // keep b
1822 if ( !thumbTarget && !fit->contentDetlaToAddendOnly ) {
1823 throwf("armv7 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1824 referenceTargetAtomName(state, fit), atom->name());
1825 }
1826 }
1827 else {
1828 if ( !thumbTarget )
1829 throwf("don't know how to convert branch instruction %x referencing %s to bx",
1830 instruction, referenceTargetAtomName(state, fit));
1831 instruction = 0x9000F000; // keep b
1832 }
1833 uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
1834 uint32_t firstDisp = (s << 10) | imm10;
1835 newInstruction = instruction | (nextDisp << 16) | firstDisp;
1836 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, instruction=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
1837 // s, j1, j2, imm10, imm11, instruction, firstDisp, nextDisp, newInstruction, delta, atom->name(), toTarget->name());
1838 set32LE(fixUpLocation, newInstruction);
1839 }
1840 else {
1841 // The instruction is really two instructions:
1842 // The lower 16 bits are the first instruction, which contains the high
1843 // 11 bits of the displacement.
1844 // The upper 16 bits are the second instruction, which contains the low
1845 // 11 bits of the displacement, as well as differentiating bl and blx.
1846 uint32_t firstDisp = (uint32_t)(delta >> 12) & 0x7FF;
1847 uint32_t nextDisp = (uint32_t)(delta >> 1) & 0x7FF;
1848 if ( is_bl && !thumbTarget ) {
1849 instruction = 0xE800F000;
1850 }
1851 else if ( is_blx && thumbTarget ) {
1852 instruction = 0xF800F000;
1853 }
1854 else if ( is_b ) {
1855 instruction = 0x9000F000; // keep b
1856 if ( !thumbTarget && !fit->contentDetlaToAddendOnly ) {
1857 throwf("armv6 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1858 referenceTargetAtomName(state, fit), atom->name());
1859 }
1860 }
1861 else {
1862 instruction = instruction & 0xF800F800;
1863 }
1864 newInstruction = instruction | (nextDisp << 16) | firstDisp;
1865 set32LE(fixUpLocation, newInstruction);
1866 }
1867 break;
1868 case ld::Fixup::kindStoreARMLow16:
1869 {
1870 uint32_t imm4 = (accumulator & 0x0000F000) >> 12;
1871 uint32_t imm12 = accumulator & 0x00000FFF;
1872 instruction = get32LE(fixUpLocation);
1873 newInstruction = (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
1874 set32LE(fixUpLocation, newInstruction);
1875 }
1876 break;
1877 case ld::Fixup::kindStoreARMHigh16:
1878 {
1879 uint32_t imm4 = (accumulator & 0xF0000000) >> 28;
1880 uint32_t imm12 = (accumulator & 0x0FFF0000) >> 16;
1881 instruction = get32LE(fixUpLocation);
1882 newInstruction = (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
1883 set32LE(fixUpLocation, newInstruction);
1884 }
1885 break;
1886 case ld::Fixup::kindStoreThumbLow16:
1887 {
1888 uint32_t imm4 = (accumulator & 0x0000F000) >> 12;
1889 uint32_t i = (accumulator & 0x00000800) >> 11;
1890 uint32_t imm3 = (accumulator & 0x00000700) >> 8;
1891 uint32_t imm8 = accumulator & 0x000000FF;
1892 instruction = get32LE(fixUpLocation);
1893 newInstruction = (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
1894 set32LE(fixUpLocation, newInstruction);
1895 }
1896 break;
1897 case ld::Fixup::kindStoreThumbHigh16:
1898 {
1899 uint32_t imm4 = (accumulator & 0xF0000000) >> 28;
1900 uint32_t i = (accumulator & 0x08000000) >> 27;
1901 uint32_t imm3 = (accumulator & 0x07000000) >> 24;
1902 uint32_t imm8 = (accumulator & 0x00FF0000) >> 16;
1903 instruction = get32LE(fixUpLocation);
1904 newInstruction = (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
1905 set32LE(fixUpLocation, newInstruction);
1906 }
1907 break;
1908 #if SUPPORT_ARCH_arm64
1909 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
1910 accumulator = addressOf(state, fit, &toTarget);
1911 // fall into kindStoreARM64Branch26 case
1912 case ld::Fixup::kindStoreARM64Branch26:
1913 if ( fit->contentAddendOnly )
1914 delta = accumulator;
1915 else
1916 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
1917 rangeCheckARM64Branch26(delta, state, atom, fit);
1918 instruction = get32LE(fixUpLocation);
1919 newInstruction = (instruction & 0xFC000000) | ((uint32_t)(delta >> 2) & 0x03FFFFFF);
1920 set32LE(fixUpLocation, newInstruction);
1921 break;
1922 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
1923 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
1924 case ld::Fixup::kindStoreTargetAddressARM64Page21:
1925 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
1926 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
1927 accumulator = addressOf(state, fit, &toTarget);
1928 // fall into kindStoreARM64Branch26 case
1929 case ld::Fixup::kindStoreARM64GOTLeaPage21:
1930 case ld::Fixup::kindStoreARM64GOTLoadPage21:
1931 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
1932 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
1933 case ld::Fixup::kindStoreARM64Page21:
1934 {
1935 // the ADRP instruction adds the imm << 12 to the page that the pc is on
1936 if ( fit->contentAddendOnly )
1937 delta = 0;
1938 else
1939 delta = (accumulator & (-4096)) - ((atom->finalAddress() + fit->offsetInAtom) & (-4096));
1940 rangeCheckARM64Page21(delta, state, atom, fit);
1941 instruction = get32LE(fixUpLocation);
1942 uint32_t immhi = (delta >> 9) & (0x00FFFFE0);
1943 uint32_t immlo = (delta << 17) & (0x60000000);
1944 newInstruction = (instruction & 0x9F00001F) | immlo | immhi;
1945 set32LE(fixUpLocation, newInstruction);
1946 }
1947 break;
1948 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
1949 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
1950 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
1951 accumulator = addressOf(state, fit, &toTarget);
1952 // fall into kindAddressARM64PageOff12 case
1953 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
1954 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
1955 case ld::Fixup::kindStoreARM64PageOff12:
1956 {
1957 uint32_t offset = accumulator & 0x00000FFF;
1958 instruction = get32LE(fixUpLocation);
1959 // LDR/STR instruction have implicit scale factor, need to compensate for that
1960 if ( instruction & 0x08000000 ) {
1961 uint32_t implictShift = ((instruction >> 30) & 0x3);
1962 switch ( implictShift ) {
1963 case 0:
1964 if ( (instruction & 0x04800000) == 0x04800000 ) {
1965 // vector and byte LDR/STR have same "size" bits, need to check other bits to differenciate
1966 implictShift = 4;
1967 if ( (offset & 0xF) != 0 ) {
1968 throwf("128-bit LDR/STR not 16-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1969 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
1970 addressOf(state, fit, &toTarget));
1971 }
1972 }
1973 break;
1974 case 1:
1975 if ( (offset & 0x1) != 0 ) {
1976 throwf("16-bit LDR/STR not 2-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1977 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
1978 addressOf(state, fit, &toTarget));
1979 }
1980 break;
1981 case 2:
1982 if ( (offset & 0x3) != 0 ) {
1983 throwf("32-bit LDR/STR not 4-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1984 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
1985 addressOf(state, fit, &toTarget));
1986 }
1987 break;
1988 case 3:
1989 if ( (offset & 0x7) != 0 ) {
1990 throwf("64-bit LDR/STR not 8-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1991 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
1992 addressOf(state, fit, &toTarget));
1993 }
1994 break;
1995 }
1996 // compensate for implicit scale
1997 offset >>= implictShift;
1998 }
1999 if ( fit->contentAddendOnly )
2000 offset = 0;
2001 uint32_t imm12 = offset << 10;
2002 newInstruction = (instruction & 0xFFC003FF) | imm12;
2003 set32LE(fixUpLocation, newInstruction);
2004 }
2005 break;
2006 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
2007 accumulator = addressOf(state, fit, &toTarget);
2008 // fall into kindStoreARM64GOTLoadPage21 case
2009 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
2010 {
2011 // GOT entry was optimized away, change LDR instruction to a ADD
2012 instruction = get32LE(fixUpLocation);
2013 if ( (instruction & 0xFFC00000) != 0xF9400000 )
2014 throwf("GOT load reloc does not point to a LDR instruction in %s", atom->name());
2015 uint32_t offset = accumulator & 0x00000FFF;
2016 uint32_t imm12 = offset << 10;
2017 newInstruction = 0x91000000 | imm12 | (instruction & 0x000003FF);
2018 set32LE(fixUpLocation, newInstruction);
2019 }
2020 break;
2021 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
2022 accumulator = addressOf(state, fit, &toTarget);
2023 // fall into kindStoreARM64TLVPLeaPageOff12 case
2024 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
2025 {
2026 // TLV thunk in same linkage unit, so LEA it directly, changing LDR instruction to a ADD
2027 instruction = get32LE(fixUpLocation);
2028 if ( (instruction & 0xFFC00000) != 0xF9400000 )
2029 throwf("TLV load reloc does not point to a LDR instruction in %s", atom->name());
2030 uint32_t offset = accumulator & 0x00000FFF;
2031 uint32_t imm12 = offset << 10;
2032 newInstruction = 0x91000000 | imm12 | (instruction & 0x000003FF);
2033 set32LE(fixUpLocation, newInstruction);
2034 }
2035 break;
2036 case ld::Fixup::kindStoreARM64PointerToGOT:
2037 set64LE(fixUpLocation, accumulator);
2038 break;
2039 case ld::Fixup::kindStoreARM64PCRelToGOT:
2040 if ( fit->contentAddendOnly )
2041 delta = accumulator;
2042 else
2043 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
2044 set32LE(fixUpLocation, delta);
2045 break;
2046 #endif
2047 }
2048 }
2049
2050 #if SUPPORT_ARCH_arm64
2051 // after all fixups are done on atom, if there are potential optimizations, do those
2052 if ( (usedByHints.size() != 0) && (_options.outputKind() != Options::kObjectFile) && !_options.ignoreOptimizationHints() ) {
2053 // fill in second part of usedByHints map, so we can see the target of fixups that might be optimized
2054 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2055 switch ( fit->kind ) {
2056 case ld::Fixup::kindLinkerOptimizationHint:
2057 case ld::Fixup::kindNoneFollowOn:
2058 case ld::Fixup::kindNoneGroupSubordinate:
2059 case ld::Fixup::kindNoneGroupSubordinateFDE:
2060 case ld::Fixup::kindNoneGroupSubordinateLSDA:
2061 case ld::Fixup::kindNoneGroupSubordinatePersonality:
2062 break;
2063 default:
2064 if ( fit->firstInCluster() ) {
2065 std::map<uint32_t, const Fixup*>::iterator pos = usedByHints.find(fit->offsetInAtom);
2066 if ( pos != usedByHints.end() ) {
2067 assert(pos->second == NULL && "two fixups in same hint location");
2068 pos->second = fit;
2069 //fprintf(stderr, "setting %s usedByHints[0x%04X], kind = %d\n", atom->name(), fit->offsetInAtom, fit->kind);
2070 }
2071 }
2072 }
2073 }
2074
2075 // apply hints pass 1
2076 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2077 if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint )
2078 continue;
2079 InstructionInfo infoA;
2080 InstructionInfo infoB;
2081 InstructionInfo infoC;
2082 InstructionInfo infoD;
2083 LoadStoreInfo ldrInfoB, ldrInfoC;
2084 AddInfo addInfoB;
2085 AdrpInfo adrpInfoA;
2086 bool usableSegment;
2087 bool targetFourByteAligned;
2088 bool literalableSize, isADRP, isADD, isLDR, isSTR;
2089 //uint8_t loadSize, destReg;
2090 //uint32_t scaledOffset;
2091 //uint32_t imm12;
2092 ld::Fixup::LOH_arm64 alt;
2093 alt.addend = fit->u.addend;
2094 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta1 << 2), &infoA);
2095 if ( alt.info.count > 0 )
2096 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta2 << 2), &infoB);
2097 if ( alt.info.count > 1 )
2098 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta3 << 2), &infoC);
2099 if ( alt.info.count > 2 )
2100 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta4 << 2), &infoD);
2101
2102 switch ( alt.info.kind ) {
2103 case LOH_ARM64_ADRP_ADRP:
2104 // processed in pass 2 beacuse some ADRP may have been removed
2105 break;
2106 case LOH_ARM64_ADRP_LDR:
2107 LOH_ASSERT(alt.info.count == 1);
2108 LOH_ASSERT(isPageKind(infoA.fixup));
2109 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2110 LOH_ASSERT(infoA.target == infoB.target);
2111 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2112 usableSegment = ( !_options.sharedRegionEligible() || (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0) );
2113 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2114 LOH_ASSERT(isADRP);
2115 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2116 LOH_ASSERT(isLDR);
2117 LOH_ASSERT(ldrInfoB.baseReg == adrpInfoA.destReg);
2118 LOH_ASSERT(ldrInfoB.offset == (infoA.targetAddress & 0x00000FFF));
2119 literalableSize = ( (ldrInfoB.size != 1) && (ldrInfoB.size != 2) );
2120 targetFourByteAligned = ( (infoA.targetAddress & 0x3) == 0 );
2121 if ( literalableSize && usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2122 set32LE(infoA.instructionContent, makeNOP());
2123 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2124 if ( _options.verboseOptimizationHints() )
2125 fprintf(stderr, "adrp-ldr at 0x%08llX transformed to LDR literal\n", infoB.instructionAddress);
2126 }
2127 else {
2128 if ( _options.verboseOptimizationHints() )
2129 fprintf(stderr, "adrp-ldr at 0x%08llX not transformed, isLDR=%d, literalableSize=%d, inRange=%d, usableSegment=%d, scaledOffset=%d\n",
2130 infoB.instructionAddress, isLDR, literalableSize, withinOneMeg(infoB.instructionAddress, infoA.targetAddress), usableSegment, ldrInfoB.offset);
2131 }
2132 break;
2133 case LOH_ARM64_ADRP_ADD_LDR:
2134 LOH_ASSERT(alt.info.count == 2);
2135 LOH_ASSERT(isPageKind(infoA.fixup));
2136 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2137 LOH_ASSERT(infoC.fixup == NULL);
2138 LOH_ASSERT(infoA.target == infoB.target);
2139 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2140 usableSegment = ( !_options.sharedRegionEligible() || (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0) );
2141 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2142 LOH_ASSERT(isADRP);
2143 isADD = parseADD(infoB.instruction, addInfoB);
2144 LOH_ASSERT(isADD);
2145 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2146 isLDR = parseLoadOrStore(infoC.instruction, ldrInfoC);
2147 LOH_ASSERT(isLDR);
2148 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2149 targetFourByteAligned = ( ((infoB.targetAddress+ldrInfoC.offset) & 0x3) == 0 );
2150 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2151 if ( literalableSize && usableSegment && targetFourByteAligned && withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2152 // can do T1 transformation to LDR literal
2153 set32LE(infoA.instructionContent, makeNOP());
2154 set32LE(infoB.instructionContent, makeNOP());
2155 set32LE(infoC.instructionContent, makeLDR_literal(ldrInfoC, infoA.targetAddress+ldrInfoC.offset, infoC.instructionAddress));
2156 if ( _options.verboseOptimizationHints() ) {
2157 fprintf(stderr, "adrp-add-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC.instructionAddress);
2158 }
2159 }
2160 else if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2161 // can to T4 transformation and turn ADRP/ADD into ADR
2162 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress+ldrInfoC.offset, infoA.instructionAddress));
2163 set32LE(infoB.instructionContent, makeNOP());
2164 ldrInfoC.offset = 0; // offset is now in ADR instead of ADD or LDR
2165 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2166 set32LE(infoC.instructionContent, infoC.instruction & 0xFFC003FF);
2167 if ( _options.verboseOptimizationHints() )
2168 fprintf(stderr, "adrp-add-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoB.instructionAddress);
2169 }
2170 else if ( ((infoB.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2171 // can do T2 transformation by merging ADD into LD
2172 // Leave ADRP as-is
2173 set32LE(infoB.instructionContent, makeNOP());
2174 ldrInfoC.offset += addInfoB.addend;
2175 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2176 if ( _options.verboseOptimizationHints() )
2177 fprintf(stderr, "adrp-add-ldr at 0x%08llX T2 transformed to ADRP/LDR \n", infoC.instructionAddress);
2178 }
2179 else {
2180 if ( _options.verboseOptimizationHints() )
2181 fprintf(stderr, "adrp-add-ldr at 0x%08llX could not be transformed, loadSize=%d, literalableSize=%d, inRange=%d, usableSegment=%d, targetFourByteAligned=%d, imm12=%d\n",
2182 infoC.instructionAddress, ldrInfoC.size, literalableSize, withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset), usableSegment, targetFourByteAligned, ldrInfoC.offset);
2183 }
2184 break;
2185 case LOH_ARM64_ADRP_ADD:
2186 LOH_ASSERT(alt.info.count == 1);
2187 LOH_ASSERT(isPageKind(infoA.fixup));
2188 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2189 LOH_ASSERT(infoA.target == infoB.target);
2190 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2191 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2192 LOH_ASSERT(isADRP);
2193 isADD = parseADD(infoB.instruction, addInfoB);
2194 LOH_ASSERT(isADD);
2195 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2196 usableSegment = ( !_options.sharedRegionEligible() || (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0) );
2197 if ( usableSegment && withinOneMeg(infoA.targetAddress, infoA.instructionAddress) ) {
2198 // can do T4 transformation and use ADR
2199 set32LE(infoA.instructionContent, makeADR(addInfoB.destReg, infoA.targetAddress, infoA.instructionAddress));
2200 set32LE(infoB.instructionContent, makeNOP());
2201 if ( _options.verboseOptimizationHints() )
2202 fprintf(stderr, "adrp-add at 0x%08llX transformed to ADR\n", infoB.instructionAddress);
2203 }
2204 else {
2205 if ( _options.verboseOptimizationHints() )
2206 fprintf(stderr, "adrp-add at 0x%08llX not transformed, isAdd=%d, inRange=%d, usableSegment=%d\n",
2207 infoB.instructionAddress, isADD, withinOneMeg(infoA.targetAddress, infoA.instructionAddress), usableSegment);
2208 }
2209 break;
2210 case LOH_ARM64_ADRP_LDR_GOT_LDR:
2211 LOH_ASSERT(alt.info.count == 2);
2212 LOH_ASSERT(isPageKind(infoA.fixup, true));
2213 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2214 LOH_ASSERT(infoC.fixup == NULL);
2215 LOH_ASSERT(infoA.target == infoB.target);
2216 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2217 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2218 LOH_ASSERT(isADRP);
2219 isLDR = parseLoadOrStore(infoC.instruction, ldrInfoC);
2220 LOH_ASSERT(isLDR);
2221 LOH_ASSERT(ldrInfoC.offset == 0);
2222 isADD = parseADD(infoB.instruction, addInfoB);
2223 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2224 if ( isLDR ) {
2225 // target of GOT is external
2226 LOH_ASSERT(ldrInfoB.size == 8);
2227 LOH_ASSERT(!ldrInfoB.isFloat);
2228 LOH_ASSERT(ldrInfoC.baseReg == ldrInfoB.reg);
2229 //fprintf(stderr, "infoA.target=%p, %s, infoA.targetAddress=0x%08llX\n", infoA.target, infoA.target->name(), infoA.targetAddress);
2230 usableSegment = ( !_options.sharedRegionEligible() || (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0) );
2231 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2232 if ( usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2233 // can do T5 transform
2234 set32LE(infoA.instructionContent, makeNOP());
2235 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2236 if ( _options.verboseOptimizationHints() ) {
2237 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T5 transformed to LDR literal of GOT plus LDR\n", infoC.instructionAddress);
2238 }
2239 }
2240 else {
2241 if ( _options.verboseOptimizationHints() )
2242 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX no optimization done\n", infoC.instructionAddress);
2243 }
2244 }
2245 else if ( isADD ) {
2246 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2247 LOH_ASSERT(addInfoB.srcReg == adrpInfoA.destReg);
2248 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2249 usableSegment = ( !_options.sharedRegionEligible() || (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0) );
2250 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2251 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2252 if ( usableSegment && literalableSize && targetFourByteAligned && withinOneMeg(infoC.instructionAddress, infoA.targetAddress) ) {
2253 // can do T1 transform
2254 set32LE(infoA.instructionContent, makeNOP());
2255 set32LE(infoB.instructionContent, makeNOP());
2256 set32LE(infoC.instructionContent, makeLDR_literal(ldrInfoC, infoA.targetAddress, infoC.instructionAddress));
2257 if ( _options.verboseOptimizationHints() )
2258 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC.instructionAddress);
2259 }
2260 else if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2261 // can do T4 transform
2262 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress, infoA.instructionAddress));
2263 set32LE(infoB.instructionContent, makeNOP());
2264 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2265 if ( _options.verboseOptimizationHints() ) {
2266 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoC.instructionAddress);
2267 }
2268 }
2269 else if ( (infoA.targetAddress % ldrInfoC.size) == 0 ) {
2270 // can do T2 transform
2271 set32LE(infoB.instructionContent, makeNOP());
2272 ldrInfoC.baseReg = adrpInfoA.destReg;
2273 ldrInfoC.offset = addInfoB.addend;
2274 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2275 if ( _options.verboseOptimizationHints() ) {
2276 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADRP/NOP/LDR\n", infoC.instructionAddress);
2277 }
2278 }
2279 else {
2280 // T3 transform already done by ld::passes:got:doPass()
2281 if ( _options.verboseOptimizationHints() ) {
2282 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T3 transformed to ADRP/ADD/LDR\n", infoC.instructionAddress);
2283 }
2284 }
2285 }
2286 else {
2287 if ( _options.verboseOptimizationHints() )
2288 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX not ADD or LDR\n", infoC.instructionAddress);
2289 }
2290 break;
2291 case LOH_ARM64_ADRP_ADD_STR:
2292 LOH_ASSERT(alt.info.count == 2);
2293 LOH_ASSERT(isPageKind(infoA.fixup));
2294 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2295 LOH_ASSERT(infoC.fixup == NULL);
2296 LOH_ASSERT(infoA.target == infoB.target);
2297 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2298 usableSegment = ( !_options.sharedRegionEligible() || (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0) );
2299 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2300 LOH_ASSERT(isADRP);
2301 isADD = parseADD(infoB.instruction, addInfoB);
2302 LOH_ASSERT(isADD);
2303 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2304 isSTR = (parseLoadOrStore(infoC.instruction, ldrInfoC) && ldrInfoC.isStore);
2305 LOH_ASSERT(isSTR);
2306 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2307 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2308 // can to T4 transformation and turn ADRP/ADD into ADR
2309 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress+ldrInfoC.offset, infoA.instructionAddress));
2310 set32LE(infoB.instructionContent, makeNOP());
2311 ldrInfoC.offset = 0; // offset is now in ADR instead of ADD or LDR
2312 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2313 set32LE(infoC.instructionContent, infoC.instruction & 0xFFC003FF);
2314 if ( _options.verboseOptimizationHints() )
2315 fprintf(stderr, "adrp-add-str at 0x%08llX T4 transformed to ADR/STR\n", infoB.instructionAddress);
2316 }
2317 else if ( ((infoB.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2318 // can do T2 transformation by merging ADD into STR
2319 // Leave ADRP as-is
2320 set32LE(infoB.instructionContent, makeNOP());
2321 ldrInfoC.offset += addInfoB.addend;
2322 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2323 if ( _options.verboseOptimizationHints() )
2324 fprintf(stderr, "adrp-add-str at 0x%08llX T2 transformed to ADRP/STR \n", infoC.instructionAddress);
2325 }
2326 else {
2327 if ( _options.verboseOptimizationHints() )
2328 fprintf(stderr, "adrp-add-str at 0x%08llX could not be transformed, loadSize=%d, inRange=%d, usableSegment=%d, imm12=%d\n",
2329 infoC.instructionAddress, ldrInfoC.size, withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset), usableSegment, ldrInfoC.offset);
2330 }
2331 break;
2332 case LOH_ARM64_ADRP_LDR_GOT_STR:
2333 LOH_ASSERT(alt.info.count == 2);
2334 LOH_ASSERT(isPageKind(infoA.fixup, true));
2335 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2336 LOH_ASSERT(infoC.fixup == NULL);
2337 LOH_ASSERT(infoA.target == infoB.target);
2338 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2339 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2340 LOH_ASSERT(isADRP);
2341 isSTR = (parseLoadOrStore(infoC.instruction, ldrInfoC) && ldrInfoC.isStore);
2342 LOH_ASSERT(isSTR);
2343 LOH_ASSERT(ldrInfoC.offset == 0);
2344 isADD = parseADD(infoB.instruction, addInfoB);
2345 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2346 if ( isLDR ) {
2347 // target of GOT is external
2348 LOH_ASSERT(ldrInfoB.size == 8);
2349 LOH_ASSERT(!ldrInfoB.isFloat);
2350 LOH_ASSERT(ldrInfoC.baseReg == ldrInfoB.reg);
2351 usableSegment = ( !_options.sharedRegionEligible() || (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0) );
2352 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2353 if ( usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2354 // can do T5 transform
2355 set32LE(infoA.instructionContent, makeNOP());
2356 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2357 if ( _options.verboseOptimizationHints() ) {
2358 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T5 transformed to LDR literal of GOT plus STR\n", infoC.instructionAddress);
2359 }
2360 }
2361 else {
2362 if ( _options.verboseOptimizationHints() )
2363 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX no optimization done\n", infoC.instructionAddress);
2364 }
2365 }
2366 else if ( isADD ) {
2367 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2368 LOH_ASSERT(addInfoB.srcReg == adrpInfoA.destReg);
2369 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2370 usableSegment = ( !_options.sharedRegionEligible() || (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0) );
2371 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2372 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2373 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2374 // can do T4 transform
2375 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress, infoA.instructionAddress));
2376 set32LE(infoB.instructionContent, makeNOP());
2377 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2378 if ( _options.verboseOptimizationHints() ) {
2379 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADR/STR\n", infoC.instructionAddress);
2380 }
2381 }
2382 else if ( ((infoA.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2383 // can do T2 transform
2384 set32LE(infoB.instructionContent, makeNOP());
2385 ldrInfoC.baseReg = adrpInfoA.destReg;
2386 ldrInfoC.offset = addInfoB.addend;
2387 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2388 if ( _options.verboseOptimizationHints() ) {
2389 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADRP/NOP/STR\n", infoC.instructionAddress);
2390 }
2391 }
2392 else {
2393 // T3 transform already done by ld::passes:got:doPass()
2394 if ( _options.verboseOptimizationHints() ) {
2395 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T3 transformed to ADRP/ADD/STR\n", infoC.instructionAddress);
2396 }
2397 }
2398 }
2399 else {
2400 if ( _options.verboseOptimizationHints() )
2401 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX not ADD or LDR\n", infoC.instructionAddress);
2402 }
2403 break;
2404 case LOH_ARM64_ADRP_LDR_GOT:
2405 LOH_ASSERT(alt.info.count == 1);
2406 LOH_ASSERT(isPageKind(infoA.fixup, true));
2407 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2408 LOH_ASSERT(infoA.target == infoB.target);
2409 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2410 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2411 isADD = parseADD(infoB.instruction, addInfoB);
2412 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2413 usableSegment = ( !_options.sharedRegionEligible() || (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0) );
2414 if ( isADRP ) {
2415 if ( isLDR ) {
2416 if ( usableSegment && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2417 // can do T5 transform (LDR literal load of GOT)
2418 set32LE(infoA.instructionContent, makeNOP());
2419 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2420 if ( _options.verboseOptimizationHints() ) {
2421 fprintf(stderr, "adrp-ldr-got at 0x%08llX T5 transformed to NOP/LDR\n", infoC.instructionAddress);
2422 }
2423 }
2424 }
2425 else if ( isADD ) {
2426 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2427 // can do T4 transform (ADR to compute local address)
2428 set32LE(infoA.instructionContent, makeADR(addInfoB.destReg, infoA.targetAddress, infoA.instructionAddress));
2429 set32LE(infoB.instructionContent, makeNOP());
2430 if ( _options.verboseOptimizationHints() ) {
2431 fprintf(stderr, "adrp-ldr-got at 0x%08llX T4 transformed to ADR/STR\n", infoC.instructionAddress);
2432 }
2433 }
2434 }
2435 else {
2436 if ( _options.verboseOptimizationHints() )
2437 fprintf(stderr, "adrp-ldr-got at 0x%08llX not LDR or ADD\n", infoB.instructionAddress);
2438 }
2439 }
2440 else {
2441 if ( _options.verboseOptimizationHints() )
2442 fprintf(stderr, "adrp-ldr-got at 0x%08llX not ADRP\n", infoA.instructionAddress);
2443 }
2444 break;
2445 default:
2446 if ( _options.verboseOptimizationHints() )
2447 fprintf(stderr, "unknown hint kind %d alt.info.kind at 0x%08llX\n", alt.info.kind, infoA.instructionAddress);
2448 break;
2449 }
2450 }
2451 // apply hints pass 2
2452 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2453 if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint )
2454 continue;
2455 InstructionInfo infoA;
2456 InstructionInfo infoB;
2457 ld::Fixup::LOH_arm64 alt;
2458 alt.addend = fit->u.addend;
2459 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta1 << 2), &infoA);
2460 if ( alt.info.count > 0 )
2461 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta2 << 2), &infoB);
2462
2463 switch ( alt.info.kind ) {
2464 case LOH_ARM64_ADRP_ADRP:
2465 LOH_ASSERT(isPageKind(infoA.fixup));
2466 LOH_ASSERT(isPageKind(infoB.fixup));
2467 if ( (infoA.instruction & 0x9F000000) != 0x90000000 ) {
2468 if ( _options.verboseOptimizationHints() )
2469 fprintf(stderr, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoA.instructionAddress, infoA.instruction);
2470 sAdrpNA++;
2471 break;
2472 }
2473 if ( (infoB.instruction & 0x9F000000) != 0x90000000 ) {
2474 if ( _options.verboseOptimizationHints() )
2475 fprintf(stderr, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoB.instructionAddress, infoA.instruction);
2476 sAdrpNA++;
2477 break;
2478 }
2479 if ( (infoA.targetAddress & (-4096)) == (infoB.targetAddress & (-4096)) ) {
2480 set32LE(infoB.instructionContent, 0xD503201F);
2481 sAdrpNoped++;
2482 }
2483 else {
2484 sAdrpNotNoped++;
2485 }
2486 break;
2487 }
2488 }
2489 }
2490 #endif // SUPPORT_ARCH_arm64
2491
2492 }
2493
2494 void OutputFile::copyNoOps(uint8_t* from, uint8_t* to, bool thumb)
2495 {
2496 switch ( _options.architecture() ) {
2497 case CPU_TYPE_I386:
2498 case CPU_TYPE_X86_64:
2499 for (uint8_t* p=from; p < to; ++p)
2500 *p = 0x90;
2501 break;
2502 case CPU_TYPE_ARM:
2503 if ( thumb ) {
2504 for (uint8_t* p=from; p < to; p += 2)
2505 OSWriteLittleInt16((uint16_t*)p, 0, 0x46c0);
2506 }
2507 else {
2508 for (uint8_t* p=from; p < to; p += 4)
2509 OSWriteLittleInt32((uint32_t*)p, 0, 0xe1a00000);
2510 }
2511 break;
2512 default:
2513 for (uint8_t* p=from; p < to; ++p)
2514 *p = 0x00;
2515 break;
2516 }
2517 }
2518
2519 bool OutputFile::takesNoDiskSpace(const ld::Section* sect)
2520 {
2521 switch ( sect->type() ) {
2522 case ld::Section::typeZeroFill:
2523 case ld::Section::typeTLVZeroFill:
2524 return _options.optimizeZeroFill();
2525 case ld::Section::typePageZero:
2526 case ld::Section::typeStack:
2527 case ld::Section::typeAbsoluteSymbols:
2528 case ld::Section::typeTentativeDefs:
2529 return true;
2530 default:
2531 break;
2532 }
2533 return false;
2534 }
2535
2536 bool OutputFile::hasZeroForFileOffset(const ld::Section* sect)
2537 {
2538 switch ( sect->type() ) {
2539 case ld::Section::typeZeroFill:
2540 case ld::Section::typeTLVZeroFill:
2541 return _options.optimizeZeroFill();
2542 case ld::Section::typePageZero:
2543 case ld::Section::typeStack:
2544 case ld::Section::typeTentativeDefs:
2545 return true;
2546 default:
2547 break;
2548 }
2549 return false;
2550 }
2551
2552 void OutputFile::writeAtoms(ld::Internal& state, uint8_t* wholeBuffer)
2553 {
2554 // have each atom write itself
2555 uint64_t fileOffsetOfEndOfLastAtom = 0;
2556 uint64_t mhAddress = 0;
2557 bool lastAtomUsesNoOps = false;
2558 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
2559 ld::Internal::FinalSection* sect = *sit;
2560 if ( sect->type() == ld::Section::typeMachHeader )
2561 mhAddress = sect->address;
2562 if ( takesNoDiskSpace(sect) )
2563 continue;
2564 const bool sectionUsesNops = (sect->type() == ld::Section::typeCode);
2565 //fprintf(stderr, "file offset=0x%08llX, section %s\n", sect->fileOffset, sect->sectionName());
2566 std::vector<const ld::Atom*>& atoms = sect->atoms;
2567 bool lastAtomWasThumb = false;
2568 for (std::vector<const ld::Atom*>::iterator ait = atoms.begin(); ait != atoms.end(); ++ait) {
2569 const ld::Atom* atom = *ait;
2570 if ( atom->definition() == ld::Atom::definitionProxy )
2571 continue;
2572 try {
2573 uint64_t fileOffset = atom->finalAddress() - sect->address + sect->fileOffset;
2574 // check for alignment padding between atoms
2575 if ( (fileOffset != fileOffsetOfEndOfLastAtom) && lastAtomUsesNoOps ) {
2576 this->copyNoOps(&wholeBuffer[fileOffsetOfEndOfLastAtom], &wholeBuffer[fileOffset], lastAtomWasThumb);
2577 }
2578 // copy atom content
2579 atom->copyRawContent(&wholeBuffer[fileOffset]);
2580 // apply fix ups
2581 this->applyFixUps(state, mhAddress, atom, &wholeBuffer[fileOffset]);
2582 fileOffsetOfEndOfLastAtom = fileOffset+atom->size();
2583 lastAtomUsesNoOps = sectionUsesNops;
2584 lastAtomWasThumb = atom->isThumb();
2585 }
2586 catch (const char* msg) {
2587 if ( atom->file() != NULL )
2588 throwf("%s in '%s' from %s", msg, atom->name(), atom->file()->path());
2589 else
2590 throwf("%s in '%s'", msg, atom->name());
2591 }
2592 }
2593 }
2594
2595 if ( _options.verboseOptimizationHints() ) {
2596 //fprintf(stderr, "ADRP optimized away: %d\n", sAdrpNA);
2597 //fprintf(stderr, "ADRPs changed to NOPs: %d\n", sAdrpNoped);
2598 //fprintf(stderr, "ADRPs unchanged: %d\n", sAdrpNotNoped);
2599 }
2600 }
2601
2602
2603 void OutputFile::computeContentUUID(ld::Internal& state, uint8_t* wholeBuffer)
2604 {
2605 const bool log = false;
2606 if ( (_options.outputKind() != Options::kObjectFile) || state.someObjectFileHasDwarf ) {
2607 uint8_t digest[CC_MD5_DIGEST_LENGTH];
2608 uint32_t stabsStringsOffsetStart;
2609 uint32_t tabsStringsOffsetEnd;
2610 uint32_t stabsOffsetStart;
2611 uint32_t stabsOffsetEnd;
2612 if ( _symbolTableAtom->hasStabs(stabsStringsOffsetStart, tabsStringsOffsetEnd, stabsOffsetStart, stabsOffsetEnd) ) {
2613 // find two areas of file that are stabs info and should not contribute to checksum
2614 uint64_t stringPoolFileOffset = 0;
2615 uint64_t symbolTableFileOffset = 0;
2616 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
2617 ld::Internal::FinalSection* sect = *sit;
2618 if ( sect->type() == ld::Section::typeLinkEdit ) {
2619 if ( strcmp(sect->sectionName(), "__string_pool") == 0 )
2620 stringPoolFileOffset = sect->fileOffset;
2621 else if ( strcmp(sect->sectionName(), "__symbol_table") == 0 )
2622 symbolTableFileOffset = sect->fileOffset;
2623 }
2624 }
2625 uint64_t firstStabNlistFileOffset = symbolTableFileOffset + stabsOffsetStart;
2626 uint64_t lastStabNlistFileOffset = symbolTableFileOffset + stabsOffsetEnd;
2627 uint64_t firstStabStringFileOffset = stringPoolFileOffset + stabsStringsOffsetStart;
2628 uint64_t lastStabStringFileOffset = stringPoolFileOffset + tabsStringsOffsetEnd;
2629 if ( log ) fprintf(stderr, "firstStabNlistFileOffset=0x%08llX\n", firstStabNlistFileOffset);
2630 if ( log ) fprintf(stderr, "lastStabNlistFileOffset=0x%08llX\n", lastStabNlistFileOffset);
2631 if ( log ) fprintf(stderr, "firstStabStringFileOffset=0x%08llX\n", firstStabStringFileOffset);
2632 if ( log ) fprintf(stderr, "lastStabStringFileOffset=0x%08llX\n", lastStabStringFileOffset);
2633 assert(firstStabNlistFileOffset <= firstStabStringFileOffset);
2634
2635 CC_MD5_CTX md5state;
2636 CC_MD5_Init(&md5state);
2637 // checksum everything up to first stabs nlist
2638 if ( log ) fprintf(stderr, "checksum 0x%08X -> 0x%08llX\n", 0, firstStabNlistFileOffset);
2639 CC_MD5_Update(&md5state, &wholeBuffer[0], firstStabNlistFileOffset);
2640 // checkusm everything after last stabs nlist and up to first stabs string
2641 if ( log ) fprintf(stderr, "checksum 0x%08llX -> 0x%08llX\n", lastStabNlistFileOffset, firstStabStringFileOffset);
2642 CC_MD5_Update(&md5state, &wholeBuffer[lastStabNlistFileOffset], firstStabStringFileOffset-lastStabNlistFileOffset);
2643 // checksum everything after last stabs string to end of file
2644 if ( log ) fprintf(stderr, "checksum 0x%08llX -> 0x%08llX\n", lastStabStringFileOffset, _fileSize);
2645 CC_MD5_Update(&md5state, &wholeBuffer[lastStabStringFileOffset], _fileSize-lastStabStringFileOffset);
2646 CC_MD5_Final(digest, &md5state);
2647 if ( log ) fprintf(stderr, "uuid=%02X, %02X, %02X, %02X, %02X, %02X, %02X, %02X\n", digest[0], digest[1], digest[2],
2648 digest[3], digest[4], digest[5], digest[6], digest[7]);
2649 }
2650 else {
2651 CC_MD5(wholeBuffer, _fileSize, digest);
2652 }
2653 // <rdar://problem/6723729> LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
2654 digest[6] = ( digest[6] & 0x0F ) | ( 3 << 4 );
2655 digest[8] = ( digest[8] & 0x3F ) | 0x80;
2656 // update buffer with new UUID
2657 _headersAndLoadCommandAtom->setUUID(digest);
2658 _headersAndLoadCommandAtom->recopyUUIDCommand();
2659 }
2660 }
2661
2662
2663 void OutputFile::writeOutputFile(ld::Internal& state)
2664 {
2665 // for UNIX conformance, error if file exists and is not writable
2666 if ( (access(_options.outputFilePath(), F_OK) == 0) && (access(_options.outputFilePath(), W_OK) == -1) )
2667 throwf("can't write output file: %s", _options.outputFilePath());
2668
2669 mode_t permissions = 0777;
2670 if ( _options.outputKind() == Options::kObjectFile )
2671 permissions = 0666;
2672 mode_t umask = ::umask(0);
2673 ::umask(umask); // put back the original umask
2674 permissions &= ~umask;
2675 // Calling unlink first assures the file is gone so that open creates it with correct permissions
2676 // It also handles the case where __options.outputFilePath() file is not writable but its directory is
2677 // And it means we don't have to truncate the file when done writing (in case new is smaller than old)
2678 // Lastly, only delete existing file if it is a normal file (e.g. not /dev/null).
2679 struct stat stat_buf;
2680 bool outputIsRegularFile = false;
2681 bool outputIsMappableFile = false;
2682 if ( stat(_options.outputFilePath(), &stat_buf) != -1 ) {
2683 if (stat_buf.st_mode & S_IFREG) {
2684 outputIsRegularFile = true;
2685 // <rdar://problem/12264302> Don't use mmap on non-hfs volumes
2686 struct statfs fsInfo;
2687 if ( statfs(_options.outputFilePath(), &fsInfo) != -1 ) {
2688 if ( strcmp(fsInfo.f_fstypename, "hfs") == 0) {
2689 (void)unlink(_options.outputFilePath());
2690 outputIsMappableFile = true;
2691 }
2692 }
2693 else {
2694 outputIsMappableFile = false;
2695 }
2696 }
2697 else {
2698 outputIsRegularFile = false;
2699 }
2700 }
2701 else {
2702 // special files (pipes, devices, etc) must already exist
2703 outputIsRegularFile = true;
2704 // output file does not exist yet
2705 char dirPath[PATH_MAX];
2706 strcpy(dirPath, _options.outputFilePath());
2707 char* end = strrchr(dirPath, '/');
2708 if ( end != NULL ) {
2709 end[1] = '\0';
2710 struct statfs fsInfo;
2711 if ( statfs(dirPath, &fsInfo) != -1 ) {
2712 if ( strcmp(fsInfo.f_fstypename, "hfs") == 0) {
2713 outputIsMappableFile = true;
2714 }
2715 }
2716 }
2717 }
2718
2719 //fprintf(stderr, "outputIsMappableFile=%d, outputIsRegularFile=%d, path=%s\n", outputIsMappableFile, outputIsRegularFile, _options.outputFilePath());
2720
2721 int fd;
2722 // Construct a temporary path of the form {outputFilePath}.ld_XXXXXX
2723 const char filenameTemplate[] = ".ld_XXXXXX";
2724 char tmpOutput[PATH_MAX];
2725 uint8_t *wholeBuffer;
2726 if ( outputIsRegularFile && outputIsMappableFile ) {
2727 strcpy(tmpOutput, _options.outputFilePath());
2728 // If the path is too long to add a suffix for a temporary name then
2729 // just fall back to using the output path.
2730 if (strlen(tmpOutput)+strlen(filenameTemplate) < PATH_MAX) {
2731 strcat(tmpOutput, filenameTemplate);
2732 fd = mkstemp(tmpOutput);
2733 }
2734 else {
2735 fd = open(tmpOutput, O_RDWR|O_CREAT, permissions);
2736 }
2737 if ( fd == -1 )
2738 throwf("can't open output file for writing '%s', errno=%d", tmpOutput, errno);
2739 if ( ftruncate(fd, _fileSize) == -1 ) {
2740 int err = errno;
2741 unlink(tmpOutput);
2742 if ( err == ENOSPC )
2743 throwf("not enough disk space for writing '%s'", _options.outputFilePath());
2744 else
2745 throwf("can't grow file for writing '%s', errno=%d", _options.outputFilePath(), err);
2746 }
2747
2748 wholeBuffer = (uint8_t *)mmap(NULL, _fileSize, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0);
2749 if ( wholeBuffer == MAP_FAILED )
2750 throwf("can't create buffer of %llu bytes for output", _fileSize);
2751 }
2752 else {
2753 if ( outputIsRegularFile )
2754 fd = open(_options.outputFilePath(), O_RDWR|O_CREAT, permissions);
2755 else
2756 fd = open(_options.outputFilePath(), O_WRONLY);
2757 if ( fd == -1 )
2758 throwf("can't open output file for writing: %s, errno=%d", _options.outputFilePath(), errno);
2759 // try to allocate buffer for entire output file content
2760 wholeBuffer = (uint8_t*)calloc(_fileSize, 1);
2761 if ( wholeBuffer == NULL )
2762 throwf("can't create buffer of %llu bytes for output", _fileSize);
2763 }
2764
2765 if ( _options.UUIDMode() == Options::kUUIDRandom ) {
2766 uint8_t bits[16];
2767 ::uuid_generate_random(bits);
2768 _headersAndLoadCommandAtom->setUUID(bits);
2769 }
2770
2771 writeAtoms(state, wholeBuffer);
2772
2773 // compute UUID
2774 if ( _options.UUIDMode() == Options::kUUIDContent )
2775 computeContentUUID(state, wholeBuffer);
2776
2777 if ( outputIsRegularFile && outputIsMappableFile ) {
2778 if ( ::chmod(tmpOutput, permissions) == -1 ) {
2779 unlink(tmpOutput);
2780 throwf("can't set permissions on output file: %s, errno=%d", tmpOutput, errno);
2781 }
2782 if ( ::rename(tmpOutput, _options.outputFilePath()) == -1 && strcmp(tmpOutput, _options.outputFilePath()) != 0) {
2783 unlink(tmpOutput);
2784 throwf("can't move output file in place, errno=%d", errno);
2785 }
2786 }
2787 else {
2788 if ( ::write(fd, wholeBuffer, _fileSize) == -1 ) {
2789 throwf("can't write to output file: %s, errno=%d", _options.outputFilePath(), errno);
2790 }
2791 ::close(fd);
2792 // <rdar://problem/13118223> NFS: iOS incremental builds in Xcode 4.6 fail with codesign error
2793 // NFS seems to pad the end of the file sometimes. Calling trunc seems to correct it...
2794 ::truncate(_options.outputFilePath(), _fileSize);
2795 }
2796 }
2797
2798 struct AtomByNameSorter
2799 {
2800 bool operator()(const ld::Atom* left, const ld::Atom* right)
2801 {
2802 return (strcmp(left->name(), right->name()) < 0);
2803 }
2804 };
2805
2806 class NotInSet
2807 {
2808 public:
2809 NotInSet(const std::set<const ld::Atom*>& theSet) : _set(theSet) {}
2810
2811 bool operator()(const ld::Atom* atom) const {
2812 return ( _set.count(atom) == 0 );
2813 }
2814 private:
2815 const std::set<const ld::Atom*>& _set;
2816 };
2817
2818
// Builds the symbol table contents: walks every atom, assigns mach-o section
// indexes, normalizes each atom's symbol-table inclusion and scope (mutating
// atoms via const_cast), and partitions atoms into _localAtoms,
// _exportedAtoms, and _importedAtoms.  The exported and imported lists are
// sorted by name at the end.
void OutputFile::buildSymbolTable(ld::Internal& state)
{
	unsigned int machoSectionIndex = 0;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		// hidden sections and tentative-def sections are not emitted, so they get no section number
		bool setMachoSectionIndex = !sect->isSectionHidden() && (sect->type() != ld::Section::typeTentativeDefs);
		if ( setMachoSectionIndex )
			++machoSectionIndex;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			if ( setMachoSectionIndex )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex);
			else if ( sect->type() == ld::Section::typeMachHeader )
				(const_cast<ld::Atom*>(atom))->setMachoSection(1); // __mh_execute_header is not in any section but needs n_sect==1
			else if ( sect->type() == ld::Section::typeLastSection )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex); // use section index of previous section
			else if ( sect->type() == ld::Section::typeFirstSection )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex+1); // use section index of next section

			// in -r mode, clarify symbolTableNotInFinalLinkedImages
			if ( _options.outputKind() == Options::kObjectFile ) {
				if ( (_options.architecture() == CPU_TYPE_X86_64) || (_options.architecture() == CPU_TYPE_ARM64) ) {
					// x86_64 .o files need labels on anonymous literal strings
					if ( (sect->type() == ld::Section::typeCString) && (atom->combine() == ld::Atom::combineByNameAndContent) ) {
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
						_localAtoms.push_back(atom);
						continue;
					}
				}
				// CFI (eh-frame) labels are kept or stripped per -no_eh_labels
				if ( sect->type() == ld::Section::typeCFI ) {
					if ( _options.removeEHLabels() )
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					else
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
				}
				// alias atoms in -r output are emitted as undefined (imported) symbols
				else if ( sect->type() == ld::Section::typeTempAlias ) {
					assert(_options.outputKind() == Options::kObjectFile);
					_importedAtoms.push_back(atom);
					continue;
				}
				// in a .o file "not in final linked image" still means "in this symbol table"
				if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages )
					(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
			}

			// TEMP work around until <rdar://problem/7702923> goes in
			if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip)
				&& (atom->scope() == ld::Atom::scopeLinkageUnit)
				&& (_options.outputKind() == Options::kDynamicLibrary) ) {
				(const_cast<ld::Atom*>(atom))->setScope(ld::Atom::scopeGlobal);
			}

			// <rdar://problem/6783167> support auto hidden weak symbols: .weak_def_can_be_hidden
			if ( atom->autoHide() && (_options.outputKind() != Options::kObjectFile) ) {
				// adding auto-hide symbol to .exp file should keep it global
				if ( !_options.hasExportMaskList() || !_options.shouldExport(atom->name()) )
					(const_cast<ld::Atom*>(atom))->setScope(ld::Atom::scopeLinkageUnit);
			}

			// <rdar://problem/8626058> ld should consistently warn when resolvers are not exported
			if ( (atom->contentType() == ld::Atom::typeResolver) && (atom->scope() == ld::Atom::scopeLinkageUnit) )
				warning("resolver functions should be external, but '%s' is hidden", atom->name());

			if ( sect->type() == ld::Section::typeImportProxies ) {
				// weak external proxies force the MH_WEAK/BINDS_TO_WEAK flags
				if ( atom->combine() == ld::Atom::combineByName )
					this->usesWeakExternalSymbols = true;
				// alias proxy is a re-export with a name change, don't import changed name
				if ( ! atom->isAlias() )
					_importedAtoms.push_back(atom);
				// scope of proxies are usually linkage unit, so done
				// if scope is global, we need to re-export it too
				if ( atom->scope() == ld::Atom::scopeGlobal )
					_exportedAtoms.push_back(atom);
				continue;
			}
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages ) {
				assert(_options.outputKind() != Options::kObjectFile);
				continue; // don't add to symbol table
			}
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotIn ) {
				continue; // don't add to symbol table
			}
			if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel)
				&& (_options.outputKind() != Options::kObjectFile) ) {
				continue; // don't add to symbol table
			}

			// tentative definitions in -r output are either made real (-d) or left undefined
			if ( (atom->definition() == ld::Atom::definitionTentative) && (_options.outputKind() == Options::kObjectFile) ) {
				if ( _options.makeTentativeDefinitionsReal() ) {
					// -r -d turns tentative defintions into real def
					_exportedAtoms.push_back(atom);
				}
				else {
					// in mach-o object files tentative defintions are stored like undefined symbols
					_importedAtoms.push_back(atom);
				}
				continue;
			}

			// partition the remaining atoms by scope
			switch ( atom->scope() ) {
				case ld::Atom::scopeTranslationUnit:
					if ( _options.keepLocalSymbol(atom->name()) ) {
						_localAtoms.push_back(atom);
					}
					else {
						if ( _options.outputKind() == Options::kObjectFile ) {
							// in -r output, stripped locals become auto-strip labels instead of disappearing
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel);
							_localAtoms.push_back(atom);
						}
						else
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					}
					break;
				case ld::Atom::scopeGlobal:
					_exportedAtoms.push_back(atom);
					break;
				case ld::Atom::scopeLinkageUnit:
					if ( _options.outputKind() == Options::kObjectFile ) {
						if ( _options.keepPrivateExterns() ) {
							_exportedAtoms.push_back(atom);
						}
						else if ( _options.keepLocalSymbol(atom->name()) ) {
							_localAtoms.push_back(atom);
						}
						else {
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel);
							_localAtoms.push_back(atom);
						}
					}
					else {
						if ( _options.keepLocalSymbol(atom->name()) )
							_localAtoms.push_back(atom);
						// <rdar://problem/5804214> ld should never have a symbol in the non-lazy indirect symbol table with index 0
						// this works by making __mh_execute_header be a local symbol which takes symbol index 0
						else if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip) && !_options.makeCompressedDyldInfo() )
							_localAtoms.push_back(atom);
						else
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					}
					break;
			}
		}
	}

	// <rdar://problem/6978069> ld adds undefined symbol from .exp file to binary
	if ( (_options.outputKind() == Options::kKextBundle) && _options.hasExportRestrictList() ) {
		// search for referenced undefines
		std::set<const ld::Atom*> referencedProxyAtoms;
		for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
			ld::Internal::FinalSection* sect = *sit;
			for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
				const ld::Atom* atom = *ait;
				for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
					switch ( fit->binding ) {
						case ld::Fixup::bindingsIndirectlyBound:
							referencedProxyAtoms.insert(state.indirectBindingTable[fit->u.bindingIndex]);
							break;
						case ld::Fixup::bindingDirectlyBound:
							referencedProxyAtoms.insert(fit->u.target);
							break;
						default:
							break;
					}
				}
			}
		}
		// remove any unreferenced _importedAtoms
		_importedAtoms.erase(std::remove_if(_importedAtoms.begin(), _importedAtoms.end(), NotInSet(referencedProxyAtoms)), _importedAtoms.end());
	}

	// sort by name
	std::sort(_exportedAtoms.begin(), _exportedAtoms.end(), AtomByNameSorter());
	std::sort(_importedAtoms.begin(), _importedAtoms.end(), AtomByNameSorter());
}
2992
// Adds the LINKEDIT atoms for -preload output.  For historical reasons
// -preload orders LINKEDIT content differently than other output kinds:
// only local/external relocations, the indirect symbol table, the symbol
// table, and the string pool are emitted (no dyld info, function starts,
// data-in-code, etc.).  Each case below is the same sequence instantiated
// for a different architecture's mach-o template parameter.
void OutputFile::addPreloadLinkEdit(ld::Internal& state)
{
	switch ( _options.architecture() ) {
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<x86>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<x86>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<x86>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_x86_64
		// same sequence as i386, instantiated for x86_64
		case CPU_TYPE_X86_64:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<x86_64>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<x86_64>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86_64>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<x86_64>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_arm_any
		// same sequence, instantiated for arm
		case CPU_TYPE_ARM:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<arm>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<arm>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<arm>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_arm64
		// same sequence, instantiated for arm64
		case CPU_TYPE_ARM64:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<arm64>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<arm64>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm64>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<arm64>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
		default:
			throw "-preload not supported";
	}

}
3081
3082
3083 void OutputFile::addLinkEdit(ld::Internal& state)
3084 {
3085 // for historical reasons, -preload orders LINKEDIT content differently
3086 if ( _options.outputKind() == Options::kPreload )
3087 return addPreloadLinkEdit(state);
3088
3089 switch ( _options.architecture() ) {
3090 #if SUPPORT_ARCH_i386
3091 case CPU_TYPE_I386:
3092 if ( _hasSectionRelocations ) {
3093 _sectionsRelocationsAtom = new SectionRelocationsAtom<x86>(_options, state, *this);
3094 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3095 }
3096 if ( _hasDyldInfo ) {
3097 _rebasingInfoAtom = new RebaseInfoAtom<x86>(_options, state, *this);
3098 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3099
3100 _bindingInfoAtom = new BindingInfoAtom<x86>(_options, state, *this);
3101 bindingSection = state.addAtom(*_bindingInfoAtom);
3102
3103 _weakBindingInfoAtom = new WeakBindingInfoAtom<x86>(_options, state, *this);
3104 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3105
3106 _lazyBindingInfoAtom = new LazyBindingInfoAtom<x86>(_options, state, *this);
3107 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3108
3109 _exportInfoAtom = new ExportInfoAtom<x86>(_options, state, *this);
3110 exportSection = state.addAtom(*_exportInfoAtom);
3111 }
3112 if ( _hasLocalRelocations ) {
3113 _localRelocsAtom = new LocalRelocationsAtom<x86>(_options, state, *this);
3114 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3115 }
3116 if ( _hasSplitSegInfo ) {
3117 _splitSegInfoAtom = new SplitSegInfoAtom<x86>(_options, state, *this);
3118 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3119 }
3120 if ( _hasFunctionStartsInfo ) {
3121 _functionStartsAtom = new FunctionStartsAtom<x86>(_options, state, *this);
3122 functionStartsSection = state.addAtom(*_functionStartsAtom);
3123 }
3124 if ( _hasDataInCodeInfo ) {
3125 _dataInCodeAtom = new DataInCodeAtom<x86>(_options, state, *this);
3126 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3127 }
3128 if ( _hasOptimizationHints ) {
3129 _optimizationHintsAtom = new OptimizationHintsAtom<x86>(_options, state, *this);
3130 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3131 }
3132 if ( _hasDependentDRInfo ) {
3133 _dependentDRInfoAtom = new DependentDRAtom<x86>(_options, state, *this);
3134 dependentDRsSection = state.addAtom(*_dependentDRInfoAtom);
3135 }
3136 if ( _hasSymbolTable ) {
3137 _symbolTableAtom = new SymbolTableAtom<x86>(_options, state, *this);
3138 symbolTableSection = state.addAtom(*_symbolTableAtom);
3139 }
3140 if ( _hasExternalRelocations ) {
3141 _externalRelocsAtom = new ExternalRelocationsAtom<x86>(_options, state, *this);
3142 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3143 }
3144 if ( _hasSymbolTable ) {
3145 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86>(_options, state, *this);
3146 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3147 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3148 stringPoolSection = state.addAtom(*_stringPoolAtom);
3149 }
3150 break;
3151 #endif
3152 #if SUPPORT_ARCH_x86_64
3153 case CPU_TYPE_X86_64:
3154 if ( _hasSectionRelocations ) {
3155 _sectionsRelocationsAtom = new SectionRelocationsAtom<x86_64>(_options, state, *this);
3156 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3157 }
3158 if ( _hasDyldInfo ) {
3159 _rebasingInfoAtom = new RebaseInfoAtom<x86_64>(_options, state, *this);
3160 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3161
3162 _bindingInfoAtom = new BindingInfoAtom<x86_64>(_options, state, *this);
3163 bindingSection = state.addAtom(*_bindingInfoAtom);
3164
3165 _weakBindingInfoAtom = new WeakBindingInfoAtom<x86_64>(_options, state, *this);
3166 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3167
3168 _lazyBindingInfoAtom = new LazyBindingInfoAtom<x86_64>(_options, state, *this);
3169 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3170
3171 _exportInfoAtom = new ExportInfoAtom<x86_64>(_options, state, *this);
3172 exportSection = state.addAtom(*_exportInfoAtom);
3173 }
3174 if ( _hasLocalRelocations ) {
3175 _localRelocsAtom = new LocalRelocationsAtom<x86_64>(_options, state, *this);
3176 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3177 }
3178 if ( _hasSplitSegInfo ) {
3179 _splitSegInfoAtom = new SplitSegInfoAtom<x86_64>(_options, state, *this);
3180 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3181 }
3182 if ( _hasFunctionStartsInfo ) {
3183 _functionStartsAtom = new FunctionStartsAtom<x86_64>(_options, state, *this);
3184 functionStartsSection = state.addAtom(*_functionStartsAtom);
3185 }
3186 if ( _hasDataInCodeInfo ) {
3187 _dataInCodeAtom = new DataInCodeAtom<x86_64>(_options, state, *this);
3188 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3189 }
3190 if ( _hasOptimizationHints ) {
3191 _optimizationHintsAtom = new OptimizationHintsAtom<x86_64>(_options, state, *this);
3192 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3193 }
3194 if ( _hasDependentDRInfo ) {
3195 _dependentDRInfoAtom = new DependentDRAtom<x86_64>(_options, state, *this);
3196 dependentDRsSection = state.addAtom(*_dependentDRInfoAtom);
3197 }
3198 if ( _hasSymbolTable ) {
3199 _symbolTableAtom = new SymbolTableAtom<x86_64>(_options, state, *this);
3200 symbolTableSection = state.addAtom(*_symbolTableAtom);
3201 }
3202 if ( _hasExternalRelocations ) {
3203 _externalRelocsAtom = new ExternalRelocationsAtom<x86_64>(_options, state, *this);
3204 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3205 }
3206 if ( _hasSymbolTable ) {
3207 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86_64>(_options, state, *this);
3208 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3209 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 8);
3210 stringPoolSection = state.addAtom(*_stringPoolAtom);
3211 }
3212 break;
3213 #endif
3214 #if SUPPORT_ARCH_arm_any
3215 case CPU_TYPE_ARM:
3216 if ( _hasSectionRelocations ) {
3217 _sectionsRelocationsAtom = new SectionRelocationsAtom<arm>(_options, state, *this);
3218 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3219 }
3220 if ( _hasDyldInfo ) {
3221 _rebasingInfoAtom = new RebaseInfoAtom<arm>(_options, state, *this);
3222 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3223
3224 _bindingInfoAtom = new BindingInfoAtom<arm>(_options, state, *this);
3225 bindingSection = state.addAtom(*_bindingInfoAtom);
3226
3227 _weakBindingInfoAtom = new WeakBindingInfoAtom<arm>(_options, state, *this);
3228 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3229
3230 _lazyBindingInfoAtom = new LazyBindingInfoAtom<arm>(_options, state, *this);
3231 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3232
3233 _exportInfoAtom = new ExportInfoAtom<arm>(_options, state, *this);
3234 exportSection = state.addAtom(*_exportInfoAtom);
3235 }
3236 if ( _hasLocalRelocations ) {
3237 _localRelocsAtom = new LocalRelocationsAtom<arm>(_options, state, *this);
3238 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3239 }
3240 if ( _hasSplitSegInfo ) {
3241 _splitSegInfoAtom = new SplitSegInfoAtom<arm>(_options, state, *this);
3242 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3243 }
3244 if ( _hasFunctionStartsInfo ) {
3245 _functionStartsAtom = new FunctionStartsAtom<arm>(_options, state, *this);
3246 functionStartsSection = state.addAtom(*_functionStartsAtom);
3247 }
3248 if ( _hasDataInCodeInfo ) {
3249 _dataInCodeAtom = new DataInCodeAtom<arm>(_options, state, *this);
3250 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3251 }
3252 if ( _hasOptimizationHints ) {
3253 _optimizationHintsAtom = new OptimizationHintsAtom<arm>(_options, state, *this);
3254 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3255 }
3256 if ( _hasDependentDRInfo ) {
3257 _dependentDRInfoAtom = new DependentDRAtom<arm>(_options, state, *this);
3258 dependentDRsSection = state.addAtom(*_dependentDRInfoAtom);
3259 }
3260 if ( _hasSymbolTable ) {
3261 _symbolTableAtom = new SymbolTableAtom<arm>(_options, state, *this);
3262 symbolTableSection = state.addAtom(*_symbolTableAtom);
3263 }
3264 if ( _hasExternalRelocations ) {
3265 _externalRelocsAtom = new ExternalRelocationsAtom<arm>(_options, state, *this);
3266 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3267 }
3268 if ( _hasSymbolTable ) {
3269 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm>(_options, state, *this);
3270 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3271 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3272 stringPoolSection = state.addAtom(*_stringPoolAtom);
3273 }
3274 break;
3275 #endif
3276 #if SUPPORT_ARCH_arm64
3277 case CPU_TYPE_ARM64:
3278 if ( _hasSectionRelocations ) {
3279 _sectionsRelocationsAtom = new SectionRelocationsAtom<arm64>(_options, state, *this);
3280 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3281 }
3282 if ( _hasDyldInfo ) {
3283 _rebasingInfoAtom = new RebaseInfoAtom<arm64>(_options, state, *this);
3284 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3285
3286 _bindingInfoAtom = new BindingInfoAtom<arm64>(_options, state, *this);
3287 bindingSection = state.addAtom(*_bindingInfoAtom);
3288
3289 _weakBindingInfoAtom = new WeakBindingInfoAtom<arm64>(_options, state, *this);
3290 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3291
3292 _lazyBindingInfoAtom = new LazyBindingInfoAtom<arm64>(_options, state, *this);
3293 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3294
3295 _exportInfoAtom = new ExportInfoAtom<arm64>(_options, state, *this);
3296 exportSection = state.addAtom(*_exportInfoAtom);
3297 }
3298 if ( _hasLocalRelocations ) {
3299 _localRelocsAtom = new LocalRelocationsAtom<arm64>(_options, state, *this);
3300 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3301 }
3302 if ( _hasSplitSegInfo ) {
3303 _splitSegInfoAtom = new SplitSegInfoAtom<arm64>(_options, state, *this);
3304 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3305 }
3306 if ( _hasFunctionStartsInfo ) {
3307 _functionStartsAtom = new FunctionStartsAtom<arm64>(_options, state, *this);
3308 functionStartsSection = state.addAtom(*_functionStartsAtom);
3309 }
3310 if ( _hasDataInCodeInfo ) {
3311 _dataInCodeAtom = new DataInCodeAtom<arm64>(_options, state, *this);
3312 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3313 }
3314 if ( _hasOptimizationHints ) {
3315 _optimizationHintsAtom = new OptimizationHintsAtom<arm64>(_options, state, *this);
3316 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3317 }
3318 if ( _hasDependentDRInfo ) {
3319 _dependentDRInfoAtom = new DependentDRAtom<arm64>(_options, state, *this);
3320 dependentDRsSection = state.addAtom(*_dependentDRInfoAtom);
3321 }
3322 if ( _hasSymbolTable ) {
3323 _symbolTableAtom = new SymbolTableAtom<arm64>(_options, state, *this);
3324 symbolTableSection = state.addAtom(*_symbolTableAtom);
3325 }
3326 if ( _hasExternalRelocations ) {
3327 _externalRelocsAtom = new ExternalRelocationsAtom<arm64>(_options, state, *this);
3328 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3329 }
3330 if ( _hasSymbolTable ) {
3331 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm64>(_options, state, *this);
3332 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3333 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3334 stringPoolSection = state.addAtom(*_stringPoolAtom);
3335 }
3336 break;
3337 #endif
3338 default:
3339 throw "unknown architecture";
3340 }
3341 }
3342
void OutputFile::addLoadCommands(ld::Internal& state)
{
	// Creates the atom that holds the mach_header and all load commands and
	// registers it with the state, using the HeaderAndLoadCommandsAtom
	// template instantiation that matches the target architecture.
	switch ( _options.architecture() ) {
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			_headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<x86_64>(_options, state, *this);
			headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
			break;
#endif
#if SUPPORT_ARCH_arm_any
		case CPU_TYPE_ARM:
			_headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<arm>(_options, state, *this);
			headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
			break;
#endif
#if SUPPORT_ARCH_arm64
		case CPU_TYPE_ARM64:
			_headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<arm64>(_options, state, *this);
			headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
			break;
#endif
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			_headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<x86>(_options, state, *this);
			headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
			break;
#endif
		default:
			throw "unknown architecture";
	}
}
3374
3375 uint32_t OutputFile::dylibCount()
3376 {
3377 return _dylibsToLoad.size();
3378 }
3379
3380 const ld::dylib::File* OutputFile::dylibByOrdinal(unsigned int ordinal)
3381 {
3382 assert( ordinal > 0 );
3383 assert( ordinal <= _dylibsToLoad.size() );
3384 return _dylibsToLoad[ordinal-1];
3385 }
3386
3387 bool OutputFile::hasOrdinalForInstallPath(const char* path, int* ordinal)
3388 {
3389 for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3390 const char* installPath = it->first->installPath();
3391 if ( (installPath != NULL) && (strcmp(path, installPath) == 0) ) {
3392 *ordinal = it->second;
3393 return true;
3394 }
3395 }
3396 return false;
3397 }
3398
uint32_t OutputFile::dylibToOrdinal(const ld::dylib::File* dylib)
{
	// Returns the ordinal assigned to 'dylib' by buildDylibOrdinalMapping().
	// NOTE(review): std::map::operator[] default-inserts ordinal 0 when
	// 'dylib' was never mapped rather than failing — callers are presumed to
	// pass only dylibs seen during buildDylibOrdinalMapping(); confirm.
	return _dylibToOrdinal[dylib];
}
3403
3404
3405 void OutputFile::buildDylibOrdinalMapping(ld::Internal& state)
3406 {
3407 // count non-public re-exported dylibs
3408 unsigned int nonPublicReExportCount = 0;
3409 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3410 ld::dylib::File* aDylib = *it;
3411 if ( aDylib->willBeReExported() && ! aDylib->hasPublicInstallName() )
3412 ++nonPublicReExportCount;
3413 }
3414
3415 // look at each dylib supplied in state
3416 bool hasReExports = false;
3417 bool haveLazyDylibs = false;
3418 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3419 ld::dylib::File* aDylib = *it;
3420 int ordinal;
3421 if ( aDylib == state.bundleLoader ) {
3422 _dylibToOrdinal[aDylib] = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE;
3423 }
3424 else if ( this->hasOrdinalForInstallPath(aDylib->installPath(), &ordinal) ) {
3425 // already have a dylib with that install path, map all uses to that ordinal
3426 _dylibToOrdinal[aDylib] = ordinal;
3427 }
3428 else if ( aDylib->willBeLazyLoadedDylib() ) {
3429 // all lazy dylib need to be at end of ordinals
3430 haveLazyDylibs = true;
3431 }
3432 else if ( aDylib->willBeReExported() && ! aDylib->hasPublicInstallName() && (nonPublicReExportCount >= 2) ) {
3433 _dylibsToLoad.push_back(aDylib);
3434 _dylibToOrdinal[aDylib] = BIND_SPECIAL_DYLIB_SELF;
3435 }
3436 else {
3437 // first time this install path seen, create new ordinal
3438 _dylibsToLoad.push_back(aDylib);
3439 _dylibToOrdinal[aDylib] = _dylibsToLoad.size();
3440 }
3441 if ( aDylib->explicitlyLinked() && aDylib->willBeReExported() )
3442 hasReExports = true;
3443 }
3444 if ( haveLazyDylibs ) {
3445 // second pass to determine ordinals for lazy loaded dylibs
3446 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3447 ld::dylib::File* aDylib = *it;
3448 if ( aDylib->willBeLazyLoadedDylib() ) {
3449 int ordinal;
3450 if ( this->hasOrdinalForInstallPath(aDylib->installPath(), &ordinal) ) {
3451 // already have a dylib with that install path, map all uses to that ordinal
3452 _dylibToOrdinal[aDylib] = ordinal;
3453 }
3454 else {
3455 // first time this install path seen, create new ordinal
3456 _dylibsToLoad.push_back(aDylib);
3457 _dylibToOrdinal[aDylib] = _dylibsToLoad.size();
3458 }
3459 }
3460 }
3461 }
3462 _noReExportedDylibs = !hasReExports;
3463 //fprintf(stderr, "dylibs:\n");
3464 //for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3465 // fprintf(stderr, " %p ord=%u, install_name=%s\n",it->first, it->second, it->first->installPath());
3466 //}
3467 }
3468
uint32_t OutputFile::lazyBindingInfoOffsetForLazyPointerAddress(uint64_t lpAddress)
{
	// Returns the lazy-binding-info offset previously recorded for the lazy
	// pointer at 'lpAddress' via setLazyBindingInfoOffset().
	// NOTE(review): operator[] default-inserts 0 for an unregistered address
	// instead of failing — callers presumably only query registered lazy
	// pointers; verify against callers.
	return _lazyPointerAddressToInfoOffset[lpAddress];
}
3473
void OutputFile::setLazyBindingInfoOffset(uint64_t lpAddress, uint32_t lpInfoOffset)
{
	// Records (or overwrites) the lazy-binding-info offset for the lazy
	// pointer located at 'lpAddress'; read back via
	// lazyBindingInfoOffsetForLazyPointerAddress().
	_lazyPointerAddressToInfoOffset[lpAddress] = lpInfoOffset;
}
3478
3479 int OutputFile::compressedOrdinalForAtom(const ld::Atom* target)
3480 {
3481 // flat namespace images use zero for all ordinals
3482 if ( _options.nameSpace() != Options::kTwoLevelNameSpace )
3483 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3484
3485 // handle -interposable
3486 if ( target->definition() == ld::Atom::definitionRegular )
3487 return BIND_SPECIAL_DYLIB_SELF;
3488
3489 // regular ordinal
3490 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
3491 if ( dylib != NULL ) {
3492 std::map<const ld::dylib::File*, int>::iterator pos = _dylibToOrdinal.find(dylib);
3493 if ( pos != _dylibToOrdinal.end() )
3494 return pos->second;
3495 assert(0 && "dylib not assigned ordinal");
3496 }
3497
3498 // handle undefined dynamic_lookup
3499 if ( _options.undefinedTreatment() == Options::kUndefinedDynamicLookup )
3500 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3501
3502 // handle -U _foo
3503 if ( _options.allowedUndefined(target->name()) )
3504 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3505
3506 throw "can't find ordinal for imported symbol";
3507 }
3508
3509
bool OutputFile::isPcRelStore(ld::Fixup::Kind kind)
{
	// Returns true when 'kind' stores a PC-relative value, i.e. the written
	// bits encode a displacement from the instruction rather than an
	// absolute address.  Used to decide whether a fixup needs rebase/bind
	// info (PC-relative stores within the linkage unit do not).
	switch ( kind ) {
		case ld::Fixup::kindStoreX86BranchPCRel8:
		case ld::Fixup::kindStoreX86BranchPCRel32:
		case ld::Fixup::kindStoreX86PCRel8:
		case ld::Fixup::kindStoreX86PCRel16:
		case ld::Fixup::kindStoreX86PCRel32:
		case ld::Fixup::kindStoreX86PCRel32_1:
		case ld::Fixup::kindStoreX86PCRel32_2:
		case ld::Fixup::kindStoreX86PCRel32_4:
		case ld::Fixup::kindStoreX86PCRel32GOTLoad:
		case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
		case ld::Fixup::kindStoreX86PCRel32GOT:
		case ld::Fixup::kindStoreX86PCRel32TLVLoad:
		case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
		case ld::Fixup::kindStoreARMBranch24:
		case ld::Fixup::kindStoreThumbBranch22:
		case ld::Fixup::kindStoreARMLoad12:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressARMBranch24:
		case ld::Fixup::kindStoreTargetAddressThumbBranch22:
		case ld::Fixup::kindStoreTargetAddressARMLoad12:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreARM64Page21:
		case ld::Fixup::kindStoreARM64PageOff12:
		case ld::Fixup::kindStoreARM64GOTLoadPage21:
		case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
		case ld::Fixup::kindStoreARM64GOTLeaPage21:
		case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
		case ld::Fixup::kindStoreARM64PCRelToGOT:
		case ld::Fixup::kindStoreTargetAddressARM64Page21:
		case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
#endif
			return true;
		case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreTargetAddressARM64Branch26:
#endif
			// branch fixups in kexts get relocations, so they are not
			// treated as plain pc-rel stores in that output kind
			return (_options.outputKind() != Options::kKextBundle);
		default:
			break;
	}
	return false;
}
3563
bool OutputFile::isStore(ld::Fixup::Kind kind)
{
	// Returns true when 'kind' actually writes bits into the output.
	// The enumerated cases are bookkeeping-only fixups (grouping, target
	// selection, addend accumulation); everything else is a store.
	switch ( kind ) {
		case ld::Fixup::kindNone:
		case ld::Fixup::kindNoneFollowOn:
		case ld::Fixup::kindNoneGroupSubordinate:
		case ld::Fixup::kindNoneGroupSubordinateFDE:
		case ld::Fixup::kindNoneGroupSubordinateLSDA:
		case ld::Fixup::kindNoneGroupSubordinatePersonality:
		case ld::Fixup::kindSetTargetAddress:
		case ld::Fixup::kindSubtractTargetAddress:
		case ld::Fixup::kindAddAddend:
		case ld::Fixup::kindSubtractAddend:
		case ld::Fixup::kindSetTargetImageOffset:
		case ld::Fixup::kindSetTargetSectionOffset:
			return false;
		default:
			break;
	}
	return true;
}
3585
3586
bool OutputFile::setsTarget(ld::Fixup::Kind kind)
{
	// Returns true when 'kind' establishes the target atom for its fixup
	// cluster (either a pure set-target fixup or a combined
	// set-target-and-store fixup).
	switch ( kind ) {
		case ld::Fixup::kindSetTargetAddress:
		case ld::Fixup::kindLazyTarget:
		case ld::Fixup::kindStoreTargetAddressLittleEndian32:
		case ld::Fixup::kindStoreTargetAddressLittleEndian64:
		case ld::Fixup::kindStoreTargetAddressBigEndian32:
		case ld::Fixup::kindStoreTargetAddressBigEndian64:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32:
		case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
		case ld::Fixup::kindStoreTargetAddressARMBranch24:
		case ld::Fixup::kindStoreTargetAddressThumbBranch22:
		case ld::Fixup::kindStoreTargetAddressARMLoad12:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreTargetAddressARM64Branch26:
		case ld::Fixup::kindStoreTargetAddressARM64Page21:
		case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
#endif
			return true;
		case ld::Fixup::kindStoreX86DtraceCallSiteNop:
		case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear:
		case ld::Fixup::kindStoreARMDtraceCallSiteNop:
		case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear:
		case ld::Fixup::kindStoreARM64DtraceCallSiteNop:
		case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear:
		case ld::Fixup::kindStoreThumbDtraceCallSiteNop:
		case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear:
			// dtrace probe sites only carry a target when emitting a .o
			// (so the probe relocations survive into the object file)
			return (_options.outputKind() == Options::kObjectFile);
		default:
			break;
	}
	return false;
}
3630
bool OutputFile::isPointerToTarget(ld::Fixup::Kind kind)
{
	// Returns true when 'kind' makes the stored value the absolute address
	// of its target (as opposed to pc-relative or derived encodings).
	switch ( kind ) {
		case ld::Fixup::kindSetTargetAddress:
		case ld::Fixup::kindStoreTargetAddressLittleEndian32:
		case ld::Fixup::kindStoreTargetAddressLittleEndian64:
		case ld::Fixup::kindStoreTargetAddressBigEndian32:
		case ld::Fixup::kindStoreTargetAddressBigEndian64:
		case ld::Fixup::kindLazyTarget:
			return true;
		default:
			break;
	}
	return false;
}
3646 bool OutputFile::isPointerFromTarget(ld::Fixup::Kind kind)
3647 {
3648 switch ( kind ) {
3649 case ld::Fixup::kindSubtractTargetAddress:
3650 return true;
3651 default:
3652 break;
3653 }
3654 return false;
3655 }
3656
3657
3658 uint64_t OutputFile::lookBackAddend(ld::Fixup::iterator fit)
3659 {
3660 uint64_t addend = 0;
3661 switch ( fit->clusterSize ) {
3662 case ld::Fixup::k1of1:
3663 case ld::Fixup::k1of2:
3664 case ld::Fixup::k2of2:
3665 break;
3666 case ld::Fixup::k2of3:
3667 --fit;
3668 switch ( fit->kind ) {
3669 case ld::Fixup::kindAddAddend:
3670 addend += fit->u.addend;
3671 break;
3672 case ld::Fixup::kindSubtractAddend:
3673 addend -= fit->u.addend;
3674 break;
3675 default:
3676 throw "unexpected fixup kind for binding";
3677 }
3678 break;
3679 case ld::Fixup::k1of3:
3680 ++fit;
3681 switch ( fit->kind ) {
3682 case ld::Fixup::kindAddAddend:
3683 addend += fit->u.addend;
3684 break;
3685 case ld::Fixup::kindSubtractAddend:
3686 addend -= fit->u.addend;
3687 break;
3688 default:
3689 throw "unexpected fixup kind for binding";
3690 }
3691 break;
3692 default:
3693 throw "unexpected fixup cluster size for binding";
3694 }
3695 return addend;
3696 }
3697
3698
void OutputFile::generateLinkEditInfo(ld::Internal& state)
{
	// Walks every atom's fixups and routes each completed fixup cluster to
	// the appropriate LINKEDIT generator: section relocations (for .o
	// output), compressed dyld info, or classic relocations.  Also records
	// weak-def overrides, data-in-code presence, and the end of the
	// encryptable __TEXT region as a side effect.
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		// record end of last __TEXT section encrypted iPhoneOS apps.
		if ( _options.makeEncryptable() && (strcmp(sect->segmentName(), "__TEXT") == 0) ) {
			_encryptedTEXTendOffset = pageAlign(sect->fileOffset + sect->size);
		}
		// objc1 __OBJC/__cls_refs sections need a special check below for
		// illegal class references into lazy loaded dylibs
		bool objc1ClassRefSection = ( (sect->type() == ld::Section::typeCStringPointer)
									&& (strcmp(sect->sectionName(), "__cls_refs") == 0)
									&& (strcmp(sect->segmentName(), "__OBJC") == 0) );
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;

			// Record regular atoms that override a dylib's weak definitions
			if ( (atom->scope() == ld::Atom::scopeGlobal) && atom->overridesDylibsWeakDef() ) {
				if ( _options.makeCompressedDyldInfo() ) {
					uint8_t wtype = BIND_TYPE_OVERRIDE_OF_WEAKDEF_IN_DYLIB;
					// an atom that never combines is a non-weak override
					bool nonWeakDef = (atom->combine() == ld::Atom::combineNever);
					_weakBindingInfo.push_back(BindingInfo(wtype, atom->name(), nonWeakDef, atom->finalAddress(), 0));
				}
				this->overridesWeakExternalSymbols = true;
				if ( _options.warnWeakExports() )
					warning("overrides weak external symbol: %s", atom->name());
			}

			// Per-cluster accumulator state: a cluster is a run of fixups
			// starting at firstInCluster() and ending at lastInCluster().
			ld::Fixup* fixupWithTarget = NULL;
			ld::Fixup* fixupWithMinusTarget = NULL;
			ld::Fixup* fixupWithStore = NULL;
			ld::Fixup* fixupWithAddend = NULL;
			const ld::Atom* target = NULL;
			const ld::Atom* minusTarget = NULL;
			uint64_t targetAddend = 0;
			uint64_t minusTargetAddend = 0;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
				if ( fit->firstInCluster() ) {
					// reset accumulators at the start of each cluster
					// (note: fixupWithAddend intentionally not reset here,
					// matching long-standing behavior — TODO confirm)
					fixupWithTarget = NULL;
					fixupWithMinusTarget = NULL;
					fixupWithStore = NULL;
					target = NULL;
					minusTarget = NULL;
					targetAddend = 0;
					minusTargetAddend = 0;
				}
				if ( this->setsTarget(fit->kind) ) {
					// resolve the cluster's (positive) target atom
					switch ( fit->binding ) {
						case ld::Fixup::bindingNone:
						case ld::Fixup::bindingByNameUnbound:
							break;
						case ld::Fixup::bindingByContentBound:
						case ld::Fixup::bindingDirectlyBound:
							fixupWithTarget = fit;
							target = fit->u.target;
							break;
						case ld::Fixup::bindingsIndirectlyBound:
							fixupWithTarget = fit;
							target = state.indirectBindingTable[fit->u.bindingIndex];
							break;
					}
					assert(target != NULL);
				}
				switch ( fit->kind ) {
					case ld::Fixup::kindAddAddend:
						targetAddend = fit->u.addend;
						fixupWithAddend = fit;
						break;
					case ld::Fixup::kindSubtractAddend:
						minusTargetAddend = fit->u.addend;
						fixupWithAddend = fit;
						break;
					case ld::Fixup::kindSubtractTargetAddress:
						// resolve the cluster's subtracted ("minus") target
						switch ( fit->binding ) {
							case ld::Fixup::bindingNone:
							case ld::Fixup::bindingByNameUnbound:
								break;
							case ld::Fixup::bindingByContentBound:
							case ld::Fixup::bindingDirectlyBound:
								fixupWithMinusTarget = fit;
								minusTarget = fit->u.target;
								break;
							case ld::Fixup::bindingsIndirectlyBound:
								fixupWithMinusTarget = fit;
								minusTarget = state.indirectBindingTable[fit->u.bindingIndex];
								break;
						}
						assert(minusTarget != NULL);
						break;
					case ld::Fixup::kindDataInCodeStartData:
					case ld::Fixup::kindDataInCodeStartJT8:
					case ld::Fixup::kindDataInCodeStartJT16:
					case ld::Fixup::kindDataInCodeStartJT32:
					case ld::Fixup::kindDataInCodeStartJTA32:
					case ld::Fixup::kindDataInCodeEnd:
						hasDataInCode = true;
						break;
					default:
						break;
				}
				if ( this->isStore(fit->kind) ) {
					fixupWithStore = fit;
				}
				if ( fit->lastInCluster() ) {
					// cluster complete: emit the right kind of LINKEDIT info
					if ( (fixupWithStore != NULL) && (target != NULL) ) {
						if ( _options.outputKind() == Options::kObjectFile ) {
							this->addSectionRelocs(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithAddend, fixupWithStore,
													target, minusTarget, targetAddend, minusTargetAddend);
						}
						else {
							if ( _options.makeCompressedDyldInfo() ) {
								this->addDyldInfo(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithStore,
													target, minusTarget, targetAddend, minusTargetAddend);
							}
							else {
								this->addClassicRelocs(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithStore,
													target, minusTarget, targetAddend, minusTargetAddend);
							}
						}
					}
					else if ( objc1ClassRefSection && (target != NULL) && (fixupWithStore == NULL) ) {
						// check for class refs to lazy loaded dylibs
						const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
						if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
							throwf("illegal class reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
					}
				}
			}
		}
	}
}
3828
3829
3830 void OutputFile::noteTextReloc(const ld::Atom* atom, const ld::Atom* target)
3831 {
3832 if ( (atom->contentType() == ld::Atom::typeStub) || (atom->contentType() == ld::Atom::typeStubHelper) ) {
3833 // silently let stubs (synthesized by linker) use text relocs
3834 }
3835 else if ( _options.allowTextRelocs() ) {
3836 if ( _options.warnAboutTextRelocs() )
3837 warning("text reloc in %s to %s", atom->name(), target->name());
3838 }
3839 else if ( _options.positionIndependentExecutable() && (_options.outputKind() == Options::kDynamicExecutable)
3840 && ((_options.iOSVersionMin() >= ld::iOS_4_3) || (_options.macosxVersionMin() >= ld::mac10_7)) ) {
3841 if ( ! this->pieDisabled ) {
3842 #if SUPPORT_ARCH_arm64
3843 if ( _options.architecture() == CPU_TYPE_ARM64 ) {
3844 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
3845 throwf("Absolute addressing not allowed in arm64 code but used in '%s' referencing '%s'", demangledName, _options.demangleSymbol(target->name()));
3846 }
3847 else
3848 #endif
3849 {
3850 warning("PIE disabled. Absolute addressing (perhaps -mdynamic-no-pic) not allowed in code signed PIE, "
3851 "but used in %s from %s. "
3852 "To fix this warning, don't compile with -mdynamic-no-pic or link with -Wl,-no_pie",
3853 atom->name(), atom->file()->path());
3854 }
3855 }
3856 this->pieDisabled = true;
3857 }
3858 else if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) ) {
3859 throwf("illegal text-relocoation (direct reference) to (global,weak) %s in %s from %s in %s", target->name(), target->file()->path(), atom->name(), atom->file()->path());
3860 }
3861 else {
3862 if ( (target->file() != NULL) && (atom->file() != NULL) )
3863 throwf("illegal text-relocation to '%s' in %s from '%s' in %s", target->name(), target->file()->path(), atom->name(), atom->file()->path());
3864 else
3865 throwf("illegal text reloc in '%s' to '%s'", atom->name(), target->name());
3866 }
3867 }
3868
3869 void OutputFile::addDyldInfo(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
3870 ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget, ld::Fixup* fixupWithStore,
3871 const ld::Atom* target, const ld::Atom* minusTarget,
3872 uint64_t targetAddend, uint64_t minusTargetAddend)
3873 {
3874 if ( sect->isSectionHidden() )
3875 return;
3876
3877 // no need to rebase or bind PCRel stores
3878 if ( this->isPcRelStore(fixupWithStore->kind) ) {
3879 // as long as target is in same linkage unit
3880 if ( (target == NULL) || (target->definition() != ld::Atom::definitionProxy) ) {
3881 // make sure target is not global and weak
3882 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular)) {
3883 if ( (atom->section().type() == ld::Section::typeCFI)
3884 || (atom->section().type() == ld::Section::typeDtraceDOF)
3885 || (atom->section().type() == ld::Section::typeUnwindInfo) ) {
3886 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
3887 return;
3888 }
3889 // <rdar://problem/13700961> spurious warning when weak function has reference to itself
3890 if ( fixupWithTarget->binding == ld::Fixup::bindingDirectlyBound ) {
3891 // ok to ignore pc-rel references within a weak function to itself
3892 return;
3893 }
3894 // Have direct reference to weak-global. This should be an indrect reference
3895 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
3896 warning("direct access in %s to global weak symbol %s means the weak symbol cannot be overridden at runtime. "
3897 "This was likely caused by different translation units being compiled with different visibility settings.",
3898 demangledName, _options.demangleSymbol(target->name()));
3899 }
3900 return;
3901 }
3902 }
3903
3904 // no need to rebase or bind PIC internal pointer diff
3905 if ( minusTarget != NULL ) {
3906 // with pointer diffs, both need to be in same linkage unit
3907 assert(minusTarget->definition() != ld::Atom::definitionProxy);
3908 assert(target != NULL);
3909 assert(target->definition() != ld::Atom::definitionProxy);
3910 if ( target == minusTarget ) {
3911 // This is a compile time constant and could have been optimized away by compiler
3912 return;
3913 }
3914
3915 // check if target of pointer-diff is global and weak
3916 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) ) {
3917 if ( (atom->section().type() == ld::Section::typeCFI)
3918 || (atom->section().type() == ld::Section::typeDtraceDOF)
3919 || (atom->section().type() == ld::Section::typeUnwindInfo) ) {
3920 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
3921 return;
3922 }
3923 // Have direct reference to weak-global. This should be an indrect reference
3924 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
3925 warning("direct access in %s to global weak symbol %s means the weak symbol cannot be overridden at runtime. "
3926 "This was likely caused by different translation units being compiled with different visibility settings.",
3927 demangledName, _options.demangleSymbol(target->name()));
3928 }
3929 return;
3930 }
3931
3932 // no need to rebase or bind an atom's references to itself if the output is not slidable
3933 if ( (atom == target) && !_options.outputSlidable() )
3934 return;
3935
3936 // cluster has no target, so needs no rebasing or binding
3937 if ( target == NULL )
3938 return;
3939
3940 bool inReadOnlySeg = ((_options.initialSegProtection(sect->segmentName()) & VM_PROT_WRITE) == 0);
3941 bool needsRebase = false;
3942 bool needsBinding = false;
3943 bool needsLazyBinding = false;
3944 bool needsWeakBinding = false;
3945
3946 uint8_t rebaseType = REBASE_TYPE_POINTER;
3947 uint8_t type = BIND_TYPE_POINTER;
3948 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
3949 bool weak_import = (fixupWithTarget->weakImport || ((dylib != NULL) && dylib->forcedWeakLinked()));
3950 uint64_t address = atom->finalAddress() + fixupWithTarget->offsetInAtom;
3951 uint64_t addend = targetAddend - minusTargetAddend;
3952
3953 // special case lazy pointers
3954 if ( fixupWithTarget->kind == ld::Fixup::kindLazyTarget ) {
3955 assert(fixupWithTarget->u.target == target);
3956 assert(addend == 0);
3957 // lazy dylib lazy pointers do not have any dyld info
3958 if ( atom->section().type() == ld::Section::typeLazyDylibPointer )
3959 return;
3960 // lazy binding to weak definitions are done differently
3961 // they are directly bound to target, then have a weak bind in case of a collision
3962 if ( target->combine() == ld::Atom::combineByName ) {
3963 if ( target->definition() == ld::Atom::definitionProxy ) {
3964 // weak def exported from another dylib
3965 // must non-lazy bind to it plus have weak binding info in case of collision
3966 needsBinding = true;
3967 needsWeakBinding = true;
3968 }
3969 else {
3970 // weak def in this linkage unit.
3971 // just rebase, plus have weak binding info in case of collision
3972 // this will be done by other cluster on lazy pointer atom
3973 }
3974 }
3975 else if ( target->contentType() == ld::Atom::typeResolver ) {
3976 // <rdar://problem/8553647> Hidden resolver functions should not have lazy binding info
3977 // <rdar://problem/12629331> Resolver function run before initializers when overriding the dyld shared cache
3978 // The lazy pointers used by stubs used when non-lazy binding to a resolver are not normal lazy pointers
3979 // and should not be in lazy binding info.
3980 needsLazyBinding = false;
3981 }
3982 else {
3983 // normal case of a pointer to non-weak-def symbol, so can lazily bind
3984 needsLazyBinding = true;
3985 }
3986 }
3987 else {
3988 // everything except lazy pointers
3989 switch ( target->definition() ) {
3990 case ld::Atom::definitionProxy:
3991 if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
3992 throwf("illegal data reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
3993 if ( target->contentType() == ld::Atom::typeTLV ) {
3994 if ( sect->type() != ld::Section::typeTLVPointers )
3995 throwf("illegal data reference in %s to thread local variable %s in dylib %s",
3996 atom->name(), target->name(), dylib->path());
3997 }
3998 if ( inReadOnlySeg )
3999 type = BIND_TYPE_TEXT_ABSOLUTE32;
4000 needsBinding = true;
4001 if ( target->combine() == ld::Atom::combineByName )
4002 needsWeakBinding = true;
4003 break;
4004 case ld::Atom::definitionRegular:
4005 case ld::Atom::definitionTentative:
4006 // only slideable images need rebasing info
4007 if ( _options.outputSlidable() ) {
4008 needsRebase = true;
4009 }
4010 // references to internal symbol never need binding
4011 if ( target->scope() != ld::Atom::scopeGlobal )
4012 break;
4013 // reference to global weak def needs weak binding
4014 if ( (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) )
4015 needsWeakBinding = true;
4016 else if ( _options.outputKind() == Options::kDynamicExecutable ) {
4017 // in main executables, the only way regular symbols are indirected is if -interposable is used
4018 if ( _options.interposable(target->name()) ) {
4019 needsRebase = false;
4020 needsBinding = true;
4021 }
4022 }
4023 else {
4024 // for flat-namespace or interposable two-level-namespace
4025 // all references to exported symbols get indirected
4026 if ( (_options.nameSpace() != Options::kTwoLevelNameSpace) || _options.interposable(target->name()) ) {
4027 // <rdar://problem/5254468> no external relocs for flat objc classes
4028 if ( strncmp(target->name(), ".objc_class_", 12) == 0 )
4029 break;
4030 // no rebase info for references to global symbols that will have binding info
4031 needsRebase = false;
4032 needsBinding = true;
4033 }
4034 else if ( _options.forceCoalesce(target->name()) ) {
4035 needsWeakBinding = true;
4036 }
4037 }
4038 break;
4039 case ld::Atom::definitionAbsolute:
4040 break;
4041 }
4042 }
4043
4044 // <rdar://problem/13828711> if target is an import alias, use base of alias
4045 if ( target->isAlias() && (target->definition() == ld::Atom::definitionProxy) ) {
4046 for (ld::Fixup::iterator fit = target->fixupsBegin(), end=target->fixupsEnd(); fit != end; ++fit) {
4047 if ( fit->firstInCluster() ) {
4048 if ( fit->kind == ld::Fixup::kindNoneFollowOn ) {
4049 if ( fit->binding == ld::Fixup::bindingDirectlyBound ) {
4050 //fprintf(stderr, "switching import of %s to import of %s\n", target->name(), fit->u.target->name());
4051 target = fit->u.target;
4052 }
4053 }
4054 }
4055 }
4056 }
4057
4058 // record dyld info for this cluster
4059 if ( needsRebase ) {
4060 if ( inReadOnlySeg ) {
4061 noteTextReloc(atom, target);
4062 sect->hasLocalRelocs = true; // so dyld knows to change permissions on __TEXT segment
4063 rebaseType = REBASE_TYPE_TEXT_ABSOLUTE32;
4064 }
4065 if ( _options.sharedRegionEligible() ) {
4066 // <rdar://problem/13287063> when range checking, ignore high byte of arm64 addends
4067 uint64_t checkAddend = addend;
4068 if ( _options.architecture() == CPU_TYPE_ARM64 )
4069 checkAddend &= 0x0FFFFFFFFFFFFFFFULL;
4070 if ( checkAddend != 0 ) {
4071 // make sure the addend does not cause the pointer to point outside the target's segment
4072 // if it does, update_dyld_shared_cache will not be able to put this dylib into the shared cache
4073 uint64_t targetAddress = target->finalAddress();
4074 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4075 ld::Internal::FinalSection* sct = *sit;
4076 uint64_t sctEnd = (sct->address+sct->size);
4077 if ( (sct->address <= targetAddress) && (targetAddress < sctEnd) ) {
4078 if ( (targetAddress+checkAddend) > sctEnd ) {
4079 warning("data symbol %s from %s has pointer to %s + 0x%08llX. "
4080 "That large of an addend may disable %s from being put in the dyld shared cache.",
4081 atom->name(), atom->file()->path(), target->name(), addend, _options.installPath() );
4082 }
4083 }
4084 }
4085 }
4086 }
4087 _rebaseInfo.push_back(RebaseInfo(rebaseType, address));
4088 }
4089 if ( needsBinding ) {
4090 if ( inReadOnlySeg ) {
4091 noteTextReloc(atom, target);
4092 sect->hasExternalRelocs = true; // so dyld knows to change permissions on __TEXT segment
4093 }
4094 _bindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4095 }
4096 if ( needsLazyBinding ) {
4097 if ( _options.bindAtLoad() )
4098 _bindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4099 else
4100 _lazyBindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4101 }
4102 if ( needsWeakBinding )
4103 _weakBindingInfo.push_back(BindingInfo(type, 0, target->name(), false, address, addend));
4104 }
4105
4106
//
// Records classic (pre-compressed-dyld-info) relocations for one fixup cluster.
// Used by output kinds that carry local/external relocation tables (-r, kexts,
// static executables) instead of rebase/bind opcodes.  Appends entries to
// _localRelocsAtom/_externalRelocsAtom and marks the section's reloc flags.
//
void OutputFile::addClassicRelocs(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
								ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget, ld::Fixup* fixupWithStore,
								const ld::Atom* target, const ld::Atom* minusTarget,
								uint64_t targetAddend, uint64_t minusTargetAddend)
{
	// hidden sections are not emitted, so they never need relocations
	if ( sect->isSectionHidden() )
		return;

	// non-lazy-pointer section is encoded in indirect symbol table - not using relocations
	if ( sect->type() == ld::Section::typeNonLazyPointer ) {
		// except kexts and static pie which *do* use relocations
		switch (_options.outputKind()) {
			case Options::kKextBundle:
				break;
			case Options::kStaticExecutable:
				if ( _options.positionIndependentExecutable() )
					break;
				// else fall into default case
			default:
				assert(target != NULL);
				assert(fixupWithTarget != NULL);
				return;
		}
	}

	// no need to rebase or bind PCRel stores
	if ( this->isPcRelStore(fixupWithStore->kind) ) {
		// as long as target is in same linkage unit
		if ( (target == NULL) || (target->definition() != ld::Atom::definitionProxy) )
			return;
	}

	// no need to rebase or bind PIC internal pointer diff
	if ( minusTarget != NULL ) {
		// with pointer diffs, both need to be in same linkage unit
		assert(minusTarget->definition() != ld::Atom::definitionProxy);
		assert(target != NULL);
		assert(target->definition() != ld::Atom::definitionProxy);
		// make sure target is not global and weak; a pointer diff to a global
		// weak symbol would break if the definition were coalesced away
		if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName)
			&& (atom->section().type() != ld::Section::typeCFI)
			&& (atom->section().type() != ld::Section::typeDtraceDOF)
			&& (atom->section().type() != ld::Section::typeUnwindInfo)
			&& (minusTarget != target) ) {
			// ok for __eh_frame and __unwind_info to use pointer diffs to global weak symbols
			throwf("bad codegen, pointer diff in %s to global weak symbol %s", atom->name(), target->name());
		}
		return;
	}

	// cluster has no target, so needs no rebasing or binding
	if ( target == NULL )
		return;

	assert(_localRelocsAtom != NULL);
	// classic reloc r_address values are relative to the reloc base address
	uint64_t relocAddress = atom->finalAddress() + fixupWithTarget->offsetInAtom - _localRelocsAtom->relocBaseAddress(state);

	bool inReadOnlySeg = ( strcmp(sect->segmentName(), "__TEXT") == 0 );
	bool needsLocalReloc = false;
	bool needsExternReloc = false;

	switch ( fixupWithStore->kind ) {
		case ld::Fixup::kindLazyTarget:
			// lazy pointers don't need relocs
			break;
		case ld::Fixup::kindStoreLittleEndian32:
		case ld::Fixup::kindStoreLittleEndian64:
		case ld::Fixup::kindStoreBigEndian32:
		case ld::Fixup::kindStoreBigEndian64:
		case ld::Fixup::kindStoreTargetAddressLittleEndian32:
		case ld::Fixup::kindStoreTargetAddressLittleEndian64:
		case ld::Fixup::kindStoreTargetAddressBigEndian32:
		case ld::Fixup::kindStoreTargetAddressBigEndian64:
			// is pointer
			switch ( target->definition() ) {
				case ld::Atom::definitionProxy:
					// pointer to symbol in another linkage unit: always external
					needsExternReloc = true;
					break;
				case ld::Atom::definitionRegular:
				case ld::Atom::definitionTentative:
					// only slideable images need local relocs
					if ( _options.outputSlidable() )
						needsLocalReloc = true;
					// references to internal symbol never need binding
					if ( target->scope() != ld::Atom::scopeGlobal )
						break;
					// reference to global weak def needs weak binding in dynamic images
					if ( (target->combine() == ld::Atom::combineByName)
						&& (target->definition() == ld::Atom::definitionRegular)
						&& (_options.outputKind() != Options::kStaticExecutable)
						&& (_options.outputKind() != Options::kPreload)
						&& (atom != target) ) {
						needsExternReloc = true;
					}
					else if ( _options.outputKind() == Options::kDynamicExecutable ) {
						// in main executables, the only way regular symbols are indirected is if -interposable is used
						if ( _options.interposable(target->name()) )
							needsExternReloc = true;
					}
					else {
						// for flat-namespace or interposable two-level-namespace
						// all references to exported symbols get indirected
						if ( (_options.nameSpace() != Options::kTwoLevelNameSpace) || _options.interposable(target->name()) ) {
							// <rdar://problem/5254468> no external relocs for flat objc classes
							if ( strncmp(target->name(), ".objc_class_", 12) == 0 )
								break;
							// no rebase info for references to global symbols that will have binding info
							needsExternReloc = true;
						}
					}
					// an external reloc supersedes a local one for the same address
					if ( needsExternReloc )
						needsLocalReloc = false;
					break;
				case ld::Atom::definitionAbsolute:
					// absolute symbols never slide, so no reloc of either kind
					break;
			}
			if ( needsExternReloc ) {
				if ( inReadOnlySeg )
					noteTextReloc(atom, target);
				const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
				if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
					throwf("illegal data reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
				_externalRelocsAtom->addExternalPointerReloc(relocAddress, target);
				sect->hasExternalRelocs = true;
				// stored content holds only the addend; dyld adds the symbol's address
				fixupWithTarget->contentAddendOnly = true;
			}
			else if ( needsLocalReloc ) {
				assert(target != NULL);
				if ( inReadOnlySeg )
					noteTextReloc(atom, target);
				_localRelocsAtom->addPointerReloc(relocAddress, target->machoSection());
				sect->hasLocalRelocs = true;
			}
			break;
		case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreTargetAddressARM64Branch26:
#endif
			// kexts record call sites to external symbols so kextload can relink them
			if ( _options.outputKind() == Options::kKextBundle ) {
				assert(target != NULL);
				if ( target->definition() == ld::Atom::definitionProxy ) {
					_externalRelocsAtom->addExternalCallSiteReloc(relocAddress, target);
					fixupWithStore->contentAddendOnly = true;
				}
			}
			break;

		case ld::Fixup::kindStoreARMLow16:
		case ld::Fixup::kindStoreThumbLow16:
			// no way to encode rebasing of binding for these instructions
			if ( _options.outputSlidable() || (target->definition() == ld::Atom::definitionProxy) )
				throwf("no supported runtime lo16 relocation in %s from %s to %s", atom->name(), atom->file()->path(), target->name());
			break;

		case ld::Fixup::kindStoreARMHigh16:
		case ld::Fixup::kindStoreThumbHigh16:
			// no way to encode rebasing of binding for these instructions
			if ( _options.outputSlidable() || (target->definition() == ld::Atom::definitionProxy) )
				throwf("no supported runtime hi16 relocation in %s from %s to %s", atom->name(), atom->file()->path(), target->name());
			break;

		default:
			break;
	}
}
4272
4273
4274 bool OutputFile::useExternalSectionReloc(const ld::Atom* atom, const ld::Atom* target, ld::Fixup* fixupWithTarget)
4275 {
4276 if ( (_options.architecture() == CPU_TYPE_X86_64) || (_options.architecture() == CPU_TYPE_ARM64) ) {
4277 // x86_64 and ARM64 use external relocations for everthing that has a symbol
4278 return ( target->symbolTableInclusion() != ld::Atom::symbolTableNotIn );
4279 }
4280
4281 // <rdar://problem/9513487> support arm branch interworking in -r mode
4282 if ( (_options.architecture() == CPU_TYPE_ARM) && (_options.outputKind() == Options::kObjectFile) ) {
4283 if ( atom->isThumb() != target->isThumb() ) {
4284 switch ( fixupWithTarget->kind ) {
4285 // have branch that switches mode, then might be 'b' not 'bl'
4286 // Force external relocation, since no way to do local reloc for 'b'
4287 case ld::Fixup::kindStoreTargetAddressThumbBranch22 :
4288 case ld::Fixup::kindStoreTargetAddressARMBranch24:
4289 return true;
4290 default:
4291 break;
4292 }
4293 }
4294 }
4295
4296 if ( (_options.architecture() == CPU_TYPE_I386) && (_options.outputKind() == Options::kObjectFile) ) {
4297 if ( target->contentType() == ld::Atom::typeTLV )
4298 return true;
4299 }
4300
4301 // most architectures use external relocations only for references
4302 // to a symbol in another translation unit or for references to "weak symbols" or tentative definitions
4303 assert(target != NULL);
4304 if ( target->definition() == ld::Atom::definitionProxy )
4305 return true;
4306 if ( (target->definition() == ld::Atom::definitionTentative) && ! _options.makeTentativeDefinitionsReal() )
4307 return true;
4308 if ( target->scope() != ld::Atom::scopeGlobal )
4309 return false;
4310 if ( (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) )
4311 return true;
4312 return false;
4313 }
4314
4315 bool OutputFile::useSectionRelocAddend(ld::Fixup* fixupWithTarget)
4316 {
4317 #if SUPPORT_ARCH_arm64
4318 if ( _options.architecture() == CPU_TYPE_ARM64 ) {
4319 switch ( fixupWithTarget->kind ) {
4320 case ld::Fixup::kindStoreARM64Branch26:
4321 case ld::Fixup::kindStoreARM64Page21:
4322 case ld::Fixup::kindStoreARM64PageOff12:
4323 return true;
4324 default:
4325 return false;
4326 }
4327 }
4328 #endif
4329 return false;
4330 }
4331
4332
4333
4334
//
// Records a section relocation (for -r/.o output) for one fixup cluster,
// marking the involved fixups so the stored content is the addend (or the
// delta to the addend) that the chosen reloc flavor expects.
//
void OutputFile::addSectionRelocs(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
							ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget,
							ld::Fixup* fixupWithAddend, ld::Fixup* fixupWithStore,
							const ld::Atom* target, const ld::Atom* minusTarget,
							uint64_t targetAddend, uint64_t minusTargetAddend)
{
	// hidden sections are not emitted, so no relocations needed
	if ( sect->isSectionHidden() )
		return;

	// in -r mode where there will be no labels on __eh_frame section, there is no need for relocations
	if ( (sect->type() == ld::Section::typeCFI) && _options.removeEHLabels() )
		return;

	// non-lazy-pointer section is encoded in indirect symbol table - not using relocations
	if ( sect->type() == ld::Section::typeNonLazyPointer )
		return;

	// tentative defs don't have any relocations
	if ( sect->type() == ld::Section::typeTentativeDefs )
		return;

	assert(target != NULL);
	assert(fixupWithTarget != NULL);
	bool targetUsesExternalReloc = this->useExternalSectionReloc(atom, target, fixupWithTarget);
	bool minusTargetUsesExternalReloc = (minusTarget != NULL) && this->useExternalSectionReloc(atom, minusTarget, fixupWithMinusTarget);

	// in x86_64 and arm64 .o files an external reloc means the content contains just the addend
	if ( (_options.architecture() == CPU_TYPE_X86_64) ||(_options.architecture() == CPU_TYPE_ARM64) ) {
		if ( targetUsesExternalReloc ) {
			fixupWithTarget->contentAddendOnly = true;
			fixupWithStore->contentAddendOnly = true;
			// arm64 kinds that use a separate addend reloc must not also bake
			// the addend into the instruction
			if ( this->useSectionRelocAddend(fixupWithStore) && (fixupWithAddend != NULL) )
				fixupWithAddend->contentIgnoresAddend = true;
		}
		if ( minusTargetUsesExternalReloc )
			fixupWithMinusTarget->contentAddendOnly = true;
	}
	else {
		// for other archs, content is addend only with (non pc-rel) pointers
		// pc-rel instructions are funny. If the target is _foo+8 and _foo is
		// external, then the pc-rel instruction *evalutates* to the address 8.
		if ( targetUsesExternalReloc ) {
			// TLV support for i386 acts like RIP relative addressing
			// The addend is the offset from the PICBase to the end of the instruction
			if ( (_options.architecture() == CPU_TYPE_I386)
					&& (_options.outputKind() == Options::kObjectFile)
					&& (fixupWithStore->kind == ld::Fixup::kindStoreX86PCRel32TLVLoad) ) {
				fixupWithTarget->contentAddendOnly = true;
				fixupWithStore->contentAddendOnly = true;
			}
			else if ( isPcRelStore(fixupWithStore->kind) ) {
				// note: "Detla" spelling matches the field's declaration in ld.hpp
				fixupWithTarget->contentDetlaToAddendOnly = true;
				fixupWithStore->contentDetlaToAddendOnly = true;
			}
			else if ( minusTarget == NULL ){
				fixupWithTarget->contentAddendOnly = true;
				fixupWithStore->contentAddendOnly = true;
			}
		}
	}

	// clusters with no store fixup (e.g. bare addends) produce no reloc entry
	if ( fixupWithStore != NULL ) {
		_sectionsRelocationsAtom->addSectionReloc(sect, fixupWithStore->kind, atom, fixupWithStore->offsetInAtom,
													targetUsesExternalReloc, minusTargetUsesExternalReloc,
													target, targetAddend, minusTarget, minusTargetAddend);
	}

}
4403
4404
//
// Builds the split-seg info table for shared-cache-eligible dylibs: records the
// address and fixup kind of every __TEXT fixup whose target lives in a different
// segment, so the segments can later be slid independently.
//
void OutputFile::makeSplitSegInfo(ld::Internal& state)
{
	// only dylibs destined for the shared region need this info
	if ( !_options.sharedRegionEligible() )
		return;

	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->isSectionHidden() )
			continue;
		// only fixups stored in __TEXT are of interest
		if ( strcmp(sect->segmentName(), "__TEXT") != 0 )
			continue;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			// walk this atom's fixups, replaying the cluster state machine to
			// learn each cluster's target and whether a subtraction occurred
			const ld::Atom* target = NULL;
			const ld::Atom* fromTarget = NULL;
			uint64_t accumulator = 0;
			bool thumbTarget;
			bool hadSubtract = false;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->firstInCluster() )
					target = NULL;
				if ( this->setsTarget(fit->kind) ) {
					accumulator = addressOf(state, fit, &target);
					thumbTarget = targetIsThumb(state, fit);
					// thumb targets have their low bit set in the stored address
					if ( thumbTarget )
						accumulator |= 1;
				}
				switch ( fit->kind ) {
					case ld::Fixup::kindSubtractTargetAddress:
						accumulator -= addressOf(state, fit, &fromTarget);
						hadSubtract = true;
						break;
					case ld::Fixup::kindAddAddend:
						accumulator += fit->u.addend;
						break;
					case ld::Fixup::kindSubtractAddend:
						accumulator -= fit->u.addend;
						break;
					case ld::Fixup::kindStoreBigEndian32:
					case ld::Fixup::kindStoreLittleEndian32:
					case ld::Fixup::kindStoreLittleEndian64:
					case ld::Fixup::kindStoreTargetAddressLittleEndian32:
					case ld::Fixup::kindStoreTargetAddressLittleEndian64:
						// if no subtract, then this is an absolute pointer which means
						// there is also a text reloc which update_dyld_shared_cache will use.
						if ( ! hadSubtract )
							break;
						// fall through
					case ld::Fixup::kindStoreX86PCRel32:
					case ld::Fixup::kindStoreX86PCRel32_1:
					case ld::Fixup::kindStoreX86PCRel32_2:
					case ld::Fixup::kindStoreX86PCRel32_4:
					case ld::Fixup::kindStoreX86PCRel32GOTLoad:
					case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
					case ld::Fixup::kindStoreX86PCRel32GOT:
					case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
					case ld::Fixup::kindStoreARMLow16:
					case ld::Fixup::kindStoreThumbLow16:
#if SUPPORT_ARCH_arm64
					case ld::Fixup::kindStoreARM64Page21:
					case ld::Fixup::kindStoreARM64GOTLoadPage21:
					case ld::Fixup::kindStoreARM64GOTLeaPage21:
					case ld::Fixup::kindStoreARM64TLVPLoadPage21:
					case ld::Fixup::kindStoreTargetAddressARM64Page21:
					case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
					case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
					case ld::Fixup::kindStoreARM64PCRelToGOT:
#endif
						assert(target != NULL);
						// only cross-segment references need split-seg entries
						if ( strcmp(sect->segmentName(), target->section().segmentName()) != 0 ) {
							_splitSegInfos.push_back(SplitSegInfoEntry(atom->finalAddress()+fit->offsetInAtom,fit->kind));
						}
						break;
					case ld::Fixup::kindStoreARMHigh16:
					case ld::Fixup::kindStoreThumbHigh16:
						assert(target != NULL);
						if ( strcmp(sect->segmentName(), target->section().segmentName()) != 0 ) {
							// hi16 needs to know upper 4-bits of low16 to compute carry
							uint32_t extra = (accumulator >> 12) & 0xF;
							_splitSegInfos.push_back(SplitSegInfoEntry(atom->finalAddress()+fit->offsetInAtom,fit->kind, extra));
						}
						break;
					case ld::Fixup::kindSetTargetImageOffset:
						accumulator = addressOf(state, fit, &target);
						assert(target != NULL);
						hadSubtract = true;
						break;
					default:
						break;
				}
			}
		}
	}
}
4505
4506
//
// Writes the linker map file (-map option): a text listing of the input object
// files, the output sections, and every atom's final address, size, and owning
// file.  Format is consumed by external tooling, so the exact layout matters.
//
void OutputFile::writeMapFile(ld::Internal& state)
{
	if ( _options.generatedMapPath() != NULL ) {
		FILE* mapFile = fopen(_options.generatedMapPath(), "w");
		if ( mapFile != NULL ) {
			// write output path
			fprintf(mapFile, "# Path: %s\n", _options.outputFilePath());
			// write output architecture
			fprintf(mapFile, "# Arch: %s\n", _options.architectureName());
			// write UUID
			//if ( fUUIDAtom != NULL ) {
			//	const uint8_t* uuid = fUUIDAtom->getUUID();
			//	fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
			//		uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
			//		uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
			//}
			// write table of object files, numbering them by ordinal order
			std::map<const ld::File*, ld::File::Ordinal> readerToOrdinal;
			std::map<ld::File::Ordinal, const ld::File*> ordinalToReader;
			std::map<const ld::File*, uint32_t> readerToFileOrdinal;
			for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
				ld::Internal::FinalSection* sect = *sit;
				if ( sect->isSectionHidden() )
					continue;
				for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
					const ld::Atom* atom = *ait;
					const ld::File* reader = atom->file();
					// linker-synthesized atoms have no file; they map to index 0
					if ( reader == NULL )
						continue;
					ld::File::Ordinal readerOrdinal = reader->ordinal();
					std::map<const ld::File*, ld::File::Ordinal>::iterator pos = readerToOrdinal.find(reader);
					if ( pos == readerToOrdinal.end() ) {
						readerToOrdinal[reader] = readerOrdinal;
						ordinalToReader[readerOrdinal] = reader;
					}
				}
			}
			fprintf(mapFile, "# Object files:\n");
			fprintf(mapFile, "[%3u] %s\n", 0, "linker synthesized");
			uint32_t fileIndex = 1;
			// assign map-file indices in ordinal (command line) order
			for(std::map<ld::File::Ordinal, const ld::File*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
				fprintf(mapFile, "[%3u] %s\n", fileIndex, it->second->path());
				readerToFileOrdinal[it->second] = fileIndex++;
			}
			// write table of sections
			fprintf(mapFile, "# Sections:\n");
			fprintf(mapFile, "# Address\tSize    \tSegment\tSection\n");
			for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
				ld::Internal::FinalSection* sect = *sit;
				if ( sect->isSectionHidden() )
					continue;
				fprintf(mapFile, "0x%08llX\t0x%08llX\t%s\t%s\n", sect->address, sect->size,
						sect->segmentName(), sect->sectionName());
			}
			// write table of symbols
			fprintf(mapFile, "# Symbols:\n");
			fprintf(mapFile, "# Address\tSize    \tFile  Name\n");
			for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
				ld::Internal::FinalSection* sect = *sit;
				if ( sect->isSectionHidden() )
					continue;
				//bool isCstring = (sect->type() == ld::Section::typeCString);
				for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
					// buffer used to synthesize a display name for unnamed content
					char buffer[4096];
					const ld::Atom* atom = *ait;
					const char* name = atom->name();
					// don't add auto-stripped aliases to .map file
					if ( (atom->size() == 0) && (atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages) )
						continue;
					if ( atom->contentType() == ld::Atom::typeCString ) {
						// show the literal itself instead of an internal label
						strcpy(buffer, "literal string: ");
						strlcat(buffer, (char*)atom->rawContentPointer(), 4096);
						name = buffer;
					}
					else if ( (atom->contentType() == ld::Atom::typeCFI) && (strcmp(name, "FDE") == 0) ) {
						// label each FDE with the function it describes
						for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
							if ( (fit->kind == ld::Fixup::kindSetTargetAddress) && (fit->clusterSize == ld::Fixup::k1of4) ) {
								if ( (fit->binding == ld::Fixup::bindingDirectlyBound)
								 && (fit->u.target->section().type() == ld::Section::typeCode) ) {
									strcpy(buffer, "FDE for: ");
									strlcat(buffer, fit->u.target->name(), 4096);
									name = buffer;
								}
							}
						}
					}
					else if ( atom->contentType() == ld::Atom::typeNonLazyPointer ) {
						// label each non-lazy pointer with what it points to
						strcpy(buffer, "non-lazy-pointer");
						for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
							if ( fit->binding == ld::Fixup::bindingsIndirectlyBound ) {
								strcpy(buffer, "non-lazy-pointer-to: ");
								strlcat(buffer, state.indirectBindingTable[fit->u.bindingIndex]->name(), 4096);
								break;
							}
							else if ( fit->binding == ld::Fixup::bindingDirectlyBound ) {
								strcpy(buffer, "non-lazy-pointer-to-local: ");
								strlcat(buffer, fit->u.target->name(), 4096);
								break;
							}
						}
						name = buffer;
					}
					fprintf(mapFile, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom->finalAddress(), atom->size(),
							readerToFileOrdinal[atom->file()], name);
				}
			}
			fclose(mapFile);
		}
		else {
			warning("could not write map file: %s\n", _options.generatedMapPath());
		}
	}
}
4620
4621
4622 // used to sort atoms with debug notes
4623 class DebugNoteSorter
4624 {
4625 public:
4626 bool operator()(const ld::Atom* left, const ld::Atom* right) const
4627 {
4628 // first sort by reader
4629 ld::File::Ordinal leftFileOrdinal = left->file()->ordinal();
4630 ld::File::Ordinal rightFileOrdinal = right->file()->ordinal();
4631 if ( leftFileOrdinal!= rightFileOrdinal)
4632 return (leftFileOrdinal < rightFileOrdinal);
4633
4634 // then sort by atom objectAddress
4635 uint64_t leftAddr = left->finalAddress();
4636 uint64_t rightAddr = right->finalAddress();
4637 return leftAddr < rightAddr;
4638 }
4639 };
4640
4641
4642 const char* OutputFile::assureFullPath(const char* path)
4643 {
4644 if ( path[0] == '/' )
4645 return path;
4646 char cwdbuff[MAXPATHLEN];
4647 if ( getcwd(cwdbuff, MAXPATHLEN) != NULL ) {
4648 char* result;
4649 asprintf(&result, "%s/%s", cwdbuff, path);
4650 if ( result != NULL )
4651 return result;
4652 }
4653 return path;
4654 }
4655
// return the modification time of 'path', or 0 if the file cannot be stat'ed
static time_t fileModTime(const char* path) {
	struct stat statBuffer;
	if ( stat(path, &statBuffer) != 0 )
		return 0;
	return statBuffer.st_mtime;
}
4663
4664
4665 void OutputFile::synthesizeDebugNotes(ld::Internal& state)
4666 {
4667 // -S means don't synthesize debug map
4668 if ( _options.debugInfoStripping() == Options::kDebugInfoNone )
4669 return;
4670 // make a vector of atoms that come from files compiled with dwarf debug info
4671 std::vector<const ld::Atom*> atomsNeedingDebugNotes;
4672 std::set<const ld::Atom*> atomsWithStabs;
4673 atomsNeedingDebugNotes.reserve(1024);
4674 const ld::relocatable::File* objFile = NULL;
4675 bool objFileHasDwarf = false;
4676 bool objFileHasStabs = false;
4677 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4678 ld::Internal::FinalSection* sect = *sit;
4679 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4680 const ld::Atom* atom = *ait;
4681 // no stabs for atoms that would not be in the symbol table
4682 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotIn )
4683 continue;
4684 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages )
4685 continue;
4686 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel )
4687 continue;
4688 // no stabs for absolute symbols
4689 if ( atom->definition() == ld::Atom::definitionAbsolute )
4690 continue;
4691 // no stabs for .eh atoms
4692 if ( atom->contentType() == ld::Atom::typeCFI )
4693 continue;
4694 // no stabs for string literal atoms
4695 if ( atom->contentType() == ld::Atom::typeCString )
4696 continue;
4697 // no stabs for kernel dtrace probes
4698 if ( (_options.outputKind() == Options::kStaticExecutable) && (strncmp(atom->name(), "__dtrace_probe$", 15) == 0) )
4699 continue;
4700 const ld::File* file = atom->file();
4701 if ( file != NULL ) {
4702 if ( file != objFile ) {
4703 objFileHasDwarf = false;
4704 objFileHasStabs = false;
4705 objFile = dynamic_cast<const ld::relocatable::File*>(file);
4706 if ( objFile != NULL ) {
4707 switch ( objFile->debugInfo() ) {
4708 case ld::relocatable::File::kDebugInfoNone:
4709 break;
4710 case ld::relocatable::File::kDebugInfoDwarf:
4711 objFileHasDwarf = true;
4712 break;
4713 case ld::relocatable::File::kDebugInfoStabs:
4714 case ld::relocatable::File::kDebugInfoStabsUUID:
4715 objFileHasStabs = true;
4716 break;
4717 }
4718 }
4719 }
4720 if ( objFileHasDwarf )
4721 atomsNeedingDebugNotes.push_back(atom);
4722 if ( objFileHasStabs )
4723 atomsWithStabs.insert(atom);
4724 }
4725 }
4726 }
4727
4728 // sort by file ordinal then atom ordinal
4729 std::sort(atomsNeedingDebugNotes.begin(), atomsNeedingDebugNotes.end(), DebugNoteSorter());
4730
4731 // <rdar://problem/17689030> Add -add_ast_path option to linker which add N_AST stab entry to output
4732 const std::vector<const char*>& astPaths = _options.astFilePaths();
4733 for (std::vector<const char*>::const_iterator it=astPaths.begin(); it != astPaths.end(); it++) {
4734 const char* path = *it;
4735 // emit N_AST
4736 ld::relocatable::File::Stab astStab;
4737 astStab.atom = NULL;
4738 astStab.type = N_AST;
4739 astStab.other = 0;
4740 astStab.desc = 0;
4741 astStab.value = fileModTime(path);
4742 astStab.string = path;
4743 state.stabs.push_back(astStab);
4744 }
4745
4746 // synthesize "debug notes" and add them to master stabs vector
4747 const char* dirPath = NULL;
4748 const char* filename = NULL;
4749 bool wroteStartSO = false;
4750 state.stabs.reserve(atomsNeedingDebugNotes.size()*4);
4751 std::unordered_set<const char*, CStringHash, CStringEquals> seenFiles;
4752 for (std::vector<const ld::Atom*>::iterator it=atomsNeedingDebugNotes.begin(); it != atomsNeedingDebugNotes.end(); it++) {
4753 const ld::Atom* atom = *it;
4754 const ld::File* atomFile = atom->file();
4755 const ld::relocatable::File* atomObjFile = dynamic_cast<const ld::relocatable::File*>(atomFile);
4756 //fprintf(stderr, "debug note for %s\n", atom->name());
4757 const char* newPath = atom->translationUnitSource();
4758 if ( newPath != NULL ) {
4759 const char* newDirPath;
4760 const char* newFilename;
4761 const char* lastSlash = strrchr(newPath, '/');
4762 if ( lastSlash == NULL )
4763 continue;
4764 newFilename = lastSlash+1;
4765 char* temp = strdup(newPath);
4766 newDirPath = temp;
4767 // gdb like directory SO's to end in '/', but dwarf DW_AT_comp_dir usually does not have trailing '/'
4768 temp[lastSlash-newPath+1] = '\0';
4769 // need SO's whenever the translation unit source file changes
4770 if ( (filename == NULL) || (strcmp(newFilename,filename) != 0) || (strcmp(newDirPath,dirPath) != 0)) {
4771 if ( filename != NULL ) {
4772 // translation unit change, emit ending SO
4773 ld::relocatable::File::Stab endFileStab;
4774 endFileStab.atom = NULL;
4775 endFileStab.type = N_SO;
4776 endFileStab.other = 1;
4777 endFileStab.desc = 0;
4778 endFileStab.value = 0;
4779 endFileStab.string = "";
4780 state.stabs.push_back(endFileStab);
4781 }
4782 // new translation unit, emit start SO's
4783 ld::relocatable::File::Stab dirPathStab;
4784 dirPathStab.atom = NULL;
4785 dirPathStab.type = N_SO;
4786 dirPathStab.other = 0;
4787 dirPathStab.desc = 0;
4788 dirPathStab.value = 0;
4789 dirPathStab.string = newDirPath;
4790 state.stabs.push_back(dirPathStab);
4791 ld::relocatable::File::Stab fileStab;
4792 fileStab.atom = NULL;
4793 fileStab.type = N_SO;
4794 fileStab.other = 0;
4795 fileStab.desc = 0;
4796 fileStab.value = 0;
4797 fileStab.string = newFilename;
4798 state.stabs.push_back(fileStab);
4799 // Synthesize OSO for start of file
4800 ld::relocatable::File::Stab objStab;
4801 objStab.atom = NULL;
4802 objStab.type = N_OSO;
4803 // <rdar://problem/6337329> linker should put cpusubtype in n_sect field of nlist entry for N_OSO debug note entries
4804 objStab.other = atomFile->cpuSubType();
4805 objStab.desc = 1;
4806 if ( atomObjFile != NULL ) {
4807 objStab.string = assureFullPath(atomObjFile->debugInfoPath());
4808 objStab.value = atomObjFile->debugInfoModificationTime();
4809 }
4810 else {
4811 objStab.string = assureFullPath(atomFile->path());
4812 objStab.value = atomFile->modificationTime();
4813 }
4814 state.stabs.push_back(objStab);
4815 wroteStartSO = true;
4816 // add the source file path to seenFiles so it does not show up in SOLs
4817 seenFiles.insert(newFilename);
4818 char* fullFilePath;
4819 asprintf(&fullFilePath, "%s%s", newDirPath, newFilename);
4820 // add both leaf path and full path
4821 seenFiles.insert(fullFilePath);
4822 }
4823 filename = newFilename;
4824 dirPath = newDirPath;
4825 if ( atom->section().type() == ld::Section::typeCode ) {
4826 // Synthesize BNSYM and start FUN stabs
4827 ld::relocatable::File::Stab beginSym;
4828 beginSym.atom = atom;
4829 beginSym.type = N_BNSYM;
4830 beginSym.other = 1;
4831 beginSym.desc = 0;
4832 beginSym.value = 0;
4833 beginSym.string = "";
4834 state.stabs.push_back(beginSym);
4835 ld::relocatable::File::Stab startFun;
4836 startFun.atom = atom;
4837 startFun.type = N_FUN;
4838 startFun.other = 1;
4839 startFun.desc = 0;
4840 startFun.value = 0;
4841 startFun.string = atom->name();
4842 state.stabs.push_back(startFun);
4843 // Synthesize any SOL stabs needed
4844 const char* curFile = NULL;
4845 for (ld::Atom::LineInfo::iterator lit = atom->beginLineInfo(); lit != atom->endLineInfo(); ++lit) {
4846 if ( lit->fileName != curFile ) {
4847 if ( seenFiles.count(lit->fileName) == 0 ) {
4848 seenFiles.insert(lit->fileName);
4849 ld::relocatable::File::Stab sol;
4850 sol.atom = 0;
4851 sol.type = N_SOL;
4852 sol.other = 0;
4853 sol.desc = 0;
4854 sol.value = 0;
4855 sol.string = lit->fileName;
4856 state.stabs.push_back(sol);
4857 }
4858 curFile = lit->fileName;
4859 }
4860 }
4861 // Synthesize end FUN and ENSYM stabs
4862 ld::relocatable::File::Stab endFun;
4863 endFun.atom = atom;
4864 endFun.type = N_FUN;
4865 endFun.other = 0;
4866 endFun.desc = 0;
4867 endFun.value = 0;
4868 endFun.string = "";
4869 state.stabs.push_back(endFun);
4870 ld::relocatable::File::Stab endSym;
4871 endSym.atom = atom;
4872 endSym.type = N_ENSYM;
4873 endSym.other = 1;
4874 endSym.desc = 0;
4875 endSym.value = 0;
4876 endSym.string = "";
4877 state.stabs.push_back(endSym);
4878 }
4879 else {
4880 ld::relocatable::File::Stab globalsStab;
4881 const char* name = atom->name();
4882 if ( atom->scope() == ld::Atom::scopeTranslationUnit ) {
4883 // Synthesize STSYM stab for statics
4884 globalsStab.atom = atom;
4885 globalsStab.type = N_STSYM;
4886 globalsStab.other = 1;
4887 globalsStab.desc = 0;
4888 globalsStab.value = 0;
4889 globalsStab.string = name;
4890 state.stabs.push_back(globalsStab);
4891 }
4892 else {
4893 // Synthesize GSYM stab for other globals
4894 globalsStab.atom = atom;
4895 globalsStab.type = N_GSYM;
4896 globalsStab.other = 1;
4897 globalsStab.desc = 0;
4898 globalsStab.value = 0;
4899 globalsStab.string = name;
4900 state.stabs.push_back(globalsStab);
4901 }
4902 }
4903 }
4904 }
4905
4906 if ( wroteStartSO ) {
4907 // emit ending SO
4908 ld::relocatable::File::Stab endFileStab;
4909 endFileStab.atom = NULL;
4910 endFileStab.type = N_SO;
4911 endFileStab.other = 1;
4912 endFileStab.desc = 0;
4913 endFileStab.value = 0;
4914 endFileStab.string = "";
4915 state.stabs.push_back(endFileStab);
4916 }
4917
4918 // copy any stabs from .o file
4919 std::set<const ld::File*> filesSeenWithStabs;
4920 for (std::set<const ld::Atom*>::iterator it=atomsWithStabs.begin(); it != atomsWithStabs.end(); it++) {
4921 const ld::Atom* atom = *it;
4922 objFile = dynamic_cast<const ld::relocatable::File*>(atom->file());
4923 if ( objFile != NULL ) {
4924 if ( filesSeenWithStabs.count(objFile) == 0 ) {
4925 filesSeenWithStabs.insert(objFile);
4926 const std::vector<ld::relocatable::File::Stab>* stabs = objFile->stabs();
4927 if ( stabs != NULL ) {
4928 for(std::vector<ld::relocatable::File::Stab>::const_iterator sit = stabs->begin(); sit != stabs->end(); ++sit) {
4929 ld::relocatable::File::Stab stab = *sit;
4930 // ignore stabs associated with atoms that were dead stripped or coalesced away
4931 if ( (sit->atom != NULL) && (atomsWithStabs.count(sit->atom) == 0) )
4932 continue;
4933 // <rdar://problem/8284718> Value of N_SO stabs should be address of first atom from translation unit
4934 if ( (stab.type == N_SO) && (stab.string != NULL) && (stab.string[0] != '\0') ) {
4935 stab.atom = atom;
4936 }
4937 state.stabs.push_back(stab);
4938 }
4939 }
4940 }
4941 }
4942 }
4943
4944 }
4945
4946
4947 } // namespace tool
4948 } // namespace ld
4949