]> git.saurik.com Git - apple/ld64.git/blob - src/ld/OutputFile.cpp
732eb547ee4ac72f0ccec2599b53ed6972435390
[apple/ld64.git] / src / ld / OutputFile.cpp
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-*
2 *
3 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <fcntl.h>
#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <mach/mach_time.h>
#include <mach/vm_statistics.h>
#include <mach/mach_init.h>
#include <mach/mach_host.h>
#include <uuid/uuid.h>
#include <dlfcn.h>
#include <mach-o/dyld.h>
#include <mach-o/fat.h>
45
46 #include <string>
47 #include <map>
48 #include <set>
49 #include <string>
50 #include <vector>
51 #include <list>
52 #include <algorithm>
53 #include <unordered_set>
54 #include <utility>
55 #include <iostream>
56 #include <fstream>
57
58 #include <CommonCrypto/CommonDigest.h>
59 #include <AvailabilityMacros.h>
60
61 #include "MachOTrie.hpp"
62
63 #include "Options.h"
64
65 #include "OutputFile.h"
66 #include "Architectures.hpp"
67 #include "HeaderAndLoadCommands.hpp"
68 #include "LinkEdit.hpp"
69 #include "LinkEditClassic.hpp"
70
71 namespace ld {
72 namespace tool {
73
// Counters for the ARM64 ADRP instruction optimization performed elsewhere in
// this file; presumably reported as linker statistics.
// NOTE(review): these are non-static, so they have external linkage — confirm
// no other translation unit defines the same names.
uint32_t sAdrpNA = 0;
uint32_t sAdrpNoped = 0;
uint32_t sAdrpNotNoped = 0;
77
78
// Construct an OutputFile.  No real work happens here: the _has* flags below
// just record which LINKEDIT pieces this kind of output needs (derived from
// the command-line options); the corresponding sections and atoms are created
// later (see write()).
OutputFile::OutputFile(const Options& opts)
	:
		usesWeakExternalSymbols(false), overridesWeakExternalSymbols(false),
		_noReExportedDylibs(false), pieDisabled(false), hasDataInCode(false),
		headerAndLoadCommandsSection(NULL),
		rebaseSection(NULL), bindingSection(NULL), weakBindingSection(NULL),
		lazyBindingSection(NULL), exportSection(NULL),
		splitSegInfoSection(NULL), functionStartsSection(NULL),
		dataInCodeSection(NULL), optimizationHintsSection(NULL),
		symbolTableSection(NULL), stringPoolSection(NULL),
		localRelocationsSection(NULL), externalRelocationsSection(NULL),
		sectionRelocationsSection(NULL),
		indirectSymbolTableSection(NULL),
		_options(opts),
		// compressed dyld info (LC_DYLD_INFO) vs classic relocations
		_hasDyldInfo(opts.makeCompressedDyldInfo()),
		_hasSymbolTable(true),
		// section relocations only exist in relocatable .o output
		_hasSectionRelocations(opts.outputKind() == Options::kObjectFile),
		_hasSplitSegInfo(opts.sharedRegionEligible()),
		_hasFunctionStartsInfo(opts.addFunctionStarts()),
		_hasDataInCodeInfo(opts.addDataInCodeInfo()),
		_hasDynamicSymbolTable(true),
		// classic local/external relocations are only emitted when NOT using
		// compressed dyld info
		_hasLocalRelocations(!opts.makeCompressedDyldInfo()),
		_hasExternalRelocations(!opts.makeCompressedDyldInfo()),
		_hasOptimizationHints(opts.outputKind() == Options::kObjectFile),
		_encryptedTEXTstartOffset(0),
		_encryptedTEXTendOffset(0),
		_localSymbolsStartIndex(0),
		_localSymbolsCount(0),
		_globalSymbolsStartIndex(0),
		_globalSymbolsCount(0),
		_importSymbolsStartIndex(0),
		_importSymbolsCount(0),
		// LINKEDIT atoms are created later; NULL until then
		_sectionsRelocationsAtom(NULL),
		_localRelocsAtom(NULL),
		_externalRelocsAtom(NULL),
		_symbolTableAtom(NULL),
		_indirectSymbolTableAtom(NULL),
		_rebasingInfoAtom(NULL),
		_bindingInfoAtom(NULL),
		_lazyBindingInfoAtom(NULL),
		_weakBindingInfoAtom(NULL),
		_exportInfoAtom(NULL),
		_splitSegInfoAtom(NULL),
		_functionStartsAtom(NULL),
		_dataInCodeAtom(NULL),
		_optimizationHintsAtom(NULL)
{
}
127
128 void OutputFile::dumpAtomsBySection(ld::Internal& state, bool printAtoms)
129 {
130 fprintf(stderr, "SORTED:\n");
131 for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
132 fprintf(stderr, "final section %p %s/%s %s start addr=0x%08llX, size=0x%08llX, alignment=%02d, fileOffset=0x%08llX\n",
133 (*it), (*it)->segmentName(), (*it)->sectionName(), (*it)->isSectionHidden() ? "(hidden)" : "",
134 (*it)->address, (*it)->size, (*it)->alignment, (*it)->fileOffset);
135 if ( printAtoms ) {
136 std::vector<const ld::Atom*>& atoms = (*it)->atoms;
137 for (std::vector<const ld::Atom*>::iterator ait = atoms.begin(); ait != atoms.end(); ++ait) {
138 fprintf(stderr, " %p (0x%04llX) %s\n", *ait, (*ait)->size(), (*ait)->name());
139 }
140 }
141 }
142 fprintf(stderr, "DYLIBS:\n");
143 for (std::vector<ld::dylib::File*>::iterator it=state.dylibs.begin(); it != state.dylibs.end(); ++it )
144 fprintf(stderr, " %s\n", (*it)->installPath());
145 }
146
// Top-level driver: lays out the file, generates LINKEDIT content, and writes
// the final output (plus the optional map and JSON side files).
// NOTE: the call order below matters — addresses and file offsets must be
// assigned before LINKEDIT content is encoded, and the encoded LINKEDIT
// sizes feed back into the final file size (see updateLINKEDITAddresses()).
void OutputFile::write(ld::Internal& state)
{
	this->buildDylibOrdinalMapping(state);
	this->addLoadCommands(state);
	this->addLinkEdit(state);                 // creates linkedit sections/atoms; content encoded later
	state.setSectionSizesAndAlignments();
	this->setLoadCommandsPadding(state);      // extra room after load commands (__TEXT only)
	_fileSize = state.assignFileOffsets();
	this->assignAtomAddresses(state);
	this->synthesizeDebugNotes(state);
	this->buildSymbolTable(state);
	this->generateLinkEditInfo(state);
	if ( _options.sharedRegionEncodingV2() )
		this->makeSplitSegInfoV2(state);
	else
		this->makeSplitSegInfo(state);
	this->updateLINKEDITAddresses(state);     // encodes linkedit and updates _fileSize
	//this->dumpAtomsBySection(state, false);
	this->writeOutputFile(state);
	this->writeMapFile(state);
	this->writeJSONEntry(state);
}
169
// Search the final sections for the segment containing 'addr'.  On success,
// sets *start/*end to the segment's address range and *index to its ordinal,
// then returns true.  Relies on state.sections being grouped by segment;
// segment boundaries are detected by a change in segment name.
// NOTE(review): a segment is only range-checked when the NEXT segment's first
// section is encountered, so an address inside the LAST segment in the list
// is never matched and this returns false — confirm callers never need the
// final segment.
bool OutputFile::findSegment(ld::Internal& state, uint64_t addr, uint64_t* start, uint64_t* end, uint32_t* index)
{
	uint32_t segIndex = 0;
	ld::Internal::FinalSection* segFirstSection = NULL;	// first section of the current segment
	ld::Internal::FinalSection* lastSection = NULL;		// most recently visited section
	for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		ld::Internal::FinalSection* sect = *it;
		if ( (segFirstSection == NULL ) || strcmp(segFirstSection->segmentName(), sect->segmentName()) != 0 ) {
			// a new segment is starting; range-check the one just completed
			if ( segFirstSection != NULL ) {
				//fprintf(stderr, "findSegment(0x%llX) seg changed to %s\n", addr, sect->segmentName());
				if ( (addr >= segFirstSection->address) && (addr < lastSection->address+lastSection->size) ) {
					*start = segFirstSection->address;
					*end = lastSection->address+lastSection->size;
					*index = segIndex;
					return true;
				}
				++segIndex;
			}
			segFirstSection = sect;
		}
		lastSection = sect;
	}
	return false;
}
194
195
196 void OutputFile::assignAtomAddresses(ld::Internal& state)
197 {
198 const bool log = false;
199 if ( log ) fprintf(stderr, "assignAtomAddresses()\n");
200 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
201 ld::Internal::FinalSection* sect = *sit;
202 if ( log ) fprintf(stderr, " section=%s/%s\n", sect->segmentName(), sect->sectionName());
203 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
204 const ld::Atom* atom = *ait;
205 switch ( sect-> type() ) {
206 case ld::Section::typeImportProxies:
207 // want finalAddress() of all proxy atoms to be zero
208 (const_cast<ld::Atom*>(atom))->setSectionStartAddress(0);
209 break;
210 case ld::Section::typeAbsoluteSymbols:
211 // want finalAddress() of all absolute atoms to be value of abs symbol
212 (const_cast<ld::Atom*>(atom))->setSectionStartAddress(0);
213 break;
214 case ld::Section::typeLinkEdit:
215 // linkedit layout is assigned later
216 break;
217 default:
218 (const_cast<ld::Atom*>(atom))->setSectionStartAddress(sect->address);
219 if ( log ) fprintf(stderr, " atom=%p, addr=0x%08llX, name=%s\n", atom, atom->finalAddress(), atom->name());
220 break;
221 }
222 }
223 }
224 }
225
// Encode all LINKEDIT content (dyld info, split-seg info, function starts,
// data-in-code, optimization hints, symbol tables, relocations) now that all
// other addresses are final, then re-pack the linkedit sections' addresses,
// sizes, and file offsets based on the actual encoded sizes.  Updates
// _fileSize to reflect the end of the (last) linkedit section.
void OutputFile::updateLINKEDITAddresses(ld::Internal& state)
{
	if ( _options.makeCompressedDyldInfo() ) {
		// build dyld rebasing info
		assert(_rebasingInfoAtom != NULL);
		_rebasingInfoAtom->encode();

		// build dyld binding info
		assert(_bindingInfoAtom != NULL);
		_bindingInfoAtom->encode();

		// build dyld lazy binding info
		assert(_lazyBindingInfoAtom != NULL);
		_lazyBindingInfoAtom->encode();

		// build dyld weak binding info
		assert(_weakBindingInfoAtom != NULL);
		_weakBindingInfoAtom->encode();

		// build dyld export info
		assert(_exportInfoAtom != NULL);
		_exportInfoAtom->encode();
	}

	if ( _options.sharedRegionEligible() ) {
		// build split seg info
		assert(_splitSegInfoAtom != NULL);
		_splitSegInfoAtom->encode();
	}

	if ( _options.addFunctionStarts() ) {
		// build function starts info
		assert(_functionStartsAtom != NULL);
		_functionStartsAtom->encode();
	}

	if ( _options.addDataInCodeInfo() ) {
		// build data-in-code info
		assert(_dataInCodeAtom != NULL);
		_dataInCodeAtom->encode();
	}

	if ( _hasOptimizationHints ) {
		// build linker-optimization-hint info
		assert(_optimizationHintsAtom != NULL);
		_optimizationHintsAtom->encode();
	}

	// build classic symbol table
	assert(_symbolTableAtom != NULL);
	_symbolTableAtom->encode();
	assert(_indirectSymbolTableAtom != NULL);
	_indirectSymbolTableAtom->encode();

	// add relocations to .o files
	if ( _options.outputKind() == Options::kObjectFile ) {
		assert(_sectionsRelocationsAtom != NULL);
		_sectionsRelocationsAtom->encode();
	}

	if ( ! _options.makeCompressedDyldInfo() ) {
		// build external relocations
		assert(_externalRelocsAtom != NULL);
		_externalRelocsAtom->encode();
		// build local relocations
		assert(_localRelocsAtom != NULL);
		_localRelocsAtom->encode();
	}

	// update address and file offsets now that linkedit content has been generated
	uint64_t curLinkEditAddress = 0;
	uint64_t curLinkEditfileOffset = 0;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() != ld::Section::typeLinkEdit )
			continue;
		// remember where the first linkedit section was laid out; all
		// linkedit sections are then packed contiguously from there
		if ( curLinkEditAddress == 0 ) {
			curLinkEditAddress = sect->address;
			curLinkEditfileOffset = sect->fileOffset;
		}
		uint16_t maxAlignment = 0;
		uint64_t offset = 0;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			//fprintf(stderr, "setting linkedit atom offset for %s\n", atom->name());
			if ( atom->alignment().powerOf2 > maxAlignment )
				maxAlignment = atom->alignment().powerOf2;
			// calculate section offset for this atom
			uint64_t alignment = 1 << atom->alignment().powerOf2;
			uint64_t currentModulus = (offset % alignment);
			uint64_t requiredModulus = atom->alignment().modulus;
			// advance offset to the next position satisfying the atom's
			// (power-of-2, modulus) alignment requirement
			if ( currentModulus != requiredModulus ) {
				if ( requiredModulus > currentModulus )
					offset += requiredModulus-currentModulus;
				else
					offset += requiredModulus+alignment-currentModulus;
			}
			(const_cast<ld::Atom*>(atom))->setSectionOffset(offset);
			(const_cast<ld::Atom*>(atom))->setSectionStartAddress(curLinkEditAddress);
			offset += atom->size();
		}
		sect->size = offset;
		// section alignment is that of a contained atom with the greatest alignment
		sect->alignment = maxAlignment;
		sect->address = curLinkEditAddress;
		sect->fileOffset = curLinkEditfileOffset;
		curLinkEditAddress += sect->size;
		curLinkEditfileOffset += sect->size;
	}

	// linkedit is laid out last, so the file ends where the last section ends
	_fileSize = state.sections.back()->fileOffset + state.sections.back()->size;
}
338
339
340 void OutputFile::setLoadCommandsPadding(ld::Internal& state)
341 {
342 // In other sections, any extra space is put and end of segment.
343 // In __TEXT segment, any extra space is put after load commands to allow post-processing of load commands
344 // Do a reverse layout of __TEXT segment to determine padding size and adjust section size
345 uint64_t paddingSize = 0;
346 switch ( _options.outputKind() ) {
347 case Options::kDyld:
348 // dyld itself has special padding requirements. We want the beginning __text section to start at a stable address
349 assert(strcmp(state.sections[1]->sectionName(),"__text") == 0);
350 state.sections[1]->alignment = 12; // page align __text
351 break;
352 case Options::kObjectFile:
353 // mach-o .o files need no padding between load commands and first section
354 // but leave enough room that the object file could be signed
355 paddingSize = 32;
356 break;
357 case Options::kPreload:
358 // mach-o MH_PRELOAD files need no padding between load commands and first section
359 paddingSize = 0;
360 case Options::kKextBundle:
361 if ( _options.useTextExecSegment() ) {
362 paddingSize = 32;
363 break;
364 }
365 // else fall into default case
366 default:
367 // work backwards from end of segment and lay out sections so that extra room goes to padding atom
368 uint64_t addr = 0;
369 uint64_t textSegPageSize = _options.segPageSize("__TEXT");
370 if ( _options.sharedRegionEligible() && (_options.iOSVersionMin() >= ld::iOS_8_0) && (textSegPageSize == 0x4000) )
371 textSegPageSize = 0x1000;
372 for (std::vector<ld::Internal::FinalSection*>::reverse_iterator it = state.sections.rbegin(); it != state.sections.rend(); ++it) {
373 ld::Internal::FinalSection* sect = *it;
374 if ( strcmp(sect->segmentName(), "__TEXT") != 0 )
375 continue;
376 if ( sect == headerAndLoadCommandsSection ) {
377 addr -= headerAndLoadCommandsSection->size;
378 paddingSize = addr % textSegPageSize;
379 break;
380 }
381 addr -= sect->size;
382 addr = addr & (0 - (1 << sect->alignment));
383 }
384
385 // if command line requires more padding than this
386 uint32_t minPad = _options.minimumHeaderPad();
387 if ( _options.maxMminimumHeaderPad() ) {
388 // -headerpad_max_install_names means there should be room for every path load command to grow to 1204 bytes
389 uint32_t altMin = _dylibsToLoad.size() * MAXPATHLEN;
390 if ( _options.outputKind() == Options::kDynamicLibrary )
391 altMin += MAXPATHLEN;
392 if ( altMin > minPad )
393 minPad = altMin;
394 }
395 if ( paddingSize < minPad ) {
396 int extraPages = (minPad - paddingSize + _options.segmentAlignment() - 1)/_options.segmentAlignment();
397 paddingSize += extraPages * _options.segmentAlignment();
398 }
399
400 if ( _options.makeEncryptable() ) {
401 // load commands must be on a separate non-encrypted page
402 int loadCommandsPage = (headerAndLoadCommandsSection->size + minPad)/_options.segmentAlignment();
403 int textPage = (headerAndLoadCommandsSection->size + paddingSize)/_options.segmentAlignment();
404 if ( loadCommandsPage == textPage ) {
405 paddingSize += _options.segmentAlignment();
406 textPage += 1;
407 }
408 // remember start for later use by load command
409 _encryptedTEXTstartOffset = textPage*_options.segmentAlignment();
410 }
411 break;
412 }
413 // add padding to size of section
414 headerAndLoadCommandsSection->size += paddingSize;
415 }
416
417
// Round 'addr' up to the next multiple of the target's segment alignment.
// NOTE: the mask trick assumes the alignment value is a power of two.
uint64_t OutputFile::pageAlign(uint64_t addr)
{
	const uint64_t alignment = _options.segmentAlignment();
	return ((addr+alignment-1) & (-alignment));
}
423
// Round 'addr' up to the next multiple of 'pageSize'.
// NOTE: the mask trick assumes pageSize is a power of two.
uint64_t OutputFile::pageAlign(uint64_t addr, uint64_t pageSize)
{
	return ((addr+pageSize-1) & (-pageSize));
}
428
429 static const char* makeName(const ld::Atom& atom)
430 {
431 static char buffer[4096];
432 switch ( atom.symbolTableInclusion() ) {
433 case ld::Atom::symbolTableNotIn:
434 case ld::Atom::symbolTableNotInFinalLinkedImages:
435 sprintf(buffer, "%s@0x%08llX", atom.name(), atom.objectAddress());
436 break;
437 case ld::Atom::symbolTableIn:
438 case ld::Atom::symbolTableInAndNeverStrip:
439 case ld::Atom::symbolTableInAsAbsolute:
440 case ld::Atom::symbolTableInWithRandomAutoStripLabel:
441 strlcpy(buffer, atom.name(), 4096);
442 break;
443 }
444 return buffer;
445 }
446
// Returns a printable name for the target of a fixup, for diagnostics.
// For unbound by-name references, u.target holds the symbol name string
// itself, hence the cast.  Bound references go through makeName() (which
// returns a shared static buffer — same lifetime caveat applies).
static const char* referenceTargetAtomName(ld::Internal& state, const ld::Fixup* ref)
{
	switch ( ref->binding ) {
		case ld::Fixup::bindingNone:
			return "NO BINDING";
		case ld::Fixup::bindingByNameUnbound:
			// u.target is really a const char* for unbound references
			return (char*)(ref->u.target);
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			return makeName(*((ld::Atom*)(ref->u.target)));
		case ld::Fixup::bindingsIndirectlyBound:
			return makeName(*state.indirectBindingTable[ref->u.bindingIndex]);
	}
	return "BAD BINDING";
}
462
463 bool OutputFile::targetIsThumb(ld::Internal& state, const ld::Fixup* fixup)
464 {
465 switch ( fixup->binding ) {
466 case ld::Fixup::bindingByContentBound:
467 case ld::Fixup::bindingDirectlyBound:
468 return fixup->u.target->isThumb();
469 case ld::Fixup::bindingsIndirectlyBound:
470 return state.indirectBindingTable[fixup->u.bindingIndex]->isThumb();
471 default:
472 break;
473 }
474 throw "unexpected binding";
475 }
476
// Returns the final (post-layout) address of a fixup's target and stores the
// target atom through 'target'.  When emitting classic relocations (no
// compressed dyld info), a contentAddendOnly fixup contributes 0, because the
// addend is stored in the instruction content and the target address is not
// used.  Throws for bindings that have no resolvable target.
uint64_t OutputFile::addressOf(const ld::Internal& state, const ld::Fixup* fixup, const ld::Atom** target)
{
	if ( !_options.makeCompressedDyldInfo() ) {
		// For external relocations the classic mach-o format
		// has addend only stored in the content. That means
		// that the address of the target is not used.
		if ( fixup->contentAddendOnly )
			return 0;
	}
	switch ( fixup->binding ) {
		case ld::Fixup::bindingNone:
			throw "unexpected bindingNone";
		case ld::Fixup::bindingByNameUnbound:
			throw "unexpected bindingByNameUnbound";
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			*target = fixup->u.target;
			return (*target)->finalAddress();
		case ld::Fixup::bindingsIndirectlyBound:
			*target = state.indirectBindingTable[fixup->u.bindingIndex];
		#ifndef NDEBUG
			if ( ! (*target)->finalAddressMode() ) {
				throwf("reference to symbol (which has not been assigned an address) %s", (*target)->name());
			}
		#endif
			return (*target)->finalAddress();
	}
	throw "unexpected binding";
}
506
// Like addressOf(), but always returns the real target address — it does NOT
// honor fixup->contentAddendOnly.  Stores the target atom through 'target'.
// NOTE(review): otherwise a near-duplicate of addressOf(); consider sharing
// the common resolution logic.
uint64_t OutputFile::addressAndTarget(const ld::Internal& state, const ld::Fixup* fixup, const ld::Atom** target)
{
	switch ( fixup->binding ) {
		case ld::Fixup::bindingNone:
			throw "unexpected bindingNone";
		case ld::Fixup::bindingByNameUnbound:
			throw "unexpected bindingByNameUnbound";
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			*target = fixup->u.target;
			return (*target)->finalAddress();
		case ld::Fixup::bindingsIndirectlyBound:
			*target = state.indirectBindingTable[fixup->u.bindingIndex];
		#ifndef NDEBUG
			if ( ! (*target)->finalAddressMode() ) {
				throwf("reference to symbol (which has not been assigned an address) %s", (*target)->name());
			}
		#endif
			return (*target)->finalAddress();
	}
	throw "unexpected binding";
}
529
530
531 uint64_t OutputFile::sectionOffsetOf(const ld::Internal& state, const ld::Fixup* fixup)
532 {
533 const ld::Atom* target = NULL;
534 switch ( fixup->binding ) {
535 case ld::Fixup::bindingNone:
536 throw "unexpected bindingNone";
537 case ld::Fixup::bindingByNameUnbound:
538 throw "unexpected bindingByNameUnbound";
539 case ld::Fixup::bindingByContentBound:
540 case ld::Fixup::bindingDirectlyBound:
541 target = fixup->u.target;
542 break;
543 case ld::Fixup::bindingsIndirectlyBound:
544 target = state.indirectBindingTable[fixup->u.bindingIndex];
545 break;
546 }
547 assert(target != NULL);
548
549 uint64_t targetAddress = target->finalAddress();
550 for (std::vector<ld::Internal::FinalSection*>::const_iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
551 const ld::Internal::FinalSection* sect = *it;
552 if ( (sect->address <= targetAddress) && (targetAddress < (sect->address+sect->size)) )
553 return targetAddress - sect->address;
554 }
555 throw "section not found for section offset";
556 }
557
558
559
// Returns the offset of the fixup's target within the thread-local-variable
// template (the TLV initial-values/zero-fill area).
// NOTE(review): the offset is computed against the FIRST section of type
// typeTLVInitialValues/typeTLVZeroFill found in layout order, regardless of
// which section actually contains the target — this relies on the TLV
// sections being laid out contiguously starting at that first section.
uint64_t OutputFile::tlvTemplateOffsetOf(const ld::Internal& state, const ld::Fixup* fixup)
{
	const ld::Atom* target = NULL;
	switch ( fixup->binding ) {
		case ld::Fixup::bindingNone:
			throw "unexpected bindingNone";
		case ld::Fixup::bindingByNameUnbound:
			throw "unexpected bindingByNameUnbound";
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			target = fixup->u.target;
			break;
		case ld::Fixup::bindingsIndirectlyBound:
			target = state.indirectBindingTable[fixup->u.bindingIndex];
			break;
	}
	assert(target != NULL);

	for (std::vector<ld::Internal::FinalSection*>::const_iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		const ld::Internal::FinalSection* sect = *it;
		switch ( sect->type() ) {
			case ld::Section::typeTLVInitialValues:
			case ld::Section::typeTLVZeroFill:
				return target->finalAddress() - sect->address;
			default:
				break;
		}
	}
	throw "section not found for tlvTemplateOffsetOf";
}
590
591 void OutputFile::printSectionLayout(ld::Internal& state)
592 {
593 // show layout of final image
594 fprintf(stderr, "final section layout:\n");
595 for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
596 if ( (*it)->isSectionHidden() )
597 continue;
598 fprintf(stderr, " %s/%s addr=0x%08llX, size=0x%08llX, fileOffset=0x%08llX, type=%d\n",
599 (*it)->segmentName(), (*it)->sectionName(),
600 (*it)->address, (*it)->size, (*it)->fileOffset, (*it)->type());
601 }
602 }
603
604
// Verify an 8-bit signed displacement fits; throws (after dumping the final
// section layout to stderr) if not.
// NOTE(review): the accepted range is -128..127 even though the error message
// says "+/-127B".
void OutputFile::rangeCheck8(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	if ( (displacement > 127) || (displacement < -128) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("8-bit reference out of range (%lld max is +/-127B): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
617
// Verify a displacement fits in a signed 16-bit field (+/-32KB); throws
// (after dumping the final section layout to stderr) if not.
void OutputFile::rangeCheck16(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t thirtyTwoKLimit = 0x00007FFF;
	if ( (displacement > thirtyTwoKLimit) || (displacement < (-thirtyTwoKLimit)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("16-bit reference out of range (%lld max is +/-32KB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
631
// Verify a branch displacement fits in a signed 32-bit field (+/-2GB); throws
// (after dumping the final section layout to stderr) if not.
void OutputFile::rangeCheckBranch32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t twoGigLimit = 0x7FFFFFFF;
	if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("32-bit branch out of range (%lld max is +/-2GB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
645
646
// Verify a 32-bit absolute address fits in 4GB.  On 32-bit arm/i386 userland
// targets this only warns (see rdar comment below); otherwise throws after
// dumping the final section layout to stderr.
void OutputFile::rangeCheckAbsolute32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t fourGigLimit = 0xFFFFFFFF;
	if ( displacement > fourGigLimit ) {
		// <rdar://problem/9610466> cannot enforce 32-bit range checks on 32-bit archs because assembler loses sign information
		//  .long _foo - 0xC0000000
		// is encoded in mach-o the same as:
		//  .long _foo + 0x40000000
		// so if _foo lays out to 0xC0000100, the first is ok, but the second is not.
		if ( (_options.architecture() == CPU_TYPE_ARM) || (_options.architecture() == CPU_TYPE_I386) ) {
			// Unlikely userland code does funky stuff like this, so warn for them, but not warn for -preload or -static
			if ( (_options.outputKind() != Options::kPreload) && (_options.outputKind() != Options::kStaticExecutable) ) {
				warning("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
						displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), displacement);
			}
			return;
		}
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		// bindingNone means there is no named target to include in the message
		if ( fixup->binding == ld::Fixup::bindingNone )
			throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
					displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), displacement);
		else
			throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to %s (0x%08llX)",
					displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), referenceTargetAtomName(state, fixup),
					addressOf(state, fixup, &target));
	}
}
677
678
679 void OutputFile::rangeCheckRIP32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
680 {
681 const int64_t twoGigLimit = 0x7FFFFFFF;
682 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
683 // show layout of final image
684 printSectionLayout(state);
685
686 const ld::Atom* target;
687 throwf("32-bit RIP relative reference out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
688 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
689 addressOf(state, fixup, &target));
690 }
691 }
692
// Verify an ARM ldr/str 12-bit displacement is in range; throws (after
// dumping the final section layout to stderr) if not.
// NOTE(review): the check uses +/-4092 while the message says "+/-4096B" —
// presumably the tighter bound accounts for word alignment; confirm.
void OutputFile::rangeCheckARM12(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	if ( (displacement > 4092LL) || (displacement < (-4092LL)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("ARM ldr 12-bit displacement out of range (%lld max is +/-4096B): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
705
706 bool OutputFile::checkArmBranch24Displacement(int64_t displacement)
707 {
708 return ( (displacement < 33554428LL) && (displacement > (-33554432LL)) );
709 }
710
// Verify an ARM b/bl/blx branch displacement is reachable (+/-32MB); throws
// (after dumping the final section layout to stderr) if not.
void OutputFile::rangeCheckARMBranch24(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	if ( checkArmBranch24Displacement(displacement) )
		return;

	// show layout of final image
	printSectionLayout(state);

	const ld::Atom* target;
	throwf("b/bl/blx ARM branch out of range (%lld max is +/-32MB): from %s (0x%08llX) to %s (0x%08llX)",
			displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
			addressOf(state, fixup, &target));
}
724
// Returns true if 'displacement' is reachable by a Thumb branch.  When the
// target sub-architecture is known to support Thumb2 the reach is +/-16MB;
// otherwise the conservative Thumb1 reach of +/-4MB is used.
bool OutputFile::checkThumbBranch22Displacement(int64_t displacement)
{
	// thumb2 supports  +/- 16MB displacement
	if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
		if ( (displacement > 16777214LL) || (displacement < (-16777216LL)) ) {
			return false;
		}
	}
	else {
		// thumb1 supports +/- 4MB displacement
		if ( (displacement > 4194302LL) || (displacement < (-4194304LL)) ) {
			return false;
		}
	}
	return true;
}
741
// Verify a Thumb b/bl/blx branch displacement is reachable (see
// checkThumbBranch22Displacement for the thumb1 vs thumb2 limits); throws
// (after dumping the final section layout to stderr) if not.
void OutputFile::rangeCheckThumbBranch22(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	if ( checkThumbBranch22Displacement(displacement) )
		return;

	// show layout of final image
	printSectionLayout(state);

	const ld::Atom* target;
	if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
		throwf("b/bl/blx thumb2 branch out of range (%lld max is +/-16MB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
	else {
		throwf("b/bl/blx thumb1 branch out of range (%lld max is +/-4MB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
762
763
// Verify an ARM64 b/bl branch displacement fits in the signed 26-bit,
// 4-byte-scaled immediate (+/-128MB); throws (after dumping the final
// section layout to stderr) if not.
void OutputFile::rangeCheckARM64Branch26(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t bl_128MegLimit = 0x07FFFFFF;	// 128MB - 1
	if ( (displacement > bl_128MegLimit) || (displacement < (-bl_128MegLimit)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("b(l) ARM64 branch out of range (%lld max is +/-128MB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
777
// Verify an ARM64 ADRP page displacement is within reach (+/-4GB); throws
// (after dumping the final section layout to stderr) if not.
// NOTE(review): the comparisons are strict (> / <), so a displacement of
// exactly +/-4GiB passes — presumably acceptable given ADRP's page-granular
// reach; confirm.
void OutputFile::rangeCheckARM64Page21(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t adrp_4GigLimit = 0x100000000ULL;
	if ( (displacement > adrp_4GigLimit) || (displacement < (-adrp_4GigLimit)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("ARM64 ADRP out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
791
792
// Endian-specific load/store helpers used when applying fixups to output
// buffers.  NOTE(review): each casts the raw byte pointer to a wider integer
// pointer, so they assume 'loc' is suitably aligned (or that the target CPU
// tolerates unaligned access) — confirm for each call site.
uint16_t OutputFile::get16LE(uint8_t* loc) { return LittleEndian::get16(*(uint16_t*)loc); }
void     OutputFile::set16LE(uint8_t* loc, uint16_t value) { LittleEndian::set16(*(uint16_t*)loc, value); }

uint32_t OutputFile::get32LE(uint8_t* loc) { return LittleEndian::get32(*(uint32_t*)loc); }
void     OutputFile::set32LE(uint8_t* loc, uint32_t value) { LittleEndian::set32(*(uint32_t*)loc, value); }

uint64_t OutputFile::get64LE(uint8_t* loc) { return LittleEndian::get64(*(uint64_t*)loc); }
void     OutputFile::set64LE(uint8_t* loc, uint64_t value) { LittleEndian::set64(*(uint64_t*)loc, value); }

uint16_t OutputFile::get16BE(uint8_t* loc) { return BigEndian::get16(*(uint16_t*)loc); }
void     OutputFile::set16BE(uint8_t* loc, uint16_t value) { BigEndian::set16(*(uint16_t*)loc, value); }

uint32_t OutputFile::get32BE(uint8_t* loc) { return BigEndian::get32(*(uint32_t*)loc); }
void     OutputFile::set32BE(uint8_t* loc, uint32_t value) { BigEndian::set32(*(uint32_t*)loc, value); }

uint64_t OutputFile::get64BE(uint8_t* loc) { return BigEndian::get64(*(uint64_t*)loc); }
void     OutputFile::set64BE(uint8_t* loc, uint64_t value) { BigEndian::set64(*(uint64_t*)loc, value); }
810
811 #if SUPPORT_ARCH_arm64
812
// Returns the encoding of the ARM64 NOP instruction.
static uint32_t makeNOP()
{
	const uint32_t arm64NopEncoding = 0xD503201F;
	return arm64NopEncoding;
}
816
// How a load sign-extends its result (not at all, to 32 bits, or to 64 bits).
enum SignExtension { signedNot, signed32, signed64 };
// Decoded fields of an ARM64 load/store instruction, used by the
// instruction-rewriting helpers below.
struct LoadStoreInfo {
	uint32_t		reg;			// destination/source register number
	uint32_t		baseReg;		// base address register number
	uint32_t		offset;			// after scaling
	uint32_t		size;			// 1,2,4,8, or 16
	bool			isStore;
	bool			isFloat;		// if destReg is FP/SIMD
	SignExtension	signEx;			// if load is sign extended
};
827
// Assemble an ARM64 LDR (literal, PC-relative) instruction that loads into
// info.reg from 'targetAddress', with the instruction located at
// 'instructionAddress'.  Only 4-, 8-, and 16-byte loads have a literal form,
// and the PC-relative delta must be within +/-1MB and 4-byte aligned.
static uint32_t makeLDR_literal(const LoadStoreInfo& info, uint64_t targetAddress, uint64_t instructionAddress)
{
	int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	assert((info.reg & 0xFFFFFFE0) == 0);			// register number must be 0..31
	assert((targetAddress & 0x3) == 0);
	assert((instructionAddress & 0x3) == 0);
	assert(!info.isStore);							// stores have no literal form
	// imm19 field occupies bits 23:5 and encodes the delta scaled by 4
	uint32_t imm19 = (delta << 3) & 0x00FFFFE0;
	uint32_t instruction = 0;
	switch ( info.size ) {
		case 4:
			if ( info.isFloat ) {
				assert(info.signEx == signedNot);
				instruction = 0x1C000000;			// LDR S-reg, literal
			}
			else {
				if ( info.signEx == signed64 )
					instruction = 0x98000000;		// LDRSW X-reg, literal
				else
					instruction = 0x18000000;		// LDR W-reg, literal
			}
			break;
		case 8:
			assert(info.signEx == signedNot);
			instruction = info.isFloat ? 0x5C000000 : 0x58000000;	// LDR D-reg / X-reg, literal
			break;
		case 16:
			assert(info.signEx == signedNot);
			instruction = 0x9C000000;				// LDR Q-reg, literal
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	return (instruction | imm19 | info.reg);
}
865
// Encodes an ARM64 ADR instruction computing 'targetAddress' into X<destReg>,
// given the address the instruction will execute at.  The displacement is a
// 21-bit signed byte offset: the low two bits go in immlo (bits 30:29) and the
// remaining bits in immhi (bits 23:5).  Target must be within +/-1MB.
static uint32_t makeADR(uint32_t destReg, uint64_t targetAddress, uint64_t instructionAddress)
{
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((instructionAddress & 0x3) == 0);
	const int64_t displacement = targetAddress - instructionAddress;
	assert(displacement < 1024*1024);
	assert(displacement > -1024*1024);
	const uint32_t loBits = ((uint32_t)displacement & 0x00000003) << 29;	// immlo
	const uint32_t hiBits = ((uint32_t)displacement & 0x001FFFFC) << 3;		// immhi
	return 0x10000000 | hiBits | loBits | destReg;
}
878
// Encodes an ARM64 load/store register instruction with an unsigned, scaled
// 12-bit immediate offset (LDR/STR Rt, [Rn, #imm]) from the decoded fields in
// 'info'.  The 'size' (bits 31:30) and 'opc' (bits 23:22) fields jointly
// select the access width, load-vs-store, and sign extension; the immediate
// is the byte offset divided by the access size.
static uint32_t makeLoadOrStore(const LoadStoreInfo& info)
{
	uint32_t instruction = 0x39000000;	// load/store register (unsigned immediate) class
	if ( info.isFloat )
		instruction |= 0x04000000;		// V bit: FP/SIMD register file
	instruction |= info.reg;
	instruction |= (info.baseReg << 5);
	uint32_t sizeBits = 0;
	uint32_t opcBits = 0;
	uint32_t imm12Bits = 0;
	switch ( info.size ) {
		case 1:
			sizeBits = 0;
			imm12Bits = info.offset;	// byte accesses are unscaled
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						opcBits = 3;
						break;
					case signed64:
						opcBits = 2;
						break;
				}
			}
			break;
		case 2:
			sizeBits = 1;
			assert((info.offset % 2) == 0);
			imm12Bits = info.offset/2;
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						opcBits = 3;
						break;
					case signed64:
						opcBits = 2;
						break;
				}
			}
			break;
		case 4:
			sizeBits = 2;
			assert((info.offset % 4) == 0);
			imm12Bits = info.offset/4;
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						// a 32-bit load already fills the W register; no such encoding exists
						assert(0 && "cannot use signed32 with 32-bit load/store");
						break;
					case signed64:
						opcBits = 2;
						break;
				}
			}
			break;
		case 8:
			sizeBits = 3;
			assert((info.offset % 8) == 0);
			imm12Bits = info.offset/8;
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				opcBits = 1;
				assert(info.signEx == signedNot);
			}
			break;
		case 16:
			// 128-bit accesses exist only for the FP/SIMD (Q) registers; they
			// reuse size=00 with the high opc bit set
			sizeBits = 0;
			assert((info.offset % 16) == 0);
			imm12Bits = info.offset/16;
			assert(info.isFloat);
			if ( info.isStore ) {
				opcBits = 2;
			}
			else {
				opcBits = 3;
			}
			break;
		default:
			assert(0 && "bad load/store size");
			break;
	}
	assert(imm12Bits < 4096);
	return (instruction | (sizeBits << 30) | (opcBits << 22) | (imm12Bits << 10));
}
983
// Attempts to decode 'instruction' as an ARM64 load/store register with an
// unsigned scaled 12-bit immediate offset.  On success fills in 'info' (with
// 'offset' converted back to a byte offset) and returns true; returns false
// for any other instruction or for size/opc combinations not handled here.
static bool parseLoadOrStore(uint32_t instruction, LoadStoreInfo& info)
{
	// class check: load/store register (unsigned immediate)
	if ( (instruction & 0x3B000000) != 0x39000000 )
		return false;
	info.isFloat = ( (instruction & 0x04000000) != 0 );
	info.reg = (instruction & 0x1F);
	info.baseReg = ((instruction>>5) & 0x1F);
	// dispatch on size (bits 31:30) combined with opc (bits 23:22)
	switch (instruction & 0xC0C00000) {
		case 0x00000000:
			info.size = 1;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0x00400000:
			info.size = 1;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		case 0x00800000:
			// size=00/opc=10 is STR Q-reg for FP/SIMD, LDRSB (to X) for integer
			if ( info.isFloat ) {
				info.size = 16;
				info.isStore = true;
				info.signEx = signedNot;
			}
			else {
				info.size = 1;
				info.isStore = false;
				info.signEx = signed64;
			}
			break;
		case 0x00C00000:
			// size=00/opc=11 is LDR Q-reg for FP/SIMD, LDRSB (to W) for integer
			if ( info.isFloat ) {
				info.size = 16;
				info.isStore = false;
				info.signEx = signedNot;
			}
			else {
				info.size = 1;
				info.isStore = false;
				info.signEx = signed32;
			}
			break;
		case 0x40000000:
			info.size = 2;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0x40400000:
			info.size = 2;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		case 0x40800000:
			info.size = 2;
			info.isStore = false;
			info.signEx = signed64;
			break;
		case 0x40C00000:
			info.size = 2;
			info.isStore = false;
			info.signEx = signed32;
			break;
		case 0x80000000:
			info.size = 4;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0x80400000:
			info.size = 4;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		case 0x80800000:
			info.size = 4;
			info.isStore = false;
			info.signEx = signed64;
			break;
		case 0xC0000000:
			info.size = 8;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0xC0400000:
			info.size = 8;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		default:
			// remaining combinations (e.g. prefetch, reserved encodings) are not handled
			return false;
	}
	// imm12 is scaled by the access size; convert back to a byte offset
	info.offset = ((instruction >> 10) & 0x0FFF) * info.size;
	return true;
}
1077
// Destination register decoded from an ADRP instruction.
struct AdrpInfo {
	uint32_t destReg;
};

// Returns true if 'instruction' is an ARM64 ADRP, filling 'info' with its
// destination register; otherwise returns false and leaves 'info' untouched.
static bool parseADRP(uint32_t instruction, AdrpInfo& info)
{
	const bool isAdrp = ( (instruction & 0x9F000000) == 0x90000000 );
	if ( isAdrp )
		info.destReg = (instruction & 0x1F);
	return isAdrp;
}
1089
// Fields decoded from an ADD (immediate) instruction.
struct AddInfo {
	uint32_t destReg;
	uint32_t srcReg;
	uint32_t addend;
};

// Returns true if 'instruction' is a 64-bit ARM64 ADD Xd, Xn, #imm12 with no
// shift, filling 'info' with its registers and immediate; false otherwise.
static bool parseADD(uint32_t instruction, AddInfo& info)
{
	const bool isAdd64Imm = ( (instruction & 0xFFC00000) == 0x91000000 );
	if ( isAdd64Imm ) {
		info.destReg = instruction & 0x1F;
		info.srcReg  = (instruction >> 5) & 0x1F;
		info.addend  = (instruction >> 10) & 0xFFF;
	}
	return isAdd64Imm;
}
1105
1106
1107
#if 0
// NOTE: everything in this #if 0 region is compiled out; the helpers below are
// retained for reference only and duplicate functionality provided above.
static uint32_t makeLDR_scaledOffset(const LoadStoreInfo& info)
{
	assert((info.reg & 0xFFFFFFE0) == 0);
	assert((info.baseReg & 0xFFFFFFE0) == 0);
	assert(!info.isFloat || (info.signEx != signedNot));
	uint32_t sizeBits = 0;
	uint32_t opcBits = 1;
	uint32_t vBit = info.isFloat;
	switch ( info.signEx ) {
		case signedNot:
			opcBits = 1;
			break;
		case signed32:
			opcBits = 3;
			break;
		case signed64:
			opcBits = 2;
			break;
		default:
			assert(0 && "bad SignExtension runtime value");
	}
	switch ( info.size ) {
		case 1:
			sizeBits = 0;
			break;
		case 2:
			sizeBits = 1;
			break;
		case 4:
			sizeBits = 2;
			break;
		case 8:
			sizeBits = 3;
			break;
		case 16:
			sizeBits = 0;
			vBit = 1;
			opcBits = 3;
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	assert((info.offset % info.size) == 0);
	uint32_t scaledOffset = info.offset/info.size;
	assert(scaledOffset < 4096);
	return (0x39000000 | (sizeBits<<30) | (vBit<<26) | (opcBits<<22) | (scaledOffset<<10) | (info.baseReg<<5) | info.reg);
}

static uint32_t makeLDR_literal(uint32_t destReg, uint32_t loadSize, bool isFloat, uint64_t targetAddress, uint64_t instructionAddress)
{
	int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((targetAddress & 0x3) == 0);
	assert((instructionAddress & 0x3) == 0);
	uint32_t imm19 = (delta << 3) & 0x00FFFFE0;
	uint32_t instruction = 0;
	switch ( loadSize ) {
		case 4:
			instruction = isFloat ? 0x1C000000 : 0x18000000;
			break;
		case 8:
			instruction = isFloat ? 0x5C000000 : 0x58000000;
			break;
		case 16:
			instruction = 0x9C000000;
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	return (instruction | imm19 | destReg);
}


static bool ldrInfo(uint32_t instruction, uint8_t* size, uint8_t* destReg, bool* v, uint32_t* scaledOffset)
{
	*v = ( (instruction & 0x04000000) != 0 );
	*destReg = (instruction & 0x1F);
	uint32_t imm12 = ((instruction >> 10) & 0x00000FFF);
	switch ( (instruction & 0xC0000000) >> 30 ) {
		case 0:
			// vector and byte LDR have same "size" bits, need to check other bits to differentiate
			if ( (instruction & 0x00800000) == 0 ) {
				*size = 1;
				*scaledOffset = imm12;
			}
			else {
				*size = 16;
				*scaledOffset = imm12 * 16;
			}
			break;
		case 1:
			*size = 2;
			*scaledOffset = imm12 * 2;
			break;
		case 2:
			*size = 4;
			*scaledOffset = imm12 * 4;
			break;
		case 3:
			*size = 8;
			*scaledOffset = imm12 * 8;
			break;
	}
	return ((instruction & 0x3B400000) == 0x39400000);
}
#endif
1217
// Returns true if the two addresses are strictly within 1MB of each other
// (the reach of arm64 ADR/LDR-literal instructions).
static bool withinOneMeg(uint64_t addr1, uint64_t addr2) {
	const int64_t distance = (int64_t)(addr2 - addr1);
	return ( (-1024*1024 < distance) && (distance < 1024*1024) );
}
1222 #endif // SUPPORT_ARCH_arm64
1223
// Fills in 'info' describing the instruction at offsetInAtom+delta within
// 'atom': its location in 'buffer', its final address, and its raw 32-bit
// little-endian encoding.  If a linker-optimization-hint references that
// offset (an entry in 'usedByHints' with a non-NULL fixup), the fixup and its
// resolved target address — including any kindAddAddend from the fixup
// cluster — are recorded; otherwise the fixup/target fields are cleared.
void OutputFile::setInfo(ld::Internal& state, const ld::Atom* atom, uint8_t* buffer, const std::map<uint32_t, const Fixup*>& usedByHints,
						uint32_t offsetInAtom, uint32_t delta, InstructionInfo* info)
{
	info->offsetInAtom = offsetInAtom + delta;
	std::map<uint32_t, const Fixup*>::const_iterator pos = usedByHints.find(info->offsetInAtom);
	if ( (pos != usedByHints.end()) && (pos->second != NULL) ) {
		info->fixup = pos->second;
		info->targetAddress = addressOf(state, info->fixup, &info->target);
		if ( info->fixup->clusterSize != ld::Fixup::k1of1 ) {
			// multi-fixup cluster: the only follow-on handled here is an addend
			assert(info->fixup->firstInCluster());
			const ld::Fixup* nextFixup = info->fixup + 1;
			if ( nextFixup->kind == ld::Fixup::kindAddAddend ) {
				info->targetAddress += nextFixup->u.addend;
			}
			else {
				assert(0 && "expected addend");
			}
		}
	}
	else {
		// instruction not referenced by any hint
		info->fixup = NULL;
		info->targetAddress = 0;
		info->target = NULL;
	}
	info->instructionContent = &buffer[info->offsetInAtom];
	info->instructionAddress = atom->finalAddress() + info->offsetInAtom;
	info->instruction = get32LE(info->instructionContent);
}
1252
1253 #if SUPPORT_ARCH_arm64
1254 static bool isPageKind(const ld::Fixup* fixup, bool mustBeGOT=false)
1255 {
1256 if ( fixup == NULL )
1257 return false;
1258 const ld::Fixup* f;
1259 switch ( fixup->kind ) {
1260 case ld::Fixup::kindStoreTargetAddressARM64Page21:
1261 return !mustBeGOT;
1262 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
1263 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
1264 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
1265 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
1266 return true;
1267 case ld::Fixup::kindSetTargetAddress:
1268 f = fixup;
1269 do {
1270 ++f;
1271 } while ( ! f->lastInCluster() );
1272 switch (f->kind ) {
1273 case ld::Fixup::kindStoreARM64Page21:
1274 return !mustBeGOT;
1275 case ld::Fixup::kindStoreARM64GOTLoadPage21:
1276 case ld::Fixup::kindStoreARM64GOTLeaPage21:
1277 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
1278 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
1279 return true;
1280 default:
1281 break;
1282 }
1283 break;
1284 default:
1285 break;
1286 }
1287 return false;
1288 }
1289
1290 static bool isPageOffsetKind(const ld::Fixup* fixup, bool mustBeGOT=false)
1291 {
1292 if ( fixup == NULL )
1293 return false;
1294 const ld::Fixup* f;
1295 switch ( fixup->kind ) {
1296 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
1297 return !mustBeGOT;
1298 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
1299 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
1300 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
1301 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
1302 return true;
1303 case ld::Fixup::kindSetTargetAddress:
1304 f = fixup;
1305 do {
1306 ++f;
1307 } while ( ! f->lastInCluster() );
1308 switch (f->kind ) {
1309 case ld::Fixup::kindStoreARM64PageOff12:
1310 return !mustBeGOT;
1311 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
1312 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
1313 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
1314 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
1315 return true;
1316 default:
1317 break;
1318 }
1319 break;
1320 default:
1321 break;
1322 }
1323 return false;
1324 }
1325 #endif // SUPPORT_ARCH_arm64
1326
1327
// Validates a precondition while processing a linker optimization hint.  On
// failure the hint is simply skipped: a warning is emitted and the enclosing
// switch case is exited via 'break'.  NOTE: expands to a naked if/break, so
// it must only be used directly inside the hint-processing switch (not inside
// a nested loop or switch, where 'break' would bind differently).
#define LOH_ASSERT(cond) \
	if ( !(cond) ) { \
		warning("ignoring linker optimization hint at %s+0x%X because " #cond, atom->name(), fit->offsetInAtom); \
		break; \
	}
1333
1334 void OutputFile::applyFixUps(ld::Internal& state, uint64_t mhAddress, const ld::Atom* atom, uint8_t* buffer)
1335 {
1336 //fprintf(stderr, "applyFixUps() on %s\n", atom->name());
1337 int64_t accumulator = 0;
1338 const ld::Atom* toTarget = NULL;
1339 const ld::Atom* fromTarget;
1340 int64_t delta;
1341 uint32_t instruction;
1342 uint32_t newInstruction;
1343 bool is_bl;
1344 bool is_blx;
1345 bool is_b;
1346 bool thumbTarget = false;
1347 std::map<uint32_t, const Fixup*> usedByHints;
1348 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
1349 uint8_t* fixUpLocation = &buffer[fit->offsetInAtom];
1350 ld::Fixup::LOH_arm64 lohExtra;
1351 switch ( (ld::Fixup::Kind)(fit->kind) ) {
1352 case ld::Fixup::kindNone:
1353 case ld::Fixup::kindNoneFollowOn:
1354 case ld::Fixup::kindNoneGroupSubordinate:
1355 case ld::Fixup::kindNoneGroupSubordinateFDE:
1356 case ld::Fixup::kindNoneGroupSubordinateLSDA:
1357 case ld::Fixup::kindNoneGroupSubordinatePersonality:
1358 break;
1359 case ld::Fixup::kindSetTargetAddress:
1360 accumulator = addressOf(state, fit, &toTarget);
1361 thumbTarget = targetIsThumb(state, fit);
1362 if ( thumbTarget )
1363 accumulator |= 1;
1364 if ( fit->contentAddendOnly || fit->contentDetlaToAddendOnly )
1365 accumulator = 0;
1366 break;
1367 case ld::Fixup::kindSubtractTargetAddress:
1368 delta = addressOf(state, fit, &fromTarget);
1369 if ( ! fit->contentAddendOnly )
1370 accumulator -= delta;
1371 break;
1372 case ld::Fixup::kindAddAddend:
1373 if ( ! fit->contentIgnoresAddend ) {
1374 // <rdar://problem/8342028> ARM main executables main contain .long constants pointing
1375 // into themselves such as jump tables. These .long should not have thumb bit set
1376 // even though the target is a thumb instruction. We can tell it is an interior pointer
1377 // because we are processing an addend.
1378 if ( thumbTarget && (toTarget == atom) && ((int32_t)fit->u.addend > 0) ) {
1379 accumulator &= (-2);
1380 //warning("removing thumb bit from intra-atom pointer in %s %s+0x%0X",
1381 // atom->section().sectionName(), atom->name(), fit->offsetInAtom);
1382 }
1383 accumulator += fit->u.addend;
1384 }
1385 break;
1386 case ld::Fixup::kindSubtractAddend:
1387 accumulator -= fit->u.addend;
1388 break;
1389 case ld::Fixup::kindSetTargetImageOffset:
1390 accumulator = addressOf(state, fit, &toTarget) - mhAddress;
1391 thumbTarget = targetIsThumb(state, fit);
1392 if ( thumbTarget )
1393 accumulator |= 1;
1394 break;
1395 case ld::Fixup::kindSetTargetSectionOffset:
1396 accumulator = sectionOffsetOf(state, fit);
1397 break;
1398 case ld::Fixup::kindSetTargetTLVTemplateOffset:
1399 accumulator = tlvTemplateOffsetOf(state, fit);
1400 break;
1401 case ld::Fixup::kindStore8:
1402 *fixUpLocation += accumulator;
1403 break;
1404 case ld::Fixup::kindStoreLittleEndian16:
1405 set16LE(fixUpLocation, accumulator);
1406 break;
1407 case ld::Fixup::kindStoreLittleEndianLow24of32:
1408 set32LE(fixUpLocation, (get32LE(fixUpLocation) & 0xFF000000) | (accumulator & 0x00FFFFFF) );
1409 break;
1410 case ld::Fixup::kindStoreLittleEndian32:
1411 rangeCheckAbsolute32(accumulator, state, atom, fit);
1412 set32LE(fixUpLocation, accumulator);
1413 break;
1414 case ld::Fixup::kindStoreLittleEndian64:
1415 set64LE(fixUpLocation, accumulator);
1416 break;
1417 case ld::Fixup::kindStoreBigEndian16:
1418 set16BE(fixUpLocation, accumulator);
1419 break;
1420 case ld::Fixup::kindStoreBigEndianLow24of32:
1421 set32BE(fixUpLocation, (get32BE(fixUpLocation) & 0xFF000000) | (accumulator & 0x00FFFFFF) );
1422 break;
1423 case ld::Fixup::kindStoreBigEndian32:
1424 rangeCheckAbsolute32(accumulator, state, atom, fit);
1425 set32BE(fixUpLocation, accumulator);
1426 break;
1427 case ld::Fixup::kindStoreBigEndian64:
1428 set64BE(fixUpLocation, accumulator);
1429 break;
1430 case ld::Fixup::kindStoreX86PCRel8:
1431 case ld::Fixup::kindStoreX86BranchPCRel8:
1432 if ( fit->contentAddendOnly )
1433 delta = accumulator;
1434 else
1435 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 1);
1436 rangeCheck8(delta, state, atom, fit);
1437 *fixUpLocation = delta;
1438 break;
1439 case ld::Fixup::kindStoreX86PCRel16:
1440 if ( fit->contentAddendOnly )
1441 delta = accumulator;
1442 else
1443 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 2);
1444 rangeCheck16(delta, state, atom, fit);
1445 set16LE(fixUpLocation, delta);
1446 break;
1447 case ld::Fixup::kindStoreX86BranchPCRel32:
1448 if ( fit->contentAddendOnly )
1449 delta = accumulator;
1450 else
1451 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1452 rangeCheckBranch32(delta, state, atom, fit);
1453 set32LE(fixUpLocation, delta);
1454 break;
1455 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
1456 case ld::Fixup::kindStoreX86PCRel32GOT:
1457 case ld::Fixup::kindStoreX86PCRel32:
1458 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
1459 if ( fit->contentAddendOnly )
1460 delta = accumulator;
1461 else
1462 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1463 rangeCheckRIP32(delta, state, atom, fit);
1464 set32LE(fixUpLocation, delta);
1465 break;
1466 case ld::Fixup::kindStoreX86PCRel32_1:
1467 if ( fit->contentAddendOnly )
1468 delta = accumulator - 1;
1469 else
1470 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 5);
1471 rangeCheckRIP32(delta, state, atom, fit);
1472 set32LE(fixUpLocation, delta);
1473 break;
1474 case ld::Fixup::kindStoreX86PCRel32_2:
1475 if ( fit->contentAddendOnly )
1476 delta = accumulator - 2;
1477 else
1478 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 6);
1479 rangeCheckRIP32(delta, state, atom, fit);
1480 set32LE(fixUpLocation, delta);
1481 break;
1482 case ld::Fixup::kindStoreX86PCRel32_4:
1483 if ( fit->contentAddendOnly )
1484 delta = accumulator - 4;
1485 else
1486 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1487 rangeCheckRIP32(delta, state, atom, fit);
1488 set32LE(fixUpLocation, delta);
1489 break;
1490 case ld::Fixup::kindStoreX86Abs32TLVLoad:
1491 set32LE(fixUpLocation, accumulator);
1492 break;
1493 case ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA:
1494 assert(_options.outputKind() != Options::kObjectFile);
1495 // TLV entry was optimized away, change movl instruction to a leal
1496 if ( fixUpLocation[-1] != 0xA1 )
1497 throw "TLV load reloc does not point to a movl instruction";
1498 fixUpLocation[-1] = 0xB8;
1499 set32LE(fixUpLocation, accumulator);
1500 break;
1501 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
1502 assert(_options.outputKind() != Options::kObjectFile);
1503 // GOT entry was optimized away, change movq instruction to a leaq
1504 if ( fixUpLocation[-2] != 0x8B )
1505 throw "GOT load reloc does not point to a movq instruction";
1506 fixUpLocation[-2] = 0x8D;
1507 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1508 rangeCheckRIP32(delta, state, atom, fit);
1509 set32LE(fixUpLocation, delta);
1510 break;
1511 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
1512 assert(_options.outputKind() != Options::kObjectFile);
1513 // TLV entry was optimized away, change movq instruction to a leaq
1514 if ( fixUpLocation[-2] != 0x8B )
1515 throw "TLV load reloc does not point to a movq instruction";
1516 fixUpLocation[-2] = 0x8D;
1517 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1518 rangeCheckRIP32(delta, state, atom, fit);
1519 set32LE(fixUpLocation, delta);
1520 break;
1521 case ld::Fixup::kindStoreTargetAddressARMLoad12:
1522 accumulator = addressOf(state, fit, &toTarget);
1523 // fall into kindStoreARMLoad12 case
1524 case ld::Fixup::kindStoreARMLoad12:
1525 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1526 rangeCheckARM12(delta, state, atom, fit);
1527 instruction = get32LE(fixUpLocation);
1528 if ( delta >= 0 ) {
1529 newInstruction = instruction & 0xFFFFF000;
1530 newInstruction |= ((uint32_t)delta & 0xFFF);
1531 }
1532 else {
1533 newInstruction = instruction & 0xFF7FF000;
1534 newInstruction |= ((uint32_t)(-delta) & 0xFFF);
1535 }
1536 set32LE(fixUpLocation, newInstruction);
1537 break;
1538 case ld::Fixup::kindDtraceExtra:
1539 break;
1540 case ld::Fixup::kindStoreX86DtraceCallSiteNop:
1541 if ( _options.outputKind() != Options::kObjectFile ) {
1542 // change call site to a NOP
1543 fixUpLocation[-1] = 0x90; // 1-byte nop
1544 fixUpLocation[0] = 0x0F; // 4-byte nop
1545 fixUpLocation[1] = 0x1F;
1546 fixUpLocation[2] = 0x40;
1547 fixUpLocation[3] = 0x00;
1548 }
1549 break;
1550 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear:
1551 if ( _options.outputKind() != Options::kObjectFile ) {
1552 // change call site to a clear eax
1553 fixUpLocation[-1] = 0x33; // xorl eax,eax
1554 fixUpLocation[0] = 0xC0;
1555 fixUpLocation[1] = 0x90; // 1-byte nop
1556 fixUpLocation[2] = 0x90; // 1-byte nop
1557 fixUpLocation[3] = 0x90; // 1-byte nop
1558 }
1559 break;
1560 case ld::Fixup::kindStoreARMDtraceCallSiteNop:
1561 if ( _options.outputKind() != Options::kObjectFile ) {
1562 // change call site to a NOP
1563 set32LE(fixUpLocation, 0xE1A00000);
1564 }
1565 break;
1566 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear:
1567 if ( _options.outputKind() != Options::kObjectFile ) {
1568 // change call site to 'eor r0, r0, r0'
1569 set32LE(fixUpLocation, 0xE0200000);
1570 }
1571 break;
1572 case ld::Fixup::kindStoreThumbDtraceCallSiteNop:
1573 if ( _options.outputKind() != Options::kObjectFile ) {
1574 // change 32-bit blx call site to two thumb NOPs
1575 set32LE(fixUpLocation, 0x46C046C0);
1576 }
1577 break;
1578 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear:
1579 if ( _options.outputKind() != Options::kObjectFile ) {
1580 // change 32-bit blx call site to 'nop', 'eor r0, r0'
1581 set32LE(fixUpLocation, 0x46C04040);
1582 }
1583 break;
1584 case ld::Fixup::kindStoreARM64DtraceCallSiteNop:
1585 if ( _options.outputKind() != Options::kObjectFile ) {
1586 // change call site to a NOP
1587 set32LE(fixUpLocation, 0xD503201F);
1588 }
1589 break;
1590 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear:
1591 if ( _options.outputKind() != Options::kObjectFile ) {
1592 // change call site to 'MOVZ X0,0'
1593 set32LE(fixUpLocation, 0xD2800000);
1594 }
1595 break;
1596 case ld::Fixup::kindLazyTarget:
1597 case ld::Fixup::kindIslandTarget:
1598 break;
1599 case ld::Fixup::kindSetLazyOffset:
1600 assert(fit->binding == ld::Fixup::bindingDirectlyBound);
1601 accumulator = this->lazyBindingInfoOffsetForLazyPointerAddress(fit->u.target->finalAddress());
1602 break;
1603 case ld::Fixup::kindDataInCodeStartData:
1604 case ld::Fixup::kindDataInCodeStartJT8:
1605 case ld::Fixup::kindDataInCodeStartJT16:
1606 case ld::Fixup::kindDataInCodeStartJT32:
1607 case ld::Fixup::kindDataInCodeStartJTA32:
1608 case ld::Fixup::kindDataInCodeEnd:
1609 break;
1610 case ld::Fixup::kindLinkerOptimizationHint:
1611 // expand table of address/offsets used by hints
1612 lohExtra.addend = fit->u.addend;
1613 usedByHints[fit->offsetInAtom + (lohExtra.info.delta1 << 2)] = NULL;
1614 if ( lohExtra.info.count > 0 )
1615 usedByHints[fit->offsetInAtom + (lohExtra.info.delta2 << 2)] = NULL;
1616 if ( lohExtra.info.count > 1 )
1617 usedByHints[fit->offsetInAtom + (lohExtra.info.delta3 << 2)] = NULL;
1618 if ( lohExtra.info.count > 2 )
1619 usedByHints[fit->offsetInAtom + (lohExtra.info.delta4 << 2)] = NULL;
1620 break;
1621 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
1622 accumulator = addressOf(state, fit, &toTarget);
1623 thumbTarget = targetIsThumb(state, fit);
1624 if ( thumbTarget )
1625 accumulator |= 1;
1626 if ( fit->contentAddendOnly )
1627 accumulator = 0;
1628 rangeCheckAbsolute32(accumulator, state, atom, fit);
1629 set32LE(fixUpLocation, accumulator);
1630 break;
1631 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
1632 accumulator = addressOf(state, fit, &toTarget);
1633 if ( fit->contentAddendOnly )
1634 accumulator = 0;
1635 set64LE(fixUpLocation, accumulator);
1636 break;
1637 case ld::Fixup::kindStoreTargetAddressBigEndian32:
1638 accumulator = addressOf(state, fit, &toTarget);
1639 if ( fit->contentAddendOnly )
1640 accumulator = 0;
1641 set32BE(fixUpLocation, accumulator);
1642 break;
1643 case ld::Fixup::kindStoreTargetAddressBigEndian64:
1644 accumulator = addressOf(state, fit, &toTarget);
1645 if ( fit->contentAddendOnly )
1646 accumulator = 0;
1647 set64BE(fixUpLocation, accumulator);
1648 break;
1649 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32:
1650 accumulator = tlvTemplateOffsetOf(state, fit);
1651 set32LE(fixUpLocation, accumulator);
1652 break;
1653 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64:
1654 accumulator = tlvTemplateOffsetOf(state, fit);
1655 set64LE(fixUpLocation, accumulator);
1656 break;
1657 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
1658 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
1659 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
1660 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
1661 accumulator = addressOf(state, fit, &toTarget);
1662 if ( fit->contentDetlaToAddendOnly )
1663 accumulator = 0;
1664 if ( fit->contentAddendOnly )
1665 delta = 0;
1666 else
1667 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1668 rangeCheckRIP32(delta, state, atom, fit);
1669 set32LE(fixUpLocation, delta);
1670 break;
1671 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
1672 set32LE(fixUpLocation, accumulator);
1673 break;
1674 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA:
1675 // TLV entry was optimized away, change movl instruction to a leal
1676 if ( fixUpLocation[-1] != 0xA1 )
1677 throw "TLV load reloc does not point to a movl <abs-address>,<reg> instruction";
1678 fixUpLocation[-1] = 0xB8;
1679 accumulator = addressOf(state, fit, &toTarget);
1680 set32LE(fixUpLocation, accumulator);
1681 break;
1682 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
1683 // GOT entry was optimized away, change movq instruction to a leaq
1684 if ( fixUpLocation[-2] != 0x8B )
1685 throw "GOT load reloc does not point to a movq instruction";
1686 fixUpLocation[-2] = 0x8D;
1687 accumulator = addressOf(state, fit, &toTarget);
1688 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1689 rangeCheckRIP32(delta, state, atom, fit);
1690 set32LE(fixUpLocation, delta);
1691 break;
1692 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
1693 // TLV entry was optimized away, change movq instruction to a leaq
1694 if ( fixUpLocation[-2] != 0x8B )
1695 throw "TLV load reloc does not point to a movq instruction";
1696 fixUpLocation[-2] = 0x8D;
1697 accumulator = addressOf(state, fit, &toTarget);
1698 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1699 rangeCheckRIP32(delta, state, atom, fit);
1700 set32LE(fixUpLocation, delta);
1701 break;
1702 case ld::Fixup::kindStoreTargetAddressARMBranch24:
1703 accumulator = addressOf(state, fit, &toTarget);
1704 thumbTarget = targetIsThumb(state, fit);
1705 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1706 // Branching to island. If ultimate target is in range, branch there directly.
1707 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd(); islandfit != end; ++islandfit) {
1708 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1709 const ld::Atom* islandTarget = NULL;
1710 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1711 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1712 if ( checkArmBranch24Displacement(delta) ) {
1713 toTarget = islandTarget;
1714 accumulator = islandTargetAddress;
1715 thumbTarget = targetIsThumb(state, islandfit);
1716 }
1717 break;
1718 }
1719 }
1720 }
1721 if ( thumbTarget )
1722 accumulator |= 1;
1723 if ( fit->contentDetlaToAddendOnly )
1724 accumulator = 0;
1725 // fall into kindStoreARMBranch24 case
1726 case ld::Fixup::kindStoreARMBranch24:
1727 // The pc added will be +8 from the pc
1728 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1729 rangeCheckARMBranch24(delta, state, atom, fit);
1730 instruction = get32LE(fixUpLocation);
1731 // Make sure we are calling arm with bl, thumb with blx
1732 is_bl = ((instruction & 0xFF000000) == 0xEB000000);
1733 is_blx = ((instruction & 0xFE000000) == 0xFA000000);
1734 is_b = !is_blx && ((instruction & 0x0F000000) == 0x0A000000);
1735 if ( (is_bl | is_blx) && thumbTarget ) {
1736 uint32_t opcode = 0xFA000000; // force to be blx
1737 uint32_t disp = (uint32_t)(delta >> 2) & 0x00FFFFFF;
1738 uint32_t h_bit = (uint32_t)(delta << 23) & 0x01000000;
1739 newInstruction = opcode | h_bit | disp;
1740 }
1741 else if ( (is_bl | is_blx) && !thumbTarget ) {
1742 uint32_t opcode = 0xEB000000; // force to be bl
1743 uint32_t disp = (uint32_t)(delta >> 2) & 0x00FFFFFF;
1744 newInstruction = opcode | disp;
1745 }
1746 else if ( is_b && thumbTarget ) {
1747 if ( fit->contentDetlaToAddendOnly )
1748 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(delta >> 2) & 0x00FFFFFF);
1749 else
1750 throwf("no pc-rel bx arm instruction. Can't fix up branch to %s in %s",
1751 referenceTargetAtomName(state, fit), atom->name());
1752 }
1753 else if ( !is_bl && !is_blx && thumbTarget ) {
1754 throwf("don't know how to convert instruction %x referencing %s to thumb",
1755 instruction, referenceTargetAtomName(state, fit));
1756 }
1757 else {
1758 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(delta >> 2) & 0x00FFFFFF);
1759 }
1760 set32LE(fixUpLocation, newInstruction);
1761 break;
1762 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
1763 accumulator = addressOf(state, fit, &toTarget);
1764 thumbTarget = targetIsThumb(state, fit);
1765 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1766 // branching to island, so see if ultimate target is in range
1767 // and if so branch to ultimate target instead.
1768 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd(); islandfit != end; ++islandfit) {
1769 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1770 const ld::Atom* islandTarget = NULL;
1771 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1772 if ( !fit->contentDetlaToAddendOnly ) {
1773 if ( targetIsThumb(state, islandfit) ) {
1774 // Thumb to thumb branch, we will be generating a bl instruction.
1775 // Delta is always even, so mask out thumb bit in target.
1776 islandTargetAddress &= -2ULL;
1777 }
1778 else {
1779 // Target is not thumb, we will be generating a blx instruction
1780 // Since blx cannot have the low bit set, set bit[1] of the target to
1781 // bit[1] of the base address, so that the difference is a multiple of
1782 // 4 bytes.
1783 islandTargetAddress &= -3ULL;
1784 islandTargetAddress |= ((atom->finalAddress() + fit->offsetInAtom ) & 2LL);
1785 }
1786 }
1787 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1788 if ( checkThumbBranch22Displacement(delta) ) {
1789 toTarget = islandTarget;
1790 accumulator = islandTargetAddress;
1791 thumbTarget = targetIsThumb(state, islandfit);
1792 }
1793 break;
1794 }
1795 }
1796 }
1797 if ( thumbTarget )
1798 accumulator |= 1;
1799 if ( fit->contentDetlaToAddendOnly )
1800 accumulator = 0;
1801 // fall into kindStoreThumbBranch22 case
1802 case ld::Fixup::kindStoreThumbBranch22:
1803 instruction = get32LE(fixUpLocation);
1804 is_bl = ((instruction & 0xD000F800) == 0xD000F000);
1805 is_blx = ((instruction & 0xD000F800) == 0xC000F000);
1806 is_b = ((instruction & 0xD000F800) == 0x9000F000);
1807 if ( !fit->contentDetlaToAddendOnly ) {
1808 if ( thumbTarget ) {
1809 // Thumb to thumb branch, we will be generating a bl instruction.
1810 // Delta is always even, so mask out thumb bit in target.
1811 accumulator &= -2ULL;
1812 }
1813 else {
1814 // Target is not thumb, we will be generating a blx instruction
1815 // Since blx cannot have the low bit set, set bit[1] of the target to
1816 // bit[1] of the base address, so that the difference is a multiple of
1817 // 4 bytes.
1818 accumulator &= -3ULL;
1819 accumulator |= ((atom->finalAddress() + fit->offsetInAtom ) & 2LL);
1820 }
1821 }
1822 // The pc added will be +4 from the pc
1823 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1824 // <rdar://problem/16652542> support bl in very large .o files
1825 if ( fit->contentDetlaToAddendOnly ) {
1826 while ( delta < (-16777216LL) )
1827 delta += 0x2000000;
1828 }
1829 rangeCheckThumbBranch22(delta, state, atom, fit);
1830 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
1831 // The instruction is really two instructions:
1832 // The lower 16 bits are the first instruction, which contains the high
1833 // 11 bits of the displacement.
1834 // The upper 16 bits are the second instruction, which contains the low
1835 // 11 bits of the displacement, as well as differentiating bl and blx.
1836 uint32_t s = (uint32_t)(delta >> 24) & 0x1;
1837 uint32_t i1 = (uint32_t)(delta >> 23) & 0x1;
1838 uint32_t i2 = (uint32_t)(delta >> 22) & 0x1;
1839 uint32_t imm10 = (uint32_t)(delta >> 12) & 0x3FF;
1840 uint32_t imm11 = (uint32_t)(delta >> 1) & 0x7FF;
1841 uint32_t j1 = (i1 == s);
1842 uint32_t j2 = (i2 == s);
1843 if ( is_bl ) {
1844 if ( thumbTarget )
1845 instruction = 0xD000F000; // keep bl
1846 else
1847 instruction = 0xC000F000; // change to blx
1848 }
1849 else if ( is_blx ) {
1850 if ( thumbTarget )
1851 instruction = 0xD000F000; // change to bl
1852 else
1853 instruction = 0xC000F000; // keep blx
1854 }
1855 else if ( is_b ) {
1856 instruction = 0x9000F000; // keep b
1857 if ( !thumbTarget && !fit->contentDetlaToAddendOnly ) {
1858 throwf("armv7 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1859 referenceTargetAtomName(state, fit), atom->name());
1860 }
1861 }
1862 else {
1863 if ( !thumbTarget )
1864 throwf("don't know how to convert branch instruction %x referencing %s to bx",
1865 instruction, referenceTargetAtomName(state, fit));
1866 instruction = 0x9000F000; // keep b
1867 }
1868 uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
1869 uint32_t firstDisp = (s << 10) | imm10;
1870 newInstruction = instruction | (nextDisp << 16) | firstDisp;
1871 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, instruction=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
1872 // s, j1, j2, imm10, imm11, instruction, firstDisp, nextDisp, newInstruction, delta, atom->name(), toTarget->name());
1873 set32LE(fixUpLocation, newInstruction);
1874 }
1875 else {
1876 // The instruction is really two instructions:
1877 // The lower 16 bits are the first instruction, which contains the high
1878 // 11 bits of the displacement.
1879 // The upper 16 bits are the second instruction, which contains the low
1880 // 11 bits of the displacement, as well as differentiating bl and blx.
1881 uint32_t firstDisp = (uint32_t)(delta >> 12) & 0x7FF;
1882 uint32_t nextDisp = (uint32_t)(delta >> 1) & 0x7FF;
1883 if ( is_bl && !thumbTarget ) {
1884 instruction = 0xE800F000;
1885 }
1886 else if ( is_blx && thumbTarget ) {
1887 instruction = 0xF800F000;
1888 }
1889 else if ( is_b ) {
1890 instruction = 0x9000F000; // keep b
1891 if ( !thumbTarget && !fit->contentDetlaToAddendOnly ) {
1892 throwf("armv6 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1893 referenceTargetAtomName(state, fit), atom->name());
1894 }
1895 }
1896 else {
1897 instruction = instruction & 0xF800F800;
1898 }
1899 newInstruction = instruction | (nextDisp << 16) | firstDisp;
1900 set32LE(fixUpLocation, newInstruction);
1901 }
1902 break;
1903 case ld::Fixup::kindStoreARMLow16:
1904 {
1905 uint32_t imm4 = (accumulator & 0x0000F000) >> 12;
1906 uint32_t imm12 = accumulator & 0x00000FFF;
1907 instruction = get32LE(fixUpLocation);
1908 newInstruction = (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
1909 set32LE(fixUpLocation, newInstruction);
1910 }
1911 break;
1912 case ld::Fixup::kindStoreARMHigh16:
1913 {
1914 uint32_t imm4 = (accumulator & 0xF0000000) >> 28;
1915 uint32_t imm12 = (accumulator & 0x0FFF0000) >> 16;
1916 instruction = get32LE(fixUpLocation);
1917 newInstruction = (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
1918 set32LE(fixUpLocation, newInstruction);
1919 }
1920 break;
1921 case ld::Fixup::kindStoreThumbLow16:
1922 {
1923 uint32_t imm4 = (accumulator & 0x0000F000) >> 12;
1924 uint32_t i = (accumulator & 0x00000800) >> 11;
1925 uint32_t imm3 = (accumulator & 0x00000700) >> 8;
1926 uint32_t imm8 = accumulator & 0x000000FF;
1927 instruction = get32LE(fixUpLocation);
1928 newInstruction = (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
1929 set32LE(fixUpLocation, newInstruction);
1930 }
1931 break;
1932 case ld::Fixup::kindStoreThumbHigh16:
1933 {
1934 uint32_t imm4 = (accumulator & 0xF0000000) >> 28;
1935 uint32_t i = (accumulator & 0x08000000) >> 27;
1936 uint32_t imm3 = (accumulator & 0x07000000) >> 24;
1937 uint32_t imm8 = (accumulator & 0x00FF0000) >> 16;
1938 instruction = get32LE(fixUpLocation);
1939 newInstruction = (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
1940 set32LE(fixUpLocation, newInstruction);
1941 }
1942 break;
1943 #if SUPPORT_ARCH_arm64
1944 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
1945 accumulator = addressOf(state, fit, &toTarget);
1946 // fall into kindStoreARM64Branch26 case
1947 case ld::Fixup::kindStoreARM64Branch26:
1948 if ( fit->contentAddendOnly )
1949 delta = accumulator;
1950 else
1951 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
1952 rangeCheckARM64Branch26(delta, state, atom, fit);
1953 instruction = get32LE(fixUpLocation);
1954 newInstruction = (instruction & 0xFC000000) | ((uint32_t)(delta >> 2) & 0x03FFFFFF);
1955 set32LE(fixUpLocation, newInstruction);
1956 break;
1957 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
1958 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
1959 case ld::Fixup::kindStoreTargetAddressARM64Page21:
1960 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
1961 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
1962 accumulator = addressOf(state, fit, &toTarget);
1963 							// fall into kindStoreARM64Page21 case
1964 case ld::Fixup::kindStoreARM64GOTLeaPage21:
1965 case ld::Fixup::kindStoreARM64GOTLoadPage21:
1966 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
1967 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
1968 case ld::Fixup::kindStoreARM64Page21:
1969 {
1970 // the ADRP instruction adds the imm << 12 to the page that the pc is on
1971 if ( fit->contentAddendOnly )
1972 delta = 0;
1973 else
1974 delta = (accumulator & (-4096)) - ((atom->finalAddress() + fit->offsetInAtom) & (-4096));
1975 rangeCheckARM64Page21(delta, state, atom, fit);
1976 instruction = get32LE(fixUpLocation);
1977 uint32_t immhi = (delta >> 9) & (0x00FFFFE0);
1978 uint32_t immlo = (delta << 17) & (0x60000000);
1979 newInstruction = (instruction & 0x9F00001F) | immlo | immhi;
1980 set32LE(fixUpLocation, newInstruction);
1981 }
1982 break;
1983 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
1984 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
1985 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
1986 accumulator = addressOf(state, fit, &toTarget);
1987 							// fall into kindStoreARM64PageOff12 case
1988 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
1989 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
1990 case ld::Fixup::kindStoreARM64PageOff12:
1991 {
1992 uint32_t offset = accumulator & 0x00000FFF;
1993 instruction = get32LE(fixUpLocation);
1994 // LDR/STR instruction have implicit scale factor, need to compensate for that
1995 if ( instruction & 0x08000000 ) {
1996 uint32_t implictShift = ((instruction >> 30) & 0x3);
1997 switch ( implictShift ) {
1998 case 0:
1999 if ( (instruction & 0x04800000) == 0x04800000 ) {
2000 								// vector and byte LDR/STR have same "size" bits, need to check other bits to differentiate
2001 implictShift = 4;
2002 if ( (offset & 0xF) != 0 ) {
2003 throwf("128-bit LDR/STR not 16-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2004 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2005 addressOf(state, fit, &toTarget));
2006 }
2007 }
2008 break;
2009 case 1:
2010 if ( (offset & 0x1) != 0 ) {
2011 throwf("16-bit LDR/STR not 2-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2012 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2013 addressOf(state, fit, &toTarget));
2014 }
2015 break;
2016 case 2:
2017 if ( (offset & 0x3) != 0 ) {
2018 throwf("32-bit LDR/STR not 4-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2019 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2020 addressOf(state, fit, &toTarget));
2021 }
2022 break;
2023 case 3:
2024 if ( (offset & 0x7) != 0 ) {
2025 throwf("64-bit LDR/STR not 8-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2026 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2027 addressOf(state, fit, &toTarget));
2028 }
2029 break;
2030 }
2031 // compensate for implicit scale
2032 offset >>= implictShift;
2033 }
2034 if ( fit->contentAddendOnly )
2035 offset = 0;
2036 uint32_t imm12 = offset << 10;
2037 newInstruction = (instruction & 0xFFC003FF) | imm12;
2038 set32LE(fixUpLocation, newInstruction);
2039 }
2040 break;
2041 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
2042 accumulator = addressOf(state, fit, &toTarget);
2043 							// fall into kindStoreARM64GOTLeaPageOff12 case
2044 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
2045 {
2046 // GOT entry was optimized away, change LDR instruction to a ADD
2047 instruction = get32LE(fixUpLocation);
2048 if ( (instruction & 0xBFC00000) != 0xB9400000 )
2049 throwf("GOT load reloc does not point to a LDR instruction in %s", atom->name());
2050 uint32_t offset = accumulator & 0x00000FFF;
2051 uint32_t imm12 = offset << 10;
2052 newInstruction = 0x91000000 | imm12 | (instruction & 0x000003FF);
2053 set32LE(fixUpLocation, newInstruction);
2054 }
2055 break;
2056 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
2057 accumulator = addressOf(state, fit, &toTarget);
2058 							// fall into kindStoreARM64TLVPLoadNowLeaPageOff12 case
2059 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
2060 {
2061 // TLV thunk in same linkage unit, so LEA it directly, changing LDR instruction to a ADD
2062 instruction = get32LE(fixUpLocation);
2063 if ( (instruction & 0xBFC00000) != 0xB9400000 )
2064 throwf("TLV load reloc does not point to a LDR instruction in %s", atom->name());
2065 uint32_t offset = accumulator & 0x00000FFF;
2066 uint32_t imm12 = offset << 10;
2067 newInstruction = 0x91000000 | imm12 | (instruction & 0x000003FF);
2068 set32LE(fixUpLocation, newInstruction);
2069 }
2070 break;
2071 case ld::Fixup::kindStoreARM64PointerToGOT:
2072 set64LE(fixUpLocation, accumulator);
2073 break;
2074 case ld::Fixup::kindStoreARM64PCRelToGOT:
2075 if ( fit->contentAddendOnly )
2076 delta = accumulator;
2077 else
2078 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
2079 set32LE(fixUpLocation, delta);
2080 break;
2081 #endif
2082 }
2083 }
2084
2085 #if SUPPORT_ARCH_arm64
2086 // after all fixups are done on atom, if there are potential optimizations, do those
2087 if ( (usedByHints.size() != 0) && (_options.outputKind() != Options::kObjectFile) && !_options.ignoreOptimizationHints() ) {
2088 // fill in second part of usedByHints map, so we can see the target of fixups that might be optimized
2089 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2090 switch ( fit->kind ) {
2091 case ld::Fixup::kindLinkerOptimizationHint:
2092 case ld::Fixup::kindNoneFollowOn:
2093 case ld::Fixup::kindNoneGroupSubordinate:
2094 case ld::Fixup::kindNoneGroupSubordinateFDE:
2095 case ld::Fixup::kindNoneGroupSubordinateLSDA:
2096 case ld::Fixup::kindNoneGroupSubordinatePersonality:
2097 break;
2098 default:
2099 if ( fit->firstInCluster() ) {
2100 std::map<uint32_t, const Fixup*>::iterator pos = usedByHints.find(fit->offsetInAtom);
2101 if ( pos != usedByHints.end() ) {
2102 assert(pos->second == NULL && "two fixups in same hint location");
2103 pos->second = fit;
2104 //fprintf(stderr, "setting %s usedByHints[0x%04X], kind = %d\n", atom->name(), fit->offsetInAtom, fit->kind);
2105 }
2106 }
2107 }
2108 }
2109
2110 // apply hints pass 1
2111 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2112 if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint )
2113 continue;
2114 InstructionInfo infoA;
2115 InstructionInfo infoB;
2116 InstructionInfo infoC;
2117 InstructionInfo infoD;
2118 LoadStoreInfo ldrInfoB, ldrInfoC;
2119 AddInfo addInfoB;
2120 AdrpInfo adrpInfoA;
2121 bool usableSegment;
2122 bool targetFourByteAligned;
2123 bool literalableSize, isADRP, isADD, isLDR, isSTR;
2124 //uint8_t loadSize, destReg;
2125 //uint32_t scaledOffset;
2126 //uint32_t imm12;
2127 ld::Fixup::LOH_arm64 alt;
2128 alt.addend = fit->u.addend;
2129 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta1 << 2), &infoA);
2130 if ( alt.info.count > 0 )
2131 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta2 << 2), &infoB);
2132 if ( alt.info.count > 1 )
2133 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta3 << 2), &infoC);
2134 if ( alt.info.count > 2 )
2135 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta4 << 2), &infoD);
2136
2137 if ( _options.sharedRegionEligible() ) {
2138 if ( _options.sharedRegionEncodingV2() ) {
2139 						// In v2 format, all references might be moved at dyld shared cache creation time
2140 usableSegment = false;
2141 }
2142 else {
2143 // In v1 format, only references to something in __TEXT segment could be optimized
2144 usableSegment = (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0);
2145 }
2146 }
2147 else {
2148 // main executables can optimize any reference
2149 usableSegment = true;
2150 }
2151
2152 switch ( alt.info.kind ) {
2153 case LOH_ARM64_ADRP_ADRP:
2154 // processed in pass 2 because some ADRP may have been removed
2155 break;
2156 case LOH_ARM64_ADRP_LDR:
2157 LOH_ASSERT(alt.info.count == 1);
2158 LOH_ASSERT(isPageKind(infoA.fixup));
2159 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2160 LOH_ASSERT(infoA.target == infoB.target);
2161 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2162 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2163 LOH_ASSERT(isADRP);
2164 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2165 // silently ignore LDRs transformed to ADD by TLV pass
2166 if ( !isLDR && infoB.fixup->kind == ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12 )
2167 break;
2168 LOH_ASSERT(isLDR);
2169 LOH_ASSERT(ldrInfoB.baseReg == adrpInfoA.destReg);
2170 LOH_ASSERT(ldrInfoB.offset == (infoA.targetAddress & 0x00000FFF));
2171 literalableSize = ( (ldrInfoB.size != 1) && (ldrInfoB.size != 2) );
2172 targetFourByteAligned = ( (infoA.targetAddress & 0x3) == 0 );
2173 if ( literalableSize && usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2174 set32LE(infoA.instructionContent, makeNOP());
2175 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2176 if ( _options.verboseOptimizationHints() )
2177 fprintf(stderr, "adrp-ldr at 0x%08llX transformed to LDR literal, usableSegment=%d usableSegment\n", infoB.instructionAddress, usableSegment);
2178 }
2179 else {
2180 if ( _options.verboseOptimizationHints() )
2181 fprintf(stderr, "adrp-ldr at 0x%08llX not transformed, isLDR=%d, literalableSize=%d, inRange=%d, usableSegment=%d, scaledOffset=%d\n",
2182 infoB.instructionAddress, isLDR, literalableSize, withinOneMeg(infoB.instructionAddress, infoA.targetAddress), usableSegment, ldrInfoB.offset);
2183 }
2184 break;
2185 case LOH_ARM64_ADRP_ADD_LDR:
2186 LOH_ASSERT(alt.info.count == 2);
2187 LOH_ASSERT(isPageKind(infoA.fixup));
2188 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2189 LOH_ASSERT(infoC.fixup == NULL);
2190 LOH_ASSERT(infoA.target == infoB.target);
2191 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2192 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2193 LOH_ASSERT(isADRP);
2194 isADD = parseADD(infoB.instruction, addInfoB);
2195 LOH_ASSERT(isADD);
2196 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2197 isLDR = parseLoadOrStore(infoC.instruction, ldrInfoC);
2198 LOH_ASSERT(isLDR);
2199 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2200 targetFourByteAligned = ( ((infoB.targetAddress+ldrInfoC.offset) & 0x3) == 0 );
2201 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2202 if ( literalableSize && usableSegment && targetFourByteAligned && withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2203 // can do T1 transformation to LDR literal
2204 set32LE(infoA.instructionContent, makeNOP());
2205 set32LE(infoB.instructionContent, makeNOP());
2206 set32LE(infoC.instructionContent, makeLDR_literal(ldrInfoC, infoA.targetAddress+ldrInfoC.offset, infoC.instructionAddress));
2207 if ( _options.verboseOptimizationHints() ) {
2208 fprintf(stderr, "adrp-add-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC.instructionAddress);
2209 }
2210 }
2211 else if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2212 							// can do the T4 transformation and turn ADRP/ADD into ADR
2213 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress+ldrInfoC.offset, infoA.instructionAddress));
2214 set32LE(infoB.instructionContent, makeNOP());
2215 ldrInfoC.offset = 0; // offset is now in ADR instead of ADD or LDR
2216 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2217 set32LE(infoC.instructionContent, infoC.instruction & 0xFFC003FF);
2218 if ( _options.verboseOptimizationHints() )
2219 fprintf(stderr, "adrp-add-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoB.instructionAddress);
2220 }
2221 else if ( ((infoB.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2222 // can do T2 transformation by merging ADD into LD
2223 // Leave ADRP as-is
2224 set32LE(infoB.instructionContent, makeNOP());
2225 ldrInfoC.offset += addInfoB.addend;
2226 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2227 if ( _options.verboseOptimizationHints() )
2228 fprintf(stderr, "adrp-add-ldr at 0x%08llX T2 transformed to ADRP/LDR \n", infoC.instructionAddress);
2229 }
2230 else {
2231 if ( _options.verboseOptimizationHints() )
2232 fprintf(stderr, "adrp-add-ldr at 0x%08llX could not be transformed, loadSize=%d, literalableSize=%d, inRange=%d, usableSegment=%d, targetFourByteAligned=%d, imm12=%d\n",
2233 infoC.instructionAddress, ldrInfoC.size, literalableSize, withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset), usableSegment, targetFourByteAligned, ldrInfoC.offset);
2234 }
2235 break;
2236 case LOH_ARM64_ADRP_ADD:
2237 LOH_ASSERT(alt.info.count == 1);
2238 LOH_ASSERT(isPageKind(infoA.fixup));
2239 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2240 LOH_ASSERT(infoA.target == infoB.target);
2241 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2242 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2243 LOH_ASSERT(isADRP);
2244 isADD = parseADD(infoB.instruction, addInfoB);
2245 LOH_ASSERT(isADD);
2246 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2247 if ( usableSegment && withinOneMeg(infoA.targetAddress, infoA.instructionAddress) ) {
2248 // can do T4 transformation and use ADR
2249 set32LE(infoA.instructionContent, makeADR(addInfoB.destReg, infoA.targetAddress, infoA.instructionAddress));
2250 set32LE(infoB.instructionContent, makeNOP());
2251 if ( _options.verboseOptimizationHints() )
2252 fprintf(stderr, "adrp-add at 0x%08llX transformed to ADR\n", infoB.instructionAddress);
2253 }
2254 else {
2255 if ( _options.verboseOptimizationHints() )
2256 fprintf(stderr, "adrp-add at 0x%08llX not transformed, isAdd=%d, inRange=%d, usableSegment=%d\n",
2257 infoB.instructionAddress, isADD, withinOneMeg(infoA.targetAddress, infoA.instructionAddress), usableSegment);
2258 }
2259 break;
2260 case LOH_ARM64_ADRP_LDR_GOT_LDR:
2261 LOH_ASSERT(alt.info.count == 2);
2262 LOH_ASSERT(isPageKind(infoA.fixup, true));
2263 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2264 LOH_ASSERT(infoC.fixup == NULL);
2265 LOH_ASSERT(infoA.target == infoB.target);
2266 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2267 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2268 LOH_ASSERT(isADRP);
2269 isLDR = parseLoadOrStore(infoC.instruction, ldrInfoC);
2270 LOH_ASSERT(isLDR);
2271 isADD = parseADD(infoB.instruction, addInfoB);
2272 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2273 if ( isLDR ) {
2274 // target of GOT is external
2275 LOH_ASSERT(ldrInfoB.size == 8);
2276 LOH_ASSERT(!ldrInfoB.isFloat);
2277 LOH_ASSERT(ldrInfoC.baseReg == ldrInfoB.reg);
2278 //fprintf(stderr, "infoA.target=%p, %s, infoA.targetAddress=0x%08llX\n", infoA.target, infoA.target->name(), infoA.targetAddress);
2279 targetFourByteAligned = ( ((infoA.targetAddress + ldrInfoC.offset) & 0x3) == 0 );
2280 if ( usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2281 // can do T5 transform
2282 set32LE(infoA.instructionContent, makeNOP());
2283 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2284 if ( _options.verboseOptimizationHints() ) {
2285 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T5 transformed to LDR literal of GOT plus LDR\n", infoC.instructionAddress);
2286 }
2287 }
2288 else {
2289 if ( _options.verboseOptimizationHints() )
2290 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX no optimization done\n", infoC.instructionAddress);
2291 }
2292 }
2293 else if ( isADD ) {
2294 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2295 LOH_ASSERT(addInfoB.srcReg == adrpInfoA.destReg);
2296 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2297 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2298 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2299 if ( usableSegment && literalableSize && targetFourByteAligned && withinOneMeg(infoC.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2300 // can do T1 transform
2301 set32LE(infoA.instructionContent, makeNOP());
2302 set32LE(infoB.instructionContent, makeNOP());
2303 set32LE(infoC.instructionContent, makeLDR_literal(ldrInfoC, infoA.targetAddress + ldrInfoC.offset, infoC.instructionAddress));
2304 if ( _options.verboseOptimizationHints() )
2305 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC.instructionAddress);
2306 }
2307 else if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2308 // can do T4 transform
2309 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress, infoA.instructionAddress));
2310 set32LE(infoB.instructionContent, makeNOP());
2311 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2312 if ( _options.verboseOptimizationHints() ) {
2313 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoC.instructionAddress);
2314 }
2315 }
2316 else if ( ((infoA.targetAddress % ldrInfoC.size) == 0) && ((addInfoB.addend + ldrInfoC.offset) < 4096) ) {
2317 // can do T2 transform
2318 set32LE(infoB.instructionContent, makeNOP());
2319 ldrInfoC.baseReg = adrpInfoA.destReg;
2320 ldrInfoC.offset += addInfoB.addend;
2321 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2322 if ( _options.verboseOptimizationHints() ) {
2323 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T2 transformed to ADRP/NOP/LDR\n", infoC.instructionAddress);
2324 }
2325 }
2326 else {
2327 // T3 transform already done by ld::passes:got:doPass()
2328 if ( _options.verboseOptimizationHints() ) {
2329 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T3 transformed to ADRP/ADD/LDR\n", infoC.instructionAddress);
2330 }
2331 }
2332 }
2333 else {
2334 if ( _options.verboseOptimizationHints() )
2335 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX not ADD or LDR\n", infoC.instructionAddress);
2336 }
2337 break;
2338 case LOH_ARM64_ADRP_ADD_STR:
2339 LOH_ASSERT(alt.info.count == 2);
2340 LOH_ASSERT(isPageKind(infoA.fixup));
2341 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2342 LOH_ASSERT(infoC.fixup == NULL);
2343 LOH_ASSERT(infoA.target == infoB.target);
2344 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2345 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2346 LOH_ASSERT(isADRP);
2347 isADD = parseADD(infoB.instruction, addInfoB);
2348 LOH_ASSERT(isADD);
2349 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2350 isSTR = (parseLoadOrStore(infoC.instruction, ldrInfoC) && ldrInfoC.isStore);
2351 LOH_ASSERT(isSTR);
2352 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2353 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2354 							// can do the T4 transformation and turn ADRP/ADD into ADR
2355 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress+ldrInfoC.offset, infoA.instructionAddress));
2356 set32LE(infoB.instructionContent, makeNOP());
2357 ldrInfoC.offset = 0; // offset is now in ADR instead of ADD or LDR
2358 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2359 set32LE(infoC.instructionContent, infoC.instruction & 0xFFC003FF);
2360 if ( _options.verboseOptimizationHints() )
2361 fprintf(stderr, "adrp-add-str at 0x%08llX T4 transformed to ADR/STR\n", infoB.instructionAddress);
2362 }
2363 else if ( ((infoB.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2364 // can do T2 transformation by merging ADD into STR
2365 // Leave ADRP as-is
2366 set32LE(infoB.instructionContent, makeNOP());
2367 ldrInfoC.offset += addInfoB.addend;
2368 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2369 if ( _options.verboseOptimizationHints() )
2370 fprintf(stderr, "adrp-add-str at 0x%08llX T2 transformed to ADRP/STR \n", infoC.instructionAddress);
2371 }
2372 else {
2373 if ( _options.verboseOptimizationHints() )
2374 fprintf(stderr, "adrp-add-str at 0x%08llX could not be transformed, loadSize=%d, inRange=%d, usableSegment=%d, imm12=%d\n",
2375 infoC.instructionAddress, ldrInfoC.size, withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset), usableSegment, ldrInfoC.offset);
2376 }
2377 break;
2378 case LOH_ARM64_ADRP_LDR_GOT_STR:
2379 LOH_ASSERT(alt.info.count == 2);
2380 LOH_ASSERT(isPageKind(infoA.fixup, true));
2381 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2382 LOH_ASSERT(infoC.fixup == NULL);
2383 LOH_ASSERT(infoA.target == infoB.target);
2384 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2385 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2386 LOH_ASSERT(isADRP);
2387 isSTR = (parseLoadOrStore(infoC.instruction, ldrInfoC) && ldrInfoC.isStore);
2388 LOH_ASSERT(isSTR);
2389 isADD = parseADD(infoB.instruction, addInfoB);
2390 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2391 if ( isLDR ) {
2392 // target of GOT is external
2393 LOH_ASSERT(ldrInfoB.size == 8);
2394 LOH_ASSERT(!ldrInfoB.isFloat);
2395 LOH_ASSERT(ldrInfoC.baseReg == ldrInfoB.reg);
2396 targetFourByteAligned = ( ((infoA.targetAddress + ldrInfoC.offset) & 0x3) == 0 );
2397 if ( usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2398 // can do T5 transform
2399 set32LE(infoA.instructionContent, makeNOP());
2400 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2401 if ( _options.verboseOptimizationHints() ) {
2402 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T5 transformed to LDR literal of GOT plus STR\n", infoC.instructionAddress);
2403 }
2404 }
2405 else {
2406 if ( _options.verboseOptimizationHints() )
2407 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX no optimization done\n", infoC.instructionAddress);
2408 }
2409 }
2410 else if ( isADD ) {
2411 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2412 LOH_ASSERT(addInfoB.srcReg == adrpInfoA.destReg);
2413 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2414 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2415 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2416 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2417 // can do T4 transform
2418 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress, infoA.instructionAddress));
2419 set32LE(infoB.instructionContent, makeNOP());
2420 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2421 if ( _options.verboseOptimizationHints() ) {
2422 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADR/STR\n", infoC.instructionAddress);
2423 }
2424 }
2425 else if ( ((infoA.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2426 // can do T2 transform
2427 set32LE(infoB.instructionContent, makeNOP());
2428 ldrInfoC.baseReg = adrpInfoA.destReg;
2429 ldrInfoC.offset += addInfoB.addend;
2430 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2431 if ( _options.verboseOptimizationHints() ) {
2432 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADRP/NOP/STR\n", infoC.instructionAddress);
2433 }
2434 }
2435 else {
2436 // T3 transform already done by ld::passes:got:doPass()
2437 if ( _options.verboseOptimizationHints() ) {
2438 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T3 transformed to ADRP/ADD/STR\n", infoC.instructionAddress);
2439 }
2440 }
2441 }
2442 else {
2443 if ( _options.verboseOptimizationHints() )
2444 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX not ADD or LDR\n", infoC.instructionAddress);
2445 }
2446 break;
2447 case LOH_ARM64_ADRP_LDR_GOT:
2448 LOH_ASSERT(alt.info.count == 1);
2449 LOH_ASSERT(isPageKind(infoA.fixup, true));
2450 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2451 LOH_ASSERT(infoA.target == infoB.target);
2452 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2453 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2454 isADD = parseADD(infoB.instruction, addInfoB);
2455 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2456 if ( isADRP ) {
2457 if ( isLDR ) {
2458 if ( usableSegment && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2459 // can do T5 transform (LDR literal load of GOT)
2460 set32LE(infoA.instructionContent, makeNOP());
2461 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2462 if ( _options.verboseOptimizationHints() ) {
2463 fprintf(stderr, "adrp-ldr-got at 0x%08llX T5 transformed to NOP/LDR\n", infoC.instructionAddress);
2464 }
2465 }
2466 }
2467 else if ( isADD ) {
2468 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2469 // can do T4 transform (ADR to compute local address)
2470 set32LE(infoA.instructionContent, makeADR(addInfoB.destReg, infoA.targetAddress, infoA.instructionAddress));
2471 set32LE(infoB.instructionContent, makeNOP());
2472 if ( _options.verboseOptimizationHints() ) {
2473 fprintf(stderr, "adrp-ldr-got at 0x%08llX T4 transformed to ADR/STR\n", infoC.instructionAddress);
2474 }
2475 }
2476 }
2477 else {
2478 if ( _options.verboseOptimizationHints() )
2479 fprintf(stderr, "adrp-ldr-got at 0x%08llX not LDR or ADD\n", infoB.instructionAddress);
2480 }
2481 }
2482 else {
2483 if ( _options.verboseOptimizationHints() )
2484 fprintf(stderr, "adrp-ldr-got at 0x%08llX not ADRP\n", infoA.instructionAddress);
2485 }
2486 break;
2487 default:
2488 if ( _options.verboseOptimizationHints() )
2489 fprintf(stderr, "unknown hint kind %d alt.info.kind at 0x%08llX\n", alt.info.kind, infoA.instructionAddress);
2490 break;
2491 }
2492 }
2493 // apply hints pass 2
2494 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2495 if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint )
2496 continue;
2497 InstructionInfo infoA;
2498 InstructionInfo infoB;
2499 ld::Fixup::LOH_arm64 alt;
2500 alt.addend = fit->u.addend;
2501 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta1 << 2), &infoA);
2502 if ( alt.info.count > 0 )
2503 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta2 << 2), &infoB);
2504
2505 switch ( alt.info.kind ) {
2506 case LOH_ARM64_ADRP_ADRP:
2507 LOH_ASSERT(isPageKind(infoA.fixup));
2508 LOH_ASSERT(isPageKind(infoB.fixup));
2509 if ( (infoA.instruction & 0x9F000000) != 0x90000000 ) {
2510 if ( _options.verboseOptimizationHints() )
2511 fprintf(stderr, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoA.instructionAddress, infoA.instruction);
2512 sAdrpNA++;
2513 break;
2514 }
2515 if ( (infoB.instruction & 0x9F000000) != 0x90000000 ) {
2516 if ( _options.verboseOptimizationHints() )
2517 fprintf(stderr, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoB.instructionAddress, infoA.instruction);
2518 sAdrpNA++;
2519 break;
2520 }
2521 if ( (infoA.targetAddress & (-4096)) == (infoB.targetAddress & (-4096)) ) {
2522 set32LE(infoB.instructionContent, 0xD503201F);
2523 sAdrpNoped++;
2524 }
2525 else {
2526 sAdrpNotNoped++;
2527 }
2528 break;
2529 }
2530 }
2531 }
2532 #endif // SUPPORT_ARCH_arm64
2533
2534 }
2535
2536 void OutputFile::copyNoOps(uint8_t* from, uint8_t* to, bool thumb)
2537 {
2538 switch ( _options.architecture() ) {
2539 case CPU_TYPE_I386:
2540 case CPU_TYPE_X86_64:
2541 for (uint8_t* p=from; p < to; ++p)
2542 *p = 0x90;
2543 break;
2544 case CPU_TYPE_ARM:
2545 if ( thumb ) {
2546 for (uint8_t* p=from; p < to; p += 2)
2547 OSWriteLittleInt16((uint16_t*)p, 0, 0x46c0);
2548 }
2549 else {
2550 for (uint8_t* p=from; p < to; p += 4)
2551 OSWriteLittleInt32((uint32_t*)p, 0, 0xe1a00000);
2552 }
2553 break;
2554 default:
2555 for (uint8_t* p=from; p < to; ++p)
2556 *p = 0x00;
2557 break;
2558 }
2559 }
2560
2561 bool OutputFile::takesNoDiskSpace(const ld::Section* sect)
2562 {
2563 switch ( sect->type() ) {
2564 case ld::Section::typeZeroFill:
2565 case ld::Section::typeTLVZeroFill:
2566 return _options.optimizeZeroFill();
2567 case ld::Section::typePageZero:
2568 case ld::Section::typeStack:
2569 case ld::Section::typeAbsoluteSymbols:
2570 case ld::Section::typeTentativeDefs:
2571 return true;
2572 default:
2573 break;
2574 }
2575 return false;
2576 }
2577
2578 bool OutputFile::hasZeroForFileOffset(const ld::Section* sect)
2579 {
2580 switch ( sect->type() ) {
2581 case ld::Section::typeZeroFill:
2582 case ld::Section::typeTLVZeroFill:
2583 return _options.optimizeZeroFill();
2584 case ld::Section::typePageZero:
2585 case ld::Section::typeStack:
2586 case ld::Section::typeTentativeDefs:
2587 return true;
2588 default:
2589 break;
2590 }
2591 return false;
2592 }
2593
2594 void OutputFile::writeAtoms(ld::Internal& state, uint8_t* wholeBuffer)
2595 {
2596 // have each atom write itself
2597 uint64_t fileOffsetOfEndOfLastAtom = 0;
2598 uint64_t mhAddress = 0;
2599 bool lastAtomUsesNoOps = false;
2600 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
2601 ld::Internal::FinalSection* sect = *sit;
2602 if ( sect->type() == ld::Section::typeMachHeader )
2603 mhAddress = sect->address;
2604 if ( takesNoDiskSpace(sect) )
2605 continue;
2606 const bool sectionUsesNops = (sect->type() == ld::Section::typeCode);
2607 //fprintf(stderr, "file offset=0x%08llX, section %s\n", sect->fileOffset, sect->sectionName());
2608 std::vector<const ld::Atom*>& atoms = sect->atoms;
2609 bool lastAtomWasThumb = false;
2610 for (std::vector<const ld::Atom*>::iterator ait = atoms.begin(); ait != atoms.end(); ++ait) {
2611 const ld::Atom* atom = *ait;
2612 if ( atom->definition() == ld::Atom::definitionProxy )
2613 continue;
2614 try {
2615 uint64_t fileOffset = atom->finalAddress() - sect->address + sect->fileOffset;
2616 // check for alignment padding between atoms
2617 if ( (fileOffset != fileOffsetOfEndOfLastAtom) && lastAtomUsesNoOps ) {
2618 this->copyNoOps(&wholeBuffer[fileOffsetOfEndOfLastAtom], &wholeBuffer[fileOffset], lastAtomWasThumb);
2619 }
2620 // copy atom content
2621 atom->copyRawContent(&wholeBuffer[fileOffset]);
2622 // apply fix ups
2623 this->applyFixUps(state, mhAddress, atom, &wholeBuffer[fileOffset]);
2624 fileOffsetOfEndOfLastAtom = fileOffset+atom->size();
2625 lastAtomUsesNoOps = sectionUsesNops;
2626 lastAtomWasThumb = atom->isThumb();
2627 }
2628 catch (const char* msg) {
2629 if ( atom->file() != NULL )
2630 throwf("%s in '%s' from %s", msg, atom->name(), atom->file()->path());
2631 else
2632 throwf("%s in '%s'", msg, atom->name());
2633 }
2634 }
2635 }
2636
2637 if ( _options.verboseOptimizationHints() ) {
2638 //fprintf(stderr, "ADRP optimized away: %d\n", sAdrpNA);
2639 //fprintf(stderr, "ADRPs changed to NOPs: %d\n", sAdrpNoped);
2640 //fprintf(stderr, "ADRPs unchanged: %d\n", sAdrpNotNoped);
2641 }
2642 }
2643
2644 void OutputFile::computeContentUUID(ld::Internal& state, uint8_t* wholeBuffer)
2645 {
2646 const bool log = false;
2647 if ( (_options.outputKind() != Options::kObjectFile) || state.someObjectFileHasDwarf ) {
2648 uint8_t digest[CC_MD5_DIGEST_LENGTH];
2649 std::vector<std::pair<uint64_t, uint64_t>> excludeRegions;
2650 uint64_t bitcodeCmdOffset;
2651 uint64_t bitcodeCmdEnd;
2652 uint64_t bitcodeSectOffset;
2653 uint64_t bitcodePaddingEnd;
2654 if ( _headersAndLoadCommandAtom->bitcodeBundleCommand(bitcodeCmdOffset, bitcodeCmdEnd,
2655 bitcodeSectOffset, bitcodePaddingEnd) ) {
2656 // Exclude embedded bitcode bundle section which contains timestamps in XAR header
2657 // Note the timestamp is in the compressed XML header which means it might change the size of
2658 // bitcode section. The load command which include the size of the section and the padding after
2659 // the bitcode section should also be excluded in the UUID computation.
2660 // Bitcode section should appears before LINKEDIT
2661 // Exclude section cmd
2662 if ( log ) fprintf(stderr, "bundle cmd start=0x%08llX, bundle cmd end=0x%08llX\n",
2663 bitcodeCmdOffset, bitcodeCmdEnd);
2664 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(bitcodeCmdOffset, bitcodeCmdEnd));
2665 // Exclude section content
2666 if ( log ) fprintf(stderr, "bundle start=0x%08llX, bundle end=0x%08llX\n",
2667 bitcodeSectOffset, bitcodePaddingEnd);
2668 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(bitcodeSectOffset, bitcodePaddingEnd));
2669 }
2670 uint32_t stabsStringsOffsetStart;
2671 uint32_t tabsStringsOffsetEnd;
2672 uint32_t stabsOffsetStart;
2673 uint32_t stabsOffsetEnd;
2674 if ( _symbolTableAtom->hasStabs(stabsStringsOffsetStart, tabsStringsOffsetEnd, stabsOffsetStart, stabsOffsetEnd) ) {
2675 // find two areas of file that are stabs info and should not contribute to checksum
2676 uint64_t stringPoolFileOffset = 0;
2677 uint64_t symbolTableFileOffset = 0;
2678 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
2679 ld::Internal::FinalSection* sect = *sit;
2680 if ( sect->type() == ld::Section::typeLinkEdit ) {
2681 if ( strcmp(sect->sectionName(), "__string_pool") == 0 )
2682 stringPoolFileOffset = sect->fileOffset;
2683 else if ( strcmp(sect->sectionName(), "__symbol_table") == 0 )
2684 symbolTableFileOffset = sect->fileOffset;
2685 }
2686 }
2687 uint64_t firstStabNlistFileOffset = symbolTableFileOffset + stabsOffsetStart;
2688 uint64_t lastStabNlistFileOffset = symbolTableFileOffset + stabsOffsetEnd;
2689 uint64_t firstStabStringFileOffset = stringPoolFileOffset + stabsStringsOffsetStart;
2690 uint64_t lastStabStringFileOffset = stringPoolFileOffset + tabsStringsOffsetEnd;
2691 if ( log ) fprintf(stderr, "stabNlist offset=0x%08llX, size=0x%08llX\n", firstStabNlistFileOffset, lastStabNlistFileOffset-firstStabNlistFileOffset);
2692 if ( log ) fprintf(stderr, "stabString offset=0x%08llX, size=0x%08llX\n", firstStabStringFileOffset, lastStabStringFileOffset-firstStabStringFileOffset);
2693 assert(firstStabNlistFileOffset <= firstStabStringFileOffset);
2694 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(firstStabNlistFileOffset, lastStabNlistFileOffset));
2695 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(firstStabStringFileOffset, lastStabStringFileOffset));
2696 // exclude LINKEDIT LC_SEGMENT (size field depends on stabs size)
2697 uint64_t linkeditSegCmdOffset;
2698 uint64_t linkeditSegCmdSize;
2699 _headersAndLoadCommandAtom->linkeditCmdInfo(linkeditSegCmdOffset, linkeditSegCmdSize);
2700 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(linkeditSegCmdOffset, linkeditSegCmdOffset+linkeditSegCmdSize));
2701 if ( log ) fprintf(stderr, "linkedit SegCmdOffset=0x%08llX, size=0x%08llX\n", linkeditSegCmdOffset, linkeditSegCmdSize);
2702 uint64_t symbolTableCmdOffset;
2703 uint64_t symbolTableCmdSize;
2704 _headersAndLoadCommandAtom->symbolTableCmdInfo(symbolTableCmdOffset, symbolTableCmdSize);
2705 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(symbolTableCmdOffset, symbolTableCmdOffset+symbolTableCmdSize));
2706 if ( log ) fprintf(stderr, "linkedit SegCmdOffset=0x%08llX, size=0x%08llX\n", symbolTableCmdOffset, symbolTableCmdSize);
2707 }
2708 if ( !excludeRegions.empty() ) {
2709 CC_MD5_CTX md5state;
2710 CC_MD5_Init(&md5state);
2711 // rdar://problem/19487042 include the output leaf file name in the hash
2712 const char* lastSlash = strrchr(_options.outputFilePath(), '/');
2713 if ( lastSlash != NULL ) {
2714 CC_MD5_Update(&md5state, lastSlash, strlen(lastSlash));
2715 }
2716 std::sort(excludeRegions.begin(), excludeRegions.end());
2717 uint64_t checksumStart = 0;
2718 for ( auto& region : excludeRegions ) {
2719 uint64_t regionStart = region.first;
2720 uint64_t regionEnd = region.second;
2721 assert(checksumStart <= regionStart && regionStart <= regionEnd && "Region overlapped");
2722 if ( log ) fprintf(stderr, "checksum 0x%08llX -> 0x%08llX\n", checksumStart, regionStart);
2723 CC_MD5_Update(&md5state, &wholeBuffer[checksumStart], regionStart - checksumStart);
2724 checksumStart = regionEnd;
2725 }
2726 if ( log ) fprintf(stderr, "checksum 0x%08llX -> 0x%08llX\n", checksumStart, _fileSize);
2727 CC_MD5_Update(&md5state, &wholeBuffer[checksumStart], _fileSize-checksumStart);
2728 CC_MD5_Final(digest, &md5state);
2729 if ( log ) fprintf(stderr, "uuid=%02X, %02X, %02X, %02X, %02X, %02X, %02X, %02X\n", digest[0], digest[1], digest[2],
2730 digest[3], digest[4], digest[5], digest[6], digest[7]);
2731 }
2732 else {
2733 CC_MD5(wholeBuffer, _fileSize, digest);
2734 }
2735 // <rdar://problem/6723729> LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
2736 digest[6] = ( digest[6] & 0x0F ) | ( 3 << 4 );
2737 digest[8] = ( digest[8] & 0x3F ) | 0x80;
2738 // update buffer with new UUID
2739 _headersAndLoadCommandAtom->setUUID(digest);
2740 _headersAndLoadCommandAtom->recopyUUIDCommand();
2741 }
2742 }
2743
2744 static int sDescriptorOfPathToRemove = -1;
2745 static void removePathAndExit(int sig)
2746 {
2747 if ( sDescriptorOfPathToRemove != -1 ) {
2748 char path[MAXPATHLEN];
2749 if ( ::fcntl(sDescriptorOfPathToRemove, F_GETPATH, path) == 0 )
2750 ::unlink(path);
2751 }
2752 fprintf(stderr, "ld: interrupted\n");
2753 exit(1);
2754 }
2755
2756 void OutputFile::writeOutputFile(ld::Internal& state)
2757 {
2758 // for UNIX conformance, error if file exists and is not writable
2759 if ( (access(_options.outputFilePath(), F_OK) == 0) && (access(_options.outputFilePath(), W_OK) == -1) )
2760 throwf("can't write output file: %s", _options.outputFilePath());
2761
2762 mode_t permissions = 0777;
2763 if ( _options.outputKind() == Options::kObjectFile )
2764 permissions = 0666;
2765 mode_t umask = ::umask(0);
2766 ::umask(umask); // put back the original umask
2767 permissions &= ~umask;
2768 // Calling unlink first assures the file is gone so that open creates it with correct permissions
2769 // It also handles the case where __options.outputFilePath() file is not writable but its directory is
2770 // And it means we don't have to truncate the file when done writing (in case new is smaller than old)
2771 // Lastly, only delete existing file if it is a normal file (e.g. not /dev/null).
2772 struct stat stat_buf;
2773 bool outputIsRegularFile = false;
2774 bool outputIsMappableFile = false;
2775 if ( stat(_options.outputFilePath(), &stat_buf) != -1 ) {
2776 if (stat_buf.st_mode & S_IFREG) {
2777 outputIsRegularFile = true;
2778 // <rdar://problem/12264302> Don't use mmap on non-hfs volumes
2779 struct statfs fsInfo;
2780 if ( statfs(_options.outputFilePath(), &fsInfo) != -1 ) {
2781 if ( strcmp(fsInfo.f_fstypename, "hfs") == 0) {
2782 (void)unlink(_options.outputFilePath());
2783 outputIsMappableFile = true;
2784 }
2785 }
2786 else {
2787 outputIsMappableFile = false;
2788 }
2789 }
2790 else {
2791 outputIsRegularFile = false;
2792 }
2793 }
2794 else {
2795 // special files (pipes, devices, etc) must already exist
2796 outputIsRegularFile = true;
2797 // output file does not exist yet
2798 char dirPath[PATH_MAX];
2799 strcpy(dirPath, _options.outputFilePath());
2800 char* end = strrchr(dirPath, '/');
2801 if ( end != NULL ) {
2802 end[1] = '\0';
2803 struct statfs fsInfo;
2804 if ( statfs(dirPath, &fsInfo) != -1 ) {
2805 if ( strcmp(fsInfo.f_fstypename, "hfs") == 0) {
2806 outputIsMappableFile = true;
2807 }
2808 }
2809 }
2810 }
2811
2812 //fprintf(stderr, "outputIsMappableFile=%d, outputIsRegularFile=%d, path=%s\n", outputIsMappableFile, outputIsRegularFile, _options.outputFilePath());
2813
2814 int fd;
2815 // Construct a temporary path of the form {outputFilePath}.ld_XXXXXX
2816 const char filenameTemplate[] = ".ld_XXXXXX";
2817 char tmpOutput[PATH_MAX];
2818 uint8_t *wholeBuffer;
2819 if ( outputIsRegularFile && outputIsMappableFile ) {
2820 // <rdar://problem/20959031> ld64 should clean up temporary files on SIGINT
2821 ::signal(SIGINT, removePathAndExit);
2822
2823 strcpy(tmpOutput, _options.outputFilePath());
2824 // If the path is too long to add a suffix for a temporary name then
2825 // just fall back to using the output path.
2826 if (strlen(tmpOutput)+strlen(filenameTemplate) < PATH_MAX) {
2827 strcat(tmpOutput, filenameTemplate);
2828 fd = mkstemp(tmpOutput);
2829 sDescriptorOfPathToRemove = fd;
2830 }
2831 else {
2832 fd = open(tmpOutput, O_RDWR|O_CREAT, permissions);
2833 }
2834 if ( fd == -1 )
2835 throwf("can't open output file for writing '%s', errno=%d", tmpOutput, errno);
2836 if ( ftruncate(fd, _fileSize) == -1 ) {
2837 int err = errno;
2838 unlink(tmpOutput);
2839 if ( err == ENOSPC )
2840 throwf("not enough disk space for writing '%s'", _options.outputFilePath());
2841 else
2842 throwf("can't grow file for writing '%s', errno=%d", _options.outputFilePath(), err);
2843 }
2844
2845 wholeBuffer = (uint8_t *)mmap(NULL, _fileSize, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0);
2846 if ( wholeBuffer == MAP_FAILED )
2847 throwf("can't create buffer of %llu bytes for output", _fileSize);
2848 }
2849 else {
2850 if ( outputIsRegularFile )
2851 fd = open(_options.outputFilePath(), O_RDWR|O_CREAT, permissions);
2852 else
2853 fd = open(_options.outputFilePath(), O_WRONLY);
2854 if ( fd == -1 )
2855 throwf("can't open output file for writing: %s, errno=%d", _options.outputFilePath(), errno);
2856 // try to allocate buffer for entire output file content
2857 wholeBuffer = (uint8_t*)calloc(_fileSize, 1);
2858 if ( wholeBuffer == NULL )
2859 throwf("can't create buffer of %llu bytes for output", _fileSize);
2860 }
2861
2862 if ( _options.UUIDMode() == Options::kUUIDRandom ) {
2863 uint8_t bits[16];
2864 ::uuid_generate_random(bits);
2865 _headersAndLoadCommandAtom->setUUID(bits);
2866 }
2867
2868 writeAtoms(state, wholeBuffer);
2869
2870 // compute UUID
2871 if ( _options.UUIDMode() == Options::kUUIDContent )
2872 computeContentUUID(state, wholeBuffer);
2873
2874 if ( outputIsRegularFile && outputIsMappableFile ) {
2875 if ( ::chmod(tmpOutput, permissions) == -1 ) {
2876 unlink(tmpOutput);
2877 throwf("can't set permissions on output file: %s, errno=%d", tmpOutput, errno);
2878 }
2879 if ( ::rename(tmpOutput, _options.outputFilePath()) == -1 && strcmp(tmpOutput, _options.outputFilePath()) != 0) {
2880 unlink(tmpOutput);
2881 throwf("can't move output file in place, errno=%d", errno);
2882 }
2883 }
2884 else {
2885 if ( ::write(fd, wholeBuffer, _fileSize) == -1 ) {
2886 throwf("can't write to output file: %s, errno=%d", _options.outputFilePath(), errno);
2887 }
2888 sDescriptorOfPathToRemove = -1;
2889 ::close(fd);
2890 // <rdar://problem/13118223> NFS: iOS incremental builds in Xcode 4.6 fail with codesign error
2891 // NFS seems to pad the end of the file sometimes. Calling trunc seems to correct it...
2892 ::truncate(_options.outputFilePath(), _fileSize);
2893 }
2894
2895 // Rename symbol map file if needed
2896 if ( _options.renameReverseSymbolMap() ) {
2897 assert(_options.hideSymbols() && _options.reverseSymbolMapPath() != NULL && "Must hide symbol and specify a path");
2898 uuid_string_t UUIDString;
2899 const uint8_t* rawUUID = _headersAndLoadCommandAtom->getUUID();
2900 uuid_unparse_upper(rawUUID, UUIDString);
2901 char outputMapPath[PATH_MAX];
2902 sprintf(outputMapPath, "%s/%s.bcsymbolmap", _options.reverseSymbolMapPath(), UUIDString);
2903 if ( ::rename(_options.reverseMapTempPath().c_str(), outputMapPath) != 0 )
2904 throwf("could not create bcsymbolmap file: %s", outputMapPath);
2905 }
2906 }
2907
2908 struct AtomByNameSorter
2909 {
2910 bool operator()(const ld::Atom* left, const ld::Atom* right) const
2911 {
2912 return (strcmp(left->name(), right->name()) < 0);
2913 }
2914
2915 bool operator()(const ld::Atom* left, const char* right) const
2916 {
2917 return (strcmp(left->name(), right) < 0);
2918 }
2919
2920 bool operator()(const char* left, const ld::Atom* right) const
2921 {
2922 return (strcmp(left, right->name()) < 0);
2923 }
2924 };
2925
2926
2927 class NotInSet
2928 {
2929 public:
2930 NotInSet(const std::set<const ld::Atom*>& theSet) : _set(theSet) {}
2931
2932 bool operator()(const ld::Atom* atom) const {
2933 return ( _set.count(atom) == 0 );
2934 }
2935 private:
2936 const std::set<const ld::Atom*>& _set;
2937 };
2938
2939
// Partitions every atom into the three symbol-table groups (_localAtoms,
// _exportedAtoms, _importedAtoms), assigns mach-o section indexes (n_sect),
// and normalizes each atom's symbol-table inclusion and scope for the output
// kind being produced.  For kext bundles with export restrictions, prunes
// unreferenced imports.  Finally sorts the exported/imported lists by name
// and cross-checks $ld$add$/$ld$hide$ special linker symbols against the
// exported symbols, warning on mismatches.
void OutputFile::buildSymbolTable(ld::Internal& state)
{
	unsigned int machoSectionIndex = 0;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		// hidden sections and tentative-def sections do not consume an n_sect index
		bool setMachoSectionIndex = !sect->isSectionHidden() && (sect->type() != ld::Section::typeTentativeDefs);
		if ( setMachoSectionIndex )
			++machoSectionIndex;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			if ( setMachoSectionIndex )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex);
			else if ( sect->type() == ld::Section::typeMachHeader )
				(const_cast<ld::Atom*>(atom))->setMachoSection(1); // __mh_execute_header is not in any section but needs n_sect==1
			else if ( sect->type() == ld::Section::typeLastSection )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex); // use section index of previous section
			else if ( sect->type() == ld::Section::typeFirstSection )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex+1); // use section index of next section

			// in -r mode, clarify symbolTableNotInFinalLinkedImages
			if ( _options.outputKind() == Options::kObjectFile ) {
				if ( (_options.architecture() == CPU_TYPE_X86_64)
					|| (_options.architecture() == CPU_TYPE_ARM64)
					) {
					// x86_64 .o files need labels on anonymous literal strings
					if ( (sect->type() == ld::Section::typeCString) && (atom->combine() == ld::Atom::combineByNameAndContent) ) {
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
						_localAtoms.push_back(atom);
						continue;
					}
				}
				// CFI (eh_frame) labels are kept or dropped per -remove_eh_labels
				if ( sect->type() == ld::Section::typeCFI ) {
					if ( _options.removeEHLabels() )
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					else
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
				}
				// alias atoms are emitted as undefined symbols in -r output
				else if ( sect->type() == ld::Section::typeTempAlias ) {
					assert(_options.outputKind() == Options::kObjectFile);
					_importedAtoms.push_back(atom);
					continue;
				}
				if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages )
					(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
			}

			// TEMP work around until <rdar://problem/7702923> goes in
			if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip)
				&& (atom->scope() == ld::Atom::scopeLinkageUnit)
				&& (_options.outputKind() == Options::kDynamicLibrary) ) {
					(const_cast<ld::Atom*>(atom))->setScope(ld::Atom::scopeGlobal);
			}

			// <rdar://problem/6783167> support auto hidden weak symbols: .weak_def_can_be_hidden
			if ( atom->autoHide() && (_options.outputKind() != Options::kObjectFile) ) {
				// adding auto-hide symbol to .exp file should keep it global
				if ( !_options.hasExportMaskList() || !_options.shouldExport(atom->name()) )
					(const_cast<ld::Atom*>(atom))->setScope(ld::Atom::scopeLinkageUnit);
			}

			// <rdar://problem/8626058> ld should consistently warn when resolvers are not exported
			if ( (atom->contentType() == ld::Atom::typeResolver) && (atom->scope() == ld::Atom::scopeLinkageUnit) )
				warning("resolver functions should be external, but '%s' is hidden", atom->name());

			if ( sect->type() == ld::Section::typeImportProxies ) {
				// weak-import proxies force the MH_WEAK_DEFINES-style flag path
				if ( atom->combine() == ld::Atom::combineByName )
					this->usesWeakExternalSymbols = true;
				// alias proxy is a re-export with a name change, don't import changed name
				if ( ! atom->isAlias() )
					_importedAtoms.push_back(atom);
				// scope of proxies are usually linkage unit, so done
				// if scope is global, we need to re-export it too
				if ( atom->scope() == ld::Atom::scopeGlobal )
					_exportedAtoms.push_back(atom);
				continue;
			}
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages ) {
				assert(_options.outputKind() != Options::kObjectFile);
				continue; // don't add to symbol table
			}
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotIn ) {
				continue; // don't add to symbol table
			}
			if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel)
				&& (_options.outputKind() != Options::kObjectFile) ) {
				continue; // don't add to symbol table
			}

			if ( (atom->definition() == ld::Atom::definitionTentative) && (_options.outputKind() == Options::kObjectFile) ) {
				if ( _options.makeTentativeDefinitionsReal() ) {
					// -r -d turns tentative defintions into real def
					_exportedAtoms.push_back(atom);
				}
				else {
					// in mach-o object files tentative defintions are stored like undefined symbols
					_importedAtoms.push_back(atom);
				}
				continue;
			}

			// final classification by scope
			switch ( atom->scope() ) {
				case ld::Atom::scopeTranslationUnit:
					if ( _options.keepLocalSymbol(atom->name()) ) {
						_localAtoms.push_back(atom);
					}
					else {
						if ( _options.outputKind() == Options::kObjectFile ) {
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel);
							_localAtoms.push_back(atom);
						}
						else
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					}
					break;
				case ld::Atom::scopeGlobal:
					_exportedAtoms.push_back(atom);
					break;
				case ld::Atom::scopeLinkageUnit:
					if ( _options.outputKind() == Options::kObjectFile ) {
						if ( _options.keepPrivateExterns() ) {
							_exportedAtoms.push_back(atom);
						}
						else if ( _options.keepLocalSymbol(atom->name()) ) {
							_localAtoms.push_back(atom);
						}
						else {
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel);
							_localAtoms.push_back(atom);
						}
					}
					else {
						if ( _options.keepLocalSymbol(atom->name()) )
							_localAtoms.push_back(atom);
						// <rdar://problem/5804214> ld should never have a symbol in the non-lazy indirect symbol table with index 0
						// this works by making __mh_execute_header be a local symbol which takes symbol index 0
						else if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip) && !_options.makeCompressedDyldInfo() )
							_localAtoms.push_back(atom);
						else
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					}
					break;
			}
		}
	}

	// <rdar://problem/6978069> ld adds undefined symbol from .exp file to binary
	if ( (_options.outputKind() == Options::kKextBundle) && _options.hasExportRestrictList() ) {
		// search for referenced undefines
		std::set<const ld::Atom*> referencedProxyAtoms;
		for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
			ld::Internal::FinalSection* sect = *sit;
			for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
				const ld::Atom* atom = *ait;
				for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
					switch ( fit->binding ) {
						case ld::Fixup::bindingsIndirectlyBound:
							referencedProxyAtoms.insert(state.indirectBindingTable[fit->u.bindingIndex]);
							break;
						case ld::Fixup::bindingDirectlyBound:
							referencedProxyAtoms.insert(fit->u.target);
							break;
						default:
							break;
					}
				}
			}
		}
		// remove any unreferenced _importedAtoms
		_importedAtoms.erase(std::remove_if(_importedAtoms.begin(), _importedAtoms.end(), NotInSet(referencedProxyAtoms)), _importedAtoms.end());
	}

	// sort by name
	std::sort(_exportedAtoms.begin(), _exportedAtoms.end(), AtomByNameSorter());
	std::sort(_importedAtoms.begin(), _importedAtoms.end(), AtomByNameSorter());

	// collect $ld$add$... / $ld$hide$... special symbols, keyed by the symbol
	// they add/hide, so they can be validated against the exported list below
	std::map<std::string, std::vector<std::string>> addedSymbols;
	std::map<std::string, std::vector<std::string>> hiddenSymbols;
	for (const auto *atom : _exportedAtoms) {
		// The exported symbols have already been sorted. Early exit the loop
		// once we see a symbol that is lexicographically past the special
		// linker symbol.
		if (atom->name()[0] > '$')
			break;

		std::string name(atom->name());
		if (name.rfind("$ld$add$", 7) == 0) {
			// layout is "$ld$add$<condition>$<symbol>"; locate the '$' ending the
			// condition (search starts at 10 — assumes condition is >= 2 chars; TODO confirm)
			auto pos = name.find_first_of('$', 10);
			if (pos == std::string::npos) {
				warning("bad special linker symbol '%s'", atom->name());
				continue;
			}
			auto &&symbolName = name.substr(pos+1);
			// first occurrence creates the vector; later ones append
			auto it = addedSymbols.emplace(symbolName, std::initializer_list<std::string>{name});
			if (!it.second)
				it.first->second.emplace_back(name);
		} else if (name.rfind("$ld$hide$", 8) == 0) {
			// layout is "$ld$hide$<condition>$<symbol>"
			auto pos = name.find_first_of('$', 11);
			if (pos == std::string::npos) {
				warning("bad special linker symbol '%s'", atom->name());
				continue;
			}
			auto &&symbolName = name.substr(pos+1);
			auto it = hiddenSymbols.emplace(symbolName, std::initializer_list<std::string>{name});
			if (!it.second)
				it.first->second.emplace_back(name);
		}
	}

	// a $ld$add$ for a symbol that is already exported is redundant — warn
	for (const auto &it : addedSymbols) {
		if (!std::binary_search(_exportedAtoms.begin(), _exportedAtoms.end(), it.first.c_str(), AtomByNameSorter()))
			continue;
		for (const auto &symbol : it.second)
			warning("linker symbol '%s' adds already existing symbol '%s'", symbol.c_str(), it.first.c_str());
	}

	// drop $ld$hide$ entries whose target actually exists; what remains hides nothing
	auto it = hiddenSymbols.begin();
	while (it != hiddenSymbols.end()) {
		if (std::binary_search(_exportedAtoms.begin(), _exportedAtoms.end(), it->first.c_str(), AtomByNameSorter()))
			it = hiddenSymbols.erase(it);
		else
			++it;
	}

	for (const auto &it : hiddenSymbols) {
		for (const auto &symbol : it.second)
			warning("linker symbol '%s' hides a non-existent symbol '%s'", symbol.c_str(), it.first.c_str());
	}
}
3168
// For -preload output, creates and registers the LINKEDIT atoms (local and
// external relocations, indirect symbol table, symbol table, string pool)
// for the target architecture.  Each architecture case is identical except
// for the mach-o template parameter; -preload orders LINKEDIT content
// differently from the normal path (see addLinkEdit).
void OutputFile::addPreloadLinkEdit(ld::Internal& state)
{
	switch ( _options.architecture() ) {
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<x86>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<x86>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				// indirect symbol table, symbol table, then its string pool (4-byte aligned)
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<x86>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<x86_64>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<x86_64>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86_64>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<x86_64>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_arm_any
		case CPU_TYPE_ARM:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<arm>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<arm>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<arm>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_arm64
		case CPU_TYPE_ARM64:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<arm64>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<arm64>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm64>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<arm64>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
		default:
			throw "-preload not supported";
	}

}
3257
3258
// Instantiate and register the LINKEDIT atoms (section relocations, dyld
// rebase/bind/export info, classic relocations, split-seg info, function
// starts, data-in-code, optimization hints, symbol table, indirect symbol
// table and string pool) for the target architecture.  Each atom class is
// templated on the architecture's traits, so the same sequence is repeated
// once per CPU type.  The order of the state.addAtom() calls determines the
// order of the content within LINKEDIT.
void OutputFile::addLinkEdit(ld::Internal& state)
{
	// for historical reasons, -preload orders LINKEDIT content differently
	if ( _options.outputKind() == Options::kPreload )
		return addPreloadLinkEdit(state);

	switch ( _options.architecture() ) {
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			// .o files keep per-section relocations
			if ( _hasSectionRelocations ) {
				_sectionsRelocationsAtom = new SectionRelocationsAtom<x86>(_options, state, *this);
				sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
			}
			// compressed dyld info: rebase, bind, weak-bind, lazy-bind, export trie
			if ( _hasDyldInfo ) {
				_rebasingInfoAtom = new RebaseInfoAtom<x86>(_options, state, *this);
				rebaseSection = state.addAtom(*_rebasingInfoAtom);

				_bindingInfoAtom = new BindingInfoAtom<x86>(_options, state, *this);
				bindingSection = state.addAtom(*_bindingInfoAtom);

				_weakBindingInfoAtom = new WeakBindingInfoAtom<x86>(_options, state, *this);
				weakBindingSection = state.addAtom(*_weakBindingInfoAtom);

				_lazyBindingInfoAtom = new LazyBindingInfoAtom<x86>(_options, state, *this);
				lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);

				_exportInfoAtom = new ExportInfoAtom<x86>(_options, state, *this);
				exportSection = state.addAtom(*_exportInfoAtom);
			}
			// classic (non-compressed) dyld relocations
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<x86>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			// shared-cache split-seg info; V2 vs V1 encoding chosen by options
			if ( _hasSplitSegInfo ) {
				if ( _options.sharedRegionEncodingV2() )
					_splitSegInfoAtom = new SplitSegInfoV2Atom<x86>(_options, state, *this);
				else
					_splitSegInfoAtom = new SplitSegInfoV1Atom<x86>(_options, state, *this);
				splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
			}
			if ( _hasFunctionStartsInfo ) {
				_functionStartsAtom = new FunctionStartsAtom<x86>(_options, state, *this);
				functionStartsSection = state.addAtom(*_functionStartsAtom);
			}
			if ( _hasDataInCodeInfo ) {
				_dataInCodeAtom = new DataInCodeAtom<x86>(_options, state, *this);
				dataInCodeSection = state.addAtom(*_dataInCodeAtom);
			}
			if ( _hasOptimizationHints ) {
				_optimizationHintsAtom = new OptimizationHintsAtom<x86>(_options, state, *this);
				optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
			}
			// symbol table is added before external relocs; indirect symbol table
			// and string pool come last (note: string pool padded to 4 bytes here)
			if ( _hasSymbolTable ) {
				_symbolTableAtom = new SymbolTableAtom<x86>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<x86>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_x86_64
		// same sequence as i386 above, instantiated for x86_64
		case CPU_TYPE_X86_64:
			if ( _hasSectionRelocations ) {
				_sectionsRelocationsAtom = new SectionRelocationsAtom<x86_64>(_options, state, *this);
				sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
			}
			if ( _hasDyldInfo ) {
				_rebasingInfoAtom = new RebaseInfoAtom<x86_64>(_options, state, *this);
				rebaseSection = state.addAtom(*_rebasingInfoAtom);

				_bindingInfoAtom = new BindingInfoAtom<x86_64>(_options, state, *this);
				bindingSection = state.addAtom(*_bindingInfoAtom);

				_weakBindingInfoAtom = new WeakBindingInfoAtom<x86_64>(_options, state, *this);
				weakBindingSection = state.addAtom(*_weakBindingInfoAtom);

				_lazyBindingInfoAtom = new LazyBindingInfoAtom<x86_64>(_options, state, *this);
				lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);

				_exportInfoAtom = new ExportInfoAtom<x86_64>(_options, state, *this);
				exportSection = state.addAtom(*_exportInfoAtom);
			}
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<x86_64>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasSplitSegInfo ) {
				if ( _options.sharedRegionEncodingV2() )
					_splitSegInfoAtom = new SplitSegInfoV2Atom<x86_64>(_options, state, *this);
				else
					_splitSegInfoAtom = new SplitSegInfoV1Atom<x86_64>(_options, state, *this);
				splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
			}
			if ( _hasFunctionStartsInfo ) {
				_functionStartsAtom = new FunctionStartsAtom<x86_64>(_options, state, *this);
				functionStartsSection = state.addAtom(*_functionStartsAtom);
			}
			if ( _hasDataInCodeInfo ) {
				_dataInCodeAtom = new DataInCodeAtom<x86_64>(_options, state, *this);
				dataInCodeSection = state.addAtom(*_dataInCodeAtom);
			}
			if ( _hasOptimizationHints ) {
				_optimizationHintsAtom = new OptimizationHintsAtom<x86_64>(_options, state, *this);
				optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
			}
			if ( _hasSymbolTable ) {
				_symbolTableAtom = new SymbolTableAtom<x86_64>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<x86_64>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86_64>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				// NOTE(review): only x86_64 pads the string pool to 8 bytes; all
				// other architectures use 4 — confirm this asymmetry is intentional.
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 8);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_arm_any
		// same sequence, instantiated for 32-bit arm
		case CPU_TYPE_ARM:
			if ( _hasSectionRelocations ) {
				_sectionsRelocationsAtom = new SectionRelocationsAtom<arm>(_options, state, *this);
				sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
			}
			if ( _hasDyldInfo ) {
				_rebasingInfoAtom = new RebaseInfoAtom<arm>(_options, state, *this);
				rebaseSection = state.addAtom(*_rebasingInfoAtom);

				_bindingInfoAtom = new BindingInfoAtom<arm>(_options, state, *this);
				bindingSection = state.addAtom(*_bindingInfoAtom);

				_weakBindingInfoAtom = new WeakBindingInfoAtom<arm>(_options, state, *this);
				weakBindingSection = state.addAtom(*_weakBindingInfoAtom);

				_lazyBindingInfoAtom = new LazyBindingInfoAtom<arm>(_options, state, *this);
				lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);

				_exportInfoAtom = new ExportInfoAtom<arm>(_options, state, *this);
				exportSection = state.addAtom(*_exportInfoAtom);
			}
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<arm>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasSplitSegInfo ) {
				if ( _options.sharedRegionEncodingV2() )
					_splitSegInfoAtom = new SplitSegInfoV2Atom<arm>(_options, state, *this);
				else
					_splitSegInfoAtom = new SplitSegInfoV1Atom<arm>(_options, state, *this);
				splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
			}
			if ( _hasFunctionStartsInfo ) {
				_functionStartsAtom = new FunctionStartsAtom<arm>(_options, state, *this);
				functionStartsSection = state.addAtom(*_functionStartsAtom);
			}
			if ( _hasDataInCodeInfo ) {
				_dataInCodeAtom = new DataInCodeAtom<arm>(_options, state, *this);
				dataInCodeSection = state.addAtom(*_dataInCodeAtom);
			}
			if ( _hasOptimizationHints ) {
				_optimizationHintsAtom = new OptimizationHintsAtom<arm>(_options, state, *this);
				optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
			}
			if ( _hasSymbolTable ) {
				_symbolTableAtom = new SymbolTableAtom<arm>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<arm>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_arm64
		// same sequence, instantiated for arm64
		case CPU_TYPE_ARM64:
			if ( _hasSectionRelocations ) {
				_sectionsRelocationsAtom = new SectionRelocationsAtom<arm64>(_options, state, *this);
				sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
			}
			if ( _hasDyldInfo ) {
				_rebasingInfoAtom = new RebaseInfoAtom<arm64>(_options, state, *this);
				rebaseSection = state.addAtom(*_rebasingInfoAtom);

				_bindingInfoAtom = new BindingInfoAtom<arm64>(_options, state, *this);
				bindingSection = state.addAtom(*_bindingInfoAtom);

				_weakBindingInfoAtom = new WeakBindingInfoAtom<arm64>(_options, state, *this);
				weakBindingSection = state.addAtom(*_weakBindingInfoAtom);

				_lazyBindingInfoAtom = new LazyBindingInfoAtom<arm64>(_options, state, *this);
				lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);

				_exportInfoAtom = new ExportInfoAtom<arm64>(_options, state, *this);
				exportSection = state.addAtom(*_exportInfoAtom);
			}
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<arm64>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasSplitSegInfo ) {
				if ( _options.sharedRegionEncodingV2() )
					_splitSegInfoAtom = new SplitSegInfoV2Atom<arm64>(_options, state, *this);
				else
					_splitSegInfoAtom = new SplitSegInfoV1Atom<arm64>(_options, state, *this);
				splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
			}
			if ( _hasFunctionStartsInfo ) {
				_functionStartsAtom = new FunctionStartsAtom<arm64>(_options, state, *this);
				functionStartsSection = state.addAtom(*_functionStartsAtom);
			}
			if ( _hasDataInCodeInfo ) {
				_dataInCodeAtom = new DataInCodeAtom<arm64>(_options, state, *this);
				dataInCodeSection = state.addAtom(*_dataInCodeAtom);
			}
			if ( _hasOptimizationHints ) {
				_optimizationHintsAtom = new OptimizationHintsAtom<arm64>(_options, state, *this);
				optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
			}
			if ( _hasSymbolTable ) {
				_symbolTableAtom = new SymbolTableAtom<arm64>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<arm64>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm64>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
		default:
			throw "unknown architecture";
	}
}
3514
3515 void OutputFile::addLoadCommands(ld::Internal& state)
3516 {
3517 switch ( _options.architecture() ) {
3518 #if SUPPORT_ARCH_x86_64
3519 case CPU_TYPE_X86_64:
3520 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<x86_64>(_options, state, *this);
3521 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3522 break;
3523 #endif
3524 #if SUPPORT_ARCH_arm_any
3525 case CPU_TYPE_ARM:
3526 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<arm>(_options, state, *this);
3527 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3528 break;
3529 #endif
3530 #if SUPPORT_ARCH_arm64
3531 case CPU_TYPE_ARM64:
3532 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<arm64>(_options, state, *this);
3533 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3534 break;
3535 #endif
3536 #if SUPPORT_ARCH_i386
3537 case CPU_TYPE_I386:
3538 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<x86>(_options, state, *this);
3539 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3540 break;
3541 #endif
3542 default:
3543 throw "unknown architecture";
3544 }
3545 }
3546
3547 uint32_t OutputFile::dylibCount()
3548 {
3549 return _dylibsToLoad.size();
3550 }
3551
3552 const ld::dylib::File* OutputFile::dylibByOrdinal(unsigned int ordinal)
3553 {
3554 assert( ordinal > 0 );
3555 assert( ordinal <= _dylibsToLoad.size() );
3556 return _dylibsToLoad[ordinal-1];
3557 }
3558
3559 bool OutputFile::hasOrdinalForInstallPath(const char* path, int* ordinal)
3560 {
3561 for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3562 const char* installPath = it->first->installPath();
3563 if ( (installPath != NULL) && (strcmp(path, installPath) == 0) ) {
3564 *ordinal = it->second;
3565 return true;
3566 }
3567 }
3568 return false;
3569 }
3570
// Returns the load ordinal previously assigned to 'dylib' by
// buildDylibOrdinalMapping().
// NOTE(review): std::map::operator[] default-inserts 0 for a dylib that was
// never assigned an ordinal; callers are expected to pass only mapped dylibs.
uint32_t OutputFile::dylibToOrdinal(const ld::dylib::File* dylib)
{
	return _dylibToOrdinal[dylib];
}
3575
3576
3577 void OutputFile::buildDylibOrdinalMapping(ld::Internal& state)
3578 {
3579 // count non-public re-exported dylibs
3580 unsigned int nonPublicReExportCount = 0;
3581 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3582 ld::dylib::File* aDylib = *it;
3583 if ( aDylib->willBeReExported() && ! aDylib->hasPublicInstallName() )
3584 ++nonPublicReExportCount;
3585 }
3586
3587 // look at each dylib supplied in state
3588 bool hasReExports = false;
3589 bool haveLazyDylibs = false;
3590 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3591 ld::dylib::File* aDylib = *it;
3592 int ordinal;
3593 if ( aDylib == state.bundleLoader ) {
3594 _dylibToOrdinal[aDylib] = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE;
3595 }
3596 else if ( this->hasOrdinalForInstallPath(aDylib->installPath(), &ordinal) ) {
3597 // already have a dylib with that install path, map all uses to that ordinal
3598 _dylibToOrdinal[aDylib] = ordinal;
3599 }
3600 else if ( aDylib->willBeLazyLoadedDylib() ) {
3601 // all lazy dylib need to be at end of ordinals
3602 haveLazyDylibs = true;
3603 }
3604 else if ( aDylib->willBeReExported() && ! aDylib->hasPublicInstallName() && (nonPublicReExportCount >= 2) ) {
3605 _dylibsToLoad.push_back(aDylib);
3606 _dylibToOrdinal[aDylib] = BIND_SPECIAL_DYLIB_SELF;
3607 }
3608 else {
3609 // first time this install path seen, create new ordinal
3610 _dylibsToLoad.push_back(aDylib);
3611 _dylibToOrdinal[aDylib] = _dylibsToLoad.size();
3612 }
3613 if ( aDylib->explicitlyLinked() && aDylib->willBeReExported() )
3614 hasReExports = true;
3615 }
3616 if ( haveLazyDylibs ) {
3617 // second pass to determine ordinals for lazy loaded dylibs
3618 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3619 ld::dylib::File* aDylib = *it;
3620 if ( aDylib->willBeLazyLoadedDylib() ) {
3621 int ordinal;
3622 if ( this->hasOrdinalForInstallPath(aDylib->installPath(), &ordinal) ) {
3623 // already have a dylib with that install path, map all uses to that ordinal
3624 _dylibToOrdinal[aDylib] = ordinal;
3625 }
3626 else {
3627 // first time this install path seen, create new ordinal
3628 _dylibsToLoad.push_back(aDylib);
3629 _dylibToOrdinal[aDylib] = _dylibsToLoad.size();
3630 }
3631 }
3632 }
3633 }
3634 _noReExportedDylibs = !hasReExports;
3635 //fprintf(stderr, "dylibs:\n");
3636 //for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3637 // fprintf(stderr, " %p ord=%u, install_name=%s\n",it->first, it->second, it->first->installPath());
3638 //}
3639 }
3640
// Returns the offset into the lazy binding info opcode stream for the lazy
// pointer at 'lpAddress', as recorded earlier via setLazyBindingInfoOffset().
// NOTE(review): operator[] default-inserts 0 if the address was never
// recorded; callers are expected to query only recorded addresses.
uint32_t OutputFile::lazyBindingInfoOffsetForLazyPointerAddress(uint64_t lpAddress)
{
	return _lazyPointerAddressToInfoOffset[lpAddress];
}
3645
// Records the offset ('lpInfoOffset') into the lazy binding info opcode
// stream for the lazy pointer located at 'lpAddress', overwriting any
// previously recorded offset for that address.
void OutputFile::setLazyBindingInfoOffset(uint64_t lpAddress, uint32_t lpInfoOffset)
{
	_lazyPointerAddressToInfoOffset[lpAddress] = lpInfoOffset;
}
3650
3651 int OutputFile::compressedOrdinalForAtom(const ld::Atom* target)
3652 {
3653 // flat namespace images use zero for all ordinals
3654 if ( _options.nameSpace() != Options::kTwoLevelNameSpace )
3655 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3656
3657 // handle -interposable
3658 if ( target->definition() == ld::Atom::definitionRegular )
3659 return BIND_SPECIAL_DYLIB_SELF;
3660
3661 // regular ordinal
3662 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
3663 if ( dylib != NULL ) {
3664 std::map<const ld::dylib::File*, int>::iterator pos = _dylibToOrdinal.find(dylib);
3665 if ( pos != _dylibToOrdinal.end() )
3666 return pos->second;
3667 assert(0 && "dylib not assigned ordinal");
3668 }
3669
3670 // handle undefined dynamic_lookup
3671 if ( _options.undefinedTreatment() == Options::kUndefinedDynamicLookup )
3672 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3673
3674 // handle -U _foo
3675 if ( _options.allowedUndefined(target->name()) )
3676 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3677
3678 throw "can't find ordinal for imported symbol";
3679 }
3680
3681
// Returns true if the fixup kind stores a PC-relative value (branch
// displacements, RIP-relative loads, ARM64 page/pageoff pairs, etc.).
// Branch-to-target kinds are treated as PC-relative except in kext bundles,
// where branches are left for the kext loader to resolve.
bool OutputFile::isPcRelStore(ld::Fixup::Kind kind)
{
	switch ( kind ) {
		case ld::Fixup::kindStoreX86BranchPCRel8:
		case ld::Fixup::kindStoreX86BranchPCRel32:
		case ld::Fixup::kindStoreX86PCRel8:
		case ld::Fixup::kindStoreX86PCRel16:
		case ld::Fixup::kindStoreX86PCRel32:
		case ld::Fixup::kindStoreX86PCRel32_1:
		case ld::Fixup::kindStoreX86PCRel32_2:
		case ld::Fixup::kindStoreX86PCRel32_4:
		case ld::Fixup::kindStoreX86PCRel32GOTLoad:
		case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
		case ld::Fixup::kindStoreX86PCRel32GOT:
		case ld::Fixup::kindStoreX86PCRel32TLVLoad:
		case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
		case ld::Fixup::kindStoreARMBranch24:
		case ld::Fixup::kindStoreThumbBranch22:
		case ld::Fixup::kindStoreARMLoad12:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressARMBranch24:
		case ld::Fixup::kindStoreTargetAddressThumbBranch22:
		case ld::Fixup::kindStoreTargetAddressARMLoad12:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreARM64Page21:
		case ld::Fixup::kindStoreARM64PageOff12:
		case ld::Fixup::kindStoreARM64GOTLoadPage21:
		case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
		case ld::Fixup::kindStoreARM64GOTLeaPage21:
		case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
		case ld::Fixup::kindStoreARM64TLVPLoadPage21:
		case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
		case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
		case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
		case ld::Fixup::kindStoreARM64PCRelToGOT:
		case ld::Fixup::kindStoreTargetAddressARM64Page21:
		case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
#endif
			return true;
		// branches are PC-relative everywhere except in kext bundles
		case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreTargetAddressARM64Branch26:
#endif
			return (_options.outputKind() != Options::kKextBundle);
		default:
			break;
	}
	return false;
}
3743
// Returns true if the fixup kind writes bytes into the output (a "store").
// The listed kinds are purely bookkeeping (grouping, target/addend setup)
// and write nothing; every other kind is assumed to store.
bool OutputFile::isStore(ld::Fixup::Kind kind)
{
	switch ( kind ) {
		case ld::Fixup::kindNone:
		case ld::Fixup::kindNoneFollowOn:
		case ld::Fixup::kindNoneGroupSubordinate:
		case ld::Fixup::kindNoneGroupSubordinateFDE:
		case ld::Fixup::kindNoneGroupSubordinateLSDA:
		case ld::Fixup::kindNoneGroupSubordinatePersonality:
		case ld::Fixup::kindSetTargetAddress:
		case ld::Fixup::kindSubtractTargetAddress:
		case ld::Fixup::kindAddAddend:
		case ld::Fixup::kindSubtractAddend:
		case ld::Fixup::kindSetTargetImageOffset:
		case ld::Fixup::kindSetTargetSectionOffset:
			return false;
		default:
			break;
	}
	return true;
}
3765
3766
// Returns true if the fixup kind establishes the cluster's (positive) target
// atom.  Dtrace site kinds count as setting a target only in object files,
// where the dtrace probes are preserved for later processing.
bool OutputFile::setsTarget(ld::Fixup::Kind kind)
{
	switch ( kind ) {
		case ld::Fixup::kindSetTargetAddress:
		case ld::Fixup::kindLazyTarget:
		case ld::Fixup::kindStoreTargetAddressLittleEndian32:
		case ld::Fixup::kindStoreTargetAddressLittleEndian64:
		case ld::Fixup::kindStoreTargetAddressBigEndian32:
		case ld::Fixup::kindStoreTargetAddressBigEndian64:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32:
		case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
		case ld::Fixup::kindStoreTargetAddressARMBranch24:
		case ld::Fixup::kindStoreTargetAddressThumbBranch22:
		case ld::Fixup::kindStoreTargetAddressARMLoad12:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreTargetAddressARM64Branch26:
		case ld::Fixup::kindStoreTargetAddressARM64Page21:
		case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
#endif
			return true;
		// dtrace probe sites only carry a target in .o output
		case ld::Fixup::kindStoreX86DtraceCallSiteNop:
		case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear:
		case ld::Fixup::kindStoreARMDtraceCallSiteNop:
		case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear:
		case ld::Fixup::kindStoreARM64DtraceCallSiteNop:
		case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear:
		case ld::Fixup::kindStoreThumbDtraceCallSiteNop:
		case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear:
			return (_options.outputKind() == Options::kObjectFile);
		default:
			break;
	}
	return false;
}
3814
3815 bool OutputFile::isPointerToTarget(ld::Fixup::Kind kind)
3816 {
3817 switch ( kind ) {
3818 case ld::Fixup::kindSetTargetAddress:
3819 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
3820 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
3821 case ld::Fixup::kindStoreTargetAddressBigEndian32:
3822 case ld::Fixup::kindStoreTargetAddressBigEndian64:
3823 case ld::Fixup::kindLazyTarget:
3824 return true;
3825 default:
3826 break;
3827 }
3828 return false;
3829 }
3830 bool OutputFile::isPointerFromTarget(ld::Fixup::Kind kind)
3831 {
3832 switch ( kind ) {
3833 case ld::Fixup::kindSubtractTargetAddress:
3834 return true;
3835 default:
3836 break;
3837 }
3838 return false;
3839 }
3840
3841
3842 uint64_t OutputFile::lookBackAddend(ld::Fixup::iterator fit)
3843 {
3844 uint64_t addend = 0;
3845 switch ( fit->clusterSize ) {
3846 case ld::Fixup::k1of1:
3847 case ld::Fixup::k1of2:
3848 case ld::Fixup::k2of2:
3849 break;
3850 case ld::Fixup::k2of3:
3851 --fit;
3852 switch ( fit->kind ) {
3853 case ld::Fixup::kindAddAddend:
3854 addend += fit->u.addend;
3855 break;
3856 case ld::Fixup::kindSubtractAddend:
3857 addend -= fit->u.addend;
3858 break;
3859 default:
3860 throw "unexpected fixup kind for binding";
3861 }
3862 break;
3863 case ld::Fixup::k1of3:
3864 ++fit;
3865 switch ( fit->kind ) {
3866 case ld::Fixup::kindAddAddend:
3867 addend += fit->u.addend;
3868 break;
3869 case ld::Fixup::kindSubtractAddend:
3870 addend -= fit->u.addend;
3871 break;
3872 default:
3873 throw "unexpected fixup kind for binding";
3874 }
3875 break;
3876 default:
3877 throw "unexpected fixup cluster size for binding";
3878 }
3879 return addend;
3880 }
3881
3882
// Walks every atom in every final section and harvests the information the
// LINKEDIT atoms need: weak-definition overrides, dyld rebase/bind info or
// classic/section relocations (depending on output kind), data-in-code
// markers, and the end of the encryptable __TEXT region.
void OutputFile::generateLinkEditInfo(ld::Internal& state)
{
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		// record end of last __TEXT section encrypted iPhoneOS apps.
		if ( _options.makeEncryptable() && (strcmp(sect->segmentName(), "__TEXT") == 0) && (strcmp(sect->sectionName(), "__oslogstring") != 0) ) {
			_encryptedTEXTendOffset = pageAlign(sect->fileOffset + sect->size);
		}
		// objc1 __OBJC/__cls_refs sections get a special lazy-dylib check below
		bool objc1ClassRefSection = ( (sect->type() == ld::Section::typeCStringPointer)
			&& (strcmp(sect->sectionName(), "__cls_refs") == 0)
			&& (strcmp(sect->segmentName(), "__OBJC") == 0) );
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;

			// Record regular atoms that override a dylib's weak definitions
			if ( (atom->scope() == ld::Atom::scopeGlobal) && atom->overridesDylibsWeakDef() ) {
				if ( _options.makeCompressedDyldInfo() ) {
					uint8_t wtype = BIND_TYPE_OVERRIDE_OF_WEAKDEF_IN_DYLIB;
					// combineNever means this definition is itself non-weak
					bool nonWeakDef = (atom->combine() == ld::Atom::combineNever);
					_weakBindingInfo.push_back(BindingInfo(wtype, atom->name(), nonWeakDef, atom->finalAddress(), 0));
				}
				this->overridesWeakExternalSymbols = true;
				if ( _options.warnWeakExports() )
					warning("overrides weak external symbol: %s", atom->name());
			}

			// Accumulate the pieces of each fixup cluster (target, minus-target,
			// addends, store), then emit the appropriate record once per cluster.
			ld::Fixup* fixupWithTarget = NULL;
			ld::Fixup* fixupWithMinusTarget = NULL;
			ld::Fixup* fixupWithStore = NULL;
			ld::Fixup* fixupWithAddend = NULL;
			const ld::Atom* target = NULL;
			const ld::Atom* minusTarget = NULL;
			uint64_t targetAddend = 0;
			uint64_t minusTargetAddend = 0;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
				if ( fit->firstInCluster() ) {
					// NOTE(review): fixupWithAddend is not reset here, so a value
					// from a previous cluster can carry over into the next one —
					// confirm this is intentional.
					fixupWithTarget = NULL;
					fixupWithMinusTarget = NULL;
					fixupWithStore = NULL;
					target = NULL;
					minusTarget = NULL;
					targetAddend = 0;
					minusTargetAddend = 0;
				}
				if ( this->setsTarget(fit->kind) ) {
					// resolve the cluster's positive target, possibly through the
					// indirect binding table
					switch ( fit->binding ) {
						case ld::Fixup::bindingNone:
						case ld::Fixup::bindingByNameUnbound:
							break;
						case ld::Fixup::bindingByContentBound:
						case ld::Fixup::bindingDirectlyBound:
							fixupWithTarget = fit;
							target = fit->u.target;
							break;
						case ld::Fixup::bindingsIndirectlyBound:
							fixupWithTarget = fit;
							target = state.indirectBindingTable[fit->u.bindingIndex];
							break;
					}
					assert(target != NULL);
				}
				switch ( fit->kind ) {
					case ld::Fixup::kindAddAddend:
						targetAddend = fit->u.addend;
						fixupWithAddend = fit;
						break;
					case ld::Fixup::kindSubtractAddend:
						minusTargetAddend = fit->u.addend;
						fixupWithAddend = fit;
						break;
					case ld::Fixup::kindSubtractTargetAddress:
						// resolve the cluster's negative (subtracted) target
						switch ( fit->binding ) {
							case ld::Fixup::bindingNone:
							case ld::Fixup::bindingByNameUnbound:
								break;
							case ld::Fixup::bindingByContentBound:
							case ld::Fixup::bindingDirectlyBound:
								fixupWithMinusTarget = fit;
								minusTarget = fit->u.target;
								break;
							case ld::Fixup::bindingsIndirectlyBound:
								fixupWithMinusTarget = fit;
								minusTarget = state.indirectBindingTable[fit->u.bindingIndex];
								break;
						}
						assert(minusTarget != NULL);
						break;
					case ld::Fixup::kindDataInCodeStartData:
					case ld::Fixup::kindDataInCodeStartJT8:
					case ld::Fixup::kindDataInCodeStartJT16:
					case ld::Fixup::kindDataInCodeStartJT32:
					case ld::Fixup::kindDataInCodeStartJTA32:
					case ld::Fixup::kindDataInCodeEnd:
						hasDataInCode = true;
						break;
					default:
						break;
				}
				if ( this->isStore(fit->kind) ) {
					fixupWithStore = fit;
				}
				if ( fit->lastInCluster() ) {
					if ( (fixupWithStore != NULL) && (target != NULL) ) {
						// .o files get section relocs; final images get compressed
						// dyld info or classic relocations
						if ( _options.outputKind() == Options::kObjectFile ) {
							this->addSectionRelocs(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithAddend, fixupWithStore,
												target, minusTarget, targetAddend, minusTargetAddend);
						}
						else {
							if ( _options.makeCompressedDyldInfo() ) {
								this->addDyldInfo(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithStore,
												target, minusTarget, targetAddend, minusTargetAddend);
							}
							else {
								this->addClassicRelocs(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithStore,
												target, minusTarget, targetAddend, minusTargetAddend);
							}
						}
					}
					else if ( objc1ClassRefSection && (target != NULL) && (fixupWithStore == NULL) ) {
						// check for class refs to lazy loaded dylibs
						const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
						if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
							throwf("illegal class reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
					}
				}
			}
		}
	}
}
4012
4013
4014 void OutputFile::noteTextReloc(const ld::Atom* atom, const ld::Atom* target)
4015 {
4016 if ( (atom->contentType() == ld::Atom::typeStub) || (atom->contentType() == ld::Atom::typeStubHelper) ) {
4017 // silently let stubs (synthesized by linker) use text relocs
4018 }
4019 else if ( _options.allowTextRelocs() ) {
4020 if ( _options.warnAboutTextRelocs() )
4021 warning("text reloc in %s to %s", atom->name(), target->name());
4022 }
4023 else if ( _options.positionIndependentExecutable() && (_options.outputKind() == Options::kDynamicExecutable)
4024 && ((_options.iOSVersionMin() >= ld::iOS_4_3) || (_options.macosxVersionMin() >= ld::mac10_7)) ) {
4025 if ( ! this->pieDisabled ) {
4026 switch ( _options.architecture()) {
4027 #if SUPPORT_ARCH_arm64
4028 case CPU_TYPE_ARM64:
4029 #endif
4030 #if SUPPORT_ARCH_arm64
4031 {
4032 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
4033 throwf("Absolute addressing not allowed in arm64 code but used in '%s' referencing '%s'", demangledName, _options.demangleSymbol(target->name()));
4034 }
4035 #endif
4036 default:
4037 warning("PIE disabled. Absolute addressing (perhaps -mdynamic-no-pic) not allowed in code signed PIE, "
4038 "but used in %s from %s. "
4039 "To fix this warning, don't compile with -mdynamic-no-pic or link with -Wl,-no_pie",
4040 atom->name(), atom->file()->path());
4041 }
4042 }
4043 this->pieDisabled = true;
4044 }
4045 else if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) ) {
4046 throwf("illegal text-relocoation (direct reference) to (global,weak) %s in %s from %s in %s", target->name(), target->file()->path(), atom->name(), atom->file()->path());
4047 }
4048 else {
4049 if ( (target->file() != NULL) && (atom->file() != NULL) )
4050 throwf("illegal text-relocation to '%s' in %s from '%s' in %s", target->name(), target->file()->path(), atom->name(), atom->file()->path());
4051 else
4052 throwf("illegal text reloc in '%s' to '%s'", atom->name(), target->name());
4053 }
4054 }
4055
//
// Records compressed dyld info (rebase, bind, lazy-bind, and weak-bind entries)
// for one fixup cluster of 'atom'.  Called from the cluster walker only when the
// output uses LC_DYLD_INFO (makeCompressedDyldInfo() is true) and fixupWithStore
// is non-NULL.  target/minusTarget may be NULL when the cluster lacks them.
// Results accumulate into _rebaseInfo / _bindingInfo / _lazyBindingInfo /
// _weakBindingInfo for later encoding.
//
void OutputFile::addDyldInfo(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
							ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget, ld::Fixup* fixupWithStore,
							const ld::Atom* target, const ld::Atom* minusTarget,
							uint64_t targetAddend, uint64_t minusTargetAddend)
{
	// hidden sections (e.g. ones synthesized only for bookkeeping) get no dyld info
	if ( sect->isSectionHidden() )
		return;

	// no need to rebase or bind PCRel stores
	if ( this->isPcRelStore(fixupWithStore->kind) ) {
		// as long as target is in same linkage unit
		// NOTE(review): when target is NULL this branch is entered and target is
		// dereferenced just below; callers appear to always supply a target for
		// pc-rel stores — confirm before relying on a NULL target here.
		if ( (target == NULL) || (target->definition() != ld::Atom::definitionProxy) ) {
			// make sure target is not global and weak
			if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular)) {
				if ( (atom->section().type() == ld::Section::typeCFI)
					|| (atom->section().type() == ld::Section::typeDtraceDOF)
					|| (atom->section().type() == ld::Section::typeUnwindInfo) ) {
					// ok for __eh_frame and __unwind_info to use pointer diffs to global weak symbols
					return;
				}
				// <rdar://problem/13700961> spurious warning when weak function has reference to itself
				if ( fixupWithTarget->binding == ld::Fixup::bindingDirectlyBound ) {
					// ok to ignore pc-rel references within a weak function to itself
					return;
				}
				// Have direct reference to weak-global.  This should be an indirect reference.
				// strdup: demangleSymbol() may reuse an internal buffer, so copy the first
				// result before the second demangleSymbol() call in the warning arguments.
				const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
				warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
						"This was likely caused by different translation units being compiled with different visibility settings.",
						demangledName, atom->file()->path(), _options.demangleSymbol(target->name()), target->file()->path());
			}
			return;
		}
	}

	// no need to rebase or bind PIC internal pointer diff
	if ( minusTarget != NULL ) {
		// with pointer diffs, both need to be in same linkage unit
		assert(minusTarget->definition() != ld::Atom::definitionProxy);
		assert(target != NULL);
		assert(target->definition() != ld::Atom::definitionProxy);
		if ( target == minusTarget ) {
			// This is a compile time constant and could have been optimized away by compiler
			return;
		}

		// check if target of pointer-diff is global and weak
		if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) ) {
			if ( (atom->section().type() == ld::Section::typeCFI)
				|| (atom->section().type() == ld::Section::typeDtraceDOF)
				|| (atom->section().type() == ld::Section::typeUnwindInfo) ) {
				// ok for __eh_frame and __unwind_info to use pointer diffs to global weak symbols
				return;
			}
			// Have direct reference to weak-global.  This should be an indirect reference
			const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
			warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
					"This was likely caused by different translation units being compiled with different visibility settings.",
					demangledName, atom->file()->path(), _options.demangleSymbol(target->name()), target->file()->path());
		}
		return;
	}

	// no need to rebase or bind an atom's references to itself if the output is not slidable
	if ( (atom == target) && !_options.outputSlidable() )
		return;

	// cluster has no target, so needs no rebasing or binding
	if ( target == NULL )
		return;

	// a store into a segment with no initial write permission implies a text reloc
	bool inReadOnlySeg = ((_options.initialSegProtection(sect->segmentName()) & VM_PROT_WRITE) == 0);
	bool needsRebase = false;
	bool needsBinding = false;
	bool needsLazyBinding = false;
	bool needsWeakBinding = false;

	uint8_t rebaseType = REBASE_TYPE_POINTER;
	uint8_t type = BIND_TYPE_POINTER;
	const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
	// weak import if either the fixup says so or the dylib was force-weak-linked on the command line
	bool weak_import = (fixupWithTarget->weakImport || ((dylib != NULL) && dylib->forcedWeakLinked()));
	uint64_t address = atom->finalAddress() + fixupWithTarget->offsetInAtom;
	uint64_t addend = targetAddend - minusTargetAddend;

	// special case lazy pointers
	if ( fixupWithTarget->kind == ld::Fixup::kindLazyTarget ) {
		assert(fixupWithTarget->u.target == target);
		assert(addend == 0);
		// lazy dylib lazy pointers do not have any dyld info
		if ( atom->section().type() == ld::Section::typeLazyDylibPointer )
			return;
		// lazy binding to weak definitions are done differently
		// they are directly bound to target, then have a weak bind in case of a collision
		if ( target->combine() == ld::Atom::combineByName ) {
			if ( target->definition() == ld::Atom::definitionProxy ) {
				// weak def exported from another dylib
				// must non-lazy bind to it plus have weak binding info in case of collision
				needsBinding = true;
				needsWeakBinding = true;
			}
			else {
				// weak def in this linkage unit.
				// just rebase, plus have weak binding info in case of collision
				// this will be done by other cluster on lazy pointer atom
			}
		}
		else if ( target->contentType() == ld::Atom::typeResolver ) {
			// <rdar://problem/8553647> Hidden resolver functions should not have lazy binding info
			// <rdar://problem/12629331> Resolver function run before initializers when overriding the dyld shared cache
			// The lazy pointers used by stubs used when non-lazy binding to a resolver are not normal lazy pointers
			// and should not be in lazy binding info.
			needsLazyBinding = false;
		}
		else {
			// normal case of a pointer to non-weak-def symbol, so can lazily bind
			needsLazyBinding = true;
		}
	}
	else {
		// everything except lazy pointers
		switch ( target->definition() ) {
			case ld::Atom::definitionProxy:
				if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
					throwf("illegal data reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
				if ( target->contentType() == ld::Atom::typeTLV ) {
					// TLV imports may only be referenced through the TLV-pointer section
					if ( sect->type() != ld::Section::typeTLVPointers )
						throwf("illegal data reference in %s to thread local variable %s in dylib %s",
								atom->name(), target->name(), dylib->path());
				}
				if ( inReadOnlySeg )
					type = BIND_TYPE_TEXT_ABSOLUTE32;
				needsBinding = true;
				if ( target->combine() == ld::Atom::combineByName )
					needsWeakBinding = true;
				break;
			case ld::Atom::definitionRegular:
			case ld::Atom::definitionTentative:
				// only slideable images need rebasing info
				if ( _options.outputSlidable() ) {
					needsRebase = true;
				}
				// references to internal symbol never need binding
				if ( target->scope() != ld::Atom::scopeGlobal )
					break;
				// reference to global weak def needs weak binding
				if ( (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) )
					needsWeakBinding = true;
				else if ( _options.outputKind() == Options::kDynamicExecutable ) {
					// in main executables, the only way regular symbols are indirected is if -interposable is used
					if ( _options.interposable(target->name()) ) {
						needsRebase = false;
						needsBinding = true;
					}
				}
				else {
					// for flat-namespace or interposable two-level-namespace
					// all references to exported symbols get indirected
					if ( (_options.nameSpace() != Options::kTwoLevelNameSpace) || _options.interposable(target->name()) ) {
						// <rdar://problem/5254468> no external relocs for flat objc classes
						if ( strncmp(target->name(), ".objc_class_", 12) == 0 )
							break;
						// no rebase info for references to global symbols that will have binding info
						needsRebase = false;
						needsBinding = true;
					}
					else if ( _options.forceCoalesce(target->name()) ) {
						needsWeakBinding = true;
					}
				}
				break;
			case ld::Atom::definitionAbsolute:
				// absolute symbols never slide, so no dyld info
				break;
		}
	}

	// <rdar://problem/13828711> if target is an import alias, use base of alias
	if ( target->isAlias() && (target->definition() == ld::Atom::definitionProxy) ) {
		for (ld::Fixup::iterator fit = target->fixupsBegin(), end=target->fixupsEnd(); fit != end; ++fit) {
			if ( fit->firstInCluster() ) {
				if ( fit->kind == ld::Fixup::kindNoneFollowOn ) {
					if ( fit->binding == ld::Fixup::bindingDirectlyBound ) {
						//fprintf(stderr, "switching import of %s to import of %s\n", target->name(), fit->u.target->name());
						target = fit->u.target;
					}
				}
			}
		}
	}

	// record dyld info for this cluster
	if ( needsRebase ) {
		if ( inReadOnlySeg ) {
			noteTextReloc(atom, target);
			sect->hasLocalRelocs = true; // so dyld knows to change permissions on __TEXT segment
			rebaseType = REBASE_TYPE_TEXT_ABSOLUTE32;
		}
		if ( _options.sharedRegionEligible() ) {
			// <rdar://problem/13287063> when range checking, ignore high byte of arm64 addends
			uint64_t checkAddend = addend;
			if ( (_options.architecture() == CPU_TYPE_ARM64)
				)
				checkAddend &= 0x0FFFFFFFFFFFFFFFULL;
			if ( checkAddend != 0 ) {
				// make sure the addend does not cause the pointer to point outside the target's segment
				// if it does, update_dyld_shared_cache will not be able to put this dylib into the shared cache
				uint64_t targetAddress = target->finalAddress();
				for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
					ld::Internal::FinalSection* sct = *sit;
					uint64_t sctEnd = (sct->address+sct->size);
					if ( (sct->address <= targetAddress) && (targetAddress < sctEnd) ) {
						if ( (targetAddress+checkAddend) > sctEnd ) {
							warning("data symbol %s from %s has pointer to %s + 0x%08llX. "
									"That large of an addend may disable %s from being put in the dyld shared cache.",
									atom->name(), atom->file()->path(), target->name(), addend, _options.installPath() );
						}
					}
				}
			}
		}
		_rebaseInfo.push_back(RebaseInfo(rebaseType, address));
	}
	if ( needsBinding ) {
		if ( inReadOnlySeg ) {
			noteTextReloc(atom, target);
			sect->hasExternalRelocs = true; // so dyld knows to change permissions on __TEXT segment
		}
		_bindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
	}
	if ( needsLazyBinding ) {
		// -bind_at_load converts would-be lazy bindings into eager (non-lazy) ones
		if ( _options.bindAtLoad() )
			_bindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
		else
			_lazyBindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
	}
	if ( needsWeakBinding )
		// weak bindings carry no library ordinal (0) and are never weak-imports
		_weakBindingInfo.push_back(BindingInfo(type, 0, target->name(), false, address, addend));
}
4293
4294
//
// Records classic (pre-LC_DYLD_INFO) local/external relocation entries for one
// fixup cluster of 'atom'.  This is the counterpart of addDyldInfo() used when
// the output does not use compressed dyld info.  Appends to _localRelocsAtom /
// _externalRelocsAtom and sets sect->hasLocalRelocs / hasExternalRelocs.
//
void OutputFile::addClassicRelocs(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
								ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget, ld::Fixup* fixupWithStore,
								const ld::Atom* target, const ld::Atom* minusTarget,
								uint64_t targetAddend, uint64_t minusTargetAddend)
{
	if ( sect->isSectionHidden() )
		return;

	// non-lazy-pointer section is encoded in indirect symbol table - not using relocations
	if ( sect->type() == ld::Section::typeNonLazyPointer ) {
		// except kexts and static pie which *do* use relocations
		switch (_options.outputKind()) {
			case Options::kKextBundle:
				break;
			case Options::kStaticExecutable:
				if ( _options.positionIndependentExecutable() )
					break;
				// else fall into default case
			default:
				assert(target != NULL);
				assert(fixupWithTarget != NULL);
				return;
		}
	}

	// no need to rebase or bind PCRel stores
	if ( this->isPcRelStore(fixupWithStore->kind) ) {
		// as long as target is in same linkage unit
		if ( (target == NULL) || (target->definition() != ld::Atom::definitionProxy) )
			return;
	}

	// no need to rebase or bind PIC internal pointer diff
	if ( minusTarget != NULL ) {
		// with pointer diffs, both need to be in same linkage unit
		assert(minusTarget->definition() != ld::Atom::definitionProxy);
		assert(target != NULL);
		assert(target->definition() != ld::Atom::definitionProxy);
		// make sure target is not global and weak
		// (ok for __eh_frame and __unwind_info to use pointer diffs to global weak symbols,
		//  and ok when the diff is against the atom itself)
		if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName)
				&& (atom->section().type() != ld::Section::typeCFI)
				&& (atom->section().type() != ld::Section::typeDtraceDOF)
				&& (atom->section().type() != ld::Section::typeUnwindInfo)
				&& (minusTarget != target) ) {
			throwf("bad codegen, pointer diff in %s to global weak symbol %s", atom->name(), target->name());
		}
		return;
	}

	// cluster has no target, so needs no rebasing or binding
	if ( target == NULL )
		return;

	assert(_localRelocsAtom != NULL);
	// classic relocs record addresses relative to the reloc base, not absolute
	uint64_t relocAddress = atom->finalAddress() + fixupWithTarget->offsetInAtom - _localRelocsAtom->relocBaseAddress(state);

	bool inReadOnlySeg = ( strcmp(sect->segmentName(), "__TEXT") == 0 );
	bool needsLocalReloc = false;
	bool needsExternReloc = false;

	switch ( fixupWithStore->kind ) {
		case ld::Fixup::kindLazyTarget:
			// lazy pointers don't need relocs
			break;
		case ld::Fixup::kindStoreLittleEndian32:
		case ld::Fixup::kindStoreLittleEndian64:
		case ld::Fixup::kindStoreBigEndian32:
		case ld::Fixup::kindStoreBigEndian64:
		case ld::Fixup::kindStoreTargetAddressLittleEndian32:
		case ld::Fixup::kindStoreTargetAddressLittleEndian64:
		case ld::Fixup::kindStoreTargetAddressBigEndian32:
		case ld::Fixup::kindStoreTargetAddressBigEndian64:
			// is pointer
			switch ( target->definition() ) {
				case ld::Atom::definitionProxy:
					// target in another linkage unit always needs an external reloc
					needsExternReloc = true;
					break;
				case ld::Atom::definitionRegular:
				case ld::Atom::definitionTentative:
					// only slideable images need local relocs
					if ( _options.outputSlidable() )
						needsLocalReloc = true;
					// references to internal symbol never need binding
					if ( target->scope() != ld::Atom::scopeGlobal )
						break;
					// reference to global weak def needs weak binding in dynamic images
					if ( (target->combine() == ld::Atom::combineByName)
						&& (target->definition() == ld::Atom::definitionRegular)
						&& (_options.outputKind() != Options::kStaticExecutable)
						&& (_options.outputKind() != Options::kPreload)
						&& (atom != target) ) {
						needsExternReloc = true;
					}
					else if ( _options.outputKind() == Options::kDynamicExecutable ) {
						// in main executables, the only way regular symbols are indirected is if -interposable is used
						if ( _options.interposable(target->name()) )
							needsExternReloc = true;
					}
					else {
						// for flat-namespace or interposable two-level-namespace
						// all references to exported symbols get indirected
						if ( (_options.nameSpace() != Options::kTwoLevelNameSpace) || _options.interposable(target->name()) ) {
							// <rdar://problem/5254468> no external relocs for flat objc classes
							if ( strncmp(target->name(), ".objc_class_", 12) == 0 )
								break;
							// no rebase info for references to global symbols that will have binding info
							needsExternReloc = true;
						}
					}
					// an external reloc supersedes a local one for the same address
					if ( needsExternReloc )
						needsLocalReloc = false;
					break;
				case ld::Atom::definitionAbsolute:
					break;
			}
			if ( needsExternReloc ) {
				if ( inReadOnlySeg )
					noteTextReloc(atom, target);
				const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
				if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
					throwf("illegal data reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
				_externalRelocsAtom->addExternalPointerReloc(relocAddress, target);
				sect->hasExternalRelocs = true;
				fixupWithTarget->contentAddendOnly = true;
			}
			else if ( needsLocalReloc ) {
				assert(target != NULL);
				if ( inReadOnlySeg )
					noteTextReloc(atom, target);
				_localRelocsAtom->addPointerReloc(relocAddress, target->machoSection());
				sect->hasLocalRelocs = true;
			}
			break;
		case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreTargetAddressARM64Branch26:
#endif
			// kexts get external call-site relocs so kernel loader can slide/link them
			if ( _options.outputKind() == Options::kKextBundle ) {
				assert(target != NULL);
				if ( target->definition() == ld::Atom::definitionProxy ) {
					_externalRelocsAtom->addExternalCallSiteReloc(relocAddress, target);
					fixupWithStore->contentAddendOnly = true;
				}
			}
			break;

		case ld::Fixup::kindStoreARMLow16:
		case ld::Fixup::kindStoreThumbLow16:
			// no way to encode rebasing of binding for these instructions
			if ( _options.outputSlidable() || (target->definition() == ld::Atom::definitionProxy) )
				throwf("no supported runtime lo16 relocation in %s from %s to %s", atom->name(), atom->file()->path(), target->name());
			break;

		case ld::Fixup::kindStoreARMHigh16:
		case ld::Fixup::kindStoreThumbHigh16:
			// no way to encode rebasing of binding for these instructions
			if ( _options.outputSlidable() || (target->definition() == ld::Atom::definitionProxy) )
				throwf("no supported runtime hi16 relocation in %s from %s to %s", atom->name(), atom->file()->path(), target->name());
			break;

		default:
			break;
	}
}
4460
4461
4462 bool OutputFile::useExternalSectionReloc(const ld::Atom* atom, const ld::Atom* target, ld::Fixup* fixupWithTarget)
4463 {
4464 if ( (_options.architecture() == CPU_TYPE_X86_64)
4465 || (_options.architecture() == CPU_TYPE_ARM64)
4466 ) {
4467 // x86_64 and ARM64 use external relocations for everthing that has a symbol
4468 return ( target->symbolTableInclusion() != ld::Atom::symbolTableNotIn );
4469 }
4470
4471 // <rdar://problem/9513487> support arm branch interworking in -r mode
4472 if ( (_options.architecture() == CPU_TYPE_ARM) && (_options.outputKind() == Options::kObjectFile) ) {
4473 if ( atom->isThumb() != target->isThumb() ) {
4474 switch ( fixupWithTarget->kind ) {
4475 // have branch that switches mode, then might be 'b' not 'bl'
4476 // Force external relocation, since no way to do local reloc for 'b'
4477 case ld::Fixup::kindStoreTargetAddressThumbBranch22 :
4478 case ld::Fixup::kindStoreTargetAddressARMBranch24:
4479 return true;
4480 default:
4481 break;
4482 }
4483 }
4484 }
4485
4486 if ( (_options.architecture() == CPU_TYPE_I386) && (_options.outputKind() == Options::kObjectFile) ) {
4487 if ( target->contentType() == ld::Atom::typeTLV )
4488 return true;
4489 }
4490
4491 // most architectures use external relocations only for references
4492 // to a symbol in another translation unit or for references to "weak symbols" or tentative definitions
4493 assert(target != NULL);
4494 if ( target->definition() == ld::Atom::definitionProxy )
4495 return true;
4496 if ( (target->definition() == ld::Atom::definitionTentative) && ! _options.makeTentativeDefinitionsReal() )
4497 return true;
4498 if ( target->scope() != ld::Atom::scopeGlobal )
4499 return false;
4500 if ( (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) )
4501 return true;
4502 return false;
4503 }
4504
4505 bool OutputFile::useSectionRelocAddend(ld::Fixup* fixupWithTarget)
4506 {
4507 #if SUPPORT_ARCH_arm64
4508 if ( _options.architecture() == CPU_TYPE_ARM64 ) {
4509 switch ( fixupWithTarget->kind ) {
4510 case ld::Fixup::kindStoreARM64Branch26:
4511 case ld::Fixup::kindStoreARM64Page21:
4512 case ld::Fixup::kindStoreARM64PageOff12:
4513 return true;
4514 default:
4515 return false;
4516 }
4517 }
4518 #endif
4519 return false;
4520 }
4521
4522
4523
4524
//
// Records a section relocation (for -r / object-file output) for one fixup
// cluster of 'atom'.  Decides whether the target (and minus-target) use
// external or local relocs, marks the fixups so the content writer emits just
// the addend (or delta-to-addend) instead of the full target address, then
// appends the reloc entry to _sectionsRelocationsAtom.
//
void OutputFile::addSectionRelocs(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
								ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget,
								ld::Fixup* fixupWithAddend, ld::Fixup* fixupWithStore,
								const ld::Atom* target, const ld::Atom* minusTarget,
								uint64_t targetAddend, uint64_t minusTargetAddend)
{
	if ( sect->isSectionHidden() )
		return;

	// in -r mode where there will be no labels on __eh_frame section, there is no need for relocations
	if ( (sect->type() == ld::Section::typeCFI) && _options.removeEHLabels() )
		return;

	// non-lazy-pointer section is encoded in indirect symbol table - not using relocations
	if ( sect->type() == ld::Section::typeNonLazyPointer )
		return;

	// tentative defs don't have any relocations
	if ( sect->type() == ld::Section::typeTentativeDefs )
		return;

	assert(target != NULL);
	assert(fixupWithTarget != NULL);
	bool targetUsesExternalReloc = this->useExternalSectionReloc(atom, target, fixupWithTarget);
	bool minusTargetUsesExternalReloc = (minusTarget != NULL) && this->useExternalSectionReloc(atom, minusTarget, fixupWithMinusTarget);

	// in x86_64 and arm64 .o files an external reloc means the content contains just the addend
	if ( (_options.architecture() == CPU_TYPE_X86_64)
	  || (_options.architecture() == CPU_TYPE_ARM64)
	   ) {
		if ( targetUsesExternalReloc ) {
			fixupWithTarget->contentAddendOnly = true;
			fixupWithStore->contentAddendOnly = true;
			// arm64 instruction fixups carry the addend in a separate ARM64_RELOC_ADDEND entry,
			// so the stored content must not also include it
			if ( this->useSectionRelocAddend(fixupWithStore) && (fixupWithAddend != NULL) )
				fixupWithAddend->contentIgnoresAddend = true;
		}
		if ( minusTargetUsesExternalReloc )
			fixupWithMinusTarget->contentAddendOnly = true;
	}
	else {
		// for other archs, content is addend only with (non pc-rel) pointers
		// pc-rel instructions are funny. If the target is _foo+8 and _foo is
		// external, then the pc-rel instruction *evaluates* to the address 8.
		if ( targetUsesExternalReloc ) {
			// TLV support for i386 acts like RIP relative addressing
			// The addend is the offset from the PICBase to the end of the instruction
			if ( (_options.architecture() == CPU_TYPE_I386)
					&& (_options.outputKind() == Options::kObjectFile)
					&& (fixupWithStore->kind == ld::Fixup::kindStoreX86PCRel32TLVLoad) ) {
				fixupWithTarget->contentAddendOnly = true;
				fixupWithStore->contentAddendOnly = true;
			}
			else if ( isPcRelStore(fixupWithStore->kind) ) {
				// NOTE: 'Detla' is the field's actual (misspelled) name in ld::Fixup
				fixupWithTarget->contentDetlaToAddendOnly = true;
				fixupWithStore->contentDetlaToAddendOnly = true;
			}
			else if ( minusTarget == NULL ){
				fixupWithTarget->contentAddendOnly = true;
				fixupWithStore->contentAddendOnly = true;
			}
		}
	}

	if ( fixupWithStore != NULL ) {
		_sectionsRelocationsAtom->addSectionReloc(sect, fixupWithStore->kind, atom, fixupWithStore->offsetInAtom,
													targetUsesExternalReloc, minusTargetUsesExternalReloc,
													target, targetAddend, minusTarget, minusTargetAddend);
	}

}
4595
//
// Builds (v1) split-seg info for dylibs eligible for the dyld shared cache.
// Walks every fixup of every atom in __TEXT sections and records, in
// _splitSegInfos, the address+kind of each store whose value would change if
// update_dyld_shared_cache slides __TEXT and data segments independently
// (cross-segment pc-rel references, pointer diffs, arm lo16/hi16 pairs).
//
void OutputFile::makeSplitSegInfo(ld::Internal& state)
{
	if ( !_options.sharedRegionEligible() )
		return;

	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->isSectionHidden() )
			continue;
		// v1 split-seg only tracks references *from* __TEXT
		if ( strcmp(sect->segmentName(), "__TEXT") != 0 )
			continue;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			const ld::Atom* target = NULL;
			const ld::Atom* fromTarget = NULL;
			uint64_t accumulator = 0;
			bool thumbTarget;
			bool hadSubtract = false;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->firstInCluster() )
					target = NULL;
				if ( this->setsTarget(fit->kind) ) {
					accumulator = addressOf(state, fit, &target);
					thumbTarget = targetIsThumb(state, fit);
					// thumb targets have low bit set in the stored address
					if ( thumbTarget )
						accumulator |= 1;
				}
				switch ( fit->kind ) {
					case ld::Fixup::kindSubtractTargetAddress:
						accumulator -= addressOf(state, fit, &fromTarget);
						hadSubtract = true;
						break;
					case ld::Fixup::kindAddAddend:
						accumulator += fit->u.addend;
						break;
					case ld::Fixup::kindSubtractAddend:
						accumulator -= fit->u.addend;
						break;
					case ld::Fixup::kindStoreBigEndian32:
					case ld::Fixup::kindStoreLittleEndian32:
					case ld::Fixup::kindStoreLittleEndian64:
					case ld::Fixup::kindStoreTargetAddressLittleEndian32:
					case ld::Fixup::kindStoreTargetAddressLittleEndian64:
						// if no subtract, then this is an absolute pointer which means
						// there is also a text reloc which update_dyld_shared_cache will use.
						if ( ! hadSubtract )
							break;
						// fall through
					case ld::Fixup::kindStoreX86PCRel32:
					case ld::Fixup::kindStoreX86PCRel32_1:
					case ld::Fixup::kindStoreX86PCRel32_2:
					case ld::Fixup::kindStoreX86PCRel32_4:
					case ld::Fixup::kindStoreX86PCRel32GOTLoad:
					case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
					case ld::Fixup::kindStoreX86PCRel32GOT:
					case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
					case ld::Fixup::kindStoreARMLow16:
					case ld::Fixup::kindStoreThumbLow16:
#if SUPPORT_ARCH_arm64
					case ld::Fixup::kindStoreARM64Page21:
					case ld::Fixup::kindStoreARM64GOTLoadPage21:
					case ld::Fixup::kindStoreARM64GOTLeaPage21:
					case ld::Fixup::kindStoreARM64TLVPLoadPage21:
					case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
					case ld::Fixup::kindStoreTargetAddressARM64Page21:
					case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
					case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
					case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
					case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
					case ld::Fixup::kindStoreARM64PCRelToGOT:
#endif
						assert(target != NULL);
						// only cross-segment references need a split-seg entry
						if ( strcmp(sect->segmentName(), target->section().segmentName()) != 0 ) {
							_splitSegInfos.push_back(SplitSegInfoEntry(atom->finalAddress()+fit->offsetInAtom,fit->kind));
						}
						break;
					case ld::Fixup::kindStoreARMHigh16:
					case ld::Fixup::kindStoreThumbHigh16:
						assert(target != NULL);
						if ( strcmp(sect->segmentName(), target->section().segmentName()) != 0 ) {
							// hi16 needs to know upper 4-bits of low16 to compute carry
							uint32_t extra = (accumulator >> 12) & 0xF;
							_splitSegInfos.push_back(SplitSegInfoEntry(atom->finalAddress()+fit->offsetInAtom,fit->kind, extra));
						}
						break;
					case ld::Fixup::kindSetTargetImageOffset:
						accumulator = addressOf(state, fit, &target);
						assert(target != NULL);
						// image-offsets behave like a subtract of the image base
						hadSubtract = true;
						break;
					default:
						break;
				}
			}
		}
	}
}
4699
// Builds the v2 split-seg info table for images eligible for the dyld shared
// cache.  Walks every fixup cluster of every atom in every non-hidden section
// and records (fromSection, fromOffset, toSection, toOffset, kind) entries
// describing references the shared-cache builder must adjust when it slides
// sections independently of each other.
void OutputFile::makeSplitSegInfoV2(ld::Internal& state)
{
	static const bool log = false;
	// only images destined for the shared region need split-seg info
	if ( !_options.sharedRegionEligible() )
		return;

	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->isSectionHidden() )
			continue;
		// PC-relative stores within a single code section need no adjustment entry
		bool codeSection = (sect->type() == ld::Section::typeCode);
		if (log) fprintf(stderr, "sect: %s, address=0x%llX\n", sect->sectionName(), sect->address);
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			const ld::Atom* target = NULL;
			const ld::Atom* fromTarget = NULL;
			uint32_t picBase = 0;		// NOTE(review): assigned below but never read — dead store?
			uint64_t accumulator = 0;
			bool thumbTarget;
			bool hadSubtract = false;
			uint8_t fromSectionIndex = atom->machoSection();
			uint8_t toSectionIndex;
			uint8_t kind = 0;
			uint64_t fromOffset = 0;
			uint64_t toOffset = 0;
			uint64_t addend = 0;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->firstInCluster() ) {
					// reset per-cluster state; 255 marks "no target section seen yet"
					target = NULL;
					hadSubtract = false;
					fromTarget = NULL;
					kind = 0;
					addend = 0;
					toSectionIndex = 255;
					fromOffset = atom->finalAddress() + fit->offsetInAtom - sect->address;
				}
				if ( this->setsTarget(fit->kind) ) {
					accumulator = addressAndTarget(state, fit, &target);
					thumbTarget = targetIsThumb(state, fit);
					// thumb targets carry the low bit set in the stored address
					if ( thumbTarget )
						accumulator |= 1;
					toOffset = accumulator - state.atomToSection[target]->address;
					if ( target->definition() != ld::Atom::definitionProxy ) {
						// mach_header has no ordinary section ordinal; use 0
						if ( target->section().type() == ld::Section::typeMachHeader )
							toSectionIndex = 0;
						else
							toSectionIndex = target->machoSection();
					}
				}
				// classify this cluster's store kind into a DYLD_CACHE_ADJ_V2_* kind
				switch ( fit->kind ) {
					case ld::Fixup::kindSubtractTargetAddress:
						accumulator -= addressAndTarget(state, fit, &fromTarget);
						hadSubtract = true;
						break;
					case ld::Fixup::kindAddAddend:
						accumulator += fit->u.addend;
						addend = fit->u.addend;
						break;
					case ld::Fixup::kindSubtractAddend:
						accumulator -= fit->u.addend;
						picBase = fit->u.addend;
						break;
					case ld::Fixup::kindSetLazyOffset:
						break;
					case ld::Fixup::kindStoreBigEndian32:
					case ld::Fixup::kindStoreLittleEndian32:
					case ld::Fixup::kindStoreTargetAddressLittleEndian32:
						// an image-offset cluster already classified stays IMAGE_OFF_32
						if ( kind != DYLD_CACHE_ADJ_V2_IMAGE_OFF_32 ) {
							if ( hadSubtract )
								kind = DYLD_CACHE_ADJ_V2_DELTA_32;
							else
								kind = DYLD_CACHE_ADJ_V2_POINTER_32;
						}
						break;
					case ld::Fixup::kindStoreLittleEndian64:
					case ld::Fixup::kindStoreTargetAddressLittleEndian64:
						if ( hadSubtract )
							kind = DYLD_CACHE_ADJ_V2_DELTA_64;
						else
							kind = DYLD_CACHE_ADJ_V2_POINTER_64;
						break;
					case ld::Fixup::kindStoreX86PCRel32:
					case ld::Fixup::kindStoreX86PCRel32_1:
					case ld::Fixup::kindStoreX86PCRel32_2:
					case ld::Fixup::kindStoreX86PCRel32_4:
					case ld::Fixup::kindStoreX86PCRel32GOTLoad:
					case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
					case ld::Fixup::kindStoreX86PCRel32GOT:
					case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
#if SUPPORT_ARCH_arm64
					case ld::Fixup::kindStoreARM64PCRelToGOT:
#endif
						// PC-rel references only need adjusting when they cross
						// sections (or live in a non-code section)
						if ( (fromSectionIndex != toSectionIndex) || !codeSection )
							kind = DYLD_CACHE_ADJ_V2_DELTA_32;
						break;
#if SUPPORT_ARCH_arm64
					case ld::Fixup::kindStoreARM64Page21:
					case ld::Fixup::kindStoreARM64GOTLoadPage21:
					case ld::Fixup::kindStoreARM64GOTLeaPage21:
					case ld::Fixup::kindStoreARM64TLVPLoadPage21:
					case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
					case ld::Fixup::kindStoreTargetAddressARM64Page21:
					case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
					case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
					case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
					case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
						if ( fromSectionIndex != toSectionIndex )
							kind = DYLD_CACHE_ADJ_V2_ARM64_ADRP;
						break;
					case ld::Fixup::kindStoreARM64PageOff12:
					case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
					case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
					case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
					case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
					case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
					case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
					case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
						if ( fromSectionIndex != toSectionIndex )
							kind = DYLD_CACHE_ADJ_V2_ARM64_OFF12;
						break;
					case ld::Fixup::kindStoreARM64Branch26:
					case ld::Fixup::kindStoreTargetAddressARM64Branch26:
						if ( fromSectionIndex != toSectionIndex )
							kind = DYLD_CACHE_ADJ_V2_ARM64_BR26;
						break;
#endif
					case ld::Fixup::kindStoreARMHigh16:
					case ld::Fixup::kindStoreARMLow16:
						// only movw/movt pairs computing their own pic base need adjusting
						if ( (fromSectionIndex != toSectionIndex) && (fromTarget == atom) ) {
							kind = DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT;
						}
						break;
					case ld::Fixup::kindStoreARMBranch24:
					case ld::Fixup::kindStoreTargetAddressARMBranch24:
						if ( fromSectionIndex != toSectionIndex )
							kind = DYLD_CACHE_ADJ_V2_ARM_BR24;
						break;
					case ld::Fixup::kindStoreThumbLow16:
					case ld::Fixup::kindStoreThumbHigh16:
						if ( (fromSectionIndex != toSectionIndex) && (fromTarget == atom) ) {
							kind = DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT;
						}
						break;
					case ld::Fixup::kindStoreThumbBranch22:
					case ld::Fixup::kindStoreTargetAddressThumbBranch22:
						if ( fromSectionIndex != toSectionIndex )
							kind = DYLD_CACHE_ADJ_V2_THUMB_BR22;
						break;
					case ld::Fixup::kindSetTargetImageOffset:
						kind = DYLD_CACHE_ADJ_V2_IMAGE_OFF_32;
						accumulator = addressAndTarget(state, fit, &target);
						assert(target != NULL);
						toSectionIndex = target->machoSection();
						toOffset = accumulator - state.atomToSection[target]->address;
						hadSubtract = true;
						break;
					default:
						break;
				}
				if ( fit->lastInCluster() ) {
					// emit one entry per cluster that was classified and whose
					// target is defined in this image (proxies are bound by dyld)
					if ( (kind != 0) && (target != NULL) && (target->definition() != ld::Atom::definitionProxy) ) {
						if ( !hadSubtract && addend )
							toOffset += addend;
						assert(toSectionIndex != 255);
						if (log) fprintf(stderr, "from (%d.%s + 0x%llX) to (%d.%s + 0x%llX), kind=%d, atomAddr=0x%llX, sectAddr=0x%llx\n",
							fromSectionIndex, sect->sectionName(), fromOffset, toSectionIndex, state.atomToSection[target]->sectionName(),
							toOffset, kind, atom->finalAddress(), sect->address);
						_splitSegV2Infos.push_back(SplitSegInfoV2Entry(fromSectionIndex, fromOffset, toSectionIndex, toOffset, kind));
					}
				}
			}
		}
	}
}
4880
4881
// Writes the -map file: the output path and architecture, a numbered table of
// input object files, a table of output sections, a table of all symbols
// (with synthesized names for cstring/FDE/non-lazy-pointer atoms), and — when
// dead stripping — a trailing table of dead-stripped symbols.
void OutputFile::writeMapFile(ld::Internal& state)
{
	if ( _options.generatedMapPath() != NULL ) {
		FILE* mapFile = fopen(_options.generatedMapPath(), "w");
		if ( mapFile != NULL ) {
			// write output path
			fprintf(mapFile, "# Path: %s\n", _options.outputFilePath());
			// write output architecture
			fprintf(mapFile, "# Arch: %s\n", _options.architectureName());
			// write UUID
			//if ( fUUIDAtom != NULL ) {
			//	const uint8_t* uuid = fUUIDAtom->getUUID();
			//	fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
			//		uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
			//		uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
			//}
			// write table of object files: map each originating file to an ordinal
			std::map<const ld::File*, ld::File::Ordinal> readerToOrdinal;
			std::map<ld::File::Ordinal, const ld::File*> ordinalToReader;
			std::map<const ld::File*, uint32_t> readerToFileOrdinal;
			// collect files contributing live atoms
			for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
				ld::Internal::FinalSection* sect = *sit;
				if ( sect->isSectionHidden() )
					continue;
				for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
					const ld::Atom* atom = *ait;
					const ld::File* reader = atom->originalFile();
					if ( reader == NULL )
						continue;
					ld::File::Ordinal readerOrdinal = reader->ordinal();
					std::map<const ld::File*, ld::File::Ordinal>::iterator pos = readerToOrdinal.find(reader);
					if ( pos == readerToOrdinal.end() ) {
						readerToOrdinal[reader] = readerOrdinal;
						ordinalToReader[readerOrdinal] = reader;
					}
				}
			}
			// also collect files contributing only dead-stripped atoms, so the
			// dead-stripped table below can reference them
			for (const ld::Atom* atom : state.deadAtoms) {
				const ld::File* reader = atom->originalFile();
				if ( reader == NULL )
					continue;
				ld::File::Ordinal readerOrdinal = reader->ordinal();
				std::map<const ld::File*, ld::File::Ordinal>::iterator pos = readerToOrdinal.find(reader);
				if ( pos == readerToOrdinal.end() ) {
					readerToOrdinal[reader] = readerOrdinal;
					ordinalToReader[readerOrdinal] = reader;
				}
			}
			// ordinal [0] is reserved for linker-synthesized atoms
			fprintf(mapFile, "# Object files:\n");
			fprintf(mapFile, "[%3u] %s\n", 0, "linker synthesized");
			uint32_t fileIndex = 1;
			for(std::map<ld::File::Ordinal, const ld::File*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
				fprintf(mapFile, "[%3u] %s\n", fileIndex, it->second->path());
				readerToFileOrdinal[it->second] = fileIndex++;
			}
			// write table of sections
			fprintf(mapFile, "# Sections:\n");
			fprintf(mapFile, "# Address\tSize    \tSegment\tSection\n");
			for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
				ld::Internal::FinalSection* sect = *sit;
				if ( sect->isSectionHidden() )
					continue;
				fprintf(mapFile, "0x%08llX\t0x%08llX\t%s\t%s\n", sect->address, sect->size,
					sect->segmentName(), sect->sectionName());
			}
			// write table of symbols
			fprintf(mapFile, "# Symbols:\n");
			fprintf(mapFile, "# Address\tSize    \tFile  Name\n");
			for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
				ld::Internal::FinalSection* sect = *sit;
				if ( sect->isSectionHidden() )
					continue;
				//bool isCstring = (sect->type() == ld::Section::typeCString);
				for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
					char buffer[4096];
					const ld::Atom* atom = *ait;
					const char* name = atom->name();
					// don't add auto-stripped aliases to .map file
					if ( (atom->size() == 0) && (atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages) )
						continue;
					if ( atom->contentType() == ld::Atom::typeCString ) {
						// synthesize a readable name: the literal with '\n' escaped;
						// e stops 2 bytes short of the end so a 2-char escape plus
						// the final terminator always fit
						strcpy(buffer, "literal string: ");
						const char* s = (char*)atom->rawContentPointer();
						char* e = &buffer[4094];
						for (char* b = &buffer[strlen(buffer)]; b < e;) {
							char c = *s++;
							if ( c == '\n' ) {
								*b++ = '\\';
								*b++ = 'n';
							}
							else {
								*b++ = c;
							}
							if ( c == '\0' )
								break;
						}
						buffer[4095] = '\0';
						name = buffer;
					}
					else if ( (atom->contentType() == ld::Atom::typeCFI) && (strcmp(name, "FDE") == 0) ) {
						// name the FDE after the function it describes
						for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
							if ( (fit->kind == ld::Fixup::kindSetTargetAddress) && (fit->clusterSize == ld::Fixup::k1of4) ) {
								if ( (fit->binding == ld::Fixup::bindingDirectlyBound)
								 && (fit->u.target->section().type() == ld::Section::typeCode) ) {
									strcpy(buffer, "FDE for: ");
									strlcat(buffer, fit->u.target->name(), 4096);
									name = buffer;
								}
							}
						}
					}
					else if ( atom->contentType() == ld::Atom::typeNonLazyPointer ) {
						// name the pointer after what it points to
						strcpy(buffer, "non-lazy-pointer");
						for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
							if ( fit->binding == ld::Fixup::bindingsIndirectlyBound ) {
								strcpy(buffer, "non-lazy-pointer-to: ");
								strlcat(buffer, state.indirectBindingTable[fit->u.bindingIndex]->name(), 4096);
								break;
							}
							else if ( fit->binding == ld::Fixup::bindingDirectlyBound ) {
								strcpy(buffer, "non-lazy-pointer-to-local: ");
								strlcat(buffer, fit->u.target->name(), 4096);
								break;
							}
						}
						name = buffer;
					}
					fprintf(mapFile, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom->finalAddress(), atom->size(),
						readerToFileOrdinal[atom->originalFile()], name);
				}
			}
			// preload check is hack until 26613948 is fixed
			if ( _options.deadCodeStrip() && (_options.outputKind() != Options::kPreload) ) {
				fprintf(mapFile, "\n");
				fprintf(mapFile, "# Dead Stripped Symbols:\n");
				fprintf(mapFile, "#        \tSize    \tFile  Name\n");
				for (const ld::Atom* atom : state.deadAtoms) {
					char buffer[4096];
					const char* name = atom->name();
					// don't add auto-stripped aliases to .map file
					if ( (atom->size() == 0) && (atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages) )
						continue;
					if ( atom->contentType() == ld::Atom::typeCString ) {
						// same literal-escaping scheme as in the live-symbols table above
						strcpy(buffer, "literal string: ");
						const char* s = (char*)atom->rawContentPointer();
						char* e = &buffer[4094];
						for (char* b = &buffer[strlen(buffer)]; b < e;) {
							char c = *s++;
							if ( c == '\n' ) {
								*b++ = '\\';
								*b++ = 'n';
							}
							else {
								*b++ = c;
							}
							if ( c == '\0' )
								break;
						}
						buffer[4095] = '\0';
						name = buffer;
					}
					fprintf(mapFile, "<<dead>> \t0x%08llX\t[%3u] %s\n", atom->size(),
							readerToFileOrdinal[atom->originalFile()], name);
				}
			}
			fclose(mapFile);
		}
		else {
			warning("could not write map file: %s\n", _options.generatedMapPath());
		}
	}
}
5054
5055 void OutputFile::writeJSONEntry(ld::Internal& state)
5056 {
5057 if ( _options.traceEmitJSON() && (_options.UUIDMode() != Options::kUUIDNone) && (_options.traceOutputFile() != NULL) ) {
5058
5059 // Convert the UUID to a string.
5060 const uint8_t* uuid = _headersAndLoadCommandAtom->getUUID();
5061 uuid_string_t uuidString;
5062
5063 uuid_unparse(uuid, uuidString);
5064
5065 // Enumerate the dylibs.
5066 std::vector<const ld::dylib::File*> dynamicList;
5067 std::vector<const ld::dylib::File*> upwardList;
5068 std::vector<const ld::dylib::File*> reexportList;
5069
5070 for (const ld::dylib::File* dylib : _dylibsToLoad) {
5071
5072 if (dylib->willBeUpwardDylib()) {
5073
5074 upwardList.push_back(dylib);
5075 } else if (dylib->willBeReExported()) {
5076
5077 reexportList.push_back(dylib);
5078 } else {
5079
5080 dynamicList.push_back(dylib);
5081 }
5082 }
5083
5084 /*
5085 * Build the JSON entry.
5086 */
5087
5088 std::string jsonEntry = "{";
5089
5090 jsonEntry += "\"uuid\":\"" + std::string(uuidString) + "\",";
5091
5092 // installPath() returns -final_output for non-dylibs
5093 const char* lastNameSlash = strrchr(_options.installPath(), '/');
5094 const char* leafName = (lastNameSlash != NULL) ? lastNameSlash+1 : _options.outputFilePath();
5095 jsonEntry += "\"name\":\"" + std::string(leafName) + "\",";
5096
5097 jsonEntry += "\"arch\":\"" + std::string(_options.architectureName()) + "\"";
5098
5099 if (dynamicList.size() > 0) {
5100 jsonEntry += ",\"dynamic\":[";
5101 for (const ld::dylib::File* dylib : dynamicList) {
5102 jsonEntry += "\"" + std::string(dylib->path()) + "\"";
5103 if ((dylib != dynamicList.back())) {
5104 jsonEntry += ",";
5105 }
5106 }
5107 jsonEntry += "]";
5108 }
5109
5110 if (upwardList.size() > 0) {
5111 jsonEntry += ",\"upward-dynamic\":[";
5112 for (const ld::dylib::File* dylib : upwardList) {
5113 jsonEntry += "\"" + std::string(dylib->path()) + "\"";
5114 if ((dylib != upwardList.back())) {
5115 jsonEntry += ",";
5116 }
5117 }
5118 jsonEntry += "]";
5119 }
5120
5121 if (reexportList.size() > 0) {
5122 jsonEntry += ",\"re-exports\":[";
5123 for (const ld::dylib::File* dylib : reexportList) {
5124 jsonEntry += "\"" + std::string(dylib->path()) + "\"";
5125 if ((dylib != reexportList.back())) {
5126 jsonEntry += ",";
5127 }
5128 }
5129 jsonEntry += "]";
5130 }
5131
5132 if (state.archivePaths.size() > 0) {
5133 jsonEntry += ",\"archives\":[";
5134 for (const std::string& archivePath : state.archivePaths) {
5135 jsonEntry += "\"" + std::string(archivePath) + "\"";
5136 if ((archivePath != state.archivePaths.back())) {
5137 jsonEntry += ",";
5138 }
5139 }
5140 jsonEntry += "]";
5141 }
5142 jsonEntry += "}\n";
5143
5144 // Write the JSON entry to the trace file.
5145 std::ofstream out(_options.traceOutputFile(), ios::app);
5146 out << jsonEntry;
5147 }
5148 }
5149
5150 // used to sort atoms with debug notes
5151 class DebugNoteSorter
5152 {
5153 public:
5154 bool operator()(const ld::Atom* left, const ld::Atom* right) const
5155 {
5156 // first sort by reader
5157 ld::File::Ordinal leftFileOrdinal = left->file()->ordinal();
5158 ld::File::Ordinal rightFileOrdinal = right->file()->ordinal();
5159 if ( leftFileOrdinal!= rightFileOrdinal)
5160 return (leftFileOrdinal < rightFileOrdinal);
5161
5162 // then sort by atom objectAddress
5163 uint64_t leftAddr = left->finalAddress();
5164 uint64_t rightAddr = right->finalAddress();
5165 return leftAddr < rightAddr;
5166 }
5167 };
5168
5169
5170 const char* OutputFile::assureFullPath(const char* path)
5171 {
5172 if ( path[0] == '/' )
5173 return path;
5174 char cwdbuff[MAXPATHLEN];
5175 if ( getcwd(cwdbuff, MAXPATHLEN) != NULL ) {
5176 char* result;
5177 asprintf(&result, "%s/%s", cwdbuff, path);
5178 if ( result != NULL )
5179 return result;
5180 }
5181 return path;
5182 }
5183
// Returns the last-modification time of 'path', or 0 if it cannot be stat()ed.
static time_t fileModTime(const char* path) {
	struct stat info;
	return ( ::stat(path, &info) == 0 ) ? info.st_mtime : 0;
}
5191
5192
// Synthesizes the "debug notes" (stab nlist entries) for the output symbol
// table: per-translation-unit SO/OSO bracketing, BNSYM/FUN/ENSYM around code
// atoms, STSYM/GSYM for data atoms, SOL entries for included sources, N_AST
// entries for -add_ast_path, and finally copies through stabs that came from
// .o files compiled with stabs debug info.
void OutputFile::synthesizeDebugNotes(ld::Internal& state)
{
	// -S means don't synthesize debug map
	if ( _options.debugInfoStripping() == Options::kDebugInfoNone )
		return;
	// make a vector of atoms that come from files compiled with dwarf debug info
	std::vector<const ld::Atom*> atomsNeedingDebugNotes;
	std::set<const ld::Atom*> atomsWithStabs;
	atomsNeedingDebugNotes.reserve(1024);
	// objFile/objFileHasDwarf/objFileHasStabs cache the classification of the
	// most recently seen file, since atoms from one file tend to be adjacent
	const ld::relocatable::File* objFile = NULL;
	bool objFileHasDwarf = false;
	bool objFileHasStabs = false;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			// no stabs for atoms that would not be in the symbol table
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotIn )
				continue;
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages )
				continue;
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel )
				continue;
			// no stabs for absolute symbols
			if ( atom->definition() == ld::Atom::definitionAbsolute )
				continue;
			// no stabs for .eh atoms
			if ( atom->contentType() == ld::Atom::typeCFI )
				continue;
			// no stabs for string literal atoms
			if ( atom->contentType() == ld::Atom::typeCString )
				continue;
			// no stabs for kernel dtrace probes
			if ( (_options.outputKind() == Options::kStaticExecutable) && (strncmp(atom->name(), "__dtrace_probe$", 15) == 0) )
				continue;
			const ld::File* file = atom->file();
			if ( file != NULL ) {
				if ( file != objFile ) {
					// new file: re-classify its debug info format
					objFileHasDwarf = false;
					objFileHasStabs = false;
					objFile = dynamic_cast<const ld::relocatable::File*>(file);
					if ( objFile != NULL ) {
						switch ( objFile->debugInfo() ) {
							case ld::relocatable::File::kDebugInfoNone:
								break;
							case ld::relocatable::File::kDebugInfoDwarf:
								objFileHasDwarf = true;
								break;
							case ld::relocatable::File::kDebugInfoStabs:
							case ld::relocatable::File::kDebugInfoStabsUUID:
								objFileHasStabs = true;
								break;
						}
					}
				}
				if ( objFileHasDwarf )
					atomsNeedingDebugNotes.push_back(atom);
				if ( objFileHasStabs )
					atomsWithStabs.insert(atom);
			}
		}
	}

	// sort by file ordinal then atom ordinal
	std::sort(atomsNeedingDebugNotes.begin(), atomsNeedingDebugNotes.end(), DebugNoteSorter());

	// <rdar://problem/17689030> Add -add_ast_path option to linker which add N_AST stab entry to output
	const std::vector<const char*>& astPaths = _options.astFilePaths();
	for (std::vector<const char*>::const_iterator it=astPaths.begin(); it != astPaths.end(); it++) {
		const char* path = *it;
		// emit N_AST
		ld::relocatable::File::Stab astStab;
		astStab.atom = NULL;
		astStab.type = N_AST;
		astStab.other = 0;
		astStab.desc = 0;
		astStab.value = fileModTime(path);
		astStab.string = path;
		state.stabs.push_back(astStab);
	}

	// synthesize "debug notes" and add them to master stabs vector
	const char* dirPath = NULL;
	const char* filename = NULL;
	bool wroteStartSO = false;
	state.stabs.reserve(atomsNeedingDebugNotes.size()*4);
	std::unordered_set<const char*, CStringHash, CStringEquals> seenFiles;
	for (std::vector<const ld::Atom*>::iterator it=atomsNeedingDebugNotes.begin(); it != atomsNeedingDebugNotes.end(); it++) {
		const ld::Atom* atom = *it;
		const ld::File* atomFile = atom->file();
		const ld::relocatable::File* atomObjFile = dynamic_cast<const ld::relocatable::File*>(atomFile);
		//fprintf(stderr, "debug note for %s\n", atom->name());
		const char* newPath = atom->translationUnitSource();
		if ( newPath != NULL ) {
			const char* newDirPath;
			const char* newFilename;
			const char* lastSlash = strrchr(newPath, '/');
			if ( lastSlash == NULL )
				continue;
			// newFilename points into newPath; newDirPath is a truncated copy
			// (strdup is intentionally never freed — stabs reference it)
			newFilename = lastSlash+1;
			char* temp = strdup(newPath);
			newDirPath = temp;
			// gdb like directory SO's to end in '/', but dwarf DW_AT_comp_dir usually does not have trailing '/'
			temp[lastSlash-newPath+1] = '\0';
			// need SO's whenever the translation unit source file changes
			if ( (filename == NULL) || (strcmp(newFilename,filename) != 0) || (strcmp(newDirPath,dirPath) != 0)) {
				if ( filename != NULL ) {
					// translation unit change, emit ending SO
					ld::relocatable::File::Stab endFileStab;
					endFileStab.atom = NULL;
					endFileStab.type = N_SO;
					endFileStab.other = 1;
					endFileStab.desc = 0;
					endFileStab.value = 0;
					endFileStab.string = "";
					state.stabs.push_back(endFileStab);
				}
				// new translation unit, emit start SO's
				ld::relocatable::File::Stab dirPathStab;
				dirPathStab.atom = NULL;
				dirPathStab.type = N_SO;
				dirPathStab.other = 0;
				dirPathStab.desc = 0;
				dirPathStab.value = 0;
				dirPathStab.string = newDirPath;
				state.stabs.push_back(dirPathStab);
				ld::relocatable::File::Stab fileStab;
				fileStab.atom = NULL;
				fileStab.type = N_SO;
				fileStab.other = 0;
				fileStab.desc = 0;
				fileStab.value = 0;
				fileStab.string = newFilename;
				state.stabs.push_back(fileStab);
				// Synthesize OSO for start of file
				ld::relocatable::File::Stab objStab;
				objStab.atom = NULL;
				objStab.type = N_OSO;
				// <rdar://problem/6337329> linker should put cpusubtype in n_sect field of nlist entry for N_OSO debug note entries
				objStab.other = atomFile->cpuSubType();
				objStab.desc = 1;
				if ( atomObjFile != NULL ) {
					objStab.string = assureFullPath(atomObjFile->debugInfoPath());
					objStab.value = atomObjFile->debugInfoModificationTime();
				}
				else {
					objStab.string = assureFullPath(atomFile->path());
					objStab.value = atomFile->modificationTime();
				}
				state.stabs.push_back(objStab);
				wroteStartSO = true;
				// add the source file path to seenFiles so it does not show up in SOLs
				seenFiles.insert(newFilename);
				char* fullFilePath;
				asprintf(&fullFilePath, "%s%s", newDirPath, newFilename);
				// add both leaf path and full path
				seenFiles.insert(fullFilePath);
			}
			filename = newFilename;
			dirPath = newDirPath;
			if ( atom->section().type() == ld::Section::typeCode ) {
				// Synthesize BNSYM and start FUN stabs
				ld::relocatable::File::Stab beginSym;
				beginSym.atom = atom;
				beginSym.type = N_BNSYM;
				beginSym.other = 1;
				beginSym.desc = 0;
				beginSym.value = 0;
				beginSym.string = "";
				state.stabs.push_back(beginSym);
				ld::relocatable::File::Stab startFun;
				startFun.atom = atom;
				startFun.type = N_FUN;
				startFun.other = 1;
				startFun.desc = 0;
				startFun.value = 0;
				startFun.string = atom->name();
				state.stabs.push_back(startFun);
				// Synthesize any SOL stabs needed (one per distinct included source)
				const char* curFile = NULL;
				for (ld::Atom::LineInfo::iterator lit = atom->beginLineInfo(); lit != atom->endLineInfo(); ++lit) {
					// pointer compare is a fast-path; seenFiles compares by content
					if ( lit->fileName != curFile ) {
						if ( seenFiles.count(lit->fileName) == 0 ) {
							seenFiles.insert(lit->fileName);
							ld::relocatable::File::Stab sol;
							sol.atom = 0;
							sol.type = N_SOL;
							sol.other = 0;
							sol.desc = 0;
							sol.value = 0;
							sol.string = lit->fileName;
							state.stabs.push_back(sol);
						}
						curFile = lit->fileName;
					}
				}
				// Synthesize end FUN and ENSYM stabs
				ld::relocatable::File::Stab endFun;
				endFun.atom = atom;
				endFun.type = N_FUN;
				endFun.other = 0;
				endFun.desc = 0;
				endFun.value = 0;
				endFun.string = "";
				state.stabs.push_back(endFun);
				ld::relocatable::File::Stab endSym;
				endSym.atom = atom;
				endSym.type = N_ENSYM;
				endSym.other = 1;
				endSym.desc = 0;
				endSym.value = 0;
				endSym.string = "";
				state.stabs.push_back(endSym);
			}
			else {
				ld::relocatable::File::Stab globalsStab;
				const char* name = atom->name();
				if ( atom->scope() == ld::Atom::scopeTranslationUnit ) {
					// Synthesize STSYM stab for statics
					globalsStab.atom = atom;
					globalsStab.type = N_STSYM;
					globalsStab.other = 1;
					globalsStab.desc = 0;
					globalsStab.value = 0;
					globalsStab.string = name;
					state.stabs.push_back(globalsStab);
				}
				else {
					// Synthesize GSYM stab for other globals
					globalsStab.atom = atom;
					globalsStab.type = N_GSYM;
					globalsStab.other = 1;
					globalsStab.desc = 0;
					globalsStab.value = 0;
					globalsStab.string = name;
					state.stabs.push_back(globalsStab);
				}
			}
		}
	}

	if ( wroteStartSO ) {
		// emit ending SO
		ld::relocatable::File::Stab endFileStab;
		endFileStab.atom = NULL;
		endFileStab.type = N_SO;
		endFileStab.other = 1;
		endFileStab.desc = 0;
		endFileStab.value = 0;
		endFileStab.string = "";
		state.stabs.push_back(endFileStab);
	}

	// copy any stabs from .o file
	std::set<const ld::File*> filesSeenWithStabs;
	for (std::set<const ld::Atom*>::iterator it=atomsWithStabs.begin(); it != atomsWithStabs.end(); it++) {
		const ld::Atom* atom = *it;
		objFile = dynamic_cast<const ld::relocatable::File*>(atom->file());
		if ( objFile != NULL ) {
			if ( filesSeenWithStabs.count(objFile) == 0 ) {
				filesSeenWithStabs.insert(objFile);
				const std::vector<ld::relocatable::File::Stab>* stabs = objFile->stabs();
				if ( stabs != NULL ) {
					for(std::vector<ld::relocatable::File::Stab>::const_iterator sit = stabs->begin(); sit != stabs->end(); ++sit) {
						ld::relocatable::File::Stab stab = *sit;
						// ignore stabs associated with atoms that were dead stripped or coalesced away
						if ( (sit->atom != NULL) && (atomsWithStabs.count(sit->atom) == 0) )
							continue;
						// <rdar://problem/8284718> Value of N_SO stabs should be address of first atom from translation unit
						if ( (stab.type == N_SO) && (stab.string != NULL) && (stab.string[0] != '\0') ) {
							stab.atom = atom;
						}
						state.stabs.push_back(stab);
					}
				}
			}
		}
	}

}
5473
5474
5475 } // namespace tool
5476 } // namespace ld
5477