[apple/ld64.git] / src / ld / OutputFile.cpp
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-*
2 *
3 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <stdlib.h>
27 #include <sys/types.h>
28 #include <sys/stat.h>
29 #include <sys/mman.h>
30 #include <sys/sysctl.h>
31 #include <sys/param.h>
32 #include <sys/mount.h>
33 #include <fcntl.h>
34 #include <errno.h>
35 #include <limits.h>
36 #include <unistd.h>
37 #include <mach/mach_time.h>
38 #include <mach/vm_statistics.h>
39 #include <mach/mach_init.h>
40 #include <mach/mach_host.h>
41 #include <uuid/uuid.h>
42 #include <dlfcn.h>
43 #include <mach-o/dyld.h>
44 #include <mach-o/fat.h>
45
46 #include <string>
47 #include <map>
48 #include <set>
49 #include <string>
50 #include <vector>
51 #include <list>
52 #include <algorithm>
53 #include <unordered_set>
54 #include <utility>
55 #include <iostream>
56 #include <fstream>
57
58 #include <CommonCrypto/CommonDigest.h>
59 #include <AvailabilityMacros.h>
60
61 #include "MachOTrie.hpp"
62
63 #include "Options.h"
64
65 #include "OutputFile.h"
66 #include "Architectures.hpp"
67 #include "HeaderAndLoadCommands.hpp"
68 #include "LinkEdit.hpp"
69 #include "LinkEditClassic.hpp"
70
71 namespace ld {
72 namespace tool {
73
74 uint32_t sAdrpNA = 0;
75 uint32_t sAdrpNoped = 0;
76 uint32_t sAdrpNotNoped = 0;
77
78
79 OutputFile::OutputFile(const Options& opts)
80 :
81 usesWeakExternalSymbols(false), overridesWeakExternalSymbols(false),
82 _noReExportedDylibs(false), pieDisabled(false), hasDataInCode(false),
83 headerAndLoadCommandsSection(NULL),
84 rebaseSection(NULL), bindingSection(NULL), weakBindingSection(NULL),
85 lazyBindingSection(NULL), exportSection(NULL),
86 splitSegInfoSection(NULL), functionStartsSection(NULL),
87 dataInCodeSection(NULL), optimizationHintsSection(NULL),
88 symbolTableSection(NULL), stringPoolSection(NULL),
89 localRelocationsSection(NULL), externalRelocationsSection(NULL),
90 sectionRelocationsSection(NULL),
91 indirectSymbolTableSection(NULL),
92 _options(opts),
93 _hasDyldInfo(opts.makeCompressedDyldInfo()),
94 _hasSymbolTable(true),
95 _hasSectionRelocations(opts.outputKind() == Options::kObjectFile),
96 _hasSplitSegInfo(opts.sharedRegionEligible()),
97 _hasFunctionStartsInfo(opts.addFunctionStarts()),
98 _hasDataInCodeInfo(opts.addDataInCodeInfo()),
99 _hasDynamicSymbolTable(true),
100 _hasLocalRelocations(!opts.makeCompressedDyldInfo()),
101 _hasExternalRelocations(!opts.makeCompressedDyldInfo()),
102 _hasOptimizationHints(opts.outputKind() == Options::kObjectFile),
103 _encryptedTEXTstartOffset(0),
104 _encryptedTEXTendOffset(0),
105 _localSymbolsStartIndex(0),
106 _localSymbolsCount(0),
107 _globalSymbolsStartIndex(0),
108 _globalSymbolsCount(0),
109 _importSymbolsStartIndex(0),
110 _importSymbolsCount(0),
111 _sectionsRelocationsAtom(NULL),
112 _localRelocsAtom(NULL),
113 _externalRelocsAtom(NULL),
114 _symbolTableAtom(NULL),
115 _indirectSymbolTableAtom(NULL),
116 _rebasingInfoAtom(NULL),
117 _bindingInfoAtom(NULL),
118 _lazyBindingInfoAtom(NULL),
119 _weakBindingInfoAtom(NULL),
120 _exportInfoAtom(NULL),
121 _splitSegInfoAtom(NULL),
122 _functionStartsAtom(NULL),
123 _dataInCodeAtom(NULL),
124 _optimizationHintsAtom(NULL)
125 {
126 }
127
128 void OutputFile::dumpAtomsBySection(ld::Internal& state, bool printAtoms)
129 {
130 fprintf(stderr, "SORTED:\n");
131 for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
132 fprintf(stderr, "final section %p %s/%s %s start addr=0x%08llX, size=0x%08llX, alignment=%02d, fileOffset=0x%08llX\n",
133 (*it), (*it)->segmentName(), (*it)->sectionName(), (*it)->isSectionHidden() ? "(hidden)" : "",
134 (*it)->address, (*it)->size, (*it)->alignment, (*it)->fileOffset);
135 if ( printAtoms ) {
136 std::vector<const ld::Atom*>& atoms = (*it)->atoms;
137 for (std::vector<const ld::Atom*>::iterator ait = atoms.begin(); ait != atoms.end(); ++ait) {
138 fprintf(stderr, " %p (0x%04llX) %s\n", *ait, (*ait)->size(), (*ait)->name());
139 }
140 }
141 }
142 fprintf(stderr, "DYLIBS:\n");
143 for (std::vector<ld::dylib::File*>::iterator it=state.dylibs.begin(); it != state.dylibs.end(); ++it )
144 fprintf(stderr, " %s\n", (*it)->installPath());
145 }
146
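// Overview of the output pipeline as implemented below: map dylib ordinals,
// create load-command and linkedit atoms, size and place sections, assign atom
// addresses, build debug notes and the symbol table, generate linkedit content
// (dyld info or classic relocations, split-seg, function starts, data-in-code),
// then write the mach-o file plus the optional map and JSON files.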
147 void OutputFile::write(ld::Internal& state)
148 {
149 this->buildDylibOrdinalMapping(state);
150 this->addLoadCommands(state);
151 this->addLinkEdit(state);
152 state.setSectionSizesAndAlignments();
153 this->setLoadCommandsPadding(state);
154 _fileSize = state.assignFileOffsets();
155 this->assignAtomAddresses(state);
156 this->synthesizeDebugNotes(state);
157 this->buildSymbolTable(state);
158 this->generateLinkEditInfo(state);
159 if ( _options.sharedRegionEncodingV2() )
160 this->makeSplitSegInfoV2(state);
161 else
162 this->makeSplitSegInfo(state);
163 this->updateLINKEDITAddresses(state);
164 //this->dumpAtomsBySection(state, false);
165 this->writeOutputFile(state);
166 this->writeMapFile(state);
167 this->writeJSONEntry(state);
168 }
169
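// Walks the ordered section list, treating each maximal run of sections with the
// same segment name as one segment. When a run ends, if 'addr' falls inside that
// segment's [first-section address, last-section end) range, its bounds and
// zero-based segment index are returned.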
170 bool OutputFile::findSegment(ld::Internal& state, uint64_t addr, uint64_t* start, uint64_t* end, uint32_t* index)
171 {
172 uint32_t segIndex = 0;
173 ld::Internal::FinalSection* segFirstSection = NULL;
174 ld::Internal::FinalSection* lastSection = NULL;
175 for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
176 ld::Internal::FinalSection* sect = *it;
177 if ( (segFirstSection == NULL ) || strcmp(segFirstSection->segmentName(), sect->segmentName()) != 0 ) {
178 if ( segFirstSection != NULL ) {
179 //fprintf(stderr, "findSegment(0x%llX) seg changed to %s\n", addr, sect->segmentName());
180 if ( (addr >= segFirstSection->address) && (addr < lastSection->address+lastSection->size) ) {
181 *start = segFirstSection->address;
182 *end = lastSection->address+lastSection->size;
183 *index = segIndex;
184 return true;
185 }
186 ++segIndex;
187 }
188 segFirstSection = sect;
189 }
190 lastSection = sect;
191 }
192 return false;
193 }
194
195
196 void OutputFile::assignAtomAddresses(ld::Internal& state)
197 {
198 const bool log = false;
199 if ( log ) fprintf(stderr, "assignAtomAddresses()\n");
200 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
201 ld::Internal::FinalSection* sect = *sit;
202 if ( log ) fprintf(stderr, " section=%s/%s\n", sect->segmentName(), sect->sectionName());
203 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
204 const ld::Atom* atom = *ait;
205 switch ( sect-> type() ) {
206 case ld::Section::typeImportProxies:
207 // want finalAddress() of all proxy atoms to be zero
208 (const_cast<ld::Atom*>(atom))->setSectionStartAddress(0);
209 break;
210 case ld::Section::typeAbsoluteSymbols:
211 // want finalAddress() of all absolute atoms to be value of abs symbol
212 (const_cast<ld::Atom*>(atom))->setSectionStartAddress(0);
213 break;
214 case ld::Section::typeLinkEdit:
215 // linkedit layout is assigned later
216 break;
217 default:
218 (const_cast<ld::Atom*>(atom))->setSectionStartAddress(sect->address);
219 if ( log ) fprintf(stderr, " atom=%p, addr=0x%08llX, name=%s\n", atom, atom->finalAddress(), atom->name());
220 break;
221 }
222 }
223 }
224 }
225
226 void OutputFile::updateLINKEDITAddresses(ld::Internal& state)
227 {
228 if ( _options.makeCompressedDyldInfo() ) {
229 // build dyld rebasing info
230 assert(_rebasingInfoAtom != NULL);
231 _rebasingInfoAtom->encode();
232
233 // build dyld binding info
234 assert(_bindingInfoAtom != NULL);
235 _bindingInfoAtom->encode();
236
237 // build dyld lazy binding info
238 assert(_lazyBindingInfoAtom != NULL);
239 _lazyBindingInfoAtom->encode();
240
241 // build dyld weak binding info
242 assert(_weakBindingInfoAtom != NULL);
243 _weakBindingInfoAtom->encode();
244
245 // build dyld export info
246 assert(_exportInfoAtom != NULL);
247 _exportInfoAtom->encode();
248 }
249
250 if ( _options.sharedRegionEligible() ) {
251 // build split seg info
252 assert(_splitSegInfoAtom != NULL);
253 _splitSegInfoAtom->encode();
254 }
255
256 if ( _options.addFunctionStarts() ) {
257 // build function starts info
258 assert(_functionStartsAtom != NULL);
259 _functionStartsAtom->encode();
260 }
261
262 if ( _options.addDataInCodeInfo() ) {
263 // build data-in-code info
264 assert(_dataInCodeAtom != NULL);
265 _dataInCodeAtom->encode();
266 }
267
268 if ( _hasOptimizationHints ) {
269 // build linker-optimization-hint info
270 assert(_optimizationHintsAtom != NULL);
271 _optimizationHintsAtom->encode();
272 }
273
274 // build classic symbol table
275 assert(_symbolTableAtom != NULL);
276 _symbolTableAtom->encode();
277 assert(_indirectSymbolTableAtom != NULL);
278 _indirectSymbolTableAtom->encode();
279
280 // add relocations to .o files
281 if ( _options.outputKind() == Options::kObjectFile ) {
282 assert(_sectionsRelocationsAtom != NULL);
283 _sectionsRelocationsAtom->encode();
284 }
285
286 if ( ! _options.makeCompressedDyldInfo() ) {
287 // build external relocations
288 assert(_externalRelocsAtom != NULL);
289 _externalRelocsAtom->encode();
290 // build local relocations
291 assert(_localRelocsAtom != NULL);
292 _localRelocsAtom->encode();
293 }
294
295 // update address and file offsets now that linkedit content has been generated
296 uint64_t curLinkEditAddress = 0;
297 uint64_t curLinkEditfileOffset = 0;
298 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
299 ld::Internal::FinalSection* sect = *sit;
300 if ( sect->type() != ld::Section::typeLinkEdit )
301 continue;
302 if ( curLinkEditAddress == 0 ) {
303 curLinkEditAddress = sect->address;
304 curLinkEditfileOffset = sect->fileOffset;
305 }
306 uint16_t maxAlignment = 0;
307 uint64_t offset = 0;
308 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
309 const ld::Atom* atom = *ait;
310 //fprintf(stderr, "setting linkedit atom offset for %s\n", atom->name());
311 if ( atom->alignment().powerOf2 > maxAlignment )
312 maxAlignment = atom->alignment().powerOf2;
313 // calculate section offset for this atom
314 uint64_t alignment = 1 << atom->alignment().powerOf2;
315 uint64_t currentModulus = (offset % alignment);
316 uint64_t requiredModulus = atom->alignment().modulus;
317 if ( currentModulus != requiredModulus ) {
318 if ( requiredModulus > currentModulus )
319 offset += requiredModulus-currentModulus;
320 else
321 offset += requiredModulus+alignment-currentModulus;
322 }
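// Example: with alignment 2^3 = 8, modulus 4, and offset 13, currentModulus is 5,
// so offset advances by 4 + 8 - 5 = 7 to 20, and 20 % 8 == 4 as required.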
323 (const_cast<ld::Atom*>(atom))->setSectionOffset(offset);
324 (const_cast<ld::Atom*>(atom))->setSectionStartAddress(curLinkEditAddress);
325 offset += atom->size();
326 }
327 sect->size = offset;
328 // section alignment is that of a contained atom with the greatest alignment
329 sect->alignment = maxAlignment;
330 sect->address = curLinkEditAddress;
331 sect->fileOffset = curLinkEditfileOffset;
332 curLinkEditAddress += sect->size;
333 curLinkEditfileOffset += sect->size;
334 }
335
336 _fileSize = state.sections.back()->fileOffset + state.sections.back()->size;
337 }
338
339
340 void OutputFile::setLoadCommandsPadding(ld::Internal& state)
341 {
342 // In other segments, any extra space is put at the end of the segment.
343 // In __TEXT segment, any extra space is put after load commands to allow post-processing of load commands
344 // Do a reverse layout of __TEXT segment to determine padding size and adjust section size
345 uint64_t paddingSize = 0;
346 switch ( _options.outputKind() ) {
347 case Options::kDyld:
348 // dyld itself has special padding requirements. We want the beginning __text section to start at a stable address
349 assert(strcmp(state.sections[1]->sectionName(),"__text") == 0);
350 state.sections[1]->alignment = 12; // page align __text
351 break;
352 case Options::kObjectFile:
353 // mach-o .o files need no padding between load commands and first section
354 // but leave enough room that the object file could be signed
355 paddingSize = 32;
356 break;
357 case Options::kPreload:
358 // mach-o MH_PRELOAD files need no padding between load commands and first section
359 paddingSize = 0;
360 case Options::kKextBundle:
361 if ( _options.useTextExecSegment() ) {
362 paddingSize = 32;
363 break;
364 }
365 // else fall into default case
366 default:
367 // work backwards from end of segment and lay out sections so that extra room goes to padding atom
368 uint64_t addr = 0;
369 uint64_t textSegPageSize = _options.segPageSize("__TEXT");
370 if ( _options.sharedRegionEligible() && (_options.iOSVersionMin() >= ld::iOS_8_0) && (textSegPageSize == 0x4000) )
371 textSegPageSize = 0x1000;
372 for (std::vector<ld::Internal::FinalSection*>::reverse_iterator it = state.sections.rbegin(); it != state.sections.rend(); ++it) {
373 ld::Internal::FinalSection* sect = *it;
374 if ( strcmp(sect->segmentName(), "__TEXT") != 0 )
375 continue;
376 if ( sect == headerAndLoadCommandsSection ) {
377 addr -= headerAndLoadCommandsSection->size;
378 paddingSize = addr % textSegPageSize;
379 break;
380 }
381 addr -= sect->size;
382 addr = addr & (0 - (1 << sect->alignment));
383 }
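// The loop above lays __TEXT out backwards from a page boundary (address 0,
// relying on unsigned wrap-around): each section's size is subtracted and the
// address rounded down to its alignment, so when the header/load-commands
// section is reached, addr % textSegPageSize is the slack that can be left
// after the load commands while still ending __TEXT on a page boundary.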
384
385 // if command line requires more padding than this
386 uint32_t minPad = _options.minimumHeaderPad();
387 if ( _options.maxMminimumHeaderPad() ) {
388 // -headerpad_max_install_names means there should be room for every path load command to grow to 1024 bytes (MAXPATHLEN)
389 uint32_t altMin = _dylibsToLoad.size() * MAXPATHLEN;
390 if ( _options.outputKind() == Options::kDynamicLibrary )
391 altMin += MAXPATHLEN;
392 if ( altMin > minPad )
393 minPad = altMin;
394 }
395 if ( paddingSize < minPad ) {
396 int extraPages = (minPad - paddingSize + _options.segmentAlignment() - 1)/_options.segmentAlignment();
397 paddingSize += extraPages * _options.segmentAlignment();
398 }
399
400 if ( _options.makeEncryptable() ) {
401 // load commands must be on a separate non-encrypted page
402 int loadCommandsPage = (headerAndLoadCommandsSection->size + minPad)/_options.segmentAlignment();
403 int textPage = (headerAndLoadCommandsSection->size + paddingSize)/_options.segmentAlignment();
404 if ( loadCommandsPage == textPage ) {
405 paddingSize += _options.segmentAlignment();
406 textPage += 1;
407 }
408 // remember start for later use by load command
409 _encryptedTEXTstartOffset = textPage*_options.segmentAlignment();
410 }
411 break;
412 }
413 // add padding to size of section
414 headerAndLoadCommandsSection->size += paddingSize;
415 }
416
417
418 uint64_t OutputFile::pageAlign(uint64_t addr)
419 {
420 const uint64_t alignment = _options.segmentAlignment();
421 return ((addr+alignment-1) & (-alignment));
422 }
423
424 uint64_t OutputFile::pageAlign(uint64_t addr, uint64_t pageSize)
425 {
426 return ((addr+pageSize-1) & (-pageSize));
427 }
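// Example: with a 4KB segment alignment, pageAlign(0x3001) == 0x4000 and
// pageAlign(0x3000) == 0x3000; the (-alignment) mask equals ~(alignment-1)
// and assumes the alignment is a power of two.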
428
429 static const char* makeName(const ld::Atom& atom)
430 {
431 static char buffer[4096];
432 switch ( atom.symbolTableInclusion() ) {
433 case ld::Atom::symbolTableNotIn:
434 case ld::Atom::symbolTableNotInFinalLinkedImages:
435 sprintf(buffer, "%s@0x%08llX", atom.name(), atom.objectAddress());
436 break;
437 case ld::Atom::symbolTableIn:
438 case ld::Atom::symbolTableInAndNeverStrip:
439 case ld::Atom::symbolTableInAsAbsolute:
440 case ld::Atom::symbolTableInWithRandomAutoStripLabel:
441 strlcpy(buffer, atom.name(), 4096);
442 break;
443 }
444 return buffer;
445 }
446
447 static const char* referenceTargetAtomName(ld::Internal& state, const ld::Fixup* ref)
448 {
449 switch ( ref->binding ) {
450 case ld::Fixup::bindingNone:
451 return "NO BINDING";
452 case ld::Fixup::bindingByNameUnbound:
453 return (char*)(ref->u.target);
454 case ld::Fixup::bindingByContentBound:
455 case ld::Fixup::bindingDirectlyBound:
456 return makeName(*((ld::Atom*)(ref->u.target)));
457 case ld::Fixup::bindingsIndirectlyBound:
458 return makeName(*state.indirectBindingTable[ref->u.bindingIndex]);
459 }
460 return "BAD BINDING";
461 }
462
463 bool OutputFile::targetIsThumb(ld::Internal& state, const ld::Fixup* fixup)
464 {
465 switch ( fixup->binding ) {
466 case ld::Fixup::bindingByContentBound:
467 case ld::Fixup::bindingDirectlyBound:
468 return fixup->u.target->isThumb();
469 case ld::Fixup::bindingsIndirectlyBound:
470 return state.indirectBindingTable[fixup->u.bindingIndex]->isThumb();
471 default:
472 break;
473 }
474 throw "unexpected binding";
475 }
476
477 uint64_t OutputFile::addressOf(const ld::Internal& state, const ld::Fixup* fixup, const ld::Atom** target)
478 {
479 if ( !_options.makeCompressedDyldInfo() ) {
480 // For external relocations the classic mach-o format
481 // stores the addend only in the content. That means
482 // the address of the target is not used.
483 if ( fixup->contentAddendOnly )
484 return 0;
485 }
486 switch ( fixup->binding ) {
487 case ld::Fixup::bindingNone:
488 throw "unexpected bindingNone";
489 case ld::Fixup::bindingByNameUnbound:
490 throw "unexpected bindingByNameUnbound";
491 case ld::Fixup::bindingByContentBound:
492 case ld::Fixup::bindingDirectlyBound:
493 *target = fixup->u.target;
494 return (*target)->finalAddress();
495 case ld::Fixup::bindingsIndirectlyBound:
496 *target = state.indirectBindingTable[fixup->u.bindingIndex];
497 #ifndef NDEBUG
498 if ( ! (*target)->finalAddressMode() ) {
499 throwf("reference to symbol (which has not been assigned an address) %s", (*target)->name());
500 }
501 #endif
502 return (*target)->finalAddress();
503 }
504 throw "unexpected binding";
505 }
506
507 uint64_t OutputFile::addressAndTarget(const ld::Internal& state, const ld::Fixup* fixup, const ld::Atom** target)
508 {
509 switch ( fixup->binding ) {
510 case ld::Fixup::bindingNone:
511 throw "unexpected bindingNone";
512 case ld::Fixup::bindingByNameUnbound:
513 throw "unexpected bindingByNameUnbound";
514 case ld::Fixup::bindingByContentBound:
515 case ld::Fixup::bindingDirectlyBound:
516 *target = fixup->u.target;
517 return (*target)->finalAddress();
518 case ld::Fixup::bindingsIndirectlyBound:
519 *target = state.indirectBindingTable[fixup->u.bindingIndex];
520 #ifndef NDEBUG
521 if ( ! (*target)->finalAddressMode() ) {
522 throwf("reference to symbol (which has not been assigned an address) %s", (*target)->name());
523 }
524 #endif
525 return (*target)->finalAddress();
526 }
527 throw "unexpected binding";
528 }
529
530
531 uint64_t OutputFile::sectionOffsetOf(const ld::Internal& state, const ld::Fixup* fixup)
532 {
533 const ld::Atom* target = NULL;
534 switch ( fixup->binding ) {
535 case ld::Fixup::bindingNone:
536 throw "unexpected bindingNone";
537 case ld::Fixup::bindingByNameUnbound:
538 throw "unexpected bindingByNameUnbound";
539 case ld::Fixup::bindingByContentBound:
540 case ld::Fixup::bindingDirectlyBound:
541 target = fixup->u.target;
542 break;
543 case ld::Fixup::bindingsIndirectlyBound:
544 target = state.indirectBindingTable[fixup->u.bindingIndex];
545 break;
546 }
547 assert(target != NULL);
548
549 uint64_t targetAddress = target->finalAddress();
550 for (std::vector<ld::Internal::FinalSection*>::const_iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
551 const ld::Internal::FinalSection* sect = *it;
552 if ( (sect->address <= targetAddress) && (targetAddress < (sect->address+sect->size)) )
553 return targetAddress - sect->address;
554 }
555 throw "section not found for section offset";
556 }
557
558
559
560 uint64_t OutputFile::tlvTemplateOffsetOf(const ld::Internal& state, const ld::Fixup* fixup)
561 {
562 const ld::Atom* target = NULL;
563 switch ( fixup->binding ) {
564 case ld::Fixup::bindingNone:
565 throw "unexpected bindingNone";
566 case ld::Fixup::bindingByNameUnbound:
567 throw "unexpected bindingByNameUnbound";
568 case ld::Fixup::bindingByContentBound:
569 case ld::Fixup::bindingDirectlyBound:
570 target = fixup->u.target;
571 break;
572 case ld::Fixup::bindingsIndirectlyBound:
573 target = state.indirectBindingTable[fixup->u.bindingIndex];
574 break;
575 }
576 assert(target != NULL);
577
578 for (std::vector<ld::Internal::FinalSection*>::const_iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
579 const ld::Internal::FinalSection* sect = *it;
580 switch ( sect->type() ) {
581 case ld::Section::typeTLVInitialValues:
582 case ld::Section::typeTLVZeroFill:
583 return target->finalAddress() - sect->address;
584 default:
585 break;
586 }
587 }
588 throw "section not found for tlvTemplateOffsetOf";
589 }
590
591 void OutputFile::printSectionLayout(ld::Internal& state)
592 {
593 // show layout of final image
594 fprintf(stderr, "final section layout:\n");
595 for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
596 if ( (*it)->isSectionHidden() )
597 continue;
598 fprintf(stderr, " %s/%s addr=0x%08llX, size=0x%08llX, fileOffset=0x%08llX, type=%d\n",
599 (*it)->segmentName(), (*it)->sectionName(),
600 (*it)->address, (*it)->size, (*it)->fileOffset, (*it)->type());
601 }
602 }
603
604
605 void OutputFile::rangeCheck8(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
606 {
607 if ( (displacement > 127) || (displacement < -128) ) {
608 // show layout of final image
609 printSectionLayout(state);
610
611 const ld::Atom* target;
612 throwf("8-bit reference out of range (%lld max is +/-127B): from %s (0x%08llX) to %s (0x%08llX)",
613 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
614 addressOf(state, fixup, &target));
615 }
616 }
617
618 void OutputFile::rangeCheck16(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
619 {
620 const int64_t thirtyTwoKLimit = 0x00007FFF;
621 if ( (displacement > thirtyTwoKLimit) || (displacement < (-thirtyTwoKLimit)) ) {
622 // show layout of final image
623 printSectionLayout(state);
624
625 const ld::Atom* target;
626 throwf("16-bit reference out of range (%lld max is +/-32KB): from %s (0x%08llX) to %s (0x%08llX)",
627 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
628 addressOf(state, fixup, &target));
629 }
630 }
631
632 void OutputFile::rangeCheckBranch32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
633 {
634 const int64_t twoGigLimit = 0x7FFFFFFF;
635 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
636 // show layout of final image
637 printSectionLayout(state);
638
639 const ld::Atom* target;
640 throwf("32-bit branch out of range (%lld max is +/-2GB): from %s (0x%08llX) to %s (0x%08llX)",
641 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
642 addressOf(state, fixup, &target));
643 }
644 }
645
646
647 void OutputFile::rangeCheckAbsolute32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
648 {
649 const int64_t fourGigLimit = 0xFFFFFFFF;
650 if ( displacement > fourGigLimit ) {
651 // <rdar://problem/9610466> cannot enforce 32-bit range checks on 32-bit archs because assembler loses sign information
652 // .long _foo - 0xC0000000
653 // is encoded in mach-o the same as:
654 // .long _foo + 0x40000000
655 // so if _foo lays out to 0xC0000100, the first is ok, but the second is not.
656 if ( (_options.architecture() == CPU_TYPE_ARM) || (_options.architecture() == CPU_TYPE_I386) ) {
657 // It is unlikely that userland code does funky stuff like this, so warn for it, but do not warn for -preload or -static
658 if ( (_options.outputKind() != Options::kPreload) && (_options.outputKind() != Options::kStaticExecutable) ) {
659 warning("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
660 displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), displacement);
661 }
662 return;
663 }
664 // show layout of final image
665 printSectionLayout(state);
666
667 const ld::Atom* target;
668 if ( fixup->binding == ld::Fixup::bindingNone )
669 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
670 displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), displacement);
671 else
672 throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to %s (0x%08llX)",
673 displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), referenceTargetAtomName(state, fixup),
674 addressOf(state, fixup, &target));
675 }
676 }
677
678
679 void OutputFile::rangeCheckRIP32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
680 {
681 const int64_t twoGigLimit = 0x7FFFFFFF;
682 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
683 // show layout of final image
684 printSectionLayout(state);
685
686 const ld::Atom* target;
687 throwf("32-bit RIP relative reference out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
688 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
689 addressOf(state, fixup, &target));
690 }
691 }
692
693 void OutputFile::rangeCheckARM12(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
694 {
695 if ( (displacement > 4092LL) || (displacement < (-4092LL)) ) {
696 // show layout of final image
697 printSectionLayout(state);
698
699 const ld::Atom* target;
700 throwf("ARM ldr 12-bit displacement out of range (%lld max is +/-4096B): from %s (0x%08llX) to %s (0x%08llX)",
701 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
702 addressOf(state, fixup, &target));
703 }
704 }
705
706 bool OutputFile::checkArmBranch24Displacement(int64_t displacement)
707 {
708 return ( (displacement < 33554428LL) && (displacement > (-33554432LL)) );
709 }
710
711 void OutputFile::rangeCheckARMBranch24(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
712 {
713 if ( checkArmBranch24Displacement(displacement) )
714 return;
715
716 // show layout of final image
717 printSectionLayout(state);
718
719 const ld::Atom* target;
720 throwf("b/bl/blx ARM branch out of range (%lld max is +/-32MB): from %s (0x%08llX) to %s (0x%08llX)",
721 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
722 addressOf(state, fixup, &target));
723 }
724
725 bool OutputFile::checkThumbBranch22Displacement(int64_t displacement)
726 {
727 // thumb2 supports +/- 16MB displacement
728 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
729 if ( (displacement > 16777214LL) || (displacement < (-16777216LL)) ) {
730 return false;
731 }
732 }
733 else {
734 // thumb1 supports +/- 4MB displacement
735 if ( (displacement > 4194302LL) || (displacement < (-4194304LL)) ) {
736 return false;
737 }
738 }
739 return true;
740 }
741
742 void OutputFile::rangeCheckThumbBranch22(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
743 {
744 if ( checkThumbBranch22Displacement(displacement) )
745 return;
746
747 // show layout of final image
748 printSectionLayout(state);
749
750 const ld::Atom* target;
751 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
752 throwf("b/bl/blx thumb2 branch out of range (%lld max is +/-16MB): from %s (0x%08llX) to %s (0x%08llX)",
753 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
754 addressOf(state, fixup, &target));
755 }
756 else {
757 throwf("b/bl/blx thumb1 branch out of range (%lld max is +/-4MB): from %s (0x%08llX) to %s (0x%08llX)",
758 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
759 addressOf(state, fixup, &target));
760 }
761 }
762
763
764 void OutputFile::rangeCheckARM64Branch26(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
765 {
766 const int64_t bl_128MegLimit = 0x07FFFFFF;
767 if ( (displacement > bl_128MegLimit) || (displacement < (-bl_128MegLimit)) ) {
768 // show layout of final image
769 printSectionLayout(state);
770
771 const ld::Atom* target;
772 throwf("b(l) ARM64 branch out of range (%lld max is +/-128MB): from %s (0x%08llX) to %s (0x%08llX)",
773 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
774 addressOf(state, fixup, &target));
775 }
776 }
777
778 void OutputFile::rangeCheckARM64Page21(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
779 {
780 const int64_t adrp_4GigLimit = 0x100000000ULL;
781 if ( (displacement > adrp_4GigLimit) || (displacement < (-adrp_4GigLimit)) ) {
782 // show layout of final image
783 printSectionLayout(state);
784
785 const ld::Atom* target;
786 throwf("ARM64 ADRP out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
787 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
788 addressOf(state, fixup, &target));
789 }
790 }
791
792
793 uint16_t OutputFile::get16LE(uint8_t* loc) { return LittleEndian::get16(*(uint16_t*)loc); }
794 void OutputFile::set16LE(uint8_t* loc, uint16_t value) { LittleEndian::set16(*(uint16_t*)loc, value); }
795
796 uint32_t OutputFile::get32LE(uint8_t* loc) { return LittleEndian::get32(*(uint32_t*)loc); }
797 void OutputFile::set32LE(uint8_t* loc, uint32_t value) { LittleEndian::set32(*(uint32_t*)loc, value); }
798
799 uint64_t OutputFile::get64LE(uint8_t* loc) { return LittleEndian::get64(*(uint64_t*)loc); }
800 void OutputFile::set64LE(uint8_t* loc, uint64_t value) { LittleEndian::set64(*(uint64_t*)loc, value); }
801
802 uint16_t OutputFile::get16BE(uint8_t* loc) { return BigEndian::get16(*(uint16_t*)loc); }
803 void OutputFile::set16BE(uint8_t* loc, uint16_t value) { BigEndian::set16(*(uint16_t*)loc, value); }
804
805 uint32_t OutputFile::get32BE(uint8_t* loc) { return BigEndian::get32(*(uint32_t*)loc); }
806 void OutputFile::set32BE(uint8_t* loc, uint32_t value) { BigEndian::set32(*(uint32_t*)loc, value); }
807
808 uint64_t OutputFile::get64BE(uint8_t* loc) { return BigEndian::get64(*(uint64_t*)loc); }
809 void OutputFile::set64BE(uint8_t* loc, uint64_t value) { BigEndian::set64(*(uint64_t*)loc, value); }
810
811 #if SUPPORT_ARCH_arm64
812
813 static uint32_t makeNOP() {
814 return 0xD503201F;
815 }
816
817 enum SignExtension { signedNot, signed32, signed64 };
818 struct LoadStoreInfo {
819 uint32_t reg;
820 uint32_t baseReg;
821 uint32_t offset; // after scaling
822 uint32_t size; // 1,2,4,8, or 16
823 bool isStore;
824 bool isFloat; // if destReg is FP/SIMD
825 SignExtension signEx; // if load is sign extended
826 };
827
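// Builds an AArch64 LDR (literal): the byte delta from the instruction to the
// literal must be 4-byte aligned and within +/-1MB; delta/4 goes in the imm19
// field at bits 23:5, which is what (delta << 3) & 0x00FFFFE0 computes. The
// size/isFloat/signEx fields select the W/X/S/D/Q and LDRSW opcode variants.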
828 static uint32_t makeLDR_literal(const LoadStoreInfo& info, uint64_t targetAddress, uint64_t instructionAddress)
829 {
830 int64_t delta = targetAddress - instructionAddress;
831 assert(delta < 1024*1024);
832 assert(delta > -1024*1024);
833 assert((info.reg & 0xFFFFFFE0) == 0);
834 assert((targetAddress & 0x3) == 0);
835 assert((instructionAddress & 0x3) == 0);
836 assert(!info.isStore);
837 uint32_t imm19 = (delta << 3) & 0x00FFFFE0;
838 uint32_t instruction = 0;
839 switch ( info.size ) {
840 case 4:
841 if ( info.isFloat ) {
842 assert(info.signEx == signedNot);
843 instruction = 0x1C000000;
844 }
845 else {
846 if ( info.signEx == signed64 )
847 instruction = 0x98000000;
848 else
849 instruction = 0x18000000;
850 }
851 break;
852 case 8:
853 assert(info.signEx == signedNot);
854 instruction = info.isFloat ? 0x5C000000 : 0x58000000;
855 break;
856 case 16:
857 assert(info.signEx == signedNot);
858 instruction = 0x9C000000;
859 break;
860 default:
861 assert(0 && "invalid load size for literal");
862 }
863 return (instruction | imm19 | info.reg);
864 }
865
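// Builds an ADR that materializes instructionAddress + delta for a target within
// +/-1MB: the 21-bit byte offset is split into immlo (low 2 bits, at bits 30:29)
// and immhi (remaining 19 bits, at bits 23:5).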
866 static uint32_t makeADR(uint32_t destReg, uint64_t targetAddress, uint64_t instructionAddress)
867 {
868 assert((destReg & 0xFFFFFFE0) == 0);
869 assert((instructionAddress & 0x3) == 0);
870 uint32_t instruction = 0x10000000;
871 int64_t delta = targetAddress - instructionAddress;
872 assert(delta < 1024*1024);
873 assert(delta > -1024*1024);
874 uint32_t immhi = (delta & 0x001FFFFC) << 3;
875 uint32_t immlo = (delta & 0x00000003) << 29;
876 return (instruction | immhi | immlo | destReg);
877 }
878
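// Builds an unsigned-immediate LDR/STR (base opcode 0x39000000): size (bits 31:30),
// V (bit 26), and opc (bits 23:22) select the access width, FP/SIMD form, and
// sign-extension, while imm12 (bits 21:10) holds the offset scaled by the access size.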
879 static uint32_t makeLoadOrStore(const LoadStoreInfo& info)
880 {
881 uint32_t instruction = 0x39000000;
882 if ( info.isFloat )
883 instruction |= 0x04000000;
884 instruction |= info.reg;
885 instruction |= (info.baseReg << 5);
886 uint32_t sizeBits = 0;
887 uint32_t opcBits = 0;
888 uint32_t imm12Bits = 0;
889 switch ( info.size ) {
890 case 1:
891 sizeBits = 0;
892 imm12Bits = info.offset;
893 if ( info.isStore ) {
894 opcBits = 0;
895 }
896 else {
897 switch ( info.signEx ) {
898 case signedNot:
899 opcBits = 1;
900 break;
901 case signed32:
902 opcBits = 3;
903 break;
904 case signed64:
905 opcBits = 2;
906 break;
907 }
908 }
909 break;
910 case 2:
911 sizeBits = 1;
912 assert((info.offset % 2) == 0);
913 imm12Bits = info.offset/2;
914 if ( info.isStore ) {
915 opcBits = 0;
916 }
917 else {
918 switch ( info.signEx ) {
919 case signedNot:
920 opcBits = 1;
921 break;
922 case signed32:
923 opcBits = 3;
924 break;
925 case signed64:
926 opcBits = 2;
927 break;
928 }
929 }
930 break;
931 case 4:
932 sizeBits = 2;
933 assert((info.offset % 4) == 0);
934 imm12Bits = info.offset/4;
935 if ( info.isStore ) {
936 opcBits = 0;
937 }
938 else {
939 switch ( info.signEx ) {
940 case signedNot:
941 opcBits = 1;
942 break;
943 case signed32:
944 assert(0 && "cannot use signed32 with 32-bit load/store");
945 break;
946 case signed64:
947 opcBits = 2;
948 break;
949 }
950 }
951 break;
952 case 8:
953 sizeBits = 3;
954 assert((info.offset % 8) == 0);
955 imm12Bits = info.offset/8;
956 if ( info.isStore ) {
957 opcBits = 0;
958 }
959 else {
960 opcBits = 1;
961 assert(info.signEx == signedNot);
962 }
963 break;
964 case 16:
965 sizeBits = 0;
966 assert((info.offset % 16) == 0);
967 imm12Bits = info.offset/16;
968 assert(info.isFloat);
969 if ( info.isStore ) {
970 opcBits = 2;
971 }
972 else {
973 opcBits = 3;
974 }
975 break;
976 default:
977 assert(0 && "bad load/store size");
978 break;
979 }
980 assert(imm12Bits < 4096);
981 return (instruction | (sizeBits << 30) | (opcBits << 22) | (imm12Bits << 10));
982 }
983
984 static bool parseLoadOrStore(uint32_t instruction, LoadStoreInfo& info)
985 {
986 if ( (instruction & 0x3B000000) != 0x39000000 )
987 return false;
988 info.isFloat = ( (instruction & 0x04000000) != 0 );
989 info.reg = (instruction & 0x1F);
990 info.baseReg = ((instruction>>5) & 0x1F);
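// The switch keys off the size field (bits 31:30) and opc field (bits 23:22);
// the V bit (26) read above distinguishes the FP/SIMD variants.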
991 switch (instruction & 0xC0C00000) {
992 case 0x00000000:
993 info.size = 1;
994 info.isStore = true;
995 info.signEx = signedNot;
996 break;
997 case 0x00400000:
998 info.size = 1;
999 info.isStore = false;
1000 info.signEx = signedNot;
1001 break;
1002 case 0x00800000:
1003 if ( info.isFloat ) {
1004 info.size = 16;
1005 info.isStore = true;
1006 info.signEx = signedNot;
1007 }
1008 else {
1009 info.size = 1;
1010 info.isStore = false;
1011 info.signEx = signed64;
1012 }
1013 break;
1014 case 0x00C00000:
1015 if ( info.isFloat ) {
1016 info.size = 16;
1017 info.isStore = false;
1018 info.signEx = signedNot;
1019 }
1020 else {
1021 info.size = 1;
1022 info.isStore = false;
1023 info.signEx = signed32;
1024 }
1025 break;
1026 case 0x40000000:
1027 info.size = 2;
1028 info.isStore = true;
1029 info.signEx = signedNot;
1030 break;
1031 case 0x40400000:
1032 info.size = 2;
1033 info.isStore = false;
1034 info.signEx = signedNot;
1035 break;
1036 case 0x40800000:
1037 info.size = 2;
1038 info.isStore = false;
1039 info.signEx = signed64;
1040 break;
1041 case 0x40C00000:
1042 info.size = 2;
1043 info.isStore = false;
1044 info.signEx = signed32;
1045 break;
1046 case 0x80000000:
1047 info.size = 4;
1048 info.isStore = true;
1049 info.signEx = signedNot;
1050 break;
1051 case 0x80400000:
1052 info.size = 4;
1053 info.isStore = false;
1054 info.signEx = signedNot;
1055 break;
1056 case 0x80800000:
1057 info.size = 4;
1058 info.isStore = false;
1059 info.signEx = signed64;
1060 break;
1061 case 0xC0000000:
1062 info.size = 8;
1063 info.isStore = true;
1064 info.signEx = signedNot;
1065 break;
1066 case 0xC0400000:
1067 info.size = 8;
1068 info.isStore = false;
1069 info.signEx = signedNot;
1070 break;
1071 default:
1072 return false;
1073 }
1074 info.offset = ((instruction >> 10) & 0x0FFF) * info.size;
1075 return true;
1076 }
1077
1078 struct AdrpInfo {
1079 uint32_t destReg;
1080 };
1081
1082 static bool parseADRP(uint32_t instruction, AdrpInfo& info)
1083 {
1084 if ( (instruction & 0x9F000000) != 0x90000000 )
1085 return false;
1086 info.destReg = (instruction & 0x1F);
1087 return true;
1088 }
1089
1090 struct AddInfo {
1091 uint32_t destReg;
1092 uint32_t srcReg;
1093 uint32_t addend;
1094 };
1095
1096 static bool parseADD(uint32_t instruction, AddInfo& info)
1097 {
1098 if ( (instruction & 0xFFC00000) != 0x91000000 )
1099 return false;
1100 info.destReg = (instruction & 0x1F);
1101 info.srcReg = ((instruction>>5) & 0x1F);
1102 info.addend = ((instruction>>10) & 0xFFF);
1103 return true;
1104 }
1105
1106
1107
1108 #if 0
1109 static uint32_t makeLDR_scaledOffset(const LoadStoreInfo& info)
1110 {
1111 assert((info.reg & 0xFFFFFFE0) == 0);
1112 assert((info.baseReg & 0xFFFFFFE0) == 0);
1113 assert(!info.isFloat || (info.signEx != signedNot));
1114 uint32_t sizeBits = 0;
1115 uint32_t opcBits = 1;
1116 uint32_t vBit = info.isFloat;
1117 switch ( info.signEx ) {
1118 case signedNot:
1119 opcBits = 1;
1120 break;
1121 case signed32:
1122 opcBits = 3;
1123 break;
1124 case signed64:
1125 opcBits = 2;
1126 break;
1127 default:
1128 assert(0 && "bad SignExtension runtime value");
1129 }
1130 switch ( info.size ) {
1131 case 1:
1132 sizeBits = 0;
1133 break;
1134 case 2:
1135 sizeBits = 1;
1136 break;
1137 case 4:
1138 sizeBits = 2;
1139 break;
1140 case 8:
1141 sizeBits = 3;
1142 break;
1143 case 16:
1144 sizeBits = 0;
1145 vBit = 1;
1146 opcBits = 3;
1147 break;
1148 default:
1149 assert(0 && "invalid load size for literal");
1150 }
1151 assert((info.offset % info.size) == 0);
1152 uint32_t scaledOffset = info.offset/info.size;
1153 assert(scaledOffset < 4096);
1154 return (0x39000000 | (sizeBits<<30) | (vBit<<26) | (opcBits<<22) | (scaledOffset<<10) | (info.baseReg<<5) | info.reg);
1155 }
1156
1157 static uint32_t makeLDR_literal(uint32_t destReg, uint32_t loadSize, bool isFloat, uint64_t targetAddress, uint64_t instructionAddress)
1158 {
1159 int64_t delta = targetAddress - instructionAddress;
1160 assert(delta < 1024*1024);
1161 assert(delta > -1024*1024);
1162 assert((destReg & 0xFFFFFFE0) == 0);
1163 assert((targetAddress & 0x3) == 0);
1164 assert((instructionAddress & 0x3) == 0);
1165 uint32_t imm19 = (delta << 3) & 0x00FFFFE0;
1166 uint32_t instruction = 0;
1167 switch ( loadSize ) {
1168 case 4:
1169 instruction = isFloat ? 0x1C000000 : 0x18000000;
1170 break;
1171 case 8:
1172 instruction = isFloat ? 0x5C000000 : 0x58000000;
1173 break;
1174 case 16:
1175 instruction = 0x9C000000;
1176 break;
1177 default:
1178 assert(0 && "invalid load size for literal");
1179 }
1180 return (instruction | imm19 | destReg);
1181 }
1182
1183
1184 static bool ldrInfo(uint32_t instruction, uint8_t* size, uint8_t* destReg, bool* v, uint32_t* scaledOffset)
1185 {
1186 *v = ( (instruction & 0x04000000) != 0 );
1187 *destReg = (instruction & 0x1F);
1188 uint32_t imm12 = ((instruction >> 10) & 0x00000FFF);
1189 switch ( (instruction & 0xC0000000) >> 30 ) {
1190 case 0:
1191 // vector and byte LDR have the same "size" bits; need to check other bits to differentiate
1192 if ( (instruction & 0x00800000) == 0 ) {
1193 *size = 1;
1194 *scaledOffset = imm12;
1195 }
1196 else {
1197 *size = 16;
1198 *scaledOffset = imm12 * 16;
1199 }
1200 break;
1201 case 1:
1202 *size = 2;
1203 *scaledOffset = imm12 * 2;
1204 break;
1205 case 2:
1206 *size = 4;
1207 *scaledOffset = imm12 * 4;
1208 break;
1209 case 3:
1210 *size = 8;
1211 *scaledOffset = imm12 * 8;
1212 break;
1213 }
1214 return ((instruction & 0x3B400000) == 0x39400000);
1215 }
1216 #endif
1217
1218 static bool withinOneMeg(uint64_t addr1, uint64_t addr2) {
1219 int64_t delta = (addr2 - addr1);
1220 return ( (delta < 1024*1024) && (delta > -1024*1024) );
1221 }
1222 #endif // SUPPORT_ARCH_arm64
1223
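// Fills in an InstructionInfo for the instruction at offsetInAtom+delta: its bytes
// and final address, the fixup (if any) that usedByHints recorded at that offset,
// and the fixup's target and target address, folding in a trailing kindAddAddend
// when the fixup is the first of a multi-fixup cluster.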
1224 void OutputFile::setInfo(ld::Internal& state, const ld::Atom* atom, uint8_t* buffer, const std::map<uint32_t, const Fixup*>& usedByHints,
1225 uint32_t offsetInAtom, uint32_t delta, InstructionInfo* info)
1226 {
1227 info->offsetInAtom = offsetInAtom + delta;
1228 std::map<uint32_t, const Fixup*>::const_iterator pos = usedByHints.find(info->offsetInAtom);
1229 if ( (pos != usedByHints.end()) && (pos->second != NULL) ) {
1230 info->fixup = pos->second;
1231 info->targetAddress = addressOf(state, info->fixup, &info->target);
1232 if ( info->fixup->clusterSize != ld::Fixup::k1of1 ) {
1233 assert(info->fixup->firstInCluster());
1234 const ld::Fixup* nextFixup = info->fixup + 1;
1235 if ( nextFixup->kind == ld::Fixup::kindAddAddend ) {
1236 info->targetAddress += nextFixup->u.addend;
1237 }
1238 else {
1239 assert(0 && "expected addend");
1240 }
1241 }
1242 }
1243 else {
1244 info->fixup = NULL;
1245 info->targetAddress = 0;
1246 info->target = NULL;
1247 }
1248 info->instructionContent = &buffer[info->offsetInAtom];
1249 info->instructionAddress = atom->finalAddress() + info->offsetInAtom;
1250 info->instruction = get32LE(info->instructionContent);
1251 }
1252
1253 #if SUPPORT_ARCH_arm64
1254 static bool isPageKind(const ld::Fixup* fixup, bool mustBeGOT=false)
1255 {
1256 if ( fixup == NULL )
1257 return false;
1258 const ld::Fixup* f;
1259 switch ( fixup->kind ) {
1260 case ld::Fixup::kindStoreTargetAddressARM64Page21:
1261 return !mustBeGOT;
1262 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
1263 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
1264 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
1265 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
1266 return true;
1267 case ld::Fixup::kindSetTargetAddress:
1268 f = fixup;
1269 do {
1270 ++f;
1271 } while ( ! f->lastInCluster() );
1272 switch (f->kind ) {
1273 case ld::Fixup::kindStoreARM64Page21:
1274 return !mustBeGOT;
1275 case ld::Fixup::kindStoreARM64GOTLoadPage21:
1276 case ld::Fixup::kindStoreARM64GOTLeaPage21:
1277 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
1278 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
1279 return true;
1280 default:
1281 break;
1282 }
1283 break;
1284 default:
1285 break;
1286 }
1287 return false;
1288 }
1289
1290 static bool isPageOffsetKind(const ld::Fixup* fixup, bool mustBeGOT=false)
1291 {
1292 if ( fixup == NULL )
1293 return false;
1294 const ld::Fixup* f;
1295 switch ( fixup->kind ) {
1296 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
1297 return !mustBeGOT;
1298 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
1299 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
1300 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
1301 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
1302 return true;
1303 case ld::Fixup::kindSetTargetAddress:
1304 f = fixup;
1305 do {
1306 ++f;
1307 } while ( ! f->lastInCluster() );
1308 switch (f->kind ) {
1309 case ld::Fixup::kindStoreARM64PageOff12:
1310 return !mustBeGOT;
1311 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
1312 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
1313 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
1314 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
1315 return true;
1316 default:
1317 break;
1318 }
1319 break;
1320 default:
1321 break;
1322 }
1323 return false;
1324 }
1325 #endif // SUPPORT_ARCH_arm64
1326
1327
1328 #define LOH_ASSERT(cond) \
1329 if ( !(cond) ) { \
1330 warning("ignoring linker optimization hint at %s+0x%X because " #cond, atom->name(), fit->offsetInAtom); \
1331 break; \
1332 }
1333
1334 void OutputFile::applyFixUps(ld::Internal& state, uint64_t mhAddress, const ld::Atom* atom, uint8_t* buffer)
1335 {
1336 //fprintf(stderr, "applyFixUps() on %s\n", atom->name());
1337 int64_t accumulator = 0;
1338 const ld::Atom* toTarget = NULL;
1339 const ld::Atom* fromTarget;
1340 int64_t delta;
1341 uint32_t instruction;
1342 uint32_t newInstruction;
1343 bool is_bl;
1344 bool is_blx;
1345 bool is_b;
1346 bool thumbTarget = false;
1347 std::map<uint32_t, const Fixup*> usedByHints;
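// Fixups on an atom are applied in order as clusters: the "set"/"add"/"subtract"
// kinds update the running accumulator, and the "store" kinds write either the
// accumulator or a PC-relative delta into this atom's bytes in 'buffer'. For
// example, a cluster of kindSetTargetAddress + kindAddAddend +
// kindStoreLittleEndian64 computes target+addend and stores it as 8 bytes.
// kindLinkerOptimizationHint only records the instruction offsets each hint
// covers (usedByHints) for the linker-optimization-hint pass that follows.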
1348 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
1349 uint8_t* fixUpLocation = &buffer[fit->offsetInAtom];
1350 ld::Fixup::LOH_arm64 lohExtra;
1351 switch ( (ld::Fixup::Kind)(fit->kind) ) {
1352 case ld::Fixup::kindNone:
1353 case ld::Fixup::kindNoneFollowOn:
1354 case ld::Fixup::kindNoneGroupSubordinate:
1355 case ld::Fixup::kindNoneGroupSubordinateFDE:
1356 case ld::Fixup::kindNoneGroupSubordinateLSDA:
1357 case ld::Fixup::kindNoneGroupSubordinatePersonality:
1358 break;
1359 case ld::Fixup::kindSetTargetAddress:
1360 accumulator = addressOf(state, fit, &toTarget);
1361 thumbTarget = targetIsThumb(state, fit);
1362 if ( thumbTarget )
1363 accumulator |= 1;
1364 if ( fit->contentAddendOnly || fit->contentDetlaToAddendOnly )
1365 accumulator = 0;
1366 break;
1367 case ld::Fixup::kindSubtractTargetAddress:
1368 delta = addressOf(state, fit, &fromTarget);
1369 if ( ! fit->contentAddendOnly )
1370 accumulator -= delta;
1371 break;
1372 case ld::Fixup::kindAddAddend:
1373 if ( ! fit->contentIgnoresAddend ) {
1374 // <rdar://problem/8342028> ARM main executables may contain .long constants pointing
1375 // into themselves, such as jump tables. These .longs should not have the thumb bit set
1376 // even though the target is a thumb instruction. We can tell it is an interior pointer
1377 // because we are processing an addend.
1378 if ( thumbTarget && (toTarget == atom) && ((int32_t)fit->u.addend > 0) ) {
1379 accumulator &= (-2);
1380 //warning("removing thumb bit from intra-atom pointer in %s %s+0x%0X",
1381 // atom->section().sectionName(), atom->name(), fit->offsetInAtom);
1382 }
1383 accumulator += fit->u.addend;
1384 }
1385 break;
1386 case ld::Fixup::kindSubtractAddend:
1387 accumulator -= fit->u.addend;
1388 break;
1389 case ld::Fixup::kindSetTargetImageOffset:
1390 accumulator = addressOf(state, fit, &toTarget) - mhAddress;
1391 thumbTarget = targetIsThumb(state, fit);
1392 if ( thumbTarget )
1393 accumulator |= 1;
1394 break;
1395 case ld::Fixup::kindSetTargetSectionOffset:
1396 accumulator = sectionOffsetOf(state, fit);
1397 break;
1398 case ld::Fixup::kindSetTargetTLVTemplateOffset:
1399 accumulator = tlvTemplateOffsetOf(state, fit);
1400 break;
1401 case ld::Fixup::kindStore8:
1402 *fixUpLocation += accumulator;
1403 break;
1404 case ld::Fixup::kindStoreLittleEndian16:
1405 set16LE(fixUpLocation, accumulator);
1406 break;
1407 case ld::Fixup::kindStoreLittleEndianLow24of32:
1408 set32LE(fixUpLocation, (get32LE(fixUpLocation) & 0xFF000000) | (accumulator & 0x00FFFFFF) );
1409 break;
1410 case ld::Fixup::kindStoreLittleEndian32:
1411 rangeCheckAbsolute32(accumulator, state, atom, fit);
1412 set32LE(fixUpLocation, accumulator);
1413 break;
1414 case ld::Fixup::kindStoreLittleEndian64:
1415 set64LE(fixUpLocation, accumulator);
1416 break;
1417 case ld::Fixup::kindStoreBigEndian16:
1418 set16BE(fixUpLocation, accumulator);
1419 break;
1420 case ld::Fixup::kindStoreBigEndianLow24of32:
1421 set32BE(fixUpLocation, (get32BE(fixUpLocation) & 0xFF000000) | (accumulator & 0x00FFFFFF) );
1422 break;
1423 case ld::Fixup::kindStoreBigEndian32:
1424 rangeCheckAbsolute32(accumulator, state, atom, fit);
1425 set32BE(fixUpLocation, accumulator);
1426 break;
1427 case ld::Fixup::kindStoreBigEndian64:
1428 set64BE(fixUpLocation, accumulator);
1429 break;
1430 case ld::Fixup::kindStoreX86PCRel8:
1431 case ld::Fixup::kindStoreX86BranchPCRel8:
1432 if ( fit->contentAddendOnly )
1433 delta = accumulator;
1434 else
1435 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 1);
1436 rangeCheck8(delta, state, atom, fit);
1437 *fixUpLocation = delta;
1438 break;
1439 case ld::Fixup::kindStoreX86PCRel16:
1440 if ( fit->contentAddendOnly )
1441 delta = accumulator;
1442 else
1443 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 2);
1444 rangeCheck16(delta, state, atom, fit);
1445 set16LE(fixUpLocation, delta);
1446 break;
1447 case ld::Fixup::kindStoreX86BranchPCRel32:
1448 if ( fit->contentAddendOnly )
1449 delta = accumulator;
1450 else
1451 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1452 rangeCheckBranch32(delta, state, atom, fit);
1453 set32LE(fixUpLocation, delta);
1454 break;
1455 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
1456 case ld::Fixup::kindStoreX86PCRel32GOT:
1457 case ld::Fixup::kindStoreX86PCRel32:
1458 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
1459 if ( fit->contentAddendOnly )
1460 delta = accumulator;
1461 else
1462 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1463 rangeCheckRIP32(delta, state, atom, fit);
1464 set32LE(fixUpLocation, delta);
1465 break;
1466 case ld::Fixup::kindStoreX86PCRel32_1:
1467 if ( fit->contentAddendOnly )
1468 delta = accumulator - 1;
1469 else
1470 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 5);
1471 rangeCheckRIP32(delta, state, atom, fit);
1472 set32LE(fixUpLocation, delta);
1473 break;
1474 case ld::Fixup::kindStoreX86PCRel32_2:
1475 if ( fit->contentAddendOnly )
1476 delta = accumulator - 2;
1477 else
1478 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 6);
1479 rangeCheckRIP32(delta, state, atom, fit);
1480 set32LE(fixUpLocation, delta);
1481 break;
1482 case ld::Fixup::kindStoreX86PCRel32_4:
1483 if ( fit->contentAddendOnly )
1484 delta = accumulator - 4;
1485 else
1486 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1487 rangeCheckRIP32(delta, state, atom, fit);
1488 set32LE(fixUpLocation, delta);
1489 break;
1490 case ld::Fixup::kindStoreX86Abs32TLVLoad:
1491 set32LE(fixUpLocation, accumulator);
1492 break;
1493 case ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA:
1494 assert(_options.outputKind() != Options::kObjectFile);
1495 // TLV entry was optimized away, change movl instruction to a leal
1496 if ( fixUpLocation[-1] != 0xA1 )
1497 throw "TLV load reloc does not point to a movl instruction";
1498 fixUpLocation[-1] = 0xB8;
1499 set32LE(fixUpLocation, accumulator);
1500 break;
1501 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
1502 assert(_options.outputKind() != Options::kObjectFile);
1503 // GOT entry was optimized away, change movq instruction to a leaq
1504 if ( fixUpLocation[-2] != 0x8B )
1505 throw "GOT load reloc does not point to a movq instruction";
1506 fixUpLocation[-2] = 0x8D;
1507 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1508 rangeCheckRIP32(delta, state, atom, fit);
1509 set32LE(fixUpLocation, delta);
1510 break;
1511 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
1512 assert(_options.outputKind() != Options::kObjectFile);
1513 // TLV entry was optimized away, change movq instruction to a leaq
1514 if ( fixUpLocation[-2] != 0x8B )
1515 throw "TLV load reloc does not point to a movq instruction";
1516 fixUpLocation[-2] = 0x8D;
1517 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1518 rangeCheckRIP32(delta, state, atom, fit);
1519 set32LE(fixUpLocation, delta);
1520 break;
1521 case ld::Fixup::kindStoreTargetAddressARMLoad12:
1522 accumulator = addressOf(state, fit, &toTarget);
1523 // fall into kindStoreARMLoad12 case
1524 case ld::Fixup::kindStoreARMLoad12:
1525 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1526 rangeCheckARM12(delta, state, atom, fit);
1527 instruction = get32LE(fixUpLocation);
1528 if ( delta >= 0 ) {
1529 newInstruction = instruction & 0xFFFFF000;
1530 newInstruction |= ((uint32_t)delta & 0xFFF);
1531 }
1532 else {
1533 newInstruction = instruction & 0xFF7FF000;
1534 newInstruction |= ((uint32_t)(-delta) & 0xFFF);
1535 }
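// ARM LDR/STR immediates are stored as an unsigned 12-bit offset plus the U bit
// (bit 23) selecting add vs. subtract, which is why the negative case above
// clears bit 23 via the 0xFF7FF000 mask.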
1536 set32LE(fixUpLocation, newInstruction);
1537 break;
1538 case ld::Fixup::kindDtraceExtra:
1539 break;
1540 case ld::Fixup::kindStoreX86DtraceCallSiteNop:
1541 if ( _options.outputKind() != Options::kObjectFile ) {
1542 // change call site to a NOP
1543 fixUpLocation[-1] = 0x90; // 1-byte nop
1544 fixUpLocation[0] = 0x0F; // 4-byte nop
1545 fixUpLocation[1] = 0x1F;
1546 fixUpLocation[2] = 0x40;
1547 fixUpLocation[3] = 0x00;
1548 }
1549 break;
1550 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear:
1551 if ( _options.outputKind() != Options::kObjectFile ) {
1552 // change call site to a clear eax
1553 fixUpLocation[-1] = 0x33; // xorl eax,eax
1554 fixUpLocation[0] = 0xC0;
1555 fixUpLocation[1] = 0x90; // 1-byte nop
1556 fixUpLocation[2] = 0x90; // 1-byte nop
1557 fixUpLocation[3] = 0x90; // 1-byte nop
1558 }
1559 break;
1560 case ld::Fixup::kindStoreARMDtraceCallSiteNop:
1561 if ( _options.outputKind() != Options::kObjectFile ) {
1562 // change call site to a NOP
1563 set32LE(fixUpLocation, 0xE1A00000);
1564 }
1565 break;
1566 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear:
1567 if ( _options.outputKind() != Options::kObjectFile ) {
1568 // change call site to 'eor r0, r0, r0'
1569 set32LE(fixUpLocation, 0xE0200000);
1570 }
1571 break;
1572 case ld::Fixup::kindStoreThumbDtraceCallSiteNop:
1573 if ( _options.outputKind() != Options::kObjectFile ) {
1574 // change 32-bit blx call site to two thumb NOPs
1575 set32LE(fixUpLocation, 0x46C046C0);
1576 }
1577 break;
1578 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear:
1579 if ( _options.outputKind() != Options::kObjectFile ) {
1580 // change 32-bit blx call site to 'nop', 'eor r0, r0'
1581 set32LE(fixUpLocation, 0x46C04040);
1582 }
1583 break;
1584 case ld::Fixup::kindStoreARM64DtraceCallSiteNop:
1585 if ( _options.outputKind() != Options::kObjectFile ) {
1586 // change call site to a NOP
1587 set32LE(fixUpLocation, 0xD503201F);
1588 }
1589 break;
1590 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear:
1591 if ( _options.outputKind() != Options::kObjectFile ) {
1592 // change call site to 'MOVZ X0,0'
1593 set32LE(fixUpLocation, 0xD2800000);
1594 }
1595 break;
1596 case ld::Fixup::kindLazyTarget:
1597 case ld::Fixup::kindIslandTarget:
1598 break;
1599 case ld::Fixup::kindSetLazyOffset:
1600 assert(fit->binding == ld::Fixup::bindingDirectlyBound);
1601 accumulator = this->lazyBindingInfoOffsetForLazyPointerAddress(fit->u.target->finalAddress());
1602 break;
1603 case ld::Fixup::kindDataInCodeStartData:
1604 case ld::Fixup::kindDataInCodeStartJT8:
1605 case ld::Fixup::kindDataInCodeStartJT16:
1606 case ld::Fixup::kindDataInCodeStartJT32:
1607 case ld::Fixup::kindDataInCodeStartJTA32:
1608 case ld::Fixup::kindDataInCodeEnd:
1609 break;
1610 case ld::Fixup::kindLinkerOptimizationHint:
1611 // expand table of address/offsets used by hints
1612 lohExtra.addend = fit->u.addend;
1613 usedByHints[fit->offsetInAtom + (lohExtra.info.delta1 << 2)] = NULL;
1614 if ( lohExtra.info.count > 0 )
1615 usedByHints[fit->offsetInAtom + (lohExtra.info.delta2 << 2)] = NULL;
1616 if ( lohExtra.info.count > 1 )
1617 usedByHints[fit->offsetInAtom + (lohExtra.info.delta3 << 2)] = NULL;
1618 if ( lohExtra.info.count > 2 )
1619 usedByHints[fit->offsetInAtom + (lohExtra.info.delta4 << 2)] = NULL;
1620 break;
1621 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
1622 accumulator = addressOf(state, fit, &toTarget);
1623 thumbTarget = targetIsThumb(state, fit);
1624 if ( thumbTarget )
1625 accumulator |= 1;
1626 if ( fit->contentAddendOnly )
1627 accumulator = 0;
1628 rangeCheckAbsolute32(accumulator, state, atom, fit);
1629 set32LE(fixUpLocation, accumulator);
1630 break;
1631 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
1632 accumulator = addressOf(state, fit, &toTarget);
1633 if ( fit->contentAddendOnly )
1634 accumulator = 0;
1635 set64LE(fixUpLocation, accumulator);
1636 break;
1637 case ld::Fixup::kindStoreTargetAddressBigEndian32:
1638 accumulator = addressOf(state, fit, &toTarget);
1639 if ( fit->contentAddendOnly )
1640 accumulator = 0;
1641 set32BE(fixUpLocation, accumulator);
1642 break;
1643 case ld::Fixup::kindStoreTargetAddressBigEndian64:
1644 accumulator = addressOf(state, fit, &toTarget);
1645 if ( fit->contentAddendOnly )
1646 accumulator = 0;
1647 set64BE(fixUpLocation, accumulator);
1648 break;
1649 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32:
1650 accumulator = tlvTemplateOffsetOf(state, fit);
1651 set32LE(fixUpLocation, accumulator);
1652 break;
1653 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64:
1654 accumulator = tlvTemplateOffsetOf(state, fit);
1655 set64LE(fixUpLocation, accumulator);
1656 break;
1657 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
1658 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
1659 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
1660 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
1661 accumulator = addressOf(state, fit, &toTarget);
1662 if ( fit->contentDetlaToAddendOnly )
1663 accumulator = 0;
1664 if ( fit->contentAddendOnly )
1665 delta = 0;
1666 else
1667 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1668 rangeCheckRIP32(delta, state, atom, fit);
1669 set32LE(fixUpLocation, delta);
1670 break;
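// Note on the PCRel32 stores above: the value written is target minus the address of the byte
// after the 4-byte displacement field. With hypothetical addresses, a fixup at 0x1000 whose
// target is 0x2000 stores 0x2000 - (0x1000 + 4) = 0x0FFC, little-endian.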
1671 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
1672 set32LE(fixUpLocation, accumulator);
1673 break;
1674 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA:
1675 // TLV entry was optimized away, change movl instruction to a leal
1676 if ( fixUpLocation[-1] != 0xA1 )
1677 throw "TLV load reloc does not point to a movl <abs-address>,<reg> instruction";
1678 fixUpLocation[-1] = 0xB8;
1679 accumulator = addressOf(state, fit, &toTarget);
1680 set32LE(fixUpLocation, accumulator);
1681 break;
1682 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
1683 // GOT entry was optimized away, change movq instruction to a leaq
1684 if ( fixUpLocation[-2] != 0x8B )
1685 throw "GOT load reloc does not point to a movq instruction";
1686 fixUpLocation[-2] = 0x8D;
1687 accumulator = addressOf(state, fit, &toTarget);
1688 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1689 rangeCheckRIP32(delta, state, atom, fit);
1690 set32LE(fixUpLocation, delta);
1691 break;
1692 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
1693 // TLV entry was optimized away, change movq instruction to a leaq
1694 if ( fixUpLocation[-2] != 0x8B )
1695 throw "TLV load reloc does not point to a movq instruction";
1696 fixUpLocation[-2] = 0x8D;
1697 accumulator = addressOf(state, fit, &toTarget);
1698 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1699 rangeCheckRIP32(delta, state, atom, fit);
1700 set32LE(fixUpLocation, delta);
1701 break;
1702 case ld::Fixup::kindStoreTargetAddressARMBranch24:
1703 accumulator = addressOf(state, fit, &toTarget);
1704 thumbTarget = targetIsThumb(state, fit);
1705 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1706 // Branching to island. If ultimate target is in range, branch there directly.
1707 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd(); islandfit != end; ++islandfit) {
1708 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1709 const ld::Atom* islandTarget = NULL;
1710 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1711 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1712 if ( checkArmBranch24Displacement(delta) ) {
1713 toTarget = islandTarget;
1714 accumulator = islandTargetAddress;
1715 thumbTarget = targetIsThumb(state, islandfit);
1716 }
1717 break;
1718 }
1719 }
1720 }
1721 if ( thumbTarget )
1722 accumulator |= 1;
1723 if ( fit->contentDetlaToAddendOnly )
1724 accumulator = 0;
1725 // fall into kindStoreARMBranch24 case
1726 case ld::Fixup::kindStoreARMBranch24:
1727 // ARM branch offsets are relative to the instruction address + 8
1728 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1729 rangeCheckARMBranch24(delta, state, atom, fit);
1730 instruction = get32LE(fixUpLocation);
1731 // Make sure we are calling arm with bl, thumb with blx
1732 is_bl = ((instruction & 0xFF000000) == 0xEB000000);
1733 is_blx = ((instruction & 0xFE000000) == 0xFA000000);
1734 is_b = !is_blx && ((instruction & 0x0F000000) == 0x0A000000);
1735 if ( (is_bl | is_blx) && thumbTarget ) {
1736 uint32_t opcode = 0xFA000000; // force to be blx
1737 uint32_t disp = (uint32_t)(delta >> 2) & 0x00FFFFFF;
1738 uint32_t h_bit = (uint32_t)(delta << 23) & 0x01000000;
1739 newInstruction = opcode | h_bit | disp;
1740 }
1741 else if ( (is_bl | is_blx) && !thumbTarget ) {
1742 uint32_t opcode = 0xEB000000; // force to be bl
1743 uint32_t disp = (uint32_t)(delta >> 2) & 0x00FFFFFF;
1744 newInstruction = opcode | disp;
1745 }
1746 else if ( is_b && thumbTarget ) {
1747 if ( fit->contentDetlaToAddendOnly )
1748 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(delta >> 2) & 0x00FFFFFF);
1749 else
1750 throwf("no pc-rel bx arm instruction. Can't fix up branch to %s in %s",
1751 referenceTargetAtomName(state, fit), atom->name());
1752 }
1753 else if ( !is_bl && !is_blx && thumbTarget ) {
1754 throwf("don't know how to convert instruction %x referencing %s to thumb",
1755 instruction, referenceTargetAtomName(state, fit));
1756 }
1757 else {
1758 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(delta >> 2) & 0x00FFFFFF);
1759 }
1760 set32LE(fixUpLocation, newInstruction);
1761 break;
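// Illustrative ARM branch encoding (hypothetical delta): with delta = +0x1000, the stored
// 24-bit field is (delta >> 2) & 0x00FFFFFF = 0x000400, so a bl to an ARM target becomes
// 0xEB000400. When the target is Thumb the opcode is forced to blx (0xFA......) and bit 1
// of the delta is moved into the H bit (bit 24), as computed above.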
1762 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
1763 accumulator = addressOf(state, fit, &toTarget);
1764 thumbTarget = targetIsThumb(state, fit);
1765 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1766 // branching to island, so see if ultimate target is in range
1767 // and if so branch to ultimate target instead.
1768 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd(); islandfit != end; ++islandfit) {
1769 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1770 const ld::Atom* islandTarget = NULL;
1771 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1772 if ( !fit->contentDetlaToAddendOnly ) {
1773 if ( targetIsThumb(state, islandfit) ) {
1774 // Thumb to thumb branch, we will be generating a bl instruction.
1775 // Delta is always even, so mask out thumb bit in target.
1776 islandTargetAddress &= -2ULL;
1777 }
1778 else {
1779 // Target is not thumb, we will be generating a blx instruction
1780 // Since blx cannot have the low bit set, set bit[1] of the target to
1781 // bit[1] of the base address, so that the difference is a multiple of
1782 // 4 bytes.
1783 islandTargetAddress &= -3ULL;
1784 islandTargetAddress |= ((atom->finalAddress() + fit->offsetInAtom ) & 2LL);
1785 }
1786 }
1787 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1788 if ( checkThumbBranch22Displacement(delta) ) {
1789 toTarget = islandTarget;
1790 accumulator = islandTargetAddress;
1791 thumbTarget = targetIsThumb(state, islandfit);
1792 }
1793 break;
1794 }
1795 }
1796 }
1797 if ( thumbTarget )
1798 accumulator |= 1;
1799 if ( fit->contentDetlaToAddendOnly )
1800 accumulator = 0;
1801 // fall into kindStoreThumbBranch22 case
1802 case ld::Fixup::kindStoreThumbBranch22:
1803 instruction = get32LE(fixUpLocation);
1804 is_bl = ((instruction & 0xD000F800) == 0xD000F000);
1805 is_blx = ((instruction & 0xD000F800) == 0xC000F000);
1806 is_b = ((instruction & 0xD000F800) == 0x9000F000);
1807 if ( !fit->contentDetlaToAddendOnly ) {
1808 if ( thumbTarget ) {
1809 // Thumb to thumb branch, we will be generating a bl instruction.
1810 // Delta is always even, so mask out thumb bit in target.
1811 accumulator &= -2ULL;
1812 }
1813 else {
1814 // Target is not thumb, we will be generating a blx instruction
1815 // Since blx cannot have the low bit set, set bit[1] of the target to
1816 // bit[1] of the base address, so that the difference is a multiple of
1817 // 4 bytes.
1818 accumulator &= -3ULL;
1819 accumulator |= ((atom->finalAddress() + fit->offsetInAtom ) & 2LL);
1820 }
1821 }
1822 // Thumb branch offsets are relative to the instruction address + 4
1823 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1824 // <rdar://problem/16652542> support bl in very large .o files
1825 if ( fit->contentDetlaToAddendOnly ) {
1826 while ( delta < (-16777216LL) )
1827 delta += 0x2000000;
1828 }
1829 rangeCheckThumbBranch22(delta, state, atom, fit);
1830 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
1831 // The instruction is really two instructions:
1832 // The lower 16 bits are the first instruction, which contains the high
1833 // 11 bits of the displacement.
1834 // The upper 16 bits are the second instruction, which contains the low
1835 // 11 bits of the displacement, as well as differentiating bl and blx.
1836 uint32_t s = (uint32_t)(delta >> 24) & 0x1;
1837 uint32_t i1 = (uint32_t)(delta >> 23) & 0x1;
1838 uint32_t i2 = (uint32_t)(delta >> 22) & 0x1;
1839 uint32_t imm10 = (uint32_t)(delta >> 12) & 0x3FF;
1840 uint32_t imm11 = (uint32_t)(delta >> 1) & 0x7FF;
1841 uint32_t j1 = (i1 == s);
1842 uint32_t j2 = (i2 == s);
1843 if ( is_bl ) {
1844 if ( thumbTarget )
1845 instruction = 0xD000F000; // keep bl
1846 else
1847 instruction = 0xC000F000; // change to blx
1848 }
1849 else if ( is_blx ) {
1850 if ( thumbTarget )
1851 instruction = 0xD000F000; // change to bl
1852 else
1853 instruction = 0xC000F000; // keep blx
1854 }
1855 else if ( is_b ) {
1856 instruction = 0x9000F000; // keep b
1857 if ( !thumbTarget && !fit->contentDetlaToAddendOnly ) {
1858 throwf("armv7 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1859 referenceTargetAtomName(state, fit), atom->name());
1860 }
1861 }
1862 else {
1863 if ( !thumbTarget )
1864 throwf("don't know how to convert branch instruction %x referencing %s to bx",
1865 instruction, referenceTargetAtomName(state, fit));
1866 instruction = 0x9000F000; // keep b
1867 }
1868 uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
1869 uint32_t firstDisp = (s << 10) | imm10;
1870 newInstruction = instruction | (nextDisp << 16) | firstDisp;
1871 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, instruction=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
1872 // s, j1, j2, imm10, imm11, instruction, firstDisp, nextDisp, newInstruction, delta, atom->name(), toTarget->name());
1873 set32LE(fixUpLocation, newInstruction);
1874 }
1875 else {
1876 // The instruction is really two instructions:
1877 // The lower 16 bits are the first instruction, which contains the high
1878 // 11 bits of the displacement.
1879 // The upper 16 bits are the second instruction, which contains the low
1880 // 11 bits of the displacement, as well as differentiating bl and blx.
1881 uint32_t firstDisp = (uint32_t)(delta >> 12) & 0x7FF;
1882 uint32_t nextDisp = (uint32_t)(delta >> 1) & 0x7FF;
1883 if ( is_bl && !thumbTarget ) {
1884 instruction = 0xE800F000;
1885 }
1886 else if ( is_blx && thumbTarget ) {
1887 instruction = 0xF800F000;
1888 }
1889 else if ( is_b ) {
1890 instruction = 0x9000F000; // keep b
1891 if ( !thumbTarget && !fit->contentDetlaToAddendOnly ) {
1892 throwf("armv6 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1893 referenceTargetAtomName(state, fit), atom->name());
1894 }
1895 }
1896 else {
1897 instruction = instruction & 0xF800F800;
1898 }
1899 newInstruction = instruction | (nextDisp << 16) | firstDisp;
1900 set32LE(fixUpLocation, newInstruction);
1901 }
1902 break;
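// Worked Thumb-2 example (hypothetical delta, Thumb-2 path above): delta = +0x2000 gives
// s=0, i1=0, i2=0, imm10=0x002, imm11=0x000 and j1=j2=1, so a bl to a Thumb target is stored
// as the 32-bit little-endian word 0xF800F002, i.e. the halfwords 0xF002 then 0xF800, which
// is the Thumb-2 encoding of a forward bl by 0x2000 bytes.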
1903 case ld::Fixup::kindStoreARMLow16:
1904 {
1905 uint32_t imm4 = (accumulator & 0x0000F000) >> 12;
1906 uint32_t imm12 = accumulator & 0x00000FFF;
1907 instruction = get32LE(fixUpLocation);
1908 newInstruction = (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
1909 set32LE(fixUpLocation, newInstruction);
1910 }
1911 break;
1912 case ld::Fixup::kindStoreARMHigh16:
1913 {
1914 uint32_t imm4 = (accumulator & 0xF0000000) >> 28;
1915 uint32_t imm12 = (accumulator & 0x0FFF0000) >> 16;
1916 instruction = get32LE(fixUpLocation);
1917 newInstruction = (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
1918 set32LE(fixUpLocation, newInstruction);
1919 }
1920 break;
1921 case ld::Fixup::kindStoreThumbLow16:
1922 {
1923 uint32_t imm4 = (accumulator & 0x0000F000) >> 12;
1924 uint32_t i = (accumulator & 0x00000800) >> 11;
1925 uint32_t imm3 = (accumulator & 0x00000700) >> 8;
1926 uint32_t imm8 = accumulator & 0x000000FF;
1927 instruction = get32LE(fixUpLocation);
1928 newInstruction = (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
1929 set32LE(fixUpLocation, newInstruction);
1930 }
1931 break;
1932 case ld::Fixup::kindStoreThumbHigh16:
1933 {
1934 uint32_t imm4 = (accumulator & 0xF0000000) >> 28;
1935 uint32_t i = (accumulator & 0x08000000) >> 27;
1936 uint32_t imm3 = (accumulator & 0x07000000) >> 24;
1937 uint32_t imm8 = (accumulator & 0x00FF0000) >> 16;
1938 instruction = get32LE(fixUpLocation);
1939 newInstruction = (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
1940 set32LE(fixUpLocation, newInstruction);
1941 }
1942 break;
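// The four cases above rewrite the imm16 of a movw/movt pair. The ARM encodings split the
// value as imm4:imm12 (instruction bits 19:16 and 11:0). The Thumb encodings split it as
// imm4:i:imm3:imm8; because the 32-bit word is accessed little-endian with the first
// halfword in the low 16 bits, imm4 lands at bit 0, i at bit 10, imm8 at bit 16 and imm3 at
// bit 28, which is why the shifts above look scrambled.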
1943 #if SUPPORT_ARCH_arm64
1944 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
1945 accumulator = addressOf(state, fit, &toTarget);
1946 // fall into kindStoreARM64Branch26 case
1947 case ld::Fixup::kindStoreARM64Branch26:
1948 if ( fit->contentAddendOnly )
1949 delta = accumulator;
1950 else
1951 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
1952 rangeCheckARM64Branch26(delta, state, atom, fit);
1953 instruction = get32LE(fixUpLocation);
1954 newInstruction = (instruction & 0xFC000000) | ((uint32_t)(delta >> 2) & 0x03FFFFFF);
1955 set32LE(fixUpLocation, newInstruction);
1956 break;
1957 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
1958 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
1959 case ld::Fixup::kindStoreTargetAddressARM64Page21:
1960 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
1961 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
1962 accumulator = addressOf(state, fit, &toTarget);
1963 // fall into kindStoreARM64Page21 case
1964 case ld::Fixup::kindStoreARM64GOTLeaPage21:
1965 case ld::Fixup::kindStoreARM64GOTLoadPage21:
1966 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
1967 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
1968 case ld::Fixup::kindStoreARM64Page21:
1969 {
1970 // the ADRP instruction adds the imm << 12 to the page that the pc is on
1971 if ( fit->contentAddendOnly )
1972 delta = 0;
1973 else
1974 delta = (accumulator & (-4096)) - ((atom->finalAddress() + fit->offsetInAtom) & (-4096));
1975 rangeCheckARM64Page21(delta, state, atom, fit);
1976 instruction = get32LE(fixUpLocation);
1977 uint32_t immhi = (delta >> 9) & (0x00FFFFE0);
1978 uint32_t immlo = (delta << 17) & (0x60000000);
1979 newInstruction = (instruction & 0x9F00001F) | immlo | immhi;
1980 set32LE(fixUpLocation, newInstruction);
1981 }
1982 break;
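// ADRP holds a signed 21-bit page delta: immlo (2 bits) sits at bits 30:29 and immhi
// (19 bits) at bits 23:5. As an illustrative example, a target four 4KB pages ahead gives
// delta = 0x4000, so immlo = 0 and immhi = 0x20 are OR'd into the preserved opcode/Rd bits
// (instruction & 0x9F00001F).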
1983 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
1984 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
1985 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
1986 accumulator = addressOf(state, fit, &toTarget);
1987 // fall into kindStoreARM64PageOff12 case
1988 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
1989 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
1990 case ld::Fixup::kindStoreARM64PageOff12:
1991 {
1992 uint32_t offset = accumulator & 0x00000FFF;
1993 instruction = get32LE(fixUpLocation);
1994 // LDR/STR instructions have an implicit scale factor, so compensate for that
1995 if ( instruction & 0x08000000 ) {
1996 uint32_t implictShift = ((instruction >> 30) & 0x3);
1997 switch ( implictShift ) {
1998 case 0:
1999 if ( (instruction & 0x04800000) == 0x04800000 ) {
2000 // vector and byte LDR/STR have the same "size" bits; check other bits to differentiate
2001 implictShift = 4;
2002 if ( (offset & 0xF) != 0 ) {
2003 throwf("128-bit LDR/STR not 16-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2004 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2005 addressOf(state, fit, &toTarget));
2006 }
2007 }
2008 break;
2009 case 1:
2010 if ( (offset & 0x1) != 0 ) {
2011 throwf("16-bit LDR/STR not 2-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2012 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2013 addressOf(state, fit, &toTarget));
2014 }
2015 break;
2016 case 2:
2017 if ( (offset & 0x3) != 0 ) {
2018 throwf("32-bit LDR/STR not 4-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2019 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2020 addressOf(state, fit, &toTarget));
2021 }
2022 break;
2023 case 3:
2024 if ( (offset & 0x7) != 0 ) {
2025 throwf("64-bit LDR/STR not 8-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2026 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2027 addressOf(state, fit, &toTarget));
2028 }
2029 break;
2030 }
2031 // compensate for implicit scale
2032 offset >>= implictShift;
2033 }
2034 if ( fit->contentAddendOnly )
2035 offset = 0;
2036 uint32_t imm12 = offset << 10;
2037 newInstruction = (instruction & 0xFFC003FF) | imm12;
2038 set32LE(fixUpLocation, newInstruction);
2039 }
2040 break;
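// Illustrative scale example: an 8-byte "ldr x1, [x0, #offset]" has size bits 0b11, so a
// page offset of 0x7A8 is stored as imm12 = 0x7A8 >> 3 = 0xF5 at bits 21:10, and the CPU
// scales it back up by 8 when the load executes. The alignment checks above reject offsets
// that the scaled imm12 could not represent exactly.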
2041 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
2042 accumulator = addressOf(state, fit, &toTarget);
2043 // fall into kindStoreARM64GOTLeaPageOff12 case
2044 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
2045 {
2046 // GOT entry was optimized away, change LDR instruction to an ADD
2047 instruction = get32LE(fixUpLocation);
2048 if ( (instruction & 0xBFC00000) != 0xB9400000 )
2049 throwf("GOT load reloc does not point to a LDR instruction in %s", atom->name());
2050 uint32_t offset = accumulator & 0x00000FFF;
2051 uint32_t imm12 = offset << 10;
2052 newInstruction = 0x91000000 | imm12 | (instruction & 0x000003FF);
2053 set32LE(fixUpLocation, newInstruction);
2054 }
2055 break;
2056 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
2057 accumulator = addressOf(state, fit, &toTarget);
2058 // fall into kindStoreARM64TLVPLoadNowLeaPageOff12 case
2059 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
2060 {
2061 // TLV thunk is in the same linkage unit, so LEA it directly, changing the LDR instruction to an ADD
2062 instruction = get32LE(fixUpLocation);
2063 if ( (instruction & 0xBFC00000) != 0xB9400000 )
2064 throwf("TLV load reloc does not point to a LDR instruction in %s", atom->name());
2065 uint32_t offset = accumulator & 0x00000FFF;
2066 uint32_t imm12 = offset << 10;
2067 newInstruction = 0x91000000 | imm12 | (instruction & 0x000003FF);
2068 set32LE(fixUpLocation, newInstruction);
2069 }
2070 break;
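// In both *NowLea cases above, the original LDR (opcode matching 0xB9400000 under the
// 0xBFC00000 mask) is rewritten as an ADD immediate (0x91000000), keeping the low 10 bits
// (Rd and Rn) of the old instruction: the target turned out to be in the same linkage unit,
// so its address can be materialized directly instead of loading a GOT or TLV descriptor
// pointer.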
2071 case ld::Fixup::kindStoreARM64PointerToGOT:
2072 set64LE(fixUpLocation, accumulator);
2073 break;
2074 case ld::Fixup::kindStoreARM64PCRelToGOT:
2075 if ( fit->contentAddendOnly )
2076 delta = accumulator;
2077 else
2078 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
2079 set32LE(fixUpLocation, delta);
2080 break;
2081 #endif
2082 }
2083 }
2084
2085 #if SUPPORT_ARCH_arm64
2086 // after all fixups are done on atom, if there are potential optimizations, do those
2087 if ( (usedByHints.size() != 0) && (_options.outputKind() != Options::kObjectFile) && !_options.ignoreOptimizationHints() ) {
2088 // fill in second part of usedByHints map, so we can see the target of fixups that might be optimized
2089 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2090 switch ( fit->kind ) {
2091 case ld::Fixup::kindLinkerOptimizationHint:
2092 case ld::Fixup::kindNoneFollowOn:
2093 case ld::Fixup::kindNoneGroupSubordinate:
2094 case ld::Fixup::kindNoneGroupSubordinateFDE:
2095 case ld::Fixup::kindNoneGroupSubordinateLSDA:
2096 case ld::Fixup::kindNoneGroupSubordinatePersonality:
2097 break;
2098 default:
2099 if ( fit->firstInCluster() ) {
2100 std::map<uint32_t, const Fixup*>::iterator pos = usedByHints.find(fit->offsetInAtom);
2101 if ( pos != usedByHints.end() ) {
2102 assert(pos->second == NULL && "two fixups in same hint location");
2103 pos->second = fit;
2104 //fprintf(stderr, "setting %s usedByHints[0x%04X], kind = %d\n", atom->name(), fit->offsetInAtom, fit->kind);
2105 }
2106 }
2107 }
2108 }
2109
2110 // apply hints pass 1
2111 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2112 if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint )
2113 continue;
2114 InstructionInfo infoA;
2115 InstructionInfo infoB;
2116 InstructionInfo infoC;
2117 InstructionInfo infoD;
2118 LoadStoreInfo ldrInfoB, ldrInfoC;
2119 AddInfo addInfoB;
2120 AdrpInfo adrpInfoA;
2121 bool usableSegment;
2122 bool targetFourByteAligned;
2123 bool literalableSize, isADRP, isADD, isLDR, isSTR;
2124 //uint8_t loadSize, destReg;
2125 //uint32_t scaledOffset;
2126 //uint32_t imm12;
2127 ld::Fixup::LOH_arm64 alt;
2128 alt.addend = fit->u.addend;
2129 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta1 << 2), &infoA);
2130 if ( alt.info.count > 0 )
2131 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta2 << 2), &infoB);
2132 if ( alt.info.count > 1 )
2133 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta3 << 2), &infoC);
2134 if ( alt.info.count > 2 )
2135 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta4 << 2), &infoD);
2136
2137 if ( _options.sharedRegionEligible() ) {
2138 if ( _options.sharedRegionEncodingV2() ) {
2139 // In v2 format, all references might be moved at dyld shared cache creation time
2140 usableSegment = false;
2141 }
2142 else {
2143 // In v1 format, only references to something in the __TEXT segment can be optimized
2144 usableSegment = (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0);
2145 }
2146 }
2147 else {
2148 // outputs not eligible for the dyld shared region can optimize any reference
2149 usableSegment = true;
2150 }
2151
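// Transform names used in the comments below, as far as they can be inferred from the code:
// T1 collapses the sequence to a single LDR from a literal, T2 folds the ADD into the
// LDR/STR immediate (ADRP kept), T3 is the ADRP/ADD/LDR form already produced by the GOT
// pass, T4 replaces ADRP+ADD with one ADR, and T5 loads the GOT slot with an LDR literal.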
2152 switch ( alt.info.kind ) {
2153 case LOH_ARM64_ADRP_ADRP:
2154 // processed in pass 2 because some ADRP may have been removed
2155 break;
2156 case LOH_ARM64_ADRP_LDR:
2157 LOH_ASSERT(alt.info.count == 1);
2158 LOH_ASSERT(isPageKind(infoA.fixup));
2159 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2160 LOH_ASSERT(infoA.target == infoB.target);
2161 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2162 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2163 LOH_ASSERT(isADRP);
2164 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2165 // silently ignore LDRs transformed to ADD by TLV pass
2166 if ( !isLDR && infoB.fixup->kind == ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12 )
2167 break;
2168 LOH_ASSERT(isLDR);
2169 LOH_ASSERT(ldrInfoB.baseReg == adrpInfoA.destReg);
2170 LOH_ASSERT(ldrInfoB.offset == (infoA.targetAddress & 0x00000FFF));
2171 literalableSize = ( (ldrInfoB.size != 1) && (ldrInfoB.size != 2) );
2172 targetFourByteAligned = ( (infoA.targetAddress & 0x3) == 0 );
2173 if ( literalableSize && usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2174 set32LE(infoA.instructionContent, makeNOP());
2175 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2176 if ( _options.verboseOptimizationHints() )
2177 fprintf(stderr, "adrp-ldr at 0x%08llX transformed to LDR literal, usableSegment=%d\n", infoB.instructionAddress, usableSegment);
2178 }
2179 else {
2180 if ( _options.verboseOptimizationHints() )
2181 fprintf(stderr, "adrp-ldr at 0x%08llX not transformed, isLDR=%d, literalableSize=%d, inRange=%d, usableSegment=%d, scaledOffset=%d\n",
2182 infoB.instructionAddress, isLDR, literalableSize, withinOneMeg(infoB.instructionAddress, infoA.targetAddress), usableSegment, ldrInfoB.offset);
2183 }
2184 break;
2185 case LOH_ARM64_ADRP_ADD_LDR:
2186 LOH_ASSERT(alt.info.count == 2);
2187 LOH_ASSERT(isPageKind(infoA.fixup));
2188 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2189 LOH_ASSERT(infoC.fixup == NULL);
2190 LOH_ASSERT(infoA.target == infoB.target);
2191 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2192 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2193 LOH_ASSERT(isADRP);
2194 isADD = parseADD(infoB.instruction, addInfoB);
2195 LOH_ASSERT(isADD);
2196 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2197 isLDR = parseLoadOrStore(infoC.instruction, ldrInfoC);
2198 LOH_ASSERT(isLDR);
2199 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2200 targetFourByteAligned = ( ((infoB.targetAddress+ldrInfoC.offset) & 0x3) == 0 );
2201 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2202 if ( literalableSize && usableSegment && targetFourByteAligned && withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2203 // can do T1 transformation to LDR literal
2204 set32LE(infoA.instructionContent, makeNOP());
2205 set32LE(infoB.instructionContent, makeNOP());
2206 set32LE(infoC.instructionContent, makeLDR_literal(ldrInfoC, infoA.targetAddress+ldrInfoC.offset, infoC.instructionAddress));
2207 if ( _options.verboseOptimizationHints() ) {
2208 fprintf(stderr, "adrp-add-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC.instructionAddress);
2209 }
2210 }
2211 else if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2212 // can do T4 transformation and turn ADRP/ADD into ADR
2213 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress+ldrInfoC.offset, infoA.instructionAddress));
2214 set32LE(infoB.instructionContent, makeNOP());
2215 ldrInfoC.offset = 0; // offset is now in ADR instead of ADD or LDR
2216 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2217 set32LE(infoC.instructionContent, infoC.instruction & 0xFFC003FF);
2218 if ( _options.verboseOptimizationHints() )
2219 fprintf(stderr, "adrp-add-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoB.instructionAddress);
2220 }
2221 else if ( ((infoB.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2222 // can do T2 transformation by merging ADD into LD
2223 // Leave ADRP as-is
2224 set32LE(infoB.instructionContent, makeNOP());
2225 ldrInfoC.offset += addInfoB.addend;
2226 ldrInfoC.baseReg = adrpInfoA.destReg;
2227 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2228 if ( _options.verboseOptimizationHints() )
2229 fprintf(stderr, "adrp-add-ldr at 0x%08llX T2 transformed to ADRP/LDR \n", infoC.instructionAddress);
2230 }
2231 else {
2232 if ( _options.verboseOptimizationHints() )
2233 fprintf(stderr, "adrp-add-ldr at 0x%08llX could not be transformed, loadSize=%d, literalableSize=%d, inRange=%d, usableSegment=%d, targetFourByteAligned=%d, imm12=%d\n",
2234 infoC.instructionAddress, ldrInfoC.size, literalableSize, withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset), usableSegment, targetFourByteAligned, ldrInfoC.offset);
2235 }
2236 break;
2237 case LOH_ARM64_ADRP_ADD:
2238 LOH_ASSERT(alt.info.count == 1);
2239 LOH_ASSERT(isPageKind(infoA.fixup));
2240 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2241 LOH_ASSERT(infoA.target == infoB.target);
2242 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2243 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2244 LOH_ASSERT(isADRP);
2245 isADD = parseADD(infoB.instruction, addInfoB);
2246 LOH_ASSERT(isADD);
2247 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2248 if ( usableSegment && withinOneMeg(infoA.targetAddress, infoA.instructionAddress) ) {
2249 // can do T4 transformation and use ADR
2250 set32LE(infoA.instructionContent, makeADR(addInfoB.destReg, infoA.targetAddress, infoA.instructionAddress));
2251 set32LE(infoB.instructionContent, makeNOP());
2252 if ( _options.verboseOptimizationHints() )
2253 fprintf(stderr, "adrp-add at 0x%08llX transformed to ADR\n", infoB.instructionAddress);
2254 }
2255 else {
2256 if ( _options.verboseOptimizationHints() )
2257 fprintf(stderr, "adrp-add at 0x%08llX not transformed, isAdd=%d, inRange=%d, usableSegment=%d\n",
2258 infoB.instructionAddress, isADD, withinOneMeg(infoA.targetAddress, infoA.instructionAddress), usableSegment);
2259 }
2260 break;
2261 case LOH_ARM64_ADRP_LDR_GOT_LDR:
2262 LOH_ASSERT(alt.info.count == 2);
2263 LOH_ASSERT(isPageKind(infoA.fixup, true));
2264 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2265 LOH_ASSERT(infoC.fixup == NULL);
2266 LOH_ASSERT(infoA.target == infoB.target);
2267 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2268 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2269 LOH_ASSERT(isADRP);
2270 isLDR = parseLoadOrStore(infoC.instruction, ldrInfoC);
2271 LOH_ASSERT(isLDR);
2272 isADD = parseADD(infoB.instruction, addInfoB);
2273 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2274 if ( isLDR ) {
2275 // target of GOT is external
2276 LOH_ASSERT(ldrInfoB.size == 8);
2277 LOH_ASSERT(!ldrInfoB.isFloat);
2278 LOH_ASSERT(ldrInfoC.baseReg == ldrInfoB.reg);
2279 //fprintf(stderr, "infoA.target=%p, %s, infoA.targetAddress=0x%08llX\n", infoA.target, infoA.target->name(), infoA.targetAddress);
2280 targetFourByteAligned = ( ((infoA.targetAddress + ldrInfoC.offset) & 0x3) == 0 );
2281 if ( usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2282 // can do T5 transform
2283 set32LE(infoA.instructionContent, makeNOP());
2284 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2285 if ( _options.verboseOptimizationHints() ) {
2286 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T5 transformed to LDR literal of GOT plus LDR\n", infoC.instructionAddress);
2287 }
2288 }
2289 else {
2290 if ( _options.verboseOptimizationHints() )
2291 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX no optimization done\n", infoC.instructionAddress);
2292 }
2293 }
2294 else if ( isADD ) {
2295 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2296 LOH_ASSERT(addInfoB.srcReg == adrpInfoA.destReg);
2297 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2298 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2299 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2300 if ( usableSegment && literalableSize && targetFourByteAligned && withinOneMeg(infoC.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2301 // can do T1 transform
2302 set32LE(infoA.instructionContent, makeNOP());
2303 set32LE(infoB.instructionContent, makeNOP());
2304 set32LE(infoC.instructionContent, makeLDR_literal(ldrInfoC, infoA.targetAddress + ldrInfoC.offset, infoC.instructionAddress));
2305 if ( _options.verboseOptimizationHints() )
2306 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC.instructionAddress);
2307 }
2308 else if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2309 // can do T4 transform
2310 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress, infoA.instructionAddress));
2311 set32LE(infoB.instructionContent, makeNOP());
2312 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2313 if ( _options.verboseOptimizationHints() ) {
2314 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoC.instructionAddress);
2315 }
2316 }
2317 else if ( ((infoA.targetAddress % ldrInfoC.size) == 0) && ((addInfoB.addend + ldrInfoC.offset) < 4096) ) {
2318 // can do T2 transform
2319 set32LE(infoB.instructionContent, makeNOP());
2320 ldrInfoC.baseReg = adrpInfoA.destReg;
2321 ldrInfoC.offset += addInfoB.addend;
2322 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2323 if ( _options.verboseOptimizationHints() ) {
2324 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T2 transformed to ADRP/NOP/LDR\n", infoC.instructionAddress);
2325 }
2326 }
2327 else {
2328 // T3 transform already done by ld::passes:got:doPass()
2329 if ( _options.verboseOptimizationHints() ) {
2330 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T3 transformed to ADRP/ADD/LDR\n", infoC.instructionAddress);
2331 }
2332 }
2333 }
2334 else {
2335 if ( _options.verboseOptimizationHints() )
2336 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX not ADD or LDR\n", infoC.instructionAddress);
2337 }
2338 break;
2339 case LOH_ARM64_ADRP_ADD_STR:
2340 LOH_ASSERT(alt.info.count == 2);
2341 LOH_ASSERT(isPageKind(infoA.fixup));
2342 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2343 LOH_ASSERT(infoC.fixup == NULL);
2344 LOH_ASSERT(infoA.target == infoB.target);
2345 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2346 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2347 LOH_ASSERT(isADRP);
2348 isADD = parseADD(infoB.instruction, addInfoB);
2349 LOH_ASSERT(isADD);
2350 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2351 isSTR = (parseLoadOrStore(infoC.instruction, ldrInfoC) && ldrInfoC.isStore);
2352 LOH_ASSERT(isSTR);
2353 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2354 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2355 // can do T4 transformation and turn ADRP/ADD into ADR
2356 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress+ldrInfoC.offset, infoA.instructionAddress));
2357 set32LE(infoB.instructionContent, makeNOP());
2358 ldrInfoC.offset = 0; // offset is now in ADR instead of ADD or LDR
2359 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2360 set32LE(infoC.instructionContent, infoC.instruction & 0xFFC003FF);
2361 if ( _options.verboseOptimizationHints() )
2362 fprintf(stderr, "adrp-add-str at 0x%08llX T4 transformed to ADR/STR\n", infoB.instructionAddress);
2363 }
2364 else if ( ((infoB.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2365 // can do T2 transformation by merging ADD into STR
2366 // Leave ADRP as-is
2367 set32LE(infoB.instructionContent, makeNOP());
2368 ldrInfoC.offset += addInfoB.addend;
2369 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2370 if ( _options.verboseOptimizationHints() )
2371 fprintf(stderr, "adrp-add-str at 0x%08llX T2 transformed to ADRP/STR \n", infoC.instructionAddress);
2372 }
2373 else {
2374 if ( _options.verboseOptimizationHints() )
2375 fprintf(stderr, "adrp-add-str at 0x%08llX could not be transformed, loadSize=%d, inRange=%d, usableSegment=%d, imm12=%d\n",
2376 infoC.instructionAddress, ldrInfoC.size, withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset), usableSegment, ldrInfoC.offset);
2377 }
2378 break;
2379 case LOH_ARM64_ADRP_LDR_GOT_STR:
2380 LOH_ASSERT(alt.info.count == 2);
2381 LOH_ASSERT(isPageKind(infoA.fixup, true));
2382 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2383 LOH_ASSERT(infoC.fixup == NULL);
2384 LOH_ASSERT(infoA.target == infoB.target);
2385 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2386 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2387 LOH_ASSERT(isADRP);
2388 isSTR = (parseLoadOrStore(infoC.instruction, ldrInfoC) && ldrInfoC.isStore);
2389 LOH_ASSERT(isSTR);
2390 isADD = parseADD(infoB.instruction, addInfoB);
2391 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2392 if ( isLDR ) {
2393 // target of GOT is external
2394 LOH_ASSERT(ldrInfoB.size == 8);
2395 LOH_ASSERT(!ldrInfoB.isFloat);
2396 LOH_ASSERT(ldrInfoC.baseReg == ldrInfoB.reg);
2397 targetFourByteAligned = ( ((infoA.targetAddress + ldrInfoC.offset) & 0x3) == 0 );
2398 if ( usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2399 // can do T5 transform
2400 set32LE(infoA.instructionContent, makeNOP());
2401 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2402 if ( _options.verboseOptimizationHints() ) {
2403 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T5 transformed to LDR literal of GOT plus STR\n", infoC.instructionAddress);
2404 }
2405 }
2406 else {
2407 if ( _options.verboseOptimizationHints() )
2408 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX no optimization done\n", infoC.instructionAddress);
2409 }
2410 }
2411 else if ( isADD ) {
2412 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2413 LOH_ASSERT(addInfoB.srcReg == adrpInfoA.destReg);
2414 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2415 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2416 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2417 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2418 // can do T4 transform
2419 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress, infoA.instructionAddress));
2420 set32LE(infoB.instructionContent, makeNOP());
2421 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2422 if ( _options.verboseOptimizationHints() ) {
2423 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADR/STR\n", infoC.instructionAddress);
2424 }
2425 }
2426 else if ( ((infoA.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2427 // can do T2 transform
2428 set32LE(infoB.instructionContent, makeNOP());
2429 ldrInfoC.baseReg = adrpInfoA.destReg;
2430 ldrInfoC.offset += addInfoB.addend;
2431 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2432 if ( _options.verboseOptimizationHints() ) {
2433 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T2 transformed to ADRP/NOP/STR\n", infoC.instructionAddress);
2434 }
2435 }
2436 else {
2437 // T3 transform already done by ld::passes:got:doPass()
2438 if ( _options.verboseOptimizationHints() ) {
2439 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T3 transformed to ADRP/ADD/STR\n", infoC.instructionAddress);
2440 }
2441 }
2442 }
2443 else {
2444 if ( _options.verboseOptimizationHints() )
2445 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX not ADD or LDR\n", infoC.instructionAddress);
2446 }
2447 break;
2448 case LOH_ARM64_ADRP_LDR_GOT:
2449 LOH_ASSERT(alt.info.count == 1);
2450 LOH_ASSERT(isPageKind(infoA.fixup, true));
2451 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2452 LOH_ASSERT(infoA.target == infoB.target);
2453 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2454 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2455 isADD = parseADD(infoB.instruction, addInfoB);
2456 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2457 if ( isADRP ) {
2458 if ( isLDR ) {
2459 if ( usableSegment && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2460 // can do T5 transform (LDR literal load of GOT)
2461 set32LE(infoA.instructionContent, makeNOP());
2462 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2463 if ( _options.verboseOptimizationHints() ) {
2464 fprintf(stderr, "adrp-ldr-got at 0x%08llX T5 transformed to NOP/LDR\n", infoB.instructionAddress);
2465 }
2466 }
2467 }
2468 else if ( isADD ) {
2469 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2470 // can do T4 transform (ADR to compute local address)
2471 set32LE(infoA.instructionContent, makeADR(addInfoB.destReg, infoA.targetAddress, infoA.instructionAddress));
2472 set32LE(infoB.instructionContent, makeNOP());
2473 if ( _options.verboseOptimizationHints() ) {
2474 fprintf(stderr, "adrp-ldr-got at 0x%08llX T4 transformed to ADR/NOP\n", infoB.instructionAddress);
2475 }
2476 }
2477 }
2478 else {
2479 if ( _options.verboseOptimizationHints() )
2480 fprintf(stderr, "adrp-ldr-got at 0x%08llX not LDR or ADD\n", infoB.instructionAddress);
2481 }
2482 }
2483 else {
2484 if ( _options.verboseOptimizationHints() )
2485 fprintf(stderr, "adrp-ldr-got at 0x%08llX not ADRP\n", infoA.instructionAddress);
2486 }
2487 break;
2488 default:
2489 if ( _options.verboseOptimizationHints() )
2490 fprintf(stderr, "unknown hint kind %d (alt.info.kind) at 0x%08llX\n", alt.info.kind, infoA.instructionAddress);
2491 break;
2492 }
2493 }
2494 // apply hints pass 2
2495 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2496 if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint )
2497 continue;
2498 InstructionInfo infoA;
2499 InstructionInfo infoB;
2500 ld::Fixup::LOH_arm64 alt;
2501 alt.addend = fit->u.addend;
2502 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta1 << 2), &infoA);
2503 if ( alt.info.count > 0 )
2504 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta2 << 2), &infoB);
2505
2506 switch ( alt.info.kind ) {
2507 case LOH_ARM64_ADRP_ADRP:
2508 LOH_ASSERT(isPageKind(infoA.fixup));
2509 LOH_ASSERT(isPageKind(infoB.fixup));
2510 if ( (infoA.instruction & 0x9F000000) != 0x90000000 ) {
2511 if ( _options.verboseOptimizationHints() )
2512 fprintf(stderr, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoA.instructionAddress, infoA.instruction);
2513 sAdrpNA++;
2514 break;
2515 }
2516 if ( (infoB.instruction & 0x9F000000) != 0x90000000 ) {
2517 if ( _options.verboseOptimizationHints() )
2518 fprintf(stderr, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoB.instructionAddress, infoB.instruction);
2519 sAdrpNA++;
2520 break;
2521 }
2522 if ( (infoA.targetAddress & (-4096)) == (infoB.targetAddress & (-4096)) ) {
2523 set32LE(infoB.instructionContent, 0xD503201F);
2524 sAdrpNoped++;
2525 }
2526 else {
2527 sAdrpNotNoped++;
2528 }
2529 break;
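// When both ADRPs compute the same 4KB page the second one is redundant and is overwritten
// with an arm64 NOP (0xD503201F); the sAdrp* counters only feed the (currently
// commented-out) statistics at the end of writeAtoms().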
2530 }
2531 }
2532 }
2533 #endif // SUPPORT_ARCH_arm64
2534
2535 }
2536
2537 void OutputFile::copyNoOps(uint8_t* from, uint8_t* to, bool thumb)
2538 {
2539 switch ( _options.architecture() ) {
2540 case CPU_TYPE_I386:
2541 case CPU_TYPE_X86_64:
2542 for (uint8_t* p=from; p < to; ++p)
2543 *p = 0x90;
2544 break;
2545 case CPU_TYPE_ARM:
2546 if ( thumb ) {
2547 for (uint8_t* p=from; p < to; p += 2)
2548 OSWriteLittleInt16((uint16_t*)p, 0, 0x46c0);
2549 }
2550 else {
2551 for (uint8_t* p=from; p < to; p += 4)
2552 OSWriteLittleInt32((uint32_t*)p, 0, 0xe1a00000);
2553 }
2554 break;
2555 default:
2556 for (uint8_t* p=from; p < to; ++p)
2557 *p = 0x00;
2558 break;
2559 }
2560 }
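// copyNoOps() fills alignment padding between atoms in code sections (see writeAtoms below)
// with architecture-appropriate no-ops: 0x90 on i386/x86_64, the Thumb nop 0x46C0
// ("mov r8,r8") or the ARM nop 0xE1A00000 ("mov r0,r0"), and zero bytes for anything else.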
2561
2562 bool OutputFile::takesNoDiskSpace(const ld::Section* sect)
2563 {
2564 switch ( sect->type() ) {
2565 case ld::Section::typeZeroFill:
2566 case ld::Section::typeTLVZeroFill:
2567 return _options.optimizeZeroFill();
2568 case ld::Section::typePageZero:
2569 case ld::Section::typeStack:
2570 case ld::Section::typeAbsoluteSymbols:
2571 case ld::Section::typeTentativeDefs:
2572 return true;
2573 default:
2574 break;
2575 }
2576 return false;
2577 }
2578
2579 bool OutputFile::hasZeroForFileOffset(const ld::Section* sect)
2580 {
2581 switch ( sect->type() ) {
2582 case ld::Section::typeZeroFill:
2583 case ld::Section::typeTLVZeroFill:
2584 return _options.optimizeZeroFill();
2585 case ld::Section::typePageZero:
2586 case ld::Section::typeStack:
2587 case ld::Section::typeTentativeDefs:
2588 return true;
2589 default:
2590 break;
2591 }
2592 return false;
2593 }
2594
2595 void OutputFile::writeAtoms(ld::Internal& state, uint8_t* wholeBuffer)
2596 {
2597 // have each atom write itself
2598 uint64_t fileOffsetOfEndOfLastAtom = 0;
2599 uint64_t mhAddress = 0;
2600 bool lastAtomUsesNoOps = false;
2601 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
2602 ld::Internal::FinalSection* sect = *sit;
2603 if ( sect->type() == ld::Section::typeMachHeader )
2604 mhAddress = sect->address;
2605 if ( takesNoDiskSpace(sect) )
2606 continue;
2607 const bool sectionUsesNops = (sect->type() == ld::Section::typeCode);
2608 //fprintf(stderr, "file offset=0x%08llX, section %s\n", sect->fileOffset, sect->sectionName());
2609 std::vector<const ld::Atom*>& atoms = sect->atoms;
2610 bool lastAtomWasThumb = false;
2611 for (std::vector<const ld::Atom*>::iterator ait = atoms.begin(); ait != atoms.end(); ++ait) {
2612 const ld::Atom* atom = *ait;
2613 if ( atom->definition() == ld::Atom::definitionProxy )
2614 continue;
2615 try {
2616 uint64_t fileOffset = atom->finalAddress() - sect->address + sect->fileOffset;
2617 // check for alignment padding between atoms
2618 if ( (fileOffset != fileOffsetOfEndOfLastAtom) && lastAtomUsesNoOps ) {
2619 this->copyNoOps(&wholeBuffer[fileOffsetOfEndOfLastAtom], &wholeBuffer[fileOffset], lastAtomWasThumb);
2620 }
2621 // copy atom content
2622 atom->copyRawContent(&wholeBuffer[fileOffset]);
2623 // apply fix ups
2624 this->applyFixUps(state, mhAddress, atom, &wholeBuffer[fileOffset]);
2625 fileOffsetOfEndOfLastAtom = fileOffset+atom->size();
2626 lastAtomUsesNoOps = sectionUsesNops;
2627 lastAtomWasThumb = atom->isThumb();
2628 }
2629 catch (const char* msg) {
2630 if ( atom->file() != NULL )
2631 throwf("%s in '%s' from %s", msg, atom->name(), atom->safeFilePath());
2632 else
2633 throwf("%s in '%s'", msg, atom->name());
2634 }
2635 }
2636 }
2637
2638 if ( _options.verboseOptimizationHints() ) {
2639 //fprintf(stderr, "ADRP optimized away: %d\n", sAdrpNA);
2640 //fprintf(stderr, "ADRPs changed to NOPs: %d\n", sAdrpNoped);
2641 //fprintf(stderr, "ADRPs unchanged: %d\n", sAdrpNotNoped);
2642 }
2643 }
2644
2645 void OutputFile::computeContentUUID(ld::Internal& state, uint8_t* wholeBuffer)
2646 {
2647 const bool log = false;
2648 if ( (_options.outputKind() != Options::kObjectFile) || state.someObjectFileHasDwarf ) {
2649 uint8_t digest[CC_MD5_DIGEST_LENGTH];
2650 std::vector<std::pair<uint64_t, uint64_t>> excludeRegions;
2651 uint64_t bitcodeCmdOffset;
2652 uint64_t bitcodeCmdEnd;
2653 uint64_t bitcodeSectOffset;
2654 uint64_t bitcodePaddingEnd;
2655 if ( _headersAndLoadCommandAtom->bitcodeBundleCommand(bitcodeCmdOffset, bitcodeCmdEnd,
2656 bitcodeSectOffset, bitcodePaddingEnd) ) {
2657 // Exclude embedded bitcode bundle section which contains timestamps in XAR header
2658 // Note the timestamp is in the compressed XML header, which means it might change the size of the
2659 // bitcode section. The load command, which includes the size of the section, and the padding after
2660 // the bitcode section should also be excluded from the UUID computation.
2661 // The bitcode section should appear before LINKEDIT.
2662 // Exclude section cmd
2663 if ( log ) fprintf(stderr, "bundle cmd start=0x%08llX, bundle cmd end=0x%08llX\n",
2664 bitcodeCmdOffset, bitcodeCmdEnd);
2665 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(bitcodeCmdOffset, bitcodeCmdEnd));
2666 // Exclude section content
2667 if ( log ) fprintf(stderr, "bundle start=0x%08llX, bundle end=0x%08llX\n",
2668 bitcodeSectOffset, bitcodePaddingEnd);
2669 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(bitcodeSectOffset, bitcodePaddingEnd));
2670 }
2671 uint32_t stabsStringsOffsetStart;
2672 uint32_t tabsStringsOffsetEnd;
2673 uint32_t stabsOffsetStart;
2674 uint32_t stabsOffsetEnd;
2675 if ( _symbolTableAtom->hasStabs(stabsStringsOffsetStart, tabsStringsOffsetEnd, stabsOffsetStart, stabsOffsetEnd) ) {
2676 // find two areas of file that are stabs info and should not contribute to checksum
2677 uint64_t stringPoolFileOffset = 0;
2678 uint64_t symbolTableFileOffset = 0;
2679 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
2680 ld::Internal::FinalSection* sect = *sit;
2681 if ( sect->type() == ld::Section::typeLinkEdit ) {
2682 if ( strcmp(sect->sectionName(), "__string_pool") == 0 )
2683 stringPoolFileOffset = sect->fileOffset;
2684 else if ( strcmp(sect->sectionName(), "__symbol_table") == 0 )
2685 symbolTableFileOffset = sect->fileOffset;
2686 }
2687 }
2688 uint64_t firstStabNlistFileOffset = symbolTableFileOffset + stabsOffsetStart;
2689 uint64_t lastStabNlistFileOffset = symbolTableFileOffset + stabsOffsetEnd;
2690 uint64_t firstStabStringFileOffset = stringPoolFileOffset + stabsStringsOffsetStart;
2691 uint64_t lastStabStringFileOffset = stringPoolFileOffset + tabsStringsOffsetEnd;
2692 if ( log ) fprintf(stderr, "stabNlist offset=0x%08llX, size=0x%08llX\n", firstStabNlistFileOffset, lastStabNlistFileOffset-firstStabNlistFileOffset);
2693 if ( log ) fprintf(stderr, "stabString offset=0x%08llX, size=0x%08llX\n", firstStabStringFileOffset, lastStabStringFileOffset-firstStabStringFileOffset);
2694 assert(firstStabNlistFileOffset <= firstStabStringFileOffset);
2695 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(firstStabNlistFileOffset, lastStabNlistFileOffset));
2696 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(firstStabStringFileOffset, lastStabStringFileOffset));
2697 // exclude LINKEDIT LC_SEGMENT (size field depends on stabs size)
2698 uint64_t linkeditSegCmdOffset;
2699 uint64_t linkeditSegCmdSize;
2700 _headersAndLoadCommandAtom->linkeditCmdInfo(linkeditSegCmdOffset, linkeditSegCmdSize);
2701 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(linkeditSegCmdOffset, linkeditSegCmdOffset+linkeditSegCmdSize));
2702 if ( log ) fprintf(stderr, "linkedit SegCmdOffset=0x%08llX, size=0x%08llX\n", linkeditSegCmdOffset, linkeditSegCmdSize);
2703 uint64_t symbolTableCmdOffset;
2704 uint64_t symbolTableCmdSize;
2705 _headersAndLoadCommandAtom->symbolTableCmdInfo(symbolTableCmdOffset, symbolTableCmdSize);
2706 excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(symbolTableCmdOffset, symbolTableCmdOffset+symbolTableCmdSize));
2707 if ( log ) fprintf(stderr, "symbolTable CmdOffset=0x%08llX, size=0x%08llX\n", symbolTableCmdOffset, symbolTableCmdSize);
2708 }
2709 if ( !excludeRegions.empty() ) {
2710 CC_MD5_CTX md5state;
2711 CC_MD5_Init(&md5state);
2712 // rdar://problem/19487042 include the output leaf file name in the hash
2713 const char* lastSlash = strrchr(_options.outputFilePath(), '/');
2714 if ( lastSlash != NULL ) {
2715 CC_MD5_Update(&md5state, lastSlash, strlen(lastSlash));
2716 }
2717 std::sort(excludeRegions.begin(), excludeRegions.end());
2718 uint64_t checksumStart = 0;
2719 for ( auto& region : excludeRegions ) {
2720 uint64_t regionStart = region.first;
2721 uint64_t regionEnd = region.second;
2722 assert(checksumStart <= regionStart && regionStart <= regionEnd && "Region overlapped");
2723 if ( log ) fprintf(stderr, "checksum 0x%08llX -> 0x%08llX\n", checksumStart, regionStart);
2724 CC_MD5_Update(&md5state, &wholeBuffer[checksumStart], regionStart - checksumStart);
2725 checksumStart = regionEnd;
2726 }
2727 if ( log ) fprintf(stderr, "checksum 0x%08llX -> 0x%08llX\n", checksumStart, _fileSize);
2728 CC_MD5_Update(&md5state, &wholeBuffer[checksumStart], _fileSize-checksumStart);
2729 CC_MD5_Final(digest, &md5state);
2730 if ( log ) fprintf(stderr, "uuid=%02X, %02X, %02X, %02X, %02X, %02X, %02X, %02X\n", digest[0], digest[1], digest[2],
2731 digest[3], digest[4], digest[5], digest[6], digest[7]);
2732 }
2733 else {
2734 CC_MD5(wholeBuffer, _fileSize, digest);
2735 }
2736 // <rdar://problem/6723729> LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
2737 digest[6] = ( digest[6] & 0x0F ) | ( 3 << 4 );
2738 digest[8] = ( digest[8] & 0x3F ) | 0x80;
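// ( digest[6] & 0x0F ) | ( 3 << 4 ) puts 3 in the version nibble of byte 6 (a name-based,
// MD5-style UUID), and ( digest[8] & 0x3F ) | 0x80 sets the RFC 4122 variant bits ("10") in
// byte 8, so the content hash becomes a well-formed UUID.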
2739 // update buffer with new UUID
2740 _headersAndLoadCommandAtom->setUUID(digest);
2741 _headersAndLoadCommandAtom->recopyUUIDCommand();
2742 }
2743 }
2744
2745 static int sDescriptorOfPathToRemove = -1;
2746 static void removePathAndExit(int sig)
2747 {
2748 if ( sDescriptorOfPathToRemove != -1 ) {
2749 char path[MAXPATHLEN];
2750 if ( ::fcntl(sDescriptorOfPathToRemove, F_GETPATH, path) == 0 )
2751 ::unlink(path);
2752 }
2753 fprintf(stderr, "ld: interrupted\n");
2754 exit(1);
2755 }
2756
2757 void OutputFile::writeOutputFile(ld::Internal& state)
2758 {
2759 // for UNIX conformance, error if file exists and is not writable
2760 if ( (access(_options.outputFilePath(), F_OK) == 0) && (access(_options.outputFilePath(), W_OK) == -1) )
2761 throwf("can't write output file: %s", _options.outputFilePath());
2762
2763 mode_t permissions = 0777;
2764 if ( _options.outputKind() == Options::kObjectFile )
2765 permissions = 0666;
2766 mode_t umask = ::umask(0);
2767 ::umask(umask); // put back the original umask
2768 permissions &= ~umask;
2769 // Calling unlink first assures the file is gone so that open creates it with correct permissions
2770 // It also handles the case where the _options.outputFilePath() file is not writable but its directory is
2771 // And it means we don't have to truncate the file when done writing (in case new is smaller than old)
2772 // Lastly, only delete existing file if it is a normal file (e.g. not /dev/null).
2773 struct stat stat_buf;
2774 bool outputIsRegularFile = false;
2775 bool outputIsMappableFile = false;
2776 if ( stat(_options.outputFilePath(), &stat_buf) != -1 ) {
2777 if (stat_buf.st_mode & S_IFREG) {
2778 outputIsRegularFile = true;
2779 // <rdar://problem/12264302> Don't use mmap on non-hfs volumes
2780 struct statfs fsInfo;
2781 if ( statfs(_options.outputFilePath(), &fsInfo) != -1 ) {
2782 if ( (strcmp(fsInfo.f_fstypename, "hfs") == 0) || (strcmp(fsInfo.f_fstypename, "apfs") == 0) ) {
2783 (void)unlink(_options.outputFilePath());
2784 outputIsMappableFile = true;
2785 }
2786 }
2787 else {
2788 outputIsMappableFile = false;
2789 }
2790 }
2791 else {
2792 outputIsRegularFile = false;
2793 }
2794 }
2795 else {
2796 // stat() failed: the output file does not exist yet, and special files (pipes, devices, etc) must already exist, so it will be created as a regular file
2797 outputIsRegularFile = true;
2798 // check the file system of the directory the output file will be created in
2799 char dirPath[PATH_MAX];
2800 strcpy(dirPath, _options.outputFilePath());
2801 char* end = strrchr(dirPath, '/');
2802 if ( end != NULL ) {
2803 end[1] = '\0';
2804 struct statfs fsInfo;
2805 if ( statfs(dirPath, &fsInfo) != -1 ) {
2806 if ( (strcmp(fsInfo.f_fstypename, "hfs") == 0) || (strcmp(fsInfo.f_fstypename, "apfs") == 0) ) {
2807 outputIsMappableFile = true;
2808 }
2809 }
2810 }
2811 }
2812
2813 //fprintf(stderr, "outputIsMappableFile=%d, outputIsRegularFile=%d, path=%s\n", outputIsMappableFile, outputIsRegularFile, _options.outputFilePath());
2814
2815 int fd;
2816 // Construct a temporary path of the form {outputFilePath}.ld_XXXXXX
2817 const char filenameTemplate[] = ".ld_XXXXXX";
2818 char tmpOutput[PATH_MAX];
2819 uint8_t *wholeBuffer;
2820 if ( outputIsRegularFile && outputIsMappableFile ) {
2821 // <rdar://problem/20959031> ld64 should clean up temporary files on SIGINT
2822 ::signal(SIGINT, removePathAndExit);
2823
2824 strcpy(tmpOutput, _options.outputFilePath());
2825 // If the path is too long to add a suffix for a temporary name then
2826 // just fall back to using the output path.
2827 if (strlen(tmpOutput)+strlen(filenameTemplate) < PATH_MAX) {
2828 strcat(tmpOutput, filenameTemplate);
2829 fd = mkstemp(tmpOutput);
2830 sDescriptorOfPathToRemove = fd;
2831 }
2832 else {
2833 fd = open(tmpOutput, O_RDWR|O_CREAT, permissions);
2834 }
2835 if ( fd == -1 )
2836 throwf("can't open output file for writing '%s', errno=%d", tmpOutput, errno);
2837 if ( ftruncate(fd, _fileSize) == -1 ) {
2838 int err = errno;
2839 unlink(tmpOutput);
2840 if ( err == ENOSPC )
2841 throwf("not enough disk space for writing '%s'", _options.outputFilePath());
2842 else
2843 throwf("can't grow file for writing '%s', errno=%d", _options.outputFilePath(), err);
2844 }
2845
2846 wholeBuffer = (uint8_t *)mmap(NULL, _fileSize, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0);
2847 if ( wholeBuffer == MAP_FAILED )
2848 throwf("can't create buffer of %llu bytes for output", _fileSize);
2849 }
2850 else {
2851 if ( outputIsRegularFile )
2852 fd = open(_options.outputFilePath(), O_RDWR|O_CREAT, permissions);
2853 else
2854 fd = open(_options.outputFilePath(), O_WRONLY);
2855 if ( fd == -1 )
2856 throwf("can't open output file for writing: %s, errno=%d", _options.outputFilePath(), errno);
2857 // try to allocate buffer for entire output file content
2858 wholeBuffer = (uint8_t*)calloc(_fileSize, 1);
2859 if ( wholeBuffer == NULL )
2860 throwf("can't create buffer of %llu bytes for output", _fileSize);
2861 }
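// Two strategies are used above: for regular files on hfs/apfs the image is built directly
// in an mmap'd temporary file (created with mkstemp) which is later chmod'd and rename'd
// over the destination, while for non-mappable file systems and special files the image is
// assembled in a calloc'd buffer and pushed out with a single write().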
2862
2863 if ( _options.UUIDMode() == Options::kUUIDRandom ) {
2864 uint8_t bits[16];
2865 ::uuid_generate_random(bits);
2866 _headersAndLoadCommandAtom->setUUID(bits);
2867 }
2868
2869 writeAtoms(state, wholeBuffer);
2870
2871 // compute UUID
2872 if ( _options.UUIDMode() == Options::kUUIDContent )
2873 computeContentUUID(state, wholeBuffer);
2874
2875 if ( outputIsRegularFile && outputIsMappableFile ) {
2876 if ( ::chmod(tmpOutput, permissions) == -1 ) {
2877 unlink(tmpOutput);
2878 throwf("can't set permissions on output file: %s, errno=%d", tmpOutput, errno);
2879 }
2880 if ( ::rename(tmpOutput, _options.outputFilePath()) == -1 && strcmp(tmpOutput, _options.outputFilePath()) != 0) {
2881 unlink(tmpOutput);
2882 throwf("can't move output file in place, errno=%d", errno);
2883 }
2884 }
2885 else {
2886 if ( ::write(fd, wholeBuffer, _fileSize) == -1 ) {
2887 throwf("can't write to output file: %s, errno=%d", _options.outputFilePath(), errno);
2888 }
2889 sDescriptorOfPathToRemove = -1;
2890 ::close(fd);
2891 // <rdar://problem/13118223> NFS: iOS incremental builds in Xcode 4.6 fail with codesign error
2892 // NFS seems to pad the end of the file sometimes. Calling trunc seems to correct it...
2893 ::truncate(_options.outputFilePath(), _fileSize);
2894 }
2895
2896 // Rename symbol map file if needed
2897 if ( _options.renameReverseSymbolMap() ) {
2898 assert(_options.hideSymbols() && _options.reverseSymbolMapPath() != NULL && "Must hide symbol and specify a path");
2899 uuid_string_t UUIDString;
2900 const uint8_t* rawUUID = _headersAndLoadCommandAtom->getUUID();
2901 uuid_unparse_upper(rawUUID, UUIDString);
2902 char outputMapPath[PATH_MAX];
2903 sprintf(outputMapPath, "%s/%s.bcsymbolmap", _options.reverseSymbolMapPath(), UUIDString);
2904 if ( ::rename(_options.reverseMapTempPath().c_str(), outputMapPath) != 0 )
2905 throwf("could not create bcsymbolmap file: %s", outputMapPath);
2906 }
2907 }
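/* Illustrative sketch (not ld64 code): the write path above prefers an atomic replace on
   hfs/apfs volumes -- fill a mkstemp() temporary beside the destination, then rename() it
   over the final path so no reader ever sees a half-written output.  The real code mmaps
   the temporary instead of buffering and write()ing, but the replace step is the same.
   The helper name and parameters below are invented for this sketch.

       #include <cstdio>      // snprintf, rename
       #include <cstdlib>     // mkstemp
       #include <cstdint>     // uint8_t
       #include <limits.h>    // PATH_MAX
       #include <unistd.h>    // write, close, unlink

       static bool writeAtomically(const char* destPath, const uint8_t* bytes, size_t size)
       {
           char tmpPath[PATH_MAX];
           snprintf(tmpPath, sizeof(tmpPath), "%s.ld_XXXXXX", destPath);  // same suffix as above
           int fd = mkstemp(tmpPath);                       // create a unique temporary file
           if ( fd == -1 )
               return false;
           bool ok = ( ::write(fd, bytes, size) == (ssize_t)size );
           ::close(fd);
           if ( ok && (::rename(tmpPath, destPath) == 0) )  // atomically replace the destination
               return true;
           ::unlink(tmpPath);                               // clean up the temporary on failure
           return false;
       }
*/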
2908
2909 struct AtomByNameSorter
2910 {
2911 bool operator()(const ld::Atom* left, const ld::Atom* right) const
2912 {
2913 return (strcmp(left->name(), right->name()) < 0);
2914 }
2915
2916 bool operator()(const ld::Atom* left, const char* right) const
2917 {
2918 return (strcmp(left->name(), right) < 0);
2919 }
2920
2921 bool operator()(const char* left, const ld::Atom* right) const
2922 {
2923 return (strcmp(left, right->name()) < 0);
2924 }
2925 };
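/* Usage note: the mixed (Atom pointer vs. const char*) overloads above let the sorted atom
   vectors be binary-searched by raw symbol name without building a temporary atom, exactly
   as buildSymbolTable() does below:

       std::sort(_exportedAtoms.begin(), _exportedAtoms.end(), AtomByNameSorter());
       bool alreadyExported = std::binary_search(_exportedAtoms.begin(), _exportedAtoms.end(),
                                                 "_foo", AtomByNameSorter());  // "_foo" is just an example name
*/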
2926
2927
2928 class NotInSet
2929 {
2930 public:
2931 NotInSet(const std::set<const ld::Atom*>& theSet) : _set(theSet) {}
2932
2933 bool operator()(const ld::Atom* atom) const {
2934 return ( _set.count(atom) == 0 );
2935 }
2936 private:
2937 const std::set<const ld::Atom*>& _set;
2938 };
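/* Usage note: NotInSet is the predicate for an erase-remove pass; the kext export-restriction
   code in buildSymbolTable() below uses it to drop import proxies that no fixup references:

       _importedAtoms.erase(std::remove_if(_importedAtoms.begin(), _importedAtoms.end(),
                                           NotInSet(referencedProxyAtoms)),
                            _importedAtoms.end());
*/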
2939
2940
2941 void OutputFile::buildSymbolTable(ld::Internal& state)
2942 {
2943 unsigned int machoSectionIndex = 0;
2944 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
2945 ld::Internal::FinalSection* sect = *sit;
2946 bool setMachoSectionIndex = !sect->isSectionHidden() && (sect->type() != ld::Section::typeTentativeDefs);
2947 if ( setMachoSectionIndex )
2948 ++machoSectionIndex;
2949 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
2950 const ld::Atom* atom = *ait;
2951 if ( setMachoSectionIndex )
2952 (const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex);
2953 else if ( sect->type() == ld::Section::typeMachHeader )
2954 (const_cast<ld::Atom*>(atom))->setMachoSection(1); // __mh_execute_header is not in any section but needs n_sect==1
2955 else if ( sect->type() == ld::Section::typeLastSection )
2956 (const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex); // use section index of previous section
2957 else if ( sect->type() == ld::Section::typeFirstSection )
2958 (const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex+1); // use section index of next section
2959
2960 // in -r mode, resolve symbolTableNotInFinalLinkedImages so those symbols stay in the object file's symbol table
2961 if ( _options.outputKind() == Options::kObjectFile ) {
2962 if ( (_options.architecture() == CPU_TYPE_X86_64)
2963 || (_options.architecture() == CPU_TYPE_ARM64)
2964 ) {
2965 // x86_64 and arm64 .o files need labels on anonymous literal strings
2966 if ( (sect->type() == ld::Section::typeCString) && (atom->combine() == ld::Atom::combineByNameAndContent) ) {
2967 (const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
2968 _localAtoms.push_back(atom);
2969 continue;
2970 }
2971 }
2972 if ( sect->type() == ld::Section::typeCFI ) {
2973 if ( _options.removeEHLabels() )
2974 (const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
2975 else
2976 (const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
2977 }
2978 else if ( sect->type() == ld::Section::typeTempAlias ) {
2979 assert(_options.outputKind() == Options::kObjectFile);
2980 _importedAtoms.push_back(atom);
2981 continue;
2982 }
2983 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages )
2984 (const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
2985 }
2986
2987 // TEMP work around until <rdar://problem/7702923> goes in
2988 if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip)
2989 && (atom->scope() == ld::Atom::scopeLinkageUnit)
2990 && (_options.outputKind() == Options::kDynamicLibrary) ) {
2991 (const_cast<ld::Atom*>(atom))->setScope(ld::Atom::scopeGlobal);
2992 }
2993
2994 // <rdar://problem/6783167> support auto hidden weak symbols: .weak_def_can_be_hidden
2995 if ( atom->autoHide() && (_options.outputKind() != Options::kObjectFile) ) {
2996 // adding auto-hide symbol to .exp file should keep it global
2997 if ( !_options.hasExportMaskList() || !_options.shouldExport(atom->name()) )
2998 (const_cast<ld::Atom*>(atom))->setScope(ld::Atom::scopeLinkageUnit);
2999 }
3000
3001 // <rdar://problem/8626058> ld should consistently warn when resolvers are not exported
3002 if ( (atom->contentType() == ld::Atom::typeResolver) && (atom->scope() == ld::Atom::scopeLinkageUnit) )
3003 warning("resolver functions should be external, but '%s' is hidden", atom->name());
3004
3005 if ( sect->type() == ld::Section::typeImportProxies ) {
3006 if ( atom->combine() == ld::Atom::combineByName )
3007 this->usesWeakExternalSymbols = true;
3008 // alias proxy is a re-export with a name change, don't import changed name
3009 if ( ! atom->isAlias() )
3010 _importedAtoms.push_back(atom);
3011 // scope of proxies is usually linkage unit, so we are done
3012 // if scope is global, we need to re-export it too
3013 if ( atom->scope() == ld::Atom::scopeGlobal )
3014 _exportedAtoms.push_back(atom);
3015 continue;
3016 }
3017 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages ) {
3018 assert(_options.outputKind() != Options::kObjectFile);
3019 continue; // don't add to symbol table
3020 }
3021 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotIn ) {
3022 continue; // don't add to symbol table
3023 }
3024 if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel)
3025 && (_options.outputKind() != Options::kObjectFile) ) {
3026 continue; // don't add to symbol table
3027 }
3028
3029 if ( (atom->definition() == ld::Atom::definitionTentative) && (_options.outputKind() == Options::kObjectFile) ) {
3030 if ( _options.makeTentativeDefinitionsReal() ) {
3031 // -r -d turns tentative definitions into real definitions
3032 _exportedAtoms.push_back(atom);
3033 }
3034 else {
3035 // in mach-o object files tentative definitions are stored like undefined symbols
3036 _importedAtoms.push_back(atom);
3037 }
3038 continue;
3039 }
3040
3041 switch ( atom->scope() ) {
3042 case ld::Atom::scopeTranslationUnit:
3043 if ( _options.keepLocalSymbol(atom->name()) ) {
3044 _localAtoms.push_back(atom);
3045 }
3046 else {
3047 if ( _options.outputKind() == Options::kObjectFile ) {
3048 (const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel);
3049 _localAtoms.push_back(atom);
3050 }
3051 else
3052 (const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
3053 }
3054 break;
3055 case ld::Atom::scopeGlobal:
3056 _exportedAtoms.push_back(atom);
3057 break;
3058 case ld::Atom::scopeLinkageUnit:
3059 if ( _options.outputKind() == Options::kObjectFile ) {
3060 if ( _options.keepPrivateExterns() ) {
3061 _exportedAtoms.push_back(atom);
3062 }
3063 else if ( _options.keepLocalSymbol(atom->name()) ) {
3064 _localAtoms.push_back(atom);
3065 }
3066 else {
3067 (const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel);
3068 _localAtoms.push_back(atom);
3069 }
3070 }
3071 else {
3072 if ( _options.keepLocalSymbol(atom->name()) )
3073 _localAtoms.push_back(atom);
3074 // <rdar://problem/5804214> ld should never have a symbol in the non-lazy indirect symbol table with index 0
3075 // this works by making __mh_execute_header be a local symbol which takes symbol index 0
3076 else if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip) && !_options.makeCompressedDyldInfo() )
3077 _localAtoms.push_back(atom);
3078 else
3079 (const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
3080 }
3081 break;
3082 }
3083 }
3084 }
3085
3086 // <rdar://problem/6978069> ld adds undefined symbol from .exp file to binary
3087 if ( (_options.outputKind() == Options::kKextBundle) && _options.hasExportRestrictList() ) {
3088 // search for referenced undefines
3089 std::set<const ld::Atom*> referencedProxyAtoms;
3090 for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
3091 ld::Internal::FinalSection* sect = *sit;
3092 for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
3093 const ld::Atom* atom = *ait;
3094 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
3095 switch ( fit->binding ) {
3096 case ld::Fixup::bindingsIndirectlyBound:
3097 referencedProxyAtoms.insert(state.indirectBindingTable[fit->u.bindingIndex]);
3098 break;
3099 case ld::Fixup::bindingDirectlyBound:
3100 referencedProxyAtoms.insert(fit->u.target);
3101 break;
3102 default:
3103 break;
3104 }
3105 }
3106 }
3107 }
3108 // remove any unreferenced _importedAtoms
3109 _importedAtoms.erase(std::remove_if(_importedAtoms.begin(), _importedAtoms.end(), NotInSet(referencedProxyAtoms)), _importedAtoms.end());
3110 }
3111
3112 // sort by name
3113 std::sort(_exportedAtoms.begin(), _exportedAtoms.end(), AtomByNameSorter());
3114 std::sort(_importedAtoms.begin(), _importedAtoms.end(), AtomByNameSorter());
3115
3116 std::map<std::string, std::vector<std::string>> addedSymbols;
3117 std::map<std::string, std::vector<std::string>> hiddenSymbols;
3118 for (const auto *atom : _exportedAtoms) {
3119 // The exported symbols have already been sorted. Early exit the loop
3120 // once we see a symbol that is lexicographically past the special
3121 // linker symbol.
3122 if (atom->name()[0] > '$')
3123 break;
3124
3125 std::string name(atom->name());
3126 if (name.rfind("$ld$add$", 7) == 0) {
3127 auto pos = name.find_first_of('$', 10);
3128 if (pos == std::string::npos) {
3129 warning("bad special linker symbol '%s'", atom->name());
3130 continue;
3131 }
3132 auto &&symbolName = name.substr(pos+1);
3133 auto it = addedSymbols.emplace(symbolName, std::initializer_list<std::string>{name});
3134 if (!it.second)
3135 it.first->second.emplace_back(name);
3136 } else if (name.rfind("$ld$hide$", 8) == 0) {
3137 auto pos = name.find_first_of('$', 11);
3138 if (pos == std::string::npos) {
3139 warning("bad special linker symbol '%s'", atom->name());
3140 continue;
3141 }
3142 auto &&symbolName = name.substr(pos+1);
3143 auto it = hiddenSymbols.emplace(symbolName, std::initializer_list<std::string>{name});
3144 if (!it.second)
3145 it.first->second.emplace_back(name);
3146 }
3147 }
3148
3149 for (const auto &it : addedSymbols) {
3150 if (!std::binary_search(_exportedAtoms.begin(), _exportedAtoms.end(), it.first.c_str(), AtomByNameSorter()))
3151 continue;
3152 for (const auto &symbol : it.second)
3153 warning("linker symbol '%s' adds already existing symbol '%s'", symbol.c_str(), it.first.c_str());
3154 }
3155
3156 auto it = hiddenSymbols.begin();
3157 while (it != hiddenSymbols.end()) {
3158 if (std::binary_search(_exportedAtoms.begin(), _exportedAtoms.end(), it->first.c_str(), AtomByNameSorter()))
3159 it = hiddenSymbols.erase(it);
3160 else
3161 ++it;
3162 }
3163
3164 for (const auto &it : hiddenSymbols) {
3165 for (const auto &symbol : it.second)
3166 warning("linker symbol '%s' hides a non-existent symbol '%s'", symbol.c_str(), it.first.c_str());
3167 }
3168 }
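/* Note on the special-symbol scan above: $ld$ symbols follow the pattern
   "$ld$<action>$<condition>$<symbol-name>", e.g. "$ld$hide$os10.6$_foo"; the two actions
   handled here are add and hide.  The loop strips everything through the '$' that ends
   <condition> and keeps only <symbol-name>, then warns when a $ld$add$ names a symbol that
   already exists or a $ld$hide$ names one that does not.  A minimal sketch of the same
   extraction:

       std::string name("$ld$hide$os10.6$_foo");
       std::string::size_type pos = name.find('$', 9);        // '$' terminating <condition>
       std::string symbolName = (pos == std::string::npos) ? std::string()
                                                           : name.substr(pos + 1);  // "_foo"
*/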
3169
3170 void OutputFile::addPreloadLinkEdit(ld::Internal& state)
3171 {
3172 switch ( _options.architecture() ) {
3173 #if SUPPORT_ARCH_i386
3174 case CPU_TYPE_I386:
3175 if ( _hasLocalRelocations ) {
3176 _localRelocsAtom = new LocalRelocationsAtom<x86>(_options, state, *this);
3177 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3178 }
3179 if ( _hasExternalRelocations ) {
3180 _externalRelocsAtom = new ExternalRelocationsAtom<x86>(_options, state, *this);
3181 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3182 }
3183 if ( _hasSymbolTable ) {
3184 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86>(_options, state, *this);
3185 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3186 _symbolTableAtom = new SymbolTableAtom<x86>(_options, state, *this);
3187 symbolTableSection = state.addAtom(*_symbolTableAtom);
3188 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3189 stringPoolSection = state.addAtom(*_stringPoolAtom);
3190 }
3191 break;
3192 #endif
3193 #if SUPPORT_ARCH_x86_64
3194 case CPU_TYPE_X86_64:
3195 if ( _hasLocalRelocations ) {
3196 _localRelocsAtom = new LocalRelocationsAtom<x86_64>(_options, state, *this);
3197 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3198 }
3199 if ( _hasExternalRelocations ) {
3200 _externalRelocsAtom = new ExternalRelocationsAtom<x86_64>(_options, state, *this);
3201 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3202 }
3203 if ( _hasSymbolTable ) {
3204 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86_64>(_options, state, *this);
3205 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3206 _symbolTableAtom = new SymbolTableAtom<x86_64>(_options, state, *this);
3207 symbolTableSection = state.addAtom(*_symbolTableAtom);
3208 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3209 stringPoolSection = state.addAtom(*_stringPoolAtom);
3210 }
3211 break;
3212 #endif
3213 #if SUPPORT_ARCH_arm_any
3214 case CPU_TYPE_ARM:
3215 if ( _hasLocalRelocations ) {
3216 _localRelocsAtom = new LocalRelocationsAtom<arm>(_options, state, *this);
3217 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3218 }
3219 if ( _hasExternalRelocations ) {
3220 _externalRelocsAtom = new ExternalRelocationsAtom<arm>(_options, state, *this);
3221 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3222 }
3223 if ( _hasSymbolTable ) {
3224 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm>(_options, state, *this);
3225 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3226 _symbolTableAtom = new SymbolTableAtom<arm>(_options, state, *this);
3227 symbolTableSection = state.addAtom(*_symbolTableAtom);
3228 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3229 stringPoolSection = state.addAtom(*_stringPoolAtom);
3230 }
3231 break;
3232 #endif
3233 #if SUPPORT_ARCH_arm64
3234 case CPU_TYPE_ARM64:
3235 if ( _hasLocalRelocations ) {
3236 _localRelocsAtom = new LocalRelocationsAtom<arm64>(_options, state, *this);
3237 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3238 }
3239 if ( _hasExternalRelocations ) {
3240 _externalRelocsAtom = new ExternalRelocationsAtom<arm64>(_options, state, *this);
3241 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3242 }
3243 if ( _hasSymbolTable ) {
3244 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm64>(_options, state, *this);
3245 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3246 _symbolTableAtom = new SymbolTableAtom<arm64>(_options, state, *this);
3247 symbolTableSection = state.addAtom(*_symbolTableAtom);
3248 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3249 stringPoolSection = state.addAtom(*_stringPoolAtom);
3250 }
3251 break;
3252 #endif
3253 default:
3254 throw "-preload not supported";
3255 }
3256
3257 }
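/* Note: -preload output gets only the classic LINKEDIT pieces built above (local/external
   relocations, indirect and regular symbol tables, string pool); none of the dyld-info,
   split-seg, function-starts, data-in-code or optimization-hint atoms created in
   addLinkEdit() below apply to this output kind.
*/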
3258
3259
3260 void OutputFile::addLinkEdit(ld::Internal& state)
3261 {
3262 // for historical reasons, -preload orders LINKEDIT content differently
3263 if ( _options.outputKind() == Options::kPreload )
3264 return addPreloadLinkEdit(state);
3265
3266 switch ( _options.architecture() ) {
3267 #if SUPPORT_ARCH_i386
3268 case CPU_TYPE_I386:
3269 if ( _hasSectionRelocations ) {
3270 _sectionsRelocationsAtom = new SectionRelocationsAtom<x86>(_options, state, *this);
3271 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3272 }
3273 if ( _hasDyldInfo ) {
3274 _rebasingInfoAtom = new RebaseInfoAtom<x86>(_options, state, *this);
3275 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3276
3277 _bindingInfoAtom = new BindingInfoAtom<x86>(_options, state, *this);
3278 bindingSection = state.addAtom(*_bindingInfoAtom);
3279
3280 _weakBindingInfoAtom = new WeakBindingInfoAtom<x86>(_options, state, *this);
3281 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3282
3283 _lazyBindingInfoAtom = new LazyBindingInfoAtom<x86>(_options, state, *this);
3284 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3285
3286 _exportInfoAtom = new ExportInfoAtom<x86>(_options, state, *this);
3287 exportSection = state.addAtom(*_exportInfoAtom);
3288 }
3289 if ( _hasLocalRelocations ) {
3290 _localRelocsAtom = new LocalRelocationsAtom<x86>(_options, state, *this);
3291 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3292 }
3293 if ( _hasSplitSegInfo ) {
3294 if ( _options.sharedRegionEncodingV2() )
3295 _splitSegInfoAtom = new SplitSegInfoV2Atom<x86>(_options, state, *this);
3296 else
3297 _splitSegInfoAtom = new SplitSegInfoV1Atom<x86>(_options, state, *this);
3298 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3299 }
3300 if ( _hasFunctionStartsInfo ) {
3301 _functionStartsAtom = new FunctionStartsAtom<x86>(_options, state, *this);
3302 functionStartsSection = state.addAtom(*_functionStartsAtom);
3303 }
3304 if ( _hasDataInCodeInfo ) {
3305 _dataInCodeAtom = new DataInCodeAtom<x86>(_options, state, *this);
3306 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3307 }
3308 if ( _hasOptimizationHints ) {
3309 _optimizationHintsAtom = new OptimizationHintsAtom<x86>(_options, state, *this);
3310 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3311 }
3312 if ( _hasSymbolTable ) {
3313 _symbolTableAtom = new SymbolTableAtom<x86>(_options, state, *this);
3314 symbolTableSection = state.addAtom(*_symbolTableAtom);
3315 }
3316 if ( _hasExternalRelocations ) {
3317 _externalRelocsAtom = new ExternalRelocationsAtom<x86>(_options, state, *this);
3318 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3319 }
3320 if ( _hasSymbolTable ) {
3321 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86>(_options, state, *this);
3322 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3323 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3324 stringPoolSection = state.addAtom(*_stringPoolAtom);
3325 }
3326 break;
3327 #endif
3328 #if SUPPORT_ARCH_x86_64
3329 case CPU_TYPE_X86_64:
3330 if ( _hasSectionRelocations ) {
3331 _sectionsRelocationsAtom = new SectionRelocationsAtom<x86_64>(_options, state, *this);
3332 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3333 }
3334 if ( _hasDyldInfo ) {
3335 _rebasingInfoAtom = new RebaseInfoAtom<x86_64>(_options, state, *this);
3336 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3337
3338 _bindingInfoAtom = new BindingInfoAtom<x86_64>(_options, state, *this);
3339 bindingSection = state.addAtom(*_bindingInfoAtom);
3340
3341 _weakBindingInfoAtom = new WeakBindingInfoAtom<x86_64>(_options, state, *this);
3342 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3343
3344 _lazyBindingInfoAtom = new LazyBindingInfoAtom<x86_64>(_options, state, *this);
3345 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3346
3347 _exportInfoAtom = new ExportInfoAtom<x86_64>(_options, state, *this);
3348 exportSection = state.addAtom(*_exportInfoAtom);
3349 }
3350 if ( _hasLocalRelocations ) {
3351 _localRelocsAtom = new LocalRelocationsAtom<x86_64>(_options, state, *this);
3352 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3353 }
3354 if ( _hasSplitSegInfo ) {
3355 if ( _options.sharedRegionEncodingV2() )
3356 _splitSegInfoAtom = new SplitSegInfoV2Atom<x86_64>(_options, state, *this);
3357 else
3358 _splitSegInfoAtom = new SplitSegInfoV1Atom<x86_64>(_options, state, *this);
3359 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3360 }
3361 if ( _hasFunctionStartsInfo ) {
3362 _functionStartsAtom = new FunctionStartsAtom<x86_64>(_options, state, *this);
3363 functionStartsSection = state.addAtom(*_functionStartsAtom);
3364 }
3365 if ( _hasDataInCodeInfo ) {
3366 _dataInCodeAtom = new DataInCodeAtom<x86_64>(_options, state, *this);
3367 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3368 }
3369 if ( _hasOptimizationHints ) {
3370 _optimizationHintsAtom = new OptimizationHintsAtom<x86_64>(_options, state, *this);
3371 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3372 }
3373 if ( _hasSymbolTable ) {
3374 _symbolTableAtom = new SymbolTableAtom<x86_64>(_options, state, *this);
3375 symbolTableSection = state.addAtom(*_symbolTableAtom);
3376 }
3377 if ( _hasExternalRelocations ) {
3378 _externalRelocsAtom = new ExternalRelocationsAtom<x86_64>(_options, state, *this);
3379 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3380 }
3381 if ( _hasSymbolTable ) {
3382 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86_64>(_options, state, *this);
3383 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3384 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 8);
3385 stringPoolSection = state.addAtom(*_stringPoolAtom);
3386 }
3387 break;
3388 #endif
3389 #if SUPPORT_ARCH_arm_any
3390 case CPU_TYPE_ARM:
3391 if ( _hasSectionRelocations ) {
3392 _sectionsRelocationsAtom = new SectionRelocationsAtom<arm>(_options, state, *this);
3393 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3394 }
3395 if ( _hasDyldInfo ) {
3396 _rebasingInfoAtom = new RebaseInfoAtom<arm>(_options, state, *this);
3397 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3398
3399 _bindingInfoAtom = new BindingInfoAtom<arm>(_options, state, *this);
3400 bindingSection = state.addAtom(*_bindingInfoAtom);
3401
3402 _weakBindingInfoAtom = new WeakBindingInfoAtom<arm>(_options, state, *this);
3403 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3404
3405 _lazyBindingInfoAtom = new LazyBindingInfoAtom<arm>(_options, state, *this);
3406 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3407
3408 _exportInfoAtom = new ExportInfoAtom<arm>(_options, state, *this);
3409 exportSection = state.addAtom(*_exportInfoAtom);
3410 }
3411 if ( _hasLocalRelocations ) {
3412 _localRelocsAtom = new LocalRelocationsAtom<arm>(_options, state, *this);
3413 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3414 }
3415 if ( _hasSplitSegInfo ) {
3416 if ( _options.sharedRegionEncodingV2() )
3417 _splitSegInfoAtom = new SplitSegInfoV2Atom<arm>(_options, state, *this);
3418 else
3419 _splitSegInfoAtom = new SplitSegInfoV1Atom<arm>(_options, state, *this);
3420 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3421 }
3422 if ( _hasFunctionStartsInfo ) {
3423 _functionStartsAtom = new FunctionStartsAtom<arm>(_options, state, *this);
3424 functionStartsSection = state.addAtom(*_functionStartsAtom);
3425 }
3426 if ( _hasDataInCodeInfo ) {
3427 _dataInCodeAtom = new DataInCodeAtom<arm>(_options, state, *this);
3428 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3429 }
3430 if ( _hasOptimizationHints ) {
3431 _optimizationHintsAtom = new OptimizationHintsAtom<arm>(_options, state, *this);
3432 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3433 }
3434 if ( _hasSymbolTable ) {
3435 _symbolTableAtom = new SymbolTableAtom<arm>(_options, state, *this);
3436 symbolTableSection = state.addAtom(*_symbolTableAtom);
3437 }
3438 if ( _hasExternalRelocations ) {
3439 _externalRelocsAtom = new ExternalRelocationsAtom<arm>(_options, state, *this);
3440 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3441 }
3442 if ( _hasSymbolTable ) {
3443 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm>(_options, state, *this);
3444 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3445 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3446 stringPoolSection = state.addAtom(*_stringPoolAtom);
3447 }
3448 break;
3449 #endif
3450 #if SUPPORT_ARCH_arm64
3451 case CPU_TYPE_ARM64:
3452 if ( _hasSectionRelocations ) {
3453 _sectionsRelocationsAtom = new SectionRelocationsAtom<arm64>(_options, state, *this);
3454 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3455 }
3456 if ( _hasDyldInfo ) {
3457 _rebasingInfoAtom = new RebaseInfoAtom<arm64>(_options, state, *this);
3458 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3459
3460 _bindingInfoAtom = new BindingInfoAtom<arm64>(_options, state, *this);
3461 bindingSection = state.addAtom(*_bindingInfoAtom);
3462
3463 _weakBindingInfoAtom = new WeakBindingInfoAtom<arm64>(_options, state, *this);
3464 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3465
3466 _lazyBindingInfoAtom = new LazyBindingInfoAtom<arm64>(_options, state, *this);
3467 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3468
3469 _exportInfoAtom = new ExportInfoAtom<arm64>(_options, state, *this);
3470 exportSection = state.addAtom(*_exportInfoAtom);
3471 }
3472 if ( _hasLocalRelocations ) {
3473 _localRelocsAtom = new LocalRelocationsAtom<arm64>(_options, state, *this);
3474 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3475 }
3476 if ( _hasSplitSegInfo ) {
3477 if ( _options.sharedRegionEncodingV2() )
3478 _splitSegInfoAtom = new SplitSegInfoV2Atom<arm64>(_options, state, *this);
3479 else
3480 _splitSegInfoAtom = new SplitSegInfoV1Atom<arm64>(_options, state, *this);
3481 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3482 }
3483 if ( _hasFunctionStartsInfo ) {
3484 _functionStartsAtom = new FunctionStartsAtom<arm64>(_options, state, *this);
3485 functionStartsSection = state.addAtom(*_functionStartsAtom);
3486 }
3487 if ( _hasDataInCodeInfo ) {
3488 _dataInCodeAtom = new DataInCodeAtom<arm64>(_options, state, *this);
3489 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3490 }
3491 if ( _hasOptimizationHints ) {
3492 _optimizationHintsAtom = new OptimizationHintsAtom<arm64>(_options, state, *this);
3493 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3494 }
3495 if ( _hasSymbolTable ) {
3496 _symbolTableAtom = new SymbolTableAtom<arm64>(_options, state, *this);
3497 symbolTableSection = state.addAtom(*_symbolTableAtom);
3498 }
3499 if ( _hasExternalRelocations ) {
3500 _externalRelocsAtom = new ExternalRelocationsAtom<arm64>(_options, state, *this);
3501 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3502 }
3503 if ( _hasSymbolTable ) {
3504 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm64>(_options, state, *this);
3505 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3506 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3507 stringPoolSection = state.addAtom(*_stringPoolAtom);
3508 }
3509 break;
3510 #endif
3511 default:
3512 throw "unknown architecture";
3513 }
3514 }
3515
3516 void OutputFile::addLoadCommands(ld::Internal& state)
3517 {
3518 switch ( _options.architecture() ) {
3519 #if SUPPORT_ARCH_x86_64
3520 case CPU_TYPE_X86_64:
3521 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<x86_64>(_options, state, *this);
3522 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3523 break;
3524 #endif
3525 #if SUPPORT_ARCH_arm_any
3526 case CPU_TYPE_ARM:
3527 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<arm>(_options, state, *this);
3528 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3529 break;
3530 #endif
3531 #if SUPPORT_ARCH_arm64
3532 case CPU_TYPE_ARM64:
3533 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<arm64>(_options, state, *this);
3534 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3535 break;
3536 #endif
3537 #if SUPPORT_ARCH_i386
3538 case CPU_TYPE_I386:
3539 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<x86>(_options, state, *this);
3540 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3541 break;
3542 #endif
3543 default:
3544 throw "unknown architecture";
3545 }
3546 }
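/* Note: the Mach-O header and all load commands are modeled as a single atom
   (HeaderAndLoadCommandsAtom<A>, one instantiation per architecture) and added to the state
   like any other content, so later layout and writing passes treat them uniformly.
*/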
3547
3548 uint32_t OutputFile::dylibCount()
3549 {
3550 return _dylibsToLoad.size();
3551 }
3552
3553 const ld::dylib::File* OutputFile::dylibByOrdinal(unsigned int ordinal)
3554 {
3555 assert( ordinal > 0 );
3556 assert( ordinal <= _dylibsToLoad.size() );
3557 return _dylibsToLoad[ordinal-1];
3558 }
3559
3560 bool OutputFile::hasOrdinalForInstallPath(const char* path, int* ordinal)
3561 {
3562 for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3563 const char* installPath = it->first->installPath();
3564 if ( (installPath != NULL) && (strcmp(path, installPath) == 0) ) {
3565 *ordinal = it->second;
3566 return true;
3567 }
3568 }
3569 return false;
3570 }
3571
3572 uint32_t OutputFile::dylibToOrdinal(const ld::dylib::File* dylib)
3573 {
3574 return _dylibToOrdinal[dylib];
3575 }
3576
3577
3578 void OutputFile::buildDylibOrdinalMapping(ld::Internal& state)
3579 {
3580 // count non-public re-exported dylibs
3581 unsigned int nonPublicReExportCount = 0;
3582 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3583 ld::dylib::File* aDylib = *it;
3584 if ( aDylib->willBeReExported() && ! aDylib->hasPublicInstallName() )
3585 ++nonPublicReExportCount;
3586 }
3587
3588 // look at each dylib supplied in state
3589 bool hasReExports = false;
3590 bool haveLazyDylibs = false;
3591 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3592 ld::dylib::File* aDylib = *it;
3593 int ordinal;
3594 if ( aDylib == state.bundleLoader ) {
3595 _dylibToOrdinal[aDylib] = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE;
3596 }
3597 else if ( this->hasOrdinalForInstallPath(aDylib->installPath(), &ordinal) ) {
3598 // already have a dylib with that install path, map all uses to that ordinal
3599 _dylibToOrdinal[aDylib] = ordinal;
3600 }
3601 else if ( aDylib->willBeLazyLoadedDylib() ) {
3602 // all lazy dylibs need to be at the end of the ordinals
3603 haveLazyDylibs = true;
3604 }
3605 else if ( aDylib->willBeReExported() && ! aDylib->hasPublicInstallName() && (nonPublicReExportCount >= 2) ) {
3606 _dylibsToLoad.push_back(aDylib);
3607 _dylibToOrdinal[aDylib] = BIND_SPECIAL_DYLIB_SELF;
3608 }
3609 else {
3610 // first time this install path seen, create new ordinal
3611 _dylibsToLoad.push_back(aDylib);
3612 _dylibToOrdinal[aDylib] = _dylibsToLoad.size();
3613 }
3614 if ( aDylib->explicitlyLinked() && aDylib->willBeReExported() )
3615 hasReExports = true;
3616 }
3617 if ( haveLazyDylibs ) {
3618 // second pass to determine ordinals for lazy loaded dylibs
3619 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3620 ld::dylib::File* aDylib = *it;
3621 if ( aDylib->willBeLazyLoadedDylib() ) {
3622 int ordinal;
3623 if ( this->hasOrdinalForInstallPath(aDylib->installPath(), &ordinal) ) {
3624 // already have a dylib with that install path, map all uses to that ordinal
3625 _dylibToOrdinal[aDylib] = ordinal;
3626 }
3627 else {
3628 // first time this install path seen, create new ordinal
3629 _dylibsToLoad.push_back(aDylib);
3630 _dylibToOrdinal[aDylib] = _dylibsToLoad.size();
3631 }
3632 }
3633 }
3634 }
3635 _noReExportedDylibs = !hasReExports;
3636 //fprintf(stderr, "dylibs:\n");
3637 //for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3638 // fprintf(stderr, " %p ord=%u, install_name=%s\n",it->first, it->second, it->first->installPath());
3639 //}
3640 }
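/* Reference: the special ordinals used above are the <mach-o/loader.h> values
       BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE  (-1)  -- the -bundle_loader executable
       BIND_SPECIAL_DYLIB_FLAT_LOOKUP      (-2)  -- flat-namespace lookup
       BIND_SPECIAL_DYLIB_SELF              (0)  -- this image itself
   while ordinary dylibs get positive, 1-based ordinals in _dylibsToLoad order
   (see dylibByOrdinal() above).
*/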
3641
3642 uint32_t OutputFile::lazyBindingInfoOffsetForLazyPointerAddress(uint64_t lpAddress)
3643 {
3644 return _lazyPointerAddressToInfoOffset[lpAddress];
3645 }
3646
3647 void OutputFile::setLazyBindingInfoOffset(uint64_t lpAddress, uint32_t lpInfoOffset)
3648 {
3649 _lazyPointerAddressToInfoOffset[lpAddress] = lpInfoOffset;
3650 }
3651
3652 int OutputFile::compressedOrdinalForAtom(const ld::Atom* target)
3653 {
3654 // flat namespace images use the special flat-lookup ordinal for all symbols
3655 if ( _options.nameSpace() != Options::kTwoLevelNameSpace )
3656 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3657
3658 // handle -interposable
3659 if ( target->definition() == ld::Atom::definitionRegular )
3660 return BIND_SPECIAL_DYLIB_SELF;
3661
3662 // regular ordinal
3663 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
3664 if ( dylib != NULL ) {
3665 std::map<const ld::dylib::File*, int>::iterator pos = _dylibToOrdinal.find(dylib);
3666 if ( pos != _dylibToOrdinal.end() )
3667 return pos->second;
3668 assert(0 && "dylib not assigned ordinal");
3669 }
3670
3671 // handle undefined dynamic_lookup
3672 if ( _options.undefinedTreatment() == Options::kUndefinedDynamicLookup )
3673 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3674
3675 // handle -U _foo
3676 if ( _options.allowedUndefined(target->name()) )
3677 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3678
3679 throw "can't find ordinal for imported symbol";
3680 }
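/* Summary of the ordinal choice above: flat-namespace links always bind with the flat-lookup
   ordinal; a regular (non-proxy) definition binds to SELF, which is how -interposable symbols
   are expressed; a proxy binds with its dylib's ordinal from _dylibToOrdinal; and undefined
   symbols allowed by -undefined dynamic_lookup or -U fall back to flat lookup.
*/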
3681
3682
3683 bool OutputFile::isPcRelStore(ld::Fixup::Kind kind)
3684 {
3685 switch ( kind ) {
3686 case ld::Fixup::kindStoreX86BranchPCRel8:
3687 case ld::Fixup::kindStoreX86BranchPCRel32:
3688 case ld::Fixup::kindStoreX86PCRel8:
3689 case ld::Fixup::kindStoreX86PCRel16:
3690 case ld::Fixup::kindStoreX86PCRel32:
3691 case ld::Fixup::kindStoreX86PCRel32_1:
3692 case ld::Fixup::kindStoreX86PCRel32_2:
3693 case ld::Fixup::kindStoreX86PCRel32_4:
3694 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
3695 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
3696 case ld::Fixup::kindStoreX86PCRel32GOT:
3697 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
3698 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
3699 case ld::Fixup::kindStoreARMBranch24:
3700 case ld::Fixup::kindStoreThumbBranch22:
3701 case ld::Fixup::kindStoreARMLoad12:
3702 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
3703 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
3704 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
3705 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
3706 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
3707 case ld::Fixup::kindStoreTargetAddressARMBranch24:
3708 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
3709 case ld::Fixup::kindStoreTargetAddressARMLoad12:
3710 #if SUPPORT_ARCH_arm64
3711 case ld::Fixup::kindStoreARM64Page21:
3712 case ld::Fixup::kindStoreARM64PageOff12:
3713 case ld::Fixup::kindStoreARM64GOTLoadPage21:
3714 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
3715 case ld::Fixup::kindStoreARM64GOTLeaPage21:
3716 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
3717 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
3718 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
3719 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
3720 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
3721 case ld::Fixup::kindStoreARM64PCRelToGOT:
3722 case ld::Fixup::kindStoreTargetAddressARM64Page21:
3723 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
3724 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
3725 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
3726 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
3727 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
3728 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
3729 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
3730 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
3731 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
3732 #endif
3733 return true;
3734 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
3735 #if SUPPORT_ARCH_arm64
3736 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
3737 #endif
3738 return (_options.outputKind() != Options::kKextBundle);
3739 default:
3740 break;
3741 }
3742 return false;
3743 }
3744
3745 bool OutputFile::isStore(ld::Fixup::Kind kind)
3746 {
3747 switch ( kind ) {
3748 case ld::Fixup::kindNone:
3749 case ld::Fixup::kindNoneFollowOn:
3750 case ld::Fixup::kindNoneGroupSubordinate:
3751 case ld::Fixup::kindNoneGroupSubordinateFDE:
3752 case ld::Fixup::kindNoneGroupSubordinateLSDA:
3753 case ld::Fixup::kindNoneGroupSubordinatePersonality:
3754 case ld::Fixup::kindSetTargetAddress:
3755 case ld::Fixup::kindSubtractTargetAddress:
3756 case ld::Fixup::kindAddAddend:
3757 case ld::Fixup::kindSubtractAddend:
3758 case ld::Fixup::kindSetTargetImageOffset:
3759 case ld::Fixup::kindSetTargetSectionOffset:
3760 return false;
3761 default:
3762 break;
3763 }
3764 return true;
3765 }
3766
3767
3768 bool OutputFile::setsTarget(ld::Fixup::Kind kind)
3769 {
3770 switch ( kind ) {
3771 case ld::Fixup::kindSetTargetAddress:
3772 case ld::Fixup::kindLazyTarget:
3773 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
3774 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
3775 case ld::Fixup::kindStoreTargetAddressBigEndian32:
3776 case ld::Fixup::kindStoreTargetAddressBigEndian64:
3777 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
3778 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
3779 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
3780 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
3781 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
3782 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
3783 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
3784 case ld::Fixup::kindStoreTargetAddressARMBranch24:
3785 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
3786 case ld::Fixup::kindStoreTargetAddressARMLoad12:
3787 #if SUPPORT_ARCH_arm64
3788 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
3789 case ld::Fixup::kindStoreTargetAddressARM64Page21:
3790 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
3791 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
3792 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
3793 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
3794 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
3795 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
3796 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
3797 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
3798 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
3799 #endif
3800 return true;
3801 case ld::Fixup::kindStoreX86DtraceCallSiteNop:
3802 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear:
3803 case ld::Fixup::kindStoreARMDtraceCallSiteNop:
3804 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear:
3805 case ld::Fixup::kindStoreARM64DtraceCallSiteNop:
3806 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear:
3807 case ld::Fixup::kindStoreThumbDtraceCallSiteNop:
3808 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear:
3809 return (_options.outputKind() == Options::kObjectFile);
3810 default:
3811 break;
3812 }
3813 return false;
3814 }
3815
3816 bool OutputFile::isPointerToTarget(ld::Fixup::Kind kind)
3817 {
3818 switch ( kind ) {
3819 case ld::Fixup::kindSetTargetAddress:
3820 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
3821 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
3822 case ld::Fixup::kindStoreTargetAddressBigEndian32:
3823 case ld::Fixup::kindStoreTargetAddressBigEndian64:
3824 case ld::Fixup::kindLazyTarget:
3825 return true;
3826 default:
3827 break;
3828 }
3829 return false;
3830 }
3831 bool OutputFile::isPointerFromTarget(ld::Fixup::Kind kind)
3832 {
3833 switch ( kind ) {
3834 case ld::Fixup::kindSubtractTargetAddress:
3835 return true;
3836 default:
3837 break;
3838 }
3839 return false;
3840 }
3841
3842
3843 uint64_t OutputFile::lookBackAddend(ld::Fixup::iterator fit)
3844 {
3845 uint64_t addend = 0;
3846 switch ( fit->clusterSize ) {
3847 case ld::Fixup::k1of1:
3848 case ld::Fixup::k1of2:
3849 case ld::Fixup::k2of2:
3850 break;
3851 case ld::Fixup::k2of3:
3852 --fit;
3853 switch ( fit->kind ) {
3854 case ld::Fixup::kindAddAddend:
3855 addend += fit->u.addend;
3856 break;
3857 case ld::Fixup::kindSubtractAddend:
3858 addend -= fit->u.addend;
3859 break;
3860 default:
3861 throw "unexpected fixup kind for binding";
3862 }
3863 break;
3864 case ld::Fixup::k1of3:
3865 ++fit;
3866 switch ( fit->kind ) {
3867 case ld::Fixup::kindAddAddend:
3868 addend += fit->u.addend;
3869 break;
3870 case ld::Fixup::kindSubtractAddend:
3871 addend -= fit->u.addend;
3872 break;
3873 default:
3874 throw "unexpected fixup kind for binding";
3875 }
3876 break;
3877 default:
3878 throw "unexpected fixup cluster size for binding";
3879 }
3880 return addend;
3881 }
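/* Note: the fixups that describe one store form a small cluster (k1of1 ... k3of3, etc.); the
   helper above looks at the neighbouring fixup -- backwards from a k2of3, forwards from a
   k1of3 -- to pick up an optional add/subtract addend.  For example, a pointer to "_foo + 8"
   is typically encoded as a 3-fixup cluster (a sketch; the exact store kind varies by
   architecture and pointer size):

       k1of3  kindSetTargetAddress     target = _foo
       k2of3  kindAddAddend            addend = 8
       k3of3  kindStoreLittleEndian64
*/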
3882
3883
3884 void OutputFile::generateLinkEditInfo(ld::Internal& state)
3885 {
3886 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
3887 ld::Internal::FinalSection* sect = *sit;
3888 // record end of last __TEXT section for encrypted iPhoneOS apps
3889 if ( _options.makeEncryptable() && (strcmp(sect->segmentName(), "__TEXT") == 0) && (strcmp(sect->sectionName(), "__oslogstring") != 0) ) {
3890 _encryptedTEXTendOffset = pageAlign(sect->fileOffset + sect->size);
3891 }
3892 bool objc1ClassRefSection = ( (sect->type() == ld::Section::typeCStringPointer)
3893 && (strcmp(sect->sectionName(), "__cls_refs") == 0)
3894 && (strcmp(sect->segmentName(), "__OBJC") == 0) );
3895 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
3896 const ld::Atom* atom = *ait;
3897
3898 // Record regular atoms that override a dylib's weak definitions
3899 if ( (atom->scope() == ld::Atom::scopeGlobal) && atom->overridesDylibsWeakDef() ) {
3900 if ( _options.makeCompressedDyldInfo() ) {
3901 uint8_t wtype = BIND_TYPE_OVERRIDE_OF_WEAKDEF_IN_DYLIB;
3902 bool nonWeakDef = (atom->combine() == ld::Atom::combineNever);
3903 _weakBindingInfo.push_back(BindingInfo(wtype, atom->name(), nonWeakDef, atom->finalAddress(), 0));
3904 }
3905 this->overridesWeakExternalSymbols = true;
3906 if ( _options.warnWeakExports() )
3907 warning("overrides weak external symbol: %s", atom->name());
3908 }
3909
3910 ld::Fixup* fixupWithTarget = NULL;
3911 ld::Fixup* fixupWithMinusTarget = NULL;
3912 ld::Fixup* fixupWithStore = NULL;
3913 ld::Fixup* fixupWithAddend = NULL;
3914 const ld::Atom* target = NULL;
3915 const ld::Atom* minusTarget = NULL;
3916 uint64_t targetAddend = 0;
3917 uint64_t minusTargetAddend = 0;
3918 for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
3919 if ( fit->firstInCluster() ) {
3920 fixupWithTarget = NULL;
3921 fixupWithMinusTarget = NULL;
3922 fixupWithStore = NULL;
3923 target = NULL;
3924 minusTarget = NULL;
3925 targetAddend = 0;
3926 minusTargetAddend = 0;
3927 }
3928 if ( this->setsTarget(fit->kind) ) {
3929 switch ( fit->binding ) {
3930 case ld::Fixup::bindingNone:
3931 case ld::Fixup::bindingByNameUnbound:
3932 break;
3933 case ld::Fixup::bindingByContentBound:
3934 case ld::Fixup::bindingDirectlyBound:
3935 fixupWithTarget = fit;
3936 target = fit->u.target;
3937 break;
3938 case ld::Fixup::bindingsIndirectlyBound:
3939 fixupWithTarget = fit;
3940 target = state.indirectBindingTable[fit->u.bindingIndex];
3941 break;
3942 }
3943 assert(target != NULL);
3944 }
3945 switch ( fit->kind ) {
3946 case ld::Fixup::kindAddAddend:
3947 targetAddend = fit->u.addend;
3948 fixupWithAddend = fit;
3949 break;
3950 case ld::Fixup::kindSubtractAddend:
3951 minusTargetAddend = fit->u.addend;
3952 fixupWithAddend = fit;
3953 break;
3954 case ld::Fixup::kindSubtractTargetAddress:
3955 switch ( fit->binding ) {
3956 case ld::Fixup::bindingNone:
3957 case ld::Fixup::bindingByNameUnbound:
3958 break;
3959 case ld::Fixup::bindingByContentBound:
3960 case ld::Fixup::bindingDirectlyBound:
3961 fixupWithMinusTarget = fit;
3962 minusTarget = fit->u.target;
3963 break;
3964 case ld::Fixup::bindingsIndirectlyBound:
3965 fixupWithMinusTarget = fit;
3966 minusTarget = state.indirectBindingTable[fit->u.bindingIndex];
3967 break;
3968 }
3969 assert(minusTarget != NULL);
3970 break;
3971 case ld::Fixup::kindDataInCodeStartData:
3972 case ld::Fixup::kindDataInCodeStartJT8:
3973 case ld::Fixup::kindDataInCodeStartJT16:
3974 case ld::Fixup::kindDataInCodeStartJT32:
3975 case ld::Fixup::kindDataInCodeStartJTA32:
3976 case ld::Fixup::kindDataInCodeEnd:
3977 hasDataInCode = true;
3978 break;
3979 default:
3980 break;
3981 }
3982 if ( this->isStore(fit->kind) ) {
3983 fixupWithStore = fit;
3984 }
3985 if ( fit->lastInCluster() ) {
3986 if ( (fixupWithStore != NULL) && (target != NULL) ) {
3987 if ( _options.outputKind() == Options::kObjectFile ) {
3988 this->addSectionRelocs(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithAddend, fixupWithStore,
3989 target, minusTarget, targetAddend, minusTargetAddend);
3990 }
3991 else {
3992 if ( _options.makeCompressedDyldInfo() ) {
3993 this->addDyldInfo(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithStore,
3994 target, minusTarget, targetAddend, minusTargetAddend);
3995 }
3996 else {
3997 this->addClassicRelocs(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithStore,
3998 target, minusTarget, targetAddend, minusTargetAddend);
3999 }
4000 }
4001 }
4002 else if ( objc1ClassRefSection && (target != NULL) && (fixupWithStore == NULL) ) {
4003 // check for class refs to lazy loaded dylibs
4004 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
4005 if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
4006 throwf("illegal class reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
4007 }
4008 }
4009 }
4010 }
4011 }
4012 }
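/* Summary of the dispatch above: when a fixup cluster completes, its target/addend
   information is recorded in exactly one LINKEDIT form -- addSectionRelocs() for -r object
   files, addDyldInfo() when compressed dyld info (LC_DYLD_INFO) is being emitted, or
   addClassicRelocs() for classic local/external relocations otherwise.
*/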
4013
4014
4015 void OutputFile::noteTextReloc(const ld::Atom* atom, const ld::Atom* target)
4016 {
4017 if ( (atom->contentType() == ld::Atom::typeStub) || (atom->contentType() == ld::Atom::typeStubHelper) ) {
4018 // silently let stubs (synthesized by linker) use text relocs
4019 }
4020 else if ( _options.allowTextRelocs() ) {
4021 if ( _options.warnAboutTextRelocs() )
4022 warning("text reloc in %s to %s", atom->name(), target->name());
4023 }
4024 else if ( _options.positionIndependentExecutable() && (_options.outputKind() == Options::kDynamicExecutable)
4025 && ((_options.iOSVersionMin() >= ld::iOS_4_3) || (_options.macosxVersionMin() >= ld::mac10_7)) ) {
4026 if ( ! this->pieDisabled ) {
4027 switch ( _options.architecture()) {
4028 #if SUPPORT_ARCH_arm64
4029 case CPU_TYPE_ARM64:
4030 #endif
4031 #if SUPPORT_ARCH_arm64
4032 {
4033 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
4034 throwf("Absolute addressing not allowed in arm64 code but used in '%s' referencing '%s'", demangledName, _options.demangleSymbol(target->name()));
4035 }
4036 #endif
4037 default:
4038 warning("PIE disabled. Absolute addressing (perhaps -mdynamic-no-pic) not allowed in code signed PIE, "
4039 "but used in %s from %s. "
4040 "To fix this warning, don't compile with -mdynamic-no-pic or link with -Wl,-no_pie",
4041 atom->name(), atom->safeFilePath());
4042 }
4043 }
4044 this->pieDisabled = true;
4045 }
4046 else if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) ) {
4047 throwf("illegal text-relocation (direct reference) to (global,weak) %s in %s from %s in %s", target->name(), target->safeFilePath(), atom->name(), atom->safeFilePath());
4048 }
4049 else {
4050 if ( (target->file() != NULL) && (atom->file() != NULL) )
4051 throwf("illegal text-relocation to '%s' in %s from '%s' in %s", target->name(), target->safeFilePath(), atom->name(), atom->safeFilePath());
4052 else
4053 throwf("illegal text reloc in '%s' to '%s'", atom->name(), target->name());
4054 }
4055 }
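/* Policy summary for text relocations, in the order checked above: stubs synthesized by the
   linker may use them silently; if the options allow text relocs they are permitted
   (optionally with a warning); in a PIE main executable they disable PIE with a warning, and
   are a hard error on arm64; a direct reference to a global weak-def symbol is always an
   error; anything else is an error naming both atoms.
*/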
4056
4057 void OutputFile::addDyldInfo(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
4058 ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget, ld::Fixup* fixupWithStore,
4059 const ld::Atom* target, const ld::Atom* minusTarget,
4060 uint64_t targetAddend, uint64_t minusTargetAddend)
4061 {
4062 if ( sect->isSectionHidden() )
4063 return;
4064
4065 // no need to rebase or bind PCRel stores
4066 if ( this->isPcRelStore(fixupWithStore->kind) ) {
4067 // as long as target is in same linkage unit
4068 if ( (target == NULL) || (target->definition() != ld::Atom::definitionProxy) ) {
4069 // make sure target is not global and weak
4070 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular)) {
4071 if ( (atom->section().type() == ld::Section::typeCFI)
4072 || (atom->section().type() == ld::Section::typeDtraceDOF)
4073 || (atom->section().type() == ld::Section::typeUnwindInfo) ) {
4074 // ok for __eh_frame and __unwind_info to use pointer diffs to global weak symbols
4075 return;
4076 }
4077 // <rdar://problem/13700961> spurious warning when weak function has reference to itself
4078 if ( fixupWithTarget->binding == ld::Fixup::bindingDirectlyBound ) {
4079 // ok to ignore pc-rel references within a weak function to itself
4080 return;
4081 }
4082 // Have direct reference to weak-global. This should be an indirect reference
4083 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
4084 warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
4085 "This was likely caused by different translation units being compiled with different visibility settings.",
4086 demangledName, atom->safeFilePath(), _options.demangleSymbol(target->name()), target->safeFilePath());
4087 }
4088 return;
4089 }
4090 }
4091
4092 // no need to rebase or bind PIC internal pointer diff
4093 if ( minusTarget != NULL ) {
4094 // with pointer diffs, both need to be in same linkage unit
4095 assert(minusTarget->definition() != ld::Atom::definitionProxy);
4096 assert(target != NULL);
4097 assert(target->definition() != ld::Atom::definitionProxy);
4098 if ( target == minusTarget ) {
4099 // This is a compile time constant and could have been optimized away by compiler
4100 return;
4101 }
4102
4103 // check if target of pointer-diff is global and weak
4104 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) ) {
4105 if ( (atom->section().type() == ld::Section::typeCFI)
4106 || (atom->section().type() == ld::Section::typeDtraceDOF)
4107 || (atom->section().type() == ld::Section::typeUnwindInfo) ) {
4108 // ok for __eh_frame and __unwind_info to use pointer diffs to global weak symbols
4109 return;
4110 }
4111 // Have direct reference to weak-global. This should be an indirect reference
4112 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
4113 warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
4114 "This was likely caused by different translation units being compiled with different visibility settings.",
4115 demangledName, atom->safeFilePath(), _options.demangleSymbol(target->name()), target->safeFilePath());
4116 }
4117 return;
4118 }
4119
4120 // no need to rebase or bind an atom's references to itself if the output is not slidable
4121 if ( (atom == target) && !_options.outputSlidable() )
4122 return;
4123
4124 // cluster has no target, so needs no rebasing or binding
4125 if ( target == NULL )
4126 return;
4127
4128 const uint64_t pointerSize = (_options.architecture() & CPU_ARCH_ABI64) ? 8 : 4;
4129 bool inReadOnlySeg = ((_options.initialSegProtection(sect->segmentName()) & VM_PROT_WRITE) == 0);
4130 bool needsRebase = false;
4131 bool needsBinding = false;
4132 bool needsLazyBinding = false;
4133 bool needsWeakBinding = false;
4134
4135 uint8_t rebaseType = REBASE_TYPE_POINTER;
4136 uint8_t type = BIND_TYPE_POINTER;
4137 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
4138 bool weak_import = (fixupWithTarget->weakImport || ((dylib != NULL) && dylib->forcedWeakLinked()));
4139 uint64_t address = atom->finalAddress() + fixupWithTarget->offsetInAtom;
4140 uint64_t addend = targetAddend - minusTargetAddend;
4141
4142 // special case lazy pointers
4143 if ( fixupWithTarget->kind == ld::Fixup::kindLazyTarget ) {
4144 assert(fixupWithTarget->u.target == target);
4145 assert(addend == 0);
4146 // lazy dylib lazy pointers do not have any dyld info
4147 if ( atom->section().type() == ld::Section::typeLazyDylibPointer )
4148 return;
4149 // lazy binding to weak definitions is done differently
4150 // they are directly bound to target, then have a weak bind in case of a collision
4151 if ( target->combine() == ld::Atom::combineByName ) {
4152 if ( target->definition() == ld::Atom::definitionProxy ) {
4153 // weak def exported from another dylib
4154 // must non-lazy bind to it plus have weak binding info in case of collision
4155 needsBinding = true;
4156 needsWeakBinding = true;
4157 }
4158 else {
4159 // weak def in this linkage unit.
4160 // just rebase, plus have weak binding info in case of collision
4161 // this will be done by other cluster on lazy pointer atom
4162 }
4163 }
4164 else if ( target->contentType() == ld::Atom::typeResolver ) {
4165 // <rdar://problem/8553647> Hidden resolver functions should not have lazy binding info
4166 // <rdar://problem/12629331> Resolver function run before initializers when overriding the dyld shared cache
4167 // The lazy pointers used by stubs used when non-lazy binding to a resolver are not normal lazy pointers
4168 // and should not be in lazy binding info.
4169 needsLazyBinding = false;
4170 }
4171 else {
4172 // normal case of a pointer to non-weak-def symbol, so can lazily bind
4173 needsLazyBinding = true;
4174 }
4175 }
4176 else {
4177 // everything except lazy pointers
4178 switch ( target->definition() ) {
4179 case ld::Atom::definitionProxy:
4180 if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
4181 throwf("illegal data reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
4182 if ( target->contentType() == ld::Atom::typeTLV ) {
4183 if ( sect->type() != ld::Section::typeTLVPointers )
4184 throwf("illegal data reference in %s to thread local variable %s in dylib %s",
4185 atom->name(), target->name(), dylib->path());
4186 }
4187 if ( inReadOnlySeg )
4188 type = BIND_TYPE_TEXT_ABSOLUTE32;
4189 needsBinding = true;
4190 if ( target->combine() == ld::Atom::combineByName )
4191 needsWeakBinding = true;
4192 break;
4193 case ld::Atom::definitionRegular:
4194 case ld::Atom::definitionTentative:
4195 // only slideable images need rebasing info
4196 if ( _options.outputSlidable() ) {
4197 needsRebase = true;
4198 }
4199 // references to internal symbol never need binding
4200 if ( target->scope() != ld::Atom::scopeGlobal )
4201 break;
4202 // reference to global weak def needs weak binding
4203 if ( (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) )
4204 needsWeakBinding = true;
4205 else if ( _options.outputKind() == Options::kDynamicExecutable ) {
4206 // in main executables, the only way regular symbols are indirected is if -interposable is used
4207 if ( _options.interposable(target->name()) ) {
4208 needsRebase = false;
4209 needsBinding = true;
4210 }
4211 }
4212 else {
4213 // for flat-namespace or interposable two-level-namespace
4214 // all references to exported symbols get indirected
4215 if ( (_options.nameSpace() != Options::kTwoLevelNameSpace) || _options.interposable(target->name()) ) {
4216 // <rdar://problem/5254468> no external relocs for flat objc classes
4217 if ( strncmp(target->name(), ".objc_class_", 12) == 0 )
4218 break;
4219 // no rebase info for references to global symbols that will have binding info
4220 needsRebase = false;
4221 needsBinding = true;
4222 }
4223 else if ( _options.forceCoalesce(target->name()) ) {
4224 needsWeakBinding = true;
4225 }
4226 }
4227 break;
4228 case ld::Atom::definitionAbsolute:
4229 break;
4230 }
4231 }
4232
4233 // <rdar://problem/13828711> if target is an import alias, use base of alias
4234 if ( target->isAlias() && (target->definition() == ld::Atom::definitionProxy) ) {
4235 for (ld::Fixup::iterator fit = target->fixupsBegin(), end=target->fixupsEnd(); fit != end; ++fit) {
4236 if ( fit->firstInCluster() ) {
4237 if ( fit->kind == ld::Fixup::kindNoneFollowOn ) {
4238 if ( fit->binding == ld::Fixup::bindingDirectlyBound ) {
4239 //fprintf(stderr, "switching import of %s to import of %s\n", target->name(), fit->u.target->name());
4240 target = fit->u.target;
4241 }
4242 }
4243 }
4244 }
4245 }
4246
4247 // record dyld info for this cluster
4248 if ( needsRebase ) {
4249 if ( inReadOnlySeg ) {
4250 noteTextReloc(atom, target);
4251 sect->hasLocalRelocs = true; // so dyld knows to change permissions on __TEXT segment
4252 rebaseType = REBASE_TYPE_TEXT_ABSOLUTE32;
4253 }
4254 if ( _options.sharedRegionEligible() ) {
4255 // <rdar://problem/13287063> when range checking, ignore high byte of arm64 addends
4256 uint64_t checkAddend = addend;
4257 if ( _options.architecture() == CPU_TYPE_ARM64 )
4259 checkAddend &= 0x0FFFFFFFFFFFFFFFULL;
4260 if ( checkAddend != 0 ) {
4261 // make sure the addend does not cause the pointer to point outside the target's segment
4262 // if it does, update_dyld_shared_cache will not be able to put this dylib into the shared cache
4263 uint64_t targetAddress = target->finalAddress();
4264 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4265 ld::Internal::FinalSection* sct = *sit;
4266 uint64_t sctEnd = (sct->address+sct->size);
4267 if ( (sct->address <= targetAddress) && (targetAddress < sctEnd) ) {
4268 if ( (targetAddress+checkAddend) > sctEnd ) {
4269 warning("data symbol %s from %s has pointer to %s + 0x%08llX. "
4270 "That large of an addend may disable %s from being put in the dyld shared cache.",
4271 atom->name(), atom->safeFilePath(), target->name(), addend, _options.installPath() );
4272 }
4273 }
4274 }
4275 }
4276 }
4277 if ( ((address & (pointerSize-1)) != 0) && (rebaseType == REBASE_TYPE_POINTER) ) {
4278 if ( (pointerSize == 8) && ((address & 7) == 4) ) {
4279 // for now, don't warn about 8-byte pointers that are only 4-byte aligned
4280 }
4281 else {
4282 warning("pointer not aligned at address 0x%llX (%s + %lld from %s)",
4283 address, atom->name(), (address - atom->finalAddress()), atom->safeFilePath());
4284 }
4285 }
4286 _rebaseInfo.push_back(RebaseInfo(rebaseType, address));
4287 }
4288 if ( needsBinding ) {
4289 if ( inReadOnlySeg ) {
4290 noteTextReloc(atom, target);
4291 sect->hasExternalRelocs = true; // so dyld knows to change permissions on __TEXT segment
4292 }
4293 if ( ((address & (pointerSize-1)) != 0) && (type == BIND_TYPE_POINTER) ) {
4294 if ( (pointerSize == 8) && ((address & 7) == 4) ) {
4295 // for now, don't warn about 8-byte pointers that are only 4-byte aligned
4296 }
4297 else {
4298 warning("pointer not aligned at address 0x%llX (%s + %lld from %s)",
4299 address, atom->name(), (address - atom->finalAddress()), atom->safeFilePath());
4300 }
4301 }
4302 _bindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4303 }
4304 if ( needsLazyBinding ) {
4305 if ( _options.bindAtLoad() )
4306 _bindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4307 else
4308 _lazyBindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4309 }
4310 if ( needsWeakBinding )
4311 _weakBindingInfo.push_back(BindingInfo(type, 0, target->name(), false, address, addend));
4312 }
4313
4314
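// Classic relocations are the pre-LC_DYLD_INFO encoding: instead of the rebase
// and bind opcode streams built above, pointers are described by LC_DYSYMTAB
// local and external relocation records. A rough, illustrative sketch of how
// the two paths relate (the actual dispatch happens in the fixup-walking code
// elsewhere in this file):
//
//     if ( _options.makeCompressedDyldInfo() )
//         addDyldInfo(...);        // rebase / bind / lazy-bind / weak-bind opcodes
//     else
//         addClassicRelocs(...);   // LC_DYSYMTAB relocation records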
4315 void OutputFile::addClassicRelocs(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
4316 ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget, ld::Fixup* fixupWithStore,
4317 const ld::Atom* target, const ld::Atom* minusTarget,
4318 uint64_t targetAddend, uint64_t minusTargetAddend)
4319 {
4320 if ( sect->isSectionHidden() )
4321 return;
4322
4323 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4324 if ( sect->type() == ld::Section::typeNonLazyPointer ) {
4325 // except kexts and static pie which *do* use relocations
4326 switch (_options.outputKind()) {
4327 case Options::kKextBundle:
4328 break;
4329 case Options::kStaticExecutable:
4330 if ( _options.positionIndependentExecutable() )
4331 break;
4332 // else fall into default case
4333 default:
4334 assert(target != NULL);
4335 assert(fixupWithTarget != NULL);
4336 return;
4337 }
4338 }
4339
4340 // no need to rebase or bind PCRel stores
4341 if ( this->isPcRelStore(fixupWithStore->kind) ) {
4342 // as long as target is in same linkage unit
4343 if ( (target == NULL) || (target->definition() != ld::Atom::definitionProxy) )
4344 return;
4345 }
4346
4347 // no need to rebase or bind PIC internal pointer diff
4348 if ( minusTarget != NULL ) {
4349 // with pointer diffs, both need to be in same linkage unit
4350 assert(minusTarget->definition() != ld::Atom::definitionProxy);
4351 assert(target != NULL);
4352 assert(target->definition() != ld::Atom::definitionProxy);
4353 // check if target of pointer-diff is global and weak
4354 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) ) {
4355 if ( (atom->section().type() == ld::Section::typeCFI)
4356 || (atom->section().type() == ld::Section::typeDtraceDOF)
4357 || (atom->section().type() == ld::Section::typeUnwindInfo) ) {
4358 // ok for __eh_frame and __unwind_info to use pointer diffs to global weak symbols
4359 return;
4360 }
4361 // Have a direct reference to a weak global. This should be an indirect reference
4362 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
4363 warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
4364 "This was likely caused by different translation units being compiled with different visibility settings.",
4365 demangledName, atom->safeFilePath(), _options.demangleSymbol(target->name()), target->safeFilePath());
4366 }
4367 return;
4368 }
4369
4370 // cluster has no target, so needs no rebasing or binding
4371 if ( target == NULL )
4372 return;
4373
4374 assert(_localRelocsAtom != NULL);
4375 uint64_t relocAddress = atom->finalAddress() + fixupWithTarget->offsetInAtom - _localRelocsAtom->relocBaseAddress(state);
4376
4377 bool inReadOnlySeg = ( strcmp(sect->segmentName(), "__TEXT") == 0 );
4378 bool needsLocalReloc = false;
4379 bool needsExternReloc = false;
4380
4381 switch ( fixupWithStore->kind ) {
4382 case ld::Fixup::kindLazyTarget:
4383 // lazy pointers don't need relocs
4384 break;
4385 case ld::Fixup::kindStoreLittleEndian32:
4386 case ld::Fixup::kindStoreLittleEndian64:
4387 case ld::Fixup::kindStoreBigEndian32:
4388 case ld::Fixup::kindStoreBigEndian64:
4389 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
4390 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
4391 case ld::Fixup::kindStoreTargetAddressBigEndian32:
4392 case ld::Fixup::kindStoreTargetAddressBigEndian64:
4393 // is pointer
4394 switch ( target->definition() ) {
4395 case ld::Atom::definitionProxy:
4396 needsExternReloc = true;
4397 break;
4398 case ld::Atom::definitionRegular:
4399 case ld::Atom::definitionTentative:
4400 // only slideable images need local relocs
4401 if ( _options.outputSlidable() )
4402 needsLocalReloc = true;
4403 // references to internal symbols never need binding
4404 if ( target->scope() != ld::Atom::scopeGlobal )
4405 break;
4406 // reference to global weak def needs weak binding in dynamic images
4407 if ( (target->combine() == ld::Atom::combineByName)
4408 && (target->definition() == ld::Atom::definitionRegular)
4409 && (_options.outputKind() != Options::kStaticExecutable)
4410 && (_options.outputKind() != Options::kPreload)
4411 && (atom != target) ) {
4412 needsExternReloc = true;
4413 }
4414 else if ( _options.outputKind() == Options::kDynamicExecutable ) {
4415 // in main executables, the only way regular symbols are indirected is if -interposable is used
4416 if ( _options.interposable(target->name()) )
4417 needsExternReloc = true;
4418 }
4419 else {
4420 // for flat-namespace or interposable two-level-namespace
4421 // all references to exported symbols get indirected
4422 if ( (_options.nameSpace() != Options::kTwoLevelNameSpace) || _options.interposable(target->name()) ) {
4423 // <rdar://problem/5254468> no external relocs for flat objc classes
4424 if ( strncmp(target->name(), ".objc_class_", 12) == 0 )
4425 break;
4426 // no rebase info for references to global symbols that will have binding info
4427 needsExternReloc = true;
4428 }
4429 }
4430 if ( needsExternReloc )
4431 needsLocalReloc = false;
4432 break;
4433 case ld::Atom::definitionAbsolute:
4434 break;
4435 }
4436 if ( needsExternReloc ) {
4437 if ( inReadOnlySeg )
4438 noteTextReloc(atom, target);
4439 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
4440 if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
4441 throwf("illegal data reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
4442 _externalRelocsAtom->addExternalPointerReloc(relocAddress, target);
4443 sect->hasExternalRelocs = true;
4444 fixupWithTarget->contentAddendOnly = true;
4445 }
4446 else if ( needsLocalReloc ) {
4447 assert(target != NULL);
4448 if ( inReadOnlySeg )
4449 noteTextReloc(atom, target);
4450 _localRelocsAtom->addPointerReloc(relocAddress, target->machoSection());
4451 sect->hasLocalRelocs = true;
4452 }
4453 break;
4454 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
4455 #if SUPPORT_ARCH_arm64
4456 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
4457 #endif
4458 if ( _options.outputKind() == Options::kKextBundle ) {
4459 assert(target != NULL);
4460 if ( target->definition() == ld::Atom::definitionProxy ) {
4461 _externalRelocsAtom->addExternalCallSiteReloc(relocAddress, target);
4462 fixupWithStore->contentAddendOnly = true;
4463 }
4464 }
4465 break;
4466
4467 case ld::Fixup::kindStoreARMLow16:
4468 case ld::Fixup::kindStoreThumbLow16:
4469 // no way to encode rebasing or binding for these instructions
4470 if ( _options.outputSlidable() || (target->definition() == ld::Atom::definitionProxy) )
4471 throwf("no supported runtime lo16 relocation in %s from %s to %s", atom->name(), atom->safeFilePath(), target->name());
4472 break;
4473
4474 case ld::Fixup::kindStoreARMHigh16:
4475 case ld::Fixup::kindStoreThumbHigh16:
4476 // no way to encode rebasing or binding for these instructions
4477 if ( _options.outputSlidable() || (target->definition() == ld::Atom::definitionProxy) )
4478 throwf("no supported runtime hi16 relocation in %s from %s to %s", atom->name(), atom->safeFilePath(), target->name());
4479 break;
4480
4481 default:
4482 break;
4483 }
4484 }
4485
4486
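// In -r (object file) output every section relocation is either "external"
// (r_extern=1, indexed by a symbol so the final link can re-resolve it) or
// "local" (indexed by a section). The predicate below chooses between them.
// Summary of the rules it implements, for orientation only:
//
//     x86_64 / arm64 : external whenever the target has a symbol table entry
//     arm in -r mode : external for mode-switching branches (Thumb/ARM interworking)
//     i386 in -r mode: external for thread-local variable targets
//     otherwise      : external for proxies, tentative definitions kept
//                      tentative, and global weak definitions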
4487 bool OutputFile::useExternalSectionReloc(const ld::Atom* atom, const ld::Atom* target, ld::Fixup* fixupWithTarget)
4488 {
4489 if ( (_options.architecture() == CPU_TYPE_X86_64)
4490 || (_options.architecture() == CPU_TYPE_ARM64)
4491 ) {
4492 // x86_64 and ARM64 use external relocations for everything that has a symbol
4493 return ( target->symbolTableInclusion() != ld::Atom::symbolTableNotIn );
4494 }
4495
4496 // <rdar://problem/9513487> support arm branch interworking in -r mode
4497 if ( (_options.architecture() == CPU_TYPE_ARM) && (_options.outputKind() == Options::kObjectFile) ) {
4498 if ( atom->isThumb() != target->isThumb() ) {
4499 switch ( fixupWithTarget->kind ) {
4500 // a branch that switches mode might be 'b' rather than 'bl'
4501 // force an external relocation, since there is no way to do a local reloc for 'b'
4502 case ld::Fixup::kindStoreTargetAddressThumbBranch22 :
4503 case ld::Fixup::kindStoreTargetAddressARMBranch24:
4504 return true;
4505 default:
4506 break;
4507 }
4508 }
4509 }
4510
4511 if ( (_options.architecture() == CPU_TYPE_I386) && (_options.outputKind() == Options::kObjectFile) ) {
4512 if ( target->contentType() == ld::Atom::typeTLV )
4513 return true;
4514 }
4515
4516 // most architectures use external relocations only for references
4517 // to a symbol in another translation unit or for references to "weak symbols" or tentative definitions
4518 assert(target != NULL);
4519 if ( target->definition() == ld::Atom::definitionProxy )
4520 return true;
4521 if ( (target->definition() == ld::Atom::definitionTentative) && ! _options.makeTentativeDefinitionsReal() )
4522 return true;
4523 if ( target->scope() != ld::Atom::scopeGlobal )
4524 return false;
4525 if ( (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) )
4526 return true;
4527 return false;
4528 }
4529
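// On arm64 the branch26 / page21 / pageoff12 instruction forms cannot carry an
// arbitrary addend in their immediate bits, so in .o files the addend travels
// as a separate paired relocation (ARM64_RELOC_ADDEND) instead of being stored
// in the section content. Illustrative (hand-written, not from a real compile)
// pairing for a reference to "_foo + 16":
//
//     adrp  x8, _foo@PAGE        ; ARM64_RELOC_ADDEND(16) + ARM64_RELOC_PAGE21
//     add   x8, x8, _foo@PAGEOFF ; ARM64_RELOC_ADDEND(16) + ARM64_RELOC_PAGEOFF12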
4530 bool OutputFile::useSectionRelocAddend(ld::Fixup* fixupWithTarget)
4531 {
4532 #if SUPPORT_ARCH_arm64
4533 if ( _options.architecture() == CPU_TYPE_ARM64 ) {
4534 switch ( fixupWithTarget->kind ) {
4535 case ld::Fixup::kindStoreARM64Branch26:
4536 case ld::Fixup::kindStoreARM64Page21:
4537 case ld::Fixup::kindStoreARM64PageOff12:
4538 return true;
4539 default:
4540 return false;
4541 }
4542 }
4543 #endif
4544 return false;
4545 }
4546
4547
4548
4549
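// For -r output this routine records the section relocation for one fixup
// cluster and marks the fixups (contentAddendOnly / contentDetlaToAddendOnly)
// so the bytes written into the section hold only the addend; the target
// symbol's address is supplied when the .o is linked for real. On x86_64 and
// arm64 that is done for every external reloc; on other architectures
// pc-relative stores instead keep the delta to the addend.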
4550 void OutputFile::addSectionRelocs(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
4551 ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget,
4552 ld::Fixup* fixupWithAddend, ld::Fixup* fixupWithStore,
4553 const ld::Atom* target, const ld::Atom* minusTarget,
4554 uint64_t targetAddend, uint64_t minusTargetAddend)
4555 {
4556 if ( sect->isSectionHidden() )
4557 return;
4558
4559 // in -r mode where there will be no labels on __eh_frame section, there is no need for relocations
4560 if ( (sect->type() == ld::Section::typeCFI) && _options.removeEHLabels() )
4561 return;
4562
4563 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4564 if ( sect->type() == ld::Section::typeNonLazyPointer )
4565 return;
4566
4567 // tentative defs don't have any relocations
4568 if ( sect->type() == ld::Section::typeTentativeDefs )
4569 return;
4570
4571 assert(target != NULL);
4572 assert(fixupWithTarget != NULL);
4573 bool targetUsesExternalReloc = this->useExternalSectionReloc(atom, target, fixupWithTarget);
4574 bool minusTargetUsesExternalReloc = (minusTarget != NULL) && this->useExternalSectionReloc(atom, minusTarget, fixupWithMinusTarget);
4575
4576 // in x86_64 and arm64 .o files an external reloc means the content contains just the addend
4577 if ( (_options.architecture() == CPU_TYPE_X86_64)
4578 || (_options.architecture() == CPU_TYPE_ARM64)
4579 ) {
4580 if ( targetUsesExternalReloc ) {
4581 fixupWithTarget->contentAddendOnly = true;
4582 fixupWithStore->contentAddendOnly = true;
4583 if ( this->useSectionRelocAddend(fixupWithStore) && (fixupWithAddend != NULL) )
4584 fixupWithAddend->contentIgnoresAddend = true;
4585 }
4586 if ( minusTargetUsesExternalReloc )
4587 fixupWithMinusTarget->contentAddendOnly = true;
4588 }
4589 else {
4590 // for other archs, the content is addend-only for (non pc-rel) pointers
4591 // pc-rel instructions are funny. If the target is _foo+8 and _foo is
4592 // external, then the pc-rel instruction *evaluates* to the address 8.
4593 if ( targetUsesExternalReloc ) {
4594 // TLV support for i386 acts like RIP relative addressing
4595 // The addend is the offset from the PICBase to the end of the instruction
4596 if ( (_options.architecture() == CPU_TYPE_I386)
4597 && (_options.outputKind() == Options::kObjectFile)
4598 && (fixupWithStore->kind == ld::Fixup::kindStoreX86PCRel32TLVLoad) ) {
4599 fixupWithTarget->contentAddendOnly = true;
4600 fixupWithStore->contentAddendOnly = true;
4601 }
4602 else if ( isPcRelStore(fixupWithStore->kind) ) {
4603 fixupWithTarget->contentDetlaToAddendOnly = true;
4604 fixupWithStore->contentDetlaToAddendOnly = true;
4605 }
4606 else if ( minusTarget == NULL ){
4607 fixupWithTarget->contentAddendOnly = true;
4608 fixupWithStore->contentAddendOnly = true;
4609 }
4610 }
4611 }
4612
4613 if ( fixupWithStore != NULL ) {
4614 _sectionsRelocationsAtom->addSectionReloc(sect, fixupWithStore->kind, atom, fixupWithStore->offsetInAtom,
4615 targetUsesExternalReloc, minusTargetUsesExternalReloc,
4616 target, targetAddend, minusTarget, minusTargetAddend);
4617 }
4618
4619 }
4620
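// Split-seg (v1) info lets update_dyld_shared_cache slide the __TEXT and data
// segments of a shared-cache-eligible dylib independently. This pass walks
// every __TEXT atom and records the address and fixup kind of each reference
// that lands in a different segment, so the cache builder knows which
// instruction or pointer bits to adjust. Each recorded entry is simply:
//
//     SplitSegInfoEntry(atom->finalAddress() + fit->offsetInAtom,  // where the fixup lives
//                       fit->kind);                                // how to re-slide it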
4621 void OutputFile::makeSplitSegInfo(ld::Internal& state)
4622 {
4623 if ( !_options.sharedRegionEligible() )
4624 return;
4625
4626 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4627 ld::Internal::FinalSection* sect = *sit;
4628 if ( sect->isSectionHidden() )
4629 continue;
4630 if ( strcmp(sect->segmentName(), "__TEXT") != 0 )
4631 continue;
4632 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4633 const ld::Atom* atom = *ait;
4634 const ld::Atom* target = NULL;
4635 const ld::Atom* fromTarget = NULL;
4636 uint64_t accumulator = 0;
4637 bool thumbTarget;
4638 bool hadSubtract = false;
4639 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
4640 if ( fit->firstInCluster() )
4641 target = NULL;
4642 if ( this->setsTarget(fit->kind) ) {
4643 accumulator = addressOf(state, fit, &target);
4644 thumbTarget = targetIsThumb(state, fit);
4645 if ( thumbTarget )
4646 accumulator |= 1;
4647 }
4648 switch ( fit->kind ) {
4649 case ld::Fixup::kindSubtractTargetAddress:
4650 accumulator -= addressOf(state, fit, &fromTarget);
4651 hadSubtract = true;
4652 break;
4653 case ld::Fixup::kindAddAddend:
4654 accumulator += fit->u.addend;
4655 break;
4656 case ld::Fixup::kindSubtractAddend:
4657 accumulator -= fit->u.addend;
4658 break;
4659 case ld::Fixup::kindStoreBigEndian32:
4660 case ld::Fixup::kindStoreLittleEndian32:
4661 case ld::Fixup::kindStoreLittleEndian64:
4662 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
4663 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
4664 // if no subtract, then this is an absolute pointer which means
4665 // there is also a text reloc which update_dyld_shared_cache will use.
4666 if ( ! hadSubtract )
4667 break;
4668 // fall through
4669 case ld::Fixup::kindStoreX86PCRel32:
4670 case ld::Fixup::kindStoreX86PCRel32_1:
4671 case ld::Fixup::kindStoreX86PCRel32_2:
4672 case ld::Fixup::kindStoreX86PCRel32_4:
4673 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
4674 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
4675 case ld::Fixup::kindStoreX86PCRel32GOT:
4676 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
4677 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
4678 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
4679 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
4680 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
4681 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
4682 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
4683 case ld::Fixup::kindStoreARMLow16:
4684 case ld::Fixup::kindStoreThumbLow16:
4685 #if SUPPORT_ARCH_arm64
4686 case ld::Fixup::kindStoreARM64Page21:
4687 case ld::Fixup::kindStoreARM64GOTLoadPage21:
4688 case ld::Fixup::kindStoreARM64GOTLeaPage21:
4689 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
4690 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
4691 case ld::Fixup::kindStoreTargetAddressARM64Page21:
4692 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
4693 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
4694 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
4695 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
4696 case ld::Fixup::kindStoreARM64PCRelToGOT:
4697 #endif
4698 assert(target != NULL);
4699 if ( strcmp(sect->segmentName(), target->section().segmentName()) != 0 ) {
4700 _splitSegInfos.push_back(SplitSegInfoEntry(atom->finalAddress()+fit->offsetInAtom,fit->kind));
4701 }
4702 break;
4703 case ld::Fixup::kindStoreARMHigh16:
4704 case ld::Fixup::kindStoreThumbHigh16:
4705 assert(target != NULL);
4706 if ( strcmp(sect->segmentName(), target->section().segmentName()) != 0 ) {
4707 // hi16 needs to know upper 4-bits of low16 to compute carry
4708 uint32_t extra = (accumulator >> 12) & 0xF;
4709 _splitSegInfos.push_back(SplitSegInfoEntry(atom->finalAddress()+fit->offsetInAtom,fit->kind, extra));
4710 }
4711 break;
4712 case ld::Fixup::kindSetTargetImageOffset:
4713 accumulator = addressOf(state, fit, &target);
4714 assert(target != NULL);
4715 hadSubtract = true;
4716 break;
4717 default:
4718 break;
4719 }
4720 }
4721 }
4722 }
4723 }
4724
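// Split-seg v2 records richer (from-section, from-offset) -> (to-section,
// to-offset) pairs plus a DYLD_CACHE_ADJ_V2_* kind naming the encoding that
// must be adjusted (32/64-bit pointers, 32/64-bit deltas, arm64 adrp/off12/
// br26, arm/thumb movw+movt and branches, image offsets). An illustrative
// entry for a 64-bit pointer in a data section aimed at a __TEXT function:
//
//     SplitSegInfoV2Entry(fromSectionIndex, fromOffset,
//                         toSectionIndex,   toOffset,
//                         DYLD_CACHE_ADJ_V2_POINTER_64);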
4725 void OutputFile::makeSplitSegInfoV2(ld::Internal& state)
4726 {
4727 static const bool log = false;
4728 if ( !_options.sharedRegionEligible() )
4729 return;
4730
4731 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4732 ld::Internal::FinalSection* sect = *sit;
4733 if ( sect->isSectionHidden() )
4734 continue;
4735 bool codeSection = (sect->type() == ld::Section::typeCode);
4736 if (log) fprintf(stderr, "sect: %s, address=0x%llX\n", sect->sectionName(), sect->address);
4737 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4738 const ld::Atom* atom = *ait;
4739 const ld::Atom* target = NULL;
4740 const ld::Atom* fromTarget = NULL;
4741 uint32_t picBase = 0;
4742 uint64_t accumulator = 0;
4743 bool thumbTarget;
4744 bool hadSubtract = false;
4745 uint8_t fromSectionIndex = atom->machoSection();
4746 uint8_t toSectionIndex;
4747 uint8_t kind = 0;
4748 uint64_t fromOffset = 0;
4749 uint64_t toOffset = 0;
4750 uint64_t addend = 0;
4751 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
4752 if ( fit->firstInCluster() ) {
4753 target = NULL;
4754 hadSubtract = false;
4755 fromTarget = NULL;
4756 kind = 0;
4757 addend = 0;
4758 toSectionIndex = 255;
4759 fromOffset = atom->finalAddress() + fit->offsetInAtom - sect->address;
4760 }
4761 if ( this->setsTarget(fit->kind) ) {
4762 accumulator = addressAndTarget(state, fit, &target);
4763 thumbTarget = targetIsThumb(state, fit);
4764 if ( thumbTarget )
4765 accumulator |= 1;
4766 toOffset = accumulator - state.atomToSection[target]->address;
4767 if ( target->definition() != ld::Atom::definitionProxy ) {
4768 if ( target->section().type() == ld::Section::typeMachHeader )
4769 toSectionIndex = 0;
4770 else
4771 toSectionIndex = target->machoSection();
4772 }
4773 }
4774 switch ( fit->kind ) {
4775 case ld::Fixup::kindSubtractTargetAddress:
4776 accumulator -= addressAndTarget(state, fit, &fromTarget);
4777 hadSubtract = true;
4778 break;
4779 case ld::Fixup::kindAddAddend:
4780 accumulator += fit->u.addend;
4781 addend = fit->u.addend;
4782 break;
4783 case ld::Fixup::kindSubtractAddend:
4784 accumulator -= fit->u.addend;
4785 picBase = fit->u.addend;
4786 break;
4787 case ld::Fixup::kindSetLazyOffset:
4788 break;
4789 case ld::Fixup::kindStoreBigEndian32:
4790 case ld::Fixup::kindStoreLittleEndian32:
4791 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
4792 if ( kind != DYLD_CACHE_ADJ_V2_IMAGE_OFF_32 ) {
4793 if ( hadSubtract )
4794 kind = DYLD_CACHE_ADJ_V2_DELTA_32;
4795 else
4796 kind = DYLD_CACHE_ADJ_V2_POINTER_32;
4797 }
4798 break;
4799 case ld::Fixup::kindStoreLittleEndian64:
4800 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
4801 if ( hadSubtract )
4802 kind = DYLD_CACHE_ADJ_V2_DELTA_64;
4803 else
4804 kind = DYLD_CACHE_ADJ_V2_POINTER_64;
4805 break;
4806 case ld::Fixup::kindStoreX86PCRel32:
4807 case ld::Fixup::kindStoreX86PCRel32_1:
4808 case ld::Fixup::kindStoreX86PCRel32_2:
4809 case ld::Fixup::kindStoreX86PCRel32_4:
4810 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
4811 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
4812 case ld::Fixup::kindStoreX86PCRel32GOT:
4813 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
4814 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
4815 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
4816 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
4817 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
4818 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
4819 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
4820 #if SUPPORT_ARCH_arm64
4821 case ld::Fixup::kindStoreARM64PCRelToGOT:
4822 #endif
4823 if ( (fromSectionIndex != toSectionIndex) || !codeSection )
4824 kind = DYLD_CACHE_ADJ_V2_DELTA_32;
4825 break;
4826 #if SUPPORT_ARCH_arm64
4827 case ld::Fixup::kindStoreARM64Page21:
4828 case ld::Fixup::kindStoreARM64GOTLoadPage21:
4829 case ld::Fixup::kindStoreARM64GOTLeaPage21:
4830 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
4831 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
4832 case ld::Fixup::kindStoreTargetAddressARM64Page21:
4833 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
4834 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
4835 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
4836 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
4837 if ( fromSectionIndex != toSectionIndex )
4838 kind = DYLD_CACHE_ADJ_V2_ARM64_ADRP;
4839 break;
4840 case ld::Fixup::kindStoreARM64PageOff12:
4841 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
4842 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
4843 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
4844 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
4845 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
4846 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
4847 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
4848 if ( fromSectionIndex != toSectionIndex )
4849 kind = DYLD_CACHE_ADJ_V2_ARM64_OFF12;
4850 break;
4851 case ld::Fixup::kindStoreARM64Branch26:
4852 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
4853 if ( fromSectionIndex != toSectionIndex )
4854 kind = DYLD_CACHE_ADJ_V2_ARM64_BR26;
4855 break;
4856 #endif
4857 case ld::Fixup::kindStoreARMHigh16:
4858 case ld::Fixup::kindStoreARMLow16:
4859 if ( (fromSectionIndex != toSectionIndex) && (fromTarget == atom) ) {
4860 kind = DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT;
4861 }
4862 break;
4863 case ld::Fixup::kindStoreARMBranch24:
4864 case ld::Fixup::kindStoreTargetAddressARMBranch24:
4865 if ( fromSectionIndex != toSectionIndex )
4866 kind = DYLD_CACHE_ADJ_V2_ARM_BR24;
4867 break;
4868 case ld::Fixup::kindStoreThumbLow16:
4869 case ld::Fixup::kindStoreThumbHigh16:
4870 if ( (fromSectionIndex != toSectionIndex) && (fromTarget == atom) ) {
4871 kind = DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT;
4872 }
4873 break;
4874 case ld::Fixup::kindStoreThumbBranch22:
4875 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
4876 if ( fromSectionIndex != toSectionIndex )
4877 kind = DYLD_CACHE_ADJ_V2_THUMB_BR22;
4878 break;
4879 case ld::Fixup::kindSetTargetImageOffset:
4880 kind = DYLD_CACHE_ADJ_V2_IMAGE_OFF_32;
4881 accumulator = addressAndTarget(state, fit, &target);
4882 assert(target != NULL);
4883 toSectionIndex = target->machoSection();
4884 toOffset = accumulator - state.atomToSection[target]->address;
4885 hadSubtract = true;
4886 break;
4887 default:
4888 break;
4889 }
4890 if ( fit->lastInCluster() ) {
4891 if ( (kind != 0) && (target != NULL) && (target->definition() != ld::Atom::definitionProxy) ) {
4892 if ( !hadSubtract && addend )
4893 toOffset += addend;
4894 assert(toSectionIndex != 255);
4895 if (log) fprintf(stderr, "from (%d.%s + 0x%llX) to (%d.%s + 0x%llX), kind=%d, atomAddr=0x%llX, sectAddr=0x%llx\n",
4896 fromSectionIndex, sect->sectionName(), fromOffset, toSectionIndex, state.atomToSection[target]->sectionName(),
4897 toOffset, kind, atom->finalAddress(), sect->address);
4898 _splitSegV2Infos.push_back(SplitSegInfoV2Entry(fromSectionIndex, fromOffset, toSectionIndex, toOffset, kind));
4899 }
4900 }
4901 }
4902 }
4903 }
4904 }
4905
4906
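// The -map file written below is plain text: a header with the output path
// and architecture, a numbered table of input files, a table of sections,
// and one line per atom giving its address, size, file index, and name
// (with dead-stripped symbols appended when -dead_strip is used). An
// illustrative excerpt, with made-up paths and addresses:
//
//     # Path: /tmp/libexample.dylib
//     # Arch: x86_64
//     # Object files:
//     [  0] linker synthesized
//     [  1] /tmp/example.o
//     # Sections:
//     # Address	Size    	Segment	Section
//     0x00000FA0	0x00000020	__TEXT	__text
//     # Symbols:
//     # Address	Size    	File Name
//     0x00000FA0	0x00000020	[  1] _main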
4907 void OutputFile::writeMapFile(ld::Internal& state)
4908 {
4909 if ( _options.generatedMapPath() != NULL ) {
4910 FILE* mapFile = fopen(_options.generatedMapPath(), "w");
4911 if ( mapFile != NULL ) {
4912 // write output path
4913 fprintf(mapFile, "# Path: %s\n", _options.outputFilePath());
4914 // write output architecture
4915 fprintf(mapFile, "# Arch: %s\n", _options.architectureName());
4916 // write UUID
4917 //if ( fUUIDAtom != NULL ) {
4918 // const uint8_t* uuid = fUUIDAtom->getUUID();
4919 // fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
4920 // uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
4921 // uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
4922 //}
4923 // write table of object files
4924 std::map<const ld::File*, ld::File::Ordinal> readerToOrdinal;
4925 std::map<ld::File::Ordinal, const ld::File*> ordinalToReader;
4926 std::map<const ld::File*, uint32_t> readerToFileOrdinal;
4927 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4928 ld::Internal::FinalSection* sect = *sit;
4929 if ( sect->isSectionHidden() )
4930 continue;
4931 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4932 const ld::Atom* atom = *ait;
4933 const ld::File* reader = atom->originalFile();
4934 if ( reader == NULL )
4935 continue;
4936 ld::File::Ordinal readerOrdinal = reader->ordinal();
4937 std::map<const ld::File*, ld::File::Ordinal>::iterator pos = readerToOrdinal.find(reader);
4938 if ( pos == readerToOrdinal.end() ) {
4939 readerToOrdinal[reader] = readerOrdinal;
4940 ordinalToReader[readerOrdinal] = reader;
4941 }
4942 }
4943 }
4944 for (const ld::Atom* atom : state.deadAtoms) {
4945 const ld::File* reader = atom->originalFile();
4946 if ( reader == NULL )
4947 continue;
4948 ld::File::Ordinal readerOrdinal = reader->ordinal();
4949 std::map<const ld::File*, ld::File::Ordinal>::iterator pos = readerToOrdinal.find(reader);
4950 if ( pos == readerToOrdinal.end() ) {
4951 readerToOrdinal[reader] = readerOrdinal;
4952 ordinalToReader[readerOrdinal] = reader;
4953 }
4954 }
4955 fprintf(mapFile, "# Object files:\n");
4956 fprintf(mapFile, "[%3u] %s\n", 0, "linker synthesized");
4957 uint32_t fileIndex = 1;
4958 for(std::map<ld::File::Ordinal, const ld::File*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
4959 fprintf(mapFile, "[%3u] %s\n", fileIndex, it->second->path());
4960 readerToFileOrdinal[it->second] = fileIndex++;
4961 }
4962 // write table of sections
4963 fprintf(mapFile, "# Sections:\n");
4964 fprintf(mapFile, "# Address\tSize \tSegment\tSection\n");
4965 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4966 ld::Internal::FinalSection* sect = *sit;
4967 if ( sect->isSectionHidden() )
4968 continue;
4969 fprintf(mapFile, "0x%08llX\t0x%08llX\t%s\t%s\n", sect->address, sect->size,
4970 sect->segmentName(), sect->sectionName());
4971 }
4972 // write table of symbols
4973 fprintf(mapFile, "# Symbols:\n");
4974 fprintf(mapFile, "# Address\tSize \tFile Name\n");
4975 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4976 ld::Internal::FinalSection* sect = *sit;
4977 if ( sect->isSectionHidden() )
4978 continue;
4979 //bool isCstring = (sect->type() == ld::Section::typeCString);
4980 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4981 char buffer[4096];
4982 const ld::Atom* atom = *ait;
4983 const char* name = atom->name();
4984 // don't add auto-stripped aliases to .map file
4985 if ( (atom->size() == 0) && (atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages) )
4986 continue;
4987 if ( atom->contentType() == ld::Atom::typeCString ) {
4988 strcpy(buffer, "literal string: ");
4989 const char* s = (char*)atom->rawContentPointer();
4990 char* e = &buffer[4094];
4991 for (char* b = &buffer[strlen(buffer)]; b < e;) {
4992 char c = *s++;
4993 if ( c == '\n' ) {
4994 *b++ = '\\';
4995 *b++ = 'n';
4996 }
4997 else {
4998 *b++ = c;
4999 }
5000 if ( c == '\0' )
5001 break;
5002 }
5003 buffer[4095] = '\0';
5004 name = buffer;
5005 }
5006 else if ( (atom->contentType() == ld::Atom::typeCFI) && (strcmp(name, "FDE") == 0) ) {
5007 for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
5008 if ( (fit->kind == ld::Fixup::kindSetTargetAddress) && (fit->clusterSize == ld::Fixup::k1of4) ) {
5009 if ( (fit->binding == ld::Fixup::bindingDirectlyBound)
5010 && (fit->u.target->section().type() == ld::Section::typeCode) ) {
5011 strcpy(buffer, "FDE for: ");
5012 strlcat(buffer, fit->u.target->name(), 4096);
5013 name = buffer;
5014 }
5015 }
5016 }
5017 }
5018 else if ( atom->contentType() == ld::Atom::typeNonLazyPointer ) {
5019 strcpy(buffer, "non-lazy-pointer");
5020 for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
5021 if ( fit->binding == ld::Fixup::bindingsIndirectlyBound ) {
5022 strcpy(buffer, "non-lazy-pointer-to: ");
5023 strlcat(buffer, state.indirectBindingTable[fit->u.bindingIndex]->name(), 4096);
5024 break;
5025 }
5026 else if ( fit->binding == ld::Fixup::bindingDirectlyBound ) {
5027 strcpy(buffer, "non-lazy-pointer-to-local: ");
5028 strlcat(buffer, fit->u.target->name(), 4096);
5029 break;
5030 }
5031 }
5032 name = buffer;
5033 }
5034 fprintf(mapFile, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom->finalAddress(), atom->size(),
5035 readerToFileOrdinal[atom->originalFile()], name);
5036 }
5037 }
5038 // the preload check is a hack until 26613948 is fixed
5039 if ( _options.deadCodeStrip() && (_options.outputKind() != Options::kPreload) ) {
5040 fprintf(mapFile, "\n");
5041 fprintf(mapFile, "# Dead Stripped Symbols:\n");
5042 fprintf(mapFile, "# \tSize \tFile Name\n");
5043 for (const ld::Atom* atom : state.deadAtoms) {
5044 char buffer[4096];
5045 const char* name = atom->name();
5046 // don't add auto-stripped aliases to .map file
5047 if ( (atom->size() == 0) && (atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages) )
5048 continue;
5049 if ( atom->contentType() == ld::Atom::typeCString ) {
5050 strcpy(buffer, "literal string: ");
5051 const char* s = (char*)atom->rawContentPointer();
5052 char* e = &buffer[4094];
5053 for (char* b = &buffer[strlen(buffer)]; b < e;) {
5054 char c = *s++;
5055 if ( c == '\n' ) {
5056 *b++ = '\\';
5057 *b++ = 'n';
5058 }
5059 else {
5060 *b++ = c;
5061 }
5062 if ( c == '\0' )
5063 break;
5064 }
5065 buffer[4095] = '\0';
5066 name = buffer;
5067 }
5068 fprintf(mapFile, "<<dead>> \t0x%08llX\t[%3u] %s\n", atom->size(),
5069 readerToFileOrdinal[atom->originalFile()], name);
5070 }
5071 }
5072 fclose(mapFile);
5073 }
5074 else {
5075 warning("could not write map file: %s\n", _options.generatedMapPath());
5076 }
5077 }
5078 }
5079
5080 static std::string realPathString(const char* path)
5081 {
5082 char realName[MAXPATHLEN];
5083 if ( realpath(path, realName) != NULL )
5084 return realName;
5085 return path;
5086 }
5087
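// When JSON trace emission is enabled (_options.traceEmitJSON()), one compact
// JSON object per linked image is appended to the trace file: the image UUID,
// leaf name, architecture, and the lists of dynamic / upward / re-exported
// dylibs, archives, and bundle loader that went into the link. Illustrative
// shape of an entry, with invented values:
//
//     {"uuid":"6E2A1C4B-....","name":"libexample.dylib","arch":"x86_64",
//      "dynamic":["/usr/lib/libSystem.B.dylib"],
//      "archives":["/tmp/libstatic.a"]}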
5088 void OutputFile::writeJSONEntry(ld::Internal& state)
5089 {
5090 if ( _options.traceEmitJSON() && (_options.UUIDMode() != Options::kUUIDNone) && (_options.traceOutputFile() != NULL) ) {
5091
5092 // Convert the UUID to a string.
5093 const uint8_t* uuid = _headersAndLoadCommandAtom->getUUID();
5094 uuid_string_t uuidString;
5095
5096 uuid_unparse(uuid, uuidString);
5097
5098 // Enumerate the dylibs.
5099 std::vector<const ld::dylib::File*> dynamicList;
5100 std::vector<const ld::dylib::File*> upwardList;
5101 std::vector<const ld::dylib::File*> reexportList;
5102
5103 for (const ld::dylib::File* dylib : _dylibsToLoad) {
5104
5105 if (dylib->willBeUpwardDylib()) {
5106
5107 upwardList.push_back(dylib);
5108 } else if (dylib->willBeReExported()) {
5109
5110 reexportList.push_back(dylib);
5111 } else {
5112
5113 dynamicList.push_back(dylib);
5114 }
5115 }
5116
5117 /*
5118 * Build the JSON entry.
5119 */
5120
5121 std::string jsonEntry = "{";
5122
5123 jsonEntry += "\"uuid\":\"" + std::string(uuidString) + "\",";
5124
5125 // installPath() returns -final_output for non-dylibs
5126 const char* lastNameSlash = strrchr(_options.installPath(), '/');
5127 const char* leafName = (lastNameSlash != NULL) ? lastNameSlash+1 : _options.outputFilePath();
5128 jsonEntry += "\"name\":\"" + std::string(leafName) + "\",";
5129
5130 jsonEntry += "\"arch\":\"" + std::string(_options.architectureName()) + "\"";
5131
5132 if (dynamicList.size() > 0) {
5133 jsonEntry += ",\"dynamic\":[";
5134 for (const ld::dylib::File* dylib : dynamicList) {
5135 jsonEntry += "\"" + realPathString(dylib->path()) + "\"";
5136 if ((dylib != dynamicList.back())) {
5137 jsonEntry += ",";
5138 }
5139 }
5140 jsonEntry += "]";
5141 }
5142
5143 if (upwardList.size() > 0) {
5144 jsonEntry += ",\"upward-dynamic\":[";
5145 for (const ld::dylib::File* dylib : upwardList) {
5146 jsonEntry += "\"" + realPathString(dylib->path()) + "\"";
5147 if ((dylib != upwardList.back())) {
5148 jsonEntry += ",";
5149 }
5150 }
5151 jsonEntry += "]";
5152 }
5153
5154 if (reexportList.size() > 0) {
5155 jsonEntry += ",\"re-exports\":[";
5156 for (const ld::dylib::File* dylib : reexportList) {
5157 jsonEntry += "\"" + realPathString(dylib->path()) + "\"";
5158 if ((dylib != reexportList.back())) {
5159 jsonEntry += ",";
5160 }
5161 }
5162 jsonEntry += "]";
5163 }
5164
5165 if (state.archivePaths.size() > 0) {
5166 jsonEntry += ",\"archives\":[";
5167 for (const std::string& archivePath : state.archivePaths) {
5168 jsonEntry += "\"" + realPathString(archivePath.c_str()) + "\"";
5169 if ((archivePath != state.archivePaths.back())) {
5170 jsonEntry += ",";
5171 }
5172 }
5173 jsonEntry += "]";
5174 }
5175
5176 if (state.bundleLoader != NULL) {
5177 jsonEntry += ",\"bundle-loader\":";
5178 jsonEntry += "\"" + realPathString(state.bundleLoader->path()) + "\"";
5179 }
5180
5181 jsonEntry += "}\n";
5182
5183 // Write the JSON entry to the trace file.
5184 _options.writeToTraceFile(jsonEntry.c_str(), jsonEntry.size());
5185 }
5186 }
5187
5188 // used to sort atoms with debug notes
5189 class DebugNoteSorter
5190 {
5191 public:
5192 bool operator()(const ld::Atom* left, const ld::Atom* right) const
5193 {
5194 // first sort by reader
5195 ld::File::Ordinal leftFileOrdinal = left->file()->ordinal();
5196 ld::File::Ordinal rightFileOrdinal = right->file()->ordinal();
5197 if ( leftFileOrdinal!= rightFileOrdinal)
5198 return (leftFileOrdinal < rightFileOrdinal);
5199
5200 // then sort by atom final address
5201 uint64_t leftAddr = left->finalAddress();
5202 uint64_t rightAddr = right->finalAddress();
5203 return leftAddr < rightAddr;
5204 }
5205 };
5206
5207
5208 const char* OutputFile::assureFullPath(const char* path)
5209 {
5210 if ( path[0] == '/' )
5211 return path;
5212 char cwdbuff[MAXPATHLEN];
5213 if ( getcwd(cwdbuff, MAXPATHLEN) != NULL ) {
5214 char* result;
5215 asprintf(&result, "%s/%s", cwdbuff, path);
5216 if ( result != NULL )
5217 return result;
5218 }
5219 return path;
5220 }
5221
5222 static time_t fileModTime(const char* path) {
5223 struct stat statBuffer;
5224 if ( stat(path, &statBuffer) == 0 ) {
5225 return statBuffer.st_mtime;
5226 }
5227 return 0;
5228 }
5229
5230
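// When inputs were compiled with DWARF, the linker does not copy the debug
// info into the output; it synthesizes "debug note" stabs that point dsymutil
// and debuggers back at the original .o files. For each translation unit the
// sequence emitted below is roughly:
//
//     N_SO  (directory path)      N_SO  (source file name)
//     N_OSO (path + mtime of the .o holding the DWARF)
//     per function : N_BNSYM, N_FUN(name), N_FUN(""), N_ENSYM  (plus N_SOL
//                    entries when line info comes from other files)
//     per data atom: N_GSYM, or N_STSYM for translation-unit statics
//     N_SO  ("")   -- closes the translation unit
//
// Inputs that already carry stabs have their stab entries copied through
// unchanged at the end of this function.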
5231 void OutputFile::synthesizeDebugNotes(ld::Internal& state)
5232 {
5233 // -S means don't synthesize debug map
5234 if ( _options.debugInfoStripping() == Options::kDebugInfoNone )
5235 return;
5236 // make a vector of atoms that come from files compiled with dwarf debug info
5237 std::vector<const ld::Atom*> atomsNeedingDebugNotes;
5238 std::set<const ld::Atom*> atomsWithStabs;
5239 atomsNeedingDebugNotes.reserve(1024);
5240 const ld::relocatable::File* objFile = NULL;
5241 bool objFileHasDwarf = false;
5242 bool objFileHasStabs = false;
5243 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
5244 ld::Internal::FinalSection* sect = *sit;
5245 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
5246 const ld::Atom* atom = *ait;
5247 // no stabs for atoms that would not be in the symbol table
5248 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotIn )
5249 continue;
5250 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages )
5251 continue;
5252 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel )
5253 continue;
5254 // no stabs for absolute symbols
5255 if ( atom->definition() == ld::Atom::definitionAbsolute )
5256 continue;
5257 // no stabs for .eh atoms
5258 if ( atom->contentType() == ld::Atom::typeCFI )
5259 continue;
5260 // no stabs for string literal atoms
5261 if ( atom->contentType() == ld::Atom::typeCString )
5262 continue;
5263 // no stabs for kernel dtrace probes
5264 if ( (_options.outputKind() == Options::kStaticExecutable) && (strncmp(atom->name(), "__dtrace_probe$", 15) == 0) )
5265 continue;
5266 const ld::File* file = atom->file();
5267 if ( file != NULL ) {
5268 if ( file != objFile ) {
5269 objFileHasDwarf = false;
5270 objFileHasStabs = false;
5271 objFile = dynamic_cast<const ld::relocatable::File*>(file);
5272 if ( objFile != NULL ) {
5273 switch ( objFile->debugInfo() ) {
5274 case ld::relocatable::File::kDebugInfoNone:
5275 break;
5276 case ld::relocatable::File::kDebugInfoDwarf:
5277 objFileHasDwarf = true;
5278 break;
5279 case ld::relocatable::File::kDebugInfoStabs:
5280 case ld::relocatable::File::kDebugInfoStabsUUID:
5281 objFileHasStabs = true;
5282 break;
5283 }
5284 }
5285 }
5286 if ( objFileHasDwarf )
5287 atomsNeedingDebugNotes.push_back(atom);
5288 if ( objFileHasStabs )
5289 atomsWithStabs.insert(atom);
5290 }
5291 }
5292 }
5293
5294 // sort by file ordinal then atom ordinal
5295 std::sort(atomsNeedingDebugNotes.begin(), atomsNeedingDebugNotes.end(), DebugNoteSorter());
5296
5297 // <rdar://problem/17689030> Add -add_ast_path option to linker which add N_AST stab entry to output
5298 const std::vector<const char*>& astPaths = _options.astFilePaths();
5299 for (std::vector<const char*>::const_iterator it=astPaths.begin(); it != astPaths.end(); it++) {
5300 const char* path = *it;
5301 // emit N_AST
5302 ld::relocatable::File::Stab astStab;
5303 astStab.atom = NULL;
5304 astStab.type = N_AST;
5305 astStab.other = 0;
5306 astStab.desc = 0;
5307 astStab.value = fileModTime(path);
5308 astStab.string = path;
5309 state.stabs.push_back(astStab);
5310 }
5311
5312 // synthesize "debug notes" and add them to master stabs vector
5313 const char* dirPath = NULL;
5314 const char* filename = NULL;
5315 bool wroteStartSO = false;
5316 state.stabs.reserve(atomsNeedingDebugNotes.size()*4);
5317 std::unordered_set<const char*, CStringHash, CStringEquals> seenFiles;
5318 for (std::vector<const ld::Atom*>::iterator it=atomsNeedingDebugNotes.begin(); it != atomsNeedingDebugNotes.end(); it++) {
5319 const ld::Atom* atom = *it;
5320 const ld::File* atomFile = atom->file();
5321 const ld::relocatable::File* atomObjFile = dynamic_cast<const ld::relocatable::File*>(atomFile);
5322 //fprintf(stderr, "debug note for %s\n", atom->name());
5323 const char* newPath = atom->translationUnitSource();
5324 if ( newPath != NULL ) {
5325 const char* newDirPath;
5326 const char* newFilename;
5327 const char* lastSlash = strrchr(newPath, '/');
5328 if ( lastSlash == NULL )
5329 continue;
5330 newFilename = lastSlash+1;
5331 char* temp = strdup(newPath);
5332 newDirPath = temp;
5333 // gdb likes directory SO's to end in '/', but dwarf DW_AT_comp_dir usually does not have a trailing '/'
5334 temp[lastSlash-newPath+1] = '\0';
5335 // need SO's whenever the translation unit source file changes
5336 if ( (filename == NULL) || (strcmp(newFilename,filename) != 0) || (strcmp(newDirPath,dirPath) != 0)) {
5337 if ( filename != NULL ) {
5338 // translation unit change, emit ending SO
5339 ld::relocatable::File::Stab endFileStab;
5340 endFileStab.atom = NULL;
5341 endFileStab.type = N_SO;
5342 endFileStab.other = 1;
5343 endFileStab.desc = 0;
5344 endFileStab.value = 0;
5345 endFileStab.string = "";
5346 state.stabs.push_back(endFileStab);
5347 }
5348 // new translation unit, emit start SO's
5349 ld::relocatable::File::Stab dirPathStab;
5350 dirPathStab.atom = NULL;
5351 dirPathStab.type = N_SO;
5352 dirPathStab.other = 0;
5353 dirPathStab.desc = 0;
5354 dirPathStab.value = 0;
5355 dirPathStab.string = newDirPath;
5356 state.stabs.push_back(dirPathStab);
5357 ld::relocatable::File::Stab fileStab;
5358 fileStab.atom = NULL;
5359 fileStab.type = N_SO;
5360 fileStab.other = 0;
5361 fileStab.desc = 0;
5362 fileStab.value = 0;
5363 fileStab.string = newFilename;
5364 state.stabs.push_back(fileStab);
5365 // Synthesize OSO for start of file
5366 ld::relocatable::File::Stab objStab;
5367 objStab.atom = NULL;
5368 objStab.type = N_OSO;
5369 // <rdar://problem/6337329> linker should put cpusubtype in n_sect field of nlist entry for N_OSO debug note entries
5370 objStab.other = atomFile->cpuSubType();
5371 objStab.desc = 1;
5372 if ( atomObjFile != NULL ) {
5373 objStab.string = assureFullPath(atomObjFile->debugInfoPath());
5374 objStab.value = atomObjFile->debugInfoModificationTime();
5375 }
5376 else {
5377 objStab.string = assureFullPath(atomFile->path());
5378 objStab.value = atomFile->modificationTime();
5379 }
5380 state.stabs.push_back(objStab);
5381 wroteStartSO = true;
5382 // add the source file path to seenFiles so it does not show up in SOLs
5383 seenFiles.insert(newFilename);
5384 char* fullFilePath;
5385 asprintf(&fullFilePath, "%s%s", newDirPath, newFilename);
5386 // add both leaf path and full path
5387 seenFiles.insert(fullFilePath);
5388 }
5389 filename = newFilename;
5390 dirPath = newDirPath;
5391 if ( atom->section().type() == ld::Section::typeCode ) {
5392 // Synthesize BNSYM and start FUN stabs
5393 ld::relocatable::File::Stab beginSym;
5394 beginSym.atom = atom;
5395 beginSym.type = N_BNSYM;
5396 beginSym.other = 1;
5397 beginSym.desc = 0;
5398 beginSym.value = 0;
5399 beginSym.string = "";
5400 state.stabs.push_back(beginSym);
5401 ld::relocatable::File::Stab startFun;
5402 startFun.atom = atom;
5403 startFun.type = N_FUN;
5404 startFun.other = 1;
5405 startFun.desc = 0;
5406 startFun.value = 0;
5407 startFun.string = atom->name();
5408 state.stabs.push_back(startFun);
5409 // Synthesize any SOL stabs needed
5410 const char* curFile = NULL;
5411 for (ld::Atom::LineInfo::iterator lit = atom->beginLineInfo(); lit != atom->endLineInfo(); ++lit) {
5412 if ( lit->fileName != curFile ) {
5413 if ( seenFiles.count(lit->fileName) == 0 ) {
5414 seenFiles.insert(lit->fileName);
5415 ld::relocatable::File::Stab sol;
5416 sol.atom = 0;
5417 sol.type = N_SOL;
5418 sol.other = 0;
5419 sol.desc = 0;
5420 sol.value = 0;
5421 sol.string = lit->fileName;
5422 state.stabs.push_back(sol);
5423 }
5424 curFile = lit->fileName;
5425 }
5426 }
5427 // Synthesize end FUN and ENSYM stabs
5428 ld::relocatable::File::Stab endFun;
5429 endFun.atom = atom;
5430 endFun.type = N_FUN;
5431 endFun.other = 0;
5432 endFun.desc = 0;
5433 endFun.value = 0;
5434 endFun.string = "";
5435 state.stabs.push_back(endFun);
5436 ld::relocatable::File::Stab endSym;
5437 endSym.atom = atom;
5438 endSym.type = N_ENSYM;
5439 endSym.other = 1;
5440 endSym.desc = 0;
5441 endSym.value = 0;
5442 endSym.string = "";
5443 state.stabs.push_back(endSym);
5444 }
5445 else {
5446 ld::relocatable::File::Stab globalsStab;
5447 const char* name = atom->name();
5448 if ( atom->scope() == ld::Atom::scopeTranslationUnit ) {
5449 // Synthesize STSYM stab for statics
5450 globalsStab.atom = atom;
5451 globalsStab.type = N_STSYM;
5452 globalsStab.other = 1;
5453 globalsStab.desc = 0;
5454 globalsStab.value = 0;
5455 globalsStab.string = name;
5456 state.stabs.push_back(globalsStab);
5457 }
5458 else {
5459 // Synthesize GSYM stab for other globals
5460 globalsStab.atom = atom;
5461 globalsStab.type = N_GSYM;
5462 globalsStab.other = 1;
5463 globalsStab.desc = 0;
5464 globalsStab.value = 0;
5465 globalsStab.string = name;
5466 state.stabs.push_back(globalsStab);
5467 }
5468 }
5469 }
5470 }
5471
5472 if ( wroteStartSO ) {
5473 // emit ending SO
5474 ld::relocatable::File::Stab endFileStab;
5475 endFileStab.atom = NULL;
5476 endFileStab.type = N_SO;
5477 endFileStab.other = 1;
5478 endFileStab.desc = 0;
5479 endFileStab.value = 0;
5480 endFileStab.string = "";
5481 state.stabs.push_back(endFileStab);
5482 }
5483
5484 // copy any stabs from .o file
5485 std::set<const ld::File*> filesSeenWithStabs;
5486 for (std::set<const ld::Atom*>::iterator it=atomsWithStabs.begin(); it != atomsWithStabs.end(); it++) {
5487 const ld::Atom* atom = *it;
5488 objFile = dynamic_cast<const ld::relocatable::File*>(atom->file());
5489 if ( objFile != NULL ) {
5490 if ( filesSeenWithStabs.count(objFile) == 0 ) {
5491 filesSeenWithStabs.insert(objFile);
5492 const std::vector<ld::relocatable::File::Stab>* stabs = objFile->stabs();
5493 if ( stabs != NULL ) {
5494 for(std::vector<ld::relocatable::File::Stab>::const_iterator sit = stabs->begin(); sit != stabs->end(); ++sit) {
5495 ld::relocatable::File::Stab stab = *sit;
5496 // ignore stabs associated with atoms that were dead stripped or coalesced away
5497 if ( (sit->atom != NULL) && (atomsWithStabs.count(sit->atom) == 0) )
5498 continue;
5499 // <rdar://problem/8284718> Value of N_SO stabs should be address of first atom from translation unit
5500 if ( (stab.type == N_SO) && (stab.string != NULL) && (stab.string[0] != '\0') ) {
5501 stab.atom = atom;
5502 }
5503 state.stabs.push_back(stab);
5504 }
5505 }
5506 }
5507 }
5508 }
5509
5510 }
5511
5512
5513 } // namespace tool
5514 } // namespace ld
5515