1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2005-2009 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25 #ifndef __EXECUTABLE_MACH_O__
26 #define __EXECUTABLE_MACH_O__
27
28 #include <stdint.h>
29 #include <stddef.h>
30 #include <fcntl.h>
31 #include <sys/time.h>
32 #include <uuid/uuid.h>
33 #include <mach/i386/thread_status.h>
34 #include <mach/ppc/thread_status.h>
35 #include <CommonCrypto/CommonDigest.h>
36
37 #include <vector>
38 #include <algorithm>
39 #include <map>
40 #include <set>
41 #include <ext/hash_map>
42
43 #include "ObjectFile.h"
44 #include "ExecutableFile.h"
45 #include "Options.h"
46
47 #include "MachOFileAbstraction.hpp"
48 #include "MachOTrie.hpp"
49
50
51 //
52 //
53 // To implement architecture xxx, you must write template specializations for the following methods:
54 // MachHeaderAtom<xxx>::setHeaderInfo()
55 // ThreadsLoadCommandsAtom<xxx>::getSize()
56 // ThreadsLoadCommandsAtom<xxx>::copyRawContent()
57 // Writer<xxx>::addObjectRelocs()
58 // Writer<xxx>::fixUpReferenceRelocatable()
59 // Writer<xxx>::fixUpReferenceFinal()
60 // Writer<xxx>::stubableReference()
61 // Writer<xxx>::weakImportReferenceKind()
62 // Writer<xxx>::GOTReferenceKind()
63 //
64
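// For a concrete sense of what that entails, here is a rough sketch (comment
// only, not compiled) of the specialization set for a hypothetical
// architecture tag xxx, using the signatures declared later in this file:
//
//   template <> void MachHeaderAtom<xxx>::setHeaderInfo(macho_header<xxx::P>& header) const
//       { header.set_cputype(...); header.set_cpusubtype(...); }
//   template <> uint64_t ThreadsLoadCommandsAtom<xxx>::getSize() const
//       { return /* size of the LC_UNIXTHREAD command for xxx */; }
//   template <> void ThreadsLoadCommandsAtom<xxx>::copyRawContent(uint8_t buffer[]) const
//       { /* write the thread_command header plus the initial register state */ }
//   template <> uint32_t Writer<xxx>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
//       { /* translate ref->getKind() into macho_relocation_info<P> entries */ }
//
// ...and likewise for fixUpReferenceRelocatable(), fixUpReferenceFinal(),
// stubableReference(), weakImportReferenceKind() and GOTReferenceKind().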
65
66 namespace mach_o {
67 namespace executable {
68
69 // forward references
70 template <typename A> class WriterAtom;
71 template <typename A> class PageZeroAtom;
72 template <typename A> class CustomStackAtom;
73 template <typename A> class MachHeaderAtom;
74 template <typename A> class SegmentLoadCommandsAtom;
75 template <typename A> class EncryptionLoadCommandsAtom;
76 template <typename A> class SymbolTableLoadCommandsAtom;
77 template <typename A> class DyldInfoLoadCommandsAtom;
78 template <typename A> class ThreadsLoadCommandsAtom;
79 template <typename A> class DylibIDLoadCommandsAtom;
80 template <typename A> class RoutinesLoadCommandsAtom;
81 template <typename A> class DyldLoadCommandsAtom;
82 template <typename A> class UUIDLoadCommandAtom;
83 template <typename A> class LinkEditAtom;
84 template <typename A> class SectionRelocationsLinkEditAtom;
85 template <typename A> class CompressedRebaseInfoLinkEditAtom;
86 template <typename A> class CompressedBindingInfoLinkEditAtom;
87 template <typename A> class CompressedWeakBindingInfoLinkEditAtom;
88 template <typename A> class CompressedLazyBindingInfoLinkEditAtom;
89 template <typename A> class CompressedExportInfoLinkEditAtom;
90 template <typename A> class LocalRelocationsLinkEditAtom;
91 template <typename A> class ExternalRelocationsLinkEditAtom;
92 template <typename A> class SymbolTableLinkEditAtom;
93 template <typename A> class SegmentSplitInfoLoadCommandsAtom;
94 template <typename A> class SegmentSplitInfoContentAtom;
95 template <typename A> class IndirectTableLinkEditAtom;
96 template <typename A> class ModuleInfoLinkEditAtom;
97 template <typename A> class StringsLinkEditAtom;
98 template <typename A> class LoadCommandsPaddingAtom;
99 template <typename A> class UnwindInfoAtom;
100 template <typename A> class StubAtom;
101 template <typename A> class StubHelperAtom;
102 template <typename A> class ClassicStubHelperAtom;
103 template <typename A> class HybridStubHelperAtom;
104 template <typename A> class HybridStubHelperHelperAtom;
105 template <typename A> class FastStubHelperAtom;
106 template <typename A> class FastStubHelperHelperAtom;
107 template <typename A> class LazyPointerAtom;
108 template <typename A> class NonLazyPointerAtom;
109 template <typename A> class DylibLoadCommandsAtom;
110 template <typename A> class BranchIslandAtom;
111
112
113 // SectionInfo should be nested inside Writer, but I can't figure out how to make the type accessible to the Atom classes
114 class SectionInfo : public ObjectFile::Section {
115 public:
116 SectionInfo() : fFileOffset(0), fSize(0), fRelocCount(0), fRelocOffset(0),
117 fIndirectSymbolOffset(0), fAlignment(0), fAllLazyPointers(false),
118 fAllLazyDylibPointers(false),fAllNonLazyPointers(false), fAllStubs(false),
119 fAllSelfModifyingStubs(false), fAllStubHelpers(false),
120 fAllZeroFill(false), fVirtualSection(false),
121 fHasTextLocalRelocs(false), fHasTextExternalRelocs(false)
122 { fSegmentName[0] = '\0'; fSectionName[0] = '\0'; }
123 void setIndex(unsigned int index) { fIndex=index; }
124 std::vector<ObjectFile::Atom*> fAtoms;
125 char fSegmentName[20];
126 char fSectionName[20];
127 uint64_t fFileOffset;
128 uint64_t fSize;
129 uint32_t fRelocCount;
130 uint32_t fRelocOffset;
131 uint32_t fIndirectSymbolOffset;
132 uint8_t fAlignment;
133 bool fAllLazyPointers;
134 bool fAllLazyDylibPointers;
135 bool fAllNonLazyPointers;
136 bool fAllStubs;
137 bool fAllSelfModifyingStubs;
138 bool fAllStubHelpers;
139 bool fAllZeroFill;
140 bool fVirtualSection;
141 bool fHasTextLocalRelocs;
142 bool fHasTextExternalRelocs;
143 };
144
145 // SegmentInfo should be nested inside Writer, but I can't figure out how to make the type accessible to the Atom classes
146 class SegmentInfo
147 {
148 public:
149 SegmentInfo(uint64_t pageSize) : fInitProtection(0), fMaxProtection(0), fFileOffset(0), fFileSize(0),
150 fBaseAddress(0), fSize(0), fPageSize(pageSize), fFixedAddress(false),
151 fIndependentAddress(false), fHasLoadCommand(true) { fName[0] = '\0'; }
152 std::vector<class SectionInfo*> fSections;
153 char fName[20];
154 uint32_t fInitProtection;
155 uint32_t fMaxProtection;
156 uint64_t fFileOffset;
157 uint64_t fFileSize;
158 uint64_t fBaseAddress;
159 uint64_t fSize;
160 uint64_t fPageSize;
161 bool fFixedAddress;
162 bool fIndependentAddress;
163 bool fHasLoadCommand;
164 };
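// Usage sketch (illustrative only; the names and values below are invented):
// the Writer builds one SegmentInfo per output segment and attaches a
// SectionInfo, with its ordered list of atoms, for each section in it.
//
//   SegmentInfo* textSeg = new SegmentInfo(4096);            // page size
//   strcpy(textSeg->fName, "__TEXT");
//   textSeg->fInitProtection = VM_PROT_READ | VM_PROT_EXECUTE;
//   textSeg->fMaxProtection  = VM_PROT_READ | VM_PROT_EXECUTE;
//
//   SectionInfo* textSect = new SectionInfo();
//   strcpy(textSect->fSegmentName, "__TEXT");
//   strcpy(textSect->fSectionName, "__text");
//   textSect->fAlignment = 4;                                // log2: 16-byte aligned
//   textSect->fAtoms.push_back(someFunctionAtom);            // hypothetical atom
//   textSeg->fSections.push_back(textSect);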
165
166
167 struct RebaseInfo {
168 RebaseInfo(uint8_t t, uint64_t addr) : fType(t), fAddress(addr) {}
169 uint8_t fType;
170 uint64_t fAddress;
171 // for sorting
172 int operator<(const RebaseInfo& rhs) const {
173 // sort by type, then address
174 if ( this->fType != rhs.fType )
175 return (this->fType < rhs.fType );
176 return (this->fAddress < rhs.fAddress );
177 }
178 };
179
180 struct BindingInfo {
181 BindingInfo(uint8_t t, int ord, const char* sym, bool weak_import, uint64_t addr, int64_t addend)
182 : fType(t), fFlags(weak_import ? BIND_SYMBOL_FLAGS_WEAK_IMPORT : 0 ), fLibraryOrdinal(ord),
183 fSymbolName(sym), fAddress(addr), fAddend(addend) {}
184 BindingInfo(uint8_t t, const char* sym, bool non_weak_definition, uint64_t addr, int64_t addend)
185 : fType(t), fFlags(non_weak_definition ? BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION : 0 ), fLibraryOrdinal(0),
186 fSymbolName(sym), fAddress(addr), fAddend(addend) {}
187 uint8_t fType;
188 uint8_t fFlags;
189 int fLibraryOrdinal;
190 const char* fSymbolName;
191 uint64_t fAddress;
192 int64_t fAddend;
193
194 // for sorting
195 int operator<(const BindingInfo& rhs) const {
196 // sort by library, symbol, type, then address
197 if ( this->fLibraryOrdinal != rhs.fLibraryOrdinal )
198 return (this->fLibraryOrdinal < rhs.fLibraryOrdinal );
199 if ( this->fSymbolName != rhs.fSymbolName )
200 return ( strcmp(this->fSymbolName, rhs.fSymbolName) < 0 );
201 if ( this->fType != rhs.fType )
202 return (this->fType < rhs.fType );
203 return (this->fAddress < rhs.fAddress );
204 }
205 };
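// Both structs supply operator< so the Writer can std::sort() its tables before
// encoding them: rebase entries grouped by type then address, binding entries
// grouped by library ordinal, symbol, type and address.  Sorted runs compress
// well in the opcode streams because consecutive entries differ only by small
// deltas.  Sketch with made-up addresses:
//
//   std::vector<RebaseInfo> rebases;
//   rebases.push_back(RebaseInfo(REBASE_TYPE_POINTER, 0x2010));
//   rebases.push_back(RebaseInfo(REBASE_TYPE_POINTER, 0x2000));
//   std::sort(rebases.begin(), rebases.end());      // 0x2000 now precedes 0x2010
//
//   std::vector<BindingInfo> bindings;
//   bindings.push_back(BindingInfo(BIND_TYPE_POINTER, 2, "_malloc", false, 0x3008, 0));
//   bindings.push_back(BindingInfo(BIND_TYPE_POINTER, 1, "_free",   false, 0x3000, 0));
//   std::sort(bindings.begin(), bindings.end());    // ordinal 1 sorts before ordinal 2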
206
207
208 class ByteStream {
209 private:
210 std::vector<uint8_t> fData;
211 public:
212 std::vector<uint8_t>& bytes() { return fData; }
213 unsigned long size() const { return fData.size(); }
214 void reserve(unsigned long l) { fData.reserve(l); }
215 const uint8_t* start() const { return &fData[0]; }
216
217 void append_uleb128(uint64_t value) {
218 uint8_t byte;
219 do {
220 byte = value & 0x7F;
221 value &= ~0x7F;
222 if ( value != 0 )
223 byte |= 0x80;
224 fData.push_back(byte);
225 value = value >> 7;
226 } while( byte >= 0x80 );
227 }
228
229 void append_sleb128(int64_t value) {
230 bool isNeg = ( value < 0 );
231 uint8_t byte;
232 bool more;
233 do {
234 byte = value & 0x7F;
235 value = value >> 7;
236 if ( isNeg )
237 more = ( (value != -1) || ((byte & 0x40) == 0) );
238 else
239 more = ( (value != 0) || ((byte & 0x40) != 0) );
240 if ( more )
241 byte |= 0x80;
242 fData.push_back(byte);
243 }
244 while( more );
245 }
246
247 void append_string(const char* str) {
248 for (const char* s = str; *s != '\0'; ++s)
249 fData.push_back(*s);
250 fData.push_back('\0');
251 }
252
253 void append_byte(uint8_t byte) {
254 fData.push_back(byte);
255 }
256
257 static unsigned int uleb128_size(uint64_t value) {
258 uint32_t result = 0;
259 do {
260 value = value >> 7;
261 ++result;
262 } while ( value != 0 );
263 return result;
264 }
265
266 void pad_to_size(unsigned int alignment) {
267 while ( (fData.size() % alignment) != 0 )
268 fData.push_back(0);
269 }
270 };
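// Worked example: ByteStream produces the standard DWARF-style LEB128 encodings
// used by the compressed dyld info.
//
//   ByteStream s;
//   s.append_uleb128(0x98765);    // 624485  ->  E5 8E 26
//   s.append_sleb128(-123456);    //         ->  C0 BB 78
//   s.append_string("_foo");      //         ->  5F 66 6F 6F 00
//   s.append_byte(0);
//   s.pad_to_size(8);             // zero-fill until size() is a multiple of 8
//
// ByteStream::uleb128_size(0x98765) returns 3, matching the three bytes above.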
271
272
273 template <typename A>
274 class Writer : public ExecutableFile::Writer
275 {
276 public:
277 Writer(const char* path, Options& options, std::vector<ExecutableFile::DyLibUsed>& dynamicLibraries);
278 virtual ~Writer();
279
280 virtual const char* getPath() { return fFilePath; }
281 virtual time_t getModificationTime() { return 0; }
282 virtual DebugInfoKind getDebugInfoKind() { return ObjectFile::Reader::kDebugInfoNone; }
283 virtual std::vector<class ObjectFile::Atom*>& getAtoms() { return fWriterSynthesizedAtoms; }
284 virtual std::vector<class ObjectFile::Atom*>* getJustInTimeAtomsFor(const char* name) { return NULL; }
285 virtual std::vector<Stab>* getStabs() { return NULL; }
286
287         virtual ObjectFile::Atom&                       makeObjcInfoAtom(ObjectFile::Reader::ObjcConstraint objcConstraint,
288 bool objcReplacementClasses);
289 virtual class ObjectFile::Atom* getUndefinedProxyAtom(const char* name);
290 virtual void addSynthesizedAtoms(const std::vector<class ObjectFile::Atom*>& existingAtoms,
291 class ObjectFile::Atom* dyldClassicHelperAtom,
292 class ObjectFile::Atom* dyldCompressedHelperAtom,
293 class ObjectFile::Atom* dyldLazyDylibHelperAtom,
294 bool biggerThanTwoGigs,
295 uint32_t dylibSymbolCount,
296 std::vector<class ObjectFile::Atom*>& newAtoms);
297 virtual uint64_t write(std::vector<class ObjectFile::Atom*>& atoms,
298 std::vector<class ObjectFile::Reader::Stab>& stabs,
299 class ObjectFile::Atom* entryPointAtom,
300 bool createUUID, bool canScatter,
301 ObjectFile::Reader::CpuConstraint cpuConstraint,
302 std::set<const class ObjectFile::Atom*>& atomsThatOverrideWeak,
303 bool hasExternalWeakDefinitions);
304
305 private:
306 typedef typename A::P P;
307 typedef typename A::P::uint_t pint_t;
308
309 enum RelocKind { kRelocNone, kRelocInternal, kRelocExternal };
310
311 void assignFileOffsets();
312 void synthesizeStubs(const std::vector<class ObjectFile::Atom*>& existingAtoms,
313 std::vector<class ObjectFile::Atom*>& newAtoms);
314 void synthesizeKextGOT(const std::vector<class ObjectFile::Atom*>& existingAtoms,
315 std::vector<class ObjectFile::Atom*>& newAtoms);
316 void createSplitSegContent();
317 void synthesizeUnwindInfoTable();
318 void insertDummyStubs();
319 void partitionIntoSections();
320 bool addBranchIslands();
321 bool createBranchIslands();
322 bool isBranchThatMightNeedIsland(uint8_t kind);
323 uint32_t textSizeWhenMightNeedBranchIslands();
324 uint32_t maxDistanceBetweenIslands();
325 void adjustLoadCommandsAndPadding();
326 void createDynamicLinkerCommand();
327 void createDylibCommands();
328 void buildLinkEdit();
329 const char* getArchString();
330 void writeMap();
331 uint64_t writeAtoms();
332 void writeNoOps(int fd, uint32_t from, uint32_t to);
333 void copyNoOps(uint8_t* from, uint8_t* to);
334 bool segmentsCanSplitApart(const ObjectFile::Atom& from, const ObjectFile::Atom& to);
335 void addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref);
336 void collectExportedAndImportedAndLocalAtoms();
337 void setNlistRange(std::vector<class ObjectFile::Atom*>& atoms, uint32_t startIndex, uint32_t count);
338 void addLocalLabel(ObjectFile::Atom& atom, uint32_t offsetInAtom, const char* name);
339 void addGlobalLabel(ObjectFile::Atom& atom, uint32_t offsetInAtom, const char* name);
340 void buildSymbolTable();
341 bool stringsNeedLabelsInObjects();
342 const char* symbolTableName(const ObjectFile::Atom* atom);
343 void setExportNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry);
344 void setImportNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry);
345 void setLocalNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry);
346 void copyNlistRange(const std::vector<macho_nlist<P> >& entries, uint32_t startIndex);
347 uint64_t getAtomLoadAddress(const ObjectFile::Atom* atom);
348 uint8_t ordinalForLibrary(ObjectFile::Reader* file);
349 bool targetRequiresWeakBinding(const ObjectFile::Atom& target);
350 int compressedOrdinalForImortedAtom(ObjectFile::Atom* target);
351 bool shouldExport(const ObjectFile::Atom& atom) const;
352 void buildFixups();
353 void adjustLinkEditSections();
354 void buildObjectFileFixups();
355 void buildExecutableFixups();
356 bool preboundLazyPointerType(uint8_t* type);
357 uint64_t relocAddressInFinalLinkedImage(uint64_t address, const ObjectFile::Atom* atom) const;
358 void fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const;
359 void fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const;
360 void fixUpReference_powerpc(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom,
361 uint8_t buffer[], bool finalLinkedImage) const;
362 uint32_t symbolIndex(ObjectFile::Atom& atom);
363 bool makesExternalRelocatableReference(ObjectFile::Atom& target) const;
364 uint32_t addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref);
365 uint32_t addObjectRelocs_powerpc(ObjectFile::Atom* atom, ObjectFile::Reference* ref);
366 uint8_t getRelocPointerSize();
367 uint64_t maxAddress();
368 bool stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref);
369 bool GOTReferenceKind(uint8_t kind);
370 bool optimizableGOTReferenceKind(uint8_t kind);
371 bool weakImportReferenceKind(uint8_t kind);
372 unsigned int collectStabs();
373 uint64_t valueForStab(const ObjectFile::Reader::Stab& stab);
374 uint32_t stringOffsetForStab(const ObjectFile::Reader::Stab& stab);
375 uint8_t sectionIndexForStab(const ObjectFile::Reader::Stab& stab);
376 void addStabs(uint32_t startIndex);
377 RelocKind relocationNeededInFinalLinkedImage(const ObjectFile::Atom& target) const;
378 bool illegalRelocInFinalLinkedImage(const ObjectFile::Reference&);
379 bool generatesLocalTextReloc(const ObjectFile::Reference&, const ObjectFile::Atom& atom, SectionInfo* curSection);
380 bool generatesExternalTextReloc(const ObjectFile::Reference&, const ObjectFile::Atom& atom, SectionInfo* curSection);
381 bool mightNeedPadSegment();
382 void scanForAbsoluteReferences();
383 bool needsModuleTable();
384 void optimizeDylibReferences();
385 bool indirectSymbolInRelocatableIsLocal(const ObjectFile::Reference* ref) const;
386
387 struct DirectLibrary {
388 class ObjectFile::Reader* fLibrary;
389 bool fWeak;
390 bool fReExport;
391 };
392
393 friend class WriterAtom<A>;
394 friend class PageZeroAtom<A>;
395 friend class CustomStackAtom<A>;
396 friend class MachHeaderAtom<A>;
397 friend class SegmentLoadCommandsAtom<A>;
398 friend class EncryptionLoadCommandsAtom<A>;
399 friend class SymbolTableLoadCommandsAtom<A>;
400 friend class DyldInfoLoadCommandsAtom<A>;
401 friend class ThreadsLoadCommandsAtom<A>;
402 friend class DylibIDLoadCommandsAtom<A>;
403 friend class RoutinesLoadCommandsAtom<A>;
404 friend class DyldLoadCommandsAtom<A>;
405 friend class UUIDLoadCommandAtom<A>;
406 friend class LinkEditAtom<A>;
407 friend class SectionRelocationsLinkEditAtom<A>;
408 friend class CompressedRebaseInfoLinkEditAtom<A>;
409 friend class CompressedBindingInfoLinkEditAtom<A>;
410 friend class CompressedWeakBindingInfoLinkEditAtom<A>;
411 friend class CompressedLazyBindingInfoLinkEditAtom<A>;
412 friend class CompressedExportInfoLinkEditAtom<A>;
413 friend class LocalRelocationsLinkEditAtom<A>;
414 friend class ExternalRelocationsLinkEditAtom<A>;
415 friend class SymbolTableLinkEditAtom<A>;
416 friend class SegmentSplitInfoLoadCommandsAtom<A>;
417 friend class SegmentSplitInfoContentAtom<A>;
418 friend class IndirectTableLinkEditAtom<A>;
419 friend class ModuleInfoLinkEditAtom<A>;
420 friend class StringsLinkEditAtom<A>;
421 friend class LoadCommandsPaddingAtom<A>;
422 friend class UnwindInfoAtom<A>;
423 friend class StubAtom<A>;
424 friend class StubHelperAtom<A>;
425 friend class ClassicStubHelperAtom<A>;
426 friend class HybridStubHelperAtom<A>;
427 friend class FastStubHelperAtom<A>;
428 friend class FastStubHelperHelperAtom<A>;
429 friend class HybridStubHelperHelperAtom<A>;
430 friend class LazyPointerAtom<A>;
431 friend class NonLazyPointerAtom<A>;
432 friend class DylibLoadCommandsAtom<A>;
433 friend class BranchIslandAtom<A>;
434
435 const char* fFilePath;
436 Options& fOptions;
437 std::vector<class ObjectFile::Atom*>* fAllAtoms;
438 std::vector<class ObjectFile::Reader::Stab>* fStabs;
439 std::set<const class ObjectFile::Atom*>* fRegularDefAtomsThatOverrideADylibsWeakDef;
440 class SectionInfo* fLoadCommandsSection;
441 class SegmentInfo* fLoadCommandsSegment;
442 class MachHeaderAtom<A>* fMachHeaderAtom;
443 class EncryptionLoadCommandsAtom<A>* fEncryptionLoadCommand;
444 class SegmentLoadCommandsAtom<A>* fSegmentCommands;
445 class SymbolTableLoadCommandsAtom<A>* fSymbolTableCommands;
446 class LoadCommandsPaddingAtom<A>* fHeaderPadding;
447 class UnwindInfoAtom<A>* fUnwindInfoAtom;
448 class UUIDLoadCommandAtom<A>* fUUIDAtom;
449 std::vector<class ObjectFile::Atom*> fWriterSynthesizedAtoms;
450 std::vector<SegmentInfo*> fSegmentInfos;
451 class SegmentInfo* fPadSegmentInfo;
452 class ObjectFile::Atom* fEntryPoint;
453 class ObjectFile::Atom* fDyldClassicHelperAtom;
454 class ObjectFile::Atom* fDyldCompressedHelperAtom;
455 class ObjectFile::Atom* fDyldLazyDylibHelper;
456 std::map<class ObjectFile::Reader*, DylibLoadCommandsAtom<A>*> fLibraryToLoadCommand;
457 std::map<class ObjectFile::Reader*, uint32_t> fLibraryToOrdinal;
458 std::map<class ObjectFile::Reader*, class ObjectFile::Reader*> fLibraryAliases;
459 std::set<class ObjectFile::Reader*> fForcedWeakImportReaders;
460 std::vector<class ObjectFile::Atom*> fExportedAtoms;
461 std::vector<class ObjectFile::Atom*> fImportedAtoms;
462 std::vector<class ObjectFile::Atom*> fLocalSymbolAtoms;
463 std::vector<macho_nlist<P> > fLocalExtraLabels;
464 std::vector<macho_nlist<P> > fGlobalExtraLabels;
465 std::map<ObjectFile::Atom*, uint32_t> fAtomToSymbolIndex;
466 class SectionRelocationsLinkEditAtom<A>* fSectionRelocationsAtom;
467 class CompressedRebaseInfoLinkEditAtom<A>* fCompressedRebaseInfoAtom;
468 class CompressedBindingInfoLinkEditAtom<A>* fCompressedBindingInfoAtom;
469 class CompressedWeakBindingInfoLinkEditAtom<A>* fCompressedWeakBindingInfoAtom;
470 class CompressedLazyBindingInfoLinkEditAtom<A>* fCompressedLazyBindingInfoAtom;
471 class CompressedExportInfoLinkEditAtom<A>* fCompressedExportInfoAtom;
472 class LocalRelocationsLinkEditAtom<A>* fLocalRelocationsAtom;
473 class ExternalRelocationsLinkEditAtom<A>* fExternalRelocationsAtom;
474 class SymbolTableLinkEditAtom<A>* fSymbolTableAtom;
475 class SegmentSplitInfoContentAtom<A>* fSplitCodeToDataContentAtom;
476 class IndirectTableLinkEditAtom<A>* fIndirectTableAtom;
477 class ModuleInfoLinkEditAtom<A>* fModuleInfoAtom;
478 class StringsLinkEditAtom<A>* fStringsAtom;
479 class PageZeroAtom<A>* fPageZeroAtom;
480 class NonLazyPointerAtom<A>* fFastStubGOTAtom;
481 macho_nlist<P>* fSymbolTable;
482 std::vector<macho_relocation_info<P> > fSectionRelocs;
483 std::vector<macho_relocation_info<P> > fInternalRelocs;
484 std::vector<macho_relocation_info<P> > fExternalRelocs;
485 std::vector<RebaseInfo> fRebaseInfo;
486 std::vector<BindingInfo> fBindingInfo;
487 std::vector<BindingInfo> fWeakBindingInfo;
488 std::map<const ObjectFile::Atom*,ObjectFile::Atom*> fStubsMap;
489 std::map<ObjectFile::Atom*,ObjectFile::Atom*> fGOTMap;
490 std::vector<class StubAtom<A>*> fAllSynthesizedStubs;
491 std::vector<ObjectFile::Atom*> fAllSynthesizedStubHelpers;
492 std::vector<class LazyPointerAtom<A>*> fAllSynthesizedLazyPointers;
493 std::vector<class LazyPointerAtom<A>*> fAllSynthesizedLazyDylibPointers;
494 std::vector<class NonLazyPointerAtom<A>*> fAllSynthesizedNonLazyPointers;
495 uint32_t fSymbolTableCount;
496 uint32_t fSymbolTableStabsCount;
497 uint32_t fSymbolTableStabsStartIndex;
498 uint32_t fSymbolTableLocalCount;
499 uint32_t fSymbolTableLocalStartIndex;
500 uint32_t fSymbolTableExportCount;
501 uint32_t fSymbolTableExportStartIndex;
502 uint32_t fSymbolTableImportCount;
503 uint32_t fSymbolTableImportStartIndex;
504 uint32_t fLargestAtomSize;
505 uint32_t fDylibSymbolCountUpperBound;
506 bool fEmitVirtualSections;
507 bool fHasWeakExports;
508 bool fReferencesWeakImports;
509 bool fCanScatter;
510 bool fWritableSegmentPastFirst4GB;
511 bool fNoReExportedDylibs;
512 bool fBiggerThanTwoGigs;
513 bool fSlideable;
514 bool fHasThumbBranches;
515 std::map<const ObjectFile::Atom*,bool> fWeakImportMap;
516 std::set<const ObjectFile::Reader*> fDylibReadersWithNonWeakImports;
517 std::set<const ObjectFile::Reader*> fDylibReadersWithWeakImports;
518 SegmentInfo* fFirstWritableSegment;
519 ObjectFile::Reader::CpuConstraint fCpuConstraint;
520 uint32_t fAnonNameIndex;
521 };
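// Rough lifecycle implied by the public interface above (sketch only; the real
// driver lives in the linker proper, and construction of Options and the
// DyLibUsed vector is elided):
//
//   Writer<x86_64> writer(outputPath, options, dylibsUsed);
//   writer.addSynthesizedAtoms(allAtoms, classicHelperAtom, compressedHelperAtom,
//                              lazyDylibHelperAtom, /*biggerThanTwoGigs*/ false,
//                              dylibSymbolCount, newAtoms);
//   // ...caller folds newAtoms into allAtoms and finishes laying out atoms...
//   uint64_t fileSize = writer.write(allAtoms, stabs, entryPointAtom,
//                                    /*createUUID*/ true, /*canScatter*/ false,
//                                    cpuConstraint, atomsThatOverrideWeak,
//                                    /*hasExternalWeakDefinitions*/ false);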
522
523
524 class Segment : public ObjectFile::Segment
525 {
526 public:
527 Segment(const char* name, bool readable, bool writable, bool executable, bool fixedAddress)
528 : fName(name), fReadable(readable), fWritable(writable), fExecutable(executable), fFixedAddress(fixedAddress) {}
529 virtual const char* getName() const { return fName; }
530 virtual bool isContentReadable() const { return fReadable; }
531 virtual bool isContentWritable() const { return fWritable; }
532 virtual bool isContentExecutable() const { return fExecutable; }
533 virtual bool hasFixedAddress() const { return fFixedAddress; }
534
535 static Segment fgTextSegment;
536 static Segment fgPageZeroSegment;
537 static Segment fgLinkEditSegment;
538 static Segment fgStackSegment;
539 static Segment fgImportSegment;
540 static Segment fgROImportSegment;
541 static Segment fgDataSegment;
542 static Segment fgObjCSegment;
543 static Segment fgHeaderSegment;
544
545
546 private:
547 const char* fName;
548 const bool fReadable;
549 const bool fWritable;
550 const bool fExecutable;
551 const bool fFixedAddress;
552 };
553
554 Segment Segment::fgPageZeroSegment("__PAGEZERO", false, false, false, true);
555 Segment Segment::fgTextSegment("__TEXT", true, false, true, false);
556 Segment Segment::fgLinkEditSegment("__LINKEDIT", true, false, false, false);
557 Segment Segment::fgStackSegment("__UNIXSTACK", true, true, false, true);
558 Segment Segment::fgImportSegment("__IMPORT", true, true, true, false);
559 Segment Segment::fgROImportSegment("__IMPORT", true, false, true, false);
560 Segment Segment::fgDataSegment("__DATA", true, true, false, false);
561 Segment Segment::fgObjCSegment("__OBJC", true, true, false, false);
562 Segment Segment::fgHeaderSegment("__HEADER", true, false, true, false);
563
564
565 template <typename A>
566 class WriterAtom : public ObjectFile::Atom
567 {
568 public:
569 enum Kind { zeropage, machHeaderApp, machHeaderDylib, machHeaderBundle, machHeaderObject, loadCommands, undefinedProxy };
570 WriterAtom(Writer<A>& writer, Segment& segment) : fWriter(writer), fSegment(segment) { }
571
572 virtual ObjectFile::Reader* getFile() const { return &fWriter; }
573 virtual bool getTranslationUnitSource(const char** dir, const char** name) const { return false; }
574 virtual const char* getName() const { return NULL; }
575 virtual const char* getDisplayName() const { return this->getName(); }
576 virtual Scope getScope() const { return ObjectFile::Atom::scopeTranslationUnit; }
577 virtual DefinitionKind getDefinitionKind() const { return kRegularDefinition; }
578 virtual SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableNotIn; }
579 virtual bool dontDeadStrip() const { return true; }
580 virtual bool isZeroFill() const { return false; }
581 virtual bool isThumb() const { return false; }
582 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return fgEmptyReferenceList; }
583 virtual bool mustRemainInSection() const { return true; }
584 virtual ObjectFile::Segment& getSegment() const { return fSegment; }
585 virtual ObjectFile::Atom& getFollowOnAtom() const { return *((ObjectFile::Atom*)NULL); }
586 virtual uint32_t getOrdinal() const { return 0; }
587 virtual std::vector<ObjectFile::LineInfo>* getLineInfo() const { return NULL; }
588 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(2); }
589 virtual void copyRawContent(uint8_t buffer[]) const { throw "don't use copyRawContent"; }
590 virtual void setScope(Scope) { }
591
592
593 protected:
594 virtual ~WriterAtom() {}
595 typedef typename A::P P;
596 typedef typename A::P::E E;
597
598 static Segment& headerSegment(Writer<A>& writer) { return (writer.fOptions.outputKind()==Options::kPreload)
599 ? Segment::fgHeaderSegment : Segment::fgTextSegment; }
600
601 static std::vector<ObjectFile::Reference*> fgEmptyReferenceList;
602
603 Writer<A>& fWriter;
604 Segment& fSegment;
605 };
606
607 template <typename A> std::vector<ObjectFile::Reference*> WriterAtom<A>::fgEmptyReferenceList;
608
609
610 template <typename A>
611 class PageZeroAtom : public WriterAtom<A>
612 {
613 public:
614 PageZeroAtom(Writer<A>& writer) : WriterAtom<A>(writer, Segment::fgPageZeroSegment),
615 fSize(fWriter.fOptions.zeroPageSize()) {}
616 virtual const char* getDisplayName() const { return "page zero content"; }
617 virtual bool isZeroFill() const { return true; }
618 virtual uint64_t getSize() const { return fSize; }
619 virtual const char* getSectionName() const { return "._zeropage"; }
620 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(12); }
621 void setSize(uint64_t size) { fSize = size; }
622 private:
623 using WriterAtom<A>::fWriter;
624 typedef typename A::P P;
625 uint64_t fSize;
626 };
627
628
629 template <typename A>
630 class DsoHandleAtom : public WriterAtom<A>
631 {
632 public:
633 DsoHandleAtom(Writer<A>& writer) : WriterAtom<A>(writer, Segment::fgTextSegment) {}
634 virtual const char* getName() const { return "___dso_handle"; }
635 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
636 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableNotIn; }
637 virtual uint64_t getSize() const { return 0; }
638 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(12); }
639 virtual const char* getSectionName() const { return "._mach_header"; }
640 virtual void copyRawContent(uint8_t buffer[]) const {}
641 };
642
643
644 template <typename A>
645 class MachHeaderAtom : public WriterAtom<A>
646 {
647 public:
648 MachHeaderAtom(Writer<A>& writer) : WriterAtom<A>(writer, headerSegment(writer)) {}
649 virtual const char* getName() const;
650 virtual const char* getDisplayName() const;
651 virtual ObjectFile::Atom::Scope getScope() const;
652 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const;
653 virtual uint64_t getSize() const { return sizeof(macho_header<typename A::P>); }
654 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(12); }
655 virtual const char* getSectionName() const { return "._mach_header"; }
656 virtual uint32_t getOrdinal() const { return 1; }
657 virtual void copyRawContent(uint8_t buffer[]) const;
658 private:
659 using WriterAtom<A>::fWriter;
660 typedef typename A::P P;
661 void setHeaderInfo(macho_header<typename A::P>& header) const;
662 };
663
664 template <typename A>
665 class CustomStackAtom : public WriterAtom<A>
666 {
667 public:
668 CustomStackAtom(Writer<A>& writer);
669 virtual const char* getDisplayName() const { return "custom stack content"; }
670 virtual bool isZeroFill() const { return true; }
671 virtual uint64_t getSize() const { return fWriter.fOptions.customStackSize(); }
672 virtual const char* getSectionName() const { return "._stack"; }
673 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(12); }
674 private:
675 using WriterAtom<A>::fWriter;
676 typedef typename A::P P;
677 static bool stackGrowsDown();
678 };
679
680 template <typename A>
681 class LoadCommandAtom : public WriterAtom<A>
682 {
683 protected:
684 LoadCommandAtom(Writer<A>& writer) : WriterAtom<A>(writer, headerSegment(writer)), fOrdinal(fgCurrentOrdinal++) {}
685 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(log2(sizeof(typename A::P::uint_t))); }
686 virtual const char* getSectionName() const { return "._load_commands"; }
687 virtual uint32_t getOrdinal() const { return fOrdinal; }
688 static uint64_t alignedSize(uint64_t size);
689 protected:
690 uint32_t fOrdinal;
691 static uint32_t fgCurrentOrdinal;
692 };
693
694 template <typename A> uint32_t LoadCommandAtom<A>::fgCurrentOrdinal = 0;
695
696 template <typename A>
697 class SegmentLoadCommandsAtom : public LoadCommandAtom<A>
698 {
699 public:
700 SegmentLoadCommandsAtom(Writer<A>& writer)
701 : LoadCommandAtom<A>(writer), fCommandCount(0), fSize(0)
702 { writer.fSegmentCommands = this; }
703 virtual const char* getDisplayName() const { return "segment load commands"; }
704 virtual uint64_t getSize() const { return fSize; }
705 virtual void copyRawContent(uint8_t buffer[]) const;
706
707 void computeSize();
708 void setup();
709 unsigned int commandCount() { return fCommandCount; }
710 private:
711 using WriterAtom<A>::fWriter;
712 typedef typename A::P P;
713 unsigned int fCommandCount;
714 uint32_t fSize;
715 };
716
717
718 template <typename A>
719 class SymbolTableLoadCommandsAtom : public LoadCommandAtom<A>
720 {
721 public:
722 SymbolTableLoadCommandsAtom(Writer<A>&);
723 virtual const char* getDisplayName() const { return "symbol table load commands"; }
724 virtual uint64_t getSize() const;
725 virtual void copyRawContent(uint8_t buffer[]) const;
726 unsigned int commandCount();
727 void needDynamicTable();
728 private:
729 using WriterAtom<A>::fWriter;
730 typedef typename A::P P;
731 bool fNeedsDynamicSymbolTable;
732 macho_symtab_command<typename A::P> fSymbolTable;
733 macho_dysymtab_command<typename A::P> fDynamicSymbolTable;
734 };
735
736 template <typename A>
737 class ThreadsLoadCommandsAtom : public LoadCommandAtom<A>
738 {
739 public:
740 ThreadsLoadCommandsAtom(Writer<A>& writer)
741 : LoadCommandAtom<A>(writer) {}
742 virtual const char* getDisplayName() const { return "thread load commands"; }
743 virtual uint64_t getSize() const;
744 virtual void copyRawContent(uint8_t buffer[]) const;
745 private:
746 using WriterAtom<A>::fWriter;
747 typedef typename A::P P;
748 uint8_t* fBuffer;
749 uint32_t fBufferSize;
750 };
751
752 template <typename A>
753 class DyldLoadCommandsAtom : public LoadCommandAtom<A>
754 {
755 public:
756 DyldLoadCommandsAtom(Writer<A>& writer) : LoadCommandAtom<A>(writer) {}
757 virtual const char* getDisplayName() const { return "dyld load command"; }
758 virtual uint64_t getSize() const;
759 virtual void copyRawContent(uint8_t buffer[]) const;
760 private:
761 using WriterAtom<A>::fWriter;
762 typedef typename A::P P;
763 };
764
765 template <typename A>
766 class SegmentSplitInfoLoadCommandsAtom : public LoadCommandAtom<A>
767 {
768 public:
769 SegmentSplitInfoLoadCommandsAtom(Writer<A>& writer) : LoadCommandAtom<A>(writer) {}
770 virtual const char* getDisplayName() const { return "segment split info load command"; }
771 virtual uint64_t getSize() const;
772 virtual void copyRawContent(uint8_t buffer[]) const;
773 private:
774 using WriterAtom<A>::fWriter;
775 typedef typename A::P P;
776 };
777
778 template <typename A>
779 class AllowableClientLoadCommandsAtom : public LoadCommandAtom<A>
780 {
781 public:
782 AllowableClientLoadCommandsAtom(Writer<A>& writer, const char* client) :
783 LoadCommandAtom<A>(writer), clientString(client) {}
784 virtual const char* getDisplayName() const { return "allowable_client load command"; }
785 virtual uint64_t getSize() const;
786 virtual void copyRawContent(uint8_t buffer[]) const;
787 private:
788 using WriterAtom<A>::fWriter;
789 typedef typename A::P P;
790 const char* clientString;
791 };
792
793 template <typename A>
794 class DylibLoadCommandsAtom : public LoadCommandAtom<A>
795 {
796 public:
797 DylibLoadCommandsAtom(Writer<A>& writer, ExecutableFile::DyLibUsed& info)
798 : LoadCommandAtom<A>(writer), fInfo(info),
799 fOptimizedAway(false) { if (fInfo.options.fLazyLoad) this->fOrdinal += 256; }
800 virtual const char* getDisplayName() const { return "dylib load command"; }
801 virtual uint64_t getSize() const;
802 virtual void copyRawContent(uint8_t buffer[]) const;
803 virtual void optimizeAway() { fOptimizedAway = true; }
804 bool linkedWeak() { return fInfo.options.fWeakImport; }
805 private:
806 using WriterAtom<A>::fWriter;
807 typedef typename A::P P;
808 ExecutableFile::DyLibUsed fInfo;
809 bool fOptimizedAway;
810 };
811
812 template <typename A>
813 class DylibIDLoadCommandsAtom : public LoadCommandAtom<A>
814 {
815 public:
816 DylibIDLoadCommandsAtom(Writer<A>& writer) : LoadCommandAtom<A>(writer) {}
817 virtual const char* getDisplayName() const { return "dylib ID load command"; }
818 virtual uint64_t getSize() const;
819 virtual void copyRawContent(uint8_t buffer[]) const;
820 private:
821 using WriterAtom<A>::fWriter;
822 typedef typename A::P P;
823 };
824
825 template <typename A>
826 class RoutinesLoadCommandsAtom : public LoadCommandAtom<A>
827 {
828 public:
829 RoutinesLoadCommandsAtom(Writer<A>& writer) : LoadCommandAtom<A>(writer) {}
830 virtual const char* getDisplayName() const { return "routines load command"; }
831 virtual uint64_t getSize() const { return sizeof(macho_routines_command<typename A::P>); }
832 virtual void copyRawContent(uint8_t buffer[]) const;
833 private:
834 using WriterAtom<A>::fWriter;
835 typedef typename A::P P;
836 };
837
838 template <typename A>
839 class SubUmbrellaLoadCommandsAtom : public LoadCommandAtom<A>
840 {
841 public:
842 SubUmbrellaLoadCommandsAtom(Writer<A>& writer, const char* name)
843 : LoadCommandAtom<A>(writer), fName(name) {}
844 virtual const char* getDisplayName() const { return "sub-umbrella load command"; }
845 virtual uint64_t getSize() const;
846 virtual void copyRawContent(uint8_t buffer[]) const;
847 private:
848 typedef typename A::P P;
849 const char* fName;
850 };
851
852 template <typename A>
853 class SubLibraryLoadCommandsAtom : public LoadCommandAtom<A>
854 {
855 public:
856 SubLibraryLoadCommandsAtom(Writer<A>& writer, const char* nameStart, int nameLen)
857 : LoadCommandAtom<A>(writer), fNameStart(nameStart), fNameLength(nameLen) {}
858 virtual const char* getDisplayName() const { return "sub-library load command"; }
859 virtual uint64_t getSize() const;
860 virtual void copyRawContent(uint8_t buffer[]) const;
861 private:
862 using WriterAtom<A>::fWriter;
863 typedef typename A::P P;
864 const char* fNameStart;
865 int fNameLength;
866 };
867
868 template <typename A>
869 class UmbrellaLoadCommandsAtom : public LoadCommandAtom<A>
870 {
871 public:
872 UmbrellaLoadCommandsAtom(Writer<A>& writer, const char* name)
873 : LoadCommandAtom<A>(writer), fName(name) {}
874 virtual const char* getDisplayName() const { return "umbrella load command"; }
875 virtual uint64_t getSize() const;
876 virtual void copyRawContent(uint8_t buffer[]) const;
877 private:
878 using WriterAtom<A>::fWriter;
879 typedef typename A::P P;
880 const char* fName;
881 };
882
883 template <typename A>
884 class UUIDLoadCommandAtom : public LoadCommandAtom<A>
885 {
886 public:
887 UUIDLoadCommandAtom(Writer<A>& writer)
888 : LoadCommandAtom<A>(writer), fEmit(false) {}
889 virtual const char* getDisplayName() const { return "uuid load command"; }
890 virtual uint64_t getSize() const { return fEmit ? sizeof(macho_uuid_command<typename A::P>) : 0; }
891 virtual void copyRawContent(uint8_t buffer[]) const;
892 virtual void generate();
893 void setContent(const uint8_t uuid[16]);
894 const uint8_t* getUUID() { return fUUID; }
895 private:
896 using WriterAtom<A>::fWriter;
897 typedef typename A::P P;
898 uuid_t fUUID;
899 bool fEmit;
900 };
901
902
903 template <typename A>
904 class RPathLoadCommandsAtom : public LoadCommandAtom<A>
905 {
906 public:
907 RPathLoadCommandsAtom(Writer<A>& writer, const char* path)
908 : LoadCommandAtom<A>(writer), fPath(path) {}
909 virtual const char* getDisplayName() const { return "rpath load command"; }
910 virtual uint64_t getSize() const;
911 virtual void copyRawContent(uint8_t buffer[]) const;
912 private:
913 using WriterAtom<A>::fWriter;
914 typedef typename A::P P;
915 const char* fPath;
916 };
917
918 template <typename A>
919 class EncryptionLoadCommandsAtom : public LoadCommandAtom<A>
920 {
921 public:
922 EncryptionLoadCommandsAtom(Writer<A>& writer)
923 : LoadCommandAtom<A>(writer), fStartOffset(0),
924 fEndOffset(0) {}
925 virtual const char* getDisplayName() const { return "encryption info load command"; }
926 virtual uint64_t getSize() const { return sizeof(macho_encryption_info_command<typename A::P>); }
927 virtual void copyRawContent(uint8_t buffer[]) const;
928 void setStartEncryptionOffset(uint32_t off) { fStartOffset = off; }
929 void setEndEncryptionOffset(uint32_t off) { fEndOffset = off; }
930 private:
931 using WriterAtom<A>::fWriter;
932 typedef typename A::P P;
933 uint32_t fStartOffset;
934 uint32_t fEndOffset;
935 };
936
937 template <typename A>
938 class DyldInfoLoadCommandsAtom : public LoadCommandAtom<A>
939 {
940 public:
941 DyldInfoLoadCommandsAtom(Writer<A>& writer)
942 : LoadCommandAtom<A>(writer) {}
943 virtual const char* getDisplayName() const { return "dyld info load command"; }
944 virtual uint64_t getSize() const { return sizeof(macho_dyld_info_command<typename A::P>); }
945 virtual void copyRawContent(uint8_t buffer[]) const;
946 private:
947 using WriterAtom<A>::fWriter;
948 typedef typename A::P P;
949 };
950
951
952 template <typename A>
953 class LoadCommandsPaddingAtom : public WriterAtom<A>
954 {
955 public:
956 LoadCommandsPaddingAtom(Writer<A>& writer)
957 : WriterAtom<A>(writer, headerSegment(writer)), fSize(0) {}
958 virtual const char* getDisplayName() const { return "header padding"; }
959 virtual uint64_t getSize() const { return fSize; }
960 virtual const char* getSectionName() const { return "._load_cmds_pad"; }
961 virtual void copyRawContent(uint8_t buffer[]) const;
962
963 void setSize(uint64_t newSize);
964 private:
965 using WriterAtom<A>::fWriter;
966 typedef typename A::P P;
967 uint64_t fSize;
968 };
969
970 template <typename A>
971 class MinimalTextAtom : public WriterAtom<A>
972 {
973 public:
974 MinimalTextAtom(Writer<A>& writer)
975 : WriterAtom<A>(writer, headerSegment(writer)) {}
976 virtual const char* getDisplayName() const { return "minimal text"; }
977 virtual uint64_t getSize() const { return 0; }
978 virtual const char* getSectionName() const { return "__text"; }
979 virtual void copyRawContent(uint8_t buffer[]) const { }
980 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableNotIn; }
981
982 private:
983 using WriterAtom<A>::fWriter;
984 };
985
986
987 template <typename A>
988 class UnwindInfoAtom : public WriterAtom<A>
989 {
990 public:
991 UnwindInfoAtom(Writer<A>& writer) : WriterAtom<A>(writer, Segment::fgTextSegment),
992 fHeaderSize(0), fPagesSize(0), fAlignment(4) {}
993 virtual const char* getName() const { return "unwind info"; }
994 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeTranslationUnit; }
995 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableNotIn; }
996 virtual uint64_t getSize() const { return fHeaderSize+fPagesSize; }
997 virtual ObjectFile::Alignment getAlignment() const { return fAlignment; }
998 virtual const char* getSectionName() const { return "__unwind_info"; }
999 virtual uint32_t getOrdinal() const { return 1; }
1000 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)fReferences; }
1001 virtual void copyRawContent(uint8_t buffer[]) const;
1002
1003 void addUnwindInfo(ObjectFile::Atom* func, uint32_t offset, uint32_t encoding,
1004 ObjectFile::Reference* fdeRef, ObjectFile::Reference* lsda,
1005 ObjectFile::Atom* personalityPointer);
1006 void generate();
1007
1008 private:
1009 using WriterAtom<A>::fWriter;
1010 typedef typename A::P P;
1011 struct Info { ObjectFile::Atom* func; ObjectFile::Atom* fde; ObjectFile::Atom* lsda; uint32_t lsdaOffset; ObjectFile::Atom* personalityPointer; uint32_t encoding; };
1012 struct LSDAEntry { ObjectFile::Atom* func; ObjectFile::Atom* lsda; uint32_t lsdaOffset; };
1013 struct RegFixUp { uint8_t* contentPointer; ObjectFile::Atom* func; ObjectFile::Atom* fde; };
1014 struct CompressedFixUp { uint8_t* contentPointer; ObjectFile::Atom* func; ObjectFile::Atom* fromFunc; };
1015 struct CompressedEncodingFixUp { uint8_t* contentPointer; ObjectFile::Atom* fde; };
1016
1017 bool encodingMeansUseDwarf(compact_unwind_encoding_t encoding);
1018 void compressDuplicates(std::vector<Info>& uniqueInfos);
1019 void findCommonEncoding(const std::vector<Info>& uniqueInfos, std::map<uint32_t, unsigned int>& commonEncodings);
1020 void makeLsdaIndex(const std::vector<Info>& uniqueInfos, std::map<ObjectFile::Atom*, uint32_t>& lsdaIndexOffsetMap);
1021 unsigned int makeRegularSecondLevelPage(const std::vector<Info>& uniqueInfos, uint32_t pageSize, unsigned int endIndex,
1022 uint8_t*& pageEnd);
1023 unsigned int makeCompressedSecondLevelPage(const std::vector<Info>& uniqueInfos,
1024 const std::map<uint32_t,unsigned int> commonEncodings,
1025 uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd);
1026 void makePersonalityIndex(std::vector<Info>& uniqueInfos);
1027
1028
1029 uint32_t fHeaderSize;
1030 uint32_t fPagesSize;
1031 uint8_t* fHeaderContent;
1032 uint8_t* fPagesContent;
1033 uint8_t* fPagesContentForDelete;
1034 ObjectFile::Alignment fAlignment;
1035 std::vector<Info> fInfos;
1036 std::map<ObjectFile::Atom*, uint32_t> fPersonalityIndexMap;
1037 std::vector<LSDAEntry> fLSDAIndex;
1038 std::vector<RegFixUp> fRegFixUps;
1039 std::vector<CompressedFixUp> fCompressedFixUps;
1040 std::vector<CompressedEncodingFixUp> fCompressedEncodingFixUps;
1041 std::vector<ObjectFile::Reference*> fReferences;
1042 };
1043
1044
1045
1046 template <typename A>
1047 class LinkEditAtom : public WriterAtom<A>
1048 {
1049 public:
1050 LinkEditAtom(Writer<A>& writer) : WriterAtom<A>(writer, Segment::fgLinkEditSegment), fOrdinal(fgCurrentOrdinal++) {}
1051 uint64_t getFileOffset() const;
1052 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(log2(sizeof(typename A::P::uint_t))); }
1053 virtual uint32_t getOrdinal() const { return fOrdinal; }
1054 private:
1055 uint32_t fOrdinal;
1056 static uint32_t fgCurrentOrdinal;
1057 private:
1058 typedef typename A::P P;
1059 };
1060
1061 template <typename A> uint32_t LinkEditAtom<A>::fgCurrentOrdinal = 0;
1062
1063 template <typename A>
1064 class SectionRelocationsLinkEditAtom : public LinkEditAtom<A>
1065 {
1066 public:
1067 SectionRelocationsLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1068 virtual const char* getDisplayName() const { return "section relocations"; }
1069 virtual uint64_t getSize() const;
1070 virtual const char* getSectionName() const { return "._section_relocs"; }
1071 virtual void copyRawContent(uint8_t buffer[]) const;
1072 private:
1073 using WriterAtom<A>::fWriter;
1074 typedef typename A::P P;
1075 };
1076
1077 template <typename A>
1078 class CompressedInfoLinkEditAtom : public LinkEditAtom<A>
1079 {
1080 public:
1081 CompressedInfoLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1082 virtual uint64_t getSize() const { return fEncodedData.size(); }
1083 virtual void copyRawContent(uint8_t buffer[]) const { memcpy(buffer, fEncodedData.start(), fEncodedData.size()); }
1084 protected:
1085 typedef typename A::P::uint_t pint_t;
1086 ByteStream fEncodedData;
1087 private:
1088 using WriterAtom<A>::fWriter;
1089 typedef typename A::P P;
1090 };
1091
1092
1093
1094 template <typename A>
1095 class CompressedRebaseInfoLinkEditAtom : public CompressedInfoLinkEditAtom<A>
1096 {
1097 public:
1098 CompressedRebaseInfoLinkEditAtom(Writer<A>& writer) : CompressedInfoLinkEditAtom<A>(writer) { }
1099 virtual const char* getDisplayName() const { return "compressed rebase info"; }
1100 virtual const char* getSectionName() const { return "._rebase info"; }
1101 void encode();
1102 private:
1103 using CompressedInfoLinkEditAtom<A>::fEncodedData;
1104 using CompressedInfoLinkEditAtom<A>::fWriter;
1105 typedef typename A::P P;
1106 typedef typename A::P::uint_t pint_t;
1107 };
1108
1109 template <typename A>
1110 class CompressedBindingInfoLinkEditAtom : public CompressedInfoLinkEditAtom<A>
1111 {
1112 public:
1113 CompressedBindingInfoLinkEditAtom(Writer<A>& writer) : CompressedInfoLinkEditAtom<A>(writer) { }
1114 virtual const char* getDisplayName() const { return "compressed binding info"; }
1115 virtual const char* getSectionName() const { return "._binding info"; }
1116 void encode();
1117 private:
1118 using CompressedInfoLinkEditAtom<A>::fWriter;
1119 using CompressedInfoLinkEditAtom<A>::fEncodedData;
1120 typedef typename A::P P;
1121 typedef typename A::P::uint_t pint_t;
1122 };
1123
1124 template <typename A>
1125 class CompressedWeakBindingInfoLinkEditAtom : public CompressedInfoLinkEditAtom<A>
1126 {
1127 public:
1128 CompressedWeakBindingInfoLinkEditAtom(Writer<A>& writer) : CompressedInfoLinkEditAtom<A>(writer) { }
1129 virtual const char* getDisplayName() const { return "compressed weak binding info"; }
1130 virtual const char* getSectionName() const { return "._wkbinding info"; }
1131 void encode();
1132 private:
1133 using CompressedInfoLinkEditAtom<A>::fWriter;
1134 using CompressedInfoLinkEditAtom<A>::fEncodedData;
1135 typedef typename A::P P;
1136 typedef typename A::P::uint_t pint_t;
1137 };
1138
1139 template <typename A>
1140 class CompressedLazyBindingInfoLinkEditAtom : public CompressedInfoLinkEditAtom<A>
1141 {
1142 public:
1143 CompressedLazyBindingInfoLinkEditAtom(Writer<A>& writer) : CompressedInfoLinkEditAtom<A>(writer) { }
1144 virtual const char* getDisplayName() const { return "compressed lazy binding info"; }
1145 virtual const char* getSectionName() const { return "._lzbinding info"; }
1146 void encode();
1147 private:
1148 std::vector<uint32_t> fStarts;
1149
1150 using CompressedInfoLinkEditAtom<A>::fWriter;
1151 using CompressedInfoLinkEditAtom<A>::fEncodedData;
1152 typedef typename A::P P;
1153 typedef typename A::P::uint_t pint_t;
1154 };
1155
1156
1157 template <typename A>
1158 class CompressedExportInfoLinkEditAtom : public CompressedInfoLinkEditAtom<A>
1159 {
1160 public:
1161 CompressedExportInfoLinkEditAtom(Writer<A>& writer)
1162 : CompressedInfoLinkEditAtom<A>(writer), fStartNode(strdup("")) { }
1163 virtual const char* getDisplayName() const { return "compressed export info"; }
1164 virtual const char* getSectionName() const { return "._export info"; }
1165 void encode();
1166 private:
1167 using WriterAtom<A>::fWriter;
1168 using CompressedInfoLinkEditAtom<A>::fEncodedData;
1169 typedef typename A::P P;
1170 typedef typename A::P::uint_t pint_t;
1171 struct node;
1172
1173 struct edge
1174 {
1175 edge(const char* s, struct node* n) : fSubString(s), fChild(n) { }
1176 ~edge() { }
1177 const char* fSubString;
1178 struct node* fChild;
1179
1180 };
1181
1182 struct node
1183 {
1184 node(const char* s) : fCummulativeString(s), fAddress(0), fFlags(0), fOrdered(false),
1185 fHaveExportInfo(false), fTrieOffset(0) {}
1186 ~node() { }
1187 const char* fCummulativeString;
1188 std::vector<edge> fChildren;
1189 uint64_t fAddress;
1190 uint32_t fFlags;
1191 bool fOrdered;
1192 bool fHaveExportInfo;
1193 uint32_t fTrieOffset;
1194
1195 void addSymbol(const char* fullStr, uint64_t address, uint32_t flags) {
1196 const char* partialStr = &fullStr[strlen(fCummulativeString)];
1197 for (typename std::vector<edge>::iterator it = fChildren.begin(); it != fChildren.end(); ++it) {
1198 edge& e = *it;
1199 int subStringLen = strlen(e.fSubString);
1200 if ( strncmp(e.fSubString, partialStr, subStringLen) == 0 ) {
1201 // already have matching edge, go down that path
1202 e.fChild->addSymbol(fullStr, address, flags);
1203 return;
1204 }
1205 else {
1206 for (int i=subStringLen-1; i > 0; --i) {
1207 if ( strncmp(e.fSubString, partialStr, i) == 0 ) {
1208 // found a common substring, splice in new node
1209 // was A -> C, now A -> B -> C
1210 char* bNodeCummStr = strdup(e.fChild->fCummulativeString);
1211 bNodeCummStr[strlen(bNodeCummStr)+i-subStringLen] = '\0';
1212 //node* aNode = this;
1213 node* bNode = new node(bNodeCummStr);
1214 node* cNode = e.fChild;
1215 char* abEdgeStr = strdup(e.fSubString);
1216 abEdgeStr[i] = '\0';
1217 char* bcEdgeStr = strdup(&e.fSubString[i]);
1218 edge& abEdge = e;
1219 abEdge.fSubString = abEdgeStr;
1220 abEdge.fChild = bNode;
1221 edge bcEdge(bcEdgeStr, cNode);
1222 bNode->fChildren.push_back(bcEdge);
1223 bNode->addSymbol(fullStr, address, flags);
1224 return;
1225 }
1226 }
1227 }
1228 }
1229 // no commonality with any existing child, make a new edge that is this whole string
1230 node* newNode = new node(strdup(fullStr));
1231 edge newEdge(strdup(partialStr), newNode);
1232 fChildren.push_back(newEdge);
1233 newNode->fAddress = address;
1234 newNode->fFlags = flags;
1235 newNode->fHaveExportInfo = true;
1236 }
1237
1238 void addOrderedNodes(const char* name, std::vector<node*>& orderedNodes) {
1239 if ( !fOrdered ) {
1240 orderedNodes.push_back(this);
1241 //fprintf(stderr, "ordered %p %s\n", this, fCummulativeString);
1242 fOrdered = true;
1243 }
1244 const char* partialStr = &name[strlen(fCummulativeString)];
1245 for (typename std::vector<edge>::iterator it = fChildren.begin(); it != fChildren.end(); ++it) {
1246 edge& e = *it;
1247 int subStringLen = strlen(e.fSubString);
1248 if ( strncmp(e.fSubString, partialStr, subStringLen) == 0 ) {
1249 // already have matching edge, go down that path
1250 e.fChild->addOrderedNodes(name, orderedNodes);
1251 return;
1252 }
1253 }
1254 }
1255
1256 // byte for terminal node size in bytes, or 0x00 if not terminal node
1257                 // terminal node (uleb128 flags, uleb128 addr)
1258 // byte for child node count
1259 // each child: zero terminated substring, uleb128 node offset
1260 bool updateOffset(uint32_t& offset) {
1261 uint32_t nodeSize = 1; // byte for length of export info
1262 if ( fHaveExportInfo )
1263 nodeSize += ByteStream::uleb128_size(fFlags) + ByteStream::uleb128_size(fAddress);
1264
1265 // add children
1266                         ++nodeSize; // byte for count of children
1267 for (typename std::vector<edge>::iterator it = fChildren.begin(); it != fChildren.end(); ++it) {
1268 edge& e = *it;
1269 nodeSize += strlen(e.fSubString) + 1 + ByteStream::uleb128_size(e.fChild->fTrieOffset);
1270 }
1271 bool result = (fTrieOffset != offset);
1272 fTrieOffset = offset;
1273 //fprintf(stderr, "updateOffset %p %05d %s\n", this, fTrieOffset, fCummulativeString);
1274 offset += nodeSize;
1275 // return true if fTrieOffset was changed
1276 return result;
1277 }
1278
1279 void appendToStream(ByteStream& out) {
1280 if ( fHaveExportInfo ) {
1281 // nodes with export info: size, flags, address
1282 out.append_byte(out.uleb128_size(fFlags) + out.uleb128_size(fAddress));
1283 out.append_uleb128(fFlags);
1284 out.append_uleb128(fAddress);
1285 }
1286 else {
1287 // no export info
1288 out.append_byte(0);
1289 }
1290 // write number of children
1291 out.append_byte(fChildren.size());
1292 // write each child
1293 for (typename std::vector<edge>::iterator it = fChildren.begin(); it != fChildren.end(); ++it) {
1294 edge& e = *it;
1295 out.append_string(e.fSubString);
1296 out.append_uleb128(e.fChild->fTrieOffset);
1297 }
1298 }
1299
1300 };
1301
1302
1303 struct node fStartNode;
1304 };
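// Worked example of the node layout described in updateOffset()/appendToStream()
// above: a trie exporting the single symbol "_foo" with fFlags==0 and
// fAddress==0x1000 serializes to two nodes (offsets and bytes shown in hex):
//
//   root node  (offset 0):  00              no export info on the root
//                           01              one child edge
//                           5F 66 6F 6F 00  edge substring "_foo"
//                           08              uleb128 offset of the child node
//   child node (offset 8):  03              terminal info occupies 3 bytes
//                           00              uleb128 flags   (0)
//                           80 20           uleb128 address (0x1000)
//                           00              no children
//
// Because emitting a uleb128 offset can grow a node and push later nodes out,
// updateOffset() reports whether an offset moved; offsets are recomputed until
// they settle before appendToStream() writes the final bytes.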
1305
1306 template <typename A>
1307 class LocalRelocationsLinkEditAtom : public LinkEditAtom<A>
1308 {
1309 public:
1310 LocalRelocationsLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1311 virtual const char* getDisplayName() const { return "local relocations"; }
1312 virtual uint64_t getSize() const;
1313 virtual const char* getSectionName() const { return "._local_relocs"; }
1314 virtual void copyRawContent(uint8_t buffer[]) const;
1315 private:
1316 using WriterAtom<A>::fWriter;
1317 typedef typename A::P P;
1318 };
1319
1320 template <typename A>
1321 class SymbolTableLinkEditAtom : public LinkEditAtom<A>
1322 {
1323 public:
1324 SymbolTableLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1325 virtual const char* getDisplayName() const { return "symbol table"; }
1326 virtual uint64_t getSize() const;
1327 virtual const char* getSectionName() const { return "._symbol_table"; }
1328 virtual void copyRawContent(uint8_t buffer[]) const;
1329 private:
1330 using WriterAtom<A>::fWriter;
1331 typedef typename A::P P;
1332 };
1333
1334 template <typename A>
1335 class ExternalRelocationsLinkEditAtom : public LinkEditAtom<A>
1336 {
1337 public:
1338 ExternalRelocationsLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1339 virtual const char* getDisplayName() const { return "external relocations"; }
1340 virtual uint64_t getSize() const;
1341 virtual const char* getSectionName() const { return "._extern_relocs"; }
1342 virtual void copyRawContent(uint8_t buffer[]) const;
1343 private:
1344 using WriterAtom<A>::fWriter;
1345 typedef typename A::P P;
1346 };
1347
1348 struct IndirectEntry {
1349 uint32_t indirectIndex;
1350 uint32_t symbolIndex;
1351 };
1352
1353
1354 template <typename A>
1355 class SegmentSplitInfoContentAtom : public LinkEditAtom<A>
1356 {
1357 public:
1358 SegmentSplitInfoContentAtom(Writer<A>& writer) : LinkEditAtom<A>(writer), fCantEncode(false) { }
1359 virtual const char* getDisplayName() const { return "split segment info"; }
1360 virtual uint64_t getSize() const;
1361 virtual const char* getSectionName() const { return "._split_info"; }
1362 virtual void copyRawContent(uint8_t buffer[]) const;
1363 bool canEncode() { return !fCantEncode; }
1364 void setCantEncode() { fCantEncode = true; }
1365 void add32bitPointerLocation(const ObjectFile::Atom* atom, uint32_t offset) { fKind1Locations.push_back(AtomAndOffset(atom, offset)); }
1366 void add64bitPointerLocation(const ObjectFile::Atom* atom, uint32_t offset) { fKind2Locations.push_back(AtomAndOffset(atom, offset)); }
1367 void addPPCHi16Location(const ObjectFile::Atom* atom, uint32_t offset) { fKind3Locations.push_back(AtomAndOffset(atom, offset)); }
1368 void add32bitImportLocation(const ObjectFile::Atom* atom, uint32_t offset) { fKind4Locations.push_back(AtomAndOffset(atom, offset)); }
1369 void encode();
1370
1371 private:
1372 using WriterAtom<A>::fWriter;
1373 typedef typename A::P P;
1374 typedef typename A::P::uint_t pint_t;
1375 struct AtomAndOffset {
1376 AtomAndOffset(const ObjectFile::Atom* a, uint32_t off) : atom(a), offset(off) {}
1377 const ObjectFile::Atom* atom;
1378 uint32_t offset;
1379 };
1380 void uleb128EncodeAddresses(const std::vector<AtomAndOffset>& locations);
1381
1382 std::vector<AtomAndOffset> fKind1Locations;
1383 std::vector<AtomAndOffset> fKind2Locations;
1384 std::vector<AtomAndOffset> fKind3Locations;
1385 std::vector<AtomAndOffset> fKind4Locations;
1386 std::vector<uint8_t> fEncodedData;
1387 bool fCantEncode;
1388 };
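// Sketch (assumption, not lifted from ld64): uleb128EncodeAddresses() presumably
// emits each kind's locations as a run of ULEB128 address deltas.  Using the
// example_append_uleb128() sketch above, such a delta encoder could look like:
static inline void example_encode_address_deltas(std::vector<uint8_t>& out,
											const std::vector<uint64_t>& sortedAddresses)
{
	uint64_t prev = 0;
	for (std::vector<uint64_t>::const_iterator it = sortedAddresses.begin(); it != sortedAddresses.end(); ++it) {
		example_append_uleb128(out, *it - prev);	// store the difference from the previous address
		prev = *it;
	}
	example_append_uleb128(out, 0);					// a zero delta terminates the run (assumed convention)
}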
1389
1390 template <typename A>
1391 class IndirectTableLinkEditAtom : public LinkEditAtom<A>
1392 {
1393 public:
1394 IndirectTableLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1395 virtual const char* getDisplayName() const { return "indirect symbol table"; }
1396 virtual uint64_t getSize() const;
1397 virtual const char* getSectionName() const { return "._indirect_syms"; }
1398 virtual void copyRawContent(uint8_t buffer[]) const;
1399
1400 std::vector<IndirectEntry> fTable;
1401
1402 private:
1403 using WriterAtom<A>::fWriter;
1404 typedef typename A::P P;
1405 };
1406
1407 template <typename A>
1408 class ModuleInfoLinkEditAtom : public LinkEditAtom<A>
1409 {
1410 public:
1411 ModuleInfoLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer), fModuleNameOffset(0) { }
1412 virtual const char* getDisplayName() const { return "module table"; }
1413 virtual uint64_t getSize() const;
1414 virtual const char* getSectionName() const { return "._module_info"; }
1415 virtual void copyRawContent(uint8_t buffer[]) const;
1416
1417 void setName() { fModuleNameOffset = fWriter.fStringsAtom->add("single module"); }
1418 uint32_t getTableOfContentsFileOffset() const;
1419 uint32_t getModuleTableFileOffset() const;
1420 uint32_t getReferencesFileOffset() const;
1421 uint32_t getReferencesCount() const;
1422
1423 private:
1424 using WriterAtom<A>::fWriter;
1425 typedef typename A::P P;
1426 typedef typename A::P::uint_t pint_t;
1427 uint32_t fModuleNameOffset;
1428 };
1429
1430
1431 class CStringEquals
1432 {
1433 public:
1434 bool operator()(const char* left, const char* right) const { return (strcmp(left, right) == 0); }
1435 };
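// Usage sketch (illustration only, not part of ld64's interface): CStringEquals
// makes hash containers key on C-string contents rather than pointer identity.
static inline bool exampleSameSymbolName(const char* a, const char* b)
{
	return CStringEquals()(a, b);	// true for two distinct pointers to equal text, e.g. "_main"
}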
1436
1437 template <typename A>
1438 class StringsLinkEditAtom : public LinkEditAtom<A>
1439 {
1440 public:
1441 StringsLinkEditAtom(Writer<A>& writer);
1442 virtual const char* getDisplayName() const { return "string pool"; }
1443 virtual uint64_t getSize() const;
1444 virtual const char* getSectionName() const { return "._string_pool"; }
1445 virtual void copyRawContent(uint8_t buffer[]) const;
1446
1447 int32_t add(const char* name);
1448 int32_t addUnique(const char* name);
1449 int32_t emptyString() { return 1; }
1450 const char* stringForIndex(int32_t) const;
1451
1452 private:
1453 using WriterAtom<A>::fWriter;
1454 typedef typename A::P P;
1455 enum { kBufferSize = 0x01000000 };
1456 typedef __gnu_cxx::hash_map<const char*, int32_t, __gnu_cxx::hash<const char*>, CStringEquals> StringToOffset;
1457
1458 std::vector<char*> fFullBuffers;
1459 char* fCurrentBuffer;
1460 uint32_t fCurrentBufferUsed;
1461 StringToOffset fUniqueStrings;
1462 };
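// Note (assumption about the string-pool layout, which is defined elsewhere in
// this file): emptyString() returning offset 1 suggests the pool reserves its
// first byte, following the usual Mach-O convention that string-table offset 0
// means "no name".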
1463
1464
1465
1466 template <typename A>
1467 class UndefinedSymbolProxyAtom : public WriterAtom<A>
1468 {
1469 public:
1470 UndefinedSymbolProxyAtom(Writer<A>& writer, const char* name) : WriterAtom<A>(writer, Segment::fgLinkEditSegment), fName(name) {}
1471 virtual const char* getName() const { return fName; }
1472 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeGlobal; }
1473 virtual ObjectFile::Atom::DefinitionKind getDefinitionKind() const { return ObjectFile::Atom::kExternalDefinition; }
1474 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableIn; }
1475 virtual uint64_t getSize() const { return 0; }
1476 virtual const char* getSectionName() const { return "._imports"; }
1477 private:
1478 using WriterAtom<A>::fWriter;
1479 typedef typename A::P P;
1480 const char* fName;
1481 };
1482
1483 template <typename A>
1484 class BranchIslandAtom : public WriterAtom<A>
1485 {
1486 public:
1487 BranchIslandAtom(Writer<A>& writer, const char* name, int islandRegion, ObjectFile::Atom& target,
1488 ObjectFile::Atom& finalTarget, uint32_t finalTargetOffset);
1489 virtual const char* getName() const { return fName; }
1490 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1491 virtual uint64_t getSize() const;
1492 virtual bool isThumb() const { return (fIslandKind == kBranchIslandToThumb2); }
1493 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kBranchIsland; }
1494 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableIn; }
1495 virtual const char* getSectionName() const { return "__text"; }
1496 virtual void copyRawContent(uint8_t buffer[]) const;
1497 uint64_t getFinalTargetAdress() const { return fFinalTarget.getAddress() + fFinalTargetOffset; }
1498 private:
1499 using WriterAtom<A>::fWriter;
1500 enum IslandKind { kBranchIslandToARM, kBranchIslandToThumb2, kBranchIslandToThumb1, kBranchIslandNoPicToThumb1 };
1501 const char* fName;
1502 ObjectFile::Atom& fTarget;
1503 ObjectFile::Atom& fFinalTarget;
1504 uint32_t fFinalTargetOffset;
1505 IslandKind fIslandKind;
1506 };
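// Background note: branch islands exist because a single ARM or PowerPC branch
// instruction only reaches +/-32MB (a signed 24-bit displacement shifted left
// by 2), so a very large __text section is divided into regions and calls that
// would overshoot are routed through these small trampoline atoms.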
1507
1508 template <typename A>
1509 class StubAtom : public WriterAtom<A>
1510 {
1511 public:
1512 StubAtom(Writer<A>& writer, ObjectFile::Atom& target, bool forLazyDylib);
1513 virtual const char* getName() const { return fName; }
1514 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1515 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kStub; }
1516 virtual uint64_t getSize() const;
1517 virtual ObjectFile::Alignment getAlignment() const;
1518 virtual const char* getSectionName() const { return "__symbol_stub1"; }
1519 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1520 virtual void copyRawContent(uint8_t buffer[]) const;
1521 ObjectFile::Atom* getTarget() { return &fTarget; }
1522 virtual uint32_t getOrdinal() const { return fSortingOrdinal; }
1523 void setSortingOrdinal(uint32_t o) { fSortingOrdinal = o; }
1524 private:
1525 static const char* stubName(const char* importName);
1526 friend class LazyPointerAtom<A>;
1527 using WriterAtom<A>::fWriter;
1528 enum StubKind { kStubPIC, kStubNoPIC, kStubShort, kJumpTable };
1529 const char* fName;
1530 ObjectFile::Atom& fTarget;
1531 std::vector<ObjectFile::Reference*> fReferences;
1532 bool fForLazyDylib;
1533 StubKind fKind;
1534 uint32_t fSortingOrdinal;
1535 };
1536
1537
1538 template <typename A>
1539 class FastStubHelperHelperAtom : public WriterAtom<A>
1540 {
1541 public:
1542 FastStubHelperHelperAtom(Writer<A>& writer);
1543 virtual const char* getName() const { return " stub helpers"; } // name sorts to start of helpers
1544 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableIn; }
1545 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1546 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kStubHelper; }
1547 virtual uint64_t getSize() const;
1548 virtual const char* getSectionName() const { return "__stub_helper"; }
1549 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1550 virtual void copyRawContent(uint8_t buffer[]) const;
1551 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(0); }
1552 virtual uint32_t getOrdinal() const { return 0; }
1553 protected:
1554 using WriterAtom<A>::fWriter;
1555 std::vector<ObjectFile::Reference*> fReferences;
1556 };
1557
1558 template <typename A>
1559 class HybridStubHelperHelperAtom : public WriterAtom<A>
1560 {
1561 public:
1562 HybridStubHelperHelperAtom(Writer<A>& writer);
1563 virtual const char* getName() const { return " stub helpers"; } // name sorts to start of helpers
1564 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableIn; }
1565 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1566 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kStubHelper; }
1567 virtual uint64_t getSize() const;
1568 virtual const char* getSectionName() const { return "__stub_helper"; }
1569 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1570 virtual void copyRawContent(uint8_t buffer[]) const;
1571 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(0); }
1572 virtual uint32_t getOrdinal() const { return 0; }
1573 protected:
1574 using WriterAtom<A>::fWriter;
1575 std::vector<ObjectFile::Reference*> fReferences;
1576 };
1577
1578 template <typename A>
1579 class StubHelperAtom : public WriterAtom<A>
1580 {
1581 public:
1582 StubHelperAtom(Writer<A>& writer, ObjectFile::Atom& target,
1583 LazyPointerAtom<A>& lazyPointer, bool forLazyDylib)
1584 : WriterAtom<A>(writer, Segment::fgTextSegment), fName(stubName(target.getName())),
1585 fTarget(target), fLazyPointerAtom(lazyPointer) {
1586 writer.fAllSynthesizedStubHelpers.push_back(this);
1587 }
1588
1589 virtual const char* getName() const { return fName; }
1590 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1591 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kStubHelper; }
1592 virtual const char* getSectionName() const { return "__stub_helper"; }
1593 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1594 ObjectFile::Atom* getTarget() { return &fTarget; }
1595 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(0); }
1596 virtual uint32_t getOrdinal() const { return 1; }
1597 protected:
1598 static const char* stubName(const char* importName);
1599 using WriterAtom<A>::fWriter;
1600 const char* fName;
1601 ObjectFile::Atom& fTarget;
1602 LazyPointerAtom<A>& fLazyPointerAtom;
1603 std::vector<ObjectFile::Reference*> fReferences;
1604 };
1605
1606 template <typename A>
1607 class ClassicStubHelperAtom : public StubHelperAtom<A>
1608 {
1609 public:
1610 ClassicStubHelperAtom(Writer<A>& writer, ObjectFile::Atom& target,
1611 class LazyPointerAtom<A>& lazyPointer, bool forLazyDylib);
1612
1613 virtual uint64_t getSize() const;
1614 virtual void copyRawContent(uint8_t buffer[]) const;
1615 };
1616
1617
1618 template <typename A>
1619 class HybridStubHelperAtom : public StubHelperAtom<A>
1620 {
1621 public:
1622 HybridStubHelperAtom(Writer<A>& writer, ObjectFile::Atom& target,
1623 class LazyPointerAtom<A>& lazyPointer, bool forLazyDylib);
1624
1625 virtual uint64_t getSize() const;
1626 virtual void copyRawContent(uint8_t buffer[]) const;
1627 static class HybridStubHelperHelperAtom<A>* fgHelperHelperAtom;
1628 };
1629 template <typename A> class HybridStubHelperHelperAtom<A>* HybridStubHelperAtom<A>::fgHelperHelperAtom = NULL;
1630
1631 template <typename A>
1632 class FastStubHelperAtom : public StubHelperAtom<A>
1633 {
1634 public:
1635 FastStubHelperAtom(Writer<A>& writer, ObjectFile::Atom& target,
1636 class LazyPointerAtom<A>& lazyPointer, bool forLazyDylib);
1637 virtual uint64_t getSize() const;
1638 virtual void copyRawContent(uint8_t buffer[]) const;
1639 static FastStubHelperHelperAtom<A>* fgHelperHelperAtom;
1640 };
1641 template <typename A> FastStubHelperHelperAtom<A>* FastStubHelperAtom<A>::fgHelperHelperAtom = NULL;
1642
1643
1644
1645 template <typename A>
1646 class LazyPointerAtom : public WriterAtom<A>
1647 {
1648 public:
1649 LazyPointerAtom(Writer<A>& writer, ObjectFile::Atom& target,
1650 StubAtom<A>& stub, bool forLazyDylib);
1651 virtual const char* getName() const { return fName; }
1652 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeTranslationUnit; }
1653 virtual ObjectFile::Atom::ContentType getContentType() const { return fForLazyDylib ? ObjectFile::Atom::kLazyDylibPointer : ObjectFile::Atom::kLazyPointer; }
1654 virtual uint64_t getSize() const { return sizeof(typename A::P::uint_t); }
1655 virtual const char* getSectionName() const;
1656 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1657 virtual void copyRawContent(uint8_t buffer[]) const;
1658 ObjectFile::Atom* getTarget() { return &fExternalTarget; }
1659 void setLazyBindingInfoOffset(uint32_t off) { fLazyBindingOffset = off; }
1660 uint32_t getLazyBindingInfoOffset() { return fLazyBindingOffset; }
1661 virtual uint32_t getOrdinal() const { return fSortingOrdinal; }
1662 void setSortingOrdinal(uint32_t o) { fSortingOrdinal = o; }
1663 private:
1664 using WriterAtom<A>::fWriter;
1665 static const char* lazyPointerName(const char* importName);
1666 const char* fName;
1667 ObjectFile::Atom& fTarget;
1668 ObjectFile::Atom& fExternalTarget;
1669 std::vector<ObjectFile::Reference*> fReferences;
1670 bool fForLazyDylib;
1671 bool fCloseStub;
1672 uint32_t fLazyBindingOffset;
1673 uint32_t fSortingOrdinal;
1674 };
1675
1676
1677 template <typename A>
1678 class NonLazyPointerAtom : public WriterAtom<A>
1679 {
1680 public:
1681 NonLazyPointerAtom(Writer<A>& writer, ObjectFile::Atom& target);
1682 NonLazyPointerAtom(Writer<A>& writer, const char* targetName);
1683 NonLazyPointerAtom(Writer<A>& writer);
1684 virtual const char* getName() const { return fName; }
1685 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1686 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kNonLazyPointer; }
1687 virtual uint64_t getSize() const { return sizeof(typename A::P::uint_t); }
1688 virtual const char* getSectionName() const { return (fWriter.fOptions.outputKind() == Options::kKextBundle) ? "__got" : "__nl_symbol_ptr"; }
1689 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1690 virtual void copyRawContent(uint8_t buffer[]) const;
1691 ObjectFile::Atom* getTarget() { return fTarget; }
1692 virtual uint32_t getOrdinal() const { return fSortingOrdinal; }
1693 void setSortingOrdinal(uint32_t o) { fSortingOrdinal = o; }
1694 private:
1695 using WriterAtom<A>::fWriter;
1696 static const char* nonlazyPointerName(const char* importName);
1697 const char* fName;
1698 ObjectFile::Atom* fTarget;
1699 std::vector<ObjectFile::Reference*> fReferences;
1700 uint32_t fSortingOrdinal;
1701 };
1702
1703
1704 template <typename A>
1705 class ObjCInfoAtom : public WriterAtom<A>
1706 {
1707 public:
1708 ObjCInfoAtom(Writer<A>& writer, ObjectFile::Reader::ObjcConstraint objcContraint,
1709 bool objcReplacementClasses, bool abi2override);
1710 virtual const char* getName() const { return "objc$info"; }
1711 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1712 virtual uint64_t getSize() const { return 8; }
1713 virtual const char* getSectionName() const;
1714 virtual void copyRawContent(uint8_t buffer[]) const;
1715 private:
1716 Segment& getInfoSegment(bool abi2override) const;
1717 bool fAbi2override;
1718 uint32_t fContent[2];
1719 };
1720
1721
1722 template <typename A>
1723 class WriterReference : public ObjectFile::Reference
1724 {
1725 public:
1726 typedef typename A::ReferenceKinds Kinds;
1727
1728 WriterReference(uint32_t offset, Kinds kind, ObjectFile::Atom* target,
1729 uint32_t toOffset=0, ObjectFile::Atom* fromTarget=NULL, uint32_t fromOffset=0)
1730 : fKind(kind), fFixUpOffsetInSrc(offset), fTarget(target), fTargetName(target->getName()),
1731 fTargetOffset(toOffset), fFromTarget(fromTarget), fFromTargetOffset(fromOffset) {}
1732 WriterReference(uint32_t offset, Kinds kind, const char* targetName)
1733 : fKind(kind), fFixUpOffsetInSrc(offset), fTarget(NULL), fTargetName(targetName),
1734 fTargetOffset(0), fFromTarget(NULL), fFromTargetOffset(0) {}
1735
1736 virtual ~WriterReference() {}
1737
1738 virtual ObjectFile::Reference::TargetBinding getTargetBinding() const { return (fTarget != NULL) ? ObjectFile::Reference::kBoundDirectly : ObjectFile::Reference::kUnboundByName; }
1739 virtual ObjectFile::Reference::TargetBinding getFromTargetBinding() const { return (fFromTarget != NULL) ? ObjectFile::Reference::kBoundDirectly : ObjectFile::Reference::kDontBind; }
1740 virtual uint8_t getKind() const { return (uint8_t)fKind; }
1741 virtual uint64_t getFixUpOffset() const { return fFixUpOffsetInSrc; }
1742 virtual const char* getTargetName() const { return fTargetName; }
1743 virtual ObjectFile::Atom& getTarget() const { return *fTarget; }
1744 virtual uint64_t getTargetOffset() const { return fTargetOffset; }
1745 virtual ObjectFile::Atom& getFromTarget() const { return *fFromTarget; }
1746 virtual const char* getFromTargetName() const { return fFromTarget->getName(); }
1747 virtual void setTarget(ObjectFile::Atom& target, uint64_t offset) { fTarget = &target; fTargetOffset = offset; }
1748 virtual void setFromTarget(ObjectFile::Atom& target) { fFromTarget = &target; }
1749 virtual void setFromTargetName(const char* name) { }
1750 virtual void setFromTargetOffset(uint64_t offset) { fFromTargetOffset = offset; }
1751 virtual const char* getDescription() const { return "writer reference"; }
1752 virtual uint64_t getFromTargetOffset() const { return fFromTargetOffset; }
1753
1754 private:
1755 Kinds fKind;
1756 uint32_t fFixUpOffsetInSrc;
1757 ObjectFile::Atom* fTarget;
1758 const char* fTargetName;
1759 uint32_t fTargetOffset;
1760 ObjectFile::Atom* fFromTarget;
1761 uint32_t fFromTargetOffset;
1762 };
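// Usage sketch: the synthesized atoms below create these fix-up records, e.g. a
// pc-relative reference four bytes into an x86_64 atom, bound directly to another atom:
//
//	fReferences.push_back(new WriterReference<x86_64>(3, x86_64::kPCRel32, &someTargetAtom));
//
// (someTargetAtom is a placeholder name; real uses appear in the stub helpers below.)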
1763
1764
1765 template <typename A>
1766 const char* StubHelperAtom<A>::stubName(const char* name)
1767 {
1768 char* buf;
1769 asprintf(&buf, "%s$stubHelper", name);
1770 return buf;
1771 }
1772
1773 template <>
1774 ClassicStubHelperAtom<x86_64>::ClassicStubHelperAtom(Writer<x86_64>& writer, ObjectFile::Atom& target,
1775 class LazyPointerAtom<x86_64>& lazyPointer, bool forLazyDylib)
1776 : StubHelperAtom<x86_64>(writer, target, lazyPointer, forLazyDylib)
1777 {
1778 fReferences.push_back(new WriterReference<x86_64>(3, x86_64::kPCRel32, &fLazyPointerAtom));
1779 if ( forLazyDylib ) {
1780 if ( fWriter.fDyldLazyDylibHelper == NULL )
1781 throw "symbol dyld_lazy_dylib_stub_binding_helper not defined (usually in lazydylib1.o)";
1782 fReferences.push_back(new WriterReference<x86_64>(8, x86_64::kPCRel32, fWriter.fDyldLazyDylibHelper));
1783 }
1784 else {
1785 if ( fWriter.fDyldClassicHelperAtom == NULL )
1786 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
1787 fReferences.push_back(new WriterReference<x86_64>(8, x86_64::kPCRel32, fWriter.fDyldClassicHelperAtom));
1788 }
1789 }
1790
1791
1792 template <>
1793 uint64_t ClassicStubHelperAtom<x86_64>::getSize() const
1794 {
1795 return 12;
1796 }
1797
1798 template <>
1799 void ClassicStubHelperAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
1800 {
1801 buffer[0] = 0x4C; // lea foo$lazy_ptr(%rip),%r11
1802 buffer[1] = 0x8D;
1803 buffer[2] = 0x1D;
1804 buffer[3] = 0x00;
1805 buffer[4] = 0x00;
1806 buffer[5] = 0x00;
1807 buffer[6] = 0x00;
1808 buffer[7] = 0xE9; // jmp dyld_stub_binding_helper
1809 buffer[8] = 0x00;
1810 buffer[9] = 0x00;
1811 buffer[10] = 0x00;
1812 buffer[11] = 0x00;
1813 }
1814
1815
1816 template <>
1817 FastStubHelperHelperAtom<x86_64>::FastStubHelperHelperAtom(Writer<x86_64>& writer)
1818 : WriterAtom<x86_64>(writer, Segment::fgTextSegment)
1819 {
1820 fReferences.push_back(new WriterReference<x86_64>(3, x86_64::kPCRel32, new NonLazyPointerAtom<x86_64>(writer)));
1821 fReferences.push_back(new WriterReference<x86_64>(11, x86_64::kPCRel32, writer.fFastStubGOTAtom));
1822 }
1823
1824 template <>
1825 uint64_t FastStubHelperHelperAtom<x86_64>::getSize() const
1826 {
1827 return 16;
1828 }
1829
1830 template <>
1831 void FastStubHelperHelperAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
1832 {
1833 buffer[0] = 0x4C; // leaq dyld_ImageLoaderCache(%rip),%r11
1834 buffer[1] = 0x8D;
1835 buffer[2] = 0x1D;
1836 buffer[3] = 0x00;
1837 buffer[4] = 0x00;
1838 buffer[5] = 0x00;
1839 buffer[6] = 0x00;
1840 buffer[7] = 0x41; // pushq %r11
1841 buffer[8] = 0x53;
1842 buffer[9] = 0xFF; // jmp *_fast_lazy_bind(%rip)
1843 buffer[10] = 0x25;
1844 buffer[11] = 0x00;
1845 buffer[12] = 0x00;
1846 buffer[13] = 0x00;
1847 buffer[14] = 0x00;
1848 buffer[15] = 0x90; // nop
1849 }
1850
1851
1852 template <>
1853 HybridStubHelperHelperAtom<x86_64>::HybridStubHelperHelperAtom(Writer<x86_64>& writer)
1854 : WriterAtom<x86_64>(writer, Segment::fgTextSegment)
1855 {
1856 if ( writer.fDyldClassicHelperAtom == NULL )
1857 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
1858 fReferences.push_back(new WriterReference<x86_64>(3, x86_64::kPCRel32_1, writer.fFastStubGOTAtom));
1859 fReferences.push_back(new WriterReference<x86_64>(13, x86_64::kPCRel32, new NonLazyPointerAtom<x86_64>(writer)));
1860 fReferences.push_back(new WriterReference<x86_64>(21, x86_64::kPCRel32, writer.fFastStubGOTAtom));
1861 fReferences.push_back(new WriterReference<x86_64>(30, x86_64::kPCRel32, writer.fDyldClassicHelperAtom));
1862 }
1863
1864 template <>
1865 uint64_t HybridStubHelperHelperAtom<x86_64>::getSize() const
1866 {
1867 return 34;
1868 }
1869
1870 template <>
1871 void HybridStubHelperHelperAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
1872 {
1873 buffer[0] = 0x48; // cmpq $0x00,_fast_lazy_bind(%rip)
1874 buffer[1] = 0x83;
1875 buffer[2] = 0x3D;
1876 buffer[3] = 0x00;
1877 buffer[4] = 0x00;
1878 buffer[5] = 0x00;
1879 buffer[6] = 0x00;
1880 buffer[7] = 0x00;
1881 buffer[8] = 0x74; // je 25
1882 buffer[9] = 0x0F;
1883 buffer[10] = 0x4C; // leaq imageCache(%rip),%r11
1884 buffer[11] = 0x8D;
1885 buffer[12] = 0x1D;
1886 buffer[13] = 0x00;
1887 buffer[14] = 0x00;
1888 buffer[15] = 0x00;
1889 buffer[16] = 0x00;
1890 buffer[17] = 0x41; // pushq %r11
1891 buffer[18] = 0x53;
1892 buffer[19] = 0xFF; // jmp *_fast_lazy_bind(%rip)
1893 buffer[20] = 0x25;
1894 buffer[21] = 0x00;
1895 buffer[22] = 0x00;
1896 buffer[23] = 0x00;
1897 buffer[24] = 0x00;
1898 buffer[25] = 0x48; // addq $8,%rsp
1899 buffer[26] = 0x83;
1900 buffer[27] = 0xC4;
1901 buffer[28] = 0x08;
1902 buffer[29] = 0xE9; // jmp dyld_stub_binding_helper
1903 buffer[30] = 0x00;
1904 buffer[31] = 0x00;
1905 buffer[32] = 0x00;
1906 buffer[33] = 0x00;
1907 }
1908
1909
1910 template <>
1911 HybridStubHelperAtom<x86_64>::HybridStubHelperAtom(Writer<x86_64>& writer, ObjectFile::Atom& target,
1912 class LazyPointerAtom<x86_64>& lazyPointer, bool forLazyDylib)
1913 : StubHelperAtom<x86_64>(writer, target, lazyPointer, forLazyDylib)
1914 {
1915 if ( fgHelperHelperAtom == NULL ) {
1916 fgHelperHelperAtom = new HybridStubHelperHelperAtom<x86_64>::HybridStubHelperHelperAtom(fWriter);
1917 fWriter.fAllSynthesizedStubHelpers.push_back(fgHelperHelperAtom);
1918 }
1919 fReferences.push_back(new WriterReference<x86_64>(8, x86_64::kPCRel32, &fLazyPointerAtom));
1920 fReferences.push_back(new WriterReference<x86_64>(13, x86_64::kPCRel32, fgHelperHelperAtom));
1921 }
1922
1923 template <>
1924 uint64_t HybridStubHelperAtom<x86_64>::getSize() const
1925 {
1926 return 18;
1927 }
1928
1929 template <>
1930 void HybridStubHelperAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
1931 {
1932 buffer[0] = 0x68; // pushq $lazy-info-offset
1933 buffer[1] = 0x00;
1934 buffer[2] = 0x00;
1935 buffer[3] = 0x00;
1936 buffer[4] = 0x00;
1937 buffer[5] = 0x4C; // lea foo$lazy_ptr(%rip),%r11
1938 buffer[6] = 0x8D;
1939 buffer[7] = 0x1D;
1940 buffer[8] = 0x00;
1941 buffer[9] = 0x00;
1942 buffer[10] = 0x00;
1943 buffer[11] = 0x00;
1944 buffer[12] = 0xE9; // jmp helper-helper
1945 buffer[13] = 0x00;
1946 buffer[14] = 0x00;
1947 buffer[15] = 0x00;
1948 buffer[16] = 0x00;
1949 buffer[17] = 0x90; // nop
1950
1951 // the lazy binding info is created later than this helper atom, so there
1952 // is no Reference to update. Instead we blast the offset here.
1953 uint32_t offset;
1954 LittleEndian::set32(offset, fLazyPointerAtom.getLazyBindingInfoOffset());
1955 memcpy(&buffer[1], &offset, 4);
1956 }
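// Illustration only: "blasting the offset" above is just a 32-bit little-endian
// store over the four zero bytes that follow the 0x68 push opcode.  A standalone
// equivalent (the example_ name is ours, not ld64's):
static inline void example_patch_push_immediate(uint8_t* stubHelperBytes, uint32_t lazyInfoOffset)
{
	stubHelperBytes[1] = (uint8_t)( lazyInfoOffset        & 0xFF);
	stubHelperBytes[2] = (uint8_t)((lazyInfoOffset >>  8) & 0xFF);
	stubHelperBytes[3] = (uint8_t)((lazyInfoOffset >> 16) & 0xFF);
	stubHelperBytes[4] = (uint8_t)((lazyInfoOffset >> 24) & 0xFF);
}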
1957
1958 template <>
1959 FastStubHelperAtom<x86_64>::FastStubHelperAtom(Writer<x86_64>& writer, ObjectFile::Atom& target,
1960 class LazyPointerAtom<x86_64>& lazyPointer, bool forLazyDylib)
1961 : StubHelperAtom<x86_64>(writer, target, lazyPointer, forLazyDylib)
1962 {
1963 if ( fgHelperHelperAtom == NULL ) {
1964 fgHelperHelperAtom = new FastStubHelperHelperAtom<x86_64>::FastStubHelperHelperAtom(fWriter);
1965 fWriter.fAllSynthesizedStubHelpers.push_back(fgHelperHelperAtom);
1966 }
1967 fReferences.push_back(new WriterReference<x86_64>(6, x86_64::kPCRel32, fgHelperHelperAtom));
1968 }
1969
1970 template <>
1971 uint64_t FastStubHelperAtom<x86_64>::getSize() const
1972 {
1973 return 10;
1974 }
1975
1976 template <>
1977 void FastStubHelperAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
1978 {
1979 buffer[0] = 0x68; // pushq $lazy-info-offset
1980 buffer[1] = 0x00;
1981 buffer[2] = 0x00;
1982 buffer[3] = 0x00;
1983 buffer[4] = 0x00;
1984 buffer[5] = 0xE9; // jmp helperhelper
1985 buffer[6] = 0x00;
1986 buffer[7] = 0x00;
1987 buffer[8] = 0x00;
1988 buffer[9] = 0x00;
1989
1990 // the lazy binding info is created later than this helper atom, so there
1991 // is no Reference to update. Instead we blast the offset here.
1992 uint32_t offset;
1993 LittleEndian::set32(offset, fLazyPointerAtom.getLazyBindingInfoOffset());
1994 memcpy(&buffer[1], &offset, 4);
1995 }
1996
1997 template <>
1998 FastStubHelperHelperAtom<x86>::FastStubHelperHelperAtom(Writer<x86>& writer)
1999 : WriterAtom<x86>(writer, Segment::fgTextSegment)
2000 {
2001 fReferences.push_back(new WriterReference<x86>(1, x86::kAbsolute32, new NonLazyPointerAtom<x86>(writer)));
2002 fReferences.push_back(new WriterReference<x86>(7, x86::kAbsolute32, writer.fFastStubGOTAtom));
2003 }
2004
2005 template <>
2006 uint64_t FastStubHelperHelperAtom<x86>::getSize() const
2007 {
2008 return 12;
2009 }
2010
2011 template <>
2012 void FastStubHelperHelperAtom<x86>::copyRawContent(uint8_t buffer[]) const
2013 {
2014 buffer[0] = 0x68; // pushl $dyld_ImageLoaderCache
2015 buffer[1] = 0x00;
2016 buffer[2] = 0x00;
2017 buffer[3] = 0x00;
2018 buffer[4] = 0x00;
2019 buffer[5] = 0xFF; // jmp *_fast_lazy_bind
2020 buffer[6] = 0x25;
2021 buffer[7] = 0x00;
2022 buffer[8] = 0x00;
2023 buffer[9] = 0x00;
2024 buffer[10] = 0x00;
2025 buffer[11] = 0x90; // nop
2026 }
2027
2028
2029 template <>
2030 FastStubHelperHelperAtom<arm>::FastStubHelperHelperAtom(Writer<arm>& writer)
2031 : WriterAtom<arm>(writer, Segment::fgTextSegment)
2032 {
2033 fReferences.push_back(new WriterReference<arm>(28, arm::kPointerDiff, new NonLazyPointerAtom<arm>(writer), 0, this, 16));
2034 fReferences.push_back(new WriterReference<arm>(32, arm::kPointerDiff, writer.fFastStubGOTAtom, 0, this, 28));
2035 }
2036
2037 template <>
2038 uint64_t FastStubHelperHelperAtom<arm>::getSize() const
2039 {
2040 return 36;
2041 }
2042
2043 template <>
2044 void FastStubHelperHelperAtom<arm>::copyRawContent(uint8_t buffer[]) const
2045 {
2046 // push lazy-info-offset
2047 OSWriteLittleInt32(&buffer[ 0], 0, 0xe52dc004); // str ip, [sp, #-4]!
2049 // push address of dyld_ImageLoaderCache
2049 OSWriteLittleInt32(&buffer[ 4], 0, 0xe59fc010); // ldr ip, L1
2050 OSWriteLittleInt32(&buffer[ 8], 0, 0xe08fc00c); // add ip, pc, ip
2051 OSWriteLittleInt32(&buffer[12], 0, 0xe52dc004); // str ip, [sp, #-4]!
2052 // jump through _fast_lazy_bind
2053 OSWriteLittleInt32(&buffer[16], 0, 0xe59fc008); // ldr ip, L2
2054 OSWriteLittleInt32(&buffer[20], 0, 0xe08fc00c); // add ip, pc, ip
2055 OSWriteLittleInt32(&buffer[24], 0, 0xe59cf000); // ldr pc, [ip]
2056 OSWriteLittleInt32(&buffer[28], 0, 0x00000000); // L1: .long dyld_ImageLoaderCache-nonlazy-pointer - (helperhelper+16)
2057 OSWriteLittleInt32(&buffer[32], 0, 0x00000000); // L2: .long fFastStubGOTAtom (pointer to _fast_lazy_bind) - (helperhelper+28)
2058 }
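// Note on the fromOffset values used in the constructor above: in ARM state,
// reading pc during "add ip, pc, ip" yields the instruction's own address plus 8.
// The add at offset 8 therefore sees pc == helperhelper+16 and the add at offset
// 20 sees pc == helperhelper+28, which is why the two kPointerDiff references
// are anchored at fromOffset 16 and 28.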
2059
2060 template <>
2061 ObjectFile::Alignment StubHelperAtom<arm>::getAlignment() const { return ObjectFile::Alignment(2); }
2062
2063 template <>
2064 FastStubHelperAtom<arm>::FastStubHelperAtom(Writer<arm>& writer, ObjectFile::Atom& target,
2065 class LazyPointerAtom<arm>& lazyPointer, bool forLazyDylib)
2066 : StubHelperAtom<arm>(writer, target, lazyPointer, forLazyDylib)
2067 {
2068 if ( fgHelperHelperAtom == NULL ) {
2069 fgHelperHelperAtom = new FastStubHelperHelperAtom<arm>::FastStubHelperHelperAtom(fWriter);
2070 fWriter.fAllSynthesizedStubHelpers.push_back(fgHelperHelperAtom);
2071 }
2072 fReferences.push_back(new WriterReference<arm>(4, arm::kBranch24, fgHelperHelperAtom));
2073 }
2074
2075 template <>
2076 uint64_t FastStubHelperAtom<arm>::getSize() const
2077 {
2078 return 12;
2079 }
2080
2081 template <>
2082 void FastStubHelperAtom<arm>::copyRawContent(uint8_t buffer[]) const
2083 {
2084 OSWriteLittleInt32(&buffer[0], 0, 0xe59fc000); // ldr ip, [pc, #0]
2085 OSWriteLittleInt32(&buffer[4], 0, 0xea000000); // b _helperhelper
2086 // the lazy binding info is created later than this helper atom, so there
2087 // is no Reference to update. Instead we blast the offset here.
2088 OSWriteLittleInt32(&buffer[8], 0, fLazyPointerAtom.getLazyBindingInfoOffset());
2089 }
2090
2091
2092 template <>
2093 HybridStubHelperHelperAtom<x86>::HybridStubHelperHelperAtom(Writer<x86>& writer)
2094 : WriterAtom<x86>(writer, Segment::fgTextSegment)
2095 {
2096 if ( writer.fDyldClassicHelperAtom == NULL )
2097 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
2098 fReferences.push_back(new WriterReference<x86>(2, x86::kAbsolute32, writer.fFastStubGOTAtom));
2099 fReferences.push_back(new WriterReference<x86>(18, x86::kPCRel32, writer.fDyldClassicHelperAtom));
2100 fReferences.push_back(new WriterReference<x86>(26, x86::kAbsolute32, new NonLazyPointerAtom<x86>(writer)));
2101 fReferences.push_back(new WriterReference<x86>(32, x86::kAbsolute32, writer.fFastStubGOTAtom));
2102 }
2103
2104 template <>
2105 uint64_t HybridStubHelperHelperAtom<x86>::getSize() const
2106 {
2107 return 36;
2108 }
2109
2110
2111 template <>
2112 void HybridStubHelperHelperAtom<x86>::copyRawContent(uint8_t buffer[]) const
2113 {
2114 buffer[0] = 0x83; // cmpl $0x00,_fast_lazy_bind
2115 buffer[1] = 0x3D;
2116 buffer[2] = 0x00;
2117 buffer[3] = 0x00;
2118 buffer[4] = 0x00;
2119 buffer[5] = 0x00;
2120 buffer[6] = 0x00;
2121 buffer[7] = 0x75; // jne 22
2122 buffer[8] = 0x0D;
2123 buffer[9] = 0x89; // movl %eax,4(%esp)
2124 buffer[10] = 0x44;
2125 buffer[11] = 0x24;
2126 buffer[12] = 0x04;
2127 buffer[13] = 0x58; // popl %eax
2128 buffer[14] = 0x87; // xchgl (%esp),%eax
2129 buffer[15] = 0x04;
2130 buffer[16] = 0x24;
2131 buffer[17] = 0xE9; // jmpl dyld_stub_binding_helper
2132 buffer[18] = 0x00;
2133 buffer[19] = 0x00;
2134 buffer[20] = 0x00;
2135 buffer[21] = 0x00;
2136 buffer[22] = 0x83; // addl $0x04,%esp
2137 buffer[23] = 0xC4;
2138 buffer[24] = 0x04;
2139 buffer[25] = 0x68; // pushl $dyld_ImageLoaderCache
2140 buffer[26] = 0x00;
2141 buffer[27] = 0x00;
2142 buffer[28] = 0x00;
2143 buffer[29] = 0x00;
2144 buffer[30] = 0xFF; // jmp *_fast_lazy_bind
2145 buffer[31] = 0x25;
2146 buffer[32] = 0x00;
2147 buffer[33] = 0x00;
2148 buffer[34] = 0x00;
2149 buffer[35] = 0x00;
2150 }
2151
2152
2153 template <>
2154 ClassicStubHelperAtom<x86>::ClassicStubHelperAtom(Writer<x86>& writer, ObjectFile::Atom& target,
2155 class LazyPointerAtom<x86>& lazyPointer, bool forLazyDylib)
2156 : StubHelperAtom<x86>(writer, target, lazyPointer, forLazyDylib)
2157 {
2158 fReferences.push_back(new WriterReference<x86>(1, x86::kAbsolute32, &fLazyPointerAtom));
2159 if ( forLazyDylib ) {
2160 if ( fWriter.fDyldLazyDylibHelper == NULL )
2161 throw "symbol dyld_lazy_dylib_stub_binding_helper not defined (usually in lazydylib1.o)";
2162 fReferences.push_back(new WriterReference<x86>(6, x86::kPCRel32, fWriter.fDyldLazyDylibHelper));
2163 }
2164 else {
2165 if ( fWriter.fDyldClassicHelperAtom == NULL )
2166 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
2167 fReferences.push_back(new WriterReference<x86>(6, x86::kPCRel32, fWriter.fDyldClassicHelperAtom));
2168 }
2169 }
2170
2171 template <>
2172 uint64_t ClassicStubHelperAtom<x86>::getSize() const
2173 {
2174 return 10;
2175 }
2176
2177 template <>
2178 void ClassicStubHelperAtom<x86>::copyRawContent(uint8_t buffer[]) const
2179 {
2180 buffer[0] = 0x68; // pushl $foo$lazy_ptr
2181 buffer[1] = 0x00;
2182 buffer[2] = 0x00;
2183 buffer[3] = 0x00;
2184 buffer[4] = 0x00;
2185 buffer[5] = 0xE9; // jmp helperhelper
2186 buffer[6] = 0x00;
2187 buffer[7] = 0x00;
2188 buffer[8] = 0x00;
2189 buffer[9] = 0x00;
2190 }
2191
2192 template <>
2193 HybridStubHelperAtom<x86>::HybridStubHelperAtom(Writer<x86>& writer, ObjectFile::Atom& target,
2194 class LazyPointerAtom<x86>& lazyPointer, bool forLazyDylib)
2195 : StubHelperAtom<x86>(writer, target, lazyPointer, forLazyDylib)
2196 {
2197 if ( fgHelperHelperAtom == NULL ) {
2198 fgHelperHelperAtom = new HybridStubHelperHelperAtom<x86>::HybridStubHelperHelperAtom(fWriter);
2199 fWriter.fAllSynthesizedStubHelpers.push_back(fgHelperHelperAtom);
2200 }
2201 fReferences.push_back(new WriterReference<x86>(6, x86::kAbsolute32, &fLazyPointerAtom));
2202 fReferences.push_back(new WriterReference<x86>(11, x86::kPCRel32, fgHelperHelperAtom));
2203 }
2204
2205
2206 template <>
2207 uint64_t HybridStubHelperAtom<x86>::getSize() const
2208 {
2209 return 16;
2210 }
2211
2212 template <>
2213 void HybridStubHelperAtom<x86>::copyRawContent(uint8_t buffer[]) const
2214 {
2215 buffer[0] = 0x68; // pushl $lazy-info-offset
2216 buffer[1] = 0x00;
2217 buffer[2] = 0x00;
2218 buffer[3] = 0x00;
2219 buffer[4] = 0x00;
2220 buffer[5] = 0x68; // pushl $foo$lazy_ptr
2221 buffer[6] = 0x00;
2222 buffer[7] = 0x00;
2223 buffer[8] = 0x00;
2224 buffer[9] = 0x00;
2225 buffer[10] = 0xE9; // jmp dyld_hybrid_stub_binding_helper
2226 buffer[11] = 0x00;
2227 buffer[12] = 0x00;
2228 buffer[13] = 0x00;
2229 buffer[14] = 0x00;
2230 buffer[15] = 0x90; // nop
2231
2232 // the lazy binding info is created later than this helper atom, so there
2233 // is no Reference to update. Instead we blast the offset here.
2234 uint32_t offset;
2235 LittleEndian::set32(offset, fLazyPointerAtom.getLazyBindingInfoOffset());
2236 memcpy(&buffer[1], &offset, 4);
2237 }
2238
2239
2240 template <>
2241 FastStubHelperAtom<x86>::FastStubHelperAtom(Writer<x86>& writer, ObjectFile::Atom& target,
2242 class LazyPointerAtom<x86>& lazyPointer, bool forLazyDylib)
2243 : StubHelperAtom<x86>(writer, target, lazyPointer, forLazyDylib)
2244 {
2245 if ( fgHelperHelperAtom == NULL ) {
2246 fgHelperHelperAtom = new FastStubHelperHelperAtom<x86>::FastStubHelperHelperAtom(fWriter);
2247 fWriter.fAllSynthesizedStubHelpers.push_back(fgHelperHelperAtom);
2248 }
2249 fReferences.push_back(new WriterReference<x86>(6, x86::kPCRel32, fgHelperHelperAtom));
2250 }
2251
2252
2253 template <>
2254 uint64_t FastStubHelperAtom<x86>::getSize() const
2255 {
2256 return 10;
2257 }
2258
2259 template <>
2260 void FastStubHelperAtom<x86>::copyRawContent(uint8_t buffer[]) const
2261 {
2262 buffer[0] = 0x68; // pushl $lazy-info-offset
2263 buffer[1] = 0x00;
2264 buffer[2] = 0x00;
2265 buffer[3] = 0x00;
2266 buffer[4] = 0x00;
2267 buffer[5] = 0xE9; // jmp helperhelper
2268 buffer[6] = 0x00;
2269 buffer[7] = 0x00;
2270 buffer[8] = 0x00;
2271 buffer[9] = 0x00;
2272
2273 // the lazy binding info is created later than this helper atom, so there
2274 // is no Reference to update. Instead we blast the offset here.
2275 uint32_t offset;
2276 LittleEndian::set32(offset, fLazyPointerAtom.getLazyBindingInfoOffset());
2277 memcpy(&buffer[1], &offset, 4);
2278 }
2279
2280 template <typename A>
2281 const char* LazyPointerAtom<A>::getSectionName() const
2282 {
2283 if ( fCloseStub )
2284 return "__lazy_symbol";
2285 else if ( fForLazyDylib )
2286 return "__ld_symbol_ptr";
2287 else
2288 return "__la_symbol_ptr";
2289 }
2290
2291 // specialize lazy pointer for x86_64 to initially point to its stub helper
2292 template <>
2293 LazyPointerAtom<x86_64>::LazyPointerAtom(Writer<x86_64>& writer, ObjectFile::Atom& target, StubAtom<x86_64>& stub, bool forLazyDylib)
2294 : WriterAtom<x86_64>(writer, Segment::fgDataSegment), fName(lazyPointerName(target.getName())), fTarget(target),
2295 fExternalTarget(*stub.getTarget()), fForLazyDylib(forLazyDylib), fCloseStub(false), fLazyBindingOffset(0)
2296 {
2297 if ( forLazyDylib )
2298 writer.fAllSynthesizedLazyDylibPointers.push_back(this);
2299 else
2300 writer.fAllSynthesizedLazyPointers.push_back(this);
2301
2302 ObjectFile::Atom* helper;
2303 if ( writer.fOptions.makeCompressedDyldInfo() && !forLazyDylib ) {
2304 if ( writer.fOptions.makeClassicDyldInfo() ) {
2305 // hybrid LINKEDIT, no fast bind info for weak symbols so use traditional helper
2306 if ( writer.targetRequiresWeakBinding(target) )
2307 helper = new ClassicStubHelperAtom<x86_64>(writer, target, *this, forLazyDylib);
2308 else
2309 helper = new HybridStubHelperAtom<x86_64>(writer, target, *this, forLazyDylib);
2310 } else {
2311 if ( target.getDefinitionKind() == ObjectFile::Atom::kWeakDefinition )
2312 helper = &target;
2313 else
2314 helper = new FastStubHelperAtom<x86_64>(writer, target, *this, forLazyDylib);
2315 }
2316 }
2317 else {
2318 helper = new ClassicStubHelperAtom<x86_64>(writer, target, *this, forLazyDylib);
2319 }
2320 fReferences.push_back(new WriterReference<x86_64>(0, x86_64::kPointer, helper));
2321 }
2322
2323
2324 // specialize lazy pointer for x86 to initially point to its stub helper
2325 template <>
2326 LazyPointerAtom<x86>::LazyPointerAtom(Writer<x86>& writer, ObjectFile::Atom& target, StubAtom<x86>& stub, bool forLazyDylib)
2327 : WriterAtom<x86>(writer, Segment::fgDataSegment), fName(lazyPointerName(target.getName())), fTarget(target),
2328 fExternalTarget(*stub.getTarget()), fForLazyDylib(forLazyDylib), fCloseStub(false), fLazyBindingOffset(0)
2329 {
2330 if ( forLazyDylib )
2331 writer.fAllSynthesizedLazyDylibPointers.push_back(this);
2332 else
2333 writer.fAllSynthesizedLazyPointers.push_back(this);
2334
2335 ObjectFile::Atom* helper;
2336 if ( writer.fOptions.makeCompressedDyldInfo() && !forLazyDylib ) {
2337 if ( writer.fOptions.makeClassicDyldInfo() ) {
2338 // hybrid LINKEDIT, no fast bind info for weak symbols so use traditional helper
2339 if ( writer.targetRequiresWeakBinding(target) )
2340 helper = new ClassicStubHelperAtom<x86>(writer, target, *this, forLazyDylib);
2341 else
2342 helper = new HybridStubHelperAtom<x86>(writer, target, *this, forLazyDylib);
2343 }
2344 else {
2345 if ( target.getDefinitionKind() == ObjectFile::Atom::kWeakDefinition )
2346 helper = &target;
2347 else
2348 helper = new FastStubHelperAtom<x86>(writer, target, *this, forLazyDylib);
2349 }
2350 }
2351 else {
2352 helper = new ClassicStubHelperAtom<x86>(writer, target, *this, forLazyDylib);
2353 }
2354 fReferences.push_back(new WriterReference<x86>(0, x86::kPointer, helper));
2355 }
2356
2357 // specialize lazy pointer for arm to initially point to its stub helper
2358 template <>
2359 LazyPointerAtom<arm>::LazyPointerAtom(Writer<arm>& writer, ObjectFile::Atom& target, StubAtom<arm>& stub, bool forLazyDylib)
2360 : WriterAtom<arm>(writer, Segment::fgDataSegment), fName(lazyPointerName(target.getName())), fTarget(target),
2361 fExternalTarget(*stub.getTarget()), fForLazyDylib(forLazyDylib), fCloseStub(false), fLazyBindingOffset(0)
2362 {
2363 if ( forLazyDylib )
2364 writer.fAllSynthesizedLazyDylibPointers.push_back(this);
2365 else
2366 writer.fAllSynthesizedLazyPointers.push_back(this);
2367
2368 // The one-instruction (kStubShort) stubs must stay close to their lazy pointers
2369 if ( stub.fKind == StubAtom<arm>::kStubShort )
2370 fCloseStub = true;
2371
2372 ObjectFile::Atom* helper;
2373 if ( forLazyDylib ) {
2374 if ( writer.fDyldLazyDylibHelper == NULL )
2375 throw "symbol dyld_lazy_dylib_stub_binding_helper not defined (usually in lazydylib1.o)";
2376 helper = writer.fDyldLazyDylibHelper;
2377 }
2378 else if ( writer.fOptions.makeCompressedDyldInfo() ) {
2379 if ( target.getDefinitionKind() == ObjectFile::Atom::kWeakDefinition )
2380 helper = &target;
2381 else
2382 helper = new FastStubHelperAtom<arm>(writer, target, *this, forLazyDylib);
2383 }
2384 else {
2385 if ( writer.fDyldClassicHelperAtom == NULL )
2386 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
2387 helper = writer.fDyldClassicHelperAtom;
2388 }
2389 fReferences.push_back(new WriterReference<arm>(0, arm::kPointer, helper));
2390 }
2391
2392 template <typename A>
2393 LazyPointerAtom<A>::LazyPointerAtom(Writer<A>& writer, ObjectFile::Atom& target, StubAtom<A>& stub, bool forLazyDylib)
2394 : WriterAtom<A>(writer, Segment::fgDataSegment), fName(lazyPointerName(target.getName())), fTarget(target),
2395 fExternalTarget(*stub.getTarget()), fForLazyDylib(forLazyDylib), fCloseStub(false), fLazyBindingOffset(0)
2396 {
2397 if ( forLazyDylib )
2398 writer.fAllSynthesizedLazyDylibPointers.push_back(this);
2399 else
2400 writer.fAllSynthesizedLazyPointers.push_back(this);
2401
2402 fReferences.push_back(new WriterReference<A>(0, A::kPointer, &target));
2403 }
2404
2405
2406
2407 template <typename A>
2408 const char* LazyPointerAtom<A>::lazyPointerName(const char* name)
2409 {
2410 char* buf;
2411 asprintf(&buf, "%s$lazy_pointer", name);
2412 return buf;
2413 }
2414
2415 template <typename A>
2416 void LazyPointerAtom<A>::copyRawContent(uint8_t buffer[]) const
2417 {
2418 bzero(buffer, getSize());
2419 }
2420
2421
2422 template <typename A>
2423 NonLazyPointerAtom<A>::NonLazyPointerAtom(Writer<A>& writer, ObjectFile::Atom& target)
2424 : WriterAtom<A>(writer, Segment::fgDataSegment), fName(nonlazyPointerName(target.getName())), fTarget(&target)
2425 {
2426 writer.fAllSynthesizedNonLazyPointers.push_back(this);
2427 fReferences.push_back(new WriterReference<A>(0, A::kPointer, &target));
2428 }
2429
2430 template <typename A>
2431 NonLazyPointerAtom<A>::NonLazyPointerAtom(Writer<A>& writer)
2432 : WriterAtom<A>(writer, Segment::fgDataSegment), fName("none"), fTarget(NULL)
2433 {
2434 writer.fAllSynthesizedNonLazyPointers.push_back(this);
2435 }
2436
2437 template <typename A>
2438 NonLazyPointerAtom<A>::NonLazyPointerAtom(Writer<A>& writer, const char* targetName)
2439 : WriterAtom<A>(writer, Segment::fgDataSegment), fName(nonlazyPointerName(targetName)), fTarget(NULL)
2440 {
2441 writer.fAllSynthesizedNonLazyPointers.push_back(this);
2442 fReferences.push_back(new WriterReference<A>(0, A::kPointer, targetName));
2443 }
2444
2445 template <typename A>
2446 const char* NonLazyPointerAtom<A>::nonlazyPointerName(const char* name)
2447 {
2448 char* buf;
2449 asprintf(&buf, "%s$non_lazy_pointer", name);
2450 return buf;
2451 }
2452
2453 template <typename A>
2454 void NonLazyPointerAtom<A>::copyRawContent(uint8_t buffer[]) const
2455 {
2456 bzero(buffer, getSize());
2457 }
2458
2459
2460
2461
2462 template <>
2463 ObjectFile::Alignment StubAtom<ppc>::getAlignment() const
2464 {
2465 return 2;
2466 }
2467
2468 template <>
2469 ObjectFile::Alignment StubAtom<ppc64>::getAlignment() const
2470 {
2471 return 2;
2472 }
2473
2474 template <>
2475 ObjectFile::Alignment StubAtom<arm>::getAlignment() const
2476 {
2477 return 2;
2478 }
2479
2480 template <>
2481 StubAtom<ppc>::StubAtom(Writer<ppc>& writer, ObjectFile::Atom& target, bool forLazyDylib)
2482 : WriterAtom<ppc>(writer, Segment::fgTextSegment), fName(stubName(target.getName())),
2483 fTarget(target), fForLazyDylib(forLazyDylib)
2484 {
2485 writer.fAllSynthesizedStubs.push_back(this);
2486 LazyPointerAtom<ppc>* lp;
2487 if ( fWriter.fOptions.prebind() ) {
2488 // for prebound ppc, the lazy pointer starts out pointing to the target symbol's address
2489 // (if the target is a weak definition within this linkage unit) or to zero (if the target is in some dylib)
2490 lp = new LazyPointerAtom<ppc>(writer, target, *this, forLazyDylib);
2491 }
2492 else {
2493 // for non-prebound ppc, lazy pointer starts out pointing to dyld_stub_binding_helper glue code
2494 if ( forLazyDylib ) {
2495 if ( writer.fDyldLazyDylibHelper == NULL )
2496 throw "symbol dyld_lazy_dylib_stub_binding_helper not defined (usually in lazydylib1.o)";
2497 lp = new LazyPointerAtom<ppc>(writer, *writer.fDyldLazyDylibHelper, *this, forLazyDylib);
2498 }
2499 else {
2500 if ( writer.fDyldClassicHelperAtom == NULL )
2501 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
2502 lp = new LazyPointerAtom<ppc>(writer, *writer.fDyldClassicHelperAtom, *this, forLazyDylib);
2503 }
2504 }
2505 fKind = ( fWriter.fSlideable ? kStubPIC : kStubNoPIC );
2506 if ( fKind == kStubPIC ) {
2507 // picbase is 8 bytes into atom
2508 fReferences.push_back(new WriterReference<ppc>(12, ppc::kPICBaseHigh16, lp, 0, this, 8));
2509 fReferences.push_back(new WriterReference<ppc>(20, ppc::kPICBaseLow16, lp, 0, this, 8));
2510 }
2511 else {
2512 fReferences.push_back(new WriterReference<ppc>(0, ppc::kAbsHigh16AddLow, lp));
2513 fReferences.push_back(new WriterReference<ppc>(4, ppc::kAbsLow16, lp));
2514 }
2515 }
2516
2517 template <>
2518 StubAtom<ppc64>::StubAtom(Writer<ppc64>& writer, ObjectFile::Atom& target, bool forLazyDylib)
2519 : WriterAtom<ppc64>(writer, Segment::fgTextSegment), fName(stubName(target.getName())),
2520 fTarget(target), fForLazyDylib(forLazyDylib)
2521 {
2522 writer.fAllSynthesizedStubs.push_back(this);
2523
2524 LazyPointerAtom<ppc64>* lp;
2525 if ( forLazyDylib ) {
2526 if ( writer.fDyldLazyDylibHelper == NULL )
2527 throw "symbol dyld_lazy_dylib_stub_binding_helper not defined (usually in lazydylib1.o)";
2528 lp = new LazyPointerAtom<ppc64>(writer, *writer.fDyldLazyDylibHelper, *this, forLazyDylib);
2529 }
2530 else {
2531 if ( writer.fDyldClassicHelperAtom == NULL )
2532 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
2533 lp = new LazyPointerAtom<ppc64>(writer, *writer.fDyldClassicHelperAtom, *this, forLazyDylib);
2534 }
2535 if ( fWriter.fSlideable || ((fWriter.fPageZeroAtom != NULL) && (fWriter.fPageZeroAtom->getSize() > 4096)) )
2536 fKind = kStubPIC;
2537 else
2538 fKind = kStubNoPIC;
2539 if ( fKind == kStubPIC ) {
2540 // picbase is 8 bytes into atom
2541 fReferences.push_back(new WriterReference<ppc64>(12, ppc64::kPICBaseHigh16, lp, 0, this, 8));
2542 fReferences.push_back(new WriterReference<ppc64>(20, ppc64::kPICBaseLow14, lp, 0, this, 8));
2543 }
2544 else {
2545 fReferences.push_back(new WriterReference<ppc64>(0, ppc64::kAbsHigh16AddLow, lp));
2546 fReferences.push_back(new WriterReference<ppc64>(4, ppc64::kAbsLow14, lp));
2547 }
2548 }
2549
2550 template <>
2551 StubAtom<x86>::StubAtom(Writer<x86>& writer, ObjectFile::Atom& target, bool forLazyDylib)
2552 : WriterAtom<x86>(writer, (writer.fOptions.makeCompressedDyldInfo()|| forLazyDylib) ? Segment::fgTextSegment : Segment::fgImportSegment),
2553 fName(NULL), fTarget(target), fForLazyDylib(forLazyDylib)
2554 {
2555 if ( writer.fOptions.makeCompressedDyldInfo() || forLazyDylib ) {
2556 fKind = kStubNoPIC;
2557 fName = stubName(target.getName());
2558 LazyPointerAtom<x86>* lp = new LazyPointerAtom<x86>(writer, target, *this, forLazyDylib);
2559 fReferences.push_back(new WriterReference<x86>(2, x86::kAbsolute32, lp));
2560 writer.fAllSynthesizedStubs.push_back(this);
2561 }
2562 else {
2563 fKind = kJumpTable;
2564 if ( &target == NULL )
2565 asprintf((char**)&fName, "cache-line-crossing-stub %p", this);
2566 else {
2567 fName = stubName(target.getName());
2568 writer.fAllSynthesizedStubs.push_back(this);
2569 }
2570 }
2571 }
2572
2573
2574 template <>
2575 StubAtom<x86_64>::StubAtom(Writer<x86_64>& writer, ObjectFile::Atom& target, bool forLazyDylib)
2576 : WriterAtom<x86_64>(writer, Segment::fgTextSegment), fName(stubName(target.getName())), fTarget(target), fForLazyDylib(forLazyDylib)
2577 {
2578 writer.fAllSynthesizedStubs.push_back(this);
2579
2580 LazyPointerAtom<x86_64>* lp = new LazyPointerAtom<x86_64>(writer, target, *this, forLazyDylib);
2581 fReferences.push_back(new WriterReference<x86_64>(2, x86_64::kPCRel32, lp));
2582 }
2583
2584 template <>
2585 StubAtom<arm>::StubAtom(Writer<arm>& writer, ObjectFile::Atom& target, bool forLazyDylib)
2586 : WriterAtom<arm>(writer, Segment::fgTextSegment), fName(stubName(target.getName())), fTarget(target), fForLazyDylib(forLazyDylib)
2587 {
2588 writer.fAllSynthesizedStubs.push_back(this);
2589 if ( (writer.fDylibSymbolCountUpperBound < 900)
2590 && writer.fOptions.makeCompressedDyldInfo()
2591 && (writer.fOptions.outputKind() != Options::kDynamicLibrary)
2592 && !forLazyDylib ) {
2593 // dylibs might have __TEXT and __DATA pulled apart to live in the shared region;
2594 // with more than ~1000 stubs, the displacement to the lazy pointer may be > 12 bits.
2595 fKind = kStubShort;
2596 }
2597 else if ( fWriter.fSlideable ) {
2598 fKind = kStubPIC;
2599 }
2600 else {
2601 fKind = kStubNoPIC;
2602 }
2603 LazyPointerAtom<arm>* lp = new LazyPointerAtom<arm>(writer, target, *this, forLazyDylib);
2604 switch ( fKind ) {
2605 case kStubPIC:
2606 fReferences.push_back(new WriterReference<arm>(12, arm::kPointerDiff, lp, 0, this, 12));
2607 break;
2608 case kStubNoPIC:
2609 fReferences.push_back(new WriterReference<arm>(8, arm::kReadOnlyPointer, lp));
2610 break;
2611 case kStubShort:
2612 fReferences.push_back(new WriterReference<arm>(0, arm::kPointerDiff12, lp, 0, this, 8));
2613 break;
2614 default:
2615 throw "internal error";
2616 }
2617 }
2618
2619
2620
2621 template <typename A>
2622 const char* StubAtom<A>::stubName(const char* name)
2623 {
2624 char* buf;
2625 asprintf(&buf, "%s$stub", name);
2626 return buf;
2627 }
2628
2629 template <>
2630 uint64_t StubAtom<ppc>::getSize() const
2631 {
2632
2633 return ( (fKind == kStubPIC) ? 32 : 16 );
2634 }
2635
2636 template <>
2637 uint64_t StubAtom<ppc64>::getSize() const
2638 {
2639 return ( (fKind == kStubPIC) ? 32 : 16 );
2640 }
2641
2642
2643 template <>
2644 uint64_t StubAtom<arm>::getSize() const
2645 {
2646 switch ( fKind ) {
2647 case kStubPIC:
2648 return 16;
2649 case kStubNoPIC:
2650 return 12;
2651 case kStubShort:
2652 return 4;
2653 default:
2654 throw "internal error";
2655 }
2656 }
2657
2658 template <>
2659 uint64_t StubAtom<x86>::getSize() const
2660 {
2661 switch ( fKind ) {
2662 case kStubNoPIC:
2663 return 6;
2664 case kJumpTable:
2665 return 5;
2666 default:
2667 throw "internal error";
2668 }
2669 }
2670
2671 template <>
2672 uint64_t StubAtom<x86_64>::getSize() const
2673 {
2674 return 6;
2675 }
2676
2677 template <>
2678 ObjectFile::Alignment StubAtom<x86>::getAlignment() const
2679 {
2680 switch ( fKind ) {
2681 case kStubNoPIC:
2682 return 1;
2683 case kJumpTable:
2684 return 0; // special case x86 self-modifying stubs to be byte aligned
2685 default:
2686 throw "internal error";
2687 }
2688 }
2689
2690 template <>
2691 void StubAtom<ppc64>::copyRawContent(uint8_t buffer[]) const
2692 {
2693 if ( fKind == kStubPIC ) {
2694 OSWriteBigInt32(&buffer [0], 0, 0x7c0802a6); // mflr r0
2695 OSWriteBigInt32(&buffer[ 4], 0, 0x429f0005); // bcl 20,31,Lpicbase
2696 OSWriteBigInt32(&buffer[ 8], 0, 0x7d6802a6); // Lpicbase: mflr r11
2697 OSWriteBigInt32(&buffer[12], 0, 0x3d6b0000); // addis r11,r11,ha16(L_fwrite$lazy_ptr-Lpicbase)
2698 OSWriteBigInt32(&buffer[16], 0, 0x7c0803a6); // mtlr r0
2699 OSWriteBigInt32(&buffer[20], 0, 0xe98b0001); // ldu r12,lo16(L_fwrite$lazy_ptr-Lpicbase)(r11)
2700 OSWriteBigInt32(&buffer[24], 0, 0x7d8903a6); // mtctr r12
2701 OSWriteBigInt32(&buffer[28], 0, 0x4e800420); // bctr
2702 }
2703 else {
2704 OSWriteBigInt32(&buffer[ 0], 0, 0x3d600000); // lis r11,ha16(L_fwrite$lazy_ptr)
2705 OSWriteBigInt32(&buffer[ 4], 0, 0xe98b0001); // ldu r12,lo16(L_fwrite$lazy_ptr)(r11)
2706 OSWriteBigInt32(&buffer[ 8], 0, 0x7d8903a6); // mtctr r12
2707 OSWriteBigInt32(&buffer[12], 0, 0x4e800420); // bctr
2708 }
2709 }
2710
2711 template <>
2712 void StubAtom<ppc>::copyRawContent(uint8_t buffer[]) const
2713 {
2714 if ( fKind == kStubPIC ) {
2715 OSWriteBigInt32(&buffer[ 0], 0, 0x7c0802a6); // mflr r0
2716 OSWriteBigInt32(&buffer[ 4], 0, 0x429f0005); // bcl 20,31,Lpicbase
2717 OSWriteBigInt32(&buffer[ 8], 0, 0x7d6802a6); // Lpicbase: mflr r11
2718 OSWriteBigInt32(&buffer[12], 0, 0x3d6b0000); // addis r11,r11,ha16(L_fwrite$lazy_ptr-Lpicbase)
2719 OSWriteBigInt32(&buffer[16], 0, 0x7c0803a6); // mtlr r0
2720 OSWriteBigInt32(&buffer[20], 0, 0x858b0000); // lwzu r12,lo16(L_fwrite$lazy_ptr-Lpicbase)(r11)
2721 OSWriteBigInt32(&buffer[24], 0, 0x7d8903a6); // mtctr r12
2722 OSWriteBigInt32(&buffer[28], 0, 0x4e800420); // bctr
2723 }
2724 else {
2725 OSWriteBigInt32(&buffer[ 0], 0, 0x3d600000); // lis r11,ha16(L_fwrite$lazy_ptr)
2726 OSWriteBigInt32(&buffer[ 4], 0, 0x858b0000); // lwzu r12,lo16(L_fwrite$lazy_ptr)(r11)
2727 OSWriteBigInt32(&buffer[ 8], 0, 0x7d8903a6); // mtctr r12
2728 OSWriteBigInt32(&buffer[12], 0, 0x4e800420); // bctr
2729 }
2730 }
2731
2732 template <>
2733 void StubAtom<x86>::copyRawContent(uint8_t buffer[]) const
2734 {
2735 switch ( fKind ) {
2736 case kStubNoPIC:
2737 buffer[0] = 0xFF; // jmp *foo$lazy_pointer
2738 buffer[1] = 0x25;
2739 buffer[2] = 0x00;
2740 buffer[3] = 0x00;
2741 buffer[4] = 0x00;
2742 buffer[5] = 0x00;
2743 break;
2744 case kJumpTable:
2745 if ( fWriter.fOptions.prebind() ) {
2746 uint32_t address = this->getAddress();
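// a 5-byte 0xE9 jmp at 'address' transfers control to address+5+rel32, so
// rel32 = 0-(address+5) makes the entry initially jump to absolute address 0
// (presumably rewritten when the jump-table slot is actually bound)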
2747 int32_t rel32 = 0 - (address+5);
2748 buffer[0] = 0xE9;
2749 buffer[1] = rel32 & 0xFF;
2750 buffer[2] = (rel32 >> 8) & 0xFF;
2751 buffer[3] = (rel32 >> 16) & 0xFF;
2752 buffer[4] = (rel32 >> 24) & 0xFF;
2753 }
2754 else {
2755 buffer[0] = 0xF4;
2756 buffer[1] = 0xF4;
2757 buffer[2] = 0xF4;
2758 buffer[3] = 0xF4;
2759 buffer[4] = 0xF4;
2760 }
2761 break;
2762 default:
2763 throw "internal error";
2764 }
2765 }
2766
2767 template <>
2768 void StubAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
2769 {
2770 buffer[0] = 0xFF; // jmp *foo$lazy_pointer(%rip)
2771 buffer[1] = 0x25;
2772 buffer[2] = 0x00;
2773 buffer[3] = 0x00;
2774 buffer[4] = 0x00;
2775 buffer[5] = 0x00;
2776 }
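// The four zero bytes above are the rel32 field of "jmp *disp32(%rip)"; the
// kPCRel32 reference at offset 2 (created in the StubAtom<x86_64> constructor
// above) is later resolved so the stub jumps through foo$lazy_pointer.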
2777
2778 template <>
2779 void StubAtom<arm>::copyRawContent(uint8_t buffer[]) const
2780 {
2781 switch ( fKind ) {
2782 case kStubPIC:
2783 OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc004); // ldr ip, pc + 12
2784 OSWriteLittleInt32(&buffer[ 4], 0, 0xe08fc00c); // add ip, pc, ip
2785 OSWriteLittleInt32(&buffer[ 8], 0, 0xe59cf000); // ldr pc, [ip]
2786 OSWriteLittleInt32(&buffer[12], 0, 0x00000000); // .long L_foo$lazy_ptr - (L1$scv + 8)
2787 break;
2788 case kStubNoPIC:
2789 OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc000); // ldr ip, [pc, #0]
2790 OSWriteLittleInt32(&buffer[ 4], 0, 0xe59cf000); // ldr pc, [ip]
2791 OSWriteLittleInt32(&buffer[ 8], 0, 0x00000000); // .long L_foo$lazy_ptr
2792 break;
2793 case kStubShort:
2794 OSWriteLittleInt32(&buffer[ 0], 0, 0xE59FF000);// ldr pc, [pc, #foo$lazy_ptr]
2795 break;
2796 default:
2797 throw "internal error";
2798 }
2799 }
2800
2801 // x86_64 stubs are 6 bytes
2802 template <>
2803 ObjectFile::Alignment StubAtom<x86_64>::getAlignment() const
2804 {
2805 return 1;
2806 }
2807
2808 template <>
2809 const char* StubAtom<ppc>::getSectionName() const
2810 {
2811 return ( (fKind == kStubPIC) ? "__picsymbolstub1" : "__symbol_stub1");
2812 }
2813
2814 template <>
2815 const char* StubAtom<ppc64>::getSectionName() const
2816 {
2817 return ( (fKind == kStubPIC) ? "__picsymbolstub1" : "__symbol_stub1");
2818 }
2819
2820 template <>
2821 const char* StubAtom<arm>::getSectionName() const
2822 {
2823 switch ( fKind ) {
2824 case kStubPIC:
2825 return "__picsymbolstub4";
2826 case kStubNoPIC:
2827 return "__symbol_stub4";
2828 case kStubShort:
2829 return "__symbolstub1";
2830 default:
2831 throw "internal error";
2832 }
2833 }
2834
2835 template <>
2836 const char* StubAtom<x86>::getSectionName() const
2837 {
2838 switch ( fKind ) {
2839 case kStubNoPIC:
2840 return "__symbol_stub";
2841 case kJumpTable:
2842 return "__jump_table";
2843 default:
2844 throw "internal error";
2845 }
2846 }
2847
2848
2849
2850
2851 struct AtomByNameSorter
2852 {
2853 bool operator()(ObjectFile::Atom* left, ObjectFile::Atom* right)
2854 {
2855 return (strcmp(left->getName(), right->getName()) < 0);
2856 }
2857 };
2858
2859 template <typename P>
2860 struct ExternalRelocSorter
2861 {
2862 bool operator()(const macho_relocation_info<P>& left, const macho_relocation_info<P>& right)
2863 {
2864 // sort first by symbol number
2865 if ( left.r_symbolnum() != right.r_symbolnum() )
2866 return (left.r_symbolnum() < right.r_symbolnum());
2867 // then sort all uses of the same symbol by address
2868 return (left.r_address() < right.r_address());
2869 }
2870 };
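// Usage sketch (illustration only): both functors are strict-weak-order
// comparators intended for std::sort, e.g.
//
//	std::sort(exportedAtoms.begin(), exportedAtoms.end(), AtomByNameSorter());
//	std::sort(externRelocs.begin(), externRelocs.end(), ExternalRelocSorter<P>());
//
// (exportedAtoms/externRelocs are placeholder names for the writer's own vectors.)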
2871
2872
2873 template <typename A>
2874 Writer<A>::Writer(const char* path, Options& options, std::vector<ExecutableFile::DyLibUsed>& dynamicLibraries)
2875 : ExecutableFile::Writer(dynamicLibraries), fFilePath(strdup(path)), fOptions(options),
2876 fAllAtoms(NULL), fStabs(NULL), fRegularDefAtomsThatOverrideADylibsWeakDef(NULL), fLoadCommandsSection(NULL),
2877 fLoadCommandsSegment(NULL), fMachHeaderAtom(NULL), fEncryptionLoadCommand(NULL), fSegmentCommands(NULL),
2878 fSymbolTableCommands(NULL), fHeaderPadding(NULL), fUnwindInfoAtom(NULL),
2879 fUUIDAtom(NULL), fPadSegmentInfo(NULL), fEntryPoint( NULL),
2880 fDyldClassicHelperAtom(NULL), fDyldCompressedHelperAtom(NULL), fDyldLazyDylibHelper(NULL),
2881 fSectionRelocationsAtom(NULL), fCompressedRebaseInfoAtom(NULL), fCompressedBindingInfoAtom(NULL),
2882 fCompressedWeakBindingInfoAtom(NULL), fCompressedLazyBindingInfoAtom(NULL), fCompressedExportInfoAtom(NULL),
2883 fLocalRelocationsAtom(NULL), fExternalRelocationsAtom(NULL),
2884 fSymbolTableAtom(NULL), fSplitCodeToDataContentAtom(NULL), fIndirectTableAtom(NULL), fModuleInfoAtom(NULL),
2885 fStringsAtom(NULL), fPageZeroAtom(NULL), fFastStubGOTAtom(NULL), fSymbolTable(NULL), fSymbolTableCount(0),
2886 fSymbolTableStabsCount(0), fSymbolTableLocalCount(0), fSymbolTableExportCount(0), fSymbolTableImportCount(0),
2887 fLargestAtomSize(1),
2888 fEmitVirtualSections(false), fHasWeakExports(false), fReferencesWeakImports(false),
2889 fCanScatter(false), fWritableSegmentPastFirst4GB(false), fNoReExportedDylibs(false),
2890 fBiggerThanTwoGigs(false), fSlideable(false), fHasThumbBranches(false),
2891 fFirstWritableSegment(NULL), fAnonNameIndex(1000)
2892 {
2893 switch ( fOptions.outputKind() ) {
2894 case Options::kDynamicExecutable:
2895 case Options::kStaticExecutable:
2896 if ( fOptions.zeroPageSize() != 0 )
2897 fWriterSynthesizedAtoms.push_back(fPageZeroAtom = new PageZeroAtom<A>(*this));
2898 if ( fOptions.outputKind() == Options::kDynamicExecutable )
2899 fWriterSynthesizedAtoms.push_back(new DsoHandleAtom<A>(*this));
2900 fWriterSynthesizedAtoms.push_back(fMachHeaderAtom = new MachHeaderAtom<A>(*this));
2901 fWriterSynthesizedAtoms.push_back(new SegmentLoadCommandsAtom<A>(*this));
2902 if ( fOptions.makeCompressedDyldInfo() )
2903 fWriterSynthesizedAtoms.push_back(new DyldInfoLoadCommandsAtom<A>(*this));
2904 fWriterSynthesizedAtoms.push_back(new SymbolTableLoadCommandsAtom<A>(*this));
2905 if ( fOptions.outputKind() == Options::kDynamicExecutable )
2906 fWriterSynthesizedAtoms.push_back(new DyldLoadCommandsAtom<A>(*this));
2907 fWriterSynthesizedAtoms.push_back(fUUIDAtom = new UUIDLoadCommandAtom<A>(*this));
2908 fWriterSynthesizedAtoms.push_back(new ThreadsLoadCommandsAtom<A>(*this));
2909 if ( fOptions.hasCustomStack() )
2910 fWriterSynthesizedAtoms.push_back(new CustomStackAtom<A>(*this));
2911 fWriterSynthesizedAtoms.push_back(fHeaderPadding = new LoadCommandsPaddingAtom<A>(*this));
2912 fWriterSynthesizedAtoms.push_back(new MinimalTextAtom<A>(*this));
2913 if ( fOptions.needsUnwindInfoSection() )
2914 fWriterSynthesizedAtoms.push_back(fUnwindInfoAtom = new UnwindInfoAtom<A>(*this));
2915 fWriterSynthesizedAtoms.push_back(fSectionRelocationsAtom = new SectionRelocationsLinkEditAtom<A>(*this));
2916 if ( fOptions.makeCompressedDyldInfo() ) {
2917 fWriterSynthesizedAtoms.push_back(fCompressedRebaseInfoAtom = new CompressedRebaseInfoLinkEditAtom<A>(*this));
2918 fWriterSynthesizedAtoms.push_back(fCompressedBindingInfoAtom = new CompressedBindingInfoLinkEditAtom<A>(*this));
2919 fWriterSynthesizedAtoms.push_back(fCompressedWeakBindingInfoAtom = new CompressedWeakBindingInfoLinkEditAtom<A>(*this));
2920 fWriterSynthesizedAtoms.push_back(fCompressedLazyBindingInfoAtom = new CompressedLazyBindingInfoLinkEditAtom<A>(*this));
2921 fWriterSynthesizedAtoms.push_back(fCompressedExportInfoAtom = new CompressedExportInfoLinkEditAtom<A>(*this));
2922 }
2923 if ( fOptions.makeClassicDyldInfo() )
2924 fWriterSynthesizedAtoms.push_back(fLocalRelocationsAtom = new LocalRelocationsLinkEditAtom<A>(*this));
2925 fWriterSynthesizedAtoms.push_back(fSymbolTableAtom = new SymbolTableLinkEditAtom<A>(*this));
2926 if ( fOptions.makeClassicDyldInfo() )
2927 fWriterSynthesizedAtoms.push_back(fExternalRelocationsAtom = new ExternalRelocationsLinkEditAtom<A>(*this));
2928 fWriterSynthesizedAtoms.push_back(fIndirectTableAtom = new IndirectTableLinkEditAtom<A>(*this));
2929 fWriterSynthesizedAtoms.push_back(fStringsAtom = new StringsLinkEditAtom<A>(*this));
2930 break;
2931 case Options::kPreload:
2932 fWriterSynthesizedAtoms.push_back(fMachHeaderAtom = new MachHeaderAtom<A>(*this));
2933 fWriterSynthesizedAtoms.push_back(new SegmentLoadCommandsAtom<A>(*this));
2934 fWriterSynthesizedAtoms.push_back(new SymbolTableLoadCommandsAtom<A>(*this));
2935 fWriterSynthesizedAtoms.push_back(fUUIDAtom = new UUIDLoadCommandAtom<A>(*this));
2936 fWriterSynthesizedAtoms.push_back(new ThreadsLoadCommandsAtom<A>(*this));
2937 fWriterSynthesizedAtoms.push_back(fHeaderPadding = new LoadCommandsPaddingAtom<A>(*this));
2938 fWriterSynthesizedAtoms.push_back(fSectionRelocationsAtom = new SectionRelocationsLinkEditAtom<A>(*this));
2939 fWriterSynthesizedAtoms.push_back(fLocalRelocationsAtom = new LocalRelocationsLinkEditAtom<A>(*this));
2940 fWriterSynthesizedAtoms.push_back(fExternalRelocationsAtom = new ExternalRelocationsLinkEditAtom<A>(*this));
2941 fWriterSynthesizedAtoms.push_back(fIndirectTableAtom = new IndirectTableLinkEditAtom<A>(*this));
2942 fWriterSynthesizedAtoms.push_back(fSymbolTableAtom = new SymbolTableLinkEditAtom<A>(*this));
2943 fWriterSynthesizedAtoms.push_back(fStringsAtom = new StringsLinkEditAtom<A>(*this));
2944 break;
2945 case Options::kDynamicLibrary:
2946 case Options::kDynamicBundle:
2947 fWriterSynthesizedAtoms.push_back(new DsoHandleAtom<A>(*this));
2948 case Options::kKextBundle:
2949 fWriterSynthesizedAtoms.push_back(fMachHeaderAtom = new MachHeaderAtom<A>(*this));
2950 fWriterSynthesizedAtoms.push_back(new SegmentLoadCommandsAtom<A>(*this));
2951 if ( fOptions.outputKind() == Options::kDynamicLibrary ) {
2952 fWriterSynthesizedAtoms.push_back(new DylibIDLoadCommandsAtom<A>(*this));
2953 if ( fOptions.initFunctionName() != NULL )
2954 fWriterSynthesizedAtoms.push_back(new RoutinesLoadCommandsAtom<A>(*this));
2955 }
2956 fWriterSynthesizedAtoms.push_back(fUUIDAtom = new UUIDLoadCommandAtom<A>(*this));
2957 if ( fOptions.makeCompressedDyldInfo() )
2958 fWriterSynthesizedAtoms.push_back(new DyldInfoLoadCommandsAtom<A>(*this));
2959 fWriterSynthesizedAtoms.push_back(new SymbolTableLoadCommandsAtom<A>(*this));
2960 if ( fOptions.sharedRegionEligible() )
2961 fWriterSynthesizedAtoms.push_back(new SegmentSplitInfoLoadCommandsAtom<A>(*this));
2962 fWriterSynthesizedAtoms.push_back(fHeaderPadding = new LoadCommandsPaddingAtom<A>(*this));
2963 fWriterSynthesizedAtoms.push_back(new MinimalTextAtom<A>(*this));
2964 if ( fOptions.needsUnwindInfoSection() )
2965 fWriterSynthesizedAtoms.push_back(fUnwindInfoAtom = new UnwindInfoAtom<A>(*this));
2966 fWriterSynthesizedAtoms.push_back(fSectionRelocationsAtom = new SectionRelocationsLinkEditAtom<A>(*this));
2967 if ( fOptions.makeCompressedDyldInfo() ) {
2968 fWriterSynthesizedAtoms.push_back(fCompressedRebaseInfoAtom = new CompressedRebaseInfoLinkEditAtom<A>(*this));
2969 fWriterSynthesizedAtoms.push_back(fCompressedBindingInfoAtom = new CompressedBindingInfoLinkEditAtom<A>(*this));
2970 fWriterSynthesizedAtoms.push_back(fCompressedWeakBindingInfoAtom = new CompressedWeakBindingInfoLinkEditAtom<A>(*this));
2971 fWriterSynthesizedAtoms.push_back(fCompressedLazyBindingInfoAtom = new CompressedLazyBindingInfoLinkEditAtom<A>(*this));
2972 fWriterSynthesizedAtoms.push_back(fCompressedExportInfoAtom = new CompressedExportInfoLinkEditAtom<A>(*this));
2973 }
2974 if ( fOptions.makeClassicDyldInfo() )
2975 fWriterSynthesizedAtoms.push_back(fLocalRelocationsAtom = new LocalRelocationsLinkEditAtom<A>(*this));
2976 if ( fOptions.sharedRegionEligible() ) {
2977 fWriterSynthesizedAtoms.push_back(fSplitCodeToDataContentAtom = new SegmentSplitInfoContentAtom<A>(*this));
2978 }
2979 fWriterSynthesizedAtoms.push_back(fSymbolTableAtom = new SymbolTableLinkEditAtom<A>(*this));
2980 if ( fOptions.makeClassicDyldInfo() )
2981 fWriterSynthesizedAtoms.push_back(fExternalRelocationsAtom = new ExternalRelocationsLinkEditAtom<A>(*this));
2982 if ( fOptions.outputKind() != Options::kKextBundle )
2983 fWriterSynthesizedAtoms.push_back(fIndirectTableAtom = new IndirectTableLinkEditAtom<A>(*this));
2984 if ( this->needsModuleTable() )
2985 fWriterSynthesizedAtoms.push_back(fModuleInfoAtom = new ModuleInfoLinkEditAtom<A>(*this));
2986 fWriterSynthesizedAtoms.push_back(fStringsAtom = new StringsLinkEditAtom<A>(*this));
2987 break;
2988 case Options::kObjectFile:
2989 fWriterSynthesizedAtoms.push_back(fMachHeaderAtom = new MachHeaderAtom<A>(*this));
2990 fWriterSynthesizedAtoms.push_back(new SegmentLoadCommandsAtom<A>(*this));
2991 fWriterSynthesizedAtoms.push_back(fUUIDAtom = new UUIDLoadCommandAtom<A>(*this));
2992 fWriterSynthesizedAtoms.push_back(new SymbolTableLoadCommandsAtom<A>(*this));
2993 fWriterSynthesizedAtoms.push_back(fHeaderPadding = new LoadCommandsPaddingAtom<A>(*this));
2994 fWriterSynthesizedAtoms.push_back(fSectionRelocationsAtom = new SectionRelocationsLinkEditAtom<A>(*this));
2995 fWriterSynthesizedAtoms.push_back(fLocalRelocationsAtom = new LocalRelocationsLinkEditAtom<A>(*this));
2996 fWriterSynthesizedAtoms.push_back(fSymbolTableAtom = new SymbolTableLinkEditAtom<A>(*this));
2997 fWriterSynthesizedAtoms.push_back(fExternalRelocationsAtom = new ExternalRelocationsLinkEditAtom<A>(*this));
2998 fWriterSynthesizedAtoms.push_back(fIndirectTableAtom = new IndirectTableLinkEditAtom<A>(*this));
2999 fWriterSynthesizedAtoms.push_back(fStringsAtom = new StringsLinkEditAtom<A>(*this));
3000 break;
3001 case Options::kDyld:
3002 fWriterSynthesizedAtoms.push_back(new DsoHandleAtom<A>(*this));
3003 fWriterSynthesizedAtoms.push_back(fMachHeaderAtom = new MachHeaderAtom<A>(*this));
3004 fWriterSynthesizedAtoms.push_back(new SegmentLoadCommandsAtom<A>(*this));
3005 fWriterSynthesizedAtoms.push_back(new SymbolTableLoadCommandsAtom<A>(*this));
3006 fWriterSynthesizedAtoms.push_back(new DyldLoadCommandsAtom<A>(*this));
3007 fWriterSynthesizedAtoms.push_back(fUUIDAtom = new UUIDLoadCommandAtom<A>(*this));
3008 fWriterSynthesizedAtoms.push_back(new ThreadsLoadCommandsAtom<A>(*this));
3009 fWriterSynthesizedAtoms.push_back(fHeaderPadding = new LoadCommandsPaddingAtom<A>(*this));
3010 if ( fOptions.needsUnwindInfoSection() )
3011 fWriterSynthesizedAtoms.push_back(fUnwindInfoAtom = new UnwindInfoAtom<A>(*this));
3012 fWriterSynthesizedAtoms.push_back(fLocalRelocationsAtom = new LocalRelocationsLinkEditAtom<A>(*this));
3013 fWriterSynthesizedAtoms.push_back(fSymbolTableAtom = new SymbolTableLinkEditAtom<A>(*this));
3014 fWriterSynthesizedAtoms.push_back(fExternalRelocationsAtom = new ExternalRelocationsLinkEditAtom<A>(*this));
3015 fWriterSynthesizedAtoms.push_back(fIndirectTableAtom = new IndirectTableLinkEditAtom<A>(*this));
3016 fWriterSynthesizedAtoms.push_back(fStringsAtom = new StringsLinkEditAtom<A>(*this));
3017 break;
3018 }
3019
3020 // add extra commands
3021 bool hasReExports = false;
3022 uint32_t ordinal = 1;
3023 switch ( fOptions.outputKind() ) {
3024 case Options::kDynamicExecutable:
3025 if ( fOptions.makeEncryptable() ) {
3026 fEncryptionLoadCommand = new EncryptionLoadCommandsAtom<A>(*this);
3027 fWriterSynthesizedAtoms.push_back(fEncryptionLoadCommand);
3028 }
3029 // fall through
3030 case Options::kDynamicLibrary:
3031 case Options::kDynamicBundle:
3032 {
3033 // add dylib load command atoms for all dynamic libraries
3034 const unsigned int libCount = dynamicLibraries.size();
3035 for (unsigned int i=0; i < libCount; ++i) {
3036 ExecutableFile::DyLibUsed& dylibInfo = dynamicLibraries[i];
3037 //fprintf(stderr, "dynamicLibraries[%d]: reader=%p, %s, install=%s\n", i, dylibInfo.reader, dylibInfo.reader->getPath(), dylibInfo.reader->getInstallPath() );
3038
3039 if ( dylibInfo.options.fReExport ) {
3040 hasReExports = true;
3041 }
3042 else {
3043 const char* parentUmbrella = dylibInfo.reader->parentUmbrella();
3044 if ( (parentUmbrella != NULL) && (fOptions.outputKind() == Options::kDynamicLibrary) ) {
3045 const char* thisIDLastSlash = strrchr(fOptions.installPath(), '/');
3046 if ( (thisIDLastSlash != NULL) && (strcmp(&thisIDLastSlash[1], parentUmbrella) == 0) )
3047 hasReExports = true;
3048 }
3049 }
3050
3051 if ( dylibInfo.options.fWeakImport ) {
3052 fForcedWeakImportReaders.insert(dylibInfo.reader);
3053 }
3054
3055 if ( dylibInfo.options.fBundleLoader ) {
3056 fLibraryToOrdinal[dylibInfo.reader] = EXECUTABLE_ORDINAL;
3057 }
3058 else {
3059 // see if a DylibLoadCommandsAtom has already been created for this install path
3060 bool newDylib = true;
3061 const char* dylibInstallPath = dylibInfo.reader->getInstallPath();
3062 for (unsigned int seenLib=0; seenLib < i; ++seenLib) {
3063 ExecutableFile::DyLibUsed& seenDylibInfo = dynamicLibraries[seenLib];
3064 if ( !seenDylibInfo.options.fBundleLoader ) {
3065 const char* seenDylibInstallPath = seenDylibInfo.reader->getInstallPath();
3066 if ( strcmp(seenDylibInstallPath, dylibInstallPath) == 0 ) {
3067 fLibraryToOrdinal[dylibInfo.reader] = fLibraryToOrdinal[seenDylibInfo.reader];
3068 fLibraryToLoadCommand[dylibInfo.reader] = fLibraryToLoadCommand[seenDylibInfo.reader];
3069 fLibraryAliases[dylibInfo.reader] = seenDylibInfo.reader;
3070 newDylib = false;
3071 break;
3072 }
3073 }
3074 }
3075
3076 if ( newDylib ) {
3077 // assign new ordinal and check for other paired load commands
3078 fLibraryToOrdinal[dylibInfo.reader] = ordinal++;
3079 DylibLoadCommandsAtom<A>* dyliblc = new DylibLoadCommandsAtom<A>(*this, dylibInfo);
3080 fLibraryToLoadCommand[dylibInfo.reader] = dyliblc;
3081 fWriterSynthesizedAtoms.push_back(dyliblc);
3082 if ( dylibInfo.options.fReExport
3083 && !fOptions.useSimplifiedDylibReExports()
3084 && (fOptions.outputKind() == Options::kDynamicLibrary) ) {
3085 // see if the child names this image as its umbrella (i.e. it is a sub-framework of this framework)
3086 bool isSubFramework = false;
3087 const char* childInUmbrella = dylibInfo.reader->parentUmbrella();
3088 if ( childInUmbrella != NULL ) {
3089 const char* myLeaf = strrchr(fOptions.installPath(), '/');
3090 if ( myLeaf != NULL ) {
3091 if ( strcmp(childInUmbrella, &myLeaf[1]) == 0 )
3092 isSubFramework = true;
3093 }
3094 }
3095 // LC_SUB_FRAMEWORK is in child, so do nothing in parent
3096 if ( ! isSubFramework ) {
3097 // this dylib also needs a sub_x load command
3098 bool isFrameworkReExport = false;
3099 const char* lastSlash = strrchr(dylibInstallPath, '/');
3100 if ( lastSlash != NULL ) {
3101 char frameworkName[strlen(lastSlash)+20];
3102 sprintf(frameworkName, "/%s.framework/", &lastSlash[1]);
3103 isFrameworkReExport = (strstr(dylibInstallPath, frameworkName) != NULL);
3104 }
3105 if ( isFrameworkReExport ) {
3106 // needs a LC_SUB_UMBRELLA command
3107 fWriterSynthesizedAtoms.push_back(new SubUmbrellaLoadCommandsAtom<A>(*this, &lastSlash[1]));
3108 }
3109 else {
3110 // needs a LC_SUB_LIBRARY command
3111 const char* nameStart = &lastSlash[1];
3112 if ( lastSlash == NULL )
3113 nameStart = dylibInstallPath;
3114 int len = strlen(nameStart);
3115 const char* dot = strchr(nameStart, '.');
3116 if ( dot != NULL )
3117 len = dot - nameStart;
3118 fWriterSynthesizedAtoms.push_back(new SubLibraryLoadCommandsAtom<A>(*this, nameStart, len));
3119 }
3120 }
3121 }
3122 }
3123 }
3124 }
3125 // add umbrella command if needed
3126 if ( fOptions.umbrellaName() != NULL ) {
3127 fWriterSynthesizedAtoms.push_back(new UmbrellaLoadCommandsAtom<A>(*this, fOptions.umbrellaName()));
3128 }
3129 // add allowable client commands if used
3130 std::vector<const char*>& allowableClients = fOptions.allowableClients();
3131 for (std::vector<const char*>::iterator it=allowableClients.begin(); it != allowableClients.end(); ++it)
3132 fWriterSynthesizedAtoms.push_back(new AllowableClientLoadCommandsAtom<A>(*this, *it));
3133 }
3134 break;
3135 case Options::kStaticExecutable:
3136 case Options::kObjectFile:
3137 case Options::kDyld:
3138 case Options::kPreload:
3139 case Options::kKextBundle:
3140 break;
3141 }
3142 fNoReExportedDylibs = !hasReExports;
3143
3144 // add any rpath load commands
3145 for(std::vector<const char*>::const_iterator it=fOptions.rpaths().begin(); it != fOptions.rpaths().end(); ++it) {
3146 fWriterSynthesizedAtoms.push_back(new RPathLoadCommandsAtom<A>(*this, *it));
3147 }
3148
3149 // set up fSlideable
3150 switch ( fOptions.outputKind() ) {
3151 case Options::kObjectFile:
3152 case Options::kStaticExecutable:
3153 fSlideable = false;
3154 break;
3155 case Options::kDynamicExecutable:
3156 fSlideable = fOptions.positionIndependentExecutable();
3157 break;
3158 case Options::kDyld:
3159 case Options::kDynamicLibrary:
3160 case Options::kDynamicBundle:
3161 case Options::kPreload:
3162 case Options::kKextBundle:
3163 fSlideable = true;
3164 break;
3165 }
3166
3167 //fprintf(stderr, "ordinals table:\n");
3168 //for (std::map<class ObjectFile::Reader*, uint32_t>::iterator it = fLibraryToOrdinal.begin(); it != fLibraryToOrdinal.end(); ++it) {
3169 // fprintf(stderr, "%d <== %s\n", it->second, it->first->getPath());
3170 //}
3171 }
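// Annotation: in the constructor above, dylib ordinals are handed out in
// load-command order: the first distinct install path gets ordinal 1, the next
// gets 2, and so on. A -bundle_loader host is recorded with the special
// EXECUTABLE_ORDINAL, and a dylib whose install path was already seen reuses
// the ordinal and load command of the first occurrence (tracked via
// fLibraryAliases).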
3172
3173 template <typename A>
3174 Writer<A>::~Writer()
3175 {
3176 if ( fFilePath != NULL )
3177 free((void*)fFilePath);
3178 if ( fSymbolTable != NULL )
3179 delete [] fSymbolTable;
3180 }
3181
3182
3183 // for ppc64, -mdynamic-no-pic only works in low 2GB, so we might need to split the zeropage into two segments
3184 template <>bool Writer<ppc64>::mightNeedPadSegment() { return (fOptions.zeroPageSize() >= 0x80000000ULL); }
3185 template <typename A> bool Writer<A>::mightNeedPadSegment() { return false; }
3186
3187
3188 template <typename A>
3189 ObjectFile::Atom* Writer<A>::getUndefinedProxyAtom(const char* name)
3190 {
3191 if ( fOptions.outputKind() == Options::kKextBundle ) {
3192 return new UndefinedSymbolProxyAtom<A>(*this, name);
3193 }
3194 else if ( fOptions.outputKind() == Options::kObjectFile ) {
3195 // when doing -r -exported_symbols_list, don't create proxy for a symbol
3196 // that is supposed to be exported. We want an error instead
3197 // <rdar://problem/5062685> ld does not report error when -r is used and exported symbols are not defined.
3198 if ( fOptions.hasExportMaskList() && fOptions.shouldExport(name) )
3199 return NULL;
3200 else
3201 return new UndefinedSymbolProxyAtom<A>(*this, name);
3202 }
3203 else if ( (fOptions.undefinedTreatment() != Options::kUndefinedError) || fOptions.allowedUndefined(name) )
3204 return new UndefinedSymbolProxyAtom<A>(*this, name);
3205 else
3206 return NULL;
3207 }
3208
3209 template <typename A>
3210 uint8_t Writer<A>::ordinalForLibrary(ObjectFile::Reader* lib)
3211 {
3212 // flat namespace images use zero for all ordinals
3213 if ( fOptions.nameSpace() != Options::kTwoLevelNameSpace )
3214 return 0;
3215
3216 // is an UndefinedSymbolProxyAtom
3217 if ( lib == this )
3218 if ( fOptions.nameSpace() == Options::kTwoLevelNameSpace )
3219 return DYNAMIC_LOOKUP_ORDINAL;
3220
3221 std::map<class ObjectFile::Reader*, uint32_t>::iterator pos = fLibraryToOrdinal.find(lib);
3222 if ( pos != fLibraryToOrdinal.end() )
3223 return pos->second;
3224
3225 throw "can't find ordinal for imported symbol";
3226 }
3227
3228 template <typename A>
3229 bool Writer<A>::targetRequiresWeakBinding(const ObjectFile::Atom& target)
3230 {
3231 switch ( target.getDefinitionKind() ) {
3232 case ObjectFile::Atom::kExternalWeakDefinition:
3233 case ObjectFile::Atom::kWeakDefinition:
3234 return true;
3235 case ObjectFile::Atom::kExternalDefinition:
3236 case ObjectFile::Atom::kAbsoluteSymbol:
3237 case ObjectFile::Atom::kRegularDefinition:
3238 case ObjectFile::Atom::kTentativeDefinition:
3239 break;
3240 }
3241 return false;
3242 }
3243
3244 template <typename A>
3245 int Writer<A>::compressedOrdinalForImortedAtom(ObjectFile::Atom* target)
3246 {
3247 // flat namespace images use zero for all ordinals
3248 if ( fOptions.nameSpace() != Options::kTwoLevelNameSpace )
3249 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3250
3251 // is an UndefinedSymbolProxyAtom
3252 ObjectFile::Reader* lib = target->getFile();
3253 if ( lib == this )
3254 if ( fOptions.nameSpace() == Options::kTwoLevelNameSpace )
3255 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3256
3257 std::map<class ObjectFile::Reader*, uint32_t>::iterator pos;
3258 switch ( target->getDefinitionKind() ) {
3259 case ObjectFile::Atom::kExternalDefinition:
3260 case ObjectFile::Atom::kExternalWeakDefinition:
3261 pos = fLibraryToOrdinal.find(lib);
3262 if ( pos != fLibraryToOrdinal.end() ) {
3263 if ( pos->second == EXECUTABLE_ORDINAL )
3264 return BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE;
3265 else
3266 return pos->second;
3267 }
3268 break;
3269 case ObjectFile::Atom::kWeakDefinition:
3270 throw "compressedOrdinalForImortedAtom() should not have been called on a weak definition";
3271 case ObjectFile::Atom::kAbsoluteSymbol:
3272 case ObjectFile::Atom::kRegularDefinition:
3273 case ObjectFile::Atom::kTentativeDefinition:
3274 return BIND_SPECIAL_DYLIB_SELF;
3275 }
3276
3277 throw "can't find ordinal for imported symbol";
3278 }
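// Annotation: unlike ordinalForLibrary() above, which returns a uint8_t for
// the classic n_desc encoding, this compressed-info variant returns an int so
// it can report the BIND_SPECIAL_DYLIB_* special ordinals used by dyld info
// (SELF, MAIN_EXECUTABLE and FLAT_LOOKUP, conventionally 0, -1 and -2 in
// <mach-o/loader.h>).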
3279
3280
3281 template <typename A>
3282 ObjectFile::Atom& Writer<A>::makeObjcInfoAtom(ObjectFile::Reader::ObjcConstraint objcContraint, bool objcReplacementClasses)
3283 {
3284
3285 return *(new ObjCInfoAtom<A>(*this, objcContraint, objcReplacementClasses, fOptions.objCABIVersion2POverride()));
3286 }
3287
3288 template <typename A>
3289 void Writer<A>::addSynthesizedAtoms(const std::vector<class ObjectFile::Atom*>& existingAtoms,
3290 class ObjectFile::Atom* dyldClassicHelperAtom,
3291 class ObjectFile::Atom* dyldCompressedHelperAtom,
3292 class ObjectFile::Atom* dyldLazyDylibHelperAtom,
3293 bool biggerThanTwoGigs,
3294 uint32_t dylibSymbolCount,
3295 std::vector<class ObjectFile::Atom*>& newAtoms)
3296 {
3297 fDyldClassicHelperAtom = dyldClassicHelperAtom;
3298 fDyldCompressedHelperAtom = dyldCompressedHelperAtom;
3299 fDyldLazyDylibHelper = dyldLazyDylibHelperAtom;
3300 fBiggerThanTwoGigs = biggerThanTwoGigs;
3301 fDylibSymbolCountUpperBound = dylibSymbolCount;
3302
3303 // create inter-library stubs
3304 synthesizeStubs(existingAtoms, newAtoms);
3305 }
3306
3307
3308 template <typename A>
3309 uint64_t Writer<A>::write(std::vector<class ObjectFile::Atom*>& atoms,
3310 std::vector<class ObjectFile::Reader::Stab>& stabs,
3311 class ObjectFile::Atom* entryPointAtom,
3312 bool createUUID, bool canScatter, ObjectFile::Reader::CpuConstraint cpuConstraint,
3313 std::set<const class ObjectFile::Atom*>& atomsThatOverrideWeak,
3314 bool hasExternalWeakDefinitions)
3315 {
3316 fAllAtoms = &atoms;
3317 fStabs = &stabs;
3318 fEntryPoint = entryPointAtom;
3319 fCanScatter = canScatter;
3320 fCpuConstraint = cpuConstraint;
3321 fHasWeakExports = hasExternalWeakDefinitions; // dyld needs to search this image as if it had weak exports
3322 fRegularDefAtomsThatOverrideADylibsWeakDef = &atomsThatOverrideWeak;
3323
3324
3325 try {
3326 // generate the UUID if requested
3327 if (createUUID)
3328 fUUIDAtom->generate();
3329
3330 // remove unneeded dylib load commands
3331 optimizeDylibReferences();
3332
3333 // check for mdynamic-no-pic codegen
3334 scanForAbsoluteReferences();
3335
3336 // create table of unwind info
3337 synthesizeUnwindInfoTable();
3338
3339 // create SegmentInfo and SectionInfo objects and assign all atoms to a section
3340 partitionIntoSections();
3341
3342 // segment load command can now be sized and padding can be set
3343 adjustLoadCommandsAndPadding();
3344
3345 // assign each section a file offset
3346 assignFileOffsets();
3347
3348 // if need to add branch islands, reassign file offsets
3349 if ( addBranchIslands() )
3350 assignFileOffsets();
3351
3352 // now that addresses are assigned, create unwind info
3353 if ( fUnwindInfoAtom != NULL ) {
3354 fUnwindInfoAtom->generate();
3355 // re-layout
3356 adjustLoadCommandsAndPadding();
3357 assignFileOffsets();
3358 }
3359
3360 // make split-seg info now that all atoms exist
3361 createSplitSegContent();
3362
3363 // build symbol table and relocations
3364 buildLinkEdit();
3365
3366 // write map file if requested
3367 writeMap();
3368
3369 // write everything
3370 return writeAtoms();
3371 } catch (...) {
3372 // clean up if any errors
3373 (void)unlink(fFilePath);
3374 throw;
3375 }
3376 }
3377
3378 template <typename A>
3379 void Writer<A>::buildLinkEdit()
3380 {
3381 this->collectExportedAndImportedAndLocalAtoms();
3382 this->buildSymbolTable();
3383 this->buildFixups();
3384 this->adjustLinkEditSections();
3385 }
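// Annotation: buildLinkEdit() runs the LINKEDIT phases in a fixed order:
// bucket atoms into local/export/import lists, build the nlist symbol table
// from those buckets, generate relocations and/or compressed dyld info for
// every reference, and finally adjust the __LINKEDIT sections now that their
// content sizes are known.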
3386
3387
3388
3389 template <typename A>
3390 uint64_t Writer<A>::getAtomLoadAddress(const ObjectFile::Atom* atom)
3391 {
3392 return atom->getAddress();
3393 // SectionInfo* info = (SectionInfo*)atom->getSection();
3394 // return info->getBaseAddress() + atom->getSectionOffset();
3395 }
3396
3397 template <>
3398 bool Writer<x86_64>::stringsNeedLabelsInObjects()
3399 {
3400 return true;
3401 }
3402
3403 template <typename A>
3404 bool Writer<A>::stringsNeedLabelsInObjects()
3405 {
3406 return false;
3407 }
3408
3409 template <typename A>
3410 const char* Writer<A>::symbolTableName(const ObjectFile::Atom* atom)
3411 {
3412 static unsigned int counter = 0;
3413 const char* name;
3414 if ( stringsNeedLabelsInObjects()
3415 && (atom->getContentType() == ObjectFile::Atom::kCStringType)
3416 && (atom->getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) )
3417 asprintf((char**)&name, "LC%u", counter++);
3418 else
3419 name = atom->getName();
3420 return name;
3422 }
3423
3424 template <typename A>
3425 void Writer<A>::setExportNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry)
3426 {
3427 // set n_strx
3428 entry->set_n_strx(this->fStringsAtom->add(this->symbolTableName(atom)));
3429
3430 // set n_type
3431 if ( atom->getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableInAsAbsolute ) {
3432 entry->set_n_type(N_EXT | N_ABS);
3433 }
3434 else {
3435 entry->set_n_type(N_EXT | N_SECT);
3436 if ( (atom->getScope() == ObjectFile::Atom::scopeLinkageUnit) && (fOptions.outputKind() == Options::kObjectFile) ) {
3437 if ( fOptions.keepPrivateExterns() )
3438 entry->set_n_type(N_EXT | N_SECT | N_PEXT);
3439 }
3440 }
3441
3442 // set n_sect (section number of implementation )
3443 uint8_t sectionIndex = atom->getSection()->getIndex();
3444 entry->set_n_sect(sectionIndex);
3445
3446 // the __mh_execute_header is magic and must be an absolute symbol
3447 if ( (sectionIndex==0)
3448 && (fOptions.outputKind() == Options::kDynamicExecutable)
3449 && (atom->getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableInAndNeverStrip ))
3450 entry->set_n_type(N_EXT | N_ABS);
3451
3452 // set n_desc
3453 uint16_t desc = 0;
3454 if ( atom->isThumb() )
3455 desc |= N_ARM_THUMB_DEF;
3456 if ( atom->getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableInAndNeverStrip )
3457 desc |= REFERENCED_DYNAMICALLY;
3458 if ( atom->dontDeadStrip() && (fOptions.outputKind() == Options::kObjectFile) )
3459 desc |= N_NO_DEAD_STRIP;
3460 if ( atom->getDefinitionKind() == ObjectFile::Atom::kWeakDefinition ) {
3461 desc |= N_WEAK_DEF;
3462 fHasWeakExports = true;
3463 }
3464 entry->set_n_desc(desc);
3465
3466 // set n_value ( address this symbol will be at if this executable is loaded at its preferred address )
3467 if ( atom->getDefinitionKind() == ObjectFile::Atom::kAbsoluteSymbol )
3468 entry->set_n_value(atom->getSectionOffset());
3469 else
3470 entry->set_n_value(this->getAtomLoadAddress(atom));
3471 }
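// Illustrative example (values are assumptions, not taken from a real link):
// a global function exported from a dynamic executable would typically leave
// the routine above with
//   n_type  = N_EXT | N_SECT
//   n_sect  = index of its __TEXT,__text section
//   n_desc  = 0 (plus N_WEAK_DEF / N_ARM_THUMB_DEF etc. when applicable)
//   n_value = its address at the image's preferred load address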
3472
3473 template <typename A>
3474 void Writer<A>::setImportNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry)
3475 {
3476 // set n_strx
3477 entry->set_n_strx(this->fStringsAtom->add(atom->getName()));
3478
3479 // set n_type
3480 if ( fOptions.outputKind() == Options::kObjectFile ) {
3481 if ( (atom->getScope() == ObjectFile::Atom::scopeLinkageUnit)
3482 && (atom->getDefinitionKind() == ObjectFile::Atom::kTentativeDefinition) )
3483 entry->set_n_type(N_UNDF | N_EXT | N_PEXT);
3484 else
3485 entry->set_n_type(N_UNDF | N_EXT);
3486 }
3487 else {
3488 if ( fOptions.prebind() )
3489 entry->set_n_type(N_PBUD | N_EXT);
3490 else
3491 entry->set_n_type(N_UNDF | N_EXT);
3492 }
3493
3494 // set n_sect
3495 entry->set_n_sect(0);
3496
3497 uint16_t desc = 0;
3498 if ( fOptions.outputKind() != Options::kObjectFile ) {
3499 // set n_desc ( high byte is library ordinal, low byte is reference type )
3500 std::map<const ObjectFile::Atom*,ObjectFile::Atom*>::iterator pos = fStubsMap.find(atom);
3501 if ( pos != fStubsMap.end() || ( strncmp(atom->getName(), ".objc_class_name_", 17) == 0) )
3502 desc = REFERENCE_FLAG_UNDEFINED_LAZY;
3503 else
3504 desc = REFERENCE_FLAG_UNDEFINED_NON_LAZY;
3505 try {
3506 uint8_t ordinal = this->ordinalForLibrary(atom->getFile());
3507 //fprintf(stderr, "ordinal=%u from reader=%p for symbol=%s\n", ordinal, atom->getFile(), atom->getName());
3508 SET_LIBRARY_ORDINAL(desc, ordinal);
3509 }
3510 catch (const char* msg) {
3511 throwf("%s %s from %s", msg, atom->getDisplayName(), atom->getFile()->getPath());
3512 }
3513 }
3514 else if ( atom->getDefinitionKind() == ObjectFile::Atom::kTentativeDefinition ) {
3515 uint8_t align = atom->getAlignment().powerOf2;
3516 // always record custom alignment of common symbols to match what the compiler does
3517 SET_COMM_ALIGN(desc, align);
3518 }
3519 if ( atom->isThumb() )
3520 desc |= N_ARM_THUMB_DEF;
3521 if ( atom->getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableInAndNeverStrip )
3522 desc |= REFERENCED_DYNAMICALLY;
3523 if ( ( fOptions.outputKind() != Options::kObjectFile) && (atom->getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition) ) {
3524 desc |= N_REF_TO_WEAK;
3525 fReferencesWeakImports = true;
3526 }
3527 // set weak_import attribute
3528 if ( fWeakImportMap[atom] )
3529 desc |= N_WEAK_REF;
3530 entry->set_n_desc(desc);
3531
3532 // set n_value, zero for import proxy and size for tentative definition
3533 entry->set_n_value(atom->getSize());
3534 }
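// Annotation: for final linked images the n_desc value packed above carries
// the two-level-namespace library ordinal in its high byte (SET_LIBRARY_ORDINAL)
// and the lazy/non-lazy reference type in its low bits, optionally combined
// with N_WEAK_REF, N_REF_TO_WEAK and REFERENCED_DYNAMICALLY. For -r output the
// same field instead records the alignment of a tentative definition via
// SET_COMM_ALIGN.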
3535
3536
3537 template <typename A>
3538 void Writer<A>::setLocalNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry)
3539 {
3540 // set n_strx
3541 const char* symbolName = this->symbolTableName(atom);
3542 char anonName[32];
3543 if ( (fOptions.outputKind() == Options::kObjectFile) && !fOptions.keepLocalSymbol(symbolName) ) {
3544 if ( stringsNeedLabelsInObjects() && (atom->getContentType() == ObjectFile::Atom::kCStringType) ) {
3545 // don't use 'l' labels for x86_64 strings
3546 // <rdar://problem/6605499> x86_64 obj-c runtime confused when static lib is stripped
3547 }
3548 else {
3549 sprintf(anonName, "l%u", fAnonNameIndex++);
3550 symbolName = anonName;
3551 }
3552 }
3553 entry->set_n_strx(this->fStringsAtom->add(symbolName));
3554
3555 // set n_type
3556 uint8_t type = N_SECT;
3557 if ( atom->getDefinitionKind() == ObjectFile::Atom::kAbsoluteSymbol )
3558 type = N_ABS;
3559 if ( atom->getScope() == ObjectFile::Atom::scopeLinkageUnit )
3560 type |= N_PEXT;
3561 entry->set_n_type(type);
3562
3563 // set n_sect (section number of implementation )
3564 uint8_t sectIndex = atom->getSection()->getIndex();
3565 if ( sectIndex == 0 ) {
3566 // see <mach-o/ldsyms.h>: the synthesized label for the mach_header needs a special section number...
3567 if ( strcmp(atom->getSectionName(), "._mach_header") == 0 )
3568 sectIndex = 1;
3569 }
3570 entry->set_n_sect(sectIndex);
3571
3572 // set n_desc
3573 uint16_t desc = 0;
3574 if ( atom->dontDeadStrip() && (fOptions.outputKind() == Options::kObjectFile) )
3575 desc |= N_NO_DEAD_STRIP;
3576 if ( atom->getDefinitionKind() == ObjectFile::Atom::kWeakDefinition )
3577 desc |= N_WEAK_DEF;
3578 if ( atom->isThumb() )
3579 desc |= N_ARM_THUMB_DEF;
3580 entry->set_n_desc(desc);
3581
3582 // set n_value ( address this symbol will be at if this executable is loaded at its preferred address )
3583 if ( atom->getDefinitionKind() == ObjectFile::Atom::kAbsoluteSymbol )
3584 entry->set_n_value(atom->getSectionOffset());
3585 else
3586 entry->set_n_value(this->getAtomLoadAddress(atom));
3587 }
3588
3589
3590 template <typename A>
3591 void Writer<A>::addLocalLabel(ObjectFile::Atom& atom, uint32_t offsetInAtom, const char* name)
3592 {
3593 macho_nlist<P> entry;
3594
3595 // set n_strx
3596 entry.set_n_strx(fStringsAtom->add(name));
3597
3598 // set n_type
3599 entry.set_n_type(N_SECT);
3600
3601 // set n_sect (section number of implementation )
3602 entry.set_n_sect(atom.getSection()->getIndex());
3603
3604 // set n_desc
3605 entry.set_n_desc(0);
3606
3607 // set n_value ( address this symbol will be at if this executable is loaded at its preferred address )
3608 entry.set_n_value(this->getAtomLoadAddress(&atom) + offsetInAtom);
3609
3610 // add
3611 fLocalExtraLabels.push_back(entry);
3612 }
3613
3614
3615
3616 template <typename A>
3617 void Writer<A>::addGlobalLabel(ObjectFile::Atom& atom, uint32_t offsetInAtom, const char* name)
3618 {
3619 macho_nlist<P> entry;
3620
3621 // set n_strx
3622 entry.set_n_strx(fStringsAtom->add(name));
3623
3624 // set n_type
3625 entry.set_n_type(N_SECT|N_EXT);
3626
3627 // set n_sect (section number of implementation )
3628 entry.set_n_sect(atom.getSection()->getIndex());
3629
3630 // set n_desc
3631 entry.set_n_desc(0);
3632
3633 // set n_value ( address this symbol will be at if this executable is loaded at its preferred address )
3634 entry.set_n_value(this->getAtomLoadAddress(&atom) + offsetInAtom);
3635
3636 // add
3637 fGlobalExtraLabels.push_back(entry);
3638 }
3639
3640 template <typename A>
3641 void Writer<A>::setNlistRange(std::vector<class ObjectFile::Atom*>& atoms, uint32_t startIndex, uint32_t count)
3642 {
3643 macho_nlist<P>* entry = &fSymbolTable[startIndex];
3644 for (uint32_t i=0; i < count; ++i, ++entry) {
3645 ObjectFile::Atom* atom = atoms[i];
3646 if ( &atoms == &fExportedAtoms ) {
3647 this->setExportNlist(atom, entry);
3648 }
3649 else if ( &atoms == &fImportedAtoms ) {
3650 this->setImportNlist(atom, entry);
3651 }
3652 else {
3653 this->setLocalNlist(atom, entry);
3654 }
3655 }
3656 }
3657
3658 template <typename A>
3659 void Writer<A>::copyNlistRange(const std::vector<macho_nlist<P> >& entries, uint32_t startIndex)
3660 {
3661 for ( typename std::vector<macho_nlist<P> >::const_iterator it = entries.begin(); it != entries.end(); ++it)
3662 fSymbolTable[startIndex++] = *it;
3663 }
3664
3665
3666 template <typename A>
3667 struct NListNameSorter
3668 {
3669 NListNameSorter(StringsLinkEditAtom<A>* pool) : fStringPool(pool) {}
3670
3671 bool operator()(const macho_nlist<typename A::P>& left, const macho_nlist<typename A::P>& right)
3672 {
3673 return (strcmp(fStringPool->stringForIndex(left.n_strx()), fStringPool->stringForIndex(right.n_strx())) < 0);
3674 }
3675 private:
3676 StringsLinkEditAtom<A>* fStringPool;
3677 };
3678
3679
3680 template <typename A>
3681 void Writer<A>::buildSymbolTable()
3682 {
3683 fSymbolTableStabsStartIndex = 0;
3684 fSymbolTableStabsCount = fStabs->size();
3685 fSymbolTableLocalStartIndex = fSymbolTableStabsStartIndex + fSymbolTableStabsCount;
3686 fSymbolTableLocalCount = fLocalSymbolAtoms.size() + fLocalExtraLabels.size();
3687 fSymbolTableExportStartIndex = fSymbolTableLocalStartIndex + fSymbolTableLocalCount;
3688 fSymbolTableExportCount = fExportedAtoms.size() + fGlobalExtraLabels.size();
3689 fSymbolTableImportStartIndex = fSymbolTableExportStartIndex + fSymbolTableExportCount;
3690 fSymbolTableImportCount = fImportedAtoms.size();
3691
3692 // allocate symbol table
3693 fSymbolTableCount = fSymbolTableStabsCount + fSymbolTableLocalCount + fSymbolTableExportCount + fSymbolTableImportCount;
3694 fSymbolTable = new macho_nlist<P>[fSymbolTableCount];
3695
3696 // fill in symbol table and string pool (do stabs last so strings are at end of pool)
3697 setNlistRange(fLocalSymbolAtoms, fSymbolTableLocalStartIndex, fLocalSymbolAtoms.size());
3698 if ( fLocalExtraLabels.size() != 0 )
3699 copyNlistRange(fLocalExtraLabels, fSymbolTableLocalStartIndex+fLocalSymbolAtoms.size());
3700 setNlistRange(fExportedAtoms, fSymbolTableExportStartIndex, fExportedAtoms.size());
3701 if ( fGlobalExtraLabels.size() != 0 ) {
3702 copyNlistRange(fGlobalExtraLabels, fSymbolTableExportStartIndex+fExportedAtoms.size());
3703 // re-sort combined range
3704 std::sort( &fSymbolTable[fSymbolTableExportStartIndex],
3705 &fSymbolTable[fSymbolTableExportStartIndex+fSymbolTableExportCount],
3706 NListNameSorter<A>(fStringsAtom) );
3707 }
3708 setNlistRange(fImportedAtoms, fSymbolTableImportStartIndex, fSymbolTableImportCount);
3709 addStabs(fSymbolTableStabsStartIndex);
3710
3711 // set up module table
3712 if ( fModuleInfoAtom != NULL )
3713 fModuleInfoAtom->setName();
3714
3715 // create atom to symbol index map
3716 // imports
3717 int i = 0;
3718 for(std::vector<ObjectFile::Atom*>::iterator it=fImportedAtoms.begin(); it != fImportedAtoms.end(); ++it) {
3719 fAtomToSymbolIndex[*it] = i + fSymbolTableImportStartIndex;
3720 ++i;
3721 }
3722 // locals
3723 i = 0;
3724 for(std::vector<ObjectFile::Atom*>::iterator it=fLocalSymbolAtoms.begin(); it != fLocalSymbolAtoms.end(); ++it) {
3725 fAtomToSymbolIndex[*it] = i + fSymbolTableLocalStartIndex;
3726 ++i;
3727 }
3728 // exports
3729 i = 0;
3730 for(std::vector<ObjectFile::Atom*>::iterator it=fExportedAtoms.begin(); it != fExportedAtoms.end(); ++it) {
3731 fAtomToSymbolIndex[*it] = i + fSymbolTableExportStartIndex;
3732 ++i;
3733 }
3734
3735 }
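// Annotation: the symbol table built above is laid out as four contiguous
// ranges, in this order:
//   [stabs][locals + extra local labels][exports + extra global labels][imports]
// with the fSymbolTable*StartIndex/Count fields delimiting each range. The
// export range is re-sorted by name (NListNameSorter) after any extra global
// labels are appended, keeping that range alphabetical.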
3736
3737
3738
3739 template <typename A>
3740 bool Writer<A>::shouldExport(const ObjectFile::Atom& atom) const
3741 {
3742 switch ( atom.getSymbolTableInclusion() ) {
3743 case ObjectFile::Atom::kSymbolTableNotIn:
3744 return false;
3745 case ObjectFile::Atom::kSymbolTableInAndNeverStrip:
3746 return true;
3747 case ObjectFile::Atom::kSymbolTableInAsAbsolute:
3748 case ObjectFile::Atom::kSymbolTableIn:
3749 switch ( atom.getScope() ) {
3750 case ObjectFile::Atom::scopeGlobal:
3751 return true;
3752 case ObjectFile::Atom::scopeLinkageUnit:
3753 return ( (fOptions.outputKind() == Options::kObjectFile) && fOptions.keepPrivateExterns() );
3754 default:
3755 return false;
3756 }
3757 break;
3758 }
3759 return false;
3760 }
3761
3762 template <typename A>
3763 void Writer<A>::collectExportedAndImportedAndLocalAtoms()
3764 {
3765 const int atomCount = fAllAtoms->size();
3766 // guess at sizes of each bucket to minimize re-allocations
3767 fImportedAtoms.reserve(100);
3768 fExportedAtoms.reserve(atomCount/2);
3769 fLocalSymbolAtoms.reserve(atomCount);
3770
3771 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
3772 std::vector<SectionInfo*>& sectionInfos = (*segit)->fSections;
3773 for (std::vector<SectionInfo*>::iterator secit = sectionInfos.begin(); secit != sectionInfos.end(); ++secit) {
3774 std::vector<ObjectFile::Atom*>& sectionAtoms = (*secit)->fAtoms;
3775 for (std::vector<ObjectFile::Atom*>::iterator ait = sectionAtoms.begin(); ait != sectionAtoms.end(); ++ait) {
3776 ObjectFile::Atom* atom = *ait;
3777 // only named atoms go in symbol table
3778 if ( atom->getName() != NULL ) {
3779 // put atom into correct bucket: imports, exports, locals
3780 //fprintf(stderr, "collectExportedAndImportedAndLocalAtoms() name=%s\n", atom->getDisplayName());
3781 switch ( atom->getDefinitionKind() ) {
3782 case ObjectFile::Atom::kExternalDefinition:
3783 case ObjectFile::Atom::kExternalWeakDefinition:
3784 fImportedAtoms.push_back(atom);
3785 break;
3786 case ObjectFile::Atom::kTentativeDefinition:
3787 if ( (fOptions.outputKind() == Options::kObjectFile) && !fOptions.readerOptions().fMakeTentativeDefinitionsReal ) {
3788 fImportedAtoms.push_back(atom);
3789 break;
3790 }
3791 // else fall through
3792 case ObjectFile::Atom::kWeakDefinition:
3793 if ( stringsNeedLabelsInObjects()
3794 && (fOptions.outputKind() == Options::kObjectFile)
3795 && (atom->getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableIn)
3796 && (atom->getScope() == ObjectFile::Atom::scopeLinkageUnit)
3797 && (atom->getContentType() == ObjectFile::Atom::kCStringType) ) {
3798 fLocalSymbolAtoms.push_back(atom);
3799 break;
3800 }
3801 // else fall through
3802 case ObjectFile::Atom::kRegularDefinition:
3803 case ObjectFile::Atom::kAbsoluteSymbol:
3804 if ( this->shouldExport(*atom) )
3805 fExportedAtoms.push_back(atom);
3806 else if ( (atom->getSymbolTableInclusion() != ObjectFile::Atom::kSymbolTableNotIn)
3807 && ((fOptions.outputKind() == Options::kObjectFile) || fOptions.keepLocalSymbol(atom->getName())) )
3808 fLocalSymbolAtoms.push_back(atom);
3809 break;
3810 }
3811 }
3812 // when generating a .o file, dtrace static probes become local labels
3813 if ( (fOptions.outputKind() == Options::kObjectFile) && !fOptions.readerOptions().fForStatic ) {
3814 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
3815 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
3816 ObjectFile::Reference* ref = *rit;
3817 if ( ref->getKind() == A::kDtraceProbe ) {
3818 // dtrace probe points to be added back into the generated .o file
3819 this->addLocalLabel(*atom, ref->getFixUpOffset(), ref->getTargetName());
3820 }
3821 }
3822 }
3823 // when linking the kernel, old-style dtrace static probes become global labels
3824 else if ( fOptions.readerOptions().fForStatic ) {
3825 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
3826 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
3827 ObjectFile::Reference* ref = *rit;
3828 if ( ref->getKind() == A::kDtraceProbe ) {
3829 // dtrace probe points to be added back as global labels
3830 this->addGlobalLabel(*atom, ref->getFixUpOffset(), ref->getTargetName());
3831 }
3832 }
3833 }
3834 }
3835 }
3836 }
3837
3838 // sort exported atoms by name
3839 std::sort(fExportedAtoms.begin(), fExportedAtoms.end(), AtomByNameSorter());
3840 // sort imported atoms by name (not required by runtime, but helps make generated files binary diffable)
3841 std::sort(fImportedAtoms.begin(), fImportedAtoms.end(), AtomByNameSorter());
3842 }
3843
3844
3845 template <typename A>
3846 uint64_t Writer<A>::valueForStab(const ObjectFile::Reader::Stab& stab)
3847 {
3848 switch ( stab.type ) {
3849 case N_FUN:
3850 if ( (stab.string == NULL) || (strlen(stab.string) == 0) ) {
3851 // end of function N_FUN has size
3852 return stab.atom->getSize();
3853 }
3854 else {
3855 // start of function N_FUN has address
3856 return getAtomLoadAddress(stab.atom);
3857 }
3858 case N_LBRAC:
3859 case N_RBRAC:
3860 case N_SLINE:
3861 if ( stab.atom == NULL )
3862 // some weird assembly files have slines not associated with a function
3863 return stab.value;
3864 else
3865 // all these stab types need their value changed from an offset in the atom to an address
3866 return getAtomLoadAddress(stab.atom) + stab.value;
3867 case N_STSYM:
3868 case N_LCSYM:
3869 case N_BNSYM:
3870 // all these need address of atom
3871 return getAtomLoadAddress(stab.atom);
3872 case N_ENSYM:
3873 return stab.atom->getSize();
3874 case N_SO:
3875 if ( stab.atom == NULL ) {
3876 return 0;
3877 }
3878 else {
3879 if ( (stab.string == NULL) || (strlen(stab.string) == 0) ) {
3880 // end of translation unit N_SO has address of end of last atom
3881 return getAtomLoadAddress(stab.atom) + stab.atom->getSize();
3882 }
3883 else {
3884 // start of translation unit N_SO has address of first atom
3885 return getAtomLoadAddress(stab.atom);
3886 }
3887 }
3888 break;
3889 default:
3890 return stab.value;
3891 }
3892 }
3893
3894 template <typename A>
3895 uint32_t Writer<A>::stringOffsetForStab(const ObjectFile::Reader::Stab& stab)
3896 {
3897 switch (stab.type) {
3898 case N_SO:
3899 if ( (stab.string == NULL) || stab.string[0] == '\0' ) {
3900 return this->fStringsAtom->emptyString();
3901 break;
3902 }
3903 // fall through to uniquing case
3904 case N_SOL:
3905 case N_BINCL:
3906 case N_EXCL:
3907 return this->fStringsAtom->addUnique(stab.string);
3908 break;
3909 default:
3910 if ( stab.string == NULL )
3911 return 0;
3912 else if ( stab.string[0] == '\0' )
3913 return this->fStringsAtom->emptyString();
3914 else
3915 return this->fStringsAtom->add(stab.string);
3916 }
3917 return 0;
3918 }
3919
3920 template <typename A>
3921 uint8_t Writer<A>::sectionIndexForStab(const ObjectFile::Reader::Stab& stab)
3922 {
3923 // in FUN stabs, n_sect field is 0 for start FUN and 1 for end FUN
3924 if ( stab.type == N_FUN )
3925 return stab.other;
3926 else if ( stab.atom != NULL )
3927 return stab.atom->getSection()->getIndex();
3928 else
3929 return stab.other;
3930 }
3931
3932 template <typename A>
3933 void Writer<A>::addStabs(uint32_t startIndex)
3934 {
3935 macho_nlist<P>* entry = &fSymbolTable[startIndex];
3936 for(std::vector<ObjectFile::Reader::Stab>::iterator it = fStabs->begin(); it != fStabs->end(); ++it, ++entry) {
3937 const ObjectFile::Reader::Stab& stab = *it;
3938 entry->set_n_type(stab.type);
3939 entry->set_n_sect(sectionIndexForStab(stab));
3940 entry->set_n_desc(stab.desc);
3941 entry->set_n_value(valueForStab(stab));
3942 entry->set_n_strx(stringOffsetForStab(stab));
3943 }
3944 }
3945
3946
3947
3948 template <typename A>
3949 uint32_t Writer<A>::symbolIndex(ObjectFile::Atom& atom)
3950 {
3951 std::map<ObjectFile::Atom*, uint32_t>::iterator pos = fAtomToSymbolIndex.find(&atom);
3952 if ( pos != fAtomToSymbolIndex.end() )
3953 return pos->second;
3954 throwf("atom not found in symbolIndex(%s) for %s", atom.getDisplayName(), atom.getFile()->getPath());
3955 }
3956
3957
3958 template <>
3959 bool Writer<x86_64>::makesExternalRelocatableReference(ObjectFile::Atom& target) const
3960 {
3961 switch ( target.getSymbolTableInclusion() ) {
3962 case ObjectFile::Atom::kSymbolTableNotIn:
3963 return false;
3964 case ObjectFile::Atom::kSymbolTableInAsAbsolute:
3965 case ObjectFile::Atom::kSymbolTableIn:
3966 case ObjectFile::Atom::kSymbolTableInAndNeverStrip:
3967 return true;
3968 };
3969 return false;
3970 }
3971
3972 template <typename A>
3973 bool Writer<A>::makesExternalRelocatableReference(ObjectFile::Atom& target) const
3974 {
3975 switch ( target.getDefinitionKind() ) {
3976 case ObjectFile::Atom::kRegularDefinition:
3977 case ObjectFile::Atom::kWeakDefinition:
3978 case ObjectFile::Atom::kAbsoluteSymbol:
3979 return false;
3980 case ObjectFile::Atom::kTentativeDefinition:
3981 if ( fOptions.readerOptions().fMakeTentativeDefinitionsReal )
3982 return false;
3983 else
3984 return (target.getScope() != ObjectFile::Atom::scopeTranslationUnit);
3985 case ObjectFile::Atom::kExternalDefinition:
3986 case ObjectFile::Atom::kExternalWeakDefinition:
3987 return shouldExport(target);
3988 }
3989 return false;
3990 }
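// Annotation: the two overloads above capture a per-architecture difference in
// .o relocation style: the x86_64 specialization emits an external (symbol-
// indexed) relocation whenever the target has a symbol table entry, while the
// generic version externalizes only references to tentative definitions with
// non-translation-unit scope and to external (dylib) definitions that pass
// shouldExport().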
3991
3992 template <typename A>
3993 void Writer<A>::buildFixups()
3994 {
3995 if ( fOptions.outputKind() == Options::kObjectFile ) {
3996 this->buildObjectFileFixups();
3997 }
3998 else {
3999 if ( fOptions.keepRelocations() )
4000 this->buildObjectFileFixups();
4001 this->buildExecutableFixups();
4002 }
4003 }
4004
4005 template <>
4006 uint32_t Writer<x86_64>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4007 {
4008 ObjectFile::Atom& target = ref->getTarget();
4009 bool external = this->makesExternalRelocatableReference(target);
4010 uint32_t symbolIndex = external ? this->symbolIndex(target) : target.getSection()->getIndex();
4011 uint32_t address = atom->getSectionOffset()+ref->getFixUpOffset();
4012 macho_relocation_info<P> reloc1;
4013 macho_relocation_info<P> reloc2;
4014 x86_64::ReferenceKinds kind = (x86_64::ReferenceKinds)ref->getKind();
4015
4016 switch ( kind ) {
4017 case x86_64::kNoFixUp:
4018 case x86_64::kGOTNoFixUp:
4019 case x86_64::kFollowOn:
4020 case x86_64::kGroupSubordinate:
4021 return 0;
4022
4023 case x86_64::kPointer:
4024 case x86_64::kPointerWeakImport:
4025 reloc1.set_r_address(address);
4026 reloc1.set_r_symbolnum(symbolIndex);
4027 reloc1.set_r_pcrel(false);
4028 reloc1.set_r_length(3);
4029 reloc1.set_r_extern(external);
4030 reloc1.set_r_type(X86_64_RELOC_UNSIGNED);
4031 fSectionRelocs.push_back(reloc1);
4032 return 1;
4033
4034 case x86_64::kPointer32:
4035 reloc1.set_r_address(address);
4036 reloc1.set_r_symbolnum(symbolIndex);
4037 reloc1.set_r_pcrel(false);
4038 reloc1.set_r_length(2);
4039 reloc1.set_r_extern(external);
4040 reloc1.set_r_type(X86_64_RELOC_UNSIGNED);
4041 fSectionRelocs.push_back(reloc1);
4042 return 1;
4043
4044 case x86_64::kPointerDiff32:
4045 case x86_64::kPointerDiff:
4046 {
4047 ObjectFile::Atom& fromTarget = ref->getFromTarget();
4048 bool fromExternal = (fromTarget.getSymbolTableInclusion() != ObjectFile::Atom::kSymbolTableNotIn);
4049 uint32_t fromSymbolIndex = fromExternal ? this->symbolIndex(fromTarget) : fromTarget.getSection()->getIndex();
4050 reloc1.set_r_address(address);
4051 reloc1.set_r_symbolnum(symbolIndex);
4052 reloc1.set_r_pcrel(false);
4053 reloc1.set_r_length(kind==x86_64::kPointerDiff32 ? 2 : 3);
4054 reloc1.set_r_extern(external);
4055 reloc1.set_r_type(X86_64_RELOC_UNSIGNED);
4056 reloc2.set_r_address(address);
4057 reloc2.set_r_symbolnum(fromSymbolIndex);
4058 reloc2.set_r_pcrel(false);
4059 reloc2.set_r_length(kind==x86_64::kPointerDiff32 ? 2 : 3);
4060 reloc2.set_r_extern(fromExternal);
4061 reloc2.set_r_type(X86_64_RELOC_SUBTRACTOR);
4062 fSectionRelocs.push_back(reloc1);
4063 fSectionRelocs.push_back(reloc2);
4064 return 2;
4065 }
4066
4067 case x86_64::kBranchPCRel32:
4068 case x86_64::kBranchPCRel32WeakImport:
4069 case x86_64::kDtraceProbeSite:
4070 case x86_64::kDtraceIsEnabledSite:
4071 reloc1.set_r_address(address);
4072 reloc1.set_r_symbolnum(symbolIndex);
4073 reloc1.set_r_pcrel(true);
4074 reloc1.set_r_length(2);
4075 reloc1.set_r_extern(external);
4076 reloc1.set_r_type(X86_64_RELOC_BRANCH);
4077 fSectionRelocs.push_back(reloc1);
4078 return 1;
4079
4080 case x86_64::kPCRel32:
4081 reloc1.set_r_address(address);
4082 reloc1.set_r_symbolnum(symbolIndex);
4083 reloc1.set_r_pcrel(true);
4084 reloc1.set_r_length(2);
4085 reloc1.set_r_extern(external);
4086 reloc1.set_r_type(X86_64_RELOC_SIGNED);
4087 fSectionRelocs.push_back(reloc1);
4088 return 1;
4089
4090 case x86_64::kPCRel32_1:
4091 reloc1.set_r_address(address);
4092 reloc1.set_r_symbolnum(symbolIndex);
4093 reloc1.set_r_pcrel(true);
4094 reloc1.set_r_length(2);
4095 reloc1.set_r_extern(external);
4096 reloc1.set_r_type(X86_64_RELOC_SIGNED_1);
4097 fSectionRelocs.push_back(reloc1);
4098 return 1;
4099
4100 case x86_64::kPCRel32_2:
4101 reloc1.set_r_address(address);
4102 reloc1.set_r_symbolnum(symbolIndex);
4103 reloc1.set_r_pcrel(true);
4104 reloc1.set_r_length(2);
4105 reloc1.set_r_extern(external);
4106 reloc1.set_r_type(X86_64_RELOC_SIGNED_2);
4107 fSectionRelocs.push_back(reloc1);
4108 return 1;
4109
4110 case x86_64::kPCRel32_4:
4111 reloc1.set_r_address(address);
4112 reloc1.set_r_symbolnum(symbolIndex);
4113 reloc1.set_r_pcrel(true);
4114 reloc1.set_r_length(2);
4115 reloc1.set_r_extern(external);
4116 reloc1.set_r_type(X86_64_RELOC_SIGNED_4);
4117 fSectionRelocs.push_back(reloc1);
4118 return 1;
4119
4120 case x86_64::kBranchPCRel8:
4121 reloc1.set_r_address(address);
4122 reloc1.set_r_symbolnum(symbolIndex);
4123 reloc1.set_r_pcrel(true);
4124 reloc1.set_r_length(0);
4125 reloc1.set_r_extern(external);
4126 reloc1.set_r_type(X86_64_RELOC_BRANCH);
4127 fSectionRelocs.push_back(reloc1);
4128 return 1;
4129
4130 case x86_64::kPCRel32GOT:
4131 case x86_64::kPCRel32GOTWeakImport:
4132 reloc1.set_r_address(address);
4133 reloc1.set_r_symbolnum(symbolIndex);
4134 reloc1.set_r_pcrel(true);
4135 reloc1.set_r_length(2);
4136 reloc1.set_r_extern(external);
4137 reloc1.set_r_type(X86_64_RELOC_GOT);
4138 fSectionRelocs.push_back(reloc1);
4139 return 1;
4140
4141 case x86_64::kPCRel32GOTLoad:
4142 case x86_64::kPCRel32GOTLoadWeakImport:
4143 reloc1.set_r_address(address);
4144 reloc1.set_r_symbolnum(symbolIndex);
4145 reloc1.set_r_pcrel(true);
4146 reloc1.set_r_length(2);
4147 reloc1.set_r_extern(external);
4148 reloc1.set_r_type(X86_64_RELOC_GOT_LOAD);
4149 fSectionRelocs.push_back(reloc1);
4150 return 1;
4151
4152 case x86_64::kPointerDiff24:
4153 throw "internal linker error, kPointerDiff24 can't be encoded into object files";
4154
4155 case x86_64::kImageOffset32:
4156 throw "internal linker error, kImageOffset32 can't be encoded into object files";
4157
4158 case x86_64::kSectionOffset24:
4159 throw "internal linker error, kSectionOffset24 can't be encoded into object files";
4160
4161 case x86_64::kDtraceTypeReference:
4162 case x86_64::kDtraceProbe:
4163 // generates no relocs
4164 return 0;
4165 }
4166 return 0;
4167 }
4168
4169
4170 template <>
4171 uint32_t Writer<x86>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4172 {
4173 ObjectFile::Atom& target = ref->getTarget();
4174 bool isExtern = this->makesExternalRelocatableReference(target);
4175 uint32_t symbolIndex = 0;
4176 if ( isExtern )
4177 symbolIndex = this->symbolIndex(target);
4178 uint32_t sectionNum = target.getSection()->getIndex();
4179 uint32_t address = atom->getSectionOffset()+ref->getFixUpOffset();
4180 macho_relocation_info<P> reloc1;
4181 macho_relocation_info<P> reloc2;
4182 macho_scattered_relocation_info<P>* sreloc1 = (macho_scattered_relocation_info<P>*)&reloc1;
4183 macho_scattered_relocation_info<P>* sreloc2 = (macho_scattered_relocation_info<P>*)&reloc2;
4184 x86::ReferenceKinds kind = (x86::ReferenceKinds)ref->getKind();
4185
4186 if ( !isExtern && (sectionNum == 0) && (target.getDefinitionKind() != ObjectFile::Atom::kAbsoluteSymbol) )
4187 warning("section index == 0 for %s (kind=%d, scope=%d, inclusion=%d) in %s",
4188 target.getDisplayName(), target.getDefinitionKind(), target.getScope(), target.getSymbolTableInclusion(), target.getFile()->getPath());
4189
4190
4191 switch ( kind ) {
4192 case x86::kNoFixUp:
4193 case x86::kFollowOn:
4194 case x86::kGroupSubordinate:
4195 return 0;
4196
4197 case x86::kPointer:
4198 case x86::kPointerWeakImport:
4199 case x86::kAbsolute32:
4200 if ( !isExtern && (ref->getTargetOffset() != 0) ) {
4201 // use scattered reloc if target offset is non-zero
4202 sreloc1->set_r_scattered(true);
4203 sreloc1->set_r_pcrel(false);
4204 sreloc1->set_r_length(2);
4205 sreloc1->set_r_type(GENERIC_RELOC_VANILLA);
4206 sreloc1->set_r_address(address);
4207 sreloc1->set_r_value(target.getAddress());
4208 }
4209 else {
4210 reloc1.set_r_address(address);
4211 reloc1.set_r_symbolnum(isExtern ? symbolIndex : sectionNum);
4212 reloc1.set_r_pcrel(false);
4213 reloc1.set_r_length(2);
4214 reloc1.set_r_extern(isExtern);
4215 reloc1.set_r_type(GENERIC_RELOC_VANILLA);
4216 }
4217 fSectionRelocs.push_back(reloc1);
4218 return 1;
4219
4220 case x86::kPointerDiff16:
4221 case x86::kPointerDiff:
4222 {
4223 //pint_t fromAddr = ref->getFromTarget().getAddress() + ref->getFromTargetOffset();
4224 //fprintf(stderr, "addObjectRelocs(): refFromTarget=%s, refTarget=%s, refFromTargetAddr=0x%llX, refFromTargetOffset=0x%llX\n",
4225 // ref->getFromTarget().getDisplayName(), ref->getTarget().getDisplayName(),
4226 // ref->getFromTarget().getAddress(), ref->getFromTargetOffset());
4227 sreloc1->set_r_scattered(true);
4228 sreloc1->set_r_pcrel(false);
4229 sreloc1->set_r_length( (kind==x86::kPointerDiff) ? 2 : 1 );
4230 if ( ref->getTarget().getScope() == ObjectFile::Atom::scopeTranslationUnit )
4231 sreloc1->set_r_type(GENERIC_RELOC_LOCAL_SECTDIFF);
4232 else
4233 sreloc1->set_r_type(GENERIC_RELOC_SECTDIFF);
4234 sreloc1->set_r_address(address);
4235 sreloc1->set_r_value(target.getAddress());
4236
4237 sreloc2->set_r_scattered(true);
4238 sreloc2->set_r_pcrel(false);
4239 sreloc2->set_r_length( (kind==x86::kPointerDiff) ? 2 : 1 );
4240 sreloc2->set_r_type(GENERIC_RELOC_PAIR);
4241 sreloc2->set_r_address(0);
4242 if ( &ref->getFromTarget() == atom )
4243 sreloc2->set_r_value(ref->getFromTarget().getAddress()+ref->getFromTargetOffset());
4244 else
4245 sreloc2->set_r_value(ref->getFromTarget().getAddress());
4246 fSectionRelocs.push_back(reloc2);
4247 fSectionRelocs.push_back(reloc1);
4248 return 2;
4249 }
4250
4251 case x86::kPCRel32WeakImport:
4252 case x86::kPCRel32:
4253 case x86::kPCRel16:
4254 case x86::kPCRel8:
4255 case x86::kDtraceProbeSite:
4256 case x86::kDtraceIsEnabledSite:
4257 if ( !isExtern && (ref->getTargetOffset() != 0) ) {
4258 // use scattered reloc if target offset is non-zero
4259 sreloc1->set_r_scattered(true);
4260 sreloc1->set_r_pcrel(true);
4261 sreloc1->set_r_length( (kind==x86::kPCRel8) ? 0 : ((kind==x86::kPCRel16) ? 1 : 2) );
4262 sreloc1->set_r_type(GENERIC_RELOC_VANILLA);
4263 sreloc1->set_r_address(address);
4264 sreloc1->set_r_value(target.getAddress());
4265 }
4266 else {
4267 reloc1.set_r_address(address);
4268 reloc1.set_r_symbolnum(isExtern ? symbolIndex : sectionNum);
4269 reloc1.set_r_pcrel(true);
4270 reloc1.set_r_length( (kind==x86::kPCRel8) ? 0 : ((kind==x86::kPCRel16) ? 1 : 2) );
4271 reloc1.set_r_extern(isExtern);
4272 reloc1.set_r_type(GENERIC_RELOC_VANILLA);
4273 }
4274 fSectionRelocs.push_back(reloc1);
4275 return 1;
4276
4277 case x86::kPointerDiff24:
4278 throw "internal linker error, kPointerDiff24 can't be encoded into object files";
4279
4280 case x86::kImageOffset32:
4281 throw "internal linker error, kImageOffset32 can't be encoded into object files";
4282
4283 case x86::kSectionOffset24:
4284 throw "internal linker error, kSectionOffset24 can't be encoded into object files";
4285
4286 case x86::kDtraceTypeReference:
4287 case x86::kDtraceProbe:
4288 // generates no relocs
4289 return 0;
4290
4291 }
4292 return 0;
4293 }
4294
4295 template <>
4296 uint32_t Writer<arm>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4297 {
4298 ObjectFile::Atom& target = ref->getTarget();
4299 bool isExtern = this->makesExternalRelocatableReference(target);
4300 uint32_t symbolIndex = 0;
4301 if ( isExtern )
4302 symbolIndex = this->symbolIndex(target);
4303 uint32_t sectionNum = target.getSection()->getIndex();
4304 uint32_t address = atom->getSectionOffset()+ref->getFixUpOffset();
4305 macho_relocation_info<P> reloc1;
4306 macho_relocation_info<P> reloc2;
4307 macho_scattered_relocation_info<P>* sreloc1 = (macho_scattered_relocation_info<P>*)&reloc1;
4308 macho_scattered_relocation_info<P>* sreloc2 = (macho_scattered_relocation_info<P>*)&reloc2;
4309 arm::ReferenceKinds kind = (arm::ReferenceKinds)ref->getKind();
4310
4311 if ( !isExtern && (sectionNum == 0) && (target.getDefinitionKind() != ObjectFile::Atom::kAbsoluteSymbol) )
4312 warning("section index == 0 for %s (kind=%d, scope=%d, inclusion=%d) in %s",
4313 target.getDisplayName(), target.getDefinitionKind(), target.getScope(), target.getSymbolTableInclusion(), target.getFile()->getPath());
4314
4315
4316 switch ( kind ) {
4317 case arm::kNoFixUp:
4318 case arm::kFollowOn:
4319 case arm::kGroupSubordinate:
4320 return 0;
4321
4322 case arm::kPointer:
4323 case arm::kReadOnlyPointer:
4324 case arm::kPointerWeakImport:
4325 if ( !isExtern && (ref->getTargetOffset() != 0) ) {
4326 // use scattered reloc if target offset is non-zero
4327 sreloc1->set_r_scattered(true);
4328 sreloc1->set_r_pcrel(false);
4329 sreloc1->set_r_length(2);
4330 sreloc1->set_r_type(ARM_RELOC_VANILLA);
4331 sreloc1->set_r_address(address);
4332 sreloc1->set_r_value(target.getAddress());
4333 }
4334 else {
4335 reloc1.set_r_address(address);
4336 reloc1.set_r_symbolnum(isExtern ? symbolIndex : sectionNum);
4337 reloc1.set_r_pcrel(false);
4338 reloc1.set_r_length(2);
4339 reloc1.set_r_extern(isExtern);
4340 reloc1.set_r_type(ARM_RELOC_VANILLA);
4341 }
4342 fSectionRelocs.push_back(reloc1);
4343 return 1;
4344
4345 case arm::kPointerDiff:
4346 {
4347 sreloc1->set_r_scattered(true);
4348 sreloc1->set_r_pcrel(false);
4349 sreloc1->set_r_length(2);
4350 if ( ref->getTarget().getScope() == ObjectFile::Atom::scopeTranslationUnit )
4351 sreloc1->set_r_type(ARM_RELOC_LOCAL_SECTDIFF);
4352 else
4353 sreloc1->set_r_type(ARM_RELOC_SECTDIFF);
4354 sreloc1->set_r_address(address);
4355 if ( ref->getTargetOffset() >= target.getSize() )
4356 sreloc1->set_r_value(target.getAddress());
4357 else
4358 sreloc1->set_r_value(target.getAddress()+ref->getTargetOffset());
4359 sreloc2->set_r_scattered(true);
4360 sreloc2->set_r_pcrel(false);
4361 sreloc2->set_r_length(2);
4362 sreloc2->set_r_type(ARM_RELOC_PAIR);
4363 sreloc2->set_r_address(0);
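// note: when the subtrahend is the PC of an instruction in __text, the ARM PC reads ahead
// of the instruction (8 bytes in ARM mode, 4 in Thumb mode), so that bias is removed from
// the "from" address below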
4364 if ( &ref->getFromTarget() == atom ) {
4365 unsigned int pcBaseOffset = atom->isThumb() ? 4 : 8;
4366 if ( (ref->getFromTargetOffset() > pcBaseOffset) && (strncmp(atom->getSectionName(), "__text", 6) == 0) ) {
4367 sreloc2->set_r_value(ref->getFromTarget().getAddress()+ref->getFromTargetOffset()-pcBaseOffset);
4368 }
4369 else
4370 sreloc2->set_r_value(ref->getFromTarget().getAddress()+ref->getFromTargetOffset());
4371 }
4372 else
4373 sreloc2->set_r_value(ref->getFromTarget().getAddress());
4374 fSectionRelocs.push_back(reloc2);
4375 fSectionRelocs.push_back(reloc1);
4376 return 2;
4377 }
4378
4379 case arm::kBranch24WeakImport:
4380 case arm::kBranch24:
4381 case arm::kDtraceProbeSite:
4382 case arm::kDtraceIsEnabledSite:
4383 if ( !isExtern && (ref->getTargetOffset() != 0) ) {
4384 // use scattered reloc if target offset is non-zero
4385 sreloc1->set_r_scattered(true);
4386 sreloc1->set_r_pcrel(true);
4387 sreloc1->set_r_length(2);
4388 sreloc1->set_r_type(ARM_RELOC_BR24);
4389 sreloc1->set_r_address(address);
4390 sreloc1->set_r_value(target.getAddress());
4391 }
4392 else {
4393 reloc1.set_r_address(address);
4394 reloc1.set_r_symbolnum(isExtern ? symbolIndex : sectionNum);
4395 reloc1.set_r_pcrel(true);
4396 reloc1.set_r_length(2);
4397 reloc1.set_r_extern(isExtern);
4398 reloc1.set_r_type(ARM_RELOC_BR24);
4399 }
4400 fSectionRelocs.push_back(reloc1);
4401 return 1;
4402
4403 case arm::kThumbBranch22WeakImport:
4404 case arm::kThumbBranch22:
4405 if ( !isExtern && (ref->getTargetOffset() != 0) ) {
4406 // use scattered reloc if target offset is non-zero
4407 sreloc1->set_r_scattered(true);
4408 sreloc1->set_r_pcrel(true);
4409 sreloc1->set_r_length(2);
4410 sreloc1->set_r_type(ARM_THUMB_RELOC_BR22);
4411 sreloc1->set_r_address(address);
4412 sreloc1->set_r_value(target.getAddress());
4413 }
4414 else {
4415 reloc1.set_r_address(address);
4416 reloc1.set_r_symbolnum(isExtern ? symbolIndex : sectionNum);
4417 reloc1.set_r_pcrel(true);
4418 reloc1.set_r_length(2);
4419 reloc1.set_r_extern(isExtern);
4420 reloc1.set_r_type(ARM_THUMB_RELOC_BR22);
4421 }
4422 fSectionRelocs.push_back(reloc1);
4423 return 1;
4424
4425 case arm::kPointerDiff12:
4426 throw "internal error. no reloc for 12-bit pointer diffs";
4427
4428 case arm::kDtraceTypeReference:
4429 case arm::kDtraceProbe:
4430 // generates no relocs
4431 return 0;
4432
4433 }
4434 return 0;
4435 }
4436
4437 template <> uint64_t Writer<ppc>::maxAddress() { return 0xFFFFFFFFULL; }
4438 template <> uint64_t Writer<ppc64>::maxAddress() { return 0xFFFFFFFFFFFFFFFFULL; }
4439 template <> uint64_t Writer<x86>::maxAddress() { return 0xFFFFFFFFULL; }
4440 template <> uint64_t Writer<x86_64>::maxAddress() { return 0xFFFFFFFFFFFFFFFFULL; }
4441 template <> uint64_t Writer<arm>::maxAddress() { return 0xFFFFFFFFULL; }
4442
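// r_length encodes log2 of the fixup size in bytes, so 2 means a 4-byte pointer (ppc)
// and 3 means an 8-byte pointer (ppc64).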
4443 template <>
4444 uint8_t Writer<ppc>::getRelocPointerSize()
4445 {
4446 return 2;
4447 }
4448
4449 template <>
4450 uint8_t Writer<ppc64>::getRelocPointerSize()
4451 {
4452 return 3;
4453 }
4454
4455 template <>
4456 uint32_t Writer<ppc>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4457 {
4458 return addObjectRelocs_powerpc(atom, ref);
4459 }
4460
4461 template <>
4462 uint32_t Writer<ppc64>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4463 {
4464 return addObjectRelocs_powerpc(atom, ref);
4465 }
4466
4467 //
4468 // addObjectRelocs<ppc> and addObjectRelocs<ppc64> are almost exactly the same, so
4469 // they use a common addObjectRelocs_powerpc() method.
4470 //
4471 template <typename A>
4472 uint32_t Writer<A>::addObjectRelocs_powerpc(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4473 {
4474 ObjectFile::Atom& target = ref->getTarget();
4475 bool isExtern = this->makesExternalRelocatableReference(target);
4476 uint32_t symbolIndex = 0;
4477 if ( isExtern )
4478 symbolIndex = this->symbolIndex(target);
4479 uint32_t sectionNum = target.getSection()->getIndex();
4480 uint32_t address = atom->getSectionOffset()+ref->getFixUpOffset();
4481 macho_relocation_info<P> reloc1;
4482 macho_relocation_info<P> reloc2;
4483 macho_scattered_relocation_info<P>* sreloc1 = (macho_scattered_relocation_info<P>*)&reloc1;
4484 macho_scattered_relocation_info<P>* sreloc2 = (macho_scattered_relocation_info<P>*)&reloc2;
4485 typename A::ReferenceKinds kind = (typename A::ReferenceKinds)ref->getKind();
4486
4487 switch ( kind ) {
4488 case A::kNoFixUp:
4489 case A::kFollowOn:
4490 case A::kGroupSubordinate:
4491 return 0;
4492
4493 case A::kPointer:
4494 case A::kPointerWeakImport:
4495 if ( !isExtern && (ref->getTargetOffset() >= target.getSize()) ) {
4496 // use scattered reloc if target offset is outside target
4497 sreloc1->set_r_scattered(true);
4498 sreloc1->set_r_pcrel(false);
4499 sreloc1->set_r_length(getRelocPointerSize());
4500 sreloc1->set_r_type(GENERIC_RELOC_VANILLA);
4501 sreloc1->set_r_address(address);
4502 sreloc1->set_r_value(target.getAddress());
4503 }
4504 else {
4505 reloc1.set_r_address(address);
4506 if ( isExtern )
4507 reloc1.set_r_symbolnum(symbolIndex);
4508 else
4509 reloc1.set_r_symbolnum(sectionNum);
4510 reloc1.set_r_pcrel(false);
4511 reloc1.set_r_length(getRelocPointerSize());
4512 reloc1.set_r_extern(isExtern);
4513 reloc1.set_r_type(GENERIC_RELOC_VANILLA);
4514 }
4515 fSectionRelocs.push_back(reloc1);
4516 return 1;
4517
4518 case A::kPointerDiff16:
4519 case A::kPointerDiff32:
4520 case A::kPointerDiff64:
4521 {
4522 sreloc1->set_r_scattered(true);
4523 sreloc1->set_r_pcrel(false);
4524 sreloc1->set_r_length( (kind == A::kPointerDiff32) ? 2 : ((kind == A::kPointerDiff64) ? 3 : 1));
4525 if ( ref->getTarget().getScope() == ObjectFile::Atom::scopeTranslationUnit )
4526 sreloc1->set_r_type(PPC_RELOC_LOCAL_SECTDIFF);
4527 else
4528 sreloc1->set_r_type(PPC_RELOC_SECTDIFF);
4529 sreloc1->set_r_address(address);
4530 sreloc1->set_r_value(target.getAddress());
4531 sreloc2->set_r_scattered(true);
4532 sreloc2->set_r_pcrel(false);
4533 sreloc2->set_r_length(sreloc1->r_length());
4534 sreloc2->set_r_type(PPC_RELOC_PAIR);
4535 sreloc2->set_r_address(0);
4536 sreloc2->set_r_value(ref->getFromTarget().getAddress()+ref->getFromTargetOffset());
4537 fSectionRelocs.push_back(reloc2);
4538 fSectionRelocs.push_back(reloc1);
4539 return 2;
4540 }
4541
4542 case A::kBranch24WeakImport:
4543 case A::kBranch24:
4544 case A::kDtraceProbeSite:
4545 case A::kDtraceIsEnabledSite:
4546 if ( (ref->getTargetOffset() == 0) || isExtern ) {
4547 reloc1.set_r_address(address);
4548 if ( isExtern )
4549 reloc1.set_r_symbolnum(symbolIndex);
4550 else
4551 reloc1.set_r_symbolnum(sectionNum);
4552 reloc1.set_r_pcrel(true);
4553 reloc1.set_r_length(2);
4554 reloc1.set_r_type(PPC_RELOC_BR24);
4555 reloc1.set_r_extern(isExtern);
4556 }
4557 else {
4558 sreloc1->set_r_scattered(true);
4559 sreloc1->set_r_pcrel(true);
4560 sreloc1->set_r_length(2);
4561 sreloc1->set_r_type(PPC_RELOC_BR24);
4562 sreloc1->set_r_address(address);
4563 sreloc1->set_r_value(target.getAddress());
4564 }
4565 fSectionRelocs.push_back(reloc1);
4566 return 1;
4567
4568 case A::kBranch14:
4569 if ( (ref->getTargetOffset() == 0) || isExtern ) {
4570 reloc1.set_r_address(address);
4571 if ( isExtern )
4572 reloc1.set_r_symbolnum(symbolIndex);
4573 else
4574 reloc1.set_r_symbolnum(sectionNum);
4575 reloc1.set_r_pcrel(true);
4576 reloc1.set_r_length(2);
4577 reloc1.set_r_type(PPC_RELOC_BR14);
4578 reloc1.set_r_extern(isExtern);
4579 }
4580 else {
4581 sreloc1->set_r_scattered(true);
4582 sreloc1->set_r_pcrel(true);
4583 sreloc1->set_r_length(2);
4584 sreloc1->set_r_type(PPC_RELOC_BR14);
4585 sreloc1->set_r_address(address);
4586 sreloc1->set_r_value(target.getAddress());
4587 }
4588 fSectionRelocs.push_back(reloc1);
4589 return 1;
4590
4591 case A::kPICBaseLow16:
4592 case A::kPICBaseLow14:
4593 {
4594 pint_t fromAddr = atom->getAddress() + ref->getFromTargetOffset();
4595 pint_t toAddr = target.getAddress() + ref->getTargetOffset();
4596 sreloc1->set_r_scattered(true);
4597 sreloc1->set_r_pcrel(false);
4598 sreloc1->set_r_length(2);
4599 sreloc1->set_r_type(kind == A::kPICBaseLow16 ? PPC_RELOC_LO16_SECTDIFF : PPC_RELOC_LO14_SECTDIFF);
4600 sreloc1->set_r_address(address);
4601 sreloc1->set_r_value(target.getAddress());
4602 sreloc2->set_r_scattered(true);
4603 sreloc2->set_r_pcrel(false);
4604 sreloc2->set_r_length(2);
4605 sreloc2->set_r_type(PPC_RELOC_PAIR);
4606 sreloc2->set_r_address(((toAddr-fromAddr) >> 16) & 0xFFFF);
4607 sreloc2->set_r_value(fromAddr);
4608 fSectionRelocs.push_back(reloc2);
4609 fSectionRelocs.push_back(reloc1);
4610 return 2;
4611 }
4612
4613 case A::kPICBaseHigh16:
4614 {
4615 pint_t fromAddr = atom->getAddress() + ref->getFromTargetOffset();
4616 pint_t toAddr = target.getAddress() + ref->getTargetOffset();
4617 sreloc1->set_r_scattered(true);
4618 sreloc1->set_r_pcrel(false);
4619 sreloc1->set_r_length(2);
4620 sreloc1->set_r_type(PPC_RELOC_HA16_SECTDIFF);
4621 sreloc1->set_r_address(address);
4622 sreloc1->set_r_value(target.getAddress());
4623 sreloc2->set_r_scattered(true);
4624 sreloc2->set_r_pcrel(false);
4625 sreloc2->set_r_length(2);
4626 sreloc2->set_r_type(PPC_RELOC_PAIR);
4627 sreloc2->set_r_address((toAddr-fromAddr) & 0xFFFF);
4628 sreloc2->set_r_value(fromAddr);
4629 fSectionRelocs.push_back(reloc2);
4630 fSectionRelocs.push_back(reloc1);
4631 return 2;
4632 }
4633
4634 case A::kAbsLow14:
4635 case A::kAbsLow16:
4636 {
4637 pint_t toAddr = target.getAddress() + ref->getTargetOffset();
4638 if ( (ref->getTargetOffset() == 0) || isExtern ) {
4639 reloc1.set_r_address(address);
4640 if ( isExtern )
4641 reloc1.set_r_symbolnum(symbolIndex);
4642 else
4643 reloc1.set_r_symbolnum(sectionNum);
4644 reloc1.set_r_pcrel(false);
4645 reloc1.set_r_length(2);
4646 reloc1.set_r_extern(isExtern);
4647 reloc1.set_r_type(kind==A::kAbsLow16 ? PPC_RELOC_LO16 : PPC_RELOC_LO14);
4648 }
4649 else {
4650 sreloc1->set_r_scattered(true);
4651 sreloc1->set_r_pcrel(false);
4652 sreloc1->set_r_length(2);
4653 sreloc1->set_r_type(kind==A::kAbsLow16 ? PPC_RELOC_LO16 : PPC_RELOC_LO14);
4654 sreloc1->set_r_address(address);
4655 sreloc1->set_r_value(target.getAddress());
4656 }
4657 if ( isExtern )
4658 reloc2.set_r_address(ref->getTargetOffset() >> 16);
4659 else
4660 reloc2.set_r_address(toAddr >> 16);
4661 reloc2.set_r_symbolnum(0);
4662 reloc2.set_r_pcrel(false);
4663 reloc2.set_r_length(2);
4664 reloc2.set_r_extern(false);
4665 reloc2.set_r_type(PPC_RELOC_PAIR);
4666 fSectionRelocs.push_back(reloc2);
4667 fSectionRelocs.push_back(reloc1);
4668 return 2;
4669 }
4670
4671 case A::kAbsHigh16:
4672 {
4673 pint_t toAddr = target.getAddress() + ref->getTargetOffset();
4674 if ( (ref->getTargetOffset() == 0) || isExtern ) {
4675 reloc1.set_r_address(address);
4676 if ( isExtern )
4677 reloc1.set_r_symbolnum(symbolIndex);
4678 else
4679 reloc1.set_r_symbolnum(sectionNum);
4680 reloc1.set_r_pcrel(false);
4681 reloc1.set_r_length(2);
4682 reloc1.set_r_extern(isExtern);
4683 reloc1.set_r_type(PPC_RELOC_HI16);
4684 }
4685 else {
4686 sreloc1->set_r_scattered(true);
4687 sreloc1->set_r_pcrel(false);
4688 sreloc1->set_r_length(2);
4689 sreloc1->set_r_type(PPC_RELOC_HI16);
4690 sreloc1->set_r_address(address);
4691 sreloc1->set_r_value(target.getAddress());
4692 }
4693 if ( isExtern )
4694 reloc2.set_r_address(ref->getTargetOffset() & 0xFFFF);
4695 else
4696 reloc2.set_r_address(toAddr & 0xFFFF);
4697 reloc2.set_r_symbolnum(0);
4698 reloc2.set_r_pcrel(false);
4699 reloc2.set_r_length(2);
4700 reloc2.set_r_extern(false);
4701 reloc2.set_r_type(PPC_RELOC_PAIR);
4702 fSectionRelocs.push_back(reloc2);
4703 fSectionRelocs.push_back(reloc1);
4704 return 2;
4705 }
4706
4707 case A::kAbsHigh16AddLow:
4708 {
4709 pint_t toAddr = target.getAddress() + ref->getTargetOffset();
4710 uint32_t overflow = 0;
4711 if ( (toAddr & 0x00008000) != 0 )
4712 overflow = 0x10000;
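// note: for HA16 ("high adjusted") fixups the low 16 bits are later added as a signed
// value, so a set bit 15 in the target address effectively carries +1 into the high
// half; that is what the 0x10000 overflow computed above accounts for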
4713 if ( (ref->getTargetOffset() == 0) || isExtern ) {
4714 reloc1.set_r_address(address);
4715 if ( isExtern )
4716 reloc1.set_r_symbolnum(symbolIndex);
4717 else
4718 reloc1.set_r_symbolnum(sectionNum);
4719 reloc1.set_r_pcrel(false);
4720 reloc1.set_r_length(2);
4721 reloc1.set_r_extern(isExtern);
4722 reloc1.set_r_type(PPC_RELOC_HA16);
4723 }
4724 else {
4725 sreloc1->set_r_scattered(true);
4726 sreloc1->set_r_pcrel(false);
4727 sreloc1->set_r_length(2);
4728 sreloc1->set_r_type(PPC_RELOC_HA16);
4729 sreloc1->set_r_address(address);
4730 sreloc1->set_r_value(target.getAddress());
4731 }
4732 if ( isExtern )
4733 reloc2.set_r_address(ref->getTargetOffset() & 0xFFFF);
4734 else
4735 reloc2.set_r_address(toAddr & 0xFFFF);
4736 reloc2.set_r_symbolnum(0);
4737 reloc2.set_r_pcrel(false);
4738 reloc2.set_r_length(2);
4739 reloc2.set_r_extern(false);
4740 reloc2.set_r_type(PPC_RELOC_PAIR);
4741 fSectionRelocs.push_back(reloc2);
4742 fSectionRelocs.push_back(reloc1);
4743 return 2;
4744 }
4745
4746 case A::kDtraceTypeReference:
4747 case A::kDtraceProbe:
4748 // generates no relocs
4749 return 0;
4750 }
4751 return 0;
4752 }
4753
4754
4755
4756 //
4757 // There are cases when an entry in the indirect symbol table is the magic value
4758 // INDIRECT_SYMBOL_LOCAL instead of being a symbol index. When that happens
4759 // the content of the corresponding part of the __nl_symbol_pointer section
4760 // must also change.
4761 //
4762 template <typename A>
4763 bool Writer<A>::indirectSymbolInRelocatableIsLocal(const ObjectFile::Reference* ref) const
4764 {
4765 // cannot use INDIRECT_SYMBOL_LOCAL to tentative definitions in object files
4766 // because tentative defs don't have addresses
4767 if ( ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kTentativeDefinition )
4768 return false;
4769
4770 // must use INDIRECT_SYMBOL_LOCAL if there is an addend
4771 if ( ref->getTargetOffset() != 0 )
4772 return true;
4773
4774 // don't use INDIRECT_SYMBOL_LOCAL for external symbols
4775 return ! this->shouldExport(ref->getTarget());
4776 }
4777
4778
4779 template <typename A>
4780 void Writer<A>::buildObjectFileFixups()
4781 {
4782 uint32_t relocIndex = 0;
4783 std::vector<SegmentInfo*>& segmentInfos = fSegmentInfos;
4784 const int segCount = segmentInfos.size();
4785 for(int i=0; i < segCount; ++i) {
4786 SegmentInfo* curSegment = segmentInfos[i];
4787 std::vector<SectionInfo*>& sectionInfos = curSegment->fSections;
4788 const int sectionCount = sectionInfos.size();
4789 for(int j=0; j < sectionCount; ++j) {
4790 SectionInfo* curSection = sectionInfos[j];
4791 //fprintf(stderr, "buildObjectFileFixups(): starting section %s\n", curSection->fSectionName);
4792 std::vector<ObjectFile::Atom*>& sectionAtoms = curSection->fAtoms;
4793 if ( ! curSection->fAllZeroFill ) {
4794 if ( curSection->fAllNonLazyPointers || curSection->fAllLazyPointers
4795 || curSection->fAllLazyDylibPointers || curSection->fAllStubs )
4796 curSection->fIndirectSymbolOffset = fIndirectTableAtom->fTable.size();
4797 curSection->fRelocOffset = relocIndex;
4798 const int atomCount = sectionAtoms.size();
4799 for (int k=0; k < atomCount; ++k) {
4800 ObjectFile::Atom* atom = sectionAtoms[k];
4801 //fprintf(stderr, "buildObjectFileFixups(): atom %s has %lu references\n", atom->getDisplayName(), atom->getReferences().size());
4802 std::vector<ObjectFile::Reference*>& refs = atom->getReferences();
4803 const int refCount = refs.size();
4804 for (int l=0; l < refCount; ++l) {
4805 ObjectFile::Reference* ref = refs[l];
4806 if ( curSection->fAllNonLazyPointers || curSection->fAllLazyPointers
4807 || curSection->fAllLazyDylibPointers || curSection->fAllStubs ) {
4808 uint32_t offsetInSection = atom->getSectionOffset();
4809 uint32_t indexInSection = offsetInSection / atom->getSize();
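// all atoms in a stub/pointer section have the same fixed size, so dividing the
// section offset by the atom size yields this atom's slot in the indirect symbol table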
4810 uint32_t undefinedSymbolIndex;
4811 if ( curSection->fAllStubs ) {
4812 ObjectFile::Atom& stubTarget = ref->getTarget();
4813 ObjectFile::Atom& stubTargetTarget = stubTarget.getReferences()[0]->getTarget();
4814 undefinedSymbolIndex = this->symbolIndex(stubTargetTarget);
4815 //fprintf(stderr, "stub %s ==> %s ==> %s ==> index:%u\n", atom->getDisplayName(), stubTarget.getDisplayName(), stubTargetTarget.getDisplayName(), undefinedSymbolIndex);
4816 }
4817 else if ( curSection->fAllNonLazyPointers) {
4818 // only use INDIRECT_SYMBOL_LOCAL in non-lazy-pointers for atoms that won't be in symbol table or have an addend
4819 if ( this->indirectSymbolInRelocatableIsLocal(ref) )
4820 undefinedSymbolIndex = INDIRECT_SYMBOL_LOCAL;
4821 else
4822 undefinedSymbolIndex = this->symbolIndex(ref->getTarget());
4823 }
4824 else {
4825 // should never get here, fAllLazyPointers not used in generated .o files
4826 undefinedSymbolIndex = INDIRECT_SYMBOL_LOCAL;
4827 }
4828 uint32_t indirectTableIndex = indexInSection + curSection->fIndirectSymbolOffset;
4829 IndirectEntry entry = { indirectTableIndex, undefinedSymbolIndex };
4830 //printf("fIndirectTableAtom->fTable.add(sectionIndex=%u, indirectTableIndex=%u => %u), size=%lld\n", indexInSection, indirectTableIndex, undefinedSymbolIndex, atom->getSize());
4831 fIndirectTableAtom->fTable.push_back(entry);
4832 if ( curSection->fAllLazyPointers ) {
4833 ObjectFile::Atom& target = ref->getTarget();
4834 ObjectFile::Atom& fromTarget = ref->getFromTarget();
4835 if ( &fromTarget == NULL ) {
4836 warning("lazy pointer %s missing initial binding", atom->getDisplayName());
4837 }
4838 else {
4839 bool isExtern = ( ((target.getDefinitionKind() == ObjectFile::Atom::kExternalDefinition)
4840 || (target.getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition))
4841 && (target.getSymbolTableInclusion() != ObjectFile::Atom::kSymbolTableNotIn) );
4842 macho_relocation_info<P> reloc1;
4843 reloc1.set_r_address(atom->getSectionOffset());
4844 reloc1.set_r_symbolnum(isExtern ? this->symbolIndex(target) : target.getSection()->getIndex());
4845 reloc1.set_r_pcrel(false);
4846 reloc1.set_r_length();
4847 reloc1.set_r_extern(isExtern);
4848 reloc1.set_r_type(GENERIC_RELOC_VANILLA);
4849 fSectionRelocs.push_back(reloc1);
4850 ++relocIndex;
4851 }
4852 }
4853 else if ( curSection->fAllStubs ) {
4854 relocIndex += this->addObjectRelocs(atom, ref);
4855 }
4856 }
4857 else if ( (ref->getKind() != A::kNoFixUp) && (ref->getTargetBinding() != ObjectFile::Reference::kDontBind) ) {
4858 relocIndex += this->addObjectRelocs(atom, ref);
4859 }
4860 }
4861 }
4862 curSection->fRelocCount = relocIndex - curSection->fRelocOffset;
4863 }
4864 }
4865 }
4866
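// each reloc pair was pushed PAIR-first above, so reversing the whole vector leaves every
// primary reloc immediately followed by its PAIR, as the reloc format expects; the
// per-section offsets then need to be recomputed to match the new order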
4867 // reverse the relocs
4868 std::reverse(fSectionRelocs.begin(), fSectionRelocs.end());
4869
4870 // now reverse section reloc offsets
4871 for(int i=0; i < segCount; ++i) {
4872 SegmentInfo* curSegment = segmentInfos[i];
4873 std::vector<SectionInfo*>& sectionInfos = curSegment->fSections;
4874 const int sectionCount = sectionInfos.size();
4875 for(int j=0; j < sectionCount; ++j) {
4876 SectionInfo* curSection = sectionInfos[j];
4877 curSection->fRelocOffset = relocIndex - curSection->fRelocOffset - curSection->fRelocCount;
4878 }
4879 }
4880
4881 }
4882
4883
4884 template <>
4885 uint64_t Writer<x86_64>::relocAddressInFinalLinkedImage(uint64_t address, const ObjectFile::Atom* atom) const
4886 {
4887 uint64_t result;
4888 if ( fOptions.outputKind() == Options::kKextBundle ) {
4889 // for x86_64 kext bundles, the r_address field in relocs
4890 // is the offset from the start address of the first segment
4891 result = address - fSegmentInfos[0]->fBaseAddress;
4892 if ( result > 0xFFFFFFFF ) {
4893 throwf("kext bundle too large: address can't fit in 31-bit r_address field in %s from %s",
4894 atom->getDisplayName(), atom->getFile()->getPath());
4895 }
4896 }
4897 else {
4898 // for x86_64, the r_address field in relocs for final linked images
4899 // is the offset from the start address of the first writable segment
4900 result = address - fFirstWritableSegment->fBaseAddress;
4901 if ( result > 0xFFFFFFFF ) {
4902 if ( strcmp(atom->getSegment().getName(), "__TEXT") == 0 )
4903 throwf("text relocs not supported for x86_64 in %s from %s",
4904 atom->getDisplayName(), atom->getFile()->getPath());
4905 else
4906 throwf("image too large: address can't fit in 32-bit r_address field in %s from %s",
4907 atom->getDisplayName(), atom->getFile()->getPath());
4908 }
4909 }
4910 return result;
4911 }
4912
4913
4914 template <>
4915 bool Writer<ppc>::illegalRelocInFinalLinkedImage(const ObjectFile::Reference& ref)
4916 {
4917 switch ( ref.getKind() ) {
4918 case ppc::kAbsLow16:
4919 case ppc::kAbsLow14:
4920 case ppc::kAbsHigh16:
4921 case ppc::kAbsHigh16AddLow:
4922 if ( fSlideable )
4923 return true;
4924 }
4925 return false;
4926 }
4927
4928
4929 template <>
4930 bool Writer<ppc64>::illegalRelocInFinalLinkedImage(const ObjectFile::Reference& ref)
4931 {
4932 switch ( ref.getKind() ) {
4933 case ppc::kAbsLow16:
4934 case ppc::kAbsLow14:
4935 case ppc::kAbsHigh16:
4936 case ppc::kAbsHigh16AddLow:
4937 if ( fSlideable )
4938 return true;
4939 }
4940 return false;
4941 }
4942
4943 template <>
4944 bool Writer<x86>::illegalRelocInFinalLinkedImage(const ObjectFile::Reference& ref)
4945 {
4946 if ( ref.getKind() == x86::kAbsolute32 ) {
4947 switch ( ref.getTarget().getDefinitionKind() ) {
4948 case ObjectFile::Atom::kTentativeDefinition:
4949 case ObjectFile::Atom::kRegularDefinition:
4950 case ObjectFile::Atom::kWeakDefinition:
4951 // illegal in dylibs/bundles, until we support TEXT relocs
4952 return fSlideable;
4953 case ObjectFile::Atom::kExternalDefinition:
4954 case ObjectFile::Atom::kExternalWeakDefinition:
4955 // illegal until we support TEXT relocs
4956 return true;
4957 case ObjectFile::Atom::kAbsoluteSymbol:
4958 // absolute symbols only allowed in static executables
4959 return ( fOptions.outputKind() != Options::kStaticExecutable);
4960 }
4961 }
4962 return false;
4963 }
4964
4965 template <>
4966 bool Writer<x86_64>::illegalRelocInFinalLinkedImage(const ObjectFile::Reference& ref)
4967 {
4968 if ( fOptions.outputKind() == Options::kKextBundle ) {
4969 switch ( ref.getTarget().getDefinitionKind() ) {
4970 case ObjectFile::Atom::kTentativeDefinition:
4971 case ObjectFile::Atom::kRegularDefinition:
4972 case ObjectFile::Atom::kWeakDefinition:
4973 case ObjectFile::Atom::kAbsoluteSymbol:
4974 return false;
4975 case ObjectFile::Atom::kExternalDefinition:
4976 case ObjectFile::Atom::kExternalWeakDefinition:
4977 // true means we need a TEXT reloc
4978 switch ( ref.getKind() ) {
4979 case x86_64::kBranchPCRel32:
4980 case x86_64::kBranchPCRel32WeakImport:
4981 case x86_64::kPCRel32GOTLoad:
4982 case x86_64::kPCRel32GOTLoadWeakImport:
4983 case x86_64::kPCRel32GOT:
4984 case x86_64::kPCRel32GOTWeakImport:
4985 return true;
4986 }
4987 break;
4988 }
4989 }
4990 return false;
4991 }
4992
4993 template <>
4994 bool Writer<arm>::illegalRelocInFinalLinkedImage(const ObjectFile::Reference& ref)
4995 {
4996 switch ( fOptions.outputKind()) {
4997 case Options::kStaticExecutable:
4998 case Options::kPreload:
4999 // all relocations allowed in static executables
5000 return false;
5001 default:
5002 break;
5003 }
5004 if ( ref.getKind() == arm::kReadOnlyPointer ) {
5005 switch ( ref.getTarget().getDefinitionKind() ) {
5006 case ObjectFile::Atom::kTentativeDefinition:
5007 case ObjectFile::Atom::kRegularDefinition:
5008 case ObjectFile::Atom::kWeakDefinition:
5009 // illegal in dylibs/bundles, until we support TEXT relocs
5010 return fSlideable;
5011 case ObjectFile::Atom::kExternalDefinition:
5012 case ObjectFile::Atom::kExternalWeakDefinition:
5013 // illegal until we support TEXT relocs
5014 return true;
5015 case ObjectFile::Atom::kAbsoluteSymbol:
5016 // absolute symbols only allowed in static executables
5017 return true;
5018 }
5019 }
5020 return false;
5021 }
5022
5023 template <>
5024 bool Writer<x86>::generatesLocalTextReloc(const ObjectFile::Reference& ref, const ObjectFile::Atom& atom, SectionInfo* atomSection)
5025 {
5026 if ( ref.getKind() == x86::kAbsolute32 ) {
5027 switch ( ref.getTarget().getDefinitionKind() ) {
5028 case ObjectFile::Atom::kTentativeDefinition:
5029 case ObjectFile::Atom::kRegularDefinition:
5030 case ObjectFile::Atom::kWeakDefinition:
5031 // a reference to the absolute address of something in this same linkage unit can be
5032 // encoded as a local text reloc in a dylib or bundle
5033 if ( fSlideable ) {
5034 macho_relocation_info<P> reloc;
5035 SectionInfo* sectInfo = (SectionInfo*)(ref.getTarget().getSection());
5036 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5037 reloc.set_r_symbolnum(sectInfo->getIndex());
5038 reloc.set_r_pcrel(false);
5039 reloc.set_r_length();
5040 reloc.set_r_extern(false);
5041 reloc.set_r_type(GENERIC_RELOC_VANILLA);
5042 fInternalRelocs.push_back(reloc);
5043 atomSection->fHasTextLocalRelocs = true;
5044 if ( fOptions.makeCompressedDyldInfo() ) {
5045 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_TEXT_ABSOLUTE32, atom.getAddress() + ref.getFixUpOffset()));
5046 }
5047 return true;
5048 }
5049 return false;
5050 case ObjectFile::Atom::kExternalDefinition:
5051 case ObjectFile::Atom::kExternalWeakDefinition:
5052 case ObjectFile::Atom::kAbsoluteSymbol:
5053 return false;
5054 }
5055 }
5056 return false;
5057 }
5058
5059 template <>
5060 bool Writer<ppc>::generatesLocalTextReloc(const ObjectFile::Reference& ref, const ObjectFile::Atom& atom, SectionInfo* atomSection)
5061 {
5062 macho_relocation_info<P> reloc1;
5063 macho_relocation_info<P> reloc2;
5064 switch ( ref.getTarget().getDefinitionKind() ) {
5065 case ObjectFile::Atom::kTentativeDefinition:
5066 case ObjectFile::Atom::kRegularDefinition:
5067 case ObjectFile::Atom::kWeakDefinition:
5068 switch ( ref.getKind() ) {
5069 case ppc::kAbsLow16:
5070 case ppc::kAbsLow14:
5071 // a reference to the absolute address of something in this same linkage unit can be
5072 // encoded as a local text reloc in a dylib or bundle
5073 if ( fSlideable ) {
5074 SectionInfo* sectInfo = (SectionInfo*)(ref.getTarget().getSection());
5075 uint32_t targetAddr = ref.getTarget().getAddress() + ref.getTargetOffset();
5076 reloc1.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5077 reloc1.set_r_symbolnum(sectInfo->getIndex());
5078 reloc1.set_r_pcrel(false);
5079 reloc1.set_r_length(2);
5080 reloc1.set_r_extern(false);
5081 reloc1.set_r_type(ref.getKind()==ppc::kAbsLow16 ? PPC_RELOC_LO16 : PPC_RELOC_LO14);
5082 reloc2.set_r_address(targetAddr >> 16);
5083 reloc2.set_r_symbolnum(0);
5084 reloc2.set_r_pcrel(false);
5085 reloc2.set_r_length(2);
5086 reloc2.set_r_extern(false);
5087 reloc2.set_r_type(PPC_RELOC_PAIR);
5088 fInternalRelocs.push_back(reloc1);
5089 fInternalRelocs.push_back(reloc2);
5090 atomSection->fHasTextLocalRelocs = true;
5091 return true;
5092 }
5093 break;
5094 case ppc::kAbsHigh16:
5095 case ppc::kAbsHigh16AddLow:
5096 if ( fSlideable ) {
5097 SectionInfo* sectInfo = (SectionInfo*)(ref.getTarget().getSection());
5098 uint32_t targetAddr = ref.getTarget().getAddress() + ref.getTargetOffset();
5099 reloc1.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5100 reloc1.set_r_symbolnum(sectInfo->getIndex());
5101 reloc1.set_r_pcrel(false);
5102 reloc1.set_r_length(2);
5103 reloc1.set_r_extern(false);
5104 reloc1.set_r_type(ref.getKind()==ppc::kAbsHigh16AddLow ? PPC_RELOC_HA16 : PPC_RELOC_HI16);
5105 reloc2.set_r_address(targetAddr & 0xFFFF);
5106 reloc2.set_r_symbolnum(0);
5107 reloc2.set_r_pcrel(false);
5108 reloc2.set_r_length(2);
5109 reloc2.set_r_extern(false);
5110 reloc2.set_r_type(PPC_RELOC_PAIR);
5111 fInternalRelocs.push_back(reloc1);
5112 fInternalRelocs.push_back(reloc2);
5113 atomSection->fHasTextLocalRelocs = true;
5114 return true;
5115 }
5116 }
5117 break;
5118 case ObjectFile::Atom::kExternalDefinition:
5119 case ObjectFile::Atom::kExternalWeakDefinition:
5120 case ObjectFile::Atom::kAbsoluteSymbol:
5121 return false;
5122 }
5123 return false;
5124 }
5125
5126 template <>
5127 bool Writer<arm>::generatesLocalTextReloc(const ObjectFile::Reference& ref, const ObjectFile::Atom& atom, SectionInfo* atomSection)
5128 {
5129 if ( ref.getKind() == arm::kReadOnlyPointer ) {
5130 switch ( ref.getTarget().getDefinitionKind() ) {
5131 case ObjectFile::Atom::kTentativeDefinition:
5132 case ObjectFile::Atom::kRegularDefinition:
5133 case ObjectFile::Atom::kWeakDefinition:
5134 // a reference to the absolute address of something in this same linkage unit can be
5135 // encoded as a local text reloc in a dylib or bundle
5136 if ( fSlideable ) {
5137 macho_relocation_info<P> reloc;
5138 SectionInfo* sectInfo = (SectionInfo*)(ref.getTarget().getSection());
5139 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5140 reloc.set_r_symbolnum(sectInfo->getIndex());
5141 reloc.set_r_pcrel(false);
5142 reloc.set_r_length();
5143 reloc.set_r_extern(false);
5144 reloc.set_r_type(GENERIC_RELOC_VANILLA);
5145 fInternalRelocs.push_back(reloc);
5146 atomSection->fHasTextLocalRelocs = true;
5147 if ( fOptions.makeCompressedDyldInfo() ) {
5148 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_TEXT_ABSOLUTE32, atom.getAddress() + ref.getFixUpOffset()));
5149 }
5150 return true;
5151 }
5152 return false;
5153 case ObjectFile::Atom::kExternalDefinition:
5154 case ObjectFile::Atom::kExternalWeakDefinition:
5155 case ObjectFile::Atom::kAbsoluteSymbol:
5156 return false;
5157 }
5158 }
5159 return false;
5160 }
5161
5162
5163 template <>
5164 bool Writer<x86_64>::generatesLocalTextReloc(const ObjectFile::Reference&, const ObjectFile::Atom& atom, SectionInfo* curSection)
5165 {
5166 // text relocs not supported (usually never needed because of RIP addressing)
5167 return false;
5168 }
5169
5170 template <>
5171 bool Writer<ppc64>::generatesLocalTextReloc(const ObjectFile::Reference&, const ObjectFile::Atom& atom, SectionInfo* curSection)
5172 {
5173 // text relocs not supported
5174 return false;
5175 }
5176
5177 template <>
5178 bool Writer<x86>::generatesExternalTextReloc(const ObjectFile::Reference& ref, const ObjectFile::Atom& atom, SectionInfo* atomSection)
5179 {
5180 if ( ref.getKind() == x86::kAbsolute32 ) {
5181 macho_relocation_info<P> reloc;
5182 switch ( ref.getTarget().getDefinitionKind() ) {
5183 case ObjectFile::Atom::kTentativeDefinition:
5184 case ObjectFile::Atom::kRegularDefinition:
5185 case ObjectFile::Atom::kWeakDefinition:
5186 return false;
5187 case ObjectFile::Atom::kExternalDefinition:
5188 case ObjectFile::Atom::kExternalWeakDefinition:
5189 // a reference to the absolute address of something in another linkage unit can be
5190 // encoded as an external text reloc in a dylib or bundle
5191 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5192 reloc.set_r_symbolnum(this->symbolIndex(ref.getTarget()));
5193 reloc.set_r_pcrel(false);
5194 reloc.set_r_length();
5195 reloc.set_r_extern(true);
5196 reloc.set_r_type(GENERIC_RELOC_VANILLA);
5197 fExternalRelocs.push_back(reloc);
5198 atomSection->fHasTextExternalRelocs = true;
5199 return true;
5200 case ObjectFile::Atom::kAbsoluteSymbol:
5201 return false;
5202 }
5203 }
5204 return false;
5205 }
5206
5207 template <>
5208 bool Writer<x86_64>::generatesExternalTextReloc(const ObjectFile::Reference& ref, const ObjectFile::Atom& atom, SectionInfo* atomSection)
5209 {
5210 if ( fOptions.outputKind() == Options::kKextBundle ) {
5211 macho_relocation_info<P> reloc;
5212 switch ( ref.getTarget().getDefinitionKind() ) {
5213 case ObjectFile::Atom::kTentativeDefinition:
5214 case ObjectFile::Atom::kRegularDefinition:
5215 case ObjectFile::Atom::kWeakDefinition:
5216 case ObjectFile::Atom::kAbsoluteSymbol:
5217 return false;
5218 case ObjectFile::Atom::kExternalDefinition:
5219 case ObjectFile::Atom::kExternalWeakDefinition:
5220 switch ( ref.getKind() ) {
5221 case x86_64::kBranchPCRel32:
5222 case x86_64::kBranchPCRel32WeakImport:
5223 // a branch to something in another linkage unit is
5224 // encoded as an external text reloc in a kext bundle
5225 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5226 reloc.set_r_symbolnum(this->symbolIndex(ref.getTarget()));
5227 reloc.set_r_pcrel(true);
5228 reloc.set_r_length(2);
5229 reloc.set_r_extern(true);
5230 reloc.set_r_type(X86_64_RELOC_BRANCH);
5231 fExternalRelocs.push_back(reloc);
5232 atomSection->fHasTextExternalRelocs = true;
5233 return true;
5234 case x86_64::kPCRel32GOTLoad:
5235 case x86_64::kPCRel32GOTLoadWeakImport:
5236 // a load of the GOT entry for a symbol in another linkage unit is
5237 // encoded as an external text reloc in a kext bundle
5238 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5239 reloc.set_r_symbolnum(this->symbolIndex(ref.getTarget()));
5240 reloc.set_r_pcrel(true);
5241 reloc.set_r_length(2);
5242 reloc.set_r_extern(true);
5243 reloc.set_r_type(X86_64_RELOC_GOT_LOAD);
5244 fExternalRelocs.push_back(reloc);
5245 atomSection->fHasTextExternalRelocs = true;
5246 return true;
5247 case x86_64::kPCRel32GOT:
5248 case x86_64::kPCRel32GOTWeakImport:
5249 // a use of the GOT entry for a symbol in another linkage unit is
5250 // encoded as an external text reloc in a kext bundle
5251 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5252 reloc.set_r_symbolnum(this->symbolIndex(ref.getTarget()));
5253 reloc.set_r_pcrel(true);
5254 reloc.set_r_length(2);
5255 reloc.set_r_extern(true);
5256 reloc.set_r_type(X86_64_RELOC_GOT);
5257 fExternalRelocs.push_back(reloc);
5258 atomSection->fHasTextExternalRelocs = true;
5259 return true;
5260 }
5261 break;
5262 }
5263 }
5264 return false;
5265 }
5266
5267
5268 template <typename A>
5269 bool Writer<A>::generatesExternalTextReloc(const ObjectFile::Reference&, const ObjectFile::Atom& atom, SectionInfo* curSection)
5270 {
5271 return false;
5272 }
5273
5274
5275
5276
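//
// Decide what kind of classic relocation, if any, a pointer to 'target' needs in the final
// linked image: kRelocNone (the address is fixed), kRelocInternal (dyld rebases it if the
// image slides), or kRelocExternal (dyld binds it to a symbol at load time).
//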
5277 template <typename A>
5278 typename Writer<A>::RelocKind Writer<A>::relocationNeededInFinalLinkedImage(const ObjectFile::Atom& target) const
5279 {
5280 switch ( target.getDefinitionKind() ) {
5281 case ObjectFile::Atom::kTentativeDefinition:
5282 case ObjectFile::Atom::kRegularDefinition:
5283 // in main executables, the only way regular symbols are indirected is if -interposable is used
5284 if ( fOptions.outputKind() == Options::kDynamicExecutable ) {
5285 if ( this->shouldExport(target) && fOptions.interposable(target.getName()) )
5286 return kRelocExternal;
5287 else if ( fSlideable )
5288 return kRelocInternal;
5289 else
5290 return kRelocNone;
5291 }
5292 // for flat-namespace or interposable two-level-namespace
5293 // all references to exported symbols get indirected
5294 else if ( this->shouldExport(target) &&
5295 ((fOptions.nameSpace() == Options::kFlatNameSpace)
5296 || (fOptions.nameSpace() == Options::kForceFlatNameSpace)
5297 || fOptions.interposable(target.getName()))
5298 && (target.getName() != NULL)
5299 && (strncmp(target.getName(), ".objc_class_", 12) != 0) ) // <rdar://problem/5254468>
5300 return kRelocExternal;
5301 else if ( fSlideable )
5302 return kRelocInternal;
5303 else
5304 return kRelocNone;
5305 case ObjectFile::Atom::kWeakDefinition:
5306 // in static executables, references to weak definitions are not indirected
5307 if ( fOptions.outputKind() == Options::kStaticExecutable)
5308 return kRelocNone;
5309 // in dynamic code, all calls to global weak definitions get indirected
5310 if ( this->shouldExport(target) )
5311 return kRelocExternal;
5312 else if ( fSlideable )
5313 return kRelocInternal;
5314 else
5315 return kRelocNone;
5316 case ObjectFile::Atom::kExternalDefinition:
5317 case ObjectFile::Atom::kExternalWeakDefinition:
5318 return kRelocExternal;
5319 case ObjectFile::Atom::kAbsoluteSymbol:
5320 return kRelocNone;
5321 }
5322 return kRelocNone;
5323 }
5324
5325 template <typename A>
5326 uint64_t Writer<A>::relocAddressInFinalLinkedImage(uint64_t address, const ObjectFile::Atom* atom) const
5327 {
5328 // for 32-bit architectures, the r_address field in relocs
5329 // for final linked images is the offset from the first segment
5330 uint64_t result = address - fSegmentInfos[0]->fBaseAddress;
5331 if ( fOptions.outputKind() == Options::kPreload ) {
5332 // kPreload uses a virtual __HEADER segment to cover the load commands
5333 result = address - fSegmentInfos[1]->fBaseAddress;
5334 }
5335 // or the offset from the first writable segment if built split-seg
5336 if ( fOptions.splitSeg() )
5337 result = address - fFirstWritableSegment->fBaseAddress;
5338 if ( result > 0x7FFFFFFF ) {
5339 throwf("image too large: address can't fit in 31-bit r_address field in %s from %s",
5340 atom->getDisplayName(), atom->getFile()->getPath());
5341 }
5342 return result;
5343 }
5344
5345 template <>
5346 uint64_t Writer<ppc64>::relocAddressInFinalLinkedImage(uint64_t address, const ObjectFile::Atom* atom) const
5347 {
5348 // for ppc64, the Mac OS X 10.4 dyld assumes r_address is always the offset from the base address.
5349 // the 10.5 dyld interprets the r_address as:
5350 // 1) an offset from the base address, iff there are no writable segments with an address > 4GB from the base address, otherwise
5351 // 2) an offset from the base address of the first writable segment
5352 // For dyld, r_address is always the offset from the base address
5353 uint64_t result;
5354 bool badFor10_4 = false;
5355 if ( fWritableSegmentPastFirst4GB ) {
5356 if ( fOptions.macosxVersionMin() < ObjectFile::ReaderOptions::k10_5 )
5357 badFor10_4 = true;
5358 result = address - fFirstWritableSegment->fBaseAddress;
5359 if ( result > 0xFFFFFFFF ) {
5360 throwf("image too large: address can't fit in 32-bit r_address field in %s from %s",
5361 atom->getDisplayName(), atom->getFile()->getPath());
5362 }
5363 }
5364 else {
5365 result = address - fSegmentInfos[0]->fBaseAddress;
5366 if ( (fOptions.macosxVersionMin() < ObjectFile::ReaderOptions::k10_5) && (result > 0x7FFFFFFF) )
5367 badFor10_4 = true;
5368 }
5369 if ( badFor10_4 ) {
5370 throwf("image or pagezero_size too large for Mac OS X 10.4: address can't fit in 31-bit r_address field for %s from %s",
5371 atom->getDisplayName(), atom->getFile()->getPath());
5372 }
5373 return result;
5374 }
5375
5376
5377 template <> bool Writer<ppc>::preboundLazyPointerType(uint8_t* type) { *type = PPC_RELOC_PB_LA_PTR; return true; }
5378 template <> bool Writer<ppc64>::preboundLazyPointerType(uint8_t* type) { throw "prebinding not supported"; }
5379 template <> bool Writer<x86>::preboundLazyPointerType(uint8_t* type) { *type = GENERIC_RELOC_PB_LA_PTR; return true; }
5380 template <> bool Writer<x86_64>::preboundLazyPointerType(uint8_t* type) { throw "prebinding not supported"; }
5381 template <> bool Writer<arm>::preboundLazyPointerType(uint8_t* type) { *type = ARM_RELOC_PB_LA_PTR; return true; }
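// PB_LA_PTR ("prebound lazy pointer") scattered relocs record the original helper address so
// dyld can reset a lazy pointer when prebinding turns out to be invalid; prebinding is not
// supported for the 64-bit architectures, which is why ppc64 and x86_64 throw above.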
5382
5383 template <typename A>
5384 void Writer<A>::buildExecutableFixups()
5385 {
5386 if ( fIndirectTableAtom != NULL )
5387 fIndirectTableAtom->fTable.reserve(50); // minimize reallocations
5388 std::vector<SegmentInfo*>& segmentInfos = fSegmentInfos;
5389 const int segCount = segmentInfos.size();
5390 for(int i=0; i < segCount; ++i) {
5391 SegmentInfo* curSegment = segmentInfos[i];
5392 std::vector<SectionInfo*>& sectionInfos = curSegment->fSections;
5393 const int sectionCount = sectionInfos.size();
5394 for(int j=0; j < sectionCount; ++j) {
5395 SectionInfo* curSection = sectionInfos[j];
5396 //fprintf(stderr, "starting section %s\n", curSection->fSectionName);
5397 std::vector<ObjectFile::Atom*>& sectionAtoms = curSection->fAtoms;
5398 if ( ! curSection->fAllZeroFill ) {
5399 if ( curSection->fAllNonLazyPointers || curSection->fAllLazyPointers || curSection->fAllLazyDylibPointers
5400 || curSection->fAllStubs || curSection->fAllSelfModifyingStubs ) {
5401 if ( fIndirectTableAtom != NULL )
5402 curSection->fIndirectSymbolOffset = fIndirectTableAtom->fTable.size();
5403 }
5404 const int atomCount = sectionAtoms.size();
5405 for (int k=0; k < atomCount; ++k) {
5406 ObjectFile::Atom* atom = sectionAtoms[k];
5407 std::vector<ObjectFile::Reference*>& refs = atom->getReferences();
5408 const int refCount = refs.size();
5409 //fprintf(stderr, "atom %s has %d references in section %s, %p\n", atom->getDisplayName(), refCount, curSection->fSectionName, atom->getSection());
5410 if ( curSection->fAllNonLazyPointers && (refCount == 0) ) {
5411 // handle imageloadercache GOT slot
5412 uint32_t offsetInSection = atom->getSectionOffset();
5413 uint32_t indexInSection = offsetInSection / sizeof(pint_t);
5414 uint32_t indirectTableIndex = indexInSection + curSection->fIndirectSymbolOffset;
5415 // use INDIRECT_SYMBOL_ABS so 10.5 dyld will leave value as zero
5416 IndirectEntry entry = { indirectTableIndex, INDIRECT_SYMBOL_ABS };
5417 //fprintf(stderr,"fIndirectTableAtom->fTable.push_back(tableIndex=%d, symIndex=0x%X, section=%s)\n",
5418 // indirectTableIndex, INDIRECT_SYMBOL_LOCAL, curSection->fSectionName);
5419 fIndirectTableAtom->fTable.push_back(entry);
5420 }
5421 for (int l=0; l < refCount; ++l) {
5422 ObjectFile::Reference* ref = refs[l];
5423 if ( (fOptions.outputKind() != Options::kKextBundle) &&
5424 (curSection->fAllNonLazyPointers || curSection->fAllLazyPointers || curSection->fAllLazyDylibPointers) ) {
5425 // if atom is in (non)lazy_pointer section, this is encoded as an indirect symbol
5426 if ( atom->getSize() != sizeof(pint_t) ) {
5427 warning("wrong size pointer atom %s from file %s", atom->getDisplayName(), atom->getFile()->getPath());
5428 }
5429 ObjectFile::Atom* pointerTarget = &(ref->getTarget());
5430 if ( curSection->fAllLazyPointers || curSection->fAllLazyDylibPointers ) {
5431 pointerTarget = ((LazyPointerAtom<A>*)atom)->getTarget();
5432 }
5433 uint32_t offsetInSection = atom->getSectionOffset();
5434 uint32_t indexInSection = offsetInSection / sizeof(pint_t);
5435 uint32_t undefinedSymbolIndex = INDIRECT_SYMBOL_LOCAL;
5436 if (atom == fFastStubGOTAtom)
5437 undefinedSymbolIndex = INDIRECT_SYMBOL_ABS;
5438 else if ( this->relocationNeededInFinalLinkedImage(*pointerTarget) == kRelocExternal )
5439 undefinedSymbolIndex = this->symbolIndex(*pointerTarget);
5440 uint32_t indirectTableIndex = indexInSection + curSection->fIndirectSymbolOffset;
5441 IndirectEntry entry = { indirectTableIndex, undefinedSymbolIndex };
5442 //fprintf(stderr,"fIndirectTableAtom->fTable.push_back(tableIndex=%d, symIndex=0x%X, section=%s)\n",
5443 // indirectTableIndex, undefinedSymbolIndex, curSection->fSectionName);
5444 fIndirectTableAtom->fTable.push_back(entry);
5445 if ( curSection->fAllLazyPointers || curSection->fAllLazyDylibPointers ) {
5446 uint8_t preboundLazyType;
5447 if ( fOptions.prebind() && (fDyldClassicHelperAtom != NULL)
5448 && curSection->fAllLazyPointers && preboundLazyPointerType(&preboundLazyType) ) {
5449 // this is a prebound image, need special relocs for dyld to reset lazy pointers if prebinding is invalid
5450 macho_scattered_relocation_info<P> pblaReloc;
5451 pblaReloc.set_r_scattered(true);
5452 pblaReloc.set_r_pcrel(false);
5453 pblaReloc.set_r_length();
5454 pblaReloc.set_r_type(preboundLazyType);
5455 pblaReloc.set_r_address(relocAddressInFinalLinkedImage(atom->getAddress(), atom));
5456 pblaReloc.set_r_value(fDyldClassicHelperAtom->getAddress());
5457 fInternalRelocs.push_back(*((macho_relocation_info<P>*)&pblaReloc));
5458 }
5459 else if ( fSlideable ) {
5460 // this is a non-prebound dylib/bundle, need vanilla internal relocation to fix up binding handler if image slides
5461 macho_relocation_info<P> dyldHelperReloc;
5462 uint32_t sectionNum = 1;
5463 if ( fDyldClassicHelperAtom != NULL )
5464 sectionNum = ((SectionInfo*)(fDyldClassicHelperAtom->getSection()))->getIndex();
5465 //fprintf(stderr, "lazy pointer reloc, section index=%u, section name=%s\n", sectionNum, curSection->fSectionName);
5466 dyldHelperReloc.set_r_address(relocAddressInFinalLinkedImage(atom->getAddress(), atom));
5467 dyldHelperReloc.set_r_symbolnum(sectionNum);
5468 dyldHelperReloc.set_r_pcrel(false);
5469 dyldHelperReloc.set_r_length();
5470 dyldHelperReloc.set_r_extern(false);
5471 dyldHelperReloc.set_r_type(GENERIC_RELOC_VANILLA);
5472 fInternalRelocs.push_back(dyldHelperReloc);
5473 if ( fOptions.makeCompressedDyldInfo() ) {
5474 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_POINTER,atom->getAddress()));
5475 }
5476 }
5477 if ( fOptions.makeCompressedDyldInfo() ) {
5478 uint8_t type = BIND_TYPE_POINTER;
5479 uint64_t addresss = atom->getAddress() + ref->getFixUpOffset();
5480 if ( pointerTarget->getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition ) {
5481 // This is a reference to a weak def in some dylib (e.g. operator new)
5482 // need binding info to directly bind this pointer
5483 // later weak binding info may override
5484 int ordinal = compressedOrdinalForImortedAtom(pointerTarget);
5485 fBindingInfo.push_back(BindingInfo(type, ordinal, pointerTarget->getName(), false, addresss, 0));
5486 }
5487 if ( targetRequiresWeakBinding(*pointerTarget) ) {
5488 // note: lazy pointers to weak symbols are not bound lazily
5489 fWeakBindingInfo.push_back(BindingInfo(type, pointerTarget->getName(), false, addresss, 0));
5490 }
5491 }
5492 }
5493 if ( curSection->fAllNonLazyPointers && fOptions.makeCompressedDyldInfo() ) {
5494 if ( pointerTarget != NULL ) {
5495 switch ( this->relocationNeededInFinalLinkedImage(*pointerTarget) ) {
5496 case kRelocNone:
5497 // no rebase or binding info needed
5498 break;
5499 case kRelocInternal:
5500 // a non-lazy pointer that has been optimized to LOCAL needs rebasing info
5501 // but not the magic fFastStubGOTAtom atom
5502 if (atom != fFastStubGOTAtom)
5503 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_POINTER,atom->getAddress()));
5504 break;
5505 case kRelocExternal:
5506 {
5507 uint8_t type = BIND_TYPE_POINTER;
5508 uint64_t addresss = atom->getAddress();
5509 if ( targetRequiresWeakBinding(ref->getTarget()) ) {
5510 fWeakBindingInfo.push_back(BindingInfo(type, ref->getTarget().getName(), false, addresss, 0));
5511 // if this is a non-lazy pointer to a weak definition within this linkage unit
5512 // the pointer needs to initially point within the linkage unit and have a
5513 // rebase command to slide it.
5514 if ( ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition ) {
5515 // unless this is a hybrid format, in which case the non-lazy pointer
5516 // is zero on disk. So use a bind instead of a rebase to set initial value
5517 if ( fOptions.makeClassicDyldInfo() )
5518 fBindingInfo.push_back(BindingInfo(type, BIND_SPECIAL_DYLIB_SELF, ref->getTarget().getName(), false, addresss, 0));
5519 else
5520 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_POINTER,atom->getAddress()));
5521 }
5522 // if this is a non-lazy pointer to a weak definition in a dylib,
5523 // the pointer needs to initially bind to the dylib
5524 else if ( ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition ) {
5525 int ordinal = compressedOrdinalForImortedAtom(pointerTarget);
5526 fBindingInfo.push_back(BindingInfo(BIND_TYPE_POINTER, ordinal, pointerTarget->getName(), false, addresss, 0));
5527 }
5528 }
5529 else {
5530 int ordinal = compressedOrdinalForImortedAtom(pointerTarget);
5531 bool weak_import = fWeakImportMap[pointerTarget];
5532 fBindingInfo.push_back(BindingInfo(type, ordinal, ref->getTarget().getName(), weak_import, addresss, 0));
5533 }
5534 }
5535 }
5536 }
5537 }
5538 }
5539 else if ( (ref->getKind() == A::kPointer) || (ref->getKind() == A::kPointerWeakImport) ) {
5540 if ( fSlideable && ((curSegment->fInitProtection & VM_PROT_WRITE) == 0) ) {
5541 if ( fOptions.allowTextRelocs() ) {
5542 if ( fOptions.warnAboutTextRelocs() )
5543 warning("text reloc in %s to %s", atom->getDisplayName(), ref->getTargetName());
5544 }
5545 else {
5546 throwf("pointer in read-only segment not allowed in slidable image, used in %s from %s",
5547 atom->getDisplayName(), atom->getFile()->getPath());
5548 }
5549 }
5550 switch ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) ) {
5551 case kRelocNone:
5552 // no reloc needed
5553 break;
5554 case kRelocInternal:
5555 {
5556 macho_relocation_info<P> internalReloc;
5557 SectionInfo* sectInfo = (SectionInfo*)ref->getTarget().getSection();
5558 uint32_t sectionNum = sectInfo->getIndex();
5559 // special case _mh_dylib_header and friends which are not in any real section
5560 if ( (sectionNum ==0) && sectInfo->fVirtualSection && (strcmp(sectInfo->fSectionName, "._mach_header") == 0) )
5561 sectionNum = 1;
5562 internalReloc.set_r_address(this->relocAddressInFinalLinkedImage(atom->getAddress() + ref->getFixUpOffset(), atom));
5563 internalReloc.set_r_symbolnum(sectionNum);
5564 internalReloc.set_r_pcrel(false);
5565 internalReloc.set_r_length();
5566 internalReloc.set_r_extern(false);
5567 internalReloc.set_r_type(GENERIC_RELOC_VANILLA);
5568 fInternalRelocs.push_back(internalReloc);
5569 if ( fOptions.makeCompressedDyldInfo() ) {
5570 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_POINTER, atom->getAddress() + ref->getFixUpOffset()));
5571 }
5572 }
5573 break;
5574 case kRelocExternal:
5575 {
5576 macho_relocation_info<P> externalReloc;
5577 externalReloc.set_r_address(this->relocAddressInFinalLinkedImage(atom->getAddress() + ref->getFixUpOffset(), atom));
5578 externalReloc.set_r_symbolnum(this->symbolIndex(ref->getTarget()));
5579 externalReloc.set_r_pcrel(false);
5580 externalReloc.set_r_length();
5581 externalReloc.set_r_extern(true);
5582 externalReloc.set_r_type(GENERIC_RELOC_VANILLA);
5583 fExternalRelocs.push_back(externalReloc);
5584 if ( fOptions.makeCompressedDyldInfo() ) {
5585 int64_t addend = ref->getTargetOffset();
5586 uint64_t addresss = atom->getAddress() + ref->getFixUpOffset();
5587 if ( !fOptions.makeClassicDyldInfo() ) {
5588 if ( ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition ) {
5589 // pointers to internal weak defs need a rebase
5590 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_POINTER, addresss));
5591 }
5592 }
5593 uint8_t type = BIND_TYPE_POINTER;
5594 if ( targetRequiresWeakBinding(ref->getTarget()) ) {
5595 fWeakBindingInfo.push_back(BindingInfo(type, ref->getTarget().getName(), false, addresss, addend));
5596 if ( fOptions.makeClassicDyldInfo() && (ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) ) {
5597 // hybrid linkedit puts addend in data, so we need bind phase to reset pointer to local definition
5598 fBindingInfo.push_back(BindingInfo(type, BIND_SPECIAL_DYLIB_SELF, ref->getTarget().getName(), false, addresss, addend));
5599 }
5600 // if this is a pointer to a weak definition in a dylib,
5601 // the pointer needs to initially bind to the dylib
5602 else if ( ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition ) {
5603 int ordinal = compressedOrdinalForImortedAtom(&ref->getTarget());
5604 fBindingInfo.push_back(BindingInfo(BIND_TYPE_POINTER, ordinal, ref->getTarget().getName(), false, addresss, addend));
5605 }
5606 }
5607 else {
5608 int ordinal = compressedOrdinalForImortedAtom(&ref->getTarget());
5609 bool weak_import = fWeakImportMap[&(ref->getTarget())];
5610 fBindingInfo.push_back(BindingInfo(type, ordinal, ref->getTarget().getName(), weak_import, addresss, addend));
5611 }
5612 }
5613 }
5614 break;
5615 }
5616 }
5617 else if ( this->illegalRelocInFinalLinkedImage(*ref) ) {
5618 // new x86 stubs always require text relocs
5619 if ( curSection->fAllStubs || curSection->fAllStubHelpers ) {
5620 if ( this->generatesLocalTextReloc(*ref, *atom, curSection) ) {
5621 // relocs added to fInternalRelocs
5622 }
5623 }
5624 else if ( fOptions.allowTextRelocs() && !atom->getSegment().isContentWritable() ) {
5625 if ( fOptions.warnAboutTextRelocs() )
5626 warning("text reloc in %s to %s", atom->getDisplayName(), ref->getTargetName());
5627 if ( this->generatesLocalTextReloc(*ref, *atom, curSection) ) {
5628 // relocs added to fInternalRelocs
5629 }
5630 else if ( this->generatesExternalTextReloc(*ref, *atom, curSection) ) {
5631 // relocs added to fExternalRelocs
5632 }
5633 else {
5634 throwf("relocation used in %s from %s not allowed in slidable image", atom->getDisplayName(), atom->getFile()->getPath());
5635 }
5636 }
5637 else {
5638 throwf("absolute addressing (perhaps -mdynamic-no-pic) used in %s from %s not allowed in slidable image. "
5639 "Use '-read_only_relocs suppress' to enable text relocs", atom->getDisplayName(), atom->getFile()->getPath());
5640 }
5641 }
5642 }
5643 if ( curSection->fAllSelfModifyingStubs || curSection->fAllStubs ) {
5644 ObjectFile::Atom* stubTarget = ((StubAtom<A>*)atom)->getTarget();
5645 uint32_t undefinedSymbolIndex = (stubTarget != NULL) ? this->symbolIndex(*stubTarget) : INDIRECT_SYMBOL_ABS;
5646 uint32_t offsetInSection = atom->getSectionOffset();
5647 uint32_t indexInSection = offsetInSection / atom->getSize();
5648 uint32_t indirectTableIndex = indexInSection + curSection->fIndirectSymbolOffset;
5649 IndirectEntry entry = { indirectTableIndex, undefinedSymbolIndex };
5650 //fprintf(stderr,"for stub: fIndirectTableAtom->fTable.add(%d-%d => 0x%X-%s), size=%lld\n", indexInSection, indirectTableIndex, undefinedSymbolIndex, stubTarget->getName(), atom->getSize());
5651 fIndirectTableAtom->fTable.push_back(entry);
5652 }
5653 }
5654 }
5655 }
5656 }
5657 if ( fSplitCodeToDataContentAtom != NULL )
5658 fSplitCodeToDataContentAtom->encode();
5659 if ( fCompressedRebaseInfoAtom != NULL )
5660 fCompressedRebaseInfoAtom->encode();
5661 if ( fCompressedBindingInfoAtom != NULL )
5662 fCompressedBindingInfoAtom->encode();
5663 if ( fCompressedWeakBindingInfoAtom != NULL )
5664 fCompressedWeakBindingInfoAtom->encode();
5665 if ( fCompressedLazyBindingInfoAtom != NULL )
5666 fCompressedLazyBindingInfoAtom->encode();
5667 if ( fCompressedExportInfoAtom != NULL )
5668 fCompressedExportInfoAtom->encode();
5669 }
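// Editor's recap of the classification performed above for the compressed
// dyld info path: a pointer that merely needs to slide gets a
// REBASE_TYPE_POINTER entry; a pointer to a weak-coalesced target gets a weak
// binding entry (plus, in the hybrid classic+compressed case, a
// BIND_SPECIAL_DYLIB_SELF binding so the bind phase can reset the pointer to
// the chosen local definition); a pointer to a weak definition in a dylib
// gets a regular binding to that dylib's ordinal; any other undefined target
// gets a regular binding carrying its library ordinal and weak-import flag.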
5670
5671
5672 template <>
5673 void Writer<ppc>::addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref)
5674 {
5675 switch ( (ppc::ReferenceKinds)ref->getKind() ) {
5676 case ppc::kPICBaseHigh16:
5677 fSplitCodeToDataContentAtom->addPPCHi16Location(atom, ref->getFixUpOffset());
5678 break;
5679 case ppc::kPointerDiff32:
5680 fSplitCodeToDataContentAtom->add32bitPointerLocation(atom, ref->getFixUpOffset());
5681 break;
5682 case ppc::kPointerDiff64:
5683 fSplitCodeToDataContentAtom->add64bitPointerLocation(atom, ref->getFixUpOffset());
5684 break;
5685 case ppc::kNoFixUp:
5686 case ppc::kGroupSubordinate:
5687 case ppc::kPointer:
5688 case ppc::kPointerWeakImport:
5689 case ppc::kPICBaseLow16:
5690 case ppc::kPICBaseLow14:
5691 // ignore
5692 break;
5693 default:
5694 warning("codegen with reference kind %d in %s prevents image from loading in dyld shared cache", ref->getKind(), atom->getDisplayName());
5695 fSplitCodeToDataContentAtom->setCantEncode();
5696 }
5697 }
5698
5699 template <>
5700 void Writer<ppc64>::addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref)
5701 {
5702 switch ( (ppc64::ReferenceKinds)ref->getKind() ) {
5703 case ppc64::kPICBaseHigh16:
5704 fSplitCodeToDataContentAtom->addPPCHi16Location(atom, ref->getFixUpOffset());
5705 break;
5706 case ppc64::kPointerDiff32:
5707 fSplitCodeToDataContentAtom->add32bitPointerLocation(atom, ref->getFixUpOffset());
5708 break;
5709 case ppc64::kPointerDiff64:
5710 fSplitCodeToDataContentAtom->add64bitPointerLocation(atom, ref->getFixUpOffset());
5711 break;
5712 case ppc64::kNoFixUp:
5713 case ppc64::kGroupSubordinate:
5714 case ppc64::kPointer:
5715 case ppc64::kPointerWeakImport:
5716 case ppc64::kPICBaseLow16:
5717 case ppc64::kPICBaseLow14:
5718 // ignore
5719 break;
5720 default:
5721 warning("codegen with reference kind %d in %s prevents image from loading in dyld shared cache", ref->getKind(), atom->getDisplayName());
5722 fSplitCodeToDataContentAtom->setCantEncode();
5723 }
5724 }
5725
5726 template <>
5727 void Writer<x86>::addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref)
5728 {
5729 switch ( (x86::ReferenceKinds)ref->getKind() ) {
5730 case x86::kPointerDiff:
5731 case x86::kImageOffset32:
5732 if ( strcmp(ref->getTarget().getSegment().getName(), "__IMPORT") == 0 )
5733 fSplitCodeToDataContentAtom->add32bitImportLocation(atom, ref->getFixUpOffset());
5734 else
5735 fSplitCodeToDataContentAtom->add32bitPointerLocation(atom, ref->getFixUpOffset());
5736 break;
5737 case x86::kNoFixUp:
5738 case x86::kGroupSubordinate:
5739 case x86::kPointer:
5740 case x86::kPointerWeakImport:
5741 // ignore
5742 break;
5743 case x86::kPCRel32:
5744 case x86::kPCRel32WeakImport:
5745 if ( (&(ref->getTarget().getSegment()) == &Segment::fgImportSegment)
5746 || (&(ref->getTarget().getSegment()) == &Segment::fgROImportSegment) ) {
5747 fSplitCodeToDataContentAtom->add32bitImportLocation(atom, ref->getFixUpOffset());
5748 break;
5749 }
5750 // fall into warning case
5751 default:
5752 if ( fOptions.makeCompressedDyldInfo() && (ref->getKind() == x86::kAbsolute32) ) {
5753 // will be encoded in rebase info
5754 }
5755 else {
5756 warning("codegen in %s (offset 0x%08llX) prevents image from loading in dyld shared cache", atom->getDisplayName(), ref->getFixUpOffset());
5757 fSplitCodeToDataContentAtom->setCantEncode();
5758 }
5759 }
5760 }
5761
5762 template <>
5763 void Writer<x86_64>::addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref)
5764 {
5765 switch ( (x86_64::ReferenceKinds)ref->getKind() ) {
5766 case x86_64::kPCRel32:
5767 case x86_64::kPCRel32_1:
5768 case x86_64::kPCRel32_2:
5769 case x86_64::kPCRel32_4:
5770 case x86_64::kPCRel32GOTLoad:
5771 case x86_64::kPCRel32GOTLoadWeakImport:
5772 case x86_64::kPCRel32GOT:
5773 case x86_64::kPCRel32GOTWeakImport:
5774 case x86_64::kPointerDiff32:
5775 case x86_64::kImageOffset32:
5776 fSplitCodeToDataContentAtom->add32bitPointerLocation(atom, ref->getFixUpOffset());
5777 break;
5778 case x86_64::kPointerDiff:
5779 fSplitCodeToDataContentAtom->add64bitPointerLocation(atom, ref->getFixUpOffset());
5780 break;
5781 case x86_64::kNoFixUp:
5782 case x86_64::kGroupSubordinate:
5783 case x86_64::kPointer:
5784 case x86_64::kGOTNoFixUp:
5785 // ignore
5786 break;
5787 default:
5788 warning("codegen in %s with kind %d prevents image from loading in dyld shared cache", atom->getDisplayName(), ref->getKind());
5789 fSplitCodeToDataContentAtom->setCantEncode();
5790 }
5791 }
5792
5793 template <>
5794 void Writer<arm>::addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref)
5795 {
5796 switch ( (arm::ReferenceKinds)ref->getKind() ) {
5797 case arm::kPointerDiff:
5798 fSplitCodeToDataContentAtom->add32bitPointerLocation(atom, ref->getFixUpOffset());
5799 break;
5800 case arm::kNoFixUp:
5801 case arm::kGroupSubordinate:
5802 case arm::kPointer:
5803 case arm::kPointerWeakImport:
5804 case arm::kReadOnlyPointer:
5805 // ignore
5806 break;
5807 default:
5808 warning("codegen in %s prevents image from loading in dyld shared cache", atom->getDisplayName());
5809 fSplitCodeToDataContentAtom->setCantEncode();
5810 }
5811 }
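// Editor's note: the five addCrossSegmentRef() specializations above populate
// the split-seg info content atom.  Presumably each recorded fix-up location
// is one the dyld shared cache builder must adjust when it places __TEXT and
// the data segments at a non-default distance from each other, which is why a
// reference kind that cannot be described here makes the image ineligible for
// the shared cache (the warning + setCantEncode() cases above).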
5812
5813 template <typename A>
5814 bool Writer<A>::segmentsCanSplitApart(const ObjectFile::Atom& from, const ObjectFile::Atom& to)
5815 {
5816 switch ( to.getDefinitionKind() ) {
5817 case ObjectFile::Atom::kExternalDefinition:
5818 case ObjectFile::Atom::kExternalWeakDefinition:
5819 case ObjectFile::Atom::kAbsoluteSymbol:
5820 return false;
5821 case ObjectFile::Atom::kRegularDefinition:
5822 case ObjectFile::Atom::kWeakDefinition:
5823 case ObjectFile::Atom::kTentativeDefinition:
5824 // segments with same permissions slide together
5825 return ( (from.getSegment().isContentExecutable() != to.getSegment().isContentExecutable())
5826 || (from.getSegment().isContentWritable() != to.getSegment().isContentWritable()) );
5827 }
5828 throw "ld64 internal error";
5829 }
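// For example: a pointer from an atom in __TEXT (r-x) to an atom in __DATA
// (rw-) returns true (those segments may slide apart), a pointer between two
// atoms in __DATA (identical permissions) returns false, and references to
// dylib-external or absolute symbols always return false.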
5830
5831
5832 template <>
5833 void Writer<ppc>::writeNoOps(int fd, uint32_t from, uint32_t to)
5834 {
5835 uint32_t ppcNop;
5836 OSWriteBigInt32(&ppcNop, 0, 0x60000000);
5837 for (uint32_t p=from; p < to; p += 4)
5838 ::pwrite(fd, &ppcNop, 4, p);
5839 }
5840
5841 template <>
5842 void Writer<ppc64>::writeNoOps(int fd, uint32_t from, uint32_t to)
5843 {
5844 uint32_t ppcNop;
5845 OSWriteBigInt32(&ppcNop, 0, 0x60000000);
5846 for (uint32_t p=from; p < to; p += 4)
5847 ::pwrite(fd, &ppcNop, 4, p);
5848 }
5849
5850 template <>
5851 void Writer<x86>::writeNoOps(int fd, uint32_t from, uint32_t to)
5852 {
5853 uint8_t x86Nop = 0x90;
5854 for (uint32_t p=from; p < to; ++p)
5855 ::pwrite(fd, &x86Nop, 1, p);
5856 }
5857
5858 template <>
5859 void Writer<x86_64>::writeNoOps(int fd, uint32_t from, uint32_t to)
5860 {
5861 uint8_t x86Nop = 0x90;
5862 for (uint32_t p=from; p < to; ++p)
5863 ::pwrite(fd, &x86Nop, 1, p);
5864 }
5865
5866 template <>
5867 void Writer<arm>::writeNoOps(int fd, uint32_t from, uint32_t to)
5868 {
5869 // FIXME: need thumb nop?
5870 uint32_t armNop;
5871 OSWriteLittleInt32(&armNop, 0, 0xe1a00000);
5872 for (uint32_t p=from; p < to; p += 4)
5873 ::pwrite(fd, &armNop, 4, p);
5874 }
5875
5876 template <>
5877 void Writer<ppc>::copyNoOps(uint8_t* from, uint8_t* to)
5878 {
5879 for (uint8_t* p=from; p < to; p += 4)
5880 OSWriteBigInt32((uint32_t*)p, 0, 0x60000000);
5881 }
5882
5883 template <>
5884 void Writer<ppc64>::copyNoOps(uint8_t* from, uint8_t* to)
5885 {
5886 for (uint8_t* p=from; p < to; p += 4)
5887 OSWriteBigInt32((uint32_t*)p, 0, 0x60000000);
5888 }
5889
5890 template <>
5891 void Writer<x86>::copyNoOps(uint8_t* from, uint8_t* to)
5892 {
5893 for (uint8_t* p=from; p < to; ++p)
5894 *p = 0x90;
5895 }
5896
5897 template <>
5898 void Writer<x86_64>::copyNoOps(uint8_t* from, uint8_t* to)
5899 {
5900 for (uint8_t* p=from; p < to; ++p)
5901 *p = 0x90;
5902 }
5903
5904 template <>
5905 void Writer<arm>::copyNoOps(uint8_t* from, uint8_t* to)
5906 {
5907 // fixme: need thumb nop?
5908 for (uint8_t* p=from; p < to; p += 4)
5909 OSWriteLittleInt32((uint32_t*)p, 0, 0xe1a00000);	// arm is little-endian (matches writeNoOps above)
5910 }
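// Illustrative sketch (editor's addition, not referenced by the writer): the
// writeNoOps()/copyNoOps() specializations above all tile a byte range with
// the architecture's canonical nop -- 0x60000000 (ori r0,r0,0) on ppc/ppc64,
// 0x90 on i386/x86_64, and 0xE1A00000 (mov r0,r0) on arm.  For the
// little-endian targets the in-memory variant could be expressed generically:
static inline void sketchFillLittleEndianNops(uint8_t* from, uint8_t* to, uint32_t nop, unsigned nopSize)
{
	for (uint8_t* p = from; (p + nopSize) <= to; p += nopSize) {
		for (unsigned i = 0; i < nopSize; ++i)
			p[i] = (uint8_t)(nop >> (8*i));		// least significant byte first
	}
}
// e.g. sketchFillLittleEndianNops(start, end, 0xE1A00000, 4);	// arm
//      sketchFillLittleEndianNops(start, end, 0x90, 1);		// i386/x86_64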
5911
5912 static const char* stringName(const char* str)
5913 {
5914 if ( strncmp(str, "cstring=", 8) == 0) {
5915 static char buffer[1024];
5916 char* t = buffer;
5917 *t++ = '\"';
5918 for(const char*s = &str[8]; *s != '\0'; ++s) {
5919 switch(*s) {
5920 case '\n':
5921 *t++ = '\\';
5922 *t++ = 'n';
5923 break;
5924 case '\t':
5925 *t++ = '\\';
5926 *t++ = 't';
5927 break;
5928 default:
5929 *t++ = *s;
5930 break;
5931 }
5932 if ( t > &buffer[1020] ) {
5933 *t++= '\"';
5934 *t++= '.';
5935 *t++= '.';
5936 *t++= '.';
5937 *t++= '\0';
5938 return buffer;
5939 }
5940 }
5941 *t++= '\"';
5942 *t++= '\0';
5943 return buffer;
5944 }
5945 else {
5946 return str;
5947 }
5948 }
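// For example (illustrative input): an atom whose display name is
//     cstring=Hello\tWorld\n
// is printed in the map file as
//     "Hello\tWorld\n"
// i.e. quoted, with tab and newline characters escaped; a string longer than
// the static buffer is cut short and terminated with a closing quote and "...".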
5949
5950
5951 template <> const char* Writer<ppc>::getArchString() { return "ppc"; }
5952 template <> const char* Writer<ppc64>::getArchString() { return "ppc64"; }
5953 template <> const char* Writer<x86>::getArchString() { return "i386"; }
5954 template <> const char* Writer<x86_64>::getArchString() { return "x86_64"; }
5955 template <> const char* Writer<arm>::getArchString() { return "arm"; }
5956
5957 template <typename A>
5958 void Writer<A>::writeMap()
5959 {
5960 if ( fOptions.generatedMapPath() != NULL ) {
5961 FILE* mapFile = fopen(fOptions.generatedMapPath(), "w");
5962 if ( mapFile != NULL ) {
5963 // write output path
5964 fprintf(mapFile, "# Path: %s\n", fFilePath);
5965 // write output architecture
5966 fprintf(mapFile, "# Arch: %s\n", getArchString());
5967 // write UUID
5968 if ( fUUIDAtom != NULL ) {
5969 const uint8_t* uuid = fUUIDAtom->getUUID();
5970 fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
5971 uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
5972 uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
5973 }
5974 // write table of object files
5975 std::map<ObjectFile::Reader*, uint32_t> readerToOrdinal;
5976 std::map<uint32_t, ObjectFile::Reader*> ordinalToReader;
5977 std::map<ObjectFile::Reader*, uint32_t> readerToFileOrdinal;
5978 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
5979 std::vector<SectionInfo*>& sectionInfos = (*segit)->fSections;
5980 for (std::vector<SectionInfo*>::iterator secit = sectionInfos.begin(); secit != sectionInfos.end(); ++secit) {
5981 if ( ! (*secit)->fVirtualSection ) {
5982 std::vector<ObjectFile::Atom*>& sectionAtoms = (*secit)->fAtoms;
5983 for (std::vector<ObjectFile::Atom*>::iterator ait = sectionAtoms.begin(); ait != sectionAtoms.end(); ++ait) {
5984 ObjectFile::Reader* reader = (*ait)->getFile();
5985 uint32_t readerOrdinal = (*ait)->getOrdinal();
5986 std::map<ObjectFile::Reader*, uint32_t>::iterator pos = readerToOrdinal.find(reader);
5987 if ( pos == readerToOrdinal.end() ) {
5988 readerToOrdinal[reader] = readerOrdinal;
5989 ordinalToReader[readerOrdinal] = reader;
5990 }
5991 }
5992 }
5993 }
5994 }
5995 fprintf(mapFile, "# Object files:\n");
5996 fprintf(mapFile, "[%3u] %s\n", 0, "linker synthesized");
5997 uint32_t fileIndex = 0;
5998 readerToFileOrdinal[this] = fileIndex++;
5999 for(std::map<uint32_t, ObjectFile::Reader*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
6000 if ( it->first != 0 ) {
6001 fprintf(mapFile, "[%3u] %s\n", fileIndex, it->second->getPath());
6002 readerToFileOrdinal[it->second] = fileIndex++;
6003 }
6004 }
6005 // write table of sections
6006 fprintf(mapFile, "# Sections:\n");
6007 fprintf(mapFile, "# Address\tSize \tSegment\tSection\n");
6008 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
6009 std::vector<SectionInfo*>& sectionInfos = (*segit)->fSections;
6010 for (std::vector<SectionInfo*>::iterator secit = sectionInfos.begin(); secit != sectionInfos.end(); ++secit) {
6011 if ( ! (*secit)->fVirtualSection ) {
6012 SectionInfo* sect = *secit;
6013 fprintf(mapFile, "0x%08llX\t0x%08llX\t%s\t%s\n", sect->getBaseAddress(), sect->fSize,
6014 (*segit)->fName, sect->fSectionName);
6015 }
6016 }
6017 }
6018 // write table of symbols
6019 fprintf(mapFile, "# Symbols:\n");
6020 fprintf(mapFile, "# Address\tSize \tFile Name\n");
6021 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
6022 std::vector<SectionInfo*>& sectionInfos = (*segit)->fSections;
6023 for (std::vector<SectionInfo*>::iterator secit = sectionInfos.begin(); secit != sectionInfos.end(); ++secit) {
6024 if ( ! (*secit)->fVirtualSection ) {
6025 std::vector<ObjectFile::Atom*>& sectionAtoms = (*secit)->fAtoms;
6026 bool isCstring = (strcmp((*secit)->fSectionName, "__cstring") == 0);
6027 for (std::vector<ObjectFile::Atom*>::iterator ait = sectionAtoms.begin(); ait != sectionAtoms.end(); ++ait) {
6028 ObjectFile::Atom* atom = *ait;
6029 fprintf(mapFile, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom->getAddress(), atom->getSize(),
6030 readerToFileOrdinal[atom->getFile()], isCstring ? stringName(atom->getDisplayName()): atom->getDisplayName());
6031 }
6032 }
6033 }
6034 }
6035 fclose(mapFile);
6036 }
6037 else {
6038 warning("could not write map file: %s\n", fOptions.generatedMapPath());
6039 }
6040 }
6041 }
6042
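// Illustrative sketch of the map file produced above (all values made up):
//
//   # Path: /tmp/libfoo.dylib
//   # Arch: x86_64
//   # UUID: 4F 3C ... (16 bytes)
//   # Object files:
//   [  0] linker synthesized
//   [  1] /tmp/foo.o
//   # Sections:
//   # Address     Size        Segment   Section
//   0x00001000    0x00000234  __TEXT    __text
//   # Symbols:
//   # Address     Size        File  Name
//   0x00001000    0x00000080  [  1] _main
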
6043 static const char* sCleanupFile = NULL;
6044 static void cleanup(int sig)
6045 {
6046 ::signal(sig, SIG_DFL);
6047 if ( sCleanupFile != NULL ) {
6048 ::unlink(sCleanupFile);
6049 }
6050 if ( sig == SIGINT )
6051 ::exit(1);
6052 }
6053
6054
6055 template <typename A>
6056 uint64_t Writer<A>::writeAtoms()
6057 {
6058 // for UNIX conformance, error if file exists and is not writable
6059 if ( (access(fFilePath, F_OK) == 0) && (access(fFilePath, W_OK) == -1) )
6060 throwf("can't write output file: %s", fFilePath);
6061
6062 int permissions = 0777;
6063 if ( fOptions.outputKind() == Options::kObjectFile )
6064 permissions = 0666;
6065 // Calling unlink first ensures the file is gone, so that open creates it with the correct permissions
6066 // It also handles the case where the fFilePath file is not writable but its directory is
6067 // And it means we don't have to truncate the file when done writing (in case the new file is smaller than the old one)
6068 (void)unlink(fFilePath);
6069
6070 // try to allocate buffer for entire output file content
6071 int fd = -1;
6072 SectionInfo* lastSection = fSegmentInfos.back()->fSections.back();
6073 uint64_t fileBufferSize = (lastSection->fFileOffset + lastSection->fSize + 4095) & (-4096);
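// e.g. (illustrative numbers): fFileOffset=0x4F20, fSize=0x0230 gives
// 0x4F20 + 0x0230 + 4095 = 0x614F, and & -4096 masks that down to 0x6000,
// i.e. the end of the file rounded up to the next 4KB page boundary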
6074 uint8_t* wholeBuffer = (uint8_t*)calloc(fileBufferSize, 1);
6075 uint8_t* atomBuffer = NULL;
6076 bool streaming = false;
6077 if ( wholeBuffer == NULL ) {
6078 fd = open(fFilePath, O_CREAT | O_WRONLY | O_TRUNC, permissions);
6079 if ( fd == -1 )
6080 throwf("can't open output file for writing: %s, errno=%d", fFilePath, errno);
6081 atomBuffer = new uint8_t[(fLargestAtomSize+4095) & (-4096)];
6082 streaming = true;
6083 // install signal handlers to delete output file if program is killed
6084 sCleanupFile = fFilePath;
6085 ::signal(SIGINT, cleanup);
6086 ::signal(SIGBUS, cleanup);
6087 ::signal(SIGSEGV, cleanup);
6088 }
6089 uint32_t size = 0;
6090 uint32_t end = 0;
6091 try {
6092 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
6093 SegmentInfo* curSegment = *segit;
6094 std::vector<SectionInfo*>& sectionInfos = curSegment->fSections;
6095 for (std::vector<SectionInfo*>::iterator secit = sectionInfos.begin(); secit != sectionInfos.end(); ++secit) {
6096 SectionInfo* curSection = *secit;
6097 std::vector<ObjectFile::Atom*>& sectionAtoms = curSection->fAtoms;
6098 //printf("writing with max atom size 0x%X\n", fLargestAtomSize);
6099 //fprintf(stderr, "writing %lu atoms for section %p %s at file offset 0x%08llX\n", sectionAtoms.size(), curSection, curSection->fSectionName, curSection->fFileOffset);
6100 if ( ! curSection->fAllZeroFill ) {
6101 bool needsNops = ((strcmp(curSection->fSegmentName, "__TEXT") == 0) && (strncmp(curSection->fSectionName, "__text", 6) == 0));
6102 for (std::vector<ObjectFile::Atom*>::iterator ait = sectionAtoms.begin(); ait != sectionAtoms.end(); ++ait) {
6103 ObjectFile::Atom* atom = *ait;
6104 if ( (atom->getDefinitionKind() != ObjectFile::Atom::kExternalDefinition)
6105 && (atom->getDefinitionKind() != ObjectFile::Atom::kExternalWeakDefinition)
6106 && (atom->getDefinitionKind() != ObjectFile::Atom::kAbsoluteSymbol) ) {
6107 uint32_t fileOffset = curSection->fFileOffset + atom->getSectionOffset();
6108 if ( fileOffset != end ) {
6109 //fprintf(stderr, "writing %d pad bytes, needsNops=%d\n", fileOffset-end, needsNops);
6110 if ( needsNops ) {
6111 // fill gaps with no-ops
6112 if ( streaming )
6113 writeNoOps(fd, end, fileOffset);
6114 else
6115 copyNoOps(&wholeBuffer[end], &wholeBuffer[fileOffset]);
6116 }
6117 else if ( streaming ) {
6118 // zero fill gaps
6119 if ( (fileOffset-end) == 4 ) {
6120 uint32_t zero = 0;
6121 ::pwrite(fd, &zero, 4, end);
6122 }
6123 else {
6124 uint8_t zero = 0x00;
6125 for (uint32_t p=end; p < fileOffset; ++p)
6126 ::pwrite(fd, &zero, 1, p);
6127 }
6128 }
6129 }
6130 uint64_t atomSize = atom->getSize();
6131 if ( streaming ) {
6132 if ( atomSize > fLargestAtomSize )
6133 throwf("ld64 internal error: atom \"%s\"is larger than expected 0x%llX > 0x%X",
6134 atom->getDisplayName(), atomSize, fLargestAtomSize);
6135 }
6136 else {
6137 if ( fileOffset > fileBufferSize )
6138 throwf("ld64 internal error: atom \"%s\" has file offset greater thatn expceted 0x%X > 0x%llX",
6139 atom->getDisplayName(), fileOffset, fileBufferSize);
6140 }
6141 uint8_t* buffer = streaming ? atomBuffer : &wholeBuffer[fileOffset];
6142 end = fileOffset+atomSize;
6143 // copy raw bytes
6144 atom->copyRawContent(buffer);
6145 // apply any fix-ups
6146 try {
6147 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
6148 for (std::vector<ObjectFile::Reference*>::iterator it=references.begin(); it != references.end(); it++) {
6149 ObjectFile::Reference* ref = *it;
6150 if ( fOptions.outputKind() == Options::kObjectFile ) {
6151 // doing ld -r
6152 // skip fix-ups for undefined targets
6153 if ( &(ref->getTarget()) != NULL )
6154 this->fixUpReferenceRelocatable(ref, atom, buffer);
6155 }
6156 else {
6157 // producing final linked image
6158 this->fixUpReferenceFinal(ref, atom, buffer);
6159 }
6160 }
6161 }
6162 catch (const char* msg) {
6163 throwf("%s in %s from %s", msg, atom->getDisplayName(), atom->getFile()->getPath());
6164 }
6165 //fprintf(stderr, "writing 0x%08X -> 0x%08X (addr=0x%llX, size=0x%llX), atom %p %s from %s\n",
6166 // fileOffset, end, atom->getAddress(), atom->getSize(), atom, atom->getDisplayName(), atom->getFile()->getPath());
6167 if ( streaming ) {
6168 // write out
6169 ::pwrite(fd, buffer, atomSize, fileOffset);
6170 }
6171 else {
6172 if ( (fileOffset + atomSize) > size )
6173 size = fileOffset + atomSize;
6174 }
6175 }
6176 }
6177 }
6178 }
6179 }
6180
6181 // update content based UUID
6182 if ( fOptions.getUUIDMode() == Options::kUUIDContent ) {
6183 uint8_t digest[CC_MD5_DIGEST_LENGTH];
6184 if ( streaming ) {
6185 // if output file did not fit in memory, re-read the file to generate the md5 hash
6186 uint32_t kMD5BufferSize = 16*1024;
6187 uint8_t* md5Buffer = (uint8_t*)::malloc(kMD5BufferSize);
6188 if ( md5Buffer != NULL ) {
6189 CC_MD5_CTX md5State;
6190 CC_MD5_Init(&md5State);
6191 ::lseek(fd, 0, SEEK_SET);
6192 ssize_t len;
6193 while ( (len = ::read(fd, md5Buffer, kMD5BufferSize)) > 0 )
6194 CC_MD5_Update(&md5State, md5Buffer, len);
6195 CC_MD5_Final(digest, &md5State);
6196 ::free(md5Buffer);
6197 }
6198 else {
6199 // if malloc fails, fall back to random uuid
6200 ::uuid_generate_random(digest);
6201 }
6202 fUUIDAtom->setContent(digest);
6203 uint32_t uuidOffset = ((SectionInfo*)fUUIDAtom->getSection())->fFileOffset + fUUIDAtom->getSectionOffset();
6204 fUUIDAtom->copyRawContent(atomBuffer);
6205 ::pwrite(fd, atomBuffer, fUUIDAtom->getSize(), uuidOffset);
6206 }
6207 else {
6208 // if output file fit in memory, just generate an md5 hash in memory
6209 #if 1
6210 // temp hack for building on Tiger
6211 CC_MD5_CTX md5State;
6212 CC_MD5_Init(&md5State);
6213 CC_MD5_Update(&md5State, wholeBuffer, size);
6214 CC_MD5_Final(digest, &md5State);
6215 #else
6216 CC_MD5(wholeBuffer, size, digest);
6217 #endif
6218 fUUIDAtom->setContent(digest);
6219 uint32_t uuidOffset = ((SectionInfo*)fUUIDAtom->getSection())->fFileOffset + fUUIDAtom->getSectionOffset();
6220 fUUIDAtom->copyRawContent(&wholeBuffer[uuidOffset]);
6221 }
6222 }
6223 }
6224 catch (...) {
6225 if ( sCleanupFile != NULL )
6226 ::unlink(sCleanupFile);
6227 throw;
6228 }
6229
6230 // finish up
6231 if ( streaming ) {
6232 delete [] atomBuffer;
6233 close(fd);
6234 // restore default signal handlers
6235 sCleanupFile = NULL;
6236 ::signal(SIGINT, SIG_DFL);
6237 ::signal(SIGBUS, SIG_DFL);
6238 ::signal(SIGSEGV, SIG_DFL);
6239 }
6240 else {
6241 // write whole output file in one chunk
6242 fd = open(fFilePath, O_CREAT | O_WRONLY | O_TRUNC, permissions);
6243 if ( fd == -1 )
6244 throwf("can't open output file for writing: %s, errno=%d", fFilePath, errno);
6245 ::pwrite(fd, wholeBuffer, size, 0);
6246 close(fd);
6247 delete [] wholeBuffer;
6248 }
6249
6250 return end;
6251 }
6252
6253 template <>
6254 void Writer<arm>::fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
6255 {
6256 int64_t displacement;
6257 int64_t baseAddr;
6258 uint32_t instruction;
6259 uint32_t newInstruction;
6260 uint64_t targetAddr = 0;
6261 uint32_t firstDisp;
6262 uint32_t nextDisp;
6263 uint32_t opcode = 0;
6264 int32_t diff;
6265 bool relocateableExternal = false;
6266 bool is_bl;
6267 bool is_blx;
6268 bool targetIsThumb;
6269
6270 if ( ref->getTargetBinding() != ObjectFile::Reference::kDontBind ) {
6271 targetAddr = ref->getTarget().getAddress() + ref->getTargetOffset();
6272 relocateableExternal = (relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal);
6273 }
6274
6275 uint32_t* fixUp = (uint32_t*)&buffer[ref->getFixUpOffset()];
6276 switch ( (arm::ReferenceKinds)(ref->getKind()) ) {
6277 case arm::kNoFixUp:
6278 case arm::kFollowOn:
6279 case arm::kGroupSubordinate:
6280 // do nothing
6281 break;
6282 case arm::kPointerWeakImport:
6283 case arm::kPointer:
6284 // If this is the lazy pointers section, then set all lazy pointers to
6285 // point to the dyld stub binding helper.
6286 if ( ((SectionInfo*)inAtom->getSection())->fAllLazyPointers
6287 || ((SectionInfo*)inAtom->getSection())->fAllLazyDylibPointers ) {
6288 switch (ref->getTarget().getDefinitionKind()) {
6289 case ObjectFile::Atom::kExternalDefinition:
6290 case ObjectFile::Atom::kExternalWeakDefinition:
6291 // prebound lazy pointer to another dylib ==> pointer contains zero
6292 LittleEndian::set32(*fixUp, 0);
6293 break;
6294 case ObjectFile::Atom::kTentativeDefinition:
6295 case ObjectFile::Atom::kRegularDefinition:
6296 case ObjectFile::Atom::kWeakDefinition:
6297 case ObjectFile::Atom::kAbsoluteSymbol:
6298 // prebound lazy pointer to within this dylib ==> pointer contains address
6299 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0) )
6300 targetAddr |= 1;
6301 LittleEndian::set32(*fixUp, targetAddr);
6302 break;
6303 }
6304 }
6305 else if ( relocateableExternal ) {
6306 if ( fOptions.prebind() ) {
6307 switch (ref->getTarget().getDefinitionKind()) {
6308 case ObjectFile::Atom::kExternalDefinition:
6309 case ObjectFile::Atom::kExternalWeakDefinition:
6310 // prebound external relocation ==> pointer contains addend
6311 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6312 break;
6313 case ObjectFile::Atom::kTentativeDefinition:
6314 case ObjectFile::Atom::kRegularDefinition:
6315 case ObjectFile::Atom::kWeakDefinition:
6316 // prebound external relocation to internal atom ==> pointer contains target address + addend
6317 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0) )
6318 targetAddr |= 1;
6319 LittleEndian::set32(*fixUp, targetAddr);
6320 break;
6321 case ObjectFile::Atom::kAbsoluteSymbol:
6322 break;
6323 }
6324 }
6325 else if ( !fOptions.makeClassicDyldInfo()
6326 && (ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) ) {
6327 // when using only compressed dyld info, pointer is initially set to point directly to weak definition
6328 if ( ref->getTarget().isThumb() )
6329 targetAddr |= 1;
6330 LittleEndian::set32(*fixUp, targetAddr);
6331 }
6332 else {
6333 // external relocation ==> pointer contains addend
6334 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6335 }
6336 }
6337 else {
6338 // pointer contains target address
6339 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0))
6340 targetAddr |= 1;
6341 LittleEndian::set32(*fixUp, targetAddr);
6342 }
6343 break;
6344 case arm::kPointerDiff:
6345 diff = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6346 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0) )
6347 diff |= 1;
6348 LittleEndian::set32(*fixUp, diff);
6349 break;
6350 case arm::kReadOnlyPointer:
6351 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0))
6352 targetAddr |= 1;
6353 switch ( ref->getTarget().getDefinitionKind() ) {
6354 case ObjectFile::Atom::kRegularDefinition:
6355 case ObjectFile::Atom::kWeakDefinition:
6356 case ObjectFile::Atom::kTentativeDefinition:
6357 // pointer contains target address
6358 LittleEndian::set32(*fixUp, targetAddr);
6359 break;
6360 case ObjectFile::Atom::kExternalDefinition:
6361 case ObjectFile::Atom::kExternalWeakDefinition:
6362 // external relocation ==> pointer contains addend
6363 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6364 break;
6365 case ObjectFile::Atom::kAbsoluteSymbol:
6366 // pointer contains target address
6367 LittleEndian::set32(*fixUp, targetAddr);
6368 break;
6369 }
6370 break;
6371 case arm::kBranch24WeakImport:
6372 case arm::kBranch24:
6373 displacement = targetAddr - (inAtom->getAddress() + ref->getFixUpOffset());
6374 // check if this is a branch to a branch island that can be skipped
6375 if ( ref->getTarget().getContentType() == ObjectFile::Atom::kBranchIsland ) {
6376 uint64_t finalTargetAddress = ((BranchIslandAtom<arm>*)(&(ref->getTarget())))->getFinalTargetAdress();
6377 int64_t altDisplacement = finalTargetAddress - (inAtom->getAddress() + ref->getFixUpOffset());
6378 if ( (altDisplacement < 33554428LL) && (altDisplacement > (-33554432LL)) ) {
6379 //fprintf(stderr, "using altDisplacement = %lld\n", altDisplacement);
6380 // yes, we can skip the branch island
6381 displacement = altDisplacement;
6382 }
6383 }
6384 // The pc added will be +8 from the pc
6385 displacement -= 8;
6386 //fprintf(stderr, "bl/blx fixup to %s at 0x%08llX, displacement = 0x%08llX\n", ref->getTarget().getDisplayName(), ref->getTarget().getAddress(), displacement);
6387 // max positive displacement is 0x007FFFFF << 2
6388 // max negative displacement is 0xFF800000 << 2
6389 if ( (displacement > 33554428LL) || (displacement < (-33554432LL)) ) {
6390 throwf("b/bl/blx out of range (%lld max is +/-32M) from 0x%08llX %s in %s to 0x%08llX %s in %s",
6391 displacement, inAtom->getAddress(), inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6392 ref->getTarget().getAddress(), ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6393 }
6394 instruction = LittleEndian::get32(*fixUp);
6395 // Make sure we are calling arm with bl, thumb with blx
6396 is_bl = ((instruction & 0xFF000000) == 0xEB000000);
6397 is_blx = ((instruction & 0xFE000000) == 0xFA000000);
6398 if ( is_bl && ref->getTarget().isThumb() ) {
6399 uint32_t opcode = 0xFA000000;
6400 uint32_t disp = (uint32_t)(displacement >> 2) & 0x00FFFFFF;
6401 uint32_t h_bit = (uint32_t)(displacement << 23) & 0x01000000;
6402 newInstruction = opcode | h_bit | disp;
6403 }
6404 else if ( is_blx && !ref->getTarget().isThumb() ) {
6405 uint32_t opcode = 0xEB000000;
6406 uint32_t disp = (uint32_t)(displacement >> 2) & 0x00FFFFFF;
6407 newInstruction = opcode | disp;
6408 }
6409 else if ( !is_bl && !is_blx && ref->getTarget().isThumb() ) {
6410 throwf("don't know how to convert instruction %x referencing %s to thumb",
6411 instruction, ref->getTarget().getDisplayName());
6412 }
6413 else {
6414 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(displacement >> 2) & 0x00FFFFFF);
6415 }
6416 LittleEndian::set32(*fixUp, newInstruction);
6417 break;
6418 case arm::kThumbBranch22WeakImport:
6419 case arm::kThumbBranch22:
6420 instruction = LittleEndian::get32(*fixUp);
6421 is_bl = ((instruction & 0xD000F800) == 0xD000F000);
6422 is_blx = ((instruction & 0xD000F800) == 0xC000F000);
6423 targetIsThumb = ref->getTarget().isThumb();
6424
6425 // The pc added will be +4 from the pc
6426 baseAddr = inAtom->getAddress() + ref->getFixUpOffset() + 4;
6427 // If the target is not thumb, we will be generating a blx instruction
6428 // Since blx cannot have the low bit set, set bit[1] of the target to
6429 // bit[1] of the base address, so that the difference is a multiple of
6430 // 4 bytes.
6431 if ( !targetIsThumb ) {
6432 targetAddr &= -3ULL;
6433 targetAddr |= (baseAddr & 2LL);
6434 }
6435 displacement = targetAddr - baseAddr;
6436
6437 // max positive displacement is 0x003FFFFE
6438 // max negative displacement is 0xFFC00000
6439 if ( (displacement > 4194302LL) || (displacement < (-4194304LL)) ) {
6440 // armv7 supports a larger displacement
6441 if ( fOptions.preferSubArchitecture() && fOptions.subArchitecture() == CPU_SUBTYPE_ARM_V7 ) {
6442 if ( (displacement > 16777214) || (displacement < (-16777216LL)) ) {
6443 throwf("thumb bl/blx out of range (%lld max is +/-16M) from %s in %s to %s in %s",
6444 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6445 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6446 }
6447 else {
6448 // The instruction is really two instructions:
6449 // The lower 16 bits are the first instruction, which contains the high
6450 // 11 bits of the displacement.
6451 // The upper 16 bits are the second instruction, which contains the low
6452 // 11 bits of the displacement, as well as differentiating bl and blx.
6453 uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
6454 uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
6455 uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
6456 uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
6457 uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
6458 uint32_t j1 = (i1 == s);
6459 uint32_t j2 = (i2 == s);
6460 if ( is_bl ) {
6461 if ( targetIsThumb )
6462 opcode = 0xD000F000; // keep bl
6463 else
6464 opcode = 0xC000F000; // change to blx
6465 }
6466 else if ( is_blx ) {
6467 if ( targetIsThumb )
6468 opcode = 0xD000F000; // change to bl
6469 else
6470 opcode = 0xC000F000; // keep blx
6471 }
6472 else if ( !is_bl && !is_blx && !targetIsThumb ) {
6473 throwf("don't know how to convert instruction %x referencing %s to arm",
6474 instruction, ref->getTarget().getDisplayName());
6475 }
6476 nextDisp = (j1 << 13) | (j2 << 11) | imm11;
6477 firstDisp = (s << 10) | imm10;
6478 newInstruction = opcode | (nextDisp << 16) | firstDisp;
6479 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
6480 // s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
6481 LittleEndian::set32(*fixUp, newInstruction);
6482 }
6483 }
6484 else {
6485 throwf("thumb bl/blx out of range (%lld max is +/-4M) from %s in %s to %s in %s",
6486 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6487 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6488 }
6489 }
6490 else {
6491 // The instruction is really two instructions:
6492 // The lower 16 bits are the first instruction, which contains the high
6493 // 11 bits of the displacement.
6494 // The upper 16 bits are the second instruction, which contains the low
6495 // 11 bits of the displacement, as well as differentiating bl and blx.
6496 firstDisp = (uint32_t)(displacement >> 12) & 0x7FF;
6497 nextDisp = (uint32_t)(displacement >> 1) & 0x7FF;
6498 if ( is_bl && !targetIsThumb ) {
6499 opcode = 0xE800F000;
6500 }
6501 else if ( is_blx && targetIsThumb ) {
6502 opcode = 0xF800F000;
6503 }
6504 else if ( !is_bl && !is_blx && !targetIsThumb ) {
6505 throwf("don't know how to convert instruction %x referencing %s to arm",
6506 instruction, ref->getTarget().getDisplayName());
6507 }
6508 else {
6509 opcode = instruction & 0xF800F800;
6510 }
6511 newInstruction = opcode | (nextDisp << 16) | firstDisp;
6512 LittleEndian::set32(*fixUp, newInstruction);
6513 }
6514 break;
6515 case arm::kDtraceProbeSite:
6516 if ( inAtom->isThumb() ) {
6517 // change 32-bit blx call site to two thumb NOPs
6518 LittleEndian::set32(*fixUp, 0x46C046C0);
6519 }
6520 else {
6521 // change call site to a NOP
6522 LittleEndian::set32(*fixUp, 0xE1A00000);
6523 }
6524 break;
6525 case arm::kDtraceIsEnabledSite:
6526 if ( inAtom->isThumb() ) {
6527 // change 32-bit blx call site to 'nop', 'eor r0, r0'
6528 LittleEndian::set32(*fixUp, 0x46C04040);
6529 }
6530 else {
6531 // change call site to 'eor r0, r0, r0'
6532 LittleEndian::set32(*fixUp, 0xE0200000);
6533 }
6534 break;
6535 case arm::kDtraceTypeReference:
6536 case arm::kDtraceProbe:
6537 // nothing to fix up
6538 break;
6539 case arm::kPointerDiff12:
6540 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6541 if ( (displacement > 4092LL) || (displacement <-4092LL) ) {
6542 throwf("ldr 12-bit displacement out of range (%lld max +/-4096) in %s", displacement, inAtom->getDisplayName());
6543 }
6544 instruction = LittleEndian::get32(*fixUp);
6545 if ( displacement >= 0 ) {
6546 instruction &= 0xFFFFF000;
6547 instruction |= ((uint32_t)displacement & 0xFFF);
6548 }
6549 else {
6550 instruction &= 0xFF7FF000;
6551 instruction |= ((uint32_t)(-displacement) & 0xFFF);
6552 }
6553 LittleEndian::set32(*fixUp, instruction);
6554 break;
6555 }
6556 }
6557
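// Illustrative sketch (editor's addition, not referenced by the writer): the
// armv7 bl/blx fix-up above assembles the 32-bit value stored at the fix-up
// location from the byte displacement and an opcode constant (0xD000F000 for
// bl, 0xC000F000 for blx), keeping the first Thumb halfword in the low 16
// bits.  Factored out, that encoding step is:
static inline uint32_t sketchEncodeThumb2Branch25(int64_t displacement, uint32_t opcode)
{
	uint32_t s     = (uint32_t)(displacement >> 24) & 0x1;
	uint32_t i1    = (uint32_t)(displacement >> 23) & 0x1;
	uint32_t i2    = (uint32_t)(displacement >> 22) & 0x1;
	uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
	uint32_t imm11 = (uint32_t)(displacement >>  1) & 0x7FF;
	uint32_t j1    = (i1 == s);		// J1 = NOT(I1 XOR S)
	uint32_t j2    = (i2 == s);		// J2 = NOT(I2 XOR S)
	uint32_t firstDisp = (s << 10) | imm10;					// first halfword payload
	uint32_t nextDisp  = (j1 << 13) | (j2 << 11) | imm11;	// second halfword payload
	return opcode | (nextDisp << 16) | firstDisp;
}
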
6558 template <>
6559 void Writer<arm>::fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
6560 {
6561 int64_t displacement;
6562 uint32_t instruction;
6563 uint32_t newInstruction;
6564 uint64_t targetAddr = 0;
6565 int64_t baseAddr;
6566 uint32_t firstDisp;
6567 uint32_t nextDisp;
6568 uint32_t opcode = 0;
6569 int32_t diff;
6570 bool relocateableExternal = false;
6571 bool is_bl;
6572 bool is_blx;
6573 bool targetIsThumb;
6574
6575 if ( ref->getTargetBinding() != ObjectFile::Reference::kDontBind ) {
6576 targetAddr = ref->getTarget().getAddress() + ref->getTargetOffset();
6577 relocateableExternal = this->makesExternalRelocatableReference(ref->getTarget());
6578 }
6579
6580 uint32_t* fixUp = (uint32_t*)&buffer[ref->getFixUpOffset()];
6581 switch ( (arm::ReferenceKinds)(ref->getKind()) ) {
6582 case arm::kNoFixUp:
6583 case arm::kFollowOn:
6584 case arm::kGroupSubordinate:
6585 // do nothing
6586 break;
6587 case arm::kPointer:
6588 case arm::kReadOnlyPointer:
6589 case arm::kPointerWeakImport:
6590 if ( ((SectionInfo*)inAtom->getSection())->fAllNonLazyPointers ) {
6591 // indirect symbol table has INDIRECT_SYMBOL_LOCAL, so we must put address in content
6592 if ( this->indirectSymbolInRelocatableIsLocal(ref) )
6593 LittleEndian::set32(*fixUp, targetAddr);
6594 else
6595 LittleEndian::set32(*fixUp, 0);
6596 }
6597 else if ( relocateableExternal ) {
6598 if ( fOptions.prebind() ) {
6599 switch (ref->getTarget().getDefinitionKind()) {
6600 case ObjectFile::Atom::kExternalDefinition:
6601 case ObjectFile::Atom::kExternalWeakDefinition:
6602 // prebound external relocation ==> pointer contains addend
6603 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6604 break;
6605 case ObjectFile::Atom::kTentativeDefinition:
6606 case ObjectFile::Atom::kRegularDefinition:
6607 case ObjectFile::Atom::kWeakDefinition:
6608 // prebound external relocation to internal atom ==> pointer contains target address + addend
6609 LittleEndian::set32(*fixUp, targetAddr);
6610 break;
6611 case ObjectFile::Atom::kAbsoluteSymbol:
6612 break;
6613 }
6614 }
6615 }
6616 else {
6617 // internal relocation => pointer contains target address
6618 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0) )
6619 targetAddr |= 1;
6620 LittleEndian::set32(*fixUp, targetAddr);
6621 }
6622 break;
6623 case arm::kPointerDiff:
6624 diff = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6625 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0) )
6626 diff |= 1;
6627 LittleEndian::set32(*fixUp, diff);
6628 break;
6629 case arm::kDtraceProbeSite:
6630 case arm::kDtraceIsEnabledSite:
6631 case arm::kBranch24WeakImport:
6632 case arm::kBranch24:
6633 displacement = targetAddr - (inAtom->getAddress() + ref->getFixUpOffset());
6634 // The pc added will be +8 from the pc
6635 displacement -= 8;
6636 // fprintf(stderr, "b/bl/blx fixup to %s at 0x%08llX, displacement = 0x%08llX\n", ref->getTarget().getDisplayName(), ref->getTarget().getAddress(), displacement);
6637 if ( relocateableExternal ) {
6638 // doing "ld -r" to an external symbol
6639 // the mach-o way of encoding this is that the bl instruction's target addr is the offset into the target
6640 displacement -= ref->getTarget().getAddress();
6641 }
6642 else {
6643 // max positive displacement is 0x007FFFFF << 2
6644 // max negative displacement is 0xFF800000 << 2
6645 if ( (displacement > 33554428LL) || (displacement < (-33554432LL)) ) {
6646 throwf("arm b/bl/blx out of range (%lld max is +/-32M) from %s in %s to %s in %s",
6647 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6648 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6649 }
6650 }
6651 instruction = LittleEndian::get32(*fixUp);
6652 // Make sure we are calling arm with bl, thumb with blx
6653 is_bl = ((instruction & 0xFF000000) == 0xEB000000);
6654 is_blx = ((instruction & 0xFE000000) == 0xFA000000);
6655 if ( is_bl && ref->getTarget().isThumb() ) {
6656 uint32_t opcode = 0xFA000000;
6657 uint32_t disp = (uint32_t)(displacement >> 2) & 0x00FFFFFF;
6658 uint32_t h_bit = (uint32_t)(displacement << 23) & 0x01000000;
6659 newInstruction = opcode | h_bit | disp;
6660 }
6661 else if ( is_blx && !ref->getTarget().isThumb() ) {
6662 uint32_t opcode = 0xEB000000;
6663 uint32_t disp = (uint32_t)(displacement >> 2) & 0x00FFFFFF;
6664 newInstruction = opcode | disp;
6665 }
6666 else if ( !is_bl && !is_blx && ref->getTarget().isThumb() ) {
6667 throwf("don't know how to convert instruction %x referencing %s to thumb",
6668 instruction, ref->getTarget().getDisplayName());
6669 }
6670 else {
6671 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(displacement >> 2) & 0x00FFFFFF);
6672 }
6673 LittleEndian::set32(*fixUp, newInstruction);
6674 break;
6675 case arm::kThumbBranch22WeakImport:
6676 case arm::kThumbBranch22:
6677 instruction = LittleEndian::get32(*fixUp);
6678 is_bl = ((instruction & 0xF8000000) == 0xF8000000);
6679 is_blx = ((instruction & 0xF8000000) == 0xE8000000);
6680 targetIsThumb = ref->getTarget().isThumb();
6681
6682 // The pc added will be +4 from the pc
6683 baseAddr = inAtom->getAddress() + ref->getFixUpOffset() + 4;
6684 // If the target is not thumb, we will be generating a blx instruction
6685 // Since blx cannot have the low bit set, set bit[1] of the target to
6686 // bit[1] of the base address, so that the difference is a multiple of
6687 // 4 bytes.
6688 if (!targetIsThumb) {
6689 targetAddr &= -3ULL;
6690 targetAddr |= (baseAddr & 2LL);
6691 }
6692 displacement = targetAddr - baseAddr;
6693
6694 //fprintf(stderr, "thumb %s fixup to %s at 0x%08llX, baseAddr = 0x%08llX, displacement = 0x%08llX, %d\n", is_blx ? "blx" : "bl", ref->getTarget().getDisplayName(), targetAddr, baseAddr, displacement, targetIsThumb);
6695 if ( relocateableExternal ) {
6696 // doing "ld -r" to an external symbol
6697 // the mach-o way of encoding this is that the bl instruction's target addr is the offset into the target
6698 displacement -= ref->getTarget().getAddress();
6699 }
6700
6701 if ( (displacement > 4194302LL) || (displacement < (-4194304LL)) ) {
6702 // armv7 supports a larger displacement
6703 if ( fOptions.preferSubArchitecture() && fOptions.subArchitecture() == CPU_SUBTYPE_ARM_V7 ) {
6704 if ( (displacement > 16777214) || (displacement < (-16777216LL)) ) {
6705 throwf("thumb bl/blx out of range (%lld max is +/-16M) from %s in %s to %s in %s",
6706 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6707 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6708 }
6709 else {
6710 // The instruction is really two instructions:
6711 // The lower 16 bits are the first instruction, which contains the high
6712 // 11 bits of the displacement.
6713 // The upper 16 bits are the second instruction, which contains the low
6714 // 11 bits of the displacement, as well as differentiating bl and blx.
6715 uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
6716 uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
6717 uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
6718 uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
6719 uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
6720 uint32_t j1 = (i1 == s);
6721 uint32_t j2 = (i2 == s);
6722 if ( is_bl ) {
6723 if ( targetIsThumb )
6724 opcode = 0xD000F000; // keep bl
6725 else
6726 opcode = 0xC000F000; // change to blx
6727 }
6728 else if ( is_blx ) {
6729 if ( targetIsThumb )
6730 opcode = 0xD000F000; // change to bl
6731 else
6732 opcode = 0xC000F000; // keep blx
6733 }
6734 else if ( !is_bl && !is_blx && !targetIsThumb ) {
6735 throwf("don't know how to convert instruction %x referencing %s to arm",
6736 instruction, ref->getTarget().getDisplayName());
6737 }
6738 nextDisp = (j1 << 13) | (j2 << 11) | imm11;
6739 firstDisp = (s << 10) | imm10;
6740 newInstruction = opcode | (nextDisp << 16) | firstDisp;
6741 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
6742 // s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
6743 LittleEndian::set32(*fixUp, newInstruction);
6744 break;
6745 }
6746 }
6747 else {
6748 throwf("thumb bl/blx out of range (%lld max is +/-4M) from %s in %s to %s in %s",
6749 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6750 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6751 }
6752 }
6753 // The instruction is really two instructions:
6754 // The lower 16 bits are the first instruction, which contains the high
6755 // 11 bits of the displacement.
6756 // The upper 16 bits are the second instruction, which contains the low
6757 // 11 bits of the displacement, as well as differentiating bl and blx.
6758 firstDisp = (uint32_t)(displacement >> 12) & 0x7FF;
6759 nextDisp = (uint32_t)(displacement >> 1) & 0x7FF;
6760 if ( is_bl && !targetIsThumb ) {
6761 opcode = 0xE800F000;
6762 }
6763 else if ( is_blx && targetIsThumb ) {
6764 opcode = 0xF800F000;
6765 }
6766 else if ( !is_bl && !is_blx && !targetIsThumb ) {
6767 throwf("don't know how to convert instruction %x referencing %s to arm",
6768 instruction, ref->getTarget().getDisplayName());
6769 }
6770 else {
6771 opcode = instruction & 0xF800F800;
6772 }
6773 newInstruction = opcode | (nextDisp << 16) | firstDisp;
6774 LittleEndian::set32(*fixUp, newInstruction);
6775 break;
6776 case arm::kDtraceProbe:
6777 case arm::kDtraceTypeReference:
6778 // nothing to fix up
6779 break;
6780 case arm::kPointerDiff12:
6781 throw "internal error. no reloc for 12-bit pointer diffs";
6782 }
6783 }
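// Worked example for the "ld -r" external-branch case above (illustrative
// numbers): a bl at atom address 0x10, fix-up offset 0, referencing an
// undefined symbol with addend 0 computes displacement = 0 - (0x10 + 0) - 8
// = -0x18, so the stored 24-bit field is (-0x18 >> 2) & 0x00FFFFFF = 0xFFFFFA.
// What is stored is "addend minus call-site PC" rather than a resolved branch
// target, so that the later final link, applying the external relocation, can
// add the symbol's actual address back in.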
6784
6785 template <>
6786 void Writer<x86>::fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
6787 {
6788 uint32_t* fixUp = (uint32_t*)&buffer[ref->getFixUpOffset()];
6789 uint8_t* dtraceProbeSite;
6790 const int64_t kTwoGigLimit = 0x7FFFFFFF;
6791 const int64_t kSixteenMegLimit = 0x00FFFFFF;
6792 const int64_t kSixtyFourKiloLimit = 0x7FFF;
6793 const int64_t kOneTwentyEightLimit = 0x7F;
6794 int64_t displacement;
6795 uint32_t temp;
6796 x86::ReferenceKinds kind = (x86::ReferenceKinds)(ref->getKind());
6797 switch ( kind ) {
6798 case x86::kNoFixUp:
6799 case x86::kFollowOn:
6800 case x86::kGroupSubordinate:
6801 // do nothing
6802 break;
6803 case x86::kPointerWeakImport:
6804 case x86::kPointer:
6805 {
6806 if ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal ) {
6807 if ( fOptions.prebind() ) {
6808 switch (ref->getTarget().getDefinitionKind()) {
6809 case ObjectFile::Atom::kExternalDefinition:
6810 case ObjectFile::Atom::kExternalWeakDefinition:
6811 // prebound external relocation ==> pointer contains addend
6812 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6813 break;
6814 case ObjectFile::Atom::kTentativeDefinition:
6815 case ObjectFile::Atom::kRegularDefinition:
6816 case ObjectFile::Atom::kWeakDefinition:
6817 // prebound external relocation to internal atom ==> pointer contains target address + addend
6818 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6819 break;
6820 case ObjectFile::Atom::kAbsoluteSymbol:
6821 break;
6822 }
6823 }
6824 else if ( !fOptions.makeClassicDyldInfo()
6825 && (ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) ) {
6826 // when using only compressed dyld info, pointer is initially set to point directly to weak definition
6827 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6828 }
6829 else {
6830 // external relocation ==> pointer contains addend
6831 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6832 }
6833 }
6834 else {
6835 // pointer contains target address
6836 //printf("Atom::fixUpReferenceFinal() target.name=%s, target.address=0x%08llX\n", target.getDisplayName(), target.getAddress());
6837 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6838 }
6839 }
6840 break;
6841 case x86::kPointerDiff:
6842 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6843 LittleEndian::set32(*fixUp, (uint32_t)displacement);
6844 break;
6845 case x86::kPointerDiff16:
6846 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6847 if ( (displacement > kSixtyFourKiloLimit) || (displacement < -(kSixtyFourKiloLimit)) )
6848 throwf("16-bit pointer diff out of range in %s", inAtom->getDisplayName());
6849 LittleEndian::set16(*((uint16_t*)fixUp), (uint16_t)displacement);
6850 break;
6851 case x86::kPointerDiff24:
6852 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6853 if ( (displacement > kSixteenMegLimit) || (displacement < 0) )
6854 throwf("24-bit pointer diff out of range in %s", inAtom->getDisplayName());
6855 temp = LittleEndian::get32(*fixUp);
6856 temp &= 0xFF000000;
6857 temp |= (displacement & 0x00FFFFFF);
6858 LittleEndian::set32(*fixUp, temp);
6859 break;
6860 case x86::kSectionOffset24:
6861 displacement = ref->getTarget().getSectionOffset();
6862 if ( (displacement > kSixteenMegLimit) || (displacement < 0) )
6863 throwf("24-bit pointer diff out of range in %s", inAtom->getDisplayName());
6864 temp = LittleEndian::get32(*fixUp);
6865 temp &= 0xFF000000;
6866 temp |= (displacement & 0x00FFFFFF);
6867 LittleEndian::set32(*fixUp, temp);
6868 break;
6869 case x86::kDtraceProbeSite:
6870 // change call site to a NOP
6871 dtraceProbeSite = (uint8_t*)fixUp;
6872 dtraceProbeSite[-1] = 0x90; // 1-byte nop
6873 dtraceProbeSite[0] = 0x0F; // 4-byte nop
6874 dtraceProbeSite[1] = 0x1F;
6875 dtraceProbeSite[2] = 0x40;
6876 dtraceProbeSite[3] = 0x00;
6877 break;
6878 case x86::kDtraceIsEnabledSite:
6879 // change call site to a clear eax
6880 dtraceProbeSite = (uint8_t*)fixUp;
6881 dtraceProbeSite[-1] = 0x33; // xorl eax,eax
6882 dtraceProbeSite[0] = 0xC0;
6883 dtraceProbeSite[1] = 0x90; // 1-byte nop
6884 dtraceProbeSite[2] = 0x90; // 1-byte nop
6885 dtraceProbeSite[3] = 0x90; // 1-byte nop
6886 break;
6887 case x86::kPCRel32WeakImport:
6888 case x86::kPCRel32:
6889 case x86::kPCRel16:
6890 case x86::kPCRel8:
6891 displacement = 0;
6892 switch ( ref->getTarget().getDefinitionKind() ) {
6893 case ObjectFile::Atom::kRegularDefinition:
6894 case ObjectFile::Atom::kWeakDefinition:
6895 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
6896 break;
6897 case ObjectFile::Atom::kExternalDefinition:
6898 case ObjectFile::Atom::kExternalWeakDefinition:
6899 throw "codegen problem, can't use rel32 to external symbol";
6900 case ObjectFile::Atom::kTentativeDefinition:
6901 displacement = 0;
6902 break;
6903 case ObjectFile::Atom::kAbsoluteSymbol:
6904 displacement = (ref->getTarget().getSectionOffset() + ref->getTargetOffset()) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
6905 break;
6906 }
6907 if ( kind == x86::kPCRel8 ) {
6908 displacement += 3;
6909 if ( (displacement > kOneTwentyEightLimit) || (displacement < -(kOneTwentyEightLimit)) ) {
6910 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
6911 throwf("rel8 out of range in %s", inAtom->getDisplayName());
6912 }
6913 *(int8_t*)fixUp = (int8_t)displacement;
6914 }
6915 else if ( kind == x86::kPCRel16 ) {
6916 displacement += 2;
6917 if ( (displacement > kSixtyFourKiloLimit) || (displacement < -(kSixtyFourKiloLimit)) ) {
6918 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
6919 throwf("rel16 out of range in %s", inAtom->getDisplayName());
6920 }
6921 LittleEndian::set16(*((uint16_t*)fixUp), (uint16_t)displacement);
6922 }
6923 else {
6924 if ( (displacement > kTwoGigLimit) || (displacement < (-kTwoGigLimit)) ) {
6925 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
6926 throwf("rel32 out of range in %s", inAtom->getDisplayName());
6927 }
6928 LittleEndian::set32(*fixUp, (int32_t)displacement);
6929 }
6930 break;
6931 case x86::kAbsolute32:
6932 switch ( ref->getTarget().getDefinitionKind() ) {
6933 case ObjectFile::Atom::kRegularDefinition:
6934 case ObjectFile::Atom::kWeakDefinition:
6935 case ObjectFile::Atom::kTentativeDefinition:
6936 // pointer contains target address
6937 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6938 break;
6939 case ObjectFile::Atom::kExternalDefinition:
6940 case ObjectFile::Atom::kExternalWeakDefinition:
6941 // external relocation ==> pointer contains addend
6942 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6943 break;
6944 case ObjectFile::Atom::kAbsoluteSymbol:
6945 // pointer contains target address
6946 LittleEndian::set32(*fixUp, ref->getTarget().getSectionOffset() + ref->getTargetOffset());
6947 break;
6948 }
6949 break;
6950 case x86::kImageOffset32:
6951 // offset of target atom from mach_header
6952 displacement = ref->getTarget().getAddress() + ref->getTargetOffset() - fMachHeaderAtom->getAddress();
6953 LittleEndian::set32(*fixUp, (int32_t)displacement);
6954 break;
6955 case x86::kDtraceTypeReference:
6956 case x86::kDtraceProbe:
6957 // nothing to fix up
6958 break;
6959 }
6960 }
6961
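// Worked example for the kPCRel32 case above (illustrative addresses): a call
// in an atom at address 0x1F00 with fix-up offset 8 targeting an atom at
// 0x2000 stores 0x2000 - (0x1F00 + 8 + 4) = 0xF4, the displacement from the
// end of the 4-byte immediate (the next instruction) to the target.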
6962
6963
6964 template <>
6965 void Writer<x86>::fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
6966 {
6967 const int64_t kTwoGigLimit = 0x7FFFFFFF;
6968 const int64_t kSixtyFourKiloLimit = 0x7FFF;
6969 const int64_t kOneTwentyEightLimit = 0x7F;
6970 uint32_t* fixUp = (uint32_t*)&buffer[ref->getFixUpOffset()];
6971 bool isExtern = this->makesExternalRelocatableReference(ref->getTarget());
6972 int64_t displacement;
6973 x86::ReferenceKinds kind = (x86::ReferenceKinds)(ref->getKind());
6974 switch ( kind ) {
6975 case x86::kNoFixUp:
6976 case x86::kFollowOn:
6977 case x86::kGroupSubordinate:
6978 // do nothing
6979 break;
6980 case x86::kPointer:
6981 case x86::kPointerWeakImport:
6982 case x86::kAbsolute32:
6983 {
6984 if ( ((SectionInfo*)inAtom->getSection())->fAllNonLazyPointers ) {
6985 // if INDIRECT_SYMBOL_LOCAL the content is pointer, else it is zero
6986 if ( this->indirectSymbolInRelocatableIsLocal(ref) )
6987 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6988 else
6989 LittleEndian::set32(*fixUp, 0);
6990 }
6991 else if ( isExtern ) {
6992 // external relocation ==> pointer contains addend
6993 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6994 }
6995 else if ( ref->getTarget().getDefinitionKind() != ObjectFile::Atom::kTentativeDefinition ) {
6996 // internal relocation => pointer contains target address
6997 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6998 }
6999 else {
7000 // internal relocation to tentative ==> pointer contains addend
7001 LittleEndian::set32(*fixUp, ref->getTargetOffset());
7002 }
7003 }
7004 break;
7005 case x86::kPointerDiff:
7006 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
7007 LittleEndian::set32(*fixUp, (uint32_t)displacement);
7008 break;
7009 case x86::kPointerDiff16:
7010 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
7011 if ( (displacement > kSixtyFourKiloLimit) || (displacement < -(kSixtyFourKiloLimit)) )
7012 throwf("16-bit pointer diff out of range in %s", inAtom->getDisplayName());
7013 LittleEndian::set16(*((uint16_t*)fixUp), (uint16_t)displacement);
7014 break;
7015 case x86::kPCRel8:
7016 case x86::kPCRel16:
7017 case x86::kPCRel32:
7018 case x86::kPCRel32WeakImport:
7019 case x86::kDtraceProbeSite:
7020 case x86::kDtraceIsEnabledSite:
7021 {
7022 if ( isExtern )
7023 displacement = ref->getTargetOffset() - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
7024 else
7025 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
7026 if ( kind == x86::kPCRel8 ) {
7027 displacement += 3;
7028 if ( (displacement > kOneTwentyEightLimit) || (displacement < -(kOneTwentyEightLimit)) ) {
7029 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7030 throwf("rel8 out of range (%lld)in %s", displacement, inAtom->getDisplayName());
7031 }
7032 int8_t byte = (int8_t)displacement;
7033 *((int8_t*)fixUp) = byte;
7034 }
7035 else if ( kind == x86::kPCRel16 ) {
7036 displacement += 2;
7037 if ( (displacement > kSixtyFourKiloLimit) || (displacement < -(kSixtyFourKiloLimit)) ) {
7038 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7039 throwf("rel16 out of range in %s", inAtom->getDisplayName());
7040 }
7041 int16_t word = (int16_t)displacement;
7042 LittleEndian::set16(*((uint16_t*)fixUp), word);
7043 }
7044 else {
7045 if ( (displacement > kTwoGigLimit) || (displacement < (-kTwoGigLimit)) ) {
7046 //fprintf(stderr, "call out of range, displacement=ox%llX, from %s in %s to %s in %s\n", displacement,
7047 // inAtom->getDisplayName(), inAtom->getFile()->getPath(), ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
7048 throwf("rel32 out of range in %s", inAtom->getDisplayName());
7049 }
7050 LittleEndian::set32(*fixUp, (int32_t)displacement);
7051 }
7052 }
7053 break;
7054 case x86::kPointerDiff24:
7055 throw "internal linker error, kPointerDiff24 can't be encoded into object files";
7056 case x86::kImageOffset32:
7057 throw "internal linker error, kImageOffset32 can't be encoded into object files";
7058 case x86::kSectionOffset24:
7059 throw "internal linker error, kSectionOffset24 can't be encoded into object files";
7060 case x86::kDtraceProbe:
7061 case x86::kDtraceTypeReference:
7062 // nothing to fix up
7063 break;
7064 }
7065 }
7066
7067 template <>
7068 void Writer<x86_64>::fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7069 {
7070 const int64_t twoGigLimit = 0x7FFFFFFF;
7071 const int64_t kSixteenMegLimit = 0x00FFFFFF;
7072 uint64_t* fixUp = (uint64_t*)&buffer[ref->getFixUpOffset()];
7073 uint8_t* dtraceProbeSite;
7074 int64_t displacement = 0;
7075 uint32_t temp;
7076 switch ( (x86_64::ReferenceKinds)(ref->getKind()) ) {
7077 case x86_64::kNoFixUp:
7078 case x86_64::kGOTNoFixUp:
7079 case x86_64::kFollowOn:
7080 case x86_64::kGroupSubordinate:
7081 // do nothing
7082 break;
7083 case x86_64::kPointerWeakImport:
7084 case x86_64::kPointer:
7085 {
7086 if ( &ref->getTarget() != NULL ) {
7087 //fprintf(stderr, "fixUpReferenceFinal: %s reference to %s\n", this->getDisplayName(), target.getDisplayName());
7088 if ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal) {
7089 if ( !fOptions.makeClassicDyldInfo()
7090 && (ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) ) {
7091 // when using only compressed dyld info, pointer is initially set to point directly to weak definition
7092 LittleEndian::set64(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
7093 }
7094 else {
7095 // external relocation ==> pointer contains addend
7096 LittleEndian::set64(*fixUp, ref->getTargetOffset());
7097 }
7098 }
7099 else {
7100 // internal relocation
7101 // pointer contains target address
7102 //printf("Atom::fixUpReferenceFinal) target.name=%s, target.address=0x%08llX\n", target.getDisplayName(), target.getAddress());
7103 LittleEndian::set64(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
7104 }
7105 }
7106 }
7107 break;
7108 case x86_64::kPointer32:
7109 {
7110 //fprintf(stderr, "fixUpReferenceFinal: %s reference to %s\n", this->getDisplayName(), target.getDisplayName());
7111 if ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal ) {
7112 // external relocation
7113 throwf("32-bit pointer to dylib or weak symbol %s not supported for x86_64",ref->getTarget().getDisplayName());
7114 }
7115 else {
7116 // internal relocation
7117 // pointer contains target address
7118 //printf("Atom::fixUpReferenceFinal) target.name=%s, target.address=0x%08llX\n", target.getDisplayName(), target.getAddress());
7119 displacement = ref->getTarget().getAddress() + ref->getTargetOffset();
7120 switch ( fOptions.outputKind() ) {
7121 case Options::kObjectFile:
7122 case Options::kPreload:
7123 case Options::kDyld:
7124 case Options::kDynamicLibrary:
7125 case Options::kDynamicBundle:
7126 case Options::kKextBundle:
7127 throwf("32-bit pointer to symbol %s not supported for x86_64",ref->getTarget().getDisplayName());
7128 case Options::kDynamicExecutable:
7129 // <rdar://problem/5855588> allow x86_64 main executables to use 32-bit pointers if the program loads in the low 2GB
7130 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) )
7131 throw "32-bit pointer out of range";
7132 break;
7133 case Options::kStaticExecutable:
7134 // <rdar://problem/5855588> allow x86_64 mach_kernel to truncate pointers
7135 break;
7136 }
7137 LittleEndian::set32(*((uint32_t*)fixUp), (uint32_t)displacement);
7138 }
7139 }
7140 break;
7141 case x86_64::kPointerDiff32:
7142 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
7143 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) )
7144 throw "32-bit pointer difference out of range";
7145 LittleEndian::set32(*((uint32_t*)fixUp), (uint32_t)displacement);
7146 break;
7147 case x86_64::kPointerDiff:
7148 LittleEndian::set64(*fixUp,
7149 (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset()) );
7150 break;
7151 case x86_64::kPointerDiff24:
7152 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
7153 if ( (displacement > kSixteenMegLimit) || (displacement < 0) )
7154 throwf("24-bit pointer diff out of range in %s", inAtom->getDisplayName());
7155 temp = LittleEndian::get32(*((uint32_t*)fixUp));
7156 temp &= 0xFF000000;
7157 temp |= (displacement & 0x00FFFFFF);
7158 LittleEndian::set32(*((uint32_t*)fixUp), temp);
7159 break;
7160 case x86_64::kSectionOffset24:
7161 displacement = ref->getTarget().getSectionOffset();
7162 if ( (displacement > kSixteenMegLimit) || (displacement < 0) )
7163 throwf("24-bit pointer diff out of range in %s", inAtom->getDisplayName());
7164 temp = LittleEndian::get32(*((uint32_t*)fixUp));
7165 temp &= 0xFF000000;
7166 temp |= (displacement & 0x00FFFFFF);
7167 LittleEndian::set32(*((uint32_t*)fixUp), temp);
7168 break;
7169 case x86_64::kPCRel32GOTLoad:
7170 case x86_64::kPCRel32GOTLoadWeakImport:
7171 // if GOT entry was optimized away, change movq instruction to a leaq
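// Sketch of the rewrite (byte layout assumed for the common RIP-relative form):
//    48 8b 05 xx xx xx xx   movq  foo@GOTPCREL(%rip), %rax
// becomes
//    48 8d 05 xx xx xx xx   leaq  foo(%rip), %rax
// only the opcode byte two bytes before the fix-up changes (0x8B -> 0x8D); the REX
// prefix, ModRM byte and 32-bit displacement slot are reused, and the rel32 value
// is then written by the shared rel32 code below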
7172 if ( std::find(fAllSynthesizedNonLazyPointers.begin(), fAllSynthesizedNonLazyPointers.end(), &(ref->getTarget())) == fAllSynthesizedNonLazyPointers.end() ) {
7173 //fprintf(stderr, "GOT for %s optimized away\n", ref->getTarget().getDisplayName());
7174 uint8_t* opcodes = (uint8_t*)fixUp;
7175 if ( opcodes[-2] != 0x8B )
7176 throw "GOT load reloc does not point to a movq instruction";
7177 opcodes[-2] = 0x8D;
7178 }
7179 // fall into general rel32 case
7180 case x86_64::kBranchPCRel32WeakImport:
7181 case x86_64::kBranchPCRel32:
7182 case x86_64::kBranchPCRel8:
7183 case x86_64::kPCRel32:
7184 case x86_64::kPCRel32_1:
7185 case x86_64::kPCRel32_2:
7186 case x86_64::kPCRel32_4:
7187 case x86_64::kPCRel32GOT:
7188 case x86_64::kPCRel32GOTWeakImport:
7189 switch ( ref->getTarget().getDefinitionKind() ) {
7190 case ObjectFile::Atom::kRegularDefinition:
7191 case ObjectFile::Atom::kWeakDefinition:
7192 case ObjectFile::Atom::kTentativeDefinition:
7193 displacement = (ref->getTarget().getAddress() + (int32_t)ref->getTargetOffset()) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
7194 break;
7195 case ObjectFile::Atom::kAbsoluteSymbol:
7196 displacement = (ref->getTarget().getSectionOffset() + (int32_t)ref->getTargetOffset()) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
7197 break;
7198 case ObjectFile::Atom::kExternalDefinition:
7199 case ObjectFile::Atom::kExternalWeakDefinition:
7200 if ( fOptions.outputKind() == Options::kKextBundle )
7201 displacement = 0;
7202 else
7203 throwf("codegen problem, can't use rel32 to external symbol %s", ref->getTarget().getDisplayName());
7204 break;
7205 }
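// added explanation: the _1/_2/_4 kinds (the SIGNED_1/2/4 relocation flavors) are used
// when the rel32 field is followed by 1, 2 or 4 more bytes of the same instruction
// (e.g. an immediate operand), so the PC at execution time is that much further along;
// the adjustments below subtract the extra length. kBranchPCRel8 adds 3 because the
// displacement above was computed for a 4-byte field but the branch field is 1 byte.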
7206 switch ( ref->getKind() ) {
7207 case x86_64::kPCRel32_1:
7208 displacement -= 1;
7209 break;
7210 case x86_64::kPCRel32_2:
7211 displacement -= 2;
7212 break;
7213 case x86_64::kPCRel32_4:
7214 displacement -= 4;
7215 break;
7216 case x86_64::kBranchPCRel8:
7217 displacement += 3;
7218 break;
7219 }
7220 if ( ref->getKind() == x86_64::kBranchPCRel8 ) {
7221 if ( (displacement > 127) || (displacement < (-128)) ) {
7222 fprintf(stderr, "branch out of range from %s (%llX) in %s to %s (%llX) in %s\n",
7223 inAtom->getDisplayName(), inAtom->getAddress(), inAtom->getFile()->getPath(), ref->getTarget().getDisplayName(), ref->getTarget().getAddress(), ref->getTarget().getFile()->getPath());
7224 throw "rel8 out of range";
7225 }
7226 *((int8_t*)fixUp) = (int8_t)displacement;
7227 }
7228 else {
7229 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
7230 fprintf(stderr, "reference out of range from %s (%llX) in %s to %s (%llX) in %s\n",
7231 inAtom->getDisplayName(), inAtom->getAddress(), inAtom->getFile()->getPath(), ref->getTarget().getDisplayName(), ref->getTarget().getAddress(), ref->getTarget().getFile()->getPath());
7232 throw "rel32 out of range";
7233 }
7234 LittleEndian::set32(*((uint32_t*)fixUp), (int32_t)displacement);
7235 }
7236 break;
7237 case x86_64::kImageOffset32:
7238 // offset of target atom from mach_header
7239 displacement = ref->getTarget().getAddress() + ref->getTargetOffset() - fMachHeaderAtom->getAddress();
7240 LittleEndian::set32(*((uint32_t*)fixUp), (int32_t)displacement);
7241 break;
7242 case x86_64::kDtraceProbeSite:
7243 // change call site to a NOP
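// added explanation: the probe site is a 5-byte call to the dtrace probe symbol
// (e8 xx xx xx xx); fixUp points at the 4-byte displacement, so [-1] is the call
// opcode. It is overwritten with a 1-byte nop (90) followed by the canonical 4-byte
// nop (0f 1f 40 00), leaving 5 bytes of nop where the call used to be.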
7244 dtraceProbeSite = (uint8_t*)fixUp;
7245 dtraceProbeSite[-1] = 0x90; // 1-byte nop
7246 dtraceProbeSite[0] = 0x0F; // 4-byte nop
7247 dtraceProbeSite[1] = 0x1F;
7248 dtraceProbeSite[2] = 0x40;
7249 dtraceProbeSite[3] = 0x00;
7250 break;
7251 case x86_64::kDtraceIsEnabledSite:
7252 // change call site to a clear eax
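// added explanation: likewise a 5-byte call; the first three bytes become
// 48 33 c0 (xorq %rax,%rax) padded with two 1-byte nops, so in a final linked
// image an is-enabled probe returns zero ("disabled") unless dtrace patches it
// at run time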
7253 dtraceProbeSite = (uint8_t*)fixUp;
7254 dtraceProbeSite[-1] = 0x48; // xorq rax,rax
7255 dtraceProbeSite[0] = 0x33;
7256 dtraceProbeSite[1] = 0xC0;
7257 dtraceProbeSite[2] = 0x90; // 1-byte nop
7258 dtraceProbeSite[3] = 0x90; // 1-byte nop
7259 break;
7260 case x86_64::kDtraceTypeReference:
7261 case x86_64::kDtraceProbe:
7262 // nothing to fix up
7263 break;
7264 }
7265 }
7266
7267 template <>
7268 void Writer<x86_64>::fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7269 {
7270 const int64_t twoGigLimit = 0x7FFFFFFF;
7271 bool external = this->makesExternalRelocatableReference(ref->getTarget());
7272 uint64_t* fixUp = (uint64_t*)&buffer[ref->getFixUpOffset()];
7273 int64_t displacement = 0;
7274 int32_t temp32;
7275 switch ( (x86_64::ReferenceKinds)(ref->getKind()) ) {
7276 case x86_64::kNoFixUp:
7277 case x86_64::kGOTNoFixUp:
7278 case x86_64::kFollowOn:
7279 case x86_64::kGroupSubordinate:
7280 // do nothing
7281 break;
7282 case x86_64::kPointer:
7283 case x86_64::kPointerWeakImport:
7284 {
7285 if ( external ) {
7286 // external relocation ==> pointer contains addend
7287 LittleEndian::set64(*fixUp, ref->getTargetOffset());
7288 }
7289 else {
7290 // internal relocation ==> pointer contains target address
7291 LittleEndian::set64(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
7292 }
7293 }
7294 break;
7295 case x86_64::kPointer32:
7296 {
7297 if ( external ) {
7298 // external relocation ==> pointer contains addend
7299 LittleEndian::set32(*((uint32_t*)fixUp), ref->getTargetOffset());
7300 }
7301 else {
7302 // internal relocation ==> pointer contains target address
7303 LittleEndian::set32(*((uint32_t*)fixUp), ref->getTarget().getAddress() + ref->getTargetOffset());
7304 }
7305 }
7306 break;
7307 case x86_64::kPointerDiff32:
7308 displacement = ref->getTargetOffset() - ref->getFromTargetOffset();
7309 if ( ref->getTarget().getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableNotIn )
7310 displacement += ref->getTarget().getAddress();
7311 if ( ref->getFromTarget().getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableNotIn )
7312 displacement -= ref->getFromTarget().getAddress();
7313 LittleEndian::set32(*((uint32_t*)fixUp), displacement);
7314 break;
7315 case x86_64::kPointerDiff:
7316 displacement = ref->getTargetOffset() - ref->getFromTargetOffset();
7317 if ( ref->getTarget().getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableNotIn )
7318 displacement += ref->getTarget().getAddress();
7319 if ( ref->getFromTarget().getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableNotIn )
7320 displacement -= ref->getFromTarget().getAddress();
7321 LittleEndian::set64(*fixUp, displacement);
7322 break;
7323 case x86_64::kBranchPCRel32:
7324 case x86_64::kBranchPCRel32WeakImport:
7325 case x86_64::kDtraceProbeSite:
7326 case x86_64::kDtraceIsEnabledSite:
7327 case x86_64::kPCRel32:
7328 case x86_64::kPCRel32_1:
7329 case x86_64::kPCRel32_2:
7330 case x86_64::kPCRel32_4:
7331 // convert unsigned 64-bit target offset into the signed 32-bit offset the source originally had
7332 temp32 = ref->getTargetOffset();
7333 if ( external ) {
7334 // extern relocation contains addend
7335 displacement = temp32;
7336 }
7337 else {
7338 // internal relocations contain delta to target address
7339 displacement = (ref->getTarget().getAddress() + temp32) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
7340 }
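// added note: same end-of-instruction adjustment as in fixUpReferenceFinal above;
// the _1/_2/_4 kinds have 1, 2 or 4 extra instruction bytes after the rel32 field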
7341 switch ( ref->getKind() ) {
7342 case x86_64::kPCRel32_1:
7343 displacement -= 1;
7344 break;
7345 case x86_64::kPCRel32_2:
7346 displacement -= 2;
7347 break;
7348 case x86_64::kPCRel32_4:
7349 displacement -= 4;
7350 break;
7351 }
7352 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
7353 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7354 throw "rel32 out of range";
7355 }
7356 LittleEndian::set32(*((uint32_t*)fixUp), (int32_t)displacement);
7357 break;
7358 case x86_64::kBranchPCRel8:
7359 // convert unsigned 64-bit target offset into the signed 32-bit offset the source originally had
7360 temp32 = ref->getTargetOffset();
7361 if ( external ) {
7362 // extern relocation contains addend
7363 displacement = temp32;
7364 }
7365 else {
7366 // internal relocations contain delta to target address
7367 displacement = (ref->getTarget().getAddress() + temp32) - (inAtom->getAddress() + ref->getFixUpOffset() + 1);
7368 }
7369 if ( (displacement > 127) || (displacement < (-128)) ) {
7370 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7371 throw "rel8 out of range";
7372 }
7373 *((int8_t*)fixUp) = (int8_t)displacement;
7374 break;
7375 case x86_64::kPCRel32GOT:
7376 case x86_64::kPCRel32GOTLoad:
7377 case x86_64::kPCRel32GOTWeakImport:
7378 case x86_64::kPCRel32GOTLoadWeakImport:
7379 // contains addend (usually zero)
7380 LittleEndian::set32(*((uint32_t*)fixUp), (uint32_t)(ref->getTargetOffset()));
7381 break;
7382 case x86_64::kPointerDiff24:
7383 throw "internal linker error, kPointerDiff24 can't be encoded into object files";
7384 case x86_64::kImageOffset32:
7385 throw "internal linker error, kImageOffset32 can't be encoded into object files";
7386 case x86_64::kSectionOffset24:
7387 throw "internal linker error, kSectionOffset24 can't be encoded into object files";
7388 case x86_64::kDtraceTypeReference:
7389 case x86_64::kDtraceProbe:
7390 // nothing to fix up
7391 break;
7392 }
7393 }
7394
7395 template <>
7396 void Writer<ppc>::fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7397 {
7398 fixUpReference_powerpc(ref, inAtom, buffer, true);
7399 }
7400
7401 template <>
7402 void Writer<ppc64>::fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7403 {
7404 fixUpReference_powerpc(ref, inAtom, buffer, true);
7405 }
7406
7407 template <>
7408 void Writer<ppc>::fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7409 {
7410 fixUpReference_powerpc(ref, inAtom, buffer, false);
7411 }
7412
7413 template <>
7414 void Writer<ppc64>::fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7415 {
7416 fixUpReference_powerpc(ref, inAtom, buffer, false);
7417 }
7418
7419 //
7420 // ppc and ppc64 are mostly the same, so they share a common templated implementation
7421 //
7422 template <typename A>
7423 void Writer<A>::fixUpReference_powerpc(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[], bool finalLinkedImage) const
7424 {
7425 uint32_t instruction;
7426 uint32_t newInstruction;
7427 int64_t displacement;
7428 uint64_t targetAddr = 0;
7429 uint64_t picBaseAddr;
7430 uint16_t instructionLowHalf;
7431 uint16_t instructionHighHalf;
7432 uint32_t* fixUp = (uint32_t*)&buffer[ref->getFixUpOffset()];
7433 pint_t* fixUpPointer = (pint_t*)&buffer[ref->getFixUpOffset()];
7434 bool relocateableExternal = false;
7435 const int64_t picbase_twoGigLimit = 0x80000000;
7436
7437 if ( ref->getTargetBinding() != ObjectFile::Reference::kDontBind ) {
7438 targetAddr = ref->getTarget().getAddress() + ref->getTargetOffset();
7439 if ( finalLinkedImage )
7440 relocateableExternal = (relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal);
7441 else
7442 relocateableExternal = this->makesExternalRelocatableReference(ref->getTarget());
7443 }
7444
7445 switch ( (typename A::ReferenceKinds)(ref->getKind()) ) {
7446 case A::kNoFixUp:
7447 case A::kFollowOn:
7448 case A::kGroupSubordinate:
7449 // do nothing
7450 break;
7451 case A::kPointerWeakImport:
7452 case A::kPointer:
7453 {
7454 //fprintf(stderr, "fixUpReferenceFinal: %s reference to %s\n", this->getDisplayName(), target.getDisplayName());
7455 if ( finalLinkedImage && (((SectionInfo*)inAtom->getSection())->fAllLazyPointers
7456 || ((SectionInfo*)inAtom->getSection())->fAllLazyDylibPointers) ) {
7457 switch (ref->getTarget().getDefinitionKind()) {
7458 case ObjectFile::Atom::kExternalDefinition:
7459 case ObjectFile::Atom::kExternalWeakDefinition:
7460 // prebound lazy pointer to another dylib ==> pointer contains zero
7461 P::setP(*fixUpPointer, 0);
7462 break;
7463 case ObjectFile::Atom::kTentativeDefinition:
7464 case ObjectFile::Atom::kRegularDefinition:
7465 case ObjectFile::Atom::kWeakDefinition:
7466 case ObjectFile::Atom::kAbsoluteSymbol:
7467 // prebound lazy pointer to within this dylib ==> pointer contains address
7468 P::setP(*fixUpPointer, targetAddr);
7469 break;
7470 }
7471 }
7472 else if ( !finalLinkedImage && ((SectionInfo*)inAtom->getSection())->fAllNonLazyPointers ) {
7473 // if INDIRECT_SYMBOL_LOCAL, the content is a pointer, else it is zero
7474 if ( this->indirectSymbolInRelocatableIsLocal(ref) )
7475 P::setP(*fixUpPointer, targetAddr);
7476 else
7477 P::setP(*fixUpPointer, 0);
7478 }
7479 else if ( relocateableExternal ) {
7480 if ( fOptions.prebind() ) {
7481 switch (ref->getTarget().getDefinitionKind()) {
7482 case ObjectFile::Atom::kExternalDefinition:
7483 case ObjectFile::Atom::kExternalWeakDefinition:
7484 // prebound external relocation ==> pointer contains addend
7485 P::setP(*fixUpPointer, ref->getTargetOffset());
7486 break;
7487 case ObjectFile::Atom::kTentativeDefinition:
7488 case ObjectFile::Atom::kRegularDefinition:
7489 case ObjectFile::Atom::kWeakDefinition:
7490 // prebound external relocation to internal atom ==> pointer contains target address + addend
7491 P::setP(*fixUpPointer, targetAddr);
7492 break;
7493 case ObjectFile::Atom::kAbsoluteSymbol:
7494 break;
7495 }
7496 }
7497 else {
7498 // external relocation ==> pointer contains addend
7499 P::setP(*fixUpPointer, ref->getTargetOffset());
7500 }
7501 }
7502 else {
7503 // internal relocation
7504 if ( finalLinkedImage || (ref->getTarget().getDefinitionKind() != ObjectFile::Atom::kTentativeDefinition) ) {
7505 // pointer contains target address
7506 //printf("Atom::fixUpReference_powerpc() target.name=%s, target.address=0x%08llX\n", ref->getTarget().getDisplayName(), targetAddr);
7507 P::setP(*fixUpPointer, targetAddr);
7508 }
7509 else {
7510 // pointer contains addend
7511 P::setP(*fixUpPointer, ref->getTargetOffset());
7512 }
7513 }
7514 }
7515 break;
7516 case A::kPointerDiff64:
7517 P::setP(*fixUpPointer, targetAddr - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset()) );
7518 break;
7519 case A::kPointerDiff32:
7520 P::E::set32(*fixUp, targetAddr - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset()) );
7521 break;
7522 case A::kPointerDiff16:
7523 P::E::set16(*((uint16_t*)fixUp), targetAddr - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset()) );
7524 break;
7525 case A::kDtraceProbeSite:
7526 if ( finalLinkedImage ) {
7527 // change call site to a NOP
7528 BigEndian::set32(*fixUp, 0x60000000);
7529 }
7530 else {
7531 // set bl instruction to branch to address zero in the .o file
7532 int64_t displacement = ref->getTargetOffset() - (inAtom->getAddress() + ref->getFixUpOffset());
7533 instruction = BigEndian::get32(*fixUp);
7534 newInstruction = (instruction & 0xFC000003) | ((uint32_t)displacement & 0x03FFFFFC);
7535 BigEndian::set32(*fixUp, newInstruction);
7536 }
7537 break;
7538 case A::kDtraceIsEnabledSite:
7539 if ( finalLinkedImage ) {
7540 // change call site to a li r3,0
7541 BigEndian::set32(*fixUp, 0x38600000);
7542 }
7543 else {
7544 // set bl instruction to branch to address zero in the .o file
7545 int64_t displacement = ref->getTargetOffset() - (inAtom->getAddress() + ref->getFixUpOffset());
7546 instruction = BigEndian::get32(*fixUp);
7547 newInstruction = (instruction & 0xFC000003) | ((uint32_t)displacement & 0x03FFFFFC);
7548 BigEndian::set32(*fixUp, newInstruction);
7549 }
7550 break;
7551 case A::kBranch24WeakImport:
7552 case A::kBranch24:
7553 {
7554 //fprintf(stderr, "bl fixup to %s at 0x%08llX, ", target.getDisplayName(), target.getAddress());
7555 int64_t displacement = targetAddr - (inAtom->getAddress() + ref->getFixUpOffset());
7556 if ( relocateableExternal ) {
7557 // doing "ld -r" to an external symbol
7558 // the mach-o way of encoding this is that the bl instruction's target addr is the offset into the target
7559 displacement -= ref->getTarget().getAddress();
7560 }
7561 else {
7562 const int64_t bl_eightMegLimit = 0x00FFFFFF;
7563 if ( (displacement > bl_eightMegLimit) || (displacement < (-bl_eightMegLimit)) ) {
7564 //fprintf(stderr, "bl out of range (%lld max is +/-16M) from %s in %s to %s in %s\n", displacement, this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7565 throwf("bl out of range (%lld max is +/-16M) from %s at 0x%08llX in %s of %s to %s at 0x%08llX in %s of %s",
7566 displacement, inAtom->getDisplayName(), inAtom->getAddress(), inAtom->getSectionName(), inAtom->getFile()->getPath(),
7567 ref->getTarget().getDisplayName(), ref->getTarget().getAddress(), ref->getTarget().getSectionName(), ref->getTarget().getFile()->getPath());
7568 }
7569 }
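// added explanation: I-form branch; keep the 6-bit opcode plus the AA/LK bits
// (mask 0xFC000003) and splice the word-aligned 24-bit displacement into the
// LI field (mask 0x03FFFFFC)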
7570 instruction = BigEndian::get32(*fixUp);
7571 newInstruction = (instruction & 0xFC000003) | ((uint32_t)displacement & 0x03FFFFFC);
7572 //fprintf(stderr, "bl fixup: 0x%08X -> 0x%08X\n", instruction, newInstruction);
7573 BigEndian::set32(*fixUp, newInstruction);
7574 }
7575 break;
7576 case A::kBranch14:
7577 {
7578 int64_t displacement = targetAddr - (inAtom->getAddress() + ref->getFixUpOffset());
7579 if ( relocateableExternal ) {
7580 // doing "ld -r" to an external symbol
7581 // the mach-o way of encoding this is that the bl instruction's target addr is the offset into the target
7582 displacement -= ref->getTarget().getAddress();
7583 }
7584 const int64_t b_sixtyFourKiloLimit = 0x0000FFFF;
7585 if ( (displacement > b_sixtyFourKiloLimit) || (displacement < (-b_sixtyFourKiloLimit)) ) {
7586 //fprintf(stderr, "bcc out of range (%lld max is +/-64K) from %s in %s to %s in %s\n", displacement, this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7587 throwf("bcc out of range (%lld max is +/-64K) from %s in %s to %s in %s",
7588 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
7589 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
7590 }
7591
7592 //fprintf(stderr, "bcc fixup displacement=0x%08llX, atom.addr=0x%08llX, atom.offset=0x%08X\n", displacement, inAtom->getAddress(), (uint32_t)ref->getFixUpOffset());
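// added explanation: B-form conditional branch; keep opcode, BO and BI plus the
// AA/LK bits (mask 0xFFFF0003) and splice the 14-bit word-aligned displacement
// into the BD field (mask 0x0000FFFC)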
7593 instruction = BigEndian::get32(*fixUp);
7594 newInstruction = (instruction & 0xFFFF0003) | ((uint32_t)displacement & 0x0000FFFC);
7595 //fprintf(stderr, "bc fixup: 0x%08X -> 0x%08X\n", instruction, newInstruction);
7596 BigEndian::set32(*fixUp, newInstruction);
7597 }
7598 break;
7599 case A::kPICBaseLow16:
7600 picBaseAddr = ref->getFromTarget().getAddress() + ref->getFromTargetOffset();
7601 displacement = targetAddr - picBaseAddr;
7602 if ( (displacement > picbase_twoGigLimit) || (displacement < (-picbase_twoGigLimit)) )
7603 throw "32-bit pic-base out of range";
7604 instructionLowHalf = (displacement & 0xFFFF);
7605 instruction = BigEndian::get32(*fixUp);
7606 newInstruction = (instruction & 0xFFFF0000) | instructionLowHalf;
7607 BigEndian::set32(*fixUp, newInstruction);
7608 break;
7609 case A::kPICBaseLow14:
7610 picBaseAddr = ref->getFromTarget().getAddress() + ref->getFromTargetOffset();
7611 displacement = targetAddr - picBaseAddr;
7612 if ( (displacement > picbase_twoGigLimit) || (displacement < (-picbase_twoGigLimit)) )
7613 throw "32-bit pic-base out of range";
7614 if ( (displacement & 0x3) != 0 )
7615 throwf("bad offset (0x%08X) for lo14 instruction pic-base fix-up", (uint32_t)displacement);
7616 instructionLowHalf = (displacement & 0xFFFC);
7617 instruction = BigEndian::get32(*fixUp);
7618 newInstruction = (instruction & 0xFFFF0003) | instructionLowHalf;
7619 BigEndian::set32(*fixUp, newInstruction);
7620 break;
7621 case A::kPICBaseHigh16:
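// added explanation: high half of a pic-base displacement ("ha16"-style rounding);
// the matching low16 fix-up is later added as a signed 16-bit quantity, so if bit 15
// of the displacement is set the high half below is rounded up by one to compensate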
7622 picBaseAddr = ref->getFromTarget().getAddress() + ref->getFromTargetOffset();
7623 displacement = targetAddr - picBaseAddr;
7624 if ( (displacement > picbase_twoGigLimit) || (displacement < (-picbase_twoGigLimit)) )
7625 throw "32-bit pic-base out of range";
7626 instructionLowHalf = displacement >> 16;
7627 if ( (displacement & 0x00008000) != 0 )
7628 ++instructionLowHalf;
7629 instruction = BigEndian::get32(*fixUp);
7630 newInstruction = (instruction & 0xFFFF0000) | instructionLowHalf;
7631 BigEndian::set32(*fixUp, newInstruction);
7632 break;
7633 case A::kAbsLow16:
7634 if ( relocateableExternal && !finalLinkedImage )
7635 targetAddr -= ref->getTarget().getAddress();
7636 instructionLowHalf = (targetAddr & 0xFFFF);
7637 instruction = BigEndian::get32(*fixUp);
7638 newInstruction = (instruction & 0xFFFF0000) | instructionLowHalf;
7639 BigEndian::set32(*fixUp, newInstruction);
7640 break;
7641 case A::kAbsLow14:
7642 if ( relocateableExternal && !finalLinkedImage )
7643 targetAddr -= ref->getTarget().getAddress();
7644 if ( (targetAddr & 0x3) != 0 )
7645 throw "bad address for absolute lo14 instruction fix-up";
7646 instructionLowHalf = (targetAddr & 0xFFFF);
7647 instruction = BigEndian::get32(*fixUp);
7648 newInstruction = (instruction & 0xFFFF0003) | instructionLowHalf;
7649 BigEndian::set32(*fixUp, newInstruction);
7650 break;
7651 case A::kAbsHigh16:
7652 if ( relocateableExternal ) {
7653 if ( finalLinkedImage ) {
7654 switch (ref->getTarget().getDefinitionKind()) {
7655 case ObjectFile::Atom::kExternalDefinition:
7656 case ObjectFile::Atom::kExternalWeakDefinition:
7657 throwf("absolute address to symbol %s in a different linkage unit not supported", ref->getTargetName());
7658 break;
7659 case ObjectFile::Atom::kTentativeDefinition:
7660 case ObjectFile::Atom::kRegularDefinition:
7661 case ObjectFile::Atom::kWeakDefinition:
7662 // use target address
7663 break;
7664 case ObjectFile::Atom::kAbsoluteSymbol:
7665 targetAddr = ref->getTarget().getSectionOffset();
7666 break;
7667 }
7668 }
7669 else {
7670 targetAddr -= ref->getTarget().getAddress();
7671 }
7672 }
7673 instructionHighHalf = (targetAddr >> 16);
7674 instruction = BigEndian::get32(*fixUp);
7675 newInstruction = (instruction & 0xFFFF0000) | instructionHighHalf;
7676 BigEndian::set32(*fixUp, newInstruction);
7677 break;
7678 case A::kAbsHigh16AddLow:
7679 if ( relocateableExternal ) {
7680 if ( finalLinkedImage ) {
7681 switch (ref->getTarget().getDefinitionKind()) {
7682 case ObjectFile::Atom::kExternalDefinition:
7683 case ObjectFile::Atom::kExternalWeakDefinition:
7684 throwf("absolute address to symbol %s in a different linkage unit not supported", ref->getTargetName());
7685 break;
7686 case ObjectFile::Atom::kTentativeDefinition:
7687 case ObjectFile::Atom::kRegularDefinition:
7688 case ObjectFile::Atom::kWeakDefinition:
7689 // use target address
7690 break;
7691 case ObjectFile::Atom::kAbsoluteSymbol:
7692 targetAddr = ref->getTarget().getSectionOffset();
7693 break;
7694 }
7695 }
7696 else {
7697 targetAddr -= ref->getTarget().getAddress();
7698 }
7699 }
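// added note: same "high-adjusted" rounding as the pic-base case above; round up if
// bit 15 is set, because the paired low16 is sign-extended when the processor adds it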
7700 if ( targetAddr & 0x00008000 )
7701 targetAddr += 0x00010000;
7702 instruction = BigEndian::get32(*fixUp);
7703 newInstruction = (instruction & 0xFFFF0000) | (targetAddr >> 16);
7704 BigEndian::set32(*fixUp, newInstruction);
7705 break;
7706 case A::kDtraceTypeReference:
7707 case A::kDtraceProbe:
7708 // nothing to fix up
7709 break;
7710 }
7711 }
7712
7713 template <>
7714 bool Writer<ppc>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref)
7715 {
7716 uint8_t kind = ref->getKind();
7717 switch ( (ppc::ReferenceKinds)kind ) {
7718 case ppc::kNoFixUp:
7719 case ppc::kFollowOn:
7720 case ppc::kGroupSubordinate:
7721 case ppc::kPointer:
7722 case ppc::kPointerWeakImport:
7723 case ppc::kPointerDiff16:
7724 case ppc::kPointerDiff32:
7725 case ppc::kPointerDiff64:
7726 case ppc::kDtraceProbe:
7727 case ppc::kDtraceProbeSite:
7728 case ppc::kDtraceIsEnabledSite:
7729 case ppc::kDtraceTypeReference:
7730 // these are never used to call external functions
7731 return false;
7732 case ppc::kBranch24:
7733 case ppc::kBranch24WeakImport:
7734 case ppc::kBranch14:
7735 // these are used to call external functions
7736 return true;
7737 case ppc::kPICBaseLow16:
7738 case ppc::kPICBaseLow14:
7739 case ppc::kPICBaseHigh16:
7740 case ppc::kAbsLow16:
7741 case ppc::kAbsLow14:
7742 case ppc::kAbsHigh16:
7743 case ppc::kAbsHigh16AddLow:
7744 // these are only used to call external functions
7745 // in -mlong-branch stubs
7746 switch ( ref->getTarget().getDefinitionKind() ) {
7747 case ObjectFile::Atom::kExternalDefinition:
7748 case ObjectFile::Atom::kExternalWeakDefinition:
7749 // if the .o file this atom came from has long-branch stubs,
7750 // then assume these instructions are in a stub.
7751 // Otherwise, these are a direct reference to something (maybe a runtime text reloc)
7752 return ( inAtom->getFile()->hasLongBranchStubs() );
7753 case ObjectFile::Atom::kTentativeDefinition:
7754 case ObjectFile::Atom::kRegularDefinition:
7755 case ObjectFile::Atom::kWeakDefinition:
7756 case ObjectFile::Atom::kAbsoluteSymbol:
7757 return false;
7758 }
7759 break;
7760 }
7761 return false;
7762 }
7763
7764 template <>
7765 bool Writer<arm>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref)
7766 {
7767 uint8_t kind = ref->getKind();
7768 switch ( (arm::ReferenceKinds)kind ) {
7769 case arm::kBranch24:
7770 case arm::kBranch24WeakImport:
7771 return true;
7772 case arm::kThumbBranch22:
7773 case arm::kThumbBranch22WeakImport:
7774 fHasThumbBranches = true;
7775 return true;
7776 case arm::kNoFixUp:
7777 case arm::kFollowOn:
7778 case arm::kGroupSubordinate:
7779 case arm::kPointer:
7780 case arm::kReadOnlyPointer:
7781 case arm::kPointerWeakImport:
7782 case arm::kPointerDiff:
7783 case arm::kDtraceProbe:
7784 case arm::kDtraceProbeSite:
7785 case arm::kDtraceIsEnabledSite:
7786 case arm::kDtraceTypeReference:
7787 case arm::kPointerDiff12:
7788 return false;
7789 }
7790 return false;
7791 }
7792
7793 template <>
7794 bool Writer<ppc64>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref)
7795 {
7796 uint8_t kind = ref->getKind();
7797 switch ( (ppc64::ReferenceKinds)kind ) {
7798 case ppc::kNoFixUp:
7799 case ppc::kFollowOn:
7800 case ppc::kGroupSubordinate:
7801 case ppc::kPointer:
7802 case ppc::kPointerWeakImport:
7803 case ppc::kPointerDiff16:
7804 case ppc::kPointerDiff32:
7805 case ppc::kPointerDiff64:
7806 case ppc::kPICBaseLow16:
7807 case ppc::kPICBaseLow14:
7808 case ppc::kPICBaseHigh16:
7809 case ppc::kAbsLow16:
7810 case ppc::kAbsLow14:
7811 case ppc::kAbsHigh16:
7812 case ppc::kAbsHigh16AddLow:
7813 case ppc::kDtraceProbe:
7814 case ppc::kDtraceProbeSite:
7815 case ppc::kDtraceIsEnabledSite:
7816 case ppc::kDtraceTypeReference:
7817 // these are never used to call external functions
7818 return false;
7819 case ppc::kBranch24:
7820 case ppc::kBranch24WeakImport:
7821 case ppc::kBranch14:
7822 // these are used to call external functions
7823 return true;
7824 }
7825 return false;
7826 }
7827
7828 template <>
7829 bool Writer<x86>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref)
7830 {
7831 uint8_t kind = ref->getKind();
7832 return (kind == x86::kPCRel32 || kind == x86::kPCRel32WeakImport);
7833 }
7834
7835 template <>
7836 bool Writer<x86_64>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref)
7837 {
7838 uint8_t kind = ref->getKind();
7839 return (kind == x86_64::kBranchPCRel32 || kind == x86_64::kBranchPCRel32WeakImport);
7840 }
7841
7842
7843 template <>
7844 bool Writer<ppc>::weakImportReferenceKind(uint8_t kind)
7845 {
7846 return (kind == ppc::kBranch24WeakImport || kind == ppc::kPointerWeakImport);
7847 }
7848
7849 template <>
7850 bool Writer<ppc64>::weakImportReferenceKind(uint8_t kind)
7851 {
7852 return (kind == ppc64::kBranch24WeakImport || kind == ppc64::kPointerWeakImport);
7853 }
7854
7855 template <>
7856 bool Writer<x86>::weakImportReferenceKind(uint8_t kind)
7857 {
7858 return (kind == x86::kPCRel32WeakImport || kind == x86::kPointerWeakImport);
7859 }
7860
7861 template <>
7862 bool Writer<x86_64>::weakImportReferenceKind(uint8_t kind)
7863 {
7864 switch ( kind ) {
7865 case x86_64::kPointerWeakImport:
7866 case x86_64::kBranchPCRel32WeakImport:
7867 case x86_64::kPCRel32GOTWeakImport:
7868 case x86_64::kPCRel32GOTLoadWeakImport:
7869 return true;
7870 }
7871 return false;
7872 }
7873
7874 template <>
7875 bool Writer<arm>::weakImportReferenceKind(uint8_t kind)
7876 {
7877 return (kind == arm::kBranch24WeakImport || kind == arm::kThumbBranch22WeakImport ||
7878 kind == arm::kPointerWeakImport);
7879 }
7880
7881 template <>
7882 bool Writer<ppc>::GOTReferenceKind(uint8_t kind)
7883 {
7884 return false;
7885 }
7886
7887 template <>
7888 bool Writer<ppc64>::GOTReferenceKind(uint8_t kind)
7889 {
7890 return false;
7891 }
7892
7893 template <>
7894 bool Writer<x86>::GOTReferenceKind(uint8_t kind)
7895 {
7896 return false;
7897 }
7898
7899 template <>
7900 bool Writer<x86_64>::GOTReferenceKind(uint8_t kind)
7901 {
7902 switch ( kind ) {
7903 case x86_64::kPCRel32GOT:
7904 case x86_64::kPCRel32GOTWeakImport:
7905 case x86_64::kPCRel32GOTLoad:
7906 case x86_64::kPCRel32GOTLoadWeakImport:
7907 case x86_64::kGOTNoFixUp:
7908 return true;
7909 }
7910 return false;
7911 }
7912
7913 template <>
7914 bool Writer<arm>::GOTReferenceKind(uint8_t kind)
7915 {
7916 return false;
7917 }
7918
7919 template <>
7920 bool Writer<ppc>::optimizableGOTReferenceKind(uint8_t kind)
7921 {
7922 return false;
7923 }
7924
7925 template <>
7926 bool Writer<ppc64>::optimizableGOTReferenceKind(uint8_t kind)
7927 {
7928 return false;
7929 }
7930
7931 template <>
7932 bool Writer<x86>::optimizableGOTReferenceKind(uint8_t kind)
7933 {
7934 return false;
7935 }
7936
7937 template <>
7938 bool Writer<x86_64>::optimizableGOTReferenceKind(uint8_t kind)
7939 {
7940 switch ( kind ) {
7941 case x86_64::kPCRel32GOTLoad:
7942 case x86_64::kPCRel32GOTLoadWeakImport:
7943 return true;
7944 }
7945 return false;
7946 }
7947
7948 template <>
7949 bool Writer<arm>::optimizableGOTReferenceKind(uint8_t kind)
7950 {
7951 return false;
7952 }
7953
7954 // 64-bit architectures never need a module table; 32-bit ones sometimes do for backwards compatibility
7955 template <typename A> bool Writer<A>::needsModuleTable() {return fOptions.needsModuleTable(); }
7956 template <> bool Writer<ppc64>::needsModuleTable() { return false; }
7957 template <> bool Writer<x86_64>::needsModuleTable() { return false; }
7958
7959
7960 template <typename A>
7961 void Writer<A>::optimizeDylibReferences()
7962 {
7963 //fprintf(stderr, "original ordinals table:\n");
7964 //for (std::map<class ObjectFile::Reader*, uint32_t>::iterator it = fLibraryToOrdinal.begin(); it != fLibraryToOrdinal.end(); ++it) {
7965 // fprintf(stderr, "%u <== %p/%s\n", it->second, it->first, it->first->getPath());
7966 //}
7967 // find unused dylibs that can be removed
7968 std::map<uint32_t, ObjectFile::Reader*> ordinalToReader;
7969 std::map<ObjectFile::Reader*, ObjectFile::Reader*> readerAliases;
7970 for (std::map<ObjectFile::Reader*, uint32_t>::iterator it = fLibraryToOrdinal.begin(); it != fLibraryToOrdinal.end(); ++it) {
7971 ObjectFile::Reader* reader = it->first;
7972 std::map<ObjectFile::Reader*, ObjectFile::Reader*>::iterator aliasPos = fLibraryAliases.find(reader);
7973 if ( aliasPos != fLibraryAliases.end() ) {
7974 // already noticed that this reader has same install name as another reader
7975 readerAliases[reader] = aliasPos->second;
7976 }
7977 else if ( !reader->providedExportAtom() && (reader->implicitlyLinked() || reader->deadStrippable() || fOptions.deadStripDylibs()) ) {
7978 // this reader can be optimized away
7979 it->second = 0xFFFFFFFF;
7980 typename std::map<class ObjectFile::Reader*, class DylibLoadCommandsAtom<A>* >::iterator pos = fLibraryToLoadCommand.find(reader);
7981 if ( pos != fLibraryToLoadCommand.end() )
7982 pos->second->optimizeAway();
7983 }
7984 else {
7985 // mark this reader as using its ordinal
7986 std::map<uint32_t, ObjectFile::Reader*>::iterator pos = ordinalToReader.find(it->second);
7987 if ( pos == ordinalToReader.end() )
7988 ordinalToReader[it->second] = reader;
7989 else
7990 readerAliases[reader] = pos->second;
7991 }
7992 }
7993 // renumber ordinals (depends on iterator walking in ordinal order)
7994 // all LC_LAZY_LOAD_DYLIB load commands must have the highest ordinals
7995 uint32_t newOrdinal = 0;
7996 for (std::map<uint32_t, ObjectFile::Reader*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
7997 if ( it->first <= fLibraryToOrdinal.size() ) {
7998 if ( ! it->second->isLazyLoadedDylib() )
7999 fLibraryToOrdinal[it->second] = ++newOrdinal;
8000 }
8001 }
8002 for (std::map<uint32_t, ObjectFile::Reader*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
8003 if ( it->first <= fLibraryToOrdinal.size() ) {
8004 if ( it->second->isLazyLoadedDylib() ) {
8005 fLibraryToOrdinal[it->second] = ++newOrdinal;
8006 }
8007 }
8008 }
8009
8010 // <rdar://problem/5504954> linker does not error when dylib ordinal exceeds 250
8011 if ( (newOrdinal >= MAX_LIBRARY_ORDINAL) && (fOptions.nameSpace() == Options::kTwoLevelNameSpace) )
8012 throwf("two level namespace mach-o files can link with at most %d dylibs, this link would use %d dylibs", MAX_LIBRARY_ORDINAL, newOrdinal);
8013
8014 // add aliases (e.g. -lm points to libSystem.dylib)
8015 for (std::map<ObjectFile::Reader*, ObjectFile::Reader*>::iterator it = readerAliases.begin(); it != readerAliases.end(); ++it) {
8016 fLibraryToOrdinal[it->first] = fLibraryToOrdinal[it->second];
8017 }
8018
8019 //fprintf(stderr, "new ordinals table:\n");
8020 //for (std::map<class ObjectFile::Reader*, uint32_t>::iterator it = fLibraryToOrdinal.begin(); it != fLibraryToOrdinal.end(); ++it) {
8021 // fprintf(stderr, "%u <== %p/%s\n", it->second, it->first, it->first->getPath());
8022 //}
8023 }
8024
8025
8026 template <>
8027 void Writer<arm>::scanForAbsoluteReferences()
8028 {
8029 // arm codegen never has absolute references. FIXME: Is this correct?
8030 }
8031
8032 template <>
8033 void Writer<x86_64>::scanForAbsoluteReferences()
8034 {
8035 // x86_64 codegen never has absolute references
8036 }
8037
8038 template <>
8039 void Writer<x86>::scanForAbsoluteReferences()
8040 {
8041 // when linking -pie, verify there is no absolute addressing, unless -read_only_relocs is also used
8042 if ( fOptions.positionIndependentExecutable() && !fOptions.allowTextRelocs() ) {
8043 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8044 ObjectFile::Atom* atom = *it;
8045 if ( atom->getContentType() == ObjectFile::Atom::kStub )
8046 continue;
8047 if ( atom->getContentType() == ObjectFile::Atom::kStubHelper )
8048 continue;
8049 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8050 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8051 ObjectFile::Reference* ref = *rit;
8052 switch (ref->getKind()) {
8053 case x86::kAbsolute32:
8054 throwf("cannot link -pie: -mdynamic-no-pic codegen found in %s from %s", atom->getDisplayName(), atom->getFile()->getPath());
8055 return;
8056 }
8057 }
8058 }
8059 }
8060 }
8061
8062 template <>
8063 void Writer<ppc>::scanForAbsoluteReferences()
8064 {
8065 // when linking -pie, verify there is no absolute addressing, unless -read_only_relocs is also used
8066 if ( fOptions.positionIndependentExecutable() && !fOptions.allowTextRelocs() ) {
8067 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8068 ObjectFile::Atom* atom = *it;
8069 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8070 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8071 ObjectFile::Reference* ref = *rit;
8072 switch (ref->getKind()) {
8073 case ppc::kAbsLow16:
8074 case ppc::kAbsLow14:
8075 case ppc::kAbsHigh16:
8076 case ppc::kAbsHigh16AddLow:
8077 throwf("cannot link -pie: -mdynamic-no-pic codegen found in %s from %s", atom->getDisplayName(), atom->getFile()->getPath());
8078 return;
8079 }
8080 }
8081 }
8082 }
8083 }
8084
8085
8086 // for ppc64 look for any -mdynamic-no-pic codegen
8087 template <>
8088 void Writer<ppc64>::scanForAbsoluteReferences()
8089 {
8090 // only do this for main executable
8091 if ( mightNeedPadSegment() && (fPageZeroAtom != NULL) ) {
8092 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8093 ObjectFile::Atom* atom = *it;
8094 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8095 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8096 ObjectFile::Reference* ref = *rit;
8097 switch (ref->getKind()) {
8098 case ppc64::kAbsLow16:
8099 case ppc64::kAbsLow14:
8100 case ppc64::kAbsHigh16:
8101 case ppc64::kAbsHigh16AddLow:
8102 //fprintf(stderr, "found -mdynamic-no-pic codegen in %s in %s\n", atom->getDisplayName(), atom->getFile()->getPath());
8103 // shrink page-zero and add pad segment to compensate
8104 fPadSegmentInfo = new SegmentInfo(4096);
8105 strcpy(fPadSegmentInfo->fName, "__4GBFILL");
8106 fPageZeroAtom->setSize(0x1000);
8107 return;
8108 }
8109 }
8110 }
8111 }
8112 }
8113
8114
8115 template <typename A>
8116 void Writer<A>::insertDummyStubs()
8117 {
8118 // only needed for x86
8119 }
8120
8121 template <>
8122 void Writer<x86>::insertDummyStubs()
8123 {
8124 // any 5-byte stub that crosses a 32-byte cache line may be updated incorrectly
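// added explanation: each fast stub is 5 bytes, so 64 stub slots span 320 bytes, an
// exact multiple of 64; within each 64-slot period the slots at indices 12, 25, 38
// and 51 are the ones whose 5 bytes would straddle a 64-byte boundary (see the offsets
// noted below), so a dummy stub fills each such slot and no real, dyld-updated stub
// straddles a boundary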
8125 std::vector<class StubAtom<x86>*> betterStubs;
8126 for (std::vector<class StubAtom<x86>*>::iterator it=fAllSynthesizedStubs.begin(); it != fAllSynthesizedStubs.end(); it++) {
8127 switch (betterStubs.size() % 64 ) {
8128 case 12:// stub would occupy 0x3C->0x41
8129 case 25:// stub would occupy 0x7D->0x82
8130 case 38:// stub would occupy 0xBE->0xC3
8131 case 51:// stub would occupy 0xFF->0x04
8132 betterStubs.push_back(new StubAtom<x86>(*this, *((ObjectFile::Atom*)NULL), false)); //pad with dummy stub
8133 break;
8134 }
8135 betterStubs.push_back(*it);
8136 }
8137 // replace
8138 fAllSynthesizedStubs.clear();
8139 fAllSynthesizedStubs.insert(fAllSynthesizedStubs.begin(), betterStubs.begin(), betterStubs.end());
8140 }
8141
8142
8143 template <typename A>
8144 void Writer<A>::synthesizeKextGOT(const std::vector<class ObjectFile::Atom*>& existingAtoms,
8145 std::vector<class ObjectFile::Atom*>& newAtoms)
8146 {
8147 // walk every atom and reference
8148 for (std::vector<ObjectFile::Atom*>::const_iterator it=existingAtoms.begin(); it != existingAtoms.end(); it++) {
8149 const ObjectFile::Atom* atom = *it;
8150 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8151 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8152 ObjectFile::Reference* ref = *rit;
8153 switch ( ref->getTargetBinding()) {
8154 case ObjectFile::Reference::kUnboundByName:
8155 case ObjectFile::Reference::kDontBind:
8156 break;
8157 case ObjectFile::Reference::kBoundByName:
8158 case ObjectFile::Reference::kBoundDirectly:
8159 ObjectFile::Atom& target = ref->getTarget();
8160 // create GOT slots (non-lazy pointers) as needed
8161 if ( this->GOTReferenceKind(ref->getKind()) ) {
8162 bool useGOT = ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal );
8163 // if this GOT usage cannot be optimized away then make a GOT entry
8164 if ( ! this->optimizableGOTReferenceKind(ref->getKind()) )
8165 useGOT = true;
8166 if ( useGOT ) {
8167 ObjectFile::Atom* nlp = NULL;
8168 std::map<ObjectFile::Atom*,ObjectFile::Atom*>::iterator pos = fGOTMap.find(&target);
8169 if ( pos == fGOTMap.end() ) {
8170 nlp = new NonLazyPointerAtom<A>(*this, target);
8171 fGOTMap[&target] = nlp;
8172 newAtoms.push_back(nlp);
8173 }
8174 else {
8175 nlp = pos->second;
8176 }
8177 // alter reference to use the non-lazy pointer instead
8178 ref->setTarget(*nlp, ref->getTargetOffset());
8179 }
8180 }
8181 // build map of which symbols need weak importing
8182 if ( (target.getDefinitionKind() == ObjectFile::Atom::kExternalDefinition)
8183 || (target.getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition) ) {
8184 if ( this->weakImportReferenceKind(ref->getKind()) ) {
8185 fWeakImportMap[&target] = true;
8186 }
8187 }
8188 break;
8189 }
8190 }
8191 }
8192 }
8193
8194
8195 template <typename A>
8196 void Writer<A>::synthesizeStubs(const std::vector<class ObjectFile::Atom*>& existingAtoms,
8197 std::vector<class ObjectFile::Atom*>& newAtoms)
8198 {
8199 switch ( fOptions.outputKind() ) {
8200 case Options::kObjectFile:
8201 case Options::kPreload:
8202 // these output kinds never have stubs
8203 return;
8204 case Options::kKextBundle:
8205 // kext bundles only need a synthesized GOT
8206 synthesizeKextGOT(existingAtoms, newAtoms);
8207 return;
8208 case Options::kStaticExecutable:
8209 case Options::kDyld:
8210 case Options::kDynamicLibrary:
8211 case Options::kDynamicBundle:
8212 case Options::kDynamicExecutable:
8213 // try to synthesize stubs for these
8214 break;
8215 }
8216
8217 // walk every atom and reference
8218 for (std::vector<ObjectFile::Atom*>::const_iterator it=existingAtoms.begin(); it != existingAtoms.end(); it++) {
8219 ObjectFile::Atom* atom = *it;
8220 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8221 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8222 ObjectFile::Reference* ref = *rit;
8223 switch ( ref->getTargetBinding()) {
8224 case ObjectFile::Reference::kUnboundByName:
8225 case ObjectFile::Reference::kDontBind:
8226 break;
8227 case ObjectFile::Reference::kBoundByName:
8228 case ObjectFile::Reference::kBoundDirectly:
8229 ObjectFile::Atom& target = ref->getTarget();
8230 // build map of which symbols need weak importing
8231 if ( (target.getDefinitionKind() == ObjectFile::Atom::kExternalDefinition)
8232 || (target.getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition) ) {
8233 bool weakImport = this->weakImportReferenceKind(ref->getKind());
8234 // <rdar://problem/5633081> Obj-C Symbols in Leopard Can't Be Weak Linked
8235 // dyld in Mac OS X 10.3 and earlier needs the N_WEAK_REF bit set on undefined references to objc symbols
8236 // in dylibs that are weakly linked.
8237 if ( (ref->getKind() == A::kNoFixUp) && (strncmp(target.getName(), ".objc_class_name_", 17) == 0) ) {
8238 typename std::map<class ObjectFile::Reader*, class DylibLoadCommandsAtom<A>* >::iterator pos;
8239 pos = fLibraryToLoadCommand.find(target.getFile());
8240 if ( pos != fLibraryToLoadCommand.end() ) {
8241 if ( pos->second->linkedWeak() )
8242 weakImport = true;
8243 }
8244 }
8245 // <rdar://problem/6186838> -weak_library no longer forces uses to be weak_import
8246 if ( fForcedWeakImportReaders.count(target.getFile()) != 0 ) {
8247 fWeakImportMap[&target] = true;
8248 weakImport = true;
8249 }
8250
8251 std::map<const ObjectFile::Atom*,bool>::iterator pos = fWeakImportMap.find(&target);
8252 if ( pos == fWeakImportMap.end() ) {
8253 // target not in fWeakImportMap, so add
8254 fWeakImportMap[&target] = weakImport;
8255 }
8256 else {
8257 // target in fWeakImportMap, check for weakness mismatch
8258 if ( pos->second != weakImport ) {
8259 // found mismatch
8260 switch ( fOptions.weakReferenceMismatchTreatment() ) {
8261 case Options::kWeakReferenceMismatchError:
8262 throwf("mismatching weak references for symbol: %s", target.getName());
8263 case Options::kWeakReferenceMismatchWeak:
8264 pos->second = true;
8265 break;
8266 case Options::kWeakReferenceMismatchNonWeak:
8267 pos->second = false;
8268 break;
8269 }
8270 }
8271 }
8272 // record whether we use a weak_import or a strong import from this dylib
8273 if ( fWeakImportMap[&target] )
8274 fDylibReadersWithWeakImports.insert(target.getFile());
8275 else
8276 fDylibReadersWithNonWeakImports.insert(target.getFile());
8277 }
8278 // create stubs as needed
8279 if ( this->stubableReference(atom, ref)
8280 && (ref->getTargetOffset() == 0)
8281 && this->relocationNeededInFinalLinkedImage(target) == kRelocExternal ) {
8282 ObjectFile::Atom* stub = NULL;
8283 std::map<const ObjectFile::Atom*,ObjectFile::Atom*>::iterator pos = fStubsMap.find(&target);
8284 if ( pos == fStubsMap.end() ) {
8285 bool forLazyDylib = false;
8286 switch ( target.getDefinitionKind() ) {
8287 case ObjectFile::Atom::kRegularDefinition:
8288 case ObjectFile::Atom::kWeakDefinition:
8289 case ObjectFile::Atom::kAbsoluteSymbol:
8290 case ObjectFile::Atom::kTentativeDefinition:
8291 break;
8292 case ObjectFile::Atom::kExternalDefinition:
8293 case ObjectFile::Atom::kExternalWeakDefinition:
8294 if ( target.getFile()->isLazyLoadedDylib() )
8295 forLazyDylib = true;
8296 break;
8297 }
8298 // just-in-time, create a GOT slot for dyld_stub_binder
8299 if ( fOptions.makeCompressedDyldInfo() && (fFastStubGOTAtom == NULL) ) {
8300 if ( fDyldCompressedHelperAtom == NULL )
8301 throw "missing symbol dyld_stub_binder";
8302 fFastStubGOTAtom = new NonLazyPointerAtom<A>(*this, *fDyldCompressedHelperAtom);
8303 }
8304 stub = new StubAtom<A>(*this, target, forLazyDylib);
8305 fStubsMap[&target] = stub;
8306 }
8307 else {
8308 stub = pos->second;
8309 }
8310 // alter reference to use stub instead
8311 ref->setTarget(*stub, 0);
8312 }
8313 else if ( fOptions.usingLazyDylibLinking() && target.getFile()->isLazyLoadedDylib() ) {
8314 throwf("illegal reference to %s in lazy loaded dylib from %s in %s",
8315 target.getDisplayName(), atom->getDisplayName(),
8316 atom->getFile()->getPath());
8317 }
8318 // create GOT slots (non-lazy pointers) as needed
8319 else if ( this->GOTReferenceKind(ref->getKind()) ) {
8320 //
8321 bool mustUseGOT = ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal );
8322 bool useGOT;
8323 if ( fBiggerThanTwoGigs ) {
8324 // in big images use GOT for all zero fill atoms
8325 // this is just a heuristic and may need to be re-examined
8326 useGOT = mustUseGOT || ref->getTarget().isZeroFill();
8327 }
8328 else {
8329 // < 2GB image so remove all GOT entries that we can
8330 useGOT = mustUseGOT;
8331 }
8332 // if this GOT usage cannot be optimized away then make a GOT entry
8333 if ( ! this->optimizableGOTReferenceKind(ref->getKind()) )
8334 useGOT = true;
8335 if ( useGOT ) {
8336 ObjectFile::Atom* nlp = NULL;
8337 std::map<ObjectFile::Atom*,ObjectFile::Atom*>::iterator pos = fGOTMap.find(&target);
8338 if ( pos == fGOTMap.end() ) {
8339 nlp = new NonLazyPointerAtom<A>(*this, target);
8340 fGOTMap[&target] = nlp;
8341 }
8342 else {
8343 nlp = pos->second;
8344 }
8345 // alter reference to use the non-lazy pointer instead
8346 ref->setTarget(*nlp, ref->getTargetOffset());
8347 }
8348 }
8349 }
8350 }
8351 }
8352
8353 // sort stubs
8354 std::sort(fAllSynthesizedStubs.begin(), fAllSynthesizedStubs.end(), AtomByNameSorter());
8355 // add dummy self-modifying stubs (x86 only)
8356 if ( ! fOptions.makeCompressedDyldInfo() )
8357 this->insertDummyStubs();
8358 // set ordinals so sorting is preserved
8359 uint32_t sortOrder = 0;
8360 for (typename std::vector<StubAtom<A>*>::iterator it=fAllSynthesizedStubs.begin(); it != fAllSynthesizedStubs.end(); it++)
8361 (*it)->setSortingOrdinal(sortOrder++);
8362 std::sort(fAllSynthesizedStubHelpers.begin(), fAllSynthesizedStubHelpers.end(), AtomByNameSorter());
8363
8364 // sort lazy pointers
8365 std::sort(fAllSynthesizedLazyPointers.begin(), fAllSynthesizedLazyPointers.end(), AtomByNameSorter());
8366 sortOrder = 0;
8367 for (typename std::vector<LazyPointerAtom<A>*>::iterator it=fAllSynthesizedLazyPointers.begin(); it != fAllSynthesizedLazyPointers.end(); it++)
8368 (*it)->setSortingOrdinal(sortOrder++);
8369 std::sort(fAllSynthesizedLazyDylibPointers.begin(), fAllSynthesizedLazyDylibPointers.end(), AtomByNameSorter());
8370
8371 // sort non-lazy pointers
8372 std::sort(fAllSynthesizedNonLazyPointers.begin(), fAllSynthesizedNonLazyPointers.end(), AtomByNameSorter());
8373 sortOrder = 0;
8374 for (typename std::vector<NonLazyPointerAtom<A>*>::iterator it=fAllSynthesizedNonLazyPointers.begin(); it != fAllSynthesizedNonLazyPointers.end(); it++)
8375 (*it)->setSortingOrdinal(sortOrder++);
8376 std::sort(fAllSynthesizedNonLazyPointers.begin(), fAllSynthesizedNonLazyPointers.end(), AtomByNameSorter());
8377
8378 // tell linker about all synthesized atoms
8379 newAtoms.insert(newAtoms.end(), fAllSynthesizedStubs.begin(), fAllSynthesizedStubs.end());
8380 newAtoms.insert(newAtoms.end(), fAllSynthesizedStubHelpers.begin(), fAllSynthesizedStubHelpers.end());
8381 newAtoms.insert(newAtoms.end(), fAllSynthesizedLazyPointers.begin(), fAllSynthesizedLazyPointers.end());
8382 newAtoms.insert(newAtoms.end(), fAllSynthesizedLazyDylibPointers.begin(), fAllSynthesizedLazyDylibPointers.end());
8383 newAtoms.insert(newAtoms.end(), fAllSynthesizedNonLazyPointers.begin(), fAllSynthesizedNonLazyPointers.end());
8384
8385 }
8386
8387 template <typename A>
8388 void Writer<A>::createSplitSegContent()
8389 {
8390 // build LC_SEGMENT_SPLIT_INFO once all atoms exist
8391 if ( fSplitCodeToDataContentAtom != NULL ) {
8392 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8393 ObjectFile::Atom* atom = *it;
8394 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8395 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8396 ObjectFile::Reference* ref = *rit;
8397 switch ( ref->getTargetBinding()) {
8398 case ObjectFile::Reference::kUnboundByName:
8399 case ObjectFile::Reference::kDontBind:
8400 break;
8401 case ObjectFile::Reference::kBoundByName:
8402 case ObjectFile::Reference::kBoundDirectly:
8403 if ( this->segmentsCanSplitApart(*atom, ref->getTarget()) ) {
8404 this->addCrossSegmentRef(atom, ref);
8405 }
8406 break;
8407 }
8408 }
8409 }
8410 // bad codegen may cause LC_SEGMENT_SPLIT_INFO to be removed
8411 adjustLoadCommandsAndPadding();
8412 }
8413
8414 }
8415
8416
8417 template <typename A>
8418 void Writer<A>::synthesizeUnwindInfoTable()
8419 {
8420 if ( fUnwindInfoAtom != NULL ) {
8421 // walk every atom and get its unwind info
8422 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8423 ObjectFile::Atom* atom = *it;
8424 if ( atom->beginUnwind() == atom->endUnwind() ) {
8425 // be sure to record that atoms in the __TEXT segment without unwind info have none
8426 if ( strcmp(atom->getSegment().getName(), "__TEXT") == 0 )
8427 fUnwindInfoAtom->addUnwindInfo(atom, 0, 0, NULL, NULL, NULL);
8428 }
8429 else {
8430 // atom has unwind
8431 for ( ObjectFile::UnwindInfo::iterator uit = atom->beginUnwind(); uit != atom->endUnwind(); ++uit ) {
8432 fUnwindInfoAtom->addUnwindInfo(atom, uit->startOffset, uit->unwindInfo, atom->getFDE(), atom->getLSDA(), atom->getPersonalityPointer());
8433 }
8434 }
8435 }
8436 }
8437 }
8438
8439
8440 template <typename A>
8441 void Writer<A>::partitionIntoSections()
8442 {
8443 const bool oneSegmentCommand = (fOptions.outputKind() == Options::kObjectFile);
8444
8445 // for every atom, set its sectionInfo object and section offset
8446 // build up fSegmentInfos along the way
8447 ObjectFile::Section* curSection = (ObjectFile::Section*)(-1);
8448 SectionInfo* currentSectionInfo = NULL;
8449 SegmentInfo* currentSegmentInfo = NULL;
8450 SectionInfo* cstringSectionInfo = NULL;
8451 unsigned int sectionIndex = 1;
8452 fSegmentInfos.reserve(8);
8453 for (unsigned int i=0; i < fAllAtoms->size(); ++i) {
8454 ObjectFile::Atom* atom = (*fAllAtoms)[i];
8455 if ( ((atom->getSection() != curSection) || (curSection==NULL))
8456 && ((currentSectionInfo == NULL)
8457 || (strcmp(atom->getSectionName(),currentSectionInfo->fSectionName) != 0)
8458 || (strcmp(atom->getSegment().getName(),currentSectionInfo->fSegmentName) != 0)) ) {
8459 if ( oneSegmentCommand ) {
8460 if ( currentSegmentInfo == NULL ) {
8461 currentSegmentInfo = new SegmentInfo(fOptions.segmentAlignment());
8462 currentSegmentInfo->fInitProtection = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
8463 currentSegmentInfo->fMaxProtection = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
8464 this->fSegmentInfos.push_back(currentSegmentInfo);
8465 }
8466 currentSectionInfo = new SectionInfo();
8467 strcpy(currentSectionInfo->fSectionName, atom->getSectionName());
8468 strcpy(currentSectionInfo->fSegmentName, atom->getSegment().getName());
8469 currentSectionInfo->fAlignment = atom->getAlignment().powerOf2;
8470 currentSectionInfo->fAllZeroFill = atom->isZeroFill();
8471 currentSectionInfo->fVirtualSection = (currentSectionInfo->fSectionName[0] == '.');
8472 if ( !currentSectionInfo->fVirtualSection || fEmitVirtualSections )
8473 currentSectionInfo->setIndex(sectionIndex++);
8474 currentSegmentInfo->fSections.push_back(currentSectionInfo);
8475 if ( (strcmp(currentSectionInfo->fSegmentName, "__TEXT") == 0) && (strcmp(currentSectionInfo->fSectionName, "__cstring") == 0) )
8476 cstringSectionInfo = currentSectionInfo;
8477 }
8478 else {
8479 if ( (currentSegmentInfo == NULL) || (strcmp(currentSegmentInfo->fName, atom->getSegment().getName()) != 0) ) {
8480 currentSegmentInfo = new SegmentInfo(fOptions.segmentAlignment());
8481 strcpy(currentSegmentInfo->fName, atom->getSegment().getName());
8482 uint32_t initprot = 0;
8483 if ( atom->getSegment().isContentReadable() )
8484 initprot |= VM_PROT_READ;
8485 if ( atom->getSegment().isContentWritable() )
8486 initprot |= VM_PROT_WRITE;
8487 if ( atom->getSegment().isContentExecutable() )
8488 initprot |= VM_PROT_EXECUTE;
8489 if ( fOptions.readOnlyx86Stubs() && (strcmp(atom->getSegment().getName(), "__IMPORT") == 0) )
8490 initprot &= ~VM_PROT_WRITE; // hack until i386 __pointers section is synthesized by linker
8491 currentSegmentInfo->fInitProtection = initprot;
8492 if ( initprot == 0 )
8493 currentSegmentInfo->fMaxProtection = 0; // pagezero should have maxprot==initprot==0
8494 else if ( fOptions.architecture() == CPU_TYPE_ARM )
8495 currentSegmentInfo->fMaxProtection = currentSegmentInfo->fInitProtection; // iPhoneOS wants max==init
8496 else
8497 currentSegmentInfo->fMaxProtection = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
8498 std::vector<Options::SegmentProtect>& customSegProtections = fOptions.customSegmentProtections();
8499 for(std::vector<Options::SegmentProtect>::iterator it = customSegProtections.begin(); it != customSegProtections.end(); ++it) {
8500 if ( strcmp(it->name, currentSegmentInfo->fName) == 0 ) {
8501 currentSegmentInfo->fInitProtection = it->init;
8502 currentSegmentInfo->fMaxProtection = it->max;
8503 }
8504 }
8505 currentSegmentInfo->fBaseAddress = atom->getSegment().getBaseAddress();
8506 currentSegmentInfo->fFixedAddress = atom->getSegment().hasFixedAddress();
8507 if ( currentSegmentInfo->fFixedAddress && (&(atom->getSegment()) == &Segment::fgStackSegment) )
8508 currentSegmentInfo->fIndependentAddress = true;
8509 if ( (fOptions.outputKind() == Options::kPreload) && (strcmp(currentSegmentInfo->fName, "__LINKEDIT")==0) )
8510 currentSegmentInfo->fHasLoadCommand = false;
8511 if ( strcmp(currentSegmentInfo->fName, "__HEADER")==0 )
8512 currentSegmentInfo->fHasLoadCommand = false;
8513 this->fSegmentInfos.push_back(currentSegmentInfo);
8514 }
8515 currentSectionInfo = new SectionInfo();
8516 currentSectionInfo->fAtoms.reserve(fAllAtoms->size()/4); // reduce reallocations by starting large
8517 strcpy(currentSectionInfo->fSectionName, atom->getSectionName());
8518 strcpy(currentSectionInfo->fSegmentName, atom->getSegment().getName());
8519 currentSectionInfo->fAlignment = atom->getAlignment().powerOf2;
8520 // check for -sectalign override
8521 std::vector<Options::SectionAlignment>& alignmentOverrides = fOptions.sectionAlignments();
8522 for(std::vector<Options::SectionAlignment>::iterator it=alignmentOverrides.begin(); it != alignmentOverrides.end(); ++it) {
8523 if ( (strcmp(it->segmentName, currentSectionInfo->fSegmentName) == 0) && (strcmp(it->sectionName, currentSectionInfo->fSectionName) == 0) )
8524 currentSectionInfo->fAlignment = it->alignment;
8525 }
8526 currentSectionInfo->fAllZeroFill = atom->isZeroFill();
8527 currentSectionInfo->fVirtualSection = ( currentSectionInfo->fSectionName[0] == '.');
8528 if ( !currentSectionInfo->fVirtualSection || fEmitVirtualSections )
8529 currentSectionInfo->setIndex(sectionIndex++);
8530 currentSegmentInfo->fSections.push_back(currentSectionInfo);
8531 }
8532 //fprintf(stderr, "new section %s for atom %s\n", atom->getSectionName(), atom->getDisplayName());
8533 if ( strcmp(currentSectionInfo->fSectionName, "._load_commands") == 0 ) {
8534 fLoadCommandsSection = currentSectionInfo;
8535 fLoadCommandsSegment = currentSegmentInfo;
8536 }
8537 switch ( atom->getContentType() ) {
8538 case ObjectFile::Atom::kLazyPointer:
8539 currentSectionInfo->fAllLazyPointers = true;
8540 fSymbolTableCommands->needDynamicTable();
8541 break;
8542 case ObjectFile::Atom::kNonLazyPointer:
8543 currentSectionInfo->fAllNonLazyPointers = true;
8544 fSymbolTableCommands->needDynamicTable();
8545 break;
8546 case ObjectFile::Atom::kLazyDylibPointer:
8547 currentSectionInfo->fAllLazyDylibPointers = true;
8548 break;
8549 case ObjectFile::Atom::kStubHelper:
8550 currentSectionInfo->fAllStubHelpers = true;
8551 break;
8552 case ObjectFile::Atom::kCFIType:
8553 currentSectionInfo->fAlignment = __builtin_ctz(sizeof(pint_t)); // always start CFI info pointer aligned
8554 break;
8555 case ObjectFile::Atom::kStub:
8556 if ( (strcmp(currentSectionInfo->fSegmentName, "__IMPORT") == 0) && (strcmp(currentSectionInfo->fSectionName, "__jump_table") == 0) ) {
8557 currentSectionInfo->fAllSelfModifyingStubs = true;
8558 currentSectionInfo->fAlignment = 6; // force x86 fast stubs to start on 64-byte boundary
8559 }
8560 else {
8561 currentSectionInfo->fAllStubs = true;
8562 }
8563 fSymbolTableCommands->needDynamicTable();
8564 break;
8565 default:
8566 break;
8567 }
8568 curSection = atom->getSection();
8569 }
8570 // any non-zero-fill atom makes the whole section not zero-fill
8571 if ( currentSectionInfo->fAllZeroFill && ! atom->isZeroFill() )
8572 currentSectionInfo->fAllZeroFill = false;
8573 // change section object to be Writer's SectionInfo object
8574 atom->setSection(currentSectionInfo);
8575 // section alignment is the greatest alignment of any contained atom
8576 uint8_t atomAlign = atom->getAlignment().powerOf2;
8577 if ( currentSectionInfo->fAlignment < atomAlign )
8578 currentSectionInfo->fAlignment = atomAlign;
8579 // calculate section offset for this atom
8580 uint64_t offset = currentSectionInfo->fSize;
8581 uint64_t alignment = 1 << atomAlign;
8582 uint64_t currentModulus = (offset % alignment);
8583 uint64_t requiredModulus = atom->getAlignment().modulus;
8584 if ( currentModulus != requiredModulus ) {
8585 if ( requiredModulus > currentModulus )
8586 offset += requiredModulus-currentModulus;
8587 else
8588 offset += requiredModulus+alignment-currentModulus;
8589 }
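// Worked example, added for clarity (numbers are hypothetical): an atom with
// alignment 2^4 and modulus 4 arriving at section offset 0x26 has
// currentModulus = 0x26 % 16 = 6; since the required modulus 4 is less than 6,
// the offset advances by 4 + 16 - 6 = 14 to 0x34, and 0x34 % 16 == 4 as required.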
8590 atom->setSectionOffset(offset);
8591 uint64_t curAtomSize = atom->getSize();
8592 currentSectionInfo->fSize = offset + curAtomSize;
8593 // add atom to section vector
8594 currentSectionInfo->fAtoms.push_back(atom);
8595 //fprintf(stderr, " adding atom %p %s size=0x%0llX to section %p %s from %s\n", atom, atom->getDisplayName(), atom->getSize(),
8596 // currentSectionInfo, currentSectionInfo->fSectionName, atom->getFile()->getPath());
8597 // update largest size
8598 if ( !currentSectionInfo->fAllZeroFill && (curAtomSize > fLargestAtomSize) )
8599 fLargestAtomSize = curAtomSize;
8600 }
8601 if ( (cstringSectionInfo != NULL) && (cstringSectionInfo->fAlignment > 0) ) {
8602 // when merging cstring sections in .o files, all strings need to use the max alignment
8603 uint64_t offset = 0;
8604 uint64_t cstringAlignment = 1 << cstringSectionInfo->fAlignment;
8605 for (std::vector<ObjectFile::Atom*>::iterator it=cstringSectionInfo->fAtoms.begin(); it != cstringSectionInfo->fAtoms.end(); it++) {
8606 offset = (offset + (cstringAlignment-1)) & (-cstringAlignment);
8607 ObjectFile::Atom* atom = *it;
8608 atom->setSectionOffset(offset);
8609 offset += atom->getSize();
8610 }
8611 cstringSectionInfo->fSize = offset;
8612 }
8613 }
8614
8615
8616 struct TargetAndOffset { ObjectFile::Atom* atom; uint32_t offset; };
8617 class TargetAndOffsetComparor
8618 {
8619 public:
8620 bool operator()(const TargetAndOffset& left, const TargetAndOffset& right) const
8621 {
8622 if ( left.atom != right.atom )
8623 return ( left.atom < right.atom );
8624 return ( left.offset < right.offset );
8625 }
8626 };
8627
8628 template <>
8629 bool Writer<ppc>::addBranchIslands()
8630 {
8631 return this->createBranchIslands();
8632 }
8633
8634 template <>
8635 bool Writer<ppc64>::addBranchIslands()
8636 {
8637 return this->createBranchIslands();
8638 }
8639
8640 template <>
8641 bool Writer<x86>::addBranchIslands()
8642 {
8643 // x86 branches can reach entire 4G address space, so no need for branch islands
8644 return false;
8645 }
8646
8647 template <>
8648 bool Writer<x86_64>::addBranchIslands()
8649 {
8650 // x86-64 branches can reach the entire 4GB maximum image size, so no need for branch islands
8651 return false;
8652 }
8653
8654 template <>
8655 bool Writer<arm>::addBranchIslands()
8656 {
8657 return this->createBranchIslands();
8658 }
8659
8660 template <>
8661 bool Writer<ppc>::isBranchThatMightNeedIsland(uint8_t kind)
8662 {
8663 switch (kind) {
8664 case ppc::kBranch24:
8665 case ppc::kBranch24WeakImport:
8666 return true;
8667 }
8668 return false;
8669 }
8670
8671 template <>
8672 bool Writer<ppc64>::isBranchThatMightNeedIsland(uint8_t kind)
8673 {
8674 switch (kind) {
8675 case ppc64::kBranch24:
8676 case ppc64::kBranch24WeakImport:
8677 return true;
8678 }
8679 return false;
8680 }
8681
8682 template <>
8683 bool Writer<arm>::isBranchThatMightNeedIsland(uint8_t kind)
8684 {
8685 switch (kind) {
8686 case arm::kBranch24:
8687 case arm::kBranch24WeakImport:
8688 case arm::kThumbBranch22:
8689 case arm::kThumbBranch22WeakImport:
8690 return true;
8691 }
8692 return false;
8693 }
8694
8695 template <>
8696 uint32_t Writer<ppc>::textSizeWhenMightNeedBranchIslands()
8697 {
8698 return 16000000;
8699 }
8700
8701 template <>
8702 uint32_t Writer<ppc64>::textSizeWhenMightNeedBranchIslands()
8703 {
8704 return 16000000;
8705 }
8706
8707 template <>
8708 uint32_t Writer<arm>::textSizeWhenMightNeedBranchIslands()
8709 {
8710 if ( fHasThumbBranches == false )
8711 return 32000000; // ARM can branch +/- 32MB
8712 else if ( fOptions.preferSubArchitecture() && fOptions.subArchitecture() == CPU_SUBTYPE_ARM_V7 )
8713 return 16000000; // thumb2 can branch +/- 16MB
8714 else
8715 return 4000000; // thumb1 can branch +/- 4MB
8716 }
8717
8718 template <>
8719 uint32_t Writer<ppc>::maxDistanceBetweenIslands()
8720 {
8721 return 14*1024*1024;
8722 }
8723
8724 template <>
8725 uint32_t Writer<ppc64>::maxDistanceBetweenIslands()
8726 {
8727 return 14*1024*1024;
8728 }
8729
8730 template <>
8731 uint32_t Writer<arm>::maxDistanceBetweenIslands()
8732 {
8733 if ( fHasThumbBranches == false )
8734 return 30*1024*1024;
8735 else if ( fOptions.preferSubArchitecture() && fOptions.subArchitecture() == CPU_SUBTYPE_ARM_V7 )
8736 return 14*1024*1024;
8737 else
8738 return 3500000;
8739 }
8740
8741
8742 //
8743 // PowerPC can do PC relative branches as far as +/-16MB.
8744 // If a branch target is more than 16MB away, we insert one or more
8745 // "branch islands" between the branch and its target that
8746 // allow island hopping to the target.
8747 //
8748 // Branch Island Algorithm
8749 //
8750 // If the __TEXT segment < 16MB, then no branch islands needed
8751 // Otherwise, every 14MB into the __TEXT segment a region is
8752 // added which can contain branch islands. Every out-of-range
8753 // bl instruction is checked. If it crosses a region, an island
8754 // is added to that region with the same target and the bl is
8755 // adjusted to target the island instead.
8756 //
8757 // In theory, if too many islands are added to one region, it
8758 // could grow the __TEXT enough that other previously in-range
8759 // bl branches could be pushed out of range. We reduce the
8760 // probability of this happening by placing the regions every
8761 // 14MB, which means a region would have to grow to 2MB (roughly
8762 // 500,000 islands) before any branch could be pushed out of range.
8763 //
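// Worked example (illustrative only, using the PowerPC limits above):
// a 40MB __TEXT yields kIslandRegionsCount = 40MB / 14MB = 2, with island
// regions roughly 14MB and 28MB into __text. A bl about 1MB into __text
// that targets code around 30MB (displacement ~29MB) is redirected to an
// island in the first region, which branches to an island in the second
// region, which finally branches to the real target; every hop stays
// within the +/-16MB branch range.
//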
8764 template <typename A>
8765 bool Writer<A>::createBranchIslands()
8766 {
8767 bool log = false;
8768 bool result = false;
8769 // branch islands can only be needed if the __TEXT segment exceeds the architecture's branch range
8770 if ( fLoadCommandsSegment->fSize > textSizeWhenMightNeedBranchIslands() ) {
8771 if ( log) fprintf(stderr, "ld: checking for branch islands, __TEXT segment size=%llu\n", fLoadCommandsSegment->fSize);
8772 const uint32_t kBetweenRegions = maxDistanceBetweenIslands(); // place regions of islands every 14MB in __text section
8773 SectionInfo* textSection = NULL;
8774 for (std::vector<SectionInfo*>::iterator it=fLoadCommandsSegment->fSections.begin(); it != fLoadCommandsSegment->fSections.end(); it++) {
8775 if ( strcmp((*it)->fSectionName, "__text") == 0 ) {
8776 textSection = *it;
8777 if ( log) fprintf(stderr, "ld: checking for branch islands, __text section size=%llu\n", textSection->fSize);
8778 break;
8779 }
8780 }
8781 const int kIslandRegionsCount = fLoadCommandsSegment->fSize / kBetweenRegions;
8782 typedef std::map<TargetAndOffset,ObjectFile::Atom*, TargetAndOffsetComparor> AtomToIsland;
8783 AtomToIsland regionsMap[kIslandRegionsCount];
8784 std::vector<ObjectFile::Atom*> regionsIslands[kIslandRegionsCount];
8785 unsigned int islandCount = 0;
8786 if (log) fprintf(stderr, "ld: will use %u branch island regions\n", kIslandRegionsCount);
8787
8788 // create islands for branch references that are out of range
8789 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8790 ObjectFile::Atom* atom = *it;
8791 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8792 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8793 ObjectFile::Reference* ref = *rit;
8794 if ( this->isBranchThatMightNeedIsland(ref->getKind()) ) {
8795 ObjectFile::Atom& target = ref->getTarget();
8796 int64_t srcAddr = atom->getAddress() + ref->getFixUpOffset();
8797 int64_t dstAddr = target.getAddress() + ref->getTargetOffset();
8798 int64_t displacement = dstAddr - srcAddr;
8799 TargetAndOffset finalTargetAndOffset = { &target, ref->getTargetOffset() };
8800 const int64_t kBranchLimit = kBetweenRegions;
8801 if ( displacement > kBranchLimit ) {
8802 // create forward branch chain
8803 ObjectFile::Atom* nextTarget = &target;
8804 for (int i=kIslandRegionsCount-1; i >=0 ; --i) {
8805 AtomToIsland* region = &regionsMap[i];
8806 int64_t islandRegionAddr = kBetweenRegions * (i+1) + textSection->getBaseAddress();
8807 if ( (srcAddr < islandRegionAddr) && (islandRegionAddr <= dstAddr) ) {
8808 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
8809 if ( pos == region->end() ) {
8810 BranchIslandAtom<A>* island = new BranchIslandAtom<A>(*this, target.getDisplayName(), i, *nextTarget, *finalTargetAndOffset.atom, finalTargetAndOffset.offset);
8811 island->setSection(textSection);
8812 (*region)[finalTargetAndOffset] = island;
8813 if (log) fprintf(stderr, "added island %s to region %d for %s\n", island->getDisplayName(), i, atom->getDisplayName());
8814 regionsIslands[i].push_back(island);
8815 ++islandCount;
8816 nextTarget = island;
8817 }
8818 else {
8819 nextTarget = pos->second;
8820 }
8821 }
8822 }
8823 if (log) fprintf(stderr, "using island %s for branch to %s from %s\n", nextTarget->getDisplayName(), target.getDisplayName(), atom->getDisplayName());
8824 ref->setTarget(*nextTarget, 0);
8825 }
8826 else if ( displacement < (-kBranchLimit) ) {
8827 // create back branching chain
8828 ObjectFile::Atom* prevTarget = &target;
8829 for (int i=0; i < kIslandRegionsCount ; ++i) {
8830 AtomToIsland* region = &regionsMap[i];
8831 int64_t islandRegionAddr = kBetweenRegions * (i+1);
8832 if ( (dstAddr <= islandRegionAddr) && (islandRegionAddr < srcAddr) ) {
8833 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
8834 if ( pos == region->end() ) {
8835 BranchIslandAtom<A>* island = new BranchIslandAtom<A>(*this, target.getDisplayName(), i, *prevTarget, *finalTargetAndOffset.atom, finalTargetAndOffset.offset);
8836 island->setSection(textSection);
8837 (*region)[finalTargetAndOffset] = island;
8838 if (log) fprintf(stderr, "added back island %s to region %d for %s\n", island->getDisplayName(), i, atom->getDisplayName());
8839 regionsIslands[i].push_back(island);
8840 ++islandCount;
8841 prevTarget = island;
8842 }
8843 else {
8844 prevTarget = pos->second;
8845 }
8846 }
8847 }
8848 if (log) fprintf(stderr, "using back island %s for %s\n", prevTarget->getDisplayName(), atom->getDisplayName());
8849 ref->setTarget(*prevTarget, 0);
8850 }
8851 }
8852 }
8853 }
8854
8855 // insert islands into __text section and adjust section offsets
8856 if ( islandCount > 0 ) {
8857 if ( log ) fprintf(stderr, "ld: %u branch islands required in %u regions\n", islandCount, kIslandRegionsCount);
8858 std::vector<ObjectFile::Atom*> newAtomList;
8859 newAtomList.reserve(textSection->fAtoms.size()+islandCount);
8860 uint64_t islandRegionAddr = kBetweenRegions + textSection->getBaseAddress();
8861 uint64_t textSectionAlignment = (1 << textSection->fAlignment);
8862 int regionIndex = 0;
8863 uint64_t atomSlide = 0;
8864 uint64_t sectionOffset = 0;
8865 for (std::vector<ObjectFile::Atom*>::iterator it=textSection->fAtoms.begin(); it != textSection->fAtoms.end(); it++) {
8866 ObjectFile::Atom* atom = *it;
8867 if ( (atom->getAddress()+atom->getSize()) > islandRegionAddr ) {
8868 uint64_t islandStartOffset = atom->getSectionOffset() + atomSlide;
8869 sectionOffset = islandStartOffset;
8870 std::vector<ObjectFile::Atom*>* regionIslands = &regionsIslands[regionIndex];
8871 for (std::vector<ObjectFile::Atom*>::iterator rit=regionIslands->begin(); rit != regionIslands->end(); rit++) {
8872 ObjectFile::Atom* islandAtom = *rit;
8873 newAtomList.push_back(islandAtom);
8874 uint64_t alignment = 1 << (islandAtom->getAlignment().powerOf2);
8875 sectionOffset = ( (sectionOffset+alignment-1) & (-alignment) );
8876 islandAtom->setSectionOffset(sectionOffset);
8877 if ( log ) fprintf(stderr, "assigning __text offset 0x%08llx to %s\n", sectionOffset, islandAtom->getDisplayName());
8878 sectionOffset += islandAtom->getSize();
8879 }
8880 ++regionIndex;
8881 islandRegionAddr += kBetweenRegions;
8882 uint64_t islandRegionAlignmentBlocks = (sectionOffset - islandStartOffset + textSectionAlignment - 1) / textSectionAlignment;
8883 atomSlide += (islandRegionAlignmentBlocks * textSectionAlignment);
8884 }
8885 newAtomList.push_back(atom);
8886 if ( atomSlide != 0 )
8887 atom->setSectionOffset(atom->getSectionOffset()+atomSlide);
8888 }
8889 sectionOffset = textSection->fSize+atomSlide;
8890 // put any remaining islands at end of __text section
8891 if ( regionIndex < kIslandRegionsCount ) {
8892 std::vector<ObjectFile::Atom*>* regionIslands = &regionsIslands[regionIndex];
8893 for (std::vector<ObjectFile::Atom*>::iterator rit=regionIslands->begin(); rit != regionIslands->end(); rit++) {
8894 ObjectFile::Atom* islandAtom = *rit;
8895 newAtomList.push_back(islandAtom);
8896 uint64_t alignment = 1 << (islandAtom->getAlignment().powerOf2);
8897 sectionOffset = ( (sectionOffset+alignment-1) & (-alignment) );
8898 islandAtom->setSectionOffset(sectionOffset);
8899 if ( log ) fprintf(stderr, "assigning __text offset 0x%08llx to %s\n", sectionOffset, islandAtom->getDisplayName());
8900 sectionOffset += islandAtom->getSize();
8901 }
8902 }
8903
8904 textSection->fAtoms = newAtomList;
8905 textSection->fSize = sectionOffset;
8906 result = true;
8907 }
8908
8909 }
8910 return result;
8911 }
8912
8913
8914 template <typename A>
8915 void Writer<A>::adjustLoadCommandsAndPadding()
8916 {
8917 fSegmentCommands->computeSize();
8918
8919 // recompute load command section offsets
8920 uint64_t offset = 0;
8921 std::vector<class ObjectFile::Atom*>& loadCommandAtoms = fLoadCommandsSection->fAtoms;
8922 const unsigned int atomCount = loadCommandAtoms.size();
8923 for (unsigned int i=0; i < atomCount; ++i) {
8924 ObjectFile::Atom* atom = loadCommandAtoms[i];
8925 uint64_t alignment = 1 << atom->getAlignment().powerOf2;
8926 offset = ( (offset+alignment-1) & (-alignment) );
8927 atom->setSectionOffset(offset);
8928 uint32_t atomSize = atom->getSize();
8929 if ( atomSize > fLargestAtomSize )
8930 fLargestAtomSize = atomSize;
8931 offset += atomSize;
8932 fLoadCommandsSection->fSize = offset;
8933 }
8934 const uint32_t sizeOfLoadCommandsPlusHeader = offset + sizeof(macho_header<typename A::P>);
8935
8936 std::vector<SectionInfo*>& sectionInfos = fLoadCommandsSegment->fSections;
8937 const int sectionCount = sectionInfos.size();
8938 uint32_t totalSizeOfTEXTLessHeaderAndLoadCommands = 0;
8939 for(int j=0; j < sectionCount; ++j) {
8940 SectionInfo* curSection = sectionInfos[j];
8941 if ( strcmp(curSection->fSectionName, fHeaderPadding->getSectionName()) == 0 )
8942 break;
8943 totalSizeOfTEXTLessHeaderAndLoadCommands += curSection->fSize;
8944 }
8945 uint64_t paddingSize = 0;
8946 if ( fOptions.outputKind() == Options::kDyld ) {
8947 // dyld itself has special padding requirements: we want the beginning of its __text section to be at a stable address
8948 paddingSize = 4096 - (totalSizeOfTEXTLessHeaderAndLoadCommands % 4096);
8949 }
8950 else if ( fOptions.outputKind() == Options::kObjectFile ) {
8951 // mach-o .o files need no padding between load commands and first section
8952 // but leave enough room that the object file could be signed
8953 paddingSize = 32;
8954 }
8955 else if ( fOptions.outputKind() == Options::kPreload ) {
8956 // mach-o MH_PRELOAD files need no padding between load commands and first section
8957 paddingSize = 0;
8958 }
8959 else {
8960 // work backwards from end of segment and lay out sections so that extra room goes to padding atom
8961 uint64_t addr = 0;
8962 for(int j=sectionCount-1; j >=0; --j) {
8963 SectionInfo* curSection = sectionInfos[j];
8964 if ( strcmp(curSection->fSectionName, fHeaderPadding->getSectionName()) == 0 ) {
8965 addr -= (fLoadCommandsSection->fSize+fMachHeaderAtom->getSize());
8966 paddingSize = addr % fOptions.segmentAlignment();
8967 break;
8968 }
8969 addr -= curSection->fSize;
8970 addr = addr & (0 - (1 << curSection->fAlignment));
8971 }
8972
8973 // the command line may require more padding than this
8974 uint32_t minPad = fOptions.minimumHeaderPad();
8975 if ( fOptions.maxMminimumHeaderPad() ) {
8976 // -headerpad_max_install_names means there should be room for every install path load command to grow to MAXPATHLEN (1024 bytes)
8977 uint32_t altMin = fLibraryToOrdinal.size() * MAXPATHLEN;
8978 if ( fOptions.outputKind() == Options::kDynamicLibrary )
8979 altMin += MAXPATHLEN;
8980 if ( altMin > minPad )
8981 minPad = altMin;
8982 }
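// Illustrative arithmetic (hypothetical numbers): linking against 3 dylibs gives
// altMin = 3 * MAXPATHLEN = 3072 bytes, and a dylib output adds another 1024 for
// its own install name, so -headerpad_max_install_names would raise minPad to
// 4096 bytes here unless -headerpad already asked for more.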
8983 if ( paddingSize < minPad ) {
8984 int extraPages = (minPad - paddingSize + fOptions.segmentAlignment() - 1)/fOptions.segmentAlignment();
8985 paddingSize += extraPages * fOptions.segmentAlignment();
8986 }
8987
8988 if ( fOptions.makeEncryptable() ) {
8989 // load commands must be on a separate non-encrypted page
8990 int loadCommandsPage = (sizeOfLoadCommandsPlusHeader + minPad)/fOptions.segmentAlignment();
8991 int textPage = (sizeOfLoadCommandsPlusHeader + paddingSize)/fOptions.segmentAlignment();
8992 if ( loadCommandsPage == textPage ) {
8993 paddingSize += fOptions.segmentAlignment();
8994 textPage += 1;
8995 }
8996
8997 //paddingSize = 4096 - ((totalSizeOfTEXTLessHeaderAndLoadCommands+fOptions.minimumHeaderPad()) % 4096) + fOptions.minimumHeaderPad();
8998 fEncryptionLoadCommand->setStartEncryptionOffset(textPage*fOptions.segmentAlignment());
8999 }
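// Illustrative example (hypothetical numbers, 4096-byte segment alignment):
// with sizeOfLoadCommandsPlusHeader = 0x900 and minPad = 0x20 the load commands
// end on page 0; if paddingSize had worked out to 0x600 the encryptable content
// would also begin on page 0, so paddingSize grows by one page and the
// encryption start offset becomes 0x1000 (page 1).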
9000 }
9001
9002 // adjust atom size and update section size
9003 fHeaderPadding->setSize(paddingSize);
9004 for(int j=0; j < sectionCount; ++j) {
9005 SectionInfo* curSection = sectionInfos[j];
9006 if ( strcmp(curSection->fSectionName, fHeaderPadding->getSectionName()) == 0 )
9007 curSection->fSize = paddingSize;
9008 }
9009 }
9010
9011 static uint64_t segmentAlign(uint64_t addr, uint64_t alignment)
9012 {
9013 return ((addr+alignment-1) & (-alignment));
9014 }
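// For example (illustrative): segmentAlign(0x2f40, 0x1000) == 0x3000, while
// segmentAlign(0x3000, 0x1000) == 0x3000, i.e. values already on a page
// boundary are left unchanged.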
9015
9016 // assign file offsets and logical address to all segments
9017 template <typename A>
9018 void Writer<A>::assignFileOffsets()
9019 {
9020 const bool virtualSectionOccupyAddressSpace = ((fOptions.outputKind() != Options::kObjectFile)
9021 && (fOptions.outputKind() != Options::kPreload));
9022 bool haveFixedSegments = false;
9023 uint64_t fileOffset = 0;
9024 uint64_t nextContiguousAddress = fOptions.baseAddress();
9025 uint64_t nextReadOnlyAddress = fOptions.baseAddress();
9026 uint64_t nextWritableAddress = fOptions.baseWritableAddress();
9027
9028 // process segments with fixed addresses (-segaddr)
9029 for (std::vector<Options::SegmentStart>::iterator it = fOptions.customSegmentAddresses().begin(); it != fOptions.customSegmentAddresses().end(); ++it) {
9030 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
9031 SegmentInfo* curSegment = *segit;
9032 if ( strcmp(curSegment->fName, it->name) == 0 ) {
9033 curSegment->fBaseAddress = it->address;
9034 curSegment->fFixedAddress = true;
9035 break;
9036 }
9037 }
9038 }
9039
9040 // process segments with custom page sizes (-seg_page_size)
9041 for (std::vector<Options::SegmentSize>::iterator it = fOptions.customSegmentSizes().begin(); it != fOptions.customSegmentSizes().end(); ++it) {
9042 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
9043 SegmentInfo* curSegment = *segit;
9044 if ( strcmp(curSegment->fName, it->name) == 0 ) {
9045 curSegment->fPageSize = it->size;
9046 break;
9047 }
9048 }
9049 }
9050
9051 // Run through the segments and each segment's sections to assign addresses
9052 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
9053 SegmentInfo* curSegment = *segit;
9054
9055 if ( fOptions.splitSeg() ) {
9056 if ( curSegment->fInitProtection & VM_PROT_WRITE )
9057 nextContiguousAddress = nextWritableAddress;
9058 else
9059 nextContiguousAddress = nextReadOnlyAddress;
9060 }
9061
9062 if ( fOptions.outputKind() == Options::kPreload ) {
9063 if ( strcmp(curSegment->fName, "__HEADER") == 0 )
9064 nextContiguousAddress = 0;
9065 else if ( strcmp(curSegment->fName, "__TEXT") == 0 )
9066 nextContiguousAddress = fOptions.baseAddress();
9067 }
9068
9069 fileOffset = segmentAlign(fileOffset, curSegment->fPageSize);
9070 curSegment->fFileOffset = fileOffset;
9071
9072 // Set the segment base address
9073 if ( curSegment->fFixedAddress )
9074 haveFixedSegments = true;
9075 else
9076 curSegment->fBaseAddress = segmentAlign(nextContiguousAddress, curSegment->fPageSize);
9077
9078 // We've set the segment address, now run through each section.
9079 uint64_t address = curSegment->fBaseAddress;
9080 SectionInfo* firstZeroFillSection = NULL;
9081 SectionInfo* prevSection = NULL;
9082
9083 std::vector<SectionInfo*>& sectionInfos = curSegment->fSections;
9084
9085 for (std::vector<SectionInfo*>::iterator it = sectionInfos.begin(); it != sectionInfos.end(); ++it) {
9086 SectionInfo* curSection = *it;
9087
9088 // adjust section address based on alignment
9089 uint64_t alignment = 1 << curSection->fAlignment;
9090 if ( curSection->fAtoms.size() == 1 ) {
9091 // if there is only one atom in section, use modulus for even better layout
9092 ObjectFile::Alignment atomAlign = curSection->fAtoms[0]->getAlignment();
9093 uint64_t atomAlignP2 = (1 << atomAlign.powerOf2);
9094 uint64_t currentModulus = (address % atomAlignP2);
9095 if ( currentModulus != atomAlign.modulus ) {
9096 if ( atomAlign.modulus > currentModulus )
9097 address += atomAlign.modulus-currentModulus;
9098 else
9099 address += atomAlign.modulus+atomAlignP2-currentModulus;
9100 }
9101 }
9102 else {
9103 address = ( (address+alignment-1) & (-alignment) );
9104 }
9105 // adjust file offset to match address
9106 if ( prevSection != NULL ) {
9107 if ( virtualSectionOccupyAddressSpace || !prevSection->fVirtualSection )
9108 fileOffset = (address - prevSection->getBaseAddress()) + prevSection->fFileOffset;
9109 else
9110 fileOffset = ( (fileOffset+alignment-1) & (-alignment) );
9111 }
9112
9113 // update section info
9114 curSection->fFileOffset = fileOffset;
9115 curSection->setBaseAddress(address);
9116 //fprintf(stderr, "%s %s addr=0x%llX, fileoffset=0x%llX, size=0x%llX\n", curSegment->fName, curSection->fSectionName, address, fileOffset, curSection->fSize);
9117
9118 // keep track of trailing zero fill sections
9119 if ( curSection->fAllZeroFill && (firstZeroFillSection == NULL) )
9120 firstZeroFillSection = curSection;
9121 if ( !curSection->fAllZeroFill && (firstZeroFillSection != NULL) && (fOptions.outputKind() != Options::kObjectFile) )
9122 throwf("zero-fill section %s not at end of segment", curSection->fSectionName);
9123
9124 // update running pointers
9125 if ( virtualSectionOccupyAddressSpace || !curSection->fVirtualSection )
9126 address += curSection->fSize;
9127 fileOffset += curSection->fSize;
9128
9129 // sanity check size of 32-bit binaries
9130 if ( address > maxAddress() )
9131 throwf("section %s exceeds 4GB limit", curSection->fSectionName);
9132
9133 // update segment info
9134 curSegment->fFileSize = fileOffset - curSegment->fFileOffset;
9135 curSegment->fSize = curSegment->fFileSize;
9136 prevSection = curSection;
9137 }
9138
9139 if ( fOptions.outputKind() == Options::kObjectFile ) {
9140 // don't page align .o files
9141 }
9142 else {
9143 // optimize trailing zero-fill sections to not occupy disk space
9144 if ( firstZeroFillSection != NULL ) {
9145 curSegment->fFileSize = firstZeroFillSection->fFileOffset - curSegment->fFileOffset;
9146 fileOffset = firstZeroFillSection->fFileOffset;
9147 }
9148 // page align segment size
9149 curSegment->fFileSize = segmentAlign(curSegment->fFileSize, curSegment->fPageSize);
9150 curSegment->fSize = segmentAlign(curSegment->fSize, curSegment->fPageSize);
9151 if ( !curSegment->fIndependentAddress && (curSegment->fBaseAddress >= nextContiguousAddress) ) {
9152 nextContiguousAddress = segmentAlign(curSegment->fBaseAddress+curSegment->fSize, curSegment->fPageSize);
9153 fileOffset = segmentAlign(fileOffset, curSegment->fPageSize);
9154 if ( curSegment->fInitProtection & VM_PROT_WRITE )
9155 nextWritableAddress = nextContiguousAddress;
9156 else
9157 nextReadOnlyAddress = nextContiguousAddress;
9158 }
9159 }
9160 //fprintf(stderr, "end of seg %s, fileoffset=0x%llX, nextContiguousAddress=0x%llX\n", curSegment->fName, fileOffset, nextContiguousAddress);
9161 }
9162
9163 // check for segment overlaps caused by user specified fixed segments (e.g. __PAGEZERO, __UNIXSTACK)
9164 if ( haveFixedSegments ) {
9165 int segCount = fSegmentInfos.size();
9166 for(int i=0; i < segCount; ++i) {
9167 SegmentInfo* segment1 = fSegmentInfos[i];
9168
9169 for(int j=0; j < segCount; ++j) {
9170 if ( i != j ) {
9171 SegmentInfo* segment2 = fSegmentInfos[j];
9172
9173 if ( segment1->fBaseAddress < segment2->fBaseAddress ) {
9174 if ( (segment1->fBaseAddress+segment1->fSize) > segment2->fBaseAddress )
9175 throwf("segments overlap: %s (0x%08llX + 0x%08llX) and %s (0x%08llX + 0x%08llX)",
9176 segment1->fName, segment1->fBaseAddress, segment1->fSize, segment2->fName, segment2->fBaseAddress, segment2->fSize);
9177 }
9178 else if ( segment1->fBaseAddress > segment2->fBaseAddress ) {
9179 if ( (segment2->fBaseAddress+segment2->fSize) > segment1->fBaseAddress )
9180 throwf("segments overlap: %s (0x%08llX + 0x%08llX) and %s (0x%08llX + 0x%08llX)",
9181 segment1->fName, segment1->fBaseAddress, segment1->fSize, segment2->fName, segment2->fBaseAddress, segment2->fSize);
9182 }
9183 else if ( (segment1->fSize != 0) && (segment2->fSize != 0) ) {
9184 throwf("segments overlap: %s (0x%08llX + 0x%08llX) and %s (0x%08llX + 0x%08llX)",
9185 segment1->fName, segment1->fBaseAddress, segment1->fSize, segment2->fName, segment2->fBaseAddress, segment2->fSize);
9186 }
9187 }
9188 }
9189 }
9190 }
9191
9192 // set up fFirstWritableSegment and fWritableSegmentPastFirst4GB
9193 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
9194 SegmentInfo* curSegment = *segit;
9195 if ( (curSegment->fInitProtection & VM_PROT_WRITE) != 0 ) {
9196 if ( fFirstWritableSegment == NULL )
9197 fFirstWritableSegment = curSegment;
9198 if ( (curSegment->fBaseAddress + curSegment->fSize - fOptions.baseAddress()) >= 0x100000000LL )
9199 fWritableSegmentPastFirst4GB = true;
9200 }
9201 }
9202
9203 // record size of encrypted part of __TEXT segment
9204 if ( fOptions.makeEncryptable() ) {
9205 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
9206 SegmentInfo* curSegment = *segit;
9207 if ( strcmp(curSegment->fName, "__TEXT") == 0 ) {
9208 fEncryptionLoadCommand->setEndEncryptionOffset(curSegment->fFileSize);
9209 break;
9210 }
9211 }
9212 }
9213
9214 }
9215
9216 template <typename A>
9217 void Writer<A>::adjustLinkEditSections()
9218 {
9219 // link edit content is always in last segment
9220 SegmentInfo* lastSeg = fSegmentInfos[fSegmentInfos.size()-1];
9221 unsigned int firstLinkEditSectionIndex = 0;
9222 while ( strcmp(lastSeg->fSections[firstLinkEditSectionIndex]->fSegmentName, "__LINKEDIT") != 0 )
9223 ++firstLinkEditSectionIndex;
9224
9225 const unsigned int linkEditSectionCount = lastSeg->fSections.size();
9226 uint64_t fileOffset = lastSeg->fSections[firstLinkEditSectionIndex]->fFileOffset;
9227 uint64_t address = lastSeg->fSections[firstLinkEditSectionIndex]->getBaseAddress();
9228 if ( fPadSegmentInfo != NULL ) {
9229 // insert __4GBFILL segment into segments vector before LINKEDIT
9230 for(std::vector<SegmentInfo*>::iterator it = fSegmentInfos.begin(); it != fSegmentInfos.end(); ++it) {
9231 if ( *it == lastSeg ) {
9232 fSegmentInfos.insert(it, fPadSegmentInfo);
9233 break;
9234 }
9235 }
9236 // adjust __4GBFILL segment to span from end of last segment to zeroPageSize
9237 fPadSegmentInfo->fSize = fOptions.zeroPageSize() - address;
9238 fPadSegmentInfo->fBaseAddress = address;
9239 // adjust LINKEDIT to start at zeroPageSize
9240 address = fOptions.zeroPageSize();
9241 lastSeg->fBaseAddress = fOptions.zeroPageSize();
9242 }
9243 for (unsigned int i=firstLinkEditSectionIndex; i < linkEditSectionCount; ++i) {
9244 std::vector<class ObjectFile::Atom*>& atoms = lastSeg->fSections[i]->fAtoms;
9245 // adjust section address based on alignment
9246 uint64_t sectionAlignment = 1 << lastSeg->fSections[i]->fAlignment;
9247 uint64_t pad = ((address+sectionAlignment-1) & (-sectionAlignment)) - address;
9248 address += pad;
9249 fileOffset += pad; // adjust file offset to match address
9250 lastSeg->fSections[i]->setBaseAddress(address);
9251 if ( strcmp(lastSeg->fSections[i]->fSectionName, "._absolute") == 0 )
9252 lastSeg->fSections[i]->setBaseAddress(0);
9253 lastSeg->fSections[i]->fFileOffset = fileOffset;
9254 uint64_t sectionOffset = 0;
9255 for (unsigned int j=0; j < atoms.size(); ++j) {
9256 ObjectFile::Atom* atom = atoms[j];
9257 uint64_t alignment = 1 << atom->getAlignment().powerOf2;
9258 sectionOffset = ( (sectionOffset+alignment-1) & (-alignment) );
9259 atom->setSectionOffset(sectionOffset);
9260 uint64_t size = atom->getSize();
9261 sectionOffset += size;
9262 if ( size > fLargestAtomSize )
9263 fLargestAtomSize = size;
9264 }
9265 //fprintf(stderr, "setting: lastSeg->fSections[%d]->fSize = 0x%08llX\n", i, sectionOffset);
9266 lastSeg->fSections[i]->fSize = sectionOffset;
9267 fileOffset += sectionOffset;
9268 address += sectionOffset;
9269 }
9270 if ( fOptions.outputKind() == Options::kObjectFile ) {
9271 //lastSeg->fBaseAddress = 0;
9272 //lastSeg->fSize = lastSeg->fSections[firstLinkEditSectionIndex]->
9273 //lastSeg->fFileOffset = 0;
9274 //lastSeg->fFileSize =
9275 }
9276 else {
9277 lastSeg->fFileSize = fileOffset - lastSeg->fFileOffset;
9278 lastSeg->fSize = (address - lastSeg->fBaseAddress+4095) & (-4096);
9279 }
9280 }
9281
9282
9283 template <typename A>
9284 ObjectFile::Atom::Scope MachHeaderAtom<A>::getScope() const
9285 {
9286 switch ( fWriter.fOptions.outputKind() ) {
9287 case Options::kDynamicExecutable:
9288 case Options::kStaticExecutable:
9289 return ObjectFile::Atom::scopeGlobal;
9290 case Options::kDynamicLibrary:
9291 case Options::kDynamicBundle:
9292 case Options::kDyld:
9293 case Options::kObjectFile:
9294 case Options::kPreload:
9295 case Options::kKextBundle:
9296 return ObjectFile::Atom::scopeLinkageUnit;
9297 }
9298 throw "unknown header type";
9299 }
9300
9301 template <typename A>
9302 ObjectFile::Atom::SymbolTableInclusion MachHeaderAtom<A>::getSymbolTableInclusion() const
9303 {
9304 switch ( fWriter.fOptions.outputKind() ) {
9305 case Options::kDynamicExecutable:
9306 return ObjectFile::Atom::kSymbolTableInAndNeverStrip;
9307 case Options::kStaticExecutable:
9308 return ObjectFile::Atom::kSymbolTableInAsAbsolute;
9309 case Options::kDynamicLibrary:
9310 case Options::kDynamicBundle:
9311 case Options::kDyld:
9312 return ObjectFile::Atom::kSymbolTableIn;
9313 case Options::kObjectFile:
9314 case Options::kPreload:
9315 case Options::kKextBundle:
9316 return ObjectFile::Atom::kSymbolTableNotIn;
9317 }
9318 throw "unknown header type";
9319 }
9320
9321 template <typename A>
9322 const char* MachHeaderAtom<A>::getName() const
9323 {
9324 switch ( fWriter.fOptions.outputKind() ) {
9325 case Options::kDynamicExecutable:
9326 case Options::kStaticExecutable:
9327 return "__mh_execute_header";
9328 case Options::kDynamicLibrary:
9329 return "__mh_dylib_header";
9330 case Options::kDynamicBundle:
9331 return "__mh_bundle_header";
9332 case Options::kObjectFile:
9333 case Options::kPreload:
9334 case Options::kKextBundle:
9335 return NULL;
9336 case Options::kDyld:
9337 return "__mh_dylinker_header";
9338 }
9339 throw "unknown header type";
9340 }
9341
9342 template <typename A>
9343 const char* MachHeaderAtom<A>::getDisplayName() const
9344 {
9345 switch ( fWriter.fOptions.outputKind() ) {
9346 case Options::kDynamicExecutable:
9347 case Options::kStaticExecutable:
9348 case Options::kDynamicLibrary:
9349 case Options::kDynamicBundle:
9350 case Options::kDyld:
9351 return this->getName();
9352 case Options::kObjectFile:
9353 case Options::kPreload:
9354 case Options::kKextBundle:
9355 return "mach header";
9356 }
9357 throw "unknown header type";
9358 }
9359
9360 template <typename A>
9361 void MachHeaderAtom<A>::copyRawContent(uint8_t buffer[]) const
9362 {
9363 // get file type
9364 uint32_t fileType = 0;
9365 switch ( fWriter.fOptions.outputKind() ) {
9366 case Options::kDynamicExecutable:
9367 case Options::kStaticExecutable:
9368 fileType = MH_EXECUTE;
9369 break;
9370 case Options::kDynamicLibrary:
9371 fileType = MH_DYLIB;
9372 break;
9373 case Options::kDynamicBundle:
9374 fileType = MH_BUNDLE;
9375 break;
9376 case Options::kObjectFile:
9377 fileType = MH_OBJECT;
9378 break;
9379 case Options::kDyld:
9380 fileType = MH_DYLINKER;
9381 break;
9382 case Options::kPreload:
9383 fileType = MH_PRELOAD;
9384 break;
9385 case Options::kKextBundle:
9386 fileType = MH_KEXT_BUNDLE;
9387 break;
9388 }
9389
9390 // get flags
9391 uint32_t flags = 0;
9392 if ( fWriter.fOptions.outputKind() == Options::kObjectFile ) {
9393 if ( fWriter.fCanScatter )
9394 flags = MH_SUBSECTIONS_VIA_SYMBOLS;
9395 }
9396 else {
9397 if ( fWriter.fOptions.outputKind() == Options::kStaticExecutable ) {
9398 flags |= MH_NOUNDEFS;
9399 }
9400 else if ( fWriter.fOptions.outputKind() == Options::kPreload ) {
9401 flags |= MH_NOUNDEFS;
9402 if ( fWriter.fOptions.positionIndependentExecutable() )
9403 flags |= MH_PIE;
9404 }
9405 else {
9406 flags = MH_DYLDLINK;
9407 if ( fWriter.fOptions.bindAtLoad() )
9408 flags |= MH_BINDATLOAD;
9409 switch ( fWriter.fOptions.nameSpace() ) {
9410 case Options::kTwoLevelNameSpace:
9411 flags |= MH_TWOLEVEL | MH_NOUNDEFS;
9412 break;
9413 case Options::kFlatNameSpace:
9414 break;
9415 case Options::kForceFlatNameSpace:
9416 flags |= MH_FORCE_FLAT;
9417 break;
9418 }
9419 bool hasWeakDefines = fWriter.fHasWeakExports;
9420 if ( fWriter.fRegularDefAtomsThatOverrideADylibsWeakDef->size() != 0 ) {
9421 for(std::set<const ObjectFile::Atom*>::iterator it = fWriter.fRegularDefAtomsThatOverrideADylibsWeakDef->begin();
9422 it != fWriter.fRegularDefAtomsThatOverrideADylibsWeakDef->end(); ++it) {
9423 if ( fWriter.shouldExport(**it) ) {
9424 hasWeakDefines = true;
9425 break;
9426 }
9427 }
9428 }
9429 if ( hasWeakDefines )
9430 flags |= MH_WEAK_DEFINES;
9431 if ( fWriter.fReferencesWeakImports || fWriter.fHasWeakExports )
9432 flags |= MH_BINDS_TO_WEAK;
9433 if ( fWriter.fOptions.prebind() )
9434 flags |= MH_PREBOUND;
9435 if ( fWriter.fOptions.splitSeg() )
9436 flags |= MH_SPLIT_SEGS;
9437 if ( (fWriter.fOptions.outputKind() == Options::kDynamicLibrary) && fWriter.fNoReExportedDylibs )
9438 flags |= MH_NO_REEXPORTED_DYLIBS;
9439 if ( fWriter.fOptions.positionIndependentExecutable() )
9440 flags |= MH_PIE;
9441 if ( fWriter.fOptions.markAutoDeadStripDylib() )
9442 flags |= MH_DEAD_STRIPPABLE_DYLIB;
9443 }
9444 if ( fWriter.fOptions.hasExecutableStack() )
9445 flags |= MH_ALLOW_STACK_EXECUTION;
9446 if ( fWriter.fOptions.readerOptions().fRootSafe )
9447 flags |= MH_ROOT_SAFE;
9448 if ( fWriter.fOptions.readerOptions().fSetuidSafe )
9449 flags |= MH_SETUID_SAFE;
9450 }
9451
9452 // get commands info
9453 uint32_t commandsSize = 0;
9454 uint32_t commandsCount = 0;
9455
9456 std::vector<class ObjectFile::Atom*>& loadCommandAtoms = fWriter.fLoadCommandsSection->fAtoms;
9457 for (std::vector<ObjectFile::Atom*>::iterator it=loadCommandAtoms.begin(); it != loadCommandAtoms.end(); it++) {
9458 ObjectFile::Atom* atom = *it;
9459 commandsSize += atom->getSize();
9460 // segment and symbol table atoms can contain more than one load command
9461 if ( atom == fWriter.fSegmentCommands )
9462 commandsCount += fWriter.fSegmentCommands->commandCount();
9463 else if ( atom == fWriter.fSymbolTableCommands )
9464 commandsCount += fWriter.fSymbolTableCommands->commandCount();
9465 else if ( atom->getSize() != 0 )
9466 ++commandsCount;
9467 }
9468
9469 // fill out mach_header
9470 macho_header<typename A::P>* mh = (macho_header<typename A::P>*)buffer;
9471 setHeaderInfo(*mh);
9472 mh->set_filetype(fileType);
9473 mh->set_ncmds(commandsCount);
9474 mh->set_sizeofcmds(commandsSize);
9475 mh->set_flags(flags);
9476 }
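// Illustrative result of the flag computation above (assuming no other
// flag-affecting options are in effect): a two-level-namespace dylib with no
// weak symbols and no re-exported dylibs ends up with
// MH_DYLDLINK | MH_TWOLEVEL | MH_NOUNDEFS | MH_NO_REEXPORTED_DYLIBS,
// while a static executable gets just MH_NOUNDEFS.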
9477
9478 template <>
9479 void MachHeaderAtom<ppc>::setHeaderInfo(macho_header<ppc::P>& header) const
9480 {
9481 header.set_magic(MH_MAGIC);
9482 header.set_cputype(CPU_TYPE_POWERPC);
9483 header.set_cpusubtype(fWriter.fCpuConstraint);
9484 }
9485
9486 template <>
9487 void MachHeaderAtom<ppc64>::setHeaderInfo(macho_header<ppc64::P>& header) const
9488 {
9489 header.set_magic(MH_MAGIC_64);
9490 header.set_cputype(CPU_TYPE_POWERPC64);
9491 if ( (fWriter.fOptions.outputKind() == Options::kDynamicExecutable) && (fWriter.fOptions.macosxVersionMin() >= ObjectFile::ReaderOptions::k10_5) )
9492 header.set_cpusubtype(CPU_SUBTYPE_POWERPC_ALL | 0x80000000);
9493 else
9494 header.set_cpusubtype(CPU_SUBTYPE_POWERPC_ALL);
9495 header.set_reserved(0);
9496 }
9497
9498 template <>
9499 void MachHeaderAtom<x86>::setHeaderInfo(macho_header<x86::P>& header) const
9500 {
9501 header.set_magic(MH_MAGIC);
9502 header.set_cputype(CPU_TYPE_I386);
9503 header.set_cpusubtype(CPU_SUBTYPE_I386_ALL);
9504 }
9505
9506 template <>
9507 void MachHeaderAtom<x86_64>::setHeaderInfo(macho_header<x86_64::P>& header) const
9508 {
9509 header.set_magic(MH_MAGIC_64);
9510 header.set_cputype(CPU_TYPE_X86_64);
9511 if ( (fWriter.fOptions.outputKind() == Options::kDynamicExecutable) && (fWriter.fOptions.macosxVersionMin() >= ObjectFile::ReaderOptions::k10_5) )
9512 header.set_cpusubtype(CPU_SUBTYPE_X86_64_ALL | 0x80000000);
9513 else
9514 header.set_cpusubtype(CPU_SUBTYPE_X86_64_ALL);
9515 header.set_reserved(0);
9516 }
9517
9518 template <>
9519 void MachHeaderAtom<arm>::setHeaderInfo(macho_header<arm::P>& header) const
9520 {
9521 header.set_magic(MH_MAGIC);
9522 header.set_cputype(CPU_TYPE_ARM);
9523 header.set_cpusubtype(fWriter.fCpuConstraint);
9524 }
9525
9526 template <typename A>
9527 CustomStackAtom<A>::CustomStackAtom(Writer<A>& writer)
9528 : WriterAtom<A>(writer, Segment::fgStackSegment)
9529 {
9530 if ( stackGrowsDown() )
9531 Segment::fgStackSegment.setBaseAddress(writer.fOptions.customStackAddr() - writer.fOptions.customStackSize());
9532 else
9533 Segment::fgStackSegment.setBaseAddress(writer.fOptions.customStackAddr());
9534 }
9535
9536
9537 template <> bool CustomStackAtom<ppc>::stackGrowsDown() { return true; }
9538 template <> bool CustomStackAtom<ppc64>::stackGrowsDown() { return true; }
9539 template <> bool CustomStackAtom<x86>::stackGrowsDown() { return true; }
9540 template <> bool CustomStackAtom<x86_64>::stackGrowsDown() { return true; }
9541 template <> bool CustomStackAtom<arm>::stackGrowsDown() { return true; }
9542
9543 template <typename A>
9544 void SegmentLoadCommandsAtom<A>::computeSize()
9545 {
9546 uint64_t size = 0;
9547 std::vector<SegmentInfo*>& segmentInfos = fWriter.fSegmentInfos;
9548 int segCount = 0;
9549 for(std::vector<SegmentInfo*>::iterator it = segmentInfos.begin(); it != segmentInfos.end(); ++it) {
9550 SegmentInfo* seg = *it;
9551 if ( seg->fHasLoadCommand ) {
9552 ++segCount;
9553 size += sizeof(macho_segment_command<P>);
9554 std::vector<SectionInfo*>& sectionInfos = seg->fSections;
9555 const int sectionCount = sectionInfos.size();
9556 for(int j=0; j < sectionCount; ++j) {
9557 if ( fWriter.fEmitVirtualSections || ! sectionInfos[j]->fVirtualSection )
9558 size += sizeof(macho_section<P>);
9559 }
9560 }
9561 }
9562 fSize = size;
9563 fCommandCount = segCount;
9564 if ( fWriter.fPadSegmentInfo != NULL ) {
9565 ++fCommandCount;
9566 fSize += sizeof(macho_segment_command<P>);
9567 }
9568 }
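// Rough size example (illustrative): in 32-bit mach-o a segment_command is
// 56 bytes and each section header is 68 bytes, so a segment with four
// emitted sections contributes 56 + 4*68 = 328 bytes to fSize; the 64-bit
// variants are 72 and 80 bytes respectively.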
9569
9570 template <>
9571 uint64_t LoadCommandAtom<ppc>::alignedSize(uint64_t size)
9572 {
9573 return ((size+3) & (-4)); // 4-byte align all load commands for 32-bit mach-o
9574 }
9575
9576 template <>
9577 uint64_t LoadCommandAtom<ppc64>::alignedSize(uint64_t size)
9578 {
9579 return ((size+7) & (-8)); // 8-byte align all load commands for 64-bit mach-o
9580 }
9581
9582 template <>
9583 uint64_t LoadCommandAtom<x86>::alignedSize(uint64_t size)
9584 {
9585 return ((size+3) & (-4)); // 4-byte align all load commands for 32-bit mach-o
9586 }
9587
9588 template <>
9589 uint64_t LoadCommandAtom<x86_64>::alignedSize(uint64_t size)
9590 {
9591 return ((size+7) & (-8)); // 8-byte align all load commands for 64-bit mach-o
9592 }
9593
9594 template <>
9595 uint64_t LoadCommandAtom<arm>::alignedSize(uint64_t size)
9596 {
9597 return ((size+3) & (-4)); // 4-byte align all load commands for 32-bit mach-o
9598 }
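// For instance (illustrative): alignedSize(58) is 60 on the 32-bit
// architectures above and 64 on the 64-bit ones.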
9599
9600 template <typename A>
9601 void SegmentLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9602 {
9603 uint64_t size = this->getSize();
9604 const bool oneSegment =( fWriter.fOptions.outputKind() == Options::kObjectFile );
9605 bzero(buffer, size);
9606 uint8_t* p = buffer;
9607 typename std::vector<SegmentInfo*>& segmentInfos = fWriter.fSegmentInfos;
9608 for(std::vector<SegmentInfo*>::iterator it = segmentInfos.begin(); it != segmentInfos.end(); ++it) {
9609 SegmentInfo* segInfo = *it;
9610 if ( ! segInfo->fHasLoadCommand )
9611 continue;
9612 const int sectionCount = segInfo->fSections.size();
9613 macho_segment_command<P>* cmd = (macho_segment_command<P>*)p;
9614 cmd->set_cmd(macho_segment_command<P>::CMD);
9615 cmd->set_segname(segInfo->fName);
9616 cmd->set_vmaddr(segInfo->fBaseAddress);
9617 cmd->set_vmsize(oneSegment ? 0 : segInfo->fSize);
9618 cmd->set_fileoff(segInfo->fFileOffset);
9619 cmd->set_filesize(oneSegment ? 0 : segInfo->fFileSize);
9620 cmd->set_maxprot(segInfo->fMaxProtection);
9621 cmd->set_initprot(segInfo->fInitProtection);
9622 // add sections array
9623 macho_section<P>* const sections = (macho_section<P>*)&p[sizeof(macho_segment_command<P>)];
9624 unsigned int sectionsEmitted = 0;
9625 for (int j=0; j < sectionCount; ++j) {
9626 SectionInfo* sectInfo = segInfo->fSections[j];
9627 if ( fWriter.fEmitVirtualSections || !sectInfo->fVirtualSection ) {
9628 macho_section<P>* sect = &sections[sectionsEmitted++];
9629 if ( oneSegment ) {
9630 // .o file segment does not cover load commands, so recalc at first real section
9631 if ( sectionsEmitted == 1 ) {
9632 cmd->set_vmaddr(sectInfo->getBaseAddress());
9633 cmd->set_fileoff(sectInfo->fFileOffset);
9634 }
9635 // <rdar://problem/7712869> if last section is zero-fill don't add size to filesize total
9636 if ( !sectInfo->fAllZeroFill ) {
9637 cmd->set_filesize((sectInfo->fFileOffset+sectInfo->fSize)-cmd->fileoff());
9638 }
9639 cmd->set_vmsize(sectInfo->getBaseAddress() + sectInfo->fSize);
9640 }
9641 sect->set_sectname(sectInfo->fSectionName);
9642 sect->set_segname(sectInfo->fSegmentName);
9643 sect->set_addr(sectInfo->getBaseAddress());
9644 sect->set_size(sectInfo->fSize);
9645 sect->set_offset(sectInfo->fFileOffset);
9646 sect->set_align(sectInfo->fAlignment);
9647 if ( sectInfo->fRelocCount != 0 ) {
9648 sect->set_reloff(sectInfo->fRelocOffset * sizeof(macho_relocation_info<P>) + fWriter.fSectionRelocationsAtom->getFileOffset());
9649 sect->set_nreloc(sectInfo->fRelocCount);
9650 }
9651 if ( sectInfo->fAllZeroFill ) {
9652 sect->set_flags(S_ZEROFILL);
9653 sect->set_offset(0);
9654 }
9655 else if ( sectInfo->fAllLazyPointers ) {
9656 sect->set_flags(S_LAZY_SYMBOL_POINTERS);
9657 sect->set_reserved1(sectInfo->fIndirectSymbolOffset);
9658 }
9659 else if ( sectInfo->fAllLazyDylibPointers ) {
9660 sect->set_flags(S_LAZY_DYLIB_SYMBOL_POINTERS);
9661 sect->set_reserved1(sectInfo->fIndirectSymbolOffset);
9662 }
9663 else if ( sectInfo->fAllNonLazyPointers ) {
9664 sect->set_flags(S_NON_LAZY_SYMBOL_POINTERS);
9665 sect->set_reserved1(sectInfo->fIndirectSymbolOffset);
9666 }
9667 else if ( sectInfo->fAllStubs ) {
9668 sect->set_flags(S_SYMBOL_STUBS | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS);
9669 sect->set_reserved1(sectInfo->fIndirectSymbolOffset);
9670 sect->set_reserved2(sectInfo->fSize / sectInfo->fAtoms.size());
9671 if ( sectInfo->fHasTextLocalRelocs )
9672 sect->set_flags(sect->flags() | S_ATTR_LOC_RELOC);
9673 }
9674 else if ( sectInfo->fAllSelfModifyingStubs ) {
9675 sect->set_flags(S_SYMBOL_STUBS | S_ATTR_SELF_MODIFYING_CODE);
9676 sect->set_reserved1(sectInfo->fIndirectSymbolOffset);
9677 sect->set_reserved2(sectInfo->fSize / sectInfo->fAtoms.size());
9678 }
9679 else if ( sectInfo->fAllStubHelpers ) {
9680 sect->set_flags(S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS);
9681 if ( sectInfo->fHasTextLocalRelocs )
9682 sect->set_flags(sect->flags() | S_ATTR_LOC_RELOC);
9683 }
9684 else if ( sectInfo->fAtoms.at(0)->getContentType() == ObjectFile::Atom::kCStringType ) {
9685 sect->set_flags(S_CSTRING_LITERALS);
9686 }
9687 else if ( sectInfo->fAtoms.at(0)->getContentType() == ObjectFile::Atom::kCFIType ) {
9688 sect->set_flags(S_COALESCED | S_ATTR_NO_TOC | S_ATTR_STRIP_STATIC_SYMS);
9689 }
9690 else if ( (strcmp(sectInfo->fSectionName, "__mod_init_func") == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9691 sect->set_flags(S_MOD_INIT_FUNC_POINTERS);
9692 }
9693 else if ( (strcmp(sectInfo->fSectionName, "__mod_term_func") == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9694 sect->set_flags(S_MOD_TERM_FUNC_POINTERS);
9695 }
9696 else if ( (strcmp(sectInfo->fSectionName, "__textcoal_nt") == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9697 sect->set_flags(S_COALESCED);
9698 }
9699 else if ( (strcmp(sectInfo->fSectionName, "__const_coal") == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9700 sect->set_flags(S_COALESCED);
9701 }
9702 else if ( (strcmp(sectInfo->fSectionName, "__interpose") == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9703 sect->set_flags(S_INTERPOSING);
9704 }
9705 else if ( (strcmp(sectInfo->fSectionName, "__literal4") == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9706 sect->set_flags(S_4BYTE_LITERALS);
9707 }
9708 else if ( (strcmp(sectInfo->fSectionName, "__literal8") == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9709 sect->set_flags(S_8BYTE_LITERALS);
9710 }
9711 else if ( (strcmp(sectInfo->fSectionName, "__literal16") == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9712 sect->set_flags(S_16BYTE_LITERALS);
9713 }
9714 else if ( (strcmp(sectInfo->fSectionName, "__message_refs") == 0) && (strcmp(sectInfo->fSegmentName, "__OBJC") == 0) ) {
9715 sect->set_flags(S_LITERAL_POINTERS);
9716 }
9717 else if ( (strcmp(sectInfo->fSectionName, "__objc_selrefs") == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9718 sect->set_flags(S_LITERAL_POINTERS);
9719 }
9720 else if ( (strcmp(sectInfo->fSectionName, "__cls_refs") == 0) && (strcmp(sectInfo->fSegmentName, "__OBJC") == 0) ) {
9721 sect->set_flags(S_LITERAL_POINTERS);
9722 }
9723 else if ( (strncmp(sectInfo->fSectionName, "__dof_", 6) == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9724 sect->set_flags(S_DTRACE_DOF);
9725 }
9726 else if ( (strncmp(sectInfo->fSectionName, "__dof_", 6) == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9727 sect->set_flags(S_DTRACE_DOF);
9728 }
9729 else if ( (strncmp(sectInfo->fSectionName, "__text", 6) == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9730 sect->set_flags(S_REGULAR | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS);
9731 if ( sectInfo->fHasTextLocalRelocs )
9732 sect->set_flags(sect->flags() | S_ATTR_LOC_RELOC);
9733 if ( sectInfo->fHasTextExternalRelocs )
9734 sect->set_flags(sect->flags() | S_ATTR_EXT_RELOC);
9735 }
9736 //fprintf(stderr, "section %s flags=0x%08X\n", sectInfo->fSectionName, sect->flags());
9737 }
9738 }
9739 p = &p[sizeof(macho_segment_command<P>) + sectionsEmitted*sizeof(macho_section<P>)];
9740 cmd->set_cmdsize(sizeof(macho_segment_command<P>) + sectionsEmitted*sizeof(macho_section<P>));
9741 cmd->set_nsects(sectionsEmitted);
9742 }
9743 }
9744
9745
9746 template <typename A>
9747 SymbolTableLoadCommandsAtom<A>::SymbolTableLoadCommandsAtom(Writer<A>& writer)
9748 : LoadCommandAtom<A>(writer), fNeedsDynamicSymbolTable(false)
9749 {
9750 bzero(&fSymbolTable, sizeof(macho_symtab_command<P>));
9751 bzero(&fDynamicSymbolTable, sizeof(macho_dysymtab_command<P>));
9752 switch ( fWriter.fOptions.outputKind() ) {
9753 case Options::kDynamicExecutable:
9754 case Options::kDynamicLibrary:
9755 case Options::kDynamicBundle:
9756 case Options::kDyld:
9757 case Options::kKextBundle:
9758 fNeedsDynamicSymbolTable = true;
9759 break;
9760 case Options::kObjectFile:
9761 case Options::kStaticExecutable:
9762 fNeedsDynamicSymbolTable = false;
break;
9763 case Options::kPreload:
9764 fNeedsDynamicSymbolTable = fWriter.fOptions.positionIndependentExecutable();
9765 break;
9766 }
9767 writer.fSymbolTableCommands = this;
9768 }
9769
9770
9771
9772 template <typename A>
9773 void SymbolTableLoadCommandsAtom<A>::needDynamicTable()
9774 {
9775 fNeedsDynamicSymbolTable = true;
9776 }
9777
9778
9779 template <typename A>
9780 uint64_t SymbolTableLoadCommandsAtom<A>::getSize() const
9781 {
9782 if ( fNeedsDynamicSymbolTable )
9783 return this->alignedSize(sizeof(macho_symtab_command<P>) + sizeof(macho_dysymtab_command<P>));
9784 else
9785 return this->alignedSize(sizeof(macho_symtab_command<P>));
9786 }
9787
9788 template <typename A>
9789 void SymbolTableLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9790 {
9791 // build LC_SYMTAB command
9792 macho_symtab_command<P>* symbolTableCmd = (macho_symtab_command<P>*)buffer;
9793 bzero(symbolTableCmd, sizeof(macho_symtab_command<P>));
9794 symbolTableCmd->set_cmd(LC_SYMTAB);
9795 symbolTableCmd->set_cmdsize(sizeof(macho_symtab_command<P>));
9796 symbolTableCmd->set_nsyms(fWriter.fSymbolTableCount);
9797 symbolTableCmd->set_symoff(fWriter.fSymbolTableCount == 0 ? 0 : fWriter.fSymbolTableAtom->getFileOffset());
9798 symbolTableCmd->set_stroff(fWriter.fStringsAtom->getSize() == 0 ? 0 : fWriter.fStringsAtom->getFileOffset());
9799 symbolTableCmd->set_strsize(fWriter.fStringsAtom->getSize());
9800
9801 // build LC_DYSYMTAB command
9802 if ( fNeedsDynamicSymbolTable ) {
9803 macho_dysymtab_command<P>* dynamicSymbolTableCmd = (macho_dysymtab_command<P>*)&buffer[sizeof(macho_symtab_command<P>)];
9804 bzero(dynamicSymbolTableCmd, sizeof(macho_dysymtab_command<P>));
9805 dynamicSymbolTableCmd->set_cmd(LC_DYSYMTAB);
9806 dynamicSymbolTableCmd->set_cmdsize(sizeof(macho_dysymtab_command<P>));
9807 dynamicSymbolTableCmd->set_ilocalsym(fWriter.fSymbolTableStabsStartIndex);
9808 dynamicSymbolTableCmd->set_nlocalsym(fWriter.fSymbolTableStabsCount + fWriter.fSymbolTableLocalCount);
9809 dynamicSymbolTableCmd->set_iextdefsym(fWriter.fSymbolTableExportStartIndex);
9810 dynamicSymbolTableCmd->set_nextdefsym(fWriter.fSymbolTableExportCount);
9811 dynamicSymbolTableCmd->set_iundefsym(fWriter.fSymbolTableImportStartIndex);
9812 dynamicSymbolTableCmd->set_nundefsym(fWriter.fSymbolTableImportCount);
9813 if ( fWriter.fModuleInfoAtom != NULL ) {
9814 dynamicSymbolTableCmd->set_tocoff(fWriter.fModuleInfoAtom->getTableOfContentsFileOffset());
9815 dynamicSymbolTableCmd->set_ntoc(fWriter.fSymbolTableExportCount);
9816 dynamicSymbolTableCmd->set_modtaboff(fWriter.fModuleInfoAtom->getModuleTableFileOffset());
9817 dynamicSymbolTableCmd->set_nmodtab(1);
9818 dynamicSymbolTableCmd->set_extrefsymoff(fWriter.fModuleInfoAtom->getReferencesFileOffset());
9819 dynamicSymbolTableCmd->set_nextrefsyms(fWriter.fModuleInfoAtom->getReferencesCount());
9820 }
9821 dynamicSymbolTableCmd->set_indirectsymoff((fWriter.fIndirectTableAtom == NULL) ? 0 : fWriter.fIndirectTableAtom->getFileOffset());
9822 dynamicSymbolTableCmd->set_nindirectsyms((fWriter.fIndirectTableAtom == NULL) ? 0 : fWriter.fIndirectTableAtom->fTable.size());
9823 if ( fWriter.fOptions.outputKind() != Options::kObjectFile ) {
9824 if ( fWriter.fExternalRelocationsAtom != 0 ) {
9825 dynamicSymbolTableCmd->set_extreloff((fWriter.fExternalRelocs.size()==0) ? 0 : fWriter.fExternalRelocationsAtom->getFileOffset());
9826 dynamicSymbolTableCmd->set_nextrel(fWriter.fExternalRelocs.size());
9827 }
9828 if ( fWriter.fLocalRelocationsAtom != 0 ) {
9829 dynamicSymbolTableCmd->set_locreloff((fWriter.fInternalRelocs.size()==0) ? 0 : fWriter.fLocalRelocationsAtom->getFileOffset());
9830 dynamicSymbolTableCmd->set_nlocrel(fWriter.fInternalRelocs.size());
9831 }
9832 }
9833 }
9834 }
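// Worked example of the LC_DYSYMTAB ranges built above (illustrative, not from the original
// source): the symbol table is emitted as [stabs+locals][externally defined][undefined], and
// the i*/n* pairs must tile it without gaps.  With 10 stabs, 5 locals, 3 exports and 2
// imports the command would carry ilocalsym=0/nlocalsym=15, iextdefsym=15/nextdefsym=3,
// iundefsym=18/nundefsym=2.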
9835
9836
9837 template <typename A>
9838 unsigned int SymbolTableLoadCommandsAtom<A>::commandCount()
9839 {
9840 return fNeedsDynamicSymbolTable ? 2 : 1;
9841 }
9842
9843 template <typename A>
9844 uint64_t DyldLoadCommandsAtom<A>::getSize() const
9845 {
9846 return this->alignedSize(sizeof(macho_dylinker_command<P>) + strlen("/usr/lib/dyld") + 1);
9847 }
9848
9849 template <typename A>
9850 void DyldLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9851 {
9852 uint64_t size = this->getSize();
9853 bzero(buffer, size);
9854 macho_dylinker_command<P>* cmd = (macho_dylinker_command<P>*)buffer;
9855 if ( fWriter.fOptions.outputKind() == Options::kDyld )
9856 cmd->set_cmd(LC_ID_DYLINKER);
9857 else
9858 cmd->set_cmd(LC_LOAD_DYLINKER);
9859 cmd->set_cmdsize(this->getSize());
9860 cmd->set_name_offset();
9861 strcpy((char*)&buffer[sizeof(macho_dylinker_command<P>)], "/usr/lib/dyld");
9862 }
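// A minimal sketch (not part of the original source) of the sizing pattern shared by the
// string-carrying load commands above: a fixed struct, then the NUL-terminated string, with
// the total rounded up so the next load command stays pointer-aligned.  The helper name and
// the explicit 'align' parameter are illustrative only; the real code uses alignedSize().
static inline uint32_t exampleStringCommandSize(uint32_t fixedSize, const char* str, uint32_t align)
{
    uint32_t raw = fixedSize + strlen(str) + 1;    // fixed struct + string + trailing NUL
    return (raw + align - 1) & ~(align - 1);       // round up to a multiple of 'align'
}
// For LC_LOAD_DYLINKER with "/usr/lib/dyld" (13 chars + NUL) and a 12-byte fixed portion,
// this would give 28 when aligning to 4 bytes or 32 when aligning to 8.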
9863
9864 template <typename A>
9865 uint64_t AllowableClientLoadCommandsAtom<A>::getSize() const
9866 {
9867 return this->alignedSize(sizeof(macho_sub_client_command<P>) + strlen(this->clientString) + 1);
9868 }
9869
9870 template <typename A>
9871 void AllowableClientLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9872 {
9873 uint64_t size = this->getSize();
9874
9875 bzero(buffer, size);
9876 macho_sub_client_command<P>* cmd = (macho_sub_client_command<P>*)buffer;
9877 cmd->set_cmd(LC_SUB_CLIENT);
9878 cmd->set_cmdsize(size);
9879 cmd->set_client_offset();
9880 strcpy((char*)&buffer[sizeof(macho_sub_client_command<P>)], this->clientString);
9881
9882 }
9883
9884 template <typename A>
9885 uint64_t DylibLoadCommandsAtom<A>::getSize() const
9886 {
9887 if ( fOptimizedAway ) {
9888 return 0;
9889 }
9890 else {
9891 const char* path = fInfo.reader->getInstallPath();
9892 return this->alignedSize(sizeof(macho_dylib_command<P>) + strlen(path) + 1);
9893 }
9894 }
9895
9896 template <typename A>
9897 void DylibLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9898 {
9899 if ( fOptimizedAway )
9900 return;
9901 uint64_t size = this->getSize();
9902 bzero(buffer, size);
9903 const char* path = fInfo.reader->getInstallPath();
9904 macho_dylib_command<P>* cmd = (macho_dylib_command<P>*)buffer;
9905 // <rdar://problem/5529626> If only weak_import symbols are used, linker should use LC_LOAD_WEAK_DYLIB
9906 bool autoWeakLoadDylib = ( (fWriter.fDylibReadersWithWeakImports.count(fInfo.reader) > 0)
9907 && (fWriter.fDylibReadersWithNonWeakImports.count(fInfo.reader) == 0) );
9908 if ( fInfo.options.fLazyLoad )
9909 cmd->set_cmd(LC_LAZY_LOAD_DYLIB);
9910 else if ( fInfo.options.fWeakImport || autoWeakLoadDylib )
9911 cmd->set_cmd(LC_LOAD_WEAK_DYLIB);
9912 else if ( fInfo.options.fReExport && fWriter.fOptions.useSimplifiedDylibReExports() )
9913 cmd->set_cmd(LC_REEXPORT_DYLIB);
9914 else
9915 cmd->set_cmd(LC_LOAD_DYLIB);
9916 cmd->set_cmdsize(this->getSize());
9917 cmd->set_timestamp(2); // needs to be some constant value that is different from the one DylibIDLoadCommandsAtom uses
9918 cmd->set_current_version(fInfo.reader->getCurrentVersion());
9919 cmd->set_compatibility_version(fInfo.reader->getCompatibilityVersion());
9920 cmd->set_name_offset();
9921 strcpy((char*)&buffer[sizeof(macho_dylib_command<P>)], path);
9922 }
9923
9924
9925
9926 template <typename A>
9927 uint64_t DylibIDLoadCommandsAtom<A>::getSize() const
9928 {
9929 return this->alignedSize(sizeof(macho_dylib_command<P>) + strlen(fWriter.fOptions.installPath()) + 1);
9930 }
9931
9932 template <typename A>
9933 void DylibIDLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9934 {
9935 uint64_t size = this->getSize();
9936 bzero(buffer, size);
9937 macho_dylib_command<P>* cmd = (macho_dylib_command<P>*)buffer;
9938 cmd->set_cmd(LC_ID_DYLIB);
9939 cmd->set_cmdsize(this->getSize());
9940 cmd->set_name_offset();
9941 cmd->set_timestamp(1); // needs to be some constant value that is different from the one DylibLoadCommandsAtom uses
9942 cmd->set_current_version(fWriter.fOptions.currentVersion());
9943 cmd->set_compatibility_version(fWriter.fOptions.compatibilityVersion());
9944 strcpy((char*)&buffer[sizeof(macho_dylib_command<P>)], fWriter.fOptions.installPath());
9945 }
9946
9947
9948 template <typename A>
9949 void RoutinesLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9950 {
9951 uint64_t initAddr = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
9952 if (fWriter.fEntryPoint->isThumb())
9953 initAddr |= 1ULL;
9954 bzero(buffer, sizeof(macho_routines_command<P>));
9955 macho_routines_command<P>* cmd = (macho_routines_command<P>*)buffer;
9956 cmd->set_cmd(macho_routines_command<P>::CMD);
9957 cmd->set_cmdsize(this->getSize());
9958 cmd->set_init_address(initAddr);
9959 }
9960
9961
9962 template <typename A>
9963 uint64_t SubUmbrellaLoadCommandsAtom<A>::getSize() const
9964 {
9965 return this->alignedSize(sizeof(macho_sub_umbrella_command<P>) + strlen(fName) + 1);
9966 }
9967
9968 template <typename A>
9969 void SubUmbrellaLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9970 {
9971 uint64_t size = this->getSize();
9972 bzero(buffer, size);
9973 macho_sub_umbrella_command<P>* cmd = (macho_sub_umbrella_command<P>*)buffer;
9974 cmd->set_cmd(LC_SUB_UMBRELLA);
9975 cmd->set_cmdsize(this->getSize());
9976 cmd->set_sub_umbrella_offset();
9977 strcpy((char*)&buffer[sizeof(macho_sub_umbrella_command<P>)], fName);
9978 }
9979
9980 template <typename A>
9981 void UUIDLoadCommandAtom<A>::generate()
9982 {
9983 switch ( fWriter.fOptions.getUUIDMode() ) {
9984 case Options::kUUIDNone:
9985 fEmit = false;
9986 break;
9987 case Options::kUUIDRandom:
9988 ::uuid_generate_random(fUUID);
9989 fEmit = true;
9990 break;
9991 case Options::kUUIDContent:
9992 bzero(fUUID, 16);
9993 fEmit = true;
9994 break;
9995 }
9996 }
9997
9998 template <typename A>
9999 void UUIDLoadCommandAtom<A>::setContent(const uint8_t uuid[16])
10000 {
10001 memcpy(fUUID, uuid, 16);
10002 }
10003
10004 template <typename A>
10005 void UUIDLoadCommandAtom<A>::copyRawContent(uint8_t buffer[]) const
10006 {
10007 if (fEmit) {
10008 uint64_t size = this->getSize();
10009 bzero(buffer, size);
10010 macho_uuid_command<P>* cmd = (macho_uuid_command<P>*)buffer;
10011 cmd->set_cmd(LC_UUID);
10012 cmd->set_cmdsize(this->getSize());
10013 cmd->set_uuid((uint8_t*)fUUID);
10014 }
10015 }
10016
10017
10018 template <typename A>
10019 uint64_t SubLibraryLoadCommandsAtom<A>::getSize() const
10020 {
10021 return this->alignedSize(sizeof(macho_sub_library_command<P>) + fNameLength + 1);
10022 }
10023
10024 template <typename A>
10025 void SubLibraryLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
10026 {
10027 uint64_t size = this->getSize();
10028 bzero(buffer, size);
10029 macho_sub_library_command<P>* cmd = (macho_sub_library_command<P>*)buffer;
10030 cmd->set_cmd(LC_SUB_LIBRARY);
10031 cmd->set_cmdsize(this->getSize());
10032 cmd->set_sub_library_offset();
10033 strncpy((char*)&buffer[sizeof(macho_sub_library_command<P>)], fNameStart, fNameLength);
10034 buffer[sizeof(macho_sub_library_command<P>)+fNameLength] = '\0';
10035 }
10036
10037 template <typename A>
10038 uint64_t UmbrellaLoadCommandsAtom<A>::getSize() const
10039 {
10040 return this->alignedSize(sizeof(macho_sub_framework_command<P>) + strlen(fName) + 1);
10041 }
10042
10043 template <typename A>
10044 void UmbrellaLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
10045 {
10046 uint64_t size = this->getSize();
10047 bzero(buffer, size);
10048 macho_sub_framework_command<P>* cmd = (macho_sub_framework_command<P>*)buffer;
10049 cmd->set_cmd(LC_SUB_FRAMEWORK);
10050 cmd->set_cmdsize(this->getSize());
10051 cmd->set_umbrella_offset();
10052 strcpy((char*)&buffer[sizeof(macho_sub_framework_command<P>)], fName);
10053 }
10054
10055 template <>
10056 uint64_t ThreadsLoadCommandsAtom<ppc>::getSize() const
10057 {
10058 return this->alignedSize(16 + 40*4); // base size + PPC_THREAD_STATE_COUNT * 4
10059 }
10060
10061 template <>
10062 uint64_t ThreadsLoadCommandsAtom<ppc64>::getSize() const
10063 {
10064 return this->alignedSize(16 + 76*4); // base size + PPC_THREAD_STATE64_COUNT * 4
10065 }
10066
10067 template <>
10068 uint64_t ThreadsLoadCommandsAtom<x86>::getSize() const
10069 {
10070 return this->alignedSize(16 + 16*4); // base size + i386_THREAD_STATE_COUNT * 4
10071 }
10072
10073 template <>
10074 uint64_t ThreadsLoadCommandsAtom<x86_64>::getSize() const
10075 {
10076 return this->alignedSize(16 + x86_THREAD_STATE64_COUNT * 4);
10077 }
10078
10079 // ARM_THREAD_STATE_COUNT should be picked up from a header rather than hard-coded here
10080 template <>
10081 uint64_t ThreadsLoadCommandsAtom<arm>::getSize() const
10082 {
10083 return this->alignedSize(16 + 17 * 4); // base size + ARM_THREAD_STATE_COUNT * 4
10084 }
10085
10086 template <>
10087 void ThreadsLoadCommandsAtom<ppc>::copyRawContent(uint8_t buffer[]) const
10088 {
10089 uint64_t size = this->getSize();
10090 uint64_t start = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
10091 bzero(buffer, size);
10092 macho_thread_command<ppc::P>* cmd = (macho_thread_command<ppc::P>*)buffer;
10093 cmd->set_cmd(LC_UNIXTHREAD);
10094 cmd->set_cmdsize(size);
10095 cmd->set_flavor(1); // PPC_THREAD_STATE
10096 cmd->set_count(40); // PPC_THREAD_STATE_COUNT;
10097 cmd->set_thread_register(0, start);
10098 if ( fWriter.fOptions.hasCustomStack() )
10099 cmd->set_thread_register(3, fWriter.fOptions.customStackAddr()); // r1
10100 }
10101
10102
10103 template <>
10104 void ThreadsLoadCommandsAtom<ppc64>::copyRawContent(uint8_t buffer[]) const
10105 {
10106 uint64_t size = this->getSize();
10107 uint64_t start = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
10108 bzero(buffer, size);
10109 macho_thread_command<ppc64::P>* cmd = (macho_thread_command<ppc64::P>*)buffer;
10110 cmd->set_cmd(LC_UNIXTHREAD);
10111 cmd->set_cmdsize(size);
10112 cmd->set_flavor(5); // PPC_THREAD_STATE64
10113 cmd->set_count(76); // PPC_THREAD_STATE64_COUNT;
10114 cmd->set_thread_register(0, start);
10115 if ( fWriter.fOptions.hasCustomStack() )
10116 cmd->set_thread_register(3, fWriter.fOptions.customStackAddr()); // r1
10117 }
10118
10119 template <>
10120 void ThreadsLoadCommandsAtom<x86>::copyRawContent(uint8_t buffer[]) const
10121 {
10122 uint64_t size = this->getSize();
10123 uint64_t start = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
10124 bzero(buffer, size);
10125 macho_thread_command<x86::P>* cmd = (macho_thread_command<x86::P>*)buffer;
10126 cmd->set_cmd(LC_UNIXTHREAD);
10127 cmd->set_cmdsize(size);
10128 cmd->set_flavor(1); // i386_THREAD_STATE
10129 cmd->set_count(16); // i386_THREAD_STATE_COUNT;
10130 cmd->set_thread_register(10, start); // eip
10131 if ( fWriter.fOptions.hasCustomStack() )
10132 cmd->set_thread_register(7, fWriter.fOptions.customStackAddr()); // esp
10133 }
10134
10135 template <>
10136 void ThreadsLoadCommandsAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
10137 {
10138 uint64_t size = this->getSize();
10139 uint64_t start = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
10140 bzero(buffer, size);
10141 macho_thread_command<x86_64::P>* cmd = (macho_thread_command<x86_64::P>*)buffer;
10142 cmd->set_cmd(LC_UNIXTHREAD);
10143 cmd->set_cmdsize(size);
10144 cmd->set_flavor(x86_THREAD_STATE64);
10145 cmd->set_count(x86_THREAD_STATE64_COUNT);
10146 cmd->set_thread_register(16, start); // rip
10147 if ( fWriter.fOptions.hasCustomStack() )
10148 cmd->set_thread_register(7, fWriter.fOptions.customStackAddr()); // rsp
10149 }
10150
10151 template <>
10152 void ThreadsLoadCommandsAtom<arm>::copyRawContent(uint8_t buffer[]) const
10153 {
10154 uint64_t size = this->getSize();
10155 uint64_t start = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
10156 if ( fWriter.fEntryPoint->isThumb() )
10157 start |= 1ULL;
10158 bzero(buffer, size);
10159 macho_thread_command<arm::P>* cmd = (macho_thread_command<arm::P>*)buffer;
10160 cmd->set_cmd(LC_UNIXTHREAD);
10161 cmd->set_cmdsize(size);
10162 cmd->set_flavor(1); // ARM_THREAD_STATE
10163 cmd->set_count(17); // ARM_THREAD_STATE_COUNT
10164 cmd->set_thread_register(15, start); // pc
10165 if ( fWriter.fOptions.hasCustomStack() )
10166 cmd->set_thread_register(13, fWriter.fOptions.customStackAddr()); // sp (r13)
10167 }
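// Reference note (descriptive, derived from the thread_state layouts used above): the entry
// point and custom-stack registers written by these specializations are
//   ppc/ppc64:  register 0 = srr0 (pc),  register 3 = r1 (stack)
//   x86:        register 10 = eip,       register 7 = esp
//   x86_64:     register 16 = rip,       register 7 = rsp
//   arm:        register 15 = pc,        register 13 = sp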
10168
10169 template <typename A>
10170 uint64_t RPathLoadCommandsAtom<A>::getSize() const
10171 {
10172 return this->alignedSize(sizeof(macho_rpath_command<P>) + strlen(fPath) + 1);
10173 }
10174
10175 template <typename A>
10176 void RPathLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
10177 {
10178 uint64_t size = this->getSize();
10179 bzero(buffer, size);
10180 macho_rpath_command<P>* cmd = (macho_rpath_command<P>*)buffer;
10181 cmd->set_cmd(LC_RPATH);
10182 cmd->set_cmdsize(this->getSize());
10183 cmd->set_path_offset();
10184 strcpy((char*)&buffer[sizeof(macho_rpath_command<P>)], fPath);
10185 }
10186
10187
10188
10189 template <typename A>
10190 void EncryptionLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
10191 {
10192 uint64_t size = this->getSize();
10193 bzero(buffer, size);
10194 macho_encryption_info_command<P>* cmd = (macho_encryption_info_command<P>*)buffer;
10195 cmd->set_cmd(LC_ENCRYPTION_INFO);
10196 cmd->set_cmdsize(this->getSize());
10197 cmd->set_cryptoff(fStartOffset);
10198 cmd->set_cryptsize(fEndOffset-fStartOffset);
10199 cmd->set_cryptid(0);
10200 }
10201
10202
10203
10204 template <typename A>
10205 void LoadCommandsPaddingAtom<A>::copyRawContent(uint8_t buffer[]) const
10206 {
10207 bzero(buffer, fSize);
10208 }
10209
10210 template <typename A>
10211 void LoadCommandsPaddingAtom<A>::setSize(uint64_t newSize)
10212 {
10213 fSize = newSize;
10214 // this resizing by-passes the way fLargestAtomSize is set, so re-check here
10215 if ( fWriter.fLargestAtomSize < newSize )
10216 fWriter.fLargestAtomSize = newSize;
10217 }
10218
10219 template <typename A>
10220 void UnwindInfoAtom<A>::addUnwindInfo(ObjectFile::Atom* func, uint32_t offset, uint32_t encoding,
10221 ObjectFile::Reference* fdeRef, ObjectFile::Reference* lsdaRef,
10222 ObjectFile::Atom* personalityPointer)
10223 {
10224 Info info;
10225 info.func = func;
10226 if ( fdeRef != NULL )
10227 info.fde = &fdeRef->getTarget();
10228 else
10229 info.fde = NULL;
10230 if ( lsdaRef != NULL ) {
10231 info.lsda = &lsdaRef->getTarget();
10232 info.lsdaOffset = lsdaRef->getTargetOffset();
10233 }
10234 else {
10235 info.lsda = NULL;
10236 info.lsdaOffset = 0;
10237 }
10238 info.personalityPointer = personalityPointer;
10239 info.encoding = encoding;
10240 fInfos.push_back(info);
10241 //fprintf(stderr, "addUnwindInfo() encoding=0x%08X, lsda=%p, lsdaOffset=%d, person=%p, func=%s\n",
10242 // encoding, info.lsda, info.lsdaOffset, personalityPointer, func->getDisplayName());
10243 }
10244
10245 template <>
10246 bool UnwindInfoAtom<x86>::encodingMeansUseDwarf(compact_unwind_encoding_t encoding)
10247 {
10248 return ( (encoding & UNWIND_X86_MODE_MASK) == UNWIND_X86_MODE_DWARF);
10249 }
10250
10251 template <>
10252 bool UnwindInfoAtom<x86_64>::encodingMeansUseDwarf(compact_unwind_encoding_t encoding)
10253 {
10254 return ( (encoding & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
10255 }
10256
10257 template <typename A>
10258 bool UnwindInfoAtom<A>::encodingMeansUseDwarf(compact_unwind_encoding_t encoding)
10259 {
10260 return false;
10261 }
10262
10263
10264 template <typename A>
10265 void UnwindInfoAtom<A>::compressDuplicates(std::vector<Info>& uniqueInfos)
10266 {
10267 // build new list removing entries where next function has same encoding
10268 uniqueInfos.reserve(fInfos.size());
10269 Info last;
10270 last.func = NULL;
10271 last.lsda = NULL;
10272 last.lsdaOffset = 0;
10273 last.personalityPointer = NULL;
10274 last.encoding = 0xFFFFFFFF;
10275 for(typename std::vector<Info>::iterator it=fInfos.begin(); it != fInfos.end(); ++it) {
10276 Info& newInfo = *it;
10277 bool newNeedsDwarf = encodingMeansUseDwarf(newInfo.encoding);
10278 // remove infos which have same encoding and personalityPointer as last one
10279 if ( newNeedsDwarf || (newInfo.encoding != last.encoding) || (newInfo.personalityPointer != last.personalityPointer)
10280 || (newInfo.lsda != NULL) || (last.lsda != NULL) ) {
10281 uniqueInfos.push_back(newInfo);
10282 }
10283 last = newInfo;
10284 }
10285 //fprintf(stderr, "compressDuplicates() fInfos.size()=%lu, uniqueInfos.size()=%lu\n", fInfos.size(), uniqueInfos.size());
10286 }
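// Worked example of the collapsing rule above (illustrative only): for consecutive functions
// whose encodings are A A A B A, all sharing a personality and having no LSDA and no DWARF
// fallback, only the first A, the B and the trailing A are kept.  The dropped functions are
// still covered because an unwind lookup uses the nearest preceding table entry.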
10287
10288 template <typename A>
10289 void UnwindInfoAtom<A>::findCommonEncoding(const std::vector<Info>& uniqueInfos, std::map<uint32_t, unsigned int>& commonEncodings)
10290 {
10291 // scan infos to get frequency counts for each encoding
10292 std::map<uint32_t, unsigned int> encodingsUsed;
10293 unsigned int mostCommonEncodingUsageCount = 0;
10294 for(typename std::vector<Info>::const_iterator it=uniqueInfos.begin(); it != uniqueInfos.end(); ++it) {
10295 // never put dwarf into common table
10296 if ( encodingMeansUseDwarf(it->encoding) )
10297 continue;
10298 std::map<uint32_t, unsigned int>::iterator pos = encodingsUsed.find(it->encoding);
10299 if ( pos == encodingsUsed.end() ) {
10300 encodingsUsed[it->encoding] = 1;
10301 }
10302 else {
10303 encodingsUsed[it->encoding] += 1;
10304 if ( mostCommonEncodingUsageCount < encodingsUsed[it->encoding] )
10305 mostCommonEncodingUsageCount = encodingsUsed[it->encoding];
10306 }
10307 }
10308 // put the most common encodings into the common table, but at most 127 of them
10309 for(unsigned int usages=mostCommonEncodingUsageCount; usages > 1; --usages) {
10310 for (std::map<uint32_t, unsigned int>::iterator euit=encodingsUsed.begin(); euit != encodingsUsed.end(); ++euit) {
10311 if ( euit->second == usages ) {
10312 unsigned int size = commonEncodings.size();
10313 if ( size < 127 ) {
10314 commonEncodings[euit->first] = size;
10315 }
10316 }
10317 }
10318 }
10319 }
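// The 127 cap exists because a compressed second-level entry stores an 8-bit encoding index:
// indexes below the common-table size select the shared table, the rest select the page-local
// table, so capping the shared table at 127 always leaves at least 128 page-local slots.
// Minimal sketch of that lookup (an assumption about the consumer side, not code from this
// file; the helper name is illustrative):
static inline uint32_t exampleResolveEncoding(uint8_t index, const uint32_t* commonTable,
                                              uint32_t commonCount, const uint32_t* pageTable)
{
    return (index < commonCount) ? commonTable[index] : pageTable[index - commonCount];
}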
10320
10321 template <typename A>
10322 void UnwindInfoAtom<A>::makeLsdaIndex(const std::vector<Info>& uniqueInfos, std::map<ObjectFile::Atom*, uint32_t>& lsdaIndexOffsetMap)
10323 {
10324 for(typename std::vector<Info>::const_iterator it=uniqueInfos.begin(); it != uniqueInfos.end(); ++it) {
10325 lsdaIndexOffsetMap[it->func] = fLSDAIndex.size() * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
10326 if ( it->lsda != NULL ) {
10327 LSDAEntry entry;
10328 entry.func = it->func;
10329 entry.lsda = it->lsda;
10330 entry.lsdaOffset = it->lsdaOffset;
10331 fLSDAIndex.push_back(entry);
10332 }
10333 }
10334 }
10335
10336 template <typename A>
10337 void UnwindInfoAtom<A>::makePersonalityIndex(std::vector<Info>& uniqueInfos)
10338 {
10339 for(typename std::vector<Info>::iterator it=uniqueInfos.begin(); it != uniqueInfos.end(); ++it) {
10340 if ( it->personalityPointer != NULL ) {
10341 std::map<ObjectFile::Atom*, uint32_t>::iterator pos = fPersonalityIndexMap.find(it->personalityPointer);
10342 if ( pos == fPersonalityIndexMap.end() ) {
10343 const uint32_t nextIndex = fPersonalityIndexMap.size() + 1;
10344 fPersonalityIndexMap[it->personalityPointer] = nextIndex;
10345 }
10346 uint32_t personalityIndex = fPersonalityIndexMap[it->personalityPointer];
10347 it->encoding |= (personalityIndex << (__builtin_ctz(UNWIND_PERSONALITY_MASK)) );
10348 }
10349 }
10350 }
10351
10352 template <typename A>
10353 unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<Info>& uniqueInfos, uint32_t pageSize,
10354 unsigned int endIndex, uint8_t*& pageEnd)
10355 {
10356 const unsigned int maxEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
10357 const unsigned int entriesToAdd = ((endIndex > maxEntriesPerPage) ? maxEntriesPerPage : endIndex);
10358 uint8_t* pageStart = pageEnd
10359 - entriesToAdd*sizeof(unwind_info_regular_second_level_entry)
10360 - sizeof(unwind_info_regular_second_level_page_header);
10361 macho_unwind_info_regular_second_level_page_header<P>* page = (macho_unwind_info_regular_second_level_page_header<P>*)pageStart;
10362 page->set_kind(UNWIND_SECOND_LEVEL_REGULAR);
10363 page->set_entryPageOffset(sizeof(macho_unwind_info_regular_second_level_page_header<P>));
10364 page->set_entryCount(entriesToAdd);
10365 macho_unwind_info_regular_second_level_entry<P>* entryTable = (macho_unwind_info_regular_second_level_entry<P>*)(pageStart + page->entryPageOffset());
10366 for (unsigned int i=0; i < entriesToAdd; ++i) {
10367 const Info& info = uniqueInfos[endIndex-entriesToAdd+i];
10368 entryTable[i].set_functionOffset(0);
10369 entryTable[i].set_encoding(info.encoding);
10370 RegFixUp fixup;
10371 fixup.contentPointer = (uint8_t*)(&entryTable[i]);
10372 fixup.func = info.func;
10373 fixup.fde = ( encodingMeansUseDwarf(info.encoding) ? info.fde : NULL );
10374 fRegFixUps.push_back(fixup);
10375 }
10376 //fprintf(stderr, "regular page with %u entries\n", entriesToAdd);
10377 pageEnd = pageStart;
10378 return endIndex - entriesToAdd;
10379 }
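// Layout note for the regular page built above: a small header is followed by entryCount
// 8-byte entries of (functionOffset, encoding).  functionOffset is written as 0 here and
// patched later through the kImageOffset32 references queued in fRegFixUps, since final
// addresses are not known until atoms are laid out.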
10380
10381
10382 template <typename A>
10383 unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<Info>& uniqueInfos,
10384 const std::map<uint32_t,unsigned int> commonEncodings,
10385 uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd)
10386 {
10387 const bool log = false;
10388 if (log) fprintf(stderr, "makeCompressedSecondLevelPage(pageSize=%u, endIndex=%u)\n", pageSize, endIndex);
10389 // first pass calculates how many compressed entries we could fit in this sized page
10390 // keep adding entries to page until:
10391 // 1) encoding table plus entry table plus header exceed page size
10392 // 2) the file offset delta from the first to last function > 24 bits
10393 // 3) custom encoding index reaches 255
10394 // 4) run out of uniqueInfos to encode
10395 std::map<uint32_t, unsigned int> pageSpecificEncodings;
10396 uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
10397 std::vector<uint8_t> encodingIndexes;
10398 int index = endIndex-1;
10399 int entryCount = 0;
10400 uint64_t lastEntryAddress = uniqueInfos[index].func->getAddress();
10401 bool canDo = true;
10402 while ( canDo && (index >= 0) ) {
10403 const Info& info = uniqueInfos[index--];
10404 // compute encoding index
10405 unsigned int encodingIndex;
10406 std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
10407 if ( pos != commonEncodings.end() ) {
10408 encodingIndex = pos->second;
10409 }
10410 else {
10411 // no common entry, so add one on this page
10412 uint32_t encoding = info.encoding;
10413 if ( encodingMeansUseDwarf(encoding) ) {
10414 // make a unique pseudo encoding so this dwarf entry gets its own encoding table slot
10415 encoding += (index+1);
10416 }
10417 std::map<uint32_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
10418 if ( ppos != pageSpecificEncodings.end() ) {
10419 encodingIndex = ppos->second;
10420 }
10421 else {
10422 encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
10423 if ( encodingIndex <= 255 ) {
10424 pageSpecificEncodings[encoding] = encodingIndex;
10425 }
10426 else {
10427 canDo = false; // case 3)
10428 if (log) fprintf(stderr, "end of compressed page with %u entries, %lu custom encodings because too many custom encodings\n",
10429 entryCount, pageSpecificEncodings.size());
10430 }
10431 }
10432 }
10433 if ( canDo )
10434 encodingIndexes.push_back(encodingIndex);
10435 // compute function offset
10436 uint32_t funcOffsetWithInPage = lastEntryAddress - info.func->getAddress();
10437 if ( funcOffsetWithInPage > 0x00FFFF00 ) {
10438 // don't use 0x00FFFFFF because addresses may vary after atoms are laid out again
10439 canDo = false; // case 2)
10440 if (log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
10441 }
10442 else {
10443 ++entryCount;
10444 }
10445 // check room for entry
10446 if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
10447 canDo = false; // case 1)
10448 --entryCount;
10449 if (log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
10450 }
10451 //if (log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
10452 }
10453
10454 // check for cases where it would be better to use a regular (non-compressed) page
10455 const unsigned int compressPageUsed = sizeof(unwind_info_compressed_second_level_page_header)
10456 + pageSpecificEncodings.size()*sizeof(uint32_t)
10457 + entryCount*sizeof(uint32_t);
10458 if ( (compressPageUsed < (pageSize-4)) && (index >= 0) ) {
10459 const int regularEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
10460 if ( entryCount < regularEntriesPerPage ) {
10461 return makeRegularSecondLevelPage(uniqueInfos, pageSize, endIndex, pageEnd);
10462 }
10463 }
10464
10465 // check if we need any padding because adding another entry would take 8 bytes but only have room for 4
10466 uint32_t pad = 0;
10467 if ( compressPageUsed == (pageSize-4) )
10468 pad = 4;
10469
10470 // second pass fills in page
10471 uint8_t* pageStart = pageEnd - compressPageUsed - pad;
10472 macho_unwind_info_compressed_second_level_page_header<P>* page = (macho_unwind_info_compressed_second_level_page_header<P>*)pageStart;
10473 page->set_kind(UNWIND_SECOND_LEVEL_COMPRESSED);
10474 page->set_entryPageOffset(sizeof(macho_unwind_info_compressed_second_level_page_header<P>));
10475 page->set_entryCount(entryCount);
10476 page->set_encodingsPageOffset(page->entryPageOffset()+entryCount*sizeof(uint32_t));
10477 page->set_encodingsCount(pageSpecificEncodings.size());
10478 uint32_t* const encodingsArray = (uint32_t*)&pageStart[page->encodingsPageOffset()];
10479 // fill in entry table
10480 uint32_t* const entriesArray = (uint32_t*)&pageStart[page->entryPageOffset()];
10481 ObjectFile::Atom* firstFunc = uniqueInfos[endIndex-entryCount].func;
10482 for(unsigned int i=endIndex-entryCount; i < endIndex; ++i) {
10483 const Info& info = uniqueInfos[i];
10484 uint8_t encodingIndex;
10485 if ( encodingMeansUseDwarf(info.encoding) ) {
10486 // dwarf entries are always in page specific encodings
10487 encodingIndex = pageSpecificEncodings[info.encoding+i];
10488 }
10489 else {
10490 std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
10491 if ( pos != commonEncodings.end() )
10492 encodingIndex = pos->second;
10493 else
10494 encodingIndex = pageSpecificEncodings[info.encoding];
10495 }
10496 uint32_t entryIndex = i - endIndex + entryCount;
10497 A::P::E::set32(entriesArray[entryIndex], encodingIndex << 24);
10498 CompressedFixUp funcStartFixUp;
10499 funcStartFixUp.contentPointer = (uint8_t*)(&entriesArray[entryIndex]);
10500 funcStartFixUp.func = info.func;
10501 funcStartFixUp.fromFunc = firstFunc;
10502 fCompressedFixUps.push_back(funcStartFixUp);
10503 if ( encodingMeansUseDwarf(info.encoding) ) {
10504 CompressedEncodingFixUp dwarfStartFixup;
10505 dwarfStartFixup.contentPointer = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]);
10506 dwarfStartFixup.fde = info.fde;
10507 fCompressedEncodingFixUps.push_back(dwarfStartFixup);
10508 }
10509 }
10510 // fill in encodings table
10511 for(std::map<uint32_t, unsigned int>::const_iterator it = pageSpecificEncodings.begin(); it != pageSpecificEncodings.end(); ++it) {
10512 A::P::E::set32(encodingsArray[it->second-commonEncodings.size()], it->first);
10513 }
10514
10515 if (log) fprintf(stderr, "compressed page with %u entries, %lu custom encodings\n", entryCount, pageSpecificEncodings.size());
10516
10517 // update pageEnd;
10518 pageEnd = pageStart;
10519 return endIndex-entryCount; // endIndex for next page
10520 }
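// Minimal sketch (not part of the original source) of the 32-bit compressed entry produced
// above: the top 8 bits hold the encoding index and the low 24 bits hold the offset of the
// function from the page's first function, which is filled in later via a kPointerDiff24
// fixup.  The helper name is illustrative only.
static inline uint32_t exampleCompressedEntry(uint8_t encodingIndex, uint32_t offsetFromFirstFunc)
{
    return (uint32_t(encodingIndex) << 24) | (offsetFromFirstFunc & 0x00FFFFFF);
}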
10521
10522 template <> void UnwindInfoAtom<ppc>::generate() { }
10523 template <> void UnwindInfoAtom<ppc64>::generate() { }
10524 template <> void UnwindInfoAtom<arm>::generate() { }
10525
10526
10527 template <typename A>
10528 void UnwindInfoAtom<A>::generate()
10529 {
10530 // only generate table if there are functions with unwind info
10531 if ( fInfos.size() > 0 ) {
10532 // find offset of end of __unwind_info section
10533 SectionInfo* unwindSectionInfo = (SectionInfo*)this->getSection();
10534
10535 // build new list that has proper offsetInImage and remove entries where next function has same encoding
10536 std::vector<Info> uniqueInfos;
10537 this->compressDuplicates(uniqueInfos);
10538
10539 // build personality index, update encodings with personality index
10540 this->makePersonalityIndex(uniqueInfos);
10541 if ( fPersonalityIndexMap.size() > 3 )
10542 throw "too many personality routines for compact unwind to encode";
10543
10544 // put the most common encodings into the common table, but at most 127 of them
10545 std::map<uint32_t, unsigned int> commonEncodings;
10546 this->findCommonEncoding(uniqueInfos, commonEncodings);
10547
10548 // build lsda index
10549 std::map<ObjectFile::Atom*, uint32_t> lsdaIndexOffsetMap;
10550 this->makeLsdaIndex(uniqueInfos, lsdaIndexOffsetMap);
10551
10552 // calculate worst case size for all unwind info pages when allocating buffer
10553 const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
10554 const unsigned int pageCount = ((uniqueInfos.size() - 1)/entriesPerRegularPage) + 1;
10555 fPagesContentForDelete = (uint8_t*)calloc(pageCount,4096);
10556 fPagesSize = 0;
10557 if ( fPagesContentForDelete == NULL )
10558 throw "could not allocate space for compact unwind info";
10559 ObjectFile::Atom* secondLevelFirstFuncs[pageCount*3];
10560 uint8_t* secondLevelPagesStarts[pageCount*3];
10561
10562 // make last second level page smaller so that all other second level pages can be page aligned
10563 uint32_t maxLastPageSize = unwindSectionInfo->fFileOffset % 4096;
10564 uint32_t tailPad = 0;
10565 if ( maxLastPageSize < 128 ) {
10566 tailPad = maxLastPageSize;
10567 maxLastPageSize = 4096;
10568 }
10569
10570 // fill in pages in reverse order
10571 unsigned int endIndex = uniqueInfos.size();
10572 unsigned int secondLevelPageCount = 0;
10573 uint8_t* pageEnd = &fPagesContentForDelete[pageCount*4096];
10574 uint32_t pageSize = maxLastPageSize;
10575 while ( endIndex > 0 ) {
10576 endIndex = makeCompressedSecondLevelPage(uniqueInfos, commonEncodings, pageSize, endIndex, pageEnd);
10577 secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
10578 secondLevelFirstFuncs[secondLevelPageCount] = uniqueInfos[endIndex].func;
10579 ++secondLevelPageCount;
10580 pageSize = 4096; // last page can be odd size, make rest up to 4096 bytes in size
10581 }
10582 fPagesContent = pageEnd;
10583 fPagesSize = &fPagesContentForDelete[pageCount*4096] - pageEnd;
10584
10585 // calculate section layout
10586 const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
10587 const uint32_t commonEncodingsArrayCount = commonEncodings.size();
10588 const uint32_t commonEncodingsArraySize = commonEncodingsArrayCount * sizeof(compact_unwind_encoding_t);
10589 const uint32_t personalityArraySectionOffset = commonEncodingsArraySectionOffset + commonEncodingsArraySize;
10590 const uint32_t personalityArrayCount = fPersonalityIndexMap.size();
10591 const uint32_t personalityArraySize = personalityArrayCount * sizeof(uint32_t);
10592 const uint32_t indexSectionOffset = personalityArraySectionOffset + personalityArraySize;
10593 const uint32_t indexCount = secondLevelPageCount+1;
10594 const uint32_t indexSize = indexCount * sizeof(macho_unwind_info_section_header_index_entry<P>);
10595 const uint32_t lsdaIndexArraySectionOffset = indexSectionOffset + indexSize;
10596 const uint32_t lsdaIndexArrayCount = fLSDAIndex.size();
10597 const uint32_t lsdaIndexArraySize = lsdaIndexArrayCount * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
10598 const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;
10599
10600
10601 // allocate and fill in section header
10602 fHeaderSize = headerEndSectionOffset;
10603 fHeaderContent = new uint8_t[fHeaderSize];
10604 bzero(fHeaderContent, fHeaderSize);
10605 macho_unwind_info_section_header<P>* sectionHeader = (macho_unwind_info_section_header<P>*)fHeaderContent;
10606 sectionHeader->set_version(UNWIND_SECTION_VERSION);
10607 sectionHeader->set_commonEncodingsArraySectionOffset(commonEncodingsArraySectionOffset);
10608 sectionHeader->set_commonEncodingsArrayCount(commonEncodingsArrayCount);
10609 sectionHeader->set_personalityArraySectionOffset(personalityArraySectionOffset);
10610 sectionHeader->set_personalityArrayCount(personalityArrayCount);
10611 sectionHeader->set_indexSectionOffset(indexSectionOffset);
10612 sectionHeader->set_indexCount(indexCount);
10613
10614 // copy common encodings
10615 uint32_t* commonEncodingsTable = (uint32_t*)&fHeaderContent[commonEncodingsArraySectionOffset];
10616 for (std::map<uint32_t, unsigned int>::iterator it=commonEncodings.begin(); it != commonEncodings.end(); ++it)
10617 A::P::E::set32(commonEncodingsTable[it->second], it->first);
10618
10619 // make references for personality entries
10620 uint32_t* personalityArray = (uint32_t*)&fHeaderContent[sectionHeader->personalityArraySectionOffset()];
10621 for (std::map<ObjectFile::Atom*, unsigned int>::iterator it=fPersonalityIndexMap.begin(); it != fPersonalityIndexMap.end(); ++it) {
10622 uint32_t offset = (uint8_t*)&personalityArray[it->second-1] - fHeaderContent;
10623 fReferences.push_back(new WriterReference<A>(offset, A::kImageOffset32, it->first));
10624 }
10625
10626 // build first level index and references
10627 macho_unwind_info_section_header_index_entry<P>* indexTable = (macho_unwind_info_section_header_index_entry<P>*)&fHeaderContent[indexSectionOffset];
10628 for (unsigned int i=0; i < secondLevelPageCount; ++i) {
10629 unsigned int reverseIndex = secondLevelPageCount - 1 - i;
10630 indexTable[i].set_functionOffset(0);
10631 indexTable[i].set_secondLevelPagesSectionOffset(secondLevelPagesStarts[reverseIndex]-fPagesContent+headerEndSectionOffset);
10632 indexTable[i].set_lsdaIndexArraySectionOffset(lsdaIndexOffsetMap[secondLevelFirstFuncs[reverseIndex]]+lsdaIndexArraySectionOffset);
10633 uint32_t refOffset = (uint8_t*)&indexTable[i] - fHeaderContent;
10634 fReferences.push_back(new WriterReference<A>(refOffset, A::kImageOffset32, secondLevelFirstFuncs[reverseIndex]));
10635 }
10636 indexTable[secondLevelPageCount].set_functionOffset(0);
10637 indexTable[secondLevelPageCount].set_secondLevelPagesSectionOffset(0);
10638 indexTable[secondLevelPageCount].set_lsdaIndexArraySectionOffset(lsdaIndexArraySectionOffset+lsdaIndexArraySize);
10639 fReferences.push_back(new WriterReference<A>((uint8_t*)&indexTable[secondLevelPageCount] - fHeaderContent, A::kImageOffset32,
10640 fInfos.back().func, fInfos.back().func->getSize()+1));
10641
10642 // build lsda references
10643 uint32_t lsdaEntrySectionOffset = lsdaIndexArraySectionOffset;
10644 for (typename std::vector<LSDAEntry>::iterator it = fLSDAIndex.begin(); it != fLSDAIndex.end(); ++it) {
10645 fReferences.push_back(new WriterReference<A>(lsdaEntrySectionOffset, A::kImageOffset32, it->func));
10646 fReferences.push_back(new WriterReference<A>(lsdaEntrySectionOffset+4, A::kImageOffset32, it->lsda, it->lsdaOffset));
10647 lsdaEntrySectionOffset += sizeof(unwind_info_section_header_lsda_index_entry);
10648 }
10649
10650 // make references for regular second level entries
10651 for (typename std::vector<RegFixUp>::iterator it = fRegFixUps.begin(); it != fRegFixUps.end(); ++it) {
10652 uint32_t offset = (it->contentPointer - fPagesContent) + fHeaderSize;
10653 fReferences.push_back(new WriterReference<A>(offset, A::kImageOffset32, it->func));
10654 if ( it->fde != NULL )
10655 fReferences.push_back(new WriterReference<A>(offset+4, A::kSectionOffset24, it->fde));
10656 }
10657 // make references for compressed second level entries
10658 for (typename std::vector<CompressedFixUp>::iterator it = fCompressedFixUps.begin(); it != fCompressedFixUps.end(); ++it) {
10659 uint32_t offset = (it->contentPointer - fPagesContent) + fHeaderSize;
10660 fReferences.push_back(new WriterReference<A>(offset, A::kPointerDiff24, it->func, 0, it->fromFunc, 0));
10661 }
10662 for (typename std::vector<CompressedEncodingFixUp>::iterator it = fCompressedEncodingFixUps.begin(); it != fCompressedEncodingFixUps.end(); ++it) {
10663 uint32_t offset = (it->contentPointer - fPagesContent) + fHeaderSize;
10664 fReferences.push_back(new WriterReference<A>(offset, A::kSectionOffset24, it->fde));
10665 }
10666
10667 // update section record with new size
10668 unwindSectionInfo->fSize = this->getSize();
10669
10670 // alter alignment so this section lays out so second level tables are page aligned
10671 if ( secondLevelPageCount > 2 )
10672 fAlignment = ObjectFile::Alignment(12, (unwindSectionInfo->fFileOffset - this->getSize()) % 4096);
10673 }
10674
10675 }
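// Resulting __unwind_info layout produced by generate(), for reference: section header,
// common encodings array, personality pointer array, first-level index (one entry per
// second-level page plus a terminating sentinel), LSDA index, then the second-level pages
// themselves.  The header part lives in fHeaderContent and the pages in fPagesContent;
// copyRawContent() below concatenates the two.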
10676
10677
10678
10679
10680 template <typename A>
10681 void UnwindInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
10682 {
10683 memcpy(buffer, fHeaderContent, fHeaderSize);
10684 memcpy(&buffer[fHeaderSize], fPagesContent, fPagesSize);
10685 }
10686
10687
10688
10689 template <typename A>
10690 uint64_t LinkEditAtom<A>::getFileOffset() const
10691 {
10692 return ((SectionInfo*)this->getSection())->fFileOffset + this->getSectionOffset();
10693 }
10694
10695
10696 template <typename A>
10697 uint64_t SectionRelocationsLinkEditAtom<A>::getSize() const
10698 {
10699 return fWriter.fSectionRelocs.size() * sizeof(macho_relocation_info<P>);
10700 }
10701
10702 template <typename A>
10703 void SectionRelocationsLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10704 {
10705 memcpy(buffer, &fWriter.fSectionRelocs[0], this->getSize());
10706 }
10707
10708
10709 template <typename A>
10710 uint64_t LocalRelocationsLinkEditAtom<A>::getSize() const
10711 {
10712 return fWriter.fInternalRelocs.size() * sizeof(macho_relocation_info<P>);
10713 }
10714
10715 template <typename A>
10716 void LocalRelocationsLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10717 {
10718 memcpy(buffer, &fWriter.fInternalRelocs[0], this->getSize());
10719 }
10720
10721
10722
10723 template <typename A>
10724 uint64_t SymbolTableLinkEditAtom<A>::getSize() const
10725 {
10726 return fWriter.fSymbolTableCount * sizeof(macho_nlist<P>);
10727 }
10728
10729 template <typename A>
10730 void SymbolTableLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10731 {
10732 memcpy(buffer, fWriter.fSymbolTable, this->getSize());
10733 }
10734
10735 template <typename A>
10736 uint64_t ExternalRelocationsLinkEditAtom<A>::getSize() const
10737 {
10738 return fWriter.fExternalRelocs.size() * sizeof(macho_relocation_info<P>);
10739 }
10740
10741 template <typename A>
10742 void ExternalRelocationsLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10743 {
10744 std::sort(fWriter.fExternalRelocs.begin(), fWriter.fExternalRelocs.end(), ExternalRelocSorter<P>());
10745 memcpy(buffer, &fWriter.fExternalRelocs[0], this->getSize());
10746 }
10747
10748
10749
10750 template <typename A>
10751 uint64_t IndirectTableLinkEditAtom<A>::getSize() const
10752 {
10753 return fTable.size() * sizeof(uint32_t);
10754 }
10755
10756 template <typename A>
10757 void IndirectTableLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10758 {
10759 uint64_t size = this->getSize();
10760 bzero(buffer, size);
10761 const uint32_t indirectTableSize = fTable.size();
10762 uint32_t* indirectTable = (uint32_t*)buffer;
10763 for(std::vector<IndirectEntry>::const_iterator it = fTable.begin(); it != fTable.end(); ++it) {
10764 if ( it->indirectIndex < indirectTableSize )
10765 A::P::E::set32(indirectTable[it->indirectIndex], it->symbolIndex);
10766 else
10767 throwf("malformed indirect table. size=%d, index=%d", indirectTableSize, it->indirectIndex);
10768 }
10769 }
10770
10771
10772
10773 template <typename A>
10774 uint64_t ModuleInfoLinkEditAtom<A>::getSize() const
10775 {
10776 return fWriter.fSymbolTableExportCount*sizeof(macho_dylib_table_of_contents<P>)
10777 + sizeof(macho_dylib_module<P>)
10778 + this->getReferencesCount()*sizeof(uint32_t);
10779 }
10780
10781 template <typename A>
10782 uint32_t ModuleInfoLinkEditAtom<A>::getTableOfContentsFileOffset() const
10783 {
10784 return this->getFileOffset();
10785 }
10786
10787 template <typename A>
10788 uint32_t ModuleInfoLinkEditAtom<A>::getModuleTableFileOffset() const
10789 {
10790 return this->getFileOffset() + fWriter.fSymbolTableExportCount*sizeof(macho_dylib_table_of_contents<P>);
10791 }
10792
10793 template <typename A>
10794 uint32_t ModuleInfoLinkEditAtom<A>::getReferencesFileOffset() const
10795 {
10796 return this->getModuleTableFileOffset() + sizeof(macho_dylib_module<P>);
10797 }
10798
10799 template <typename A>
10800 uint32_t ModuleInfoLinkEditAtom<A>::getReferencesCount() const
10801 {
10802 return fWriter.fSymbolTableExportCount + fWriter.fSymbolTableImportCount;
10803 }
10804
10805 template <typename A>
10806 void ModuleInfoLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10807 {
10808 uint64_t size = this->getSize();
10809 bzero(buffer, size);
10810 // create toc. The symbols are already sorted; they are all in the same module
10811 macho_dylib_table_of_contents<P>* p = (macho_dylib_table_of_contents<P>*)buffer;
10812 for(uint32_t i=0; i < fWriter.fSymbolTableExportCount; ++i, ++p) {
10813 p->set_symbol_index(fWriter.fSymbolTableExportStartIndex+i);
10814 p->set_module_index(0);
10815 }
10816 // create module table (one entry)
10817 pint_t objcModuleSectionStart = 0;
10818 pint_t objcModuleSectionSize = 0;
10819 uint16_t numInits = 0;
10820 uint16_t numTerms = 0;
10821 std::vector<SegmentInfo*>& segmentInfos = fWriter.fSegmentInfos;
10822 for (std::vector<SegmentInfo*>::iterator segit = segmentInfos.begin(); segit != segmentInfos.end(); ++segit) {
10823 std::vector<SectionInfo*>& sectionInfos = (*segit)->fSections;
10824 if ( strcmp((*segit)->fName, "__DATA") == 0 ) {
10825 for (std::vector<SectionInfo*>::iterator sectit = sectionInfos.begin(); sectit != sectionInfos.end(); ++sectit) {
10826 if ( strcmp((*sectit)->fSectionName, "__mod_init_func") == 0 )
10827 numInits = (*sectit)->fSize / sizeof(typename A::P::uint_t);
10828 else if ( strcmp((*sectit)->fSectionName, "__mod_term_func") == 0 )
10829 numTerms = (*sectit)->fSize / sizeof(typename A::P::uint_t);
10830 }
10831 }
10832 else if ( strcmp((*segit)->fName, "__OBJC") == 0 ) {
10833 for (std::vector<SectionInfo*>::iterator sectit = sectionInfos.begin(); sectit != sectionInfos.end(); ++sectit) {
10834 SectionInfo* sectInfo = (*sectit);
10835 if ( strcmp(sectInfo->fSectionName, "__module_info") == 0 ) {
10836 objcModuleSectionStart = sectInfo->getBaseAddress();
10837 objcModuleSectionSize = sectInfo->fSize;
10838 }
10839 }
10840 }
10841 }
10842 macho_dylib_module<P>* module = (macho_dylib_module<P>*)&buffer[fWriter.fSymbolTableExportCount*sizeof(macho_dylib_table_of_contents<P>)];
10843 module->set_module_name(fModuleNameOffset);
10844 module->set_iextdefsym(fWriter.fSymbolTableExportStartIndex);
10845 module->set_nextdefsym(fWriter.fSymbolTableExportCount);
10846 module->set_irefsym(0);
10847 module->set_nrefsym(this->getReferencesCount());
10848 module->set_ilocalsym(fWriter.fSymbolTableStabsStartIndex);
10849 module->set_nlocalsym(fWriter.fSymbolTableStabsCount+fWriter.fSymbolTableLocalCount);
10850 module->set_iextrel(0);
10851 module->set_nextrel(fWriter.fExternalRelocs.size());
10852 module->set_iinit_iterm(0,0);
10853 module->set_ninit_nterm(numInits,numTerms);
10854 module->set_objc_module_info_addr(objcModuleSectionStart);
10855 module->set_objc_module_info_size(objcModuleSectionSize);
10856 // create reference table
10857 macho_dylib_reference<P>* ref = (macho_dylib_reference<P>*)((uint8_t*)module + sizeof(macho_dylib_module<P>));
10858 for(uint32_t i=0; i < fWriter.fSymbolTableExportCount; ++i, ++ref) {
10859 ref->set_isym(fWriter.fSymbolTableExportStartIndex+i);
10860 ref->set_flags(REFERENCE_FLAG_DEFINED);
10861 }
10862 for(uint32_t i=0; i < fWriter.fSymbolTableImportCount; ++i, ++ref) {
10863 ref->set_isym(fWriter.fSymbolTableImportStartIndex+i);
10864 std::map<const ObjectFile::Atom*,ObjectFile::Atom*>::iterator pos = fWriter.fStubsMap.find(fWriter.fImportedAtoms[i]);
10865 if ( pos != fWriter.fStubsMap.end() )
10866 ref->set_flags(REFERENCE_FLAG_UNDEFINED_LAZY);
10867 else
10868 ref->set_flags(REFERENCE_FLAG_UNDEFINED_NON_LAZY);
10869 }
10870 }
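// Layout note for the LC_DYSYMTAB module info written above: a table of contents with one
// entry per exported symbol, a single module entry describing this image, then a reference
// table listing exports (REFERENCE_FLAG_DEFINED) followed by imports, which are marked lazy
// or non-lazy depending on whether a stub was synthesized for them.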
10871
10872
10873
10874 template <typename A>
10875 StringsLinkEditAtom<A>::StringsLinkEditAtom(Writer<A>& writer)
10876 : LinkEditAtom<A>(writer), fCurrentBuffer(NULL), fCurrentBufferUsed(0)
10877 {
10878 fCurrentBuffer = new char[kBufferSize];
10879 // burn first byte of string pool (so zero is never a valid string offset)
10880 fCurrentBuffer[fCurrentBufferUsed++] = ' ';
10881 // make offset 1 always point to an empty string
10882 fCurrentBuffer[fCurrentBufferUsed++] = '\0';
10883 }
10884
10885 template <typename A>
10886 uint64_t StringsLinkEditAtom<A>::getSize() const
10887 {
10888 // align size
10889 return (kBufferSize * fFullBuffers.size() + fCurrentBufferUsed + sizeof(typename A::P::uint_t) - 1) & (-sizeof(typename A::P::uint_t));
10890 }
10891
10892 template <typename A>
10893 void StringsLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10894 {
10895 uint64_t offset = 0;
10896 for (unsigned int i=0; i < fFullBuffers.size(); ++i) {
10897 memcpy(&buffer[offset], fFullBuffers[i], kBufferSize);
10898 offset += kBufferSize;
10899 }
10900 memcpy(&buffer[offset], fCurrentBuffer, fCurrentBufferUsed);
10901 // zero fill end to align
10902 offset += fCurrentBufferUsed;
10903 while ( (offset % sizeof(typename A::P::uint_t)) != 0 )
10904 buffer[offset++] = 0;
10905 }
10906
10907 template <typename A>
10908 int32_t StringsLinkEditAtom<A>::add(const char* name)
10909 {
10910 int32_t offset = kBufferSize * fFullBuffers.size() + fCurrentBufferUsed;
10911 int lenNeeded = strlcpy(&fCurrentBuffer[fCurrentBufferUsed], name, kBufferSize-fCurrentBufferUsed)+1;
10912 if ( (fCurrentBufferUsed+lenNeeded) < kBufferSize ) {
10913 fCurrentBufferUsed += lenNeeded;
10914 }
10915 else {
10916 int copied = kBufferSize-fCurrentBufferUsed-1;
10917 // change trailing '\0' that strlcpy added to real char
10918 fCurrentBuffer[kBufferSize-1] = name[copied];
10919 // alloc next buffer
10920 fFullBuffers.push_back(fCurrentBuffer);
10921 fCurrentBuffer = new char[kBufferSize];
10922 fCurrentBufferUsed = 0;
10923 // append rest of string
10924 this->add(&name[copied+1]);
10925 }
10926 return offset;
10927 }
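// A worked example of the offset arithmetic above (illustrative; assuming kBufferSize were
// 4096): with two full buffers and 100 bytes used in the current one, the next string is
// recorded at offset 2*4096 + 100 = 8292.  If it does not fit, the tail spills into a fresh
// buffer but the original offset is still returned, because copyRawContent() writes the
// buffers back to back.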
10928
10929
10930 template <typename A>
10931 int32_t StringsLinkEditAtom<A>::addUnique(const char* name)
10932 {
10933 StringToOffset::iterator pos = fUniqueStrings.find(name);
10934 if ( pos != fUniqueStrings.end() ) {
10935 return pos->second;
10936 }
10937 else {
10938 int32_t offset = this->add(name);
10939 fUniqueStrings[name] = offset;
10940 return offset;
10941 }
10942 }
10943
10944
10945 template <typename A>
10946 const char* StringsLinkEditAtom<A>::stringForIndex(int32_t index) const
10947 {
10948 int32_t currentBufferStartIndex = kBufferSize * fFullBuffers.size();
10949 int32_t maxIndex = currentBufferStartIndex + fCurrentBufferUsed;
10950 // check for out of bounds
10951 if ( index > maxIndex )
10952 return "";
10953 // check for index in fCurrentBuffer
10954 if ( index >= currentBufferStartIndex )
10955 return &fCurrentBuffer[index-currentBufferStartIndex];
10956 // otherwise index is in a full buffer
10957 uint32_t fullBufferIndex = index/kBufferSize;
10958 return &fFullBuffers[fullBufferIndex][index-(kBufferSize*fullBufferIndex)];
10959 }
10960
10961
10962
10963 template <typename A>
10964 BranchIslandAtom<A>::BranchIslandAtom(Writer<A>& writer, const char* name, int islandRegion, ObjectFile::Atom& target,
10965 ObjectFile::Atom& finalTarget, uint32_t finalTargetOffset)
10966 : WriterAtom<A>(writer, Segment::fgTextSegment), fTarget(target), fFinalTarget(finalTarget), fFinalTargetOffset(finalTargetOffset)
10967 {
10968 if ( finalTargetOffset == 0 ) {
10969 if ( islandRegion == 0 )
10970 asprintf((char**)&fName, "%s$island", name);
10971 else
10972 asprintf((char**)&fName, "%s$island$%d", name, islandRegion+1);
10973 }
10974 else {
10975 asprintf((char**)&fName, "%s_plus_%d$island$%d", name, finalTargetOffset, islandRegion);
10976 }
10977
10978 if ( finalTarget.isThumb() ) {
10979 if ( writer.fOptions.preferSubArchitecture() && writer.fOptions.subArchitecture() == CPU_SUBTYPE_ARM_V7 ) {
10980 fIslandKind = kBranchIslandToThumb2;
10981 }
10982 else {
10983 if ( writer.fSlideable )
10984 fIslandKind = kBranchIslandToThumb1;
10985 else
10986 fIslandKind = kBranchIslandNoPicToThumb1;
10987 }
10988 }
10989 else {
10990 fIslandKind = kBranchIslandToARM;
10991 }
10992 }
10993
10994
10995 template <>
10996 void BranchIslandAtom<ppc>::copyRawContent(uint8_t buffer[]) const
10997 {
10998 int64_t displacement;
10999 const int64_t bl_sixteenMegLimit = 0x00FFFFFF;
11000 if ( fTarget.getContentType() == ObjectFile::Atom::kBranchIsland ) {
11001 displacement = getFinalTargetAdress() - this->getAddress();
11002 if ( (displacement > bl_sixteenMegLimit) || (displacement < (-bl_sixteenMegLimit)) ) {
11003 displacement = fTarget.getAddress() - this->getAddress();
11004 }
11005 }
11006 else {
11007 displacement = fTarget.getAddress() + fFinalTargetOffset - this->getAddress();
11008 }
11009 int32_t branchInstruction = 0x48000000 | ((uint32_t)displacement & 0x03FFFFFC);
11010 OSWriteBigInt32(buffer, 0, branchInstruction);
11011 }
11012
11013 template <>
11014 void BranchIslandAtom<ppc64>::copyRawContent(uint8_t buffer[]) const
11015 {
11016 int64_t displacement;
11017 const int64_t bl_sixteenMegLimit = 0x00FFFFFF;
11018 if ( fTarget.getContentType() == ObjectFile::Atom::kBranchIsland ) {
11019 displacement = getFinalTargetAdress() - this->getAddress();
11020 if ( (displacement > bl_sixteenMegLimit) || (displacement < (-bl_sixteenMegLimit)) ) {
11021 displacement = fTarget.getAddress() - this->getAddress();
11022 }
11023 }
11024 else {
11025 displacement = fTarget.getAddress() + fFinalTargetOffset - this->getAddress();
11026 }
11027 int32_t branchInstruction = 0x48000000 | ((uint32_t)displacement & 0x03FFFFFC);
11028 OSWriteBigInt32(buffer, 0, branchInstruction);
11029 }
11030
11031 template <>
11032 void BranchIslandAtom<arm>::copyRawContent(uint8_t buffer[]) const
11033 {
11034 const bool log = false;
11035 switch ( fIslandKind ) {
11036 case kBranchIslandToARM:
11037 {
11038 int64_t displacement;
11039 // an ARM branch can branch farther than a thumb branch. The branch
11040 // island generation was conservative and put islands every thumb
11041 // branch distance apart. Check to see if this is an island
11042 // hopping branch that could be optimized to go directly to target.
11043 if ( fTarget.getContentType() == ObjectFile::Atom::kBranchIsland ) {
11044 displacement = getFinalTargetAdress() - this->getAddress() - 8;
11045 if ( (displacement < 33554428LL) && (displacement > (-33554432LL)) ) {
11046 // can skip branch island and jump straight to target
11047 if (log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n", fName, getFinalTargetAdress(), this->getAddress());
11048 }
11049 else {
11050 // ultimate target is too far, jump to island
11051 displacement = fTarget.getAddress() - this->getAddress() - 8;
11052 if (log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n", fName, fTarget.getAddress());
11053 }
11054 }
11055 else {
11056 // target of island is ultimate target
11057 displacement = fTarget.getAddress() + fFinalTargetOffset - this->getAddress() - 8;
11058 if (log) fprintf(stderr, "%s: jump to target at 0x%08llX\n", fName, fTarget.getAddress());
11059 }
11060 uint32_t imm24 = (displacement >> 2) & 0x00FFFFFF;
11061 int32_t branchInstruction = 0xEA000000 | imm24;
11062 OSWriteLittleInt32(buffer, 0, branchInstruction);
11063 }
11064 break;
11065 case kBranchIslandToThumb2:
11066 {
11067 int64_t displacement;
11068 // an ARM branch can branch farther than a thumb branch. The branch
11069 // island generation was conservative and put islands every thumb
11070 // branch distance apart. Check to see if this is an island
11071 // hopping branch that could be optimized to go directly to target.
11072 if ( fTarget.getContentType() == ObjectFile::Atom::kBranchIsland ) {
11073 displacement = getFinalTargetAdress() - this->getAddress() - 4;
11074 if ( (displacement < 16777214) && (displacement > (-16777216LL)) ) {
11075 // can skip branch island and jump straight to target
11076 if (log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n", fName, getFinalTargetAdress(), this->getAddress());
11077 }
11078 else {
11079 // ultimate target is too far, jump to island
11080 displacement = fTarget.getAddress() - this->getAddress() - 4;
11081 if (log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n", fName, fTarget.getAddress());
11082 }
11083 }
11084 else {
11085 // target of island is ultimate target
11086 displacement = fTarget.getAddress() + fFinalTargetOffset - this->getAddress() - 4;
11087 if (log) fprintf(stderr, "%s: jump to target at 0x%08llX\n", fName, fTarget.getAddress());
11088 }
11089 if ( (displacement > 16777214) || (displacement < (-16777216LL)) ) {
11090 throwf("internal branch island error: thumb2 b/bx out of range (%lld max is +/-16M) from %s to %s in %s",
11091 displacement, this->getDisplayName(),
11092 fTarget.getDisplayName(), fTarget.getFile()->getPath());
11093 }
11094 // The instruction is really two instructions:
11095 // The lower 16 bits are the first instruction, which contains the high
11096 // 11 bits of the displacement.
11097 // The upper 16 bits are the second instruction, which contains the low
11098 // 11 bits of the displacement, as well as differentiating bl and blx.
11099 uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
11100 uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
11101 uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
11102 uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
11103 uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
11104 uint32_t j1 = (i1 == s);
11105 uint32_t j2 = (i2 == s);
11106 uint32_t opcode = 0x9000F000;
11107 uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
11108 uint32_t firstDisp = (s << 10) | imm10;
11109 uint32_t newInstruction = opcode | (nextDisp << 16) | firstDisp;
11110 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
11111 // s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
11112 OSWriteLittleInt32(buffer, 0, newInstruction);
11113 }
11114 break;
11115 case kBranchIslandToThumb1:
11116 {
11117 // There is no large-displacement thumb1 branch instruction.
11118 // Instead, use ARM instructions that can jump to thumb.
11119 // We use a full 32-bit displacement, so we can jump directly to the target; no island hopping is needed.
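// Why the "+ 12" bias below: the ldr at island+0 reads pc as island+8, so
// [pc, #4] loads the literal stored at island+12; the add at island+4 then
// reads pc as island+12, making ip = (island+12) + (target - (island+12))
// == target (with bit 0 set when the target is thumb so bx switches modes).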
11120 int64_t displacement = getFinalTargetAdress() - (this->getAddress() + 12);
11121 if ( fFinalTarget.isThumb() )
11122 displacement |= 1;
11123 if (log) fprintf(stderr, "%s: 4 ARM instruction jump to final target at 0x%08llX\n", fName, getFinalTargetAdress());
11124 OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc004); // ldr ip, [pc, #4]
11125 OSWriteLittleInt32(&buffer[ 4], 0, 0xe08fc00c); // add ip, pc, ip
11126 OSWriteLittleInt32(&buffer[ 8], 0, 0xe12fff1c); // bx ip
11127 OSWriteLittleInt32(&buffer[12], 0, displacement); // .long target - (this + 12)
11128 }
11129 break;
11130 case kBranchIslandNoPicToThumb1:
11131 {
11132 // There is no large-displacement thumb1 branch instruction.
11133 // Instead, use ARM instructions that can jump to thumb.
11134 // We store the full 32-bit absolute target address, so we can jump directly to the target; no island hopping is needed.
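// Here "ldr pc, [pc, #-4]" at island+0 reads pc as island+8, so it loads
// the word at island+4 (the absolute target address, bit 0 set when the
// target is thumb) directly into pc. Because the literal is an absolute
// address rather than a delta, this island is not position independent
// (hence "NoPic").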
11135 uint32_t targetAddr = getFinalTargetAdress();
11136 if ( fFinalTarget.isThumb() )
11137 targetAddr |= 1;
11138 if (log) fprintf(stderr, "%s: 2 ARM instruction jump to final target at 0x%08llX\n", fName, getFinalTargetAdress());
11139 OSWriteLittleInt32(&buffer[0], 0, 0xe51ff004); // ldr pc, [pc, #-4]
11140 OSWriteLittleInt32(&buffer[4], 0, targetAddr); // .long target (absolute address)
11141 }
11142 break;
11143 };
11144 }
11145
11146 template <>
11147 uint64_t BranchIslandAtom<ppc>::getSize() const
11148 {
11149 return 4;
11150 }
11151
11152 template <>
11153 uint64_t BranchIslandAtom<ppc64>::getSize() const
11154 {
11155 return 4;
11156 }
11157
11158 template <>
11159 uint64_t BranchIslandAtom<arm>::getSize() const
11160 {
11161 switch ( fIslandKind ) {
11162 case kBranchIslandToARM:
11163 return 4;
11164 case kBranchIslandToThumb1:
11165 return 16;
11166 case kBranchIslandToThumb2:
11167 return 4;
11168 case kBranchIslandNoPicToThumb1:
11169 return 8;
11170 };
11171 throw "internal error: no ARM branch island kind";
11172 }
11173
11174
11175
11176 template <typename A>
11177 uint64_t SegmentSplitInfoLoadCommandsAtom<A>::getSize() const
11178 {
11179 if ( fWriter.fSplitCodeToDataContentAtom->canEncode() )
11180 return this->alignedSize(sizeof(macho_linkedit_data_command<P>));
11181 else
11182 return 0; // a zero size causes the load command to be suppressed
11183 }
11184
11185 template <typename A>
11186 void SegmentSplitInfoLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
11187 {
11188 uint64_t size = this->getSize();
11189 if ( size > 0 ) {
11190 bzero(buffer, size);
11191 macho_linkedit_data_command<P>* cmd = (macho_linkedit_data_command<P>*)buffer;
11192 cmd->set_cmd(LC_SEGMENT_SPLIT_INFO);
11193 cmd->set_cmdsize(size);
11194 cmd->set_dataoff(fWriter.fSplitCodeToDataContentAtom->getFileOffset());
11195 cmd->set_datasize(fWriter.fSplitCodeToDataContentAtom->getSize());
11196 }
11197 }
11198
11199
11200 template <typename A>
11201 uint64_t SegmentSplitInfoContentAtom<A>::getSize() const
11202 {
11203 return fEncodedData.size();
11204 }
11205
11206 template <typename A>
11207 void SegmentSplitInfoContentAtom<A>::copyRawContent(uint8_t buffer[]) const
11208 {
11209 memcpy(buffer, &fEncodedData[0], fEncodedData.size());
11210 }
11211
11212
11213 template <typename A>
11214 void SegmentSplitInfoContentAtom<A>::uleb128EncodeAddresses(const std::vector<SegmentSplitInfoContentAtom<A>::AtomAndOffset>& locations)
11215 {
11216 pint_t addr = fWriter.fOptions.baseAddress();
11217 for(typename std::vector<AtomAndOffset>::const_iterator it = locations.begin(); it != locations.end(); ++it) {
11218 pint_t nextAddr = it->atom->getAddress() + it->offset;
11219 //fprintf(stderr, "\t0x%0llX\n", (uint64_t)nextAddr);
11220 uint64_t delta = nextAddr - addr;
11221 if ( delta == 0 )
11222 throw "double split seg info for same address";
11223 // uleb128 encode
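// Example: a delta of 0x1234 is emitted low 7 bits first as the two
// bytes 0xB4 0x24 (0x34 with the continuation bit set, then 0x24); a
// delta that fits in 7 bits is a single byte with the high bit clear.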
11224 uint8_t byte;
11225 do {
11226 byte = delta & 0x7F;
11227 delta &= ~0x7F;
11228 if ( delta != 0 )
11229 byte |= 0x80;
11230 fEncodedData.push_back(byte);
11231 delta = delta >> 7;
11232 }
11233 while( byte >= 0x80 );
11234 addr = nextAddr;
11235 }
11236 }
11237
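// The LC_SEGMENT_SPLIT_INFO payload built below is a sequence of groups,
// one per reference kind present: a kind byte (1-4), the uleb128-encoded
// address deltas for that kind (the first delta is relative to the image
// base address), and a terminating 0 byte. A final 0 byte ends the whole
// stream, which is then zero-padded to a multiple of the pointer size.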
11238 template <typename A>
11239 void SegmentSplitInfoContentAtom<A>::encode()
11240 {
11241 if ( ! fCantEncode ) {
11242 fEncodedData.reserve(8192);
11243
11244 if ( fKind1Locations.size() != 0 ) {
11245 fEncodedData.push_back(1);
11246 //fprintf(stderr, "type 1:\n");
11247 this->uleb128EncodeAddresses(fKind1Locations);
11248 fEncodedData.push_back(0);
11249 }
11250
11251 if ( fKind2Locations.size() != 0 ) {
11252 fEncodedData.push_back(2);
11253 //fprintf(stderr, "type 2:\n");
11254 this->uleb128EncodeAddresses(fKind2Locations);
11255 fEncodedData.push_back(0);
11256 }
11257
11258 if ( fKind3Locations.size() != 0 ) {
11259 fEncodedData.push_back(3);
11260 //fprintf(stderr, "type 3:\n");
11261 this->uleb128EncodeAddresses(fKind3Locations);
11262 fEncodedData.push_back(0);
11263 }
11264
11265 if ( fKind4Locations.size() != 0 ) {
11266 fEncodedData.push_back(4);
11267 //fprintf(stderr, "type 4:\n");
11268 this->uleb128EncodeAddresses(fKind4Locations);
11269 fEncodedData.push_back(0);
11270 }
11271
11272 // always add zero byte to mark end
11273 fEncodedData.push_back(0);
11274
11275 // add zeros to end to align size
11276 while ( (fEncodedData.size() % sizeof(pint_t)) != 0 )
11277 fEncodedData.push_back(0);
11278 }
11279 }
11280
11281
11282 template <typename A>
11283 ObjCInfoAtom<A>::ObjCInfoAtom(Writer<A>& writer, ObjectFile::Reader::ObjcConstraint objcConstraint,
11284 bool objcReplacementClasses, bool abi2override)
11285 : WriterAtom<A>(writer, getInfoSegment(abi2override)), fAbi2override(abi2override)
11286 {
11287 fContent[0] = 0;
11288 uint32_t value = 0;
11289 // struct objc_image_info {
11290 // uint32_t version; // initially 0
11291 // uint32_t flags;
11292 // };
11293 // #define OBJC_IMAGE_SUPPORTS_GC 2
11294 // #define OBJC_IMAGE_GC_ONLY 4
11295 //
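// For example, replacement classes combined with kObjcGC yield flags == 7
// (bit 0 for replacement classes, 2|4 for GC-only), while plain
// retain/release code without replacement classes leaves flags at 0.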
11296 if ( objcReplacementClasses )
11297 value = 1;
11298 switch ( objcConstraint ) {
11299 case ObjectFile::Reader::kObjcNone:
11300 case ObjectFile::Reader::kObjcRetainRelease:
11301 break;
11302 case ObjectFile::Reader::kObjcRetainReleaseOrGC:
11303 value |= 2;
11304 break;
11305 case ObjectFile::Reader::kObjcGC:
11306 value |= 6;
11307 break;
11308 }
11309 A::P::E::set32(fContent[1], value);
11310 }
11311
11312 template <typename A>
11313 void ObjCInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
11314 {
11315 memcpy(buffer, &fContent[0], 8);
11316 }
11317
11318
11319 // objc info section is in a different segment and section for 32 vs 64 bit runtimes
11320 template <> const char* ObjCInfoAtom<ppc>::getSectionName() const { return "__image_info"; }
11321 template <> const char* ObjCInfoAtom<x86>::getSectionName() const { return fAbi2override ? "__objc_imageinfo" : "__image_info"; }
11322 template <> const char* ObjCInfoAtom<arm>::getSectionName() const { return "__objc_imageinfo"; }
11323 template <> const char* ObjCInfoAtom<ppc64>::getSectionName() const { return "__objc_imageinfo"; }
11324 template <> const char* ObjCInfoAtom<x86_64>::getSectionName() const { return "__objc_imageinfo"; }
11325
11326 template <> Segment& ObjCInfoAtom<ppc>::getInfoSegment(bool abi2override) const { return Segment::fgObjCSegment; }
11327 template <> Segment& ObjCInfoAtom<x86>::getInfoSegment(bool abi2override) const { return abi2override ? Segment::fgDataSegment : Segment::fgObjCSegment; }
11328 template <> Segment& ObjCInfoAtom<ppc64>::getInfoSegment(bool abi2override) const { return Segment::fgDataSegment; }
11329 template <> Segment& ObjCInfoAtom<x86_64>::getInfoSegment(bool abi2override) const { return Segment::fgDataSegment; }
11330 template <> Segment& ObjCInfoAtom<arm>::getInfoSegment(bool abi2override) const { return Segment::fgDataSegment; }
11331
11332
11333
11334
11335 template <typename A>
11336 void DyldInfoLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
11337 {
11338 // build LC_DYLD_INFO command
11339 macho_dyld_info_command<P>* cmd = (macho_dyld_info_command<P>*)buffer;
11340 bzero(cmd, sizeof(macho_dyld_info_command<P>));
11341
11342 cmd->set_cmd( fWriter.fOptions.makeClassicDyldInfo() ? LC_DYLD_INFO : LC_DYLD_INFO_ONLY);
11343 cmd->set_cmdsize(sizeof(macho_dyld_info_command<P>));
11344 if ( (fWriter.fCompressedRebaseInfoAtom != NULL) && (fWriter.fCompressedRebaseInfoAtom->getSize() != 0) ) {
11345 cmd->set_rebase_off(fWriter.fCompressedRebaseInfoAtom->getFileOffset());
11346 cmd->set_rebase_size(fWriter.fCompressedRebaseInfoAtom->getSize());
11347 }
11348 if ( (fWriter.fCompressedBindingInfoAtom != NULL) && (fWriter.fCompressedBindingInfoAtom->getSize() != 0) ) {
11349 cmd->set_bind_off(fWriter.fCompressedBindingInfoAtom->getFileOffset());
11350 cmd->set_bind_size(fWriter.fCompressedBindingInfoAtom->getSize());
11351 }
11352 if ( (fWriter.fCompressedWeakBindingInfoAtom != NULL) && (fWriter.fCompressedWeakBindingInfoAtom->getSize() != 0) ) {
11353 cmd->set_weak_bind_off(fWriter.fCompressedWeakBindingInfoAtom->getFileOffset());
11354 cmd->set_weak_bind_size(fWriter.fCompressedWeakBindingInfoAtom->getSize());
11355 }
11356 if ( (fWriter.fCompressedLazyBindingInfoAtom != NULL) && (fWriter.fCompressedLazyBindingInfoAtom->getSize() != 0) ) {
11357 cmd->set_lazy_bind_off(fWriter.fCompressedLazyBindingInfoAtom->getFileOffset());
11358 cmd->set_lazy_bind_size(fWriter.fCompressedLazyBindingInfoAtom->getSize());
11359 }
11360 if ( (fWriter.fCompressedExportInfoAtom != NULL) && (fWriter.fCompressedExportInfoAtom->getSize() != 0) ) {
11361 cmd->set_export_off(fWriter.fCompressedExportInfoAtom->getFileOffset());
11362 cmd->set_export_size(fWriter.fCompressedExportInfoAtom->getSize());
11363 }
11364 }
11365
11366
11367 struct rebase_tmp
11368 {
11369 rebase_tmp(uint8_t op, uint64_t p1, uint64_t p2=0) : opcode(op), operand1(p1), operand2(p2) {}
11370 uint8_t opcode;
11371 uint64_t operand1;
11372 uint64_t operand2;
11373 };
11374
11375
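// Rebase info is produced in three steps below: expand each RebaseInfo
// entry into a naive rebase_tmp opcode list, run a few peephole passes over
// that list, then emit the compressed byte stream. Illustrative example
// (assuming three pointer rebases at consecutive addresses in one segment):
// the naive list SET_TYPE_IMM, SET_SEGMENT_AND_OFFSET_ULEB, then three
// DO_REBASE_ULEB_TIMES(1) collapses in phase 1 to DO_REBASE_ULEB_TIMES(3)
// and in phase 4 to DO_REBASE_IMM_TIMES(3), so the three fixups cost three
// opcode bytes plus one uleb128 segment offset.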
11376 template <typename A>
11377 void CompressedRebaseInfoLinkEditAtom<A>::encode()
11378 {
11379 // sort rebase info by type, then address
11380 const std::vector<SegmentInfo*>& segments = fWriter.fSegmentInfos;
11381 std::vector<RebaseInfo>& info = fWriter.fRebaseInfo;
11382 std::sort(info.begin(), info.end());
11383
11384 // convert to temp encoding that can be more easily optimized
11385 std::vector<rebase_tmp> mid;
11386 const SegmentInfo* currentSegment = NULL;
11387 unsigned int segIndex = 0;
11388 uint8_t type = 0;
11389 uint64_t address = (uint64_t)(-1);
11390 for (std::vector<RebaseInfo>::iterator it = info.begin(); it != info.end(); ++it) {
11391 if ( type != it->fType ) {
11392 mid.push_back(rebase_tmp(REBASE_OPCODE_SET_TYPE_IMM, it->fType));
11393 type = it->fType;
11394 }
11395 if ( address != it->fAddress ) {
11396 if ( (currentSegment == NULL) || (it->fAddress < currentSegment->fBaseAddress)
11397 || ((currentSegment->fBaseAddress+currentSegment->fSize) <= it->fAddress) ) {
11398 segIndex = 0;
11399 for (std::vector<SegmentInfo*>::const_iterator segit = segments.begin(); segit != segments.end(); ++segit) {
11400 if ( ((*segit)->fBaseAddress <= it->fAddress) && (it->fAddress < ((*segit)->fBaseAddress+(*segit)->fSize)) ) {
11401 currentSegment = *segit;
11402 break;
11403 }
11404 ++segIndex;
11405 }
11406 mid.push_back(rebase_tmp(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB, segIndex, it->fAddress - currentSegment->fBaseAddress));
11407 }
11408 else {
11409 mid.push_back(rebase_tmp(REBASE_OPCODE_ADD_ADDR_ULEB, it->fAddress-address));
11410 }
11411 address = it->fAddress;
11412 }
11413 mid.push_back(rebase_tmp(REBASE_OPCODE_DO_REBASE_ULEB_TIMES, 1));
11414 address += sizeof(pint_t);
11415 }
11416 mid.push_back(rebase_tmp(REBASE_OPCODE_DONE, 0));
11417
11418 // optimize phase 1, compress packed runs of pointers
11419 rebase_tmp* dst = &mid[0];
11420 for (const rebase_tmp* src = &mid[0]; src->opcode != REBASE_OPCODE_DONE; ++src) {
11421 if ( (src->opcode == REBASE_OPCODE_DO_REBASE_ULEB_TIMES) && (src->operand1 == 1) ) {
11422 *dst = *src++;
11423 while (src->opcode == REBASE_OPCODE_DO_REBASE_ULEB_TIMES ) {
11424 dst->operand1 += src->operand1;
11425 ++src;
11426 }
11427 --src;
11428 ++dst;
11429 }
11430 else {
11431 *dst++ = *src;
11432 }
11433 }
11434 dst->opcode = REBASE_OPCODE_DONE;
11435
11436 // optimize phase 2, combine rebase/add pairs
11437 dst = &mid[0];
11438 for (const rebase_tmp* src = &mid[0]; src->opcode != REBASE_OPCODE_DONE; ++src) {
11439 if ( (src->opcode == REBASE_OPCODE_DO_REBASE_ULEB_TIMES)
11440 && (src->operand1 == 1)
11441 && (src[1].opcode == REBASE_OPCODE_ADD_ADDR_ULEB)) {
11442 dst->opcode = REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB;
11443 dst->operand1 = src[1].operand1;
11444 ++src;
11445 ++dst;
11446 }
11447 else {
11448 *dst++ = *src;
11449 }
11450 }
11451 dst->opcode = REBASE_OPCODE_DONE;
11452
11453 // optimize phase 3, compress packed runs of REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB with
11454 // same addr delta into one REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
11455 dst = &mid[0];
11456 for (const rebase_tmp* src = &mid[0]; src->opcode != REBASE_OPCODE_DONE; ++src) {
11457 uint64_t delta = src->operand1;
11458 if ( (src->opcode == REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB)
11459 && (src[1].opcode == REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB)
11460 && (src[2].opcode == REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB)
11461 && (src[1].operand1 == delta)
11462 && (src[2].operand1 == delta) ) {
11463 // found at least three in a row, this is worth compressing
11464 dst->opcode = REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB;
11465 dst->operand1 = 1;
11466 dst->operand2 = delta;
11467 ++src;
11468 while ( (src->opcode == REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB)
11469 && (src->operand1 == delta) ) {
11470 dst->operand1++;
11471 ++src;
11472 }
11473 --src;
11474 ++dst;
11475 }
11476 else {
11477 *dst++ = *src;
11478 }
11479 }
11480 dst->opcode = REBASE_OPCODE_DONE;
11481
11482 // optimize phase 4, use immediate encodings
11483 for (rebase_tmp* p = &mid[0]; p->opcode != REBASE_OPCODE_DONE; ++p) {
11484 if ( (p->opcode == REBASE_OPCODE_ADD_ADDR_ULEB)
11485 && (p->operand1 < (15*sizeof(pint_t)))
11486 && ((p->operand1 % sizeof(pint_t)) == 0) ) {
11487 p->opcode = REBASE_OPCODE_ADD_ADDR_IMM_SCALED;
11488 p->operand1 = p->operand1/sizeof(pint_t);
11489 }
11490 else if ( (p->opcode == REBASE_OPCODE_DO_REBASE_ULEB_TIMES) && (p->operand1 < 15) ) {
11491 p->opcode = REBASE_OPCODE_DO_REBASE_IMM_TIMES;
11492 }
11493 }
11494
11495 // convert to compressed encoding
11496 const static bool log = false;
11497 fEncodedData.reserve(info.size()*2);
11498 bool done = false;
11499 for (std::vector<rebase_tmp>::iterator it = mid.begin(); !done && it != mid.end() ; ++it) {
11500 switch ( it->opcode ) {
11501 case REBASE_OPCODE_DONE:
11502 if ( log ) fprintf(stderr, "REBASE_OPCODE_DONE()\n");
11503 done = true;
11504 break;
11505 case REBASE_OPCODE_SET_TYPE_IMM:
11506 if ( log ) fprintf(stderr, "REBASE_OPCODE_SET_TYPE_IMM(%lld)\n", it->operand1);
11507 fEncodedData.append_byte(REBASE_OPCODE_SET_TYPE_IMM | it->operand1);
11508 break;
11509 case REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
11510 if ( log ) fprintf(stderr, "REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB(%lld, 0x%llX)\n", it->operand1, it->operand2);
11511 fEncodedData.append_byte(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | it->operand1);
11512 fEncodedData.append_uleb128(it->operand2);
11513 break;
11514 case REBASE_OPCODE_ADD_ADDR_ULEB:
11515 if ( log ) fprintf(stderr, "REBASE_OPCODE_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11516 fEncodedData.append_byte(REBASE_OPCODE_ADD_ADDR_ULEB);
11517 fEncodedData.append_uleb128(it->operand1);
11518 break;
11519 case REBASE_OPCODE_ADD_ADDR_IMM_SCALED:
11520 if ( log ) fprintf(stderr, "REBASE_OPCODE_ADD_ADDR_IMM_SCALED(%lld=0x%llX)\n", it->operand1, it->operand1*sizeof(pint_t));
11521 fEncodedData.append_byte(REBASE_OPCODE_ADD_ADDR_IMM_SCALED | it->operand1 );
11522 break;
11523 case REBASE_OPCODE_DO_REBASE_IMM_TIMES:
11524 if ( log ) fprintf(stderr, "REBASE_OPCODE_DO_REBASE_IMM_TIMES(%lld)\n", it->operand1);
11525 fEncodedData.append_byte(REBASE_OPCODE_DO_REBASE_IMM_TIMES | it->operand1);
11526 break;
11527 case REBASE_OPCODE_DO_REBASE_ULEB_TIMES:
11528 if ( log ) fprintf(stderr, "REBASE_OPCODE_DO_REBASE_ULEB_TIMES(%lld)\n", it->operand1);
11529 fEncodedData.append_byte(REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
11530 fEncodedData.append_uleb128(it->operand1);
11531 break;
11532 case REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB:
11533 if ( log ) fprintf(stderr, "REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11534 fEncodedData.append_byte(REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB);
11535 fEncodedData.append_uleb128(it->operand1);
11536 break;
11537 case REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB:
11538 if ( log ) fprintf(stderr, "REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB(%lld, %lld)\n", it->operand1, it->operand2);
11539 fEncodedData.append_byte(REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB);
11540 fEncodedData.append_uleb128(it->operand1);
11541 fEncodedData.append_uleb128(it->operand2);
11542 break;
11543 }
11544 }
11545
11546
11547 // align to pointer size
11548 fEncodedData.pad_to_size(sizeof(pint_t));
11549
11550 if (log) fprintf(stderr, "total rebase info size = %ld\n", fEncodedData.size());
11551 }
11552
11553
11554 struct binding_tmp
11555 {
11556 binding_tmp(uint8_t op, uint64_t p1, uint64_t p2=0, const char* s=NULL)
11557 : opcode(op), operand1(p1), operand2(p2), name(s) {}
11558 uint8_t opcode;
11559 uint64_t operand1;
11560 uint64_t operand2;
11561 const char* name;
11562 };
11563
11564
11565
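// Binding info follows the same expand/optimize/emit pattern as the rebase
// info above. Illustrative example (assuming one pointer bound to a symbol
// "_foo" from dylib ordinal 1 with addend 0): the emitted stream is
// SET_DYLIB_ORDINAL_IMM(1), SET_SYMBOL_TRAILING_FLAGS_IMM(0, "_foo"),
// SET_TYPE_IMM(pointer), SET_SEGMENT_AND_OFFSET_ULEB(seg, offset), DO_BIND.
// Runs of binds at regularly spaced addresses are folded into
// DO_BIND_ULEB_TIMES_SKIPPING_ULEB by the optimization phases.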
11566 template <typename A>
11567 void CompressedBindingInfoLinkEditAtom<A>::encode()
11568 {
11569 // sort by library, symbol, type, then address
11570 const std::vector<SegmentInfo*>& segments = fWriter.fSegmentInfos;
11571 std::vector<BindingInfo>& info = fWriter.fBindingInfo;
11572 std::sort(info.begin(), info.end());
11573
11574 // convert to temp encoding that can be more easily optimized
11575 std::vector<binding_tmp> mid;
11576 const SegmentInfo* currentSegment = NULL;
11577 unsigned int segIndex = 0;
11578 int ordinal = 0x80000000;
11579 const char* symbolName = NULL;
11580 uint8_t type = 0;
11581 uint64_t address = (uint64_t)(-1);
11582 int64_t addend = 0;
11583 for (std::vector<BindingInfo>::iterator it = info.begin(); it != info.end(); ++it) {
11584 if ( ordinal != it->fLibraryOrdinal ) {
11585 if ( it->fLibraryOrdinal <= 0 ) {
11586 // special lookups are encoded as negative numbers in BindingInfo
11587 mid.push_back(binding_tmp(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM, it->fLibraryOrdinal));
11588 }
11589 else {
11590 mid.push_back(binding_tmp(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB, it->fLibraryOrdinal));
11591 }
11592 ordinal = it->fLibraryOrdinal;
11593 }
11594 if ( symbolName != it->fSymbolName ) {
11595 mid.push_back(binding_tmp(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, it->fFlags, 0, it->fSymbolName));
11596 symbolName = it->fSymbolName;
11597 }
11598 if ( type != it->fType ) {
11599 mid.push_back(binding_tmp(BIND_OPCODE_SET_TYPE_IMM, it->fType));
11600 type = it->fType;
11601 }
11602 if ( address != it->fAddress ) {
11603 if ( (currentSegment == NULL) || (it->fAddress < currentSegment->fBaseAddress)
11604 || ((currentSegment->fBaseAddress+currentSegment->fSize) <=it->fAddress)
11605 || (it->fAddress < address) ) {
11606 segIndex = 0;
11607 for (std::vector<SegmentInfo*>::const_iterator segit = segments.begin(); segit != segments.end(); ++segit) {
11608 if ( ((*segit)->fBaseAddress <= it->fAddress) && (it->fAddress < ((*segit)->fBaseAddress+(*segit)->fSize)) ) {
11609 currentSegment = *segit;
11610 break;
11611 }
11612 ++segIndex;
11613 }
11614 mid.push_back(binding_tmp(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB, segIndex, it->fAddress - currentSegment->fBaseAddress));
11615 }
11616 else {
11617 mid.push_back(binding_tmp(BIND_OPCODE_ADD_ADDR_ULEB, it->fAddress-address));
11618 }
11619 address = it->fAddress;
11620 }
11621 if ( addend != it->fAddend ) {
11622 mid.push_back(binding_tmp(BIND_OPCODE_SET_ADDEND_SLEB, it->fAddend));
11623 addend = it->fAddend;
11624 }
11625 mid.push_back(binding_tmp(BIND_OPCODE_DO_BIND, 0));
11626 address += sizeof(pint_t);
11627 }
11628 mid.push_back(binding_tmp(BIND_OPCODE_DONE, 0));
11629
11630
11631 // optimize phase 1, combine bind/add pairs
11632 binding_tmp* dst = &mid[0];
11633 for (const binding_tmp* src = &mid[0]; src->opcode != BIND_OPCODE_DONE; ++src) {
11634 if ( (src->opcode == BIND_OPCODE_DO_BIND)
11635 && (src[1].opcode == BIND_OPCODE_ADD_ADDR_ULEB) ) {
11636 dst->opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB;
11637 dst->operand1 = src[1].operand1;
11638 ++src;
11639 ++dst;
11640 }
11641 else {
11642 *dst++ = *src;
11643 }
11644 }
11645 dst->opcode = BIND_OPCODE_DONE;
11646
11647 // optimize phase 2, compress packed runs of BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB with
11648 // same addr delta into one BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB
11649 dst = &mid[0];
11650 for (const binding_tmp* src = &mid[0]; src->opcode != BIND_OPCODE_DONE; ++src) {
11651 uint64_t delta = src->operand1;
11652 if ( (src->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11653 && (src[1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11654 && (src[1].operand1 == delta) ) {
11655 // found at least two in a row, this is worth compressing
11656 dst->opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB;
11657 dst->operand1 = 1;
11658 dst->operand2 = delta;
11659 ++src;
11660 while ( (src->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11661 && (src->operand1 == delta) ) {
11662 dst->operand1++;
11663 ++src;
11664 }
11665 --src;
11666 ++dst;
11667 }
11668 else {
11669 *dst++ = *src;
11670 }
11671 }
11672 dst->opcode = BIND_OPCODE_DONE;
11673
11674 // optimize phase 3, use immediate encodings
11675 for (binding_tmp* p = &mid[0]; p->opcode != BIND_OPCODE_DONE; ++p) {
11676 if ( (p->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11677 && (p->operand1 < (15*sizeof(pint_t)))
11678 && ((p->operand1 % sizeof(pint_t)) == 0) ) {
11679 p->opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED;
11680 p->operand1 = p->operand1/sizeof(pint_t);
11681 }
11682 else if ( (p->opcode == BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB) && (p->operand1 <= 15) ) {
11683 p->opcode = BIND_OPCODE_SET_DYLIB_ORDINAL_IMM;
11684 }
11685 }
11686 dst->opcode = BIND_OPCODE_DONE;
11687
11688 // convert to compressed encoding
11689 const static bool log = false;
11690 fEncodedData.reserve(info.size()*2);
11691 bool done = false;
11692 for (std::vector<binding_tmp>::iterator it = mid.begin(); !done && it != mid.end() ; ++it) {
11693 switch ( it->opcode ) {
11694 case BIND_OPCODE_DONE:
11695 if ( log ) fprintf(stderr, "BIND_OPCODE_DONE()\n");
11696 done = true;
11697 break;
11698 case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM:
11699 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_IMM(%lld)\n", it->operand1);
11700 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | it->operand1);
11701 break;
11702 case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB:
11703 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB(%lld)\n", it->operand1);
11704 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
11705 fEncodedData.append_uleb128(it->operand1);
11706 break;
11707 case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM:
11708 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_SPECIAL_IMM(%lld)\n", it->operand1);
11709 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | (it->operand1 & BIND_IMMEDIATE_MASK));
11710 break;
11711 case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM:
11712 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM(0x%0llX, %s)\n", it->operand1, it->name);
11713 fEncodedData.append_byte(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | it->operand1);
11714 fEncodedData.append_string(it->name);
11715 break;
11716 case BIND_OPCODE_SET_TYPE_IMM:
11717 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_TYPE_IMM(%lld)\n", it->operand1);
11718 fEncodedData.append_byte(BIND_OPCODE_SET_TYPE_IMM | it->operand1);
11719 break;
11720 case BIND_OPCODE_SET_ADDEND_SLEB:
11721 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_ADDEND_SLEB(%lld)\n", it->operand1);
11722 fEncodedData.append_byte(BIND_OPCODE_SET_ADDEND_SLEB);
11723 fEncodedData.append_sleb128(it->operand1);
11724 break;
11725 case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
11726 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB(%lld, 0x%llX)\n", it->operand1, it->operand2);
11727 fEncodedData.append_byte(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | it->operand1);
11728 fEncodedData.append_uleb128(it->operand2);
11729 break;
11730 case BIND_OPCODE_ADD_ADDR_ULEB:
11731 if ( log ) fprintf(stderr, "BIND_OPCODE_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11732 fEncodedData.append_byte(BIND_OPCODE_ADD_ADDR_ULEB);
11733 fEncodedData.append_uleb128(it->operand1);
11734 break;
11735 case BIND_OPCODE_DO_BIND:
11736 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND()\n");
11737 fEncodedData.append_byte(BIND_OPCODE_DO_BIND);
11738 break;
11739 case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
11740 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11741 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB);
11742 fEncodedData.append_uleb128(it->operand1);
11743 break;
11744 case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
11745 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED(%lld=0x%llX)\n", it->operand1, it->operand1*sizeof(pint_t));
11746 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | it->operand1 );
11747 break;
11748 case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
11749 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB(%lld, %lld)\n", it->operand1, it->operand2);
11750 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB);
11751 fEncodedData.append_uleb128(it->operand1);
11752 fEncodedData.append_uleb128(it->operand2);
11753 break;
11754 }
11755 }
11756
11757 // align to pointer size
11758 fEncodedData.pad_to_size(sizeof(pint_t));
11759
11760 if (log) fprintf(stderr, "total binding info size = %ld\n", fEncodedData.size());
11761
11762 }
11763
11764
11765
11766 struct WeakBindingSorter
11767 {
11768 bool operator()(const BindingInfo& left, const BindingInfo& right)
11769 {
11770 // sort by symbol, type, address
11771 if ( left.fSymbolName != right.fSymbolName )
11772 return ( strcmp(left.fSymbolName, right.fSymbolName) < 0 );
11773 if ( left.fType != right.fType )
11774 return (left.fType < right.fType);
11775 return (left.fAddress < right.fAddress);
11776 }
11777 };
11778
11779
11780
11781 template <typename A>
11782 void CompressedWeakBindingInfoLinkEditAtom<A>::encode()
11783 {
11784 // add regular atoms that override a dylib's weak definitions
11785 for(std::set<const class ObjectFile::Atom*>::iterator it = fWriter.fRegularDefAtomsThatOverrideADylibsWeakDef->begin();
11786 it != fWriter.fRegularDefAtomsThatOverrideADylibsWeakDef->end(); ++it) {
11787 if ( fWriter.shouldExport(**it) )
11788 fWriter.fWeakBindingInfo.push_back(BindingInfo(0, (*it)->getName(), true, 0, 0));
11789 }
11790
11791 // add all exported weak definitions
11792 for(std::vector<class ObjectFile::Atom*>::iterator it = fWriter.fAllAtoms->begin(); it != fWriter.fAllAtoms->end(); ++it) {
11793 ObjectFile::Atom* atom = *it;
11794 if ( (atom->getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) && fWriter.shouldExport(*atom) ) {
11795 fWriter.fWeakBindingInfo.push_back(BindingInfo(0, atom->getName(), false, 0, 0));
11796 }
11797 }
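// Note: both kinds of entries added above use type 0, so the encoding loop
// below emits only BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM for them.
// Entries with a real type (if any were added to fWeakBindingInfo elsewhere
// for actual weak-binding sites) also get SET_TYPE, SET_SEG/ADD_ADDR,
// SET_ADDEND and DO_BIND.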
11798
11799 // sort by symbol, type, address
11800 const std::vector<SegmentInfo*>& segments = fWriter.fSegmentInfos;
11801 std::vector<BindingInfo>& info = fWriter.fWeakBindingInfo;
11802 if ( info.size() == 0 )
11803 return;
11804 std::sort(info.begin(), info.end(), WeakBindingSorter());
11805
11806 // convert to temp encoding that can be more easily optimized
11807 std::vector<binding_tmp> mid;
11808 mid.reserve(info.size());
11809 const SegmentInfo* currentSegment = NULL;
11810 unsigned int segIndex = 0;
11811 const char* symbolName = NULL;
11812 uint8_t type = 0;
11813 uint64_t address = (uint64_t)(-1);
11814 int64_t addend = 0;
11815 for (std::vector<BindingInfo>::iterator it = info.begin(); it != info.end(); ++it) {
11816 if ( symbolName != it->fSymbolName ) {
11817 mid.push_back(binding_tmp(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, it->fFlags, 0, it->fSymbolName));
11818 symbolName = it->fSymbolName;
11819 }
11820 if ( it->fType != 0 ) {
11821 if ( type != it->fType ) {
11822 mid.push_back(binding_tmp(BIND_OPCODE_SET_TYPE_IMM, it->fType));
11823 type = it->fType;
11824 }
11825 if ( address != it->fAddress ) {
11826 // typeless entries (strong overrides and plain weak exports) just get BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM
11827 // real weak binding sites also get SET_TYPE, SET_SEG/ADD_ADDR, SET_ADDEND, DO_BIND
11828 if ( (currentSegment == NULL) || (it->fAddress < currentSegment->fBaseAddress)
11829 || ((currentSegment->fBaseAddress+currentSegment->fSize) <=it->fAddress) ) {
11830 segIndex = 0;
11831 for (std::vector<SegmentInfo*>::const_iterator segit = segments.begin(); segit != segments.end(); ++segit) {
11832 if ( ((*segit)->fBaseAddress <= it->fAddress) && (it->fAddress < ((*segit)->fBaseAddress+(*segit)->fSize)) ) {
11833 currentSegment = *segit;
11834 break;
11835 }
11836 ++segIndex;
11837 }
11838 mid.push_back(binding_tmp(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB, segIndex, it->fAddress - currentSegment->fBaseAddress));
11839 }
11840 else {
11841 mid.push_back(binding_tmp(BIND_OPCODE_ADD_ADDR_ULEB, it->fAddress-address));
11842 }
11843 address = it->fAddress;
11844 }
11845 if ( addend != it->fAddend ) {
11846 mid.push_back(binding_tmp(BIND_OPCODE_SET_ADDEND_SLEB, it->fAddend));
11847 addend = it->fAddend;
11848 }
11849 mid.push_back(binding_tmp(BIND_OPCODE_DO_BIND, 0));
11850 address += sizeof(pint_t);
11851 }
11852 }
11853 mid.push_back(binding_tmp(BIND_OPCODE_DONE, 0));
11854
11855
11856 // optimize phase 1, combine bind/add pairs
11857 binding_tmp* dst = &mid[0];
11858 for (const binding_tmp* src = &mid[0]; src->opcode != BIND_OPCODE_DONE; ++src) {
11859 if ( (src->opcode == BIND_OPCODE_DO_BIND)
11860 && (src[1].opcode == BIND_OPCODE_ADD_ADDR_ULEB) ) {
11861 dst->opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB;
11862 dst->operand1 = src[1].operand1;
11863 ++src;
11864 ++dst;
11865 }
11866 else {
11867 *dst++ = *src;
11868 }
11869 }
11870 dst->opcode = BIND_OPCODE_DONE;
11871
11872 // optimize phase 2, compress packed runs of BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB with
11873 // same addr delta into one BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB
11874 dst = &mid[0];
11875 for (const binding_tmp* src = &mid[0]; src->opcode != BIND_OPCODE_DONE; ++src) {
11876 uint64_t delta = src->operand1;
11877 if ( (src->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11878 && (src[1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11879 && (src[1].operand1 == delta) ) {
11880 // found at least two in a row, this is worth compressing
11881 dst->opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB;
11882 dst->operand1 = 1;
11883 dst->operand2 = delta;
11884 ++src;
11885 while ( (src->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11886 && (src->operand1 == delta) ) {
11887 dst->operand1++;
11888 ++src;
11889 }
11890 --src;
11891 ++dst;
11892 }
11893 else {
11894 *dst++ = *src;
11895 }
11896 }
11897 dst->opcode = BIND_OPCODE_DONE;
11898
11899 // optimize phase 3, use immediate encodings
11900 for (binding_tmp* p = &mid[0]; p->opcode != BIND_OPCODE_DONE; ++p) {
11901 if ( (p->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11902 && (p->operand1 < (15*sizeof(pint_t)))
11903 && ((p->operand1 % sizeof(pint_t)) == 0) ) {
11904 p->opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED;
11905 p->operand1 = p->operand1/sizeof(pint_t);
11906 }
11907 }
11908 dst->opcode = BIND_OPCODE_DONE;
11909
11910
11911 // convert to compressed encoding
11912 const static bool log = false;
11913 fEncodedData.reserve(info.size()*2);
11914 bool done = false;
11915 for (std::vector<binding_tmp>::iterator it = mid.begin(); !done && it != mid.end() ; ++it) {
11916 switch ( it->opcode ) {
11917 case BIND_OPCODE_DONE:
11918 if ( log ) fprintf(stderr, "BIND_OPCODE_DONE()\n");
11919 fEncodedData.append_byte(BIND_OPCODE_DONE);
11920 done = true;
11921 break;
11922 case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM:
11923 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_IMM(%lld)\n", it->operand1);
11924 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | it->operand1);
11925 break;
11926 case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB:
11927 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB(%lld)\n", it->operand1);
11928 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
11929 fEncodedData.append_uleb128(it->operand1);
11930 break;
11931 case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM:
11932 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_SPECIAL_IMM(%lld)\n", it->operand1);
11933 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | (it->operand1 & BIND_IMMEDIATE_MASK));
11934 break;
11935 case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM:
11936 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM(0x%0llX, %s)\n", it->operand1, it->name);
11937 fEncodedData.append_byte(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | it->operand1);
11938 fEncodedData.append_string(it->name);
11939 break;
11940 case BIND_OPCODE_SET_TYPE_IMM:
11941 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_TYPE_IMM(%lld)\n", it->operand1);
11942 fEncodedData.append_byte(BIND_OPCODE_SET_TYPE_IMM | it->operand1);
11943 break;
11944 case BIND_OPCODE_SET_ADDEND_SLEB:
11945 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_ADDEND_SLEB(%lld)\n", it->operand1);
11946 fEncodedData.append_byte(BIND_OPCODE_SET_ADDEND_SLEB);
11947 fEncodedData.append_sleb128(it->operand1);
11948 break;
11949 case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
11950 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB(%lld, 0x%llX)\n", it->operand1, it->operand2);
11951 fEncodedData.append_byte(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | it->operand1);
11952 fEncodedData.append_uleb128(it->operand2);
11953 break;
11954 case BIND_OPCODE_ADD_ADDR_ULEB:
11955 if ( log ) fprintf(stderr, "BIND_OPCODE_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11956 fEncodedData.append_byte(BIND_OPCODE_ADD_ADDR_ULEB);
11957 fEncodedData.append_uleb128(it->operand1);
11958 break;
11959 case BIND_OPCODE_DO_BIND:
11960 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND()\n");
11961 fEncodedData.append_byte(BIND_OPCODE_DO_BIND);
11962 break;
11963 case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
11964 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11965 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB);
11966 fEncodedData.append_uleb128(it->operand1);
11967 break;
11968 case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
11969 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED(%lld=0x%llX)\n", it->operand1, it->operand1*sizeof(pint_t));
11970 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | it->operand1 );
11971 break;
11972 case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
11973 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB(%lld, %lld)\n", it->operand1, it->operand2);
11974 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB);
11975 fEncodedData.append_uleb128(it->operand1);
11976 fEncodedData.append_uleb128(it->operand2);
11977 break;
11978 }
11979 }
11980
11981 // align to pointer size
11982 fEncodedData.pad_to_size(sizeof(pint_t));
11983
11984 if (log) fprintf(stderr, "total weak binding info size = %ld\n", fEncodedData.size());
11985
11986 }
11987
11988 template <typename A>
11989 void CompressedLazyBindingInfoLinkEditAtom<A>::encode()
11990 {
11991 // stream all lazy bindings and record start offsets
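// Unlike the eager binding info, the lazy binding info is not one optimized
// stream: each lazy pointer gets its own small, self-terminated opcode
// sequence (SET_SEGMENT_AND_OFFSET_ULEB, a dylib ordinal opcode, the symbol
// name, DO_BIND, DONE). The starting byte offset of each sequence is
// recorded on the lazy pointer atom so its stub helper can hand that offset
// to dyld on the first call.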
11992 const SegmentInfo* currentSegment = NULL;
11993 uint8_t segIndex = 0;
11994 const std::vector<SegmentInfo*>& segments = fWriter.fSegmentInfos;
11995 std::vector<class LazyPointerAtom<A>*>& allLazys = fWriter.fAllSynthesizedLazyPointers;
11996 for (typename std::vector<class LazyPointerAtom<A>*>::iterator it = allLazys.begin(); it != allLazys.end(); ++it) {
11997 LazyPointerAtom<A>* lazyPointerAtom = *it;
11998 ObjectFile::Atom* lazyPointerTargetAtom = lazyPointerAtom->getTarget();
11999
12000 // skip lazy pointers that are bound non-lazily because they are coalesced
12001 if ( ! fWriter.targetRequiresWeakBinding(*lazyPointerTargetAtom) ) {
12002 // record start offset for use by stub helper
12003 lazyPointerAtom->setLazyBindingInfoOffset(fEncodedData.size());
12004
12005 // write address to bind
12006 pint_t address = lazyPointerAtom->getAddress();
12007 if ( (currentSegment == NULL) || (address < currentSegment->fBaseAddress)
12008 || ((currentSegment->fBaseAddress+currentSegment->fSize) <= address) ) {
12009 segIndex = 0;
12010 for (std::vector<SegmentInfo*>::const_iterator segit = segments.begin(); segit != segments.end(); ++segit) {
12011 if ( ((*segit)->fBaseAddress <= address) && (address < ((*segit)->fBaseAddress+(*segit)->fSize)) ) {
12012 currentSegment = *segit;
12013 break;
12014 }
12015 ++segIndex;
12016 }
12017 }
12018 fEncodedData.append_byte(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segIndex);
12019 fEncodedData.append_uleb128(lazyPointerAtom->getAddress() - currentSegment->fBaseAddress);
12020
12021 // write ordinal
12022 int ordinal = fWriter.compressedOrdinalForImortedAtom(lazyPointerTargetAtom);
12023 if ( ordinal <= 0 ) {
12024 // special lookups are encoded as negative numbers in BindingInfo
12025 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | (ordinal & BIND_IMMEDIATE_MASK) );
12026 }
12027 else if ( ordinal <= 15 ) {
12028 // small ordinals are encoded in opcode
12029 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal);
12030 }
12031 else {
12032 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
12033 fEncodedData.append_uleb128(ordinal);
12034 }
12035 // write symbol name
12036 bool weak_import = fWriter.fWeakImportMap[lazyPointerTargetAtom];
12037 if ( weak_import )
12038 fEncodedData.append_byte(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | BIND_SYMBOL_FLAGS_WEAK_IMPORT);
12039 else
12040 fEncodedData.append_byte(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM);
12041 fEncodedData.append_string(lazyPointerTargetAtom->getName());
12042 // write do bind
12043 fEncodedData.append_byte(BIND_OPCODE_DO_BIND);
12044 fEncodedData.append_byte(BIND_OPCODE_DONE);
12045 }
12046 }
12047 // align to pointer size
12048 fEncodedData.pad_to_size(sizeof(pint_t));
12049
12050 //fprintf(stderr, "lazy binding info size = %ld, for %ld entries\n", fEncodedData.size(), allLazys.size());
12051 }
12052
12053 struct TrieEntriesSorter
12054 {
12055 TrieEntriesSorter(Options& o) : fOptions(o) {}
12056
12057 bool operator()(const mach_o::trie::Entry& left, const mach_o::trie::Entry& right)
12058 {
12059 unsigned int leftOrder;
12060 unsigned int rightOrder;
12061 fOptions.exportedSymbolOrder(left.name, &leftOrder);
12062 fOptions.exportedSymbolOrder(right.name, &rightOrder);
12063 if ( leftOrder != rightOrder )
12064 return (leftOrder < rightOrder);
12065 else
12066 return (left.address < right.address);
12067 }
12068 private:
12069 Options& fOptions;
12070 };
12071
12072
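// The export info is a trie keyed by symbol name: each exported atom
// becomes a mach_o::trie::Entry carrying its flags (e.g.
// EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION) and its address relative to the mach
// header (bit 0 set for thumb), and makeTrie() from MachOTrie.hpp flattens
// those entries into the byte stream emitted here.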
12073 template <typename A>
12074 void CompressedExportInfoLinkEditAtom<A>::encode()
12075 {
12076 // make vector of mach_o::trie::Entry for all exported symbols
12077 std::vector<class ObjectFile::Atom*>& exports = fWriter.fExportedAtoms;
12078 uint64_t imageBaseAddress = fWriter.fMachHeaderAtom->getAddress();
12079 std::vector<mach_o::trie::Entry> entries;
12080 entries.reserve(exports.size());
12081 for (std::vector<ObjectFile::Atom*>::iterator it = exports.begin(); it != exports.end(); ++it) {
12082 ObjectFile::Atom* atom = *it;
12083 uint64_t flags = 0;
12084 if ( atom->getDefinitionKind() == ObjectFile::Atom::kWeakDefinition )
12085 flags |= EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION;
12086 uint64_t address = atom->getAddress() - imageBaseAddress;
12087 if ( atom->isThumb() )
12088 address |= 1;
12089 mach_o::trie::Entry entry;
12090 entry.name = atom->getName();
12091 entry.flags = flags;
12092 entry.address = address;
12093 entries.push_back(entry);
12094 }
12095
12096 // sort by -exported_symbols_order, then by address for symbols with the same ordering
12097 std::sort(entries.begin(), entries.end(), TrieEntriesSorter(fWriter.fOptions));
12098
12099 // create trie
12100 mach_o::trie::makeTrie(entries, fEncodedData.bytes());
12101
12102 // align to pointer size
12103 fEncodedData.pad_to_size(sizeof(pint_t));
12104 }
12105
12106
12107
12108
12109
12110 }; // namespace executable
12111 }; // namespace mach_o
12112
12113
12114 #endif // __EXECUTABLE_MACH_O__