1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2005-2009 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25 #ifndef __EXECUTABLE_MACH_O__
26 #define __EXECUTABLE_MACH_O__
27
28 #include <stdint.h>
29 #include <stddef.h>
30 #include <fcntl.h>
31 #include <sys/time.h>
32 #include <uuid/uuid.h>
33 #include <mach/i386/thread_status.h>
34 #include <mach/ppc/thread_status.h>
35 #include <CommonCrypto/CommonDigest.h>
36
37 #include <vector>
38 #include <algorithm>
39 #include <map>
40 #include <set>
41 #include <ext/hash_map>
42
43 #include "ObjectFile.h"
44 #include "ExecutableFile.h"
45 #include "Options.h"
46
47 #include "MachOFileAbstraction.hpp"
48 #include "MachOTrie.hpp"
49
50
51 //
52 //
53 // To implement architecture xxx, you must write template specializations for the following methods:
54 // MachHeaderAtom<xxx>::setHeaderInfo()
55 // ThreadsLoadCommandsAtom<xxx>::getSize()
56 // ThreadsLoadCommandsAtom<xxx>::copyRawContent()
57 // Writer<xxx>::addObjectRelocs()
58 // Writer<xxx>::fixUpReferenceRelocatable()
59 // Writer<xxx>::fixUpReferenceFinal()
60 // Writer<xxx>::stubableReference()
61 // Writer<xxx>::weakImportReferenceKind()
62 // Writer<xxx>::GOTReferenceKind()
63 //
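// For illustration only (not part of the ld64 sources): for a hypothetical
// architecture abstraction named "xyz" (it would live in MachOFileAbstraction.hpp
// alongside the existing ones), the required specializations would be declared
// roughly as follows:
//
//   template <> void     MachHeaderAtom<xyz>::setHeaderInfo(macho_header<xyz::P>& header) const;
//   template <> uint64_t ThreadsLoadCommandsAtom<xyz>::getSize() const;
//   template <> void     ThreadsLoadCommandsAtom<xyz>::copyRawContent(uint8_t buffer[]) const;
//   template <> uint32_t Writer<xyz>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref);
//   template <> bool     Writer<xyz>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref);
//   template <> bool     Writer<xyz>::weakImportReferenceKind(uint8_t kind);
//   template <> bool     Writer<xyz>::GOTReferenceKind(uint8_t kind);
//   // ...plus the two fixUpReference*() methods listed above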
64
65
66 namespace mach_o {
67 namespace executable {
68
69 // forward references
70 template <typename A> class WriterAtom;
71 template <typename A> class PageZeroAtom;
72 template <typename A> class CustomStackAtom;
73 template <typename A> class MachHeaderAtom;
74 template <typename A> class SegmentLoadCommandsAtom;
75 template <typename A> class EncryptionLoadCommandsAtom;
76 template <typename A> class SymbolTableLoadCommandsAtom;
77 template <typename A> class DyldInfoLoadCommandsAtom;
78 template <typename A> class ThreadsLoadCommandsAtom;
79 template <typename A> class DylibIDLoadCommandsAtom;
80 template <typename A> class RoutinesLoadCommandsAtom;
81 template <typename A> class DyldLoadCommandsAtom;
82 template <typename A> class UUIDLoadCommandAtom;
83 template <typename A> class LinkEditAtom;
84 template <typename A> class SectionRelocationsLinkEditAtom;
85 template <typename A> class CompressedRebaseInfoLinkEditAtom;
86 template <typename A> class CompressedBindingInfoLinkEditAtom;
87 template <typename A> class CompressedWeakBindingInfoLinkEditAtom;
88 template <typename A> class CompressedLazyBindingInfoLinkEditAtom;
89 template <typename A> class CompressedExportInfoLinkEditAtom;
90 template <typename A> class LocalRelocationsLinkEditAtom;
91 template <typename A> class ExternalRelocationsLinkEditAtom;
92 template <typename A> class SymbolTableLinkEditAtom;
93 template <typename A> class SegmentSplitInfoLoadCommandsAtom;
94 template <typename A> class SegmentSplitInfoContentAtom;
95 template <typename A> class IndirectTableLinkEditAtom;
96 template <typename A> class ModuleInfoLinkEditAtom;
97 template <typename A> class StringsLinkEditAtom;
98 template <typename A> class LoadCommandsPaddingAtom;
99 template <typename A> class UnwindInfoAtom;
100 template <typename A> class StubAtom;
101 template <typename A> class StubHelperAtom;
102 template <typename A> class ClassicStubHelperAtom;
103 template <typename A> class HybridStubHelperAtom;
104 template <typename A> class HybridStubHelperHelperAtom;
105 template <typename A> class FastStubHelperAtom;
106 template <typename A> class FastStubHelperHelperAtom;
107 template <typename A> class LazyPointerAtom;
108 template <typename A> class NonLazyPointerAtom;
109 template <typename A> class DylibLoadCommandsAtom;
110 template <typename A> class BranchIslandAtom;
111
112
113 // SectionInfo should be nested inside Writer, but I can't figure out how to make the type accessible to the Atom classes
114 class SectionInfo : public ObjectFile::Section {
115 public:
116 SectionInfo() : fFileOffset(0), fSize(0), fRelocCount(0), fRelocOffset(0),
117 fIndirectSymbolOffset(0), fAlignment(0), fAllLazyPointers(false),
118 fAllLazyDylibPointers(false),fAllNonLazyPointers(false), fAllStubs(false),
119 fAllSelfModifyingStubs(false), fAllStubHelpers(false),
120 fAllZeroFill(false), fVirtualSection(false),
121 fHasTextLocalRelocs(false), fHasTextExternalRelocs(false)
122 { fSegmentName[0] = '\0'; fSectionName[0] = '\0'; }
123 void setIndex(unsigned int index) { fIndex=index; }
124 std::vector<ObjectFile::Atom*> fAtoms;
125 char fSegmentName[20];
126 char fSectionName[20];
127 uint64_t fFileOffset;
128 uint64_t fSize;
129 uint32_t fRelocCount;
130 uint32_t fRelocOffset;
131 uint32_t fIndirectSymbolOffset;
132 uint8_t fAlignment;
133 bool fAllLazyPointers;
134 bool fAllLazyDylibPointers;
135 bool fAllNonLazyPointers;
136 bool fAllStubs;
137 bool fAllSelfModifyingStubs;
138 bool fAllStubHelpers;
139 bool fAllZeroFill;
140 bool fVirtualSection;
141 bool fHasTextLocalRelocs;
142 bool fHasTextExternalRelocs;
143 };
144
145 // SegmentInfo should be nested inside Writer, but I can't figure out how to make the type accessible to the Atom classes
146 class SegmentInfo
147 {
148 public:
149 SegmentInfo(uint64_t pageSize) : fInitProtection(0), fMaxProtection(0), fFileOffset(0), fFileSize(0),
150 fBaseAddress(0), fSize(0), fPageSize(pageSize), fFixedAddress(false),
151 fIndependentAddress(false), fHasLoadCommand(true) { fName[0] = '\0'; }
152 std::vector<class SectionInfo*> fSections;
153 char fName[20];
154 uint32_t fInitProtection;
155 uint32_t fMaxProtection;
156 uint64_t fFileOffset;
157 uint64_t fFileSize;
158 uint64_t fBaseAddress;
159 uint64_t fSize;
160 uint64_t fPageSize;
161 bool fFixedAddress;
162 bool fIndependentAddress;
163 bool fHasLoadCommand;
164 };
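// Illustrative sketch only (hypothetical values, not code from this file): the
// Writer creates one SegmentInfo per Mach-O segment and hangs SectionInfo
// objects off of it, e.g. for a 4KB-page __TEXT segment:
//
//   SegmentInfo* textSeg = new SegmentInfo(4096);
//   strcpy(textSeg->fName, "__TEXT");
//   textSeg->fInitProtection = VM_PROT_READ | VM_PROT_EXECUTE;   // assumed protections
//   SectionInfo* textSect = new SectionInfo();
//   strcpy(textSect->fSegmentName, "__TEXT");
//   strcpy(textSect->fSectionName, "__text");
//   textSect->setIndex(1);
//   textSeg->fSections.push_back(textSect);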
165
166
167 struct RebaseInfo {
168 RebaseInfo(uint8_t t, uint64_t addr) : fType(t), fAddress(addr) {}
169 uint8_t fType;
170 uint64_t fAddress;
171 // for sorting
172 int operator<(const RebaseInfo& rhs) const {
173 // sort by type, then address
174 if ( this->fType != rhs.fType )
175 return (this->fType < rhs.fType );
176 return (this->fAddress < rhs.fAddress );
177 }
178 };
179
180 struct BindingInfo {
181 BindingInfo(uint8_t t, int ord, const char* sym, bool weak_import, uint64_t addr, int64_t addend)
182 : fType(t), fFlags(weak_import ? BIND_SYMBOL_FLAGS_WEAK_IMPORT : 0 ), fLibraryOrdinal(ord),
183 fSymbolName(sym), fAddress(addr), fAddend(addend) {}
184 BindingInfo(uint8_t t, const char* sym, bool non_weak_definition, uint64_t addr, int64_t addend)
185 : fType(t), fFlags(non_weak_definition ? BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION : 0 ), fLibraryOrdinal(0),
186 fSymbolName(sym), fAddress(addr), fAddend(addend) {}
187 uint8_t fType;
188 uint8_t fFlags;
189 int fLibraryOrdinal;
190 const char* fSymbolName;
191 uint64_t fAddress;
192 int64_t fAddend;
193
194 // for sorting
195 int operator<(const BindingInfo& rhs) const {
196 // sort by library, symbol, type, then address
197 if ( this->fLibraryOrdinal != rhs.fLibraryOrdinal )
198 return (this->fLibraryOrdinal < rhs.fLibraryOrdinal );
199 if ( this->fSymbolName != rhs.fSymbolName )
200 return ( strcmp(this->fSymbolName, rhs.fSymbolName) < 0 );
201 if ( this->fType != rhs.fType )
202 return (this->fType < rhs.fType );
203 return (this->fAddress < rhs.fAddress );
204 }
205 };
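// Both structs above provide operator< so that the fRebaseInfo/fBindingInfo
// vectors collected by the Writer can be sorted before they are encoded into
// the compressed LINKEDIT streams.  Illustrative sketch (REBASE_TYPE_POINTER
// is the standard dyld-info constant from <mach-o/loader.h>):
//
//   std::vector<RebaseInfo> rebases;
//   rebases.push_back(RebaseInfo(REBASE_TYPE_POINTER, 0x2000));
//   rebases.push_back(RebaseInfo(REBASE_TYPE_POINTER, 0x1000));
//   std::sort(rebases.begin(), rebases.end());   // now ordered by type, then address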
206
207
208 class ByteStream {
209 private:
210 std::vector<uint8_t> fData;
211 public:
212 std::vector<uint8_t>& bytes() { return fData; }
213 unsigned long size() const { return fData.size(); }
214 void reserve(unsigned long l) { fData.reserve(l); }
215 const uint8_t* start() const { return &fData[0]; }
216
217 void append_uleb128(uint64_t value) {
218 uint8_t byte;
219 do {
220 byte = value & 0x7F;
221 value &= ~0x7F;
222 if ( value != 0 )
223 byte |= 0x80;
224 fData.push_back(byte);
225 value = value >> 7;
226 } while( byte >= 0x80 );
227 }
228
229 void append_sleb128(int64_t value) {
230 bool isNeg = ( value < 0 );
231 uint8_t byte;
232 bool more;
233 do {
234 byte = value & 0x7F;
235 value = value >> 7;
236 if ( isNeg )
237 more = ( (value != -1) || ((byte & 0x40) == 0) );
238 else
239 more = ( (value != 0) || ((byte & 0x40) != 0) );
240 if ( more )
241 byte |= 0x80;
242 fData.push_back(byte);
243 }
244 while( more );
245 }
246
247 void append_string(const char* str) {
248 for (const char* s = str; *s != '\0'; ++s)
249 fData.push_back(*s);
250 fData.push_back('\0');
251 }
252
253 void append_byte(uint8_t byte) {
254 fData.push_back(byte);
255 }
256
257 static unsigned int uleb128_size(uint64_t value) {
258 uint32_t result = 0;
259 do {
260 value = value >> 7;
261 ++result;
262 } while ( value != 0 );
263 return result;
264 }
265
266 void pad_to_size(unsigned int alignment) {
267 while ( (fData.size() % alignment) != 0 )
268 fData.push_back(0);
269 }
270 };
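// Usage sketch (illustrative only): the compressed rebase/binding/export atoms
// below accumulate their output in a ByteStream.  For example, 624485 encodes
// as the three ULEB128 bytes E5 8E 26:
//
//   ByteStream s;
//   s.append_uleb128(624485);          // appends 0xE5, 0x8E, 0x26
//   s.append_string("_main");          // appends '_','m','a','i','n','\0'
//   s.pad_to_size(sizeof(uint64_t));   // zero-pads so size() is a multiple of 8
//   // ByteStream::uleb128_size(624485) == 3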
271
272
273 template <typename A>
274 class Writer : public ExecutableFile::Writer
275 {
276 public:
277 Writer(const char* path, Options& options, std::vector<ExecutableFile::DyLibUsed>& dynamicLibraries);
278 virtual ~Writer();
279
280 virtual const char* getPath() { return fFilePath; }
281 virtual time_t getModificationTime() { return 0; }
282 virtual DebugInfoKind getDebugInfoKind() { return ObjectFile::Reader::kDebugInfoNone; }
283 virtual std::vector<class ObjectFile::Atom*>& getAtoms() { return fWriterSynthesizedAtoms; }
284 virtual std::vector<class ObjectFile::Atom*>* getJustInTimeAtomsFor(const char* name) { return NULL; }
285 virtual std::vector<Stab>* getStabs() { return NULL; }
286
287 virtual ObjectFile::Atom& makeObjcInfoAtom(ObjectFile::Reader::ObjcConstraint objcConstraint,
288 bool objcReplacementClasses);
289 virtual class ObjectFile::Atom* getUndefinedProxyAtom(const char* name);
290 virtual void addSynthesizedAtoms(const std::vector<class ObjectFile::Atom*>& existingAtoms,
291 class ObjectFile::Atom* dyldClassicHelperAtom,
292 class ObjectFile::Atom* dyldCompressedHelperAtom,
293 class ObjectFile::Atom* dyldLazyDylibHelperAtom,
294 bool biggerThanTwoGigs,
295 uint32_t dylibSymbolCount,
296 std::vector<class ObjectFile::Atom*>& newAtoms);
297 virtual uint64_t write(std::vector<class ObjectFile::Atom*>& atoms,
298 std::vector<class ObjectFile::Reader::Stab>& stabs,
299 class ObjectFile::Atom* entryPointAtom,
300 bool createUUID, bool canScatter,
301 ObjectFile::Reader::CpuConstraint cpuConstraint,
302 std::set<const class ObjectFile::Atom*>& atomsThatOverrideWeak,
303 bool hasExternalWeakDefinitions);
304
305 private:
306 typedef typename A::P P;
307 typedef typename A::P::uint_t pint_t;
308
309 enum RelocKind { kRelocNone, kRelocInternal, kRelocExternal };
310
311 void assignFileOffsets();
312 void synthesizeStubs(const std::vector<class ObjectFile::Atom*>& existingAtoms,
313 std::vector<class ObjectFile::Atom*>& newAtoms);
314 void synthesizeKextGOT(const std::vector<class ObjectFile::Atom*>& existingAtoms,
315 std::vector<class ObjectFile::Atom*>& newAtoms);
316 void createSplitSegContent();
317 void synthesizeUnwindInfoTable();
318 void insertDummyStubs();
319 void partitionIntoSections();
320 bool addBranchIslands();
321 bool createBranchIslands();
322 bool isBranchThatMightNeedIsland(uint8_t kind);
323 uint32_t textSizeWhenMightNeedBranchIslands();
324 uint32_t maxDistanceBetweenIslands();
325 void adjustLoadCommandsAndPadding();
326 void createDynamicLinkerCommand();
327 void createDylibCommands();
328 void buildLinkEdit();
329 const char* getArchString();
330 void writeMap();
331 uint64_t writeAtoms();
332 void writeNoOps(int fd, uint32_t from, uint32_t to);
333 void copyNoOps(uint8_t* from, uint8_t* to);
334 bool segmentsCanSplitApart(const ObjectFile::Atom& from, const ObjectFile::Atom& to);
335 void addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref);
336 void collectExportedAndImportedAndLocalAtoms();
337 void setNlistRange(std::vector<class ObjectFile::Atom*>& atoms, uint32_t startIndex, uint32_t count);
338 void addLocalLabel(ObjectFile::Atom& atom, uint32_t offsetInAtom, const char* name);
339 void addGlobalLabel(ObjectFile::Atom& atom, uint32_t offsetInAtom, const char* name);
340 void buildSymbolTable();
341 bool stringsNeedLabelsInObjects();
342 const char* symbolTableName(const ObjectFile::Atom* atom);
343 void setExportNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry);
344 void setImportNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry);
345 void setLocalNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry);
346 void copyNlistRange(const std::vector<macho_nlist<P> >& entries, uint32_t startIndex);
347 uint64_t getAtomLoadAddress(const ObjectFile::Atom* atom);
348 uint8_t ordinalForLibrary(ObjectFile::Reader* file);
349 bool targetRequiresWeakBinding(const ObjectFile::Atom& target);
350 int compressedOrdinalForImortedAtom(ObjectFile::Atom* target);
351 bool shouldExport(const ObjectFile::Atom& atom) const;
352 void buildFixups();
353 void adjustLinkEditSections();
354 void buildObjectFileFixups();
355 void buildExecutableFixups();
356 bool preboundLazyPointerType(uint8_t* type);
357 uint64_t relocAddressInFinalLinkedImage(uint64_t address, const ObjectFile::Atom* atom) const;
358 void fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const;
359 void fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const;
360 void fixUpReference_powerpc(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom,
361 uint8_t buffer[], bool finalLinkedImage) const;
362 uint32_t symbolIndex(ObjectFile::Atom& atom);
363 bool makesExternalRelocatableReference(ObjectFile::Atom& target) const;
364 uint32_t addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref);
365 uint32_t addObjectRelocs_powerpc(ObjectFile::Atom* atom, ObjectFile::Reference* ref);
366 uint8_t getRelocPointerSize();
367 uint64_t maxAddress();
368 bool stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref);
369 bool GOTReferenceKind(uint8_t kind);
370 bool optimizableGOTReferenceKind(uint8_t kind);
371 bool weakImportReferenceKind(uint8_t kind);
372 unsigned int collectStabs();
373 uint64_t valueForStab(const ObjectFile::Reader::Stab& stab);
374 uint32_t stringOffsetForStab(const ObjectFile::Reader::Stab& stab);
375 uint8_t sectionIndexForStab(const ObjectFile::Reader::Stab& stab);
376 void addStabs(uint32_t startIndex);
377 RelocKind relocationNeededInFinalLinkedImage(const ObjectFile::Atom& target) const;
378 bool illegalRelocInFinalLinkedImage(const ObjectFile::Reference&);
379 bool generatesLocalTextReloc(const ObjectFile::Reference&, const ObjectFile::Atom& atom, SectionInfo* curSection);
380 bool generatesExternalTextReloc(const ObjectFile::Reference&, const ObjectFile::Atom& atom, SectionInfo* curSection);
381 bool mightNeedPadSegment();
382 void scanForAbsoluteReferences();
383 bool needsModuleTable();
384 void optimizeDylibReferences();
385 bool indirectSymbolInRelocatableIsLocal(const ObjectFile::Reference* ref) const;
386
387 struct DirectLibrary {
388 class ObjectFile::Reader* fLibrary;
389 bool fWeak;
390 bool fReExport;
391 };
392
393 friend class WriterAtom<A>;
394 friend class PageZeroAtom<A>;
395 friend class CustomStackAtom<A>;
396 friend class MachHeaderAtom<A>;
397 friend class SegmentLoadCommandsAtom<A>;
398 friend class EncryptionLoadCommandsAtom<A>;
399 friend class SymbolTableLoadCommandsAtom<A>;
400 friend class DyldInfoLoadCommandsAtom<A>;
401 friend class ThreadsLoadCommandsAtom<A>;
402 friend class DylibIDLoadCommandsAtom<A>;
403 friend class RoutinesLoadCommandsAtom<A>;
404 friend class DyldLoadCommandsAtom<A>;
405 friend class UUIDLoadCommandAtom<A>;
406 friend class LinkEditAtom<A>;
407 friend class SectionRelocationsLinkEditAtom<A>;
408 friend class CompressedRebaseInfoLinkEditAtom<A>;
409 friend class CompressedBindingInfoLinkEditAtom<A>;
410 friend class CompressedWeakBindingInfoLinkEditAtom<A>;
411 friend class CompressedLazyBindingInfoLinkEditAtom<A>;
412 friend class CompressedExportInfoLinkEditAtom<A>;
413 friend class LocalRelocationsLinkEditAtom<A>;
414 friend class ExternalRelocationsLinkEditAtom<A>;
415 friend class SymbolTableLinkEditAtom<A>;
416 friend class SegmentSplitInfoLoadCommandsAtom<A>;
417 friend class SegmentSplitInfoContentAtom<A>;
418 friend class IndirectTableLinkEditAtom<A>;
419 friend class ModuleInfoLinkEditAtom<A>;
420 friend class StringsLinkEditAtom<A>;
421 friend class LoadCommandsPaddingAtom<A>;
422 friend class UnwindInfoAtom<A>;
423 friend class StubAtom<A>;
424 friend class StubHelperAtom<A>;
425 friend class ClassicStubHelperAtom<A>;
426 friend class HybridStubHelperAtom<A>;
427 friend class FastStubHelperAtom<A>;
428 friend class FastStubHelperHelperAtom<A>;
429 friend class HybridStubHelperHelperAtom<A>;
430 friend class LazyPointerAtom<A>;
431 friend class NonLazyPointerAtom<A>;
432 friend class DylibLoadCommandsAtom<A>;
433 friend class BranchIslandAtom<A>;
434
435 const char* fFilePath;
436 Options& fOptions;
437 std::vector<class ObjectFile::Atom*>* fAllAtoms;
438 std::vector<class ObjectFile::Reader::Stab>* fStabs;
439 std::set<const class ObjectFile::Atom*>* fRegularDefAtomsThatOverrideADylibsWeakDef;
440 class SectionInfo* fLoadCommandsSection;
441 class SegmentInfo* fLoadCommandsSegment;
442 class MachHeaderAtom<A>* fMachHeaderAtom;
443 class EncryptionLoadCommandsAtom<A>* fEncryptionLoadCommand;
444 class SegmentLoadCommandsAtom<A>* fSegmentCommands;
445 class SymbolTableLoadCommandsAtom<A>* fSymbolTableCommands;
446 class LoadCommandsPaddingAtom<A>* fHeaderPadding;
447 class UnwindInfoAtom<A>* fUnwindInfoAtom;
448 class UUIDLoadCommandAtom<A>* fUUIDAtom;
449 std::vector<class ObjectFile::Atom*> fWriterSynthesizedAtoms;
450 std::vector<SegmentInfo*> fSegmentInfos;
451 class SegmentInfo* fPadSegmentInfo;
452 class ObjectFile::Atom* fEntryPoint;
453 class ObjectFile::Atom* fDyldClassicHelperAtom;
454 class ObjectFile::Atom* fDyldCompressedHelperAtom;
455 class ObjectFile::Atom* fDyldLazyDylibHelper;
456 std::map<class ObjectFile::Reader*, DylibLoadCommandsAtom<A>*> fLibraryToLoadCommand;
457 std::map<class ObjectFile::Reader*, uint32_t> fLibraryToOrdinal;
458 std::map<class ObjectFile::Reader*, class ObjectFile::Reader*> fLibraryAliases;
459 std::set<class ObjectFile::Reader*> fForcedWeakImportReaders;
460 std::vector<class ObjectFile::Atom*> fExportedAtoms;
461 std::vector<class ObjectFile::Atom*> fImportedAtoms;
462 std::vector<class ObjectFile::Atom*> fLocalSymbolAtoms;
463 std::vector<macho_nlist<P> > fLocalExtraLabels;
464 std::vector<macho_nlist<P> > fGlobalExtraLabels;
465 std::map<ObjectFile::Atom*, uint32_t> fAtomToSymbolIndex;
466 class SectionRelocationsLinkEditAtom<A>* fSectionRelocationsAtom;
467 class CompressedRebaseInfoLinkEditAtom<A>* fCompressedRebaseInfoAtom;
468 class CompressedBindingInfoLinkEditAtom<A>* fCompressedBindingInfoAtom;
469 class CompressedWeakBindingInfoLinkEditAtom<A>* fCompressedWeakBindingInfoAtom;
470 class CompressedLazyBindingInfoLinkEditAtom<A>* fCompressedLazyBindingInfoAtom;
471 class CompressedExportInfoLinkEditAtom<A>* fCompressedExportInfoAtom;
472 class LocalRelocationsLinkEditAtom<A>* fLocalRelocationsAtom;
473 class ExternalRelocationsLinkEditAtom<A>* fExternalRelocationsAtom;
474 class SymbolTableLinkEditAtom<A>* fSymbolTableAtom;
475 class SegmentSplitInfoContentAtom<A>* fSplitCodeToDataContentAtom;
476 class IndirectTableLinkEditAtom<A>* fIndirectTableAtom;
477 class ModuleInfoLinkEditAtom<A>* fModuleInfoAtom;
478 class StringsLinkEditAtom<A>* fStringsAtom;
479 class PageZeroAtom<A>* fPageZeroAtom;
480 class NonLazyPointerAtom<A>* fFastStubGOTAtom;
481 macho_nlist<P>* fSymbolTable;
482 std::vector<macho_relocation_info<P> > fSectionRelocs;
483 std::vector<macho_relocation_info<P> > fInternalRelocs;
484 std::vector<macho_relocation_info<P> > fExternalRelocs;
485 std::vector<RebaseInfo> fRebaseInfo;
486 std::vector<BindingInfo> fBindingInfo;
487 std::vector<BindingInfo> fWeakBindingInfo;
488 std::map<const ObjectFile::Atom*,ObjectFile::Atom*> fStubsMap;
489 std::map<ObjectFile::Atom*,ObjectFile::Atom*> fGOTMap;
490 std::vector<class StubAtom<A>*> fAllSynthesizedStubs;
491 std::vector<ObjectFile::Atom*> fAllSynthesizedStubHelpers;
492 std::vector<class LazyPointerAtom<A>*> fAllSynthesizedLazyPointers;
493 std::vector<class LazyPointerAtom<A>*> fAllSynthesizedLazyDylibPointers;
494 std::vector<class NonLazyPointerAtom<A>*> fAllSynthesizedNonLazyPointers;
495 uint32_t fSymbolTableCount;
496 uint32_t fSymbolTableStabsCount;
497 uint32_t fSymbolTableStabsStartIndex;
498 uint32_t fSymbolTableLocalCount;
499 uint32_t fSymbolTableLocalStartIndex;
500 uint32_t fSymbolTableExportCount;
501 uint32_t fSymbolTableExportStartIndex;
502 uint32_t fSymbolTableImportCount;
503 uint32_t fSymbolTableImportStartIndex;
504 uint32_t fLargestAtomSize;
505 uint32_t fDylibSymbolCountUpperBound;
506 bool fEmitVirtualSections;
507 bool fHasWeakExports;
508 bool fReferencesWeakImports;
509 bool fCanScatter;
510 bool fWritableSegmentPastFirst4GB;
511 bool fNoReExportedDylibs;
512 bool fBiggerThanTwoGigs;
513 bool fSlideable;
514 bool fHasThumbBranches;
515 std::map<const ObjectFile::Atom*,bool> fWeakImportMap;
516 std::set<const ObjectFile::Reader*> fDylibReadersWithNonWeakImports;
517 std::set<const ObjectFile::Reader*> fDylibReadersWithWeakImports;
518 SegmentInfo* fFirstWritableSegment;
519 ObjectFile::Reader::CpuConstraint fCpuConstraint;
520 uint32_t fAnonNameIndex;
521 };
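// Call-sequence sketch (illustrative; the real driver lives outside this file
// and the values below are placeholders).  Assuming the x86_64 abstraction from
// MachOFileAbstraction.hpp, a client constructs a Writer for the output path,
// lets it add its synthesized atoms (mach header, load commands, stubs, ...),
// then asks it to lay out and write the file:
//
//   Writer<x86_64> writer("a.out", options, dynamicLibraries);
//   writer.addSynthesizedAtoms(allAtoms, classicHelper, compressedHelper,
//                              lazyDylibHelper, false, dylibSymbolCount, newAtoms);
//   // caller merges newAtoms into allAtoms, then:
//   uint64_t result = writer.write(allAtoms, stabs, entryPointAtom,
//                                  true /*createUUID*/, false /*canScatter*/,
//                                  cpuConstraint, atomsThatOverrideWeak,
//                                  hasExternalWeakDefinitions);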
522
523
524 class Segment : public ObjectFile::Segment
525 {
526 public:
527 Segment(const char* name, bool readable, bool writable, bool executable, bool fixedAddress)
528 : fName(name), fReadable(readable), fWritable(writable), fExecutable(executable), fFixedAddress(fixedAddress) {}
529 virtual const char* getName() const { return fName; }
530 virtual bool isContentReadable() const { return fReadable; }
531 virtual bool isContentWritable() const { return fWritable; }
532 virtual bool isContentExecutable() const { return fExecutable; }
533 virtual bool hasFixedAddress() const { return fFixedAddress; }
534
535 static Segment fgTextSegment;
536 static Segment fgPageZeroSegment;
537 static Segment fgLinkEditSegment;
538 static Segment fgStackSegment;
539 static Segment fgImportSegment;
540 static Segment fgROImportSegment;
541 static Segment fgDataSegment;
542 static Segment fgObjCSegment;
543 static Segment fgHeaderSegment;
544
545
546 private:
547 const char* fName;
548 const bool fReadable;
549 const bool fWritable;
550 const bool fExecutable;
551 const bool fFixedAddress;
552 };
553
554 Segment Segment::fgPageZeroSegment("__PAGEZERO", false, false, false, true);
555 Segment Segment::fgTextSegment("__TEXT", true, false, true, false);
556 Segment Segment::fgLinkEditSegment("__LINKEDIT", true, false, false, false);
557 Segment Segment::fgStackSegment("__UNIXSTACK", true, true, false, true);
558 Segment Segment::fgImportSegment("__IMPORT", true, true, true, false);
559 Segment Segment::fgROImportSegment("__IMPORT", true, false, true, false);
560 Segment Segment::fgDataSegment("__DATA", true, true, false, false);
561 Segment Segment::fgObjCSegment("__OBJC", true, true, false, false);
562 Segment Segment::fgHeaderSegment("__HEADER", true, false, true, false);
563
564
565 template <typename A>
566 class WriterAtom : public ObjectFile::Atom
567 {
568 public:
569 enum Kind { zeropage, machHeaderApp, machHeaderDylib, machHeaderBundle, machHeaderObject, loadCommands, undefinedProxy };
570 WriterAtom(Writer<A>& writer, Segment& segment) : fWriter(writer), fSegment(segment) { }
571
572 virtual ObjectFile::Reader* getFile() const { return &fWriter; }
573 virtual bool getTranslationUnitSource(const char** dir, const char** name) const { return false; }
574 virtual const char* getName() const { return NULL; }
575 virtual const char* getDisplayName() const { return this->getName(); }
576 virtual Scope getScope() const { return ObjectFile::Atom::scopeTranslationUnit; }
577 virtual DefinitionKind getDefinitionKind() const { return kRegularDefinition; }
578 virtual SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableNotIn; }
579 virtual bool dontDeadStrip() const { return true; }
580 virtual bool isZeroFill() const { return false; }
581 virtual bool isThumb() const { return false; }
582 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return fgEmptyReferenceList; }
583 virtual bool mustRemainInSection() const { return true; }
584 virtual ObjectFile::Segment& getSegment() const { return fSegment; }
585 virtual ObjectFile::Atom& getFollowOnAtom() const { return *((ObjectFile::Atom*)NULL); }
586 virtual uint32_t getOrdinal() const { return 0; }
587 virtual std::vector<ObjectFile::LineInfo>* getLineInfo() const { return NULL; }
588 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(2); }
589 virtual void copyRawContent(uint8_t buffer[]) const { throw "don't use copyRawContent"; }
590 virtual void setScope(Scope) { }
591
592
593 protected:
594 virtual ~WriterAtom() {}
595 typedef typename A::P P;
596 typedef typename A::P::E E;
597
598 static Segment& headerSegment(Writer<A>& writer) { return (writer.fOptions.outputKind()==Options::kPreload)
599 ? Segment::fgHeaderSegment : Segment::fgTextSegment; }
600
601 static std::vector<ObjectFile::Reference*> fgEmptyReferenceList;
602
603 Writer<A>& fWriter;
604 Segment& fSegment;
605 };
606
607 template <typename A> std::vector<ObjectFile::Reference*> WriterAtom<A>::fgEmptyReferenceList;
608
609
610 template <typename A>
611 class PageZeroAtom : public WriterAtom<A>
612 {
613 public:
614 PageZeroAtom(Writer<A>& writer) : WriterAtom<A>(writer, Segment::fgPageZeroSegment),
615 fSize(fWriter.fOptions.zeroPageSize()) {}
616 virtual const char* getDisplayName() const { return "page zero content"; }
617 virtual bool isZeroFill() const { return true; }
618 virtual uint64_t getSize() const { return fSize; }
619 virtual const char* getSectionName() const { return "._zeropage"; }
620 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(12); }
621 void setSize(uint64_t size) { fSize = size; }
622 private:
623 using WriterAtom<A>::fWriter;
624 typedef typename A::P P;
625 uint64_t fSize;
626 };
627
628
629 template <typename A>
630 class DsoHandleAtom : public WriterAtom<A>
631 {
632 public:
633 DsoHandleAtom(Writer<A>& writer) : WriterAtom<A>(writer, Segment::fgTextSegment) {}
634 virtual const char* getName() const { return "___dso_handle"; }
635 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
636 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableNotIn; }
637 virtual uint64_t getSize() const { return 0; }
638 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(12); }
639 virtual const char* getSectionName() const { return "._mach_header"; }
640 virtual void copyRawContent(uint8_t buffer[]) const {}
641 };
642
643
644 template <typename A>
645 class MachHeaderAtom : public WriterAtom<A>
646 {
647 public:
648 MachHeaderAtom(Writer<A>& writer) : WriterAtom<A>(writer, headerSegment(writer)) {}
649 virtual const char* getName() const;
650 virtual const char* getDisplayName() const;
651 virtual ObjectFile::Atom::Scope getScope() const;
652 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const;
653 virtual uint64_t getSize() const { return sizeof(macho_header<typename A::P>); }
654 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(12); }
655 virtual const char* getSectionName() const { return "._mach_header"; }
656 virtual uint32_t getOrdinal() const { return 1; }
657 virtual void copyRawContent(uint8_t buffer[]) const;
658 private:
659 using WriterAtom<A>::fWriter;
660 typedef typename A::P P;
661 void setHeaderInfo(macho_header<typename A::P>& header) const;
662 };
663
664 template <typename A>
665 class CustomStackAtom : public WriterAtom<A>
666 {
667 public:
668 CustomStackAtom(Writer<A>& writer);
669 virtual const char* getDisplayName() const { return "custom stack content"; }
670 virtual bool isZeroFill() const { return true; }
671 virtual uint64_t getSize() const { return fWriter.fOptions.customStackSize(); }
672 virtual const char* getSectionName() const { return "._stack"; }
673 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(12); }
674 private:
675 using WriterAtom<A>::fWriter;
676 typedef typename A::P P;
677 static bool stackGrowsDown();
678 };
679
680 template <typename A>
681 class LoadCommandAtom : public WriterAtom<A>
682 {
683 protected:
684 LoadCommandAtom(Writer<A>& writer) : WriterAtom<A>(writer, headerSegment(writer)), fOrdinal(fgCurrentOrdinal++) {}
685 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(log2(sizeof(typename A::P::uint_t))); }
686 virtual const char* getSectionName() const { return "._load_commands"; }
687 virtual uint32_t getOrdinal() const { return fOrdinal; }
688 static uint64_t alignedSize(uint64_t size);
689 protected:
690 uint32_t fOrdinal;
691 static uint32_t fgCurrentOrdinal;
692 };
693
694 template <typename A> uint32_t LoadCommandAtom<A>::fgCurrentOrdinal = 0;
695
696 template <typename A>
697 class SegmentLoadCommandsAtom : public LoadCommandAtom<A>
698 {
699 public:
700 SegmentLoadCommandsAtom(Writer<A>& writer)
701 : LoadCommandAtom<A>(writer), fCommandCount(0), fSize(0)
702 { writer.fSegmentCommands = this; }
703 virtual const char* getDisplayName() const { return "segment load commands"; }
704 virtual uint64_t getSize() const { return fSize; }
705 virtual void copyRawContent(uint8_t buffer[]) const;
706
707 void computeSize();
708 void setup();
709 unsigned int commandCount() { return fCommandCount; }
710 private:
711 using WriterAtom<A>::fWriter;
712 typedef typename A::P P;
713 unsigned int fCommandCount;
714 uint32_t fSize;
715 };
716
717
718 template <typename A>
719 class SymbolTableLoadCommandsAtom : public LoadCommandAtom<A>
720 {
721 public:
722 SymbolTableLoadCommandsAtom(Writer<A>&);
723 virtual const char* getDisplayName() const { return "symbol table load commands"; }
724 virtual uint64_t getSize() const;
725 virtual void copyRawContent(uint8_t buffer[]) const;
726 unsigned int commandCount();
727 void needDynamicTable();
728 private:
729 using WriterAtom<A>::fWriter;
730 typedef typename A::P P;
731 bool fNeedsDynamicSymbolTable;
732 macho_symtab_command<typename A::P> fSymbolTable;
733 macho_dysymtab_command<typename A::P> fDynamicSymbolTable;
734 };
735
736 template <typename A>
737 class ThreadsLoadCommandsAtom : public LoadCommandAtom<A>
738 {
739 public:
740 ThreadsLoadCommandsAtom(Writer<A>& writer)
741 : LoadCommandAtom<A>(writer) {}
742 virtual const char* getDisplayName() const { return "thread load commands"; }
743 virtual uint64_t getSize() const;
744 virtual void copyRawContent(uint8_t buffer[]) const;
745 private:
746 using WriterAtom<A>::fWriter;
747 typedef typename A::P P;
748 uint8_t* fBuffer;
749 uint32_t fBufferSize;
750 };
751
752 template <typename A>
753 class DyldLoadCommandsAtom : public LoadCommandAtom<A>
754 {
755 public:
756 DyldLoadCommandsAtom(Writer<A>& writer) : LoadCommandAtom<A>(writer) {}
757 virtual const char* getDisplayName() const { return "dyld load command"; }
758 virtual uint64_t getSize() const;
759 virtual void copyRawContent(uint8_t buffer[]) const;
760 private:
761 using WriterAtom<A>::fWriter;
762 typedef typename A::P P;
763 };
764
765 template <typename A>
766 class SegmentSplitInfoLoadCommandsAtom : public LoadCommandAtom<A>
767 {
768 public:
769 SegmentSplitInfoLoadCommandsAtom(Writer<A>& writer) : LoadCommandAtom<A>(writer) {}
770 virtual const char* getDisplayName() const { return "segment split info load command"; }
771 virtual uint64_t getSize() const;
772 virtual void copyRawContent(uint8_t buffer[]) const;
773 private:
774 using WriterAtom<A>::fWriter;
775 typedef typename A::P P;
776 };
777
778 template <typename A>
779 class AllowableClientLoadCommandsAtom : public LoadCommandAtom<A>
780 {
781 public:
782 AllowableClientLoadCommandsAtom(Writer<A>& writer, const char* client) :
783 LoadCommandAtom<A>(writer), clientString(client) {}
784 virtual const char* getDisplayName() const { return "allowable_client load command"; }
785 virtual uint64_t getSize() const;
786 virtual void copyRawContent(uint8_t buffer[]) const;
787 private:
788 using WriterAtom<A>::fWriter;
789 typedef typename A::P P;
790 const char* clientString;
791 };
792
793 template <typename A>
794 class DylibLoadCommandsAtom : public LoadCommandAtom<A>
795 {
796 public:
797 DylibLoadCommandsAtom(Writer<A>& writer, ExecutableFile::DyLibUsed& info)
798 : LoadCommandAtom<A>(writer), fInfo(info),
799 fOptimizedAway(false) { if (fInfo.options.fLazyLoad) this->fOrdinal += 256; }
800 virtual const char* getDisplayName() const { return "dylib load command"; }
801 virtual uint64_t getSize() const;
802 virtual void copyRawContent(uint8_t buffer[]) const;
803 virtual void optimizeAway() { fOptimizedAway = true; }
804 bool linkedWeak() { return fInfo.options.fWeakImport; }
805 private:
806 using WriterAtom<A>::fWriter;
807 typedef typename A::P P;
808 ExecutableFile::DyLibUsed fInfo;
809 bool fOptimizedAway;
810 };
811
812 template <typename A>
813 class DylibIDLoadCommandsAtom : public LoadCommandAtom<A>
814 {
815 public:
816 DylibIDLoadCommandsAtom(Writer<A>& writer) : LoadCommandAtom<A>(writer) {}
817 virtual const char* getDisplayName() const { return "dylib ID load command"; }
818 virtual uint64_t getSize() const;
819 virtual void copyRawContent(uint8_t buffer[]) const;
820 private:
821 using WriterAtom<A>::fWriter;
822 typedef typename A::P P;
823 };
824
825 template <typename A>
826 class RoutinesLoadCommandsAtom : public LoadCommandAtom<A>
827 {
828 public:
829 RoutinesLoadCommandsAtom(Writer<A>& writer) : LoadCommandAtom<A>(writer) {}
830 virtual const char* getDisplayName() const { return "routines load command"; }
831 virtual uint64_t getSize() const { return sizeof(macho_routines_command<typename A::P>); }
832 virtual void copyRawContent(uint8_t buffer[]) const;
833 private:
834 using WriterAtom<A>::fWriter;
835 typedef typename A::P P;
836 };
837
838 template <typename A>
839 class SubUmbrellaLoadCommandsAtom : public LoadCommandAtom<A>
840 {
841 public:
842 SubUmbrellaLoadCommandsAtom(Writer<A>& writer, const char* name)
843 : LoadCommandAtom<A>(writer), fName(name) {}
844 virtual const char* getDisplayName() const { return "sub-umbrella load command"; }
845 virtual uint64_t getSize() const;
846 virtual void copyRawContent(uint8_t buffer[]) const;
847 private:
848 typedef typename A::P P;
849 const char* fName;
850 };
851
852 template <typename A>
853 class SubLibraryLoadCommandsAtom : public LoadCommandAtom<A>
854 {
855 public:
856 SubLibraryLoadCommandsAtom(Writer<A>& writer, const char* nameStart, int nameLen)
857 : LoadCommandAtom<A>(writer), fNameStart(nameStart), fNameLength(nameLen) {}
858 virtual const char* getDisplayName() const { return "sub-library load command"; }
859 virtual uint64_t getSize() const;
860 virtual void copyRawContent(uint8_t buffer[]) const;
861 private:
862 using WriterAtom<A>::fWriter;
863 typedef typename A::P P;
864 const char* fNameStart;
865 int fNameLength;
866 };
867
868 template <typename A>
869 class UmbrellaLoadCommandsAtom : public LoadCommandAtom<A>
870 {
871 public:
872 UmbrellaLoadCommandsAtom(Writer<A>& writer, const char* name)
873 : LoadCommandAtom<A>(writer), fName(name) {}
874 virtual const char* getDisplayName() const { return "umbrella load command"; }
875 virtual uint64_t getSize() const;
876 virtual void copyRawContent(uint8_t buffer[]) const;
877 private:
878 using WriterAtom<A>::fWriter;
879 typedef typename A::P P;
880 const char* fName;
881 };
882
883 template <typename A>
884 class UUIDLoadCommandAtom : public LoadCommandAtom<A>
885 {
886 public:
887 UUIDLoadCommandAtom(Writer<A>& writer)
888 : LoadCommandAtom<A>(writer), fEmit(false) {}
889 virtual const char* getDisplayName() const { return "uuid load command"; }
890 virtual uint64_t getSize() const { return fEmit ? sizeof(macho_uuid_command<typename A::P>) : 0; }
891 virtual void copyRawContent(uint8_t buffer[]) const;
892 virtual void generate();
893 void setContent(const uint8_t uuid[16]);
894 const uint8_t* getUUID() { return fUUID; }
895 private:
896 using WriterAtom<A>::fWriter;
897 typedef typename A::P P;
898 uuid_t fUUID;
899 bool fEmit;
900 };
901
902
903 template <typename A>
904 class RPathLoadCommandsAtom : public LoadCommandAtom<A>
905 {
906 public:
907 RPathLoadCommandsAtom(Writer<A>& writer, const char* path)
908 : LoadCommandAtom<A>(writer), fPath(path) {}
909 virtual const char* getDisplayName() const { return "rpath load command"; }
910 virtual uint64_t getSize() const;
911 virtual void copyRawContent(uint8_t buffer[]) const;
912 private:
913 using WriterAtom<A>::fWriter;
914 typedef typename A::P P;
915 const char* fPath;
916 };
917
918 template <typename A>
919 class EncryptionLoadCommandsAtom : public LoadCommandAtom<A>
920 {
921 public:
922 EncryptionLoadCommandsAtom(Writer<A>& writer)
923 : LoadCommandAtom<A>(writer), fStartOffset(0),
924 fEndOffset(0) {}
925 virtual const char* getDisplayName() const { return "encryption info load command"; }
926 virtual uint64_t getSize() const { return sizeof(macho_encryption_info_command<typename A::P>); }
927 virtual void copyRawContent(uint8_t buffer[]) const;
928 void setStartEncryptionOffset(uint32_t off) { fStartOffset = off; }
929 void setEndEncryptionOffset(uint32_t off) { fEndOffset = off; }
930 private:
931 using WriterAtom<A>::fWriter;
932 typedef typename A::P P;
933 uint32_t fStartOffset;
934 uint32_t fEndOffset;
935 };
936
937 template <typename A>
938 class DyldInfoLoadCommandsAtom : public LoadCommandAtom<A>
939 {
940 public:
941 DyldInfoLoadCommandsAtom(Writer<A>& writer)
942 : LoadCommandAtom<A>(writer) {}
943 virtual const char* getDisplayName() const { return "dyld info load command"; }
944 virtual uint64_t getSize() const { return sizeof(macho_dyld_info_command<typename A::P>); }
945 virtual void copyRawContent(uint8_t buffer[]) const;
946 private:
947 using WriterAtom<A>::fWriter;
948 typedef typename A::P P;
949 };
950
951
952 template <typename A>
953 class LoadCommandsPaddingAtom : public WriterAtom<A>
954 {
955 public:
956 LoadCommandsPaddingAtom(Writer<A>& writer)
957 : WriterAtom<A>(writer, headerSegment(writer)), fSize(0) {}
958 virtual const char* getDisplayName() const { return "header padding"; }
959 virtual uint64_t getSize() const { return fSize; }
960 virtual const char* getSectionName() const { return "._load_cmds_pad"; }
961 virtual void copyRawContent(uint8_t buffer[]) const;
962
963 void setSize(uint64_t newSize);
964 private:
965 using WriterAtom<A>::fWriter;
966 typedef typename A::P P;
967 uint64_t fSize;
968 };
969
970 template <typename A>
971 class MinimalTextAtom : public WriterAtom<A>
972 {
973 public:
974 MinimalTextAtom(Writer<A>& writer)
975 : WriterAtom<A>(writer, headerSegment(writer)) {}
976 virtual const char* getDisplayName() const { return "minimal text"; }
977 virtual uint64_t getSize() const { return 0; }
978 virtual const char* getSectionName() const { return "__text"; }
979 virtual void copyRawContent(uint8_t buffer[]) const { }
980 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableNotIn; }
981
982 private:
983 using WriterAtom<A>::fWriter;
984 };
985
986
987 template <typename A>
988 class UnwindInfoAtom : public WriterAtom<A>
989 {
990 public:
991 UnwindInfoAtom(Writer<A>& writer) : WriterAtom<A>(writer, Segment::fgTextSegment),
992 fHeaderSize(0), fPagesSize(0), fAlignment(4) {}
993 virtual const char* getName() const { return "unwind info"; }
994 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeTranslationUnit; }
995 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableNotIn; }
996 virtual uint64_t getSize() const { return fHeaderSize+fPagesSize; }
997 virtual ObjectFile::Alignment getAlignment() const { return fAlignment; }
998 virtual const char* getSectionName() const { return "__unwind_info"; }
999 virtual uint32_t getOrdinal() const { return 1; }
1000 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)fReferences; }
1001 virtual void copyRawContent(uint8_t buffer[]) const;
1002
1003 void addUnwindInfo(ObjectFile::Atom* func, uint32_t offset, uint32_t encoding,
1004 ObjectFile::Reference* fdeRef, ObjectFile::Reference* lsda,
1005 ObjectFile::Atom* personalityPointer);
1006 void generate();
1007
1008 private:
1009 using WriterAtom<A>::fWriter;
1010 typedef typename A::P P;
1011 struct Info { ObjectFile::Atom* func; ObjectFile::Atom* fde; ObjectFile::Atom* lsda; uint32_t lsdaOffset; ObjectFile::Atom* personalityPointer; uint32_t encoding; };
1012 struct LSDAEntry { ObjectFile::Atom* func; ObjectFile::Atom* lsda; uint32_t lsdaOffset; };
1013 struct RegFixUp { uint8_t* contentPointer; ObjectFile::Atom* func; ObjectFile::Atom* fde; };
1014 struct CompressedFixUp { uint8_t* contentPointer; ObjectFile::Atom* func; ObjectFile::Atom* fromFunc; };
1015 struct CompressedEncodingFixUp { uint8_t* contentPointer; ObjectFile::Atom* fde; };
1016
1017 bool encodingMeansUseDwarf(compact_unwind_encoding_t encoding);
1018 void compressDuplicates(std::vector<Info>& uniqueInfos);
1019 void findCommonEncoding(const std::vector<Info>& uniqueInfos, std::map<uint32_t, unsigned int>& commonEncodings);
1020 void makeLsdaIndex(const std::vector<Info>& uniqueInfos, std::map<ObjectFile::Atom*, uint32_t>& lsdaIndexOffsetMap);
1021 unsigned int makeRegularSecondLevelPage(const std::vector<Info>& uniqueInfos, uint32_t pageSize, unsigned int endIndex,
1022 uint8_t*& pageEnd);
1023 unsigned int makeCompressedSecondLevelPage(const std::vector<Info>& uniqueInfos,
1024 const std::map<uint32_t,unsigned int> commonEncodings,
1025 uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd);
1026 void makePersonalityIndex(std::vector<Info>& uniqueInfos);
1027
1028
1029 uint32_t fHeaderSize;
1030 uint32_t fPagesSize;
1031 uint8_t* fHeaderContent;
1032 uint8_t* fPagesContent;
1033 uint8_t* fPagesContentForDelete;
1034 ObjectFile::Alignment fAlignment;
1035 std::vector<Info> fInfos;
1036 std::map<ObjectFile::Atom*, uint32_t> fPersonalityIndexMap;
1037 std::vector<LSDAEntry> fLSDAIndex;
1038 std::vector<RegFixUp> fRegFixUps;
1039 std::vector<CompressedFixUp> fCompressedFixUps;
1040 std::vector<CompressedEncodingFixUp> fCompressedEncodingFixUps;
1041 std::vector<ObjectFile::Reference*> fReferences;
1042 };
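// Feeding sketch (illustrative; the likely driver is
// Writer<A>::synthesizeUnwindInfoTable(), declared above): for each function
// carrying compact-unwind information the writer registers it, then has the
// atom build the __TEXT,__unwind_info content once:
//
//   fUnwindInfoAtom->addUnwindInfo(funcAtom, offsetInAtom, encoding,
//                                  fdeReference, lsdaReference, personalityAtom);
//   ...
//   fUnwindInfoAtom->generate();   // computes fHeaderSize, fPagesSize and the raw content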
1043
1044
1045
1046 template <typename A>
1047 class LinkEditAtom : public WriterAtom<A>
1048 {
1049 public:
1050 LinkEditAtom(Writer<A>& writer) : WriterAtom<A>(writer, Segment::fgLinkEditSegment), fOrdinal(fgCurrentOrdinal++) {}
1051 uint64_t getFileOffset() const;
1052 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(log2(sizeof(typename A::P::uint_t))); }
1053 virtual uint32_t getOrdinal() const { return fOrdinal; }
1054 private:
1055 uint32_t fOrdinal;
1056 static uint32_t fgCurrentOrdinal;
1057 private:
1058 typedef typename A::P P;
1059 };
1060
1061 template <typename A> uint32_t LinkEditAtom<A>::fgCurrentOrdinal = 0;
1062
1063 template <typename A>
1064 class SectionRelocationsLinkEditAtom : public LinkEditAtom<A>
1065 {
1066 public:
1067 SectionRelocationsLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1068 virtual const char* getDisplayName() const { return "section relocations"; }
1069 virtual uint64_t getSize() const;
1070 virtual const char* getSectionName() const { return "._section_relocs"; }
1071 virtual void copyRawContent(uint8_t buffer[]) const;
1072 private:
1073 using WriterAtom<A>::fWriter;
1074 typedef typename A::P P;
1075 };
1076
1077 template <typename A>
1078 class CompressedInfoLinkEditAtom : public LinkEditAtom<A>
1079 {
1080 public:
1081 CompressedInfoLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1082 virtual uint64_t getSize() const { return fEncodedData.size(); }
1083 virtual void copyRawContent(uint8_t buffer[]) const { memcpy(buffer, fEncodedData.start(), fEncodedData.size()); }
1084 protected:
1085 typedef typename A::P::uint_t pint_t;
1086 ByteStream fEncodedData;
1087 private:
1088 using WriterAtom<A>::fWriter;
1089 typedef typename A::P P;
1090 };
1091
1092
1093
1094 template <typename A>
1095 class CompressedRebaseInfoLinkEditAtom : public CompressedInfoLinkEditAtom<A>
1096 {
1097 public:
1098 CompressedRebaseInfoLinkEditAtom(Writer<A>& writer) : CompressedInfoLinkEditAtom<A>(writer) { }
1099 virtual const char* getDisplayName() const { return "compressed rebase info"; }
1100 virtual const char* getSectionName() const { return "._rebase info"; }
1101 void encode();
1102 private:
1103 using CompressedInfoLinkEditAtom<A>::fEncodedData;
1104 using CompressedInfoLinkEditAtom<A>::fWriter;
1105 typedef typename A::P P;
1106 typedef typename A::P::uint_t pint_t;
1107 };
1108
1109 template <typename A>
1110 class CompressedBindingInfoLinkEditAtom : public CompressedInfoLinkEditAtom<A>
1111 {
1112 public:
1113 CompressedBindingInfoLinkEditAtom(Writer<A>& writer) : CompressedInfoLinkEditAtom<A>(writer) { }
1114 virtual const char* getDisplayName() const { return "compressed binding info"; }
1115 virtual const char* getSectionName() const { return "._binding info"; }
1116 void encode();
1117 private:
1118 using CompressedInfoLinkEditAtom<A>::fWriter;
1119 using CompressedInfoLinkEditAtom<A>::fEncodedData;
1120 typedef typename A::P P;
1121 typedef typename A::P::uint_t pint_t;
1122 };
1123
1124 template <typename A>
1125 class CompressedWeakBindingInfoLinkEditAtom : public CompressedInfoLinkEditAtom<A>
1126 {
1127 public:
1128 CompressedWeakBindingInfoLinkEditAtom(Writer<A>& writer) : CompressedInfoLinkEditAtom<A>(writer) { }
1129 virtual const char* getDisplayName() const { return "compressed weak binding info"; }
1130 virtual const char* getSectionName() const { return "._wkbinding info"; }
1131 void encode();
1132 private:
1133 using CompressedInfoLinkEditAtom<A>::fWriter;
1134 using CompressedInfoLinkEditAtom<A>::fEncodedData;
1135 typedef typename A::P P;
1136 typedef typename A::P::uint_t pint_t;
1137 };
1138
1139 template <typename A>
1140 class CompressedLazyBindingInfoLinkEditAtom : public CompressedInfoLinkEditAtom<A>
1141 {
1142 public:
1143 CompressedLazyBindingInfoLinkEditAtom(Writer<A>& writer) : CompressedInfoLinkEditAtom<A>(writer) { }
1144 virtual const char* getDisplayName() const { return "compressed lazy binding info"; }
1145 virtual const char* getSectionName() const { return "._lzbinding info"; }
1146 void encode();
1147 private:
1148 std::vector<uint32_t> fStarts;
1149
1150 using CompressedInfoLinkEditAtom<A>::fWriter;
1151 using CompressedInfoLinkEditAtom<A>::fEncodedData;
1152 typedef typename A::P P;
1153 typedef typename A::P::uint_t pint_t;
1154 };
1155
1156
1157 template <typename A>
1158 class CompressedExportInfoLinkEditAtom : public CompressedInfoLinkEditAtom<A>
1159 {
1160 public:
1161 CompressedExportInfoLinkEditAtom(Writer<A>& writer)
1162 : CompressedInfoLinkEditAtom<A>(writer), fStartNode(strdup("")) { }
1163 virtual const char* getDisplayName() const { return "compressed export info"; }
1164 virtual const char* getSectionName() const { return "._export info"; }
1165 void encode();
1166 private:
1167 using WriterAtom<A>::fWriter;
1168 using CompressedInfoLinkEditAtom<A>::fEncodedData;
1169 typedef typename A::P P;
1170 typedef typename A::P::uint_t pint_t;
1171 struct node;
1172
1173 struct edge
1174 {
1175 edge(const char* s, struct node* n) : fSubString(s), fChild(n) { }
1176 ~edge() { }
1177 const char* fSubString;
1178 struct node* fChild;
1179
1180 };
1181
1182 struct node
1183 {
1184 node(const char* s) : fCummulativeString(s), fAddress(0), fFlags(0), fOrdered(false),
1185 fHaveExportInfo(false), fTrieOffset(0) {}
1186 ~node() { }
1187 const char* fCummulativeString;
1188 std::vector<edge> fChildren;
1189 uint64_t fAddress;
1190 uint32_t fFlags;
1191 bool fOrdered;
1192 bool fHaveExportInfo;
1193 uint32_t fTrieOffset;
1194
1195 void addSymbol(const char* fullStr, uint64_t address, uint32_t flags) {
1196 const char* partialStr = &fullStr[strlen(fCummulativeString)];
1197 for (typename std::vector<edge>::iterator it = fChildren.begin(); it != fChildren.end(); ++it) {
1198 edge& e = *it;
1199 int subStringLen = strlen(e.fSubString);
1200 if ( strncmp(e.fSubString, partialStr, subStringLen) == 0 ) {
1201 // already have matching edge, go down that path
1202 e.fChild->addSymbol(fullStr, address, flags);
1203 return;
1204 }
1205 else {
1206 for (int i=subStringLen-1; i > 0; --i) {
1207 if ( strncmp(e.fSubString, partialStr, i) == 0 ) {
1208 // found a common substring, splice in new node
1209 // was A -> C, now A -> B -> C
1210 char* bNodeCummStr = strdup(e.fChild->fCummulativeString);
1211 bNodeCummStr[strlen(bNodeCummStr)+i-subStringLen] = '\0';
1212 //node* aNode = this;
1213 node* bNode = new node(bNodeCummStr);
1214 node* cNode = e.fChild;
1215 char* abEdgeStr = strdup(e.fSubString);
1216 abEdgeStr[i] = '\0';
1217 char* bcEdgeStr = strdup(&e.fSubString[i]);
1218 edge& abEdge = e;
1219 abEdge.fSubString = abEdgeStr;
1220 abEdge.fChild = bNode;
1221 edge bcEdge(bcEdgeStr, cNode);
1222 bNode->fChildren.push_back(bcEdge);
1223 bNode->addSymbol(fullStr, address, flags);
1224 return;
1225 }
1226 }
1227 }
1228 }
1229 // no commonality with any existing child, make a new edge that is this whole string
1230 node* newNode = new node(strdup(fullStr));
1231 edge newEdge(strdup(partialStr), newNode);
1232 fChildren.push_back(newEdge);
1233 newNode->fAddress = address;
1234 newNode->fFlags = flags;
1235 newNode->fHaveExportInfo = true;
1236 }
1237
1238 void addOrderedNodes(const char* name, std::vector<node*>& orderedNodes) {
1239 if ( !fOrdered ) {
1240 orderedNodes.push_back(this);
1241 //fprintf(stderr, "ordered %p %s\n", this, fCummulativeString);
1242 fOrdered = true;
1243 }
1244 const char* partialStr = &name[strlen(fCummulativeString)];
1245 for (typename std::vector<edge>::iterator it = fChildren.begin(); it != fChildren.end(); ++it) {
1246 edge& e = *it;
1247 int subStringLen = strlen(e.fSubString);
1248 if ( strncmp(e.fSubString, partialStr, subStringLen) == 0 ) {
1249 // already have matching edge, go down that path
1250 e.fChild->addOrderedNodes(name, orderedNodes);
1251 return;
1252 }
1253 }
1254 }
1255
1256 // byte for terminal node size in bytes, or 0x00 if not terminal node
1257 // terminal node (uleb128 flags, uleb128 addr)
1258 // byte for child node count
1259 // each child: zero terminated substring, uleb128 node offset
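// worked example (illustrative): exporting only "_main" at address 0x1000
// with flags 0 produces a 14-byte trie -- a root node with no export info
// and a single child edge labeled "_main", whose terminal node lands at
// offset 9:
//   00                   root: no export info
//   01                   root: one child edge
//   5F 6D 61 69 6E 00    edge substring "_main", NUL terminated
//   09                   uleb128 offset of child node
//   03                   terminal info size (uleb128 flags + uleb128 addr)
//   00                   uleb128(flags)
//   80 20                uleb128(0x1000)
//   00                   child count of terminal node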
1260 bool updateOffset(uint32_t& offset) {
1261 uint32_t nodeSize = 1; // byte for length of export info
1262 if ( fHaveExportInfo )
1263 nodeSize += ByteStream::uleb128_size(fFlags) + ByteStream::uleb128_size(fAddress);
1264
1265 // add children
1266 ++nodeSize; // byte for count of children
1267 for (typename std::vector<edge>::iterator it = fChildren.begin(); it != fChildren.end(); ++it) {
1268 edge& e = *it;
1269 nodeSize += strlen(e.fSubString) + 1 + ByteStream::uleb128_size(e.fChild->fTrieOffset);
1270 }
1271 bool result = (fTrieOffset != offset);
1272 fTrieOffset = offset;
1273 //fprintf(stderr, "updateOffset %p %05d %s\n", this, fTrieOffset, fCummulativeString);
1274 offset += nodeSize;
1275 // return true if fTrieOffset was changed
1276 return result;
1277 }
1278
1279 void appendToStream(ByteStream& out) {
1280 if ( fHaveExportInfo ) {
1281 // nodes with export info: size, flags, address
1282 out.append_byte(out.uleb128_size(fFlags) + out.uleb128_size(fAddress));
1283 out.append_uleb128(fFlags);
1284 out.append_uleb128(fAddress);
1285 }
1286 else {
1287 // no export info
1288 out.append_byte(0);
1289 }
1290 // write number of children
1291 out.append_byte(fChildren.size());
1292 // write each child
1293 for (typename std::vector<edge>::iterator it = fChildren.begin(); it != fChildren.end(); ++it) {
1294 edge& e = *it;
1295 out.append_string(e.fSubString);
1296 out.append_uleb128(e.fChild->fTrieOffset);
1297 }
1298 }
1299
1300 };
1301
1302
1303 struct node fStartNode;
1304 };
1305
1306 template <typename A>
1307 class LocalRelocationsLinkEditAtom : public LinkEditAtom<A>
1308 {
1309 public:
1310 LocalRelocationsLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1311 virtual const char* getDisplayName() const { return "local relocations"; }
1312 virtual uint64_t getSize() const;
1313 virtual const char* getSectionName() const { return "._local_relocs"; }
1314 virtual void copyRawContent(uint8_t buffer[]) const;
1315 private:
1316 using WriterAtom<A>::fWriter;
1317 typedef typename A::P P;
1318 };
1319
1320 template <typename A>
1321 class SymbolTableLinkEditAtom : public LinkEditAtom<A>
1322 {
1323 public:
1324 SymbolTableLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1325 virtual const char* getDisplayName() const { return "symbol table"; }
1326 virtual uint64_t getSize() const;
1327 virtual const char* getSectionName() const { return "._symbol_table"; }
1328 virtual void copyRawContent(uint8_t buffer[]) const;
1329 private:
1330 using WriterAtom<A>::fWriter;
1331 typedef typename A::P P;
1332 };
1333
1334 template <typename A>
1335 class ExternalRelocationsLinkEditAtom : public LinkEditAtom<A>
1336 {
1337 public:
1338 ExternalRelocationsLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1339 virtual const char* getDisplayName() const { return "external relocations"; }
1340 virtual uint64_t getSize() const;
1341 virtual const char* getSectionName() const { return "._extern_relocs"; }
1342 virtual void copyRawContent(uint8_t buffer[]) const;
1343 private:
1344 using WriterAtom<A>::fWriter;
1345 typedef typename A::P P;
1346 };
1347
1348 struct IndirectEntry {
1349 uint32_t indirectIndex;
1350 uint32_t symbolIndex;
1351 };
1352
1353
1354 template <typename A>
1355 class SegmentSplitInfoContentAtom : public LinkEditAtom<A>
1356 {
1357 public:
1358 SegmentSplitInfoContentAtom(Writer<A>& writer) : LinkEditAtom<A>(writer), fCantEncode(false) { }
1359 virtual const char* getDisplayName() const { return "split segment info"; }
1360 virtual uint64_t getSize() const;
1361 virtual const char* getSectionName() const { return "._split_info"; }
1362 virtual void copyRawContent(uint8_t buffer[]) const;
1363 bool canEncode() { return !fCantEncode; }
1364 void setCantEncode() { fCantEncode = true; }
1365 void add32bitPointerLocation(const ObjectFile::Atom* atom, uint32_t offset) { fKind1Locations.push_back(AtomAndOffset(atom, offset)); }
1366 void add64bitPointerLocation(const ObjectFile::Atom* atom, uint32_t offset) { fKind2Locations.push_back(AtomAndOffset(atom, offset)); }
1367 void addPPCHi16Location(const ObjectFile::Atom* atom, uint32_t offset) { fKind3Locations.push_back(AtomAndOffset(atom, offset)); }
1368 void add32bitImportLocation(const ObjectFile::Atom* atom, uint32_t offset) { fKind4Locations.push_back(AtomAndOffset(atom, offset)); }
1369 void encode();
1370
1371 private:
1372 using WriterAtom<A>::fWriter;
1373 typedef typename A::P P;
1374 typedef typename A::P::uint_t pint_t;
1375 struct AtomAndOffset {
1376 AtomAndOffset(const ObjectFile::Atom* a, uint32_t off) : atom(a), offset(off) {}
1377 const ObjectFile::Atom* atom;
1378 uint32_t offset;
1379 };
1380 void uleb128EncodeAddresses(const std::vector<AtomAndOffset>& locations);
1381
1382 std::vector<AtomAndOffset> fKind1Locations;
1383 std::vector<AtomAndOffset> fKind2Locations;
1384 std::vector<AtomAndOffset> fKind3Locations;
1385 std::vector<AtomAndOffset> fKind4Locations;
1386 std::vector<uint8_t> fEncodedData;
1387 bool fCantEncode;
1388 };
1389
1390 template <typename A>
1391 class IndirectTableLinkEditAtom : public LinkEditAtom<A>
1392 {
1393 public:
1394 IndirectTableLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer) { }
1395 virtual const char* getDisplayName() const { return "indirect symbol table"; }
1396 virtual uint64_t getSize() const;
1397 virtual const char* getSectionName() const { return "._indirect_syms"; }
1398 virtual void copyRawContent(uint8_t buffer[]) const;
1399
1400 std::vector<IndirectEntry> fTable;
1401
1402 private:
1403 using WriterAtom<A>::fWriter;
1404 typedef typename A::P P;
1405 };
1406
1407 template <typename A>
1408 class ModuleInfoLinkEditAtom : public LinkEditAtom<A>
1409 {
1410 public:
1411 ModuleInfoLinkEditAtom(Writer<A>& writer) : LinkEditAtom<A>(writer), fModuleNameOffset(0) { }
1412 virtual const char* getDisplayName() const { return "module table"; }
1413 virtual uint64_t getSize() const;
1414 virtual const char* getSectionName() const { return "._module_info"; }
1415 virtual void copyRawContent(uint8_t buffer[]) const;
1416
1417 void setName() { fModuleNameOffset = fWriter.fStringsAtom->add("single module"); }
1418 uint32_t getTableOfContentsFileOffset() const;
1419 uint32_t getModuleTableFileOffset() const;
1420 uint32_t getReferencesFileOffset() const;
1421 uint32_t getReferencesCount() const;
1422
1423 private:
1424 using WriterAtom<A>::fWriter;
1425 typedef typename A::P P;
1426 typedef typename A::P::uint_t pint_t;
1427 uint32_t fModuleNameOffset;
1428 };
1429
1430
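// CStringEquals makes the string-pool hash_map below (StringsLinkEditAtom) compare
// key strings by content; the default equal_to<const char*> would only compare
// pointer identity, while __gnu_cxx::hash<const char*> already hashes the characters.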
1431 class CStringEquals
1432 {
1433 public:
1434 bool operator()(const char* left, const char* right) const { return (strcmp(left, right) == 0); }
1435 };
1436
1437 template <typename A>
1438 class StringsLinkEditAtom : public LinkEditAtom<A>
1439 {
1440 public:
1441 StringsLinkEditAtom(Writer<A>& writer);
1442 virtual const char* getDisplayName() const { return "string pool"; }
1443 virtual uint64_t getSize() const;
1444 virtual const char* getSectionName() const { return "._string_pool"; }
1445 virtual void copyRawContent(uint8_t buffer[]) const;
1446
1447 int32_t add(const char* name);
1448 int32_t addUnique(const char* name);
1449 int32_t emptyString() { return 1; }
1450 const char* stringForIndex(int32_t) const;
1451
1452 private:
1453 using WriterAtom<A>::fWriter;
1454 typedef typename A::P P;
1455 enum { kBufferSize = 0x01000000 };
1456 typedef __gnu_cxx::hash_map<const char*, int32_t, __gnu_cxx::hash<const char*>, CStringEquals> StringToOffset;
1457
1458 std::vector<char*> fFullBuffers;
1459 char* fCurrentBuffer;
1460 uint32_t fCurrentBufferUsed;
1461 StringToOffset fUniqueStrings;
1462 };
1463
1464
1465
1466 template <typename A>
1467 class UndefinedSymbolProxyAtom : public WriterAtom<A>
1468 {
1469 public:
1470 UndefinedSymbolProxyAtom(Writer<A>& writer, const char* name) : WriterAtom<A>(writer, Segment::fgLinkEditSegment), fName(name) {}
1471 virtual const char* getName() const { return fName; }
1472 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeGlobal; }
1473 virtual ObjectFile::Atom::DefinitionKind getDefinitionKind() const { return ObjectFile::Atom::kExternalDefinition; }
1474 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableIn; }
1475 virtual uint64_t getSize() const { return 0; }
1476 virtual const char* getSectionName() const { return "._imports"; }
1477 private:
1478 using WriterAtom<A>::fWriter;
1479 typedef typename A::P P;
1480 const char* fName;
1481 };
1482
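// BranchIslandAtom: when a direct branch cannot reach its target (roughly the
// architectural +/-32MB range of a ppc or arm b/bl), the writer synthesizes one of
// these "islands" in __text within range of the caller; the island then branches on
// toward the final target, possibly through a chain of islands chosen by islandRegion.
// (Rough summary; the exact ranges are the usual architectural limits, not stated here.)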
1483 template <typename A>
1484 class BranchIslandAtom : public WriterAtom<A>
1485 {
1486 public:
1487 BranchIslandAtom(Writer<A>& writer, const char* name, int islandRegion, ObjectFile::Atom& target,
1488 ObjectFile::Atom& finalTarget, uint32_t finalTargetOffset);
1489 virtual const char* getName() const { return fName; }
1490 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1491 virtual uint64_t getSize() const;
1492 virtual bool isThumb() const { return (fIslandKind == kBranchIslandToThumb2); }
1493 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kBranchIsland; }
1494 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableIn; }
1495 virtual const char* getSectionName() const { return "__text"; }
1496 virtual void copyRawContent(uint8_t buffer[]) const;
1497 uint64_t getFinalTargetAdress() const { return fFinalTarget.getAddress() + fFinalTargetOffset; }
1498 private:
1499 using WriterAtom<A>::fWriter;
1500 enum IslandKind { kBranchIslandToARM, kBranchIslandToThumb2, kBranchIslandToThumb1, kBranchIslandNoPicToThumb1 };
1501 const char* fName;
1502 ObjectFile::Atom& fTarget;
1503 ObjectFile::Atom& fFinalTarget;
1504 uint32_t fFinalTargetOffset;
1505 IslandKind fIslandKind;
1506 };
1507
1508 template <typename A>
1509 class StubAtom : public WriterAtom<A>
1510 {
1511 public:
1512 StubAtom(Writer<A>& writer, ObjectFile::Atom& target, bool forLazyDylib);
1513 virtual const char* getName() const { return fName; }
1514 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1515 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kStub; }
1516 virtual uint64_t getSize() const;
1517 virtual ObjectFile::Alignment getAlignment() const;
1518 virtual const char* getSectionName() const { return "__symbol_stub1"; }
1519 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1520 virtual void copyRawContent(uint8_t buffer[]) const;
1521 ObjectFile::Atom* getTarget() { return &fTarget; }
1522 virtual uint32_t getOrdinal() const { return fSortingOrdinal; }
1523 void setSortingOrdinal(uint32_t o) { fSortingOrdinal = o; }
1524 private:
1525 static const char* stubName(const char* importName);
1526 friend class LazyPointerAtom<A>;
1527 using WriterAtom<A>::fWriter;
1528 enum StubKind { kStubPIC, kStubNoPIC, kStubShort, kJumpTable };
1529 const char* fName;
1530 ObjectFile::Atom& fTarget;
1531 std::vector<ObjectFile::Reference*> fReferences;
1532 bool fForLazyDylib;
1533 StubKind fKind;
1534 uint32_t fSortingOrdinal;
1535 };
1536
1537
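// The next several atoms implement the lazy-binding glue. Roughly: a call site jumps
// to a StubAtom, the stub jumps indirectly through a LazyPointerAtom, and that pointer
// initially targets a per-symbol stub helper which identifies the symbol and enters
// dyld (dyld_stub_binding_helper, or for compressed LINKEDIT a shared helper-helper
// that tail-calls dyld's fast binder). Once dyld resolves the symbol it rewrites the
// lazy pointer, so later calls go straight to the target.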
1538 template <typename A>
1539 class FastStubHelperHelperAtom : public WriterAtom<A>
1540 {
1541 public:
1542 FastStubHelperHelperAtom(Writer<A>& writer);
1543 virtual const char* getName() const { return " stub helpers"; } // name sorts to start of helpers
1544 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableIn; }
1545 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1546 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kStubHelper; }
1547 virtual uint64_t getSize() const;
1548 virtual const char* getSectionName() const { return "__stub_helper"; }
1549 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1550 virtual void copyRawContent(uint8_t buffer[]) const;
1551 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(0); }
1552 virtual uint32_t getOrdinal() const { return 0; }
1553 protected:
1554 using WriterAtom<A>::fWriter;
1555 std::vector<ObjectFile::Reference*> fReferences;
1556 };
1557
1558 template <typename A>
1559 class HybridStubHelperHelperAtom : public WriterAtom<A>
1560 {
1561 public:
1562 HybridStubHelperHelperAtom(Writer<A>& writer);
1563 virtual const char* getName() const { return " stub helpers"; } // name sorts to start of helpers
1564 virtual ObjectFile::Atom::SymbolTableInclusion getSymbolTableInclusion() const { return ObjectFile::Atom::kSymbolTableIn; }
1565 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1566 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kStubHelper; }
1567 virtual uint64_t getSize() const;
1568 virtual const char* getSectionName() const { return "__stub_helper"; }
1569 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1570 virtual void copyRawContent(uint8_t buffer[]) const;
1571 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(0); }
1572 virtual uint32_t getOrdinal() const { return 0; }
1573 protected:
1574 using WriterAtom<A>::fWriter;
1575 std::vector<ObjectFile::Reference*> fReferences;
1576 };
1577
1578 template <typename A>
1579 class StubHelperAtom : public WriterAtom<A>
1580 {
1581 public:
1582 StubHelperAtom(Writer<A>& writer, ObjectFile::Atom& target,
1583 LazyPointerAtom<A>& lazyPointer, bool forLazyDylib)
1584 : WriterAtom<A>(writer, Segment::fgTextSegment), fName(stubName(target.getName())),
1585 fTarget(target), fLazyPointerAtom(lazyPointer) {
1586 writer.fAllSynthesizedStubHelpers.push_back(this);
1587 }
1588
1589 virtual const char* getName() const { return fName; }
1590 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1591 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kStubHelper; }
1592 virtual const char* getSectionName() const { return "__stub_helper"; }
1593 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1594 ObjectFile::Atom* getTarget() { return &fTarget; }
1595 virtual ObjectFile::Alignment getAlignment() const { return ObjectFile::Alignment(0); }
1596 virtual uint32_t getOrdinal() const { return 1; }
1597 protected:
1598 static const char* stubName(const char* importName);
1599 using WriterAtom<A>::fWriter;
1600 const char* fName;
1601 ObjectFile::Atom& fTarget;
1602 LazyPointerAtom<A>& fLazyPointerAtom;
1603 std::vector<ObjectFile::Reference*> fReferences;
1604 };
1605
1606 template <typename A>
1607 class ClassicStubHelperAtom : public StubHelperAtom<A>
1608 {
1609 public:
1610 ClassicStubHelperAtom(Writer<A>& writer, ObjectFile::Atom& target,
1611 class LazyPointerAtom<A>& lazyPointer, bool forLazyDylib);
1612
1613 virtual uint64_t getSize() const;
1614 virtual void copyRawContent(uint8_t buffer[]) const;
1615 };
1616
1617
1618 template <typename A>
1619 class HybridStubHelperAtom : public StubHelperAtom<A>
1620 {
1621 public:
1622 HybridStubHelperAtom(Writer<A>& writer, ObjectFile::Atom& target,
1623 class LazyPointerAtom<A>& lazyPointer, bool forLazyDylib);
1624
1625 virtual uint64_t getSize() const;
1626 virtual void copyRawContent(uint8_t buffer[]) const;
1627 static class HybridStubHelperHelperAtom<A>* fgHelperHelperAtom;
1628 };
1629 template <typename A> class HybridStubHelperHelperAtom<A>* HybridStubHelperAtom<A>::fgHelperHelperAtom = NULL;
1630
1631 template <typename A>
1632 class FastStubHelperAtom : public StubHelperAtom<A>
1633 {
1634 public:
1635 FastStubHelperAtom(Writer<A>& writer, ObjectFile::Atom& target,
1636 class LazyPointerAtom<A>& lazyPointer, bool forLazyDylib);
1637 virtual uint64_t getSize() const;
1638 virtual void copyRawContent(uint8_t buffer[]) const;
1639 static FastStubHelperHelperAtom<A>* fgHelperHelperAtom;
1640 };
1641 template <typename A> FastStubHelperHelperAtom<A>* FastStubHelperAtom<A>::fgHelperHelperAtom = NULL;
1642
1643
1644
1645 template <typename A>
1646 class LazyPointerAtom : public WriterAtom<A>
1647 {
1648 public:
1649 LazyPointerAtom(Writer<A>& writer, ObjectFile::Atom& target,
1650 StubAtom<A>& stub, bool forLazyDylib);
1651 virtual const char* getName() const { return fName; }
1652 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeTranslationUnit; }
1653 virtual ObjectFile::Atom::ContentType getContentType() const { return fForLazyDylib ? ObjectFile::Atom::kLazyDylibPointer : ObjectFile::Atom::kLazyPointer; }
1654 virtual uint64_t getSize() const { return sizeof(typename A::P::uint_t); }
1655 virtual const char* getSectionName() const;
1656 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1657 virtual void copyRawContent(uint8_t buffer[]) const;
1658 ObjectFile::Atom* getTarget() { return &fExternalTarget; }
1659 void setLazyBindingInfoOffset(uint32_t off) { fLazyBindingOffset = off; }
1660 uint32_t getLazyBindingInfoOffset() { return fLazyBindingOffset; }
1661 virtual uint32_t getOrdinal() const { return fSortingOrdinal; }
1662 void setSortingOrdinal(uint32_t o) { fSortingOrdinal = o; }
1663 private:
1664 using WriterAtom<A>::fWriter;
1665 static const char* lazyPointerName(const char* importName);
1666 const char* fName;
1667 ObjectFile::Atom& fTarget;
1668 ObjectFile::Atom& fExternalTarget;
1669 std::vector<ObjectFile::Reference*> fReferences;
1670 bool fForLazyDylib;
1671 bool fCloseStub;
1672 uint32_t fLazyBindingOffset;
1673 uint32_t fSortingOrdinal;
1674 };
1675
1676
1677 template <typename A>
1678 class NonLazyPointerAtom : public WriterAtom<A>
1679 {
1680 public:
1681 NonLazyPointerAtom(Writer<A>& writer, ObjectFile::Atom& target);
1682 NonLazyPointerAtom(Writer<A>& writer, const char* targetName);
1683 NonLazyPointerAtom(Writer<A>& writer);
1684 virtual const char* getName() const { return fName; }
1685 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1686 virtual ObjectFile::Atom::ContentType getContentType() const { return ObjectFile::Atom::kNonLazyPointer; }
1687 virtual uint64_t getSize() const { return sizeof(typename A::P::uint_t); }
1688 virtual const char* getSectionName() const { return (fWriter.fOptions.outputKind() == Options::kKextBundle) ? "__got" : "__nl_symbol_ptr"; }
1689 virtual std::vector<ObjectFile::Reference*>& getReferences() const { return (std::vector<ObjectFile::Reference*>&)(fReferences); }
1690 virtual void copyRawContent(uint8_t buffer[]) const;
1691 ObjectFile::Atom* getTarget() { return fTarget; }
1692 virtual uint32_t getOrdinal() const { return fSortingOrdinal; }
1693 void setSortingOrdinal(uint32_t o) { fSortingOrdinal = o; }
1694 private:
1695 using WriterAtom<A>::fWriter;
1696 static const char* nonlazyPointerName(const char* importName);
1697 const char* fName;
1698 ObjectFile::Atom* fTarget;
1699 std::vector<ObjectFile::Reference*> fReferences;
1700 uint32_t fSortingOrdinal;
1701 };
1702
1703
1704 template <typename A>
1705 class ObjCInfoAtom : public WriterAtom<A>
1706 {
1707 public:
1708 ObjCInfoAtom(Writer<A>& writer, ObjectFile::Reader::ObjcConstraint objcContraint,
1709 bool objcReplacementClasses);
1710 virtual const char* getName() const { return "objc$info"; }
1711 virtual ObjectFile::Atom::Scope getScope() const { return ObjectFile::Atom::scopeLinkageUnit; }
1712 virtual uint64_t getSize() const { return 8; }
1713 virtual const char* getSectionName() const;
1714 virtual void copyRawContent(uint8_t buffer[]) const;
1715 private:
1716 Segment& getInfoSegment() const;
1717 uint32_t fContent[2];
1718 };
1719
1720
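// WriterReference is the writer's own Reference implementation, used to record fixups
// between synthesized atoms (stubs, pointers, load commands, ...). A reference is
// either bound directly to an atom, or, when only a name is known at construction
// time, left unbound by name to be resolved later.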
1721 template <typename A>
1722 class WriterReference : public ObjectFile::Reference
1723 {
1724 public:
1725 typedef typename A::ReferenceKinds Kinds;
1726
1727 WriterReference(uint32_t offset, Kinds kind, ObjectFile::Atom* target,
1728 uint32_t toOffset=0, ObjectFile::Atom* fromTarget=NULL, uint32_t fromOffset=0)
1729 : fKind(kind), fFixUpOffsetInSrc(offset), fTarget(target), fTargetName(target->getName()),
1730 fTargetOffset(toOffset), fFromTarget(fromTarget), fFromTargetOffset(fromOffset) {}
1731 WriterReference(uint32_t offset, Kinds kind, const char* targetName)
1732 : fKind(kind), fFixUpOffsetInSrc(offset), fTarget(NULL), fTargetName(targetName),
1733 fTargetOffset(0), fFromTarget(NULL), fFromTargetOffset(0) {}
1734
1735 virtual ~WriterReference() {}
1736
1737 virtual ObjectFile::Reference::TargetBinding getTargetBinding() const { return (fTarget != NULL) ? ObjectFile::Reference::kBoundDirectly : ObjectFile::Reference::kUnboundByName; }
1738 virtual ObjectFile::Reference::TargetBinding getFromTargetBinding() const { return (fFromTarget != NULL) ? ObjectFile::Reference::kBoundDirectly : ObjectFile::Reference::kDontBind; }
1739 virtual uint8_t getKind() const { return (uint8_t)fKind; }
1740 virtual uint64_t getFixUpOffset() const { return fFixUpOffsetInSrc; }
1741 virtual const char* getTargetName() const { return fTargetName; }
1742 virtual ObjectFile::Atom& getTarget() const { return *fTarget; }
1743 virtual uint64_t getTargetOffset() const { return fTargetOffset; }
1744 virtual ObjectFile::Atom& getFromTarget() const { return *fFromTarget; }
1745 virtual const char* getFromTargetName() const { return fFromTarget->getName(); }
1746 virtual void setTarget(ObjectFile::Atom& target, uint64_t offset) { fTarget = &target; fTargetOffset = offset; }
1747 virtual void setFromTarget(ObjectFile::Atom& target) { fFromTarget = &target; }
1748 virtual void setFromTargetName(const char* name) { }
1749 virtual void setFromTargetOffset(uint64_t offset) { fFromTargetOffset = offset; }
1750 virtual const char* getDescription() const { return "writer reference"; }
1751 virtual uint64_t getFromTargetOffset() const { return fFromTargetOffset; }
1752
1753 private:
1754 Kinds fKind;
1755 uint32_t fFixUpOffsetInSrc;
1756 ObjectFile::Atom* fTarget;
1757 const char* fTargetName;
1758 uint32_t fTargetOffset;
1759 ObjectFile::Atom* fFromTarget;
1760 uint32_t fFromTargetOffset;
1761 };
1762
1763
1764 template <typename A>
1765 const char* StubHelperAtom<A>::stubName(const char* name)
1766 {
1767 char* buf;
1768 asprintf(&buf, "%s$stubHelper", name);
1769 return buf;
1770 }
1771
1772 template <>
1773 ClassicStubHelperAtom<x86_64>::ClassicStubHelperAtom(Writer<x86_64>& writer, ObjectFile::Atom& target,
1774 class LazyPointerAtom<x86_64>& lazyPointer, bool forLazyDylib)
1775 : StubHelperAtom<x86_64>(writer, target, lazyPointer, forLazyDylib)
1776 {
1777 fReferences.push_back(new WriterReference<x86_64>(3, x86_64::kPCRel32, &fLazyPointerAtom));
1778 if ( forLazyDylib ) {
1779 if ( fWriter.fDyldLazyDylibHelper == NULL )
1780 throw "symbol dyld_lazy_dylib_stub_binding_helper not defined (usually in lazydylib1.o)";
1781 fReferences.push_back(new WriterReference<x86_64>(8, x86_64::kPCRel32, fWriter.fDyldLazyDylibHelper));
1782 }
1783 else {
1784 if ( fWriter.fDyldClassicHelperAtom == NULL )
1785 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
1786 fReferences.push_back(new WriterReference<x86_64>(8, x86_64::kPCRel32, fWriter.fDyldClassicHelperAtom));
1787 }
1788 }
1789
1790
1791 template <>
1792 uint64_t ClassicStubHelperAtom<x86_64>::getSize() const
1793 {
1794 return 12;
1795 }
1796
1797 template <>
1798 void ClassicStubHelperAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
1799 {
1800 buffer[0] = 0x4C; // lea foo$lazy_ptr(%rip),%r11
1801 buffer[1] = 0x8D;
1802 buffer[2] = 0x1D;
1803 buffer[3] = 0x00;
1804 buffer[4] = 0x00;
1805 buffer[5] = 0x00;
1806 buffer[6] = 0x00;
1807 buffer[7] = 0xE9; // jmp dyld_stub_binding_helper
1808 buffer[8] = 0x00;
1809 buffer[9] = 0x00;
1810 buffer[10] = 0x00;
1811 buffer[11] = 0x00;
1812 }
1813
1814
1815 template <>
1816 FastStubHelperHelperAtom<x86_64>::FastStubHelperHelperAtom(Writer<x86_64>& writer)
1817 : WriterAtom<x86_64>(writer, Segment::fgTextSegment)
1818 {
1819 fReferences.push_back(new WriterReference<x86_64>(3, x86_64::kPCRel32, new NonLazyPointerAtom<x86_64>(writer)));
1820 fReferences.push_back(new WriterReference<x86_64>(11, x86_64::kPCRel32, writer.fFastStubGOTAtom));
1821 }
1822
1823 template <>
1824 uint64_t FastStubHelperHelperAtom<x86_64>::getSize() const
1825 {
1826 return 16;
1827 }
1828
1829 template <>
1830 void FastStubHelperHelperAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
1831 {
1832 buffer[0] = 0x4C; // leaq dyld_ImageLoaderCache(%rip),%r11
1833 buffer[1] = 0x8D;
1834 buffer[2] = 0x1D;
1835 buffer[3] = 0x00;
1836 buffer[4] = 0x00;
1837 buffer[5] = 0x00;
1838 buffer[6] = 0x00;
1839 buffer[7] = 0x41; // pushq %r11
1840 buffer[8] = 0x53;
1841 buffer[9] = 0xFF; // jmp *_fast_lazy_bind(%rip)
1842 buffer[10] = 0x25;
1843 buffer[11] = 0x00;
1844 buffer[12] = 0x00;
1845 buffer[13] = 0x00;
1846 buffer[14] = 0x00;
1847 buffer[15] = 0x90; // nop
1848 }
1849
1850
1851 template <>
1852 HybridStubHelperHelperAtom<x86_64>::HybridStubHelperHelperAtom(Writer<x86_64>& writer)
1853 : WriterAtom<x86_64>(writer, Segment::fgTextSegment)
1854 {
1855 if ( writer.fDyldClassicHelperAtom == NULL )
1856 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
1857 fReferences.push_back(new WriterReference<x86_64>(3, x86_64::kPCRel32_1, writer.fFastStubGOTAtom));
1858 fReferences.push_back(new WriterReference<x86_64>(13, x86_64::kPCRel32, new NonLazyPointerAtom<x86_64>(writer)));
1859 fReferences.push_back(new WriterReference<x86_64>(21, x86_64::kPCRel32, writer.fFastStubGOTAtom));
1860 fReferences.push_back(new WriterReference<x86_64>(30, x86_64::kPCRel32, writer.fDyldClassicHelperAtom));
1861 }
1862
1863 template <>
1864 uint64_t HybridStubHelperHelperAtom<x86_64>::getSize() const
1865 {
1866 return 34;
1867 }
1868
1869 template <>
1870 void HybridStubHelperHelperAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
1871 {
1872 buffer[0] = 0x48; // cmpq $0x00,_fast_lazy_bind(%rip)
1873 buffer[1] = 0x83;
1874 buffer[2] = 0x3D;
1875 buffer[3] = 0x00;
1876 buffer[4] = 0x00;
1877 buffer[5] = 0x00;
1878 buffer[6] = 0x00;
1879 buffer[7] = 0x00;
1880 buffer[8] = 0x74; // je 25
1881 buffer[9] = 0x0F;
1882 buffer[10] = 0x4C; // leaq imageCache(%rip),%r11
1883 buffer[11] = 0x8D;
1884 buffer[12] = 0x1D;
1885 buffer[13] = 0x00;
1886 buffer[14] = 0x00;
1887 buffer[15] = 0x00;
1888 buffer[16] = 0x00;
1889 buffer[17] = 0x41; // pushq %r11
1890 buffer[18] = 0x53;
1891 buffer[19] = 0xFF; // jmp *_fast_lazy_bind(%rip)
1892 buffer[20] = 0x25;
1893 buffer[21] = 0x00;
1894 buffer[22] = 0x00;
1895 buffer[23] = 0x00;
1896 buffer[24] = 0x00;
1897 buffer[25] = 0x48; // addq $8,%rsp
1898 buffer[26] = 0x83;
1899 buffer[27] = 0xC4;
1900 buffer[28] = 0x08;
1901 buffer[29] = 0xE9; // jmp dyld_stub_binding_helper
1902 buffer[30] = 0x00;
1903 buffer[31] = 0x00;
1904 buffer[32] = 0x00;
1905 buffer[33] = 0x00;
1906 }
1907
1908
1909 template <>
1910 HybridStubHelperAtom<x86_64>::HybridStubHelperAtom(Writer<x86_64>& writer, ObjectFile::Atom& target,
1911 class LazyPointerAtom<x86_64>& lazyPointer, bool forLazyDylib)
1912 : StubHelperAtom<x86_64>(writer, target, lazyPointer, forLazyDylib)
1913 {
1914 if ( fgHelperHelperAtom == NULL ) {
1915 fgHelperHelperAtom = new HybridStubHelperHelperAtom<x86_64>::HybridStubHelperHelperAtom(fWriter);
1916 fWriter.fAllSynthesizedStubHelpers.push_back(fgHelperHelperAtom);
1917 }
1918 fReferences.push_back(new WriterReference<x86_64>(8, x86_64::kPCRel32, &fLazyPointerAtom));
1919 fReferences.push_back(new WriterReference<x86_64>(13, x86_64::kPCRel32, fgHelperHelperAtom));
1920 }
1921
1922 template <>
1923 uint64_t HybridStubHelperAtom<x86_64>::getSize() const
1924 {
1925 return 18;
1926 }
1927
1928 template <>
1929 void HybridStubHelperAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
1930 {
1931 buffer[0] = 0x68; // pushq $lazy-info-offset
1932 buffer[1] = 0x00;
1933 buffer[2] = 0x00;
1934 buffer[3] = 0x00;
1935 buffer[4] = 0x00;
1936 buffer[5] = 0x4C; // lea foo$lazy_ptr(%rip),%r11
1937 buffer[6] = 0x8D;
1938 buffer[7] = 0x1D;
1939 buffer[8] = 0x00;
1940 buffer[9] = 0x00;
1941 buffer[10] = 0x00;
1942 buffer[11] = 0x00;
1943 buffer[12] = 0xE9; // jmp helper-helper
1944 buffer[13] = 0x00;
1945 buffer[14] = 0x00;
1946 buffer[15] = 0x00;
1947 buffer[16] = 0x00;
1948 buffer[17] = 0x90; // nop
1949
1950 // the lazy binding info is created later than this helper atom, so there
1951 // is no Reference to update. Instead we blast the offset here.
1952 uint32_t offset;
1953 LittleEndian::set32(offset, fLazyPointerAtom.getLazyBindingInfoOffset());
1954 memcpy(&buffer[1], &offset, 4);
1955 }
1956
1957 template <>
1958 FastStubHelperAtom<x86_64>::FastStubHelperAtom(Writer<x86_64>& writer, ObjectFile::Atom& target,
1959 class LazyPointerAtom<x86_64>& lazyPointer, bool forLazyDylib)
1960 : StubHelperAtom<x86_64>(writer, target, lazyPointer, forLazyDylib)
1961 {
1962 if ( fgHelperHelperAtom == NULL ) {
1963 fgHelperHelperAtom = new FastStubHelperHelperAtom<x86_64>::FastStubHelperHelperAtom(fWriter);
1964 fWriter.fAllSynthesizedStubHelpers.push_back(fgHelperHelperAtom);
1965 }
1966 fReferences.push_back(new WriterReference<x86_64>(6, x86_64::kPCRel32, fgHelperHelperAtom));
1967 }
1968
1969 template <>
1970 uint64_t FastStubHelperAtom<x86_64>::getSize() const
1971 {
1972 return 10;
1973 }
1974
1975 template <>
1976 void FastStubHelperAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
1977 {
1978 buffer[0] = 0x68; // pushq $lazy-info-offset
1979 buffer[1] = 0x00;
1980 buffer[2] = 0x00;
1981 buffer[3] = 0x00;
1982 buffer[4] = 0x00;
1983 buffer[5] = 0xE9; // jmp helperhelper
1984 buffer[6] = 0x00;
1985 buffer[7] = 0x00;
1986 buffer[8] = 0x00;
1987 buffer[9] = 0x00;
1988
1989 // the lazy binding info is created later than this helper atom, so there
1990 // is no Reference to update. Instead we blast the offset here.
1991 uint32_t offset;
1992 LittleEndian::set32(offset, fLazyPointerAtom.getLazyBindingInfoOffset());
1993 memcpy(&buffer[1], &offset, 4);
1994 }
1995
1996 template <>
1997 FastStubHelperHelperAtom<x86>::FastStubHelperHelperAtom(Writer<x86>& writer)
1998 : WriterAtom<x86>(writer, Segment::fgTextSegment)
1999 {
2000 fReferences.push_back(new WriterReference<x86>(1, x86::kAbsolute32, new NonLazyPointerAtom<x86>(writer)));
2001 fReferences.push_back(new WriterReference<x86>(7, x86::kAbsolute32, writer.fFastStubGOTAtom));
2002 }
2003
2004 template <>
2005 uint64_t FastStubHelperHelperAtom<x86>::getSize() const
2006 {
2007 return 12;
2008 }
2009
2010 template <>
2011 void FastStubHelperHelperAtom<x86>::copyRawContent(uint8_t buffer[]) const
2012 {
2013 buffer[0] = 0x68; // pushl $dyld_ImageLoaderCache
2014 buffer[1] = 0x00;
2015 buffer[2] = 0x00;
2016 buffer[3] = 0x00;
2017 buffer[4] = 0x00;
2018 buffer[5] = 0xFF; // jmp *_fast_lazy_bind
2019 buffer[6] = 0x25;
2020 buffer[7] = 0x00;
2021 buffer[8] = 0x00;
2022 buffer[9] = 0x00;
2023 buffer[10] = 0x00;
2024 buffer[11] = 0x90; // nop
2025 }
2026
2027
2028 template <>
2029 FastStubHelperHelperAtom<arm>::FastStubHelperHelperAtom(Writer<arm>& writer)
2030 : WriterAtom<arm>(writer, Segment::fgTextSegment)
2031 {
2032 fReferences.push_back(new WriterReference<arm>(28, arm::kPointerDiff, new NonLazyPointerAtom<arm>(writer), 0, this, 16));
2033 fReferences.push_back(new WriterReference<arm>(32, arm::kPointerDiff, writer.fFastStubGOTAtom, 0, this, 28));
2034 }
2035
2036 template <>
2037 uint64_t FastStubHelperHelperAtom<arm>::getSize() const
2038 {
2039 return 36;
2040 }
2041
2042 template <>
2043 void FastStubHelperHelperAtom<arm>::copyRawContent(uint8_t buffer[]) const
2044 {
2045 // push lazy-info-offset
2046 OSWriteLittleInt32(&buffer[ 0], 0, 0xe52dc004); // str ip, [sp, #-4]!
2047 // push address of dyld_ImageLoaderCache
2048 OSWriteLittleInt32(&buffer[ 4], 0, 0xe59fc010); // ldr ip, L1
2049 OSWriteLittleInt32(&buffer[ 8], 0, 0xe08fc00c); // add ip, pc, ip
2050 OSWriteLittleInt32(&buffer[12], 0, 0xe52dc004); // str ip, [sp, #-4]!
2051 // jump through _fast_lazy_bind
2052 OSWriteLittleInt32(&buffer[16], 0, 0xe59fc008); // ldr ip, L2
2053 OSWriteLittleInt32(&buffer[20], 0, 0xe08fc00c); // add ip, pc, ip
2054 OSWriteLittleInt32(&buffer[24], 0, 0xe59cf000); // ldr pc, [ip]
2055 OSWriteLittleInt32(&buffer[28], 0, 0x00000000); // L1: .long dyld_ImageLoaderCache - (helperhelper+16)
2056 OSWriteLittleInt32(&buffer[32], 0, 0x00000000); // L2: .long _fast_lazy_bind - (helperhelper+28)
2057 }
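// The +16 and +28 biases in the kPointerDiff references above come from the ARM
// convention that a read of pc yields the current instruction's address plus 8:
// the "add ip, pc, ip" at offset 8 sees pc == atom+16, and the one at offset 20
// sees pc == atom+28.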
2058
2059 template <>
2060 ObjectFile::Alignment StubHelperAtom<arm>::getAlignment() const { return ObjectFile::Alignment(2); }
2061
2062 template <>
2063 FastStubHelperAtom<arm>::FastStubHelperAtom(Writer<arm>& writer, ObjectFile::Atom& target,
2064 class LazyPointerAtom<arm>& lazyPointer, bool forLazyDylib)
2065 : StubHelperAtom<arm>(writer, target, lazyPointer, forLazyDylib)
2066 {
2067 if ( fgHelperHelperAtom == NULL ) {
2068 fgHelperHelperAtom = new FastStubHelperHelperAtom<arm>::FastStubHelperHelperAtom(fWriter);
2069 fWriter.fAllSynthesizedStubHelpers.push_back(fgHelperHelperAtom);
2070 }
2071 fReferences.push_back(new WriterReference<arm>(4, arm::kBranch24, fgHelperHelperAtom));
2072 }
2073
2074 template <>
2075 uint64_t FastStubHelperAtom<arm>::getSize() const
2076 {
2077 return 12;
2078 }
2079
2080 template <>
2081 void FastStubHelperAtom<arm>::copyRawContent(uint8_t buffer[]) const
2082 {
2083 OSWriteLittleInt32(&buffer[0], 0, 0xe59fc000); // ldr ip, [pc, #0]
2084 OSWriteLittleInt32(&buffer[4], 0, 0xea000000); // b _helperhelper
2085 // the lazy binding info is created later than this helper atom, so there
2086 // is no Reference to update. Instead we blast the offset here.
2087 OSWriteLittleInt32(&buffer[8], 0, fLazyPointerAtom.getLazyBindingInfoOffset());
2088 }
2089
2090
2091 template <>
2092 HybridStubHelperHelperAtom<x86>::HybridStubHelperHelperAtom(Writer<x86>& writer)
2093 : WriterAtom<x86>(writer, Segment::fgTextSegment)
2094 {
2095 if ( writer.fDyldClassicHelperAtom == NULL )
2096 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
2097 fReferences.push_back(new WriterReference<x86>(2, x86::kAbsolute32, writer.fFastStubGOTAtom));
2098 fReferences.push_back(new WriterReference<x86>(18, x86::kPCRel32, writer.fDyldClassicHelperAtom));
2099 fReferences.push_back(new WriterReference<x86>(26, x86::kAbsolute32, new NonLazyPointerAtom<x86>(writer)));
2100 fReferences.push_back(new WriterReference<x86>(32, x86::kAbsolute32, writer.fFastStubGOTAtom));
2101 }
2102
2103 template <>
2104 uint64_t HybridStubHelperHelperAtom<x86>::getSize() const
2105 {
2106 return 36;
2107 }
2108
2109
2110 template <>
2111 void HybridStubHelperHelperAtom<x86>::copyRawContent(uint8_t buffer[]) const
2112 {
2113 buffer[0] = 0x83; // cmpl $0x00,_fast_lazy_bind
2114 buffer[1] = 0x3D;
2115 buffer[2] = 0x00;
2116 buffer[3] = 0x00;
2117 buffer[4] = 0x00;
2118 buffer[5] = 0x00;
2119 buffer[6] = 0x00;
2120 buffer[7] = 0x75; // jne 22
2121 buffer[8] = 0x0D;
2122 buffer[9] = 0x89; // movl %eax,4(%esp)
2123 buffer[10] = 0x44;
2124 buffer[11] = 0x24;
2125 buffer[12] = 0x04;
2126 buffer[13] = 0x58; // popl %eax
2127 buffer[14] = 0x87; // xchgl (%esp),%eax
2128 buffer[15] = 0x04;
2129 buffer[16] = 0x24;
2130 buffer[17] = 0xE9; // jmpl dyld_stub_binding_helper
2131 buffer[18] = 0x00;
2132 buffer[19] = 0x00;
2133 buffer[20] = 0x00;
2134 buffer[21] = 0x00;
2135 buffer[22] = 0x83; // addl $0x04,%esp
2136 buffer[23] = 0xC4;
2137 buffer[24] = 0x04;
2138 buffer[25] = 0x68; // pushl $dyld_ImageLoaderCache
2139 buffer[26] = 0x00;
2140 buffer[27] = 0x00;
2141 buffer[28] = 0x00;
2142 buffer[29] = 0x00;
2143 buffer[30] = 0xFF; // jmp *_fast_lazy_bind
2144 buffer[31] = 0x25;
2145 buffer[32] = 0x00;
2146 buffer[33] = 0x00;
2147 buffer[34] = 0x00;
2148 buffer[35] = 0x00;
2149 }
2150
2151
2152 template <>
2153 ClassicStubHelperAtom<x86>::ClassicStubHelperAtom(Writer<x86>& writer, ObjectFile::Atom& target,
2154 class LazyPointerAtom<x86>& lazyPointer, bool forLazyDylib)
2155 : StubHelperAtom<x86>(writer, target, lazyPointer, forLazyDylib)
2156 {
2157 fReferences.push_back(new WriterReference<x86>(1, x86::kAbsolute32, &fLazyPointerAtom));
2158 if ( forLazyDylib ) {
2159 if ( fWriter.fDyldLazyDylibHelper == NULL )
2160 throw "symbol dyld_lazy_dylib_stub_binding_helper not defined (usually in lazydylib1.o)";
2161 fReferences.push_back(new WriterReference<x86>(6, x86::kPCRel32, fWriter.fDyldLazyDylibHelper));
2162 }
2163 else {
2164 if ( fWriter.fDyldClassicHelperAtom == NULL )
2165 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
2166 fReferences.push_back(new WriterReference<x86>(6, x86::kPCRel32, fWriter.fDyldClassicHelperAtom));
2167 }
2168 }
2169
2170 template <>
2171 uint64_t ClassicStubHelperAtom<x86>::getSize() const
2172 {
2173 return 10;
2174 }
2175
2176 template <>
2177 void ClassicStubHelperAtom<x86>::copyRawContent(uint8_t buffer[]) const
2178 {
2179 buffer[0] = 0x68; // pushl $foo$lazy_ptr
2180 buffer[1] = 0x00;
2181 buffer[2] = 0x00;
2182 buffer[3] = 0x00;
2183 buffer[4] = 0x00;
2184 buffer[5] = 0xE9; // jmp helperhelper
2185 buffer[6] = 0x00;
2186 buffer[7] = 0x00;
2187 buffer[8] = 0x00;
2188 buffer[9] = 0x00;
2189 }
2190
2191 template <>
2192 HybridStubHelperAtom<x86>::HybridStubHelperAtom(Writer<x86>& writer, ObjectFile::Atom& target,
2193 class LazyPointerAtom<x86>& lazyPointer, bool forLazyDylib)
2194 : StubHelperAtom<x86>(writer, target, lazyPointer, forLazyDylib)
2195 {
2196 if ( fgHelperHelperAtom == NULL ) {
2197 fgHelperHelperAtom = new HybridStubHelperHelperAtom<x86>::HybridStubHelperHelperAtom(fWriter);
2198 fWriter.fAllSynthesizedStubHelpers.push_back(fgHelperHelperAtom);
2199 }
2200 fReferences.push_back(new WriterReference<x86>(6, x86::kAbsolute32, &fLazyPointerAtom));
2201 fReferences.push_back(new WriterReference<x86>(11, x86::kPCRel32, fgHelperHelperAtom));
2202 }
2203
2204
2205 template <>
2206 uint64_t HybridStubHelperAtom<x86>::getSize() const
2207 {
2208 return 16;
2209 }
2210
2211 template <>
2212 void HybridStubHelperAtom<x86>::copyRawContent(uint8_t buffer[]) const
2213 {
2214 buffer[0] = 0x68; // pushl $lazy-info-offset
2215 buffer[1] = 0x00;
2216 buffer[2] = 0x00;
2217 buffer[3] = 0x00;
2218 buffer[4] = 0x00;
2219 buffer[5] = 0x68; // pushl $foo$lazy_ptr
2220 buffer[6] = 0x00;
2221 buffer[7] = 0x00;
2222 buffer[8] = 0x00;
2223 buffer[9] = 0x00;
2224 buffer[10] = 0xE9; // jmp dyld_hybrid_stub_binding_helper
2225 buffer[11] = 0x00;
2226 buffer[12] = 0x00;
2227 buffer[13] = 0x00;
2228 buffer[14] = 0x00;
2229 buffer[15] = 0x90; // nop
2230
2231 // the lazy binding info is created later than this helper atom, so there
2232 // is no Reference to update. Instead we blast the offset here.
2233 uint32_t offset;
2234 LittleEndian::set32(offset, fLazyPointerAtom.getLazyBindingInfoOffset());
2235 memcpy(&buffer[1], &offset, 4);
2236 }
2237
2238
2239 template <>
2240 FastStubHelperAtom<x86>::FastStubHelperAtom(Writer<x86>& writer, ObjectFile::Atom& target,
2241 class LazyPointerAtom<x86>& lazyPointer, bool forLazyDylib)
2242 : StubHelperAtom<x86>(writer, target, lazyPointer, forLazyDylib)
2243 {
2244 if ( fgHelperHelperAtom == NULL ) {
2245 fgHelperHelperAtom = new FastStubHelperHelperAtom<x86>::FastStubHelperHelperAtom(fWriter);
2246 fWriter.fAllSynthesizedStubHelpers.push_back(fgHelperHelperAtom);
2247 }
2248 fReferences.push_back(new WriterReference<x86>(6, x86::kPCRel32, fgHelperHelperAtom));
2249 }
2250
2251
2252 template <>
2253 uint64_t FastStubHelperAtom<x86>::getSize() const
2254 {
2255 return 10;
2256 }
2257
2258 template <>
2259 void FastStubHelperAtom<x86>::copyRawContent(uint8_t buffer[]) const
2260 {
2261 buffer[0] = 0x68; // pushl $lazy-info-offset
2262 buffer[1] = 0x00;
2263 buffer[2] = 0x00;
2264 buffer[3] = 0x00;
2265 buffer[4] = 0x00;
2266 buffer[5] = 0xE9; // jmp helperhelper
2267 buffer[6] = 0x00;
2268 buffer[7] = 0x00;
2269 buffer[8] = 0x00;
2270 buffer[9] = 0x00;
2271
2272 // the lazy binding info is created later than this helper atom, so there
2273 // is no Reference to update. Instead we blast the offset here.
2274 uint32_t offset;
2275 LittleEndian::set32(offset, fLazyPointerAtom.getLazyBindingInfoOffset());
2276 memcpy(&buffer[1], &offset, 4);
2277 }
2278
2279 template <typename A>
2280 const char* LazyPointerAtom<A>::getSectionName() const
2281 {
2282 if ( fCloseStub )
2283 return "__lazy_symbol";
2284 else if ( fForLazyDylib )
2285 return "__ld_symbol_ptr";
2286 else
2287 return "__la_symbol_ptr";
2288 }
2289
2290 // specialize lazy pointer for x86_64 to initially point to the stub helper
2291 template <>
2292 LazyPointerAtom<x86_64>::LazyPointerAtom(Writer<x86_64>& writer, ObjectFile::Atom& target, StubAtom<x86_64>& stub, bool forLazyDylib)
2293 : WriterAtom<x86_64>(writer, Segment::fgDataSegment), fName(lazyPointerName(target.getName())), fTarget(target),
2294 fExternalTarget(*stub.getTarget()), fForLazyDylib(forLazyDylib), fCloseStub(false), fLazyBindingOffset(0)
2295 {
2296 if ( forLazyDylib )
2297 writer.fAllSynthesizedLazyDylibPointers.push_back(this);
2298 else
2299 writer.fAllSynthesizedLazyPointers.push_back(this);
2300
2301 ObjectFile::Atom* helper;
2302 if ( writer.fOptions.makeCompressedDyldInfo() && !forLazyDylib ) {
2303 if ( writer.fOptions.makeClassicDyldInfo() )
2304 // hybrid LINKEDIT, no fast bind info for weak symbols so use traditional helper
2305 if ( writer.targetRequiresWeakBinding(target) )
2306 helper = new ClassicStubHelperAtom<x86_64>(writer, target, *this, forLazyDylib);
2307 else
2308 helper = new HybridStubHelperAtom<x86_64>(writer, target, *this, forLazyDylib);
2309 else {
2310 if ( target.getDefinitionKind() == ObjectFile::Atom::kWeakDefinition )
2311 helper = &target;
2312 else
2313 helper = new FastStubHelperAtom<x86_64>(writer, target, *this, forLazyDylib);
2314 }
2315 }
2316 else {
2317 helper = new ClassicStubHelperAtom<x86_64>(writer, target, *this, forLazyDylib);
2318 }
2319 fReferences.push_back(new WriterReference<x86_64>(0, x86_64::kPointer, helper));
2320 }
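// Summary of the helper selection above (the x86 constructor below follows the same
// pattern): a classic-only link always gets a ClassicStubHelperAtom; a hybrid
// classic+compressed link gets a HybridStubHelperAtom, except that symbols needing
// weak binding fall back to the classic helper; a compressed-only link gets a
// FastStubHelperAtom, except that lazy pointers to weak definitions simply point at
// the target. Lazy-dylib pointers always use the classic helper.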
2321
2322
2323 // specialize lazy pointer for x86 to initially point to the stub helper
2324 template <>
2325 LazyPointerAtom<x86>::LazyPointerAtom(Writer<x86>& writer, ObjectFile::Atom& target, StubAtom<x86>& stub, bool forLazyDylib)
2326 : WriterAtom<x86>(writer, Segment::fgDataSegment), fName(lazyPointerName(target.getName())), fTarget(target),
2327 fExternalTarget(*stub.getTarget()), fForLazyDylib(forLazyDylib), fCloseStub(false), fLazyBindingOffset(0)
2328 {
2329 if ( forLazyDylib )
2330 writer.fAllSynthesizedLazyDylibPointers.push_back(this);
2331 else
2332 writer.fAllSynthesizedLazyPointers.push_back(this);
2333
2334 ObjectFile::Atom* helper;
2335 if ( writer.fOptions.makeCompressedDyldInfo() && !forLazyDylib ) {
2336 if ( writer.fOptions.makeClassicDyldInfo() ) {
2337 // hybrid LINKEDIT, no fast bind info for weak symbols so use traditional helper
2338 if ( writer.targetRequiresWeakBinding(target) )
2339 helper = new ClassicStubHelperAtom<x86>(writer, target, *this, forLazyDylib);
2340 else
2341 helper = new HybridStubHelperAtom<x86>(writer, target, *this, forLazyDylib);
2342 }
2343 else {
2344 if ( target.getDefinitionKind() == ObjectFile::Atom::kWeakDefinition )
2345 helper = &target;
2346 else
2347 helper = new FastStubHelperAtom<x86>(writer, target, *this, forLazyDylib);
2348 }
2349 }
2350 else {
2351 helper = new ClassicStubHelperAtom<x86>(writer, target, *this, forLazyDylib);
2352 }
2353 fReferences.push_back(new WriterReference<x86>(0, x86::kPointer, helper));
2354 }
2355
2356 // specialize lazy pointer for arm to initially point to the stub helper
2357 template <>
2358 LazyPointerAtom<arm>::LazyPointerAtom(Writer<arm>& writer, ObjectFile::Atom& target, StubAtom<arm>& stub, bool forLazyDylib)
2359 : WriterAtom<arm>(writer, Segment::fgDataSegment), fName(lazyPointerName(target.getName())), fTarget(target),
2360 fExternalTarget(*stub.getTarget()), fForLazyDylib(forLazyDylib), fCloseStub(false), fLazyBindingOffset(0)
2361 {
2362 if ( forLazyDylib )
2363 writer.fAllSynthesizedLazyDylibPointers.push_back(this);
2364 else
2365 writer.fAllSynthesizedLazyPointers.push_back(this);
2366
2367 // The one-instruction (short) stubs must be close to their lazy pointers
2368 if ( stub.fKind == StubAtom<arm>::kStubShort )
2369 fCloseStub = true;
2370
2371 ObjectFile::Atom* helper;
2372 if ( forLazyDylib ) {
2373 if ( writer.fDyldLazyDylibHelper == NULL )
2374 throw "symbol dyld_lazy_dylib_stub_binding_helper not defined (usually in lazydylib1.o)";
2375 helper = writer.fDyldLazyDylibHelper;
2376 }
2377 else if ( writer.fOptions.makeCompressedDyldInfo() ) {
2378 if ( target.getDefinitionKind() == ObjectFile::Atom::kWeakDefinition )
2379 helper = &target;
2380 else
2381 helper = new FastStubHelperAtom<arm>(writer, target, *this, forLazyDylib);
2382 }
2383 else {
2384 if ( writer.fDyldClassicHelperAtom == NULL )
2385 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
2386 helper = writer.fDyldClassicHelperAtom;
2387 }
2388 fReferences.push_back(new WriterReference<arm>(0, arm::kPointer, helper));
2389 }
2390
2391 template <typename A>
2392 LazyPointerAtom<A>::LazyPointerAtom(Writer<A>& writer, ObjectFile::Atom& target, StubAtom<A>& stub, bool forLazyDylib)
2393 : WriterAtom<A>(writer, Segment::fgDataSegment), fName(lazyPointerName(target.getName())), fTarget(target),
2394 fExternalTarget(*stub.getTarget()), fForLazyDylib(forLazyDylib), fCloseStub(false), fLazyBindingOffset(0)
2395 {
2396 if ( forLazyDylib )
2397 writer.fAllSynthesizedLazyDylibPointers.push_back(this);
2398 else
2399 writer.fAllSynthesizedLazyPointers.push_back(this);
2400
2401 fReferences.push_back(new WriterReference<A>(0, A::kPointer, &target));
2402 }
2403
2404
2405
2406 template <typename A>
2407 const char* LazyPointerAtom<A>::lazyPointerName(const char* name)
2408 {
2409 char* buf;
2410 asprintf(&buf, "%s$lazy_pointer", name);
2411 return buf;
2412 }
2413
2414 template <typename A>
2415 void LazyPointerAtom<A>::copyRawContent(uint8_t buffer[]) const
2416 {
2417 bzero(buffer, getSize());
2418 }
2419
2420
2421 template <typename A>
2422 NonLazyPointerAtom<A>::NonLazyPointerAtom(Writer<A>& writer, ObjectFile::Atom& target)
2423 : WriterAtom<A>(writer, Segment::fgDataSegment), fName(nonlazyPointerName(target.getName())), fTarget(&target)
2424 {
2425 writer.fAllSynthesizedNonLazyPointers.push_back(this);
2426 fReferences.push_back(new WriterReference<A>(0, A::kPointer, &target));
2427 }
2428
2429 template <typename A>
2430 NonLazyPointerAtom<A>::NonLazyPointerAtom(Writer<A>& writer)
2431 : WriterAtom<A>(writer, Segment::fgDataSegment), fName("none"), fTarget(NULL)
2432 {
2433 writer.fAllSynthesizedNonLazyPointers.push_back(this);
2434 }
2435
2436 template <typename A>
2437 NonLazyPointerAtom<A>::NonLazyPointerAtom(Writer<A>& writer, const char* targetName)
2438 : WriterAtom<A>(writer, Segment::fgDataSegment), fName(nonlazyPointerName(targetName)), fTarget(NULL)
2439 {
2440 writer.fAllSynthesizedNonLazyPointers.push_back(this);
2441 fReferences.push_back(new WriterReference<A>(0, A::kPointer, targetName));
2442 }
2443
2444 template <typename A>
2445 const char* NonLazyPointerAtom<A>::nonlazyPointerName(const char* name)
2446 {
2447 char* buf;
2448 asprintf(&buf, "%s$non_lazy_pointer", name);
2449 return buf;
2450 }
2451
2452 template <typename A>
2453 void NonLazyPointerAtom<A>::copyRawContent(uint8_t buffer[]) const
2454 {
2455 bzero(buffer, getSize());
2456 }
2457
2458
2459
2460
2461 template <>
2462 ObjectFile::Alignment StubAtom<ppc>::getAlignment() const
2463 {
2464 return 2;
2465 }
2466
2467 template <>
2468 ObjectFile::Alignment StubAtom<ppc64>::getAlignment() const
2469 {
2470 return 2;
2471 }
2472
2473 template <>
2474 ObjectFile::Alignment StubAtom<arm>::getAlignment() const
2475 {
2476 return 2;
2477 }
2478
2479 template <>
2480 StubAtom<ppc>::StubAtom(Writer<ppc>& writer, ObjectFile::Atom& target, bool forLazyDylib)
2481 : WriterAtom<ppc>(writer, Segment::fgTextSegment), fName(stubName(target.getName())),
2482 fTarget(target), fForLazyDylib(forLazyDylib)
2483 {
2484 writer.fAllSynthesizedStubs.push_back(this);
2485 LazyPointerAtom<ppc>* lp;
2486 if ( fWriter.fOptions.prebind() ) {
2487 // for prebound ppc, the lazy pointer starts out pointing to the target symbol's address
2488 // if the target is a weak definition within this linkage unit, or to zero if the target is in some dylib
2489 lp = new LazyPointerAtom<ppc>(writer, target, *this, forLazyDylib);
2490 }
2491 else {
2492 // for non-prebound ppc, the lazy pointer starts out pointing to the dyld_stub_binding_helper glue code
2493 if ( forLazyDylib ) {
2494 if ( writer.fDyldLazyDylibHelper == NULL )
2495 throw "symbol dyld_lazy_dylib_stub_binding_helper not defined (usually in lazydylib1.o)";
2496 lp = new LazyPointerAtom<ppc>(writer, *writer.fDyldLazyDylibHelper, *this, forLazyDylib);
2497 }
2498 else {
2499 if ( writer.fDyldClassicHelperAtom == NULL )
2500 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
2501 lp = new LazyPointerAtom<ppc>(writer, *writer.fDyldClassicHelperAtom, *this, forLazyDylib);
2502 }
2503 }
2504 fKind = ( fWriter.fSlideable ? kStubPIC : kStubNoPIC );
2505 if ( fKind == kStubPIC ) {
2506 // picbase is 8 bytes into atom
2507 fReferences.push_back(new WriterReference<ppc>(12, ppc::kPICBaseHigh16, lp, 0, this, 8));
2508 fReferences.push_back(new WriterReference<ppc>(20, ppc::kPICBaseLow16, lp, 0, this, 8));
2509 }
2510 else {
2511 fReferences.push_back(new WriterReference<ppc>(0, ppc::kAbsHigh16AddLow, lp));
2512 fReferences.push_back(new WriterReference<ppc>(4, ppc::kAbsLow16, lp));
2513 }
2514 }
2515
2516 template <>
2517 StubAtom<ppc64>::StubAtom(Writer<ppc64>& writer, ObjectFile::Atom& target, bool forLazyDylib)
2518 : WriterAtom<ppc64>(writer, Segment::fgTextSegment), fName(stubName(target.getName())),
2519 fTarget(target), fForLazyDylib(forLazyDylib)
2520 {
2521 writer.fAllSynthesizedStubs.push_back(this);
2522
2523 LazyPointerAtom<ppc64>* lp;
2524 if ( forLazyDylib ) {
2525 if ( writer.fDyldLazyDylibHelper == NULL )
2526 throw "symbol dyld_lazy_dylib_stub_binding_helper not defined (usually in lazydylib1.o)";
2527 lp = new LazyPointerAtom<ppc64>(writer, *writer.fDyldLazyDylibHelper, *this, forLazyDylib);
2528 }
2529 else {
2530 if ( writer.fDyldClassicHelperAtom == NULL )
2531 throw "symbol dyld_stub_binding_helper not defined (usually in crt1.o/dylib1.o/bundle1.o)";
2532 lp = new LazyPointerAtom<ppc64>(writer, *writer.fDyldClassicHelperAtom, *this, forLazyDylib);
2533 }
2534 if ( fWriter.fSlideable || ((fWriter.fPageZeroAtom != NULL) && (fWriter.fPageZeroAtom->getSize() > 4096)) )
2535 fKind = kStubPIC;
2536 else
2537 fKind = kStubNoPIC;
2538 if ( fKind == kStubPIC ) {
2539 // picbase is 8 bytes into atom
2540 fReferences.push_back(new WriterReference<ppc64>(12, ppc64::kPICBaseHigh16, lp, 0, this, 8));
2541 fReferences.push_back(new WriterReference<ppc64>(20, ppc64::kPICBaseLow14, lp, 0, this, 8));
2542 }
2543 else {
2544 fReferences.push_back(new WriterReference<ppc64>(0, ppc64::kAbsHigh16AddLow, lp));
2545 fReferences.push_back(new WriterReference<ppc64>(4, ppc64::kAbsLow14, lp));
2546 }
2547 }
2548
2549 template <>
2550 StubAtom<x86>::StubAtom(Writer<x86>& writer, ObjectFile::Atom& target, bool forLazyDylib)
2551 : WriterAtom<x86>(writer, (writer.fOptions.makeCompressedDyldInfo()|| forLazyDylib) ? Segment::fgTextSegment : Segment::fgImportSegment),
2552 fName(NULL), fTarget(target), fForLazyDylib(forLazyDylib)
2553 {
2554 if ( writer.fOptions.makeCompressedDyldInfo() || forLazyDylib ) {
2555 fKind = kStubNoPIC;
2556 fName = stubName(target.getName());
2557 LazyPointerAtom<x86>* lp = new LazyPointerAtom<x86>(writer, target, *this, forLazyDylib);
2558 fReferences.push_back(new WriterReference<x86>(2, x86::kAbsolute32, lp));
2559 writer.fAllSynthesizedStubs.push_back(this);
2560 }
2561 else {
2562 fKind = kJumpTable;
2563 if ( &target == NULL )
2564 asprintf((char**)&fName, "cache-line-crossing-stub %p", this);
2565 else {
2566 fName = stubName(target.getName());
2567 writer.fAllSynthesizedStubs.push_back(this);
2568 }
2569 }
2570 }
2571
2572
2573 template <>
2574 StubAtom<x86_64>::StubAtom(Writer<x86_64>& writer, ObjectFile::Atom& target, bool forLazyDylib)
2575 : WriterAtom<x86_64>(writer, Segment::fgTextSegment), fName(stubName(target.getName())), fTarget(target)
2576 {
2577 writer.fAllSynthesizedStubs.push_back(this);
2578
2579 LazyPointerAtom<x86_64>* lp = new LazyPointerAtom<x86_64>(writer, target, *this, forLazyDylib);
2580 fReferences.push_back(new WriterReference<x86_64>(2, x86_64::kPCRel32, lp));
2581 }
2582
2583 template <>
2584 StubAtom<arm>::StubAtom(Writer<arm>& writer, ObjectFile::Atom& target, bool forLazyDylib)
2585 : WriterAtom<arm>(writer, Segment::fgTextSegment), fName(stubName(target.getName())), fTarget(target)
2586 {
2587 writer.fAllSynthesizedStubs.push_back(this);
2588 if ( (writer.fDylibSymbolCountUpperBound < 900)
2589 && writer.fOptions.makeCompressedDyldInfo()
2590 && (writer.fOptions.outputKind() != Options::kDynamicLibrary)
2591 && !forLazyDylib ) {
2592 // dylibs might have __TEXT and __DATA pulled apart to live in the shared region,
2593 // and if there are > 1000 stubs, the displacement to the lazy pointer may be > 12 bits.
2594 fKind = kStubShort;
2595 }
2596 else if ( fWriter.fSlideable ) {
2597 fKind = kStubPIC;
2598 }
2599 else {
2600 fKind = kStubNoPIC;
2601 }
2602 LazyPointerAtom<arm>* lp = new LazyPointerAtom<arm>(writer, target, *this, forLazyDylib);
2603 switch ( fKind ) {
2604 case kStubPIC:
2605 fReferences.push_back(new WriterReference<arm>(12, arm::kPointerDiff, lp, 0, this, 12));
2606 break;
2607 case kStubNoPIC:
2608 fReferences.push_back(new WriterReference<arm>(8, arm::kReadOnlyPointer, lp));
2609 break;
2610 case kStubShort:
2611 fReferences.push_back(new WriterReference<arm>(0, arm::kPointerDiff12, lp, 0, this, 8));
2612 break;
2613 default:
2614 throw "internal error";
2615 }
2616 }
2617
2618
2619
2620 template <typename A>
2621 const char* StubAtom<A>::stubName(const char* name)
2622 {
2623 char* buf;
2624 asprintf(&buf, "%s$stub", name);
2625 return buf;
2626 }
2627
2628 template <>
2629 uint64_t StubAtom<ppc>::getSize() const
2630 {
2631
2632 return ( (fKind == kStubPIC) ? 32 : 16 );
2633 }
2634
2635 template <>
2636 uint64_t StubAtom<ppc64>::getSize() const
2637 {
2638 return ( (fKind == kStubPIC) ? 32 : 16 );
2639 }
2640
2641
2642 template <>
2643 uint64_t StubAtom<arm>::getSize() const
2644 {
2645 switch ( fKind ) {
2646 case kStubPIC:
2647 return 16;
2648 case kStubNoPIC:
2649 return 12;
2650 case kStubShort:
2651 return 4;
2652 default:
2653 throw "internal error";
2654 }
2655 }
2656
2657 template <>
2658 uint64_t StubAtom<x86>::getSize() const
2659 {
2660 switch ( fKind ) {
2661 case kStubNoPIC:
2662 return 6;
2663 case kJumpTable:
2664 return 5;
2665 default:
2666 throw "internal error";
2667 }
2668 }
2669
2670 template <>
2671 uint64_t StubAtom<x86_64>::getSize() const
2672 {
2673 return 6;
2674 }
2675
2676 template <>
2677 ObjectFile::Alignment StubAtom<x86>::getAlignment() const
2678 {
2679 switch ( fKind ) {
2680 case kStubNoPIC:
2681 return 1;
2682 case kJumpTable:
2683 return 0; // special case x86 self-modifying stubs to be byte aligned
2684 default:
2685 throw "internal error";
2686 }
2687 }
2688
2689 template <>
2690 void StubAtom<ppc64>::copyRawContent(uint8_t buffer[]) const
2691 {
2692 if ( fKind == kStubPIC ) {
2693 OSWriteBigInt32(&buffer [0], 0, 0x7c0802a6); // mflr r0
2694 OSWriteBigInt32(&buffer[ 4], 0, 0x429f0005); // bcl 20,31,Lpicbase
2695 OSWriteBigInt32(&buffer[ 8], 0, 0x7d6802a6); // Lpicbase: mflr r11
2696 OSWriteBigInt32(&buffer[12], 0, 0x3d6b0000); // addis r11,r11,ha16(L_fwrite$lazy_ptr-Lpicbase)
2697 OSWriteBigInt32(&buffer[16], 0, 0x7c0803a6); // mtlr r0
2698 OSWriteBigInt32(&buffer[20], 0, 0xe98b0001); // ldu r12,lo16(L_fwrite$lazy_ptr-Lpicbase)(r11)
2699 OSWriteBigInt32(&buffer[24], 0, 0x7d8903a6); // mtctr r12
2700 OSWriteBigInt32(&buffer[28], 0, 0x4e800420); // bctr
2701 }
2702 else {
2703 OSWriteBigInt32(&buffer[ 0], 0, 0x3d600000); // lis r11,ha16(L_fwrite$lazy_ptr)
2704 OSWriteBigInt32(&buffer[ 4], 0, 0xe98b0001); // ldu r12,lo16(L_fwrite$lazy_ptr)(r11)
2705 OSWriteBigInt32(&buffer[ 8], 0, 0x7d8903a6); // mtctr r12
2706 OSWriteBigInt32(&buffer[12], 0, 0x4e800420); // bctr
2707 }
2708 }
2709
2710 template <>
2711 void StubAtom<ppc>::copyRawContent(uint8_t buffer[]) const
2712 {
2713 if ( fKind == kStubPIC ) {
2714 OSWriteBigInt32(&buffer[ 0], 0, 0x7c0802a6); // mflr r0
2715 OSWriteBigInt32(&buffer[ 4], 0, 0x429f0005); // bcl 20,31,Lpicbase
2716 OSWriteBigInt32(&buffer[ 8], 0, 0x7d6802a6); // Lpicbase: mflr r11
2717 OSWriteBigInt32(&buffer[12], 0, 0x3d6b0000); // addis r11,r11,ha16(L_fwrite$lazy_ptr-Lpicbase)
2718 OSWriteBigInt32(&buffer[16], 0, 0x7c0803a6); // mtlr r0
2719 OSWriteBigInt32(&buffer[20], 0, 0x858b0000); // lwzu r12,lo16(L_fwrite$lazy_ptr-Lpicbase)(r11)
2720 OSWriteBigInt32(&buffer[24], 0, 0x7d8903a6); // mtctr r12
2721 OSWriteBigInt32(&buffer[28], 0, 0x4e800420); // bctr
2722 }
2723 else {
2724 OSWriteBigInt32(&buffer[ 0], 0, 0x3d600000); // lis r11,ha16(L_fwrite$lazy_ptr)
2725 OSWriteBigInt32(&buffer[ 4], 0, 0x858b0000); // lwzu r12,lo16(L_fwrite$lazy_ptr)(r11)
2726 OSWriteBigInt32(&buffer[ 8], 0, 0x7d8903a6); // mtctr r12
2727 OSWriteBigInt32(&buffer[12], 0, 0x4e800420); // bctr
2728 }
2729 }
2730
2731 template <>
2732 void StubAtom<x86>::copyRawContent(uint8_t buffer[]) const
2733 {
2734 switch ( fKind ) {
2735 case kStubNoPIC:
2736 buffer[0] = 0xFF; // jmp *foo$lazy_pointer
2737 buffer[1] = 0x25;
2738 buffer[2] = 0x00;
2739 buffer[3] = 0x00;
2740 buffer[4] = 0x00;
2741 buffer[5] = 0x00;
2742 break;
2743 case kJumpTable:
2744 if ( fWriter.fOptions.prebind() ) {
2745 uint32_t address = this->getAddress();
2746 int32_t rel32 = 0 - (address+5);
2747 buffer[0] = 0xE9;
2748 buffer[1] = rel32 & 0xFF;
2749 buffer[2] = (rel32 >> 8) & 0xFF;
2750 buffer[3] = (rel32 >> 16) & 0xFF;
2751 buffer[4] = (rel32 >> 24) & 0xFF;
2752 }
2753 else {
2754 buffer[0] = 0xF4;
2755 buffer[1] = 0xF4;
2756 buffer[2] = 0xF4;
2757 buffer[3] = 0xF4;
2758 buffer[4] = 0xF4;
2759 }
2760 break;
2761 default:
2762 throw "internal error";
2763 }
2764 }
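// Note on the kJumpTable case above: __IMPORT,__jump_table entries are 5-byte
// self-modifying stubs. They start life either as a prebound jmp or as five hlt
// (0xF4) bytes, and dyld later overwrites each entry in place with a jmp rel32 to
// the bound address, which is why these stubs are byte-aligned and placed in the
// __IMPORT segment rather than __TEXT. (Summary of the classic i386 fast-stub
// scheme; not spelled out in this file.)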
2765
2766 template <>
2767 void StubAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
2768 {
2769 buffer[0] = 0xFF; // jmp *foo$lazy_pointer(%rip)
2770 buffer[1] = 0x25;
2771 buffer[2] = 0x00;
2772 buffer[3] = 0x00;
2773 buffer[4] = 0x00;
2774 buffer[5] = 0x00;
2775 }
2776
2777 template <>
2778 void StubAtom<arm>::copyRawContent(uint8_t buffer[]) const
2779 {
2780 switch ( fKind ) {
2781 case kStubPIC:
2782 OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc004); // ldr ip, pc + 12
2783 OSWriteLittleInt32(&buffer[ 4], 0, 0xe08fc00c); // add ip, pc, ip
2784 OSWriteLittleInt32(&buffer[ 8], 0, 0xe59cf000); // ldr pc, [ip]
2785 OSWriteLittleInt32(&buffer[12], 0, 0x00000000); // .long L_foo$lazy_ptr - (L1$scv + 8)
2786 break;
2787 case kStubNoPIC:
2788 OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc000); // ldr ip, [pc, #0]
2789 OSWriteLittleInt32(&buffer[ 4], 0, 0xe59cf000); // ldr pc, [ip]
2790 OSWriteLittleInt32(&buffer[ 8], 0, 0x00000000); // .long L_foo$lazy_ptr
2791 break;
2792 case kStubShort:
2793 OSWriteLittleInt32(&buffer[ 0], 0, 0xE59FF000);// ldr pc, [pc, #foo$lazy_ptr]
2794 break;
2795 default:
2796 throw "internal error";
2797 }
2798 }
2799
2800 // x86_64 stubs are 6 bytes
2801 template <>
2802 ObjectFile::Alignment StubAtom<x86_64>::getAlignment() const
2803 {
2804 return 1;
2805 }
2806
2807 template <>
2808 const char* StubAtom<ppc>::getSectionName() const
2809 {
2810 return ( (fKind == kStubPIC) ? "__picsymbolstub1" : "__symbol_stub1");
2811 }
2812
2813 template <>
2814 const char* StubAtom<ppc64>::getSectionName() const
2815 {
2816 return ( (fKind == kStubPIC) ? "__picsymbolstub1" : "__symbol_stub1");
2817 }
2818
2819 template <>
2820 const char* StubAtom<arm>::getSectionName() const
2821 {
2822 switch ( fKind ) {
2823 case kStubPIC:
2824 return "__picsymbolstub4";
2825 case kStubNoPIC:
2826 return "__symbol_stub4";
2827 case kStubShort:
2828 return "__symbolstub1";
2829 default:
2830 throw "internal error";
2831 }
2832 }
2833
2834 template <>
2835 const char* StubAtom<x86>::getSectionName() const
2836 {
2837 switch ( fKind ) {
2838 case kStubNoPIC:
2839 return "__symbol_stub";
2840 case kJumpTable:
2841 return "__jump_table";
2842 default:
2843 throw "internal error";
2844 }
2845 }
2846
2847
2848
2849
2850 struct AtomByNameSorter
2851 {
2852 bool operator()(ObjectFile::Atom* left, ObjectFile::Atom* right)
2853 {
2854 return (strcmp(left->getName(), right->getName()) < 0);
2855 }
2856 };
2857
2858 template <typename P>
2859 struct ExternalRelocSorter
2860 {
2861 bool operator()(const macho_relocation_info<P>& left, const macho_relocation_info<P>& right)
2862 {
2863 // sort first by symbol number
2864 if ( left.r_symbolnum() != right.r_symbolnum() )
2865 return (left.r_symbolnum() < right.r_symbolnum());
2866 // then sort all uses of the same symbol by address
2867 return (left.r_address() < right.r_address());
2868 }
2869 };
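// Sorting external relocations by symbol index, and then by address within each
// symbol, groups every use of a symbol together; presumably this lets classic dyld
// look a symbol up once and then apply all of its fixups in one pass.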
2870
2871
2872 template <typename A>
2873 Writer<A>::Writer(const char* path, Options& options, std::vector<ExecutableFile::DyLibUsed>& dynamicLibraries)
2874 : ExecutableFile::Writer(dynamicLibraries), fFilePath(strdup(path)), fOptions(options),
2875 fAllAtoms(NULL), fStabs(NULL), fRegularDefAtomsThatOverrideADylibsWeakDef(NULL), fLoadCommandsSection(NULL),
2876 fLoadCommandsSegment(NULL), fMachHeaderAtom(NULL), fEncryptionLoadCommand(NULL), fSegmentCommands(NULL),
2877 fSymbolTableCommands(NULL), fHeaderPadding(NULL), fUnwindInfoAtom(NULL),
2878 fUUIDAtom(NULL), fPadSegmentInfo(NULL), fEntryPoint( NULL),
2879 fDyldClassicHelperAtom(NULL), fDyldCompressedHelperAtom(NULL), fDyldLazyDylibHelper(NULL),
2880 fSectionRelocationsAtom(NULL), fCompressedRebaseInfoAtom(NULL), fCompressedBindingInfoAtom(NULL),
2881 fCompressedWeakBindingInfoAtom(NULL), fCompressedLazyBindingInfoAtom(NULL), fCompressedExportInfoAtom(NULL),
2882 fLocalRelocationsAtom(NULL), fExternalRelocationsAtom(NULL),
2883 fSymbolTableAtom(NULL), fSplitCodeToDataContentAtom(NULL), fIndirectTableAtom(NULL), fModuleInfoAtom(NULL),
2884 fStringsAtom(NULL), fPageZeroAtom(NULL), fFastStubGOTAtom(NULL), fSymbolTable(NULL), fSymbolTableCount(0),
2885 fSymbolTableStabsCount(0), fSymbolTableLocalCount(0), fSymbolTableExportCount(0), fSymbolTableImportCount(0),
2886 fLargestAtomSize(1),
2887 fEmitVirtualSections(false), fHasWeakExports(false), fReferencesWeakImports(false),
2888 fCanScatter(false), fWritableSegmentPastFirst4GB(false), fNoReExportedDylibs(false),
2889 fBiggerThanTwoGigs(false), fSlideable(false), fHasThumbBranches(false),
2890 fFirstWritableSegment(NULL), fAnonNameIndex(1000)
2891 {
2892 switch ( fOptions.outputKind() ) {
2893 case Options::kDynamicExecutable:
2894 case Options::kStaticExecutable:
2895 if ( fOptions.zeroPageSize() != 0 )
2896 fWriterSynthesizedAtoms.push_back(fPageZeroAtom = new PageZeroAtom<A>(*this));
2897 if ( fOptions.outputKind() == Options::kDynamicExecutable )
2898 fWriterSynthesizedAtoms.push_back(new DsoHandleAtom<A>(*this));
2899 fWriterSynthesizedAtoms.push_back(fMachHeaderAtom = new MachHeaderAtom<A>(*this));
2900 fWriterSynthesizedAtoms.push_back(new SegmentLoadCommandsAtom<A>(*this));
2901 if ( fOptions.makeCompressedDyldInfo() )
2902 fWriterSynthesizedAtoms.push_back(new DyldInfoLoadCommandsAtom<A>(*this));
2903 fWriterSynthesizedAtoms.push_back(new SymbolTableLoadCommandsAtom<A>(*this));
2904 if ( fOptions.outputKind() == Options::kDynamicExecutable )
2905 fWriterSynthesizedAtoms.push_back(new DyldLoadCommandsAtom<A>(*this));
2906 fWriterSynthesizedAtoms.push_back(fUUIDAtom = new UUIDLoadCommandAtom<A>(*this));
2907 fWriterSynthesizedAtoms.push_back(new ThreadsLoadCommandsAtom<A>(*this));
2908 if ( fOptions.hasCustomStack() )
2909 fWriterSynthesizedAtoms.push_back(new CustomStackAtom<A>(*this));
2910 fWriterSynthesizedAtoms.push_back(fHeaderPadding = new LoadCommandsPaddingAtom<A>(*this));
2911 fWriterSynthesizedAtoms.push_back(new MinimalTextAtom<A>(*this));
2912 if ( fOptions.needsUnwindInfoSection() )
2913 fWriterSynthesizedAtoms.push_back(fUnwindInfoAtom = new UnwindInfoAtom<A>(*this));
2914 fWriterSynthesizedAtoms.push_back(fSectionRelocationsAtom = new SectionRelocationsLinkEditAtom<A>(*this));
2915 if ( fOptions.makeCompressedDyldInfo() ) {
2916 fWriterSynthesizedAtoms.push_back(fCompressedRebaseInfoAtom = new CompressedRebaseInfoLinkEditAtom<A>(*this));
2917 fWriterSynthesizedAtoms.push_back(fCompressedBindingInfoAtom = new CompressedBindingInfoLinkEditAtom<A>(*this));
2918 fWriterSynthesizedAtoms.push_back(fCompressedWeakBindingInfoAtom = new CompressedWeakBindingInfoLinkEditAtom<A>(*this));
2919 fWriterSynthesizedAtoms.push_back(fCompressedLazyBindingInfoAtom = new CompressedLazyBindingInfoLinkEditAtom<A>(*this));
2920 fWriterSynthesizedAtoms.push_back(fCompressedExportInfoAtom = new CompressedExportInfoLinkEditAtom<A>(*this));
2921 }
2922 if ( fOptions.makeClassicDyldInfo() )
2923 fWriterSynthesizedAtoms.push_back(fLocalRelocationsAtom = new LocalRelocationsLinkEditAtom<A>(*this));
2924 fWriterSynthesizedAtoms.push_back(fSymbolTableAtom = new SymbolTableLinkEditAtom<A>(*this));
2925 if ( fOptions.makeClassicDyldInfo() )
2926 fWriterSynthesizedAtoms.push_back(fExternalRelocationsAtom = new ExternalRelocationsLinkEditAtom<A>(*this));
2927 fWriterSynthesizedAtoms.push_back(fIndirectTableAtom = new IndirectTableLinkEditAtom<A>(*this));
2928 fWriterSynthesizedAtoms.push_back(fStringsAtom = new StringsLinkEditAtom<A>(*this));
2929 break;
2930 case Options::kPreload:
2931 fWriterSynthesizedAtoms.push_back(fMachHeaderAtom = new MachHeaderAtom<A>(*this));
2932 fWriterSynthesizedAtoms.push_back(new SegmentLoadCommandsAtom<A>(*this));
2933 fWriterSynthesizedAtoms.push_back(new SymbolTableLoadCommandsAtom<A>(*this));
2934 fWriterSynthesizedAtoms.push_back(fUUIDAtom = new UUIDLoadCommandAtom<A>(*this));
2935 fWriterSynthesizedAtoms.push_back(new ThreadsLoadCommandsAtom<A>(*this));
2936 fWriterSynthesizedAtoms.push_back(fHeaderPadding = new LoadCommandsPaddingAtom<A>(*this));
2937 fWriterSynthesizedAtoms.push_back(fSectionRelocationsAtom = new SectionRelocationsLinkEditAtom<A>(*this));
2938 fWriterSynthesizedAtoms.push_back(fLocalRelocationsAtom = new LocalRelocationsLinkEditAtom<A>(*this));
2939 fWriterSynthesizedAtoms.push_back(fExternalRelocationsAtom = new ExternalRelocationsLinkEditAtom<A>(*this));
2940 fWriterSynthesizedAtoms.push_back(fIndirectTableAtom = new IndirectTableLinkEditAtom<A>(*this));
2941 fWriterSynthesizedAtoms.push_back(fSymbolTableAtom = new SymbolTableLinkEditAtom<A>(*this));
2942 fWriterSynthesizedAtoms.push_back(fStringsAtom = new StringsLinkEditAtom<A>(*this));
2943 break;
2944 case Options::kDynamicLibrary:
2945 case Options::kDynamicBundle:
2946 fWriterSynthesizedAtoms.push_back(new DsoHandleAtom<A>(*this));
2947 case Options::kKextBundle:
2948 fWriterSynthesizedAtoms.push_back(fMachHeaderAtom = new MachHeaderAtom<A>(*this));
2949 fWriterSynthesizedAtoms.push_back(new SegmentLoadCommandsAtom<A>(*this));
2950 if ( fOptions.outputKind() == Options::kDynamicLibrary ) {
2951 fWriterSynthesizedAtoms.push_back(new DylibIDLoadCommandsAtom<A>(*this));
2952 if ( fOptions.initFunctionName() != NULL )
2953 fWriterSynthesizedAtoms.push_back(new RoutinesLoadCommandsAtom<A>(*this));
2954 }
2955 fWriterSynthesizedAtoms.push_back(fUUIDAtom = new UUIDLoadCommandAtom<A>(*this));
2956 if ( fOptions.makeCompressedDyldInfo() )
2957 fWriterSynthesizedAtoms.push_back(new DyldInfoLoadCommandsAtom<A>(*this));
2958 fWriterSynthesizedAtoms.push_back(new SymbolTableLoadCommandsAtom<A>(*this));
2959 if ( fOptions.sharedRegionEligible() )
2960 fWriterSynthesizedAtoms.push_back(new SegmentSplitInfoLoadCommandsAtom<A>(*this));
2961 fWriterSynthesizedAtoms.push_back(fHeaderPadding = new LoadCommandsPaddingAtom<A>(*this));
2962 fWriterSynthesizedAtoms.push_back(new MinimalTextAtom<A>(*this));
2963 if ( fOptions.needsUnwindInfoSection() )
2964 fWriterSynthesizedAtoms.push_back(fUnwindInfoAtom = new UnwindInfoAtom<A>(*this));
2965 fWriterSynthesizedAtoms.push_back(fSectionRelocationsAtom = new SectionRelocationsLinkEditAtom<A>(*this));
2966 if ( fOptions.makeCompressedDyldInfo() ) {
2967 fWriterSynthesizedAtoms.push_back(fCompressedRebaseInfoAtom = new CompressedRebaseInfoLinkEditAtom<A>(*this));
2968 fWriterSynthesizedAtoms.push_back(fCompressedBindingInfoAtom = new CompressedBindingInfoLinkEditAtom<A>(*this));
2969 fWriterSynthesizedAtoms.push_back(fCompressedWeakBindingInfoAtom = new CompressedWeakBindingInfoLinkEditAtom<A>(*this));
2970 fWriterSynthesizedAtoms.push_back(fCompressedLazyBindingInfoAtom = new CompressedLazyBindingInfoLinkEditAtom<A>(*this));
2971 fWriterSynthesizedAtoms.push_back(fCompressedExportInfoAtom = new CompressedExportInfoLinkEditAtom<A>(*this));
2972 }
2973 if ( fOptions.makeClassicDyldInfo() )
2974 fWriterSynthesizedAtoms.push_back(fLocalRelocationsAtom = new LocalRelocationsLinkEditAtom<A>(*this));
2975 if ( fOptions.sharedRegionEligible() ) {
2976 fWriterSynthesizedAtoms.push_back(fSplitCodeToDataContentAtom = new SegmentSplitInfoContentAtom<A>(*this));
2977 }
2978 fWriterSynthesizedAtoms.push_back(fSymbolTableAtom = new SymbolTableLinkEditAtom<A>(*this));
2979 if ( fOptions.makeClassicDyldInfo() )
2980 fWriterSynthesizedAtoms.push_back(fExternalRelocationsAtom = new ExternalRelocationsLinkEditAtom<A>(*this));
2981 if ( fOptions.outputKind() != Options::kKextBundle )
2982 fWriterSynthesizedAtoms.push_back(fIndirectTableAtom = new IndirectTableLinkEditAtom<A>(*this));
2983 if ( this->needsModuleTable() )
2984 fWriterSynthesizedAtoms.push_back(fModuleInfoAtom = new ModuleInfoLinkEditAtom<A>(*this));
2985 fWriterSynthesizedAtoms.push_back(fStringsAtom = new StringsLinkEditAtom<A>(*this));
2986 break;
2987 case Options::kObjectFile:
2988 fWriterSynthesizedAtoms.push_back(fMachHeaderAtom = new MachHeaderAtom<A>(*this));
2989 fWriterSynthesizedAtoms.push_back(new SegmentLoadCommandsAtom<A>(*this));
2990 fWriterSynthesizedAtoms.push_back(fUUIDAtom = new UUIDLoadCommandAtom<A>(*this));
2991 fWriterSynthesizedAtoms.push_back(new SymbolTableLoadCommandsAtom<A>(*this));
2992 fWriterSynthesizedAtoms.push_back(fHeaderPadding = new LoadCommandsPaddingAtom<A>(*this));
2993 fWriterSynthesizedAtoms.push_back(fSectionRelocationsAtom = new SectionRelocationsLinkEditAtom<A>(*this));
2994 fWriterSynthesizedAtoms.push_back(fLocalRelocationsAtom = new LocalRelocationsLinkEditAtom<A>(*this));
2995 fWriterSynthesizedAtoms.push_back(fSymbolTableAtom = new SymbolTableLinkEditAtom<A>(*this));
2996 fWriterSynthesizedAtoms.push_back(fExternalRelocationsAtom = new ExternalRelocationsLinkEditAtom<A>(*this));
2997 fWriterSynthesizedAtoms.push_back(fIndirectTableAtom = new IndirectTableLinkEditAtom<A>(*this));
2998 fWriterSynthesizedAtoms.push_back(fStringsAtom = new StringsLinkEditAtom<A>(*this));
2999 break;
3000 case Options::kDyld:
3001 fWriterSynthesizedAtoms.push_back(new DsoHandleAtom<A>(*this));
3002 fWriterSynthesizedAtoms.push_back(fMachHeaderAtom = new MachHeaderAtom<A>(*this));
3003 fWriterSynthesizedAtoms.push_back(new SegmentLoadCommandsAtom<A>(*this));
3004 fWriterSynthesizedAtoms.push_back(new SymbolTableLoadCommandsAtom<A>(*this));
3005 fWriterSynthesizedAtoms.push_back(new DyldLoadCommandsAtom<A>(*this));
3006 fWriterSynthesizedAtoms.push_back(fUUIDAtom = new UUIDLoadCommandAtom<A>(*this));
3007 fWriterSynthesizedAtoms.push_back(new ThreadsLoadCommandsAtom<A>(*this));
3008 fWriterSynthesizedAtoms.push_back(fHeaderPadding = new LoadCommandsPaddingAtom<A>(*this));
3009 if ( fOptions.needsUnwindInfoSection() )
3010 fWriterSynthesizedAtoms.push_back(fUnwindInfoAtom = new UnwindInfoAtom<A>(*this));
3011 fWriterSynthesizedAtoms.push_back(fLocalRelocationsAtom = new LocalRelocationsLinkEditAtom<A>(*this));
3012 fWriterSynthesizedAtoms.push_back(fSymbolTableAtom = new SymbolTableLinkEditAtom<A>(*this));
3013 fWriterSynthesizedAtoms.push_back(fExternalRelocationsAtom = new ExternalRelocationsLinkEditAtom<A>(*this));
3014 fWriterSynthesizedAtoms.push_back(fIndirectTableAtom = new IndirectTableLinkEditAtom<A>(*this));
3015 fWriterSynthesizedAtoms.push_back(fStringsAtom = new StringsLinkEditAtom<A>(*this));
3016 break;
3017 }
3018
3019 // add extra commands
3020 bool hasReExports = false;
3021 uint32_t ordinal = 1;
3022 switch ( fOptions.outputKind() ) {
3023 case Options::kDynamicExecutable:
3024 if ( fOptions.makeEncryptable() ) {
3025 fEncryptionLoadCommand = new EncryptionLoadCommandsAtom<A>(*this);
3026 fWriterSynthesizedAtoms.push_back(fEncryptionLoadCommand);
3027 }
3028 // fall through
3029 case Options::kDynamicLibrary:
3030 case Options::kDynamicBundle:
3031 {
3032 // add dylib load command atoms for all dynamic libraries
3033 const unsigned int libCount = dynamicLibraries.size();
3034 for (unsigned int i=0; i < libCount; ++i) {
3035 ExecutableFile::DyLibUsed& dylibInfo = dynamicLibraries[i];
3036 //fprintf(stderr, "dynamicLibraries[%d]: reader=%p, %s, install=%s\n", i, dylibInfo.reader, dylibInfo.reader->getPath(), dylibInfo.reader->getInstallPath() );
3037
3038 if ( dylibInfo.options.fReExport ) {
3039 hasReExports = true;
3040 }
3041 else {
3042 const char* parentUmbrella = dylibInfo.reader->parentUmbrella();
3043 if ( (parentUmbrella != NULL) && (fOptions.outputKind() == Options::kDynamicLibrary) ) {
3044 const char* thisIDLastSlash = strrchr(fOptions.installPath(), '/');
3045 if ( (thisIDLastSlash != NULL) && (strcmp(&thisIDLastSlash[1], parentUmbrella) == 0) )
3046 hasReExports = true;
3047 }
3048 }
3049
3050 if ( dylibInfo.options.fWeakImport ) {
3051 fForcedWeakImportReaders.insert(dylibInfo.reader);
3052 }
3053
3054 if ( dylibInfo.options.fBundleLoader ) {
3055 fLibraryToOrdinal[dylibInfo.reader] = EXECUTABLE_ORDINAL;
3056 }
3057 else {
3058 // see if a DylibLoadCommandsAtom has already been created for this install path
3059 bool newDylib = true;
3060 const char* dylibInstallPath = dylibInfo.reader->getInstallPath();
3061 for (unsigned int seenLib=0; seenLib < i; ++seenLib) {
3062 ExecutableFile::DyLibUsed& seenDylibInfo = dynamicLibraries[seenLib];
3063 if ( !seenDylibInfo.options.fBundleLoader ) {
3064 const char* seenDylibInstallPath = seenDylibInfo.reader->getInstallPath();
3065 if ( strcmp(seenDylibInstallPath, dylibInstallPath) == 0 ) {
3066 fLibraryToOrdinal[dylibInfo.reader] = fLibraryToOrdinal[seenDylibInfo.reader];
3067 fLibraryToLoadCommand[dylibInfo.reader] = fLibraryToLoadCommand[seenDylibInfo.reader];
3068 fLibraryAliases[dylibInfo.reader] = seenDylibInfo.reader;
3069 newDylib = false;
3070 break;
3071 }
3072 }
3073 }
3074
3075 if ( newDylib ) {
3076 // assign new ordinal and check for other paired load commands
3077 fLibraryToOrdinal[dylibInfo.reader] = ordinal++;
3078 DylibLoadCommandsAtom<A>* dyliblc = new DylibLoadCommandsAtom<A>(*this, dylibInfo);
3079 fLibraryToLoadCommand[dylibInfo.reader] = dyliblc;
3080 fWriterSynthesizedAtoms.push_back(dyliblc);
3081 if ( dylibInfo.options.fReExport
3082 && !fOptions.useSimplifiedDylibReExports()
3083 && (fOptions.outputKind() == Options::kDynamicLibrary) ) {
3084 // see if the child declares itself a sub-framework of this library
3085 bool isSubFramework = false;
3086 const char* childInUmbrella = dylibInfo.reader->parentUmbrella();
3087 if ( childInUmbrella != NULL ) {
3088 const char* myLeaf = strrchr(fOptions.installPath(), '/');
3089 if ( myLeaf != NULL ) {
3090 if ( strcmp(childInUmbrella, &myLeaf[1]) == 0 )
3091 isSubFramework = true;
3092 }
3093 }
3094 // LC_SUB_FRAMEWORK is in child, so do nothing in parent
3095 if ( ! isSubFramework ) {
3096 // this dylib also needs a sub_x load command
3097 bool isFrameworkReExport = false;
3098 const char* lastSlash = strrchr(dylibInstallPath, '/');
3099 if ( lastSlash != NULL ) {
3100 char frameworkName[strlen(lastSlash)+20];
3101 sprintf(frameworkName, "/%s.framework/", &lastSlash[1]);
3102 isFrameworkReExport = (strstr(dylibInstallPath, frameworkName) != NULL);
3103 }
3104 if ( isFrameworkReExport ) {
3105 // needs a LC_SUB_UMBRELLA command
3106 fWriterSynthesizedAtoms.push_back(new SubUmbrellaLoadCommandsAtom<A>(*this, &lastSlash[1]));
3107 }
3108 else {
3109 // needs a LC_SUB_LIBRARY command
3110 const char* nameStart = &lastSlash[1];
3111 if ( lastSlash == NULL )
3112 nameStart = dylibInstallPath;
3113 int len = strlen(nameStart);
3114 const char* dot = strchr(nameStart, '.');
3115 if ( dot != NULL )
3116 len = dot - nameStart;
3117 fWriterSynthesizedAtoms.push_back(new SubLibraryLoadCommandsAtom<A>(*this, nameStart, len));
3118 }
3119 }
3120 }
3121 }
3122 }
3123 }
3124 // add umbrella command if needed
3125 if ( fOptions.umbrellaName() != NULL ) {
3126 fWriterSynthesizedAtoms.push_back(new UmbrellaLoadCommandsAtom<A>(*this, fOptions.umbrellaName()));
3127 }
3128 // add allowable client commands if used
3129 std::vector<const char*>& allowableClients = fOptions.allowableClients();
3130 for (std::vector<const char*>::iterator it=allowableClients.begin(); it != allowableClients.end(); ++it)
3131 fWriterSynthesizedAtoms.push_back(new AllowableClientLoadCommandsAtom<A>(*this, *it));
3132 }
3133 break;
3134 case Options::kStaticExecutable:
3135 case Options::kObjectFile:
3136 case Options::kDyld:
3137 case Options::kPreload:
3138 case Options::kKextBundle:
3139 break;
3140 }
3141 fNoReExportedDylibs = !hasReExports;
3142
3143 // add any rpath load commands
3144 for(std::vector<const char*>::const_iterator it=fOptions.rpaths().begin(); it != fOptions.rpaths().end(); ++it) {
3145 fWriterSynthesizedAtoms.push_back(new RPathLoadCommandsAtom<A>(*this, *it));
3146 }
3147
3148 // set up fSlideable
3149 switch ( fOptions.outputKind() ) {
3150 case Options::kObjectFile:
3151 case Options::kStaticExecutable:
3152 fSlideable = false;
3153 break;
3154 case Options::kDynamicExecutable:
3155 fSlideable = fOptions.positionIndependentExecutable();
3156 break;
3157 case Options::kDyld:
3158 case Options::kDynamicLibrary:
3159 case Options::kDynamicBundle:
3160 case Options::kPreload:
3161 case Options::kKextBundle:
3162 fSlideable = true;
3163 break;
3164 }
3165
3166 //fprintf(stderr, "ordinals table:\n");
3167 //for (std::map<class ObjectFile::Reader*, uint32_t>::iterator it = fLibraryToOrdinal.begin(); it != fLibraryToOrdinal.end(); ++it) {
3168 // fprintf(stderr, "%d <== %s\n", it->second, it->first->getPath());
3169 //}
3170 }
3171
3172 template <typename A>
3173 Writer<A>::~Writer()
3174 {
3175 if ( fFilePath != NULL )
3176 free((void*)fFilePath);
3177 if ( fSymbolTable != NULL )
3178 delete [] fSymbolTable;
3179 }
3180
3181
3182 // for ppc64, -mdynamic-no-pic only works in low 2GB, so we might need to split the zeropage into two segments
3183 template <>bool Writer<ppc64>::mightNeedPadSegment() { return (fOptions.zeroPageSize() >= 0x80000000ULL); }
3184 template <typename A> bool Writer<A>::mightNeedPadSegment() { return false; }
3185
3186
3187 template <typename A>
3188 ObjectFile::Atom* Writer<A>::getUndefinedProxyAtom(const char* name)
3189 {
3190 if ( fOptions.outputKind() == Options::kKextBundle ) {
3191 return new UndefinedSymbolProxyAtom<A>(*this, name);
3192 }
3193 else if ( fOptions.outputKind() == Options::kObjectFile ) {
3194 // when doing -r -exported_symbols_list, don't create proxy for a symbol
3195 // that is supposed to be exported. We want an error instead
3196 // <rdar://problem/5062685> ld does not report error when -r is used and exported symbols are not defined.
3197 if ( fOptions.hasExportMaskList() && fOptions.shouldExport(name) )
3198 return NULL;
3199 else
3200 return new UndefinedSymbolProxyAtom<A>(*this, name);
3201 }
3202 else if ( (fOptions.undefinedTreatment() != Options::kUndefinedError) || fOptions.allowedUndefined(name) )
3203 return new UndefinedSymbolProxyAtom<A>(*this, name);
3204 else
3205 return NULL;
3206 }
3207
3208 template <typename A>
3209 uint8_t Writer<A>::ordinalForLibrary(ObjectFile::Reader* lib)
3210 {
3211 // flat namespace images use zero for all ordinals
3212 if ( fOptions.nameSpace() != Options::kTwoLevelNameSpace )
3213 return 0;
3214
3215 // is an UndefinedSymbolProxyAtom
3216 if ( lib == this )
3217 if ( fOptions.nameSpace() == Options::kTwoLevelNameSpace )
3218 return DYNAMIC_LOOKUP_ORDINAL;
3219
3220 std::map<class ObjectFile::Reader*, uint32_t>::iterator pos = fLibraryToOrdinal.find(lib);
3221 if ( pos != fLibraryToOrdinal.end() )
3222 return pos->second;
3223
3224 throw "can't find ordinal for imported symbol";
3225 }
3226
3227 template <typename A>
3228 bool Writer<A>::targetRequiresWeakBinding(const ObjectFile::Atom& target)
3229 {
3230 switch ( target.getDefinitionKind() ) {
3231 case ObjectFile::Atom::kExternalWeakDefinition:
3232 case ObjectFile::Atom::kWeakDefinition:
3233 return true;
3234 case ObjectFile::Atom::kExternalDefinition:
3235 case ObjectFile::Atom::kAbsoluteSymbol:
3236 case ObjectFile::Atom::kRegularDefinition:
3237 case ObjectFile::Atom::kTentativeDefinition:
3238 break;
3239 }
3240 return false;
3241 }
3242
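// Map an imported atom to the dylib ordinal recorded in compressed dyld info; flat-namespace
// lookups and definitions satisfied within this image use the BIND_SPECIAL_DYLIB_* pseudo-ordinals.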
3243 template <typename A>
3244 int Writer<A>::compressedOrdinalForImortedAtom(ObjectFile::Atom* target)
3245 {
3246 // flat namespace images use zero for all ordinals
3247 if ( fOptions.nameSpace() != Options::kTwoLevelNameSpace )
3248 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3249
3250 // is an UndefinedSymbolProxyAtom
3251 ObjectFile::Reader* lib = target->getFile();
3252 if ( lib == this )
3253 if ( fOptions.nameSpace() == Options::kTwoLevelNameSpace )
3254 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3255
3256 std::map<class ObjectFile::Reader*, uint32_t>::iterator pos;
3257 switch ( target->getDefinitionKind() ) {
3258 case ObjectFile::Atom::kExternalDefinition:
3259 case ObjectFile::Atom::kExternalWeakDefinition:
3260 pos = fLibraryToOrdinal.find(lib);
3261 if ( pos != fLibraryToOrdinal.end() ) {
3262 if ( pos->second == EXECUTABLE_ORDINAL )
3263 return BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE;
3264 else
3265 return pos->second;
3266 }
3267 break;
3268 case ObjectFile::Atom::kWeakDefinition:
3269 throw "compressedOrdinalForImortedAtom() should not have been called on a weak definition";
3270 case ObjectFile::Atom::kAbsoluteSymbol:
3271 case ObjectFile::Atom::kRegularDefinition:
3272 case ObjectFile::Atom::kTentativeDefinition:
3273 return BIND_SPECIAL_DYLIB_SELF;
3274 }
3275
3276 throw "can't find ordinal for imported symbol";
3277 }
3278
3279
3280 template <typename A>
3281 ObjectFile::Atom& Writer<A>::makeObjcInfoAtom(ObjectFile::Reader::ObjcConstraint objcContraint, bool objcReplacementClasses)
3282 {
3283 return *(new ObjCInfoAtom<A>(*this, objcContraint, objcReplacementClasses));
3284 }
3285
3286 template <typename A>
3287 void Writer<A>::addSynthesizedAtoms(const std::vector<class ObjectFile::Atom*>& existingAtoms,
3288 class ObjectFile::Atom* dyldClassicHelperAtom,
3289 class ObjectFile::Atom* dyldCompressedHelperAtom,
3290 class ObjectFile::Atom* dyldLazyDylibHelperAtom,
3291 bool biggerThanTwoGigs,
3292 uint32_t dylibSymbolCount,
3293 std::vector<class ObjectFile::Atom*>& newAtoms)
3294 {
3295 fDyldClassicHelperAtom = dyldClassicHelperAtom;
3296 fDyldCompressedHelperAtom = dyldCompressedHelperAtom;
3297 fDyldLazyDylibHelper = dyldLazyDylibHelperAtom;
3298 fBiggerThanTwoGigs = biggerThanTwoGigs;
3299 fDylibSymbolCountUpperBound = dylibSymbolCount;
3300
3301 // create inter-library stubs
3302 synthesizeStubs(existingAtoms, newAtoms);
3303 }
3304
3305
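// Top-level driver: lays out the output (sections, file offsets, branch islands, unwind info),
// builds the link-edit content, then writes all atoms. On any error the partially written
// output file is unlinked before the exception is rethrown.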
3306 template <typename A>
3307 uint64_t Writer<A>::write(std::vector<class ObjectFile::Atom*>& atoms,
3308 std::vector<class ObjectFile::Reader::Stab>& stabs,
3309 class ObjectFile::Atom* entryPointAtom,
3310 bool createUUID, bool canScatter, ObjectFile::Reader::CpuConstraint cpuConstraint,
3311 std::set<const class ObjectFile::Atom*>& atomsThatOverrideWeak,
3312 bool hasExternalWeakDefinitions)
3313 {
3314 fAllAtoms = &atoms;
3315 fStabs = &stabs;
3316 fEntryPoint = entryPointAtom;
3317 fCanScatter = canScatter;
3318 fCpuConstraint = cpuConstraint;
3319 fHasWeakExports = hasExternalWeakDefinitions; // dyld needs to search this image as if it had weak exports
3320 fRegularDefAtomsThatOverrideADylibsWeakDef = &atomsThatOverrideWeak;
3321
3322
3323 try {
3324 // generate the UUID if requested
3325 if (createUUID)
3326 fUUIDAtom->generate();
3327
3328 // remove unneeded dylib load commands
3329 optimizeDylibReferences();
3330
3331 // check for -mdynamic-no-pic codegen
3332 scanForAbsoluteReferences();
3333
3334 // create table of unwind info
3335 synthesizeUnwindInfoTable();
3336
3337 // create SegmentInfo and SectionInfo objects and assign all atoms to a section
3338 partitionIntoSections();
3339
3340 // segment load command can now be sized and padding can be set
3341 adjustLoadCommandsAndPadding();
3342
3343 // assign each section a file offset
3344 assignFileOffsets();
3345
3346 // if need to add branch islands, reassign file offsets
3347 if ( addBranchIslands() )
3348 assignFileOffsets();
3349
3350 // now that addresses are assigned, create unwind info
3351 if ( fUnwindInfoAtom != NULL ) {
3352 fUnwindInfoAtom->generate();
3353 // re-layout
3354 adjustLoadCommandsAndPadding();
3355 assignFileOffsets();
3356 }
3357
3358 // make split-seg info now that all atoms exist
3359 createSplitSegContent();
3360
3361 // build symbol table and relocations
3362 buildLinkEdit();
3363
3364 // write map file if requested
3365 writeMap();
3366
3367 // write everything
3368 return writeAtoms();
3369 } catch (...) {
3370 // clean up if any errors
3371 (void)unlink(fFilePath);
3372 throw;
3373 }
3374 }
3375
3376 template <typename A>
3377 void Writer<A>::buildLinkEdit()
3378 {
3379 this->collectExportedAndImportedAndLocalAtoms();
3380 this->buildSymbolTable();
3381 this->buildFixups();
3382 this->adjustLinkEditSections();
3383 }
3384
3385
3386
3387 template <typename A>
3388 uint64_t Writer<A>::getAtomLoadAddress(const ObjectFile::Atom* atom)
3389 {
3390 return atom->getAddress();
3391 // SectionInfo* info = (SectionInfo*)atom->getSection();
3392 // return info->getBaseAddress() + atom->getSectionOffset();
3393 }
3394
3395 template <>
3396 bool Writer<x86_64>::stringsNeedLabelsInObjects()
3397 {
3398 return true;
3399 }
3400
3401 template <typename A>
3402 bool Writer<A>::stringsNeedLabelsInObjects()
3403 {
3404 return false;
3405 }
3406
3407 template <typename A>
3408 const char* Writer<A>::symbolTableName(const ObjectFile::Atom* atom)
3409 {
3410 static unsigned int counter = 0;
3411 const char* name;
3412 if ( stringsNeedLabelsInObjects()
3413 && (atom->getContentType() == ObjectFile::Atom::kCStringType)
3414 && (atom->getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) )
3415 asprintf((char**)&name, "LC%u", counter++);
3416 else
3417 name = atom->getName();
3418 return name;
3420 }
3421
3422 template <typename A>
3423 void Writer<A>::setExportNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry)
3424 {
3425 // set n_strx
3426 entry->set_n_strx(this->fStringsAtom->add(this->symbolTableName(atom)));
3427
3428 // set n_type
3429 if ( atom->getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableInAsAbsolute ) {
3430 entry->set_n_type(N_EXT | N_ABS);
3431 }
3432 else {
3433 entry->set_n_type(N_EXT | N_SECT);
3434 if ( (atom->getScope() == ObjectFile::Atom::scopeLinkageUnit) && (fOptions.outputKind() == Options::kObjectFile) ) {
3435 if ( fOptions.keepPrivateExterns() )
3436 entry->set_n_type(N_EXT | N_SECT | N_PEXT);
3437 }
3438 }
3439
3440 // set n_sect (section number of implementation )
3441 uint8_t sectionIndex = atom->getSection()->getIndex();
3442 entry->set_n_sect(sectionIndex);
3443
3444 // the __mh_execute_header is magic and must be an absolute symbol
3445 if ( (sectionIndex==0)
3446 && (fOptions.outputKind() == Options::kDynamicExecutable)
3447 && (atom->getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableInAndNeverStrip ))
3448 entry->set_n_type(N_EXT | N_ABS);
3449
3450 // set n_desc
3451 uint16_t desc = 0;
3452 if ( atom->isThumb() )
3453 desc |= N_ARM_THUMB_DEF;
3454 if ( atom->getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableInAndNeverStrip )
3455 desc |= REFERENCED_DYNAMICALLY;
3456 if ( atom->dontDeadStrip() && (fOptions.outputKind() == Options::kObjectFile) )
3457 desc |= N_NO_DEAD_STRIP;
3458 if ( atom->getDefinitionKind() == ObjectFile::Atom::kWeakDefinition ) {
3459 desc |= N_WEAK_DEF;
3460 fHasWeakExports = true;
3461 }
3462 entry->set_n_desc(desc);
3463
3464 // set n_value ( address this symbol will be at if this executable is loaded at its preferred address )
3465 if ( atom->getDefinitionKind() == ObjectFile::Atom::kAbsoluteSymbol )
3466 entry->set_n_value(atom->getSectionOffset());
3467 else
3468 entry->set_n_value(this->getAtomLoadAddress(atom));
3469 }
3470
3471 template <typename A>
3472 void Writer<A>::setImportNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry)
3473 {
3474 // set n_strx
3475 entry->set_n_strx(this->fStringsAtom->add(atom->getName()));
3476
3477 // set n_type
3478 if ( fOptions.outputKind() == Options::kObjectFile ) {
3479 if ( (atom->getScope() == ObjectFile::Atom::scopeLinkageUnit)
3480 && (atom->getDefinitionKind() == ObjectFile::Atom::kTentativeDefinition) )
3481 entry->set_n_type(N_UNDF | N_EXT | N_PEXT);
3482 else
3483 entry->set_n_type(N_UNDF | N_EXT);
3484 }
3485 else {
3486 if ( fOptions.prebind() )
3487 entry->set_n_type(N_PBUD | N_EXT);
3488 else
3489 entry->set_n_type(N_UNDF | N_EXT);
3490 }
3491
3492 // set n_sect
3493 entry->set_n_sect(0);
3494
3495 uint16_t desc = 0;
3496 if ( fOptions.outputKind() != Options::kObjectFile ) {
3497 // set n_desc ( high byte is library ordinal, low byte is reference type )
3498 std::map<const ObjectFile::Atom*,ObjectFile::Atom*>::iterator pos = fStubsMap.find(atom);
3499 if ( pos != fStubsMap.end() || ( strncmp(atom->getName(), ".objc_class_name_", 17) == 0) )
3500 desc = REFERENCE_FLAG_UNDEFINED_LAZY;
3501 else
3502 desc = REFERENCE_FLAG_UNDEFINED_NON_LAZY;
3503 try {
3504 uint8_t ordinal = this->ordinalForLibrary(atom->getFile());
3505 //fprintf(stderr, "ordinal=%u from reader=%p for symbol=%s\n", ordinal, atom->getFile(), atom->getName());
3506 SET_LIBRARY_ORDINAL(desc, ordinal);
3507 }
3508 catch (const char* msg) {
3509 throwf("%s %s from %s", msg, atom->getDisplayName(), atom->getFile()->getPath());
3510 }
3511 }
3512 else if ( atom->getDefinitionKind() == ObjectFile::Atom::kTentativeDefinition ) {
3513 uint8_t align = atom->getAlignment().powerOf2;
3514 // always record custom alignment of common symbols to match what compiler does
3515 SET_COMM_ALIGN(desc, align);
3516 }
3517 if ( atom->isThumb() )
3518 desc |= N_ARM_THUMB_DEF;
3519 if ( atom->getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableInAndNeverStrip )
3520 desc |= REFERENCED_DYNAMICALLY;
3521 if ( ( fOptions.outputKind() != Options::kObjectFile) && (atom->getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition) ) {
3522 desc |= N_REF_TO_WEAK;
3523 fReferencesWeakImports = true;
3524 }
3525 // set weak_import attribute
3526 if ( fWeakImportMap[atom] )
3527 desc |= N_WEAK_REF;
3528 entry->set_n_desc(desc);
3529
3530 // set n_value, zero for import proxy and size for tentative definition
3531 entry->set_n_value(atom->getSize());
3532 }
3533
3534
3535 template <typename A>
3536 void Writer<A>::setLocalNlist(const ObjectFile::Atom* atom, macho_nlist<P>* entry)
3537 {
3538 // set n_strx
3539 const char* symbolName = this->symbolTableName(atom);
3540 char anonName[32];
3541 if ( (fOptions.outputKind() == Options::kObjectFile) && !fOptions.keepLocalSymbol(symbolName) ) {
3542 if ( stringsNeedLabelsInObjects() && (atom->getContentType() == ObjectFile::Atom::kCStringType) ) {
3543 // don't use 'l' labels for x86_64 strings
3544 // <rdar://problem/6605499> x86_64 obj-c runtime confused when static lib is stripped
3545 }
3546 else {
3547 sprintf(anonName, "l%u", fAnonNameIndex++);
3548 symbolName = anonName;
3549 }
3550 }
3551 entry->set_n_strx(this->fStringsAtom->add(symbolName));
3552
3553 // set n_type
3554 uint8_t type = N_SECT;
3555 if ( atom->getDefinitionKind() == ObjectFile::Atom::kAbsoluteSymbol )
3556 type = N_ABS;
3557 if ( atom->getScope() == ObjectFile::Atom::scopeLinkageUnit )
3558 type |= N_PEXT;
3559 entry->set_n_type(type);
3560
3561 // set n_sect (section number of implementation )
3562 uint8_t sectIndex = atom->getSection()->getIndex();
3563 if ( sectIndex == 0 ) {
3564 // see <mach-o/ldsyms.h> synthesized label for mach_header needs special section number...
3565 if ( strcmp(atom->getSectionName(), "._mach_header") == 0 )
3566 sectIndex = 1;
3567 }
3568 entry->set_n_sect(sectIndex);
3569
3570 // set n_desc
3571 uint16_t desc = 0;
3572 if ( atom->dontDeadStrip() && (fOptions.outputKind() == Options::kObjectFile) )
3573 desc |= N_NO_DEAD_STRIP;
3574 if ( atom->getDefinitionKind() == ObjectFile::Atom::kWeakDefinition )
3575 desc |= N_WEAK_DEF;
3576 if ( atom->isThumb() )
3577 desc |= N_ARM_THUMB_DEF;
3578 entry->set_n_desc(desc);
3579
3580 // set n_value ( address this symbol will be at if this executable is loaded at its preferred address )
3581 if ( atom->getDefinitionKind() == ObjectFile::Atom::kAbsoluteSymbol )
3582 entry->set_n_value(atom->getSectionOffset());
3583 else
3584 entry->set_n_value(this->getAtomLoadAddress(atom));
3585 }
3586
3587
3588 template <typename A>
3589 void Writer<A>::addLocalLabel(ObjectFile::Atom& atom, uint32_t offsetInAtom, const char* name)
3590 {
3591 macho_nlist<P> entry;
3592
3593 // set n_strx
3594 entry.set_n_strx(fStringsAtom->add(name));
3595
3596 // set n_type
3597 entry.set_n_type(N_SECT);
3598
3599 // set n_sect (section number of implementation )
3600 entry.set_n_sect(atom.getSection()->getIndex());
3601
3602 // set n_desc
3603 entry.set_n_desc(0);
3604
3605 // set n_value ( address this symbol will be at if this executable is loaded at its preferred address )
3606 entry.set_n_value(this->getAtomLoadAddress(&atom) + offsetInAtom);
3607
3608 // add
3609 fLocalExtraLabels.push_back(entry);
3610 }
3611
3612
3613
3614 template <typename A>
3615 void Writer<A>::addGlobalLabel(ObjectFile::Atom& atom, uint32_t offsetInAtom, const char* name)
3616 {
3617 macho_nlist<P> entry;
3618
3619 // set n_strx
3620 entry.set_n_strx(fStringsAtom->add(name));
3621
3622 // set n_type
3623 entry.set_n_type(N_SECT|N_EXT);
3624
3625 // set n_sect (section number of implementation )
3626 entry.set_n_sect(atom.getSection()->getIndex());
3627
3628 // set n_desc
3629 entry.set_n_desc(0);
3630
3631 // set n_value ( address this symbol will be at if this executable is loaded at its preferred address )
3632 entry.set_n_value(this->getAtomLoadAddress(&atom) + offsetInAtom);
3633
3634 // add
3635 fGlobalExtraLabels.push_back(entry);
3636 }
3637
3638 template <typename A>
3639 void Writer<A>::setNlistRange(std::vector<class ObjectFile::Atom*>& atoms, uint32_t startIndex, uint32_t count)
3640 {
3641 macho_nlist<P>* entry = &fSymbolTable[startIndex];
3642 for (uint32_t i=0; i < count; ++i, ++entry) {
3643 ObjectFile::Atom* atom = atoms[i];
3644 if ( &atoms == &fExportedAtoms ) {
3645 this->setExportNlist(atom, entry);
3646 }
3647 else if ( &atoms == &fImportedAtoms ) {
3648 this->setImportNlist(atom, entry);
3649 }
3650 else {
3651 this->setLocalNlist(atom, entry);
3652 }
3653 }
3654 }
3655
3656 template <typename A>
3657 void Writer<A>::copyNlistRange(const std::vector<macho_nlist<P> >& entries, uint32_t startIndex)
3658 {
3659 for ( typename std::vector<macho_nlist<P> >::const_iterator it = entries.begin(); it != entries.end(); ++it)
3660 fSymbolTable[startIndex++] = *it;
3661 }
3662
3663
3664 template <typename A>
3665 struct NListNameSorter
3666 {
3667 NListNameSorter(StringsLinkEditAtom<A>* pool) : fStringPool(pool) {}
3668
3669 bool operator()(const macho_nlist<typename A::P>& left, const macho_nlist<typename A::P>& right)
3670 {
3671 return (strcmp(fStringPool->stringForIndex(left.n_strx()), fStringPool->stringForIndex(right.n_strx())) < 0);
3672 }
3673 private:
3674 StringsLinkEditAtom<A>* fStringPool;
3675 };
3676
3677
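// The symbol table is laid out as four contiguous ranges: stabs, then local symbols,
// then exported symbols, then imported symbols; the export and import ranges are sorted by name.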
3678 template <typename A>
3679 void Writer<A>::buildSymbolTable()
3680 {
3681 fSymbolTableStabsStartIndex = 0;
3682 fSymbolTableStabsCount = fStabs->size();
3683 fSymbolTableLocalStartIndex = fSymbolTableStabsStartIndex + fSymbolTableStabsCount;
3684 fSymbolTableLocalCount = fLocalSymbolAtoms.size() + fLocalExtraLabels.size();
3685 fSymbolTableExportStartIndex = fSymbolTableLocalStartIndex + fSymbolTableLocalCount;
3686 fSymbolTableExportCount = fExportedAtoms.size() + fGlobalExtraLabels.size();
3687 fSymbolTableImportStartIndex = fSymbolTableExportStartIndex + fSymbolTableExportCount;
3688 fSymbolTableImportCount = fImportedAtoms.size();
3689
3690 // allocate symbol table
3691 fSymbolTableCount = fSymbolTableStabsCount + fSymbolTableLocalCount + fSymbolTableExportCount + fSymbolTableImportCount;
3692 fSymbolTable = new macho_nlist<P>[fSymbolTableCount];
3693
3694 // fill in symbol table and string pool (do stabs last so strings are at end of pool)
3695 setNlistRange(fLocalSymbolAtoms, fSymbolTableLocalStartIndex, fLocalSymbolAtoms.size());
3696 if ( fLocalExtraLabels.size() != 0 )
3697 copyNlistRange(fLocalExtraLabels, fSymbolTableLocalStartIndex+fLocalSymbolAtoms.size());
3698 setNlistRange(fExportedAtoms, fSymbolTableExportStartIndex, fExportedAtoms.size());
3699 if ( fGlobalExtraLabels.size() != 0 ) {
3700 copyNlistRange(fGlobalExtraLabels, fSymbolTableExportStartIndex+fExportedAtoms.size());
3701 // re-sort combined range
3702 std::sort( &fSymbolTable[fSymbolTableExportStartIndex],
3703 &fSymbolTable[fSymbolTableExportStartIndex+fSymbolTableExportCount],
3704 NListNameSorter<A>(fStringsAtom) );
3705 }
3706 setNlistRange(fImportedAtoms, fSymbolTableImportStartIndex, fSymbolTableImportCount);
3707 addStabs(fSymbolTableStabsStartIndex);
3708
3709 // set up module table
3710 if ( fModuleInfoAtom != NULL )
3711 fModuleInfoAtom->setName();
3712
3713 // create atom to symbol index map
3714 // imports
3715 int i = 0;
3716 for(std::vector<ObjectFile::Atom*>::iterator it=fImportedAtoms.begin(); it != fImportedAtoms.end(); ++it) {
3717 fAtomToSymbolIndex[*it] = i + fSymbolTableImportStartIndex;
3718 ++i;
3719 }
3720 // locals
3721 i = 0;
3722 for(std::vector<ObjectFile::Atom*>::iterator it=fLocalSymbolAtoms.begin(); it != fLocalSymbolAtoms.end(); ++it) {
3723 fAtomToSymbolIndex[*it] = i + fSymbolTableLocalStartIndex;
3724 ++i;
3725 }
3726 // exports
3727 i = 0;
3728 for(std::vector<ObjectFile::Atom*>::iterator it=fExportedAtoms.begin(); it != fExportedAtoms.end(); ++it) {
3729 fAtomToSymbolIndex[*it] = i + fSymbolTableExportStartIndex;
3730 ++i;
3731 }
3732
3733 }
3734
3735
3736
3737 template <typename A>
3738 bool Writer<A>::shouldExport(const ObjectFile::Atom& atom) const
3739 {
3740 switch ( atom.getSymbolTableInclusion() ) {
3741 case ObjectFile::Atom::kSymbolTableNotIn:
3742 return false;
3743 case ObjectFile::Atom::kSymbolTableInAndNeverStrip:
3744 return true;
3745 case ObjectFile::Atom::kSymbolTableInAsAbsolute:
3746 case ObjectFile::Atom::kSymbolTableIn:
3747 switch ( atom.getScope() ) {
3748 case ObjectFile::Atom::scopeGlobal:
3749 return true;
3750 case ObjectFile::Atom::scopeLinkageUnit:
3751 return ( (fOptions.outputKind() == Options::kObjectFile) && fOptions.keepPrivateExterns() );
3752 default:
3753 return false;
3754 }
3755 break;
3756 }
3757 return false;
3758 }
3759
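// Walk every atom, segment by segment and section by section, and partition the named ones into
// the import, export, and local-symbol buckets used to build the symbol table; dtrace probe
// references additionally become extra local or global labels.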
3760 template <typename A>
3761 void Writer<A>::collectExportedAndImportedAndLocalAtoms()
3762 {
3763 const int atomCount = fAllAtoms->size();
3764 // guess at sizes of each bucket to minimize re-allocations
3765 fImportedAtoms.reserve(100);
3766 fExportedAtoms.reserve(atomCount/2);
3767 fLocalSymbolAtoms.reserve(atomCount);
3768
3769 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
3770 std::vector<SectionInfo*>& sectionInfos = (*segit)->fSections;
3771 for (std::vector<SectionInfo*>::iterator secit = sectionInfos.begin(); secit != sectionInfos.end(); ++secit) {
3772 std::vector<ObjectFile::Atom*>& sectionAtoms = (*secit)->fAtoms;
3773 for (std::vector<ObjectFile::Atom*>::iterator ait = sectionAtoms.begin(); ait != sectionAtoms.end(); ++ait) {
3774 ObjectFile::Atom* atom = *ait;
3775 // only named atoms go in symbol table
3776 if ( atom->getName() != NULL ) {
3777 // put atom into correct bucket: imports, exports, locals
3778 //fprintf(stderr, "collectExportedAndImportedAndLocalAtoms() name=%s\n", atom->getDisplayName());
3779 switch ( atom->getDefinitionKind() ) {
3780 case ObjectFile::Atom::kExternalDefinition:
3781 case ObjectFile::Atom::kExternalWeakDefinition:
3782 fImportedAtoms.push_back(atom);
3783 break;
3784 case ObjectFile::Atom::kTentativeDefinition:
3785 if ( (fOptions.outputKind() == Options::kObjectFile) && !fOptions.readerOptions().fMakeTentativeDefinitionsReal ) {
3786 fImportedAtoms.push_back(atom);
3787 break;
3788 }
3789 // else fall into
3790 case ObjectFile::Atom::kWeakDefinition:
3791 if ( stringsNeedLabelsInObjects()
3792 && (fOptions.outputKind() == Options::kObjectFile)
3793 && (atom->getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableIn)
3794 && (atom->getScope() == ObjectFile::Atom::scopeLinkageUnit)
3795 && (atom->getContentType() == ObjectFile::Atom::kCStringType) ) {
3796 fLocalSymbolAtoms.push_back(atom);
3797 break;
3798 }
3799 // else fall into
3800 case ObjectFile::Atom::kRegularDefinition:
3801 case ObjectFile::Atom::kAbsoluteSymbol:
3802 if ( this->shouldExport(*atom) )
3803 fExportedAtoms.push_back(atom);
3804 else if ( (atom->getSymbolTableInclusion() != ObjectFile::Atom::kSymbolTableNotIn)
3805 && ((fOptions.outputKind() == Options::kObjectFile) || fOptions.keepLocalSymbol(atom->getName())) )
3806 fLocalSymbolAtoms.push_back(atom);
3807 break;
3808 }
3809 }
3810 // when generating a .o file, dtrace static probes become local labels
3811 if ( (fOptions.outputKind() == Options::kObjectFile) && !fOptions.readerOptions().fForStatic ) {
3812 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
3813 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
3814 ObjectFile::Reference* ref = *rit;
3815 if ( ref->getKind() == A::kDtraceProbe ) {
3816 // dtrace probe points to be added back into generated .o file
3817 this->addLocalLabel(*atom, ref->getFixUpOffset(), ref->getTargetName());
3818 }
3819 }
3820 }
3821 // when linking kernel, old style dtrace static probes become global labels
3822 else if ( fOptions.readerOptions().fForStatic ) {
3823 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
3824 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
3825 ObjectFile::Reference* ref = *rit;
3826 if ( ref->getKind() == A::kDtraceProbe ) {
3827 // dtrace probe points to be added back into generated file
3828 this->addGlobalLabel(*atom, ref->getFixUpOffset(), ref->getTargetName());
3829 }
3830 }
3831 }
3832 }
3833 }
3834 }
3835
3836 // sort exported atoms by name
3837 std::sort(fExportedAtoms.begin(), fExportedAtoms.end(), AtomByNameSorter());
3838 // sort imported atoms by name (not required by runtime, but helps make generated files binary diffable)
3839 std::sort(fImportedAtoms.begin(), fImportedAtoms.end(), AtomByNameSorter());
3840 }
3841
3842
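// Compute the n_value written for a debug stab: depending on the stab type this is the atom's
// final address, an address plus offset, the atom's size, or the original stab value.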
3843 template <typename A>
3844 uint64_t Writer<A>::valueForStab(const ObjectFile::Reader::Stab& stab)
3845 {
3846 switch ( stab.type ) {
3847 case N_FUN:
3848 if ( (stab.string == NULL) || (strlen(stab.string) == 0) ) {
3849 // end of function N_FUN has size
3850 return stab.atom->getSize();
3851 }
3852 else {
3853 // start of function N_FUN has address
3854 return getAtomLoadAddress(stab.atom);
3855 }
3856 case N_LBRAC:
3857 case N_RBRAC:
3858 case N_SLINE:
3859 if ( stab.atom == NULL )
3860 // some weird assembly files have slines not associated with a function
3861 return stab.value;
3862 else
3863 // all these stab types need their value changed from an offset in the atom to an address
3864 return getAtomLoadAddress(stab.atom) + stab.value;
3865 case N_STSYM:
3866 case N_LCSYM:
3867 case N_BNSYM:
3868 // all these need address of atom
3869 return getAtomLoadAddress(stab.atom);
3870 case N_ENSYM:
3871 return stab.atom->getSize();
3872 case N_SO:
3873 if ( stab.atom == NULL ) {
3874 return 0;
3875 }
3876 else {
3877 if ( (stab.string == NULL) || (strlen(stab.string) == 0) ) {
3878 // end of translation unit N_SO has address of end of last atom
3879 return getAtomLoadAddress(stab.atom) + stab.atom->getSize();
3880 }
3881 else {
3882 // start of translation unit N_SO has address of first atom
3883 return getAtomLoadAddress(stab.atom);
3884 }
3885 }
3886 break;
3887 default:
3888 return stab.value;
3889 }
3890 }
3891
3892 template <typename A>
3893 uint32_t Writer<A>::stringOffsetForStab(const ObjectFile::Reader::Stab& stab)
3894 {
3895 switch (stab.type) {
3896 case N_SO:
3897 if ( (stab.string == NULL) || stab.string[0] == '\0' ) {
3898 return this->fStringsAtom->emptyString();
3899 break;
3900 }
3901 // fall into uniquing case
3902 case N_SOL:
3903 case N_BINCL:
3904 case N_EXCL:
3905 return this->fStringsAtom->addUnique(stab.string);
3906 break;
3907 default:
3908 if ( stab.string == NULL )
3909 return 0;
3910 else if ( stab.string[0] == '\0' )
3911 return this->fStringsAtom->emptyString();
3912 else
3913 return this->fStringsAtom->add(stab.string);
3914 }
3915 return 0;
3916 }
3917
3918 template <typename A>
3919 uint8_t Writer<A>::sectionIndexForStab(const ObjectFile::Reader::Stab& stab)
3920 {
3921 // in FUN stabs, n_sect field is 0 for start FUN and 1 for end FUN
3922 if ( stab.type == N_FUN )
3923 return stab.other;
3924 else if ( stab.atom != NULL )
3925 return stab.atom->getSection()->getIndex();
3926 else
3927 return stab.other;
3928 }
3929
3930 template <typename A>
3931 void Writer<A>::addStabs(uint32_t startIndex)
3932 {
3933 macho_nlist<P>* entry = &fSymbolTable[startIndex];
3934 for(std::vector<ObjectFile::Reader::Stab>::iterator it = fStabs->begin(); it != fStabs->end(); ++it, ++entry) {
3935 const ObjectFile::Reader::Stab& stab = *it;
3936 entry->set_n_type(stab.type);
3937 entry->set_n_sect(sectionIndexForStab(stab));
3938 entry->set_n_desc(stab.desc);
3939 entry->set_n_value(valueForStab(stab));
3940 entry->set_n_strx(stringOffsetForStab(stab));
3941 }
3942 }
3943
3944
3945
3946 template <typename A>
3947 uint32_t Writer<A>::symbolIndex(ObjectFile::Atom& atom)
3948 {
3949 std::map<ObjectFile::Atom*, uint32_t>::iterator pos = fAtomToSymbolIndex.find(&atom);
3950 if ( pos != fAtomToSymbolIndex.end() )
3951 return pos->second;
3952 throwf("atom not found in symbolIndex(%s) for %s", atom.getDisplayName(), atom.getFile()->getPath());
3953 }
3954
3955
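// For x86_64 object files, whether a reference is encoded as an external relocation depends only
// on whether the target is in the symbol table, unlike the generic implementation below, which
// keys off the target's definition kind.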
3956 template <>
3957 bool Writer<x86_64>::makesExternalRelocatableReference(ObjectFile::Atom& target) const
3958 {
3959 switch ( target.getSymbolTableInclusion() ) {
3960 case ObjectFile::Atom::kSymbolTableNotIn:
3961 return false;
3962 case ObjectFile::Atom::kSymbolTableInAsAbsolute:
3963 case ObjectFile::Atom::kSymbolTableIn:
3964 case ObjectFile::Atom::kSymbolTableInAndNeverStrip:
3965 return true;
3966 };
3967 return false;
3968 }
3969
3970 template <typename A>
3971 bool Writer<A>::makesExternalRelocatableReference(ObjectFile::Atom& target) const
3972 {
3973 switch ( target.getDefinitionKind() ) {
3974 case ObjectFile::Atom::kRegularDefinition:
3975 case ObjectFile::Atom::kWeakDefinition:
3976 case ObjectFile::Atom::kAbsoluteSymbol:
3977 return false;
3978 case ObjectFile::Atom::kTentativeDefinition:
3979 if ( fOptions.readerOptions().fMakeTentativeDefinitionsReal )
3980 return false;
3981 else
3982 return (target.getScope() != ObjectFile::Atom::scopeTranslationUnit);
3983 case ObjectFile::Atom::kExternalDefinition:
3984 case ObjectFile::Atom::kExternalWeakDefinition:
3985 return shouldExport(target);
3986 }
3987 return false;
3988 }
3989
3990 template <typename A>
3991 void Writer<A>::buildFixups()
3992 {
3993 if ( fOptions.outputKind() == Options::kObjectFile ) {
3994 this->buildObjectFileFixups();
3995 }
3996 else {
3997 if ( fOptions.keepRelocations() )
3998 this->buildObjectFileFixups();
3999 this->buildExecutableFixups();
4000 }
4001 }
4002
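// Encode one reference as section relocation entries (used when writing a .o file or when
// relocations are being preserved); returns how many macho_relocation_info entries were
// appended to fSectionRelocs (0, 1, or 2).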
4003 template <>
4004 uint32_t Writer<x86_64>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4005 {
4006 ObjectFile::Atom& target = ref->getTarget();
4007 bool external = this->makesExternalRelocatableReference(target);
4008 uint32_t symbolIndex = external ? this->symbolIndex(target) : target.getSection()->getIndex();
4009 uint32_t address = atom->getSectionOffset()+ref->getFixUpOffset();
4010 macho_relocation_info<P> reloc1;
4011 macho_relocation_info<P> reloc2;
4012 x86_64::ReferenceKinds kind = (x86_64::ReferenceKinds)ref->getKind();
4013
4014 switch ( kind ) {
4015 case x86_64::kNoFixUp:
4016 case x86_64::kGOTNoFixUp:
4017 case x86_64::kFollowOn:
4018 case x86_64::kGroupSubordinate:
4019 return 0;
4020
4021 case x86_64::kPointer:
4022 case x86_64::kPointerWeakImport:
4023 reloc1.set_r_address(address);
4024 reloc1.set_r_symbolnum(symbolIndex);
4025 reloc1.set_r_pcrel(false);
4026 reloc1.set_r_length(3);
4027 reloc1.set_r_extern(external);
4028 reloc1.set_r_type(X86_64_RELOC_UNSIGNED);
4029 fSectionRelocs.push_back(reloc1);
4030 return 1;
4031
4032 case x86_64::kPointer32:
4033 reloc1.set_r_address(address);
4034 reloc1.set_r_symbolnum(symbolIndex);
4035 reloc1.set_r_pcrel(false);
4036 reloc1.set_r_length(2);
4037 reloc1.set_r_extern(external);
4038 reloc1.set_r_type(X86_64_RELOC_UNSIGNED);
4039 fSectionRelocs.push_back(reloc1);
4040 return 1;
4041
4042 case x86_64::kPointerDiff32:
4043 case x86_64::kPointerDiff:
4044 {
4045 ObjectFile::Atom& fromTarget = ref->getFromTarget();
4046 bool fromExternal = (fromTarget.getSymbolTableInclusion() != ObjectFile::Atom::kSymbolTableNotIn);
4047 uint32_t fromSymbolIndex = fromExternal ? this->symbolIndex(fromTarget) : fromTarget.getSection()->getIndex();
4048 reloc1.set_r_address(address);
4049 reloc1.set_r_symbolnum(symbolIndex);
4050 reloc1.set_r_pcrel(false);
4051 reloc1.set_r_length(kind==x86_64::kPointerDiff32 ? 2 : 3);
4052 reloc1.set_r_extern(external);
4053 reloc1.set_r_type(X86_64_RELOC_UNSIGNED);
4054 reloc2.set_r_address(address);
4055 reloc2.set_r_symbolnum(fromSymbolIndex);
4056 reloc2.set_r_pcrel(false);
4057 reloc2.set_r_length(kind==x86_64::kPointerDiff32 ? 2 : 3);
4058 reloc2.set_r_extern(fromExternal);
4059 reloc2.set_r_type(X86_64_RELOC_SUBTRACTOR);
4060 fSectionRelocs.push_back(reloc1);
4061 fSectionRelocs.push_back(reloc2);
4062 return 2;
4063 }
4064
4065 case x86_64::kBranchPCRel32:
4066 case x86_64::kBranchPCRel32WeakImport:
4067 case x86_64::kDtraceProbeSite:
4068 case x86_64::kDtraceIsEnabledSite:
4069 reloc1.set_r_address(address);
4070 reloc1.set_r_symbolnum(symbolIndex);
4071 reloc1.set_r_pcrel(true);
4072 reloc1.set_r_length(2);
4073 reloc1.set_r_extern(external);
4074 reloc1.set_r_type(X86_64_RELOC_BRANCH);
4075 fSectionRelocs.push_back(reloc1);
4076 return 1;
4077
4078 case x86_64::kPCRel32:
4079 reloc1.set_r_address(address);
4080 reloc1.set_r_symbolnum(symbolIndex);
4081 reloc1.set_r_pcrel(true);
4082 reloc1.set_r_length(2);
4083 reloc1.set_r_extern(external);
4084 reloc1.set_r_type(X86_64_RELOC_SIGNED);
4085 fSectionRelocs.push_back(reloc1);
4086 return 1;
4087
4088 case x86_64::kPCRel32_1:
4089 reloc1.set_r_address(address);
4090 reloc1.set_r_symbolnum(symbolIndex);
4091 reloc1.set_r_pcrel(true);
4092 reloc1.set_r_length(2);
4093 reloc1.set_r_extern(external);
4094 reloc1.set_r_type(X86_64_RELOC_SIGNED_1);
4095 fSectionRelocs.push_back(reloc1);
4096 return 1;
4097
4098 case x86_64::kPCRel32_2:
4099 reloc1.set_r_address(address);
4100 reloc1.set_r_symbolnum(symbolIndex);
4101 reloc1.set_r_pcrel(true);
4102 reloc1.set_r_length(2);
4103 reloc1.set_r_extern(external);
4104 reloc1.set_r_type(X86_64_RELOC_SIGNED_2);
4105 fSectionRelocs.push_back(reloc1);
4106 return 1;
4107
4108 case x86_64::kPCRel32_4:
4109 reloc1.set_r_address(address);
4110 reloc1.set_r_symbolnum(symbolIndex);
4111 reloc1.set_r_pcrel(true);
4112 reloc1.set_r_length(2);
4113 reloc1.set_r_extern(external);
4114 reloc1.set_r_type(X86_64_RELOC_SIGNED_4);
4115 fSectionRelocs.push_back(reloc1);
4116 return 1;
4117
4118 case x86_64::kBranchPCRel8:
4119 reloc1.set_r_address(address);
4120 reloc1.set_r_symbolnum(symbolIndex);
4121 reloc1.set_r_pcrel(true);
4122 reloc1.set_r_length(0);
4123 reloc1.set_r_extern(external);
4124 reloc1.set_r_type(X86_64_RELOC_BRANCH);
4125 fSectionRelocs.push_back(reloc1);
4126 return 1;
4127
4128 case x86_64::kPCRel32GOT:
4129 case x86_64::kPCRel32GOTWeakImport:
4130 reloc1.set_r_address(address);
4131 reloc1.set_r_symbolnum(symbolIndex);
4132 reloc1.set_r_pcrel(true);
4133 reloc1.set_r_length(2);
4134 reloc1.set_r_extern(external);
4135 reloc1.set_r_type(X86_64_RELOC_GOT);
4136 fSectionRelocs.push_back(reloc1);
4137 return 1;
4138
4139 case x86_64::kPCRel32GOTLoad:
4140 case x86_64::kPCRel32GOTLoadWeakImport:
4141 reloc1.set_r_address(address);
4142 reloc1.set_r_symbolnum(symbolIndex);
4143 reloc1.set_r_pcrel(true);
4144 reloc1.set_r_length(2);
4145 reloc1.set_r_extern(external);
4146 reloc1.set_r_type(X86_64_RELOC_GOT_LOAD);
4147 fSectionRelocs.push_back(reloc1);
4148 return 1;
4149
4150 case x86_64::kPointerDiff24:
4151 throw "internal linker error, kPointerDiff24 can't be encoded into object files";
4152
4153 case x86_64::kImageOffset32:
4154 throw "internal linker error, kImageOffset32 can't be encoded into object files";
4155
4156 case x86_64::kSectionOffset24:
4157 throw "internal linker error, kSectionOffset24 can't be encoded into object files";
4158
4159 case x86_64::kDtraceTypeReference:
4160 case x86_64::kDtraceProbe:
4161 // generates no relocs
4162 return 0;
4163 }
4164 return 0;
4165 }
4166
4167
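// x86 variant: emits plain GENERIC_RELOC_VANILLA entries for most references, scattered relocs
// when an internal target has a non-zero offset, and SECTDIFF/PAIR pairs for pointer differences.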
4168 template <>
4169 uint32_t Writer<x86>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4170 {
4171 ObjectFile::Atom& target = ref->getTarget();
4172 bool isExtern = this->makesExternalRelocatableReference(target);
4173 uint32_t symbolIndex = 0;
4174 if ( isExtern )
4175 symbolIndex = this->symbolIndex(target);
4176 uint32_t sectionNum = target.getSection()->getIndex();
4177 uint32_t address = atom->getSectionOffset()+ref->getFixUpOffset();
4178 macho_relocation_info<P> reloc1;
4179 macho_relocation_info<P> reloc2;
4180 macho_scattered_relocation_info<P>* sreloc1 = (macho_scattered_relocation_info<P>*)&reloc1;
4181 macho_scattered_relocation_info<P>* sreloc2 = (macho_scattered_relocation_info<P>*)&reloc2;
4182 x86::ReferenceKinds kind = (x86::ReferenceKinds)ref->getKind();
4183
4184 if ( !isExtern && (sectionNum == 0) && (target.getDefinitionKind() != ObjectFile::Atom::kAbsoluteSymbol) )
4185 warning("section index == 0 for %s (kind=%d, scope=%d, inclusion=%d) in %s",
4186 target.getDisplayName(), target.getDefinitionKind(), target.getScope(), target.getSymbolTableInclusion(), target.getFile()->getPath());
4187
4188
4189 switch ( kind ) {
4190 case x86::kNoFixUp:
4191 case x86::kFollowOn:
4192 case x86::kGroupSubordinate:
4193 return 0;
4194
4195 case x86::kPointer:
4196 case x86::kPointerWeakImport:
4197 case x86::kAbsolute32:
4198 if ( !isExtern && (ref->getTargetOffset() != 0) ) {
4199 // use scattered reloc if target offset is non-zero
4200 sreloc1->set_r_scattered(true);
4201 sreloc1->set_r_pcrel(false);
4202 sreloc1->set_r_length(2);
4203 sreloc1->set_r_type(GENERIC_RELOC_VANILLA);
4204 sreloc1->set_r_address(address);
4205 sreloc1->set_r_value(target.getAddress());
4206 }
4207 else {
4208 reloc1.set_r_address(address);
4209 reloc1.set_r_symbolnum(isExtern ? symbolIndex : sectionNum);
4210 reloc1.set_r_pcrel(false);
4211 reloc1.set_r_length(2);
4212 reloc1.set_r_extern(isExtern);
4213 reloc1.set_r_type(GENERIC_RELOC_VANILLA);
4214 }
4215 fSectionRelocs.push_back(reloc1);
4216 return 1;
4217
4218 case x86::kPointerDiff16:
4219 case x86::kPointerDiff:
4220 {
4221 //pint_t fromAddr = ref->getFromTarget().getAddress() + ref->getFromTargetOffset();
4222 //fprintf(stderr, "addObjectRelocs(): refFromTarget=%s, refTarget=%s, refFromTargetAddr=0x%llX, refFromTargetOffset=0x%llX\n",
4223 // ref->getFromTarget().getDisplayName(), ref->getTarget().getDisplayName(),
4224 // ref->getFromTarget().getAddress(), ref->getFromTargetOffset());
4225 sreloc1->set_r_scattered(true);
4226 sreloc1->set_r_pcrel(false);
4227 sreloc1->set_r_length( (kind==x86::kPointerDiff) ? 2 : 1 );
4228 if ( ref->getTarget().getScope() == ObjectFile::Atom::scopeTranslationUnit )
4229 sreloc1->set_r_type(GENERIC_RELOC_LOCAL_SECTDIFF);
4230 else
4231 sreloc1->set_r_type(GENERIC_RELOC_SECTDIFF);
4232 sreloc1->set_r_address(address);
4233 sreloc1->set_r_value(target.getAddress());
4234
4235 sreloc2->set_r_scattered(true);
4236 sreloc2->set_r_pcrel(false);
4237 sreloc2->set_r_length( (kind==x86::kPointerDiff) ? 2 : 1 );
4238 sreloc2->set_r_type(GENERIC_RELOC_PAIR);
4239 sreloc2->set_r_address(0);
4240 if ( &ref->getFromTarget() == atom )
4241 sreloc2->set_r_value(ref->getFromTarget().getAddress()+ref->getFromTargetOffset());
4242 else
4243 sreloc2->set_r_value(ref->getFromTarget().getAddress());
4244 fSectionRelocs.push_back(reloc2);
4245 fSectionRelocs.push_back(reloc1);
4246 return 2;
4247 }
4248
4249 case x86::kPCRel32WeakImport:
4250 case x86::kPCRel32:
4251 case x86::kPCRel16:
4252 case x86::kPCRel8:
4253 case x86::kDtraceProbeSite:
4254 case x86::kDtraceIsEnabledSite:
4255 if ( !isExtern && (ref->getTargetOffset() != 0) ) {
4256 // use scattered reloc if target offset is non-zero
4257 sreloc1->set_r_scattered(true);
4258 sreloc1->set_r_pcrel(true);
4259 sreloc1->set_r_length( (kind==x86::kPCRel8) ? 0 : ((kind==x86::kPCRel16) ? 1 : 2) );
4260 sreloc1->set_r_type(GENERIC_RELOC_VANILLA);
4261 sreloc1->set_r_address(address);
4262 sreloc1->set_r_value(target.getAddress());
4263 }
4264 else {
4265 reloc1.set_r_address(address);
4266 reloc1.set_r_symbolnum(isExtern ? symbolIndex : sectionNum);
4267 reloc1.set_r_pcrel(true);
4268 reloc1.set_r_length( (kind==x86::kPCRel8) ? 0 : ((kind==x86::kPCRel16) ? 1 : 2) );
4269 reloc1.set_r_extern(isExtern);
4270 reloc1.set_r_type(GENERIC_RELOC_VANILLA);
4271 }
4272 fSectionRelocs.push_back(reloc1);
4273 return 1;
4274
4275 case x86::kPointerDiff24:
4276 throw "internal linker error, kPointerDiff24 can't be encoded into object files";
4277
4278 case x86::kImageOffset32:
4279 throw "internal linker error, kImageOffset32 can't be encoded into object files";
4280
4281 case x86::kSectionOffset24:
4282 throw "internal linker error, kSectionOffset24 can't be encoded into object files";
4283
4284 case x86::kDtraceTypeReference:
4285 case x86::kDtraceProbe:
4286 // generates no relocs
4287 return 0;
4288
4289 }
4290 return 0;
4291 }
4292
4293 template <>
4294 uint32_t Writer<arm>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4295 {
4296 ObjectFile::Atom& target = ref->getTarget();
4297 bool isExtern = this->makesExternalRelocatableReference(target);
4298 uint32_t symbolIndex = 0;
4299 if ( isExtern )
4300 symbolIndex = this->symbolIndex(target);
4301 uint32_t sectionNum = target.getSection()->getIndex();
4302 uint32_t address = atom->getSectionOffset()+ref->getFixUpOffset();
4303 macho_relocation_info<P> reloc1;
4304 macho_relocation_info<P> reloc2;
4305 macho_scattered_relocation_info<P>* sreloc1 = (macho_scattered_relocation_info<P>*)&reloc1;
4306 macho_scattered_relocation_info<P>* sreloc2 = (macho_scattered_relocation_info<P>*)&reloc2;
4307 arm::ReferenceKinds kind = (arm::ReferenceKinds)ref->getKind();
4308
4309 if ( !isExtern && (sectionNum == 0) && (target.getDefinitionKind() != ObjectFile::Atom::kAbsoluteSymbol) )
4310 warning("section index == 0 for %s (kind=%d, scope=%d, inclusion=%d) in %s",
4311 target.getDisplayName(), target.getDefinitionKind(), target.getScope(), target.getSymbolTableInclusion(), target.getFile()->getPath());
4312
4313
4314 switch ( kind ) {
4315 case arm::kNoFixUp:
4316 case arm::kFollowOn:
4317 case arm::kGroupSubordinate:
4318 return 0;
4319
4320 case arm::kPointer:
4321 case arm::kReadOnlyPointer:
4322 case arm::kPointerWeakImport:
4323 if ( !isExtern && (ref->getTargetOffset() != 0) ) {
4324 // use scattered reloc if target offset is non-zero
4325 sreloc1->set_r_scattered(true);
4326 sreloc1->set_r_pcrel(false);
4327 sreloc1->set_r_length(2);
4328 sreloc1->set_r_type(ARM_RELOC_VANILLA);
4329 sreloc1->set_r_address(address);
4330 sreloc1->set_r_value(target.getAddress());
4331 }
4332 else {
4333 reloc1.set_r_address(address);
4334 reloc1.set_r_symbolnum(isExtern ? symbolIndex : sectionNum);
4335 reloc1.set_r_pcrel(false);
4336 reloc1.set_r_length(2);
4337 reloc1.set_r_extern(isExtern);
4338 reloc1.set_r_type(ARM_RELOC_VANILLA);
4339 }
4340 fSectionRelocs.push_back(reloc1);
4341 return 1;
4342
4343 case arm::kPointerDiff:
4344 {
4345 sreloc1->set_r_scattered(true);
4346 sreloc1->set_r_pcrel(false);
4347 sreloc1->set_r_length(2);
4348 if ( ref->getTarget().getScope() == ObjectFile::Atom::scopeTranslationUnit )
4349 sreloc1->set_r_type(ARM_RELOC_LOCAL_SECTDIFF);
4350 else
4351 sreloc1->set_r_type(ARM_RELOC_SECTDIFF);
4352 sreloc1->set_r_address(address);
4353 sreloc1->set_r_value(target.getAddress());
4354 sreloc2->set_r_scattered(true);
4355 sreloc2->set_r_pcrel(false);
4356 sreloc2->set_r_length(2);
4357 sreloc2->set_r_type(ARM_RELOC_PAIR);
4358 sreloc2->set_r_address(0);
4359 if ( &ref->getFromTarget() == atom )
4360 sreloc2->set_r_value(ref->getFromTarget().getAddress()+ref->getFromTargetOffset());
4361 else
4362 sreloc2->set_r_value(ref->getFromTarget().getAddress());
4363 fSectionRelocs.push_back(reloc2);
4364 fSectionRelocs.push_back(reloc1);
4365 return 2;
4366 }
4367
4368 case arm::kBranch24WeakImport:
4369 case arm::kBranch24:
4370 case arm::kDtraceProbeSite:
4371 case arm::kDtraceIsEnabledSite:
4372 if ( !isExtern && (ref->getTargetOffset() != 0) ) {
4373 // use scattered reloc if target offset is non-zero
4374 sreloc1->set_r_scattered(true);
4375 sreloc1->set_r_pcrel(true);
4376 sreloc1->set_r_length(2);
4377 sreloc1->set_r_type(ARM_RELOC_BR24);
4378 sreloc1->set_r_address(address);
4379 sreloc1->set_r_value(target.getAddress());
4380 }
4381 else {
4382 reloc1.set_r_address(address);
4383 reloc1.set_r_symbolnum(isExtern ? symbolIndex : sectionNum);
4384 reloc1.set_r_pcrel(true);
4385 reloc1.set_r_length(2);
4386 reloc1.set_r_extern(isExtern);
4387 reloc1.set_r_type(ARM_RELOC_BR24);
4388 }
4389 fSectionRelocs.push_back(reloc1);
4390 return 1;
4391
4392 case arm::kThumbBranch22WeakImport:
4393 case arm::kThumbBranch22:
4394 if ( !isExtern && (ref->getTargetOffset() != 0) ) {
4395 // use scattered reloc if target offset is non-zero
4396 sreloc1->set_r_scattered(true);
4397 sreloc1->set_r_pcrel(true);
4398 sreloc1->set_r_length(2);
4399 sreloc1->set_r_type(ARM_THUMB_RELOC_BR22);
4400 sreloc1->set_r_address(address);
4401 sreloc1->set_r_value(target.getAddress());
4402 }
4403 else {
4404 reloc1.set_r_address(address);
4405 reloc1.set_r_symbolnum(isExtern ? symbolIndex : sectionNum);
4406 reloc1.set_r_pcrel(true);
4407 reloc1.set_r_length(2);
4408 reloc1.set_r_extern(isExtern);
4409 reloc1.set_r_type(ARM_THUMB_RELOC_BR22);
4410 }
4411 fSectionRelocs.push_back(reloc1);
4412 return 1;
4413
4414 case arm::kPointerDiff12:
4415 throw "internal error. no reloc for 12-bit pointer diffs";
4416
4417 case arm::kDtraceTypeReference:
4418 case arm::kDtraceProbe:
4419 // generates no relocs
4420 return 0;
4421
4422 }
4423 return 0;
4424 }
4425
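//
// Largest valid address for each architecture: 2^32-1 for the 32-bit
// architectures, 2^64-1 for the 64-bit ones.
//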
4426 template <> uint64_t Writer<ppc>::maxAddress() { return 0xFFFFFFFFULL; }
4427 template <> uint64_t Writer<ppc64>::maxAddress() { return 0xFFFFFFFFFFFFFFFFULL; }
4428 template <> uint64_t Writer<x86>::maxAddress() { return 0xFFFFFFFFULL; }
4429 template <> uint64_t Writer<x86_64>::maxAddress() { return 0xFFFFFFFFFFFFFFFFULL; }
4430 template <> uint64_t Writer<arm>::maxAddress() { return 0xFFFFFFFFULL; }
4431
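//
// r_length is log2 of the number of bytes being relocated
// (0 = 1 byte, 1 = 2 bytes, 2 = 4 bytes, 3 = 8 bytes), so a
// pointer-sized reloc uses 2 on 32-bit ppc and 3 on ppc64.
//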
4432 template <>
4433 uint8_t Writer<ppc>::getRelocPointerSize()
4434 {
4435 return 2;
4436 }
4437
4438 template <>
4439 uint8_t Writer<ppc64>::getRelocPointerSize()
4440 {
4441 return 3;
4442 }
4443
4444 template <>
4445 uint32_t Writer<ppc>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4446 {
4447 return addObjectRelocs_powerpc(atom, ref);
4448 }
4449
4450 template <>
4451 uint32_t Writer<ppc64>::addObjectRelocs(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4452 {
4453 return addObjectRelocs_powerpc(atom, ref);
4454 }
4455
4456 //
4457 // addObjectRelocs<ppc> and addObjectRelocs<ppc64> are almost exactly the same, so
4458 // they use a common addObjectRelocs_powerpc() method.
4459 //
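// For the kPointerDiff* cases below a scattered SECTDIFF/PAIR pair is
// emitted; roughly, for a 32-bit "delta = &a - &b" the SECTDIFF reloc
// carries the address of 'a' in r_value and the PAIR reloc carries the
// address of 'b', so the difference can be recomputed if sections move.
//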
4460 template <typename A>
4461 uint32_t Writer<A>::addObjectRelocs_powerpc(ObjectFile::Atom* atom, ObjectFile::Reference* ref)
4462 {
4463 ObjectFile::Atom& target = ref->getTarget();
4464 bool isExtern = this->makesExternalRelocatableReference(target);
4465 uint32_t symbolIndex = 0;
4466 if ( isExtern )
4467 symbolIndex = this->symbolIndex(target);
4468 uint32_t sectionNum = target.getSection()->getIndex();
4469 uint32_t address = atom->getSectionOffset()+ref->getFixUpOffset();
4470 macho_relocation_info<P> reloc1;
4471 macho_relocation_info<P> reloc2;
4472 macho_scattered_relocation_info<P>* sreloc1 = (macho_scattered_relocation_info<P>*)&reloc1;
4473 macho_scattered_relocation_info<P>* sreloc2 = (macho_scattered_relocation_info<P>*)&reloc2;
4474 typename A::ReferenceKinds kind = (typename A::ReferenceKinds)ref->getKind();
4475
4476 switch ( kind ) {
4477 case A::kNoFixUp:
4478 case A::kFollowOn:
4479 case A::kGroupSubordinate:
4480 return 0;
4481
4482 case A::kPointer:
4483 case A::kPointerWeakImport:
4484 if ( !isExtern && (ref->getTargetOffset() >= target.getSize()) ) {
4485 // use scattered reloc if target offset is outside target
4486 sreloc1->set_r_scattered(true);
4487 sreloc1->set_r_pcrel(false);
4488 sreloc1->set_r_length(getRelocPointerSize());
4489 sreloc1->set_r_type(GENERIC_RELOC_VANILLA);
4490 sreloc1->set_r_address(address);
4491 sreloc1->set_r_value(target.getAddress());
4492 }
4493 else {
4494 reloc1.set_r_address(address);
4495 if ( isExtern )
4496 reloc1.set_r_symbolnum(symbolIndex);
4497 else
4498 reloc1.set_r_symbolnum(sectionNum);
4499 reloc1.set_r_pcrel(false);
4500 reloc1.set_r_length(getRelocPointerSize());
4501 reloc1.set_r_extern(isExtern);
4502 reloc1.set_r_type(GENERIC_RELOC_VANILLA);
4503 }
4504 fSectionRelocs.push_back(reloc1);
4505 return 1;
4506
4507 case A::kPointerDiff16:
4508 case A::kPointerDiff32:
4509 case A::kPointerDiff64:
4510 {
4511 sreloc1->set_r_scattered(true);
4512 sreloc1->set_r_pcrel(false);
4513 sreloc1->set_r_length( (kind == A::kPointerDiff32) ? 2 : ((kind == A::kPointerDiff64) ? 3 : 1));
4514 if ( ref->getTarget().getScope() == ObjectFile::Atom::scopeTranslationUnit )
4515 sreloc1->set_r_type(PPC_RELOC_LOCAL_SECTDIFF);
4516 else
4517 sreloc1->set_r_type(PPC_RELOC_SECTDIFF);
4518 sreloc1->set_r_address(address);
4519 sreloc1->set_r_value(target.getAddress());
4520 sreloc2->set_r_scattered(true);
4521 sreloc2->set_r_pcrel(false);
4522 sreloc2->set_r_length(sreloc1->r_length());
4523 sreloc2->set_r_type(PPC_RELOC_PAIR);
4524 sreloc2->set_r_address(0);
4525 sreloc2->set_r_value(ref->getFromTarget().getAddress()+ref->getFromTargetOffset());
4526 fSectionRelocs.push_back(reloc2);
4527 fSectionRelocs.push_back(reloc1);
4528 return 2;
4529 }
4530
4531 case A::kBranch24WeakImport:
4532 case A::kBranch24:
4533 case A::kDtraceProbeSite:
4534 case A::kDtraceIsEnabledSite:
4535 if ( (ref->getTargetOffset() == 0) || isExtern ) {
4536 reloc1.set_r_address(address);
4537 if ( isExtern )
4538 reloc1.set_r_symbolnum(symbolIndex);
4539 else
4540 reloc1.set_r_symbolnum(sectionNum);
4541 reloc1.set_r_pcrel(true);
4542 reloc1.set_r_length(2);
4543 reloc1.set_r_type(PPC_RELOC_BR24);
4544 reloc1.set_r_extern(isExtern);
4545 }
4546 else {
4547 sreloc1->set_r_scattered(true);
4548 sreloc1->set_r_pcrel(true);
4549 sreloc1->set_r_length(2);
4550 sreloc1->set_r_type(PPC_RELOC_BR24);
4551 sreloc1->set_r_address(address);
4552 sreloc1->set_r_value(target.getAddress());
4553 }
4554 fSectionRelocs.push_back(reloc1);
4555 return 1;
4556
4557 case A::kBranch14:
4558 if ( (ref->getTargetOffset() == 0) || isExtern ) {
4559 reloc1.set_r_address(address);
4560 if ( isExtern )
4561 reloc1.set_r_symbolnum(symbolIndex);
4562 else
4563 reloc1.set_r_symbolnum(sectionNum);
4564 reloc1.set_r_pcrel(true);
4565 reloc1.set_r_length(2);
4566 reloc1.set_r_type(PPC_RELOC_BR14);
4567 reloc1.set_r_extern(isExtern);
4568 }
4569 else {
4570 sreloc1->set_r_scattered(true);
4571 sreloc1->set_r_pcrel(true);
4572 sreloc1->set_r_length(2);
4573 sreloc1->set_r_type(PPC_RELOC_BR14);
4574 sreloc1->set_r_address(address);
4575 sreloc1->set_r_value(target.getAddress());
4576 }
4577 fSectionRelocs.push_back(reloc1);
4578 return 1;
4579
4580 case A::kPICBaseLow16:
4581 case A::kPICBaseLow14:
4582 {
4583 pint_t fromAddr = atom->getAddress() + ref->getFromTargetOffset();
4584 pint_t toAddr = target.getAddress() + ref->getTargetOffset();
4585 sreloc1->set_r_scattered(true);
4586 sreloc1->set_r_pcrel(false);
4587 sreloc1->set_r_length(2);
4588 sreloc1->set_r_type(kind == A::kPICBaseLow16 ? PPC_RELOC_LO16_SECTDIFF : PPC_RELOC_LO14_SECTDIFF);
4589 sreloc1->set_r_address(address);
4590 sreloc1->set_r_value(target.getAddress());
4591 sreloc2->set_r_scattered(true);
4592 sreloc2->set_r_pcrel(false);
4593 sreloc2->set_r_length(2);
4594 sreloc2->set_r_type(PPC_RELOC_PAIR);
4595 sreloc2->set_r_address(((toAddr-fromAddr) >> 16) & 0xFFFF);
4596 sreloc2->set_r_value(fromAddr);
4597 fSectionRelocs.push_back(reloc2);
4598 fSectionRelocs.push_back(reloc1);
4599 return 2;
4600 }
4601
4602 case A::kPICBaseHigh16:
4603 {
4604 pint_t fromAddr = atom->getAddress() + ref->getFromTargetOffset();
4605 pint_t toAddr = target.getAddress() + ref->getTargetOffset();
4606 sreloc1->set_r_scattered(true);
4607 sreloc1->set_r_pcrel(false);
4608 sreloc1->set_r_length(2);
4609 sreloc1->set_r_type(PPC_RELOC_HA16_SECTDIFF);
4610 sreloc1->set_r_address(address);
4611 sreloc1->set_r_value(target.getAddress());
4612 sreloc2->set_r_scattered(true);
4613 sreloc2->set_r_pcrel(false);
4614 sreloc2->set_r_length(2);
4615 sreloc2->set_r_type(PPC_RELOC_PAIR);
4616 sreloc2->set_r_address((toAddr-fromAddr) & 0xFFFF);
4617 sreloc2->set_r_value(fromAddr);
4618 fSectionRelocs.push_back(reloc2);
4619 fSectionRelocs.push_back(reloc1);
4620 return 2;
4621 }
4622
4623 case A::kAbsLow14:
4624 case A::kAbsLow16:
4625 {
4626 pint_t toAddr = target.getAddress() + ref->getTargetOffset();
4627 if ( (ref->getTargetOffset() == 0) || isExtern ) {
4628 reloc1.set_r_address(address);
4629 if ( isExtern )
4630 reloc1.set_r_symbolnum(symbolIndex);
4631 else
4632 reloc1.set_r_symbolnum(sectionNum);
4633 reloc1.set_r_pcrel(false);
4634 reloc1.set_r_length(2);
4635 reloc1.set_r_extern(isExtern);
4636 reloc1.set_r_type(kind==A::kAbsLow16 ? PPC_RELOC_LO16 : PPC_RELOC_LO14);
4637 }
4638 else {
4639 sreloc1->set_r_scattered(true);
4640 sreloc1->set_r_pcrel(false);
4641 sreloc1->set_r_length(2);
4642 sreloc1->set_r_type(kind==A::kAbsLow16 ? PPC_RELOC_LO16 : PPC_RELOC_LO14);
4643 sreloc1->set_r_address(address);
4644 sreloc1->set_r_value(target.getAddress());
4645 }
4646 if ( isExtern )
4647 reloc2.set_r_address(ref->getTargetOffset() >> 16);
4648 else
4649 reloc2.set_r_address(toAddr >> 16);
4650 reloc2.set_r_symbolnum(0);
4651 reloc2.set_r_pcrel(false);
4652 reloc2.set_r_length(2);
4653 reloc2.set_r_extern(false);
4654 reloc2.set_r_type(PPC_RELOC_PAIR);
4655 fSectionRelocs.push_back(reloc2);
4656 fSectionRelocs.push_back(reloc1);
4657 return 2;
4658 }
4659
4660 case A::kAbsHigh16:
4661 {
4662 pint_t toAddr = target.getAddress() + ref->getTargetOffset();
4663 if ( (ref->getTargetOffset() == 0) || isExtern ) {
4664 reloc1.set_r_address(address);
4665 if ( isExtern )
4666 reloc1.set_r_symbolnum(symbolIndex);
4667 else
4668 reloc1.set_r_symbolnum(sectionNum);
4669 reloc1.set_r_pcrel(false);
4670 reloc1.set_r_length(2);
4671 reloc1.set_r_extern(isExtern);
4672 reloc1.set_r_type(PPC_RELOC_HI16);
4673 }
4674 else {
4675 sreloc1->set_r_scattered(true);
4676 sreloc1->set_r_pcrel(false);
4677 sreloc1->set_r_length(2);
4678 sreloc1->set_r_type(PPC_RELOC_HI16);
4679 sreloc1->set_r_address(address);
4680 sreloc1->set_r_value(target.getAddress());
4681 }
4682 if ( isExtern )
4683 reloc2.set_r_address(ref->getTargetOffset() & 0xFFFF);
4684 else
4685 reloc2.set_r_address(toAddr & 0xFFFF);
4686 reloc2.set_r_symbolnum(0);
4687 reloc2.set_r_pcrel(false);
4688 reloc2.set_r_length(2);
4689 reloc2.set_r_extern(false);
4690 reloc2.set_r_type(PPC_RELOC_PAIR);
4691 fSectionRelocs.push_back(reloc2);
4692 fSectionRelocs.push_back(reloc1);
4693 return 2;
4694 }
4695
4696 case A::kAbsHigh16AddLow:
4697 {
4698 pint_t toAddr = target.getAddress() + ref->getTargetOffset();
4699 uint32_t overflow = 0;
4700 if ( (toAddr & 0x00008000) != 0 )
4701 overflow = 0x10000;
4702 if ( (ref->getTargetOffset() == 0) || isExtern ) {
4703 reloc1.set_r_address(address);
4704 if ( isExtern )
4705 reloc1.set_r_symbolnum(symbolIndex);
4706 else
4707 reloc1.set_r_symbolnum(sectionNum);
4708 reloc1.set_r_pcrel(false);
4709 reloc1.set_r_length(2);
4710 reloc1.set_r_extern(isExtern);
4711 reloc1.set_r_type(PPC_RELOC_HA16);
4712 }
4713 else {
4714 sreloc1->set_r_scattered(true);
4715 sreloc1->set_r_pcrel(false);
4716 sreloc1->set_r_length(2);
4717 sreloc1->set_r_type(PPC_RELOC_HA16);
4718 sreloc1->set_r_address(address);
4719 sreloc1->set_r_value(target.getAddress());
4720 }
4721 if ( isExtern )
4722 reloc2.set_r_address(ref->getTargetOffset() & 0xFFFF);
4723 else
4724 reloc2.set_r_address(toAddr & 0xFFFF);
4725 reloc2.set_r_symbolnum(0);
4726 reloc2.set_r_pcrel(false);
4727 reloc2.set_r_length(2);
4728 reloc2.set_r_extern(false);
4729 reloc2.set_r_type(PPC_RELOC_PAIR);
4730 fSectionRelocs.push_back(reloc2);
4731 fSectionRelocs.push_back(reloc1);
4732 return 2;
4733 }
4734
4735 case A::kDtraceTypeReference:
4736 case A::kDtraceProbe:
4737 // generates no relocs
4738 return 0;
4739 }
4740 return 0;
4741 }
4742
4743
4744
4745 //
4746 // There are cases when an entry in the indirect symbol table is the magic value
4747 // INDIRECT_SYMBOL_LOCAL instead of being a symbol index. When that happens
4748 // the content of the corresponding part of the __nl_symbol_pointer section
4749 // must also change.
4750 //
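// The rules below are: never for tentative definitions (they have no
// address yet), always when the reference has an addend, and otherwise
// only for targets that will not be exported.
//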
4751 template <typename A>
4752 bool Writer<A>::indirectSymbolInRelocatableIsLocal(const ObjectFile::Reference* ref) const
4753 {
4754 // cannot use INDIRECT_SYMBOL_LOCAL for references to tentative definitions in object files
4755 // because tentative defs don't have addresses
4756 if ( ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kTentativeDefinition )
4757 return false;
4758
4759 // must use INDIRECT_SYMBOL_LOCAL if there is an addend
4760 if ( ref->getTargetOffset() != 0 )
4761 return true;
4762
4763 // don't use INDIRECT_SYMBOL_LOCAL for external symbols
4764 return ! this->shouldExport(ref->getTarget());
4765 }
4766
4767
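//
// For relocatable (.o) output: walk every segment/section/atom/reference,
// filling in the indirect symbol table for stub and pointer sections and
// emitting relocation entries (mostly via addObjectRelocs()). The relocs
// are collected in forward order and then reversed, so the per-section
// reloc offsets are recomputed afterwards.
//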
4768 template <typename A>
4769 void Writer<A>::buildObjectFileFixups()
4770 {
4771 uint32_t relocIndex = 0;
4772 std::vector<SegmentInfo*>& segmentInfos = fSegmentInfos;
4773 const int segCount = segmentInfos.size();
4774 for(int i=0; i < segCount; ++i) {
4775 SegmentInfo* curSegment = segmentInfos[i];
4776 std::vector<SectionInfo*>& sectionInfos = curSegment->fSections;
4777 const int sectionCount = sectionInfos.size();
4778 for(int j=0; j < sectionCount; ++j) {
4779 SectionInfo* curSection = sectionInfos[j];
4780 //fprintf(stderr, "buildObjectFileFixups(): starting section %s\n", curSection->fSectionName);
4781 std::vector<ObjectFile::Atom*>& sectionAtoms = curSection->fAtoms;
4782 if ( ! curSection->fAllZeroFill ) {
4783 if ( curSection->fAllNonLazyPointers || curSection->fAllLazyPointers
4784 || curSection->fAllLazyDylibPointers || curSection->fAllStubs )
4785 curSection->fIndirectSymbolOffset = fIndirectTableAtom->fTable.size();
4786 curSection->fRelocOffset = relocIndex;
4787 const int atomCount = sectionAtoms.size();
4788 for (int k=0; k < atomCount; ++k) {
4789 ObjectFile::Atom* atom = sectionAtoms[k];
4790 //fprintf(stderr, "buildObjectFileFixups(): atom %s has %lu references\n", atom->getDisplayName(), atom->getReferences().size());
4791 std::vector<ObjectFile::Reference*>& refs = atom->getReferences();
4792 const int refCount = refs.size();
4793 for (int l=0; l < refCount; ++l) {
4794 ObjectFile::Reference* ref = refs[l];
4795 if ( curSection->fAllNonLazyPointers || curSection->fAllLazyPointers
4796 || curSection->fAllLazyDylibPointers || curSection->fAllStubs ) {
4797 uint32_t offsetInSection = atom->getSectionOffset();
4798 uint32_t indexInSection = offsetInSection / atom->getSize();
4799 uint32_t undefinedSymbolIndex;
4800 if ( curSection->fAllStubs ) {
4801 ObjectFile::Atom& stubTarget = ref->getTarget();
4802 ObjectFile::Atom& stubTargetTarget = stubTarget.getReferences()[0]->getTarget();
4803 undefinedSymbolIndex = this->symbolIndex(stubTargetTarget);
4804 //fprintf(stderr, "stub %s ==> %s ==> %s ==> index:%u\n", atom->getDisplayName(), stubTarget.getDisplayName(), stubTargetTarget.getDisplayName(), undefinedSymbolIndex);
4805 }
4806 else if ( curSection->fAllNonLazyPointers) {
4807 // only use INDIRECT_SYMBOL_LOCAL in non-lazy-pointers for atoms that won't be in symbol table or have an addend
4808 if ( this->indirectSymbolInRelocatableIsLocal(ref) )
4809 undefinedSymbolIndex = INDIRECT_SYMBOL_LOCAL;
4810 else
4811 undefinedSymbolIndex = this->symbolIndex(ref->getTarget());
4812 }
4813 else {
4814 // should never get here, fAllLazyPointers not used in generated .o files
4815 undefinedSymbolIndex = INDIRECT_SYMBOL_LOCAL;
4816 }
4817 uint32_t indirectTableIndex = indexInSection + curSection->fIndirectSymbolOffset;
4818 IndirectEntry entry = { indirectTableIndex, undefinedSymbolIndex };
4819 //printf("fIndirectTableAtom->fTable.add(sectionIndex=%u, indirectTableIndex=%u => %u), size=%lld\n", indexInSection, indirectTableIndex, undefinedSymbolIndex, atom->getSize());
4820 fIndirectTableAtom->fTable.push_back(entry);
4821 if ( curSection->fAllLazyPointers ) {
4822 ObjectFile::Atom& target = ref->getTarget();
4823 ObjectFile::Atom& fromTarget = ref->getFromTarget();
4824 if ( &fromTarget == NULL ) {
4825 warning("lazy pointer %s missing initial binding", atom->getDisplayName());
4826 }
4827 else {
4828 bool isExtern = ( ((target.getDefinitionKind() == ObjectFile::Atom::kExternalDefinition)
4829 || (target.getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition))
4830 && (target.getSymbolTableInclusion() != ObjectFile::Atom::kSymbolTableNotIn) );
4831 macho_relocation_info<P> reloc1;
4832 reloc1.set_r_address(atom->getSectionOffset());
4833 reloc1.set_r_symbolnum(isExtern ? this->symbolIndex(target) : target.getSection()->getIndex());
4834 reloc1.set_r_pcrel(false);
4835 reloc1.set_r_length();
4836 reloc1.set_r_extern(isExtern);
4837 reloc1.set_r_type(GENERIC_RELOC_VANILLA);
4838 fSectionRelocs.push_back(reloc1);
4839 ++relocIndex;
4840 }
4841 }
4842 else if ( curSection->fAllStubs ) {
4843 relocIndex += this->addObjectRelocs(atom, ref);
4844 }
4845 }
4846 else if ( (ref->getKind() != A::kNoFixUp) && (ref->getTargetBinding() != ObjectFile::Reference::kDontBind) ) {
4847 relocIndex += this->addObjectRelocs(atom, ref);
4848 }
4849 }
4850 }
4851 curSection->fRelocCount = relocIndex - curSection->fRelocOffset;
4852 }
4853 }
4854 }
4855
4856 // reverse the relocs
4857 std::reverse(fSectionRelocs.begin(), fSectionRelocs.end());
4858
4859 // now reverse section reloc offsets
4860 for(int i=0; i < segCount; ++i) {
4861 SegmentInfo* curSegment = segmentInfos[i];
4862 std::vector<SectionInfo*>& sectionInfos = curSegment->fSections;
4863 const int sectionCount = sectionInfos.size();
4864 for(int j=0; j < sectionCount; ++j) {
4865 SectionInfo* curSection = sectionInfos[j];
4866 curSection->fRelocOffset = relocIndex - curSection->fRelocOffset - curSection->fRelocCount;
4867 }
4868 }
4869
4870 }
4871
4872
4873 template <>
4874 uint64_t Writer<x86_64>::relocAddressInFinalLinkedImage(uint64_t address, const ObjectFile::Atom* atom) const
4875 {
4876 uint64_t result;
4877 if ( fOptions.outputKind() == Options::kKextBundle ) {
4878 // for x86_64 kext bundles, the r_address field in relocs
4879 // is the offset from the start address of the first segment
4880 result = address - fSegmentInfos[0]->fBaseAddress;
4881 if ( result > 0xFFFFFFFF ) {
4882 throwf("kext bundle too large: address can't fit in 31-bit r_address field in %s from %s",
4883 atom->getDisplayName(), atom->getFile()->getPath());
4884 }
4885 }
4886 else {
4887 // for x86_64, the r_address field in relocs for final linked images
4888 // is the offset from the start address of the first writable segment
4889 result = address - fFirstWritableSegment->fBaseAddress;
4890 if ( result > 0xFFFFFFFF ) {
4891 if ( strcmp(atom->getSegment().getName(), "__TEXT") == 0 )
4892 throwf("text relocs not supported for x86_64 in %s from %s",
4893 atom->getDisplayName(), atom->getFile()->getPath());
4894 else
4895 throwf("image too large: address can't fit in 32-bit r_address field in %s from %s",
4896 atom->getDisplayName(), atom->getFile()->getPath());
4897 }
4898 }
4899 return result;
4900 }
4901
4902
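//
// illegalRelocInFinalLinkedImage() returns true for reference kinds that
// cannot be expressed in the output without a text reloc, e.g. absolute
// addresses embedded in the read-only code of a slidable image.
//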
4903 template <>
4904 bool Writer<ppc>::illegalRelocInFinalLinkedImage(const ObjectFile::Reference& ref)
4905 {
4906 switch ( ref.getKind() ) {
4907 case ppc::kAbsLow16:
4908 case ppc::kAbsLow14:
4909 case ppc::kAbsHigh16:
4910 case ppc::kAbsHigh16AddLow:
4911 if ( fSlideable )
4912 return true;
4913 }
4914 return false;
4915 }
4916
4917
4918 template <>
4919 bool Writer<ppc64>::illegalRelocInFinalLinkedImage(const ObjectFile::Reference& ref)
4920 {
4921 switch ( ref.getKind() ) {
4922 case ppc::kAbsLow16:
4923 case ppc::kAbsLow14:
4924 case ppc::kAbsHigh16:
4925 case ppc::kAbsHigh16AddLow:
4926 if ( fSlideable )
4927 return true;
4928 }
4929 return false;
4930 }
4931
4932 template <>
4933 bool Writer<x86>::illegalRelocInFinalLinkedImage(const ObjectFile::Reference& ref)
4934 {
4935 if ( ref.getKind() == x86::kAbsolute32 ) {
4936 switch ( ref.getTarget().getDefinitionKind() ) {
4937 case ObjectFile::Atom::kTentativeDefinition:
4938 case ObjectFile::Atom::kRegularDefinition:
4939 case ObjectFile::Atom::kWeakDefinition:
4940 // illegal in dylibs/bundles, until we support TEXT relocs
4941 return fSlideable;
4942 case ObjectFile::Atom::kExternalDefinition:
4943 case ObjectFile::Atom::kExternalWeakDefinition:
4944 // illegal until we support TEXT relocs
4945 return true;
4946 case ObjectFile::Atom::kAbsoluteSymbol:
4947 // absolute symbols only allowed in static executables
4948 return ( fOptions.outputKind() != Options::kStaticExecutable);
4949 }
4950 }
4951 return false;
4952 }
4953
4954 template <>
4955 bool Writer<x86_64>::illegalRelocInFinalLinkedImage(const ObjectFile::Reference& ref)
4956 {
4957 if ( fOptions.outputKind() == Options::kKextBundle ) {
4958 switch ( ref.getTarget().getDefinitionKind() ) {
4959 case ObjectFile::Atom::kTentativeDefinition:
4960 case ObjectFile::Atom::kRegularDefinition:
4961 case ObjectFile::Atom::kWeakDefinition:
4962 case ObjectFile::Atom::kAbsoluteSymbol:
4963 return false;
4964 case ObjectFile::Atom::kExternalDefinition:
4965 case ObjectFile::Atom::kExternalWeakDefinition:
4966 // true means we need a TEXT reloc
4967 switch ( ref.getKind() ) {
4968 case x86_64::kBranchPCRel32:
4969 case x86_64::kBranchPCRel32WeakImport:
4970 case x86_64::kPCRel32GOTLoad:
4971 case x86_64::kPCRel32GOTLoadWeakImport:
4972 case x86_64::kPCRel32GOT:
4973 case x86_64::kPCRel32GOTWeakImport:
4974 return true;
4975 }
4976 break;
4977 }
4978 }
4979 return false;
4980 }
4981
4982 template <>
4983 bool Writer<arm>::illegalRelocInFinalLinkedImage(const ObjectFile::Reference& ref)
4984 {
4985 switch ( fOptions.outputKind()) {
4986 case Options::kStaticExecutable:
4987 case Options::kPreload:
4988 // all relocations allowed in static executables
4989 return false;
4990 default:
4991 break;
4992 }
4993 if ( ref.getKind() == arm::kReadOnlyPointer ) {
4994 switch ( ref.getTarget().getDefinitionKind() ) {
4995 case ObjectFile::Atom::kTentativeDefinition:
4996 case ObjectFile::Atom::kRegularDefinition:
4997 case ObjectFile::Atom::kWeakDefinition:
4998 // illegal in dylibs/bundles, until we support TEXT relocs
4999 return fSlideable;
5000 case ObjectFile::Atom::kExternalDefinition:
5001 case ObjectFile::Atom::kExternalWeakDefinition:
5002 // illegal until we support TEXT relocs
5003 return true;
5004 case ObjectFile::Atom::kAbsoluteSymbol:
5005 // absolute symbols only allowed in static executables
5006 return true;
5007 }
5008 }
5009 return false;
5010 }
5011
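//
// generatesLocalTextReloc() is used for stub sections and when text relocs
// are permitted (e.g. -read_only_relocs suppress): an absolute reference to
// something in this same linkage unit becomes a non-extern reloc against the
// target's section, plus rebase info when compressed dyld info is generated.
//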
5012 template <>
5013 bool Writer<x86>::generatesLocalTextReloc(const ObjectFile::Reference& ref, const ObjectFile::Atom& atom, SectionInfo* atomSection)
5014 {
5015 if ( ref.getKind() == x86::kAbsolute32 ) {
5016 switch ( ref.getTarget().getDefinitionKind() ) {
5017 case ObjectFile::Atom::kTentativeDefinition:
5018 case ObjectFile::Atom::kRegularDefinition:
5019 case ObjectFile::Atom::kWeakDefinition:
5020 // a reference to the absolute address of something in this same linkage unit can be
5021 // encoded as a local text reloc in a dylib or bundle
5022 if ( fSlideable ) {
5023 macho_relocation_info<P> reloc;
5024 SectionInfo* sectInfo = (SectionInfo*)(ref.getTarget().getSection());
5025 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5026 reloc.set_r_symbolnum(sectInfo->getIndex());
5027 reloc.set_r_pcrel(false);
5028 reloc.set_r_length();
5029 reloc.set_r_extern(false);
5030 reloc.set_r_type(GENERIC_RELOC_VANILLA);
5031 fInternalRelocs.push_back(reloc);
5032 atomSection->fHasTextLocalRelocs = true;
5033 if ( fOptions.makeCompressedDyldInfo() ) {
5034 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_TEXT_ABSOLUTE32, atom.getAddress() + ref.getFixUpOffset()));
5035 }
5036 return true;
5037 }
5038 return false;
5039 case ObjectFile::Atom::kExternalDefinition:
5040 case ObjectFile::Atom::kExternalWeakDefinition:
5041 case ObjectFile::Atom::kAbsoluteSymbol:
5042 return false;
5043 }
5044 }
5045 return false;
5046 }
5047
5048 template <>
5049 bool Writer<ppc>::generatesLocalTextReloc(const ObjectFile::Reference& ref, const ObjectFile::Atom& atom, SectionInfo* atomSection)
5050 {
5051 macho_relocation_info<P> reloc1;
5052 macho_relocation_info<P> reloc2;
5053 switch ( ref.getTarget().getDefinitionKind() ) {
5054 case ObjectFile::Atom::kTentativeDefinition:
5055 case ObjectFile::Atom::kRegularDefinition:
5056 case ObjectFile::Atom::kWeakDefinition:
5057 switch ( ref.getKind() ) {
5058 case ppc::kAbsLow16:
5059 case ppc::kAbsLow14:
5060 // a reference to the absolute address of something in this same linkage unit can be
5061 // encoded as a local text reloc in a dylib or bundle
5062 if ( fSlideable ) {
5063 SectionInfo* sectInfo = (SectionInfo*)(ref.getTarget().getSection());
5064 uint32_t targetAddr = ref.getTarget().getAddress() + ref.getTargetOffset();
5065 reloc1.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5066 reloc1.set_r_symbolnum(sectInfo->getIndex());
5067 reloc1.set_r_pcrel(false);
5068 reloc1.set_r_length(2);
5069 reloc1.set_r_extern(false);
5070 reloc1.set_r_type(ref.getKind()==ppc::kAbsLow16 ? PPC_RELOC_LO16 : PPC_RELOC_LO14);
5071 reloc2.set_r_address(targetAddr >> 16);
5072 reloc2.set_r_symbolnum(0);
5073 reloc2.set_r_pcrel(false);
5074 reloc2.set_r_length(2);
5075 reloc2.set_r_extern(false);
5076 reloc2.set_r_type(PPC_RELOC_PAIR);
5077 fInternalRelocs.push_back(reloc1);
5078 fInternalRelocs.push_back(reloc2);
5079 atomSection->fHasTextLocalRelocs = true;
5080 return true;
5081 }
5082 break;
5083 case ppc::kAbsHigh16:
5084 case ppc::kAbsHigh16AddLow:
5085 if ( fSlideable ) {
5086 SectionInfo* sectInfo = (SectionInfo*)(ref.getTarget().getSection());
5087 uint32_t targetAddr = ref.getTarget().getAddress() + ref.getTargetOffset();
5088 reloc1.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5089 reloc1.set_r_symbolnum(sectInfo->getIndex());
5090 reloc1.set_r_pcrel(false);
5091 reloc1.set_r_length(2);
5092 reloc1.set_r_extern(false);
5093 reloc1.set_r_type(ref.getKind()==ppc::kAbsHigh16AddLow ? PPC_RELOC_HA16 : PPC_RELOC_HI16);
5094 reloc2.set_r_address(targetAddr & 0xFFFF);
5095 reloc2.set_r_symbolnum(0);
5096 reloc2.set_r_pcrel(false);
5097 reloc2.set_r_length(2);
5098 reloc2.set_r_extern(false);
5099 reloc2.set_r_type(PPC_RELOC_PAIR);
5100 fInternalRelocs.push_back(reloc1);
5101 fInternalRelocs.push_back(reloc2);
5102 atomSection->fHasTextLocalRelocs = true;
5103 return true;
5104 }
5105 }
5106 break;
5107 case ObjectFile::Atom::kExternalDefinition:
5108 case ObjectFile::Atom::kExternalWeakDefinition:
5109 case ObjectFile::Atom::kAbsoluteSymbol:
5110 return false;
5111 }
5112 return false;
5113 }
5114
5115 template <>
5116 bool Writer<arm>::generatesLocalTextReloc(const ObjectFile::Reference& ref, const ObjectFile::Atom& atom, SectionInfo* atomSection)
5117 {
5118 if ( ref.getKind() == arm::kReadOnlyPointer ) {
5119 switch ( ref.getTarget().getDefinitionKind() ) {
5120 case ObjectFile::Atom::kTentativeDefinition:
5121 case ObjectFile::Atom::kRegularDefinition:
5122 case ObjectFile::Atom::kWeakDefinition:
5123 // a reference to the absolute address of something in this same linkage unit can be
5124 // encoded as a local text reloc in a dylib or bundle
5125 if ( fSlideable ) {
5126 macho_relocation_info<P> reloc;
5127 SectionInfo* sectInfo = (SectionInfo*)(ref.getTarget().getSection());
5128 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5129 reloc.set_r_symbolnum(sectInfo->getIndex());
5130 reloc.set_r_pcrel(false);
5131 reloc.set_r_length();
5132 reloc.set_r_extern(false);
5133 reloc.set_r_type(GENERIC_RELOC_VANILLA);
5134 fInternalRelocs.push_back(reloc);
5135 atomSection->fHasTextLocalRelocs = true;
5136 if ( fOptions.makeCompressedDyldInfo() ) {
5137 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_TEXT_ABSOLUTE32, atom.getAddress() + ref.getFixUpOffset()));
5138 }
5139 return true;
5140 }
5141 return false;
5142 case ObjectFile::Atom::kExternalDefinition:
5143 case ObjectFile::Atom::kExternalWeakDefinition:
5144 case ObjectFile::Atom::kAbsoluteSymbol:
5145 return false;
5146 }
5147 }
5148 return false;
5149 }
5150
5151
5152 template <>
5153 bool Writer<x86_64>::generatesLocalTextReloc(const ObjectFile::Reference&, const ObjectFile::Atom& atom, SectionInfo* curSection)
5154 {
5155 // text relocs not supported (rarely needed because of RIP-relative addressing)
5156 return false;
5157 }
5158
5159 template <>
5160 bool Writer<ppc64>::generatesLocalTextReloc(const ObjectFile::Reference&, const ObjectFile::Atom& atom, SectionInfo* curSection)
5161 {
5162 // text relocs not supported
5163 return false;
5164 }
5165
5166 template <>
5167 bool Writer<x86>::generatesExternalTextReloc(const ObjectFile::Reference& ref, const ObjectFile::Atom& atom, SectionInfo* atomSection)
5168 {
5169 if ( ref.getKind() == x86::kAbsolute32 ) {
5170 macho_relocation_info<P> reloc;
5171 switch ( ref.getTarget().getDefinitionKind() ) {
5172 case ObjectFile::Atom::kTentativeDefinition:
5173 case ObjectFile::Atom::kRegularDefinition:
5174 case ObjectFile::Atom::kWeakDefinition:
5175 return false;
5176 case ObjectFile::Atom::kExternalDefinition:
5177 case ObjectFile::Atom::kExternalWeakDefinition:
5178 // a reference to the absolute address of something in another linkage unit can be
5179 // encoded as an external text reloc in a dylib or bundle
5180 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5181 reloc.set_r_symbolnum(this->symbolIndex(ref.getTarget()));
5182 reloc.set_r_pcrel(false);
5183 reloc.set_r_length();
5184 reloc.set_r_extern(true);
5185 reloc.set_r_type(GENERIC_RELOC_VANILLA);
5186 fExternalRelocs.push_back(reloc);
5187 atomSection->fHasTextExternalRelocs = true;
5188 return true;
5189 case ObjectFile::Atom::kAbsoluteSymbol:
5190 return false;
5191 }
5192 }
5193 return false;
5194 }
5195
5196 template <>
5197 bool Writer<x86_64>::generatesExternalTextReloc(const ObjectFile::Reference& ref, const ObjectFile::Atom& atom, SectionInfo* atomSection)
5198 {
5199 if ( fOptions.outputKind() == Options::kKextBundle ) {
5200 macho_relocation_info<P> reloc;
5201 switch ( ref.getTarget().getDefinitionKind() ) {
5202 case ObjectFile::Atom::kTentativeDefinition:
5203 case ObjectFile::Atom::kRegularDefinition:
5204 case ObjectFile::Atom::kWeakDefinition:
5205 case ObjectFile::Atom::kAbsoluteSymbol:
5206 return false;
5207 case ObjectFile::Atom::kExternalDefinition:
5208 case ObjectFile::Atom::kExternalWeakDefinition:
5209 switch ( ref.getKind() ) {
5210 case x86_64::kBranchPCRel32:
5211 case x86_64::kBranchPCRel32WeakImport:
5212 // a branch to something in another linkage unit is
5213 // encoded as an external text reloc in a kext bundle
5214 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5215 reloc.set_r_symbolnum(this->symbolIndex(ref.getTarget()));
5216 reloc.set_r_pcrel(true);
5217 reloc.set_r_length(2);
5218 reloc.set_r_extern(true);
5219 reloc.set_r_type(X86_64_RELOC_BRANCH);
5220 fExternalRelocs.push_back(reloc);
5221 atomSection->fHasTextExternalRelocs = true;
5222 return true;
5223 case x86_64::kPCRel32GOTLoad:
5224 case x86_64::kPCRel32GOTLoadWeakImport:
5225 // a load of the GOT entry for a symbol in another linkage unit is
5226 // encoded as an external text reloc in a kext bundle
5227 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5228 reloc.set_r_symbolnum(this->symbolIndex(ref.getTarget()));
5229 reloc.set_r_pcrel(true);
5230 reloc.set_r_length(2);
5231 reloc.set_r_extern(true);
5232 reloc.set_r_type(X86_64_RELOC_GOT_LOAD);
5233 fExternalRelocs.push_back(reloc);
5234 atomSection->fHasTextExternalRelocs = true;
5235 return true;
5236 case x86_64::kPCRel32GOT:
5237 case x86_64::kPCRel32GOTWeakImport:
5238 // a use of the GOT entry for a symbol in another linkage unit is
5239 // encoded as an external text reloc in a kext bundle
5240 reloc.set_r_address(this->relocAddressInFinalLinkedImage(atom.getAddress() + ref.getFixUpOffset(), &atom));
5241 reloc.set_r_symbolnum(this->symbolIndex(ref.getTarget()));
5242 reloc.set_r_pcrel(true);
5243 reloc.set_r_length(2);
5244 reloc.set_r_extern(true);
5245 reloc.set_r_type(X86_64_RELOC_GOT);
5246 fExternalRelocs.push_back(reloc);
5247 atomSection->fHasTextExternalRelocs = true;
5248 return true;
5249 }
5250 break;
5251 }
5252 }
5253 return false;
5254 }
5255
5256
5257 template <typename A>
5258 bool Writer<A>::generatesExternalTextReloc(const ObjectFile::Reference&, const ObjectFile::Atom& atom, SectionInfo* curSection)
5259 {
5260 return false;
5261 }
5262
5263
5264
5265
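//
// Roughly: classifies what dyld must do with a pointer to 'target' at launch.
// kRelocNone - the address is fixed at link time; kRelocInternal - the pointer
// only needs to be slid with the image (rebased); kRelocExternal - the pointer
// must be bound to a symbol in another (or interposable) image.
//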
5266 template <typename A>
5267 typename Writer<A>::RelocKind Writer<A>::relocationNeededInFinalLinkedImage(const ObjectFile::Atom& target) const
5268 {
5269 switch ( target.getDefinitionKind() ) {
5270 case ObjectFile::Atom::kTentativeDefinition:
5271 case ObjectFile::Atom::kRegularDefinition:
5272 // in main executables, the only way regular symbols are indirected is if -interposable is used
5273 if ( fOptions.outputKind() == Options::kDynamicExecutable ) {
5274 if ( this->shouldExport(target) && fOptions.interposable(target.getName()) )
5275 return kRelocExternal;
5276 else if ( fSlideable )
5277 return kRelocInternal;
5278 else
5279 return kRelocNone;
5280 }
5281 // for flat-namespace or interposable two-level-namespace
5282 // all references to exported symbols get indirected
5283 else if ( this->shouldExport(target) &&
5284 ((fOptions.nameSpace() == Options::kFlatNameSpace)
5285 || (fOptions.nameSpace() == Options::kForceFlatNameSpace)
5286 || fOptions.interposable(target.getName()))
5287 && (target.getName() != NULL)
5288 && (strncmp(target.getName(), ".objc_class_", 12) != 0) ) // <rdar://problem/5254468>
5289 return kRelocExternal;
5290 else if ( fSlideable )
5291 return kRelocInternal;
5292 else
5293 return kRelocNone;
5294 case ObjectFile::Atom::kWeakDefinition:
5295 // in static executables, references to weak definitions are not indirected
5296 if ( fOptions.outputKind() == Options::kStaticExecutable)
5297 return kRelocNone;
5298 // in dynamic code, all calls to global weak definitions get indirected
5299 if ( this->shouldExport(target) )
5300 return kRelocExternal;
5301 else if ( fSlideable )
5302 return kRelocInternal;
5303 else
5304 return kRelocNone;
5305 case ObjectFile::Atom::kExternalDefinition:
5306 case ObjectFile::Atom::kExternalWeakDefinition:
5307 return kRelocExternal;
5308 case ObjectFile::Atom::kAbsoluteSymbol:
5309 return kRelocNone;
5310 }
5311 return kRelocNone;
5312 }
5313
5314 template <typename A>
5315 uint64_t Writer<A>::relocAddressInFinalLinkedImage(uint64_t address, const ObjectFile::Atom* atom) const
5316 {
5317 // for 32-bit architectures, the r_address field in relocs
5318 // for final linked images is the offset from the first segment
5319 uint64_t result = address - fSegmentInfos[0]->fBaseAddress;
5320 if ( fOptions.outputKind() == Options::kPreload ) {
5321 // kPreload uses a virtual __HEADER segment to cover the load commands
5322 result = address - fSegmentInfos[1]->fBaseAddress;
5323 }
5324 // or the offset from the first writable segment if built split-seg
5325 if ( fOptions.splitSeg() )
5326 result = address - fFirstWritableSegment->fBaseAddress;
5327 if ( result > 0x7FFFFFFF ) {
5328 throwf("image too large: address can't fit in 31-bit r_address field in %s from %s",
5329 atom->getDisplayName(), atom->getFile()->getPath());
5330 }
5331 return result;
5332 }
5333
5334 template <>
5335 uint64_t Writer<ppc64>::relocAddressInFinalLinkedImage(uint64_t address, const ObjectFile::Atom* atom) const
5336 {
5337 // for ppc64, the Mac OS X 10.4 dyld assumes r_address is always the offset from the base address.
5338 // the 10.5 dyld interprets the r_address as:
5339 // 1) an offset from the base address, iff there are no writable segments with an address > 4GB from base address, otherwise
5340 // 2) an offset from the base address of the first writable segment
5341 // For dyld, r_address is always the offset from the base address
5342 uint64_t result;
5343 bool badFor10_4 = false;
5344 if ( fWritableSegmentPastFirst4GB ) {
5345 if ( fOptions.macosxVersionMin() < ObjectFile::ReaderOptions::k10_5 )
5346 badFor10_4 = true;
5347 result = address - fFirstWritableSegment->fBaseAddress;
5348 if ( result > 0xFFFFFFFF ) {
5349 throwf("image too large: address can't fit in 32-bit r_address field in %s from %s",
5350 atom->getDisplayName(), atom->getFile()->getPath());
5351 }
5352 }
5353 else {
5354 result = address - fSegmentInfos[0]->fBaseAddress;
5355 if ( (fOptions.macosxVersionMin() < ObjectFile::ReaderOptions::k10_5) && (result > 0x7FFFFFFF) )
5356 badFor10_4 = true;
5357 }
5358 if ( badFor10_4 ) {
5359 throwf("image or pagezero_size too large for Mac OS X 10.4: address can't fit in 31-bit r_address field for %s from %s",
5360 atom->getDisplayName(), atom->getFile()->getPath());
5361 }
5362 return result;
5363 }
5364
5365
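//
// Prebound lazy pointer reloc type, per architecture. The *_RELOC_PB_LA_PTR
// scattered reloc lets dyld reset a prebound lazy pointer back to the classic
// lazy binding helper if the prebinding turns out to be invalid; the 64-bit
// architectures do not support prebinding.
//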
5366 template <> bool Writer<ppc>::preboundLazyPointerType(uint8_t* type) { *type = PPC_RELOC_PB_LA_PTR; return true; }
5367 template <> bool Writer<ppc64>::preboundLazyPointerType(uint8_t* type) { throw "prebinding not supported"; }
5368 template <> bool Writer<x86>::preboundLazyPointerType(uint8_t* type) { *type = GENERIC_RELOC_PB_LA_PTR; return true; }
5369 template <> bool Writer<x86_64>::preboundLazyPointerType(uint8_t* type) { throw "prebinding not supported"; }
5370 template <> bool Writer<arm>::preboundLazyPointerType(uint8_t* type) { *type = ARM_RELOC_PB_LA_PTR; return true; }
5371
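//
// For final linked images: walk every atom and emit the indirect symbol
// table entries, the classic internal/external relocations, and (when
// fOptions.makeCompressedDyldInfo() is set) the rebase/bind/weak-bind
// records, then encode the split-seg and compressed LINKEDIT atoms.
//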
5372 template <typename A>
5373 void Writer<A>::buildExecutableFixups()
5374 {
5375 if ( fIndirectTableAtom != NULL )
5376 fIndirectTableAtom->fTable.reserve(50); // minimize reallocations
5377 std::vector<SegmentInfo*>& segmentInfos = fSegmentInfos;
5378 const int segCount = segmentInfos.size();
5379 for(int i=0; i < segCount; ++i) {
5380 SegmentInfo* curSegment = segmentInfos[i];
5381 std::vector<SectionInfo*>& sectionInfos = curSegment->fSections;
5382 const int sectionCount = sectionInfos.size();
5383 for(int j=0; j < sectionCount; ++j) {
5384 SectionInfo* curSection = sectionInfos[j];
5385 //fprintf(stderr, "starting section %s\n", curSection->fSectionName);
5386 std::vector<ObjectFile::Atom*>& sectionAtoms = curSection->fAtoms;
5387 if ( ! curSection->fAllZeroFill ) {
5388 if ( curSection->fAllNonLazyPointers || curSection->fAllLazyPointers || curSection->fAllLazyDylibPointers
5389 || curSection->fAllStubs || curSection->fAllSelfModifyingStubs ) {
5390 if ( fIndirectTableAtom != NULL )
5391 curSection->fIndirectSymbolOffset = fIndirectTableAtom->fTable.size();
5392 }
5393 const int atomCount = sectionAtoms.size();
5394 for (int k=0; k < atomCount; ++k) {
5395 ObjectFile::Atom* atom = sectionAtoms[k];
5396 std::vector<ObjectFile::Reference*>& refs = atom->getReferences();
5397 const int refCount = refs.size();
5398 //fprintf(stderr, "atom %s has %d references in section %s, %p\n", atom->getDisplayName(), refCount, curSection->fSectionName, atom->getSection());
5399 if ( curSection->fAllNonLazyPointers && (refCount == 0) ) {
5400 // handle imageloadercache GOT slot
5401 uint32_t offsetInSection = atom->getSectionOffset();
5402 uint32_t indexInSection = offsetInSection / sizeof(pint_t);
5403 uint32_t indirectTableIndex = indexInSection + curSection->fIndirectSymbolOffset;
5404 // use INDIRECT_SYMBOL_ABS so 10.5 dyld will leave value as zero
5405 IndirectEntry entry = { indirectTableIndex, INDIRECT_SYMBOL_ABS };
5406 //fprintf(stderr,"fIndirectTableAtom->fTable.push_back(tableIndex=%d, symIndex=0x%X, section=%s)\n",
5407 // indirectTableIndex, INDIRECT_SYMBOL_LOCAL, curSection->fSectionName);
5408 fIndirectTableAtom->fTable.push_back(entry);
5409 }
5410 for (int l=0; l < refCount; ++l) {
5411 ObjectFile::Reference* ref = refs[l];
5412 if ( (fOptions.outputKind() != Options::kKextBundle) &&
5413 (curSection->fAllNonLazyPointers || curSection->fAllLazyPointers || curSection->fAllLazyDylibPointers) ) {
5414 // if atom is in (non)lazy_pointer section, this is encoded as an indirect symbol
5415 if ( atom->getSize() != sizeof(pint_t) ) {
5416 warning("wrong size pointer atom %s from file %s", atom->getDisplayName(), atom->getFile()->getPath());
5417 }
5418 ObjectFile::Atom* pointerTarget = &(ref->getTarget());
5419 if ( curSection->fAllLazyPointers || curSection->fAllLazyDylibPointers ) {
5420 pointerTarget = ((LazyPointerAtom<A>*)atom)->getTarget();
5421 }
5422 uint32_t offsetInSection = atom->getSectionOffset();
5423 uint32_t indexInSection = offsetInSection / sizeof(pint_t);
5424 uint32_t undefinedSymbolIndex = INDIRECT_SYMBOL_LOCAL;
5425 if (atom == fFastStubGOTAtom)
5426 undefinedSymbolIndex = INDIRECT_SYMBOL_ABS;
5427 else if ( this->relocationNeededInFinalLinkedImage(*pointerTarget) == kRelocExternal )
5428 undefinedSymbolIndex = this->symbolIndex(*pointerTarget);
5429 uint32_t indirectTableIndex = indexInSection + curSection->fIndirectSymbolOffset;
5430 IndirectEntry entry = { indirectTableIndex, undefinedSymbolIndex };
5431 //fprintf(stderr,"fIndirectTableAtom->fTable.push_back(tableIndex=%d, symIndex=0x%X, section=%s)\n",
5432 // indirectTableIndex, undefinedSymbolIndex, curSection->fSectionName);
5433 fIndirectTableAtom->fTable.push_back(entry);
5434 if ( curSection->fAllLazyPointers || curSection->fAllLazyDylibPointers ) {
5435 uint8_t preboundLazyType;
5436 if ( fOptions.prebind() && (fDyldClassicHelperAtom != NULL)
5437 && curSection->fAllLazyPointers && preboundLazyPointerType(&preboundLazyType) ) {
5438 // this is a prebound image, need special relocs for dyld to reset lazy pointers if prebinding is invalid
5439 macho_scattered_relocation_info<P> pblaReloc;
5440 pblaReloc.set_r_scattered(true);
5441 pblaReloc.set_r_pcrel(false);
5442 pblaReloc.set_r_length();
5443 pblaReloc.set_r_type(preboundLazyType);
5444 pblaReloc.set_r_address(relocAddressInFinalLinkedImage(atom->getAddress(), atom));
5445 pblaReloc.set_r_value(fDyldClassicHelperAtom->getAddress());
5446 fInternalRelocs.push_back(*((macho_relocation_info<P>*)&pblaReloc));
5447 }
5448 else if ( fSlideable ) {
5449 // this is a non-prebound dylib/bundle, need vanilla internal relocation to fix up binding handler if image slides
5450 macho_relocation_info<P> dyldHelperReloc;
5451 uint32_t sectionNum = 1;
5452 if ( fDyldClassicHelperAtom != NULL )
5453 sectionNum = ((SectionInfo*)(fDyldClassicHelperAtom->getSection()))->getIndex();
5454 //fprintf(stderr, "lazy pointer reloc, section index=%u, section name=%s\n", sectionNum, curSection->fSectionName);
5455 dyldHelperReloc.set_r_address(relocAddressInFinalLinkedImage(atom->getAddress(), atom));
5456 dyldHelperReloc.set_r_symbolnum(sectionNum);
5457 dyldHelperReloc.set_r_pcrel(false);
5458 dyldHelperReloc.set_r_length();
5459 dyldHelperReloc.set_r_extern(false);
5460 dyldHelperReloc.set_r_type(GENERIC_RELOC_VANILLA);
5461 fInternalRelocs.push_back(dyldHelperReloc);
5462 if ( fOptions.makeCompressedDyldInfo() ) {
5463 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_POINTER,atom->getAddress()));
5464 }
5465 }
5466 if ( fOptions.makeCompressedDyldInfo() ) {
5467 uint8_t type = BIND_TYPE_POINTER;
5468 uint64_t addresss = atom->getAddress() + ref->getFixUpOffset();
5469 if ( pointerTarget->getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition ) {
5470 // This is a reference to a weak def in some dylib (e.g. operator new),
5471 // so add a bind to directly bind this pointer now;
5472 // later weak binding info may override it
5473 int ordinal = compressedOrdinalForImortedAtom(pointerTarget);
5474 fBindingInfo.push_back(BindingInfo(type, ordinal, pointerTarget->getName(), false, addresss, 0));
5475 }
5476 if ( targetRequiresWeakBinding(*pointerTarget) ) {
5477 // note: lazy pointers to weak symbols are not bound lazily
5478 fWeakBindingInfo.push_back(BindingInfo(type, pointerTarget->getName(), false, addresss, 0));
5479 }
5480 }
5481 }
5482 if ( curSection->fAllNonLazyPointers && fOptions.makeCompressedDyldInfo() ) {
5483 if ( pointerTarget != NULL ) {
5484 switch ( this->relocationNeededInFinalLinkedImage(*pointerTarget) ) {
5485 case kRelocNone:
5486 // no rebase or binding info needed
5487 break;
5488 case kRelocInternal:
5489 // a non-lazy pointer that has been optimized to LOCAL needs rebasing info
5490 // but not the magic fFastStubGOTAtom atom
5491 if (atom != fFastStubGOTAtom)
5492 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_POINTER,atom->getAddress()));
5493 break;
5494 case kRelocExternal:
5495 {
5496 uint8_t type = BIND_TYPE_POINTER;
5497 uint64_t addresss = atom->getAddress();
5498 if ( targetRequiresWeakBinding(ref->getTarget()) ) {
5499 fWeakBindingInfo.push_back(BindingInfo(type, ref->getTarget().getName(), false, addresss, 0));
5500 // if this is a non-lazy pointer to a weak definition within this linkage unit
5501 // the pointer needs to initially point within linkage unit and have
5502 // rebase command to slide it.
5503 if ( ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition ) {
5504 // unless this is a hybrid format, in which case the non-lazy pointer
5505 // is zero on disk, so use a bind instead of a rebase to set the initial value
5506 if ( fOptions.makeClassicDyldInfo() )
5507 fBindingInfo.push_back(BindingInfo(type, BIND_SPECIAL_DYLIB_SELF, ref->getTarget().getName(), false, addresss, 0));
5508 else
5509 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_POINTER,atom->getAddress()));
5510 }
5511 // if this is a non-lazy pointer to a weak definition in a dylib,
5512 // the pointer needs to initially bind to the dylib
5513 else if ( ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition ) {
5514 int ordinal = compressedOrdinalForImortedAtom(pointerTarget);
5515 fBindingInfo.push_back(BindingInfo(BIND_TYPE_POINTER, ordinal, pointerTarget->getName(), false, addresss, 0));
5516 }
5517 }
5518 else {
5519 int ordinal = compressedOrdinalForImortedAtom(pointerTarget);
5520 bool weak_import = fWeakImportMap[pointerTarget];
5521 fBindingInfo.push_back(BindingInfo(type, ordinal, ref->getTarget().getName(), weak_import, addresss, 0));
5522 }
5523 }
5524 }
5525 }
5526 }
5527 }
5528 else if ( (ref->getKind() == A::kPointer) || (ref->getKind() == A::kPointerWeakImport) ) {
5529 if ( fSlideable && ((curSegment->fInitProtection & VM_PROT_WRITE) == 0) ) {
5530 if ( fOptions.allowTextRelocs() ) {
5531 if ( fOptions.warnAboutTextRelocs() )
5532 warning("text reloc in %s to %s", atom->getDisplayName(), ref->getTargetName());
5533 }
5534 else {
5535 throwf("pointer in read-only segment not allowed in slidable image, used in %s from %s",
5536 atom->getDisplayName(), atom->getFile()->getPath());
5537 }
5538 }
5539 switch ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) ) {
5540 case kRelocNone:
5541 // no reloc needed
5542 break;
5543 case kRelocInternal:
5544 {
5545 macho_relocation_info<P> internalReloc;
5546 SectionInfo* sectInfo = (SectionInfo*)ref->getTarget().getSection();
5547 uint32_t sectionNum = sectInfo->getIndex();
5548 // special case _mh_dylib_header and friends which are not in any real section
5549 if ( (sectionNum == 0) && sectInfo->fVirtualSection && (strcmp(sectInfo->fSectionName, "._mach_header") == 0) )
5550 sectionNum = 1;
5551 internalReloc.set_r_address(this->relocAddressInFinalLinkedImage(atom->getAddress() + ref->getFixUpOffset(), atom));
5552 internalReloc.set_r_symbolnum(sectionNum);
5553 internalReloc.set_r_pcrel(false);
5554 internalReloc.set_r_length();
5555 internalReloc.set_r_extern(false);
5556 internalReloc.set_r_type(GENERIC_RELOC_VANILLA);
5557 fInternalRelocs.push_back(internalReloc);
5558 if ( fOptions.makeCompressedDyldInfo() ) {
5559 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_POINTER, atom->getAddress() + ref->getFixUpOffset()));
5560 }
5561 }
5562 break;
5563 case kRelocExternal:
5564 {
5565 macho_relocation_info<P> externalReloc;
5566 externalReloc.set_r_address(this->relocAddressInFinalLinkedImage(atom->getAddress() + ref->getFixUpOffset(), atom));
5567 externalReloc.set_r_symbolnum(this->symbolIndex(ref->getTarget()));
5568 externalReloc.set_r_pcrel(false);
5569 externalReloc.set_r_length();
5570 externalReloc.set_r_extern(true);
5571 externalReloc.set_r_type(GENERIC_RELOC_VANILLA);
5572 fExternalRelocs.push_back(externalReloc);
5573 if ( fOptions.makeCompressedDyldInfo() ) {
5574 int64_t addend = ref->getTargetOffset();
5575 uint64_t addresss = atom->getAddress() + ref->getFixUpOffset();
5576 if ( !fOptions.makeClassicDyldInfo() ) {
5577 if ( ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition ) {
5578 // pointers to internal weak defs need a rebase
5579 fRebaseInfo.push_back(RebaseInfo(REBASE_TYPE_POINTER, addresss));
5580 }
5581 }
5582 uint8_t type = BIND_TYPE_POINTER;
5583 if ( targetRequiresWeakBinding(ref->getTarget()) ) {
5584 fWeakBindingInfo.push_back(BindingInfo(type, ref->getTarget().getName(), false, addresss, addend));
5585 if ( fOptions.makeClassicDyldInfo() && (ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) ) {
5586 // hybrid linkedit puts addend in data, so we need the bind phase to reset the pointer to the local definition
5587 fBindingInfo.push_back(BindingInfo(type, BIND_SPECIAL_DYLIB_SELF, ref->getTarget().getName(), false, addresss, addend));
5588 }
5589 // if this is a pointer to a weak definition in a dylib,
5590 // the pointer needs to initially bind to the dylib
5591 else if ( ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition ) {
5592 int ordinal = compressedOrdinalForImortedAtom(&ref->getTarget());
5593 fBindingInfo.push_back(BindingInfo(BIND_TYPE_POINTER, ordinal, ref->getTarget().getName(), false, addresss, addend));
5594 }
5595 }
5596 else {
5597 int ordinal = compressedOrdinalForImortedAtom(&ref->getTarget());
5598 bool weak_import = fWeakImportMap[&(ref->getTarget())];
5599 fBindingInfo.push_back(BindingInfo(type, ordinal, ref->getTarget().getName(), weak_import, addresss, addend));
5600 }
5601 }
5602 }
5603 break;
5604 }
5605 }
5606 else if ( this->illegalRelocInFinalLinkedImage(*ref) ) {
5607 // new x86 stubs always require text relocs
5608 if ( curSection->fAllStubs || curSection->fAllStubHelpers ) {
5609 if ( this->generatesLocalTextReloc(*ref, *atom, curSection) ) {
5610 // relocs added to fInternalRelocs
5611 }
5612 }
5613 else if ( fOptions.allowTextRelocs() && !atom->getSegment().isContentWritable() ) {
5614 if ( fOptions.warnAboutTextRelocs() )
5615 warning("text reloc in %s to %s", atom->getDisplayName(), ref->getTargetName());
5616 if ( this->generatesLocalTextReloc(*ref, *atom, curSection) ) {
5617 // relocs added to fInternalRelocs
5618 }
5619 else if ( this->generatesExternalTextReloc(*ref, *atom, curSection) ) {
5620 // relocs added to fExternalRelocs
5621 }
5622 else {
5623 throwf("relocation used in %s from %s not allowed in slidable image", atom->getDisplayName(), atom->getFile()->getPath());
5624 }
5625 }
5626 else {
5627 throwf("absolute addressing (perhaps -mdynamic-no-pic) used in %s from %s not allowed in slidable image. "
5628 "Use '-read_only_relocs suppress' to enable text relocs", atom->getDisplayName(), atom->getFile()->getPath());
5629 }
5630 }
5631 }
5632 if ( curSection->fAllSelfModifyingStubs || curSection->fAllStubs ) {
5633 ObjectFile::Atom* stubTarget = ((StubAtom<A>*)atom)->getTarget();
5634 uint32_t undefinedSymbolIndex = (stubTarget != NULL) ? this->symbolIndex(*stubTarget) : INDIRECT_SYMBOL_ABS;
5635 uint32_t offsetInSection = atom->getSectionOffset();
5636 uint32_t indexInSection = offsetInSection / atom->getSize();
5637 uint32_t indirectTableIndex = indexInSection + curSection->fIndirectSymbolOffset;
5638 IndirectEntry entry = { indirectTableIndex, undefinedSymbolIndex };
5639 //fprintf(stderr,"for stub: fIndirectTableAtom->fTable.add(%d-%d => 0x%X-%s), size=%lld\n", indexInSection, indirectTableIndex, undefinedSymbolIndex, stubTarget->getName(), atom->getSize());
5640 fIndirectTableAtom->fTable.push_back(entry);
5641 }
5642 }
5643 }
5644 }
5645 }
5646 if ( fSplitCodeToDataContentAtom != NULL )
5647 fSplitCodeToDataContentAtom->encode();
5648 if ( fCompressedRebaseInfoAtom != NULL )
5649 fCompressedRebaseInfoAtom->encode();
5650 if ( fCompressedBindingInfoAtom != NULL )
5651 fCompressedBindingInfoAtom->encode();
5652 if ( fCompressedWeakBindingInfoAtom != NULL )
5653 fCompressedWeakBindingInfoAtom->encode();
5654 if ( fCompressedLazyBindingInfoAtom != NULL )
5655 fCompressedLazyBindingInfoAtom->encode();
5656 if ( fCompressedExportInfoAtom != NULL )
5657 fCompressedExportInfoAtom->encode();
5658 }
5659
5660
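//
// addCrossSegmentRef() records code-to-data references in the split-seg info
// atom so the image can later be placed in the dyld shared cache with its
// read-only and writable segments slid independently; reference kinds that
// cannot be encoded mark the split-seg info with setCantEncode().
//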
5661 template <>
5662 void Writer<ppc>::addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref)
5663 {
5664 switch ( (ppc::ReferenceKinds)ref->getKind() ) {
5665 case ppc::kPICBaseHigh16:
5666 fSplitCodeToDataContentAtom->addPPCHi16Location(atom, ref->getFixUpOffset());
5667 break;
5668 case ppc::kPointerDiff32:
5669 fSplitCodeToDataContentAtom->add32bitPointerLocation(atom, ref->getFixUpOffset());
5670 break;
5671 case ppc::kPointerDiff64:
5672 fSplitCodeToDataContentAtom->add64bitPointerLocation(atom, ref->getFixUpOffset());
5673 break;
5674 case ppc::kNoFixUp:
5675 case ppc::kGroupSubordinate:
5676 case ppc::kPointer:
5677 case ppc::kPointerWeakImport:
5678 case ppc::kPICBaseLow16:
5679 case ppc::kPICBaseLow14:
5680 // ignore
5681 break;
5682 default:
5683 warning("codegen with reference kind %d in %s prevents image from loading in dyld shared cache", ref->getKind(), atom->getDisplayName());
5684 fSplitCodeToDataContentAtom->setCantEncode();
5685 }
5686 }
5687
5688 template <>
5689 void Writer<ppc64>::addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref)
5690 {
5691 switch ( (ppc64::ReferenceKinds)ref->getKind() ) {
5692 case ppc64::kPICBaseHigh16:
5693 fSplitCodeToDataContentAtom->addPPCHi16Location(atom, ref->getFixUpOffset());
5694 break;
5695 case ppc64::kPointerDiff32:
5696 fSplitCodeToDataContentAtom->add32bitPointerLocation(atom, ref->getFixUpOffset());
5697 break;
5698 case ppc64::kPointerDiff64:
5699 fSplitCodeToDataContentAtom->add64bitPointerLocation(atom, ref->getFixUpOffset());
5700 break;
5701 case ppc64::kNoFixUp:
5702 case ppc64::kGroupSubordinate:
5703 case ppc64::kPointer:
5704 case ppc64::kPointerWeakImport:
5705 case ppc64::kPICBaseLow16:
5706 case ppc64::kPICBaseLow14:
5707 // ignore
5708 break;
5709 default:
5710 warning("codegen with reference kind %d in %s prevents image from loading in dyld shared cache", ref->getKind(), atom->getDisplayName());
5711 fSplitCodeToDataContentAtom->setCantEncode();
5712 }
5713 }
5714
5715 template <>
5716 void Writer<x86>::addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref)
5717 {
5718 switch ( (x86::ReferenceKinds)ref->getKind() ) {
5719 case x86::kPointerDiff:
5720 case x86::kImageOffset32:
5721 if ( strcmp(ref->getTarget().getSegment().getName(), "__IMPORT") == 0 )
5722 fSplitCodeToDataContentAtom->add32bitImportLocation(atom, ref->getFixUpOffset());
5723 else
5724 fSplitCodeToDataContentAtom->add32bitPointerLocation(atom, ref->getFixUpOffset());
5725 break;
5726 case x86::kNoFixUp:
5727 case x86::kGroupSubordinate:
5728 case x86::kPointer:
5729 case x86::kPointerWeakImport:
5730 // ignore
5731 break;
5732 case x86::kPCRel32:
5733 case x86::kPCRel32WeakImport:
5734 if ( (&(ref->getTarget().getSegment()) == &Segment::fgImportSegment)
5735 || (&(ref->getTarget().getSegment()) == &Segment::fgROImportSegment) ) {
5736 fSplitCodeToDataContentAtom->add32bitImportLocation(atom, ref->getFixUpOffset());
5737 break;
5738 }
5739 // fall into warning case
5740 default:
5741 if ( fOptions.makeCompressedDyldInfo() && (ref->getKind() == x86::kAbsolute32) ) {
5742 // will be encoded in rebase info
5743 }
5744 else {
5745 warning("codegen in %s (offset 0x%08llX) prevents image from loading in dyld shared cache", atom->getDisplayName(), ref->getFixUpOffset());
5746 fSplitCodeToDataContentAtom->setCantEncode();
5747 }
5748 }
5749 }
5750
5751 template <>
5752 void Writer<x86_64>::addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref)
5753 {
5754 switch ( (x86_64::ReferenceKinds)ref->getKind() ) {
5755 case x86_64::kPCRel32:
5756 case x86_64::kPCRel32_1:
5757 case x86_64::kPCRel32_2:
5758 case x86_64::kPCRel32_4:
5759 case x86_64::kPCRel32GOTLoad:
5760 case x86_64::kPCRel32GOTLoadWeakImport:
5761 case x86_64::kPCRel32GOT:
5762 case x86_64::kPCRel32GOTWeakImport:
5763 case x86_64::kPointerDiff32:
5764 case x86_64::kImageOffset32:
5765 fSplitCodeToDataContentAtom->add32bitPointerLocation(atom, ref->getFixUpOffset());
5766 break;
5767 case x86_64::kPointerDiff:
5768 fSplitCodeToDataContentAtom->add64bitPointerLocation(atom, ref->getFixUpOffset());
5769 break;
5770 case x86_64::kNoFixUp:
5771 case x86_64::kGroupSubordinate:
5772 case x86_64::kPointer:
5773 case x86_64::kGOTNoFixUp:
5774 // ignore
5775 break;
5776 default:
5777 warning("codegen in %s with kind %d prevents image from loading in dyld shared cache", atom->getDisplayName(), ref->getKind());
5778 fSplitCodeToDataContentAtom->setCantEncode();
5779 }
5780 }
5781
5782 template <>
5783 void Writer<arm>::addCrossSegmentRef(const ObjectFile::Atom* atom, const ObjectFile::Reference* ref)
5784 {
5785 switch ( (arm::ReferenceKinds)ref->getKind() ) {
5786 case arm::kPointerDiff:
5787 fSplitCodeToDataContentAtom->add32bitPointerLocation(atom, ref->getFixUpOffset());
5788 break;
5789 case arm::kNoFixUp:
5790 case arm::kGroupSubordinate:
5791 case arm::kPointer:
5792 case arm::kPointerWeakImport:
5793 case arm::kReadOnlyPointer:
5794 // ignore
5795 break;
5796 default:
5797 warning("codegen in %s prevents image from loading in dyld shared cache", atom->getDisplayName());
5798 fSplitCodeToDataContentAtom->setCantEncode();
5799 }
5800 }
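// Note on the addCrossSegmentRef() specializations above: each one records, in
// fSplitCodeToDataContentAtom, the location of a fix-up whose value depends on the relative
// placement of two segments (for example a pointer difference from __TEXT to __DATA).  The
// dyld shared cache builder uses this "split seg" info to slide an image's segments
// independently; a reference kind that cannot be described calls setCantEncode(), which is
// why the warnings above say the image is prevented from loading in the dyld shared cache.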
5801
5802 template <typename A>
5803 bool Writer<A>::segmentsCanSplitApart(const ObjectFile::Atom& from, const ObjectFile::Atom& to)
5804 {
5805 switch ( to.getDefinitionKind() ) {
5806 case ObjectFile::Atom::kExternalDefinition:
5807 case ObjectFile::Atom::kExternalWeakDefinition:
5808 case ObjectFile::Atom::kAbsoluteSymbol:
5809 return false;
5810 case ObjectFile::Atom::kRegularDefinition:
5811 case ObjectFile::Atom::kWeakDefinition:
5812 case ObjectFile::Atom::kTentativeDefinition:
5813 // segments with same permissions slide together
5814 return ( (from.getSegment().isContentExecutable() != to.getSegment().isContentExecutable())
5815 || (from.getSegment().isContentWritable() != to.getSegment().isContentWritable()) );
5816 }
5817 throw "ld64 internal error";
5818 }
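// segmentsCanSplitApart() returns true only when the two atoms live in segments with
// different protections (e.g. __TEXT r-x vs __DATA rw-).  Segments with identical
// permissions slide as a unit, so references between them need no split-seg entry, and
// references to atoms outside this image (external or absolute) never need one.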
5819
5820
5821 template <>
5822 void Writer<ppc>::writeNoOps(int fd, uint32_t from, uint32_t to)
5823 {
5824 uint32_t ppcNop;
5825 OSWriteBigInt32(&ppcNop, 0, 0x60000000);
5826 for (uint32_t p=from; p < to; p += 4)
5827 ::pwrite(fd, &ppcNop, 4, p);
5828 }
5829
5830 template <>
5831 void Writer<ppc64>::writeNoOps(int fd, uint32_t from, uint32_t to)
5832 {
5833 uint32_t ppcNop;
5834 OSWriteBigInt32(&ppcNop, 0, 0x60000000);
5835 for (uint32_t p=from; p < to; p += 4)
5836 ::pwrite(fd, &ppcNop, 4, p);
5837 }
5838
5839 template <>
5840 void Writer<x86>::writeNoOps(int fd, uint32_t from, uint32_t to)
5841 {
5842 uint8_t x86Nop = 0x90;
5843 for (uint32_t p=from; p < to; ++p)
5844 ::pwrite(fd, &x86Nop, 1, p);
5845 }
5846
5847 template <>
5848 void Writer<x86_64>::writeNoOps(int fd, uint32_t from, uint32_t to)
5849 {
5850 uint8_t x86Nop = 0x90;
5851 for (uint32_t p=from; p < to; ++p)
5852 ::pwrite(fd, &x86Nop, 1, p);
5853 }
5854
5855 template <>
5856 void Writer<arm>::writeNoOps(int fd, uint32_t from, uint32_t to)
5857 {
5858 // FIXME: need thumb nop?
5859 uint32_t armNop;
5860 OSWriteLittleInt32(&armNop, 0, 0xe1a00000);
5861 for (uint32_t p=from; p < to; p += 4)
5862 ::pwrite(fd, &armNop, 4, p);
5863 }
5864
5865 template <>
5866 void Writer<ppc>::copyNoOps(uint8_t* from, uint8_t* to)
5867 {
5868 for (uint8_t* p=from; p < to; p += 4)
5869 OSWriteBigInt32((uint32_t*)p, 0, 0x60000000);
5870 }
5871
5872 template <>
5873 void Writer<ppc64>::copyNoOps(uint8_t* from, uint8_t* to)
5874 {
5875 for (uint8_t* p=from; p < to; p += 4)
5876 OSWriteBigInt32((uint32_t*)p, 0, 0x60000000);
5877 }
5878
5879 template <>
5880 void Writer<x86>::copyNoOps(uint8_t* from, uint8_t* to)
5881 {
5882 for (uint8_t* p=from; p < to; ++p)
5883 *p = 0x90;
5884 }
5885
5886 template <>
5887 void Writer<x86_64>::copyNoOps(uint8_t* from, uint8_t* to)
5888 {
5889 for (uint8_t* p=from; p < to; ++p)
5890 *p = 0x90;
5891 }
5892
5893 template <>
5894 void Writer<arm>::copyNoOps(uint8_t* from, uint8_t* to)
5895 {
5896 	// fixme: need thumb nop?  (ARM mach-o is little-endian, so write the nop the same way writeNoOps() does)
5897 	for (uint8_t* p=from; p < to; p += 4)
5898 		OSWriteLittleInt32((uint32_t*)p, 0, 0xe1a00000);
5899 }
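// No-op encodings used by writeNoOps()/copyNoOps() above:
//   ppc/ppc64:  0x60000000  (ori r0,r0,0 - the canonical PowerPC nop, stored big-endian)
//   x86/x86_64: 0x90        (one-byte nop, so gaps of any size can be filled)
//   arm:        0xE1A00000  (mov r0,r0; a Thumb-only gap would need 0x46C0, hence the FIXMEs)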
5900
5901 static const char* stringName(const char* str)
5902 {
5903 if ( strncmp(str, "cstring=", 8) == 0) {
5904 static char buffer[1024];
5905 char* t = buffer;
5906 *t++ = '\"';
5907 for(const char*s = &str[8]; *s != '\0'; ++s) {
5908 switch(*s) {
5909 case '\n':
5910 *t++ = '\\';
5911 *t++ = 'n';
5912 break;
5913 case '\t':
5914 *t++ = '\\';
5915 *t++ = 't';
5916 break;
5917 default:
5918 *t++ = *s;
5919 break;
5920 }
5921 if ( t > &buffer[1020] ) {
5922 *t++= '\"';
5923 *t++= '.';
5924 *t++= '.';
5925 *t++= '.';
5926 *t++= '\0';
5927 return buffer;
5928 }
5929 }
5930 *t++= '\"';
5931 *t++= '\0';
5932 return buffer;
5933 }
5934 else {
5935 return str;
5936 }
5937 }
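// stringName() is only used below to pretty-print synthetic cstring atom names in the map
// file.  For example an atom named "cstring=Hello\nWorld" (with a real newline in the name)
// is printed as "Hello\nWorld" inside quotes with the newline escaped, and names longer than
// the 1024-byte static buffer are truncated with a trailing "...".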
5938
5939
5940 template <> const char* Writer<ppc>::getArchString() { return "ppc"; }
5941 template <> const char* Writer<ppc64>::getArchString() { return "ppc64"; }
5942 template <> const char* Writer<x86>::getArchString() { return "i386"; }
5943 template <> const char* Writer<x86_64>::getArchString() { return "x86_64"; }
5944 template <> const char* Writer<arm>::getArchString() { return "arm"; }
5945
5946 template <typename A>
5947 void Writer<A>::writeMap()
5948 {
5949 if ( fOptions.generatedMapPath() != NULL ) {
5950 FILE* mapFile = fopen(fOptions.generatedMapPath(), "w");
5951 if ( mapFile != NULL ) {
5952 // write output path
5953 fprintf(mapFile, "# Path: %s\n", fFilePath);
5954 // write output architecture
5955 fprintf(mapFile, "# Arch: %s\n", getArchString());
5956 // write UUID
5957 if ( fUUIDAtom != NULL ) {
5958 const uint8_t* uuid = fUUIDAtom->getUUID();
5959 fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
5960 uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
5961 uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
5962 }
5963 // write table of object files
5964 std::map<ObjectFile::Reader*, uint32_t> readerToOrdinal;
5965 std::map<uint32_t, ObjectFile::Reader*> ordinalToReader;
5966 std::map<ObjectFile::Reader*, uint32_t> readerToFileOrdinal;
5967 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
5968 std::vector<SectionInfo*>& sectionInfos = (*segit)->fSections;
5969 for (std::vector<SectionInfo*>::iterator secit = sectionInfos.begin(); secit != sectionInfos.end(); ++secit) {
5970 if ( ! (*secit)->fVirtualSection ) {
5971 std::vector<ObjectFile::Atom*>& sectionAtoms = (*secit)->fAtoms;
5972 for (std::vector<ObjectFile::Atom*>::iterator ait = sectionAtoms.begin(); ait != sectionAtoms.end(); ++ait) {
5973 ObjectFile::Reader* reader = (*ait)->getFile();
5974 uint32_t readerOrdinal = (*ait)->getOrdinal();
5975 std::map<ObjectFile::Reader*, uint32_t>::iterator pos = readerToOrdinal.find(reader);
5976 if ( pos == readerToOrdinal.end() ) {
5977 readerToOrdinal[reader] = readerOrdinal;
5978 ordinalToReader[readerOrdinal] = reader;
5979 }
5980 }
5981 }
5982 }
5983 }
5984 fprintf(mapFile, "# Object files:\n");
5985 fprintf(mapFile, "[%3u] %s\n", 0, "linker synthesized");
5986 uint32_t fileIndex = 0;
5987 readerToFileOrdinal[this] = fileIndex++;
5988 for(std::map<uint32_t, ObjectFile::Reader*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
5989 if ( it->first != 0 ) {
5990 fprintf(mapFile, "[%3u] %s\n", fileIndex, it->second->getPath());
5991 readerToFileOrdinal[it->second] = fileIndex++;
5992 }
5993 }
5994 // write table of sections
5995 fprintf(mapFile, "# Sections:\n");
5996 fprintf(mapFile, "# Address\tSize \tSegment\tSection\n");
5997 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
5998 std::vector<SectionInfo*>& sectionInfos = (*segit)->fSections;
5999 for (std::vector<SectionInfo*>::iterator secit = sectionInfos.begin(); secit != sectionInfos.end(); ++secit) {
6000 if ( ! (*secit)->fVirtualSection ) {
6001 SectionInfo* sect = *secit;
6002 fprintf(mapFile, "0x%08llX\t0x%08llX\t%s\t%s\n", sect->getBaseAddress(), sect->fSize,
6003 (*segit)->fName, sect->fSectionName);
6004 }
6005 }
6006 }
6007 // write table of symbols
6008 fprintf(mapFile, "# Symbols:\n");
6009 fprintf(mapFile, "# Address\tSize \tFile Name\n");
6010 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
6011 std::vector<SectionInfo*>& sectionInfos = (*segit)->fSections;
6012 for (std::vector<SectionInfo*>::iterator secit = sectionInfos.begin(); secit != sectionInfos.end(); ++secit) {
6013 if ( ! (*secit)->fVirtualSection ) {
6014 std::vector<ObjectFile::Atom*>& sectionAtoms = (*secit)->fAtoms;
6015 bool isCstring = (strcmp((*secit)->fSectionName, "__cstring") == 0);
6016 for (std::vector<ObjectFile::Atom*>::iterator ait = sectionAtoms.begin(); ait != sectionAtoms.end(); ++ait) {
6017 ObjectFile::Atom* atom = *ait;
6018 fprintf(mapFile, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom->getAddress(), atom->getSize(),
6019 readerToFileOrdinal[atom->getFile()], isCstring ? stringName(atom->getDisplayName()): atom->getDisplayName());
6020 }
6021 }
6022 }
6023 }
6024 fclose(mapFile);
6025 }
6026 else {
6027 warning("could not write map file: %s\n", fOptions.generatedMapPath());
6028 }
6029 }
6030 }
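// Illustrative sketch of the map file writeMap() emits (the paths, addresses and sizes here
// are made-up examples, not output from a real link):
//   # Path: /tmp/a.out
//   # Arch: i386
//   # Object files:
//   [  0] linker synthesized
//   [  1] /tmp/main.o
//   # Sections:
//   # Address    Size        Segment  Section
//   0x00001000   0x00000050  __TEXT   __text
//   # Symbols:
//   # Address    Size        File Name
//   0x00001000   0x00000030  [  1] _main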
6031
6032 static const char* sCleanupFile = NULL;
6033 static void cleanup(int sig)
6034 {
6035 ::signal(sig, SIG_DFL);
6036 if ( sCleanupFile != NULL ) {
6037 ::unlink(sCleanupFile);
6038 }
6039 if ( sig == SIGINT )
6040 ::exit(1);
6041 }
6042
6043
6044 template <typename A>
6045 uint64_t Writer<A>::writeAtoms()
6046 {
6047 // for UNIX conformance, error if file exists and is not writable
6048 if ( (access(fFilePath, F_OK) == 0) && (access(fFilePath, W_OK) == -1) )
6049 throwf("can't write output file: %s", fFilePath);
6050
6051 int permissions = 0777;
6052 if ( fOptions.outputKind() == Options::kObjectFile )
6053 permissions = 0666;
6054 	// Calling unlink first ensures the file is gone, so that open creates it with the correct permissions.
6055 	// It also handles the case where fFilePath is not writable but its directory is.
6056 	// And it means we don't have to truncate the file when done writing (in case the new file is smaller than the old one).
6057 (void)unlink(fFilePath);
6058
6059 // try to allocate buffer for entire output file content
6060 int fd = -1;
6061 SectionInfo* lastSection = fSegmentInfos.back()->fSections.back();
6062 uint64_t fileBufferSize = (lastSection->fFileOffset + lastSection->fSize + 4095) & (-4096);
6063 uint8_t* wholeBuffer = (uint8_t*)calloc(fileBufferSize, 1);
6064 uint8_t* atomBuffer = NULL;
6065 bool streaming = false;
6066 if ( wholeBuffer == NULL ) {
6067 fd = open(fFilePath, O_CREAT | O_WRONLY | O_TRUNC, permissions);
6068 if ( fd == -1 )
6069 throwf("can't open output file for writing: %s, errno=%d", fFilePath, errno);
6070 atomBuffer = new uint8_t[(fLargestAtomSize+4095) & (-4096)];
6071 streaming = true;
6072 // install signal handlers to delete output file if program is killed
6073 sCleanupFile = fFilePath;
6074 ::signal(SIGINT, cleanup);
6075 ::signal(SIGBUS, cleanup);
6076 ::signal(SIGSEGV, cleanup);
6077 }
6078 uint32_t size = 0;
6079 uint32_t end = 0;
6080 try {
6081 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
6082 SegmentInfo* curSegment = *segit;
6083 std::vector<SectionInfo*>& sectionInfos = curSegment->fSections;
6084 for (std::vector<SectionInfo*>::iterator secit = sectionInfos.begin(); secit != sectionInfos.end(); ++secit) {
6085 SectionInfo* curSection = *secit;
6086 std::vector<ObjectFile::Atom*>& sectionAtoms = curSection->fAtoms;
6087 //printf("writing with max atom size 0x%X\n", fLargestAtomSize);
6088 //fprintf(stderr, "writing %lu atoms for section %p %s at file offset 0x%08llX\n", sectionAtoms.size(), curSection, curSection->fSectionName, curSection->fFileOffset);
6089 if ( ! curSection->fAllZeroFill ) {
6090 bool needsNops = ((strcmp(curSection->fSegmentName, "__TEXT") == 0) && (strncmp(curSection->fSectionName, "__text", 6) == 0));
6091 for (std::vector<ObjectFile::Atom*>::iterator ait = sectionAtoms.begin(); ait != sectionAtoms.end(); ++ait) {
6092 ObjectFile::Atom* atom = *ait;
6093 if ( (atom->getDefinitionKind() != ObjectFile::Atom::kExternalDefinition)
6094 && (atom->getDefinitionKind() != ObjectFile::Atom::kExternalWeakDefinition)
6095 && (atom->getDefinitionKind() != ObjectFile::Atom::kAbsoluteSymbol) ) {
6096 uint32_t fileOffset = curSection->fFileOffset + atom->getSectionOffset();
6097 if ( fileOffset != end ) {
6098 //fprintf(stderr, "writing %d pad bytes, needsNops=%d\n", fileOffset-end, needsNops);
6099 if ( needsNops ) {
6100 // fill gaps with no-ops
6101 if ( streaming )
6102 writeNoOps(fd, end, fileOffset);
6103 else
6104 copyNoOps(&wholeBuffer[end], &wholeBuffer[fileOffset]);
6105 }
6106 else if ( streaming ) {
6107 // zero fill gaps
6108 if ( (fileOffset-end) == 4 ) {
6109 uint32_t zero = 0;
6110 ::pwrite(fd, &zero, 4, end);
6111 }
6112 else {
6113 uint8_t zero = 0x00;
6114 for (uint32_t p=end; p < fileOffset; ++p)
6115 ::pwrite(fd, &zero, 1, p);
6116 }
6117 }
6118 }
6119 uint64_t atomSize = atom->getSize();
6120 if ( streaming ) {
6121 if ( atomSize > fLargestAtomSize )
6122 throwf("ld64 internal error: atom \"%s\"is larger than expected 0x%llX > 0x%X",
6123 atom->getDisplayName(), atomSize, fLargestAtomSize);
6124 }
6125 else {
6126 if ( fileOffset > fileBufferSize )
6127 throwf("ld64 internal error: atom \"%s\" has file offset greater thatn expceted 0x%X > 0x%llX",
6128 atom->getDisplayName(), fileOffset, fileBufferSize);
6129 }
6130 uint8_t* buffer = streaming ? atomBuffer : &wholeBuffer[fileOffset];
6131 end = fileOffset+atomSize;
6132 // copy raw bytes
6133 atom->copyRawContent(buffer);
6134 // apply any fix-ups
6135 try {
6136 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
6137 for (std::vector<ObjectFile::Reference*>::iterator it=references.begin(); it != references.end(); it++) {
6138 ObjectFile::Reference* ref = *it;
6139 if ( fOptions.outputKind() == Options::kObjectFile ) {
6140 // doing ld -r
6141 // skip fix-ups for undefined targets
6142 if ( &(ref->getTarget()) != NULL )
6143 this->fixUpReferenceRelocatable(ref, atom, buffer);
6144 }
6145 else {
6146 // producing final linked image
6147 this->fixUpReferenceFinal(ref, atom, buffer);
6148 }
6149 }
6150 }
6151 catch (const char* msg) {
6152 throwf("%s in %s from %s", msg, atom->getDisplayName(), atom->getFile()->getPath());
6153 }
6154 //fprintf(stderr, "writing 0x%08X -> 0x%08X (addr=0x%llX, size=0x%llX), atom %p %s from %s\n",
6155 // fileOffset, end, atom->getAddress(), atom->getSize(), atom, atom->getDisplayName(), atom->getFile()->getPath());
6156 if ( streaming ) {
6157 // write out
6158 ::pwrite(fd, buffer, atomSize, fileOffset);
6159 }
6160 else {
6161 if ( (fileOffset + atomSize) > size )
6162 size = fileOffset + atomSize;
6163 }
6164 }
6165 }
6166 }
6167 }
6168 }
6169
6170 // update content based UUID
6171 if ( fOptions.getUUIDMode() == Options::kUUIDContent ) {
6172 uint8_t digest[CC_MD5_DIGEST_LENGTH];
6173 if ( streaming ) {
6174 			// if output file did not fit in memory, re-read the file to generate the MD5 hash
6175 uint32_t kMD5BufferSize = 16*1024;
6176 uint8_t* md5Buffer = (uint8_t*)::malloc(kMD5BufferSize);
6177 if ( md5Buffer != NULL ) {
6178 CC_MD5_CTX md5State;
6179 CC_MD5_Init(&md5State);
6180 ::lseek(fd, 0, SEEK_SET);
6181 ssize_t len;
6182 while ( (len = ::read(fd, md5Buffer, kMD5BufferSize)) > 0 )
6183 CC_MD5_Update(&md5State, md5Buffer, len);
6184 CC_MD5_Final(digest, &md5State);
6185 ::free(md5Buffer);
6186 }
6187 else {
6188 // if malloc fails, fall back to random uuid
6189 ::uuid_generate_random(digest);
6190 }
6191 fUUIDAtom->setContent(digest);
6192 uint32_t uuidOffset = ((SectionInfo*)fUUIDAtom->getSection())->fFileOffset + fUUIDAtom->getSectionOffset();
6193 fUUIDAtom->copyRawContent(atomBuffer);
6194 ::pwrite(fd, atomBuffer, fUUIDAtom->getSize(), uuidOffset);
6195 }
6196 else {
6197 			// if output file fit in memory, just generate the MD5 hash in memory
6198 #if 1
6199 // temp hack for building on Tiger
6200 CC_MD5_CTX md5State;
6201 CC_MD5_Init(&md5State);
6202 CC_MD5_Update(&md5State, wholeBuffer, size);
6203 CC_MD5_Final(digest, &md5State);
6204 #else
6205 CC_MD5(wholeBuffer, size, digest);
6206 #endif
6207 fUUIDAtom->setContent(digest);
6208 uint32_t uuidOffset = ((SectionInfo*)fUUIDAtom->getSection())->fFileOffset + fUUIDAtom->getSectionOffset();
6209 fUUIDAtom->copyRawContent(&wholeBuffer[uuidOffset]);
6210 }
6211 }
6212 }
6213 catch (...) {
6214 if ( sCleanupFile != NULL )
6215 ::unlink(sCleanupFile);
6216 throw;
6217 }
6218
6219 // finish up
6220 if ( streaming ) {
6221 delete [] atomBuffer;
6222 close(fd);
6223 // restore default signal handlers
6224 sCleanupFile = NULL;
6225 ::signal(SIGINT, SIG_DFL);
6226 ::signal(SIGBUS, SIG_DFL);
6227 ::signal(SIGSEGV, SIG_DFL);
6228 }
6229 else {
6230 // write whole output file in one chunk
6231 fd = open(fFilePath, O_CREAT | O_WRONLY | O_TRUNC, permissions);
6232 if ( fd == -1 )
6233 throwf("can't open output file for writing: %s, errno=%d", fFilePath, errno);
6234 ::pwrite(fd, wholeBuffer, size, 0);
6235 close(fd);
6236 delete [] wholeBuffer;
6237 }
6238
6239 return end;
6240 }
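// writeAtoms() has two code paths: if calloc() can allocate a buffer for the whole output
// file, every atom is laid down in memory and the file is written with one pwrite(); if not,
// it streams one atom at a time through a small buffer and installs SIGINT/SIGBUS/SIGSEGV
// handlers so a partially written output file is unlinked if the link is interrupted.  The
// buffer size is the last section's end rounded up to a 4KB page, e.g.
// (0x12345 + 4095) & (-4096) == 0x13000.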
6241
6242 template <>
6243 void Writer<arm>::fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
6244 {
6245 int64_t displacement;
6246 int64_t baseAddr;
6247 uint32_t instruction;
6248 uint32_t newInstruction;
6249 uint64_t targetAddr = 0;
6250 uint32_t firstDisp;
6251 uint32_t nextDisp;
6252 uint32_t opcode = 0;
6253 bool relocateableExternal = false;
6254 bool is_bl;
6255 bool is_blx;
6256 bool targetIsThumb;
6257
6258 if ( ref->getTargetBinding() != ObjectFile::Reference::kDontBind ) {
6259 targetAddr = ref->getTarget().getAddress() + ref->getTargetOffset();
6260 relocateableExternal = (relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal);
6261 }
6262
6263 uint32_t* fixUp = (uint32_t*)&buffer[ref->getFixUpOffset()];
6264 switch ( (arm::ReferenceKinds)(ref->getKind()) ) {
6265 case arm::kNoFixUp:
6266 case arm::kFollowOn:
6267 case arm::kGroupSubordinate:
6268 // do nothing
6269 break;
6270 case arm::kPointerWeakImport:
6271 case arm::kPointer:
6272 // If this is the lazy pointers section, then set all lazy pointers to
6273 // point to the dyld stub binding helper.
6274 if ( ((SectionInfo*)inAtom->getSection())->fAllLazyPointers
6275 || ((SectionInfo*)inAtom->getSection())->fAllLazyDylibPointers ) {
6276 switch (ref->getTarget().getDefinitionKind()) {
6277 case ObjectFile::Atom::kExternalDefinition:
6278 case ObjectFile::Atom::kExternalWeakDefinition:
6279 // prebound lazy pointer to another dylib ==> pointer contains zero
6280 LittleEndian::set32(*fixUp, 0);
6281 break;
6282 case ObjectFile::Atom::kTentativeDefinition:
6283 case ObjectFile::Atom::kRegularDefinition:
6284 case ObjectFile::Atom::kWeakDefinition:
6285 case ObjectFile::Atom::kAbsoluteSymbol:
6286 						// prebound lazy pointer to within this dylib ==> pointer contains address
6287 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0) )
6288 targetAddr |= 1;
6289 LittleEndian::set32(*fixUp, targetAddr);
6290 break;
6291 }
6292 }
6293 else if ( relocateableExternal ) {
6294 if ( fOptions.prebind() ) {
6295 switch (ref->getTarget().getDefinitionKind()) {
6296 case ObjectFile::Atom::kExternalDefinition:
6297 case ObjectFile::Atom::kExternalWeakDefinition:
6298 // prebound external relocation ==> pointer contains addend
6299 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6300 break;
6301 case ObjectFile::Atom::kTentativeDefinition:
6302 case ObjectFile::Atom::kRegularDefinition:
6303 case ObjectFile::Atom::kWeakDefinition:
6304 // prebound external relocation to internal atom ==> pointer contains target address + addend
6305 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0) )
6306 targetAddr |= 1;
6307 LittleEndian::set32(*fixUp, targetAddr);
6308 break;
6309 case ObjectFile::Atom::kAbsoluteSymbol:
6310 break;
6311 }
6312 }
6313 else if ( !fOptions.makeClassicDyldInfo()
6314 && (ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) ) {
6315 // when using only compressed dyld info, pointer is initially set to point directly to weak definition
6316 if ( ref->getTarget().isThumb() )
6317 targetAddr |= 1;
6318 LittleEndian::set32(*fixUp, targetAddr);
6319 }
6320 else {
6321 // external relocation ==> pointer contains addend
6322 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6323 }
6324 }
6325 else {
6326 // pointer contains target address
6327 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0))
6328 targetAddr |= 1;
6329 LittleEndian::set32(*fixUp, targetAddr);
6330 }
6331 break;
6332 case arm::kPointerDiff:
6333 LittleEndian::set32(*fixUp,
6334 (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset()) );
6335 break;
6336 case arm::kReadOnlyPointer:
6337 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0))
6338 targetAddr |= 1;
6339 switch ( ref->getTarget().getDefinitionKind() ) {
6340 case ObjectFile::Atom::kRegularDefinition:
6341 case ObjectFile::Atom::kWeakDefinition:
6342 case ObjectFile::Atom::kTentativeDefinition:
6343 // pointer contains target address
6344 LittleEndian::set32(*fixUp, targetAddr);
6345 break;
6346 case ObjectFile::Atom::kExternalDefinition:
6347 case ObjectFile::Atom::kExternalWeakDefinition:
6348 // external relocation ==> pointer contains addend
6349 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6350 break;
6351 case ObjectFile::Atom::kAbsoluteSymbol:
6352 // pointer contains target address
6353 LittleEndian::set32(*fixUp, targetAddr);
6354 break;
6355 }
6356 break;
6357 case arm::kBranch24WeakImport:
6358 case arm::kBranch24:
6359 displacement = targetAddr - (inAtom->getAddress() + ref->getFixUpOffset());
6360 // check if this is a branch to a branch island that can be skipped
6361 if ( ref->getTarget().getContentType() == ObjectFile::Atom::kBranchIsland ) {
6362 uint64_t finalTargetAddress = ((BranchIslandAtom<arm>*)(&(ref->getTarget())))->getFinalTargetAdress();
6363 				int64_t altDisplacement = finalTargetAddress - (inAtom->getAddress() + ref->getFixUpOffset());
6364 				if ( (altDisplacement < 33554428LL) && (altDisplacement > (-33554432LL)) ) {
6365 					//fprintf(stderr, "using altDisplacement = %lld\n", altDisplacement);
6366 					// yes, we can skip the branch island
6367 					displacement = altDisplacement;
6368 }
6369 }
6370 			// the ARM pc reads as this instruction's address + 8
6371 displacement -= 8;
6372 //fprintf(stderr, "bl/blx fixup to %s at 0x%08llX, displacement = 0x%08llX\n", ref->getTarget().getDisplayName(), ref->getTarget().getAddress(), displacement);
6373 // max positive displacement is 0x007FFFFF << 2
6374 // max negative displacement is 0xFF800000 << 2
6375 if ( (displacement > 33554428LL) || (displacement < (-33554432LL)) ) {
6376 throwf("b/bl/blx out of range (%lld max is +/-32M) from 0x%08llX %s in %s to 0x%08llX %s in %s",
6377 displacement, inAtom->getAddress(), inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6378 ref->getTarget().getAddress(), ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6379 }
6380 instruction = LittleEndian::get32(*fixUp);
6381 // Make sure we are calling arm with bl, thumb with blx
6382 is_bl = ((instruction & 0xFF000000) == 0xEB000000);
6383 is_blx = ((instruction & 0xFE000000) == 0xFA000000);
6384 if ( is_bl && ref->getTarget().isThumb() ) {
6385 uint32_t opcode = 0xFA000000;
6386 uint32_t disp = (uint32_t)(displacement >> 2) & 0x00FFFFFF;
6387 uint32_t h_bit = (uint32_t)(displacement << 23) & 0x01000000;
6388 newInstruction = opcode | h_bit | disp;
6389 }
6390 else if ( is_blx && !ref->getTarget().isThumb() ) {
6391 uint32_t opcode = 0xEB000000;
6392 uint32_t disp = (uint32_t)(displacement >> 2) & 0x00FFFFFF;
6393 newInstruction = opcode | disp;
6394 }
6395 else if ( !is_bl && !is_blx && ref->getTarget().isThumb() ) {
6396 throwf("don't know how to convert instruction %x referencing %s to thumb",
6397 instruction, ref->getTarget().getDisplayName());
6398 }
6399 else {
6400 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(displacement >> 2) & 0x00FFFFFF);
6401 }
6402 LittleEndian::set32(*fixUp, newInstruction);
6403 break;
6404 case arm::kThumbBranch22WeakImport:
6405 case arm::kThumbBranch22:
6406 instruction = LittleEndian::get32(*fixUp);
6407 is_bl = ((instruction & 0xD000F800) == 0xD000F000);
6408 is_blx = ((instruction & 0xD000F800) == 0xC000F000);
6409 targetIsThumb = ref->getTarget().isThumb();
6410
6411 			// the Thumb pc reads as this instruction's address + 4
6412 baseAddr = inAtom->getAddress() + ref->getFixUpOffset() + 4;
6413 // If the target is not thumb, we will be generating a blx instruction
6414 // Since blx cannot have the low bit set, set bit[1] of the target to
6415 // bit[1] of the base address, so that the difference is a multiple of
6416 // 4 bytes.
6417 if ( !targetIsThumb ) {
6418 targetAddr &= -3ULL;
6419 targetAddr |= (baseAddr & 2LL);
6420 }
6421 displacement = targetAddr - baseAddr;
6422
6423 // max positive displacement is 0x003FFFFE
6424 // max negative displacement is 0xFFC00000
6425 if ( (displacement > 4194302LL) || (displacement < (-4194304LL)) ) {
6426 // armv7 supports a larger displacement
6427 if ( fOptions.preferSubArchitecture() && fOptions.subArchitecture() == CPU_SUBTYPE_ARM_V7 ) {
6428 if ( (displacement > 16777214) || (displacement < (-16777216LL)) ) {
6429 throwf("thumb bl/blx out of range (%lld max is +/-16M) from %s in %s to %s in %s",
6430 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6431 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6432 }
6433 else {
6434 // The instruction is really two instructions:
6435 // The lower 16 bits are the first instruction, which contains the high
6436 // 11 bits of the displacement.
6437 // The upper 16 bits are the second instruction, which contains the low
6438 // 11 bits of the displacement, as well as differentiating bl and blx.
6439 uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
6440 uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
6441 uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
6442 uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
6443 uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
6444 uint32_t j1 = (i1 == s);
6445 uint32_t j2 = (i2 == s);
6446 if ( is_bl ) {
6447 if ( targetIsThumb )
6448 opcode = 0xD000F000; // keep bl
6449 else
6450 opcode = 0xC000F000; // change to blx
6451 }
6452 else if ( is_blx ) {
6453 if ( targetIsThumb )
6454 opcode = 0xD000F000; // change to bl
6455 else
6456 opcode = 0xC000F000; // keep blx
6457 }
6458 else if ( !is_bl && !is_blx && !targetIsThumb ) {
6459 throwf("don't know how to convert instruction %x referencing %s to arm",
6460 instruction, ref->getTarget().getDisplayName());
6461 }
6462 nextDisp = (j1 << 13) | (j2 << 11) | imm11;
6463 firstDisp = (s << 10) | imm10;
6464 newInstruction = opcode | (nextDisp << 16) | firstDisp;
6465 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
6466 // s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
6467 LittleEndian::set32(*fixUp, newInstruction);
6468 }
6469 }
6470 else {
6471 throwf("thumb bl/blx out of range (%lld max is +/-4M) from %s in %s to %s in %s",
6472 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6473 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6474 }
6475 }
6476 else {
6477 // The instruction is really two instructions:
6478 // The lower 16 bits are the first instruction, which contains the high
6479 // 11 bits of the displacement.
6480 // The upper 16 bits are the second instruction, which contains the low
6481 // 11 bits of the displacement, as well as differentiating bl and blx.
6482 firstDisp = (uint32_t)(displacement >> 12) & 0x7FF;
6483 nextDisp = (uint32_t)(displacement >> 1) & 0x7FF;
6484 if ( is_bl && !targetIsThumb ) {
6485 opcode = 0xE800F000;
6486 }
6487 else if ( is_blx && targetIsThumb ) {
6488 opcode = 0xF800F000;
6489 }
6490 else if ( !is_bl && !is_blx && !targetIsThumb ) {
6491 throwf("don't know how to convert instruction %x referencing %s to arm",
6492 instruction, ref->getTarget().getDisplayName());
6493 }
6494 else {
6495 opcode = instruction & 0xF800F800;
6496 }
6497 newInstruction = opcode | (nextDisp << 16) | firstDisp;
6498 LittleEndian::set32(*fixUp, newInstruction);
6499 }
6500 break;
6501 case arm::kDtraceProbeSite:
6502 if ( inAtom->isThumb() ) {
6503 // change 32-bit blx call site to two thumb NOPs
6504 LittleEndian::set32(*fixUp, 0x46C046C0);
6505 }
6506 else {
6507 // change call site to a NOP
6508 LittleEndian::set32(*fixUp, 0xE1A00000);
6509 }
6510 break;
6511 case arm::kDtraceIsEnabledSite:
6512 if ( inAtom->isThumb() ) {
6513 // change 32-bit blx call site to 'nop', 'eor r0, r0'
6514 LittleEndian::set32(*fixUp, 0x46C04040);
6515 }
6516 else {
6517 // change call site to 'eor r0, r0, r0'
6518 LittleEndian::set32(*fixUp, 0xE0200000);
6519 }
6520 break;
6521 case arm::kDtraceTypeReference:
6522 case arm::kDtraceProbe:
6523 // nothing to fix up
6524 break;
6525 case arm::kPointerDiff12:
6526 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6527 if ( (displacement > 4092LL) || (displacement <-4092LL) ) {
6528 throwf("ldr 12-bit displacement out of range (%lld max +/-4096) in %s", displacement, inAtom->getDisplayName());
6529 }
6530 instruction = LittleEndian::get32(*fixUp);
6531 if ( displacement >= 0 ) {
6532 instruction &= 0xFFFFF000;
6533 instruction |= ((uint32_t)displacement & 0xFFF);
6534 }
6535 else {
6536 instruction &= 0xFF7FF000;
6537 instruction |= ((uint32_t)(-displacement) & 0xFFF);
6538 }
6539 LittleEndian::set32(*fixUp, instruction);
6540 break;
6541 }
6542 }
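#if 0
// Minimal sketch (not compiled in) of how the Thumb-2 wide bl/blx displacement above is
// packed; the helper name is hypothetical.  Worked example: displacement = 0x500000 (+5MB)
// with the "keep bl" opcode 0xD000F000 yields 0xF000F100, i.e. the little-endian halfword
// pair 0xF100 (S=0, imm10=0x100) followed by 0xF000 (J1=1, J2=0, imm11=0).
static uint32_t sketchEncodeThumb2Branch(int64_t displacement, uint32_t opcode)
{
	uint32_t s     = (uint32_t)(displacement >> 24) & 0x1;
	uint32_t i1    = (uint32_t)(displacement >> 23) & 0x1;
	uint32_t i2    = (uint32_t)(displacement >> 22) & 0x1;
	uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
	uint32_t imm11 = (uint32_t)(displacement >>  1) & 0x7FF;
	uint32_t j1    = (i1 == s);                             // J1 = NOT(I1 XOR S)
	uint32_t j2    = (i2 == s);                             // J2 = NOT(I2 XOR S)
	uint32_t nextDisp  = (j1 << 13) | (j2 << 11) | imm11;   // bits of the second halfword
	uint32_t firstDisp = (s << 10) | imm10;                 // bits of the first halfword
	return opcode | (nextDisp << 16) | firstDisp;           // low 16 bits land first in memory
}
#endif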
6543
6544 template <>
6545 void Writer<arm>::fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
6546 {
6547 int64_t displacement;
6548 uint32_t instruction;
6549 uint32_t newInstruction;
6550 uint64_t targetAddr = 0;
6551 int64_t baseAddr;
6552 uint32_t firstDisp;
6553 uint32_t nextDisp;
6554 uint32_t opcode = 0;
6555 bool relocateableExternal = false;
6556 bool is_bl;
6557 bool is_blx;
6558 bool targetIsThumb;
6559
6560 if ( ref->getTargetBinding() != ObjectFile::Reference::kDontBind ) {
6561 targetAddr = ref->getTarget().getAddress() + ref->getTargetOffset();
6562 relocateableExternal = this->makesExternalRelocatableReference(ref->getTarget());
6563 }
6564
6565 uint32_t* fixUp = (uint32_t*)&buffer[ref->getFixUpOffset()];
6566 switch ( (arm::ReferenceKinds)(ref->getKind()) ) {
6567 case arm::kNoFixUp:
6568 case arm::kFollowOn:
6569 case arm::kGroupSubordinate:
6570 // do nothing
6571 break;
6572 case arm::kPointer:
6573 case arm::kReadOnlyPointer:
6574 case arm::kPointerWeakImport:
6575 {
6576 if ( ((SectionInfo*)inAtom->getSection())->fAllNonLazyPointers ) {
6577 // indirect symbol table has INDIRECT_SYMBOL_LOCAL, so we must put address in content
6578 if ( this->indirectSymbolInRelocatableIsLocal(ref) )
6579 LittleEndian::set32(*fixUp, targetAddr);
6580 else
6581 LittleEndian::set32(*fixUp, 0);
6582 }
6583 else if ( relocateableExternal ) {
6584 if ( fOptions.prebind() ) {
6585 switch (ref->getTarget().getDefinitionKind()) {
6586 case ObjectFile::Atom::kExternalDefinition:
6587 case ObjectFile::Atom::kExternalWeakDefinition:
6588 // prebound external relocation ==> pointer contains addend
6589 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6590 break;
6591 case ObjectFile::Atom::kTentativeDefinition:
6592 case ObjectFile::Atom::kRegularDefinition:
6593 case ObjectFile::Atom::kWeakDefinition:
6594 // prebound external relocation to internal atom ==> pointer contains target address + addend
6595 LittleEndian::set32(*fixUp, targetAddr);
6596 break;
6597 case ObjectFile::Atom::kAbsoluteSymbol:
6598 break;
6599 }
6600 }
6601 }
6602 else {
6603 // internal relocation
6604 if ( ref->getTarget().getDefinitionKind() != ObjectFile::Atom::kTentativeDefinition ) {
6605 // pointer contains target address
6606 if ( ref->getTarget().isThumb() && (ref->getTargetOffset() == 0))
6607 targetAddr |= 1;
6608 LittleEndian::set32(*fixUp, targetAddr);
6609 }
6610 else {
6611 // pointer contains addend
6612 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6613 }
6614 }
6615 }
6616 break;
6617 case arm::kPointerDiff:
6618 LittleEndian::set32(*fixUp,
6619 (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset()) );
6620 break;
6621 case arm::kDtraceProbeSite:
6622 case arm::kDtraceIsEnabledSite:
6623 case arm::kBranch24WeakImport:
6624 case arm::kBranch24:
6625 displacement = targetAddr - (inAtom->getAddress() + ref->getFixUpOffset());
6626 			// the ARM pc reads as this instruction's address + 8
6627 displacement -= 8;
6628 // fprintf(stderr, "b/bl/blx fixup to %s at 0x%08llX, displacement = 0x%08llX\n", ref->getTarget().getDisplayName(), ref->getTarget().getAddress(), displacement);
6629 if ( relocateableExternal ) {
6630 // doing "ld -r" to an external symbol
6631 // the mach-o way of encoding this is that the bl instruction's target addr is the offset into the target
6632 displacement -= ref->getTarget().getAddress();
6633 }
6634 else {
6635 // max positive displacement is 0x007FFFFF << 2
6636 // max negative displacement is 0xFF800000 << 2
6637 if ( (displacement > 33554428LL) || (displacement < (-33554432LL)) ) {
6638 throwf("arm b/bl/blx out of range (%lld max is +/-32M) from %s in %s to %s in %s",
6639 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6640 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6641 }
6642 }
6643 instruction = LittleEndian::get32(*fixUp);
6644 // Make sure we are calling arm with bl, thumb with blx
6645 is_bl = ((instruction & 0xFF000000) == 0xEB000000);
6646 is_blx = ((instruction & 0xFE000000) == 0xFA000000);
6647 if ( is_bl && ref->getTarget().isThumb() ) {
6648 uint32_t opcode = 0xFA000000;
6649 uint32_t disp = (uint32_t)(displacement >> 2) & 0x00FFFFFF;
6650 uint32_t h_bit = (uint32_t)(displacement << 23) & 0x01000000;
6651 newInstruction = opcode | h_bit | disp;
6652 }
6653 else if ( is_blx && !ref->getTarget().isThumb() ) {
6654 uint32_t opcode = 0xEB000000;
6655 uint32_t disp = (uint32_t)(displacement >> 2) & 0x00FFFFFF;
6656 newInstruction = opcode | disp;
6657 }
6658 else if ( !is_bl && !is_blx && ref->getTarget().isThumb() ) {
6659 throwf("don't know how to convert instruction %x referencing %s to thumb",
6660 instruction, ref->getTarget().getDisplayName());
6661 }
6662 else {
6663 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(displacement >> 2) & 0x00FFFFFF);
6664 }
6665 LittleEndian::set32(*fixUp, newInstruction);
6666 break;
6667 case arm::kThumbBranch22WeakImport:
6668 case arm::kThumbBranch22:
6669 instruction = LittleEndian::get32(*fixUp);
6670 is_bl = ((instruction & 0xF8000000) == 0xF8000000);
6671 is_blx = ((instruction & 0xF8000000) == 0xE8000000);
6672 targetIsThumb = ref->getTarget().isThumb();
6673
6674 			// the Thumb pc reads as this instruction's address + 4
6675 baseAddr = inAtom->getAddress() + ref->getFixUpOffset() + 4;
6676 // If the target is not thumb, we will be generating a blx instruction
6677 // Since blx cannot have the low bit set, set bit[1] of the target to
6678 // bit[1] of the base address, so that the difference is a multiple of
6679 // 4 bytes.
6680 if (!targetIsThumb) {
6681 targetAddr &= -3ULL;
6682 targetAddr |= (baseAddr & 2LL);
6683 }
6684 displacement = targetAddr - baseAddr;
6685
6686 //fprintf(stderr, "thumb %s fixup to %s at 0x%08llX, baseAddr = 0x%08llX, displacement = 0x%08llX, %d\n", is_blx ? "blx" : "bl", ref->getTarget().getDisplayName(), targetAddr, baseAddr, displacement, targetIsThumb);
6687 if ( relocateableExternal ) {
6688 // doing "ld -r" to an external symbol
6689 // the mach-o way of encoding this is that the bl instruction's target addr is the offset into the target
6690 displacement -= ref->getTarget().getAddress();
6691 }
6692
6693 if ( (displacement > 4194302LL) || (displacement < (-4194304LL)) ) {
6694 // armv7 supports a larger displacement
6695 if ( fOptions.preferSubArchitecture() && fOptions.subArchitecture() == CPU_SUBTYPE_ARM_V7 ) {
6696 if ( (displacement > 16777214) || (displacement < (-16777216LL)) ) {
6697 throwf("thumb bl/blx out of range (%lld max is +/-16M) from %s in %s to %s in %s",
6698 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6699 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6700 }
6701 else {
6702 // The instruction is really two instructions:
6703 // The lower 16 bits are the first instruction, which contains the high
6704 // 11 bits of the displacement.
6705 // The upper 16 bits are the second instruction, which contains the low
6706 // 11 bits of the displacement, as well as differentiating bl and blx.
6707 uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
6708 uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
6709 uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
6710 uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
6711 uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
6712 uint32_t j1 = (i1 == s);
6713 uint32_t j2 = (i2 == s);
6714 if ( is_bl ) {
6715 if ( targetIsThumb )
6716 opcode = 0xD000F000; // keep bl
6717 else
6718 opcode = 0xC000F000; // change to blx
6719 }
6720 else if ( is_blx ) {
6721 if ( targetIsThumb )
6722 opcode = 0xD000F000; // change to bl
6723 else
6724 opcode = 0xC000F000; // keep blx
6725 }
6726 else if ( !is_bl && !is_blx && !targetIsThumb ) {
6727 throwf("don't know how to convert instruction %x referencing %s to arm",
6728 instruction, ref->getTarget().getDisplayName());
6729 }
6730 nextDisp = (j1 << 13) | (j2 << 11) | imm11;
6731 firstDisp = (s << 10) | imm10;
6732 newInstruction = opcode | (nextDisp << 16) | firstDisp;
6733 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
6734 // s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
6735 LittleEndian::set32(*fixUp, newInstruction);
6736 break;
6737 }
6738 }
6739 else {
6740 throwf("thumb bl/blx out of range (%lld max is +/-4M) from %s in %s to %s in %s",
6741 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
6742 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
6743 }
6744 }
6745 // The instruction is really two instructions:
6746 // The lower 16 bits are the first instruction, which contains the first
6747 // 11 bits of the displacement.
6748 // The upper 16 bits are the second instruction, which contains the next
6749 // 11 bits of the displacement, as well as differentiating bl and blx.
6750 firstDisp = (uint32_t)(displacement >> 12) & 0x7FF;
6751 nextDisp = (uint32_t)(displacement >> 1) & 0x7FF;
6752 if ( is_bl && !targetIsThumb ) {
6753 opcode = 0xE800F000;
6754 }
6755 else if ( is_blx && targetIsThumb ) {
6756 opcode = 0xF800F000;
6757 }
6758 else if ( !is_bl && !is_blx && !targetIsThumb ) {
6759 throwf("don't know how to convert instruction %x referencing %s to arm",
6760 instruction, ref->getTarget().getDisplayName());
6761 }
6762 else {
6763 opcode = instruction & 0xF800F800;
6764 }
6765 newInstruction = opcode | (nextDisp << 16) | firstDisp;
6766 LittleEndian::set32(*fixUp, newInstruction);
6767 break;
6768 case arm::kDtraceProbe:
6769 case arm::kDtraceTypeReference:
6770 // nothing to fix up
6771 break;
6772 case arm::kPointerDiff12:
6773 throw "internal error. no reloc for 12-bit pointer diffs";
6774 }
6775 }
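// For "ld -r" output (the relocatable fix-ups above), a branch to an external symbol keeps
// only the offset into the target in the instruction (note the "displacement -=
// ref->getTarget().getAddress()" above); the real displacement is recomputed by
// fixUpReferenceFinal() at final link time, once addresses are known.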
6776
6777 template <>
6778 void Writer<x86>::fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
6779 {
6780 uint32_t* fixUp = (uint32_t*)&buffer[ref->getFixUpOffset()];
6781 uint8_t* dtraceProbeSite;
6782 const int64_t kTwoGigLimit = 0x7FFFFFFF;
6783 const int64_t kSixteenMegLimit = 0x00FFFFFF;
6784 const int64_t kSixtyFourKiloLimit = 0x7FFF;
6785 const int64_t kOneTwentyEightLimit = 0x7F;
6786 int64_t displacement;
6787 uint32_t temp;
6788 x86::ReferenceKinds kind = (x86::ReferenceKinds)(ref->getKind());
6789 switch ( kind ) {
6790 case x86::kNoFixUp:
6791 case x86::kFollowOn:
6792 case x86::kGroupSubordinate:
6793 // do nothing
6794 break;
6795 case x86::kPointerWeakImport:
6796 case x86::kPointer:
6797 {
6798 if ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal ) {
6799 if ( fOptions.prebind() ) {
6800 switch (ref->getTarget().getDefinitionKind()) {
6801 case ObjectFile::Atom::kExternalDefinition:
6802 case ObjectFile::Atom::kExternalWeakDefinition:
6803 // prebound external relocation ==> pointer contains addend
6804 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6805 break;
6806 case ObjectFile::Atom::kTentativeDefinition:
6807 case ObjectFile::Atom::kRegularDefinition:
6808 case ObjectFile::Atom::kWeakDefinition:
6809 // prebound external relocation to internal atom ==> pointer contains target address + addend
6810 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6811 break;
6812 case ObjectFile::Atom::kAbsoluteSymbol:
6813 break;
6814 }
6815 }
6816 else if ( !fOptions.makeClassicDyldInfo()
6817 && (ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) ) {
6818 // when using only compressed dyld info, pointer is initially set to point directly to weak definition
6819 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6820 }
6821 else {
6822 // external relocation ==> pointer contains addend
6823 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6824 }
6825 }
6826 else {
6827 // pointer contains target address
6828 //printf("Atom::fixUpReferenceFinal() target.name=%s, target.address=0x%08llX\n", target.getDisplayName(), target.getAddress());
6829 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6830 }
6831 }
6832 break;
6833 case x86::kPointerDiff:
6834 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6835 LittleEndian::set32(*fixUp, (uint32_t)displacement);
6836 break;
6837 case x86::kPointerDiff16:
6838 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6839 if ( (displacement > kSixtyFourKiloLimit) || (displacement < -(kSixtyFourKiloLimit)) )
6840 throwf("16-bit pointer diff out of range in %s", inAtom->getDisplayName());
6841 LittleEndian::set16(*((uint16_t*)fixUp), (uint16_t)displacement);
6842 break;
6843 case x86::kPointerDiff24:
6844 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6845 if ( (displacement > kSixteenMegLimit) || (displacement < 0) )
6846 throwf("24-bit pointer diff out of range in %s", inAtom->getDisplayName());
6847 temp = LittleEndian::get32(*fixUp);
6848 temp &= 0xFF000000;
6849 temp |= (displacement & 0x00FFFFFF);
6850 LittleEndian::set32(*fixUp, temp);
6851 break;
6852 case x86::kSectionOffset24:
6853 displacement = ref->getTarget().getSectionOffset();
6854 if ( (displacement > kSixteenMegLimit) || (displacement < 0) )
6855 throwf("24-bit pointer diff out of range in %s", inAtom->getDisplayName());
6856 temp = LittleEndian::get32(*fixUp);
6857 temp &= 0xFF000000;
6858 temp |= (displacement & 0x00FFFFFF);
6859 LittleEndian::set32(*fixUp, temp);
6860 break;
6861 case x86::kDtraceProbeSite:
6862 // change call site to a NOP
6863 dtraceProbeSite = (uint8_t*)fixUp;
6864 dtraceProbeSite[-1] = 0x90; // 1-byte nop
6865 dtraceProbeSite[0] = 0x0F; // 4-byte nop
6866 dtraceProbeSite[1] = 0x1F;
6867 dtraceProbeSite[2] = 0x40;
6868 dtraceProbeSite[3] = 0x00;
6869 break;
6870 case x86::kDtraceIsEnabledSite:
6871 // change call site to a clear eax
6872 dtraceProbeSite = (uint8_t*)fixUp;
6873 dtraceProbeSite[-1] = 0x33; // xorl eax,eax
6874 dtraceProbeSite[0] = 0xC0;
6875 dtraceProbeSite[1] = 0x90; // 1-byte nop
6876 dtraceProbeSite[2] = 0x90; // 1-byte nop
6877 dtraceProbeSite[3] = 0x90; // 1-byte nop
6878 break;
6879 case x86::kPCRel32WeakImport:
6880 case x86::kPCRel32:
6881 case x86::kPCRel16:
6882 case x86::kPCRel8:
6883 displacement = 0;
6884 switch ( ref->getTarget().getDefinitionKind() ) {
6885 case ObjectFile::Atom::kRegularDefinition:
6886 case ObjectFile::Atom::kWeakDefinition:
6887 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
6888 break;
6889 case ObjectFile::Atom::kExternalDefinition:
6890 case ObjectFile::Atom::kExternalWeakDefinition:
6891 throw "codegen problem, can't use rel32 to external symbol";
6892 case ObjectFile::Atom::kTentativeDefinition:
6893 displacement = 0;
6894 break;
6895 case ObjectFile::Atom::kAbsoluteSymbol:
6896 displacement = (ref->getTarget().getSectionOffset() + ref->getTargetOffset()) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
6897 break;
6898 }
6899 if ( kind == x86::kPCRel8 ) {
6900 displacement += 3;
6901 if ( (displacement > kOneTwentyEightLimit) || (displacement < -(kOneTwentyEightLimit)) ) {
6902 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
6903 throwf("rel8 out of range in %s", inAtom->getDisplayName());
6904 }
6905 *(int8_t*)fixUp = (int8_t)displacement;
6906 }
6907 else if ( kind == x86::kPCRel16 ) {
6908 displacement += 2;
6909 if ( (displacement > kSixtyFourKiloLimit) || (displacement < -(kSixtyFourKiloLimit)) ) {
6910 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
6911 throwf("rel16 out of range in %s", inAtom->getDisplayName());
6912 }
6913 LittleEndian::set16(*((uint16_t*)fixUp), (uint16_t)displacement);
6914 }
6915 else {
6916 if ( (displacement > kTwoGigLimit) || (displacement < (-kTwoGigLimit)) ) {
6917 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
6918 throwf("rel32 out of range in %s", inAtom->getDisplayName());
6919 }
6920 LittleEndian::set32(*fixUp, (int32_t)displacement);
6921 }
6922 break;
6923 case x86::kAbsolute32:
6924 switch ( ref->getTarget().getDefinitionKind() ) {
6925 case ObjectFile::Atom::kRegularDefinition:
6926 case ObjectFile::Atom::kWeakDefinition:
6927 case ObjectFile::Atom::kTentativeDefinition:
6928 // pointer contains target address
6929 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6930 break;
6931 case ObjectFile::Atom::kExternalDefinition:
6932 case ObjectFile::Atom::kExternalWeakDefinition:
6933 // external relocation ==> pointer contains addend
6934 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6935 break;
6936 case ObjectFile::Atom::kAbsoluteSymbol:
6937 // pointer contains target address
6938 LittleEndian::set32(*fixUp, ref->getTarget().getSectionOffset() + ref->getTargetOffset());
6939 break;
6940 }
6941 break;
6942 case x86::kImageOffset32:
6943 // offset of target atom from mach_header
6944 displacement = ref->getTarget().getAddress() + ref->getTargetOffset() - fMachHeaderAtom->getAddress();
6945 LittleEndian::set32(*fixUp, (int32_t)displacement);
6946 break;
6947 case x86::kDtraceTypeReference:
6948 case x86::kDtraceProbe:
6949 // nothing to fix up
6950 break;
6951 }
6952 }
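// The PC-relative fix-ups above measure from the end of an assumed 4-byte immediate:
//   displacement = (targetAddr + targetOffset) - (atomAddr + fixUpOffset + 4)
// e.g. an atom at 0x1F00 whose immediate starts at offset 0x10 and targets 0x2000 stores
// 0x2000 - (0x1F00 + 0x10 + 4) = 0xEC.  The +3/+2 adjustments for kPCRel8/kPCRel16
// compensate for the immediate being 3 or 2 bytes narrower than the assumed 4 bytes.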
6953
6954
6955
6956 template <>
6957 void Writer<x86>::fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
6958 {
6959 const int64_t kTwoGigLimit = 0x7FFFFFFF;
6960 const int64_t kSixtyFourKiloLimit = 0x7FFF;
6961 const int64_t kOneTwentyEightLimit = 0x7F;
6962 uint32_t* fixUp = (uint32_t*)&buffer[ref->getFixUpOffset()];
6963 bool isExtern = this->makesExternalRelocatableReference(ref->getTarget());
6964 int64_t displacement;
6965 x86::ReferenceKinds kind = (x86::ReferenceKinds)(ref->getKind());
6966 switch ( kind ) {
6967 case x86::kNoFixUp:
6968 case x86::kFollowOn:
6969 case x86::kGroupSubordinate:
6970 // do nothing
6971 break;
6972 case x86::kPointer:
6973 case x86::kPointerWeakImport:
6974 case x86::kAbsolute32:
6975 {
6976 if ( ((SectionInfo*)inAtom->getSection())->fAllNonLazyPointers ) {
6977 // if INDIRECT_SYMBOL_LOCAL the content is pointer, else it is zero
6978 if ( this->indirectSymbolInRelocatableIsLocal(ref) )
6979 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6980 else
6981 LittleEndian::set32(*fixUp, 0);
6982 }
6983 else if ( isExtern ) {
6984 // external relocation ==> pointer contains addend
6985 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6986 }
6987 else if ( ref->getTarget().getDefinitionKind() != ObjectFile::Atom::kTentativeDefinition ) {
6988 // internal relocation => pointer contains target address
6989 LittleEndian::set32(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
6990 }
6991 else {
6992 // internal relocation to tentative ==> pointer contains addend
6993 LittleEndian::set32(*fixUp, ref->getTargetOffset());
6994 }
6995 }
6996 break;
6997 case x86::kPointerDiff:
6998 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
6999 LittleEndian::set32(*fixUp, (uint32_t)displacement);
7000 break;
7001 case x86::kPointerDiff16:
7002 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
7003 if ( (displacement > kSixtyFourKiloLimit) || (displacement < -(kSixtyFourKiloLimit)) )
7004 throwf("16-bit pointer diff out of range in %s", inAtom->getDisplayName());
7005 LittleEndian::set16(*((uint16_t*)fixUp), (uint16_t)displacement);
7006 break;
7007 case x86::kPCRel8:
7008 case x86::kPCRel16:
7009 case x86::kPCRel32:
7010 case x86::kPCRel32WeakImport:
7011 case x86::kDtraceProbeSite:
7012 case x86::kDtraceIsEnabledSite:
7013 {
7014 if ( isExtern )
7015 displacement = ref->getTargetOffset() - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
7016 else
7017 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
7018 if ( kind == x86::kPCRel8 ) {
7019 displacement += 3;
7020 if ( (displacement > kOneTwentyEightLimit) || (displacement < -(kOneTwentyEightLimit)) ) {
7021 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7022 throwf("rel8 out of range (%lld)in %s", displacement, inAtom->getDisplayName());
7023 }
7024 int8_t byte = (int8_t)displacement;
7025 *((int8_t*)fixUp) = byte;
7026 }
7027 else if ( kind == x86::kPCRel16 ) {
7028 displacement += 2;
7029 if ( (displacement > kSixtyFourKiloLimit) || (displacement < -(kSixtyFourKiloLimit)) ) {
7030 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7031 throwf("rel16 out of range in %s", inAtom->getDisplayName());
7032 }
7033 int16_t word = (int16_t)displacement;
7034 LittleEndian::set16(*((uint16_t*)fixUp), word);
7035 }
7036 else {
7037 if ( (displacement > kTwoGigLimit) || (displacement < (-kTwoGigLimit)) ) {
7038 //fprintf(stderr, "call out of range, displacement=ox%llX, from %s in %s to %s in %s\n", displacement,
7039 // inAtom->getDisplayName(), inAtom->getFile()->getPath(), ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
7040 throwf("rel32 out of range in %s", inAtom->getDisplayName());
7041 }
7042 LittleEndian::set32(*fixUp, (int32_t)displacement);
7043 }
7044 }
7045 break;
7046 case x86::kPointerDiff24:
7047 throw "internal linker error, kPointerDiff24 can't be encoded into object files";
7048 case x86::kImageOffset32:
7049 throw "internal linker error, kImageOffset32 can't be encoded into object files";
7050 case x86::kSectionOffset24:
7051 throw "internal linker error, kSectionOffset24 can't be encoded into object files";
7052 case x86::kDtraceProbe:
7053 case x86::kDtraceTypeReference:
7054 // nothing to fix up
7055 break;
7056 }
7057 }
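// In the relocatable ("ld -r") variant above, isExtern decides what is stored in the section
// content: an external relocation stores only the addend (the relocation entry names the
// symbol), while an internal relocation stores the resolved target address; internal
// references to tentative definitions also store just the addend, since their final
// addresses are not yet known.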
7058
7059 template <>
7060 void Writer<x86_64>::fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7061 {
7062 const int64_t twoGigLimit = 0x7FFFFFFF;
7063 const int64_t kSixteenMegLimit = 0x00FFFFFF;
7064 uint64_t* fixUp = (uint64_t*)&buffer[ref->getFixUpOffset()];
7065 uint8_t* dtraceProbeSite;
7066 int64_t displacement = 0;
7067 uint32_t temp;
7068 switch ( (x86_64::ReferenceKinds)(ref->getKind()) ) {
7069 case x86_64::kNoFixUp:
7070 case x86_64::kGOTNoFixUp:
7071 case x86_64::kFollowOn:
7072 case x86_64::kGroupSubordinate:
7073 // do nothing
7074 break;
7075 case x86_64::kPointerWeakImport:
7076 case x86_64::kPointer:
7077 {
7078 if ( &ref->getTarget() != NULL ) {
7079 //fprintf(stderr, "fixUpReferenceFinal: %s reference to %s\n", this->getDisplayName(), target.getDisplayName());
7080 if ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal) {
7081 if ( !fOptions.makeClassicDyldInfo()
7082 && (ref->getTarget().getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) ) {
7083 // when using only compressed dyld info, pointer is initially set to point directly to weak definition
7084 LittleEndian::set64(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
7085 }
7086 else {
7087 // external relocation ==> pointer contains addend
7088 LittleEndian::set64(*fixUp, ref->getTargetOffset());
7089 }
7090 }
7091 else {
7092 // internal relocation
7093 // pointer contains target address
7094 //printf("Atom::fixUpReferenceFinal) target.name=%s, target.address=0x%08llX\n", target.getDisplayName(), target.getAddress());
7095 LittleEndian::set64(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
7096 }
7097 }
7098 }
7099 break;
7100 case x86_64::kPointer32:
7101 {
7102 //fprintf(stderr, "fixUpReferenceFinal: %s reference to %s\n", this->getDisplayName(), target.getDisplayName());
7103 if ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal ) {
7104 // external relocation
7105 throwf("32-bit pointer to dylib or weak symbol %s not supported for x86_64",ref->getTarget().getDisplayName());
7106 }
7107 else {
7108 // internal relocation
7109 // pointer contains target address
7110 //printf("Atom::fixUpReferenceFinal) target.name=%s, target.address=0x%08llX\n", target.getDisplayName(), target.getAddress());
7111 displacement = ref->getTarget().getAddress() + ref->getTargetOffset();
7112 switch ( fOptions.outputKind() ) {
7113 case Options::kObjectFile:
7114 case Options::kPreload:
7115 case Options::kDyld:
7116 case Options::kDynamicLibrary:
7117 case Options::kDynamicBundle:
7118 case Options::kKextBundle:
7119 throwf("32-bit pointer to symbol %s not supported for x86_64",ref->getTarget().getDisplayName());
7120 case Options::kDynamicExecutable:
7121 // <rdar://problem/5855588> allow x86_64 main executables to use 32-bit pointers if program loads in low 2GB
7122 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) )
7123 throw "32-bit pointer out of range";
7124 break;
7125 case Options::kStaticExecutable:
7126 // <rdar://problem/5855588> allow x86_64 mach_kernel to truncate pointers
7127 break;
7128 }
7129 LittleEndian::set32(*((uint32_t*)fixUp), (uint32_t)displacement);
7130 }
7131 }
7132 break;
7133 case x86_64::kPointerDiff32:
7134 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
7135 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) )
7136 throw "32-bit pointer difference out of range";
7137 LittleEndian::set32(*((uint32_t*)fixUp), (uint32_t)displacement);
7138 break;
7139 case x86_64::kPointerDiff:
7140 LittleEndian::set64(*fixUp,
7141 (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset()) );
7142 break;
7143 case x86_64::kPointerDiff24:
7144 displacement = (ref->getTarget().getAddress() + ref->getTargetOffset()) - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset());
7145 if ( (displacement > kSixteenMegLimit) || (displacement < 0) )
7146 throwf("24-bit pointer diff out of range in %s", inAtom->getDisplayName());
7147 temp = LittleEndian::get32(*((uint32_t*)fixUp));
7148 temp &= 0xFF000000;
7149 temp |= (displacement & 0x00FFFFFF);
7150 LittleEndian::set32(*((uint32_t*)fixUp), temp);
7151 break;
7152 case x86_64::kSectionOffset24:
7153 displacement = ref->getTarget().getSectionOffset();
7154 if ( (displacement > kSixteenMegLimit) || (displacement < 0) )
7155 throwf("24-bit section offset out of range in %s", inAtom->getDisplayName());
7156 temp = LittleEndian::get32(*((uint32_t*)fixUp));
7157 temp &= 0xFF000000;
7158 temp |= (displacement & 0x00FFFFFF);
7159 LittleEndian::set32(*((uint32_t*)fixUp), temp);
7160 break;
7161 case x86_64::kPCRel32GOTLoad:
7162 case x86_64::kPCRel32GOTLoadWeakImport:
7163 // if GOT entry was optimized away, change movq instruction to a leaq
7164 if ( std::find(fAllSynthesizedNonLazyPointers.begin(), fAllSynthesizedNonLazyPointers.end(), &(ref->getTarget())) == fAllSynthesizedNonLazyPointers.end() ) {
7165 //fprintf(stderr, "GOT for %s optimized away\n", ref->getTarget().getDisplayName());
7166 uint8_t* opcodes = (uint8_t*)fixUp;
7167 if ( opcodes[-2] != 0x8B )
7168 throw "GOT load reloc does not point to a movq instruction";
7169 opcodes[-2] = 0x8D;
7170 }
7171 // fall into general rel32 case
7172 case x86_64::kBranchPCRel32WeakImport:
7173 case x86_64::kBranchPCRel32:
7174 case x86_64::kBranchPCRel8:
7175 case x86_64::kPCRel32:
7176 case x86_64::kPCRel32_1:
7177 case x86_64::kPCRel32_2:
7178 case x86_64::kPCRel32_4:
7179 case x86_64::kPCRel32GOT:
7180 case x86_64::kPCRel32GOTWeakImport:
7181 switch ( ref->getTarget().getDefinitionKind() ) {
7182 case ObjectFile::Atom::kRegularDefinition:
7183 case ObjectFile::Atom::kWeakDefinition:
7184 case ObjectFile::Atom::kTentativeDefinition:
7185 displacement = (ref->getTarget().getAddress() + (int32_t)ref->getTargetOffset()) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
7186 break;
7187 case ObjectFile::Atom::kAbsoluteSymbol:
7188 displacement = (ref->getTarget().getSectionOffset() + (int32_t)ref->getTargetOffset()) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
7189 break;
7190 case ObjectFile::Atom::kExternalDefinition:
7191 case ObjectFile::Atom::kExternalWeakDefinition:
7192 if ( fOptions.outputKind() == Options::kKextBundle )
7193 displacement = 0;
7194 else
7195 throwf("codegen problem, can't use rel32 to external symbol %s", ref->getTarget().getDisplayName());
7196 break;
7197 }
7198 switch ( ref->getKind() ) {
7199 case x86_64::kPCRel32_1:
7200 displacement -= 1;
7201 break;
7202 case x86_64::kPCRel32_2:
7203 displacement -= 2;
7204 break;
7205 case x86_64::kPCRel32_4:
7206 displacement -= 4;
7207 break;
7208 case x86_64::kBranchPCRel8:
7209 displacement += 3;
7210 break;
7211 }
7212 if ( ref->getKind() == x86_64::kBranchPCRel8 ) {
7213 if ( (displacement > 127) || (displacement < (-128)) ) {
7214 fprintf(stderr, "branch out of range from %s (%llX) in %s to %s (%llX) in %s\n",
7215 inAtom->getDisplayName(), inAtom->getAddress(), inAtom->getFile()->getPath(), ref->getTarget().getDisplayName(), ref->getTarget().getAddress(), ref->getTarget().getFile()->getPath());
7216 throw "rel8 out of range";
7217 }
7218 *((int8_t*)fixUp) = (int8_t)displacement;
7219 }
7220 else {
7221 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
7222 fprintf(stderr, "reference out of range from %s (%llX) in %s to %s (%llX) in %s\n",
7223 inAtom->getDisplayName(), inAtom->getAddress(), inAtom->getFile()->getPath(), ref->getTarget().getDisplayName(), ref->getTarget().getAddress(), ref->getTarget().getFile()->getPath());
7224 throw "rel32 out of range";
7225 }
7226 LittleEndian::set32(*((uint32_t*)fixUp), (int32_t)displacement);
7227 }
7228 break;
7229 case x86_64::kImageOffset32:
7230 // offset of target atom from mach_header
7231 displacement = ref->getTarget().getAddress() + ref->getTargetOffset() - fMachHeaderAtom->getAddress();
7232 LittleEndian::set32(*((uint32_t*)fixUp), (int32_t)displacement);
7233 break;
7234 case x86_64::kDtraceProbeSite:
7235 // change call site to a NOP
7236 dtraceProbeSite = (uint8_t*)fixUp;
7237 dtraceProbeSite[-1] = 0x90; // 1-byte nop
7238 dtraceProbeSite[0] = 0x0F; // 4-byte nop
7239 dtraceProbeSite[1] = 0x1F;
7240 dtraceProbeSite[2] = 0x40;
7241 dtraceProbeSite[3] = 0x00;
7242 break;
7243 case x86_64::kDtraceIsEnabledSite:
7244 // change call site to a clear eax
7245 dtraceProbeSite = (uint8_t*)fixUp;
7246 dtraceProbeSite[-1] = 0x48; // xorq %rax,%rax (48 33 C0)
7247 dtraceProbeSite[0] = 0x33;
7248 dtraceProbeSite[1] = 0xC0;
7249 dtraceProbeSite[2] = 0x90; // 1-byte nop
7250 dtraceProbeSite[3] = 0x90; // 1-byte nop
7251 break;
7252 case x86_64::kDtraceTypeReference:
7253 case x86_64::kDtraceProbe:
7254 // nothing to fix up
7255 break;
7256 }
7257 }
7258
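// When the GOT entry has been optimized away above, the fixup rewrites the instruction in
// place: the opcode byte of "movq sym@GOTPCREL(%rip), %reg" (0x8B), two bytes before the
// rel32 immediate, becomes 0x8D, turning the load into "leaq sym(%rip), %reg" so the
// instruction produces the address directly. A minimal sketch of that byte patch, assuming
// a raw instruction buffer rather than ld64's atom/fixup types:
#if 0   // illustrative sketch only, kept out of the build
#include <stdint.h>
#include <assert.h>

static void convertGotLoadToLea(uint8_t* fixUpSite)
{
    // layout: REX prefix, opcode, ModRM, then the 4-byte rip-relative immediate
    uint8_t* opcode = fixUpSite - 2;
    assert(*opcode == 0x8B && "expected a movq instruction");
    *opcode = 0x8D;     // leaq uses the same ModRM/immediate encoding
}

int main()
{
    uint8_t insn[] = { 0x48, 0x8B, 0x05, 0x00, 0x00, 0x00, 0x00 };  // movq 0x0(%rip),%rax
    convertGotLoadToLea(&insn[3]);                                  // fixup covers bytes 3..6
    assert(insn[1] == 0x8D);                                        // now leaq 0x0(%rip),%rax
    return 0;
}
#endif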
7259 template <>
7260 void Writer<x86_64>::fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7261 {
7262 const int64_t twoGigLimit = 0x7FFFFFFF;
7263 bool external = this->makesExternalRelocatableReference(ref->getTarget());
7264 uint64_t* fixUp = (uint64_t*)&buffer[ref->getFixUpOffset()];
7265 int64_t displacement = 0;
7266 int32_t temp32;
7267 switch ( (x86_64::ReferenceKinds)(ref->getKind()) ) {
7268 case x86_64::kNoFixUp:
7269 case x86_64::kGOTNoFixUp:
7270 case x86_64::kFollowOn:
7271 case x86_64::kGroupSubordinate:
7272 // do nothing
7273 break;
7274 case x86_64::kPointer:
7275 case x86_64::kPointerWeakImport:
7276 {
7277 if ( external ) {
7278 // external relocation ==> pointer contains addend
7279 LittleEndian::set64(*fixUp, ref->getTargetOffset());
7280 }
7281 else {
7282 // internal relocation ==> pointer contains target address
7283 LittleEndian::set64(*fixUp, ref->getTarget().getAddress() + ref->getTargetOffset());
7284 }
7285 }
7286 break;
7287 case x86_64::kPointer32:
7288 {
7289 if ( external ) {
7290 // external relocation ==> pointer contains addend
7291 LittleEndian::set32(*((uint32_t*)fixUp), ref->getTargetOffset());
7292 }
7293 else {
7294 // internal relocation ==> pointer contains target address
7295 LittleEndian::set32(*((uint32_t*)fixUp), ref->getTarget().getAddress() + ref->getTargetOffset());
7296 }
7297 }
7298 break;
7299 case x86_64::kPointerDiff32:
7300 displacement = ref->getTargetOffset() - ref->getFromTargetOffset();
7301 if ( ref->getTarget().getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableNotIn )
7302 displacement += ref->getTarget().getAddress();
7303 if ( ref->getFromTarget().getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableNotIn )
7304 displacement -= ref->getFromTarget().getAddress();
7305 LittleEndian::set32(*((uint32_t*)fixUp), displacement);
7306 break;
7307 case x86_64::kPointerDiff:
7308 displacement = ref->getTargetOffset() - ref->getFromTargetOffset();
7309 if ( ref->getTarget().getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableNotIn )
7310 displacement += ref->getTarget().getAddress();
7311 if ( ref->getFromTarget().getSymbolTableInclusion() == ObjectFile::Atom::kSymbolTableNotIn )
7312 displacement -= ref->getFromTarget().getAddress();
7313 LittleEndian::set64(*fixUp, displacement);
7314 break;
7315 case x86_64::kBranchPCRel32:
7316 case x86_64::kBranchPCRel32WeakImport:
7317 case x86_64::kDtraceProbeSite:
7318 case x86_64::kDtraceIsEnabledSite:
7319 case x86_64::kPCRel32:
7320 case x86_64::kPCRel32_1:
7321 case x86_64::kPCRel32_2:
7322 case x86_64::kPCRel32_4:
7323 // turn the unsigned 64-bit target offset into a signed 32-bit offset, since that is what the source originally had
7324 temp32 = ref->getTargetOffset();
7325 if ( external ) {
7326 // extern relocation contains addend
7327 displacement = temp32;
7328 }
7329 else {
7330 // internal relocations contain delta to target address
7331 displacement = (ref->getTarget().getAddress() + temp32) - (inAtom->getAddress() + ref->getFixUpOffset() + 4);
7332 }
7333 switch ( ref->getKind() ) {
7334 case x86_64::kPCRel32_1:
7335 displacement -= 1;
7336 break;
7337 case x86_64::kPCRel32_2:
7338 displacement -= 2;
7339 break;
7340 case x86_64::kPCRel32_4:
7341 displacement -= 4;
7342 break;
7343 }
7344 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
7345 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7346 throw "rel32 out of range";
7347 }
7348 LittleEndian::set32(*((uint32_t*)fixUp), (int32_t)displacement);
7349 break;
7350 case x86_64::kBranchPCRel8:
7351 // turn the unsigned 64-bit target offset into a signed 32-bit offset, since that is what the source originally had
7352 temp32 = ref->getTargetOffset();
7353 if ( external ) {
7354 // extern relocation contains addend
7355 displacement = temp32;
7356 }
7357 else {
7358 // internal relocations contain delta to target address
7359 displacement = (ref->getTarget().getAddress() + temp32) - (inAtom->getAddress() + ref->getFixUpOffset() + 1);
7360 }
7361 if ( (displacement > 127) || (displacement < (-128)) ) {
7362 //fprintf(stderr, "call out of range from %s in %s to %s in %s\n", this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7363 throw "rel8 out of range";
7364 }
7365 *((int8_t*)fixUp) = (int8_t)displacement;
7366 break;
7367 case x86_64::kPCRel32GOT:
7368 case x86_64::kPCRel32GOTLoad:
7369 case x86_64::kPCRel32GOTWeakImport:
7370 case x86_64::kPCRel32GOTLoadWeakImport:
7371 // contains addend (usually zero)
7372 LittleEndian::set32(*((uint32_t*)fixUp), (uint32_t)(ref->getTargetOffset()));
7373 break;
7374 case x86_64::kPointerDiff24:
7375 throw "internal linker error, kPointerDiff24 can't be encoded into object files";
7376 case x86_64::kImageOffset32:
7377 throw "internal linker error, kImageOffset32 can't be encoded into object files";
7378 case x86_64::kSectionOffset24:
7379 throw "internal linker error, kSectionOffset24 can't be encoded into object files";
7380 case x86_64::kDtraceTypeReference:
7381 case x86_64::kDtraceProbe:
7382 // nothing to fix up
7383 break;
7384 }
7385 }
7386
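// In relocatable output the pointer cases above store different values in the slot: an
// external relocation leaves only the addend (the symbol supplies the address at final
// link time), while an internal relocation stores target address plus addend. A compact
// sketch of that rule, using a hypothetical helper rather than ld64 API:
#if 0   // illustrative sketch only, kept out of the build
#include <stdint.h>

static uint64_t pointerSlotValue(bool externalReloc, uint64_t targetAddr, int64_t addend)
{
    if ( externalReloc )
        return (uint64_t)addend;        // extern reloc: address resolved via the symbol later
    else
        return targetAddr + addend;     // local reloc: address already known in this file
}
#endif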
7387 template <>
7388 void Writer<ppc>::fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7389 {
7390 fixUpReference_powerpc(ref, inAtom, buffer, true);
7391 }
7392
7393 template <>
7394 void Writer<ppc64>::fixUpReferenceFinal(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7395 {
7396 fixUpReference_powerpc(ref, inAtom, buffer, true);
7397 }
7398
7399 template <>
7400 void Writer<ppc>::fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7401 {
7402 fixUpReference_powerpc(ref, inAtom, buffer, false);
7403 }
7404
7405 template <>
7406 void Writer<ppc64>::fixUpReferenceRelocatable(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[]) const
7407 {
7408 fixUpReference_powerpc(ref, inAtom, buffer, false);
7409 }
7410
7411 //
7412 // ppc and ppc64 are mostly the same, so they share a template specialization
7413 //
7414 template <typename A>
7415 void Writer<A>::fixUpReference_powerpc(const ObjectFile::Reference* ref, const ObjectFile::Atom* inAtom, uint8_t buffer[], bool finalLinkedImage) const
7416 {
7417 uint32_t instruction;
7418 uint32_t newInstruction;
7419 int64_t displacement;
7420 uint64_t targetAddr = 0;
7421 uint64_t picBaseAddr;
7422 uint16_t instructionLowHalf;
7423 uint16_t instructionHighHalf;
7424 uint32_t* fixUp = (uint32_t*)&buffer[ref->getFixUpOffset()];
7425 pint_t* fixUpPointer = (pint_t*)&buffer[ref->getFixUpOffset()];
7426 bool relocateableExternal = false;
7427 const int64_t picbase_twoGigLimit = 0x80000000;
7428
7429 if ( ref->getTargetBinding() != ObjectFile::Reference::kDontBind ) {
7430 targetAddr = ref->getTarget().getAddress() + ref->getTargetOffset();
7431 if ( finalLinkedImage )
7432 relocateableExternal = (relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal);
7433 else
7434 relocateableExternal = this->makesExternalRelocatableReference(ref->getTarget());
7435 }
7436
7437 switch ( (typename A::ReferenceKinds)(ref->getKind()) ) {
7438 case A::kNoFixUp:
7439 case A::kFollowOn:
7440 case A::kGroupSubordinate:
7441 // do nothing
7442 break;
7443 case A::kPointerWeakImport:
7444 case A::kPointer:
7445 {
7446 //fprintf(stderr, "fixUpReferenceFinal: %s reference to %s\n", this->getDisplayName(), target.getDisplayName());
7447 if ( finalLinkedImage && (((SectionInfo*)inAtom->getSection())->fAllLazyPointers
7448 || ((SectionInfo*)inAtom->getSection())->fAllLazyDylibPointers) ) {
7449 switch (ref->getTarget().getDefinitionKind()) {
7450 case ObjectFile::Atom::kExternalDefinition:
7451 case ObjectFile::Atom::kExternalWeakDefinition:
7452 // prebound lazy pointer to another dylib ==> pointer contains zero
7453 P::setP(*fixUpPointer, 0);
7454 break;
7455 case ObjectFile::Atom::kTentativeDefinition:
7456 case ObjectFile::Atom::kRegularDefinition:
7457 case ObjectFile::Atom::kWeakDefinition:
7458 case ObjectFile::Atom::kAbsoluteSymbol:
7459 // prebound lazy pointer to within this dylib ==> pointer contains address
7460 P::setP(*fixUpPointer, targetAddr);
7461 break;
7462 }
7463 }
7464 else if ( !finalLinkedImage && ((SectionInfo*)inAtom->getSection())->fAllNonLazyPointers ) {
7465 // if INDIRECT_SYMBOL_LOCAL, the content is the target address, else it is zero
7466 if ( this->indirectSymbolInRelocatableIsLocal(ref) )
7467 P::setP(*fixUpPointer, targetAddr);
7468 else
7469 P::setP(*fixUpPointer, 0);
7470 }
7471 else if ( relocateableExternal ) {
7472 if ( fOptions.prebind() ) {
7473 switch (ref->getTarget().getDefinitionKind()) {
7474 case ObjectFile::Atom::kExternalDefinition:
7475 case ObjectFile::Atom::kExternalWeakDefinition:
7476 // prebound external relocation ==> pointer contains addend
7477 P::setP(*fixUpPointer, ref->getTargetOffset());
7478 break;
7479 case ObjectFile::Atom::kTentativeDefinition:
7480 case ObjectFile::Atom::kRegularDefinition:
7481 case ObjectFile::Atom::kWeakDefinition:
7482 // prebound external relocation to internal atom ==> pointer contains target address + addend
7483 P::setP(*fixUpPointer, targetAddr);
7484 break;
7485 case ObjectFile::Atom::kAbsoluteSymbol:
7486 break;
7487 }
7488 }
7489 else {
7490 // external relocation ==> pointer contains addend
7491 P::setP(*fixUpPointer, ref->getTargetOffset());
7492 }
7493 }
7494 else {
7495 // internal relocation
7496 if ( finalLinkedImage || (ref->getTarget().getDefinitionKind() != ObjectFile::Atom::kTentativeDefinition) ) {
7497 // pointer contains target address
7498 //printf("Atom::fixUpReference_powerpc() target.name=%s, target.address=0x%08llX\n", ref->getTarget().getDisplayName(), targetAddr);
7499 P::setP(*fixUpPointer, targetAddr);
7500 }
7501 else {
7502 // pointer contains addend
7503 P::setP(*fixUpPointer, ref->getTargetOffset());
7504 }
7505 }
7506 }
7507 break;
7508 case A::kPointerDiff64:
7509 P::setP(*fixUpPointer, targetAddr - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset()) );
7510 break;
7511 case A::kPointerDiff32:
7512 P::E::set32(*fixUp, targetAddr - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset()) );
7513 break;
7514 case A::kPointerDiff16:
7515 P::E::set16(*((uint16_t*)fixUp), targetAddr - (ref->getFromTarget().getAddress() + ref->getFromTargetOffset()) );
7516 break;
7517 case A::kDtraceProbeSite:
7518 if ( finalLinkedImage ) {
7519 // change call site to a NOP
7520 BigEndian::set32(*fixUp, 0x60000000);
7521 }
7522 else {
7523 // set bl instruction to branch to address zero in .o file
7524 int64_t displacement = ref->getTargetOffset() - (inAtom->getAddress() + ref->getFixUpOffset());
7525 instruction = BigEndian::get32(*fixUp);
7526 newInstruction = (instruction & 0xFC000003) | ((uint32_t)displacement & 0x03FFFFFC);
7527 BigEndian::set32(*fixUp, newInstruction);
7528 }
7529 break;
7530 case A::kDtraceIsEnabledSite:
7531 if ( finalLinkedImage ) {
7532 // change call site to a li r3,0
7533 BigEndian::set32(*fixUp, 0x38600000);
7534 }
7535 else {
7536 // set bl instruction to branch to address zero in .o file
7537 int64_t displacement = ref->getTargetOffset() - (inAtom->getAddress() + ref->getFixUpOffset());
7538 instruction = BigEndian::get32(*fixUp);
7539 newInstruction = (instruction & 0xFC000003) | ((uint32_t)displacement & 0x03FFFFFC);
7540 BigEndian::set32(*fixUp, newInstruction);
7541 }
7542 break;
7543 case A::kBranch24WeakImport:
7544 case A::kBranch24:
7545 {
7546 //fprintf(stderr, "bl fixup to %s at 0x%08llX, ", target.getDisplayName(), target.getAddress());
7547 int64_t displacement = targetAddr - (inAtom->getAddress() + ref->getFixUpOffset());
7548 if ( relocateableExternal ) {
7549 // doing "ld -r" to an external symbol
7550 // the mach-o way of encoding this is that the bl instruction's target addr is the offset into the target
7551 displacement -= ref->getTarget().getAddress();
7552 }
7553 else {
7554 const int64_t bl_eightMegLimit = 0x00FFFFFF;
7555 if ( (displacement > bl_eightMegLimit) || (displacement < (-bl_eightMegLimit)) ) {
7556 //fprintf(stderr, "bl out of range (%lld max is +/-16M) from %s in %s to %s in %s\n", displacement, this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7557 throwf("bl out of range (%lld max is +/-16M) from %s at 0x%08llX in %s of %s to %s at 0x%08llX in %s of %s",
7558 displacement, inAtom->getDisplayName(), inAtom->getAddress(), inAtom->getSectionName(), inAtom->getFile()->getPath(),
7559 ref->getTarget().getDisplayName(), ref->getTarget().getAddress(), ref->getTarget().getSectionName(), ref->getTarget().getFile()->getPath());
7560 }
7561 }
7562 instruction = BigEndian::get32(*fixUp);
7563 newInstruction = (instruction & 0xFC000003) | ((uint32_t)displacement & 0x03FFFFFC);
7564 //fprintf(stderr, "bl fixup: 0x%08X -> 0x%08X\n", instruction, newInstruction);
7565 BigEndian::set32(*fixUp, newInstruction);
7566 }
7567 break;
7568 case A::kBranch14:
7569 {
7570 int64_t displacement = targetAddr - (inAtom->getAddress() + ref->getFixUpOffset());
7571 if ( relocateableExternal ) {
7572 // doing "ld -r" to an external symbol
7573 // the mach-o way of encoding this is that the bl instruction's target addr is the offset into the target
7574 displacement -= ref->getTarget().getAddress();
7575 }
7576 const int64_t b_sixtyFourKiloLimit = 0x0000FFFF;
7577 if ( (displacement > b_sixtyFourKiloLimit) || (displacement < (-b_sixtyFourKiloLimit)) ) {
7578 //fprintf(stderr, "bl out of range (%lld max is +/-16M) from %s in %s to %s in %s\n", displacement, this->getDisplayName(), this->getFile()->getPath(), target.getDisplayName(), target.getFile()->getPath());
7579 throwf("bcc out of range (%lld max is +/-64K) from %s in %s to %s in %s",
7580 displacement, inAtom->getDisplayName(), inAtom->getFile()->getPath(),
7581 ref->getTarget().getDisplayName(), ref->getTarget().getFile()->getPath());
7582 }
7583
7584 //fprintf(stderr, "bcc fixup displacement=0x%08llX, atom.addr=0x%08llX, atom.offset=0x%08X\n", displacement, inAtom->getAddress(), (uint32_t)ref->getFixUpOffset());
7585 instruction = BigEndian::get32(*fixUp);
7586 newInstruction = (instruction & 0xFFFF0003) | ((uint32_t)displacement & 0x0000FFFC);
7587 //fprintf(stderr, "bc fixup: 0x%08X -> 0x%08X\n", instruction, newInstruction);
7588 BigEndian::set32(*fixUp, newInstruction);
7589 }
7590 break;
7591 case A::kPICBaseLow16:
7592 picBaseAddr = ref->getFromTarget().getAddress() + ref->getFromTargetOffset();
7593 displacement = targetAddr - picBaseAddr;
7594 if ( (displacement > picbase_twoGigLimit) || (displacement < (-picbase_twoGigLimit)) )
7595 throw "32-bit pic-base out of range";
7596 instructionLowHalf = (displacement & 0xFFFF);
7597 instruction = BigEndian::get32(*fixUp);
7598 newInstruction = (instruction & 0xFFFF0000) | instructionLowHalf;
7599 BigEndian::set32(*fixUp, newInstruction);
7600 break;
7601 case A::kPICBaseLow14:
7602 picBaseAddr = ref->getFromTarget().getAddress() + ref->getFromTargetOffset();
7603 displacement = targetAddr - picBaseAddr;
7604 if ( (displacement > picbase_twoGigLimit) || (displacement < (-picbase_twoGigLimit)) )
7605 throw "32-bit pic-base out of range";
7606 if ( (displacement & 0x3) != 0 )
7607 throwf("bad offset (0x%08X) for lo14 instruction pic-base fix-up", (uint32_t)displacement);
7608 instructionLowHalf = (displacement & 0xFFFC);
7609 instruction = BigEndian::get32(*fixUp);
7610 newInstruction = (instruction & 0xFFFF0003) | instructionLowHalf;
7611 BigEndian::set32(*fixUp, newInstruction);
7612 break;
7613 case A::kPICBaseHigh16:
7614 picBaseAddr = ref->getFromTarget().getAddress() + ref->getFromTargetOffset();
7615 displacement = targetAddr - picBaseAddr;
7616 if ( (displacement > picbase_twoGigLimit) || (displacement < (-picbase_twoGigLimit)) )
7617 throw "32-bit pic-base out of range";
7618 instructionLowHalf = displacement >> 16;
7619 if ( (displacement & 0x00008000) != 0 )
7620 ++instructionLowHalf;
7621 instruction = BigEndian::get32(*fixUp);
7622 newInstruction = (instruction & 0xFFFF0000) | instructionLowHalf;
7623 BigEndian::set32(*fixUp, newInstruction);
7624 break;
7625 case A::kAbsLow16:
7626 if ( relocateableExternal && !finalLinkedImage )
7627 targetAddr -= ref->getTarget().getAddress();
7628 instructionLowHalf = (targetAddr & 0xFFFF);
7629 instruction = BigEndian::get32(*fixUp);
7630 newInstruction = (instruction & 0xFFFF0000) | instructionLowHalf;
7631 BigEndian::set32(*fixUp, newInstruction);
7632 break;
7633 case A::kAbsLow14:
7634 if ( relocateableExternal && !finalLinkedImage )
7635 targetAddr -= ref->getTarget().getAddress();
7636 if ( (targetAddr & 0x3) != 0 )
7637 throw "bad address for absolute lo14 instruction fix-up";
7638 instructionLowHalf = (targetAddr & 0xFFFF);
7639 instruction = BigEndian::get32(*fixUp);
7640 newInstruction = (instruction & 0xFFFF0003) | instructionLowHalf;
7641 BigEndian::set32(*fixUp, newInstruction);
7642 break;
7643 case A::kAbsHigh16:
7644 if ( relocateableExternal ) {
7645 if ( finalLinkedImage ) {
7646 switch (ref->getTarget().getDefinitionKind()) {
7647 case ObjectFile::Atom::kExternalDefinition:
7648 case ObjectFile::Atom::kExternalWeakDefinition:
7649 throwf("absolute address to symbol %s in a different linkage unit not supported", ref->getTargetName());
7650 break;
7651 case ObjectFile::Atom::kTentativeDefinition:
7652 case ObjectFile::Atom::kRegularDefinition:
7653 case ObjectFile::Atom::kWeakDefinition:
7654 // use target address
7655 break;
7656 case ObjectFile::Atom::kAbsoluteSymbol:
7657 targetAddr = ref->getTarget().getSectionOffset();
7658 break;
7659 }
7660 }
7661 else {
7662 targetAddr -= ref->getTarget().getAddress();
7663 }
7664 }
7665 instructionHighHalf = (targetAddr >> 16);
7666 instruction = BigEndian::get32(*fixUp);
7667 newInstruction = (instruction & 0xFFFF0000) | instructionHighHalf;
7668 BigEndian::set32(*fixUp, newInstruction);
7669 break;
7670 case A::kAbsHigh16AddLow:
7671 if ( relocateableExternal ) {
7672 if ( finalLinkedImage ) {
7673 switch (ref->getTarget().getDefinitionKind()) {
7674 case ObjectFile::Atom::kExternalDefinition:
7675 case ObjectFile::Atom::kExternalWeakDefinition:
7676 throwf("absolute address to symbol %s in a different linkage unit not supported", ref->getTargetName());
7677 break;
7678 case ObjectFile::Atom::kTentativeDefinition:
7679 case ObjectFile::Atom::kRegularDefinition:
7680 case ObjectFile::Atom::kWeakDefinition:
7681 // use target address
7682 break;
7683 case ObjectFile::Atom::kAbsoluteSymbol:
7684 targetAddr = ref->getTarget().getSectionOffset();
7685 break;
7686 }
7687 }
7688 else {
7689 targetAddr -= ref->getTarget().getAddress();
7690 }
7691 }
7692 if ( targetAddr & 0x00008000 )
7693 targetAddr += 0x00010000;
7694 instruction = BigEndian::get32(*fixUp);
7695 newInstruction = (instruction & 0xFFFF0000) | (targetAddr >> 16);
7696 BigEndian::set32(*fixUp, newInstruction);
7697 break;
7698 case A::kDtraceTypeReference:
7699 case A::kDtraceProbe:
7700 // nothing to fix up
7701 break;
7702 }
7703 }
7704
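// The PowerPC cases above patch displacements into fixed-width instruction fields: a bl/b
// keeps its opcode and AA/LK bits (mask 0xFC000003) and receives a 24-bit, 4-byte-aligned
// displacement, and the PICBaseHigh16/AbsHigh16AddLow cases bump the high half by one when
// bit 15 of the low half is set, because a paired addi sign-extends that low half. A minimal
// sketch of both computations with standalone helpers (not ld64's types):
#if 0   // illustrative sketch only, kept out of the build
#include <stdint.h>
#include <assert.h>

static uint32_t patchBranch24(uint32_t instruction, int64_t displacement)
{
    // keep the top 6 opcode bits plus AA/LK, insert the low 24 bits of the displacement
    return (instruction & 0xFC000003) | ((uint32_t)displacement & 0x03FFFFFC);
}

static void splitHighLow(uint32_t value, uint16_t* high, uint16_t* low)
{
    *low  = value & 0xFFFF;
    *high = value >> 16;
    if ( (value & 0x00008000) != 0 )
        *high += 1;             // compensate for addi sign-extending the low half
}

int main()
{
    assert(patchBranch24(0x48000001, 0x1234) == 0x48001235);    // bl with displacement +0x1234
    uint16_t hi, lo;
    splitHighLow(0x00018000, &hi, &lo);
    assert(hi == 0x0002 && lo == 0x8000);   // (2 << 16) + (int16_t)0x8000 == 0x18000
    return 0;
}
#endif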
7705 template <>
7706 bool Writer<ppc>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref)
7707 {
7708 uint8_t kind = ref->getKind();
7709 switch ( (ppc::ReferenceKinds)kind ) {
7710 case ppc::kNoFixUp:
7711 case ppc::kFollowOn:
7712 case ppc::kGroupSubordinate:
7713 case ppc::kPointer:
7714 case ppc::kPointerWeakImport:
7715 case ppc::kPointerDiff16:
7716 case ppc::kPointerDiff32:
7717 case ppc::kPointerDiff64:
7718 case ppc::kDtraceProbe:
7719 case ppc::kDtraceProbeSite:
7720 case ppc::kDtraceIsEnabledSite:
7721 case ppc::kDtraceTypeReference:
7722 // these are never used to call external functions
7723 return false;
7724 case ppc::kBranch24:
7725 case ppc::kBranch24WeakImport:
7726 case ppc::kBranch14:
7727 // these are used to call external functions
7728 return true;
7729 case ppc::kPICBaseLow16:
7730 case ppc::kPICBaseLow14:
7731 case ppc::kPICBaseHigh16:
7732 case ppc::kAbsLow16:
7733 case ppc::kAbsLow14:
7734 case ppc::kAbsHigh16:
7735 case ppc::kAbsHigh16AddLow:
7736 // these are only used to call external functions
7737 // in -mlong-branch stubs
7738 switch ( ref->getTarget().getDefinitionKind() ) {
7739 case ObjectFile::Atom::kExternalDefinition:
7740 case ObjectFile::Atom::kExternalWeakDefinition:
7741 // if the .o file this atom came from has long-branch stubs,
7742 // then assume these instructions are in a stub.
7743 // Otherwise, these are a direct reference to something (maybe a runtime text reloc)
7744 return ( inAtom->getFile()->hasLongBranchStubs() );
7745 case ObjectFile::Atom::kTentativeDefinition:
7746 case ObjectFile::Atom::kRegularDefinition:
7747 case ObjectFile::Atom::kWeakDefinition:
7748 case ObjectFile::Atom::kAbsoluteSymbol:
7749 return false;
7750 }
7751 break;
7752 }
7753 return false;
7754 }
7755
7756 template <>
7757 bool Writer<arm>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref)
7758 {
7759 uint8_t kind = ref->getKind();
7760 switch ( (arm::ReferenceKinds)kind ) {
7761 case arm::kBranch24:
7762 case arm::kBranch24WeakImport:
7763 return true;
7764 case arm::kThumbBranch22:
7765 case arm::kThumbBranch22WeakImport:
7766 fHasThumbBranches = true;
7767 return true;
7768 case arm::kNoFixUp:
7769 case arm::kFollowOn:
7770 case arm::kGroupSubordinate:
7771 case arm::kPointer:
7772 case arm::kReadOnlyPointer:
7773 case arm::kPointerWeakImport:
7774 case arm::kPointerDiff:
7775 case arm::kDtraceProbe:
7776 case arm::kDtraceProbeSite:
7777 case arm::kDtraceIsEnabledSite:
7778 case arm::kDtraceTypeReference:
7779 case arm::kPointerDiff12:
7780 return false;
7781 }
7782 return false;
7783 }
7784
7785 template <>
7786 bool Writer<ppc64>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref)
7787 {
7788 uint8_t kind = ref->getKind();
7789 switch ( (ppc64::ReferenceKinds)kind ) {
7790 case ppc::kNoFixUp:
7791 case ppc::kFollowOn:
7792 case ppc::kGroupSubordinate:
7793 case ppc::kPointer:
7794 case ppc::kPointerWeakImport:
7795 case ppc::kPointerDiff16:
7796 case ppc::kPointerDiff32:
7797 case ppc::kPointerDiff64:
7798 case ppc::kPICBaseLow16:
7799 case ppc::kPICBaseLow14:
7800 case ppc::kPICBaseHigh16:
7801 case ppc::kAbsLow16:
7802 case ppc::kAbsLow14:
7803 case ppc::kAbsHigh16:
7804 case ppc::kAbsHigh16AddLow:
7805 case ppc::kDtraceProbe:
7806 case ppc::kDtraceProbeSite:
7807 case ppc::kDtraceIsEnabledSite:
7808 case ppc::kDtraceTypeReference:
7809 // these are never used to call external functions
7810 return false;
7811 case ppc::kBranch24:
7812 case ppc::kBranch24WeakImport:
7813 case ppc::kBranch14:
7814 // these are used to call external functions
7815 return true;
7816 }
7817 return false;
7818 }
7819
7820 template <>
7821 bool Writer<x86>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref)
7822 {
7823 uint8_t kind = ref->getKind();
7824 return (kind == x86::kPCRel32 || kind == x86::kPCRel32WeakImport);
7825 }
7826
7827 template <>
7828 bool Writer<x86_64>::stubableReference(const ObjectFile::Atom* inAtom, const ObjectFile::Reference* ref)
7829 {
7830 uint8_t kind = ref->getKind();
7831 return (kind == x86_64::kBranchPCRel32 || kind == x86_64::kBranchPCRel32WeakImport);
7832 }
7833
7834
7835 template <>
7836 bool Writer<ppc>::weakImportReferenceKind(uint8_t kind)
7837 {
7838 return (kind == ppc::kBranch24WeakImport || kind == ppc::kPointerWeakImport);
7839 }
7840
7841 template <>
7842 bool Writer<ppc64>::weakImportReferenceKind(uint8_t kind)
7843 {
7844 return (kind == ppc64::kBranch24WeakImport || kind == ppc64::kPointerWeakImport);
7845 }
7846
7847 template <>
7848 bool Writer<x86>::weakImportReferenceKind(uint8_t kind)
7849 {
7850 return (kind == x86::kPCRel32WeakImport || kind == x86::kPointerWeakImport);
7851 }
7852
7853 template <>
7854 bool Writer<x86_64>::weakImportReferenceKind(uint8_t kind)
7855 {
7856 switch ( kind ) {
7857 case x86_64::kPointerWeakImport:
7858 case x86_64::kBranchPCRel32WeakImport:
7859 case x86_64::kPCRel32GOTWeakImport:
7860 case x86_64::kPCRel32GOTLoadWeakImport:
7861 return true;
7862 }
7863 return false;
7864 }
7865
7866 template <>
7867 bool Writer<arm>::weakImportReferenceKind(uint8_t kind)
7868 {
7869 return (kind == arm::kBranch24WeakImport || kind == arm::kThumbBranch22WeakImport ||
7870 kind == arm::kPointerWeakImport);
7871 }
7872
7873 template <>
7874 bool Writer<ppc>::GOTReferenceKind(uint8_t kind)
7875 {
7876 return false;
7877 }
7878
7879 template <>
7880 bool Writer<ppc64>::GOTReferenceKind(uint8_t kind)
7881 {
7882 return false;
7883 }
7884
7885 template <>
7886 bool Writer<x86>::GOTReferenceKind(uint8_t kind)
7887 {
7888 return false;
7889 }
7890
7891 template <>
7892 bool Writer<x86_64>::GOTReferenceKind(uint8_t kind)
7893 {
7894 switch ( kind ) {
7895 case x86_64::kPCRel32GOT:
7896 case x86_64::kPCRel32GOTWeakImport:
7897 case x86_64::kPCRel32GOTLoad:
7898 case x86_64::kPCRel32GOTLoadWeakImport:
7899 case x86_64::kGOTNoFixUp:
7900 return true;
7901 }
7902 return false;
7903 }
7904
7905 template <>
7906 bool Writer<arm>::GOTReferenceKind(uint8_t kind)
7907 {
7908 return false;
7909 }
7910
7911 template <>
7912 bool Writer<ppc>::optimizableGOTReferenceKind(uint8_t kind)
7913 {
7914 return false;
7915 }
7916
7917 template <>
7918 bool Writer<ppc64>::optimizableGOTReferenceKind(uint8_t kind)
7919 {
7920 return false;
7921 }
7922
7923 template <>
7924 bool Writer<x86>::optimizableGOTReferenceKind(uint8_t kind)
7925 {
7926 return false;
7927 }
7928
7929 template <>
7930 bool Writer<x86_64>::optimizableGOTReferenceKind(uint8_t kind)
7931 {
7932 switch ( kind ) {
7933 case x86_64::kPCRel32GOTLoad:
7934 case x86_64::kPCRel32GOTLoadWeakImport:
7935 return true;
7936 }
7937 return false;
7938 }
7939
7940 template <>
7941 bool Writer<arm>::optimizableGOTReferenceKind(uint8_t kind)
7942 {
7943 return false;
7944 }
7945
7946 // 64-bit architectures never need a module table; 32-bit ones sometimes do for backwards compatibility
7947 template <typename A> bool Writer<A>::needsModuleTable() {return fOptions.needsModuleTable(); }
7948 template <> bool Writer<ppc64>::needsModuleTable() { return false; }
7949 template <> bool Writer<x86_64>::needsModuleTable() { return false; }
7950
7951
7952 template <typename A>
7953 void Writer<A>::optimizeDylibReferences()
7954 {
7955 //fprintf(stderr, "original ordinals table:\n");
7956 //for (std::map<class ObjectFile::Reader*, uint32_t>::iterator it = fLibraryToOrdinal.begin(); it != fLibraryToOrdinal.end(); ++it) {
7957 // fprintf(stderr, "%u <== %p/%s\n", it->second, it->first, it->first->getPath());
7958 //}
7959 // find unused dylibs that can be removed
7960 std::map<uint32_t, ObjectFile::Reader*> ordinalToReader;
7961 std::map<ObjectFile::Reader*, ObjectFile::Reader*> readerAliases;
7962 for (std::map<ObjectFile::Reader*, uint32_t>::iterator it = fLibraryToOrdinal.begin(); it != fLibraryToOrdinal.end(); ++it) {
7963 ObjectFile::Reader* reader = it->first;
7964 std::map<ObjectFile::Reader*, ObjectFile::Reader*>::iterator aliasPos = fLibraryAliases.find(reader);
7965 if ( aliasPos != fLibraryAliases.end() ) {
7966 // already noticed that this reader has same install name as another reader
7967 readerAliases[reader] = aliasPos->second;
7968 }
7969 else if ( !reader->providedExportAtom() && (reader->implicitlyLinked() || reader->deadStrippable() || fOptions.deadStripDylibs()) ) {
7970 // this reader can be optimized away
7971 it->second = 0xFFFFFFFF;
7972 typename std::map<class ObjectFile::Reader*, class DylibLoadCommandsAtom<A>* >::iterator pos = fLibraryToLoadCommand.find(reader);
7973 if ( pos != fLibraryToLoadCommand.end() )
7974 pos->second->optimizeAway();
7975 }
7976 else {
7977 // mark this reader as using its ordinal
7978 std::map<uint32_t, ObjectFile::Reader*>::iterator pos = ordinalToReader.find(it->second);
7979 if ( pos == ordinalToReader.end() )
7980 ordinalToReader[it->second] = reader;
7981 else
7982 readerAliases[reader] = pos->second;
7983 }
7984 }
7985 // renumber ordinals (depends on iterator walking in ordinal order)
7986 // all LC_LAZY_LOAD_DYLIB load commands must have highest ordinals
7987 uint32_t newOrdinal = 0;
7988 for (std::map<uint32_t, ObjectFile::Reader*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
7989 if ( it->first <= fLibraryToOrdinal.size() ) {
7990 if ( ! it->second->isLazyLoadedDylib() )
7991 fLibraryToOrdinal[it->second] = ++newOrdinal;
7992 }
7993 }
7994 for (std::map<uint32_t, ObjectFile::Reader*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
7995 if ( it->first <= fLibraryToOrdinal.size() ) {
7996 if ( it->second->isLazyLoadedDylib() ) {
7997 fLibraryToOrdinal[it->second] = ++newOrdinal;
7998 }
7999 }
8000 }
8001
8002 // <rdar://problem/5504954> linker does not error when dylib ordinal exceeds 250
8003 if ( (newOrdinal >= MAX_LIBRARY_ORDINAL) && (fOptions.nameSpace() == Options::kTwoLevelNameSpace) )
8004 throwf("two level namespace mach-o files can link with at most %d dylibs, this link would use %d dylibs", MAX_LIBRARY_ORDINAL, newOrdinal);
8005
8006 // add aliases (e.g. -lm points to libSystem.dylib)
8007 for (std::map<ObjectFile::Reader*, ObjectFile::Reader*>::iterator it = readerAliases.begin(); it != readerAliases.end(); ++it) {
8008 fLibraryToOrdinal[it->first] = fLibraryToOrdinal[it->second];
8009 }
8010
8011 //fprintf(stderr, "new ordinals table:\n");
8012 //for (std::map<class ObjectFile::Reader*, uint32_t>::iterator it = fLibraryToOrdinal.begin(); it != fLibraryToOrdinal.end(); ++it) {
8013 // fprintf(stderr, "%u <== %p/%s\n", it->second, it->first, it->first->getPath());
8014 //}
8015 }
8016
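// optimizeDylibReferences() above renumbers the surviving dylibs densely in old-ordinal
// order, placing lazy-loaded dylibs after all others and folding aliases onto the ordinal
// of the dylib they duplicate. A minimal sketch of the renumbering pass, using a plain
// struct in place of ObjectFile::Reader:
#if 0   // illustrative sketch only, kept out of the build
#include <stdint.h>
#include <map>

struct Dylib { bool lazyLoaded; bool unused; };

static uint32_t renumberOrdinals(const std::map<uint32_t, Dylib*>& byOldOrdinal,
                                 std::map<Dylib*, uint32_t>& newOrdinal)
{
    uint32_t next = 0;
    // regular dylibs first, keeping their original relative order
    for (std::map<uint32_t, Dylib*>::const_iterator it = byOldOrdinal.begin(); it != byOldOrdinal.end(); ++it)
        if ( !it->second->unused && !it->second->lazyLoaded )
            newOrdinal[it->second] = ++next;
    // LC_LAZY_LOAD_DYLIB entries get the highest ordinals
    for (std::map<uint32_t, Dylib*>::const_iterator it = byOldOrdinal.begin(); it != byOldOrdinal.end(); ++it)
        if ( !it->second->unused && it->second->lazyLoaded )
            newOrdinal[it->second] = ++next;
    return next;    // caller checks this against MAX_LIBRARY_ORDINAL for two-level namespace
}
#endif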
8017
8018 template <>
8019 void Writer<arm>::scanForAbsoluteReferences()
8020 {
8021 // arm codegen never has absolute references. FIXME: Is this correct?
8022 }
8023
8024 template <>
8025 void Writer<x86_64>::scanForAbsoluteReferences()
8026 {
8027 // x86_64 codegen never has absolute references
8028 }
8029
8030 template <>
8031 void Writer<x86>::scanForAbsoluteReferences()
8032 {
8033 // when linking -pie, verify there is no absolute addressing, unless -read_only_relocs is also used
8034 if ( fOptions.positionIndependentExecutable() && !fOptions.allowTextRelocs() ) {
8035 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8036 ObjectFile::Atom* atom = *it;
8037 if ( atom->getContentType() == ObjectFile::Atom::kStub )
8038 continue;
8039 if ( atom->getContentType() == ObjectFile::Atom::kStubHelper )
8040 continue;
8041 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8042 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8043 ObjectFile::Reference* ref = *rit;
8044 switch (ref->getKind()) {
8045 case x86::kAbsolute32:
8046 throwf("cannot link -pie: -mdynamic-no-pic codegen found in %s from %s", atom->getDisplayName(), atom->getFile()->getPath());
8047 return;
8048 }
8049 }
8050 }
8051 }
8052 }
8053
8054 template <>
8055 void Writer<ppc>::scanForAbsoluteReferences()
8056 {
8057 // when linking -pie, verify there is no absolute addressing, unless -read_only_relocs is also used
8058 if ( fOptions.positionIndependentExecutable() && !fOptions.allowTextRelocs() ) {
8059 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8060 ObjectFile::Atom* atom = *it;
8061 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8062 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8063 ObjectFile::Reference* ref = *rit;
8064 switch (ref->getKind()) {
8065 case ppc::kAbsLow16:
8066 case ppc::kAbsLow14:
8067 case ppc::kAbsHigh16:
8068 case ppc::kAbsHigh16AddLow:
8069 throwf("cannot link -pie: -mdynamic-no-pic codegen found in %s from %s", atom->getDisplayName(), atom->getFile()->getPath());
8070 return;
8071 }
8072 }
8073 }
8074 }
8075 }
8076
8077
8078 // for ppc64 look for any -mdynamic-no-pic codegen
8079 template <>
8080 void Writer<ppc64>::scanForAbsoluteReferences()
8081 {
8082 // only do this for main executable
8083 if ( mightNeedPadSegment() && (fPageZeroAtom != NULL) ) {
8084 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8085 ObjectFile::Atom* atom = *it;
8086 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8087 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8088 ObjectFile::Reference* ref = *rit;
8089 switch (ref->getKind()) {
8090 case ppc64::kAbsLow16:
8091 case ppc64::kAbsLow14:
8092 case ppc64::kAbsHigh16:
8093 case ppc64::kAbsHigh16AddLow:
8094 //fprintf(stderr, "found -mdynamic-no-pic codegen in %s in %s\n", atom->getDisplayName(), atom->getFile()->getPath());
8095 // shrink page-zero and add pad segment to compensate
8096 fPadSegmentInfo = new SegmentInfo(4096);
8097 strcpy(fPadSegmentInfo->fName, "__4GBFILL");
8098 fPageZeroAtom->setSize(0x1000);
8099 return;
8100 }
8101 }
8102 }
8103 }
8104 }
8105
8106
8107 template <typename A>
8108 void Writer<A>::insertDummyStubs()
8109 {
8110 // only needed for x86
8111 }
8112
8113 template <>
8114 void Writer<x86>::insertDummyStubs()
8115 {
8116 // any 5-byte stub that crosses a 32-byte cache line may be updated incorrectly
8117 std::vector<class StubAtom<x86>*> betterStubs;
8118 for (std::vector<class StubAtom<x86>*>::iterator it=fAllSynthesizedStubs.begin(); it != fAllSynthesizedStubs.end(); it++) {
8119 switch (betterStubs.size() % 64 ) {
8120 case 12:// stub would occupy 0x3C->0x41
8121 case 25:// stub would occupy 0x7D->0x82
8122 case 38:// stub would occupy 0xBE->0xC3
8123 case 51:// stub would occupy 0xFF->0x04
8124 betterStubs.push_back(new StubAtom<x86>(*this, *((ObjectFile::Atom*)NULL), false)); //pad with dummy stub
8125 break;
8126 }
8127 betterStubs.push_back(*it);
8128 }
8129 // replace
8130 fAllSynthesizedStubs.clear();
8131 fAllSynthesizedStubs.insert(fAllSynthesizedStubs.begin(), betterStubs.begin(), betterStubs.end());
8132 }
8133
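// The four magic indices above follow from the stub geometry: the self-modifying i386
// stubs are 5 bytes, and within each 64-stub period (320 bytes) exactly the stubs at
// indices 12, 25, 38 and 51 straddle a 64-byte-aligned boundary (0x40, 0x80, 0xC0, 0x100
// in the ranges quoted in the comments above), so a dummy stub is inserted to push the
// real stub past the boundary. A sketch that recomputes those indices with plain arithmetic:
#if 0   // illustrative sketch only, kept out of the build
#include <stdio.h>

int main()
{
    const unsigned stubSize = 5, blockSize = 64, period = 64;   // 64 stubs * 5 bytes = 320 bytes
    for (unsigned i = 0; i < period; ++i) {
        unsigned first = i * stubSize;
        unsigned last  = first + stubSize - 1;
        if ( (first / blockSize) != (last / blockSize) )
            printf("stub %2u spans 0x%02X-0x%02X\n", i, first, last);
    }
    // prints stubs 12, 25, 38 and 51 -- the four cases padded above
    return 0;
}
#endif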
8134
8135 template <typename A>
8136 void Writer<A>::synthesizeKextGOT(const std::vector<class ObjectFile::Atom*>& existingAtoms,
8137 std::vector<class ObjectFile::Atom*>& newAtoms)
8138 {
8139 // walk every atom and reference
8140 for (std::vector<ObjectFile::Atom*>::const_iterator it=existingAtoms.begin(); it != existingAtoms.end(); it++) {
8141 const ObjectFile::Atom* atom = *it;
8142 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8143 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8144 ObjectFile::Reference* ref = *rit;
8145 switch ( ref->getTargetBinding()) {
8146 case ObjectFile::Reference::kUnboundByName:
8147 case ObjectFile::Reference::kDontBind:
8148 break;
8149 case ObjectFile::Reference::kBoundByName:
8150 case ObjectFile::Reference::kBoundDirectly:
8151 ObjectFile::Atom& target = ref->getTarget();
8152 // create GOT slots (non-lazy pointers) as needed
8153 if ( this->GOTReferenceKind(ref->getKind()) ) {
8154 bool useGOT = ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal );
8155 // if this GOT usage cannot be optimized away then make a GOT entry
8156 if ( ! this->optimizableGOTReferenceKind(ref->getKind()) )
8157 useGOT = true;
8158 if ( useGOT ) {
8159 ObjectFile::Atom* nlp = NULL;
8160 std::map<ObjectFile::Atom*,ObjectFile::Atom*>::iterator pos = fGOTMap.find(&target);
8161 if ( pos == fGOTMap.end() ) {
8162 nlp = new NonLazyPointerAtom<A>(*this, target);
8163 fGOTMap[&target] = nlp;
8164 newAtoms.push_back(nlp);
8165 }
8166 else {
8167 nlp = pos->second;
8168 }
8169 // alter reference to use non lazy pointer instead
8170 ref->setTarget(*nlp, ref->getTargetOffset());
8171 }
8172 }
8173 // build map of which symbols need weak importing
8174 if ( (target.getDefinitionKind() == ObjectFile::Atom::kExternalDefinition)
8175 || (target.getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition) ) {
8176 if ( this->weakImportReferenceKind(ref->getKind()) ) {
8177 fWeakImportMap[&target] = true;
8178 }
8179 }
8180 break;
8181 }
8182 }
8183 }
8184 }
8185
8186
8187 template <typename A>
8188 void Writer<A>::synthesizeStubs(const std::vector<class ObjectFile::Atom*>& existingAtoms,
8189 std::vector<class ObjectFile::Atom*>& newAtoms)
8190 {
8191 switch ( fOptions.outputKind() ) {
8192 case Options::kObjectFile:
8193 case Options::kPreload:
8194 // these output kinds never have stubs
8195 return;
8196 case Options::kKextBundle:
8197 // kext bundles only need a synthesized GOT
8198 synthesizeKextGOT(existingAtoms, newAtoms);
8199 return;
8200 case Options::kStaticExecutable:
8201 case Options::kDyld:
8202 case Options::kDynamicLibrary:
8203 case Options::kDynamicBundle:
8204 case Options::kDynamicExecutable:
8205 // try to synthesize stubs for these
8206 break;
8207 }
8208
8209 // walk every atom and reference
8210 for (std::vector<ObjectFile::Atom*>::const_iterator it=existingAtoms.begin(); it != existingAtoms.end(); it++) {
8211 ObjectFile::Atom* atom = *it;
8212 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8213 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8214 ObjectFile::Reference* ref = *rit;
8215 switch ( ref->getTargetBinding()) {
8216 case ObjectFile::Reference::kUnboundByName:
8217 case ObjectFile::Reference::kDontBind:
8218 break;
8219 case ObjectFile::Reference::kBoundByName:
8220 case ObjectFile::Reference::kBoundDirectly:
8221 ObjectFile::Atom& target = ref->getTarget();
8222 // build map of which symbols need weak importing
8223 if ( (target.getDefinitionKind() == ObjectFile::Atom::kExternalDefinition)
8224 || (target.getDefinitionKind() == ObjectFile::Atom::kExternalWeakDefinition) ) {
8225 bool weakImport = this->weakImportReferenceKind(ref->getKind());
8226 // <rdar://problem/5633081> Obj-C Symbols in Leopard Can't Be Weak Linked
8227 // dyld in Mac OS X 10.3 and earlier needs the N_WEAK_REF bit set on undefines to objc symbols
8228 // in dylibs that are weakly linked.
8229 if ( (ref->getKind() == A::kNoFixUp) && (strncmp(target.getName(), ".objc_class_name_", 17) == 0) ) {
8230 typename std::map<class ObjectFile::Reader*, class DylibLoadCommandsAtom<A>* >::iterator pos;
8231 pos = fLibraryToLoadCommand.find(target.getFile());
8232 if ( pos != fLibraryToLoadCommand.end() ) {
8233 if ( pos->second->linkedWeak() )
8234 weakImport = true;
8235 }
8236 }
8237 // <rdar://problem/6186838> -weak_library no longer forces uses to be weak_import
8238 if ( fForcedWeakImportReaders.count(target.getFile()) != 0 ) {
8239 fWeakImportMap[&target] = true;
8240 weakImport = true;
8241 }
8242
8243 std::map<const ObjectFile::Atom*,bool>::iterator pos = fWeakImportMap.find(&target);
8244 if ( pos == fWeakImportMap.end() ) {
8245 // target not in fWeakImportMap, so add
8246 fWeakImportMap[&target] = weakImport;
8247 }
8248 else {
8249 // target in fWeakImportMap, check for weakness mismatch
8250 if ( pos->second != weakImport ) {
8251 // found mismatch
8252 switch ( fOptions.weakReferenceMismatchTreatment() ) {
8253 case Options::kWeakReferenceMismatchError:
8254 throwf("mismatching weak references for symbol: %s", target.getName());
8255 case Options::kWeakReferenceMismatchWeak:
8256 pos->second = true;
8257 break;
8258 case Options::kWeakReferenceMismatchNonWeak:
8259 pos->second = false;
8260 break;
8261 }
8262 }
8263 }
8264 // update if we use a weak_import or a strong import from this dylib
8265 if ( fWeakImportMap[&target] )
8266 fDylibReadersWithWeakImports.insert(target.getFile());
8267 else
8268 fDylibReadersWithNonWeakImports.insert(target.getFile());
8269 }
8270 // create stubs as needed
8271 if ( this->stubableReference(atom, ref)
8272 && (ref->getTargetOffset() == 0)
8273 && this->relocationNeededInFinalLinkedImage(target) == kRelocExternal ) {
8274 ObjectFile::Atom* stub = NULL;
8275 std::map<const ObjectFile::Atom*,ObjectFile::Atom*>::iterator pos = fStubsMap.find(&target);
8276 if ( pos == fStubsMap.end() ) {
8277 bool forLazyDylib = false;
8278 switch ( target.getDefinitionKind() ) {
8279 case ObjectFile::Atom::kRegularDefinition:
8280 case ObjectFile::Atom::kWeakDefinition:
8281 case ObjectFile::Atom::kAbsoluteSymbol:
8282 case ObjectFile::Atom::kTentativeDefinition:
8283 break;
8284 case ObjectFile::Atom::kExternalDefinition:
8285 case ObjectFile::Atom::kExternalWeakDefinition:
8286 if ( target.getFile()->isLazyLoadedDylib() )
8287 forLazyDylib = true;
8288 break;
8289 }
8290 // just-in-time, create GOT slot to dyld_stub_binder
8291 if ( fOptions.makeCompressedDyldInfo() && (fFastStubGOTAtom == NULL) ) {
8292 if ( fDyldCompressedHelperAtom == NULL )
8293 throw "missing symbol dyld_stub_binder";
8294 fFastStubGOTAtom = new NonLazyPointerAtom<A>(*this, *fDyldCompressedHelperAtom);
8295 }
8296 stub = new StubAtom<A>(*this, target, forLazyDylib);
8297 fStubsMap[&target] = stub;
8298 }
8299 else {
8300 stub = pos->second;
8301 }
8302 // alter reference to use stub instead
8303 ref->setTarget(*stub, 0);
8304 }
8305 else if ( fOptions.usingLazyDylibLinking() && target.getFile()->isLazyLoadedDylib() ) {
8306 throwf("illegal reference to %s in lazy loaded dylib from %s in %s",
8307 target.getDisplayName(), atom->getDisplayName(),
8308 atom->getFile()->getPath());
8309 }
8310 // create GOT slots (non-lazy pointers) as needed
8311 else if ( this->GOTReferenceKind(ref->getKind()) ) {
8312 //
8313 bool mustUseGOT = ( this->relocationNeededInFinalLinkedImage(ref->getTarget()) == kRelocExternal );
8314 bool useGOT;
8315 if ( fBiggerThanTwoGigs ) {
8316 // in big images use GOT for all zero fill atoms
8317 // this is just a heuristic and may need to be re-examined
8318 useGOT = mustUseGOT || ref->getTarget().isZeroFill();
8319 }
8320 else {
8321 // < 2GB image so remove all GOT entries that we can
8322 useGOT = mustUseGOT;
8323 }
8324 // if this GOT usage cannot be optimized away then make a GOT entry
8325 if ( ! this->optimizableGOTReferenceKind(ref->getKind()) )
8326 useGOT = true;
8327 if ( useGOT ) {
8328 ObjectFile::Atom* nlp = NULL;
8329 std::map<ObjectFile::Atom*,ObjectFile::Atom*>::iterator pos = fGOTMap.find(&target);
8330 if ( pos == fGOTMap.end() ) {
8331 nlp = new NonLazyPointerAtom<A>(*this, target);
8332 fGOTMap[&target] = nlp;
8333 }
8334 else {
8335 nlp = pos->second;
8336 }
8337 // alter reference to use non lazy pointer instead
8338 ref->setTarget(*nlp, ref->getTargetOffset());
8339 }
8340 }
8341 }
8342 }
8343 }
8344
8345 // sort stubs
8346 std::sort(fAllSynthesizedStubs.begin(), fAllSynthesizedStubs.end(), AtomByNameSorter());
8347 // add dummy self-modifying stubs (x86 only)
8348 if ( ! fOptions.makeCompressedDyldInfo() )
8349 this->insertDummyStubs();
8350 // set ordinals so sorting is preserved
8351 uint32_t sortOrder = 0;
8352 for (typename std::vector<StubAtom<A>*>::iterator it=fAllSynthesizedStubs.begin(); it != fAllSynthesizedStubs.end(); it++)
8353 (*it)->setSortingOrdinal(sortOrder++);
8354 std::sort(fAllSynthesizedStubHelpers.begin(), fAllSynthesizedStubHelpers.end(), AtomByNameSorter());
8355
8356 // sort lazy pointers
8357 std::sort(fAllSynthesizedLazyPointers.begin(), fAllSynthesizedLazyPointers.end(), AtomByNameSorter());
8358 sortOrder = 0;
8359 for (typename std::vector<LazyPointerAtom<A>*>::iterator it=fAllSynthesizedLazyPointers.begin(); it != fAllSynthesizedLazyPointers.end(); it++)
8360 (*it)->setSortingOrdinal(sortOrder++);
8361 std::sort(fAllSynthesizedLazyDylibPointers.begin(), fAllSynthesizedLazyDylibPointers.end(), AtomByNameSorter());
8362
8363 // sort non-lazy pointers
8364 std::sort(fAllSynthesizedNonLazyPointers.begin(), fAllSynthesizedNonLazyPointers.end(), AtomByNameSorter());
8365 sortOrder = 0;
8366 for (typename std::vector<NonLazyPointerAtom<A>*>::iterator it=fAllSynthesizedNonLazyPointers.begin(); it != fAllSynthesizedNonLazyPointers.end(); it++)
8367 (*it)->setSortingOrdinal(sortOrder++);
8368 std::sort(fAllSynthesizedNonLazyPointers.begin(), fAllSynthesizedNonLazyPointers.end(), AtomByNameSorter());
8369
8370 // tell linker about all synthesized atoms
8371 newAtoms.insert(newAtoms.end(), fAllSynthesizedStubs.begin(), fAllSynthesizedStubs.end());
8372 newAtoms.insert(newAtoms.end(), fAllSynthesizedStubHelpers.begin(), fAllSynthesizedStubHelpers.end());
8373 newAtoms.insert(newAtoms.end(), fAllSynthesizedLazyPointers.begin(), fAllSynthesizedLazyPointers.end());
8374 newAtoms.insert(newAtoms.end(), fAllSynthesizedLazyDylibPointers.begin(), fAllSynthesizedLazyDylibPointers.end());
8375 newAtoms.insert(newAtoms.end(), fAllSynthesizedNonLazyPointers.begin(), fAllSynthesizedNonLazyPointers.end());
8376
8377 }
8378
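// synthesizeStubs() above also reconciles conflicting weak-import settings: when two
// references to the same external symbol disagree, fOptions.weakReferenceMismatchTreatment()
// decides whether that is an error or resolves to weak or non-weak. A condensed sketch of
// that merge step with simplified types (an enum and a void* key instead of ld64's):
#if 0   // illustrative sketch only, kept out of the build
#include <map>
#include <stdexcept>

enum MismatchPolicy { kMismatchError, kMismatchResolveWeak, kMismatchResolveNonWeak };

static void mergeWeakImport(std::map<const void*, bool>& weakImportMap,
                            const void* target, bool weakImport, MismatchPolicy policy)
{
    std::map<const void*, bool>::iterator pos = weakImportMap.find(target);
    if ( pos == weakImportMap.end() ) {
        weakImportMap[target] = weakImport;             // first reference seen decides, for now
    }
    else if ( pos->second != weakImport ) {
        switch ( policy ) {
            case kMismatchError:
                throw std::runtime_error("mismatching weak references for symbol");
            case kMismatchResolveWeak:
                pos->second = true;
                break;
            case kMismatchResolveNonWeak:
                pos->second = false;
                break;
        }
    }
}
#endif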
8379 template <typename A>
8380 void Writer<A>::createSplitSegContent()
8381 {
8382 // build LC_SEGMENT_SPLIT_INFO once all atoms exist
8383 if ( fSplitCodeToDataContentAtom != NULL ) {
8384 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8385 ObjectFile::Atom* atom = *it;
8386 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8387 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8388 ObjectFile::Reference* ref = *rit;
8389 switch ( ref->getTargetBinding()) {
8390 case ObjectFile::Reference::kUnboundByName:
8391 case ObjectFile::Reference::kDontBind:
8392 break;
8393 case ObjectFile::Reference::kBoundByName:
8394 case ObjectFile::Reference::kBoundDirectly:
8395 if ( this->segmentsCanSplitApart(*atom, ref->getTarget()) ) {
8396 this->addCrossSegmentRef(atom, ref);
8397 }
8398 break;
8399 }
8400 }
8401 }
8402 // bad codegen may cause LC_SEGMENT_SPLIT_INFO to be removed
8403 adjustLoadCommandsAndPadding();
8404 }
8405
8406 }
8407
8408
8409 template <typename A>
8410 void Writer<A>::synthesizeUnwindInfoTable()
8411 {
8412 if ( fUnwindInfoAtom != NULL ) {
8413 // walk every atom and get its unwind info
8414 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8415 ObjectFile::Atom* atom = *it;
8416 if ( atom->beginUnwind() == atom->endUnwind() ) {
8417 // be sure to record an empty entry for __TEXT atoms that have no unwind info
8418 if ( strcmp(atom->getSegment().getName(), "__TEXT") == 0 )
8419 fUnwindInfoAtom->addUnwindInfo(atom, 0, 0, NULL, NULL, NULL);
8420 }
8421 else {
8422 // atom has unwind
8423 for ( ObjectFile::UnwindInfo::iterator uit = atom->beginUnwind(); uit != atom->endUnwind(); ++uit ) {
8424 fUnwindInfoAtom->addUnwindInfo(atom, uit->startOffset, uit->unwindInfo, atom->getFDE(), atom->getLSDA(), atom->getPersonalityPointer());
8425 }
8426 }
8427 }
8428 }
8429 }
8430
8431
8432 template <typename A>
8433 void Writer<A>::partitionIntoSections()
8434 {
8435 const bool oneSegmentCommand = (fOptions.outputKind() == Options::kObjectFile);
8436
8437 // for every atom, set its sectionInfo object and section offset
8438 // build up fSegmentInfos along the way
8439 ObjectFile::Section* curSection = (ObjectFile::Section*)(-1);
8440 SectionInfo* currentSectionInfo = NULL;
8441 SegmentInfo* currentSegmentInfo = NULL;
8442 SectionInfo* cstringSectionInfo = NULL;
8443 unsigned int sectionIndex = 1;
8444 fSegmentInfos.reserve(8);
8445 for (unsigned int i=0; i < fAllAtoms->size(); ++i) {
8446 ObjectFile::Atom* atom = (*fAllAtoms)[i];
8447 if ( ((atom->getSection() != curSection) || (curSection==NULL))
8448 && ((currentSectionInfo == NULL)
8449 || (strcmp(atom->getSectionName(),currentSectionInfo->fSectionName) != 0)
8450 || (strcmp(atom->getSegment().getName(),currentSectionInfo->fSegmentName) != 0)) ) {
8451 if ( oneSegmentCommand ) {
8452 if ( currentSegmentInfo == NULL ) {
8453 currentSegmentInfo = new SegmentInfo(fOptions.segmentAlignment());
8454 currentSegmentInfo->fInitProtection = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
8455 currentSegmentInfo->fMaxProtection = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
8456 this->fSegmentInfos.push_back(currentSegmentInfo);
8457 }
8458 currentSectionInfo = new SectionInfo();
8459 strcpy(currentSectionInfo->fSectionName, atom->getSectionName());
8460 strcpy(currentSectionInfo->fSegmentName, atom->getSegment().getName());
8461 currentSectionInfo->fAlignment = atom->getAlignment().powerOf2;
8462 currentSectionInfo->fAllZeroFill = atom->isZeroFill();
8463 currentSectionInfo->fVirtualSection = (currentSectionInfo->fSectionName[0] == '.');
8464 if ( !currentSectionInfo->fVirtualSection || fEmitVirtualSections )
8465 currentSectionInfo->setIndex(sectionIndex++);
8466 currentSegmentInfo->fSections.push_back(currentSectionInfo);
8467 if ( (strcmp(currentSectionInfo->fSegmentName, "__TEXT") == 0) && (strcmp(currentSectionInfo->fSectionName, "__cstring") == 0) )
8468 cstringSectionInfo = currentSectionInfo;
8469 }
8470 else {
8471 if ( (currentSegmentInfo == NULL) || (strcmp(currentSegmentInfo->fName, atom->getSegment().getName()) != 0) ) {
8472 currentSegmentInfo = new SegmentInfo(fOptions.segmentAlignment());
8473 strcpy(currentSegmentInfo->fName, atom->getSegment().getName());
8474 uint32_t initprot = 0;
8475 if ( atom->getSegment().isContentReadable() )
8476 initprot |= VM_PROT_READ;
8477 if ( atom->getSegment().isContentWritable() )
8478 initprot |= VM_PROT_WRITE;
8479 if ( atom->getSegment().isContentExecutable() )
8480 initprot |= VM_PROT_EXECUTE;
8481 if ( fOptions.readOnlyx86Stubs() && (strcmp(atom->getSegment().getName(), "__IMPORT") == 0) )
8482 initprot &= ~VM_PROT_WRITE; // hack until i386 __pointers section is synthesized by linker
8483 currentSegmentInfo->fInitProtection = initprot;
8484 if ( initprot == 0 )
8485 currentSegmentInfo->fMaxProtection = 0; // pagezero should have maxprot==initprot==0
8486 else if ( fOptions.architecture() == CPU_TYPE_ARM )
8487 currentSegmentInfo->fMaxProtection = currentSegmentInfo->fInitProtection; // iPhoneOS wants max==init
8488 else
8489 currentSegmentInfo->fMaxProtection = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
8490 std::vector<Options::SegmentProtect>& customSegProtections = fOptions.customSegmentProtections();
8491 for(std::vector<Options::SegmentProtect>::iterator it = customSegProtections.begin(); it != customSegProtections.end(); ++it) {
8492 if ( strcmp(it->name, currentSegmentInfo->fName) == 0 ) {
8493 currentSegmentInfo->fInitProtection = it->init;
8494 currentSegmentInfo->fMaxProtection = it->max;
8495 }
8496 }
8497 currentSegmentInfo->fBaseAddress = atom->getSegment().getBaseAddress();
8498 currentSegmentInfo->fFixedAddress = atom->getSegment().hasFixedAddress();
8499 if ( currentSegmentInfo->fFixedAddress && (&(atom->getSegment()) == &Segment::fgStackSegment) )
8500 currentSegmentInfo->fIndependentAddress = true;
8501 if ( (fOptions.outputKind() == Options::kPreload) && (strcmp(currentSegmentInfo->fName, "__LINKEDIT")==0) )
8502 currentSegmentInfo->fHasLoadCommand = false;
8503 if ( strcmp(currentSegmentInfo->fName, "__HEADER")==0 )
8504 currentSegmentInfo->fHasLoadCommand = false;
8505 this->fSegmentInfos.push_back(currentSegmentInfo);
8506 }
8507 currentSectionInfo = new SectionInfo();
8508 currentSectionInfo->fAtoms.reserve(fAllAtoms->size()/4); // reduce reallocations by starting large
8509 strcpy(currentSectionInfo->fSectionName, atom->getSectionName());
8510 strcpy(currentSectionInfo->fSegmentName, atom->getSegment().getName());
8511 currentSectionInfo->fAlignment = atom->getAlignment().powerOf2;
8512 // check for -sectalign override
8513 std::vector<Options::SectionAlignment>& alignmentOverrides = fOptions.sectionAlignments();
8514 for(std::vector<Options::SectionAlignment>::iterator it=alignmentOverrides.begin(); it != alignmentOverrides.end(); ++it) {
8515 if ( (strcmp(it->segmentName, currentSectionInfo->fSegmentName) == 0) && (strcmp(it->sectionName, currentSectionInfo->fSectionName) == 0) )
8516 currentSectionInfo->fAlignment = it->alignment;
8517 }
8518 currentSectionInfo->fAllZeroFill = atom->isZeroFill();
8519 currentSectionInfo->fVirtualSection = ( currentSectionInfo->fSectionName[0] == '.');
8520 if ( !currentSectionInfo->fVirtualSection || fEmitVirtualSections )
8521 currentSectionInfo->setIndex(sectionIndex++);
8522 currentSegmentInfo->fSections.push_back(currentSectionInfo);
8523 }
8524 //fprintf(stderr, "new section %s for atom %s\n", atom->getSectionName(), atom->getDisplayName());
8525 if ( strcmp(currentSectionInfo->fSectionName, "._load_commands") == 0 ) {
8526 fLoadCommandsSection = currentSectionInfo;
8527 fLoadCommandsSegment = currentSegmentInfo;
8528 }
8529 switch ( atom->getContentType() ) {
8530 case ObjectFile::Atom::kLazyPointer:
8531 currentSectionInfo->fAllLazyPointers = true;
8532 fSymbolTableCommands->needDynamicTable();
8533 break;
8534 case ObjectFile::Atom::kNonLazyPointer:
8535 currentSectionInfo->fAllNonLazyPointers = true;
8536 fSymbolTableCommands->needDynamicTable();
8537 break;
8538 case ObjectFile::Atom::kLazyDylibPointer:
8539 currentSectionInfo->fAllLazyDylibPointers = true;
8540 break;
8541 case ObjectFile::Atom::kStubHelper:
8542 currentSectionInfo->fAllStubHelpers = true;
8543 break;
8544 case ObjectFile::Atom::kCFIType:
8545 currentSectionInfo->fAlignment = __builtin_ctz(sizeof(pint_t)); // always start CFI info pointer aligned
8546 break;
8547 case ObjectFile::Atom::kStub:
8548 if ( (strcmp(currentSectionInfo->fSegmentName, "__IMPORT") == 0) && (strcmp(currentSectionInfo->fSectionName, "__jump_table") == 0) ) {
8549 currentSectionInfo->fAllSelfModifyingStubs = true;
8550 currentSectionInfo->fAlignment = 6; // force x86 fast stubs to start on 64-byte boundary
8551 }
8552 else {
8553 currentSectionInfo->fAllStubs = true;
8554 }
8555 fSymbolTableCommands->needDynamicTable();
8556 break;
8557 default:
8558 break;
8559 }
8560 curSection = atom->getSection();
8561 }
8562 // any non-zero-fill atom makes the whole section not zero-fill
8563 if ( currentSectionInfo->fAllZeroFill && ! atom->isZeroFill() )
8564 currentSectionInfo->fAllZeroFill = false;
8565 // change section object to be Writer's SectionInfo object
8566 atom->setSection(currentSectionInfo);
8567 // section alignment is that of the contained atom with the greatest alignment
8568 uint8_t atomAlign = atom->getAlignment().powerOf2;
8569 if ( currentSectionInfo->fAlignment < atomAlign )
8570 currentSectionInfo->fAlignment = atomAlign;
8571 // calculate section offset for this atom
8572 uint64_t offset = currentSectionInfo->fSize;
8573 uint64_t alignment = 1 << atomAlign;
8574 uint64_t currentModulus = (offset % alignment);
8575 uint64_t requiredModulus = atom->getAlignment().modulus;
8576 if ( currentModulus != requiredModulus ) {
8577 if ( requiredModulus > currentModulus )
8578 offset += requiredModulus-currentModulus;
8579 else
8580 offset += requiredModulus+alignment-currentModulus;
8581 }
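// Editor's note (illustrative arithmetic only, not part of the original source):
// with alignment 16 (powerOf2 == 4), a required modulus of 2, and a running
// section size of 13, currentModulus is 13; since 2 < 13 the else branch adds
// 2 + 16 - 13 = 5, giving offset 18, and 18 % 16 == 2 as required.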
8582 atom->setSectionOffset(offset);
8583 uint64_t curAtomSize = atom->getSize();
8584 currentSectionInfo->fSize = offset + curAtomSize;
8585 // add atom to section vector
8586 currentSectionInfo->fAtoms.push_back(atom);
8587 //fprintf(stderr, " adding atom %p %s size=0x%0llX to section %p %s from %s\n", atom, atom->getDisplayName(), atom->getSize(),
8588 // currentSectionInfo, currentSectionInfo->fSectionName, atom->getFile()->getPath());
8589 // update largest size
8590 if ( !currentSectionInfo->fAllZeroFill && (curAtomSize > fLargestAtomSize) )
8591 fLargestAtomSize = curAtomSize;
8592 }
8593 if ( (cstringSectionInfo != NULL) && (cstringSectionInfo->fAlignment > 0) ) {
8594 // when merging cstring sections in .o files, all strings need to use the max alignment
8595 uint64_t offset = 0;
8596 uint64_t cstringAlignment = 1 << cstringSectionInfo->fAlignment;
8597 for (std::vector<ObjectFile::Atom*>::iterator it=cstringSectionInfo->fAtoms.begin(); it != cstringSectionInfo->fAtoms.end(); it++) {
8598 offset = (offset + (cstringAlignment-1)) & (-cstringAlignment);
8599 ObjectFile::Atom* atom = *it;
8600 atom->setSectionOffset(offset);
8601 offset += atom->getSize();
8602 }
8603 cstringSectionInfo->fSize = offset;
8604 }
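// Editor's note (hypothetical numbers, for illustration): if the merged
// __cstring section ends up with fAlignment == 2 (4-byte alignment), three
// strings of sizes 5, 3 and 9 are re-laid out above at offsets 0, 8 and 12,
// and the section size becomes 21 before any later rounding.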
8605 }
8606
8607
8608 struct TargetAndOffset { ObjectFile::Atom* atom; uint32_t offset; };
8609 class TargetAndOffsetComparor
8610 {
8611 public:
8612 bool operator()(const TargetAndOffset& left, const TargetAndOffset& right) const
8613 {
8614 if ( left.atom != right.atom )
8615 return ( left.atom < right.atom );
8616 return ( left.offset < right.offset );
8617 }
8618 };
8619
8620 template <>
8621 bool Writer<ppc>::addBranchIslands()
8622 {
8623 return this->createBranchIslands();
8624 }
8625
8626 template <>
8627 bool Writer<ppc64>::addBranchIslands()
8628 {
8629 return this->createBranchIslands();
8630 }
8631
8632 template <>
8633 bool Writer<x86>::addBranchIslands()
8634 {
8635 // x86 branches can reach the entire 4GB address space, so no need for branch islands
8636 return false;
8637 }
8638
8639 template <>
8640 bool Writer<x86_64>::addBranchIslands()
8641 {
8642 // x86-64 branches can reach the entire 4GB maximum image size, so no need for branch islands
8643 return false;
8644 }
8645
8646 template <>
8647 bool Writer<arm>::addBranchIslands()
8648 {
8649 return this->createBranchIslands();
8650 }
8651
8652 template <>
8653 bool Writer<ppc>::isBranchThatMightNeedIsland(uint8_t kind)
8654 {
8655 switch (kind) {
8656 case ppc::kBranch24:
8657 case ppc::kBranch24WeakImport:
8658 return true;
8659 }
8660 return false;
8661 }
8662
8663 template <>
8664 bool Writer<ppc64>::isBranchThatMightNeedIsland(uint8_t kind)
8665 {
8666 switch (kind) {
8667 case ppc64::kBranch24:
8668 case ppc64::kBranch24WeakImport:
8669 return true;
8670 }
8671 return false;
8672 }
8673
8674 template <>
8675 bool Writer<arm>::isBranchThatMightNeedIsland(uint8_t kind)
8676 {
8677 switch (kind) {
8678 case arm::kBranch24:
8679 case arm::kBranch24WeakImport:
8680 case arm::kThumbBranch22:
8681 case arm::kThumbBranch22WeakImport:
8682 return true;
8683 }
8684 return false;
8685 }
8686
8687 template <>
8688 uint32_t Writer<ppc>::textSizeWhenMightNeedBranchIslands()
8689 {
8690 return 16000000;
8691 }
8692
8693 template <>
8694 uint32_t Writer<ppc64>::textSizeWhenMightNeedBranchIslands()
8695 {
8696 return 16000000;
8697 }
8698
8699 template <>
8700 uint32_t Writer<arm>::textSizeWhenMightNeedBranchIslands()
8701 {
8702 if ( fHasThumbBranches == false )
8703 return 32000000; // ARM can branch +/- 32MB
8704 else if ( fOptions.preferSubArchitecture() && fOptions.subArchitecture() == CPU_SUBTYPE_ARM_V7 )
8705 return 16000000; // thumb2 can branch +/- 16MB
8706 else
8707 return 4000000; // thumb1 can branch +/- 4MB
8708 }
8709
8710 template <>
8711 uint32_t Writer<ppc>::maxDistanceBetweenIslands()
8712 {
8713 return 14*1024*1024;
8714 }
8715
8716 template <>
8717 uint32_t Writer<ppc64>::maxDistanceBetweenIslands()
8718 {
8719 return 14*1024*1024;
8720 }
8721
8722 template <>
8723 uint32_t Writer<arm>::maxDistanceBetweenIslands()
8724 {
8725 if ( fHasThumbBranches == false )
8726 return 30*1024*1024;
8727 else if ( fOptions.preferSubArchitecture() && fOptions.subArchitecture() == CPU_SUBTYPE_ARM_V7 )
8728 return 14*1024*1024;
8729 else
8730 return 3500000;
8731 }
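// Editor's note (hedged): the island spacing above is deliberately smaller than
// the matching branch range in textSizeWhenMightNeedBranchIslands() (e.g. 14MB
// vs 16MB for ppc, 30MB vs 32MB for arm), leaving slack so that a region can
// accumulate many islands before previously in-range branches are pushed out
// of range; see the algorithm comment and worked example below.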
8732
8733
8734 //
8735 // PowerPC can do PC relative branches as far as +/-16MB.
8736 // If a branch target is >16MB then we insert one or more
8737 // "branch islands" between the branch and its target that
8738 // allow island hopping to the target.
8739 //
8740 // Branch Island Algorithm
8741 //
8742 // If the __TEXT segment is < 16MB, then no branch islands are needed.
8743 // Otherwise, every 14MB into the __TEXT segment a region is
8744 // added which can contain branch islands. Every out-of-range
8745 // bl instruction is checked. If it crosses a region, an island
8746 // is added to that region with the same target and the bl is
8747 // adjusted to target the island instead.
8748 //
8749 // In theory, if too many islands are added to one region, it
8750 // could grow the __TEXT enough that other previously in-range
8751 // bl branches could be pushed out of range. We reduce the
8752 // probability of this happening by placing the regions every
8753 // 14MB, which means a region would have to grow to 2MB (512,000 islands)
8754 // before any branches could be pushed out of range.
8755 //
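// Editor's worked example (hypothetical addresses, assuming the ppc limits
// above): with __text based at 0x4000, island regions fall near base+14MB
// (region 0) and base+28MB (region 1). A bl at base+1MB targeting base+30MB
// (displacement ~29MB, beyond the 16MB limit) crosses both regions. The loop
// below walks regions from the target backwards: it creates island I1 in
// region 1 that branches to the real target, then island I0 in region 0 that
// branches to I1, and finally retargets the original bl at I0, so every hop
// stays within the 16MB branch range.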
8756 template <typename A>
8757 bool Writer<A>::createBranchIslands()
8758 {
8759 bool log = false;
8760 bool result = false;
8761 // branch islands are only possibly needed if the __TEXT segment exceeds the architecture's direct branch range
8762 if ( fLoadCommandsSegment->fSize > textSizeWhenMightNeedBranchIslands() ) {
8763 if ( log) fprintf(stderr, "ld: checking for branch islands, __TEXT segment size=%llu\n", fLoadCommandsSegment->fSize);
8764 const uint32_t kBetweenRegions = maxDistanceBetweenIslands(); // place regions of islands at this architecture-specific interval within the __text section
8765 SectionInfo* textSection = NULL;
8766 for (std::vector<SectionInfo*>::iterator it=fLoadCommandsSegment->fSections.begin(); it != fLoadCommandsSegment->fSections.end(); it++) {
8767 if ( strcmp((*it)->fSectionName, "__text") == 0 ) {
8768 textSection = *it;
8769 if ( log) fprintf(stderr, "ld: checking for branch islands, __text section size=%llu\n", textSection->fSize);
8770 break;
8771 }
8772 }
8773 const int kIslandRegionsCount = fLoadCommandsSegment->fSize / kBetweenRegions;
8774 typedef std::map<TargetAndOffset,ObjectFile::Atom*, TargetAndOffsetComparor> AtomToIsland;
8775 AtomToIsland regionsMap[kIslandRegionsCount];
8776 std::vector<ObjectFile::Atom*> regionsIslands[kIslandRegionsCount];
8777 unsigned int islandCount = 0;
8778 if (log) fprintf(stderr, "ld: will use %u branch island regions\n", kIslandRegionsCount);
8779
8780 // create islands for branch references that are out of range
8781 for (std::vector<ObjectFile::Atom*>::iterator it=fAllAtoms->begin(); it != fAllAtoms->end(); it++) {
8782 ObjectFile::Atom* atom = *it;
8783 std::vector<ObjectFile::Reference*>& references = atom->getReferences();
8784 for (std::vector<ObjectFile::Reference*>::iterator rit=references.begin(); rit != references.end(); rit++) {
8785 ObjectFile::Reference* ref = *rit;
8786 if ( this->isBranchThatMightNeedIsland(ref->getKind()) ) {
8787 ObjectFile::Atom& target = ref->getTarget();
8788 int64_t srcAddr = atom->getAddress() + ref->getFixUpOffset();
8789 int64_t dstAddr = target.getAddress() + ref->getTargetOffset();
8790 int64_t displacement = dstAddr - srcAddr;
8791 TargetAndOffset finalTargetAndOffset = { &target, ref->getTargetOffset() };
8792 const int64_t kBranchLimit = kBetweenRegions;
8793 if ( displacement > kBranchLimit ) {
8794 // create forward branch chain
8795 ObjectFile::Atom* nextTarget = &target;
8796 for (int i=kIslandRegionsCount-1; i >=0 ; --i) {
8797 AtomToIsland* region = &regionsMap[i];
8798 int64_t islandRegionAddr = kBetweenRegions * (i+1) + textSection->getBaseAddress();
8799 if ( (srcAddr < islandRegionAddr) && (islandRegionAddr <= dstAddr) ) {
8800 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
8801 if ( pos == region->end() ) {
8802 BranchIslandAtom<A>* island = new BranchIslandAtom<A>(*this, target.getDisplayName(), i, *nextTarget, *finalTargetAndOffset.atom, finalTargetAndOffset.offset);
8803 island->setSection(textSection);
8804 (*region)[finalTargetAndOffset] = island;
8805 if (log) fprintf(stderr, "added island %s to region %d for %s\n", island->getDisplayName(), i, atom->getDisplayName());
8806 regionsIslands[i].push_back(island);
8807 ++islandCount;
8808 nextTarget = island;
8809 }
8810 else {
8811 nextTarget = pos->second;
8812 }
8813 }
8814 }
8815 if (log) fprintf(stderr, "using island %s for branch to %s from %s\n", nextTarget->getDisplayName(), target.getDisplayName(), atom->getDisplayName());
8816 ref->setTarget(*nextTarget, 0);
8817 }
8818 else if ( displacement < (-kBranchLimit) ) {
8819 // create back branching chain
8820 ObjectFile::Atom* prevTarget = &target;
8821 for (int i=0; i < kIslandRegionsCount ; ++i) {
8822 AtomToIsland* region = &regionsMap[i];
8823 int64_t islandRegionAddr = kBetweenRegions * (i+1);
8824 if ( (dstAddr <= islandRegionAddr) && (islandRegionAddr < srcAddr) ) {
8825 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
8826 if ( pos == region->end() ) {
8827 BranchIslandAtom<A>* island = new BranchIslandAtom<A>(*this, target.getDisplayName(), i, *prevTarget, *finalTargetAndOffset.atom, finalTargetAndOffset.offset);
8828 island->setSection(textSection);
8829 (*region)[finalTargetAndOffset] = island;
8830 if (log) fprintf(stderr, "added back island %s to region %d for %s\n", island->getDisplayName(), i, atom->getDisplayName());
8831 regionsIslands[i].push_back(island);
8832 ++islandCount;
8833 prevTarget = island;
8834 }
8835 else {
8836 prevTarget = pos->second;
8837 }
8838 }
8839 }
8840 if (log) fprintf(stderr, "using back island %s for %s\n", prevTarget->getDisplayName(), atom->getDisplayName());
8841 ref->setTarget(*prevTarget, 0);
8842 }
8843 }
8844 }
8845 }
8846
8847 // insert islands into __text section and adjust section offsets
8848 if ( islandCount > 0 ) {
8849 if ( log ) fprintf(stderr, "ld: %u branch islands required in %u regions\n", islandCount, kIslandRegionsCount);
8850 std::vector<ObjectFile::Atom*> newAtomList;
8851 newAtomList.reserve(textSection->fAtoms.size()+islandCount);
8852 uint64_t islandRegionAddr = kBetweenRegions + textSection->getBaseAddress();
8853 uint64_t textSectionAlignment = (1 << textSection->fAlignment);
8854 int regionIndex = 0;
8855 uint64_t atomSlide = 0;
8856 uint64_t sectionOffset = 0;
8857 for (std::vector<ObjectFile::Atom*>::iterator it=textSection->fAtoms.begin(); it != textSection->fAtoms.end(); it++) {
8858 ObjectFile::Atom* atom = *it;
8859 if ( (atom->getAddress()+atom->getSize()) > islandRegionAddr ) {
8860 uint64_t islandStartOffset = atom->getSectionOffset() + atomSlide;
8861 sectionOffset = islandStartOffset;
8862 std::vector<ObjectFile::Atom*>* regionIslands = &regionsIslands[regionIndex];
8863 for (std::vector<ObjectFile::Atom*>::iterator rit=regionIslands->begin(); rit != regionIslands->end(); rit++) {
8864 ObjectFile::Atom* islandAtom = *rit;
8865 newAtomList.push_back(islandAtom);
8866 uint64_t alignment = 1 << (islandAtom->getAlignment().powerOf2);
8867 sectionOffset = ( (sectionOffset+alignment-1) & (-alignment) );
8868 islandAtom->setSectionOffset(sectionOffset);
8869 if ( log ) fprintf(stderr, "assigning __text offset 0x%08llx to %s\n", sectionOffset, islandAtom->getDisplayName());
8870 sectionOffset += islandAtom->getSize();
8871 }
8872 ++regionIndex;
8873 islandRegionAddr += kBetweenRegions;
8874 uint64_t islandRegionAlignmentBlocks = (sectionOffset - islandStartOffset + textSectionAlignment - 1) / textSectionAlignment;
8875 atomSlide += (islandRegionAlignmentBlocks * textSectionAlignment);
8876 }
8877 newAtomList.push_back(atom);
8878 if ( atomSlide != 0 )
8879 atom->setSectionOffset(atom->getSectionOffset()+atomSlide);
8880 }
8881 sectionOffset = textSection->fSize+atomSlide;
8882 // put any remaining islands at end of __text section
8883 if ( regionIndex < kIslandRegionsCount ) {
8884 std::vector<ObjectFile::Atom*>* regionIslands = &regionsIslands[regionIndex];
8885 for (std::vector<ObjectFile::Atom*>::iterator rit=regionIslands->begin(); rit != regionIslands->end(); rit++) {
8886 ObjectFile::Atom* islandAtom = *rit;
8887 newAtomList.push_back(islandAtom);
8888 uint64_t alignment = 1 << (islandAtom->getAlignment().powerOf2);
8889 sectionOffset = ( (sectionOffset+alignment-1) & (-alignment) );
8890 islandAtom->setSectionOffset(sectionOffset);
8891 if ( log ) fprintf(stderr, "assigning __text offset 0x%08llx to %s\n", sectionOffset, islandAtom->getDisplayName());
8892 sectionOffset += islandAtom->getSize();
8893 }
8894 }
8895
8896 textSection->fAtoms = newAtomList;
8897 textSection->fSize = sectionOffset;
8898 result = true;
8899 }
8900
8901 }
8902 return result;
8903 }
8904
8905
8906 template <typename A>
8907 void Writer<A>::adjustLoadCommandsAndPadding()
8908 {
8909 fSegmentCommands->computeSize();
8910
8911 // recompute load command section offsets
8912 uint64_t offset = 0;
8913 std::vector<class ObjectFile::Atom*>& loadCommandAtoms = fLoadCommandsSection->fAtoms;
8914 const unsigned int atomCount = loadCommandAtoms.size();
8915 for (unsigned int i=0; i < atomCount; ++i) {
8916 ObjectFile::Atom* atom = loadCommandAtoms[i];
8917 uint64_t alignment = 1 << atom->getAlignment().powerOf2;
8918 offset = ( (offset+alignment-1) & (-alignment) );
8919 atom->setSectionOffset(offset);
8920 uint32_t atomSize = atom->getSize();
8921 if ( atomSize > fLargestAtomSize )
8922 fLargestAtomSize = atomSize;
8923 offset += atomSize;
8924 fLoadCommandsSection->fSize = offset;
8925 }
8926 const uint32_t sizeOfLoadCommandsPlusHeader = offset + sizeof(macho_header<typename A::P>);
8927
8928 std::vector<SectionInfo*>& sectionInfos = fLoadCommandsSegment->fSections;
8929 const int sectionCount = sectionInfos.size();
8930 uint32_t totalSizeOfTEXTLessHeaderAndLoadCommands = 0;
8931 for(int j=0; j < sectionCount; ++j) {
8932 SectionInfo* curSection = sectionInfos[j];
8933 if ( strcmp(curSection->fSectionName, fHeaderPadding->getSectionName()) == 0 )
8934 break;
8935 totalSizeOfTEXTLessHeaderAndLoadCommands += curSection->fSize;
8936 }
8937 uint64_t paddingSize = 0;
8938 if ( fOptions.outputKind() == Options::kDyld ) {
8939 // dyld itself has special padding requirements: we want its __text section to start at a stable, page-aligned offset
8940 paddingSize = 4096 - (totalSizeOfTEXTLessHeaderAndLoadCommands % 4096);
8941 }
8942 else if ( fOptions.outputKind() == Options::kObjectFile ) {
8943 // mach-o .o files need no padding between load commands and first section
8944 // but leave enough room that the object file could be signed
8945 paddingSize = 32;
8946 }
8947 else if ( fOptions.outputKind() == Options::kPreload ) {
8948 // mach-o MH_PRELOAD files need no padding between load commands and first section
8949 paddingSize = 0;
8950 }
8951 else {
8952 // work backwards from end of segment and lay out sections so that extra room goes to padding atom
8953 uint64_t addr = 0;
8954 for(int j=sectionCount-1; j >=0; --j) {
8955 SectionInfo* curSection = sectionInfos[j];
8956 if ( strcmp(curSection->fSectionName, fHeaderPadding->getSectionName()) == 0 ) {
8957 addr -= (fLoadCommandsSection->fSize+fMachHeaderAtom->getSize());
8958 paddingSize = addr % fOptions.segmentAlignment();
8959 break;
8960 }
8961 addr -= curSection->fSize;
8962 addr = addr & (0 - (1 << curSection->fAlignment));
8963 }
8964
8965 // if command line requires more padding than this
8966 uint32_t minPad = fOptions.minimumHeaderPad();
8967 if ( fOptions.maxMminimumHeaderPad() ) {
8968 // -headerpad_max_install_names means there should be room for every path load command to grow to MAXPATHLEN (1024) bytes
8969 uint32_t altMin = fLibraryToOrdinal.size() * MAXPATHLEN;
8970 if ( fOptions.outputKind() == Options::kDynamicLibrary )
8971 altMin += MAXPATHLEN;
8972 if ( altMin > minPad )
8973 minPad = altMin;
8974 }
8975 if ( paddingSize < minPad ) {
8976 int extraPages = (minPad - paddingSize + fOptions.segmentAlignment() - 1)/fOptions.segmentAlignment();
8977 paddingSize += extraPages * fOptions.segmentAlignment();
8978 }
8979
8980 if ( fOptions.makeEncryptable() ) {
8981 // load commands must be on a separate non-encrypted page
8982 int loadCommandsPage = (sizeOfLoadCommandsPlusHeader + minPad)/fOptions.segmentAlignment();
8983 int textPage = (sizeOfLoadCommandsPlusHeader + paddingSize)/fOptions.segmentAlignment();
8984 if ( loadCommandsPage == textPage ) {
8985 paddingSize += fOptions.segmentAlignment();
8986 textPage += 1;
8987 }
8988
8989 //paddingSize = 4096 - ((totalSizeOfTEXTLessHeaderAndLoadCommands+fOptions.minimumHeaderPad()) % 4096) + fOptions.minimumHeaderPad();
8990 fEncryptionLoadCommand->setStartEncryptionOffset(textPage*fOptions.segmentAlignment());
8991 }
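// Editor's sketch (hypothetical sizes): with 4096-byte segment alignment, if
// the header plus load commands end at 0x0D00 and the padded __text start
// would also fall on page 0, the code above adds one more page of padding so
// the encrypted region begins on its own page, separate from the load
// commands.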
8992 }
8993
8994 // adjust atom size and update section size
8995 fHeaderPadding->setSize(paddingSize);
8996 for(int j=0; j < sectionCount; ++j) {
8997 SectionInfo* curSection = sectionInfos[j];
8998 if ( strcmp(curSection->fSectionName, fHeaderPadding->getSectionName()) == 0 )
8999 curSection->fSize = paddingSize;
9000 }
9001 }
9002
9003 static uint64_t segmentAlign(uint64_t addr, uint64_t alignment)
9004 {
9005 return ((addr+alignment-1) & (-alignment));
9006 }
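// Editor's note (illustrative, assuming power-of-two alignment as used
// throughout this writer): segmentAlign(0x1234, 0x1000) == 0x2000 and
// segmentAlign(0x2000, 0x1000) == 0x2000; adding alignment-1 and then masking
// with -alignment rounds up to the next boundary without moving values that
// are already aligned.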
9007
9008 // assign file offsets and logical address to all segments
9009 template <typename A>
9010 void Writer<A>::assignFileOffsets()
9011 {
9012 const bool virtualSectionOccupyAddressSpace = ((fOptions.outputKind() != Options::kObjectFile)
9013 && (fOptions.outputKind() != Options::kPreload));
9014 bool haveFixedSegments = false;
9015 uint64_t fileOffset = 0;
9016 uint64_t nextContiguousAddress = fOptions.baseAddress();
9017 uint64_t nextReadOnlyAddress = fOptions.baseAddress();
9018 uint64_t nextWritableAddress = fOptions.baseWritableAddress();
9019
9020 // process segments with fixed addresses (-segaddr)
9021 for (std::vector<Options::SegmentStart>::iterator it = fOptions.customSegmentAddresses().begin(); it != fOptions.customSegmentAddresses().end(); ++it) {
9022 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
9023 SegmentInfo* curSegment = *segit;
9024 if ( strcmp(curSegment->fName, it->name) == 0 ) {
9025 curSegment->fBaseAddress = it->address;
9026 curSegment->fFixedAddress = true;
9027 break;
9028 }
9029 }
9030 }
9031
9032 // process segments with custom page sizes (-seg_page_size)
9033 for (std::vector<Options::SegmentSize>::iterator it = fOptions.customSegmentSizes().begin(); it != fOptions.customSegmentSizes().end(); ++it) {
9034 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
9035 SegmentInfo* curSegment = *segit;
9036 if ( strcmp(curSegment->fName, it->name) == 0 ) {
9037 curSegment->fPageSize = it->size;
9038 break;
9039 }
9040 }
9041 }
9042
9043 // Run through the segments and each segment's sections to assign addresses
9044 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
9045 SegmentInfo* curSegment = *segit;
9046
9047 if ( fOptions.splitSeg() ) {
9048 if ( curSegment->fInitProtection & VM_PROT_WRITE )
9049 nextContiguousAddress = nextWritableAddress;
9050 else
9051 nextContiguousAddress = nextReadOnlyAddress;
9052 }
9053
9054 if ( fOptions.outputKind() == Options::kPreload ) {
9055 if ( strcmp(curSegment->fName, "__HEADER") == 0 )
9056 nextContiguousAddress = 0;
9057 else if ( strcmp(curSegment->fName, "__TEXT") == 0 )
9058 nextContiguousAddress = fOptions.baseAddress();
9059 }
9060
9061 fileOffset = segmentAlign(fileOffset, curSegment->fPageSize);
9062 curSegment->fFileOffset = fileOffset;
9063
9064 // Set the segment base address
9065 if ( curSegment->fFixedAddress )
9066 haveFixedSegments = true;
9067 else
9068 curSegment->fBaseAddress = segmentAlign(nextContiguousAddress, curSegment->fPageSize);
9069
9070 // We've set the segment address, now run through each section.
9071 uint64_t address = curSegment->fBaseAddress;
9072 SectionInfo* firstZeroFillSection = NULL;
9073 SectionInfo* prevSection = NULL;
9074
9075 std::vector<SectionInfo*>& sectionInfos = curSegment->fSections;
9076
9077 for (std::vector<SectionInfo*>::iterator it = sectionInfos.begin(); it != sectionInfos.end(); ++it) {
9078 SectionInfo* curSection = *it;
9079
9080 // adjust section address based on alignment
9081 uint64_t alignment = 1 << curSection->fAlignment;
9082 if ( curSection->fAtoms.size() == 1 ) {
9083 // if there is only one atom in the section, honor its alignment modulus for a tighter layout
9084 ObjectFile::Alignment atomAlign = curSection->fAtoms[0]->getAlignment();
9085 uint64_t atomAlignP2 = (1 << atomAlign.powerOf2);
9086 uint64_t currentModulus = (address % atomAlignP2);
9087 if ( currentModulus != atomAlign.modulus ) {
9088 if ( atomAlign.modulus > currentModulus )
9089 address += atomAlign.modulus-currentModulus;
9090 else
9091 address += atomAlign.modulus+atomAlignP2-currentModulus;
9092 }
9093 }
9094 else {
9095 address = ( (address+alignment-1) & (-alignment) );
9096 }
9097 // adjust file offset to match address
9098 if ( prevSection != NULL ) {
9099 if ( virtualSectionOccupyAddressSpace || !prevSection->fVirtualSection )
9100 fileOffset = (address - prevSection->getBaseAddress()) + prevSection->fFileOffset;
9101 else
9102 fileOffset = ( (fileOffset+alignment-1) & (-alignment) );
9103 }
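// Editor's sketch (hypothetical values): if the previous section started at
// address 0x3000 with file offset 0x2000 and this section's aligned address
// is 0x3800, the new file offset becomes (0x3800 - 0x3000) + 0x2000 == 0x2800,
// keeping file offsets congruent with addresses within the segment.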
9104
9105 // update section info
9106 curSection->fFileOffset = fileOffset;
9107 curSection->setBaseAddress(address);
9108 //fprintf(stderr, "%s %s addr=0x%llX, fileoffset=0x%llX, size=0x%llX\n", curSegment->fName, curSection->fSectionName, address, fileOffset, curSection->fSize);
9109
9110 // keep track of trailing zero fill sections
9111 if ( curSection->fAllZeroFill && (firstZeroFillSection == NULL) )
9112 firstZeroFillSection = curSection;
9113 if ( !curSection->fAllZeroFill && (firstZeroFillSection != NULL) && (fOptions.outputKind() != Options::kObjectFile) )
9114 throwf("zero-fill section %s not at end of segment", curSection->fSectionName);
9115
9116 // update running pointers
9117 if ( virtualSectionOccupyAddressSpace || !curSection->fVirtualSection )
9118 address += curSection->fSize;
9119 fileOffset += curSection->fSize;
9120
9121 // sanity check size of 32-bit binaries
9122 if ( address > maxAddress() )
9123 throwf("section %s exceeds 4GB limit", curSection->fSectionName);
9124
9125 // update segment info
9126 curSegment->fFileSize = fileOffset - curSegment->fFileOffset;
9127 curSegment->fSize = curSegment->fFileSize;
9128 prevSection = curSection;
9129 }
9130
9131 if ( fOptions.outputKind() == Options::kObjectFile ) {
9132 // don't page align .o files
9133 }
9134 else {
9135 // optimize trailing zero-fill sections to not occupy disk space
9136 if ( firstZeroFillSection != NULL ) {
9137 curSegment->fFileSize = firstZeroFillSection->fFileOffset - curSegment->fFileOffset;
9138 fileOffset = firstZeroFillSection->fFileOffset;
9139 }
9140 // page align segment size
9141 curSegment->fFileSize = segmentAlign(curSegment->fFileSize, curSegment->fPageSize);
9142 curSegment->fSize = segmentAlign(curSegment->fSize, curSegment->fPageSize);
9143 if ( !curSegment->fIndependentAddress && (curSegment->fBaseAddress >= nextContiguousAddress) ) {
9144 nextContiguousAddress = segmentAlign(curSegment->fBaseAddress+curSegment->fSize, curSegment->fPageSize);
9145 fileOffset = segmentAlign(fileOffset, curSegment->fPageSize);
9146 if ( curSegment->fInitProtection & VM_PROT_WRITE )
9147 nextWritableAddress = nextContiguousAddress;
9148 else
9149 nextReadOnlyAddress = nextContiguousAddress;
9150 }
9151 }
9152 //fprintf(stderr, "end of seg %s, fileoffset=0x%llX, nextContiguousAddress=0x%llX\n", curSegment->fName, fileOffset, nextContiguousAddress);
9153 }
9154
9155 // check for segment overlaps caused by user specified fixed segments (e.g. __PAGEZERO, __UNIXSTACK)
9156 if ( haveFixedSegments ) {
9157 int segCount = fSegmentInfos.size();
9158 for(int i=0; i < segCount; ++i) {
9159 SegmentInfo* segment1 = fSegmentInfos[i];
9160
9161 for(int j=0; j < segCount; ++j) {
9162 if ( i != j ) {
9163 SegmentInfo* segment2 = fSegmentInfos[j];
9164
9165 if ( segment1->fBaseAddress < segment2->fBaseAddress ) {
9166 if ( (segment1->fBaseAddress+segment1->fSize) > segment2->fBaseAddress )
9167 throwf("segments overlap: %s (0x%08llX + 0x%08llX) and %s (0x%08llX + 0x%08llX)",
9168 segment1->fName, segment1->fBaseAddress, segment1->fSize, segment2->fName, segment2->fBaseAddress, segment2->fSize);
9169 }
9170 else if ( segment1->fBaseAddress > segment2->fBaseAddress ) {
9171 if ( (segment2->fBaseAddress+segment2->fSize) > segment1->fBaseAddress )
9172 throwf("segments overlap: %s (0x%08llX + 0x%08llX) and %s (0x%08llX + 0x%08llX)",
9173 segment1->fName, segment1->fBaseAddress, segment1->fSize, segment2->fName, segment2->fBaseAddress, segment2->fSize);
9174 }
9175 else if ( (segment1->fSize != 0) && (segment2->fSize != 0) ) {
9176 throwf("segments overlap: %s (0x%08llX + 0x%08llX) and %s (0x%08llX + 0x%08llX)",
9177 segment1->fName, segment1->fBaseAddress, segment1->fSize, segment2->fName, segment2->fBaseAddress, segment2->fSize);
9178 }
9179 }
9180 }
9181 }
9182 }
9183
9184 // set up fFirstWritableSegment and fWritableSegmentPastFirst4GB
9185 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
9186 SegmentInfo* curSegment = *segit;
9187 if ( (curSegment->fInitProtection & VM_PROT_WRITE) != 0 ) {
9188 if ( fFirstWritableSegment == NULL )
9189 fFirstWritableSegment = curSegment;
9190 if ( (curSegment->fBaseAddress + curSegment->fSize - fOptions.baseAddress()) >= 0x100000000LL )
9191 fWritableSegmentPastFirst4GB = true;
9192 }
9193 }
9194
9195 // record size of encrypted part of __TEXT segment
9196 if ( fOptions.makeEncryptable() ) {
9197 for (std::vector<SegmentInfo*>::iterator segit = fSegmentInfos.begin(); segit != fSegmentInfos.end(); ++segit) {
9198 SegmentInfo* curSegment = *segit;
9199 if ( strcmp(curSegment->fName, "__TEXT") == 0 ) {
9200 fEncryptionLoadCommand->setEndEncryptionOffset(curSegment->fFileSize);
9201 break;
9202 }
9203 }
9204 }
9205
9206 }
9207
9208 template <typename A>
9209 void Writer<A>::adjustLinkEditSections()
9210 {
9211 // link edit content is always in last segment
9212 SegmentInfo* lastSeg = fSegmentInfos[fSegmentInfos.size()-1];
9213 unsigned int firstLinkEditSectionIndex = 0;
9214 while ( strcmp(lastSeg->fSections[firstLinkEditSectionIndex]->fSegmentName, "__LINKEDIT") != 0 )
9215 ++firstLinkEditSectionIndex;
9216
9217 const unsigned int linkEditSectionCount = lastSeg->fSections.size();
9218 uint64_t fileOffset = lastSeg->fSections[firstLinkEditSectionIndex]->fFileOffset;
9219 uint64_t address = lastSeg->fSections[firstLinkEditSectionIndex]->getBaseAddress();
9220 if ( fPadSegmentInfo != NULL ) {
9221 // insert __4GBFILL segment into segments vector before LINKEDIT
9222 for(std::vector<SegmentInfo*>::iterator it = fSegmentInfos.begin(); it != fSegmentInfos.end(); ++it) {
9223 if ( *it == lastSeg ) {
9224 fSegmentInfos.insert(it, fPadSegmentInfo);
9225 break;
9226 }
9227 }
9228 // adjust __4GBFILL segment to span from end of last segment to zeroPageSize
9229 fPadSegmentInfo->fSize = fOptions.zeroPageSize() - address;
9230 fPadSegmentInfo->fBaseAddress = address;
9231 // adjust LINKEDIT to start at zeroPageSize
9232 address = fOptions.zeroPageSize();
9233 lastSeg->fBaseAddress = fOptions.zeroPageSize();
9234 }
9235 for (unsigned int i=firstLinkEditSectionIndex; i < linkEditSectionCount; ++i) {
9236 std::vector<class ObjectFile::Atom*>& atoms = lastSeg->fSections[i]->fAtoms;
9237 // adjust section address based on alignment
9238 uint64_t sectionAlignment = 1 << lastSeg->fSections[i]->fAlignment;
9239 uint64_t pad = ((address+sectionAlignment-1) & (-sectionAlignment)) - address;
9240 address += pad;
9241 fileOffset += pad; // adjust file offset to match address
9242 lastSeg->fSections[i]->setBaseAddress(address);
9243 if ( strcmp(lastSeg->fSections[i]->fSectionName, "._absolute") == 0 )
9244 lastSeg->fSections[i]->setBaseAddress(0);
9245 lastSeg->fSections[i]->fFileOffset = fileOffset;
9246 uint64_t sectionOffset = 0;
9247 for (unsigned int j=0; j < atoms.size(); ++j) {
9248 ObjectFile::Atom* atom = atoms[j];
9249 uint64_t alignment = 1 << atom->getAlignment().powerOf2;
9250 sectionOffset = ( (sectionOffset+alignment-1) & (-alignment) );
9251 atom->setSectionOffset(sectionOffset);
9252 uint64_t size = atom->getSize();
9253 sectionOffset += size;
9254 if ( size > fLargestAtomSize )
9255 fLargestAtomSize = size;
9256 }
9257 //fprintf(stderr, "setting: lastSeg->fSections[%d]->fSize = 0x%08llX\n", i, sectionOffset);
9258 lastSeg->fSections[i]->fSize = sectionOffset;
9259 fileOffset += sectionOffset;
9260 address += sectionOffset;
9261 }
9262 if ( fOptions.outputKind() == Options::kObjectFile ) {
9263 //lastSeg->fBaseAddress = 0;
9264 //lastSeg->fSize = lastSeg->fSections[firstLinkEditSectionIndex]->
9265 //lastSeg->fFileOffset = 0;
9266 //lastSeg->fFileSize =
9267 }
9268 else {
9269 lastSeg->fFileSize = fileOffset - lastSeg->fFileOffset;
9270 lastSeg->fSize = (address - lastSeg->fBaseAddress+4095) & (-4096);
9271 }
9272 }
9273
9274
9275 template <typename A>
9276 ObjectFile::Atom::Scope MachHeaderAtom<A>::getScope() const
9277 {
9278 switch ( fWriter.fOptions.outputKind() ) {
9279 case Options::kDynamicExecutable:
9280 case Options::kStaticExecutable:
9281 return ObjectFile::Atom::scopeGlobal;
9282 case Options::kDynamicLibrary:
9283 case Options::kDynamicBundle:
9284 case Options::kDyld:
9285 case Options::kObjectFile:
9286 case Options::kPreload:
9287 case Options::kKextBundle:
9288 return ObjectFile::Atom::scopeLinkageUnit;
9289 }
9290 throw "unknown header type";
9291 }
9292
9293 template <typename A>
9294 ObjectFile::Atom::SymbolTableInclusion MachHeaderAtom<A>::getSymbolTableInclusion() const
9295 {
9296 switch ( fWriter.fOptions.outputKind() ) {
9297 case Options::kDynamicExecutable:
9298 return ObjectFile::Atom::kSymbolTableInAndNeverStrip;
9299 case Options::kStaticExecutable:
9300 return ObjectFile::Atom::kSymbolTableInAsAbsolute;
9301 case Options::kDynamicLibrary:
9302 case Options::kDynamicBundle:
9303 case Options::kDyld:
9304 return ObjectFile::Atom::kSymbolTableIn;
9305 case Options::kObjectFile:
9306 case Options::kPreload:
9307 case Options::kKextBundle:
9308 return ObjectFile::Atom::kSymbolTableNotIn;
9309 }
9310 throw "unknown header type";
9311 }
9312
9313 template <typename A>
9314 const char* MachHeaderAtom<A>::getName() const
9315 {
9316 switch ( fWriter.fOptions.outputKind() ) {
9317 case Options::kDynamicExecutable:
9318 case Options::kStaticExecutable:
9319 return "__mh_execute_header";
9320 case Options::kDynamicLibrary:
9321 return "__mh_dylib_header";
9322 case Options::kDynamicBundle:
9323 return "__mh_bundle_header";
9324 case Options::kObjectFile:
9325 case Options::kPreload:
9326 case Options::kKextBundle:
9327 return NULL;
9328 case Options::kDyld:
9329 return "__mh_dylinker_header";
9330 }
9331 throw "unknown header type";
9332 }
9333
9334 template <typename A>
9335 const char* MachHeaderAtom<A>::getDisplayName() const
9336 {
9337 switch ( fWriter.fOptions.outputKind() ) {
9338 case Options::kDynamicExecutable:
9339 case Options::kStaticExecutable:
9340 case Options::kDynamicLibrary:
9341 case Options::kDynamicBundle:
9342 case Options::kDyld:
9343 return this->getName();
9344 case Options::kObjectFile:
9345 case Options::kPreload:
9346 case Options::kKextBundle:
9347 return "mach header";
9348 }
9349 throw "unknown header type";
9350 }
9351
9352 template <typename A>
9353 void MachHeaderAtom<A>::copyRawContent(uint8_t buffer[]) const
9354 {
9355 // get file type
9356 uint32_t fileType = 0;
9357 switch ( fWriter.fOptions.outputKind() ) {
9358 case Options::kDynamicExecutable:
9359 case Options::kStaticExecutable:
9360 fileType = MH_EXECUTE;
9361 break;
9362 case Options::kDynamicLibrary:
9363 fileType = MH_DYLIB;
9364 break;
9365 case Options::kDynamicBundle:
9366 fileType = MH_BUNDLE;
9367 break;
9368 case Options::kObjectFile:
9369 fileType = MH_OBJECT;
9370 break;
9371 case Options::kDyld:
9372 fileType = MH_DYLINKER;
9373 break;
9374 case Options::kPreload:
9375 fileType = MH_PRELOAD;
9376 break;
9377 case Options::kKextBundle:
9378 fileType = MH_KEXT_BUNDLE;
9379 break;
9380 }
9381
9382 // get flags
9383 uint32_t flags = 0;
9384 if ( fWriter.fOptions.outputKind() == Options::kObjectFile ) {
9385 if ( fWriter.fCanScatter )
9386 flags = MH_SUBSECTIONS_VIA_SYMBOLS;
9387 }
9388 else {
9389 if ( fWriter.fOptions.outputKind() == Options::kStaticExecutable ) {
9390 flags |= MH_NOUNDEFS;
9391 }
9392 else if ( fWriter.fOptions.outputKind() == Options::kPreload ) {
9393 flags |= MH_NOUNDEFS;
9394 if ( fWriter.fOptions.positionIndependentExecutable() )
9395 flags |= MH_PIE;
9396 }
9397 else {
9398 flags = MH_DYLDLINK;
9399 if ( fWriter.fOptions.bindAtLoad() )
9400 flags |= MH_BINDATLOAD;
9401 switch ( fWriter.fOptions.nameSpace() ) {
9402 case Options::kTwoLevelNameSpace:
9403 flags |= MH_TWOLEVEL | MH_NOUNDEFS;
9404 break;
9405 case Options::kFlatNameSpace:
9406 break;
9407 case Options::kForceFlatNameSpace:
9408 flags |= MH_FORCE_FLAT;
9409 break;
9410 }
9411 bool hasWeakDefines = fWriter.fHasWeakExports;
9412 if ( fWriter.fRegularDefAtomsThatOverrideADylibsWeakDef->size() != 0 ) {
9413 for(std::set<const ObjectFile::Atom*>::iterator it = fWriter.fRegularDefAtomsThatOverrideADylibsWeakDef->begin();
9414 it != fWriter.fRegularDefAtomsThatOverrideADylibsWeakDef->end(); ++it) {
9415 if ( fWriter.shouldExport(**it) ) {
9416 hasWeakDefines = true;
9417 break;
9418 }
9419 }
9420 }
9421 if ( hasWeakDefines )
9422 flags |= MH_WEAK_DEFINES;
9423 if ( fWriter.fReferencesWeakImports || fWriter.fHasWeakExports )
9424 flags |= MH_BINDS_TO_WEAK;
9425 if ( fWriter.fOptions.prebind() )
9426 flags |= MH_PREBOUND;
9427 if ( fWriter.fOptions.splitSeg() )
9428 flags |= MH_SPLIT_SEGS;
9429 if ( (fWriter.fOptions.outputKind() == Options::kDynamicLibrary) && fWriter.fNoReExportedDylibs )
9430 flags |= MH_NO_REEXPORTED_DYLIBS;
9431 if ( fWriter.fOptions.positionIndependentExecutable() )
9432 flags |= MH_PIE;
9433 if ( fWriter.fOptions.markAutoDeadStripDylib() )
9434 flags |= MH_DEAD_STRIPPABLE_DYLIB;
9435 }
9436 if ( fWriter.fOptions.hasExecutableStack() )
9437 flags |= MH_ALLOW_STACK_EXECUTION;
9438 if ( fWriter.fOptions.readerOptions().fRootSafe )
9439 flags |= MH_ROOT_SAFE;
9440 if ( fWriter.fOptions.readerOptions().fSetuidSafe )
9441 flags |= MH_SETUID_SAFE;
9442 }
9443
9444 // get commands info
9445 uint32_t commandsSize = 0;
9446 uint32_t commandsCount = 0;
9447
9448 std::vector<class ObjectFile::Atom*>& loadCommandAtoms = fWriter.fLoadCommandsSection->fAtoms;
9449 for (std::vector<ObjectFile::Atom*>::iterator it=loadCommandAtoms.begin(); it != loadCommandAtoms.end(); it++) {
9450 ObjectFile::Atom* atom = *it;
9451 commandsSize += atom->getSize();
9452 // segment and symbol table atoms can contain more than one load command
9453 if ( atom == fWriter.fSegmentCommands )
9454 commandsCount += fWriter.fSegmentCommands->commandCount();
9455 else if ( atom == fWriter.fSymbolTableCommands )
9456 commandsCount += fWriter.fSymbolTableCommands->commandCount();
9457 else if ( atom->getSize() != 0 )
9458 ++commandsCount;
9459 }
9460
9461 // fill out mach_header
9462 macho_header<typename A::P>* mh = (macho_header<typename A::P>*)buffer;
9463 setHeaderInfo(*mh);
9464 mh->set_filetype(fileType);
9465 mh->set_ncmds(commandsCount);
9466 mh->set_sizeofcmds(commandsSize);
9467 mh->set_flags(flags);
9468 }
9469
9470 template <>
9471 void MachHeaderAtom<ppc>::setHeaderInfo(macho_header<ppc::P>& header) const
9472 {
9473 header.set_magic(MH_MAGIC);
9474 header.set_cputype(CPU_TYPE_POWERPC);
9475 header.set_cpusubtype(fWriter.fCpuConstraint);
9476 }
9477
9478 template <>
9479 void MachHeaderAtom<ppc64>::setHeaderInfo(macho_header<ppc64::P>& header) const
9480 {
9481 header.set_magic(MH_MAGIC_64);
9482 header.set_cputype(CPU_TYPE_POWERPC64);
9483 if ( (fWriter.fOptions.outputKind() == Options::kDynamicExecutable) && (fWriter.fOptions.macosxVersionMin() >= ObjectFile::ReaderOptions::k10_5) )
9484 header.set_cpusubtype(CPU_SUBTYPE_POWERPC_ALL | 0x80000000);
9485 else
9486 header.set_cpusubtype(CPU_SUBTYPE_POWERPC_ALL);
9487 header.set_reserved(0);
9488 }
9489
9490 template <>
9491 void MachHeaderAtom<x86>::setHeaderInfo(macho_header<x86::P>& header) const
9492 {
9493 header.set_magic(MH_MAGIC);
9494 header.set_cputype(CPU_TYPE_I386);
9495 header.set_cpusubtype(CPU_SUBTYPE_I386_ALL);
9496 }
9497
9498 template <>
9499 void MachHeaderAtom<x86_64>::setHeaderInfo(macho_header<x86_64::P>& header) const
9500 {
9501 header.set_magic(MH_MAGIC_64);
9502 header.set_cputype(CPU_TYPE_X86_64);
9503 if ( (fWriter.fOptions.outputKind() == Options::kDynamicExecutable) && (fWriter.fOptions.macosxVersionMin() >= ObjectFile::ReaderOptions::k10_5) )
9504 header.set_cpusubtype(CPU_SUBTYPE_X86_64_ALL | 0x80000000);
9505 else
9506 header.set_cpusubtype(CPU_SUBTYPE_X86_64_ALL);
9507 header.set_reserved(0);
9508 }
9509
9510 template <>
9511 void MachHeaderAtom<arm>::setHeaderInfo(macho_header<arm::P>& header) const
9512 {
9513 header.set_magic(MH_MAGIC);
9514 header.set_cputype(CPU_TYPE_ARM);
9515 header.set_cpusubtype(fWriter.fCpuConstraint);
9516 }
9517
9518 template <typename A>
9519 CustomStackAtom<A>::CustomStackAtom(Writer<A>& writer)
9520 : WriterAtom<A>(writer, Segment::fgStackSegment)
9521 {
9522 if ( stackGrowsDown() )
9523 Segment::fgStackSegment.setBaseAddress(writer.fOptions.customStackAddr() - writer.fOptions.customStackSize());
9524 else
9525 Segment::fgStackSegment.setBaseAddress(writer.fOptions.customStackAddr());
9526 }
9527
9528
9529 template <> bool CustomStackAtom<ppc>::stackGrowsDown() { return true; }
9530 template <> bool CustomStackAtom<ppc64>::stackGrowsDown() { return true; }
9531 template <> bool CustomStackAtom<x86>::stackGrowsDown() { return true; }
9532 template <> bool CustomStackAtom<x86_64>::stackGrowsDown() { return true; }
9533 template <> bool CustomStackAtom<arm>::stackGrowsDown() { return true; }
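// Editor's note (illustrative values): because every supported architecture's
// stack grows down, -stack_addr gives the logical top of the stack, so the
// __UNIXSTACK segment is based at customStackAddr() - customStackSize(); e.g.
// a stack address of 0xC0000000 with a 16MB size yields a base of 0xBF000000.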
9534
9535 template <typename A>
9536 void SegmentLoadCommandsAtom<A>::computeSize()
9537 {
9538 uint64_t size = 0;
9539 std::vector<SegmentInfo*>& segmentInfos = fWriter.fSegmentInfos;
9540 int segCount = 0;
9541 for(std::vector<SegmentInfo*>::iterator it = segmentInfos.begin(); it != segmentInfos.end(); ++it) {
9542 SegmentInfo* seg = *it;
9543 if ( seg->fHasLoadCommand ) {
9544 ++segCount;
9545 size += sizeof(macho_segment_command<P>);
9546 std::vector<SectionInfo*>& sectionInfos = seg->fSections;
9547 const int sectionCount = sectionInfos.size();
9548 for(int j=0; j < sectionCount; ++j) {
9549 if ( fWriter.fEmitVirtualSections || ! sectionInfos[j]->fVirtualSection )
9550 size += sizeof(macho_section<P>);
9551 }
9552 }
9553 }
9554 fSize = size;
9555 fCommandCount = segCount;
9556 if ( fWriter.fPadSegmentInfo != NULL ) {
9557 ++fCommandCount;
9558 fSize += sizeof(macho_segment_command<P>);
9559 }
9560 }
9561
9562 template <>
9563 uint64_t LoadCommandAtom<ppc>::alignedSize(uint64_t size)
9564 {
9565 return ((size+3) & (-4)); // 4-byte align all load commands for 32-bit mach-o
9566 }
9567
9568 template <>
9569 uint64_t LoadCommandAtom<ppc64>::alignedSize(uint64_t size)
9570 {
9571 return ((size+7) & (-8)); // 8-byte align all load commands for 64-bit mach-o
9572 }
9573
9574 template <>
9575 uint64_t LoadCommandAtom<x86>::alignedSize(uint64_t size)
9576 {
9577 return ((size+3) & (-4)); // 4-byte align all load commands for 32-bit mach-o
9578 }
9579
9580 template <>
9581 uint64_t LoadCommandAtom<x86_64>::alignedSize(uint64_t size)
9582 {
9583 return ((size+7) & (-8)); // 8-byte align all load commands for 64-bit mach-o
9584 }
9585
9586 template <>
9587 uint64_t LoadCommandAtom<arm>::alignedSize(uint64_t size)
9588 {
9589 return ((size+3) & (-4)); // 4-byte align all load commands for 32-bit mach-o
9590 }
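// Editor's note (illustrative): ((size+3) & (-4)) rounds up to the next
// multiple of 4, so alignedSize(13) == 16 and alignedSize(16) == 16 for the
// 32-bit cases; the 64-bit variants use ((size+7) & (-8)) to round to 8 in
// the same way.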
9591
9592 template <typename A>
9593 void SegmentLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9594 {
9595 uint64_t size = this->getSize();
9596 const bool oneSegment = ( fWriter.fOptions.outputKind() == Options::kObjectFile );
9597 bzero(buffer, size);
9598 uint8_t* p = buffer;
9599 typename std::vector<SegmentInfo*>& segmentInfos = fWriter.fSegmentInfos;
9600 for(std::vector<SegmentInfo*>::iterator it = segmentInfos.begin(); it != segmentInfos.end(); ++it) {
9601 SegmentInfo* segInfo = *it;
9602 if ( ! segInfo->fHasLoadCommand )
9603 continue;
9604 const int sectionCount = segInfo->fSections.size();
9605 macho_segment_command<P>* cmd = (macho_segment_command<P>*)p;
9606 cmd->set_cmd(macho_segment_command<P>::CMD);
9607 cmd->set_segname(segInfo->fName);
9608 cmd->set_vmaddr(segInfo->fBaseAddress);
9609 cmd->set_vmsize(oneSegment ? 0 : segInfo->fSize);
9610 cmd->set_fileoff(segInfo->fFileOffset);
9611 cmd->set_filesize(oneSegment ? 0 : segInfo->fFileSize);
9612 cmd->set_maxprot(segInfo->fMaxProtection);
9613 cmd->set_initprot(segInfo->fInitProtection);
9614 // add sections array
9615 macho_section<P>* const sections = (macho_section<P>*)&p[sizeof(macho_segment_command<P>)];
9616 unsigned int sectionsEmitted = 0;
9617 for (int j=0; j < sectionCount; ++j) {
9618 SectionInfo* sectInfo = segInfo->fSections[j];
9619 if ( fWriter.fEmitVirtualSections || !sectInfo->fVirtualSection ) {
9620 macho_section<P>* sect = &sections[sectionsEmitted++];
9621 if ( oneSegment ) {
9622 // .o file segment does not cover load commands, so recalc at first real section
9623 if ( sectionsEmitted == 1 ) {
9624 cmd->set_vmaddr(sectInfo->getBaseAddress());
9625 cmd->set_fileoff(sectInfo->fFileOffset);
9626 }
9627 cmd->set_filesize((sectInfo->fFileOffset+sectInfo->fSize)-cmd->fileoff());
9628 cmd->set_vmsize(sectInfo->getBaseAddress() + sectInfo->fSize);
9629 }
9630 sect->set_sectname(sectInfo->fSectionName);
9631 sect->set_segname(sectInfo->fSegmentName);
9632 sect->set_addr(sectInfo->getBaseAddress());
9633 sect->set_size(sectInfo->fSize);
9634 sect->set_offset(sectInfo->fFileOffset);
9635 sect->set_align(sectInfo->fAlignment);
9636 if ( sectInfo->fRelocCount != 0 ) {
9637 sect->set_reloff(sectInfo->fRelocOffset * sizeof(macho_relocation_info<P>) + fWriter.fSectionRelocationsAtom->getFileOffset());
9638 sect->set_nreloc(sectInfo->fRelocCount);
9639 }
9640 if ( sectInfo->fAllZeroFill ) {
9641 sect->set_flags(S_ZEROFILL);
9642 sect->set_offset(0);
9643 }
9644 else if ( sectInfo->fAllLazyPointers ) {
9645 sect->set_flags(S_LAZY_SYMBOL_POINTERS);
9646 sect->set_reserved1(sectInfo->fIndirectSymbolOffset);
9647 }
9648 else if ( sectInfo->fAllLazyDylibPointers ) {
9649 sect->set_flags(S_LAZY_DYLIB_SYMBOL_POINTERS);
9650 sect->set_reserved1(sectInfo->fIndirectSymbolOffset);
9651 }
9652 else if ( sectInfo->fAllNonLazyPointers ) {
9653 sect->set_flags(S_NON_LAZY_SYMBOL_POINTERS);
9654 sect->set_reserved1(sectInfo->fIndirectSymbolOffset);
9655 }
9656 else if ( sectInfo->fAllStubs ) {
9657 sect->set_flags(S_SYMBOL_STUBS | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS);
9658 sect->set_reserved1(sectInfo->fIndirectSymbolOffset);
9659 sect->set_reserved2(sectInfo->fSize / sectInfo->fAtoms.size());
9660 if ( sectInfo->fHasTextLocalRelocs )
9661 sect->set_flags(sect->flags() | S_ATTR_LOC_RELOC);
9662 }
9663 else if ( sectInfo->fAllSelfModifyingStubs ) {
9664 sect->set_flags(S_SYMBOL_STUBS | S_ATTR_SELF_MODIFYING_CODE);
9665 sect->set_reserved1(sectInfo->fIndirectSymbolOffset);
9666 sect->set_reserved2(sectInfo->fSize / sectInfo->fAtoms.size());
9667 }
9668 else if ( sectInfo->fAllStubHelpers ) {
9669 sect->set_flags(S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS);
9670 if ( sectInfo->fHasTextLocalRelocs )
9671 sect->set_flags(sect->flags() | S_ATTR_LOC_RELOC);
9672 }
9673 else if ( sectInfo->fAtoms.at(0)->getContentType() == ObjectFile::Atom::kCStringType ) {
9674 sect->set_flags(S_CSTRING_LITERALS);
9675 }
9676 else if ( sectInfo->fAtoms.at(0)->getContentType() == ObjectFile::Atom::kCFIType ) {
9677 sect->set_flags(S_COALESCED | S_ATTR_NO_TOC | S_ATTR_STRIP_STATIC_SYMS);
9678 }
9679 else if ( (strcmp(sectInfo->fSectionName, "__mod_init_func") == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9680 sect->set_flags(S_MOD_INIT_FUNC_POINTERS);
9681 }
9682 else if ( (strcmp(sectInfo->fSectionName, "__mod_term_func") == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9683 sect->set_flags(S_MOD_TERM_FUNC_POINTERS);
9684 }
9685 else if ( (strcmp(sectInfo->fSectionName, "__textcoal_nt") == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9686 sect->set_flags(S_COALESCED);
9687 }
9688 else if ( (strcmp(sectInfo->fSectionName, "__const_coal") == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9689 sect->set_flags(S_COALESCED);
9690 }
9691 else if ( (strcmp(sectInfo->fSectionName, "__interpose") == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9692 sect->set_flags(S_INTERPOSING);
9693 }
9694 else if ( (strcmp(sectInfo->fSectionName, "__literal4") == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9695 sect->set_flags(S_4BYTE_LITERALS);
9696 }
9697 else if ( (strcmp(sectInfo->fSectionName, "__literal8") == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9698 sect->set_flags(S_8BYTE_LITERALS);
9699 }
9700 else if ( (strcmp(sectInfo->fSectionName, "__literal16") == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9701 sect->set_flags(S_16BYTE_LITERALS);
9702 }
9703 else if ( (strcmp(sectInfo->fSectionName, "__message_refs") == 0) && (strcmp(sectInfo->fSegmentName, "__OBJC") == 0) ) {
9704 sect->set_flags(S_LITERAL_POINTERS);
9705 }
9706 else if ( (strcmp(sectInfo->fSectionName, "__objc_selrefs") == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9707 sect->set_flags(S_LITERAL_POINTERS);
9708 }
9709 else if ( (strcmp(sectInfo->fSectionName, "__cls_refs") == 0) && (strcmp(sectInfo->fSegmentName, "__OBJC") == 0) ) {
9710 sect->set_flags(S_LITERAL_POINTERS);
9711 }
9712 else if ( (strncmp(sectInfo->fSectionName, "__dof_", 6) == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9713 sect->set_flags(S_DTRACE_DOF);
9714 }
9715 else if ( (strncmp(sectInfo->fSectionName, "__dof_", 6) == 0) && (strcmp(sectInfo->fSegmentName, "__DATA") == 0) ) {
9716 sect->set_flags(S_DTRACE_DOF);
9717 }
9718 else if ( (strncmp(sectInfo->fSectionName, "__text", 6) == 0) && (strcmp(sectInfo->fSegmentName, "__TEXT") == 0) ) {
9719 sect->set_flags(S_REGULAR | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS);
9720 if ( sectInfo->fHasTextLocalRelocs )
9721 sect->set_flags(sect->flags() | S_ATTR_LOC_RELOC);
9722 if ( sectInfo->fHasTextExternalRelocs )
9723 sect->set_flags(sect->flags() | S_ATTR_EXT_RELOC);
9724 }
9725 //fprintf(stderr, "section %s flags=0x%08X\n", sectInfo->fSectionName, sect->flags());
9726 }
9727 }
9728 p = &p[sizeof(macho_segment_command<P>) + sectionsEmitted*sizeof(macho_section<P>)];
9729 cmd->set_cmdsize(sizeof(macho_segment_command<P>) + sectionsEmitted*sizeof(macho_section<P>));
9730 cmd->set_nsects(sectionsEmitted);
9731 }
9732 }
9733
9734
9735 template <typename A>
9736 SymbolTableLoadCommandsAtom<A>::SymbolTableLoadCommandsAtom(Writer<A>& writer)
9737 : LoadCommandAtom<A>(writer), fNeedsDynamicSymbolTable(false)
9738 {
9739 bzero(&fSymbolTable, sizeof(macho_symtab_command<P>));
9740 bzero(&fDynamicSymbolTable, sizeof(macho_dysymtab_command<P>));
9741 switch ( fWriter.fOptions.outputKind() ) {
9742 case Options::kDynamicExecutable:
9743 case Options::kDynamicLibrary:
9744 case Options::kDynamicBundle:
9745 case Options::kDyld:
9746 case Options::kKextBundle:
9747 fNeedsDynamicSymbolTable = true;
9748 break;
9749 case Options::kObjectFile:
9750 case Options::kStaticExecutable:
9751 fNeedsDynamicSymbolTable = false; break; // don't fall through into the kPreload case
9752 case Options::kPreload:
9753 fNeedsDynamicSymbolTable = fWriter.fOptions.positionIndependentExecutable();
9754 break;
9755 }
9756 writer.fSymbolTableCommands = this;
9757 }
9758
9759
9760
9761 template <typename A>
9762 void SymbolTableLoadCommandsAtom<A>::needDynamicTable()
9763 {
9764 fNeedsDynamicSymbolTable = true;
9765 }
9766
9767
9768 template <typename A>
9769 uint64_t SymbolTableLoadCommandsAtom<A>::getSize() const
9770 {
9771 if ( fNeedsDynamicSymbolTable )
9772 return this->alignedSize(sizeof(macho_symtab_command<P>) + sizeof(macho_dysymtab_command<P>));
9773 else
9774 return this->alignedSize(sizeof(macho_symtab_command<P>));
9775 }
9776
9777 template <typename A>
9778 void SymbolTableLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9779 {
9780 // build LC_SYMTAB command
9781 macho_symtab_command<P>* symbolTableCmd = (macho_symtab_command<P>*)buffer;
9782 bzero(symbolTableCmd, sizeof(macho_symtab_command<P>));
9783 symbolTableCmd->set_cmd(LC_SYMTAB);
9784 symbolTableCmd->set_cmdsize(sizeof(macho_symtab_command<P>));
9785 symbolTableCmd->set_nsyms(fWriter.fSymbolTableCount);
9786 symbolTableCmd->set_symoff(fWriter.fSymbolTableCount == 0 ? 0 : fWriter.fSymbolTableAtom->getFileOffset());
9787 symbolTableCmd->set_stroff(fWriter.fStringsAtom->getSize() == 0 ? 0 : fWriter.fStringsAtom->getFileOffset());
9788 symbolTableCmd->set_strsize(fWriter.fStringsAtom->getSize());
9789
9790 // build LC_DYSYMTAB command
9791 if ( fNeedsDynamicSymbolTable ) {
9792 macho_dysymtab_command<P>* dynamicSymbolTableCmd = (macho_dysymtab_command<P>*)&buffer[sizeof(macho_symtab_command<P>)];
9793 bzero(dynamicSymbolTableCmd, sizeof(macho_dysymtab_command<P>));
9794 dynamicSymbolTableCmd->set_cmd(LC_DYSYMTAB);
9795 dynamicSymbolTableCmd->set_cmdsize(sizeof(macho_dysymtab_command<P>));
9796 dynamicSymbolTableCmd->set_ilocalsym(fWriter.fSymbolTableStabsStartIndex);
9797 dynamicSymbolTableCmd->set_nlocalsym(fWriter.fSymbolTableStabsCount + fWriter.fSymbolTableLocalCount);
9798 dynamicSymbolTableCmd->set_iextdefsym(fWriter.fSymbolTableExportStartIndex);
9799 dynamicSymbolTableCmd->set_nextdefsym(fWriter.fSymbolTableExportCount);
9800 dynamicSymbolTableCmd->set_iundefsym(fWriter.fSymbolTableImportStartIndex);
9801 dynamicSymbolTableCmd->set_nundefsym(fWriter.fSymbolTableImportCount);
9802 if ( fWriter.fModuleInfoAtom != NULL ) {
9803 dynamicSymbolTableCmd->set_tocoff(fWriter.fModuleInfoAtom->getTableOfContentsFileOffset());
9804 dynamicSymbolTableCmd->set_ntoc(fWriter.fSymbolTableExportCount);
9805 dynamicSymbolTableCmd->set_modtaboff(fWriter.fModuleInfoAtom->getModuleTableFileOffset());
9806 dynamicSymbolTableCmd->set_nmodtab(1);
9807 dynamicSymbolTableCmd->set_extrefsymoff(fWriter.fModuleInfoAtom->getReferencesFileOffset());
9808 dynamicSymbolTableCmd->set_nextrefsyms(fWriter.fModuleInfoAtom->getReferencesCount());
9809 }
9810 dynamicSymbolTableCmd->set_indirectsymoff((fWriter.fIndirectTableAtom == NULL) ? 0 : fWriter.fIndirectTableAtom->getFileOffset());
9811 dynamicSymbolTableCmd->set_nindirectsyms((fWriter.fIndirectTableAtom == NULL) ? 0 : fWriter.fIndirectTableAtom->fTable.size());
9812 if ( fWriter.fOptions.outputKind() != Options::kObjectFile ) {
9813 if ( fWriter.fExternalRelocationsAtom != 0 ) {
9814 dynamicSymbolTableCmd->set_extreloff((fWriter.fExternalRelocs.size()==0) ? 0 : fWriter.fExternalRelocationsAtom->getFileOffset());
9815 dynamicSymbolTableCmd->set_nextrel(fWriter.fExternalRelocs.size());
9816 }
9817 if ( fWriter.fLocalRelocationsAtom != 0 ) {
9818 dynamicSymbolTableCmd->set_locreloff((fWriter.fInternalRelocs.size()==0) ? 0 : fWriter.fLocalRelocationsAtom->getFileOffset());
9819 dynamicSymbolTableCmd->set_nlocrel(fWriter.fInternalRelocs.size());
9820 }
9821 }
9822 }
9823 }
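// note: LC_DYSYMTAB adds no symbols of its own; the i*/n* pairs set above
// simply partition the LC_SYMTAB nlist array into contiguous ranges
// (stabs+locals, exported definitions, undefined imports) and point at the
// extra link-edit tables (TOC, module table, reference, indirect symbol and
// external/local relocation tables).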
9824
9825
9826 template <typename A>
9827 unsigned int SymbolTableLoadCommandsAtom<A>::commandCount()
9828 {
9829 return fNeedsDynamicSymbolTable ? 2 : 1;
9830 }
9831
9832 template <typename A>
9833 uint64_t DyldLoadCommandsAtom<A>::getSize() const
9834 {
9835 return this->alignedSize(sizeof(macho_dylinker_command<P>) + strlen("/usr/lib/dyld") + 1);
9836 }
9837
9838 template <typename A>
9839 void DyldLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9840 {
9841 uint64_t size = this->getSize();
9842 bzero(buffer, size);
9843 macho_dylinker_command<P>* cmd = (macho_dylinker_command<P>*)buffer;
9844 if ( fWriter.fOptions.outputKind() == Options::kDyld )
9845 cmd->set_cmd(LC_ID_DYLINKER);
9846 else
9847 cmd->set_cmd(LC_LOAD_DYLINKER);
9848 cmd->set_cmdsize(this->getSize());
9849 cmd->set_name_offset();
9850 strcpy((char*)&buffer[sizeof(macho_dylinker_command<P>)], "/usr/lib/dyld");
9851 }
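// Layout sketch for the string-carrying load commands built in this file
// (dylinker, sub_client, dylib, sub_umbrella, sub_library, sub_framework,
// rpath): the fixed struct is followed immediately by the NUL-terminated
// string, and cmdsize is the alignedSize() of both. The parameterless
// set_*_offset() calls presumably record sizeof(the fixed struct) as the
// offset of that trailing string, e.g. for LC_LOAD_DYLINKER:
//
//   | macho_dylinker_command<P> | "/usr/lib/dyld\0" | zero pad to alignment |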
9852
9853 template <typename A>
9854 uint64_t AllowableClientLoadCommandsAtom<A>::getSize() const
9855 {
9856 return this->alignedSize(sizeof(macho_sub_client_command<P>) + strlen(this->clientString) + 1);
9857 }
9858
9859 template <typename A>
9860 void AllowableClientLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9861 {
9862 uint64_t size = this->getSize();
9863
9864 bzero(buffer, size);
9865 macho_sub_client_command<P>* cmd = (macho_sub_client_command<P>*)buffer;
9866 cmd->set_cmd(LC_SUB_CLIENT);
9867 cmd->set_cmdsize(size);
9868 cmd->set_client_offset();
9869 strcpy((char*)&buffer[sizeof(macho_sub_client_command<P>)], this->clientString);
9870
9871 }
9872
9873 template <typename A>
9874 uint64_t DylibLoadCommandsAtom<A>::getSize() const
9875 {
9876 if ( fOptimizedAway ) {
9877 return 0;
9878 }
9879 else {
9880 const char* path = fInfo.reader->getInstallPath();
9881 return this->alignedSize(sizeof(macho_dylib_command<P>) + strlen(path) + 1);
9882 }
9883 }
9884
9885 template <typename A>
9886 void DylibLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9887 {
9888 if ( fOptimizedAway )
9889 return;
9890 uint64_t size = this->getSize();
9891 bzero(buffer, size);
9892 const char* path = fInfo.reader->getInstallPath();
9893 macho_dylib_command<P>* cmd = (macho_dylib_command<P>*)buffer;
9894 // <rdar://problem/5529626> If only weak_import symbols are used, linker should use LD_LOAD_WEAK_DYLIB
9895 bool autoWeakLoadDylib = ( (fWriter.fDylibReadersWithWeakImports.count(fInfo.reader) > 0)
9896 && (fWriter.fDylibReadersWithNonWeakImports.count(fInfo.reader) == 0) );
9897 if ( fInfo.options.fLazyLoad )
9898 cmd->set_cmd(LC_LAZY_LOAD_DYLIB);
9899 else if ( fInfo.options.fWeakImport || autoWeakLoadDylib )
9900 cmd->set_cmd(LC_LOAD_WEAK_DYLIB);
9901 else if ( fInfo.options.fReExport && fWriter.fOptions.useSimplifiedDylibReExports() )
9902 cmd->set_cmd(LC_REEXPORT_DYLIB);
9903 else
9904 cmd->set_cmd(LC_LOAD_DYLIB);
9905 cmd->set_cmdsize(this->getSize());
9906 cmd->set_timestamp(2); // needs to be some constant value different from the one DylibIDLoadCommandsAtom uses

9907 cmd->set_current_version(fInfo.reader->getCurrentVersion());
9908 cmd->set_compatibility_version(fInfo.reader->getCompatibilityVersion());
9909 cmd->set_name_offset();
9910 strcpy((char*)&buffer[sizeof(macho_dylib_command<P>)], path);
9911 }
9912
9913
9914
9915 template <typename A>
9916 uint64_t DylibIDLoadCommandsAtom<A>::getSize() const
9917 {
9918 return this->alignedSize(sizeof(macho_dylib_command<P>) + strlen(fWriter.fOptions.installPath()) + 1);
9919 }
9920
9921 template <typename A>
9922 void DylibIDLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9923 {
9924 uint64_t size = this->getSize();
9925 bzero(buffer, size);
9926 macho_dylib_command<P>* cmd = (macho_dylib_command<P>*)buffer;
9927 cmd->set_cmd(LC_ID_DYLIB);
9928 cmd->set_cmdsize(this->getSize());
9929 cmd->set_name_offset();
9930 cmd->set_timestamp(1); // needs to be some constant value different from the one DylibLoadCommandsAtom uses
9931 cmd->set_current_version(fWriter.fOptions.currentVersion());
9932 cmd->set_compatibility_version(fWriter.fOptions.compatibilityVersion());
9933 strcpy((char*)&buffer[sizeof(macho_dylib_command<P>)], fWriter.fOptions.installPath());
9934 }
9935
9936
9937 template <typename A>
9938 void RoutinesLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9939 {
9940 uint64_t initAddr = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
9941 if (fWriter.fEntryPoint->isThumb())
9942 initAddr |= 1ULL;
9943 bzero(buffer, sizeof(macho_routines_command<P>));
9944 macho_routines_command<P>* cmd = (macho_routines_command<P>*)buffer;
9945 cmd->set_cmd(macho_routines_command<P>::CMD);
9946 cmd->set_cmdsize(this->getSize());
9947 cmd->set_init_address(initAddr);
9948 }
9949
9950
9951 template <typename A>
9952 uint64_t SubUmbrellaLoadCommandsAtom<A>::getSize() const
9953 {
9954 return this->alignedSize(sizeof(macho_sub_umbrella_command<P>) + strlen(fName) + 1);
9955 }
9956
9957 template <typename A>
9958 void SubUmbrellaLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
9959 {
9960 uint64_t size = this->getSize();
9961 bzero(buffer, size);
9962 macho_sub_umbrella_command<P>* cmd = (macho_sub_umbrella_command<P>*)buffer;
9963 cmd->set_cmd(LC_SUB_UMBRELLA);
9964 cmd->set_cmdsize(this->getSize());
9965 cmd->set_sub_umbrella_offset();
9966 strcpy((char*)&buffer[sizeof(macho_sub_umbrella_command<P>)], fName);
9967 }
9968
9969 template <typename A>
9970 void UUIDLoadCommandAtom<A>::generate()
9971 {
9972 switch ( fWriter.fOptions.getUUIDMode() ) {
9973 case Options::kUUIDNone:
9974 fEmit = false;
9975 break;
9976 case Options::kUUIDRandom:
9977 ::uuid_generate_random(fUUID);
9978 fEmit = true;
9979 break;
9980 case Options::kUUIDContent:
9981 bzero(fUUID, 16);
9982 fEmit = true;
9983 break;
9984 }
9985 }
9986
9987 template <typename A>
9988 void UUIDLoadCommandAtom<A>::setContent(const uint8_t uuid[16])
9989 {
9990 memcpy(fUUID, uuid, 16);
9991 }
9992
9993 template <typename A>
9994 void UUIDLoadCommandAtom<A>::copyRawContent(uint8_t buffer[]) const
9995 {
9996 if (fEmit) {
9997 uint64_t size = this->getSize();
9998 bzero(buffer, size);
9999 macho_uuid_command<P>* cmd = (macho_uuid_command<P>*)buffer;
10000 cmd->set_cmd(LC_UUID);
10001 cmd->set_cmdsize(this->getSize());
10002 cmd->set_uuid((uint8_t*)fUUID);
10003 }
10004 }
10005
10006
10007 template <typename A>
10008 uint64_t SubLibraryLoadCommandsAtom<A>::getSize() const
10009 {
10010 return this->alignedSize(sizeof(macho_sub_library_command<P>) + fNameLength + 1);
10011 }
10012
10013 template <typename A>
10014 void SubLibraryLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
10015 {
10016 uint64_t size = this->getSize();
10017 bzero(buffer, size);
10018 macho_sub_library_command<P>* cmd = (macho_sub_library_command<P>*)buffer;
10019 cmd->set_cmd(LC_SUB_LIBRARY);
10020 cmd->set_cmdsize(this->getSize());
10021 cmd->set_sub_library_offset();
10022 strncpy((char*)&buffer[sizeof(macho_sub_library_command<P>)], fNameStart, fNameLength);
10023 buffer[sizeof(macho_sub_library_command<P>)+fNameLength] = '\0';
10024 }
10025
10026 template <typename A>
10027 uint64_t UmbrellaLoadCommandsAtom<A>::getSize() const
10028 {
10029 return this->alignedSize(sizeof(macho_sub_framework_command<P>) + strlen(fName) + 1);
10030 }
10031
10032 template <typename A>
10033 void UmbrellaLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
10034 {
10035 uint64_t size = this->getSize();
10036 bzero(buffer, size);
10037 macho_sub_framework_command<P>* cmd = (macho_sub_framework_command<P>*)buffer;
10038 cmd->set_cmd(LC_SUB_FRAMEWORK);
10039 cmd->set_cmdsize(this->getSize());
10040 cmd->set_umbrella_offset();
10041 strcpy((char*)&buffer[sizeof(macho_sub_framework_command<P>)], fName);
10042 }
10043
10044 template <>
10045 uint64_t ThreadsLoadCommandsAtom<ppc>::getSize() const
10046 {
10047 return this->alignedSize(16 + 40*4); // base size + PPC_THREAD_STATE_COUNT * 4
10048 }
10049
10050 template <>
10051 uint64_t ThreadsLoadCommandsAtom<ppc64>::getSize() const
10052 {
10053 return this->alignedSize(16 + 76*4); // base size + PPC_THREAD_STATE64_COUNT * 4
10054 }
10055
10056 template <>
10057 uint64_t ThreadsLoadCommandsAtom<x86>::getSize() const
10058 {
10059 return this->alignedSize(16 + 16*4); // base size + i386_THREAD_STATE_COUNT * 4
10060 }
10061
10062 template <>
10063 uint64_t ThreadsLoadCommandsAtom<x86_64>::getSize() const
10064 {
10065 return this->alignedSize(16 + x86_THREAD_STATE64_COUNT * 4);
10066 }
10067
10068 // FIXME: ARM_THREAD_STATE_COUNT should be picked up from a header instead of hard-coding 17
10069 template <>
10070 uint64_t ThreadsLoadCommandsAtom<arm>::getSize() const
10071 {
10072 return this->alignedSize(16 + 17 * 4); // base size + ARM_THREAD_STATE_COUNT * 4
10073 }
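// note: in each getSize() above, the 16-byte base covers the thread command's
// cmd, cmdsize, flavor and count fields (4 bytes each); the remainder is
// count * 4 bytes of register state for that flavor.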
10074
10075 template <>
10076 void ThreadsLoadCommandsAtom<ppc>::copyRawContent(uint8_t buffer[]) const
10077 {
10078 uint64_t size = this->getSize();
10079 uint64_t start = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
10080 bzero(buffer, size);
10081 macho_thread_command<ppc::P>* cmd = (macho_thread_command<ppc::P>*)buffer;
10082 cmd->set_cmd(LC_UNIXTHREAD);
10083 cmd->set_cmdsize(size);
10084 cmd->set_flavor(1); // PPC_THREAD_STATE
10085 cmd->set_count(40); // PPC_THREAD_STATE_COUNT;
10086 cmd->set_thread_register(0, start);
10087 if ( fWriter.fOptions.hasCustomStack() )
10088 cmd->set_thread_register(3, fWriter.fOptions.customStackAddr()); // r1
10089 }
10090
10091
10092 template <>
10093 void ThreadsLoadCommandsAtom<ppc64>::copyRawContent(uint8_t buffer[]) const
10094 {
10095 uint64_t size = this->getSize();
10096 uint64_t start = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
10097 bzero(buffer, size);
10098 macho_thread_command<ppc64::P>* cmd = (macho_thread_command<ppc64::P>*)buffer;
10099 cmd->set_cmd(LC_UNIXTHREAD);
10100 cmd->set_cmdsize(size);
10101 cmd->set_flavor(5); // PPC_THREAD_STATE64
10102 cmd->set_count(76); // PPC_THREAD_STATE64_COUNT;
10103 cmd->set_thread_register(0, start);
10104 if ( fWriter.fOptions.hasCustomStack() )
10105 cmd->set_thread_register(3, fWriter.fOptions.customStackAddr()); // r1
10106 }
10107
10108 template <>
10109 void ThreadsLoadCommandsAtom<x86>::copyRawContent(uint8_t buffer[]) const
10110 {
10111 uint64_t size = this->getSize();
10112 uint64_t start = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
10113 bzero(buffer, size);
10114 macho_thread_command<x86::P>* cmd = (macho_thread_command<x86::P>*)buffer;
10115 cmd->set_cmd(LC_UNIXTHREAD);
10116 cmd->set_cmdsize(size);
10117 cmd->set_flavor(1); // i386_THREAD_STATE
10118 cmd->set_count(16); // i386_THREAD_STATE_COUNT;
10119 cmd->set_thread_register(10, start);
10120 if ( fWriter.fOptions.hasCustomStack() )
10121 cmd->set_thread_register(7, fWriter.fOptions.customStackAddr()); // esp
10122 }
10123
10124 template <>
10125 void ThreadsLoadCommandsAtom<x86_64>::copyRawContent(uint8_t buffer[]) const
10126 {
10127 uint64_t size = this->getSize();
10128 uint64_t start = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
10129 bzero(buffer, size);
10130 macho_thread_command<x86_64::P>* cmd = (macho_thread_command<x86_64::P>*)buffer;
10131 cmd->set_cmd(LC_UNIXTHREAD);
10132 cmd->set_cmdsize(size);
10133 cmd->set_flavor(x86_THREAD_STATE64);
10134 cmd->set_count(x86_THREAD_STATE64_COUNT);
10135 cmd->set_thread_register(16, start); // rip
10136 if ( fWriter.fOptions.hasCustomStack() )
10137 cmd->set_thread_register(7, fWriter.fOptions.customStackAddr()); // uesp
10138 }
10139
10140 template <>
10141 void ThreadsLoadCommandsAtom<arm>::copyRawContent(uint8_t buffer[]) const
10142 {
10143 uint64_t size = this->getSize();
10144 uint64_t start = fWriter.getAtomLoadAddress(fWriter.fEntryPoint);
10145 if ( fWriter.fEntryPoint->isThumb() )
10146 start |= 1ULL;
10147 bzero(buffer, size);
10148 macho_thread_command<arm::P>* cmd = (macho_thread_command<arm::P>*)buffer;
10149 cmd->set_cmd(LC_UNIXTHREAD);
10150 cmd->set_cmdsize(size);
10151 cmd->set_flavor(1); // ARM_THREAD_STATE
10152 cmd->set_count(17); // ARM_THREAD_STATE_COUNT
10153 cmd->set_thread_register(15, start); // pc
10154 if ( fWriter.fOptions.hasCustomStack() )
10155 cmd->set_thread_register(13, fWriter.fOptions.customStackAddr()); // FIXME: sp?
10156 }
10157
10158 template <typename A>
10159 uint64_t RPathLoadCommandsAtom<A>::getSize() const
10160 {
10161 return this->alignedSize(sizeof(macho_rpath_command<P>) + strlen(fPath) + 1);
10162 }
10163
10164 template <typename A>
10165 void RPathLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
10166 {
10167 uint64_t size = this->getSize();
10168 bzero(buffer, size);
10169 macho_rpath_command<P>* cmd = (macho_rpath_command<P>*)buffer;
10170 cmd->set_cmd(LC_RPATH);
10171 cmd->set_cmdsize(this->getSize());
10172 cmd->set_path_offset();
10173 strcpy((char*)&buffer[sizeof(macho_rpath_command<P>)], fPath);
10174 }
10175
10176
10177
10178 template <typename A>
10179 void EncryptionLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
10180 {
10181 uint64_t size = this->getSize();
10182 bzero(buffer, size);
10183 macho_encryption_info_command<P>* cmd = (macho_encryption_info_command<P>*)buffer;
10184 cmd->set_cmd(LC_ENCRYPTION_INFO);
10185 cmd->set_cmdsize(this->getSize());
10186 cmd->set_cryptoff(fStartOffset);
10187 cmd->set_cryptsize(fEndOffset-fStartOffset);
10188 cmd->set_cryptid(0);
10189 }
10190
10191
10192
10193 template <typename A>
10194 void LoadCommandsPaddingAtom<A>::copyRawContent(uint8_t buffer[]) const
10195 {
10196 bzero(buffer, fSize);
10197 }
10198
10199 template <typename A>
10200 void LoadCommandsPaddingAtom<A>::setSize(uint64_t newSize)
10201 {
10202 fSize = newSize;
10203 // this resizing by-passes the way fLargestAtomSize is set, so re-check here
10204 if ( fWriter.fLargestAtomSize < newSize )
10205 fWriter.fLargestAtomSize = newSize;
10206 }
10207
10208 template <typename A>
10209 void UnwindInfoAtom<A>::addUnwindInfo(ObjectFile::Atom* func, uint32_t offset, uint32_t encoding,
10210 ObjectFile::Reference* fdeRef, ObjectFile::Reference* lsdaRef,
10211 ObjectFile::Atom* personalityPointer)
10212 {
10213 Info info;
10214 info.func = func;
10215 if ( fdeRef != NULL )
10216 info.fde = &fdeRef->getTarget();
10217 else
10218 info.fde = NULL;
10219 if ( lsdaRef != NULL ) {
10220 info.lsda = &lsdaRef->getTarget();
10221 info.lsdaOffset = lsdaRef->getTargetOffset();
10222 }
10223 else {
10224 info.lsda = NULL;
10225 info.lsdaOffset = 0;
10226 }
10227 info.personalityPointer = personalityPointer;
10228 info.encoding = encoding;
10229 fInfos.push_back(info);
10230 //fprintf(stderr, "addUnwindInfo() encoding=0x%08X, lsda=%p, lsdaOffset=%d, person=%p, func=%s\n",
10231 // encoding, info.lsda, info.lsdaOffset, personalityPointer, func->getDisplayName());
10232 }
10233
10234 template <>
10235 bool UnwindInfoAtom<x86>::encodingMeansUseDwarf(compact_unwind_encoding_t encoding)
10236 {
10237 return ( (encoding & UNWIND_X86_MODE_MASK) == UNWIND_X86_MODE_DWARF);
10238 }
10239
10240 template <>
10241 bool UnwindInfoAtom<x86_64>::encodingMeansUseDwarf(compact_unwind_encoding_t encoding)
10242 {
10243 return ( (encoding & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
10244 }
10245
10246 template <typename A>
10247 bool UnwindInfoAtom<A>::encodingMeansUseDwarf(compact_unwind_encoding_t encoding)
10248 {
10249 return false;
10250 }
10251
10252
10253 template <typename A>
10254 void UnwindInfoAtom<A>::compressDuplicates(std::vector<Info>& uniqueInfos)
10255 {
10256 // build new list removing entries where next function has same encoding
10257 uniqueInfos.reserve(fInfos.size());
10258 Info last;
10259 last.func = NULL;
10260 last.lsda = NULL;
10261 last.lsdaOffset = 0;
10262 last.personalityPointer = NULL;
10263 last.encoding = 0xFFFFFFFF;
10264 for(typename std::vector<Info>::iterator it=fInfos.begin(); it != fInfos.end(); ++it) {
10265 Info& newInfo = *it;
10266 bool newNeedsDwarf = encodingMeansUseDwarf(newInfo.encoding);
10267 // remove infos which have same encoding and personalityPointer as last one
10268 if ( newNeedsDwarf || (newInfo.encoding != last.encoding) || (newInfo.personalityPointer != last.personalityPointer)
10269 || (newInfo.lsda != NULL) || (last.lsda != NULL) ) {
10270 uniqueInfos.push_back(newInfo);
10271 }
10272 last = newInfo;
10273 }
10274 //fprintf(stderr, "compressDuplicates() fInfos.size()=%lu, uniqueInfos.size()=%lu\n", fInfos.size(), uniqueInfos.size());
10275 }
10276
10277 template <typename A>
10278 void UnwindInfoAtom<A>::findCommonEncoding(const std::vector<Info>& uniqueInfos, std::map<uint32_t, unsigned int>& commonEncodings)
10279 {
10280 // scan infos to get frequency counts for each encoding
10281 std::map<uint32_t, unsigned int> encodingsUsed;
10282 unsigned int mostCommonEncodingUsageCount = 0;
10283 for(typename std::vector<Info>::const_iterator it=uniqueInfos.begin(); it != uniqueInfos.end(); ++it) {
10284 // never put dwarf into common table
10285 if ( encodingMeansUseDwarf(it->encoding) )
10286 continue;
10287 std::map<uint32_t, unsigned int>::iterator pos = encodingsUsed.find(it->encoding);
10288 if ( pos == encodingsUsed.end() ) {
10289 encodingsUsed[it->encoding] = 1;
10290 }
10291 else {
10292 encodingsUsed[it->encoding] += 1;
10293 if ( mostCommonEncodingUsageCount < encodingsUsed[it->encoding] )
10294 mostCommonEncodingUsageCount = encodingsUsed[it->encoding];
10295 }
10296 }
10297 // put the most common encodings into the common table, but at most 127 of them
10298 for(unsigned int usages=mostCommonEncodingUsageCount; usages > 1; --usages) {
10299 for (std::map<uint32_t, unsigned int>::iterator euit=encodingsUsed.begin(); euit != encodingsUsed.end(); ++euit) {
10300 if ( euit->second == usages ) {
10301 unsigned int size = commonEncodings.size();
10302 if ( size < 127 ) {
10303 commonEncodings[euit->first] = size;
10304 }
10305 }
10306 }
10307 }
10308 }
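// note: capping the common table at 127 entries leaves the upper half of the
// 8-bit encoding index used by compressed second-level entries (see
// makeCompressedSecondLevelPage below) free for page-specific encodings, so
// any entry can name either a common or a page-local encoding in one byte.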
10309
10310 template <typename A>
10311 void UnwindInfoAtom<A>::makeLsdaIndex(const std::vector<Info>& uniqueInfos, std::map<ObjectFile::Atom*, uint32_t>& lsdaIndexOffsetMap)
10312 {
10313 for(typename std::vector<Info>::const_iterator it=uniqueInfos.begin(); it != uniqueInfos.end(); ++it) {
10314 lsdaIndexOffsetMap[it->func] = fLSDAIndex.size() * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
10315 if ( it->lsda != NULL ) {
10316 LSDAEntry entry;
10317 entry.func = it->func;
10318 entry.lsda = it->lsda;
10319 entry.lsdaOffset = it->lsdaOffset;
10320 fLSDAIndex.push_back(entry);
10321 }
10322 }
10323 }
10324
10325 template <typename A>
10326 void UnwindInfoAtom<A>::makePersonalityIndex(std::vector<Info>& uniqueInfos)
10327 {
10328 for(typename std::vector<Info>::iterator it=uniqueInfos.begin(); it != uniqueInfos.end(); ++it) {
10329 if ( it->personalityPointer != NULL ) {
10330 std::map<ObjectFile::Atom*, uint32_t>::iterator pos = fPersonalityIndexMap.find(it->personalityPointer);
10331 if ( pos == fPersonalityIndexMap.end() ) {
10332 const uint32_t nextIndex = fPersonalityIndexMap.size() + 1;
10333 fPersonalityIndexMap[it->personalityPointer] = nextIndex;
10334 }
10335 uint32_t personalityIndex = fPersonalityIndexMap[it->personalityPointer];
10336 it->encoding |= (personalityIndex << (__builtin_ctz(UNWIND_PERSONALITY_MASK)) );
10337 }
10338 }
10339 }
10340
10341 template <typename A>
10342 unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<Info>& uniqueInfos, uint32_t pageSize,
10343 unsigned int endIndex, uint8_t*& pageEnd)
10344 {
10345 const unsigned int maxEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
10346 const unsigned int entriesToAdd = ((endIndex > maxEntriesPerPage) ? maxEntriesPerPage : endIndex);
10347 uint8_t* pageStart = pageEnd
10348 - entriesToAdd*sizeof(unwind_info_regular_second_level_entry)
10349 - sizeof(unwind_info_regular_second_level_page_header);
10350 macho_unwind_info_regular_second_level_page_header<P>* page = (macho_unwind_info_regular_second_level_page_header<P>*)pageStart;
10351 page->set_kind(UNWIND_SECOND_LEVEL_REGULAR);
10352 page->set_entryPageOffset(sizeof(macho_unwind_info_regular_second_level_page_header<P>));
10353 page->set_entryCount(entriesToAdd);
10354 macho_unwind_info_regular_second_level_entry<P>* entryTable = (macho_unwind_info_regular_second_level_entry<P>*)(pageStart + page->entryPageOffset());
10355 for (unsigned int i=0; i < entriesToAdd; ++i) {
10356 const Info& info = uniqueInfos[endIndex-entriesToAdd+i];
10357 entryTable[i].set_functionOffset(0);
10358 entryTable[i].set_encoding(info.encoding);
10359 RegFixUp fixup;
10360 fixup.contentPointer = (uint8_t*)(&entryTable[i]);
10361 fixup.func = info.func;
10362 fixup.fde = ( encodingMeansUseDwarf(info.encoding) ? info.fde : NULL );
10363 fRegFixUps.push_back(fixup);
10364 }
10365 //fprintf(stderr, "regular page with %u entries\n", entriesToAdd);
10366 pageEnd = pageStart;
10367 return endIndex - entriesToAdd;
10368 }
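// note: a regular second-level page is just a header plus an array of
// { functionOffset, encoding } pairs; functionOffset is left as 0 here and is
// patched later via the kImageOffset32 references created from fRegFixUps in
// generate().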
10369
10370
10371 template <typename A>
10372 unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<Info>& uniqueInfos,
10373 const std::map<uint32_t,unsigned int> commonEncodings,
10374 uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd)
10375 {
10376 const bool log = false;
10377 if (log) fprintf(stderr, "makeCompressedSecondLevelPage(pageSize=%u, endIndex=%u)\n", pageSize, endIndex);
10378 // first pass calculates how many compressed entries we could fit in this sized page
10379 // keep adding entries to page until:
10380 // 1) encoding table plus entry table plus header exceed page size
10381 // 2) the file offset delta from the first to last function > 24 bits
10382 // 3) custom encoding index reaches 255
10383 // 4) run out of uniqueInfos to encode
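// Each compressed entry is a single uint32_t: the second pass below stores
// (encodingIndex << 24), and the low 24 bits (the function's offset from the
// page's first function) are filled in later through the kPointerDiff24
// fixups recorded in fCompressedFixUps.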
10384 std::map<uint32_t, unsigned int> pageSpecificEncodings;
10385 uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
10386 std::vector<uint8_t> encodingIndexes;
10387 int index = endIndex-1;
10388 int entryCount = 0;
10389 uint64_t lastEntryAddress = uniqueInfos[index].func->getAddress();
10390 bool canDo = true;
10391 while ( canDo && (index >= 0) ) {
10392 const Info& info = uniqueInfos[index--];
10393 // compute encoding index
10394 unsigned int encodingIndex;
10395 std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
10396 if ( pos != commonEncodings.end() ) {
10397 encodingIndex = pos->second;
10398 }
10399 else {
10400 // no common entry, so add one on this page
10401 uint32_t encoding = info.encoding;
10402 if ( encodingMeansUseDwarf(encoding) ) {
10403 // make unique pseudo encoding so this dwarf entry gets its own encoding slot
10404 encoding += (index+1);
10405 }
10406 std::map<uint32_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
10407 if ( ppos != pageSpecificEncodings.end() ) {
10408 encodingIndex = ppos->second;
10409 }
10410 else {
10411 encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
10412 if ( encodingIndex <= 255 ) {
10413 pageSpecificEncodings[encoding] = encodingIndex;
10414 }
10415 else {
10416 canDo = false; // case 3)
10417 if (log) fprintf(stderr, "end of compressed page with %u entries, %lu custom encodings because too many custom encodings\n",
10418 entryCount, pageSpecificEncodings.size());
10419 }
10420 }
10421 }
10422 if ( canDo )
10423 encodingIndexes.push_back(encodingIndex);
10424 // compute function offset
10425 uint32_t funcOffsetWithInPage = lastEntryAddress - info.func->getAddress();
10426 if ( funcOffsetWithInPage > 0x00FFFF00 ) {
10427 // don't use 0x00FFFFFF because addresses may vary after atoms are laid out again
10428 canDo = false; // case 2)
10429 if (log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
10430 }
10431 else {
10432 ++entryCount;
10433 }
10434 // check room for entry
10435 if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
10436 canDo = false; // case 1)
10437 --entryCount;
10438 if (log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
10439 }
10440 //if (log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
10441 }
10442
10443 // check for cases where it would be better to use a regular (non-compressed) page
10444 const unsigned int compressPageUsed = sizeof(unwind_info_compressed_second_level_page_header)
10445 + pageSpecificEncodings.size()*sizeof(uint32_t)
10446 + entryCount*sizeof(uint32_t);
10447 if ( (compressPageUsed < (pageSize-4)) && (index >= 0) ) {
10448 const int regularEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
10449 if ( entryCount < regularEntriesPerPage ) {
10450 return makeRegularSecondLevelPage(uniqueInfos, pageSize, endIndex, pageEnd);
10451 }
10452 }
10453
10454 // check if we need any padding because adding another entry would take 8 bytes but only have room for 4
10455 uint32_t pad = 0;
10456 if ( compressPageUsed == (pageSize-4) )
10457 pad = 4;
10458
10459 // second pass fills in page
10460 uint8_t* pageStart = pageEnd - compressPageUsed - pad;
10461 macho_unwind_info_compressed_second_level_page_header<P>* page = (macho_unwind_info_compressed_second_level_page_header<P>*)pageStart;
10462 page->set_kind(UNWIND_SECOND_LEVEL_COMPRESSED);
10463 page->set_entryPageOffset(sizeof(macho_unwind_info_compressed_second_level_page_header<P>));
10464 page->set_entryCount(entryCount);
10465 page->set_encodingsPageOffset(page->entryPageOffset()+entryCount*sizeof(uint32_t));
10466 page->set_encodingsCount(pageSpecificEncodings.size());
10467 uint32_t* const encodingsArray = (uint32_t*)&pageStart[page->encodingsPageOffset()];
10468 // fill in entry table
10469 uint32_t* const entriesArray = (uint32_t*)&pageStart[page->entryPageOffset()];
10470 ObjectFile::Atom* firstFunc = uniqueInfos[endIndex-entryCount].func;
10471 for(unsigned int i=endIndex-entryCount; i < endIndex; ++i) {
10472 const Info& info = uniqueInfos[i];
10473 uint8_t encodingIndex;
10474 if ( encodingMeansUseDwarf(info.encoding) ) {
10475 // dwarf entries are always in page specific encodings
10476 encodingIndex = pageSpecificEncodings[info.encoding+i];
10477 }
10478 else {
10479 std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
10480 if ( pos != commonEncodings.end() )
10481 encodingIndex = pos->second;
10482 else
10483 encodingIndex = pageSpecificEncodings[info.encoding];
10484 }
10485 uint32_t entryIndex = i - endIndex + entryCount;
10486 A::P::E::set32(entriesArray[entryIndex], encodingIndex << 24);
10487 CompressedFixUp funcStartFixUp;
10488 funcStartFixUp.contentPointer = (uint8_t*)(&entriesArray[entryIndex]);
10489 funcStartFixUp.func = info.func;
10490 funcStartFixUp.fromFunc = firstFunc;
10491 fCompressedFixUps.push_back(funcStartFixUp);
10492 if ( encodingMeansUseDwarf(info.encoding) ) {
10493 CompressedEncodingFixUp dwarfStartFixup;
10494 dwarfStartFixup.contentPointer = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]);
10495 dwarfStartFixup.fde = info.fde;
10496 fCompressedEncodingFixUps.push_back(dwarfStartFixup);
10497 }
10498 }
10499 // fill in encodings table
10500 for(std::map<uint32_t, unsigned int>::const_iterator it = pageSpecificEncodings.begin(); it != pageSpecificEncodings.end(); ++it) {
10501 A::P::E::set32(encodingsArray[it->second-commonEncodings.size()], it->first);
10502 }
10503
10504 if (log) fprintf(stderr, "compressed page with %u entries, %lu custom encodings\n", entryCount, pageSpecificEncodings.size());
10505
10506 // update pageEnd;
10507 pageEnd = pageStart;
10508 return endIndex-entryCount; // endIndex for next page
10509 }
10510
10511 template <> void UnwindInfoAtom<ppc>::generate() { }
10512 template <> void UnwindInfoAtom<ppc64>::generate() { }
10513 template <> void UnwindInfoAtom<arm>::generate() { }
10514
10515
10516 template <typename A>
10517 void UnwindInfoAtom<A>::generate()
10518 {
10519 // only generate table if there are functions with unwind info
10520 if ( fInfos.size() > 0 ) {
10521 // find offset of end of __unwind_info section
10522 SectionInfo* unwindSectionInfo = (SectionInfo*)this->getSection();
10523
10524 // build new list that has proper offsetInImage and remove entries where next function has same encoding
10525 std::vector<Info> uniqueInfos;
10526 this->compressDuplicates(uniqueInfos);
10527
10528 // build personality index, update encodings with personality index
10529 this->makePersonalityIndex(uniqueInfos);
10530 if ( fPersonalityIndexMap.size() > 3 )
10531 throw "too many personality routines for compact unwind to encode";
10532
10533 // put the most common encodings into the common table, but at most 127 of them
10534 std::map<uint32_t, unsigned int> commonEncodings;
10535 this->findCommonEncoding(uniqueInfos, commonEncodings);
10536
10537 // build lsda index
10538 std::map<ObjectFile::Atom*, uint32_t> lsdaIndexOffsetMap;
10539 this->makeLsdaIndex(uniqueInfos, lsdaIndexOffsetMap);
10540
10541 // calculate worst case size for all unwind info pages when allocating buffer
10542 const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
10543 const unsigned int pageCount = ((uniqueInfos.size() - 1)/entriesPerRegularPage) + 1;
10544 fPagesContentForDelete = (uint8_t*)calloc(pageCount,4096);
10545 fPagesSize = 0;
10546 if ( fPagesContentForDelete == NULL )
10547 throw "could not allocate space for compact unwind info";
10548 ObjectFile::Atom* secondLevelFirstFuncs[pageCount*3];
10549 uint8_t* secondLevelPagesStarts[pageCount*3];
10550
10551 // make last second level page smaller so that all other second level pages can be page aligned
10552 uint32_t maxLastPageSize = unwindSectionInfo->fFileOffset % 4096;
10553 uint32_t tailPad = 0;
10554 if ( maxLastPageSize < 128 ) {
10555 tailPad = maxLastPageSize;
10556 maxLastPageSize = 4096;
10557 }
10558
10559 // fill in pages in reverse order
10560 unsigned int endIndex = uniqueInfos.size();
10561 unsigned int secondLevelPageCount = 0;
10562 uint8_t* pageEnd = &fPagesContentForDelete[pageCount*4096];
10563 uint32_t pageSize = maxLastPageSize;
10564 while ( endIndex > 0 ) {
10565 endIndex = makeCompressedSecondLevelPage(uniqueInfos, commonEncodings, pageSize, endIndex, pageEnd);
10566 secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
10567 secondLevelFirstFuncs[secondLevelPageCount] = uniqueInfos[endIndex].func;
10568 ++secondLevelPageCount;
10569 pageSize = 4096; // last page can be odd size, make rest up to 4096 bytes in size
10570 }
10571 fPagesContent = pageEnd;
10572 fPagesSize = &fPagesContentForDelete[pageCount*4096] - pageEnd;
10573
10574 // calculate section layout
10575 const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
10576 const uint32_t commonEncodingsArrayCount = commonEncodings.size();
10577 const uint32_t commonEncodingsArraySize = commonEncodingsArrayCount * sizeof(compact_unwind_encoding_t);
10578 const uint32_t personalityArraySectionOffset = commonEncodingsArraySectionOffset + commonEncodingsArraySize;
10579 const uint32_t personalityArrayCount = fPersonalityIndexMap.size();
10580 const uint32_t personalityArraySize = personalityArrayCount * sizeof(uint32_t);
10581 const uint32_t indexSectionOffset = personalityArraySectionOffset + personalityArraySize;
10582 const uint32_t indexCount = secondLevelPageCount+1;
10583 const uint32_t indexSize = indexCount * sizeof(macho_unwind_info_section_header_index_entry<P>);
10584 const uint32_t lsdaIndexArraySectionOffset = indexSectionOffset + indexSize;
10585 const uint32_t lsdaIndexArrayCount = fLSDAIndex.size();
10586 const uint32_t lsdaIndexArraySize = lsdaIndexArrayCount * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
10587 const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;
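// The resulting __unwind_info section lays out as:
//   [ section header                                  ]
//   [ common encodings array                          ]
//   [ personality pointer array                       ]
//   [ first-level index (secondLevelPageCount+1 entries) ]
//   [ LSDA index entries                              ]
//   [ second-level pages (regular and compressed)     ]
// The header part is fHeaderContent; the pages are appended after it by
// copyRawContent().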
10588
10589
10590 // allocate and fill in section header
10591 fHeaderSize = headerEndSectionOffset;
10592 fHeaderContent = new uint8_t[fHeaderSize];
10593 bzero(fHeaderContent, fHeaderSize);
10594 macho_unwind_info_section_header<P>* sectionHeader = (macho_unwind_info_section_header<P>*)fHeaderContent;
10595 sectionHeader->set_version(UNWIND_SECTION_VERSION);
10596 sectionHeader->set_commonEncodingsArraySectionOffset(commonEncodingsArraySectionOffset);
10597 sectionHeader->set_commonEncodingsArrayCount(commonEncodingsArrayCount);
10598 sectionHeader->set_personalityArraySectionOffset(personalityArraySectionOffset);
10599 sectionHeader->set_personalityArrayCount(personalityArrayCount);
10600 sectionHeader->set_indexSectionOffset(indexSectionOffset);
10601 sectionHeader->set_indexCount(indexCount);
10602
10603 // copy common encodings
10604 uint32_t* commonEncodingsTable = (uint32_t*)&fHeaderContent[commonEncodingsArraySectionOffset];
10605 for (std::map<uint32_t, unsigned int>::iterator it=commonEncodings.begin(); it != commonEncodings.end(); ++it)
10606 A::P::E::set32(commonEncodingsTable[it->second], it->first);
10607
10608 // make references for personality entries
10609 uint32_t* personalityArray = (uint32_t*)&fHeaderContent[sectionHeader->personalityArraySectionOffset()];
10610 for (std::map<ObjectFile::Atom*, unsigned int>::iterator it=fPersonalityIndexMap.begin(); it != fPersonalityIndexMap.end(); ++it) {
10611 uint32_t offset = (uint8_t*)&personalityArray[it->second-1] - fHeaderContent;
10612 fReferences.push_back(new WriterReference<A>(offset, A::kImageOffset32, it->first));
10613 }
10614
10615 // build first level index and references
10616 macho_unwind_info_section_header_index_entry<P>* indexTable = (macho_unwind_info_section_header_index_entry<P>*)&fHeaderContent[indexSectionOffset];
10617 for (unsigned int i=0; i < secondLevelPageCount; ++i) {
10618 unsigned int reverseIndex = secondLevelPageCount - 1 - i;
10619 indexTable[i].set_functionOffset(0);
10620 indexTable[i].set_secondLevelPagesSectionOffset(secondLevelPagesStarts[reverseIndex]-fPagesContent+headerEndSectionOffset);
10621 indexTable[i].set_lsdaIndexArraySectionOffset(lsdaIndexOffsetMap[secondLevelFirstFuncs[reverseIndex]]+lsdaIndexArraySectionOffset);
10622 uint32_t refOffset = (uint8_t*)&indexTable[i] - fHeaderContent;
10623 fReferences.push_back(new WriterReference<A>(refOffset, A::kImageOffset32, secondLevelFirstFuncs[reverseIndex]));
10624 }
10625 indexTable[secondLevelPageCount].set_functionOffset(0);
10626 indexTable[secondLevelPageCount].set_secondLevelPagesSectionOffset(0);
10627 indexTable[secondLevelPageCount].set_lsdaIndexArraySectionOffset(lsdaIndexArraySectionOffset+lsdaIndexArraySize);
10628 fReferences.push_back(new WriterReference<A>((uint8_t*)&indexTable[secondLevelPageCount] - fHeaderContent, A::kImageOffset32,
10629 fInfos.back().func, fInfos.back().func->getSize()+1));
10630
10631 // build lsda references
10632 uint32_t lsdaEntrySectionOffset = lsdaIndexArraySectionOffset;
10633 for (typename std::vector<LSDAEntry>::iterator it = fLSDAIndex.begin(); it != fLSDAIndex.end(); ++it) {
10634 fReferences.push_back(new WriterReference<A>(lsdaEntrySectionOffset, A::kImageOffset32, it->func));
10635 fReferences.push_back(new WriterReference<A>(lsdaEntrySectionOffset+4, A::kImageOffset32, it->lsda, it->lsdaOffset));
10636 lsdaEntrySectionOffset += sizeof(unwind_info_section_header_lsda_index_entry);
10637 }
10638
10639 // make references for regular second level entries
10640 for (typename std::vector<RegFixUp>::iterator it = fRegFixUps.begin(); it != fRegFixUps.end(); ++it) {
10641 uint32_t offset = (it->contentPointer - fPagesContent) + fHeaderSize;
10642 fReferences.push_back(new WriterReference<A>(offset, A::kImageOffset32, it->func));
10643 if ( it->fde != NULL )
10644 fReferences.push_back(new WriterReference<A>(offset+4, A::kSectionOffset24, it->fde));
10645 }
10646 // make references for compressed second level entries
10647 for (typename std::vector<CompressedFixUp>::iterator it = fCompressedFixUps.begin(); it != fCompressedFixUps.end(); ++it) {
10648 uint32_t offset = (it->contentPointer - fPagesContent) + fHeaderSize;
10649 fReferences.push_back(new WriterReference<A>(offset, A::kPointerDiff24, it->func, 0, it->fromFunc, 0));
10650 }
10651 for (typename std::vector<CompressedEncodingFixUp>::iterator it = fCompressedEncodingFixUps.begin(); it != fCompressedEncodingFixUps.end(); ++it) {
10652 uint32_t offset = (it->contentPointer - fPagesContent) + fHeaderSize;
10653 fReferences.push_back(new WriterReference<A>(offset, A::kSectionOffset24, it->fde));
10654 }
10655
10656 // update section record with new size
10657 unwindSectionInfo->fSize = this->getSize();
10658
10659 // alter alignment so this section lays out so second level tables are page aligned
10660 if ( secondLevelPageCount > 2 )
10661 fAlignment = ObjectFile::Alignment(12, (unwindSectionInfo->fFileOffset - this->getSize()) % 4096);
10662 }
10663
10664 }
10665
10666
10667
10668
10669 template <typename A>
10670 void UnwindInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
10671 {
10672 memcpy(buffer, fHeaderContent, fHeaderSize);
10673 memcpy(&buffer[fHeaderSize], fPagesContent, fPagesSize);
10674 }
10675
10676
10677
10678 template <typename A>
10679 uint64_t LinkEditAtom<A>::getFileOffset() const
10680 {
10681 return ((SectionInfo*)this->getSection())->fFileOffset + this->getSectionOffset();
10682 }
10683
10684
10685 template <typename A>
10686 uint64_t SectionRelocationsLinkEditAtom<A>::getSize() const
10687 {
10688 return fWriter.fSectionRelocs.size() * sizeof(macho_relocation_info<P>);
10689 }
10690
10691 template <typename A>
10692 void SectionRelocationsLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10693 {
10694 memcpy(buffer, &fWriter.fSectionRelocs[0], this->getSize());
10695 }
10696
10697
10698 template <typename A>
10699 uint64_t LocalRelocationsLinkEditAtom<A>::getSize() const
10700 {
10701 return fWriter.fInternalRelocs.size() * sizeof(macho_relocation_info<P>);
10702 }
10703
10704 template <typename A>
10705 void LocalRelocationsLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10706 {
10707 memcpy(buffer, &fWriter.fInternalRelocs[0], this->getSize());
10708 }
10709
10710
10711
10712 template <typename A>
10713 uint64_t SymbolTableLinkEditAtom<A>::getSize() const
10714 {
10715 return fWriter.fSymbolTableCount * sizeof(macho_nlist<P>);
10716 }
10717
10718 template <typename A>
10719 void SymbolTableLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10720 {
10721 memcpy(buffer, fWriter.fSymbolTable, this->getSize());
10722 }
10723
10724 template <typename A>
10725 uint64_t ExternalRelocationsLinkEditAtom<A>::getSize() const
10726 {
10727 return fWriter.fExternalRelocs.size() * sizeof(macho_relocation_info<P>);
10728 }
10729
10730 template <typename A>
10731 void ExternalRelocationsLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10732 {
10733 std::sort(fWriter.fExternalRelocs.begin(), fWriter.fExternalRelocs.end(), ExternalRelocSorter<P>());
10734 memcpy(buffer, &fWriter.fExternalRelocs[0], this->getSize());
10735 }
10736
10737
10738
10739 template <typename A>
10740 uint64_t IndirectTableLinkEditAtom<A>::getSize() const
10741 {
10742 return fTable.size() * sizeof(uint32_t);
10743 }
10744
10745 template <typename A>
10746 void IndirectTableLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10747 {
10748 uint64_t size = this->getSize();
10749 bzero(buffer, size);
10750 const uint32_t indirectTableSize = fTable.size();
10751 uint32_t* indirectTable = (uint32_t*)buffer;
10752 for(std::vector<IndirectEntry>::const_iterator it = fTable.begin(); it != fTable.end(); ++it) {
10753 if ( it->indirectIndex < indirectTableSize )
10754 A::P::E::set32(indirectTable[it->indirectIndex], it->symbolIndex);
10755 else
10756 throwf("malformed indirect table. size=%d, index=%d", indirectTableSize, it->indirectIndex);
10757 }
10758 }
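// note: each slot in the indirect symbol table corresponds to one stub,
// lazy-pointer or non-lazy-pointer slot; the value written is that slot's
// symbol table index, which is how dyld finds the symbol to bind for it.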
10759
10760
10761
10762 template <typename A>
10763 uint64_t ModuleInfoLinkEditAtom<A>::getSize() const
10764 {
10765 return fWriter.fSymbolTableExportCount*sizeof(macho_dylib_table_of_contents<P>)
10766 + sizeof(macho_dylib_module<P>)
10767 + this->getReferencesCount()*sizeof(uint32_t);
10768 }
10769
10770 template <typename A>
10771 uint32_t ModuleInfoLinkEditAtom<A>::getTableOfContentsFileOffset() const
10772 {
10773 return this->getFileOffset();
10774 }
10775
10776 template <typename A>
10777 uint32_t ModuleInfoLinkEditAtom<A>::getModuleTableFileOffset() const
10778 {
10779 return this->getFileOffset() + fWriter.fSymbolTableExportCount*sizeof(macho_dylib_table_of_contents<P>);
10780 }
10781
10782 template <typename A>
10783 uint32_t ModuleInfoLinkEditAtom<A>::getReferencesFileOffset() const
10784 {
10785 return this->getModuleTableFileOffset() + sizeof(macho_dylib_module<P>);
10786 }
10787
10788 template <typename A>
10789 uint32_t ModuleInfoLinkEditAtom<A>::getReferencesCount() const
10790 {
10791 return fWriter.fSymbolTableExportCount + fWriter.fSymbolTableImportCount;
10792 }
10793
10794 template <typename A>
10795 void ModuleInfoLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10796 {
10797 uint64_t size = this->getSize();
10798 bzero(buffer, size);
10799 // create toc. The symbols are already sorted; they are all in the same module
10800 macho_dylib_table_of_contents<P>* p = (macho_dylib_table_of_contents<P>*)buffer;
10801 for(uint32_t i=0; i < fWriter.fSymbolTableExportCount; ++i, ++p) {
10802 p->set_symbol_index(fWriter.fSymbolTableExportStartIndex+i);
10803 p->set_module_index(0);
10804 }
10805 // create module table (one entry)
10806 pint_t objcModuleSectionStart = 0;
10807 pint_t objcModuleSectionSize = 0;
10808 uint16_t numInits = 0;
10809 uint16_t numTerms = 0;
10810 std::vector<SegmentInfo*>& segmentInfos = fWriter.fSegmentInfos;
10811 for (std::vector<SegmentInfo*>::iterator segit = segmentInfos.begin(); segit != segmentInfos.end(); ++segit) {
10812 std::vector<SectionInfo*>& sectionInfos = (*segit)->fSections;
10813 if ( strcmp((*segit)->fName, "__DATA") == 0 ) {
10814 for (std::vector<SectionInfo*>::iterator sectit = sectionInfos.begin(); sectit != sectionInfos.end(); ++sectit) {
10815 if ( strcmp((*sectit)->fSectionName, "__mod_init_func") == 0 )
10816 numInits = (*sectit)->fSize / sizeof(typename A::P::uint_t);
10817 else if ( strcmp((*sectit)->fSectionName, "__mod_term_func") == 0 )
10818 numTerms = (*sectit)->fSize / sizeof(typename A::P::uint_t);
10819 }
10820 }
10821 else if ( strcmp((*segit)->fName, "__OBJC") == 0 ) {
10822 for (std::vector<SectionInfo*>::iterator sectit = sectionInfos.begin(); sectit != sectionInfos.end(); ++sectit) {
10823 SectionInfo* sectInfo = (*sectit);
10824 if ( strcmp(sectInfo->fSectionName, "__module_info") == 0 ) {
10825 objcModuleSectionStart = sectInfo->getBaseAddress();
10826 objcModuleSectionSize = sectInfo->fSize;
10827 }
10828 }
10829 }
10830 }
10831 macho_dylib_module<P>* module = (macho_dylib_module<P>*)&buffer[fWriter.fSymbolTableExportCount*sizeof(macho_dylib_table_of_contents<P>)];
10832 module->set_module_name(fModuleNameOffset);
10833 module->set_iextdefsym(fWriter.fSymbolTableExportStartIndex);
10834 module->set_nextdefsym(fWriter.fSymbolTableExportCount);
10835 module->set_irefsym(0);
10836 module->set_nrefsym(this->getReferencesCount());
10837 module->set_ilocalsym(fWriter.fSymbolTableStabsStartIndex);
10838 module->set_nlocalsym(fWriter.fSymbolTableStabsCount+fWriter.fSymbolTableLocalCount);
10839 module->set_iextrel(0);
10840 module->set_nextrel(fWriter.fExternalRelocs.size());
10841 module->set_iinit_iterm(0,0);
10842 module->set_ninit_nterm(numInits,numTerms);
10843 module->set_objc_module_info_addr(objcModuleSectionStart);
10844 module->set_objc_module_info_size(objcModuleSectionSize);
10845 // create reference table
10846 macho_dylib_reference<P>* ref = (macho_dylib_reference<P>*)((uint8_t*)module + sizeof(macho_dylib_module<P>));
10847 for(uint32_t i=0; i < fWriter.fSymbolTableExportCount; ++i, ++ref) {
10848 ref->set_isym(fWriter.fSymbolTableExportStartIndex+i);
10849 ref->set_flags(REFERENCE_FLAG_DEFINED);
10850 }
10851 for(uint32_t i=0; i < fWriter.fSymbolTableImportCount; ++i, ++ref) {
10852 ref->set_isym(fWriter.fSymbolTableImportStartIndex+i);
10853 std::map<const ObjectFile::Atom*,ObjectFile::Atom*>::iterator pos = fWriter.fStubsMap.find(fWriter.fImportedAtoms[i]);
10854 if ( pos != fWriter.fStubsMap.end() )
10855 ref->set_flags(REFERENCE_FLAG_UNDEFINED_LAZY);
10856 else
10857 ref->set_flags(REFERENCE_FLAG_UNDEFINED_NON_LAZY);
10858 }
10859 }
10860
10861
10862
10863 template <typename A>
10864 StringsLinkEditAtom<A>::StringsLinkEditAtom(Writer<A>& writer)
10865 : LinkEditAtom<A>(writer), fCurrentBuffer(NULL), fCurrentBufferUsed(0)
10866 {
10867 fCurrentBuffer = new char[kBufferSize];
10868 // burn first byte of string pool (so zero is never a valid string offset)
10869 fCurrentBuffer[fCurrentBufferUsed++] = ' ';
10870 // make offset 1 always point to an empty string
10871 fCurrentBuffer[fCurrentBufferUsed++] = '\0';
10872 }
10873
10874 template <typename A>
10875 uint64_t StringsLinkEditAtom<A>::getSize() const
10876 {
10877 // align size
10878 return (kBufferSize * fFullBuffers.size() + fCurrentBufferUsed + sizeof(typename A::P::uint_t) - 1) & (-sizeof(typename A::P::uint_t));
10879 }
10880
10881 template <typename A>
10882 void StringsLinkEditAtom<A>::copyRawContent(uint8_t buffer[]) const
10883 {
10884 uint64_t offset = 0;
10885 for (unsigned int i=0; i < fFullBuffers.size(); ++i) {
10886 memcpy(&buffer[offset], fFullBuffers[i], kBufferSize);
10887 offset += kBufferSize;
10888 }
10889 memcpy(&buffer[offset], fCurrentBuffer, fCurrentBufferUsed);
10890 // zero fill end to align
10891 offset += fCurrentBufferUsed;
10892 while ( (offset % sizeof(typename A::P::uint_t)) != 0 )
10893 buffer[offset++] = 0;
10894 }
10895
10896 template <typename A>
10897 int32_t StringsLinkEditAtom<A>::add(const char* name)
10898 {
10899 int32_t offset = kBufferSize * fFullBuffers.size() + fCurrentBufferUsed;
10900 int lenNeeded = strlcpy(&fCurrentBuffer[fCurrentBufferUsed], name, kBufferSize-fCurrentBufferUsed)+1;
10901 if ( (fCurrentBufferUsed+lenNeeded) < kBufferSize ) {
10902 fCurrentBufferUsed += lenNeeded;
10903 }
10904 else {
10905 int copied = kBufferSize-fCurrentBufferUsed-1;
10906 // change trailing '\0' that strlcpy added to real char
10907 fCurrentBuffer[kBufferSize-1] = name[copied];
10908 // alloc next buffer
10909 fFullBuffers.push_back(fCurrentBuffer);
10910 fCurrentBuffer = new char[kBufferSize];
10911 fCurrentBufferUsed = 0;
10912 // append rest of string
10913 this->add(&name[copied+1]);
10914 }
10915 return offset;
10916 }
10917
10918
10919 template <typename A>
10920 int32_t StringsLinkEditAtom<A>::addUnique(const char* name)
10921 {
10922 StringToOffset::iterator pos = fUniqueStrings.find(name);
10923 if ( pos != fUniqueStrings.end() ) {
10924 return pos->second;
10925 }
10926 else {
10927 int32_t offset = this->add(name);
10928 fUniqueStrings[name] = offset;
10929 return offset;
10930 }
10931 }
10932
10933
10934 template <typename A>
10935 const char* StringsLinkEditAtom<A>::stringForIndex(int32_t index) const
10936 {
10937 int32_t currentBufferStartIndex = kBufferSize * fFullBuffers.size();
10938 int32_t maxIndex = currentBufferStartIndex + fCurrentBufferUsed;
10939 // check for out of bounds
10940 if ( index > maxIndex )
10941 return "";
10942 // check for index in fCurrentBuffer
10943 if ( index > currentBufferStartIndex )
10944 return &fCurrentBuffer[index-currentBufferStartIndex];
10945 // otherwise index is in a full buffer
10946 uint32_t fullBufferIndex = index/kBufferSize;
10947 return &fFullBuffers[fullBufferIndex][index-(kBufferSize*fullBufferIndex)];
10948 }
10949
10950
10951
10952 template <typename A>
10953 BranchIslandAtom<A>::BranchIslandAtom(Writer<A>& writer, const char* name, int islandRegion, ObjectFile::Atom& target,
10954 ObjectFile::Atom& finalTarget, uint32_t finalTargetOffset)
10955 : WriterAtom<A>(writer, Segment::fgTextSegment), fTarget(target), fFinalTarget(finalTarget), fFinalTargetOffset(finalTargetOffset)
10956 {
10957 if ( finalTargetOffset == 0 ) {
10958 if ( islandRegion == 0 )
10959 asprintf((char**)&fName, "%s$island", name);
10960 else
10961 asprintf((char**)&fName, "%s$island$%d", name, islandRegion+1);
10962 }
10963 else {
10964 asprintf((char**)&fName, "%s_plus_%d$island$%d", name, finalTargetOffset, islandRegion);
10965 }
10966
10967 if ( finalTarget.isThumb() ) {
10968 if ( writer.fOptions.preferSubArchitecture() && writer.fOptions.subArchitecture() == CPU_SUBTYPE_ARM_V7 ) {
10969 fIslandKind = kBranchIslandToThumb2;
10970 }
10971 else {
10972 if ( writer.fSlideable )
10973 fIslandKind = kBranchIslandToThumb1;
10974 else
10975 fIslandKind = kBranchIslandNoPicToThumb1;
10976 }
10977 }
10978 else {
10979 fIslandKind = kBranchIslandToARM;
10980 }
10981 }
10982
10983
10984 template <>
10985 void BranchIslandAtom<ppc>::copyRawContent(uint8_t buffer[]) const
10986 {
10987 int64_t displacement;
10988 const int64_t bl_sixteenMegLimit = 0x00FFFFFF;
10989 if ( fTarget.getContentType() == ObjectFile::Atom::kBranchIsland ) {
10990 displacement = getFinalTargetAdress() - this->getAddress();
10991 if ( (displacement > bl_sixteenMegLimit) || (displacement < (-bl_sixteenMegLimit)) ) {
10992 displacement = fTarget.getAddress() - this->getAddress();
10993 }
10994 }
10995 else {
10996 displacement = fTarget.getAddress() + fFinalTargetOffset - this->getAddress();
10997 }
10998 int32_t branchInstruction = 0x48000000 | ((uint32_t)displacement & 0x03FFFFFC);
10999 OSWriteBigInt32(buffer, 0, branchInstruction);
11000 }
11001
11002 template <>
11003 void BranchIslandAtom<ppc64>::copyRawContent(uint8_t buffer[]) const
11004 {
11005 int64_t displacement;
11006 const int64_t bl_sixteenMegLimit = 0x00FFFFFF;
11007 if ( fTarget.getContentType() == ObjectFile::Atom::kBranchIsland ) {
11008 displacement = getFinalTargetAdress() - this->getAddress();
11009 if ( (displacement > bl_sixteenMegLimit) || (displacement < (-bl_sixteenMegLimit)) ) {
11010 displacement = fTarget.getAddress() - this->getAddress();
11011 }
11012 }
11013 else {
11014 displacement = fTarget.getAddress() + fFinalTargetOffset - this->getAddress();
11015 }
11016 int32_t branchInstruction = 0x48000000 | ((uint32_t)displacement & 0x03FFFFFC);
11017 OSWriteBigInt32(buffer, 0, branchInstruction);
11018 }
11019
11020 template <>
11021 void BranchIslandAtom<arm>::copyRawContent(uint8_t buffer[]) const
11022 {
11023 const bool log = false;
11024 switch ( fIslandKind ) {
11025 case kBranchIslandToARM:
11026 {
11027 int64_t displacement;
11028 // an ARM branch can branch farther than a thumb branch. The branch
11029 // island generation was conservative and put islands every thumb
11030 // branch distance apart. Check to see if this is an island
11031 // hopping branch that could be optimized to go directly to the target.
11032 if ( fTarget.getContentType() == ObjectFile::Atom::kBranchIsland ) {
11033 displacement = getFinalTargetAdress() - this->getAddress() - 8;
11034 if ( (displacement < 33554428LL) && (displacement > (-33554432LL)) ) {
11035 // can skip branch island and jump straight to target
11036 if (log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n", fName, getFinalTargetAdress(), this->getAddress());
11037 }
11038 else {
11039 // ultimate target is too far, jump to island
11040 displacement = fTarget.getAddress() - this->getAddress() - 8;
11041 if (log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n", fName, fTarget.getAddress());
11042 }
11043 }
11044 else {
11045 // target of island is ultimate target
11046 displacement = fTarget.getAddress() + fFinalTargetOffset - this->getAddress() - 8;
11047 if (log) fprintf(stderr, "%s: jump to target at 0x%08llX\n", fName, fTarget.getAddress());
11048 }
11049 uint32_t imm24 = (displacement >> 2) & 0x00FFFFFF;
11050 int32_t branchInstruction = 0xEA000000 | imm24;
11051 OSWriteLittleInt32(buffer, 0, branchInstruction);
11052 }
11053 break;
11054 case kBranchIslandToThumb2:
11055 {
11056 int64_t displacement;
11057 // an ARM branch can branch farther than a thumb branch. The branch
11058 // island generation was conservative and put islands every thumb
11059 // branch distance apart. Check to see if this is an island
11060 // hopping branch that could be optimized to go directly to the target.
11061 if ( fTarget.getContentType() == ObjectFile::Atom::kBranchIsland ) {
11062 displacement = getFinalTargetAdress() - this->getAddress() - 4;
11063 if ( (displacement < 16777214) && (displacement > (-16777216LL)) ) {
11064 // can skip branch island and jump straight to target
11065 if (log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n", fName, getFinalTargetAdress(), this->getAddress());
11066 }
11067 else {
11068 // ultimate target is too far, jump to island
11069 displacement = fTarget.getAddress() - this->getAddress() - 4;
11070 if (log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n", fName, fTarget.getAddress());
11071 }
11072 }
11073 else {
11074 // target of island is ultimate target
11075 displacement = fTarget.getAddress() + fFinalTargetOffset - this->getAddress() - 4;
11076 if (log) fprintf(stderr, "%s: jump to target at 0x%08llX\n", fName, fTarget.getAddress());
11077 }
11078 if ( (displacement > 16777214) || (displacement < (-16777216LL)) ) {
11079 throwf("internal branch island error: thumb2 b/bx out of range (%lld max is +/-16M) from %s to %s in %s",
11080 displacement, this->getDisplayName(),
11081 fTarget.getDisplayName(), fTarget.getFile()->getPath());
11082 }
11083 // The instruction is really two instructions:
11084 // The lower 16 bits are the first instruction, which contains the high
11085 // 11 bits of the displacement.
11086 // The upper 16 bits are the second instruction, which contains the low
11087 // 11 bits of the displacement, as well as differentiating bl and blx.
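// note: the assembled word is a Thumb-2 encoding T4 unconditional branch (B.W):
// the low halfword (0xF000 | firstDisp) is 11110:S:imm10, the high halfword
// (0x9000 | nextDisp) is 10:J1:1:J2:imm11, and the j1/j2 computation below
// undoes the I1/I2 folding of the sign bit.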
11088 uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
11089 uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
11090 uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
11091 uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
11092 uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
11093 uint32_t j1 = (i1 == s);
11094 uint32_t j2 = (i2 == s);
11095 uint32_t opcode = 0x9000F000;
11096 uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
11097 uint32_t firstDisp = (s << 10) | imm10;
11098 uint32_t newInstruction = opcode | (nextDisp << 16) | firstDisp;
11099 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
11100 // s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
11101 OSWriteLittleInt32(buffer, 0, newInstruction);
11102 }
11103 break;
11104 case kBranchIslandToThumb1:
11105 {
11106 // There is no large displacement thumb1 branch instruction.
11107 // Instead use ARM instructions that can jump to thumb.
11108 // we use a 32-bit displacement, so we can directly jump to target which means no island hopping
11109 int64_t displacement = getFinalTargetAdress() - (this->getAddress() + 12);
11110 if ( fFinalTarget.isThumb() )
11111 displacement |= 1;
11112 if (log) fprintf(stderr, "%s: 4 ARM instruction jump to final target at 0x%08llX\n", fName, getFinalTargetAdress());
11113 OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc004); // ldr ip, pc + 4
11114 OSWriteLittleInt32(&buffer[ 4], 0, 0xe08fc00c); // add ip, pc, ip
11115 OSWriteLittleInt32(&buffer[ 8], 0, 0xe12fff1c); // bx ip
11116 OSWriteLittleInt32(&buffer[12], 0, displacement); // .long target - (island+12)
11117 }
11118 break;
11119 case kBranchIslandNoPicToThumb1:
11120 {
11121 // There is no large-displacement Thumb-1 branch instruction.
11122 // Instead, use ARM instructions that can branch to Thumb.
11123 // Non-PIC case: a 32-bit absolute target address is stored, so the island jumps directly to the target (no island hopping).
11124 uint32_t targetAddr = getFinalTargetAdress();
11125 if ( fFinalTarget.isThumb() )
11126 targetAddr |= 1;
11127 if (log) fprintf(stderr, "%s: 2 ARM instruction jump to final target at 0x%08llX\n", fName, getFinalTargetAdress());
11128 OSWriteLittleInt32(&buffer[0], 0, 0xe51ff004); // ldr pc, [pc, #-4]
11129 OSWriteLittleInt32(&buffer[4], 0, targetAddr); // .long absolute-target-address
11130 }
11131 break;
11132 };
11133 }
11134
11135 template <>
11136 uint64_t BranchIslandAtom<ppc>::getSize() const
11137 {
11138 return 4;
11139 }
11140
11141 template <>
11142 uint64_t BranchIslandAtom<ppc64>::getSize() const
11143 {
11144 return 4;
11145 }
11146
11147 template <>
11148 uint64_t BranchIslandAtom<arm>::getSize() const
11149 {
11150 switch ( fIslandKind ) {
11151 case kBranchIslandToARM:
11152 return 4;
11153 case kBranchIslandToThumb1:
11154 return 16;
11155 case kBranchIslandToThumb2:
11156 return 4;
11157 case kBranchIslandNoPicToThumb1:
11158 return 8;
11159 };
11160 throw "internal error: no ARM branch island kind";
11161 }
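// These sizes mirror the sequences emitted by copyRawContent() above: a single
// 4-byte branch for kBranchIslandToARM and kBranchIslandToThumb2, the four-word
// PIC sequence (ldr/add/bx plus a 32-bit literal) for kBranchIslandToThumb1, and
// the two-word ldr-pc/literal pair for kBranchIslandNoPicToThumb1.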
11162
11163
11164
11165 template <typename A>
11166 uint64_t SegmentSplitInfoLoadCommandsAtom<A>::getSize() const
11167 {
11168 if ( fWriter.fSplitCodeToDataContentAtom->canEncode() )
11169 return this->alignedSize(sizeof(macho_linkedit_data_command<P>));
11170 else
11171 return 0; // a zero size causes the load command to be suppressed
11172 }
11173
11174 template <typename A>
11175 void SegmentSplitInfoLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
11176 {
11177 uint64_t size = this->getSize();
11178 if ( size > 0 ) {
11179 bzero(buffer, size);
11180 macho_linkedit_data_command<P>* cmd = (macho_linkedit_data_command<P>*)buffer;
11181 cmd->set_cmd(LC_SEGMENT_SPLIT_INFO);
11182 cmd->set_cmdsize(size);
11183 cmd->set_dataoff(fWriter.fSplitCodeToDataContentAtom->getFileOffset());
11184 cmd->set_datasize(fWriter.fSplitCodeToDataContentAtom->getSize());
11185 }
11186 }
11187
11188
11189 template <typename A>
11190 uint64_t SegmentSplitInfoContentAtom<A>::getSize() const
11191 {
11192 return fEncodedData.size();
11193 }
11194
11195 template <typename A>
11196 void SegmentSplitInfoContentAtom<A>::copyRawContent(uint8_t buffer[]) const
11197 {
11198 memcpy(buffer, &fEncodedData[0], fEncodedData.size());
11199 }
11200
11201
11202 template <typename A>
11203 void SegmentSplitInfoContentAtom<A>::uleb128EncodeAddresses(const std::vector<SegmentSplitInfoContentAtom<A>::AtomAndOffset>& locations)
11204 {
11205 pint_t addr = fWriter.fOptions.baseAddress();
11206 for(typename std::vector<AtomAndOffset>::const_iterator it = locations.begin(); it != locations.end(); ++it) {
11207 pint_t nextAddr = it->atom->getAddress() + it->offset;
11208 //fprintf(stderr, "\t0x%0llX\n", (uint64_t)nextAddr);
11209 uint64_t delta = nextAddr - addr;
11210 if ( delta == 0 )
11211 throw "double split seg info for same address";
11212 // uleb128 encode
11213 uint8_t byte;
11214 do {
11215 byte = delta & 0x7F;
11216 delta &= ~0x7F;
11217 if ( delta != 0 )
11218 byte |= 0x80;
11219 fEncodedData.push_back(byte);
11220 delta = delta >> 7;
11221 }
11222 while( byte >= 0x80 );
11223 addr = nextAddr;
11224 }
11225 }
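// Worked example of the uleb128 loop above (illustrative): a delta of 144 (0x90)
// takes two bytes. First pass: byte = 0x90 & 0x7F = 0x10, more bits remain, so
// the continuation bit is set and 0x90 is pushed; delta becomes 1. Second pass:
// byte = 0x01, no continuation, 0x01 is pushed. The encoded sequence 0x90 0x01
// is the standard uleb128 form of 144.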
11226
11227 template <typename A>
11228 void SegmentSplitInfoContentAtom<A>::encode()
11229 {
11230 if ( ! fCantEncode ) {
11231 fEncodedData.reserve(8192);
11232
11233 if ( fKind1Locations.size() != 0 ) {
11234 fEncodedData.push_back(1);
11235 //fprintf(stderr, "type 1:\n");
11236 this->uleb128EncodeAddresses(fKind1Locations);
11237 fEncodedData.push_back(0);
11238 }
11239
11240 if ( fKind2Locations.size() != 0 ) {
11241 fEncodedData.push_back(2);
11242 //fprintf(stderr, "type 2:\n");
11243 this->uleb128EncodeAddresses(fKind2Locations);
11244 fEncodedData.push_back(0);
11245 }
11246
11247 if ( fKind3Locations.size() != 0 ) {
11248 fEncodedData.push_back(3);
11249 //fprintf(stderr, "type 3:\n");
11250 this->uleb128EncodeAddresses(fKind3Locations);
11251 fEncodedData.push_back(0);
11252 }
11253
11254 if ( fKind4Locations.size() != 0 ) {
11255 fEncodedData.push_back(4);
11256 //fprintf(stderr, "type 4:\n");
11257 this->uleb128EncodeAddresses(fKind4Locations);
11258 fEncodedData.push_back(0);
11259 }
11260
11261 // always add zero byte to mark end
11262 fEncodedData.push_back(0);
11263
11264 // add zeros to end to align size
11265 while ( (fEncodedData.size() % sizeof(pint_t)) != 0 )
11266 fEncodedData.push_back(0);
11267 }
11268 }
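// Summary of the stream built above: for each non-empty kind the data contains
// a one-byte kind tag (1-4), the uleb128-encoded deltas between successive fixup
// addresses (starting from the base address), and a terminating 0x00; a final
// 0x00 marks the end of the whole stream, which is then zero-padded up to a
// multiple of the pointer size.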
11269
11270
11271 template <typename A>
11272 ObjCInfoAtom<A>::ObjCInfoAtom(Writer<A>& writer, ObjectFile::Reader::ObjcConstraint objcConstraint, bool objcReplacementClasses)
11273 : WriterAtom<A>(writer, getInfoSegment())
11274 {
11275 fContent[0] = 0;
11276 uint32_t value = 0;
11277 // struct objc_image_info {
11278 // uint32_t version; // initially 0
11279 // uint32_t flags;
11280 // };
11281 // #define OBJC_IMAGE_SUPPORTS_GC 2
11282 // #define OBJC_IMAGE_GC_ONLY 4
11283 //
11284 if ( objcReplacementClasses )
11285 value = 1;
11286 switch ( objcConstraint ) {
11287 case ObjectFile::Reader::kObjcNone:
11288 case ObjectFile::Reader::kObjcRetainRelease:
11289 break;
11290 case ObjectFile::Reader::kObjcRetainReleaseOrGC:
11291 value |= 2;
11292 break;
11293 case ObjectFile::Reader::kObjcGC:
11294 value |= 6;
11295 break;
11296 }
11297 A::P::E::set32(fContent[1], value);
11298 }
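// For illustration: GC-only Objective-C code built with replacement classes
// yields flags == 0x7 above (replacement-classes bit 0x1, plus
// OBJC_IMAGE_SUPPORTS_GC 0x2 and OBJC_IMAGE_GC_ONLY 0x4), while plain
// retain/release code leaves flags == 0.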
11299
11300 template <typename A>
11301 void ObjCInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
11302 {
11303 memcpy(buffer, &fContent[0], 8);
11304 }
11305
11306
11307 // objc info section is in a different segment and section for 32 vs 64 bit runtimes
11308 template <> const char* ObjCInfoAtom<ppc>::getSectionName() const { return "__image_info"; }
11309 template <> const char* ObjCInfoAtom<x86>::getSectionName() const { return "__image_info"; }
11310 template <> const char* ObjCInfoAtom<arm>::getSectionName() const { return "__objc_imageinfo"; }
11311 template <> const char* ObjCInfoAtom<ppc64>::getSectionName() const { return "__objc_imageinfo"; }
11312 template <> const char* ObjCInfoAtom<x86_64>::getSectionName() const { return "__objc_imageinfo"; }
11313
11314 template <> Segment& ObjCInfoAtom<ppc>::getInfoSegment() const { return Segment::fgObjCSegment; }
11315 template <> Segment& ObjCInfoAtom<x86>::getInfoSegment() const { return Segment::fgObjCSegment; }
11316 template <> Segment& ObjCInfoAtom<ppc64>::getInfoSegment() const { return Segment::fgDataSegment; }
11317 template <> Segment& ObjCInfoAtom<x86_64>::getInfoSegment() const { return Segment::fgDataSegment; }
11318 template <> Segment& ObjCInfoAtom<arm>::getInfoSegment() const { return Segment::fgDataSegment; }
11319
11320
11321
11322
11323 template <typename A>
11324 void DyldInfoLoadCommandsAtom<A>::copyRawContent(uint8_t buffer[]) const
11325 {
11326 // build LC_DYLD_INFO command
11327 macho_dyld_info_command<P>* cmd = (macho_dyld_info_command<P>*)buffer;
11328 bzero(cmd, sizeof(macho_dyld_info_command<P>));
11329
11330 cmd->set_cmd( fWriter.fOptions.makeClassicDyldInfo() ? LC_DYLD_INFO : LC_DYLD_INFO_ONLY);
11331 cmd->set_cmdsize(sizeof(macho_dyld_info_command<P>));
11332 if ( (fWriter.fCompressedRebaseInfoAtom != NULL) && (fWriter.fCompressedRebaseInfoAtom->getSize() != 0) ) {
11333 cmd->set_rebase_off(fWriter.fCompressedRebaseInfoAtom->getFileOffset());
11334 cmd->set_rebase_size(fWriter.fCompressedRebaseInfoAtom->getSize());
11335 }
11336 if ( (fWriter.fCompressedBindingInfoAtom != NULL) && (fWriter.fCompressedBindingInfoAtom->getSize() != 0) ) {
11337 cmd->set_bind_off(fWriter.fCompressedBindingInfoAtom->getFileOffset());
11338 cmd->set_bind_size(fWriter.fCompressedBindingInfoAtom->getSize());
11339 }
11340 if ( (fWriter.fCompressedWeakBindingInfoAtom != NULL) && (fWriter.fCompressedWeakBindingInfoAtom->getSize() != 0) ) {
11341 cmd->set_weak_bind_off(fWriter.fCompressedWeakBindingInfoAtom->getFileOffset());
11342 cmd->set_weak_bind_size(fWriter.fCompressedWeakBindingInfoAtom->getSize());
11343 }
11344 if ( (fWriter.fCompressedLazyBindingInfoAtom != NULL) && (fWriter.fCompressedLazyBindingInfoAtom->getSize() != 0) ) {
11345 cmd->set_lazy_bind_off(fWriter.fCompressedLazyBindingInfoAtom->getFileOffset());
11346 cmd->set_lazy_bind_size(fWriter.fCompressedLazyBindingInfoAtom->getSize());
11347 }
11348 if ( (fWriter.fCompressedExportInfoAtom != NULL) && (fWriter.fCompressedExportInfoAtom->getSize() != 0) ) {
11349 cmd->set_export_off(fWriter.fCompressedExportInfoAtom->getFileOffset());
11350 cmd->set_export_size(fWriter.fCompressedExportInfoAtom->getSize());
11351 }
11352 }
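// Note: any compressed-info atom that is missing or empty leaves its
// offset/size pair zeroed (from the bzero above), signalling that the image
// carries no info of that kind.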
11353
11354
11355 struct rebase_tmp
11356 {
11357 rebase_tmp(uint8_t op, uint64_t p1, uint64_t p2=0) : opcode(op), operand1(p1), operand2(p2) {}
11358 uint8_t opcode;
11359 uint64_t operand1;
11360 uint64_t operand2;
11361 };
11362
11363
11364 template <typename A>
11365 void CompressedRebaseInfoLinkEditAtom<A>::encode()
11366 {
11367 // sort rebase info by type, then address
11368 const std::vector<SegmentInfo*>& segments = fWriter.fSegmentInfos;
11369 std::vector<RebaseInfo>& info = fWriter.fRebaseInfo;
11370 std::sort(info.begin(), info.end());
11371
11372 // convert to temp encoding that can be more easily optimized
11373 std::vector<rebase_tmp> mid;
11374 const SegmentInfo* currentSegment = NULL;
11375 unsigned int segIndex = 0;
11376 uint8_t type = 0;
11377 uint64_t address = (uint64_t)(-1);
11378 for (std::vector<RebaseInfo>::iterator it = info.begin(); it != info.end(); ++it) {
11379 if ( type != it->fType ) {
11380 mid.push_back(rebase_tmp(REBASE_OPCODE_SET_TYPE_IMM, it->fType));
11381 type = it->fType;
11382 }
11383 if ( address != it->fAddress ) {
11384 if ( (currentSegment == NULL) || (it->fAddress < currentSegment->fBaseAddress)
11385 || ((currentSegment->fBaseAddress+currentSegment->fSize) <= it->fAddress) ) {
11386 segIndex = 0;
11387 for (std::vector<SegmentInfo*>::const_iterator segit = segments.begin(); segit != segments.end(); ++segit) {
11388 if ( ((*segit)->fBaseAddress <= it->fAddress) && (it->fAddress < ((*segit)->fBaseAddress+(*segit)->fSize)) ) {
11389 currentSegment = *segit;
11390 break;
11391 }
11392 ++segIndex;
11393 }
11394 mid.push_back(rebase_tmp(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB, segIndex, it->fAddress - currentSegment->fBaseAddress));
11395 }
11396 else {
11397 mid.push_back(rebase_tmp(REBASE_OPCODE_ADD_ADDR_ULEB, it->fAddress-address));
11398 }
11399 address = it->fAddress;
11400 }
11401 mid.push_back(rebase_tmp(REBASE_OPCODE_DO_REBASE_ULEB_TIMES, 1));
11402 address += sizeof(pint_t);
11403 }
11404 mid.push_back(rebase_tmp(REBASE_OPCODE_DONE, 0));
11405
11406 // optimize phase 1, compress packed runs of pointers
11407 rebase_tmp* dst = &mid[0];
11408 for (const rebase_tmp* src = &mid[0]; src->opcode != REBASE_OPCODE_DONE; ++src) {
11409 if ( (src->opcode == REBASE_OPCODE_DO_REBASE_ULEB_TIMES) && (src->operand1 == 1) ) {
11410 *dst = *src++;
11411 while (src->opcode == REBASE_OPCODE_DO_REBASE_ULEB_TIMES ) {
11412 dst->operand1 += src->operand1;
11413 ++src;
11414 }
11415 --src;
11416 ++dst;
11417 }
11418 else {
11419 *dst++ = *src;
11420 }
11421 }
11422 dst->opcode = REBASE_OPCODE_DONE;
11423
11424 // optimize phase 2, combine rebase/add pairs
11425 dst = &mid[0];
11426 for (const rebase_tmp* src = &mid[0]; src->opcode != REBASE_OPCODE_DONE; ++src) {
11427 if ( (src->opcode == REBASE_OPCODE_DO_REBASE_ULEB_TIMES)
11428 && (src->operand1 == 1)
11429 && (src[1].opcode == REBASE_OPCODE_ADD_ADDR_ULEB)) {
11430 dst->opcode = REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB;
11431 dst->operand1 = src[1].operand1;
11432 ++src;
11433 ++dst;
11434 }
11435 else {
11436 *dst++ = *src;
11437 }
11438 }
11439 dst->opcode = REBASE_OPCODE_DONE;
11440
11441 // optimize phase 3, compress packed runs of REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB with
11442 // same addr delta into one REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
11443 dst = &mid[0];
11444 for (const rebase_tmp* src = &mid[0]; src->opcode != REBASE_OPCODE_DONE; ++src) {
11445 uint64_t delta = src->operand1;
11446 if ( (src->opcode == REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB)
11447 && (src[1].opcode == REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB)
11448 && (src[2].opcode == REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB)
11449 && (src[1].operand1 == delta)
11450 && (src[2].operand1 == delta) ) {
11451 // found at least three in a row, this is worth compressing
11452 dst->opcode = REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB;
11453 dst->operand1 = 1;
11454 dst->operand2 = delta;
11455 ++src;
11456 while ( (src->opcode == REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB)
11457 && (src->operand1 == delta) ) {
11458 dst->operand1++;
11459 ++src;
11460 }
11461 --src;
11462 ++dst;
11463 }
11464 else {
11465 *dst++ = *src;
11466 }
11467 }
11468 dst->opcode = REBASE_OPCODE_DONE;
11469
11470 // optimize phase 4, use immediate encodings
11471 for (rebase_tmp* p = &mid[0]; p->opcode != REBASE_OPCODE_DONE; ++p) {
11472 if ( (p->opcode == REBASE_OPCODE_ADD_ADDR_ULEB)
11473 && (p->operand1 < (15*sizeof(pint_t)))
11474 && ((p->operand1 % sizeof(pint_t)) == 0) ) {
11475 p->opcode = REBASE_OPCODE_ADD_ADDR_IMM_SCALED;
11476 p->operand1 = p->operand1/sizeof(pint_t);
11477 }
11478 else if ( (p->opcode == REBASE_OPCODE_DO_REBASE_ULEB_TIMES) && (p->operand1 < 15) ) {
11479 p->opcode = REBASE_OPCODE_DO_REBASE_IMM_TIMES;
11480 }
11481 }
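// Illustrative walk-through of the phases above for three pointer rebases at
// consecutive addresses (hypothetical segment index 1, offset 0x10):
//   initial:    SET_TYPE_IMM, SET_SEGMENT_AND_OFFSET_ULEB(1, 0x10),
//               DO_REBASE_ULEB_TIMES(1) three times
//   phase 1:    the run collapses into a single DO_REBASE_ULEB_TIMES(3)
//   phases 2/3: no rebase/add pairs present, so nothing changes
//   phase 4:    DO_REBASE_ULEB_TIMES(3) becomes DO_REBASE_IMM_TIMES(3),
//               saving the uleb128 count byte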
11482
11483 // convert to compressed encoding
11484 const static bool log = false;
11485 fEncodedData.reserve(info.size()*2);
11486 bool done = false;
11487 for (std::vector<rebase_tmp>::iterator it = mid.begin(); !done && it != mid.end() ; ++it) {
11488 switch ( it->opcode ) {
11489 case REBASE_OPCODE_DONE:
11490 if ( log ) fprintf(stderr, "REBASE_OPCODE_DONE()\n");
11491 done = true;
11492 break;
11493 case REBASE_OPCODE_SET_TYPE_IMM:
11494 if ( log ) fprintf(stderr, "REBASE_OPCODE_SET_TYPE_IMM(%lld)\n", it->operand1);
11495 fEncodedData.append_byte(REBASE_OPCODE_SET_TYPE_IMM | it->operand1);
11496 break;
11497 case REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
11498 if ( log ) fprintf(stderr, "REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB(%lld, 0x%llX)\n", it->operand1, it->operand2);
11499 fEncodedData.append_byte(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | it->operand1);
11500 fEncodedData.append_uleb128(it->operand2);
11501 break;
11502 case REBASE_OPCODE_ADD_ADDR_ULEB:
11503 if ( log ) fprintf(stderr, "REBASE_OPCODE_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11504 fEncodedData.append_byte(REBASE_OPCODE_ADD_ADDR_ULEB);
11505 fEncodedData.append_uleb128(it->operand1);
11506 break;
11507 case REBASE_OPCODE_ADD_ADDR_IMM_SCALED:
11508 if ( log ) fprintf(stderr, "REBASE_OPCODE_ADD_ADDR_IMM_SCALED(%lld=0x%llX)\n", it->operand1, it->operand1*sizeof(pint_t));
11509 fEncodedData.append_byte(REBASE_OPCODE_ADD_ADDR_IMM_SCALED | it->operand1 );
11510 break;
11511 case REBASE_OPCODE_DO_REBASE_IMM_TIMES:
11512 if ( log ) fprintf(stderr, "REBASE_OPCODE_DO_REBASE_IMM_TIMES(%lld)\n", it->operand1);
11513 fEncodedData.append_byte(REBASE_OPCODE_DO_REBASE_IMM_TIMES | it->operand1);
11514 break;
11515 case REBASE_OPCODE_DO_REBASE_ULEB_TIMES:
11516 if ( log ) fprintf(stderr, "REBASE_OPCODE_DO_REBASE_ULEB_TIMES(%lld)\n", it->operand1);
11517 fEncodedData.append_byte(REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
11518 fEncodedData.append_uleb128(it->operand1);
11519 break;
11520 case REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB:
11521 if ( log ) fprintf(stderr, "REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11522 fEncodedData.append_byte(REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB);
11523 fEncodedData.append_uleb128(it->operand1);
11524 break;
11525 case REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB:
11526 if ( log ) fprintf(stderr, "REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB(%lld, %lld)\n", it->operand1, it->operand2);
11527 fEncodedData.append_byte(REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB);
11528 fEncodedData.append_uleb128(it->operand1);
11529 fEncodedData.append_uleb128(it->operand2);
11530 break;
11531 }
11532 }
11533
11534
11535 // align to pointer size
11536 fEncodedData.pad_to_size(sizeof(pint_t));
11537
11538 if (log) fprintf(stderr, "total rebase info size = %ld\n", fEncodedData.size());
11539 }
11540
11541
11542 struct binding_tmp
11543 {
11544 binding_tmp(uint8_t op, uint64_t p1, uint64_t p2=0, const char* s=NULL)
11545 : opcode(op), operand1(p1), operand2(p2), name(s) {}
11546 uint8_t opcode;
11547 uint64_t operand1;
11548 uint64_t operand2;
11549 const char* name;
11550 };
11551
11552
11553
11554 template <typename A>
11555 void CompressedBindingInfoLinkEditAtom<A>::encode()
11556 {
11557 // sort by library, symbol, type, then address
11558 const std::vector<SegmentInfo*>& segments = fWriter.fSegmentInfos;
11559 std::vector<BindingInfo>& info = fWriter.fBindingInfo;
11560 std::sort(info.begin(), info.end());
11561
11562 // convert to temp encoding that can be more easily optimized
11563 std::vector<binding_tmp> mid;
11564 const SegmentInfo* currentSegment = NULL;
11565 unsigned int segIndex = 0;
11566 int ordinal = 0x80000000;
11567 const char* symbolName = NULL;
11568 uint8_t type = 0;
11569 uint64_t address = (uint64_t)(-1);
11570 int64_t addend = 0;
11571 for (std::vector<BindingInfo>::iterator it = info.begin(); it != info.end(); ++it) {
11572 if ( ordinal != it->fLibraryOrdinal ) {
11573 if ( it->fLibraryOrdinal <= 0 ) {
11574 // special lookups are encoded as negative numbers in BindingInfo
11575 mid.push_back(binding_tmp(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM, it->fLibraryOrdinal));
11576 }
11577 else {
11578 mid.push_back(binding_tmp(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB, it->fLibraryOrdinal));
11579 }
11580 ordinal = it->fLibraryOrdinal;
11581 }
11582 if ( symbolName != it->fSymbolName ) {
11583 mid.push_back(binding_tmp(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, it->fFlags, 0, it->fSymbolName));
11584 symbolName = it->fSymbolName;
11585 }
11586 if ( type != it->fType ) {
11587 mid.push_back(binding_tmp(BIND_OPCODE_SET_TYPE_IMM, it->fType));
11588 type = it->fType;
11589 }
11590 if ( address != it->fAddress ) {
11591 if ( (currentSegment == NULL) || (it->fAddress < currentSegment->fBaseAddress)
11592 || ((currentSegment->fBaseAddress+currentSegment->fSize) <=it->fAddress)
11593 || (it->fAddress < address) ) {
11594 segIndex = 0;
11595 for (std::vector<SegmentInfo*>::const_iterator segit = segments.begin(); segit != segments.end(); ++segit) {
11596 if ( ((*segit)->fBaseAddress <= it->fAddress) && (it->fAddress < ((*segit)->fBaseAddress+(*segit)->fSize)) ) {
11597 currentSegment = *segit;
11598 break;
11599 }
11600 ++segIndex;
11601 }
11602 mid.push_back(binding_tmp(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB, segIndex, it->fAddress - currentSegment->fBaseAddress));
11603 }
11604 else {
11605 mid.push_back(binding_tmp(BIND_OPCODE_ADD_ADDR_ULEB, it->fAddress-address));
11606 }
11607 address = it->fAddress;
11608 }
11609 if ( addend != it->fAddend ) {
11610 mid.push_back(binding_tmp(BIND_OPCODE_SET_ADDEND_SLEB, it->fAddend));
11611 addend = it->fAddend;
11612 }
11613 mid.push_back(binding_tmp(BIND_OPCODE_DO_BIND, 0));
11614 address += sizeof(pint_t);
11615 }
11616 mid.push_back(binding_tmp(BIND_OPCODE_DONE, 0));
11617
11618
11619 // optimize phase 1, combine bind/add pairs
11620 binding_tmp* dst = &mid[0];
11621 for (const binding_tmp* src = &mid[0]; src->opcode != BIND_OPCODE_DONE; ++src) {
11622 if ( (src->opcode == BIND_OPCODE_DO_BIND)
11623 && (src[1].opcode == BIND_OPCODE_ADD_ADDR_ULEB) ) {
11624 dst->opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB;
11625 dst->operand1 = src[1].operand1;
11626 ++src;
11627 ++dst;
11628 }
11629 else {
11630 *dst++ = *src;
11631 }
11632 }
11633 dst->opcode = BIND_OPCODE_DONE;
11634
11635 // optimize phase 2, compress packed runs of BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB with
11636 // same addr delta into one BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB
11637 dst = &mid[0];
11638 for (const binding_tmp* src = &mid[0]; src->opcode != BIND_OPCODE_DONE; ++src) {
11639 uint64_t delta = src->operand1;
11640 if ( (src->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11641 && (src[1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11642 && (src[1].operand1 == delta) ) {
11643 // found at least two in a row, this is worth compressing
11644 dst->opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB;
11645 dst->operand1 = 1;
11646 dst->operand2 = delta;
11647 ++src;
11648 while ( (src->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11649 && (src->operand1 == delta) ) {
11650 dst->operand1++;
11651 ++src;
11652 }
11653 --src;
11654 ++dst;
11655 }
11656 else {
11657 *dst++ = *src;
11658 }
11659 }
11660 dst->opcode = BIND_OPCODE_DONE;
11661
11662 // optimize phase 3, use immediate encodings
11663 for (binding_tmp* p = &mid[0]; p->opcode != BIND_OPCODE_DONE; ++p) {
11664 if ( (p->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11665 && (p->operand1 < (15*sizeof(pint_t)))
11666 && ((p->operand1 % sizeof(pint_t)) == 0) ) {
11667 p->opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED;
11668 p->operand1 = p->operand1/sizeof(pint_t);
11669 }
11670 else if ( (p->opcode == BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB) && (p->operand1 <= 15) ) {
11671 p->opcode = BIND_OPCODE_SET_DYLIB_ORDINAL_IMM;
11672 }
11673 }
11674 dst->opcode = BIND_OPCODE_DONE;
11675
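// Illustrative example of what the loop below emits for a single pointer bound
// to a hypothetical symbol _foo from the first linked dylib:
//   BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | 1
//   BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, "_foo"
//   BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER
//   BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segIndex, uleb128(offset)
//   BIND_OPCODE_DO_BIND
// Runs of binds at regularly spaced addresses have already been folded into
// BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB by the phases above.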
11676 // convert to compressed encoding
11677 const static bool log = false;
11678 fEncodedData.reserve(info.size()*2);
11679 bool done = false;
11680 for (std::vector<binding_tmp>::iterator it = mid.begin(); !done && it != mid.end() ; ++it) {
11681 switch ( it->opcode ) {
11682 case BIND_OPCODE_DONE:
11683 if ( log ) fprintf(stderr, "BIND_OPCODE_DONE()\n");
11684 done = true;
11685 break;
11686 case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM:
11687 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_IMM(%lld)\n", it->operand1);
11688 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | it->operand1);
11689 break;
11690 case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB:
11691 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB(%lld)\n", it->operand1);
11692 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
11693 fEncodedData.append_uleb128(it->operand1);
11694 break;
11695 case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM:
11696 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_SPECIAL_IMM(%lld)\n", it->operand1);
11697 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | (it->operand1 & BIND_IMMEDIATE_MASK));
11698 break;
11699 case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM:
11700 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM(0x%0llX, %s)\n", it->operand1, it->name);
11701 fEncodedData.append_byte(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | it->operand1);
11702 fEncodedData.append_string(it->name);
11703 break;
11704 case BIND_OPCODE_SET_TYPE_IMM:
11705 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_TYPE_IMM(%lld)\n", it->operand1);
11706 fEncodedData.append_byte(BIND_OPCODE_SET_TYPE_IMM | it->operand1);
11707 break;
11708 case BIND_OPCODE_SET_ADDEND_SLEB:
11709 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_ADDEND_SLEB(%lld)\n", it->operand1);
11710 fEncodedData.append_byte(BIND_OPCODE_SET_ADDEND_SLEB);
11711 fEncodedData.append_sleb128(it->operand1);
11712 break;
11713 case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
11714 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB(%lld, 0x%llX)\n", it->operand1, it->operand2);
11715 fEncodedData.append_byte(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | it->operand1);
11716 fEncodedData.append_uleb128(it->operand2);
11717 break;
11718 case BIND_OPCODE_ADD_ADDR_ULEB:
11719 if ( log ) fprintf(stderr, "BIND_OPCODE_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11720 fEncodedData.append_byte(BIND_OPCODE_ADD_ADDR_ULEB);
11721 fEncodedData.append_uleb128(it->operand1);
11722 break;
11723 case BIND_OPCODE_DO_BIND:
11724 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND()\n");
11725 fEncodedData.append_byte(BIND_OPCODE_DO_BIND);
11726 break;
11727 case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
11728 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11729 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB);
11730 fEncodedData.append_uleb128(it->operand1);
11731 break;
11732 case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
11733 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED(%lld=0x%llX)\n", it->operand1, it->operand1*sizeof(pint_t));
11734 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | it->operand1 );
11735 break;
11736 case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
11737 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB(%lld, %lld)\n", it->operand1, it->operand2);
11738 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB);
11739 fEncodedData.append_uleb128(it->operand1);
11740 fEncodedData.append_uleb128(it->operand2);
11741 break;
11742 }
11743 }
11744
11745 // align to pointer size
11746 fEncodedData.pad_to_size(sizeof(pint_t));
11747
11748 if (log) fprintf(stderr, "total binding info size = %ld\n", fEncodedData.size());
11749
11750 }
11751
11752
11753
11754 struct WeakBindingSorter
11755 {
11756 bool operator()(const BindingInfo& left, const BindingInfo& right)
11757 {
11758 // sort by symbol, type, address
11759 if ( left.fSymbolName != right.fSymbolName )
11760 return ( strcmp(left.fSymbolName, right.fSymbolName) < 0 );
11761 if ( left.fType != right.fType )
11762 return (left.fType < right.fType);
11763 return (left.fAddress < right.fAddress);
11764 }
11765 };
11766
11767
11768
11769 template <typename A>
11770 void CompressedWeakBindingInfoLinkEditAtom<A>::encode()
11771 {
11772 // add regular atoms that override a dylib's weak definitions
11773 for(std::set<const class ObjectFile::Atom*>::iterator it = fWriter.fRegularDefAtomsThatOverrideADylibsWeakDef->begin();
11774 it != fWriter.fRegularDefAtomsThatOverrideADylibsWeakDef->end(); ++it) {
11775 if ( fWriter.shouldExport(**it) )
11776 fWriter.fWeakBindingInfo.push_back(BindingInfo(0, (*it)->getName(), true, 0, 0));
11777 }
11778
11779 // add all exported weak definitions
11780 for(std::vector<class ObjectFile::Atom*>::iterator it = fWriter.fAllAtoms->begin(); it != fWriter.fAllAtoms->end(); ++it) {
11781 ObjectFile::Atom* atom = *it;
11782 if ( (atom->getDefinitionKind() == ObjectFile::Atom::kWeakDefinition) && fWriter.shouldExport(*atom) ) {
11783 fWriter.fWeakBindingInfo.push_back(BindingInfo(0, atom->getName(), false, 0, 0));
11784 }
11785 }
11786
11787 // sort by symbol, type, address
11788 const std::vector<SegmentInfo*>& segments = fWriter.fSegmentInfos;
11789 std::vector<BindingInfo>& info = fWriter.fWeakBindingInfo;
11790 if ( info.size() == 0 )
11791 return;
11792 std::sort(info.begin(), info.end(), WeakBindingSorter());
11793
11794 // convert to temp encoding that can be more easily optimized
11795 std::vector<binding_tmp> mid;
11796 mid.reserve(info.size());
11797 const SegmentInfo* currentSegment = NULL;
11798 unsigned int segIndex = 0;
11799 const char* symbolName = NULL;
11800 uint8_t type = 0;
11801 uint64_t address = (uint64_t)(-1);
11802 int64_t addend = 0;
11803 for (std::vector<BindingInfo>::iterator it = info.begin(); it != info.end(); ++it) {
11804 if ( symbolName != it->fSymbolName ) {
11805 mid.push_back(binding_tmp(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, it->fFlags, 0, it->fSymbolName));
11806 symbolName = it->fSymbolName;
11807 }
11808 if ( it->fType != 0 ) {
11809 if ( type != it->fType ) {
11810 mid.push_back(binding_tmp(BIND_OPCODE_SET_TYPE_IMM, it->fType));
11811 type = it->fType;
11812 }
11813 if ( address != it->fAddress ) {
11814 // name-only entries (fType == 0) just get BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM above
11815 // entries with a real type also get SET_SEG/ADD_ADDR, SET_ADDEND and DO_BIND
11816 if ( (currentSegment == NULL) || (it->fAddress < currentSegment->fBaseAddress)
11817 || ((currentSegment->fBaseAddress+currentSegment->fSize) <=it->fAddress) ) {
11818 segIndex = 0;
11819 for (std::vector<SegmentInfo*>::const_iterator segit = segments.begin(); segit != segments.end(); ++segit) {
11820 if ( ((*segit)->fBaseAddress <= it->fAddress) && (it->fAddress < ((*segit)->fBaseAddress+(*segit)->fSize)) ) {
11821 currentSegment = *segit;
11822 break;
11823 }
11824 ++segIndex;
11825 }
11826 mid.push_back(binding_tmp(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB, segIndex, it->fAddress - currentSegment->fBaseAddress));
11827 }
11828 else {
11829 mid.push_back(binding_tmp(BIND_OPCODE_ADD_ADDR_ULEB, it->fAddress-address));
11830 }
11831 address = it->fAddress;
11832 }
11833 if ( addend != it->fAddend ) {
11834 mid.push_back(binding_tmp(BIND_OPCODE_SET_ADDEND_SLEB, it->fAddend));
11835 addend = it->fAddend;
11836 }
11837 mid.push_back(binding_tmp(BIND_OPCODE_DO_BIND, 0));
11838 address += sizeof(pint_t);
11839 }
11840 }
11841 mid.push_back(binding_tmp(BIND_OPCODE_DONE, 0));
11842
11843
11844 // optimize phase 1, combine bind/add pairs
11845 binding_tmp* dst = &mid[0];
11846 for (const binding_tmp* src = &mid[0]; src->opcode != BIND_OPCODE_DONE; ++src) {
11847 if ( (src->opcode == BIND_OPCODE_DO_BIND)
11848 && (src[1].opcode == BIND_OPCODE_ADD_ADDR_ULEB) ) {
11849 dst->opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB;
11850 dst->operand1 = src[1].operand1;
11851 ++src;
11852 ++dst;
11853 }
11854 else {
11855 *dst++ = *src;
11856 }
11857 }
11858 dst->opcode = BIND_OPCODE_DONE;
11859
11860 // optimize phase 2, compress packed runs of BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB with
11861 // same addr delta into one BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB
11862 dst = &mid[0];
11863 for (const binding_tmp* src = &mid[0]; src->opcode != BIND_OPCODE_DONE; ++src) {
11864 uint64_t delta = src->operand1;
11865 if ( (src->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11866 && (src[1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11867 && (src[1].operand1 == delta) ) {
11868 // found at least two in a row, this is worth compressing
11869 dst->opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB;
11870 dst->operand1 = 1;
11871 dst->operand2 = delta;
11872 ++src;
11873 while ( (src->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11874 && (src->operand1 == delta) ) {
11875 dst->operand1++;
11876 ++src;
11877 }
11878 --src;
11879 ++dst;
11880 }
11881 else {
11882 *dst++ = *src;
11883 }
11884 }
11885 dst->opcode = BIND_OPCODE_DONE;
11886
11887 // optimize phase 3, use immediate encodings
11888 for (binding_tmp* p = &mid[0]; p->opcode != BIND_OPCODE_DONE; ++p) {
11889 if ( (p->opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
11890 && (p->operand1 < (15*sizeof(pint_t)))
11891 && ((p->operand1 % sizeof(pint_t)) == 0) ) {
11892 p->opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED;
11893 p->operand1 = p->operand1/sizeof(pint_t);
11894 }
11895 }
11896 dst->opcode = BIND_OPCODE_DONE;
11897
11898
11899 // convert to compressed encoding
11900 const static bool log = false;
11901 fEncodedData.reserve(info.size()*2);
11902 bool done = false;
11903 for (std::vector<binding_tmp>::iterator it = mid.begin(); !done && it != mid.end() ; ++it) {
11904 switch ( it->opcode ) {
11905 case BIND_OPCODE_DONE:
11906 if ( log ) fprintf(stderr, "BIND_OPCODE_DONE()\n");
11907 fEncodedData.append_byte(BIND_OPCODE_DONE);
11908 done = true;
11909 break;
11910 case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM:
11911 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_IMM(%lld)\n", it->operand1);
11912 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | it->operand1);
11913 break;
11914 case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB:
11915 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB(%lld)\n", it->operand1);
11916 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
11917 fEncodedData.append_uleb128(it->operand1);
11918 break;
11919 case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM:
11920 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_SPECIAL_IMM(%lld)\n", it->operand1);
11921 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | (it->operand1 & BIND_IMMEDIATE_MASK));
11922 break;
11923 case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM:
11924 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM(0x%0llX, %s)\n", it->operand1, it->name);
11925 fEncodedData.append_byte(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | it->operand1);
11926 fEncodedData.append_string(it->name);
11927 break;
11928 case BIND_OPCODE_SET_TYPE_IMM:
11929 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_TYPE_IMM(%lld)\n", it->operand1);
11930 fEncodedData.append_byte(BIND_OPCODE_SET_TYPE_IMM | it->operand1);
11931 break;
11932 case BIND_OPCODE_SET_ADDEND_SLEB:
11933 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_ADDEND_SLEB(%lld)\n", it->operand1);
11934 fEncodedData.append_byte(BIND_OPCODE_SET_ADDEND_SLEB);
11935 fEncodedData.append_sleb128(it->operand1);
11936 break;
11937 case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
11938 if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB(%lld, 0x%llX)\n", it->operand1, it->operand2);
11939 fEncodedData.append_byte(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | it->operand1);
11940 fEncodedData.append_uleb128(it->operand2);
11941 break;
11942 case BIND_OPCODE_ADD_ADDR_ULEB:
11943 if ( log ) fprintf(stderr, "BIND_OPCODE_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11944 fEncodedData.append_byte(BIND_OPCODE_ADD_ADDR_ULEB);
11945 fEncodedData.append_uleb128(it->operand1);
11946 break;
11947 case BIND_OPCODE_DO_BIND:
11948 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND()\n");
11949 fEncodedData.append_byte(BIND_OPCODE_DO_BIND);
11950 break;
11951 case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
11952 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
11953 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB);
11954 fEncodedData.append_uleb128(it->operand1);
11955 break;
11956 case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
11957 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED(%lld=0x%llX)\n", it->operand1, it->operand1*sizeof(pint_t));
11958 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | it->operand1 );
11959 break;
11960 case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
11961 if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB(%lld, %lld)\n", it->operand1, it->operand2);
11962 fEncodedData.append_byte(BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB);
11963 fEncodedData.append_uleb128(it->operand1);
11964 fEncodedData.append_uleb128(it->operand2);
11965 break;
11966 }
11967 }
11968
11969 // align to pointer size
11970 fEncodedData.pad_to_size(sizeof(pint_t));
11971
11972 if (log) fprintf(stderr, "total weak binding info size = %ld\n", fEncodedData.size());
11973
11974 }
11975
11976 template <typename A>
11977 void CompressedLazyBindingInfoLinkEditAtom<A>::encode()
11978 {
11979 // stream all lazy bindings and record start offsets
11980 const SegmentInfo* currentSegment = NULL;
11981 uint8_t segIndex = 0;
11982 const std::vector<SegmentInfo*>& segments = fWriter.fSegmentInfos;
11983 std::vector<class LazyPointerAtom<A>*>& allLazys = fWriter.fAllSynthesizedLazyPointers;
11984 for (typename std::vector<class LazyPointerAtom<A>*>::iterator it = allLazys.begin(); it != allLazys.end(); ++it) {
11985 LazyPointerAtom<A>* lazyPointerAtom = *it;
11986 ObjectFile::Atom* lazyPointerTargetAtom = lazyPointerAtom->getTarget();
11987
11988 // skip lazy pointers that are bound non-lazily because they are coalesced
11989 if ( ! fWriter.targetRequiresWeakBinding(*lazyPointerTargetAtom) ) {
11990 // record start offset for use by stub helper
11991 lazyPointerAtom->setLazyBindingInfoOffset(fEncodedData.size());
11992
11993 // write address to bind
11994 pint_t address = lazyPointerAtom->getAddress();
11995 if ( (currentSegment == NULL) || (address < currentSegment->fBaseAddress)
11996 || ((currentSegment->fBaseAddress+currentSegment->fSize) <= address) ) {
11997 segIndex = 0;
11998 for (std::vector<SegmentInfo*>::const_iterator segit = segments.begin(); segit != segments.end(); ++segit) {
11999 if ( ((*segit)->fBaseAddress <= address) && (address < ((*segit)->fBaseAddress+(*segit)->fSize)) ) {
12000 currentSegment = *segit;
12001 break;
12002 }
12003 ++segIndex;
12004 }
12005 }
12006 fEncodedData.append_byte(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segIndex);
12007 fEncodedData.append_uleb128(lazyPointerAtom->getAddress() - currentSegment->fBaseAddress);
12008
12009 // write ordinal
12010 int ordinal = fWriter.compressedOrdinalForImortedAtom(lazyPointerTargetAtom);
12011 if ( ordinal <= 0 ) {
12012 // special lookups are encoded as negative numbers in BindingInfo
12013 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | (ordinal & BIND_IMMEDIATE_MASK) );
12014 }
12015 else if ( ordinal <= 15 ) {
12016 // small ordinals are encoded in opcode
12017 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal);
12018 }
12019 else {
12020 fEncodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
12021 fEncodedData.append_uleb128(ordinal);
12022 }
12023 // write symbol name
12024 bool weak_import = fWriter.fWeakImportMap[lazyPointerTargetAtom];
12025 if ( weak_import )
12026 fEncodedData.append_byte(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | BIND_SYMBOL_FLAGS_WEAK_IMPORT);
12027 else
12028 fEncodedData.append_byte(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM);
12029 fEncodedData.append_string(lazyPointerTargetAtom->getName());
12030 // write do bind
12031 fEncodedData.append_byte(BIND_OPCODE_DO_BIND);
12032 fEncodedData.append_byte(BIND_OPCODE_DONE);
12033 }
12034 }
12035 // align to pointer size
12036 fEncodedData.pad_to_size(sizeof(pint_t));
12037
12038 //fprintf(stderr, "lazy binding info size = %ld, for %ld entries\n", fEncodedData.size(), allLazys.size());
12039 }
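// Each lazy pointer thus gets its own small, self-terminating record:
//   SET_SEGMENT_AND_OFFSET_ULEB + uleb128(offset), an ordinal opcode,
//   SET_SYMBOL_TRAILING_FLAGS_IMM + name, DO_BIND, DONE
// and setLazyBindingInfoOffset() records where that record starts, so the stub
// helper can hand dyld the offset of just this one entry the first time the
// symbol is called.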
12040
12041 struct TrieEntriesSorter
12042 {
12043 TrieEntriesSorter(Options& o) : fOptions(o) {}
12044
12045 bool operator()(const mach_o::trie::Entry& left, const mach_o::trie::Entry& right)
12046 {
12047 unsigned int leftOrder;
12048 unsigned int rightOrder;
12049 fOptions.exportedSymbolOrder(left.name, &leftOrder);
12050 fOptions.exportedSymbolOrder(right.name, &rightOrder);
12051 if ( leftOrder != rightOrder )
12052 return (leftOrder < rightOrder);
12053 else
12054 return (left.address < right.address);
12055 }
12056 private:
12057 Options& fOptions;
12058 };
12059
12060
12061 template <typename A>
12062 void CompressedExportInfoLinkEditAtom<A>::encode()
12063 {
12064 // make vector of mach_o::trie::Entry for all exported symbols
12065 std::vector<class ObjectFile::Atom*>& exports = fWriter.fExportedAtoms;
12066 uint64_t imageBaseAddress = fWriter.fMachHeaderAtom->getAddress();
12067 std::vector<mach_o::trie::Entry> entries;
12068 entries.reserve(exports.size());
12069 for (std::vector<ObjectFile::Atom*>::iterator it = exports.begin(); it != exports.end(); ++it) {
12070 ObjectFile::Atom* atom = *it;
12071 uint64_t flags = 0;
12072 if ( atom->getDefinitionKind() == ObjectFile::Atom::kWeakDefinition )
12073 flags |= EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION;
12074 uint64_t address = atom->getAddress() - imageBaseAddress;
12075 if ( atom->isThumb() )
12076 address |= 1;
12077 mach_o::trie::Entry entry;
12078 entry.name = atom->getName();
12079 entry.flags = flags;
12080 entry.address = address;
12081 entries.push_back(entry);
12082 }
12083
12084 // sort vector by -exported_symbols_order, and any others by address
12085 std::sort(entries.begin(), entries.end(), TrieEntriesSorter(fWriter.fOptions));
12086
12087 // create trie
12088 mach_o::trie::makeTrie(entries, fEncodedData.bytes());
12089
12090 // align to pointer size
12091 fEncodedData.pad_to_size(sizeof(pint_t));
12092 }
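// For illustration: a weakly-defined thumb function at image offset 0x4F30
// becomes a trie entry with address 0x4F31 (low bit marks thumb) and
// EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION set in its flags; makeTrie() then
// serializes all entries into the export trie byte stream.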
12093
12094
12095
12096
12097
12098 }; // namespace executable
12099 }; // namespace mach_o
12100
12101
12102 #endif // __EXECUTABLE_MACH_O__