static ld::Section _s_text_section("__TEXT", "__text", ld::Section::typeCode);
+#if SUPPORT_ARCH_arm64
+
+class ARM64BranchIslandAtom : public ld::Atom {
+public:
+	ARM64BranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
+		: ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
+				ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
+				ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
+		_name(nm),
+		_fixup1(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressARM64Branch26, target),
+		_fixup2(0, ld::Fixup::k1of1, ld::Fixup::kindIslandTarget, finalTarget.atom) {
+		if (_s_log) fprintf(stderr, "%p: ARM64 branch island to final target %s\n",
+				this, finalTarget.atom->name());
+	}
+
+	virtual const ld::File*		file() const	{ return NULL; }
+	virtual const char*		name() const	{ return _name; }
+	virtual uint64_t		size() const	{ return 4; }
+	virtual uint64_t		objectAddress() const	{ return 0; }
+	virtual void			copyRawContent(uint8_t buffer[]) const {
+		// b target  (imm26 displacement is filled in later by the Branch26 fixup)
+		OSWriteLittleInt32(buffer, 0, 0x14000000);
+	}
+	virtual void			setScope(Scope)	{ }
+	virtual ld::Fixup::iterator	fixupsBegin() const	{ return (ld::Fixup*)&_fixup1; }
+	virtual ld::Fixup::iterator	fixupsEnd() const	{ return &((ld::Fixup*)&_fixup2)[1]; }
+
+private:
+	const char*	_name;
+	ld::Fixup	_fixup1;
+	ld::Fixup	_fixup2;
+};
+#endif
+
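For context (not part of the patch): the island body is a single AArch64 B
instruction emitted with a zero displacement; the Branch26 fixup rewrites its
imm26 field once final layout is known. A minimal sketch of that encoding,
using a hypothetical encodeBranch26 helper that is not in ld64:

	#include <cassert>
	#include <cstdint>

	// Hypothetical helper: encode "b <pc+delta>". imm26 is a signed count
	// of 4-byte words, so the reachable range is +/- 2^27 bytes (128MB).
	static uint32_t encodeBranch26(int64_t delta)
	{
		assert((delta & 3) == 0);                          // targets are 4-byte aligned
		assert(delta >= -(1LL << 27) && delta < (1LL << 27));
		return 0x14000000 | ((uint32_t)(delta >> 2) & 0x03FFFFFF);
	}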
class ARMtoARMBranchIslandAtom : public ld::Atom {
public:
return new ARMtoARMBranchIslandAtom(name, nextTarget, finalTarget);
}
break;
+#if SUPPORT_ARCH_arm64
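+		// arm64 has a single instruction set (no Thumb analogue), so one
+		// island flavor covers both branch-fixup kinds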
+ case ld::Fixup::kindStoreARM64Branch26:
+ case ld::Fixup::kindStoreTargetAddressARM64Branch26:
+ return new ARM64BranchIslandAtom(name, nextTarget, finalTarget);
+ break;
+#endif
default:
assert(0 && "unexpected branch kind");
break;
else
return 4000000; // thumb1 can branch +/- 4MB
break;
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ return 128000000; // arm64 can branch +/- 128MB
+ break;
+#endif
}
assert(0 && "unexpected architecture");
return 0x100000000LL;
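The 128MB figure follows directly from the B encoding: a signed imm26 counts
4-byte words, so the reach is 2^25 * 4 = 134,217,728 bytes each way, and the
decimal 128,000,000 returned above is slightly smaller, i.e. conservative. A
compile-time check of that arithmetic:

	#include <cstdint>

	constexpr int64_t kArm64Reach = (1LL << 25) * 4;   // 134,217,728 bytes each way
	static_assert(128000000 < kArm64Reach, "decimal threshold stays inside B reach");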
else
return 3500000; // 0.5MB of branch islands per 4MB
break;
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ return 124*1024*1024; // 4MB of branch islands per 128MB
+ break;
+#endif
}
assert(0 && "unexpected architecture");
return 0x100000000LL;
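Similarly, spacing island regions 124MB apart leaves a 4MB budget for island
atoms inserted per region: even after that growth, a branch from one region to
the next island stays within the +/-128MB reach. A sketch of the invariant,
with the 4MB budget taken from the comment above:

	#include <cstdint>

	constexpr int64_t kBetweenRegions = 124LL * 1024 * 1024;  // value returned above
	constexpr int64_t kIslandBudget   = 4LL * 1024 * 1024;    // from the comment above
	static_assert(kBetweenRegions + kIslandBudget <= (1LL << 27),
	              "island spacing plus island growth stays branch-reachable");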
case ld::Fixup::kindStoreThumbBranch22:
case ld::Fixup::kindStoreTargetAddressARMBranch24:
case ld::Fixup::kindStoreTargetAddressThumbBranch22:
+#if SUPPORT_ARCH_arm64
+ case ld::Fixup::kindStoreARM64Branch26:
+ case ld::Fixup::kindStoreTargetAddressARM64Branch26:
+#endif
haveBranch = true;
break;
default:
if ( !opts.allowBranchIslands() )
return;
- // only ARM needs branch islands
+ // only ARM[64] needs branch islands
switch ( opts.architecture() ) {
case CPU_TYPE_ARM:
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+#endif
break;
default:
return;
if ( opts.outputKind() == Options::kObjectFile )
return;
+ // pre-fill gotMap with existing non-lazy pointers
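+	// (object files may already carry GOT slots, e.g. non-lazy pointers the
+	// compiler emitted; reuse them below instead of creating duplicates)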
+	std::map<const ld::Atom*, const ld::Atom*> gotMap;
+	for (ld::Internal::FinalSection* sect : internal.sections) {
+		if ( sect->type() != ld::Section::typeNonLazyPointer )
+			continue;
+		for (const ld::Atom* atom : sect->atoms) {
+			const ld::Atom* target = NULL;
+			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
+				switch (fit->kind) {
+					case ld::Fixup::kindStoreTargetAddressLittleEndian64:
+					case ld::Fixup::kindStoreTargetAddressLittleEndian32:
+						switch ( fit->binding ) {
+							case ld::Fixup::bindingsIndirectlyBound:
+								target = internal.indirectBindingTable[fit->u.bindingIndex];
+								break;
+							case ld::Fixup::bindingDirectlyBound:
+								target = fit->u.target;
+								break;
+							default:
+								fprintf(stderr, "non-pointer is got entry\n");
+								break;
+						}
+						break;
+					default:
+						break;
+				}
+			}
+			if ( target != NULL ) {
+				if (log) fprintf(stderr, "found existing got entry to %s\n", target->name());
+				gotMap[target] = atom;
+			}
+		}
+	}
+
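For context on what counts as "GOT-able" here (typical arm64 codegen, assumed
rather than shown in this patch): a GOT-indirect reference compiles to an
adrp/ldr pair, and the pass retargets the corresponding fixups at a
pointer-sized GOT slot instead of at the symbol itself. Using a hypothetical
symbol _foo:

	// adrp x8, _foo@GOTPAGE          -> kindStoreTargetAddressARM64GOTLoadPage21
	// ldr  x8, [x8, _foo@GOTPAGEOFF] -> kindStoreTargetAddressARM64GOTLoadPageOff12
	// Both fixups end up bound to a non-lazy-pointer atom holding _foo's
	// address; the adrp/ldr then load that address at run time.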
// walk all atoms and fixups looking for GOT-able references
// don't create GOT atoms during this loop because that could invalidate the sections iterator
std::vector<const ld::Atom*> atomsReferencingGOT;
- std::map<const ld::Atom*,ld::Atom*> gotMap;
std::map<const ld::Atom*,bool> weakImportMap;
atomsReferencingGOT.reserve(128);
for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
}
else {
// remember that we need to use GOT in this function
- if ( log ) fprintf(stderr, "found GOT use in %s to %s\n", atom->name(), targetOfGOT->name());
+ if ( log ) fprintf(stderr, "found GOT use in %s\n", atom->name());
if ( !atomUsesGOT ) {
atomsReferencingGOT.push_back(atom);
atomUsesGOT = true;
}
- gotMap[targetOfGOT] = NULL;
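+	// don't clobber a slot pre-filled above from an existing non-lazy pointer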
+ if ( gotMap.count(targetOfGOT) == 0 )
+ gotMap[targetOfGOT] = NULL;
// record weak_import attribute
std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(targetOfGOT);
if ( pos == weakImportMap.end() ) {
#endif
}
- // make GOT entries
- for (std::map<const ld::Atom*,ld::Atom*>::iterator it = gotMap.begin(); it != gotMap.end(); ++it) {
- it->second = new GOTEntryAtom(internal, it->first, weakImportMap[it->first], is64);
+ // make GOT entries
+ for (auto& entry : gotMap) {
+ if ( entry.second == NULL ) {
+ entry.second = new GOTEntryAtom(internal, entry.first, weakImportMap[entry.first], is64);
+ if (log) fprintf(stderr, "making new GOT slot for %s, gotMap[%p] = %p\n", entry.first->name(), entry.first, entry.second);
+ }
}
-
+
+
// update atoms to use GOT entries
for (std::vector<const ld::Atom*>::iterator it=atomsReferencingGOT.begin(); it != atomsReferencingGOT.end(); ++it) {
const ld::Atom* atom = *it;
switch ( fitThatSetTarget->binding ) {
case ld::Fixup::bindingsIndirectlyBound:
case ld::Fixup::bindingDirectlyBound:
+ if ( log ) fprintf(stderr, "updating GOT use in %s to %s\n", atom->name(), targetOfGOT->name());
fitThatSetTarget->binding = ld::Fixup::bindingDirectlyBound;
fitThatSetTarget->u.target = gotMap[targetOfGOT];
break;