X-Git-Url: https://git.saurik.com/apple/ld64.git/blobdiff_plain/a645023da60d22e86be13f7b4d97adeff8bc6665..7f09b9353af9897bf18933788d6a59c152c29edd:/src/ld/passes/got.cpp

diff --git a/src/ld/passes/got.cpp b/src/ld/passes/got.cpp
index ff18e00..01c2e30 100644
--- a/src/ld/passes/got.cpp
+++ b/src/ld/passes/got.cpp
@@ -30,10 +30,11 @@
 #include <vector>
 #include <map>
 
-#include 
+#include "MachOFileAbstraction.hpp"
 
 #include "ld.hpp"
 #include "got.h"
+#include "configure.h"
 
 namespace ld {
 namespace passes {
@@ -43,19 +44,18 @@ class File; // forward reference
 
 class GOTEntryAtom : public ld::Atom {
 public:
-    GOTEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport)
-        : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
+    GOTEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport, bool weakDef, bool is64)
+        : ld::Atom(weakDef ? _s_sectionWeak : _s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
             ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
-            symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)),
-        _fixup(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressLittleEndian64, target),
-        _target(target)
+            symbolTableNotIn, false, false, false, (is64 ? ld::Atom::Alignment(3) : ld::Atom::Alignment(2))),
+        _fixup(0, ld::Fixup::k1of1, (is64 ? ld::Fixup::kindStoreTargetAddressLittleEndian64 : ld::Fixup::kindStoreTargetAddressLittleEndian32), target),
+        _target(target),
+        _is64(is64)
             { _fixup.weakImport = weakImport; internal.addAtom(*this); }
 
     virtual const ld::File*     file() const        { return NULL; }
-    virtual bool                translationUnitSource(const char** dir, const char**) const
-                                                    { return false; }
    virtual const char*          name() const        { return _target->name(); }
-    virtual uint64_t            size() const        { return 8; }
+    virtual uint64_t            size() const        { return (_is64 ? 8 : 4); }
    virtual uint64_t            objectAddress() const   { return 0; }
    virtual void                copyRawContent(uint8_t buffer[]) const { }
    virtual void                setScope(Scope)     { }
@@ -65,17 +65,25 @@ public:
 
 private:
    mutable ld::Fixup            _fixup;
    const ld::Atom*              _target;
+   bool                         _is64;
 
    static ld::Section           _s_section;
+   static ld::Section           _s_sectionWeak;
 };
 
 ld::Section GOTEntryAtom::_s_section("__DATA", "__got", ld::Section::typeNonLazyPointer);
+ld::Section GOTEntryAtom::_s_sectionWeak("__DATA", "__got_weak", ld::Section::typeNonLazyPointer);
 
-static bool gotFixup(const Options& opts, ld::Internal& internal, const ld::Atom* targetOfGOT, const ld::Fixup* fixup, bool* optimizable)
+static bool gotFixup(const Options& opts, ld::Internal& internal, const ld::Atom* targetOfGOT, const ld::Fixup* fixup, bool* optimizable, bool* targetIsExternalWeakDef)
 {
+   *targetIsExternalWeakDef = false;
    switch (fixup->kind) {
        case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
+#if SUPPORT_ARCH_arm64
+       case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
+       case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
+#endif
            // start by assuming this can be optimized
            *optimizable = true;
            // cannot do LEA optimization if target is in another dylib
@@ -87,10 +95,24 @@ static bool gotFixup(const Options& opts, ld::Internal& internal, const ld::Atom
                || (targetOfGOT->section().type() == ld::Section::typeTentativeDefs)) ) {
                *optimizable = false;
            }
-           if ( targetOfGOT->scope() == ld::Atom::scopeGlobal ) {
+           if ( targetOfGOT->scope() == ld::Atom::scopeGlobal ) {
                // cannot do LEA optimization if target is weak exported symbol
-               if ( (targetOfGOT->definition() == ld::Atom::definitionRegular) && (targetOfGOT->combine() == ld::Atom::combineByName) )
-                   *optimizable = false;
+               if ( ((targetOfGOT->definition() == ld::Atom::definitionRegular) || (targetOfGOT->definition() == ld::Atom::definitionProxy)) && (targetOfGOT->combine() == ld::Atom::combineByName) ) {
+                   switch ( opts.outputKind() ) {
+                       case Options::kDynamicExecutable:
+                       case Options::kDynamicLibrary:
+                       case Options::kDynamicBundle:
+                       case Options::kKextBundle:
+                           *targetIsExternalWeakDef = true;
+                           *optimizable = false;
+                           break;
+                       case Options::kStaticExecutable:
+                       case Options::kDyld:
+                       case Options::kPreload:
+                       case Options::kObjectFile:
+                           break;
+                   }
+               }
                // cannot do LEA optimization if target is interposable
                if ( opts.interposable(targetOfGOT->name()) )
                    *optimizable = false;
@@ -101,8 +123,23 @@ static bool gotFixup(const Options& opts, ld::Internal& internal, const ld::Atom
                if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
                    *optimizable = false;
            }
+           else if ( targetOfGOT->scope() == ld::Atom::scopeLinkageUnit) {
+               // don't do optimization if target is in custom segment
+               if ( opts.sharedRegionEligible() ) {
+                   const char* segName = targetOfGOT->section().segmentName();
+                   if ( (strcmp(segName, "__TEXT") != 0) && (strcmp(segName, "__DATA") != 0) ) {
+                       *optimizable = false;
+                   }
+               }
+           }
            return true;
        case ld::Fixup::kindStoreX86PCRel32GOT:
+#if SUPPORT_ARCH_arm64
+       case ld::Fixup::kindStoreARM64PCRelToGOT:
+#endif
+           *optimizable = false;
+           return true;
+       case ld::Fixup::kindNoneGroupSubordinatePersonality:
            *optimizable = false;
            return true;
        default:
@@ -128,11 +165,45 @@ void doPass(const Options& opts, ld::Internal& internal)
    if ( opts.outputKind() == Options::kObjectFile )
        return;
 
-   // walk all atoms and fixups looking for stubable references
-   // don't create stubs inline because that could invalidate the sections walk
+   // pre-fill gotMap with existing non-lazy pointers
+   std::map<const ld::Atom*, const ld::Atom*> gotMap;
+   for (ld::Internal::FinalSection* sect : internal.sections) {
+       if ( sect->type() != ld::Section::typeNonLazyPointer )
+           continue;
+       for (const ld::Atom* atom : sect->atoms) {
+           const ld::Atom* target = NULL;
+           for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
+               switch (fit->kind) {
+                   case ld::Fixup::kindStoreTargetAddressLittleEndian64:
+                   case ld::Fixup::kindStoreTargetAddressLittleEndian32:
+                       switch ( fit->binding ) {
+                           case ld::Fixup::bindingsIndirectlyBound:
+                               target = internal.indirectBindingTable[fit->u.bindingIndex];
+                               break;
+                           case ld::Fixup::bindingDirectlyBound:
+                               target = fit->u.target;
+                               break;
+                           default:
+                               fprintf(stderr, "non-pointer is got entry\n");
+                               break;
+                       }
+                       break;
+                   default:
+                       break;
+               }
+           }
+           if ( target != NULL ) {
+               if (log) fprintf(stderr, "found existing got entry to %s\n", target->name());
+               gotMap[target] = atom;
+           }
+       }
+   }
+
+   // walk all atoms and fixups looking for GOT-able references
+   // don't create GOT atoms during this loop because that could invalidate the sections iterator
    std::vector<const ld::Atom*> atomsReferencingGOT;
-   std::map<const ld::Atom*, const ld::Atom*> gotMap;
    std::map<const ld::Atom*, bool> weakImportMap;
+   std::map<const ld::Atom*, bool> weakDefMap;
    atomsReferencingGOT.reserve(128);
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
@@ -140,21 +211,25 @@ void doPass(const Options& opts, ld::Internal& internal)
            const ld::Atom* atom = *ait;
            bool atomUsesGOT = false;
            const ld::Atom* targetOfGOT = NULL;
+           bool targetIsWeakImport = false;
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                if ( fit->firstInCluster() )
                    targetOfGOT = NULL;
                switch ( fit->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                        targetOfGOT = internal.indirectBindingTable[fit->u.bindingIndex];
+                       targetIsWeakImport = fit->weakImport;
                        break;
                    case ld::Fixup::bindingDirectlyBound:
                        targetOfGOT = fit->u.target;
+                       targetIsWeakImport = fit->weakImport;
                        break;
                    default:
                        break;
                }
                bool optimizable;
-               if ( !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
+               bool targetIsExternalWeakDef;
+               if ( !gotFixup(opts, internal, targetOfGOT, fit, &optimizable, &targetIsExternalWeakDef) )
                    continue;
                if ( optimizable ) {
                    // change from load of GOT entry to lea of target
@@ -164,7 +239,22 @@
                        case ld::Fixup::bindingDirectlyBound:
                            fit->binding = ld::Fixup::bindingDirectlyBound;
                            fit->u.target = targetOfGOT;
-                           fit->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA;
+                           switch ( fit->kind ) {
+                               case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
+                                   fit->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA;
+                                   break;
+#if SUPPORT_ARCH_arm64
+                               case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
+                                   fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21;
+                                   break;
+                               case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
+                                   fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12;
+                                   break;
+#endif
+                               default:
+                                   assert(0 && "unsupported GOT reference kind");
+                                   break;
+                           }
                            break;
                        default:
                            assert(0 && "unsupported GOT reference");
@@ -173,29 +263,25 @@
                }
                else {
                    // remember that we need to use GOT in this function
-                   if ( log ) fprintf(stderr, "found GOT use in %s to %s\n", atom->name(), targetOfGOT->name());
+                   if ( log ) fprintf(stderr, "found GOT use in %s\n", atom->name());
                    if ( !atomUsesGOT ) {
                        atomsReferencingGOT.push_back(atom);
                        atomUsesGOT = true;
                    }
-                   gotMap[targetOfGOT] = NULL;
+                   if ( gotMap.count(targetOfGOT) == 0 )
+                       gotMap[targetOfGOT] = NULL;
+                   // record if target is weak def
+                   weakDefMap[targetOfGOT] = targetIsExternalWeakDef;
                    // record weak_import attribute
                    std::map<const ld::Atom*, bool>::iterator pos = weakImportMap.find(targetOfGOT);
                    if ( pos == weakImportMap.end() ) {
                        // target not in weakImportMap, so add
-                       weakImportMap[targetOfGOT] = fit->weakImport;
-                       // If only weak_import symbols are used, linker should use LD_LOAD_WEAK_DYLIB
-                       const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(targetOfGOT->file());
-                       if ( dylib != NULL ) {
-                           if ( fit->weakImport )
-                               (const_cast<ld::dylib::File*>(dylib))->setUsingWeakImportedSymbols();
-                           else
-                               (const_cast<ld::dylib::File*>(dylib))->setUsingNonWeakImportedSymbols();
-                       }
+                       if ( log ) fprintf(stderr, "weakImportMap[%s] = %d\n", targetOfGOT->name(), targetIsWeakImport);
+                       weakImportMap[targetOfGOT] = targetIsWeakImport;
                    }
                    else {
                        // target in weakImportMap, check for weakness mismatch
-                       if ( pos->second != fit->weakImport ) {
+                       if ( pos->second != targetIsWeakImport ) {
                            // found mismatch
                            switch ( opts.weakReferenceMismatchTreatment() ) {
                                case Options::kWeakReferenceMismatchError:
@@ -214,11 +300,39 @@
            }
        }
 
-   // make GOT entries
-   for (std::map<const ld::Atom*, const ld::Atom*>::iterator it = gotMap.begin(); it != gotMap.end(); ++it) {
-       it->second = new GOTEntryAtom(internal, it->first, weakImportMap[it->first]);
+   bool is64 = false;
+   switch ( opts.architecture() ) {
+#if SUPPORT_ARCH_i386
+       case CPU_TYPE_I386:
+           is64 = false;
+           break;
+#endif
+#if SUPPORT_ARCH_x86_64
+       case CPU_TYPE_X86_64:
+           is64 = true;
+           break;
+#endif
+#if SUPPORT_ARCH_arm_any
+       case CPU_TYPE_ARM:
+           is64 = false;
+           break;
+#endif
+#if SUPPORT_ARCH_arm64
+       case CPU_TYPE_ARM64:
+           is64 = true;
+           break;
+#endif
    }
 
+   // make GOT entries
+   for (auto& entry : gotMap) {
+       if ( entry.second == NULL ) {
+           entry.second = new GOTEntryAtom(internal, entry.first, weakImportMap[entry.first], opts.useDataConstSegment() && weakDefMap[entry.first], is64);
+           if (log) fprintf(stderr, "making new GOT slot for %s, gotMap[%p] = %p\n", entry.first->name(), entry.first, entry.second);
+       }
+   }
+
+   // update atoms to use GOT entries
    for (std::vector<const ld::Atom*>::iterator it=atomsReferencingGOT.begin(); it != atomsReferencingGOT.end(); ++it) {
        const ld::Atom* atom = *it;
@@ -242,7 +356,8 @@
                break;
            }
            bool optimizable;
-           if ( (targetOfGOT == NULL) || !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
+           bool targetIsExternalWeakDef;
+           if ( (targetOfGOT == NULL) || !gotFixup(opts, internal, targetOfGOT, fit, &optimizable, &targetIsExternalWeakDef) )
                continue;
            if ( !optimizable ) {
                // GOT use not optimized away, update to bind to GOT entry
@@ -250,6 +365,7 @@
                switch ( fitThatSetTarget->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                    case ld::Fixup::bindingDirectlyBound:
+                       if ( log ) fprintf(stderr, "updating GOT use in %s to %s\n", atom->name(), targetOfGOT->name());
                        fitThatSetTarget->binding = ld::Fixup::bindingDirectlyBound;
                        fitThatSetTarget->u.target = gotMap[targetOfGOT];
                        break;
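
Note appended for context (not part of the patch): the new arm64 cases extend the existing x86_64 load-to-LEA GOT optimization to arm64 adrp/ldr pairs. A sketch of the effect, written as C++ comments; the symbol _foo is hypothetical, and @GOTPAGE/@PAGE are the standard Mach-O arm64 operand specifiers:

    // Effect of rewriting kindStoreTargetAddressARM64GOTLoadPage21/PageOff12
    // into kindStoreTargetAddressARM64GOTLeaPage21/PageOff12:
    //
    //   before (load through a __got slot):    after (direct address):
    //     adrp  x8, _foo@GOTPAGE                 adrp  x8, _foo@PAGE
    //     ldr   x8, [x8, _foo@GOTPAGEOFF]        add   x8, x8, _foo@PAGEOFF
    //
    // The indirection through the GOT disappears, saving one load and,
    // if no other reference needs it, the slot itself.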
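
The rewrite is only legal when the target's address is fully known at static link time, which is what gotFixup() checks above. A minimal standalone sketch of that gating, using hypothetical names (SymbolInfo, canOptimizeToLEA) that do not exist in ld64:

    // Hypothetical illustration of the gotFixup() gating logic -- not ld64 API.
    struct SymbolInfo {
        bool definedInAnotherDylib;  // definitionProxy: resolved only at load time
        bool tentativeOrZeroFill;    // may move when tentative defs are coalesced
        bool weakExternal;           // combineByName global: may be coalesced away
        bool interposable;           // flat namespace / -interposable: replaceable
    };

    static bool canOptimizeToLEA(const SymbolInfo& s)
    {
        return !s.definedInAnotherDylib
            && !s.tentativeOrZeroFill
            && !s.weakExternal
            && !s.interposable;
    }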