#include <vector>
#include <map>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
-#include <ext/hash_map>
+#include "MachOFileAbstraction.hpp"
#include "ld.hpp"
#include "got.h"
+#include "configure.h"
namespace ld {
namespace passes {
namespace got {
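+// Each GOTEntryAtom is a synthesized, pointer-sized non-lazy pointer. Its single
+// fixup stores the target's address into the slot. Entries for external weak
+// definitions go into a separate section (see _s_sectionWeak below).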
class GOTEntryAtom : public ld::Atom {
public:
- GOTEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport)
- : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
+ GOTEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport, bool weakDef, bool is64)
+ : ld::Atom(weakDef ? _s_sectionWeak : _s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
- symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)),
- _fixup(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressLittleEndian64, target),
- _target(target)
+ symbolTableNotIn, false, false, false, (is64 ? ld::Atom::Alignment(3) : ld::Atom::Alignment(2))),
+ _fixup(0, ld::Fixup::k1of1, (is64 ? ld::Fixup::kindStoreTargetAddressLittleEndian64 : ld::Fixup::kindStoreTargetAddressLittleEndian32), target),
+ _target(target),
+ _is64(is64)
{ _fixup.weakImport = weakImport; internal.addAtom(*this); }
virtual const ld::File* file() const { return NULL; }
- virtual bool translationUnitSource(const char** dir, const char**) const
- { return false; }
virtual const char* name() const { return _target->name(); }
- virtual uint64_t size() const { return 8; }
+ virtual uint64_t size() const { return (_is64 ? 8 : 4); }
virtual uint64_t objectAddress() const { return 0; }
virtual void copyRawContent(uint8_t buffer[]) const { }
virtual void setScope(Scope) { }
private:
mutable ld::Fixup _fixup;
const ld::Atom* _target;
+ bool _is64;
static ld::Section _s_section;
+ static ld::Section _s_sectionWeak;
};
ld::Section GOTEntryAtom::_s_section("__DATA", "__got", ld::Section::typeNonLazyPointer);
+ld::Section GOTEntryAtom::_s_sectionWeak("__DATA", "__got_weak", ld::Section::typeNonLazyPointer);
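+// Returns true if 'fixup' is a GOT-using reference to 'targetOfGOT'. On return,
+// *optimizable is set if the GOT load can be rewritten as a direct address
+// computation, and *targetIsExternalWeakDef is set if the target is an exported
+// weak definition.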
-static bool gotFixup(const Options& opts, ld::Internal& internal, const ld::Atom* targetOfGOT, const ld::Fixup* fixup, bool* optimizable)
+static bool gotFixup(const Options& opts, ld::Internal& internal, const ld::Atom* targetOfGOT, const ld::Fixup* fixup, bool* optimizable, bool* targetIsExternalWeakDef)
{
+ *targetIsExternalWeakDef = false;
switch (fixup->kind) {
case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
+#if SUPPORT_ARCH_arm64
+ case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
+ case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
+#endif
// start by assuming this can be optimized
*optimizable = true;
// cannot do LEA optimization if target is in another dylib
if ( targetOfGOT->definition() == ld::Atom::definitionProxy )
*optimizable = false;
// cannot do LEA optimization if target is a huge zero-fill or tentative definition
if ( internal.usingHugeSections && (targetOfGOT->size() > 1024*1024)
&& ( (targetOfGOT->section().type() == ld::Section::typeZeroFill)
|| (targetOfGOT->section().type() == ld::Section::typeTentativeDefs)) ) {
*optimizable = false;
}
if ( targetOfGOT->scope() == ld::Atom::scopeGlobal ) {
// cannot do LEA optimization if target is weak exported symbol
- if ( (targetOfGOT->definition() == ld::Atom::definitionRegular) && (targetOfGOT->combine() == ld::Atom::combineByName) )
- *optimizable = false;
+ if ( ((targetOfGOT->definition() == ld::Atom::definitionRegular) || (targetOfGOT->definition() == ld::Atom::definitionProxy)) && (targetOfGOT->combine() == ld::Atom::combineByName) ) {
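+ // an exported weak def in a dynamic image may be coalesced with another
+ // image's copy at load time, so its address must be fetched through the GOT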
+ switch ( opts.outputKind() ) {
+ case Options::kDynamicExecutable:
+ case Options::kDynamicLibrary:
+ case Options::kDynamicBundle:
+ case Options::kKextBundle:
+ *targetIsExternalWeakDef = true;
+ *optimizable = false;
+ break;
+ case Options::kStaticExecutable:
+ case Options::kDyld:
+ case Options::kPreload:
+ case Options::kObjectFile:
+ break;
+ }
+ }
// cannot do LEA optimization if target is interposable
if ( opts.interposable(targetOfGOT->name()) )
*optimizable = false;
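// cannot do LEA optimization if using flat-namespace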
if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
*optimizable = false;
}
+ else if ( targetOfGOT->scope() == ld::Atom::scopeLinkageUnit) {
+ // <rdar://problem/12379969> don't do optimization if target is in custom segment
+ if ( opts.sharedRegionEligible() ) {
+ const char* segName = targetOfGOT->section().segmentName();
+ if ( (strcmp(segName, "__TEXT") != 0) && (strcmp(segName, "__DATA") != 0) ) {
+ *optimizable = false;
+ }
+ }
+ }
return true;
case ld::Fixup::kindStoreX86PCRel32GOT:
+#if SUPPORT_ARCH_arm64
+ case ld::Fixup::kindStoreARM64PCRelToGOT:
+#endif
*optimizable = false;
return true;
case ld::Fixup::kindNoneGroupSubordinatePersonality:
*optimizable = false;
return true;
default:
break;
}
return false;
}

void doPass(const Options& opts, ld::Internal& internal)
{
const bool log = false;
// only make got section in final linked images
if ( opts.outputKind() == Options::kObjectFile )
return;
+ // pre-fill gotMap with existing non-lazy pointers
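+ // (so a target that already has one is not given a second, duplicate slot)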
+ std::map<const ld::Atom*, const ld::Atom*> gotMap;
+ for (ld::Internal::FinalSection* sect : internal.sections) {
+ if ( sect->type() != ld::Section::typeNonLazyPointer )
+ continue;
+ for (const ld::Atom* atom : sect->atoms) {
+ const ld::Atom* target = NULL;
+ for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
+ switch (fit->kind) {
+ case ld::Fixup::kindStoreTargetAddressLittleEndian64:
+ case ld::Fixup::kindStoreTargetAddressLittleEndian32:
+ switch ( fit->binding ) {
+ case ld::Fixup::bindingsIndirectlyBound:
+ target = internal.indirectBindingTable[fit->u.bindingIndex];
+ break;
+ case ld::Fixup::bindingDirectlyBound:
+ target = fit->u.target;
+ break;
+ default:
+ fprintf(stderr, "non-pointer is got entry\n");
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ if ( target != NULL ) {
+ if (log) fprintf(stderr, "found existing got entry to %s\n", target->name());
+ gotMap[target] = atom;
+ }
+ }
+ }
+
// walk all atoms and fixups looking for GOT-able references
// don't create GOT atoms during this loop because that could invalidate the sections iterator
std::vector<const ld::Atom*> atomsReferencingGOT;
- std::map<const ld::Atom*,ld::Atom*> gotMap;
std::map<const ld::Atom*,bool> weakImportMap;
+ std::map<const ld::Atom*,bool> weakDefMap;
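+ // weakDefMap records, per GOT target, whether it is an external weak definition
+ // (used later to choose between the __got and __got_weak sections)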
atomsReferencingGOT.reserve(128);
for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
ld::Internal::FinalSection* sect = *sit;
break;
}
bool optimizable;
- if ( !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
+ bool targetIsExternalWeakDef;
+ if ( !gotFixup(opts, internal, targetOfGOT, fit, &optimizable, &targetIsExternalWeakDef) )
continue;
if ( optimizable ) {
// change from load of GOT entry to lea of target
if ( log ) fprintf(stderr, "optimized GOT usage in %s to %s\n", atom->name(), targetOfGOT->name());
switch ( fit->binding ) {
case ld::Fixup::bindingsIndirectlyBound:
case ld::Fixup::bindingDirectlyBound:
fit->binding = ld::Fixup::bindingDirectlyBound;
fit->u.target = targetOfGOT;
- fit->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA;
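+ // pick the LEA-style fixup kind so the instruction writer materializes the
+ // target address directly instead of loading it from the GOT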
+ switch ( fit->kind ) {
+ case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
+ fit->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA;
+ break;
+#if SUPPORT_ARCH_arm64
+ case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
+ fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21;
+ break;
+ case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
+ fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12;
+ break;
+#endif
+ default:
+ assert(0 && "unsupported GOT reference kind");
+ break;
+ }
break;
default:
assert(0 && "unsupported GOT reference");
}
}
else {
// remember that we need to use GOT in this function
- if ( log ) fprintf(stderr, "found GOT use in %s to %s\n", atom->name(), targetOfGOT->name());
+ if ( log ) fprintf(stderr, "found GOT use in %s\n", atom->name());
if ( !atomUsesGOT ) {
atomsReferencingGOT.push_back(atom);
atomUsesGOT = true;
}
- gotMap[targetOfGOT] = NULL;
+ if ( gotMap.count(targetOfGOT) == 0 )
+ gotMap[targetOfGOT] = NULL;
+ // remember targets that are external weak definitions; only set the flag,
+ // so a later non-weak-looking fixup to the same target cannot clear it
+ if ( targetIsExternalWeakDef )
+ weakDefMap[targetOfGOT] = true;
// record weak_import attribute
std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(targetOfGOT);
if ( pos == weakImportMap.end() ) {
// target not yet in weakImportMap, so add
weakImportMap[targetOfGOT] = fit->weakImport;
}
}
- // make GOT entries
- for (std::map<const ld::Atom*,ld::Atom*>::iterator it = gotMap.begin(); it != gotMap.end(); ++it) {
- it->second = new GOTEntryAtom(internal, it->first, weakImportMap[it->first]);
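+ // GOT slots are pointer-sized: 8 bytes on 64-bit architectures, 4 otherwise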
+ bool is64 = false;
+ switch ( opts.architecture() ) {
+#if SUPPORT_ARCH_i386
+ case CPU_TYPE_I386:
+ is64 = false;
+ break;
+#endif
+#if SUPPORT_ARCH_x86_64
+ case CPU_TYPE_X86_64:
+ is64 = true;
+ break;
+#endif
+#if SUPPORT_ARCH_arm_any
+ case CPU_TYPE_ARM:
+ is64 = false;
+ break;
+#endif
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ is64 = true;
+ break;
+#endif
}
+ // make GOT entries
+ for (auto& entry : gotMap) {
+ if ( entry.second == NULL ) {
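+ // an entry for a weak def goes in the separate __got_weak section only when
+ // __DATA_CONST is in use, keeping it out of the read-only regular GOT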
+ entry.second = new GOTEntryAtom(internal, entry.first, weakImportMap[entry.first], opts.useDataConstSegment() && weakDefMap[entry.first], is64);
+ if (log) fprintf(stderr, "making new GOT slot for %s, gotMap[%p] = %p\n", entry.first->name(), entry.first, entry.second);
+ }
+ }
+
// update atoms to use GOT entries
for (std::vector<const ld::Atom*>::iterator it=atomsReferencingGOT.begin(); it != atomsReferencingGOT.end(); ++it) {
const ld::Atom* atom = *it;
break;
}
bool optimizable;
- if ( (targetOfGOT == NULL) || !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
+ bool targetIsExternalWeakDef;
+ if ( (targetOfGOT == NULL) || !gotFixup(opts, internal, targetOfGOT, fit, &optimizable, &targetIsExternalWeakDef) )
continue;
if ( !optimizable ) {
// GOT use not optimized away, update to bind to GOT entry
assert(fitThatSetTarget != NULL);
switch ( fitThatSetTarget->binding ) {
case ld::Fixup::bindingsIndirectlyBound:
case ld::Fixup::bindingDirectlyBound:
+ if ( log ) fprintf(stderr, "updating GOT use in %s to %s\n", atom->name(), targetOfGOT->name());
fitThatSetTarget->binding = ld::Fixup::bindingDirectlyBound;
fitThatSetTarget->u.target = gotMap[targetOfGOT];
break;