#include <unistd.h>
#include <vector>
+#include <unordered_map>
#include "Options.h"
#include "ld.hpp"
while( more );
}
+ void append_delta_encoded_uleb128_run(uint64_t start, const std::vector<uint64_t>& locations) {
+ uint64_t lastAddr = start;
+ for(std::vector<uint64_t>::const_iterator it = locations.begin(); it != locations.end(); ++it) {
+ uint64_t nextAddr = *it;
+ uint64_t delta = nextAddr - lastAddr;
+ assert(delta != 0);
+ append_uleb128(delta);
+ lastAddr = nextAddr;
+ }
+ }
+
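// Illustrative sketch (not from the linker source): how a sorted run of addresses
// becomes uleb128 deltas, the same scheme append_delta_encoded_uleb128_run() uses
// above.  encode_uleb128() here is a hypothetical standalone encoder written only
// for this example.
static void encode_uleb128(uint64_t value, std::vector<uint8_t>& out) {
	do {
		uint8_t byte = value & 0x7F;
		value >>= 7;
		if ( value != 0 )
			byte |= 0x80;				// more bytes follow
		out.push_back(byte);
	} while ( value != 0 );
}

static std::vector<uint8_t> example_delta_run() {
	// addresses 0x1008 and 0x1010, relative to start 0x1000, encode as deltas 8, 8
	const uint64_t start = 0x1000;
	const uint64_t locations[] = { 0x1008, 0x1010 };
	std::vector<uint8_t> out;
	uint64_t last = start;
	for (uint64_t addr : locations) {
		encode_uleb128(addr - last, out);	// each delta is non-zero for a sorted, unique run
		last = addr;
	}
	return out;								// { 0x08, 0x08 }
}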
void append_string(const char* str) {
for (const char* s = str; *s != '\0'; ++s)
_data.push_back(*s);
}
mid.push_back(rebase_tmp(REBASE_OPCODE_DO_REBASE_ULEB_TIMES, 1));
address += sizeof(pint_t);
+ if ( address >= curSegEnd )
+ address = 0;
}
mid.push_back(rebase_tmp(REBASE_OPCODE_DONE, 0));
std::vector<const ld::Atom*>& exports = this->_writer._exportedAtoms;
uint64_t imageBaseAddress = this->_writer.headerAndLoadCommandsSection->address;
std::vector<mach_o::trie::Entry> entries;
+ unsigned int padding = 0;
entries.reserve(exports.size());
for (std::vector<const ld::Atom*>::const_iterator it = exports.begin(); it != exports.end(); ++it) {
const ld::Atom* atom = *it;
entry.flags |= EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION;
entry.other = this->_writer.compressedOrdinalForAtom(atom);
if ( entry.other == BIND_SPECIAL_DYLIB_SELF ) {
- warning("not adding explict export for symbol %s because it is already re-exported from dylib %s", entry.name, atom->file()->path());
+ warning("not adding explict export for symbol %s because it is already re-exported from dylib %s", entry.name, atom->safeFilePath());
continue;
}
if ( atom->isAlias() ) {
entries.push_back(entry);
//fprintf(stderr, "re-export %s from lib %llu as %s\n", entry.importName, entry.other, entry.name);
}
+ else if ( atom->definition() == ld::Atom::definitionAbsolute ) {
+ entry.name = atom->name();
+ entry.flags = _options.canUseAbsoluteSymbols() ? EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE : EXPORT_SYMBOL_FLAGS_KIND_REGULAR;
+ entry.address = address;
+ entry.other = other;
+ entry.importName = NULL;
+ entries.push_back(entry);
+ }
else {
if ( (atom->definition() == ld::Atom::definitionRegular) && (atom->combine() == ld::Atom::combineByName) )
flags |= EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION;
entry.importName = NULL;
entries.push_back(entry);
}
+
+ if (_options.sharedRegionEligible() && strncmp(atom->section().segmentName(), "__DATA", 6) == 0) {
+ // A 64-bit address takes at most 10 bytes as a uleb128; the minimum is 1 byte.
+ // Pad the section out so we can cope with addresses getting larger when the __DATA
+ // segment is moved before __TEXT in the dyld shared cache.
+ padding += 9;
+ }
}
// sort vector by -exported_symbols_order, and any others by address
// create trie
mach_o::trie::makeTrie(entries, this->_encodedData.bytes());
+ // Add additional data padding for the unoptimized shared cache
+ for (unsigned int i = 0; i < padding; ++i)
+ this->_encodedData.append_byte(0);
+
// align to pointer size
this->_encodedData.pad_to_size(sizeof(pint_t));
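// Illustrative sketch (not from the linker source): why 9 padding bytes per __DATA
// export are enough.  A uleb128 holds 7 value bits per byte, so a 64-bit address
// needs at most 10 bytes while the smallest encoding is 1 byte; reserving the 9-byte
// difference per affected export leaves room for the trie to be rebuilt with larger
// addresses when dyld moves __DATA ahead of __TEXT in the shared cache.
static unsigned example_uleb128_size(uint64_t value) {
	unsigned size = 0;
	do {
		value >>= 7;
		++size;
	} while ( value != 0 );
	return size;	// example_uleb128_size(UINT64_MAX) == 10, example_uleb128_size(1) == 1
}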
template <typename A>
-class SplitSegInfoAtom : public LinkEditAtom
+class SplitSegInfoV1Atom : public LinkEditAtom
{
public:
- SplitSegInfoAtom(const Options& opts, ld::Internal& state, OutputFile& writer)
+ SplitSegInfoV1Atom(const Options& opts, ld::Internal& state, OutputFile& writer)
: LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) { }
// overrides of ld::Atom
mutable std::vector<uint64_t> _thumbHi16Locations[16];
mutable std::vector<uint64_t> _armLo16Locations;
mutable std::vector<uint64_t> _armHi16Locations[16];
+ mutable std::vector<uint64_t> _adrpLocations;
static ld::Section _s_section;
};
template <typename A>
-ld::Section SplitSegInfoAtom<A>::_s_section("__LINKEDIT", "__splitSegInfo", ld::Section::typeLinkEdit, true);
+ld::Section SplitSegInfoV1Atom<A>::_s_section("__LINKEDIT", "__splitSegInfo", ld::Section::typeLinkEdit, true);
template <>
-void SplitSegInfoAtom<x86_64>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
+void SplitSegInfoV1Atom<x86_64>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
{
switch (kind) {
case ld::Fixup::kindStoreX86PCRel32:
case ld::Fixup::kindStoreTargetAddressX86PCRel32:
case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
+ case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
+ case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
_32bitPointerLocations.push_back(address);
break;
case ld::Fixup::kindStoreLittleEndian64:
}
template <>
-void SplitSegInfoAtom<x86>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
+void SplitSegInfoV1Atom<x86>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
{
switch (kind) {
case ld::Fixup::kindStoreLittleEndian32:
case ld::Fixup::kindStoreTargetAddressLittleEndian32:
+ case ld::Fixup::kindStoreX86PCRel32TLVLoad:
+ case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
_32bitPointerLocations.push_back(address);
break;
default:
}
template <>
-void SplitSegInfoAtom<arm>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
+void SplitSegInfoV1Atom<arm>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
{
switch (kind) {
case ld::Fixup::kindStoreLittleEndian32:
}
}
-
+#if SUPPORT_ARCH_arm64
+template <>
+void SplitSegInfoV1Atom<arm64>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
+{
+ switch (kind) {
+ case ld::Fixup::kindStoreARM64Page21:
+ case ld::Fixup::kindStoreARM64GOTLoadPage21:
+ case ld::Fixup::kindStoreARM64GOTLeaPage21:
+ case ld::Fixup::kindStoreARM64TLVPLoadPage21:
+ case ld::Fixup::kindStoreTargetAddressARM64Page21:
+ case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
+ case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
+ _adrpLocations.push_back(address);
+ break;
+ case ld::Fixup::kindStoreLittleEndian32:
+ case ld::Fixup::kindStoreARM64PCRelToGOT:
+ _32bitPointerLocations.push_back(address);
+ break;
+ case ld::Fixup::kindStoreLittleEndian64:
+ case ld::Fixup::kindStoreTargetAddressLittleEndian64:
+ _64bitPointerLocations.push_back(address);
+ break;
+ default:
+ warning("codegen at address 0x%08llX prevents image from working in dyld shared cache", address);
+ break;
+ }
+}
+#endif
template <typename A>
-void SplitSegInfoAtom<A>::uleb128EncodeAddresses(const std::vector<uint64_t>& locations) const
+void SplitSegInfoV1Atom<A>::uleb128EncodeAddresses(const std::vector<uint64_t>& locations) const
{
pint_t addr = this->_options.baseAddress();
for(typename std::vector<uint64_t>::const_iterator it = locations.begin(); it != locations.end(); ++it) {
template <typename A>
-void SplitSegInfoAtom<A>::encode() const
+void SplitSegInfoV1Atom<A>::encode() const
{
// sort into group by pointer adjustment kind
std::vector<OutputFile::SplitSegInfoEntry>& info = this->_writer._splitSegInfos;
for (std::vector<OutputFile::SplitSegInfoEntry>::const_iterator it = info.begin(); it != info.end(); ++it) {
- this->addSplitSegInfo(it->address, it->kind, it->extra);
+ this->addSplitSegInfo(it->fixupAddress, it->kind, it->extra);
}
// delta compress runs of addresses
this->_encodedData.append_byte(0); // terminator
}
+ if ( _adrpLocations.size() != 0 ) {
+ this->_encodedData.append_byte(3);
+ //fprintf(stderr, "type 3:\n");
+ std::sort(_adrpLocations.begin(), _adrpLocations.end());
+ this->uleb128EncodeAddresses(_adrpLocations);
+ this->_encodedData.append_byte(0); // terminator
+ }
+
if ( _thumbLo16Locations.size() != 0 ) {
this->_encodedData.append_byte(5);
//fprintf(stderr, "type 5:\n");
_64bitPointerLocations.clear();
}
+
+template <typename A>
+class SplitSegInfoV2Atom : public LinkEditAtom
+{
+public:
+ SplitSegInfoV2Atom(const Options& opts, ld::Internal& state, OutputFile& writer)
+ : LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) { }
+
+ // overrides of ld::Atom
+ virtual const char* name() const { return "split seg info"; }
+ // overrides of LinkEditAtom
+ virtual void encode() const;
+
+private:
+ typedef typename A::P P;
+ typedef typename A::P::E E;
+ typedef typename A::P::uint_t pint_t;
+
+ // Whole :== <count> FromToSection+
+ // FromToSection :== <from-sect-index> <to-sect-index> <count> ToOffset+
+ // ToOffset :== <to-sect-offset-delta> <count> FromOffset+
+ // FromOffset :== <kind> <count> <from-sect-offset-delta>+
+
+ typedef uint32_t SectionIndexes;
+ typedef std::map<uint8_t, std::vector<uint64_t> > FromOffsetMap;
+ typedef std::map<uint64_t, FromOffsetMap> ToOffsetMap;
+ typedef std::map<SectionIndexes, ToOffsetMap> WholeMap;
+
+
+ static ld::Section _s_section;
+};
+
+template <typename A>
+ld::Section SplitSegInfoV2Atom<A>::_s_section("__LINKEDIT", "__splitSegInfo", ld::Section::typeLinkEdit, true);
+
+
+template <typename A>
+void SplitSegInfoV2Atom<A>::encode() const
+{
+ // sort into group by adjustment kind
+ //fprintf(stderr, "_splitSegV2Infos.size=%lu\n", this->_writer._splitSegV2Infos.size());
+ WholeMap whole;
+ for (const OutputFile::SplitSegInfoV2Entry& entry : this->_writer._splitSegV2Infos) {
+ //fprintf(stderr, "from=%d, to=%d\n", entry.fixupSectionIndex, entry.targetSectionIndex);
+ SectionIndexes index = entry.fixupSectionIndex << 16 | entry.targetSectionIndex;
+ ToOffsetMap& toOffsets = whole[index];
+ FromOffsetMap& fromOffsets = toOffsets[entry.targetSectionOffset];
+ fromOffsets[entry.referenceKind].push_back(entry.fixupSectionOffset);
+ }
+
+ // Add marker that this is V2 data
+ this->_encodedData.reserve(8192);
+ this->_encodedData.append_byte(DYLD_CACHE_ADJ_V2_FORMAT);
+
+ // stream out
+ // Whole :== <count> FromToSection+
+ this->_encodedData.append_uleb128(whole.size());
+ for (auto& fromToSection : whole) {
+ uint8_t fromSectionIndex = fromToSection.first >> 16;
+ uint8_t toSectionIndex = fromToSection.first & 0xFFFF;
+ ToOffsetMap& toOffsets = fromToSection.second;
+ // FromToSection :== <from-sect-index> <to-sect-index> <count> ToOffset+
+ this->_encodedData.append_uleb128(fromSectionIndex);
+ this->_encodedData.append_uleb128(toSectionIndex);
+ this->_encodedData.append_uleb128(toOffsets.size());
+ //fprintf(stderr, "from sect=%d, to sect=%d, count=%lu\n", fromSectionIndex, toSectionIndex, toOffsets.size());
+ uint64_t lastToOffset = 0;
+ for (auto& fromToOffsets : toOffsets) {
+ uint64_t toSectionOffset = fromToOffsets.first;
+ FromOffsetMap& fromOffsets = fromToOffsets.second;
+ // ToOffset :== <to-sect-offset-delta> <count> FromOffset+
+ this->_encodedData.append_uleb128(toSectionOffset - lastToOffset);
+ this->_encodedData.append_uleb128(fromOffsets.size());
+ for (auto& kindAndOffsets : fromOffsets) {
+ uint8_t kind = kindAndOffsets.first;
+ std::vector<uint64_t>& fromOffsets = kindAndOffsets.second;
+ // FromOffset :== <kind> <count> <from-sect-offset-delta>
+ this->_encodedData.append_uleb128(kind);
+ this->_encodedData.append_uleb128(fromOffsets.size());
+ std::sort(fromOffsets.begin(), fromOffsets.end());
+ uint64_t lastFromOffset = 0;
+ for (uint64_t offset : fromOffsets) {
+ this->_encodedData.append_uleb128(offset - lastFromOffset);
+ lastFromOffset = offset;
+ }
+ }
+ lastToOffset = toSectionOffset;
+ }
+ }
+
+
+ // always add zero byte to mark end
+ this->_encodedData.append_byte(0);
+
+ // align to pointer size
+ this->_encodedData.pad_to_size(sizeof(pint_t));
+
+ this->_encoded = true;
+}
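// Illustrative sketch (not from the linker source): the V2 byte stream encode()
// above would emit for a single hypothetical fixup (from section 1, offset 0x10,
// reference kind 3, targeting section 2, offset 0).  Every value except
// DYLD_CACHE_ADJ_V2_FORMAT is invented for illustration, and each count/offset is
// small enough to fit in one uleb128 byte.
static const uint8_t exampleSplitSegV2Stream[] = {
	DYLD_CACHE_ADJ_V2_FORMAT,	// version marker
	0x01,						// Whole: one FromToSection
	0x01, 0x02, 0x01,			//   from-sect-index=1, to-sect-index=2, one ToOffset
	0x00, 0x01,					//     to-sect-offset-delta=0, one FromOffset group
	0x03, 0x01, 0x10,			//       kind=3, count=1, from-sect-offset-delta=0x10
	0x00						// trailing zero terminator
};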
+
+
+
template <typename A>
class FunctionStartsAtom : public LinkEditAtom
{
-// <rdar://problem/7209249> linker needs to cache "Designated Requirements" in linked binary
+
template <typename A>
-class DependentDRAtom : public LinkEditAtom
+class OptimizationHintsAtom : public LinkEditAtom
{
public:
- DependentDRAtom(const Options& opts, ld::Internal& state, OutputFile& writer)
- : LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) { }
+ OptimizationHintsAtom(const Options& opts, ld::Internal& state, OutputFile& writer)
+ : LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) {
+ assert(opts.outputKind() == Options::kObjectFile);
+ }
// overrides of ld::Atom
- virtual const char* name() const { return "dependent dylib DR info"; }
+ virtual const char* name() const { return "linker optimization hints"; }
// overrides of LinkEditAtom
virtual void encode() const;
};
template <typename A>
-ld::Section DependentDRAtom<A>::_s_section("__LINKEDIT", "__dependentDR", ld::Section::typeLinkEdit, true);
-
+ld::Section OptimizationHintsAtom<A>::_s_section("__LINKEDIT", "__opt_hints", ld::Section::typeLinkEdit, true);
template <typename A>
-void DependentDRAtom<A>::encode() const
+void OptimizationHintsAtom<A>::encode() const
{
- Security::SuperBlobCore<Security::SuperBlob<Security::kSecCodeMagicDRList>, Security::kSecCodeMagicDRList, uint32_t>::Maker maker;
-
- uint32_t index = 0;
- for(std::vector<ld::dylib::File*>::iterator it=_state.dylibs.begin(); it != _state.dylibs.end(); ++it) {
- const ld::dylib::File* dylib = *it;
- Security::BlobCore* dylibDR = (Security::BlobCore*)dylib->codeSignatureDR();
- void* dup = NULL;
- if ( dylibDR != NULL ) {
- // <rdar://problem/11315321> Maker takes ownership of every blob added
- // We need to make a copy here because dylib still owns the pointer returned by codeSignatureDR()
- dup = ::malloc(dylibDR->length());
- ::memcpy(dup, dylibDR, dylibDR->length());
+ if ( _state.someObjectHasOptimizationHints ) {
+ for (std::vector<ld::Internal::FinalSection*>::iterator sit = _state.sections.begin(); sit != _state.sections.end(); ++sit) {
+ ld::Internal::FinalSection* sect = *sit;
+ if ( sect->type() != ld::Section::typeCode )
+ continue;
+ for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
+ const ld::Atom* atom = *ait;
+ uint64_t address = atom->finalAddress();
+ for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
+ if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint)
+ continue;
+ ld::Fixup::LOH_arm64 extra;
+ extra.addend = fit->u.addend;
+ _encodedData.append_uleb128(extra.info.kind);
+ _encodedData.append_uleb128(extra.info.count+1);
+ _encodedData.append_uleb128((extra.info.delta1 << 2) + fit->offsetInAtom + address);
+ if ( extra.info.count > 0 )
+ _encodedData.append_uleb128((extra.info.delta2 << 2) + fit->offsetInAtom + address);
+ if ( extra.info.count > 1 )
+ _encodedData.append_uleb128((extra.info.delta3 << 2) + fit->offsetInAtom + address);
+ if ( extra.info.count > 2 )
+ _encodedData.append_uleb128((extra.info.delta4 << 2) + fit->offsetInAtom + address);
+ }
+ }
}
- maker.add(index, (Security::BlobCore*)dup);
- ++index;
+
+ this->_encodedData.pad_to_size(sizeof(pint_t));
}
- Security::SuperBlob<Security::kSecCodeMagicDRList>* topBlob = maker.make();
- const uint8_t* data = (uint8_t*)topBlob->data();
- for(size_t i=0; i < topBlob->length(); ++i)
- _encodedData.append_byte(data[i]);
-
- this->_encodedData.pad_to_size(sizeof(pint_t));
-
this->_encoded = true;
}
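// Illustrative sketch (not from the linker source): a hypothetical AdrpLdr hint
// covering the two instructions at 0x100004000 and 0x100004004 streams out as
// uleb128 values in the order kind, count (2), then both instruction addresses.
// The deltaN bitfields hold 4-byte instruction offsets, which is why the encoder
// above shifts them left by 2 before adding fit->offsetInAtom and the atom address.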
-
} // namespace tool
} // namespace ld