#include <unistd.h>
#include <vector>
+#include <unordered_map>
#include "Options.h"
#include "ld.hpp"
while( more );
}
+ void append_delta_encoded_uleb128_run(uint64_t start, const std::vector<uint64_t>& locations) {
+ uint64_t lastAddr = start;
+ for(std::vector<uint64_t>::const_iterator it = locations.begin(); it != locations.end(); ++it) {
+ uint64_t nextAddr = *it;
+ uint64_t delta = nextAddr - lastAddr;
+ assert(delta != 0);
+ append_uleb128(delta);
+ lastAddr = nextAddr;
+ }
+ }
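+
+ // Editorial sketch (not linker source): a run written by the method above
+ // decodes with a running sum, since each uleb128 value is the gap to the
+ // next address. read_uleb128() and visit() are hypothetical helpers.
+#if 0
+ uint64_t addr = start;
+ while ( p != end ) {
+ addr += read_uleb128(p, end); // every delta is non-zero (asserted above)
+ visit(addr); // hypothetical consumer of each decoded address
+ }
+#endif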
+
	void append_string(const char* str) {
		for (const char* s = str; *s != '\0'; ++s)
			_data.push_back(*s);
		_data.push_back('\0');	// opcode streams rely on NUL-terminated symbol names
	}
virtual void encode() const;
private:
+ void encodeV1() const;
+
struct rebase_tmp
{
rebase_tmp(uint8_t op, uint64_t p1, uint64_t p2=0) : opcode(op), operand1(p1), operand2(p2) {}
// sort rebase info by type, then address
std::vector<OutputFile::RebaseInfo>& info = this->_writer._rebaseInfo;
+ if (info.empty())
+ return;
+
std::sort(info.begin(), info.end());
+ // use encoding based on target minOS
+ if ( _options.useLinkedListBinding() && !this->_writer._hasUnalignedFixup ) {
+ if ( info.back()._type != REBASE_TYPE_POINTER )
+ throw "unsupported rebase type with linked list opcodes";
+ // As the binding and rebasing are both linked lists, just use the binds
+ // to do everything.
+ } else {
+ encodeV1();
+ }
+}
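+// Editorial note: in the threaded case no rebase opcodes are emitted here;
+// encodeV2() in BindingInfoAtom below interleaves the sorted rebases and
+// binds into one linked-list chain per 4KB page.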
+
+
+template <typename A>
+void RebaseInfoAtom<A>::encodeV1() const
+{
+ std::vector<OutputFile::RebaseInfo>& info = this->_writer._rebaseInfo;
+
// convert to temp encoding that can be more easily optimized
std::vector<rebase_tmp> mid;
uint64_t curSegStart = 0;
}
mid.push_back(rebase_tmp(REBASE_OPCODE_DO_REBASE_ULEB_TIMES, 1));
address += sizeof(pint_t);
+ if ( address >= curSegEnd )
+ address = 0;
}
mid.push_back(rebase_tmp(REBASE_OPCODE_DONE, 0));
private:
+ void encodeV1() const;
+ void encodeV2() const;
+
typedef typename A::P P;
typedef typename A::P::E E;
typedef typename A::P::uint_t pint_t;
template <typename A>
void BindingInfoAtom<A>::encode() const
+{
+ // use encoding based on target minOS
+ if ( _options.useLinkedListBinding() && !this->_writer._hasUnalignedFixup ) {
+ encodeV2();
+ } else {
+ encodeV1();
+ }
+}
+
+
+template <typename A>
+void BindingInfoAtom<A>::encodeV1() const
{
// sort by library, symbol, type, then address
std::vector<OutputFile::BindingInfo>& info = this->_writer._bindingInfo;
std::sort(info.begin(), info.end());
-
+
// convert to temp encoding that can be more easily optimized
std::vector<binding_tmp> mid;
uint64_t curSegStart = 0;
if (log) fprintf(stderr, "total binding info size = %ld\n", this->_encodedData.size());
}
+template <typename A>
+void BindingInfoAtom<A>::encodeV2() const
+{
+ std::vector<OutputFile::BindingInfo>& bindInfo = this->_writer._bindingInfo;
+ std::vector<OutputFile::RebaseInfo>& rebaseInfo = this->_writer._rebaseInfo;
+ const static bool log = false;
+
+ std::sort(bindInfo.begin(), bindInfo.end());
+
+ // convert to temp encoding that can be more easily optimized
+ std::vector<binding_tmp> mid;
+ uint64_t curSegStart = 0;
+ uint64_t curSegEnd = 0;
+ uint32_t curSegIndex = 0;
+ int ordinal = 0x80000000;
+ const char* symbolName = NULL;
+ uint8_t type = 0;
+ uint64_t address = (uint64_t)(-1);
+ int64_t addend = 0;
+ uint64_t numBinds = (uint64_t)(-1);
+ for (std::vector<OutputFile::BindingInfo>::iterator it = bindInfo.begin(); it != bindInfo.end(); ++it) {
+ bool madeChange = false;
+ if ( ordinal != it->_libraryOrdinal ) {
+ if ( it->_libraryOrdinal <= 0 ) {
+ // special lookups are encoded as negative numbers in BindingInfo
+ mid.push_back(binding_tmp(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM, it->_libraryOrdinal));
+ }
+ else if ( it->_libraryOrdinal <= 15 ) {
+ mid.push_back(binding_tmp(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM, it->_libraryOrdinal));
+ }
+ else {
+ mid.push_back(binding_tmp(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB, it->_libraryOrdinal));
+ }
+ ordinal = it->_libraryOrdinal;
+ madeChange = true;
+ }
+ if ( symbolName != it->_symbolName ) {
+ mid.push_back(binding_tmp(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, it->_flags, 0, it->_symbolName));
+ symbolName = it->_symbolName;
+ madeChange = true;
+ }
+ if ( type != it->_type ) {
+ if ( it->_type != BIND_TYPE_POINTER )
+ throw "unsupported bind type with linked list opcodes";
+ mid.push_back(binding_tmp(BIND_OPCODE_SET_TYPE_IMM, it->_type));
+ type = it->_type;
+ madeChange = true;
+ }
+ if ( address != it->_address ) {
+ // Note: we don't push the addresses here; that is all done later with the threaded chains.
+ if ( (it->_address < curSegStart) || ( it->_address >= curSegEnd) ) {
+ if ( ! this->_writer.findSegment(this->_state, it->_address, &curSegStart, &curSegEnd, &curSegIndex) )
+ throw "binding address outside range of any segment";
+ }
+ address = it->_address;
+ }
+ if ( addend != it->_addend ) {
+ mid.push_back(binding_tmp(BIND_OPCODE_SET_ADDEND_SLEB, it->_addend));
+ addend = it->_addend;
+ madeChange = true;
+ }
+
+ if (madeChange) {
+ ++numBinds;
+ mid.push_back(binding_tmp(BIND_OPCODE_DO_BIND, 0));
+ }
+ it->_threadedBindOrdinal = numBinds;
+ }
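+ // Editorial note: numBinds starts at (uint64_t)(-1), so the first DO_BIND
+ // above gets ordinal 0, and entries that differ only in address reuse the
+ // previous ordinal instead of emitting another DO_BIND.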
+
+ // We can only support 2^16 bind ordinals.
+ if ( (numBinds > 0x10000) && (numBinds != (uint64_t)(-1)) )
+ throwf("too many binds (%llu). The limit is 65536", numBinds);
+
+ // Now that we have the bind ordinal table populated, set the page starts.
+
+ std::vector<int64_t>& threadedRebaseBindIndices = this->_writer._threadedRebaseBindIndices;
+ threadedRebaseBindIndices.reserve(bindInfo.size() + rebaseInfo.size());
+
+ for (int64_t i = 0, e = rebaseInfo.size(); i != e; ++i)
+ threadedRebaseBindIndices.push_back(-i);
+
+ for (int64_t i = 0, e = bindInfo.size(); i != e; ++i)
+ threadedRebaseBindIndices.push_back(i + 1);
+
+ // Now sort the entries by address.
+ std::sort(threadedRebaseBindIndices.begin(), threadedRebaseBindIndices.end(),
+ [&rebaseInfo, &bindInfo](int64_t indexA, int64_t indexB) {
+ if (indexA == indexB)
+ return false;
+ uint64_t addressA = indexA <= 0 ? rebaseInfo[-indexA]._address : bindInfo[indexA - 1]._address;
+ uint64_t addressB = indexB <= 0 ? rebaseInfo[-indexB]._address : bindInfo[indexB - 1]._address;
+ assert(addressA != addressB);
+ return addressA < addressB;
+ });
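+ // Editorial note on the index convention sorted above: index 0 would be
+ // ambiguous between the first rebase and a bind, so binds are stored
+ // shifted up by one (bindInfo[i] -> i + 1) and rebases as non-positive
+ // values (rebaseInfo[i] -> -i).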
+
+ curSegStart = 0;
+ curSegEnd = 0;
+ curSegIndex = 0;
+ uint64_t prevPageIndex = 0;
+ for (int64_t entryIndex : threadedRebaseBindIndices) {
+ OutputFile::RebaseInfo* rebase = nullptr;
+ OutputFile::BindingInfo* bind = nullptr;
+ uint64_t address = 0;
+ if (entryIndex <= 0) {
+ rebase = &rebaseInfo[-entryIndex];
+ address = rebase->_address;
+ } else {
+ bind = &bindInfo[entryIndex - 1];
+ address = bind->_address;
+ }
+ assert((address & 7) == 0);
+
+ bool newSegment = false;
+ if ( (address < curSegStart) || ( address >= curSegEnd) ) {
+ // Start of a new segment.
+ if ( ! this->_writer.findSegment(this->_state, address, &curSegStart, &curSegEnd, &curSegIndex) )
+ throw "binding address outside range of any segment";
+ newSegment = true;
+ }
+
+ // At this point we know we have the page starts array space reserved
+ // so set the page start for this entry if we haven't got one already.
+ uint64_t pageIndex = ( address - curSegStart ) / 4096;
+ if ( newSegment || (pageIndex != prevPageIndex) ) {
+ mid.push_back(binding_tmp(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB, curSegIndex, address - curSegStart));
+ mid.push_back(binding_tmp(BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_APPLY, 0));
+ }
+ prevPageIndex = pageIndex;
+ }
+ mid.push_back(binding_tmp(BIND_OPCODE_DONE, 0));
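+
+ // Editorial note: the page-start loop above emits one
+ // SET_SEGMENT_AND_OFFSET_ULEB / THREADED_APPLY pair per 4KB page that
+ // contains at least one fixup; dyld then follows the chain links stored in
+ // the pointer slots themselves, so pages without fixups cost nothing.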
+
+ // convert to compressed encoding
+ this->_encodedData.reserve(bindInfo.size()*2);
+
+ // First push the total number of binds so that we can allocate space for this in dyld.
+ if ( log ) fprintf(stderr, "BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB(%lld)\n", numBinds + 1);
+ this->_encodedData.append_byte(BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB);
+ this->_encodedData.append_uleb128(numBinds + 1);
+
+ bool done = false;
+ for (typename std::vector<binding_tmp>::iterator it = mid.begin(); !done && it != mid.end() ; ++it) {
+ switch ( it->opcode ) {
+ case BIND_OPCODE_DONE:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_DONE()\n");
+ done = true;
+ break;
+ case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_IMM(%lld)\n", it->operand1);
+ this->_encodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | it->operand1);
+ break;
+ case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB(%lld)\n", it->operand1);
+ this->_encodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
+ this->_encodedData.append_uleb128(it->operand1);
+ break;
+ case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_SPECIAL_IMM(%lld)\n", it->operand1);
+ this->_encodedData.append_byte(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | (it->operand1 & BIND_IMMEDIATE_MASK));
+ break;
+ case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM(0x%0llX, %s)\n", it->operand1, it->name);
+ this->_encodedData.append_byte(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | it->operand1);
+ this->_encodedData.append_string(it->name);
+ break;
+ case BIND_OPCODE_SET_TYPE_IMM:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_SET_TYPE_IMM(%lld)\n", it->operand1);
+ this->_encodedData.append_byte(BIND_OPCODE_SET_TYPE_IMM | it->operand1);
+ break;
+ case BIND_OPCODE_SET_ADDEND_SLEB:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_SET_ADDEND_SLEB(%lld)\n", it->operand1);
+ this->_encodedData.append_byte(BIND_OPCODE_SET_ADDEND_SLEB);
+ this->_encodedData.append_sleb128(it->operand1);
+ break;
+ case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB(%lld, 0x%llX)\n", it->operand1, it->operand2);
+ this->_encodedData.append_byte(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | it->operand1);
+ this->_encodedData.append_uleb128(it->operand2);
+ break;
+ case BIND_OPCODE_ADD_ADDR_ULEB:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
+ this->_encodedData.append_byte(BIND_OPCODE_ADD_ADDR_ULEB);
+ this->_encodedData.append_uleb128(it->operand1);
+ break;
+ case BIND_OPCODE_DO_BIND:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND()\n");
+ this->_encodedData.append_byte(BIND_OPCODE_DO_BIND);
+ break;
+ case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB(0x%llX)\n", it->operand1);
+ this->_encodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB);
+ this->_encodedData.append_uleb128(it->operand1);
+ break;
+ case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED(%lld=0x%llX)\n", it->operand1, it->operand1*sizeof(pint_t));
+ this->_encodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | it->operand1 );
+ break;
+ case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
+ if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB(%lld, %lld)\n", it->operand1, it->operand2);
+ this->_encodedData.append_byte(BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB);
+ this->_encodedData.append_uleb128(it->operand1);
+ this->_encodedData.append_uleb128(it->operand2);
+ break;
+ case BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB:
+ if ( log ) fprintf(stderr, "BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB(%lld)\n", it->operand1);
+ this->_encodedData.append_byte(BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB);
+ this->_encodedData.append_uleb128(it->operand1);
+ break;
+ case BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_APPLY:
+ this->_encodedData.append_byte(BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_APPLY);
+ if ( log ) fprintf(stderr, "BIND_SUBOPCODE_THREADED_APPLY()\n");
+ break;
+ }
+ }
+
+ // terminate the opcode stream, then align to pointer size
+ this->_encodedData.append_byte(BIND_OPCODE_DONE);
+ this->_encodedData.pad_to_size(sizeof(pint_t));
+
+ this->_encoded = true;
+
+ if (log) fprintf(stderr, "total binding info size = %ld\n", this->_encodedData.size());
+}
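+
+// Editorial sketch: for a single pointer bind of "_foo" from dylib ordinal 1,
+// with its fixup at offset 0 of a hypothetical segment index 2, encodeV2()
+// above would produce roughly this stream:
+//
+//   BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB, uleb(1)
+//   BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | 1
+//   BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, "_foo\0"
+//   BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER
+//   BIND_OPCODE_DO_BIND
+//   BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | 2, uleb(0)
+//   BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_APPLY
+//   BIND_OPCODE_DONE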
+
template <typename A>
std::vector<const ld::Atom*>& exports = this->_writer._exportedAtoms;
uint64_t imageBaseAddress = this->_writer.headerAndLoadCommandsSection->address;
std::vector<mach_o::trie::Entry> entries;
+ unsigned int padding = 0;
entries.reserve(exports.size());
for (std::vector<const ld::Atom*>::const_iterator it = exports.begin(); it != exports.end(); ++it) {
const ld::Atom* atom = *it;
uint64_t flags = (atom->contentType() == ld::Atom::typeTLV) ? EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL : EXPORT_SYMBOL_FLAGS_KIND_REGULAR;
uint64_t other = 0;
uint64_t address = atom->finalAddress() - imageBaseAddress;
- if ( (atom->definition() == ld::Atom::definitionRegular) && (atom->combine() == ld::Atom::combineByName) )
- flags |= EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION;
if ( atom->definition() == ld::Atom::definitionProxy ) {
entry.name = atom->name();
entry.flags = flags | EXPORT_SYMBOL_FLAGS_REEXPORT;
+ if ( atom->combine() == ld::Atom::combineByName )
+ entry.flags |= EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION;
entry.other = this->_writer.compressedOrdinalForAtom(atom);
if ( entry.other == BIND_SPECIAL_DYLIB_SELF ) {
- warning("not adding explict export for symbol %s because it is already re-exported from dylib %s", entry.name, atom->file()->path());
+ warning("not adding explicit export for symbol %s because it is already re-exported from dylib %s", entry.name, atom->safeFilePath());
continue;
}
if ( atom->isAlias() ) {
entries.push_back(entry);
//fprintf(stderr, "re-export %s from lib %llu as %s\n", entry.importName, entry.other, entry.name);
}
+ else if ( atom->definition() == ld::Atom::definitionAbsolute ) {
+ entry.name = atom->name();
+ entry.flags = _options.canUseAbsoluteSymbols() ? EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE : EXPORT_SYMBOL_FLAGS_KIND_REGULAR;
+ entry.address = address;
+ entry.other = other;
+ entry.importName = NULL;
+ entries.push_back(entry);
+ }
else {
+ if ( (atom->definition() == ld::Atom::definitionRegular) && (atom->combine() == ld::Atom::combineByName) )
+ flags |= EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION;
if ( atom->isThumb() )
address |= 1;
if ( atom->contentType() == ld::Atom::typeResolver ) {
entry.importName = NULL;
entries.push_back(entry);
}
+
+ if (_options.sharedRegionEligible() && strncmp(atom->section().segmentName(), "__DATA", 6) == 0) {
+ // The maximum 64-bit address encodes as 10 uleb128 bytes; the minimum is 1 byte.
+ // Pad the section out so we can cope with addresses growing when the __DATA
+ // segment is moved in front of __TEXT in the dyld shared cache.
+ padding += 9;
+ }
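+ // Editorial note: 9 bytes per __DATA export covers the worst case of a
+ // 1-byte uleb128 address growing to the 10-byte maximum.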
}
// sort vector by -exported_symbols_order, and any others by address
// create trie
mach_o::trie::makeTrie(entries, this->_encodedData.bytes());
+ // Add additional data padding for the unoptimized shared cache
+ for (unsigned int i = 0; i < padding; ++i)
+ this->_encodedData.append_byte(0);
+
// align to pointer size
this->_encodedData.pad_to_size(sizeof(pint_t));
template <typename A>
-class SplitSegInfoAtom : public LinkEditAtom
+class SplitSegInfoV1Atom : public LinkEditAtom
{
public:
- SplitSegInfoAtom(const Options& opts, ld::Internal& state, OutputFile& writer)
+ SplitSegInfoV1Atom(const Options& opts, ld::Internal& state, OutputFile& writer)
: LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) { }
// overrides of ld::Atom
mutable std::vector<uint64_t> _thumbHi16Locations[16];
mutable std::vector<uint64_t> _armLo16Locations;
mutable std::vector<uint64_t> _armHi16Locations[16];
+ mutable std::vector<uint64_t> _adrpLocations;
static ld::Section _s_section;
};
template <typename A>
-ld::Section SplitSegInfoAtom<A>::_s_section("__LINKEDIT", "__splitSegInfo", ld::Section::typeLinkEdit, true);
+ld::Section SplitSegInfoV1Atom<A>::_s_section("__LINKEDIT", "__splitSegInfo", ld::Section::typeLinkEdit, true);
template <>
-void SplitSegInfoAtom<x86_64>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
+void SplitSegInfoV1Atom<x86_64>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
{
switch (kind) {
case ld::Fixup::kindStoreX86PCRel32:
case ld::Fixup::kindStoreTargetAddressX86PCRel32:
case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
+ case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
+ case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
_32bitPointerLocations.push_back(address);
break;
case ld::Fixup::kindStoreLittleEndian64:
case ld::Fixup::kindStoreTargetAddressLittleEndian64:
_64bitPointerLocations.push_back(address);
break;
+#if SUPPORT_ARCH_arm64e
+ case ld::Fixup::kindStoreLittleEndianAuth64:
+ case ld::Fixup::kindStoreTargetAddressLittleEndianAuth64:
+ assert(false);
+ break;
+#endif
default:
warning("codegen at address 0x%08llX prevents image from working in dyld shared cache", address);
break;
}
template <>
-void SplitSegInfoAtom<x86>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
+void SplitSegInfoV1Atom<x86>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
{
switch (kind) {
case ld::Fixup::kindStoreLittleEndian32:
case ld::Fixup::kindStoreTargetAddressLittleEndian32:
+ case ld::Fixup::kindStoreX86PCRel32TLVLoad:
+ case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
_32bitPointerLocations.push_back(address);
break;
default:
}
template <>
-void SplitSegInfoAtom<arm>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
+void SplitSegInfoV1Atom<arm>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
{
switch (kind) {
case ld::Fixup::kindStoreLittleEndian32:
}
}
-
+#if SUPPORT_ARCH_arm64
+template <>
+void SplitSegInfoV1Atom<arm64>::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const
+{
+ switch (kind) {
+ case ld::Fixup::kindStoreARM64Page21:
+ case ld::Fixup::kindStoreARM64GOTLoadPage21:
+ case ld::Fixup::kindStoreARM64GOTLeaPage21:
+ case ld::Fixup::kindStoreARM64TLVPLoadPage21:
+ case ld::Fixup::kindStoreTargetAddressARM64Page21:
+ case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
+ case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
+ _adrpLocations.push_back(address);
+ break;
+ case ld::Fixup::kindStoreLittleEndian32:
+ case ld::Fixup::kindStoreARM64PCRelToGOT:
+ _32bitPointerLocations.push_back(address);
+ break;
+ case ld::Fixup::kindStoreLittleEndian64:
+ case ld::Fixup::kindStoreTargetAddressLittleEndian64:
+ _64bitPointerLocations.push_back(address);
+ break;
+#if SUPPORT_ARCH_arm64e
+ case ld::Fixup::kindStoreLittleEndianAuth64:
+ case ld::Fixup::kindStoreTargetAddressLittleEndianAuth64:
+ warning("authenticated pointer at address 0x%08llX prevents image from working in dyld shared cache", address);
+ break;
+#endif
+ default:
+ warning("codegen at address 0x%08llX prevents image from working in dyld shared cache", address);
+ break;
+ }
+}
+#endif
template <typename A>
-void SplitSegInfoAtom<A>::uleb128EncodeAddresses(const std::vector<uint64_t>& locations) const
+void SplitSegInfoV1Atom<A>::uleb128EncodeAddresses(const std::vector<uint64_t>& locations) const
{
pint_t addr = this->_options.baseAddress();
for(typename std::vector<uint64_t>::const_iterator it = locations.begin(); it != locations.end(); ++it) {
template <typename A>
-void SplitSegInfoAtom<A>::encode() const
+void SplitSegInfoV1Atom<A>::encode() const
{
// sort into group by pointer adjustment kind
std::vector<OutputFile::SplitSegInfoEntry>& info = this->_writer._splitSegInfos;
for (std::vector<OutputFile::SplitSegInfoEntry>::const_iterator it = info.begin(); it != info.end(); ++it) {
- this->addSplitSegInfo(it->address, it->kind, it->extra);
+ this->addSplitSegInfo(it->fixupAddress, it->kind, it->extra);
}
// delta compress runs of addresses
this->_encodedData.append_byte(0); // terminator
}
+ if ( _adrpLocations.size() != 0 ) {
+ this->_encodedData.append_byte(3);
+ //fprintf(stderr, "type 3:\n");
+ std::sort(_adrpLocations.begin(), _adrpLocations.end());
+ this->uleb128EncodeAddresses(_adrpLocations);
+ this->_encodedData.append_byte(0); // terminator
+ }
+
if ( _thumbLo16Locations.size() != 0 ) {
this->_encodedData.append_byte(5);
//fprintf(stderr, "type 5:\n");
_64bitPointerLocations.clear();
}
+
+template <typename A>
+class SplitSegInfoV2Atom : public LinkEditAtom
+{
+public:
+ SplitSegInfoV2Atom(const Options& opts, ld::Internal& state, OutputFile& writer)
+ : LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) { }
+
+ // overrides of ld::Atom
+ virtual const char* name() const { return "split seg info"; }
+ // overrides of LinkEditAtom
+ virtual void encode() const;
+
+private:
+ typedef typename A::P P;
+ typedef typename A::P::E E;
+ typedef typename A::P::uint_t pint_t;
+
+ // Whole :== <count> FromToSection+
+ // FromToSection :== <from-sect-index> <to-sect-index> <count> ToOffset+
+ // ToOffset :== <to-sect-offset-delta> <count> FromOffset+
+ // FromOffset :== <kind> <count> <from-sect-offset-delta>
+ // (a worked byte-level example follows encode() below)
+
+ typedef uint32_t SectionIndexes;
+ typedef std::map<uint8_t, std::vector<uint64_t> > FromOffsetMap;
+ typedef std::map<uint64_t, FromOffsetMap> ToOffsetMap;
+ typedef std::map<SectionIndexes, ToOffsetMap> WholeMap;
+
+
+ static ld::Section _s_section;
+};
+
+template <typename A>
+ld::Section SplitSegInfoV2Atom<A>::_s_section("__LINKEDIT", "__splitSegInfo", ld::Section::typeLinkEdit, true);
+
+
+template <typename A>
+void SplitSegInfoV2Atom<A>::encode() const
+{
+ // sort into group by adjustment kind
+ //fprintf(stderr, "_splitSegV2Infos.size=%lu\n", this->_writer._splitSegV2Infos.size());
+ WholeMap whole;
+ for (const OutputFile::SplitSegInfoV2Entry& entry : this->_writer._splitSegV2Infos) {
+ //fprintf(stderr, "from=%d, to=%d\n", entry.fixupSectionIndex, entry.targetSectionIndex);
+ SectionIndexes index = entry.fixupSectionIndex << 16 | entry.targetSectionIndex;
+ ToOffsetMap& toOffsets = whole[index];
+ FromOffsetMap& fromOffsets = toOffsets[entry.targetSectionOffset];
+ fromOffsets[entry.referenceKind].push_back(entry.fixupSectionOffset);
+ }
+
+ // Add marker that this is V2 data
+ this->_encodedData.reserve(8192);
+ this->_encodedData.append_byte(DYLD_CACHE_ADJ_V2_FORMAT);
+
+ // stream out
+ // Whole :== <count> FromToSection+
+ this->_encodedData.append_uleb128(whole.size());
+ for (auto& fromToSection : whole) {
+ uint8_t fromSectionIndex = fromToSection.first >> 16;
+ uint8_t toSectionIndex = fromToSection.first & 0xFFFF;
+ ToOffsetMap& toOffsets = fromToSection.second;
+ // FromToSection :== <from-sect-index> <to-sect-index> <count> ToOffset+
+ this->_encodedData.append_uleb128(fromSectionIndex);
+ this->_encodedData.append_uleb128(toSectionIndex);
+ this->_encodedData.append_uleb128(toOffsets.size());
+ //fprintf(stderr, "from sect=%d, to sect=%d, count=%lu\n", fromSectionIndex, toSectionIndex, toOffsets.size());
+ uint64_t lastToOffset = 0;
+ for (auto& fromToOffsets : toOffsets) {
+ uint64_t toSectionOffset = fromToOffsets.first;
+ FromOffsetMap& fromOffsets = fromToOffsets.second;
+ // ToOffset :== <to-sect-offset-delta> <count> FromOffset+
+ this->_encodedData.append_uleb128(toSectionOffset - lastToOffset);
+ this->_encodedData.append_uleb128(fromOffsets.size());
+ for (auto& kindAndOffsets : fromOffsets) {
+ uint8_t kind = kindAndOffsets.first;
+ std::vector<uint64_t>& fromOffsets = kindAndOffsets.second;
+ // FromOffset :== <kind> <count> <from-sect-offset-delta>
+ this->_encodedData.append_uleb128(kind);
+ this->_encodedData.append_uleb128(fromOffsets.size());
+ std::sort(fromOffsets.begin(), fromOffsets.end());
+ uint64_t lastFromOffset = 0;
+ for (uint64_t offset : fromOffsets) {
+ this->_encodedData.append_uleb128(offset - lastFromOffset);
+ lastFromOffset = offset;
+ }
+ }
+ lastToOffset = toSectionOffset;
+ }
+ }
+
+
+ // always add zero byte to mark end
+ this->_encodedData.append_byte(0);
+
+ // align to pointer size
+ this->_encodedData.pad_to_size(sizeof(pint_t));
+
+ this->_encoded = true;
+}
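+
+// Editorial sketch of the V2 stream for a single fixup, using hypothetical
+// values (fixup section 1, target section 2, target offset 0x10, kind k,
+// fixup offset 0x8):
+//
+//   DYLD_CACHE_ADJ_V2_FORMAT          // version marker byte
+//   uleb(1)                           // Whole: one FromToSection
+//   uleb(1) uleb(2) uleb(1)           // FromToSection: from=1, to=2, one ToOffset
+//   uleb(0x10) uleb(1)                // ToOffset: delta from 0, one kind group
+//   uleb(k) uleb(1) uleb(0x8)         // FromOffset: kind, count, offset delta
+//   0x00                              // end marker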
+
+
+
template <typename A>
class FunctionStartsAtom : public LinkEditAtom
{
-// <rdar://problem/7209249> linker needs to cache "Designated Requirements" in linked binary
+
template <typename A>
-class DependentDRAtom : public LinkEditAtom
+class OptimizationHintsAtom : public LinkEditAtom
{
public:
- DependentDRAtom(const Options& opts, ld::Internal& state, OutputFile& writer)
- : LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) { }
+ OptimizationHintsAtom(const Options& opts, ld::Internal& state, OutputFile& writer)
+ : LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) {
+ assert(opts.outputKind() == Options::kObjectFile);
+ }
// overrides of ld::Atom
- virtual const char* name() const { return "dependent dylib DR info"; }
+ virtual const char* name() const { return "linker optimization hints"; }
// overrides of LinkEditAtom
virtual void encode() const;
};
template <typename A>
-ld::Section DependentDRAtom<A>::_s_section("__LINKEDIT", "__dependentDR", ld::Section::typeLinkEdit, true);
-
+ld::Section OptimizationHintsAtom<A>::_s_section("__LINKEDIT", "__opt_hints", ld::Section::typeLinkEdit, true);
template <typename A>
-void DependentDRAtom<A>::encode() const
+void OptimizationHintsAtom<A>::encode() const
{
- Security::SuperBlobCore<Security::SuperBlob<Security::kSecCodeMagicDRList>, Security::kSecCodeMagicDRList, uint32_t>::Maker maker;
-
- uint32_t index = 0;
- for(std::vector<ld::dylib::File*>::iterator it=_state.dylibs.begin(); it != _state.dylibs.end(); ++it) {
- const ld::dylib::File* dylib = *it;
- Security::BlobCore* dylibDR = (Security::BlobCore*)dylib->codeSignatureDR();
- void* dup = NULL;
- if ( dylibDR != NULL ) {
- // <rdar://problem/11315321> Maker takes ownership of every blob added
- // We need to make a copy here because dylib still owns the pointer returned by codeSignatureDR()
- dup = ::malloc(dylibDR->length());
- ::memcpy(dup, dylibDR, dylibDR->length());
+ if ( _state.someObjectHasOptimizationHints ) {
+ for (std::vector<ld::Internal::FinalSection*>::iterator sit = _state.sections.begin(); sit != _state.sections.end(); ++sit) {
+ ld::Internal::FinalSection* sect = *sit;
+ if ( sect->type() != ld::Section::typeCode )
+ continue;
+ for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
+ const ld::Atom* atom = *ait;
+ uint64_t address = atom->finalAddress();
+ for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
+ if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint)
+ continue;
+ ld::Fixup::LOH_arm64 extra;
+ extra.addend = fit->u.addend;
+ _encodedData.append_uleb128(extra.info.kind);
+ _encodedData.append_uleb128(extra.info.count+1);
+ _encodedData.append_uleb128((extra.info.delta1 << 2) + fit->offsetInAtom + address);
+ if ( extra.info.count > 0 )
+ _encodedData.append_uleb128((extra.info.delta2 << 2) + fit->offsetInAtom + address);
+ if ( extra.info.count > 1 )
+ _encodedData.append_uleb128((extra.info.delta3 << 2) + fit->offsetInAtom + address);
+ if ( extra.info.count > 2 )
+ _encodedData.append_uleb128((extra.info.delta4 << 2) + fit->offsetInAtom + address);
+ }
+ }
}
- maker.add(index, (Security::BlobCore*)dup);
- ++index;
+
+ this->_encodedData.pad_to_size(sizeof(pint_t));
}
- Security::SuperBlob<Security::kSecCodeMagicDRList>* topBlob = maker.make();
- const uint8_t* data = (uint8_t*)topBlob->data();
- for(size_t i=0; i < topBlob->length(); ++i)
- _encodedData.append_byte(data[i]);
-
- this->_encodedData.pad_to_size(sizeof(pint_t));
-
this->_encoded = true;
}
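
// Editorial note: each record emitted above is <kind> <count+1> <address>+,
// where every address is the absolute location of a hinted instruction; the
// delta fields count 4-byte arm64 instructions, hence the << 2 scaling.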
-
} // namespace tool
} // namespace ld