X-Git-Url: https://git.saurik.com/apple/ld64.git/blobdiff_plain/f80fe69f3f29962e8aa43a99f8ed9201548f3d78..e456bf1059cf7e6b8b71545d1b2f2092b55a9684:/src/ld/LinkEdit.hpp diff --git a/src/ld/LinkEdit.hpp b/src/ld/LinkEdit.hpp index 1b41fb2..4387ffd 100644 --- a/src/ld/LinkEdit.hpp +++ b/src/ld/LinkEdit.hpp @@ -32,6 +32,7 @@ #include #include +#include #include "Options.h" #include "ld.hpp" @@ -81,6 +82,17 @@ public: while( more ); } + void append_delta_encoded_uleb128_run(uint64_t start, const std::vector& locations) { + uint64_t lastAddr = start; + for(std::vector::const_iterator it = locations.begin(); it != locations.end(); ++it) { + uint64_t nextAddr = *it; + uint64_t delta = nextAddr - lastAddr; + assert(delta != 0); + append_uleb128(delta); + lastAddr = nextAddr; + } + } + void append_string(const char* str) { for (const char* s = str; *s != '\0'; ++s) _data.push_back(*s); @@ -164,6 +176,8 @@ public: virtual void encode() const; private: + void encodeV1() const; + struct rebase_tmp { rebase_tmp(uint8_t op, uint64_t p1, uint64_t p2=0) : opcode(op), operand1(p1), operand2(p2) {} @@ -192,8 +206,28 @@ void RebaseInfoAtom::encode() const // sort rebase info by type, then address std::vector& info = this->_writer._rebaseInfo; + if (info.empty()) + return; + std::sort(info.begin(), info.end()); + // use encoding based on target minOS + if ( _options.useLinkedListBinding() && !this->_writer._hasUnalignedFixup ) { + if ( info.back()._type != REBASE_TYPE_POINTER ) + throw "unsupported rebase type with linked list opcodes"; + // As the binding and rebasing are both linked lists, just use the binds + // to do everything. + } else { + encodeV1(); + } +} + + +template +void RebaseInfoAtom::encodeV1() const +{ + std::vector& info = this->_writer._rebaseInfo; + // convert to temp encoding that can be more easily optimized std::vector mid; uint64_t curSegStart = 0; @@ -219,6 +253,8 @@ void RebaseInfoAtom::encode() const } mid.push_back(rebase_tmp(REBASE_OPCODE_DO_REBASE_ULEB_TIMES, 1)); address += sizeof(pint_t); + if ( address >= curSegEnd ) + address = 0; } mid.push_back(rebase_tmp(REBASE_OPCODE_DONE, 0)); @@ -374,6 +410,9 @@ public: private: + void encodeV1() const; + void encodeV2() const; + typedef typename A::P P; typedef typename A::P::E E; typedef typename A::P::uint_t pint_t; @@ -397,11 +436,23 @@ ld::Section BindingInfoAtom::_s_section("__LINKEDIT", "__binding", ld::Sectio template void BindingInfoAtom::encode() const +{ + // use encoding based on target minOS + if ( _options.useLinkedListBinding() && !this->_writer._hasUnalignedFixup ) { + encodeV2(); + } else { + encodeV1(); + } +} + + +template +void BindingInfoAtom::encodeV1() const { // sort by library, symbol, type, then address std::vector& info = this->_writer._bindingInfo; std::sort(info.begin(), info.end()); - + // convert to temp encoding that can be more easily optimized std::vector mid; uint64_t curSegStart = 0; @@ -586,6 +637,229 @@ void BindingInfoAtom::encode() const if (log) fprintf(stderr, "total binding info size = %ld\n", this->_encodedData.size()); } +template +void BindingInfoAtom::encodeV2() const +{ + std::vector& bindInfo = this->_writer._bindingInfo; + std::vector& rebaseInfo = this->_writer._rebaseInfo; + const static bool log = false; + + std::sort(bindInfo.begin(), bindInfo.end()); + + // convert to temp encoding that can be more easily optimized + std::vector mid; + uint64_t curSegStart = 0; + uint64_t curSegEnd = 0; + uint32_t curSegIndex = 0; + int ordinal = 0x80000000; + const char* symbolName = NULL; + uint8_t 
type = 0; + uint64_t address = (uint64_t)(-1); + int64_t addend = 0; + uint64_t numBinds = (uint64_t)(-1); + for (std::vector::iterator it = bindInfo.begin(); it != bindInfo.end(); ++it) { + bool madeChange = false; + if ( ordinal != it->_libraryOrdinal ) { + if ( it->_libraryOrdinal <= 0 ) { + // special lookups are encoded as negative numbers in BindingInfo + mid.push_back(binding_tmp(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM, it->_libraryOrdinal)); + } + else if ( it->_libraryOrdinal <= 15 ) { + mid.push_back(binding_tmp(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM, it->_libraryOrdinal)); + } + else { + mid.push_back(binding_tmp(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB, it->_libraryOrdinal)); + } + ordinal = it->_libraryOrdinal; + madeChange = true; + } + if ( symbolName != it->_symbolName ) { + mid.push_back(binding_tmp(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, it->_flags, 0, it->_symbolName)); + symbolName = it->_symbolName; + madeChange = true; + } + if ( type != it->_type ) { + if ( it->_type != BIND_TYPE_POINTER ) + throw "unsupported bind type with linked list opcodes"; + mid.push_back(binding_tmp(BIND_OPCODE_SET_TYPE_IMM, it->_type)); + type = it->_type; + madeChange = true; + } + if ( address != it->_address ) { + // Note, we don't push the addresses here. That is all done later with the threaded chains + if ( (it->_address < curSegStart) || ( it->_address >= curSegEnd) ) { + if ( ! this->_writer.findSegment(this->_state, it->_address, &curSegStart, &curSegEnd, &curSegIndex) ) + throw "binding address outside range of any segment"; + } + address = it->_address; + } + if ( addend != it->_addend ) { + mid.push_back(binding_tmp(BIND_OPCODE_SET_ADDEND_SLEB, it->_addend)); + addend = it->_addend; + madeChange = true; + } + + if (madeChange) { + ++numBinds; + mid.push_back(binding_tmp(BIND_OPCODE_DO_BIND, 0)); + } + it->_threadedBindOrdinal = numBinds; + } + + // We can only support 2^16 bind ordinals. + if ( (numBinds > 0x10000) && (numBinds != (uint64_t)(-1)) ) + throwf("too many binds (%llu). The limit is 65536", numBinds); + + // Now that we have the bind ordinal table populate, set the page starts. + + std::vector& threadedRebaseBindIndices = this->_writer._threadedRebaseBindIndices; + threadedRebaseBindIndices.reserve(bindInfo.size() + rebaseInfo.size()); + + for (int64_t i = 0, e = rebaseInfo.size(); i != e; ++i) + threadedRebaseBindIndices.push_back(-i); + + for (int64_t i = 0, e = bindInfo.size(); i != e; ++i) + threadedRebaseBindIndices.push_back(i + 1); + + // Now sort the entries by address. + std::sort(threadedRebaseBindIndices.begin(), threadedRebaseBindIndices.end(), + [&rebaseInfo, &bindInfo](int64_t indexA, int64_t indexB) { + if (indexA == indexB) + return false; + uint64_t addressA = indexA <= 0 ? rebaseInfo[-indexA]._address : bindInfo[indexA - 1]._address; + uint64_t addressB = indexB <= 0 ? rebaseInfo[-indexB]._address : bindInfo[indexB - 1]._address; + assert(addressA != addressB); + return addressA < addressB; + }); + + curSegStart = 0; + curSegEnd = 0; + curSegIndex = 0; + uint64_t prevPageIndex = 0; + for (int64_t entryIndex : threadedRebaseBindIndices) { + OutputFile::RebaseInfo* rebase = nullptr; + OutputFile::BindingInfo* bind = nullptr; + uint64_t address = 0; + if (entryIndex <= 0) { + rebase = &rebaseInfo[-entryIndex]; + address = rebase->_address; + } else { + bind = &bindInfo[entryIndex - 1]; + address = bind->_address; + } + assert((address & 7) == 0); + + bool newSegment = false; + if ( (address < curSegStart) || ( address >= curSegEnd) ) { + // Start of a new segment. 
+ if ( ! this->_writer.findSegment(this->_state, address, &curSegStart, &curSegEnd, &curSegIndex) ) + throw "binding address outside range of any segment"; + newSegment = true; + } + + // At this point we know we have the page starts array space reserved + // so set the page start for this entry if we haven't got one already. + uint64_t pageIndex = ( address - curSegStart ) / 4096; + if ( newSegment || (pageIndex != prevPageIndex) ) { + mid.push_back(binding_tmp(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB, curSegIndex, address - curSegStart)); + mid.push_back(binding_tmp(BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_APPLY, 0)); + } + prevPageIndex = pageIndex; + } + mid.push_back(binding_tmp(BIND_OPCODE_DONE, 0)); + + // convert to compressed encoding + this->_encodedData.reserve(bindInfo.size()*2); + + // First push the total number of binds so that we can allocate space for this in dyld. + if ( log ) fprintf(stderr, "BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB(%lld)\n", numBinds + 1); + this->_encodedData.append_byte(BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB); + this->_encodedData.append_uleb128(numBinds + 1); + + bool done = false; + for (typename std::vector::iterator it = mid.begin(); !done && it != mid.end() ; ++it) { + switch ( it->opcode ) { + case BIND_OPCODE_DONE: + if ( log ) fprintf(stderr, "BIND_OPCODE_DONE()\n"); + done = true; + break; + case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: + if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_IMM(%lld)\n", it->operand1); + this->_encodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | it->operand1); + break; + case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: + if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB(%lld)\n", it->operand1); + this->_encodedData.append_byte(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB); + this->_encodedData.append_uleb128(it->operand1); + break; + case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: + if ( log ) fprintf(stderr, "BIND_OPCODE_SET_DYLIB_SPECIAL_IMM(%lld)\n", it->operand1); + this->_encodedData.append_byte(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | (it->operand1 & BIND_IMMEDIATE_MASK)); + break; + case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: + if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM(0x%0llX, %s)\n", it->operand1, it->name); + this->_encodedData.append_byte(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | it->operand1); + this->_encodedData.append_string(it->name); + break; + case BIND_OPCODE_SET_TYPE_IMM: + if ( log ) fprintf(stderr, "BIND_OPCODE_SET_TYPE_IMM(%lld)\n", it->operand1); + this->_encodedData.append_byte(BIND_OPCODE_SET_TYPE_IMM | it->operand1); + break; + case BIND_OPCODE_SET_ADDEND_SLEB: + if ( log ) fprintf(stderr, "BIND_OPCODE_SET_ADDEND_SLEB(%lld)\n", it->operand1); + this->_encodedData.append_byte(BIND_OPCODE_SET_ADDEND_SLEB); + this->_encodedData.append_sleb128(it->operand1); + break; + case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: + if ( log ) fprintf(stderr, "BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB(%lld, 0x%llX)\n", it->operand1, it->operand2); + this->_encodedData.append_byte(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | it->operand1); + this->_encodedData.append_uleb128(it->operand2); + break; + case BIND_OPCODE_ADD_ADDR_ULEB: + if ( log ) fprintf(stderr, "BIND_OPCODE_ADD_ADDR_ULEB(0x%llX)\n", it->operand1); + this->_encodedData.append_byte(BIND_OPCODE_ADD_ADDR_ULEB); + this->_encodedData.append_uleb128(it->operand1); + break; + case BIND_OPCODE_DO_BIND: + if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND()\n"); + 
this->_encodedData.append_byte(BIND_OPCODE_DO_BIND); + break; + case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: + if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB(0x%llX)\n", it->operand1); + this->_encodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB); + this->_encodedData.append_uleb128(it->operand1); + break; + case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: + if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED(%lld=0x%llX)\n", it->operand1, it->operand1*sizeof(pint_t)); + this->_encodedData.append_byte(BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | it->operand1 ); + break; + case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: + if ( log ) fprintf(stderr, "BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB(%lld, %lld)\n", it->operand1, it->operand2); + this->_encodedData.append_byte(BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB); + this->_encodedData.append_uleb128(it->operand1); + this->_encodedData.append_uleb128(it->operand2); + break; + case BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB: + if ( log ) fprintf(stderr, "BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB(%lld)\n", it->operand1); + this->_encodedData.append_byte(BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB); + this->_encodedData.append_uleb128(it->operand1); + break; + case BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_APPLY: + this->_encodedData.append_byte(BIND_OPCODE_THREADED | BIND_SUBOPCODE_THREADED_APPLY); + if ( log ) fprintf(stderr, "BIND_SUBOPCODE_THREADED_APPLY()\n"); + break; + } + } + + // align to pointer size + this->_encodedData.append_byte(BIND_OPCODE_DONE); + this->_encodedData.pad_to_size(sizeof(pint_t)); + + this->_encoded = true; + + if (log) fprintf(stderr, "total binding info size = %ld\n", this->_encodedData.size()); +} + template @@ -969,6 +1243,7 @@ void ExportInfoAtom::encode() const std::vector& exports = this->_writer._exportedAtoms; uint64_t imageBaseAddress = this->_writer.headerAndLoadCommandsSection->address; std::vector entries; + unsigned int padding = 0; entries.reserve(exports.size()); for (std::vector::const_iterator it = exports.begin(); it != exports.end(); ++it) { const ld::Atom* atom = *it; @@ -983,7 +1258,7 @@ void ExportInfoAtom::encode() const entry.flags |= EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION; entry.other = this->_writer.compressedOrdinalForAtom(atom); if ( entry.other == BIND_SPECIAL_DYLIB_SELF ) { - warning("not adding explict export for symbol %s because it is already re-exported from dylib %s", entry.name, atom->file()->path()); + warning("not adding explict export for symbol %s because it is already re-exported from dylib %s", entry.name, atom->safeFilePath()); continue; } if ( atom->isAlias() ) { @@ -1035,6 +1310,13 @@ void ExportInfoAtom::encode() const entry.importName = NULL; entries.push_back(entry); } + + if (_options.sharedRegionEligible() && strncmp(atom->section().segmentName(), "__DATA", 6) == 0) { + // Maximum address is 64bit which is 10 bytes as a uleb128. Minimum is 1 byte + // Pad the section out so we can deal with addresses getting larger when __DATA segment + // is moved before __TEXT in dyld shared cache. 
+ padding += 9; + } } // sort vector by -exported_symbols_order, and any others by address @@ -1043,6 +1325,10 @@ void ExportInfoAtom::encode() const // create trie mach_o::trie::makeTrie(entries, this->_encodedData.bytes()); + //Add additional data padding for the unoptimized shared cache + for (unsigned int i = 0; i < padding; ++i) + this->_encodedData.append_byte(0); + // align to pointer size this->_encodedData.pad_to_size(sizeof(pint_t)); @@ -1051,10 +1337,10 @@ void ExportInfoAtom::encode() const template -class SplitSegInfoAtom : public LinkEditAtom +class SplitSegInfoV1Atom : public LinkEditAtom { public: - SplitSegInfoAtom(const Options& opts, ld::Internal& state, OutputFile& writer) + SplitSegInfoV1Atom(const Options& opts, ld::Internal& state, OutputFile& writer) : LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) { } // overrides of ld::Atom @@ -1083,10 +1369,10 @@ private: }; template -ld::Section SplitSegInfoAtom::_s_section("__LINKEDIT", "__splitSegInfo", ld::Section::typeLinkEdit, true); +ld::Section SplitSegInfoV1Atom::_s_section("__LINKEDIT", "__splitSegInfo", ld::Section::typeLinkEdit, true); template <> -void SplitSegInfoAtom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const +void SplitSegInfoV1Atom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const { switch (kind) { case ld::Fixup::kindStoreX86PCRel32: @@ -1109,6 +1395,12 @@ void SplitSegInfoAtom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind case ld::Fixup::kindStoreTargetAddressLittleEndian64: _64bitPointerLocations.push_back(address); break; +#if SUPPORT_ARCH_arm64e + case ld::Fixup::kindStoreLittleEndianAuth64: + case ld::Fixup::kindStoreTargetAddressLittleEndianAuth64: + assert(false); + break; +#endif default: warning("codegen at address 0x%08llX prevents image from working in dyld shared cache", address); break; @@ -1116,7 +1408,7 @@ void SplitSegInfoAtom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind } template <> -void SplitSegInfoAtom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const +void SplitSegInfoV1Atom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const { switch (kind) { case ld::Fixup::kindStoreLittleEndian32: @@ -1132,7 +1424,7 @@ void SplitSegInfoAtom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind ki } template <> -void SplitSegInfoAtom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const +void SplitSegInfoV1Atom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const { switch (kind) { case ld::Fixup::kindStoreLittleEndian32: @@ -1160,7 +1452,7 @@ void SplitSegInfoAtom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind ki #if SUPPORT_ARCH_arm64 template <> -void SplitSegInfoAtom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const +void SplitSegInfoV1Atom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind kind, uint32_t extra) const { switch (kind) { case ld::Fixup::kindStoreARM64Page21: @@ -1180,6 +1472,12 @@ void SplitSegInfoAtom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind case ld::Fixup::kindStoreTargetAddressLittleEndian64: _64bitPointerLocations.push_back(address); break; +#if SUPPORT_ARCH_arm64e + case ld::Fixup::kindStoreLittleEndianAuth64: + case ld::Fixup::kindStoreTargetAddressLittleEndianAuth64: + warning("authenticated pointer at address 0x%08llX prevents image from working in dyld shared cache", address); + break; +#endif default: warning("codegen at address 0x%08llX 
prevents image from working in dyld shared cache", address); break; @@ -1188,7 +1486,7 @@ void SplitSegInfoAtom::addSplitSegInfo(uint64_t address, ld::Fixup::Kind #endif template -void SplitSegInfoAtom::uleb128EncodeAddresses(const std::vector& locations) const +void SplitSegInfoV1Atom::uleb128EncodeAddresses(const std::vector& locations) const { pint_t addr = this->_options.baseAddress(); for(typename std::vector::const_iterator it = locations.begin(); it != locations.end(); ++it) { @@ -1215,12 +1513,12 @@ void SplitSegInfoAtom::uleb128EncodeAddresses(const std::vector& lo template -void SplitSegInfoAtom::encode() const +void SplitSegInfoV1Atom::encode() const { // sort into group by pointer adjustment kind std::vector& info = this->_writer._splitSegInfos; for (std::vector::const_iterator it = info.begin(); it != info.end(); ++it) { - this->addSplitSegInfo(it->address, it->kind, it->extra); + this->addSplitSegInfo(it->fixupAddress, it->kind, it->extra); } // delta compress runs of addresses @@ -1298,6 +1596,108 @@ void SplitSegInfoAtom::encode() const _64bitPointerLocations.clear(); } + +template +class SplitSegInfoV2Atom : public LinkEditAtom +{ +public: + SplitSegInfoV2Atom(const Options& opts, ld::Internal& state, OutputFile& writer) + : LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) { } + + // overrides of ld::Atom + virtual const char* name() const { return "split seg info"; } + // overrides of LinkEditAtom + virtual void encode() const; + +private: + typedef typename A::P P; + typedef typename A::P::E E; + typedef typename A::P::uint_t pint_t; + + // Whole :== FromToSection+ + // FromToSection :== ToOffset+ + // ToOffset :== FromOffset+ + // FromOffset :== + + typedef uint32_t SectionIndexes; + typedef std::map > FromOffsetMap; + typedef std::map ToOffsetMap; + typedef std::map WholeMap; + + + static ld::Section _s_section; +}; + +template +ld::Section SplitSegInfoV2Atom::_s_section("__LINKEDIT", "__splitSegInfo", ld::Section::typeLinkEdit, true); + + +template +void SplitSegInfoV2Atom::encode() const +{ + // sort into group by adjustment kind + //fprintf(stderr, "_splitSegV2Infos.size=%lu\n", this->_writer._splitSegV2Infos.size()); + WholeMap whole; + for (const OutputFile::SplitSegInfoV2Entry& entry : this->_writer._splitSegV2Infos) { + //fprintf(stderr, "from=%d, to=%d\n", entry.fixupSectionIndex, entry.targetSectionIndex); + SectionIndexes index = entry.fixupSectionIndex << 16 | entry.targetSectionIndex; + ToOffsetMap& toOffsets = whole[index]; + FromOffsetMap& fromOffsets = toOffsets[entry.targetSectionOffset]; + fromOffsets[entry.referenceKind].push_back(entry.fixupSectionOffset); + } + + // Add marker that this is V2 data + this->_encodedData.reserve(8192); + this->_encodedData.append_byte(DYLD_CACHE_ADJ_V2_FORMAT); + + // stream out + // Whole :== FromToSection+ + this->_encodedData.append_uleb128(whole.size()); + for (auto& fromToSection : whole) { + uint8_t fromSectionIndex = fromToSection.first >> 16; + uint8_t toSectionIndex = fromToSection.first & 0xFFFF; + ToOffsetMap& toOffsets = fromToSection.second; + // FromToSection :== ToOffset+ + this->_encodedData.append_uleb128(fromSectionIndex); + this->_encodedData.append_uleb128(toSectionIndex); + this->_encodedData.append_uleb128(toOffsets.size()); + //fprintf(stderr, "from sect=%d, to sect=%d, count=%lu\n", fromSectionIndex, toSectionIndex, toOffsets.size()); + uint64_t lastToOffset = 0; + for (auto& fromToOffsets : toOffsets) { + uint64_t toSectionOffset = fromToOffsets.first; + FromOffsetMap& fromOffsets 
= fromToOffsets.second; + // ToOffset :== FromOffset+ + this->_encodedData.append_uleb128(toSectionOffset - lastToOffset); + this->_encodedData.append_uleb128(fromOffsets.size()); + for (auto& kindAndOffsets : fromOffsets) { + uint8_t kind = kindAndOffsets.first; + std::vector& fromOffsets = kindAndOffsets.second; + // FromOffset :== + this->_encodedData.append_uleb128(kind); + this->_encodedData.append_uleb128(fromOffsets.size()); + std::sort(fromOffsets.begin(), fromOffsets.end()); + uint64_t lastFromOffset = 0; + for (uint64_t offset : fromOffsets) { + this->_encodedData.append_uleb128(offset - lastFromOffset); + lastFromOffset = offset; + } + } + lastToOffset = toSectionOffset; + } + } + + + // always add zero byte to mark end + this->_encodedData.append_byte(0); + + // align to pointer size + this->_encodedData.pad_to_size(sizeof(pint_t)); + + this->_encoded = true; +} + + + template class FunctionStartsAtom : public LinkEditAtom { @@ -1497,16 +1897,18 @@ void DataInCodeAtom::encode() const -// linker needs to cache "Designated Requirements" in linked binary + template -class DependentDRAtom : public LinkEditAtom +class OptimizationHintsAtom : public LinkEditAtom { public: - DependentDRAtom(const Options& opts, ld::Internal& state, OutputFile& writer) - : LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) { } + OptimizationHintsAtom(const Options& opts, ld::Internal& state, OutputFile& writer) + : LinkEditAtom(opts, state, writer, _s_section, sizeof(pint_t)) { + assert(opts.outputKind() == Options::kObjectFile); + } // overrides of ld::Atom - virtual const char* name() const { return "dependent dylib DR info"; } + virtual const char* name() const { return "linker optimization hints"; } // overrides of LinkEditAtom virtual void encode() const; @@ -1520,41 +1922,44 @@ private: }; template -ld::Section DependentDRAtom::_s_section("__LINKEDIT", "__dependentDR", ld::Section::typeLinkEdit, true); - +ld::Section OptimizationHintsAtom::_s_section("__LINKEDIT", "__opt_hints", ld::Section::typeLinkEdit, true); template -void DependentDRAtom::encode() const +void OptimizationHintsAtom::encode() const { - Security::SuperBlobCore, Security::kSecCodeMagicDRList, uint32_t>::Maker maker; - - uint32_t index = 0; - for(std::vector::iterator it=_state.dylibs.begin(); it != _state.dylibs.end(); ++it) { - const ld::dylib::File* dylib = *it; - Security::BlobCore* dylibDR = (Security::BlobCore*)dylib->codeSignatureDR(); - void* dup = NULL; - if ( dylibDR != NULL ) { - // Maker takes ownership of every blob added - // We need to make a copy here because dylib still owns the pointer returned by codeSignatureDR() - dup = ::malloc(dylibDR->length()); - ::memcpy(dup, dylibDR, dylibDR->length()); + if ( _state.someObjectHasOptimizationHints ) { + for (std::vector::iterator sit = _state.sections.begin(); sit != _state.sections.end(); ++sit) { + ld::Internal::FinalSection* sect = *sit; + if ( sect->type() != ld::Section::typeCode ) + continue; + for (std::vector::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) { + const ld::Atom* atom = *ait; + uint64_t address = atom->finalAddress(); + for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) { + if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint) + continue; + ld::Fixup::LOH_arm64 extra; + extra.addend = fit->u.addend; + _encodedData.append_uleb128(extra.info.kind); + _encodedData.append_uleb128(extra.info.count+1); + _encodedData.append_uleb128((extra.info.delta1 << 2) + fit->offsetInAtom + 
address); + if ( extra.info.count > 0 ) + _encodedData.append_uleb128((extra.info.delta2 << 2) + fit->offsetInAtom + address); + if ( extra.info.count > 1 ) + _encodedData.append_uleb128((extra.info.delta3 << 2) + fit->offsetInAtom + address); + if ( extra.info.count > 2 ) + _encodedData.append_uleb128((extra.info.delta4 << 2) + fit->offsetInAtom + address); + } + } } - maker.add(index, (Security::BlobCore*)dup); - ++index; + + this->_encodedData.pad_to_size(sizeof(pint_t)); } - Security::SuperBlob* topBlob = maker.make(); - const uint8_t* data = (uint8_t*)topBlob->data(); - for(size_t i=0; i < topBlob->length(); ++i) - _encodedData.append_byte(data[i]); - - this->_encodedData.pad_to_size(sizeof(pint_t)); - this->_encoded = true; } - } // namespace tool } // namespace ld
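
Note: the byte-stream helpers above (append_uleb128 and the new append_delta_encoded_uleb128_run) are the core of every LINKEDIT encoder in this file: sorted addresses are stored as ULEB128-encoded deltas from the previous address. Below is a minimal standalone sketch of that scheme; the helper names (encode_uleb128, encode_delta_run) are illustrative only and are not the ByteStream members used in the header.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Append one unsigned LEB128 value: 7 bits per byte, high bit set on all but the last byte.
    static void encode_uleb128(uint64_t value, std::vector<uint8_t>& out) {
        do {
            uint8_t byte = value & 0x7F;
            value >>= 7;
            if (value != 0)
                byte |= 0x80;              // more bytes follow
            out.push_back(byte);
        } while (value != 0);
    }

    // Delta-encode a sorted run of addresses relative to a start address,
    // in the spirit of append_delta_encoded_uleb128_run() above.
    static void encode_delta_run(uint64_t start, const std::vector<uint64_t>& addrs,
                                 std::vector<uint8_t>& out) {
        uint64_t last = start;
        for (uint64_t addr : addrs) {
            uint64_t delta = addr - last;
            assert(delta != 0);            // duplicate addresses are not representable
            encode_uleb128(delta, out);
            last = addr;
        }
    }

    int main() {
        std::vector<uint8_t> bytes;
        encode_delta_run(0x1000, {0x1008, 0x1010, 0x1200}, bytes);
        for (uint8_t b : bytes)
            printf("%02X ", b);            // prints: 08 08 F0 03
        printf("\n");
    }

Because the address lists are sorted and entries are usually pointer-aligned and close together, most deltas fit in a single byte, which keeps the rebase, bind, and split-seg opcode streams compact.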
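
Note: the grammar comments in SplitSegInfoV2Atom (Whole :== FromToSection+, and so on) describe the stream that its encode() emits: per (fixup section, target section) pair, the target offsets are delta-encoded, and within each target offset the fixup offsets are grouped by reference kind and delta-encoded as well. The following is a hedged decoding sketch under those assumptions; the marker constant 0x7F stands in for DYLD_CACHE_ADJ_V2_FORMAT and the function names are illustrative only.

    #include <cstdint>
    #include <cstdio>

    // Read one unsigned LEB128 value from p, advancing p.
    static uint64_t read_uleb128(const uint8_t*& p, const uint8_t* end) {
        uint64_t result = 0;
        int shift = 0;
        while (p != end) {
            uint8_t byte = *p++;
            result |= (uint64_t)(byte & 0x7F) << shift;
            if ((byte & 0x80) == 0)
                break;
            shift += 7;
        }
        return result;
    }

    // Walk a split-seg V2 blob following the grammar used by SplitSegInfoV2Atom::encode():
    //   Whole         :== <count> FromToSection+
    //   FromToSection :== <from-sect-index> <to-sect-index> <count> ToOffset+
    //   ToOffset      :== <to-sect-offset-delta> <count> FromOffset+
    //   FromOffset    :== <kind> <count> <from-sect-offset-delta>+
    static void dump_split_seg_v2(const uint8_t* start, const uint8_t* end) {
        const uint8_t* p = start;
        if (p == end || *p++ != 0x7F)          // assumed DYLD_CACHE_ADJ_V2_FORMAT marker byte
            return;
        uint64_t sectionPairCount = read_uleb128(p, end);
        for (uint64_t i = 0; i < sectionPairCount; ++i) {
            uint64_t fromSect      = read_uleb128(p, end);
            uint64_t toSect        = read_uleb128(p, end);
            uint64_t toOffsetCount = read_uleb128(p, end);
            uint64_t toOffset = 0;
            for (uint64_t j = 0; j < toOffsetCount; ++j) {
                toOffset += read_uleb128(p, end);          // target offsets are delta encoded
                uint64_t kindCount = read_uleb128(p, end);
                for (uint64_t k = 0; k < kindCount; ++k) {
                    uint64_t kind  = read_uleb128(p, end);
                    uint64_t count = read_uleb128(p, end);
                    uint64_t fromOffset = 0;
                    for (uint64_t l = 0; l < count; ++l) {
                        fromOffset += read_uleb128(p, end); // fixup offsets are delta encoded too
                        printf("sect %u+0x%llX -> sect %u+0x%llX (kind %llu)\n",
                               (unsigned)fromSect, (unsigned long long)fromOffset,
                               (unsigned)toSect,   (unsigned long long)toOffset,
                               (unsigned long long)kind);
                    }
                }
            }
        }
    }

    int main() {
        // Hand-built blob: 1 section pair (1 -> 2), 1 target offset (+0x10),
        // 1 kind (4) with two fixups at deltas 8 and 8.
        const uint8_t blob[] = { 0x7F, 0x01, 0x01, 0x02, 0x01, 0x10, 0x01, 0x04, 0x02, 0x08, 0x08, 0x00 };
        dump_split_seg_v2(blob, blob + sizeof(blob));
    }

Grouping fixups by (kind, target) before delta-encoding means the common case (many GOT or pointer references from one section to the same few targets) collapses to short runs of one-byte deltas.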