X-Git-Url: https://git.saurik.com/apple/ld64.git/blobdiff_plain/ba348e2165668ae0f4af8b349fc4a6d0910950ed..7f09b9353af9897bf18933788d6a59c152c29edd:/src/ld/passes/compact_unwind.cpp?ds=inline

diff --git a/src/ld/passes/compact_unwind.cpp b/src/ld/passes/compact_unwind.cpp
index 6c1308d..8b2afbc 100644
--- a/src/ld/passes/compact_unwind.cpp
+++ b/src/ld/passes/compact_unwind.cpp
@@ -144,8 +144,7 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
     std::map<const ld::Atom*, uint32_t> personalityIndexMap;
     makePersonalityIndexes(uniqueEntries, personalityIndexMap);
     if ( personalityIndexMap.size() > 3 ) {
-        warning("too many personality routines for compact unwind to encode");
-        return;
+        throw "too many personality routines for compact unwind to encode";
     }
 
     // put the most common encodings into the common table, but at most 127 of them
@@ -304,6 +303,7 @@ bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
     return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
 }
 
+
 template <>
 bool UnwindInfoAtom<arm>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
 {
@@ -425,6 +425,7 @@ void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, con
     _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+
 template <>
 void UnwindInfoAtom<arm>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
 {
@@ -462,6 +463,7 @@ void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld
     _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+
 template <>
 void UnwindInfoAtom<arm>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
 {
@@ -490,6 +492,7 @@ void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::At
     _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
 }
 
+
 template <>
 void UnwindInfoAtom<arm>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
 {
@@ -518,6 +521,7 @@ void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::
     _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+
 template <>
 void UnwindInfoAtom<arm>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
 {
@@ -546,6 +550,7 @@ void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom*
     _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
 }
 
+
 template <>
 void UnwindInfoAtom<arm>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
 {
@@ -577,6 +582,7 @@ void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const
     _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
 }
 
+
 template <>
 void UnwindInfoAtom<arm>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
 {
@@ -631,11 +637,10 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
     // keep adding entries to page until:
     //   1) encoding table plus entry table plus header exceed page size
     //   2) the file offset delta from the first to last function > 24 bits
-    //   3) custom encoding index reachs 255
+    //   3) custom encoding index reaches 255
     //   4) run out of uniqueInfos to encode
     std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
     uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
-    std::vector<uint8_t> encodingIndexes;
     int index = endIndex-1;
     int entryCount = 0;
     uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
@@ -647,6 +652,7 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
         std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
         if ( pos != commonEncodings.end() ) {
             encodingIndex = pos->second;
+            if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use commonEncodings[%d]=0x%08X\n", index, encodingIndex, info.encoding);
         }
         else {
             // no commmon entry, so add one on this page
@@ -658,12 +664,13 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
             std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
             if ( ppos != pageSpecificEncodings.end() ) {
                 encodingIndex = pos->second;
+                if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
             }
             else {
                 encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
                 if ( encodingIndex <= 255 ) {
                     pageSpecificEncodings[encoding] = encodingIndex;
-                    if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): pageSpecificEncodings[%d]=0x%08X\n", encodingIndex, encoding);
+                    if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
                 }
                 else {
                     canDo = false; // case 3)
@@ -672,8 +679,6 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
                 }
             }
         }
-        if ( canDo )
-            encodingIndexes.push_back(encodingIndex);
         // compute function offset
         uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
         if ( funcOffsetWithInPage > 0x00FFFF00 ) {
@@ -681,16 +686,16 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
             // don't use 24-bit encoding when function offset is near 24-bit limit
             canDo = false; // case 2)
             if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
         }
-        else {
-            ++entryCount;
-        }
         // check room for entry
-        if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
+        if ( (pageSpecificEncodings.size()+entryCount) > space4 ) {
             canDo = false; // case 1)
             --entryCount;
             if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
         }
         //if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
+        if ( canDo ) {
+            ++entryCount;
+        }
     }
     // check for cases where it would be better to use a regular (non-compressed) page
@@ -726,6 +731,7 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
         uint8_t encodingIndex;
         if ( encodingMeansUseDwarf(info.encoding) ) {
             // dwarf entries are always in page specific encodings
+            assert(pageSpecificEncodings.find(info.encoding+i) != pageSpecificEncodings.end());
             encodingIndex = pageSpecificEncodings[info.encoding+i];
         }
         else {
@@ -761,16 +767,22 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 
 
 
-
-
-static uint64_t calculateEHFrameSize(const ld::Internal& state)
+static uint64_t calculateEHFrameSize(ld::Internal& state)
 {
+    bool allCIEs = true;
     uint64_t size = 0;
-    for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
-        ld::Internal::FinalSection* sect = *sit;
+    for (ld::Internal::FinalSection* sect : state.sections) {
         if ( sect->type() == ld::Section::typeCFI ) {
-            for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
-                size += (*ait)->size();
+            for (const ld::Atom* atom : sect->atoms) {
+                size += atom->size();
+                if ( strcmp(atom->name(), "CIE") != 0 )
+                    allCIEs = false;
+            }
+            if ( allCIEs ) {
+                // Linker generates eh_frame data even when there's only an unused CIEs in it
+                sect->atoms.clear();
+                state.sections.erase(std::remove(state.sections.begin(), state.sections.end(), sect), state.sections.end());
+                return 0;
             }
         }
     }