X-Git-Url: https://git.saurik.com/apple/ld64.git/blobdiff_plain/599556ff3dd31aab68bb9685f1ed7fc4867803e7..7f09b9353af9897bf18933788d6a59c152c29edd:/src/ld/passes/compact_unwind.cpp?ds=sidebyside

diff --git a/src/ld/passes/compact_unwind.cpp b/src/ld/passes/compact_unwind.cpp
index 3ddd9b8..8b2afbc 100644
--- a/src/ld/passes/compact_unwind.cpp
+++ b/src/ld/passes/compact_unwind.cpp
@@ -144,8 +144,7 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
 	std::map<const ld::Atom*, uint32_t> personalityIndexMap;
 	makePersonalityIndexes(uniqueEntries, personalityIndexMap);
 	if ( personalityIndexMap.size() > 3 ) {
-		warning("too many personality routines for compact unwind to encode");
-		return;
+		throw "too many personality routines for compact unwind to encode";
 	}
 
 	// put the most common encodings into the common table, but at most 127 of them
@@ -304,6 +303,14 @@ bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
 	return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
 }
 
+
+template <>
+bool UnwindInfoAtom<arm>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
+{
+	return ((enc & UNWIND_ARM_MODE_MASK) == UNWIND_ARM_MODE_DWARF);
+}
+
+
 template <typename A>
 void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
 {
@@ -418,6 +425,23 @@ void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, con
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+
+template <>
+void UnwindInfoAtom<arm>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
+{
+	if ( fromFunc->isThumb() ) {
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of4, ld::Fixup::kindSetTargetAddress, func));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of4, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of4, ld::Fixup::kindSubtractAddend, 1));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k4of4, ld::Fixup::kindStoreLittleEndianLow24of32));
+	}
+	else {
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
+	}
+}
+
 template <>
 void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
 {
@@ -439,6 +463,14 @@ void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+
+template <>
+void UnwindInfoAtom<arm>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
 {
@@ -460,6 +492,14 @@ void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::At
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
 }
 
+
+template <>
+void UnwindInfoAtom<arm>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
 {
@@ -481,6 +521,14 @@ void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::
 	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+
+template <>
+void UnwindInfoAtom<arm>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
+{
+	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
 {
@@ -502,6 +550,14 @@ void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom*
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
 }
 
+
+template <>
+void UnwindInfoAtom<arm>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
 {
@@ -527,6 +583,15 @@ void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const
 }
 
+template <>
+void UnwindInfoAtom<arm>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
+}
+
+
 template <typename A>
@@ -572,11 +637,10 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 	// keep adding entries to page until:
 	//  1) encoding table plus entry table plus header exceed page size
 	//  2) the file offset delta from the first to last function > 24 bits
-	//  3) custom encoding index reachs 255
+	//  3) custom encoding index reaches 255
 	//  4) run out of uniqueInfos to encode
 	std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
 	uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
-	std::vector<uint8_t> encodingIndexes;
 	int index = endIndex-1;
 	int entryCount = 0;
 	uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
@@ -588,6 +652,7 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 		std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
 		if ( pos != commonEncodings.end() ) {
 			encodingIndex = pos->second;
+			if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use commonEncodings[%d]=0x%08X\n", index, encodingIndex, info.encoding);
 		}
 		else {
 			// no common entry, so add one on this page
@@ -599,12 +664,13 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 			std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
 			if ( ppos != pageSpecificEncodings.end() ) {
 				encodingIndex = ppos->second;
+				if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
 			}
 			else {
 				encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
 				if ( encodingIndex <= 255 ) {
 					pageSpecificEncodings[encoding] = encodingIndex;
-					if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): pageSpecificEncodings[%d]=0x%08X\n", encodingIndex, encoding);
"makeCompressedSecondLevelPage(): pageSpecificEncodings[%d]=0x%08X\n", encodingIndex, encoding); + if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding); } else { canDo = false; // case 3) @@ -613,8 +679,6 @@ unsigned int UnwindInfoAtom::makeCompressedSecondLevelPage(const std::vector< } } } - if ( canDo ) - encodingIndexes.push_back(encodingIndex); // compute function offset uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress; if ( funcOffsetWithInPage > 0x00FFFF00 ) { @@ -622,16 +686,16 @@ unsigned int UnwindInfoAtom::makeCompressedSecondLevelPage(const std::vector< canDo = false; // case 2) if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount); } - else { - ++entryCount; - } // check room for entry - if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) { + if ( (pageSpecificEncodings.size()+entryCount) > space4 ) { canDo = false; // case 1) --entryCount; if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount); } //if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount); + if ( canDo ) { + ++entryCount; + } } // check for cases where it would be better to use a regular (non-compressed) page @@ -667,6 +731,7 @@ unsigned int UnwindInfoAtom::makeCompressedSecondLevelPage(const std::vector< uint8_t encodingIndex; if ( encodingMeansUseDwarf(info.encoding) ) { // dwarf entries are always in page specific encodings + assert(pageSpecificEncodings.find(info.encoding+i) != pageSpecificEncodings.end()); encodingIndex = pageSpecificEncodings[info.encoding+i]; } else { @@ -702,16 +767,22 @@ unsigned int UnwindInfoAtom::makeCompressedSecondLevelPage(const std::vector< - - -static uint64_t calculateEHFrameSize(const ld::Internal& state) +static uint64_t calculateEHFrameSize(ld::Internal& state) { + bool allCIEs = true; uint64_t size = 0; - for (std::vector::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) { - ld::Internal::FinalSection* sect = *sit; + for (ld::Internal::FinalSection* sect : state.sections) { if ( sect->type() == ld::Section::typeCFI ) { - for (std::vector::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) { - size += (*ait)->size(); + for (const ld::Atom* atom : sect->atoms) { + size += atom->size(); + if ( strcmp(atom->name(), "CIE") != 0 ) + allCIEs = false; + } + if ( allCIEs ) { + // Linker generates eh_frame data even when there's only an unused CIEs in it + sect->atoms.clear(); + state.sections.erase(std::remove(state.sections.begin(), state.sections.end(), sect), state.sections.end()); + return 0; } } } @@ -839,6 +910,12 @@ static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::In case CPU_TYPE_ARM64: state.addAtom(*new UnwindInfoAtom(entries, ehFrameSize)); break; +#endif +#if SUPPORT_ARCH_arm_any + case CPU_TYPE_ARM: + if ( opts.armUsesZeroCostExceptions() ) + state.addAtom(*new UnwindInfoAtom(entries, ehFrameSize)); + break; #endif default: assert(0 && "no compact unwind for arch"); @@ -892,6 +969,8 @@ template <> ld::Fixup::Kind CompactUnwindAtom::_s_pointerStoreKind = ld: template <> ld::Fixup::Kind CompactUnwindAtom::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64; template <> ld::Fixup::Kind CompactUnwindAtom::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64; #endif +template 
+template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
 
 template <typename A>
 CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom, uint32_t startOffset,
@@ -953,6 +1032,9 @@ static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, cons
 			state.addAtom(*new CompactUnwindAtom<arm64>(state, atom, startOffset, endOffset-startOffset, cui));
 			break;
 #endif
+		case CPU_TYPE_ARM:
+			state.addAtom(*new CompactUnwindAtom<arm>(state, atom, startOffset, endOffset-startOffset, cui));
+			break;
 	}
 }
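
The limits that makeCompressedSecondLevelPage() enforces above (cases 1-3) all follow from the compressed second-level entry layout: each 32-bit entry packs the function's offset from the page's first function into the low 24 bits, and an index into the combined common/page-specific encoding tables into the high 8 bits. Below is a minimal standalone sketch of that packing, assuming only the entry layout published by the UNWIND_INFO_COMPRESSED_ENTRY_* macros in <mach-o/compact_unwind_encoding.h>; the helper names packEntry/entryFuncOffset/entryEncodingIndex are illustrative, not ld64 API.

// Illustrative sketch only: mirrors the compressed second-level entry layout
// (UNWIND_INFO_COMPRESSED_ENTRY_* in <mach-o/compact_unwind_encoding.h>).
// Bits 0-23 hold the function offset within the page; bits 24-31 hold an
// index into the common + page-specific encoding tables.
#include <cassert>
#include <cstdint>
#include <cstdio>

static uint32_t packEntry(uint32_t funcOffset, uint32_t encodingIndex)
{
	assert(funcOffset <= 0x00FFFFFF);   // case 2) guard: offset must fit in 24 bits
	assert(encodingIndex <= 0xFF);      // case 3) guard: index must fit in 8 bits
	return (encodingIndex << 24) | funcOffset;
}

static uint32_t entryFuncOffset(uint32_t entry)    { return entry & 0x00FFFFFF; }
static uint32_t entryEncodingIndex(uint32_t entry) { return (entry >> 24) & 0xFF; }

int main()
{
	// e.g. an entry whose encoding landed in a page-specific slot past the common table
	uint32_t entry = packEntry(0x123456, 130);
	printf("funcOffset=0x%06X encodingIndex=%u\n", entryFuncOffset(entry), entryEncodingIndex(entry));
	return 0;
}

This is why the pass rejects a page once funcOffsetWithInPage exceeds 0x00FFFF00 (case 2), stops once commonEncodings.size() + pageSpecificEncodings.size() would pass 255 (case 3), and bounds the entry-plus-encoding count by space4, the page's capacity in 32-bit words (case 1). Separately, the Thumb branch of the new addCompressedAddressOffsetFixup<arm> adds a kindSubtractAddend of 1 on top of the func-minus-fromFunc fixups, presumably to cancel the low "thumb" bit carried by Thumb function addresses before the low-24-bit store.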