X-Git-Url: https://git.saurik.com/apple/ld64.git/blobdiff_plain/9543cb2f21e50a417dc8cf37eb7173f353536979..e456bf1059cf7e6b8b71545d1b2f2092b55a9684:/src/ld/passes/compact_unwind.cpp

diff --git a/src/ld/passes/compact_unwind.cpp b/src/ld/passes/compact_unwind.cpp
index ad8a504..8b2afbc 100644
--- a/src/ld/passes/compact_unwind.cpp
+++ b/src/ld/passes/compact_unwind.cpp
@@ -108,6 +108,7 @@ private:
 	void		addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend);
 
 	uint8_t*				_pagesForDelete;
+	uint8_t*				_pageAlignedPages;
 	uint8_t*				_pages;
 	uint64_t				_pagesSize;
 	uint8_t*				_header;
@@ -129,8 +130,8 @@ template <typename A>
 UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize)
 	: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
 				ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
-				symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
-	  _pagesForDelete(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
+				symbolTableNotIn, false, false, false, ld::Atom::Alignment(2)),
+	  _pagesForDelete(NULL), _pageAlignedPages(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
 {
 	// build new compressed list by removing entries where next function has same encoding
 	std::vector<UnwindEntry> uniqueEntries;
@@ -143,8 +144,7 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
 	std::map<const ld::Atom*, uint32_t> personalityIndexMap;
 	makePersonalityIndexes(uniqueEntries, personalityIndexMap);
 	if ( personalityIndexMap.size() > 3 ) {
-		warning("too many personality routines for compact unwind to encode");
-		return;
+		throw "too many personality routines for compact unwind to encode";
 	}
 
 	// put the most common encodings into the common table, but at most 127 of them
@@ -160,11 +160,12 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
 	const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
 	assert(uniqueEntries.size() > 0);
 	const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 2;
-	_pagesForDelete = (uint8_t*)calloc(pageCount,4096);
+	_pagesForDelete = (uint8_t*)calloc(pageCount+1,4096);
 	if ( _pagesForDelete == NULL ) {
 		warning("could not allocate space for compact unwind info");
 		return;
 	}
+	_pageAlignedPages = (uint8_t*)((((uintptr_t)_pagesForDelete) + 4095) & -4096);
 
 	// make last second level page smaller so that all other second level pages can be page aligned
 	uint32_t maxLastPageSize = 4096 - (ehFrameSize % 4096);
@@ -179,7 +180,7 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
 	uint8_t* secondLevelPagesStarts[pageCount*3];
 	unsigned int endIndex = uniqueEntries.size();
 	unsigned int secondLevelPageCount = 0;
-	uint8_t* pageEnd = &_pagesForDelete[pageCount*4096];
+	uint8_t* pageEnd = &_pageAlignedPages[pageCount*4096];
 	uint32_t pageSize = maxLastPageSize;
 	while ( endIndex > 0 ) {
 		endIndex = makeCompressedSecondLevelPage(uniqueEntries, commonEncodings, pageSize, endIndex, pageEnd);
@@ -193,9 +194,8 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
 		}
 	}
 	_pages = pageEnd;
-	_pagesSize = &_pagesForDelete[pageCount*4096] - pageEnd;
-
-
+	_pagesSize = &_pageAlignedPages[pageCount*4096] - pageEnd;
+
 	// calculate section layout
 	const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
 	const uint32_t commonEncodingsArrayCount = commonEncodings.size();
@@ -212,7 +212,7 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
 	const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;
 
 	// now that we know the size of the header, slide all existing fixups on the pages
-	const int32_t fixupSlide = headerEndSectionOffset + (_pagesForDelete - _pages);
+	const int32_t fixupSlide = headerEndSectionOffset + (_pageAlignedPages - _pages);
 	for(std::vector<ld::Fixup>::iterator it = _fixups.begin(); it != _fixups.end(); ++it) {
 		it->offsetInAtom += fixupSlide;
 	}
@@ -303,6 +303,14 @@ bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
 	return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
 }
 
+
+template <>
+bool UnwindInfoAtom<arm>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
+{
+	return ((enc & UNWIND_ARM_MODE_MASK) == UNWIND_ARM_MODE_DWARF);
+}
+
+
 template <typename A>
 void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
 {
@@ -417,6 +425,23 @@ void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, con
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+
+template <>
+void UnwindInfoAtom<arm>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
+{
+	if ( fromFunc->isThumb() ) {
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of4, ld::Fixup::kindSetTargetAddress, func));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of4, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of4, ld::Fixup::kindSubtractAddend, 1));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k4of4, ld::Fixup::kindStoreLittleEndianLow24of32));
+	}
+	else {
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
+	}
+}
+
 template <>
 void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
 {
@@ -438,6 +463,14 @@ void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+
+template <>
+void UnwindInfoAtom<arm>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
 {
@@ -459,6 +492,14 @@ void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::At
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
 }
 
+
+template <>
+void UnwindInfoAtom<arm>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
 {
@@ -480,6 +521,14 @@ void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::
 	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+
+template <>
+void UnwindInfoAtom<arm>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
+{
+	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
 {
@@ -501,6 +550,14 @@ void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom*
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
 }
 
+
+template <>
+void UnwindInfoAtom<arm>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
 {
@@ -526,6 +583,15 @@ void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const
 }
 
 
 
+template <>
+void UnwindInfoAtom<arm>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
+}
+
+
 
 template <typename A>
@@ -547,11 +613,11 @@ unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<Unw
 		E::set32(entryTable[i].functionOffset, 0);
 		E::set32(entryTable[i].encoding, info.encoding);
 		// add fixup for address part of entry
-		uint32_t offset = (uint8_t*)(&entryTable[i].functionOffset) - _pagesForDelete;
+		uint32_t offset = (uint8_t*)(&entryTable[i].functionOffset) - _pageAlignedPages;
 		this->addRegularAddressFixup(offset, info.func);
 		if ( encodingMeansUseDwarf(info.encoding) ) {
 			// add fixup for dwarf offset part of page specific encoding
-			uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
+			uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
 			this->addRegularFDEOffsetFixup(encOffset, info.fde);
 		}
 	}
@@ -571,11 +637,10 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 	// keep adding entries to page until:
 	//  1) encoding table plus entry table plus header exceed page size
 	//  2) the file offset delta from the first to last function > 24 bits
-	//  3) custom encoding index reachs 255
+	//  3) custom encoding index reaches 255
 	//  4) run out of uniqueInfos to encode
 	std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
 	uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
-	std::vector<uint8_t> encodingIndexes;
 	int index = endIndex-1;
 	int entryCount = 0;
 	uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
@@ -587,6 +652,7 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 		std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
 		if ( pos != commonEncodings.end() ) {
 			encodingIndex = pos->second;
+			if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use commonEncodings[%d]=0x%08X\n", index, encodingIndex, info.encoding);
 		}
 		else {
 			// no commmon entry, so add one on this page
@@ -598,12 +664,13 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 			std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
 			if ( ppos != pageSpecificEncodings.end() ) {
 				encodingIndex = pos->second;
+				if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
 			}
 			else {
 				encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
 				if ( encodingIndex <= 255 ) {
 					pageSpecificEncodings[encoding] = encodingIndex;
-					if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): pageSpecificEncodings[%d]=0x%08X\n", encodingIndex, encoding);
+					if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
 				}
 				else {
 					canDo = false;  // case 3)
@@ -612,8 +679,6 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 				}
 			}
 		}
-		if ( canDo )
-			encodingIndexes.push_back(encodingIndex);
 		// compute function offset
 		uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
 		if ( funcOffsetWithInPage > 0x00FFFF00 ) {
@@ -621,16 +686,16 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 			canDo = false;  // case 2)
 			if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
 		}
-		else {
-			++entryCount;
-		}
 		// check room for entry
-		if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
+		if ( (pageSpecificEncodings.size()+entryCount) > space4 ) {
 			canDo = false;  // case 1)
 			--entryCount;
 			if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
 		}
 		//if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
+		if ( canDo ) {
+			++entryCount;
+		}
 	}
 
 	// check for cases where it would be better to use a regular (non-compressed) page
@@ -666,6 +731,7 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 		uint8_t encodingIndex;
 		if ( encodingMeansUseDwarf(info.encoding) ) {
 			// dwarf entries are always in page specific encodings
+			assert(pageSpecificEncodings.find(info.encoding+i) != pageSpecificEncodings.end());
 			encodingIndex = pageSpecificEncodings[info.encoding+i];
 		}
 		else {
@@ -678,11 +744,11 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 		uint32_t entryIndex = i - endIndex + entryCount;
 		E::set32(entiresArray[entryIndex], encodingIndex << 24);
 		// add fixup for address part of entry
-		uint32_t offset = (uint8_t*)(&entiresArray[entryIndex]) - _pagesForDelete;
+		uint32_t offset = (uint8_t*)(&entiresArray[entryIndex]) - _pageAlignedPages;
 		this->addCompressedAddressOffsetFixup(offset, info.func, firstFunc);
 		if ( encodingMeansUseDwarf(info.encoding) ) {
 			// add fixup for dwarf offset part of page specific encoding
-			uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pagesForDelete;
+			uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pageAlignedPages;
 			this->addCompressedEncodingFixup(encOffset, info.fde);
 		}
 	}
@@ -701,16 +767,22 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 
 
 
-
-
-static uint64_t calculateEHFrameSize(const ld::Internal& state)
+static uint64_t calculateEHFrameSize(ld::Internal& state)
 {
+	bool allCIEs = true;
 	uint64_t size = 0;
-	for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
-		ld::Internal::FinalSection* sect = *sit;
+	for (ld::Internal::FinalSection* sect : state.sections) {
 		if ( sect->type() == ld::Section::typeCFI ) {
-			for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
-				size += (*ait)->size();
+			for (const ld::Atom* atom : sect->atoms) {
+				size += atom->size();
+				if ( strcmp(atom->name(), "CIE") != 0 )
+					allCIEs = false;
+			}
+			if ( allCIEs ) {
+				// Linker generates eh_frame data even when there's only an unused CIEs in it
+				sect->atoms.clear();
+				state.sections.erase(std::remove(state.sections.begin(), state.sections.end(), sect), state.sections.end());
+				return 0;
 			}
 		}
 	}
@@ -838,6 +910,12 @@ static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::In
 		case CPU_TYPE_ARM64:
 			state.addAtom(*new UnwindInfoAtom<arm64>(entries, ehFrameSize));
 			break;
+#endif
+#if SUPPORT_ARCH_arm_any
+		case CPU_TYPE_ARM:
+			if ( opts.armUsesZeroCostExceptions() )
+				state.addAtom(*new UnwindInfoAtom<arm>(entries, ehFrameSize));
+			break;
 #endif
 		default:
 			assert(0 && "no compact unwind for arch");
@@ -891,6 +969,8 @@ template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld:
 template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
 template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
 #endif
+template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
+template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
 
 template <typename A>
 CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom, uint32_t startOffset,
@@ -952,6 +1032,9 @@ static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, cons
 			state.addAtom(*new CompactUnwindAtom<arm64>(state, atom, startOffset, endOffset-startOffset, cui));
 			break;
 #endif
+		case CPU_TYPE_ARM:
+			state.addAtom(*new CompactUnwindAtom<arm>(state, atom, startOffset, endOffset-startOffset, cui));
+			break;
 	}
 }
 
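
Editorial notes: the three sketches below illustrate techniques the patch relies on. They are standalone additions for the reader, not part of the diff, and every name in them is illustrative.

The patch introduces _pageAlignedPages because calloc guarantees no alignment beyond malloc's: it over-allocates by one 4096-byte page (calloc(pageCount+1, 4096)) and rounds the returned pointer up, so every second-level unwind page can start on a page boundary, and all offset computations are rebased from _pagesForDelete onto the aligned pointer. The alignment idiom in isolation, as a minimal sketch:

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main()
{
	const unsigned int pageCount = 4;
	// Over-allocate by one page so a page-aligned pointer into the block
	// still has pageCount*4096 usable bytes behind it.
	uint8_t* raw = (uint8_t*)calloc(pageCount + 1, 4096);
	assert(raw != NULL);
	// Round up to the next 4096-byte boundary: add 4095, then clear the
	// low 12 bits (-4096 converts to 0xFFF...F000), exactly as the patch does.
	uint8_t* aligned = (uint8_t*)((((uintptr_t)raw) + 4095) & -4096);
	assert(((uintptr_t)aligned & 4095) == 0);
	assert(aligned + pageCount*4096 <= raw + (pageCount+1)*4096);
	free(raw);	// free the pointer calloc returned, never the aligned one
	return 0;
}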
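
Each compressed second-level entry written above is a single 32-bit word: an 8-bit encoding index in the top byte (E::set32(entiresArray[entryIndex], encodingIndex << 24)) and a 24-bit offset from the page's first function, filled in later by the kindStoreLittleEndianLow24of32 fixup. That layout is why the loop gives up when the function-offset delta exceeds 0x00FFFF00 (case 2) and when the combined common-plus-page-specific index passes 255 (case 3); the new ARM addCompressedAddressOffsetFixup also subtracts 1 (kindSubtractAddend) when the anchor function is Thumb, presumably to cancel the Thumb address bit in the delta. A hedged sketch of just the packing:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Pack one compressed unwind entry: 8-bit encoding index in the top
// byte, 24-bit offset from the page's first function in the rest.
static uint32_t makeCompressedEntry(uint8_t encodingIndex, uint32_t funcOffset)
{
	assert(funcOffset <= 0x00FFFFFF);	// case 2): offset must fit in 24 bits
	return ((uint32_t)encodingIndex << 24) | funcOffset;
}

int main()
{
	uint32_t entry = makeCompressedEntry(0x85, 0x012345);
	printf("entry=0x%08X index=%u offset=0x%06X\n",
	       (unsigned)entry, (unsigned)(entry >> 24), (unsigned)(entry & 0x00FFFFFF));
	return 0;
}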
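
The >= to > change in the capacity test reads as an off-by-one fix: space4 counts the 32-bit slots left in the page after the compressed-page header, and those slots are shared between the page-local encodings table and the entry array, so the page is only over capacity when the combined count exceeds space4, not when it exactly reaches it. A worked check with an assumed header size (the real value is sizeof(unwind_info_compressed_second_level_page_header)):

#include <cstdint>
#include <cstdio>

int main()
{
	const uint32_t pageSize   = 4096;
	const uint32_t headerSize = 12;	// assumed for illustration
	const uint32_t space4     = (pageSize - headerSize) / sizeof(uint32_t);	// 1021 slots
	const uint32_t encodings  = 10;
	const uint32_t entryCount = 1011;	// encodings + entries exactly fill the page
	printf("old test (>=): %s\n", (encodings + entryCount >= space4) ? "page full" : "fits");
	printf("new test (>):  %s\n", (encodings + entryCount >  space4) ? "page full" : "fits");
	return 0;
}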