X-Git-Url: https://git.saurik.com/apple/ld64.git/blobdiff_plain/afe874b1634377ecb27057ee76deb04915bb34d7..7f09b9353af9897bf18933788d6a59c152c29edd:/src/ld/passes/compact_unwind.cpp

diff --git a/src/ld/passes/compact_unwind.cpp b/src/ld/passes/compact_unwind.cpp
index 86c8eec..8b2afbc 100644
--- a/src/ld/passes/compact_unwind.cpp
+++ b/src/ld/passes/compact_unwind.cpp
@@ -71,8 +71,6 @@ public:
 								~UnwindInfoAtom();
 
 	virtual const ld::File*		file() const			{ return NULL; }
-	virtual bool				translationUnitSource(const char** dir, const char**) const
-													{ return false; }
 	virtual const char*			name() const			{ return "compact unwind info"; }
 	virtual uint64_t			size() const			{ return _headerSize+_pagesSize; }
 	virtual uint64_t			objectAddress() const	{ return 0; }
@@ -110,6 +108,7 @@ private:
 	void						addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend);
 
 	uint8_t*					_pagesForDelete;
+	uint8_t*					_pageAlignedPages;
 	uint8_t*					_pages;
 	uint64_t					_pagesSize;
 	uint8_t*					_header;
@@ -131,8 +130,8 @@ template <typename A>
 UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize)
 	: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
 				ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
-				symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
-	  _pagesForDelete(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
+				symbolTableNotIn, false, false, false, ld::Atom::Alignment(2)),
+	  _pagesForDelete(NULL), _pageAlignedPages(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
 {
 	// build new compressed list by removing entries where next function has same encoding
 	std::vector<UnwindEntry> uniqueEntries;
@@ -145,8 +144,7 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
 	std::map<const ld::Atom*, uint32_t> personalityIndexMap;
 	makePersonalityIndexes(uniqueEntries, personalityIndexMap);
 	if ( personalityIndexMap.size() > 3 ) {
-		warning("too many personality routines for compact unwind to encode");
-		return;
+		throw "too many personality routines for compact unwind to encode";
 	}
 
 	// put the most common encodings into the common table, but at most 127 of them
@@ -158,16 +156,16 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
 	std::vector<LSDAEntry> lsdaIndex;
 	makeLsdaIndex(uniqueEntries, lsdaIndex, lsdaIndexOffsetMap);
-
 	// calculate worst case size for all unwind info pages when allocating buffer
 	const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
 	assert(uniqueEntries.size() > 0);
-	const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 1;
-	_pagesForDelete = (uint8_t*)calloc(pageCount,4096);
+	const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 2;
+	_pagesForDelete = (uint8_t*)calloc(pageCount+1,4096);
 	if ( _pagesForDelete == NULL ) {
 		warning("could not allocate space for compact unwind info");
 		return;
 	}
+	_pageAlignedPages = (uint8_t*)((((uintptr_t)_pagesForDelete) + 4095) & -4096);
 
 	// make last second level page smaller so that all other second level pages can be page aligned
 	uint32_t maxLastPageSize = 4096 - (ehFrameSize % 4096);
@@ -182,19 +180,22 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
 	uint8_t* secondLevelPagesStarts[pageCount*3];
 	unsigned int endIndex = uniqueEntries.size();
 	unsigned int secondLevelPageCount = 0;
-	uint8_t* pageEnd = &_pagesForDelete[pageCount*4096];
+	uint8_t* pageEnd = &_pageAlignedPages[pageCount*4096];
 	uint32_t pageSize = maxLastPageSize;
 	while ( endIndex > 0 ) {
 		endIndex = makeCompressedSecondLevelPage(uniqueEntries, commonEncodings, pageSize, endIndex, pageEnd);
 		secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
 		secondLevelFirstFuncs[secondLevelPageCount] = uniqueEntries[endIndex].func;
 		++secondLevelPageCount;
-		pageSize = 4096;  // last page can be odd size, make rest up to 4096 bytes in size
+		// if this requires more than one page, align so that next starts on page boundary
+		if ( (pageSize != 4096) && (endIndex > 0) ) {
+			pageEnd = (uint8_t*)((uintptr_t)(pageEnd) & -4096);
+			pageSize = 4096;  // last page can be odd size, make rest up to 4096 bytes in size
+		}
 	}
 	_pages = pageEnd;
-	_pagesSize = &_pagesForDelete[pageCount*4096] - pageEnd;
-
-
+	_pagesSize = &_pageAlignedPages[pageCount*4096] - pageEnd;
+
 	// calculate section layout
 	const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
 	const uint32_t commonEncodingsArrayCount = commonEncodings.size();
@@ -211,7 +212,7 @@ UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint6
 	const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;
 
 	// now that we know the size of the header, slide all existing fixups on the pages
-	const int32_t fixupSlide = headerEndSectionOffset + (_pagesForDelete - _pages);
+	const int32_t fixupSlide = headerEndSectionOffset + (_pageAlignedPages - _pages);
 	for(std::vector<ld::Fixup>::iterator it = _fixups.begin(); it != _fixups.end(); ++it) {
 		it->offsetInAtom += fixupSlide;
 	}
@@ -296,6 +297,20 @@ bool UnwindInfoAtom<x86_64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc
 	return ((enc & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
 }
 
+template <>
+bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
+{
+	return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
+}
+
+
+template <>
+bool UnwindInfoAtom<arm>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
+{
+	return ((enc & UNWIND_ARM_MODE_MASK) == UNWIND_ARM_MODE_DWARF);
+}
+
+
 template <typename A>
 void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
 {
@@ -402,6 +417,31 @@ void UnwindInfoAtom<x86_64>::addCompressedAddressOffsetFixup(uint32_t offset, co
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+template <>
+void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
+{
+	if ( fromFunc->isThumb() ) {
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of4, ld::Fixup::kindSetTargetAddress, func));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of4, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of4, ld::Fixup::kindSubtractAddend, 1));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k4of4, ld::Fixup::kindStoreLittleEndianLow24of32));
+	}
+	else {
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
+	}
+}
+
 template <>
 void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
 {
@@ -416,6 +456,20 @@ void UnwindInfoAtom<x86_64>::addCompressedEncodingFixup(uint32_t offset, const l
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+template <>
+void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
 
 template <>
 void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
@@ -431,6 +485,21 @@ void UnwindInfoAtom<x86_64>::addRegularAddressFixup(uint32_t offset, const ld::A
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
 }
 
+template <>
+void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
 {
@@ -445,6 +514,21 @@ void UnwindInfoAtom<x86_64>::addRegularFDEOffsetFixup(uint32_t offset, const ld:
 	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
 }
 
+template <>
+void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
+{
+	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
+{
+	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
 {
@@ -459,6 +543,21 @@ void UnwindInfoAtom<x86_64>::addImageOffsetFixup(uint32_t offset, const ld::Atom
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
 }
 
+template <>
+void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
 template <>
 void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
 {
@@ -475,6 +574,22 @@ void UnwindInfoAtom<x86_64>::addImageOffsetFixupPlusAddend(uint32_t offset, cons
 	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
 }
 
+template <>
+void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
+{
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
+	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
+}
@@ -498,11 +613,11 @@ unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<Unw
 		E::set32(entryTable[i].functionOffset, 0);
 		E::set32(entryTable[i].encoding, info.encoding);
 		// add fixup for address part of entry
-		uint32_t offset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
+		uint32_t offset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
 		this->addRegularAddressFixup(offset, info.func);
 		if ( encodingMeansUseDwarf(info.encoding) ) {
 			// add fixup for dwarf offset part of page specific encoding
-			uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
+			uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
 			this->addRegularFDEOffsetFixup(encOffset, info.fde);
 		}
 	}
@@ -522,11 +637,10 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 	// keep adding entries to page until:
 	//   1) encoding table plus entry table plus header exceed page size
 	//   2) the file offset delta from the first to last function > 24 bits
-	//   3) custom encoding index reachs 255
+	//   3) custom encoding index reaches 255
 	//   4) run out of uniqueInfos to encode
 	std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
 	uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
-	std::vector<uint8_t> encodingIndexes;
 	int index = endIndex-1;
 	int entryCount = 0;
 	uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
@@ -538,6 +652,7 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 		std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
 		if ( pos != commonEncodings.end() ) {
 			encodingIndex = pos->second;
+			if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use commonEncodings[%d]=0x%08X\n", index, encodingIndex, info.encoding);
 		}
 		else {
 			// no commmon entry, so add one on this page
@@ -549,11 +664,13 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 			std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
 			if ( ppos != pageSpecificEncodings.end() ) {
 				encodingIndex = pos->second;
+				if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
 			}
 			else {
 				encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
 				if ( encodingIndex <= 255 ) {
 					pageSpecificEncodings[encoding] = encodingIndex;
+					if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
 				}
 				else {
 					canDo = false; // case 3)
@@ -562,8 +679,6 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 				}
 			}
 		}
-		if ( canDo )
-			encodingIndexes.push_back(encodingIndex);
 		// compute function offset
 		uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
 		if ( funcOffsetWithInPage > 0x00FFFF00 ) {
@@ -571,16 +686,16 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 			canDo = false; // case 2)
 			if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
 		}
-		else {
-			++entryCount;
-		}
 		// check room for entry
-		if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
+		if ( (pageSpecificEncodings.size()+entryCount) > space4 ) {
 			canDo = false;  // case 1)
 			--entryCount;
 			if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
 		}
 		//if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
+		if ( canDo ) {
+			++entryCount;
+		}
 	}
 
 	// check for cases where it would be better to use a regular (non-compressed) page
@@ -616,6 +731,7 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 		uint8_t encodingIndex;
 		if ( encodingMeansUseDwarf(info.encoding) ) {
 			// dwarf entries are always in page specific encodings
+			assert(pageSpecificEncodings.find(info.encoding+i) != pageSpecificEncodings.end());
 			encodingIndex = pageSpecificEncodings[info.encoding+i];
 		}
 		else {
@@ -628,11 +744,11 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 		uint32_t entryIndex = i - endIndex + entryCount;
 		E::set32(entiresArray[entryIndex], encodingIndex << 24);
 		// add fixup for address part of entry
-		uint32_t offset = (uint8_t*)(&entiresArray[entryIndex]) - _pagesForDelete;
+		uint32_t offset = (uint8_t*)(&entiresArray[entryIndex]) - _pageAlignedPages;
 		this->addCompressedAddressOffsetFixup(offset, info.func, firstFunc);
 		if ( encodingMeansUseDwarf(info.encoding) ) {
 			// add fixup for dwarf offset part of page specific encoding
-			uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pagesForDelete;
+			uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pageAlignedPages;
 			this->addCompressedEncodingFixup(encOffset, info.fde);
 		}
 	}
@@ -651,16 +767,22 @@ unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<
 
 
 
-static uint64_t calculateEHFrameSize(const ld::Internal& state)
+static uint64_t calculateEHFrameSize(ld::Internal& state)
 {
+	bool allCIEs = true;
 	uint64_t size = 0;
-	for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
-		ld::Internal::FinalSection* sect = *sit;
+	for (ld::Internal::FinalSection* sect : state.sections) {
 		if ( sect->type() == ld::Section::typeCFI ) {
-			for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
-				size += (*ait)->size();
+			for (const ld::Atom* atom : sect->atoms) {
+				size += atom->size();
+				if ( strcmp(atom->name(), "CIE") != 0 )
+					allCIEs = false;
+			}
+			if ( allCIEs ) {
+				// Linker generates eh_frame data even when there's only an unused CIEs in it
+				sect->atoms.clear();
+				state.sections.erase(std::remove(state.sections.begin(), state.sections.end(), sect), state.sections.end());
+				return 0;
 			}
 		}
 	}
 	return size;
 }
@@ -687,7 +809,7 @@ static void getAllUnwindInfos(const ld::Internal& state, std::vector<UnwindEntry
 			if ( atom->beginUnwind() == atom->endUnwind() ) {
 				// be sure to mark that we have no unwind info for stuff in the TEXT segment without unwind info
-				if ( atom->section().type() == ld::Section::typeCode ) {
+				if ( (atom->section().type() == ld::Section::typeCode) && (atom->size() !=0) ) {
 					entries.push_back(UnwindEntry(atom, address, 0, NULL, NULL, NULL, 0));
 				}
 			}
@@ -774,12 +896,27 @@ static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::In
 
 	// create atom that contains the whole compact unwind table
 	switch ( opts.architecture() ) {
+#if SUPPORT_ARCH_x86_64
 		case CPU_TYPE_X86_64:
 			state.addAtom(*new UnwindInfoAtom<x86_64>(entries, ehFrameSize));
 			break;
+#endif
+#if SUPPORT_ARCH_i386
 		case CPU_TYPE_I386:
 			state.addAtom(*new UnwindInfoAtom<x86>(entries, ehFrameSize));
 			break;
+#endif
+#if SUPPORT_ARCH_arm64
+		case CPU_TYPE_ARM64:
+			state.addAtom(*new UnwindInfoAtom<arm64>(entries, ehFrameSize));
+			break;
+#endif
+#if SUPPORT_ARCH_arm_any
+		case CPU_TYPE_ARM:
+			if ( opts.armUsesZeroCostExceptions() )
+				state.addAtom(*new UnwindInfoAtom<arm>(entries, ehFrameSize));
+			break;
+#endif
 		default:
 			assert(0 && "no compact unwind for arch");
 	}
@@ -795,8 +932,6 @@ public:
 								~CompactUnwindAtom() {}
 
 	virtual const ld::File*		file() const			{ return NULL; }
-	virtual bool				translationUnitSource(const char** dir, const char**) const
-													{ return false; }
 	virtual const char*			name() const			{ return "compact unwind info"; }
 	virtual uint64_t			size() const			{ return sizeof(macho_compact_unwind_entry<P>); }
 	virtual uint64_t			objectAddress() const	{ return 0; }
@@ -830,13 +965,19 @@ template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerKind = ld::Fixup::
 template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
 template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
 template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
+#if SUPPORT_ARCH_arm64
+template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
+template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
+#endif
+template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
+template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
 
 template <typename A>
 CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom, uint32_t startOffset,
 										uint32_t len, uint32_t cui)
 	: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
 				ld::Atom::scopeTranslationUnit, ld::Atom::typeUnclassified,
-				symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
+				symbolTableNotIn, false, false, false, ld::Atom::Alignment(log2(sizeof(pint_t)))),
 	  _atom(funcAtom), _startOffset(startOffset), _len(len), _compactUnwindInfo(cui)
 {
 	_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, funcAtom));
@@ -876,12 +1017,24 @@ static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, cons
 								uint32_t startOffset, uint32_t endOffset, uint32_t cui)
 {
 	switch ( opts.architecture() ) {
+#if SUPPORT_ARCH_x86_64
 		case CPU_TYPE_X86_64:
 			state.addAtom(*new CompactUnwindAtom<x86_64>(state, atom, startOffset, endOffset-startOffset, cui));
 			break;
+#endif
+#if SUPPORT_ARCH_i386
 		case CPU_TYPE_I386:
			state.addAtom(*new CompactUnwindAtom<x86>(state, atom, startOffset, endOffset-startOffset, cui));
 			break;
+#endif
+#if SUPPORT_ARCH_arm64
+		case CPU_TYPE_ARM64:
+			state.addAtom(*new CompactUnwindAtom<arm64>(state, atom, startOffset, endOffset-startOffset, cui));
+			break;
+#endif
+		case CPU_TYPE_ARM:
+			state.addAtom(*new CompactUnwindAtom<arm>(state, atom, startOffset, endOffset-startOffset, cui));
+			break;
 	}
 }
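
The core of this patch is replacing raw pointers into the calloc() buffer (_pagesForDelete) with a page-aligned view of it (_pageAlignedPages), and snapping each full second-level page back to a page boundary as the pages are laid out backwards from the end of the buffer. The following standalone sketch shows the two mask idioms involved; it is illustrative only, not ld64 code, and the variable names are hypothetical:

// Round-up and round-down to a 4 KB boundary, as in the patch above.
#include <cassert>
#include <cstdint>
#include <cstdlib>

int main()
{
	const uintptr_t pageSize = 4096;

	// calloc() only guarantees malloc alignment, so over-allocate one extra
	// page, mirroring the patch's calloc(pageCount+1, 4096)...
	uint8_t* pagesForDelete = (uint8_t*)calloc(4 + 1, pageSize);

	// ...then round up to the next page boundary, as the _pageAlignedPages
	// computation does: add 4095 and mask with -4096 (i.e. ~(4096-1)),
	// which clears the low 12 bits.
	uint8_t* pageAligned = (uint8_t*)((((uintptr_t)pagesForDelete) + pageSize - 1) & -pageSize);
	assert((uintptr_t)pageAligned % pageSize == 0);
	assert(pageAligned >= pagesForDelete && pageAligned < pagesForDelete + pageSize);

	// The loop's new pageEnd adjustment is the round-down variant: after the
	// odd-sized last page is emitted, snap the cursor back to a page boundary
	// so every remaining full-sized second-level page starts page aligned.
	uint8_t* pageEnd = pageAligned + 3 * pageSize - 100;	// somewhere mid-page
	pageEnd = (uint8_t*)((uintptr_t)pageEnd & -pageSize);
	assert((uintptr_t)pageEnd % pageSize == 0);

	free(pagesForDelete);
	return 0;
}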
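The fixup kinds used throughout (kindStoreLittleEndianLow24of32, plus the E::set32(..., encodingIndex << 24) store) reflect the layout of a compressed second-level entry: a 24-bit function offset in the low bits and an 8-bit encoding index in the high bits. That layout is also why makeCompressedSecondLevelPage() gives up when the offset delta exceeds 24 bits (case 2) or the encoding index would pass 255 (case 3). A minimal sketch of the packing, with hypothetical helper names rather than the mach-o header macros:

#include <cassert>
#include <cstdint>

// low 24 bits: offset from the page's first function; high 8 bits: encoding index
static uint32_t packEntry(uint32_t funcOffset, uint8_t encodingIndex)
{
	assert(funcOffset <= 0x00FFFFFF);	// the 24-bit limit behind case 2
	return (uint32_t(encodingIndex) << 24) | funcOffset;
}

static uint32_t entryFuncOffset(uint32_t entry)		{ return entry & 0x00FFFFFF; }
static uint8_t  entryEncodingIndex(uint32_t entry)	{ return (entry >> 24) & 0xFF; }

int main()
{
	uint32_t e = packEntry(0x1234, 3);
	assert(entryFuncOffset(e) == 0x1234);
	assert(entryEncodingIndex(e) == 3);
	return 0;
}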