~UnwindInfoAtom();
virtual const ld::File* file() const { return NULL; }
- virtual bool translationUnitSource(const char** dir, const char**) const
- { return false; }
virtual const char* name() const { return "compact unwind info"; }
virtual uint64_t size() const { return _headerSize+_pagesSize; }
virtual uint64_t objectAddress() const { return 0; }
void addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend);
uint8_t* _pagesForDelete;
+ uint8_t* _pageAlignedPages;
uint8_t* _pages;
uint64_t _pagesSize;
uint8_t* _header;
UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize)
: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
- symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
- _pagesForDelete(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
+ symbolTableNotIn, false, false, false, ld::Atom::Alignment(2)),
+ _pagesForDelete(NULL), _pageAlignedPages(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
{
// build new compressed list by removing entries where next function has same encoding
std::vector<UnwindEntry> uniqueEntries;
std::map<const ld::Atom*, uint32_t> personalityIndexMap;
makePersonalityIndexes(uniqueEntries, personalityIndexMap);
if ( personalityIndexMap.size() > 3 ) {
- warning("too many personality routines for compact unwind to encode");
- return;
+ throw "too many personality routines for compact unwind to encode";
}
// put the most common encodings into the common table, but at most 127 of them
std::vector<LSDAEntry> lsdaIndex;
makeLsdaIndex(uniqueEntries, lsdaIndex, lsdaIndexOffsetMap);
-
// calculate worst case size for all unwind info pages when allocating buffer
const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
assert(uniqueEntries.size() > 0);
- const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 1;
- _pagesForDelete = (uint8_t*)calloc(pageCount,4096);
+ const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 2;
+ _pagesForDelete = (uint8_t*)calloc(pageCount+1,4096);
if ( _pagesForDelete == NULL ) {
warning("could not allocate space for compact unwind info");
return;
}
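+ // round the calloc'd buffer up to the next 4096-byte boundary so second level pages can be page aligned (the buffer was allocated one page larger to allow this)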
+ _pageAlignedPages = (uint8_t*)((((uintptr_t)_pagesForDelete) + 4095) & -4096);
// make last second level page smaller so that all other second level pages can be page aligned
uint32_t maxLastPageSize = 4096 - (ehFrameSize % 4096);
uint8_t* secondLevelPagesStarts[pageCount*3];
unsigned int endIndex = uniqueEntries.size();
unsigned int secondLevelPageCount = 0;
- uint8_t* pageEnd = &_pagesForDelete[pageCount*4096];
+ uint8_t* pageEnd = &_pageAlignedPages[pageCount*4096];
uint32_t pageSize = maxLastPageSize;
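+ // second level pages are built back to front, starting with the (possibly smaller) last page, so _pages ends up pointing at the first one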
while ( endIndex > 0 ) {
endIndex = makeCompressedSecondLevelPage(uniqueEntries, commonEncodings, pageSize, endIndex, pageEnd);
secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
secondLevelFirstFuncs[secondLevelPageCount] = uniqueEntries[endIndex].func;
++secondLevelPageCount;
- pageSize = 4096; // last page can be odd size, make rest up to 4096 bytes in size
+ // if this requires more than one page, align so that next starts on page boundary
+ if ( (pageSize != 4096) && (endIndex > 0) ) {
+ pageEnd = (uint8_t*)((uintptr_t)(pageEnd) & -4096);
+ pageSize = 4096; // last page can be odd size, make rest up to 4096 bytes in size
+ }
}
_pages = pageEnd;
- _pagesSize = &_pagesForDelete[pageCount*4096] - pageEnd;
-
-
+ _pagesSize = &_pageAlignedPages[pageCount*4096] - pageEnd;
+
// calculate section layout
const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
const uint32_t commonEncodingsArrayCount = commonEncodings.size();
const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;
// now that we know the size of the header, slide all existing fixups on the pages
- const int32_t fixupSlide = headerEndSectionOffset + (_pagesForDelete - _pages);
+ const int32_t fixupSlide = headerEndSectionOffset + (_pageAlignedPages - _pages);
for(std::vector<ld::Fixup>::iterator it = _fixups.begin(); it != _fixups.end(); ++it) {
it->offsetInAtom += fixupSlide;
}
return ((enc & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
}
+template <>
+bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
+{
+ return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
+}
+
+
+template <>
+bool UnwindInfoAtom<arm>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
+{
+ return ((enc & UNWIND_ARM_MODE_MASK) == UNWIND_ARM_MODE_DWARF);
+}
+
+
template <typename A>
void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}
+template <>
+void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
+{
+ if ( fromFunc->isThumb() ) {
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of4, ld::Fixup::kindSetTargetAddress, func));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of4, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of4, ld::Fixup::kindSubtractAddend, 1));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k4of4, ld::Fixup::kindStoreLittleEndianLow24of32));
+ }
+ else {
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
+ }
+}
+
template <>
void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}
+template <>
+void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
template <>
void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}
+template <>
+void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
template <>
void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}
+template <>
+void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
+{
+ _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+ _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
+{
+ _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+ _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
template <>
void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}
+template <>
+void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
template <>
void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}
+template <>
+void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
+}
+
+
+template <>
+void UnwindInfoAtom<arm>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
+}
entryTable[i].set_functionOffset(0);
entryTable[i].set_encoding(info.encoding);
// add fixup for address part of entry
- uint32_t offset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
+ uint32_t offset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
this->addRegularAddressFixup(offset, info.func);
if ( encodingMeansUseDwarf(info.encoding) ) {
// add fixup for dwarf offset part of page specific encoding
- uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
+ uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
this->addRegularFDEOffsetFixup(encOffset, info.fde);
}
}
// keep adding entries to page until:
// 1) encoding table plus entry table plus header exceed page size
// 2) the file offset delta from the first to last function > 24 bits
- // 3) custom encoding index reachs 255
+ // 3) custom encoding index reaches 255
// 4) run out of uniqueInfos to encode
std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
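+ // space4 = number of 32-bit slots left in the page after the header; the entry array and the page-specific encodings table must both fit in it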
- std::vector<uint8_t> encodingIndexes;
int index = endIndex-1;
int entryCount = 0;
uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
if ( pos != commonEncodings.end() ) {
encodingIndex = pos->second;
+ if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use commonEncodings[%d]=0x%08X\n", index, encodingIndex, info.encoding);
}
else {
// no common entry, so add one on this page
std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
if ( ppos != pageSpecificEncodings.end() ) {
encodingIndex = ppos->second;
+ if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
}
else {
encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
if ( encodingIndex <= 255 ) {
pageSpecificEncodings[encoding] = encodingIndex;
+ if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
}
else {
canDo = false; // case 3)
}
}
}
- if ( canDo )
- encodingIndexes.push_back(encodingIndex);
// compute function offset
uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
if ( funcOffsetWithInPage > 0x00FFFF00 ) {
canDo = false; // case 2)
if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
}
- else {
- ++entryCount;
- }
// check room for entry
- if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
+ if ( (pageSpecificEncodings.size()+entryCount) > space4 ) {
canDo = false; // case 1)
--entryCount;
if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
}
//if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
+ if ( canDo ) {
+ ++entryCount;
+ }
}
// check for cases where it would be better to use a regular (non-compressed) page
uint8_t encodingIndex;
if ( encodingMeansUseDwarf(info.encoding) ) {
// dwarf entries are always in page specific encodings
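+ // dwarf encodings were added to pageSpecificEncodings under a unique pseudo value (encoding plus the function's index), hence the +i in the lookup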
+ assert(pageSpecificEncodings.find(info.encoding+i) != pageSpecificEncodings.end());
encodingIndex = pageSpecificEncodings[info.encoding+i];
}
else {
uint32_t entryIndex = i - endIndex + entryCount;
E::set32(entiresArray[entryIndex], encodingIndex << 24);
// add fixup for address part of entry
- uint32_t offset = (uint8_t*)(&entiresArray[entryIndex]) - _pagesForDelete;
+ uint32_t offset = (uint8_t*)(&entiresArray[entryIndex]) - _pageAlignedPages;
this->addCompressedAddressOffsetFixup(offset, info.func, firstFunc);
if ( encodingMeansUseDwarf(info.encoding) ) {
// add fixup for dwarf offset part of page specific encoding
- uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pagesForDelete;
+ uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pageAlignedPages;
this->addCompressedEncodingFixup(encOffset, info.fde);
}
}
-
-
-static uint64_t calculateEHFrameSize(const ld::Internal& state)
+static uint64_t calculateEHFrameSize(ld::Internal& state)
{
+ bool allCIEs = true;
uint64_t size = 0;
- for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
- ld::Internal::FinalSection* sect = *sit;
+ for (ld::Internal::FinalSection* sect : state.sections) {
if ( sect->type() == ld::Section::typeCFI ) {
- for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
- size += (*ait)->size();
+ for (const ld::Atom* atom : sect->atoms) {
+ size += atom->size();
+ if ( strcmp(atom->name(), "CIE") != 0 )
+ allCIEs = false;
+ }
+ if ( allCIEs ) {
+ // <rdar://problem/21427393> Linker generates eh_frame data even when there are only unused CIEs in it
+ sect->atoms.clear();
+ state.sections.erase(std::remove(state.sections.begin(), state.sections.end(), sect), state.sections.end());
+ return 0;
}
}
}
if ( atom->beginUnwind() == atom->endUnwind() ) {
// be sure to mark that we have no unwind info for stuff in the TEXT segment without unwind info
- if ( atom->section().type() == ld::Section::typeCode ) {
+ if ( (atom->section().type() == ld::Section::typeCode) && (atom->size() !=0) ) {
entries.push_back(UnwindEntry(atom, address, 0, NULL, NULL, NULL, 0));
}
}
assert(fit->binding == ld::Fixup::bindingDirectlyBound);
lsda = fit->u.target;
break;
+ case ld::Fixup::kindNoneGroupSubordinatePersonality:
+ assert(fit->binding == ld::Fixup::bindingDirectlyBound);
+ personalityPointer = fit->u.target;
+ assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
+ break;
default:
break;
}
}
-
-
-void doPass(const Options& opts, ld::Internal& state)
+static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::Internal& state)
{
- //const bool log = false;
-
- // only make make __unwind_info in final linked images
- if ( !opts.needsUnwindInfoSection() )
- return;
-
// walk every atom and get its unwind info
std::vector<UnwindEntry> entries;
entries.reserve(64);
// create atom that contains the whole compact unwind table
switch ( opts.architecture() ) {
+#if SUPPORT_ARCH_x86_64
case CPU_TYPE_X86_64:
state.addAtom(*new UnwindInfoAtom<x86_64>(entries, ehFrameSize));
break;
+#endif
+#if SUPPORT_ARCH_i386
case CPU_TYPE_I386:
state.addAtom(*new UnwindInfoAtom<x86>(entries, ehFrameSize));
break;
+#endif
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ state.addAtom(*new UnwindInfoAtom<arm64>(entries, ehFrameSize));
+ break;
+#endif
+#if SUPPORT_ARCH_arm_any
+ case CPU_TYPE_ARM:
+ if ( opts.armUsesZeroCostExceptions() )
+ state.addAtom(*new UnwindInfoAtom<arm>(entries, ehFrameSize));
+ break;
+#endif
default:
assert(0 && "no compact unwind for arch");
}
}
+
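+// CompactUnwindAtom: one fixed-size macho_compact_unwind_entry in the __LD,__compact_unwind section, emitted only for relocatable (-r) output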
+template <typename A>
+class CompactUnwindAtom : public ld::Atom {
+public:
+ CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom,
+ uint32_t startOffset, uint32_t len, uint32_t cui);
+ ~CompactUnwindAtom() {}
+
+ virtual const ld::File* file() const { return NULL; }
+ virtual const char* name() const { return "compact unwind info"; }
+ virtual uint64_t size() const { return sizeof(macho_compact_unwind_entry<P>); }
+ virtual uint64_t objectAddress() const { return 0; }
+ virtual void copyRawContent(uint8_t buffer[]) const;
+ virtual void setScope(Scope) { }
+ virtual ld::Fixup::iterator fixupsBegin() const { return (ld::Fixup*)&_fixups[0]; }
+ virtual ld::Fixup::iterator fixupsEnd() const { return (ld::Fixup*)&_fixups[_fixups.size()]; }
+
+private:
+ typedef typename A::P P;
+ typedef typename A::P::E E;
+ typedef typename A::P::uint_t pint_t;
+
+
+ const ld::Atom* _atom;
+ const uint32_t _startOffset;
+ const uint32_t _len;
+ const uint32_t _compactUnwindInfo;
+ std::vector<ld::Fixup> _fixups;
+
+ static ld::Fixup::Kind _s_pointerKind;
+ static ld::Fixup::Kind _s_pointerStoreKind;
+ static ld::Section _s_section;
+};
+
+
+template <typename A>
+ld::Section CompactUnwindAtom<A>::_s_section("__LD", "__compact_unwind", ld::Section::typeDebug);
+
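+// per-architecture pointer-sized fixup kinds: _s_pointerKind stores the already-computed codeStart address, _s_pointerStoreKind stores the personality and lsda targets directly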
+template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
+template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
+template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
+template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
+#if SUPPORT_ARCH_arm64
+template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
+template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
+#endif
+template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
+template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
+
+template <typename A>
+CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom, uint32_t startOffset,
+ uint32_t len, uint32_t cui)
+ : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
+ ld::Atom::scopeTranslationUnit, ld::Atom::typeUnclassified,
+ symbolTableNotIn, false, false, false, ld::Atom::Alignment(log2(sizeof(pint_t)))),
+ _atom(funcAtom), _startOffset(startOffset), _len(len), _compactUnwindInfo(cui)
+{
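+ // the codeStart field is the function's address plus startOffset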
+ _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, funcAtom));
+ _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k2of3, ld::Fixup::kindAddAddend, _startOffset));
+ _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k3of3, _s_pointerKind));
+ // see if atom has subordinate personality function or lsda
+ for (ld::Fixup::iterator fit = funcAtom->fixupsBegin(), end=funcAtom->fixupsEnd(); fit != end; ++fit) {
+ switch ( fit->kind ) {
+ case ld::Fixup::kindNoneGroupSubordinatePersonality:
+ assert(fit->binding == ld::Fixup::bindingsIndirectlyBound);
+ _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::personalityFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, state.indirectBindingTable[fit->u.bindingIndex]));
+ break;
+ case ld::Fixup::kindNoneGroupSubordinateLSDA:
+ assert(fit->binding == ld::Fixup::bindingDirectlyBound);
+ _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::lsdaFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, fit->u.target));
+ break;
+ default:
+ break;
+ }
+ }
+
+}
+
+template <typename A>
+void CompactUnwindAtom<A>::copyRawContent(uint8_t buffer[]) const
+{
+ macho_compact_unwind_entry<P>* buf = (macho_compact_unwind_entry<P>*)buffer;
+ buf->set_codeStart(0);
+ buf->set_codeLen(_len);
+ buf->set_compactUnwindInfo(_compactUnwindInfo);
+ buf->set_personality(0);
+ buf->set_lsda(0);
+}
+
+
+static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, const ld::Atom* atom,
+ uint32_t startOffset, uint32_t endOffset, uint32_t cui)
+{
+ switch ( opts.architecture() ) {
+#if SUPPORT_ARCH_x86_64
+ case CPU_TYPE_X86_64:
+ state.addAtom(*new CompactUnwindAtom<x86_64>(state, atom, startOffset, endOffset-startOffset, cui));
+ break;
+#endif
+#if SUPPORT_ARCH_i386
+ case CPU_TYPE_I386:
+ state.addAtom(*new CompactUnwindAtom<x86>(state, atom, startOffset, endOffset-startOffset, cui));
+ break;
+#endif
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ state.addAtom(*new CompactUnwindAtom<arm64>(state, atom, startOffset, endOffset-startOffset, cui));
+ break;
+#endif
+ case CPU_TYPE_ARM:
+ state.addAtom(*new CompactUnwindAtom<arm>(state, atom, startOffset, endOffset-startOffset, cui));
+ break;
+ }
+}
+
+static void makeRelocateableCompactUnwindSection(const Options& opts, ld::Internal& state)
+{
+ // can't add CompactUnwindAtom atoms while iterating, so pre-scan
+ std::vector<const ld::Atom*> atomsWithUnwind;
+ for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
+ ld::Internal::FinalSection* sect = *sit;
+ for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
+ const ld::Atom* atom = *ait;
+ if ( atom->beginUnwind() != atom->endUnwind() )
+ atomsWithUnwind.push_back(atom);
+ }
+ }
+ // make one CompactUnwindAtom for each compact unwind range in each atom
+ for (std::vector<const ld::Atom*>::iterator it = atomsWithUnwind.begin(); it != atomsWithUnwind.end(); ++it) {
+ const ld::Atom* atom = *it;
+ uint32_t lastOffset = 0;
+ uint32_t lastCUE = 0;
+ bool first = true;
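+ // each unwind info record's startOffset closes the previous range; the final range runs to the end of the atom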
+ for (ld::Atom::UnwindInfo::iterator uit=atom->beginUnwind(); uit != atom->endUnwind(); ++uit) {
+ if ( !first ) {
+ makeCompactUnwindAtom(opts, state, atom, lastOffset, uit->startOffset, lastCUE);
+ }
+ lastOffset = uit->startOffset;
+ lastCUE = uit->unwindInfo;
+ first = false;
+ }
+ makeCompactUnwindAtom(opts, state, atom, lastOffset, (uint32_t)atom->size(), lastCUE);
+ }
+}
+
+
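+// for -r output emit per-range __LD,__compact_unwind entries; for final linked images emit the __unwind_info section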
+void doPass(const Options& opts, ld::Internal& state)
+{
+ if ( opts.outputKind() == Options::kObjectFile )
+ makeRelocateableCompactUnwindSection(opts, state);
+
+ else if ( opts.needsUnwindInfoSection() )
+ makeFinalLinkedImageCompactUnwindSection(opts, state);
+}
+
+
} // namespace compact_unwind
} // namespace passes
} // namespace ld