std::vector<LSDAEntry> lsdaIndex;
makeLsdaIndex(uniqueEntries, lsdaIndex, lsdaIndexOffsetMap);
-
// calculate worst case size for all unwind info pages when allocating buffer
const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
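	// (the regular page header and each regular entry are 8 bytes, so a 4KB page holds (4096-8)/8 == 511 entries)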
assert(uniqueEntries.size() > 0);
return ((enc & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
}
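+// arm64: an encoding whose mode bits are UNWIND_ARM64_MODE_DWARF cannot be
+// represented compactly and must fall back to a DWARF FDE in __eh_frame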
+template <>
+bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
+{
+ return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
+}
+
template <typename A>
void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}
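+// arm64: a compressed second-level entry stores (func - fromFunc) in its low
+// 24 bits, built as set-target / subtract-base / store-low-24 fixups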
+template <>
+void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
template <>
void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}
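+// arm64: a DWARF-fallback encoding carries the FDE's section offset in its low 24 bits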
+template <>
+void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
template <>
void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}
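+// arm64: a regular second-level entry records the function as a 32-bit image offset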
+template <>
+void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
template <>
void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}
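+// arm64: the encoding word sits 4 bytes into a regular entry, so the FDE's
+// section offset is stored at offset+4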
+template <>
+void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
+{
+ _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
+ _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
+}
+
template <>
void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}
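+// arm64: plain 32-bit image offset, used for pointers in the unwind section
+// (e.g. personality functions and LSDA entries)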
+template <>
+void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
+}
+
template <>
void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}
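+// arm64: as above, but folds a constant addend into the stored image offset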
+template <>
+void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
+{
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
+ _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
+}
encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
if ( encodingIndex <= 255 ) {
pageSpecificEncodings[encoding] = encodingIndex;
+ if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): pageSpecificEncodings[%d]=0x%08X\n", encodingIndex, encoding);
}
else {
				canDo = false; // case 3) the encoding index no longer fits in one byte
if ( atom->beginUnwind() == atom->endUnwind() ) {
			// be sure to give TEXT-segment functions that lack unwind info an explicit no-unwind entry
- if ( atom->section().type() == ld::Section::typeCode ) {
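+			// a zero-size atom starts at the same address as the atom that
+			// follows it, so it must not contribute a no-unwind entry there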
+ if ( (atom->section().type() == ld::Section::typeCode) && (atom->size() !=0) ) {
entries.push_back(UnwindEntry(atom, address, 0, NULL, NULL, NULL, 0));
}
}
case CPU_TYPE_I386:
state.addAtom(*new UnwindInfoAtom<x86>(entries, ehFrameSize));
break;
+#endif
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ state.addAtom(*new UnwindInfoAtom<arm64>(entries, ehFrameSize));
+ break;
#endif
default:
assert(0 && "no compact unwind for arch");
template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
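+// arm64 is LP64, so its __compact_unwind entries use 64-bit pointers, as on x86_64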
+#if SUPPORT_ARCH_arm64
+template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
+template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
+#endif
template <typename A>
CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom, uint32_t startOffset,
uint32_t len, uint32_t cui)
: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
ld::Atom::scopeTranslationUnit, ld::Atom::typeUnclassified,
- symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
+				symbolTableNotIn, false, false, false, ld::Atom::Alignment(log2(sizeof(pint_t)))),	// entries hold pointers, so pointer-align each atom
_atom(funcAtom), _startOffset(startOffset), _len(len), _compactUnwindInfo(cui)
{
_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, funcAtom));
case CPU_TYPE_I386:
state.addAtom(*new CompactUnwindAtom<x86>(state, atom, startOffset, endOffset-startOffset, cui));
break;
+#endif
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ state.addAtom(*new CompactUnwindAtom<arm64>(state, atom, startOffset, endOffset-startOffset, cui));
+ break;
#endif
}
}