return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
}
+
template <>
bool UnwindInfoAtom<arm>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
return ((enc & UNWIND_ARM_MODE_MASK) == UNWIND_ARM_MODE_DWARF);
}
+
template <>
void UnwindInfoAtom<arm>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
// compressed entry stores the delta from the page's first function in its low 24 bits
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}
+
template <>
void UnwindInfoAtom<arm>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
// dwarf encodings carry the FDE's section offset in the low 24 bits of the encoding
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}
+
template <>
void UnwindInfoAtom<arm>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}
+
template <>
void UnwindInfoAtom<arm>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
// FDE offset goes in the low 24 bits of the encoding word (second word of the entry)
_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}
+
template <>
void UnwindInfoAtom<arm>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}
+
template <>
void UnwindInfoAtom<arm>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}


template <typename A>
unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd)
{
// keep adding entries to page until:
// 1) encoding table plus entry table plus header exceed page size
// 2) the file offset delta from the first to last function > 24 bits
- // 3) custom encoding index reachs 255
+ // 3) custom encoding index reaches 255
// 4) run out of uniqueInfos to encode
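// note: each compressed entry is one uint32_t: the encoding index lives in the
// high 8 bits (hence limit 3) and the function's offset from the page's first
// function lives in the low 24 bits (hence limit 2)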
std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
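// space4 = how many 32-bit words remain in the page after the fixed header;
// the entry array and the page-specific encodings array share this space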
uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
- std::vector<uint8_t> encodingIndexes;
int index = endIndex-1;
int entryCount = 0;
uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
bool canDo = true;
while ( canDo && (index >= 0) ) {
const UnwindEntry& info = uniqueInfos[index];
// compute encoding index for this entry
unsigned int encodingIndex;
std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
if ( pos != commonEncodings.end() ) {
encodingIndex = pos->second;
+ if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use commonEncodings[%d]=0x%08X\n", index, encodingIndex, info.encoding);
}
else {
// no common entry, so add one on this page
uint32_t encoding = info.encoding;
if ( encodingMeansUseDwarf(encoding) ) {
// give each dwarf encoding its own page-specific slot by making the key unique per entry
encoding += (index+1);
}
std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
if ( ppos != pageSpecificEncodings.end() ) {
encodingIndex = ppos->second;
+ if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
}
else {
encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
if ( encodingIndex <= 255 ) {
pageSpecificEncodings[encoding] = encodingIndex;
- if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): pageSpecificEncodings[%d]=0x%08X\n", encodingIndex, encoding);
+ if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
}
else {
canDo = false; // case 3)
}
}
}
- if ( canDo )
- encodingIndexes.push_back(encodingIndex);
// compute function offset
uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
if ( funcOffsetWithInPage > 0x00FFFF00 ) {
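// (compares against 0x00FFFF00 rather than 0x00FFFFFF, presumably to leave
// slack since addresses can shift when atoms are laid out again)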
canDo = false; // case 2)
if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
}
- else {
- ++entryCount;
- }
// check room for entry
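// each entry and each page-specific encoding consumes one uint32_t of space4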
- if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
+ if ( (pageSpecificEncodings.size()+entryCount) > space4 ) {
canDo = false; // case 1)
--entryCount;
if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
}
//if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
+ if ( canDo ) {
+ ++entryCount;
+ }
--index;
}
// check for cases where it would be better to use a regular (non-compressed) page
// second pass: fill in the compressed entries chosen above
for (int i=0; i < entryCount; ++i) {
const UnwindEntry& info = uniqueInfos[endIndex-entryCount+i];
uint8_t encodingIndex;
if ( encodingMeansUseDwarf(info.encoding) ) {
// dwarf entries are always in page specific encodings
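// (dwarf encodings were stored under per-entry pseudo keys in the first pass,
// hence the offset added to info.encoding in the lookup below)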
+ assert(pageSpecificEncodings.find(info.encoding+i) != pageSpecificEncodings.end());
encodingIndex = pageSpecificEncodings[info.encoding+i];
}
else {
// non-dwarf entries prefer the shared common encodings table
std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
if ( pos != commonEncodings.end() )
encodingIndex = pos->second;
else
encodingIndex = pageSpecificEncodings[info.encoding];
}
-
-
-static uint64_t calculateEHFrameSize(const ld::Internal& state)
+static uint64_t calculateEHFrameSize(ld::Internal& state)
{
+ bool allCIEs = true;
uint64_t size = 0;
- for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
- ld::Internal::FinalSection* sect = *sit;
+ for (ld::Internal::FinalSection* sect : state.sections) {
if ( sect->type() == ld::Section::typeCFI ) {
- for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
- size += (*ait)->size();
+ for (const ld::Atom* atom : sect->atoms) {
+ size += atom->size();
+ if ( strcmp(atom->name(), "CIE") != 0 )
+ allCIEs = false;
+ }
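+ // a CFI section whose atoms are all CIEs contains no FDEs, so no function references it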
+ if ( allCIEs ) {
+ // <rdar://problem/21427393> Linker generates eh_frame data even when there's only an unused CIEs in it
+ sect->atoms.clear();
+ state.sections.erase(std::remove(state.sections.begin(), state.sections.end(), sect), state.sections.end());
+ return 0;
}
}
}