1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
3 * Copyright (c) 2009 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
#include <mach/machine.h>
#include <mach-o/compact_unwind_encoding.h>

#include <vector>
#include <map>
#include <algorithm>

#include "compact_unwind.h"
#include "Architectures.hpp"
#include "MachOFileAbstraction.hpp"
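
// This pass generates compact unwind information from the unwind entries attached to atoms.
// For a final linked image it synthesizes one UnwindInfoAtom containing the whole
// __TEXT,__unwind_info section (header, common encodings, personality array, first-level
// index, LSDA index, and second-level pages).  For -r (relocatable) output it instead emits
// one CompactUnwindAtom per unwind range into the __LD,__compact_unwind section.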
namespace ld {
namespace passes {
namespace compact_unwind {

struct UnwindEntry {
						UnwindEntry(const ld::Atom* f, uint64_t a, uint32_t o, const ld::Atom* d,
									const ld::Atom* l, const ld::Atom* p, uint32_t en)
							: func(f), fde(d), lsda(l), personalityPointer(p), funcTentAddress(a),
							  functionOffset(o), encoding(en) { }
	const ld::Atom*				func;
	const ld::Atom*				fde;
	const ld::Atom*				lsda;
	const ld::Atom*				personalityPointer;
	uint64_t					funcTentAddress;
	uint32_t					functionOffset;
	compact_unwind_encoding_t	encoding;
};

struct LSDAEntry {
	const ld::Atom*		func;
	const ld::Atom*		lsda;
};
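
// UnwindInfoAtom is the synthesized atom whose raw content becomes the entire
// __TEXT,__unwind_info section of a final linked image.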
template <typename A>
class UnwindInfoAtom : public ld::Atom {
public:
											UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize);
											~UnwindInfoAtom();

	virtual const ld::File*					file() const				{ return NULL; }
	virtual const char*						name() const				{ return "compact unwind info"; }
	virtual uint64_t						size() const				{ return _headerSize+_pagesSize; }
	virtual uint64_t						objectAddress() const		{ return 0; }
	virtual void							copyRawContent(uint8_t buffer[]) const;
	virtual void							setScope(Scope)				{ }
	virtual ld::Fixup::iterator				fixupsBegin() const			{ return (ld::Fixup*)&_fixups[0]; }
	virtual ld::Fixup::iterator				fixupsEnd() const			{ return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
	typedef typename A::P					P;
	typedef typename A::P::E				E;
	typedef typename A::P::uint_t			pint_t;

	typedef macho_unwind_info_compressed_second_level_page_header<P> CSLP;

	bool				encodingMeansUseDwarf(compact_unwind_encoding_t enc);
	void				compressDuplicates(const std::vector<UnwindEntry>& entries,
											std::vector<UnwindEntry>& uniqueEntries);
	void				makePersonalityIndexes(std::vector<UnwindEntry>& entries,
											std::map<const ld::Atom*, uint32_t>& personalityIndexMap);
	void				findCommonEncoding(const std::vector<UnwindEntry>& entries,
											std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings);
	void				makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex,
											std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap);
	unsigned int		makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
											const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
											uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd);
	unsigned int		makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
											unsigned int endIndex, uint8_t*& pageEnd);
	void				addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc);
	void				addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde);
	void				addRegularAddressFixup(uint32_t offset, const ld::Atom* func);
	void				addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde);
	void				addImageOffsetFixup(uint32_t offset, const ld::Atom* targ);
	void				addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend);

	uint8_t*					_pagesForDelete;
	uint8_t*					_pageAlignedPages;
	uint8_t*					_pages;
	uint64_t					_pagesSize;
	uint8_t*					_header;
	uint64_t					_headerSize;
	std::vector<ld::Fixup>		_fixups;

	static bool					_s_log;
	static ld::Section			_s_section;
};
template <typename A>
bool UnwindInfoAtom<A>::_s_log = false;

template <typename A>
ld::Section UnwindInfoAtom<A>::_s_section("__TEXT", "__unwind_info", ld::Section::typeUnwindInfo);
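
// The constructor does all the work: duplicate entries are coalesced, personality routines
// and common encodings are indexed, an LSDA index is built, second-level pages are written
// back-to-front into a page-aligned buffer, and finally the section header and first-level
// index are laid out once their sizes are known.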
template <typename A>
UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize)
	: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
				ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
				symbolTableNotIn, false, false, false, ld::Atom::Alignment(2)),
		_pagesForDelete(NULL), _pageAlignedPages(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
{
	// build new compressed list by removing entries where next function has same encoding
	std::vector<UnwindEntry> uniqueEntries;
	compressDuplicates(entries, uniqueEntries);

	// reserve room so _fixups vector is not reallocated a bunch of times
	_fixups.reserve(uniqueEntries.size()*3);

	// build personality index, update encodings with personality index
	std::map<const ld::Atom*, uint32_t> personalityIndexMap;
	makePersonalityIndexes(uniqueEntries, personalityIndexMap);
	if ( personalityIndexMap.size() > 3 ) {
		throw "too many personality routines for compact unwind to encode";
	}

	// put the most common encodings into the common table, but at most 127 of them
	std::map<compact_unwind_encoding_t, unsigned int> commonEncodings;
	findCommonEncoding(uniqueEntries, commonEncodings);
	// build lsda index
	std::map<const ld::Atom*, uint32_t> lsdaIndexOffsetMap;
	std::vector<LSDAEntry> lsdaIndex;
	makeLsdaIndex(uniqueEntries, lsdaIndex, lsdaIndexOffsetMap);

	// calculate worst case size for all unwind info pages when allocating buffer
	const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
	assert(uniqueEntries.size() > 0);
	const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 2;
	_pagesForDelete = (uint8_t*)calloc(pageCount+1,4096);
	if ( _pagesForDelete == NULL ) {
		warning("could not allocate space for compact unwind info");
		return;
	}
	_pageAlignedPages = (uint8_t*)((((uintptr_t)_pagesForDelete) + 4095) & -4096);

	// make last second level page smaller so that all other second level pages can be page aligned
	uint32_t maxLastPageSize = 4096 - (ehFrameSize % 4096);
	uint32_t tailPad = 0;
	if ( maxLastPageSize < 128 ) {
		tailPad = maxLastPageSize;
		maxLastPageSize = 4096;
	}
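
	// Second-level pages are written from the end of the page-aligned buffer toward the
	// front; _pages ends up pointing at the first byte actually used.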
	// fill in pages in reverse order
	const ld::Atom* secondLevelFirstFuncs[pageCount*3];
	uint8_t* secondLevelPagesStarts[pageCount*3];
	unsigned int endIndex = uniqueEntries.size();
	unsigned int secondLevelPageCount = 0;
	uint8_t* pageEnd = &_pageAlignedPages[pageCount*4096];
	uint32_t pageSize = maxLastPageSize;
	while ( endIndex > 0 ) {
		endIndex = makeCompressedSecondLevelPage(uniqueEntries, commonEncodings, pageSize, endIndex, pageEnd);
		secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
		secondLevelFirstFuncs[secondLevelPageCount] = uniqueEntries[endIndex].func;
		++secondLevelPageCount;
		// if this requires more than one page, align so that next starts on page boundary
		if ( (pageSize != 4096) && (endIndex > 0) ) {
			pageEnd = (uint8_t*)((uintptr_t)(pageEnd) & -4096);
			pageSize = 4096;  // last page can be odd size, make rest up to 4096 bytes in size
		}
	}
	_pages = pageEnd;
	_pagesSize = &_pageAlignedPages[pageCount*4096] - pageEnd;
	// calculate section layout
	const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
	const uint32_t commonEncodingsArrayCount = commonEncodings.size();
	const uint32_t commonEncodingsArraySize = commonEncodingsArrayCount * sizeof(compact_unwind_encoding_t);
	const uint32_t personalityArraySectionOffset = commonEncodingsArraySectionOffset + commonEncodingsArraySize;
	const uint32_t personalityArrayCount = personalityIndexMap.size();
	const uint32_t personalityArraySize = personalityArrayCount * sizeof(uint32_t);
	const uint32_t indexSectionOffset = personalityArraySectionOffset + personalityArraySize;
	const uint32_t indexCount = secondLevelPageCount+1;
	const uint32_t indexSize = indexCount * sizeof(macho_unwind_info_section_header_index_entry<P>);
	const uint32_t lsdaIndexArraySectionOffset = indexSectionOffset + indexSize;
	const uint32_t lsdaIndexArrayCount = lsdaIndex.size();
	const uint32_t lsdaIndexArraySize = lsdaIndexArrayCount * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
	const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;
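
	// Resulting header layout: section header, common encodings array, personality array,
	// first-level index (indexCount entries, including a terminating sentinel), then the
	// LSDA index array; the second-level pages follow the header in the atom's content.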
	// now that we know the size of the header, slide all existing fixups on the pages
	const int32_t fixupSlide = headerEndSectionOffset + (_pageAlignedPages - _pages);
	for(std::vector<ld::Fixup>::iterator it = _fixups.begin(); it != _fixups.end(); ++it) {
		it->offsetInAtom += fixupSlide;
	}

	// allocate and fill in section header
	_headerSize = headerEndSectionOffset;
	_header = new uint8_t[_headerSize];
	bzero(_header, _headerSize);
	macho_unwind_info_section_header<P>* sectionHeader = (macho_unwind_info_section_header<P>*)_header;
	sectionHeader->set_version(UNWIND_SECTION_VERSION);
	sectionHeader->set_commonEncodingsArraySectionOffset(commonEncodingsArraySectionOffset);
	sectionHeader->set_commonEncodingsArrayCount(commonEncodingsArrayCount);
	sectionHeader->set_personalityArraySectionOffset(personalityArraySectionOffset);
	sectionHeader->set_personalityArrayCount(personalityArrayCount);
	sectionHeader->set_indexSectionOffset(indexSectionOffset);
	sectionHeader->set_indexCount(indexCount);

	// copy common encodings
	uint32_t* commonEncodingsTable = (uint32_t*)&_header[commonEncodingsArraySectionOffset];
	for (std::map<uint32_t, unsigned int>::iterator it=commonEncodings.begin(); it != commonEncodings.end(); ++it)
		E::set32(commonEncodingsTable[it->second], it->first);
	// make references for personality entries
	uint32_t* personalityArray = (uint32_t*)&_header[sectionHeader->personalityArraySectionOffset()];
	for (std::map<const ld::Atom*, unsigned int>::iterator it=personalityIndexMap.begin(); it != personalityIndexMap.end(); ++it) {
		uint32_t offset = (uint8_t*)&personalityArray[it->second-1] - _header;
		this->addImageOffsetFixup(offset, it->first);
	}
	// build first level index and references
	macho_unwind_info_section_header_index_entry<P>* indexTable = (macho_unwind_info_section_header_index_entry<P>*)&_header[indexSectionOffset];
	uint32_t refOffset;
	for (unsigned int i=0; i < secondLevelPageCount; ++i) {
		unsigned int reverseIndex = secondLevelPageCount - 1 - i;
		indexTable[i].set_functionOffset(0);
		indexTable[i].set_secondLevelPagesSectionOffset(secondLevelPagesStarts[reverseIndex]-_pages+headerEndSectionOffset);
		indexTable[i].set_lsdaIndexArraySectionOffset(lsdaIndexOffsetMap[secondLevelFirstFuncs[reverseIndex]]+lsdaIndexArraySectionOffset);
		refOffset = (uint8_t*)&indexTable[i] - _header;
		this->addImageOffsetFixup(refOffset, secondLevelFirstFuncs[reverseIndex]);
	}
	indexTable[secondLevelPageCount].set_functionOffset(0);
	indexTable[secondLevelPageCount].set_secondLevelPagesSectionOffset(0);
	indexTable[secondLevelPageCount].set_lsdaIndexArraySectionOffset(lsdaIndexArraySectionOffset+lsdaIndexArraySize);
	refOffset = (uint8_t*)&indexTable[secondLevelPageCount] - _header;
	this->addImageOffsetFixupPlusAddend(refOffset, entries.back().func, entries.back().func->size()+1);
	// build lsda references
	uint32_t lsdaEntrySectionOffset = lsdaIndexArraySectionOffset;
	for (std::vector<LSDAEntry>::iterator it = lsdaIndex.begin(); it != lsdaIndex.end(); ++it) {
		this->addImageOffsetFixup(lsdaEntrySectionOffset, it->func);
		this->addImageOffsetFixup(lsdaEntrySectionOffset+4, it->lsda);
		lsdaEntrySectionOffset += sizeof(unwind_info_section_header_lsda_index_entry);
	}
}
template <typename A>
UnwindInfoAtom<A>::~UnwindInfoAtom()
{
	free(_pagesForDelete);
}
template <typename A>
void UnwindInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
{
	// content is in two parts
	memcpy(buffer, _header, _headerSize);
	memcpy(&buffer[_headerSize], _pages, _pagesSize);
}
template <>
bool UnwindInfoAtom<x86>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
	return ((enc & UNWIND_X86_MODE_MASK) == UNWIND_X86_MODE_DWARF);
}

template <>
bool UnwindInfoAtom<x86_64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
	return ((enc & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
}

template <>
bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
	return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
}

template <>
bool UnwindInfoAtom<arm>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
	return ((enc & UNWIND_ARM_MODE_MASK) == UNWIND_ARM_MODE_DWARF);
}
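
// Entries whose encoding mode is "use dwarf" cannot be represented compactly; they keep a
// reference to their FDE in __eh_frame, are never coalesced with their neighbors, and are
// never placed in the common encodings table.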
template <typename A>
void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
{
	// build new list removing entries where next function has same encoding
	uniqueEntries.reserve(entries.size());
	UnwindEntry last(NULL, 0, 0, NULL, NULL, NULL, 0xFFFFFFFF);
	for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
		const UnwindEntry& next = *it;
		bool newNeedsDwarf = encodingMeansUseDwarf(next.encoding);
		// remove entries which have same encoding and personalityPointer as last one
		if ( newNeedsDwarf || (next.encoding != last.encoding) || (next.personalityPointer != last.personalityPointer)
							|| (next.lsda != NULL) || (last.lsda != NULL) ) {
			uniqueEntries.push_back(next);
		}
		last = next;
	}
	if (_s_log) fprintf(stderr, "compressDuplicates() entries.size()=%lu, uniqueEntries.size()=%lu\n",
						entries.size(), uniqueEntries.size());
}
template <typename A>
void UnwindInfoAtom<A>::makePersonalityIndexes(std::vector<UnwindEntry>& entries, std::map<const ld::Atom*, uint32_t>& personalityIndexMap)
{
	for(std::vector<UnwindEntry>::iterator it=entries.begin(); it != entries.end(); ++it) {
		if ( it->personalityPointer != NULL ) {
			std::map<const ld::Atom*, uint32_t>::iterator pos = personalityIndexMap.find(it->personalityPointer);
			if ( pos == personalityIndexMap.end() ) {
				const uint32_t nextIndex = personalityIndexMap.size() + 1;
				personalityIndexMap[it->personalityPointer] = nextIndex;
			}
			uint32_t personalityIndex = personalityIndexMap[it->personalityPointer];
			it->encoding |= (personalityIndex << (__builtin_ctz(UNWIND_PERSONALITY_MASK)) );
		}
	}
	if (_s_log) fprintf(stderr, "makePersonalityIndexes() %lu personality routines used\n", personalityIndexMap.size());
}
template <typename A>
void UnwindInfoAtom<A>::findCommonEncoding(const std::vector<UnwindEntry>& entries,
											std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings)
{
	// scan infos to get frequency counts for each encoding
	std::map<compact_unwind_encoding_t, unsigned int> encodingsUsed;
	unsigned int mostCommonEncodingUsageCount = 0;
	for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
		// never put dwarf into common table
		if ( encodingMeansUseDwarf(it->encoding) )
			continue;
		std::map<compact_unwind_encoding_t, unsigned int>::iterator pos = encodingsUsed.find(it->encoding);
		if ( pos == encodingsUsed.end() ) {
			encodingsUsed[it->encoding] = 1;
		}
		else {
			encodingsUsed[it->encoding] += 1;
			if ( mostCommonEncodingUsageCount < encodingsUsed[it->encoding] )
				mostCommonEncodingUsageCount = encodingsUsed[it->encoding];
		}
	}
	// put the most common encodings into the common table, but at most 127 of them
	for(unsigned int usages=mostCommonEncodingUsageCount; usages > 1; --usages) {
		for (std::map<compact_unwind_encoding_t, unsigned int>::iterator euit=encodingsUsed.begin(); euit != encodingsUsed.end(); ++euit) {
			if ( euit->second == usages ) {
				unsigned int sz = commonEncodings.size();
				if ( sz < 127 ) {
					commonEncodings[euit->first] = sz;
				}
			}
		}
	}
	if (_s_log) fprintf(stderr, "findCommonEncoding() %lu common encodings found\n", commonEncodings.size());
}
template <typename A>
void UnwindInfoAtom<A>::makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex, std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap)
{
	for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
		lsdaIndexOffsetMap[it->func] = lsdaIndex.size() * sizeof(unwind_info_section_header_lsda_index_entry);
		if ( it->lsda != NULL ) {
			LSDAEntry entry;
			entry.func = it->func;
			entry.lsda = it->lsda;
			lsdaIndex.push_back(entry);
		}
	}
	if (_s_log) fprintf(stderr, "makeLsdaIndex() %lu LSDAs found\n", lsdaIndex.size());
}
template <>
void UnwindInfoAtom<x86>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
	if ( fromFunc->isThumb() ) {
		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of4, ld::Fixup::kindSetTargetAddress, func));
		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of4, ld::Fixup::kindSubtractTargetAddress, fromFunc));
		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of4, ld::Fixup::kindSubtractAddend, 1));
		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k4of4, ld::Fixup::kindStoreLittleEndianLow24of32));
	}
	else {
		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
		_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
	}
}
template <>
void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}
template <>
void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}
template <>
void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}
template <>
void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}
template <>
void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}
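
// Two second-level page formats exist: regular pages hold 8-byte entries (32-bit function
// offset plus a full 32-bit encoding), while compressed pages hold 4-byte entries (24-bit
// function offset plus an 8-bit index into the common or page-specific encoding tables).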
template <typename A>
unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
															unsigned int endIndex, uint8_t*& pageEnd)
{
	const unsigned int maxEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
	const unsigned int entriesToAdd = ((endIndex > maxEntriesPerPage) ? maxEntriesPerPage : endIndex);
	uint8_t* pageStart = pageEnd
						- entriesToAdd*sizeof(unwind_info_regular_second_level_entry)
						- sizeof(unwind_info_regular_second_level_page_header);
	macho_unwind_info_regular_second_level_page_header<P>* page = (macho_unwind_info_regular_second_level_page_header<P>*)pageStart;
	page->set_kind(UNWIND_SECOND_LEVEL_REGULAR);
	page->set_entryPageOffset(sizeof(macho_unwind_info_regular_second_level_page_header<P>));
	page->set_entryCount(entriesToAdd);
	macho_unwind_info_regular_second_level_entry<P>* entryTable = (macho_unwind_info_regular_second_level_entry<P>*)(pageStart + page->entryPageOffset());
	for (unsigned int i=0; i < entriesToAdd; ++i) {
		const UnwindEntry& info = uniqueInfos[endIndex-entriesToAdd+i];
		entryTable[i].set_functionOffset(0);
		entryTable[i].set_encoding(info.encoding);
		// add fixup for address part of entry
		uint32_t offset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
		this->addRegularAddressFixup(offset, info.func);
		if ( encodingMeansUseDwarf(info.encoding) ) {
			// add fixup for dwarf offset part of page specific encoding
			uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
			this->addRegularFDEOffsetFixup(encOffset, info.fde);
		}
	}
	if (_s_log) fprintf(stderr, "regular page with %u entries\n", entriesToAdd);
	pageEnd = pageStart;
	return endIndex - entriesToAdd;
}
template <typename A>
unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
															const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
															uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd)
{
	if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(pageSize=%u, endIndex=%u)\n", pageSize, endIndex);
	// first pass calculates how many compressed entries we could fit in this sized page
	// keep adding entries to page until:
	//  1) encoding table plus entry table plus header exceed page size
	//  2) the file offset delta from the first to last function > 24 bits
	//  3) custom encoding index reaches 255
	//  4) run out of uniqueInfos to encode
	std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
	uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
	int index = endIndex-1;
	int entryCount = 0;
	uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
	bool canDo = true;
	while ( canDo && (index >= 0) ) {
		const UnwindEntry& info = uniqueInfos[index--];
		// compute encoding index
		unsigned int encodingIndex;
		std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
		if ( pos != commonEncodings.end() ) {
			encodingIndex = pos->second;
			if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use commonEncodings[%d]=0x%08X\n", index, encodingIndex, info.encoding);
		}
		else {
			// no common entry, so add one on this page
			uint32_t encoding = info.encoding;
			if ( encodingMeansUseDwarf(encoding) ) {
				// make unique pseudo encoding so this dwarf will get its own encoding entry slot
				encoding += (index+1);
			}
			std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
			if ( ppos != pageSpecificEncodings.end() ) {
				encodingIndex = ppos->second;
				if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
			}
			else {
				encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
				if ( encodingIndex <= 255 ) {
					pageSpecificEncodings[encoding] = encodingIndex;
					if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
				}
				else {
					canDo = false; // case 3)
					if (_s_log) fprintf(stderr, "end of compressed page with %u entries, %lu custom encodings because too many custom encodings\n",
											entryCount, pageSpecificEncodings.size());
				}
			}
		}
		// compute function offset
		uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
		if ( funcOffsetWithInPage > 0x00FFFF00 ) {
			// don't use 0x00FFFFFF because addresses may vary after atoms are laid out again
			canDo = false; // case 2)
			if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
		}
		// check room for entry
		if ( (pageSpecificEncodings.size()+entryCount) > space4 ) {
			canDo = false; // case 1)
			if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
		}
		//if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
		if ( canDo ) {
			++entryCount;
		}
	}
	// check for cases where it would be better to use a regular (non-compressed) page
	const unsigned int compressPageUsed = sizeof(unwind_info_compressed_second_level_page_header)
								+ pageSpecificEncodings.size()*sizeof(uint32_t)
								+ entryCount*sizeof(uint32_t);
	if ( (compressPageUsed < (pageSize-4)) && (index >= 0) ) {
		const int regularEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
		if ( entryCount < regularEntriesPerPage ) {
			return makeRegularSecondLevelPage(uniqueInfos, pageSize, endIndex, pageEnd);
		}
	}

	// check if we need any padding because adding another entry would take 8 bytes but only have room for 4
	uint32_t pad = 0;
	if ( compressPageUsed == (pageSize-4) )
		pad = 4;

	// second pass fills in page
	uint8_t* pageStart = pageEnd - compressPageUsed - pad;
	CSLP* page = (CSLP*)pageStart;
	page->set_kind(UNWIND_SECOND_LEVEL_COMPRESSED);
	page->set_entryPageOffset(sizeof(CSLP));
	page->set_entryCount(entryCount);
	page->set_encodingsPageOffset(page->entryPageOffset()+entryCount*sizeof(uint32_t));
	page->set_encodingsCount(pageSpecificEncodings.size());
	uint32_t* const encodingsArray = (uint32_t*)&pageStart[page->encodingsPageOffset()];
	// fill in entry table
	uint32_t* const entiresArray = (uint32_t*)&pageStart[page->entryPageOffset()];
	const ld::Atom* firstFunc = uniqueInfos[endIndex-entryCount].func;
	for(unsigned int i=endIndex-entryCount; i < endIndex; ++i) {
		const UnwindEntry& info = uniqueInfos[i];
		uint8_t encodingIndex;
		if ( encodingMeansUseDwarf(info.encoding) ) {
			// dwarf entries are always in page specific encodings
			assert(pageSpecificEncodings.find(info.encoding+i) != pageSpecificEncodings.end());
			encodingIndex = pageSpecificEncodings[info.encoding+i];
		}
		else {
			std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
			if ( pos != commonEncodings.end() )
				encodingIndex = pos->second;
			else
				encodingIndex = pageSpecificEncodings[info.encoding];
		}
		uint32_t entryIndex = i - endIndex + entryCount;
		E::set32(entiresArray[entryIndex], encodingIndex << 24);
		// add fixup for address part of entry
		uint32_t offset = (uint8_t*)(&entiresArray[entryIndex]) - _pageAlignedPages;
		this->addCompressedAddressOffsetFixup(offset, info.func, firstFunc);
		if ( encodingMeansUseDwarf(info.encoding) ) {
			// add fixup for dwarf offset part of page specific encoding
			uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pageAlignedPages;
			this->addCompressedEncodingFixup(encOffset, info.fde);
		}
	}
	// fill in encodings table
	for(std::map<uint32_t, unsigned int>::const_iterator it = pageSpecificEncodings.begin(); it != pageSpecificEncodings.end(); ++it) {
		E::set32(encodingsArray[it->second-commonEncodings.size()], it->first);
	}

	if (_s_log) fprintf(stderr, "compressed page with %u entries, %lu custom encodings\n", entryCount, pageSpecificEncodings.size());

	pageEnd = pageStart;
	return endIndex-entryCount;  // endIndex for next page
}
static uint64_t calculateEHFrameSize(ld::Internal& state)
{
	bool allCIEs = true;
	uint64_t size = 0;
	for (ld::Internal::FinalSection* sect : state.sections) {
		if ( sect->type() == ld::Section::typeCFI ) {
			for (const ld::Atom* atom : sect->atoms) {
				size += atom->size();
				if ( strcmp(atom->name(), "CIE") != 0 )
					allCIEs = false;
			}
			if ( allCIEs ) {
				// <rdar://problem/21427393> Linker generates eh_frame data even when there's only an unused CIEs in it
				sect->atoms.clear();
				state.sections.erase(std::remove(state.sections.begin(), state.sections.end(), sect), state.sections.end());
				return 0;
			}
		}
	}
	return size;
}
static void getAllUnwindInfos(const ld::Internal& state, std::vector<UnwindEntry>& entries)
{
	uint64_t address = 0;
	for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			// adjust address for atom alignment
			uint64_t alignment = 1 << atom->alignment().powerOf2;
			uint64_t currentModulus = (address % alignment);
			uint64_t requiredModulus = atom->alignment().modulus;
			if ( currentModulus != requiredModulus ) {
				if ( requiredModulus > currentModulus )
					address += requiredModulus-currentModulus;
				else
					address += requiredModulus+alignment-currentModulus;
			}

			if ( atom->beginUnwind() == atom->endUnwind() ) {
				// be sure to mark that we have no unwind info for stuff in the TEXT segment without unwind info
				if ( (atom->section().type() == ld::Section::typeCode) && (atom->size() !=0) ) {
					entries.push_back(UnwindEntry(atom, address, 0, NULL, NULL, NULL, 0));
				}
			}
			else {
				// atom has unwind info(s), add entry for each
				const ld::Atom*	fde = NULL;
				const ld::Atom*	lsda = NULL;
				const ld::Atom*	personalityPointer = NULL;
				for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
					switch ( fit->kind ) {
						case ld::Fixup::kindNoneGroupSubordinateFDE:
							assert(fit->binding == ld::Fixup::bindingDirectlyBound);
							fde = fit->u.target;
							break;
						case ld::Fixup::kindNoneGroupSubordinateLSDA:
							assert(fit->binding == ld::Fixup::bindingDirectlyBound);
							lsda = fit->u.target;
							break;
						case ld::Fixup::kindNoneGroupSubordinatePersonality:
							assert(fit->binding == ld::Fixup::bindingDirectlyBound);
							personalityPointer = fit->u.target;
							assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
							break;
						default:
							break;
					}
				}
				if ( fde != NULL ) {
					// find CIE for this FDE
					const ld::Atom*	cie = NULL;
					for (ld::Fixup::iterator fit = fde->fixupsBegin(), end=fde->fixupsEnd(); fit != end; ++fit) {
						if ( fit->kind != ld::Fixup::kindSubtractTargetAddress )
							continue;
						if ( fit->binding != ld::Fixup::bindingDirectlyBound )
							continue;
						cie = fit->u.target;
						// CIE is only direct subtracted target in FDE
						assert(cie->section().type() == ld::Section::typeCFI);
						break;
					}
					if ( cie != NULL ) {
						// if CIE can have just one fixup - to the personality pointer
						for (ld::Fixup::iterator fit = cie->fixupsBegin(), end=cie->fixupsEnd(); fit != end; ++fit) {
							if ( fit->kind == ld::Fixup::kindSetTargetAddress ) {
								switch ( fit->binding ) {
									case ld::Fixup::bindingsIndirectlyBound:
										personalityPointer = state.indirectBindingTable[fit->u.bindingIndex];
										assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
										break;
									case ld::Fixup::bindingDirectlyBound:
										personalityPointer = fit->u.target;
										assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
										break;
									default:
										break;
								}
							}
						}
					}
				}
				for ( ld::Atom::UnwindInfo::iterator uit = atom->beginUnwind(); uit != atom->endUnwind(); ++uit ) {
					entries.push_back(UnwindEntry(atom, address, uit->startOffset, fde, lsda, personalityPointer, uit->unwindInfo));
				}
			}
			address += atom->size();
		}
	}
}
static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::Internal& state)
{
	// walk every atom and get its unwind info
	std::vector<UnwindEntry> entries;
	getAllUnwindInfos(state, entries);

	// don't generate an __unwind_info section if there is no code in this linkage unit
	if ( entries.size() == 0 )
		return;

	// calculate size of __eh_frame section, so __unwind_info can go before it and page align
	uint64_t ehFrameSize = calculateEHFrameSize(state);

	// create atom that contains the whole compact unwind table
	switch ( opts.architecture() ) {
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			state.addAtom(*new UnwindInfoAtom<x86_64>(entries, ehFrameSize));
			break;
#endif
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			state.addAtom(*new UnwindInfoAtom<x86>(entries, ehFrameSize));
			break;
#endif
#if SUPPORT_ARCH_arm64
		case CPU_TYPE_ARM64:
			state.addAtom(*new UnwindInfoAtom<arm64>(entries, ehFrameSize));
			break;
#endif
#if SUPPORT_ARCH_arm_any
		case CPU_TYPE_ARM:
			if ( opts.armUsesZeroCostExceptions() )
				state.addAtom(*new UnwindInfoAtom<arm>(entries, ehFrameSize));
			break;
#endif
		default:
			assert(0 && "no compact unwind for arch");
	}
}
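
// For relocatable (-r) output the linker does not build __unwind_info.  Instead each unwind
// range is emitted as a macho_compact_unwind_entry in the __LD,__compact_unwind section,
// with fixups tying each entry to its function, personality, and LSDA.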
template <typename A>
class CompactUnwindAtom : public ld::Atom {
public:
											CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom,
															uint32_t startOffset, uint32_t len, uint32_t cui);
											~CompactUnwindAtom() {}

	virtual const ld::File*					file() const			{ return NULL; }
	virtual const char*						name() const			{ return "compact unwind info"; }
	virtual uint64_t						size() const			{ return sizeof(macho_compact_unwind_entry<P>); }
	virtual uint64_t						objectAddress() const	{ return 0; }
	virtual void							copyRawContent(uint8_t buffer[]) const;
	virtual void							setScope(Scope)			{ }
	virtual ld::Fixup::iterator				fixupsBegin() const		{ return (ld::Fixup*)&_fixups[0]; }
	virtual ld::Fixup::iterator				fixupsEnd() const		{ return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
	typedef typename A::P					P;
	typedef typename A::P::E				E;
	typedef typename A::P::uint_t			pint_t;

	const ld::Atom*							_atom;
	const uint32_t							_startOffset;
	const uint32_t							_len;
	const uint32_t							_compactUnwindInfo;
	std::vector<ld::Fixup>					_fixups;

	static ld::Fixup::Kind					_s_pointerKind;
	static ld::Fixup::Kind					_s_pointerStoreKind;
	static ld::Section						_s_section;
};
template <typename A>
ld::Section CompactUnwindAtom<A>::_s_section("__LD", "__compact_unwind", ld::Section::typeDebug);

template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
#if SUPPORT_ARCH_arm64
template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
#endif
template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
template <typename A>
CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom, uint32_t startOffset,
										uint32_t len, uint32_t cui)
	: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
				ld::Atom::scopeTranslationUnit, ld::Atom::typeUnclassified,
				symbolTableNotIn, false, false, false, ld::Atom::Alignment(log2(sizeof(pint_t)))),
	_atom(funcAtom), _startOffset(startOffset), _len(len), _compactUnwindInfo(cui)
{
	_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, funcAtom));
	_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k2of3, ld::Fixup::kindAddAddend, _startOffset));
	_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k3of3, _s_pointerKind));
	// see if atom has subordinate personality function or lsda
	for (ld::Fixup::iterator fit = funcAtom->fixupsBegin(), end=funcAtom->fixupsEnd(); fit != end; ++fit) {
		switch ( fit->kind ) {
			case ld::Fixup::kindNoneGroupSubordinatePersonality:
				assert(fit->binding == ld::Fixup::bindingsIndirectlyBound);
				_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::personalityFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, state.indirectBindingTable[fit->u.bindingIndex]));
				break;
			case ld::Fixup::kindNoneGroupSubordinateLSDA:
				assert(fit->binding == ld::Fixup::bindingDirectlyBound);
				_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::lsdaFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, fit->u.target));
				break;
			default:
				break;
		}
	}
}
template <typename A>
void CompactUnwindAtom<A>::copyRawContent(uint8_t buffer[]) const
{
	macho_compact_unwind_entry<P>* buf = (macho_compact_unwind_entry<P>*)buffer;
	buf->set_codeStart(0);
	buf->set_codeLen(_len);
	buf->set_compactUnwindInfo(_compactUnwindInfo);
	buf->set_personality(0);
	buf->set_lsda(0);
}
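
// The codeStart, personality, and lsda fields are written as zero here; the fixups added in
// the constructor fill them in when the entry is laid out.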
static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, const ld::Atom* atom,
									uint32_t startOffset, uint32_t endOffset, uint32_t cui)
{
	switch ( opts.architecture() ) {
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			state.addAtom(*new CompactUnwindAtom<x86_64>(state, atom, startOffset, endOffset-startOffset, cui));
			break;
#endif
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			state.addAtom(*new CompactUnwindAtom<x86>(state, atom, startOffset, endOffset-startOffset, cui));
			break;
#endif
#if SUPPORT_ARCH_arm64
		case CPU_TYPE_ARM64:
			state.addAtom(*new CompactUnwindAtom<arm64>(state, atom, startOffset, endOffset-startOffset, cui));
			break;
#endif
		case CPU_TYPE_ARM:
			state.addAtom(*new CompactUnwindAtom<arm>(state, atom, startOffset, endOffset-startOffset, cui));
			break;
	}
}
static void makeRelocateableCompactUnwindSection(const Options& opts, ld::Internal& state)
{
	// can't add CompactUnwindAtom atoms while iterating, so pre-scan
	std::vector<const ld::Atom*> atomsWithUnwind;
	for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			if ( atom->beginUnwind() != atom->endUnwind() )
				atomsWithUnwind.push_back(atom);
		}
	}
	// make one CompactUnwindAtom for each compact unwind range in each atom
	for (std::vector<const ld::Atom*>::iterator it = atomsWithUnwind.begin(); it != atomsWithUnwind.end(); ++it) {
		const ld::Atom* atom = *it;
		uint32_t lastOffset = 0;
		uint32_t lastCUE = 0;
		bool first = true;
		for (ld::Atom::UnwindInfo::iterator uit=atom->beginUnwind(); uit != atom->endUnwind(); ++uit) {
			if ( !first ) {
				makeCompactUnwindAtom(opts, state, atom, lastOffset, uit->startOffset, lastCUE);
			}
			lastOffset = uit->startOffset;
			lastCUE = uit->unwindInfo;
			first = false;
		}
		makeCompactUnwindAtom(opts, state, atom, lastOffset, (uint32_t)atom->size(), lastCUE);
	}
}
void doPass(const Options& opts, ld::Internal& state)
{
	if ( opts.outputKind() == Options::kObjectFile )
		makeRelocateableCompactUnwindSection(opts, state);

	else if ( opts.needsUnwindInfoSection() )
		makeFinalLinkedImageCompactUnwindSection(opts, state);
}
} // namespace compact_unwind
} // namespace passes
} // namespace ld