/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <assert.h>
#include <math.h>
#include <mach/machine.h>
#include <mach-o/compact_unwind_encoding.h>

#include <vector>
#include <map>

#include "ld.hpp"
#include "compact_unwind.h"
#include "Architectures.hpp"
#include "MachOFileAbstraction.hpp"
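
// This pass builds the compact unwind information consumed by the OS unwinder.
// For final linked images it creates one "compact unwind info" atom holding the
// entire __TEXT,__unwind_info section: a fixed header, a common-encodings table,
// a personality array, a first-level function index, an LSDA index, and a series
// of second-level pages (regular or compressed).  For object files (-r) it
// instead emits one fixed-size entry per unwind range into __LD,__compact_unwind
// for the final link to consume.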

namespace ld {
namespace passes {
namespace compact_unwind {


struct UnwindEntry {
				UnwindEntry(const ld::Atom* f, uint64_t a, uint32_t o, const ld::Atom* d,
							const ld::Atom* l, const ld::Atom* p, uint32_t en)
					: func(f), fde(d), lsda(l), personalityPointer(p), funcTentAddress(a),
						functionOffset(o), encoding(en) { }
	const ld::Atom*				func;
	const ld::Atom*				fde;
	const ld::Atom*				lsda;
	const ld::Atom*				personalityPointer;
	uint64_t					funcTentAddress;
	uint32_t					functionOffset;
	compact_unwind_encoding_t	encoding;
};

struct LSDAEntry {
	const ld::Atom*		func;
	const ld::Atom*		lsda;
};
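
// An UnwindEntry is created per function (or per unwind range within a
// function) by getAllUnwindInfos().  funcTentAddress is only a tentative layout
// address used to estimate how many entries fit in a compressed page; real
// addresses are applied later through fixups.  LSDAEntry pairs a function with
// its language-specific data area for the LSDA index portion of the header.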

template <typename A>
class UnwindInfoAtom : public ld::Atom {
public:
											UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize);
											~UnwindInfoAtom();

	virtual const ld::File*					file() const			{ return NULL; }
	virtual const char*						name() const			{ return "compact unwind info"; }
	virtual uint64_t						size() const			{ return _headerSize + _pagesSize; }
	virtual uint64_t						objectAddress() const	{ return 0; }
	virtual void							copyRawContent(uint8_t buffer[]) const;
	virtual void							setScope(Scope)			{ }
	virtual ld::Fixup::iterator				fixupsBegin() const		{ return (ld::Fixup*)&_fixups[0]; }
	virtual ld::Fixup::iterator				fixupsEnd() const		{ return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
	typedef typename A::P					P;
	typedef typename A::P::E				E;
	typedef typename A::P::uint_t			pint_t;

	typedef macho_unwind_info_compressed_second_level_page_header<P> CSLP;

	bool			encodingMeansUseDwarf(compact_unwind_encoding_t enc);
	void			compressDuplicates(const std::vector<UnwindEntry>& entries,
									std::vector<UnwindEntry>& uniqueEntries);
	void			makePersonalityIndexes(std::vector<UnwindEntry>& entries,
									std::map<const ld::Atom*, uint32_t>& personalityIndexMap);
	void			findCommonEncoding(const std::vector<UnwindEntry>& entries,
									std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings);
	void			makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex,
									std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap);
	unsigned int	makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
									const std::map<compact_unwind_encoding_t, unsigned int> commonEncodings,
									uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd);
	unsigned int	makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
									unsigned int endIndex, uint8_t*& pageEnd);
	void			addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc);
	void			addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde);
	void			addRegularAddressFixup(uint32_t offset, const ld::Atom* func);
	void			addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde);
	void			addImageOffsetFixup(uint32_t offset, const ld::Atom* targ);
	void			addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend);

	uint8_t*					_pagesForDelete;
	uint8_t*					_pages;
	uint64_t					_pagesSize;
	uint8_t*					_header;
	uint64_t					_headerSize;
	std::vector<ld::Fixup>		_fixups;

	static bool					_s_log;
	static ld::Section			_s_section;
};
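
// The atom's content is built in two pieces: _header (section header, common
// encodings, personality array, first-level index, and LSDA index) and _pages
// (the second-level pages).  Pages are filled in reverse order within the
// _pagesForDelete buffer, so _pages points partway into that allocation and
// size() is simply _headerSize + _pagesSize.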

template <typename A>
bool UnwindInfoAtom<A>::_s_log = false;

template <typename A>
ld::Section UnwindInfoAtom<A>::_s_section("__TEXT", "__unwind_info", ld::Section::typeUnwindInfo);


template <typename A>
UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize)
	: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
				ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
				symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
		_pagesForDelete(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
{
	// build new compressed list by removing entries where next function has same encoding
	std::vector<UnwindEntry> uniqueEntries;
	compressDuplicates(entries, uniqueEntries);

	// reserve room so _fixups vector is not reallocated a bunch of times
	_fixups.reserve(uniqueEntries.size()*3);

	// build personality index, update encodings with personality index
	std::map<const ld::Atom*, uint32_t> personalityIndexMap;
	makePersonalityIndexes(uniqueEntries, personalityIndexMap);
	if ( personalityIndexMap.size() > 3 ) {
		warning("too many personality routines for compact unwind to encode");
		return;
	}

	// put the most common encodings into the common table, but at most 127 of them
	std::map<compact_unwind_encoding_t, unsigned int> commonEncodings;
	findCommonEncoding(uniqueEntries, commonEncodings);

	// build lsda index
	std::map<const ld::Atom*, uint32_t> lsdaIndexOffsetMap;
	std::vector<LSDAEntry> lsdaIndex;
	makeLsdaIndex(uniqueEntries, lsdaIndex, lsdaIndexOffsetMap);

	// calculate worst case size for all unwind info pages when allocating buffer
	const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
	assert(uniqueEntries.size() > 0);
	const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 1;
	_pagesForDelete = (uint8_t*)calloc(pageCount, 4096);
	if ( _pagesForDelete == NULL ) {
		warning("could not allocate space for compact unwind info");
		return;
	}

	// make last second level page smaller so that all other second level pages can be page aligned
	uint32_t maxLastPageSize = 4096 - (ehFrameSize % 4096);
	uint32_t tailPad = 0;
	if ( maxLastPageSize < 128 ) {
		tailPad = maxLastPageSize;
		maxLastPageSize = 4096;
	}

	// fill in pages in reverse order
	const ld::Atom* secondLevelFirstFuncs[pageCount*3];
	uint8_t* secondLevelPagesStarts[pageCount*3];
	unsigned int endIndex = uniqueEntries.size();
	unsigned int secondLevelPageCount = 0;
	uint8_t* pageEnd = &_pagesForDelete[pageCount*4096];
	uint32_t pageSize = maxLastPageSize;
	while ( endIndex > 0 ) {
		endIndex = makeCompressedSecondLevelPage(uniqueEntries, commonEncodings, pageSize, endIndex, pageEnd);
		secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
		secondLevelFirstFuncs[secondLevelPageCount] = uniqueEntries[endIndex].func;
		++secondLevelPageCount;
		pageSize = 4096;  // last page can be odd size, make rest up to 4096 bytes in size
	}
	_pages = pageEnd;
	_pagesSize = &_pagesForDelete[pageCount*4096] - pageEnd;

	// calculate section layout
	const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
	const uint32_t commonEncodingsArrayCount = commonEncodings.size();
	const uint32_t commonEncodingsArraySize = commonEncodingsArrayCount * sizeof(compact_unwind_encoding_t);
	const uint32_t personalityArraySectionOffset = commonEncodingsArraySectionOffset + commonEncodingsArraySize;
	const uint32_t personalityArrayCount = personalityIndexMap.size();
	const uint32_t personalityArraySize = personalityArrayCount * sizeof(uint32_t);
	const uint32_t indexSectionOffset = personalityArraySectionOffset + personalityArraySize;
	const uint32_t indexCount = secondLevelPageCount+1;
	const uint32_t indexSize = indexCount * sizeof(macho_unwind_info_section_header_index_entry<P>);
	const uint32_t lsdaIndexArraySectionOffset = indexSectionOffset + indexSize;
	const uint32_t lsdaIndexArrayCount = lsdaIndex.size();
	const uint32_t lsdaIndexArraySize = lsdaIndexArrayCount * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
	const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;

	// now that we know the size of the header, slide all existing fixups on the pages
	const int32_t fixupSlide = headerEndSectionOffset + (_pagesForDelete - _pages);
	for(std::vector<ld::Fixup>::iterator it = _fixups.begin(); it != _fixups.end(); ++it) {
		it->offsetInAtom += fixupSlide;
	}

	// allocate and fill in section header
	_headerSize = headerEndSectionOffset;
	_header = new uint8_t[_headerSize];
	bzero(_header, _headerSize);
	macho_unwind_info_section_header<P>* sectionHeader = (macho_unwind_info_section_header<P>*)_header;
	sectionHeader->set_version(UNWIND_SECTION_VERSION);
	sectionHeader->set_commonEncodingsArraySectionOffset(commonEncodingsArraySectionOffset);
	sectionHeader->set_commonEncodingsArrayCount(commonEncodingsArrayCount);
	sectionHeader->set_personalityArraySectionOffset(personalityArraySectionOffset);
	sectionHeader->set_personalityArrayCount(personalityArrayCount);
	sectionHeader->set_indexSectionOffset(indexSectionOffset);
	sectionHeader->set_indexCount(indexCount);

	// copy common encodings
	uint32_t* commonEncodingsTable = (uint32_t*)&_header[commonEncodingsArraySectionOffset];
	for (std::map<uint32_t, unsigned int>::iterator it=commonEncodings.begin(); it != commonEncodings.end(); ++it)
		E::set32(commonEncodingsTable[it->second], it->first);

	// make references for personality entries
	uint32_t* personalityArray = (uint32_t*)&_header[sectionHeader->personalityArraySectionOffset()];
	for (std::map<const ld::Atom*, uint32_t>::iterator it=personalityIndexMap.begin(); it != personalityIndexMap.end(); ++it) {
		uint32_t offset = (uint8_t*)&personalityArray[it->second-1] - _header;
		this->addImageOffsetFixup(offset, it->first);
	}

	// build first level index and references
	macho_unwind_info_section_header_index_entry<P>* indexTable = (macho_unwind_info_section_header_index_entry<P>*)&_header[indexSectionOffset];
	uint32_t refOffset;
	for (unsigned int i=0; i < secondLevelPageCount; ++i) {
		unsigned int reverseIndex = secondLevelPageCount - 1 - i;
		indexTable[i].set_functionOffset(0);
		indexTable[i].set_secondLevelPagesSectionOffset(secondLevelPagesStarts[reverseIndex]-_pages+headerEndSectionOffset);
		indexTable[i].set_lsdaIndexArraySectionOffset(lsdaIndexOffsetMap[secondLevelFirstFuncs[reverseIndex]]+lsdaIndexArraySectionOffset);
		refOffset = (uint8_t*)&indexTable[i] - _header;
		this->addImageOffsetFixup(refOffset, secondLevelFirstFuncs[reverseIndex]);
	}
	indexTable[secondLevelPageCount].set_functionOffset(0);
	indexTable[secondLevelPageCount].set_secondLevelPagesSectionOffset(0);
	indexTable[secondLevelPageCount].set_lsdaIndexArraySectionOffset(lsdaIndexArraySectionOffset+lsdaIndexArraySize);
	refOffset = (uint8_t*)&indexTable[secondLevelPageCount] - _header;
	this->addImageOffsetFixupPlusAddend(refOffset, entries.back().func, entries.back().func->size()+1);

	// build lsda references
	uint32_t lsdaEntrySectionOffset = lsdaIndexArraySectionOffset;
	for (std::vector<LSDAEntry>::iterator it = lsdaIndex.begin(); it != lsdaIndex.end(); ++it) {
		this->addImageOffsetFixup(lsdaEntrySectionOffset, it->func);
		this->addImageOffsetFixup(lsdaEntrySectionOffset+4, it->lsda);
		lsdaEntrySectionOffset += sizeof(unwind_info_section_header_lsda_index_entry);
	}
}
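
// Illustrative layout arithmetic (hypothetical numbers, assuming the 28-byte
// unwind_info_section_header from <mach-o/compact_unwind_encoding.h>): with
// 2 common encodings, 1 personality routine, 2 second-level pages, and 3 LSDAs
// the offsets computed above would be:
//   commonEncodingsArraySectionOffset =  28   (2 * 4  =  8 bytes)
//   personalityArraySectionOffset     =  36   (1 * 4  =  4 bytes)
//   indexSectionOffset                =  40   (3 * 12 = 36 bytes, pageCount+1 entries)
//   lsdaIndexArraySectionOffset       =  76   (3 * 8  = 24 bytes)
//   headerEndSectionOffset            = 100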

template <typename A>
UnwindInfoAtom<A>::~UnwindInfoAtom()
{
	free(_pagesForDelete);
}

template <typename A>
void UnwindInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
{
	// content is in two parts
	memcpy(buffer, _header, _headerSize);
	memcpy(&buffer[_headerSize], _pages, _pagesSize);
}

template <>
bool UnwindInfoAtom<x86>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
	return ((enc & UNWIND_X86_MODE_MASK) == UNWIND_X86_MODE_DWARF);
}

template <>
bool UnwindInfoAtom<x86_64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
	return ((enc & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
}

#if SUPPORT_ARCH_arm64
template <>
bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
	return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
}
#endif
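
// Each architecture reserves one "mode" value in its encoding space meaning
// "no compact encoding possible; use the DWARF FDE in __eh_frame instead".
// Such entries can never be commoned or merged, because each one must carry
// its own FDE section offset.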

template <typename A>
void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
{
	// build new list removing entries where next function has same encoding
	uniqueEntries.reserve(entries.size());
	UnwindEntry last(NULL, 0, 0, NULL, NULL, NULL, 0xFFFFFFFF);
	for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
		const UnwindEntry& next = *it;
		bool newNeedsDwarf = encodingMeansUseDwarf(next.encoding);
		// remove entries which have same encoding and personalityPointer as last one
		if ( newNeedsDwarf || (next.encoding != last.encoding) || (next.personalityPointer != last.personalityPointer)
					|| (next.lsda != NULL) || (last.lsda != NULL) ) {
			uniqueEntries.push_back(next);
		}
		last = next;
	}
	if (_s_log) fprintf(stderr, "compressDuplicates() entries.size()=%lu, uniqueEntries.size()=%lu\n",
						entries.size(), uniqueEntries.size());
}
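
// Example: three consecutive functions sharing the same frame-pointer encoding,
// with no LSDA and no personality, collapse to one entry here; the unwinder only
// needs to find the encoding in effect at a given PC, not one entry per function.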

template <typename A>
void UnwindInfoAtom<A>::makePersonalityIndexes(std::vector<UnwindEntry>& entries, std::map<const ld::Atom*, uint32_t>& personalityIndexMap)
{
	for(std::vector<UnwindEntry>::iterator it=entries.begin(); it != entries.end(); ++it) {
		if ( it->personalityPointer != NULL ) {
			std::map<const ld::Atom*, uint32_t>::iterator pos = personalityIndexMap.find(it->personalityPointer);
			if ( pos == personalityIndexMap.end() ) {
				const uint32_t nextIndex = personalityIndexMap.size() + 1;
				personalityIndexMap[it->personalityPointer] = nextIndex;
			}
			uint32_t personalityIndex = personalityIndexMap[it->personalityPointer];
			it->encoding |= (personalityIndex << (__builtin_ctz(UNWIND_PERSONALITY_MASK)) );
		}
	}
	if (_s_log) fprintf(stderr, "makePersonalityIndexes() %lu personality routines used\n", personalityIndexMap.size());
}

template <typename A>
void UnwindInfoAtom<A>::findCommonEncoding(const std::vector<UnwindEntry>& entries,
											std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings)
{
	// scan infos to get frequency counts for each encoding
	std::map<compact_unwind_encoding_t, unsigned int> encodingsUsed;
	unsigned int mostCommonEncodingUsageCount = 0;
	for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
		// never put dwarf into common table
		if ( encodingMeansUseDwarf(it->encoding) )
			continue;
		std::map<compact_unwind_encoding_t, unsigned int>::iterator pos = encodingsUsed.find(it->encoding);
		if ( pos == encodingsUsed.end() ) {
			encodingsUsed[it->encoding] = 1;
		}
		else {
			encodingsUsed[it->encoding] += 1;
			if ( mostCommonEncodingUsageCount < encodingsUsed[it->encoding] )
				mostCommonEncodingUsageCount = encodingsUsed[it->encoding];
		}
	}
	// put the most common encodings into the common table, but at most 127 of them
	for(unsigned int usages=mostCommonEncodingUsageCount; usages > 1; --usages) {
		for (std::map<compact_unwind_encoding_t, unsigned int>::iterator euit=encodingsUsed.begin(); euit != encodingsUsed.end(); ++euit) {
			if ( euit->second == usages ) {
				unsigned int sz = commonEncodings.size();
				if ( sz < 127 ) {
					commonEncodings[euit->first] = sz;
				}
			}
		}
	}
	if (_s_log) fprintf(stderr, "findCommonEncoding() %lu common encodings found\n", commonEncodings.size());
}

template <typename A>
void UnwindInfoAtom<A>::makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex, std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap)
{
	for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
		lsdaIndexOffsetMap[it->func] = lsdaIndex.size() * sizeof(unwind_info_section_header_lsda_index_entry);
		if ( it->lsda != NULL ) {
			LSDAEntry entry;
			entry.func = it->func;
			entry.lsda = it->lsda;
			lsdaIndex.push_back(entry);
		}
	}
	if (_s_log) fprintf(stderr, "makeLsdaIndex() %lu LSDAs found\n", lsdaIndex.size());
}

template <>
void UnwindInfoAtom<x86>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

#if SUPPORT_ARCH_arm64
template <>
void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}
#endif

template <>
void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

#if SUPPORT_ARCH_arm64
template <>
void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}
#endif

template <>
void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

#if SUPPORT_ARCH_arm64
template <>
void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}
#endif

template <>
void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

#if SUPPORT_ARCH_arm64
template <>
void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}
#endif

template <>
void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

#if SUPPORT_ARCH_arm64
template <>
void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}
#endif

template <>
void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

#if SUPPORT_ARCH_arm64
template <>
void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}
#endif

template <typename A>
unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
														unsigned int endIndex, uint8_t*& pageEnd)
{
	const unsigned int maxEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
	const unsigned int entriesToAdd = ((endIndex > maxEntriesPerPage) ? maxEntriesPerPage : endIndex);
	uint8_t* pageStart = pageEnd
						- entriesToAdd*sizeof(unwind_info_regular_second_level_entry)
						- sizeof(unwind_info_regular_second_level_page_header);
	macho_unwind_info_regular_second_level_page_header<P>* page = (macho_unwind_info_regular_second_level_page_header<P>*)pageStart;
	page->set_kind(UNWIND_SECOND_LEVEL_REGULAR);
	page->set_entryPageOffset(sizeof(macho_unwind_info_regular_second_level_page_header<P>));
	page->set_entryCount(entriesToAdd);
	macho_unwind_info_regular_second_level_entry<P>* entryTable = (macho_unwind_info_regular_second_level_entry<P>*)(pageStart + page->entryPageOffset());
	for (unsigned int i=0; i < entriesToAdd; ++i) {
		const UnwindEntry& info = uniqueInfos[endIndex-entriesToAdd+i];
		entryTable[i].set_functionOffset(0);
		entryTable[i].set_encoding(info.encoding);
		// add fixup for address part of entry
		uint32_t offset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
		this->addRegularAddressFixup(offset, info.func);
		if ( encodingMeansUseDwarf(info.encoding) ) {
			// add fixup for dwarf offset part of page specific encoding
			uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
			this->addRegularFDEOffsetFixup(encOffset, info.fde);
		}
	}
	if (_s_log) fprintf(stderr, "regular page with %u entries\n", entriesToAdd);
	pageEnd = pageStart;
	return endIndex - entriesToAdd;
}
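
// A regular page is a small header followed by 8-byte {functionOffset, encoding}
// entries.  functionOffset is written as zero here and patched through the
// image-offset fixup added above once final addresses are known; the encoding is
// stored directly, plus a low-24-bit FDE-offset fixup when it defers to DWARF.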

template <typename A>
unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
													const std::map<compact_unwind_encoding_t, unsigned int> commonEncodings,
													uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd)
{
	if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(pageSize=%u, endIndex=%u)\n", pageSize, endIndex);
	// first pass calculates how many compressed entries we could fit in this sized page
	// keep adding entries to page until:
	//  1) encoding table plus entry table plus header exceed page size
	//  2) the file offset delta from the first to last function > 24 bits
	//  3) custom encoding index reaches 255
	//  4) run out of uniqueInfos to encode
	std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
	uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
	std::vector<uint8_t> encodingIndexes;
	int index = endIndex-1;
	int entryCount = 0;
	uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
	bool canDo = true;
	while ( canDo && (index >= 0) ) {
		const UnwindEntry& info = uniqueInfos[index--];
		// compute encoding index
		unsigned int encodingIndex;
		std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
		if ( pos != commonEncodings.end() ) {
			encodingIndex = pos->second;
		}
		else {
			// no common entry, so add one on this page
			uint32_t encoding = info.encoding;
			if ( encodingMeansUseDwarf(encoding) ) {
				// make unique pseudo encoding so this dwarf entry gets its own encoding slot
				encoding += (index+1);
			}
			std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
			if ( ppos != pageSpecificEncodings.end() ) {
				encodingIndex = ppos->second;
			}
			else {
				encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
				if ( encodingIndex <= 255 ) {
					pageSpecificEncodings[encoding] = encodingIndex;
					if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): pageSpecificEncodings[%d]=0x%08X\n", encodingIndex, encoding);
				}
				else {
					canDo = false; // case 3)
					if (_s_log) fprintf(stderr, "end of compressed page with %u entries, %lu custom encodings because too many custom encodings\n",
										entryCount, pageSpecificEncodings.size());
				}
			}
		}
		if ( canDo )
			encodingIndexes.push_back(encodingIndex);
		// compute function offset
		uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
		if ( funcOffsetWithInPage > 0x00FFFF00 ) {
			// don't use 0x00FFFFFF because addresses may vary after atoms are laid out again
			canDo = false; // case 2)
			if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
		}
		else {
			++entryCount;
		}
		// check room for entry
		if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
			canDo = false; // case 1)
			--entryCount;
			if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
		}
		//if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
	}

	// check for cases where it would be better to use a regular (non-compressed) page
	const unsigned int compressPageUsed = sizeof(unwind_info_compressed_second_level_page_header)
										+ pageSpecificEncodings.size()*sizeof(uint32_t)
										+ entryCount*sizeof(uint32_t);
	if ( (compressPageUsed < (pageSize-4) && (index >= 0) ) ) {
		const int regularEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
		if ( entryCount < regularEntriesPerPage ) {
			return makeRegularSecondLevelPage(uniqueInfos, pageSize, endIndex, pageEnd);
		}
	}

	// check if we need any padding because adding another entry would take 8 bytes but only have room for 4
	uint32_t pad = 0;
	if ( compressPageUsed == (pageSize-4) )
		pad = 4;

	// second pass fills in page
	uint8_t* pageStart = pageEnd - compressPageUsed - pad;
	CSLP* page = (CSLP*)pageStart;
	page->set_kind(UNWIND_SECOND_LEVEL_COMPRESSED);
	page->set_entryPageOffset(sizeof(CSLP));
	page->set_entryCount(entryCount);
	page->set_encodingsPageOffset(page->entryPageOffset()+entryCount*sizeof(uint32_t));
	page->set_encodingsCount(pageSpecificEncodings.size());
	uint32_t* const encodingsArray = (uint32_t*)&pageStart[page->encodingsPageOffset()];
	// fill in entry table
	uint32_t* const entriesArray = (uint32_t*)&pageStart[page->entryPageOffset()];
	const ld::Atom* firstFunc = uniqueInfos[endIndex-entryCount].func;
	for(unsigned int i=endIndex-entryCount; i < endIndex; ++i) {
		const UnwindEntry& info = uniqueInfos[i];
		uint8_t encodingIndex;
		if ( encodingMeansUseDwarf(info.encoding) ) {
			// dwarf entries are always in page specific encodings
			encodingIndex = pageSpecificEncodings[info.encoding+i];
		}
		else {
			std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
			if ( pos != commonEncodings.end() )
				encodingIndex = pos->second;
			else
				encodingIndex = pageSpecificEncodings[info.encoding];
		}
		uint32_t entryIndex = i - endIndex + entryCount;
		E::set32(entriesArray[entryIndex], encodingIndex << 24);
		// add fixup for address part of entry
		uint32_t offset = (uint8_t*)(&entriesArray[entryIndex]) - _pagesForDelete;
		this->addCompressedAddressOffsetFixup(offset, info.func, firstFunc);
		if ( encodingMeansUseDwarf(info.encoding) ) {
			// add fixup for dwarf offset part of page specific encoding
			uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pagesForDelete;
			this->addCompressedEncodingFixup(encOffset, info.fde);
		}
	}
	// fill in encodings table
	for(std::map<uint32_t, unsigned int>::const_iterator it = pageSpecificEncodings.begin(); it != pageSpecificEncodings.end(); ++it) {
		E::set32(encodingsArray[it->second-commonEncodings.size()], it->first);
	}

	if (_s_log) fprintf(stderr, "compressed page with %u entries, %lu custom encodings\n", entryCount, pageSpecificEncodings.size());

	// update pageEnd so the next page is placed before this one
	pageEnd = pageStart;
	return endIndex-entryCount;  // endIndex for next page
}
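
// A compressed entry is a single uint32_t: the top 8 bits index the logical
// concatenation of the common-encodings table and this page's own encodings
// array, and the low 24 bits hold the function's offset from the page's first
// function (stored via the Low24of32 fixups).  That is why a page must end once
// the offset delta no longer fits in 24 bits or the encoding index reaches 255.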

static uint64_t calculateEHFrameSize(const ld::Internal& state)
{
	uint64_t size = 0;
	for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() == ld::Section::typeCFI ) {
			for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
				size += (*ait)->size();
			}
		}
	}
	return size;
}

static void getAllUnwindInfos(const ld::Internal& state, std::vector<UnwindEntry>& entries)
{
	uint64_t address = 0;
	for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			// adjust address for atom alignment
			uint64_t alignment = 1 << atom->alignment().powerOf2;
			uint64_t currentModulus = (address % alignment);
			uint64_t requiredModulus = atom->alignment().modulus;
			if ( currentModulus != requiredModulus ) {
				if ( requiredModulus > currentModulus )
					address += requiredModulus-currentModulus;
				else
					address += requiredModulus+alignment-currentModulus;
			}

			if ( atom->beginUnwind() == atom->endUnwind() ) {
				// be sure to mark that we have no unwind info for stuff in the TEXT segment without unwind info
				if ( (atom->section().type() == ld::Section::typeCode) && (atom->size() != 0) ) {
					entries.push_back(UnwindEntry(atom, address, 0, NULL, NULL, NULL, 0));
				}
			}
			else {
				// atom has unwind info(s), add entry for each
				const ld::Atom* fde = NULL;
				const ld::Atom* lsda = NULL;
				const ld::Atom* personalityPointer = NULL;
				for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
					switch ( fit->kind ) {
						case ld::Fixup::kindNoneGroupSubordinateFDE:
							assert(fit->binding == ld::Fixup::bindingDirectlyBound);
							fde = fit->u.target;
							break;
						case ld::Fixup::kindNoneGroupSubordinateLSDA:
							assert(fit->binding == ld::Fixup::bindingDirectlyBound);
							lsda = fit->u.target;
							break;
						case ld::Fixup::kindNoneGroupSubordinatePersonality:
							assert(fit->binding == ld::Fixup::bindingDirectlyBound);
							personalityPointer = fit->u.target;
							assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
							break;
						default:
							break;
					}
				}
				if ( fde != NULL ) {
					// find CIE for this FDE
					const ld::Atom* cie = NULL;
					for (ld::Fixup::iterator fit = fde->fixupsBegin(), end=fde->fixupsEnd(); fit != end; ++fit) {
						if ( fit->kind != ld::Fixup::kindSubtractTargetAddress )
							continue;
						if ( fit->binding != ld::Fixup::bindingDirectlyBound )
							continue;
						cie = fit->u.target;
						// CIE is only direct subtracted target in FDE
						assert(cie->section().type() == ld::Section::typeCFI);
					}
					if ( cie != NULL ) {
						// CIE can have just one fixup - to the personality pointer
						for (ld::Fixup::iterator fit = cie->fixupsBegin(), end=cie->fixupsEnd(); fit != end; ++fit) {
							if ( fit->kind == ld::Fixup::kindSetTargetAddress ) {
								switch ( fit->binding ) {
									case ld::Fixup::bindingsIndirectlyBound:
										personalityPointer = state.indirectBindingTable[fit->u.bindingIndex];
										assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
										break;
									case ld::Fixup::bindingDirectlyBound:
										personalityPointer = fit->u.target;
										assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
										break;
									default:
										break;
								}
							}
						}
					}
				}
				for ( ld::Atom::UnwindInfo::iterator uit = atom->beginUnwind(); uit != atom->endUnwind(); ++uit ) {
					entries.push_back(UnwindEntry(atom, address, uit->startOffset, fde, lsda, personalityPointer, uit->unwindInfo));
				}
			}
			address += atom->size();
		}
	}
}
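
// Note: the addresses accumulated above are tentative (atoms have not been
// laid out yet), which is why makeCompressedSecondLevelPage() compares offset
// deltas against 0x00FFFF00 rather than the full 24-bit limit.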

static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::Internal& state)
{
	// walk every atom and get its unwind info
	std::vector<UnwindEntry> entries;
	getAllUnwindInfos(state, entries);

	// don't generate an __unwind_info section if there is no code in this linkage unit
	if ( entries.size() == 0 )
		return;

	// calculate size of __eh_frame section, so __unwind_info can go before it and page align
	uint64_t ehFrameSize = calculateEHFrameSize(state);

	// create atom that contains the whole compact unwind table
	switch ( opts.architecture() ) {
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			state.addAtom(*new UnwindInfoAtom<x86_64>(entries, ehFrameSize));
			break;
#endif
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			state.addAtom(*new UnwindInfoAtom<x86>(entries, ehFrameSize));
			break;
#endif
#if SUPPORT_ARCH_arm64
		case CPU_TYPE_ARM64:
			state.addAtom(*new UnwindInfoAtom<arm64>(entries, ehFrameSize));
			break;
#endif
		default:
			assert(0 && "no compact unwind for arch");
	}
}
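
// The remainder of this file handles the -r (object file) case: rather than
// building a finished __unwind_info section, each unwind range is written as a
// fixed-size macho_compact_unwind_entry in __LD,__compact_unwind, for the
// final link to read back and process with the pass above.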

template <typename A>
class CompactUnwindAtom : public ld::Atom {
public:
											CompactUnwindAtom(ld::Internal& state, const ld::Atom* funcAtom,
															uint32_t startOffset, uint32_t len, uint32_t cui);
											~CompactUnwindAtom() {}

	virtual const ld::File*					file() const			{ return NULL; }
	virtual const char*						name() const			{ return "compact unwind info"; }
	virtual uint64_t						size() const			{ return sizeof(macho_compact_unwind_entry<P>); }
	virtual uint64_t						objectAddress() const	{ return 0; }
	virtual void							copyRawContent(uint8_t buffer[]) const;
	virtual void							setScope(Scope)			{ }
	virtual ld::Fixup::iterator				fixupsBegin() const		{ return (ld::Fixup*)&_fixups[0]; }
	virtual ld::Fixup::iterator				fixupsEnd() const		{ return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
	typedef typename A::P					P;
	typedef typename A::P::E				E;
	typedef typename A::P::uint_t			pint_t;

	const ld::Atom*							_atom;
	const uint32_t							_startOffset;
	const uint32_t							_len;
	const uint32_t							_compactUnwindInfo;
	std::vector<ld::Fixup>					_fixups;

	static ld::Fixup::Kind					_s_pointerKind;
	static ld::Fixup::Kind					_s_pointerStoreKind;
	static ld::Section						_s_section;
};

template <typename A>
ld::Section CompactUnwindAtom<A>::_s_section("__LD", "__compact_unwind", ld::Section::typeDebug);

template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
#if SUPPORT_ARCH_arm64
template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
#endif

template <typename A>
CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state, const ld::Atom* funcAtom, uint32_t startOffset,
										uint32_t len, uint32_t cui)
	: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
				ld::Atom::scopeTranslationUnit, ld::Atom::typeUnclassified,
				symbolTableNotIn, false, false, false, ld::Atom::Alignment(log2(sizeof(pint_t)))),
		_atom(funcAtom), _startOffset(startOffset), _len(len), _compactUnwindInfo(cui)
{
	_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, funcAtom));
	_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k2of3, ld::Fixup::kindAddAddend, _startOffset));
	_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k3of3, _s_pointerKind));
	// see if atom has subordinate personality function or lsda
	for (ld::Fixup::iterator fit = funcAtom->fixupsBegin(), end=funcAtom->fixupsEnd(); fit != end; ++fit) {
		switch ( fit->kind ) {
			case ld::Fixup::kindNoneGroupSubordinatePersonality:
				assert(fit->binding == ld::Fixup::bindingsIndirectlyBound);
				_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::personalityFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, state.indirectBindingTable[fit->u.bindingIndex]));
				break;
			case ld::Fixup::kindNoneGroupSubordinateLSDA:
				assert(fit->binding == ld::Fixup::bindingDirectlyBound);
				_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::lsdaFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, fit->u.target));
				break;
			default:
				break;
		}
	}
}
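
// All pointer-sized fields (codeStart, personality, lsda) are expressed as
// fixups against their target atoms, so copyRawContent() below only stores the
// two literal fields: the code length and the compact encoding itself.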

template <typename A>
void CompactUnwindAtom<A>::copyRawContent(uint8_t buffer[]) const
{
	macho_compact_unwind_entry<P>* buf = (macho_compact_unwind_entry<P>*)buffer;
	buf->set_codeStart(0);
	buf->set_codeLen(_len);
	buf->set_compactUnwindInfo(_compactUnwindInfo);
	buf->set_personality(0);
	buf->set_lsda(0);
}

static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, const ld::Atom* atom,
									uint32_t startOffset, uint32_t endOffset, uint32_t cui)
{
	switch ( opts.architecture() ) {
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			state.addAtom(*new CompactUnwindAtom<x86_64>(state, atom, startOffset, endOffset-startOffset, cui));
			break;
#endif
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			state.addAtom(*new CompactUnwindAtom<x86>(state, atom, startOffset, endOffset-startOffset, cui));
			break;
#endif
#if SUPPORT_ARCH_arm64
		case CPU_TYPE_ARM64:
			state.addAtom(*new CompactUnwindAtom<arm64>(state, atom, startOffset, endOffset-startOffset, cui));
			break;
#endif
	}
}

static void makeRelocateableCompactUnwindSection(const Options& opts, ld::Internal& state)
{
	// can't add CompactUnwindAtom atoms while iterating, so pre-scan
	std::vector<const ld::Atom*> atomsWithUnwind;
	for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			if ( atom->beginUnwind() != atom->endUnwind() )
				atomsWithUnwind.push_back(atom);
		}
	}
	// make one CompactUnwindAtom for each compact unwind range in each atom
	for (std::vector<const ld::Atom*>::iterator it = atomsWithUnwind.begin(); it != atomsWithUnwind.end(); ++it) {
		const ld::Atom* atom = *it;
		uint32_t lastOffset = 0;
		uint32_t lastCUE = 0;
		bool first = true;
		for (ld::Atom::UnwindInfo::iterator uit=atom->beginUnwind(); uit != atom->endUnwind(); ++uit) {
			if ( !first )
				makeCompactUnwindAtom(opts, state, atom, lastOffset, uit->startOffset, lastCUE);
			lastOffset = uit->startOffset;
			lastCUE = uit->unwindInfo;
			first = false;
		}
		makeCompactUnwindAtom(opts, state, atom, lastOffset, (uint32_t)atom->size(), lastCUE);
	}
}

void doPass(const Options& opts, ld::Internal& state)
{
	if ( opts.outputKind() == Options::kObjectFile )
		makeRelocateableCompactUnwindSection(opts, state);
	else if ( opts.needsUnwindInfoSection() )
		makeFinalLinkedImageCompactUnwindSection(opts, state);
}


} // namespace compact_unwind
} // namespace passes
} // namespace ld