/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <dlfcn.h>
#include <mach/machine.h>
#include <mach-o/compact_unwind_encoding.h>

#include <vector>
#include <map>

#include "ld.hpp"
#include "compact_unwind.h"
#include "Architectures.hpp"
#include "MachOFileAbstraction.hpp"


namespace ld {
namespace passes {
namespace compact_unwind {

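// One UnwindEntry is recorded per unwind range gathered from the atoms: the
// function, its FDE/LSDA/personality atoms (if any), the tentative address
// used when grouping entries into pages, and the compact encoding.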
struct UnwindEntry {
    UnwindEntry(const ld::Atom* f, uint64_t a, uint32_t o, const ld::Atom* d,
                const ld::Atom* l, const ld::Atom* p, uint32_t en)
        : func(f), fde(d), lsda(l), personalityPointer(p), funcTentAddress(a),
          functionOffset(o), encoding(en) { }
    const ld::Atom*            func;
    const ld::Atom*            fde;
    const ld::Atom*            lsda;
    const ld::Atom*            personalityPointer;
    uint64_t                   funcTentAddress;
    uint32_t                   functionOffset;
    compact_unwind_encoding_t  encoding;
};

struct LSDAEntry {
    const ld::Atom*  func;
    const ld::Atom*  lsda;
};

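// UnwindInfoAtom is the synthesized atom whose content becomes the
// __TEXT,__unwind_info section: a fixed header, the common-encodings and
// personality arrays, a first-level function index, an LSDA index, and one or
// more second-level pages (compressed where possible, regular otherwise).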
template <typename A>
class UnwindInfoAtom : public ld::Atom {
public:
    UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize);
    ~UnwindInfoAtom();

    virtual const ld::File*       file() const { return NULL; }
    virtual bool                  translationUnitSource(const char** dir, const char**) const
                                      { return false; }
    virtual const char*           name() const { return "compact unwind info"; }
    virtual uint64_t              size() const { return _headerSize+_pagesSize; }
    virtual uint64_t              objectAddress() const { return 0; }
    virtual void                  copyRawContent(uint8_t buffer[]) const;
    virtual void                  setScope(Scope) { }
    virtual ld::Fixup::iterator   fixupsBegin() const { return (ld::Fixup*)&_fixups[0]; }
    virtual ld::Fixup::iterator   fixupsEnd() const { return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
    typedef typename A::P          P;
    typedef typename A::P::E       E;
    typedef typename A::P::uint_t  pint_t;

    typedef macho_unwind_info_compressed_second_level_page_header<P> CSLP;

    bool           encodingMeansUseDwarf(compact_unwind_encoding_t enc);
    void           compressDuplicates(const std::vector<UnwindEntry>& entries,
                                      std::vector<UnwindEntry>& uniqueEntries);
    void           makePersonalityIndexes(std::vector<UnwindEntry>& entries,
                                          std::map<const ld::Atom*, uint32_t>& personalityIndexMap);
    void           findCommonEncoding(const std::vector<UnwindEntry>& entries,
                                      std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings);
    void           makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex,
                                 std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap);
    unsigned int   makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
                                                 const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
                                                 uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd);
    unsigned int   makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
                                              unsigned int endIndex, uint8_t*& pageEnd);
    void           addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc);
    void           addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde);
    void           addRegularAddressFixup(uint32_t offset, const ld::Atom* func);
    void           addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde);
    void           addImageOffsetFixup(uint32_t offset, const ld::Atom* targ);
    void           addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend);

    uint8_t*                  _pagesForDelete;
    uint8_t*                  _pages;
    uint64_t                  _pagesSize;
    uint8_t*                  _header;
    uint64_t                  _headerSize;
    std::vector<ld::Fixup>    _fixups;

    static bool               _s_log;
    static ld::Section        _s_section;
};

template <typename A>
bool UnwindInfoAtom<A>::_s_log = false;

template <typename A>
ld::Section UnwindInfoAtom<A>::_s_section("__TEXT", "__unwind_info", ld::Section::typeUnwindInfo);

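// The constructor builds the section in two pieces: second-level pages are
// written back-to-front into a scratch buffer (so the final, possibly smaller,
// page lands at the very end), then the header and index tables are sized and
// filled in, and all page fixup offsets are slid by the header size.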
template <typename A>
UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize)
    : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
               ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
               symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
      _pagesForDelete(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
{
    // build new compressed list by removing entries where next function has same encoding
    std::vector<UnwindEntry> uniqueEntries;
    compressDuplicates(entries, uniqueEntries);

    // reserve room so _fixups vector is not reallocated a bunch of times
    _fixups.reserve(uniqueEntries.size()*3);

    // build personality index, update encodings with personality index
    std::map<const ld::Atom*, uint32_t> personalityIndexMap;
    makePersonalityIndexes(uniqueEntries, personalityIndexMap);
    if ( personalityIndexMap.size() > 3 ) {
        warning("too many personality routines for compact unwind to encode");
        return;
    }

    // put the most common encodings into the common table, but at most 127 of them
    std::map<compact_unwind_encoding_t, unsigned int> commonEncodings;
    findCommonEncoding(uniqueEntries, commonEncodings);

    // build lsda index
    std::map<const ld::Atom*, uint32_t> lsdaIndexOffsetMap;
    std::vector<LSDAEntry> lsdaIndex;
    makeLsdaIndex(uniqueEntries, lsdaIndex, lsdaIndexOffsetMap);


    // calculate worst case size for all unwind info pages when allocating buffer
    const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
    assert(uniqueEntries.size() > 0);
    const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 1;
    _pagesForDelete = (uint8_t*)calloc(pageCount,4096);
    if ( _pagesForDelete == NULL ) {
        warning("could not allocate space for compact unwind info");
        return;
    }

    // make last second level page smaller so that all other second level pages can be page aligned
    uint32_t maxLastPageSize = 4096 - (ehFrameSize % 4096);
    uint32_t tailPad = 0;
    if ( maxLastPageSize < 128 ) {
        tailPad = maxLastPageSize;
        maxLastPageSize = 4096;
    }

    // fill in pages in reverse order
    const ld::Atom* secondLevelFirstFuncs[pageCount*3];
    uint8_t* secondLevelPagesStarts[pageCount*3];
    unsigned int endIndex = uniqueEntries.size();
    unsigned int secondLevelPageCount = 0;
    uint8_t* pageEnd = &_pagesForDelete[pageCount*4096];
    uint32_t pageSize = maxLastPageSize;
    while ( endIndex > 0 ) {
        endIndex = makeCompressedSecondLevelPage(uniqueEntries, commonEncodings, pageSize, endIndex, pageEnd);
        secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
        secondLevelFirstFuncs[secondLevelPageCount] = uniqueEntries[endIndex].func;
        ++secondLevelPageCount;
        pageSize = 4096;  // last page can be odd size, make rest up to 4096 bytes in size
    }
    _pages = pageEnd;
    _pagesSize = &_pagesForDelete[pageCount*4096] - pageEnd;


    // calculate section layout
    const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
    const uint32_t commonEncodingsArrayCount = commonEncodings.size();
    const uint32_t commonEncodingsArraySize = commonEncodingsArrayCount * sizeof(compact_unwind_encoding_t);
    const uint32_t personalityArraySectionOffset = commonEncodingsArraySectionOffset + commonEncodingsArraySize;
    const uint32_t personalityArrayCount = personalityIndexMap.size();
    const uint32_t personalityArraySize = personalityArrayCount * sizeof(uint32_t);
    const uint32_t indexSectionOffset = personalityArraySectionOffset + personalityArraySize;
    const uint32_t indexCount = secondLevelPageCount+1;
    const uint32_t indexSize = indexCount * sizeof(macho_unwind_info_section_header_index_entry<P>);
    const uint32_t lsdaIndexArraySectionOffset = indexSectionOffset + indexSize;
    const uint32_t lsdaIndexArrayCount = lsdaIndex.size();
    const uint32_t lsdaIndexArraySize = lsdaIndexArrayCount * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
    const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;

    // now that we know the size of the header, slide all existing fixups on the pages
    const int32_t fixupSlide = headerEndSectionOffset + (_pagesForDelete - _pages);
    for(std::vector<ld::Fixup>::iterator it = _fixups.begin(); it != _fixups.end(); ++it) {
        it->offsetInAtom += fixupSlide;
    }

    // allocate and fill in section header
    _headerSize = headerEndSectionOffset;
    _header = new uint8_t[_headerSize];
    bzero(_header, _headerSize);
    macho_unwind_info_section_header<P>* sectionHeader = (macho_unwind_info_section_header<P>*)_header;
    sectionHeader->set_version(UNWIND_SECTION_VERSION);
    sectionHeader->set_commonEncodingsArraySectionOffset(commonEncodingsArraySectionOffset);
    sectionHeader->set_commonEncodingsArrayCount(commonEncodingsArrayCount);
    sectionHeader->set_personalityArraySectionOffset(personalityArraySectionOffset);
    sectionHeader->set_personalityArrayCount(personalityArrayCount);
    sectionHeader->set_indexSectionOffset(indexSectionOffset);
    sectionHeader->set_indexCount(indexCount);

    // copy common encodings
    uint32_t* commonEncodingsTable = (uint32_t*)&_header[commonEncodingsArraySectionOffset];
    for (std::map<uint32_t, unsigned int>::iterator it=commonEncodings.begin(); it != commonEncodings.end(); ++it)
        E::set32(commonEncodingsTable[it->second], it->first);

    // make references for personality entries
    uint32_t* personalityArray = (uint32_t*)&_header[sectionHeader->personalityArraySectionOffset()];
    for (std::map<const ld::Atom*, unsigned int>::iterator it=personalityIndexMap.begin(); it != personalityIndexMap.end(); ++it) {
        uint32_t offset = (uint8_t*)&personalityArray[it->second-1] - _header;
        this->addImageOffsetFixup(offset, it->first);
    }

    // build first level index and references
    macho_unwind_info_section_header_index_entry<P>* indexTable = (macho_unwind_info_section_header_index_entry<P>*)&_header[indexSectionOffset];
    uint32_t refOffset;
    for (unsigned int i=0; i < secondLevelPageCount; ++i) {
        unsigned int reverseIndex = secondLevelPageCount - 1 - i;
        indexTable[i].set_functionOffset(0);
        indexTable[i].set_secondLevelPagesSectionOffset(secondLevelPagesStarts[reverseIndex]-_pages+headerEndSectionOffset);
        indexTable[i].set_lsdaIndexArraySectionOffset(lsdaIndexOffsetMap[secondLevelFirstFuncs[reverseIndex]]+lsdaIndexArraySectionOffset);
        refOffset = (uint8_t*)&indexTable[i] - _header;
        this->addImageOffsetFixup(refOffset, secondLevelFirstFuncs[reverseIndex]);
    }
    indexTable[secondLevelPageCount].set_functionOffset(0);
    indexTable[secondLevelPageCount].set_secondLevelPagesSectionOffset(0);
    indexTable[secondLevelPageCount].set_lsdaIndexArraySectionOffset(lsdaIndexArraySectionOffset+lsdaIndexArraySize);
    refOffset = (uint8_t*)&indexTable[secondLevelPageCount] - _header;
    this->addImageOffsetFixupPlusAddend(refOffset, entries.back().func, entries.back().func->size()+1);

    // build lsda references
    uint32_t lsdaEntrySectionOffset = lsdaIndexArraySectionOffset;
    for (std::vector<LSDAEntry>::iterator it = lsdaIndex.begin(); it != lsdaIndex.end(); ++it) {
        this->addImageOffsetFixup(lsdaEntrySectionOffset, it->func);
        this->addImageOffsetFixup(lsdaEntrySectionOffset+4, it->lsda);
        lsdaEntrySectionOffset += sizeof(unwind_info_section_header_lsda_index_entry);
    }

}

template <typename A>
UnwindInfoAtom<A>::~UnwindInfoAtom()
{
    free(_pagesForDelete);       // allocated with calloc()
    delete [] _header;           // allocated with new[]
}

template <typename A>
void UnwindInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
{
    // content is in two parts
    memcpy(buffer, _header, _headerSize);
    memcpy(&buffer[_headerSize], _pages, _pagesSize);
}

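// Encodings whose mode is "DWARF" are only references into __eh_frame; they
// are never merged with neighboring entries and never placed in the common
// encodings table, since each one carries its own FDE offset.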
template <>
bool UnwindInfoAtom<x86>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
    return ((enc & UNWIND_X86_MODE_MASK) == UNWIND_X86_MODE_DWARF);
}

template <>
bool UnwindInfoAtom<x86_64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
    return ((enc & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
}

template <typename A>
void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
{
    // build new list removing entries where next function has same encoding
    uniqueEntries.reserve(entries.size());
    UnwindEntry last(NULL, 0, 0, NULL, NULL, NULL, 0xFFFFFFFF);
    for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
        const UnwindEntry& next = *it;
        bool newNeedsDwarf = encodingMeansUseDwarf(next.encoding);
        // remove entries which have same encoding and personalityPointer as last one
        if ( newNeedsDwarf || (next.encoding != last.encoding) || (next.personalityPointer != last.personalityPointer)
            || (next.lsda != NULL) || (last.lsda != NULL) ) {
            uniqueEntries.push_back(next);
        }
        last = next;
    }
    if (_s_log) fprintf(stderr, "compressDuplicates() entries.size()=%lu, uniqueEntries.size()=%lu\n",
                        entries.size(), uniqueEntries.size());
}

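// Personality routines get 1-based indexes that are shifted into the
// UNWIND_PERSONALITY_MASK bits of each encoding.  That field cannot represent
// more than three routines, which is why the constructor warns and bails out
// when a fourth distinct routine shows up.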
template <typename A>
void UnwindInfoAtom<A>::makePersonalityIndexes(std::vector<UnwindEntry>& entries, std::map<const ld::Atom*, uint32_t>& personalityIndexMap)
{
    for(std::vector<UnwindEntry>::iterator it=entries.begin(); it != entries.end(); ++it) {
        if ( it->personalityPointer != NULL ) {
            std::map<const ld::Atom*, uint32_t>::iterator pos = personalityIndexMap.find(it->personalityPointer);
            if ( pos == personalityIndexMap.end() ) {
                const uint32_t nextIndex = personalityIndexMap.size() + 1;
                personalityIndexMap[it->personalityPointer] = nextIndex;
            }
            uint32_t personalityIndex = personalityIndexMap[it->personalityPointer];
            it->encoding |= (personalityIndex << (__builtin_ctz(UNWIND_PERSONALITY_MASK)) );
        }
    }
    if (_s_log) fprintf(stderr, "makePersonalityIndexes() %lu personality routines used\n", personalityIndexMap.size());
}


template <typename A>
void UnwindInfoAtom<A>::findCommonEncoding(const std::vector<UnwindEntry>& entries,
                                           std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings)
{
    // scan infos to get frequency counts for each encoding
    std::map<compact_unwind_encoding_t, unsigned int> encodingsUsed;
    unsigned int mostCommonEncodingUsageCount = 0;
    for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
        // never put dwarf into common table
        if ( encodingMeansUseDwarf(it->encoding) )
            continue;
        std::map<compact_unwind_encoding_t, unsigned int>::iterator pos = encodingsUsed.find(it->encoding);
        if ( pos == encodingsUsed.end() ) {
            encodingsUsed[it->encoding] = 1;
        }
        else {
            encodingsUsed[it->encoding] += 1;
            if ( mostCommonEncodingUsageCount < encodingsUsed[it->encoding] )
                mostCommonEncodingUsageCount = encodingsUsed[it->encoding];
        }
    }
    // put the most common encodings into the common table, but at most 127 of them
    for(unsigned int usages=mostCommonEncodingUsageCount; usages > 1; --usages) {
        for (std::map<compact_unwind_encoding_t, unsigned int>::iterator euit=encodingsUsed.begin(); euit != encodingsUsed.end(); ++euit) {
            if ( euit->second == usages ) {
                unsigned int sz = commonEncodings.size();
                if ( sz < 127 ) {
                    commonEncodings[euit->first] = sz;
                }
            }
        }
    }
    if (_s_log) fprintf(stderr, "findCommonEncoding() %lu common encodings found\n", commonEncodings.size());
}


template <typename A>
void UnwindInfoAtom<A>::makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex, std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap)
{
    for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
        lsdaIndexOffsetMap[it->func] = lsdaIndex.size() * sizeof(unwind_info_section_header_lsda_index_entry);
        if ( it->lsda != NULL ) {
            LSDAEntry entry;
            entry.func = it->func;
            entry.lsda = it->lsda;
            lsdaIndex.push_back(entry);
        }
    }
    if (_s_log) fprintf(stderr, "makeLsdaIndex() %lu LSDAs found\n", lsdaIndex.size());
}

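// The helpers below record ld::Fixups rather than final values, since function
// addresses are not known until atoms are laid out.  Compressed entries store
// a 24-bit offset from the page's first function plus an 8-bit encoding index;
// regular entries and the header tables store 32-bit image offsets.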
template <>
void UnwindInfoAtom<x86>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}


template <>
void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}


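// A regular second-level page is a plain array of {function image offset,
// encoding} pairs.  It is the fallback when entries cannot be compressed,
// for example when the function-offset delta within a page exceeds 24 bits.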
template <typename A>
unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
                                                           unsigned int endIndex, uint8_t*& pageEnd)
{
    const unsigned int maxEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
    const unsigned int entriesToAdd = ((endIndex > maxEntriesPerPage) ? maxEntriesPerPage : endIndex);
    uint8_t* pageStart = pageEnd
                         - entriesToAdd*sizeof(unwind_info_regular_second_level_entry)
                         - sizeof(unwind_info_regular_second_level_page_header);
    macho_unwind_info_regular_second_level_page_header<P>* page = (macho_unwind_info_regular_second_level_page_header<P>*)pageStart;
    page->set_kind(UNWIND_SECOND_LEVEL_REGULAR);
    page->set_entryPageOffset(sizeof(macho_unwind_info_regular_second_level_page_header<P>));
    page->set_entryCount(entriesToAdd);
    macho_unwind_info_regular_second_level_entry<P>* entryTable = (macho_unwind_info_regular_second_level_entry<P>*)(pageStart + page->entryPageOffset());
    for (unsigned int i=0; i < entriesToAdd; ++i) {
        const UnwindEntry& info = uniqueInfos[endIndex-entriesToAdd+i];
        entryTable[i].set_functionOffset(0);
        entryTable[i].set_encoding(info.encoding);
        // add fixup for address part of entry
        uint32_t offset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
        this->addRegularAddressFixup(offset, info.func);
        if ( encodingMeansUseDwarf(info.encoding) ) {
            // add fixup for dwarf offset part of page specific encoding
            uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
            this->addRegularFDEOffsetFixup(encOffset, info.fde);
        }
    }
    if (_s_log) fprintf(stderr, "regular page with %u entries\n", entriesToAdd);
    pageEnd = pageStart;
    return endIndex - entriesToAdd;
}

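// A compressed second-level page packs each entry into one 32-bit word: the
// encoding index in the top 8 bits and the function offset (relative to the
// page's first function) in the low 24 bits.  Small indexes refer to the
// common encodings table; larger ones refer to this page's own table.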
template <typename A>
unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
                                                              const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
                                                              uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd)
{
    if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(pageSize=%u, endIndex=%u)\n", pageSize, endIndex);
    // first pass calculates how many compressed entries we could fit in this sized page
    // keep adding entries to page until:
    //   1) encoding table plus entry table plus header exceed page size
    //   2) the file offset delta from the first to last function > 24 bits
    //   3) custom encoding index reaches 255
    //   4) run out of uniqueInfos to encode
    std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
    uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
    std::vector<uint8_t> encodingIndexes;
    int index = endIndex-1;
    int entryCount = 0;
    uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
    bool canDo = true;
    while ( canDo && (index >= 0) ) {
        const UnwindEntry& info = uniqueInfos[index--];
        // compute encoding index
        unsigned int encodingIndex;
        std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
        if ( pos != commonEncodings.end() ) {
            encodingIndex = pos->second;
        }
        else {
            // no common entry, so add one on this page
            uint32_t encoding = info.encoding;
            if ( encodingMeansUseDwarf(encoding) ) {
                // make a unique pseudo encoding so this dwarf entry gets its own encoding slot
                encoding += (index+1);
            }
            std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
            if ( ppos != pageSpecificEncodings.end() ) {
                encodingIndex = ppos->second;
            }
            else {
                encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
                if ( encodingIndex <= 255 ) {
                    pageSpecificEncodings[encoding] = encodingIndex;
                }
                else {
                    canDo = false; // case 3)
                    if (_s_log) fprintf(stderr, "end of compressed page with %u entries, %lu custom encodings because too many custom encodings\n",
                                        entryCount, pageSpecificEncodings.size());
                }
            }
        }
        if ( canDo )
            encodingIndexes.push_back(encodingIndex);
        // compute function offset
        uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
        if ( funcOffsetWithInPage > 0x00FFFF00 ) {
            // don't use 0x00FFFFFF because addresses may vary after atoms are laid out again
            canDo = false; // case 2)
            if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
        }
        else {
            ++entryCount;
        }
        // check room for entry
        if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
            canDo = false; // case 1)
            --entryCount;
            if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
        }
        //if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
    }

    // check for cases where it would be better to use a regular (non-compressed) page
    const unsigned int compressPageUsed = sizeof(unwind_info_compressed_second_level_page_header)
                                          + pageSpecificEncodings.size()*sizeof(uint32_t)
                                          + entryCount*sizeof(uint32_t);
    if ( (compressPageUsed < (pageSize-4)) && (index >= 0) ) {
        const int regularEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
        if ( entryCount < regularEntriesPerPage ) {
            return makeRegularSecondLevelPage(uniqueInfos, pageSize, endIndex, pageEnd);
        }
    }

    // check if we need any padding because adding another entry would take 8 bytes but we only have room for 4
    uint32_t pad = 0;
    if ( compressPageUsed == (pageSize-4) )
        pad = 4;

    // second pass fills in page
    uint8_t* pageStart = pageEnd - compressPageUsed - pad;
    CSLP* page = (CSLP*)pageStart;
    page->set_kind(UNWIND_SECOND_LEVEL_COMPRESSED);
    page->set_entryPageOffset(sizeof(CSLP));
    page->set_entryCount(entryCount);
    page->set_encodingsPageOffset(page->entryPageOffset()+entryCount*sizeof(uint32_t));
    page->set_encodingsCount(pageSpecificEncodings.size());
    uint32_t* const encodingsArray = (uint32_t*)&pageStart[page->encodingsPageOffset()];
    // fill in entry table
    uint32_t* const entriesArray = (uint32_t*)&pageStart[page->entryPageOffset()];
    const ld::Atom* firstFunc = uniqueInfos[endIndex-entryCount].func;
    for(unsigned int i=endIndex-entryCount; i < endIndex; ++i) {
        const UnwindEntry& info = uniqueInfos[i];
        uint8_t encodingIndex;
        if ( encodingMeansUseDwarf(info.encoding) ) {
            // dwarf entries are always in page specific encodings
            encodingIndex = pageSpecificEncodings[info.encoding+i];
        }
        else {
            std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
            if ( pos != commonEncodings.end() )
                encodingIndex = pos->second;
            else
                encodingIndex = pageSpecificEncodings[info.encoding];
        }
        uint32_t entryIndex = i - endIndex + entryCount;
        E::set32(entriesArray[entryIndex], encodingIndex << 24);
        // add fixup for address part of entry
        uint32_t offset = (uint8_t*)(&entriesArray[entryIndex]) - _pagesForDelete;
        this->addCompressedAddressOffsetFixup(offset, info.func, firstFunc);
        if ( encodingMeansUseDwarf(info.encoding) ) {
            // add fixup for dwarf offset part of page specific encoding
            uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pagesForDelete;
            this->addCompressedEncodingFixup(encOffset, info.fde);
        }
    }
    // fill in encodings table
    for(std::map<uint32_t, unsigned int>::const_iterator it = pageSpecificEncodings.begin(); it != pageSpecificEncodings.end(); ++it) {
        E::set32(encodingsArray[it->second-commonEncodings.size()], it->first);
    }

    if (_s_log) fprintf(stderr, "compressed page with %u entries, %lu custom encodings\n", entryCount, pageSpecificEncodings.size());

    // update pageEnd
    pageEnd = pageStart;
    return endIndex-entryCount;  // endIndex for next page
}


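// __unwind_info is placed before __eh_frame, so the size of __eh_frame
// determines how large the final second-level page may be while still letting
// every other second-level page start page-aligned.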
static uint64_t calculateEHFrameSize(const ld::Internal& state)
{
    uint64_t size = 0;
    for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() == ld::Section::typeCFI ) {
            for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
                size += (*ait)->size();
            }
        }
    }
    return size;
}

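// Walk all atoms in layout order, maintaining a tentative address so entries
// can later be grouped into pages by address.  Code with no unwind ranges
// still gets a zero-encoding entry, and for each function the FDE, LSDA and
// personality are pulled from its group-subordinate fixups (the personality
// may also be recovered from the FDE's CIE).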
static void getAllUnwindInfos(const ld::Internal& state, std::vector<UnwindEntry>& entries)
{
    uint64_t address = 0;
    for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            // adjust address for atom alignment
            uint64_t alignment = 1 << atom->alignment().powerOf2;
            uint64_t currentModulus = (address % alignment);
            uint64_t requiredModulus = atom->alignment().modulus;
            if ( currentModulus != requiredModulus ) {
                if ( requiredModulus > currentModulus )
                    address += requiredModulus-currentModulus;
                else
                    address += requiredModulus+alignment-currentModulus;
            }

            if ( atom->beginUnwind() == atom->endUnwind() ) {
                // record an explicit "no unwind info" entry for code in the TEXT segment that has none
                if ( atom->section().type() == ld::Section::typeCode ) {
                    entries.push_back(UnwindEntry(atom, address, 0, NULL, NULL, NULL, 0));
                }
            }
            else {
                // atom has unwind info(s), add entry for each
                const ld::Atom* fde = NULL;
                const ld::Atom* lsda = NULL;
                const ld::Atom* personalityPointer = NULL;
                for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                    switch ( fit->kind ) {
                        case ld::Fixup::kindNoneGroupSubordinateFDE:
                            assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                            fde = fit->u.target;
                            break;
                        case ld::Fixup::kindNoneGroupSubordinateLSDA:
                            assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                            lsda = fit->u.target;
                            break;
                        case ld::Fixup::kindNoneGroupSubordinatePersonality:
                            assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                            personalityPointer = fit->u.target;
                            assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
                            break;
                        default:
                            break;
                    }
                }
                if ( fde != NULL ) {
                    // find CIE for this FDE
                    const ld::Atom* cie = NULL;
                    for (ld::Fixup::iterator fit = fde->fixupsBegin(), end=fde->fixupsEnd(); fit != end; ++fit) {
                        if ( fit->kind != ld::Fixup::kindSubtractTargetAddress )
                            continue;
                        if ( fit->binding != ld::Fixup::bindingDirectlyBound )
                            continue;
                        cie = fit->u.target;
                        // the CIE is the only directly bound, subtracted target in an FDE
                        assert(cie->section().type() == ld::Section::typeCFI);
                        break;
                    }
                    if ( cie != NULL ) {
                        // a CIE can have just one fixup - to the personality pointer
                        for (ld::Fixup::iterator fit = cie->fixupsBegin(), end=cie->fixupsEnd(); fit != end; ++fit) {
                            if ( fit->kind == ld::Fixup::kindSetTargetAddress ) {
                                switch ( fit->binding ) {
                                    case ld::Fixup::bindingsIndirectlyBound:
                                        personalityPointer = state.indirectBindingTable[fit->u.bindingIndex];
                                        assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
                                        break;
                                    case ld::Fixup::bindingDirectlyBound:
                                        personalityPointer = fit->u.target;
                                        assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
                                        break;
                                    default:
                                        break;
                                }
                            }
                        }
                    }
                }
                for ( ld::Atom::UnwindInfo::iterator uit = atom->beginUnwind(); uit != atom->endUnwind(); ++uit ) {
                    entries.push_back(UnwindEntry(atom, address, uit->startOffset, fde, lsda, personalityPointer, uit->unwindInfo));
                }
            }
            address += atom->size();
        }
    }
}

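// For final linked images, gather unwind info from every atom and emit one
// UnwindInfoAtom holding the entire __unwind_info table.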
static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::Internal& state)
{
    // walk every atom and get its unwind info
    std::vector<UnwindEntry> entries;
    entries.reserve(64);
    getAllUnwindInfos(state, entries);

    // don't generate an __unwind_info section if there is no code in this linkage unit
    if ( entries.size() == 0 )
        return;

    // calculate size of __eh_frame section, so __unwind_info can go before it and page align
    uint64_t ehFrameSize = calculateEHFrameSize(state);

    // create atom that contains the whole compact unwind table
    switch ( opts.architecture() ) {
        case CPU_TYPE_X86_64:
            state.addAtom(*new UnwindInfoAtom<x86_64>(entries, ehFrameSize));
            break;
        case CPU_TYPE_I386:
            state.addAtom(*new UnwindInfoAtom<x86>(entries, ehFrameSize));
            break;
        default:
            assert(0 && "no compact unwind for arch");
    }
}

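// For relocatable (-r) output the table is not built.  Instead each unwind
// range becomes a CompactUnwindAtom: one macho_compact_unwind_entry in a
// __LD,__compact_unwind section.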
template <typename A>
class CompactUnwindAtom : public ld::Atom {
public:
    CompactUnwindAtom(ld::Internal& state, const ld::Atom* funcAtom,
                      uint32_t startOffset, uint32_t len, uint32_t cui);
    ~CompactUnwindAtom() {}

    virtual const ld::File*       file() const { return NULL; }
    virtual bool                  translationUnitSource(const char** dir, const char**) const
                                      { return false; }
    virtual const char*           name() const { return "compact unwind info"; }
    virtual uint64_t              size() const { return sizeof(macho_compact_unwind_entry<P>); }
    virtual uint64_t              objectAddress() const { return 0; }
    virtual void                  copyRawContent(uint8_t buffer[]) const;
    virtual void                  setScope(Scope) { }
    virtual ld::Fixup::iterator   fixupsBegin() const { return (ld::Fixup*)&_fixups[0]; }
    virtual ld::Fixup::iterator   fixupsEnd() const { return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
    typedef typename A::P          P;
    typedef typename A::P::E       E;
    typedef typename A::P::uint_t  pint_t;


    const ld::Atom*                _atom;
    const uint32_t                 _startOffset;
    const uint32_t                 _len;
    const uint32_t                 _compactUnwindInfo;
    std::vector<ld::Fixup>         _fixups;

    static ld::Fixup::Kind         _s_pointerKind;
    static ld::Fixup::Kind         _s_pointerStoreKind;
    static ld::Section             _s_section;
};


template <typename A>
ld::Section CompactUnwindAtom<A>::_s_section("__LD", "__compact_unwind", ld::Section::typeDebug);

template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;

template <typename A>
CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state, const ld::Atom* funcAtom, uint32_t startOffset,
                                        uint32_t len, uint32_t cui)
    : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
               ld::Atom::scopeTranslationUnit, ld::Atom::typeUnclassified,
               symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
      _atom(funcAtom), _startOffset(startOffset), _len(len), _compactUnwindInfo(cui)
{
    _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, funcAtom));
    _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k2of3, ld::Fixup::kindAddAddend, _startOffset));
    _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k3of3, _s_pointerKind));
    // see if atom has subordinate personality function or lsda
    for (ld::Fixup::iterator fit = funcAtom->fixupsBegin(), end=funcAtom->fixupsEnd(); fit != end; ++fit) {
        switch ( fit->kind ) {
            case ld::Fixup::kindNoneGroupSubordinatePersonality:
                assert(fit->binding == ld::Fixup::bindingsIndirectlyBound);
                _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::personalityFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, state.indirectBindingTable[fit->u.bindingIndex]));
                break;
            case ld::Fixup::kindNoneGroupSubordinateLSDA:
                assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::lsdaFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, fit->u.target));
                break;
            default:
                break;
        }
    }

}

template <typename A>
void CompactUnwindAtom<A>::copyRawContent(uint8_t buffer[]) const
{
    macho_compact_unwind_entry<P>* buf = (macho_compact_unwind_entry<P>*)buffer;
    buf->set_codeStart(0);
    buf->set_codeLen(_len);
    buf->set_compactUnwindInfo(_compactUnwindInfo);
    buf->set_personality(0);
    buf->set_lsda(0);
}


static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, const ld::Atom* atom,
                                  uint32_t startOffset, uint32_t endOffset, uint32_t cui)
{
    switch ( opts.architecture() ) {
        case CPU_TYPE_X86_64:
            state.addAtom(*new CompactUnwindAtom<x86_64>(state, atom, startOffset, endOffset-startOffset, cui));
            break;
        case CPU_TYPE_I386:
            state.addAtom(*new CompactUnwindAtom<x86>(state, atom, startOffset, endOffset-startOffset, cui));
            break;
    }
}

static void makeRelocateableCompactUnwindSection(const Options& opts, ld::Internal& state)
{
    // can't add CompactUnwindAtom atoms while iterating, so pre-scan
    std::vector<const ld::Atom*> atomsWithUnwind;
    for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            if ( atom->beginUnwind() != atom->endUnwind() )
                atomsWithUnwind.push_back(atom);
        }
    }
    // make one CompactUnwindAtom for each compact unwind range in each atom
    for (std::vector<const ld::Atom*>::iterator it = atomsWithUnwind.begin(); it != atomsWithUnwind.end(); ++it) {
        const ld::Atom* atom = *it;
        uint32_t lastOffset = 0;
        uint32_t lastCUE = 0;
        bool first = true;
        for (ld::Atom::UnwindInfo::iterator uit=atom->beginUnwind(); uit != atom->endUnwind(); ++uit) {
            if ( !first ) {
                makeCompactUnwindAtom(opts, state, atom, lastOffset, uit->startOffset, lastCUE);
            }
            lastOffset = uit->startOffset;
            lastCUE = uit->unwindInfo;
            first = false;
        }
        makeCompactUnwindAtom(opts, state, atom, lastOffset, (uint32_t)atom->size(), lastCUE);
    }
}

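// Pass entry point: object-file (-r) output gets the relocatable
// __compact_unwind section; final linked images get a synthesized
// __unwind_info section when the output kind needs one.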
void doPass(const Options& opts, ld::Internal& state)
{
    if ( opts.outputKind() == Options::kObjectFile )
        makeRelocateableCompactUnwindSection(opts, state);
    else if ( opts.needsUnwindInfoSection() )
        makeFinalLinkedImageCompactUnwindSection(opts, state);
}

} // namespace compact_unwind
} // namespace passes
} // namespace ld