/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <dlfcn.h>
#include <mach/machine.h>
#include <mach-o/compact_unwind_encoding.h>

#include <vector>
#include <map>

#include "ld.hpp"
#include "compact_unwind.h"
#include "Architectures.hpp"
#include "MachOFileAbstraction.hpp"


namespace ld {
namespace passes {
namespace compact_unwind {

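// One candidate row for the unwind table: the function (or unwind range within it) being
// described, its tentative address and start offset, the compact encoding, and the
// FDE/LSDA/personality atoms that go with it.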
struct UnwindEntry {
	UnwindEntry(const ld::Atom* f, uint64_t a, uint32_t o, const ld::Atom* d,
				const ld::Atom* l, const ld::Atom* p, uint32_t en)
		: func(f), fde(d), lsda(l), personalityPointer(p), funcTentAddress(a),
		  functionOffset(o), encoding(en) { }
	const ld::Atom*				func;
	const ld::Atom*				fde;
	const ld::Atom*				lsda;
	const ld::Atom*				personalityPointer;
	uint64_t					funcTentAddress;
	uint32_t					functionOffset;
	compact_unwind_encoding_t	encoding;
};

struct LSDAEntry {
	const ld::Atom*	func;
	const ld::Atom*	lsda;
};

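// Atom that holds the entire __TEXT,__unwind_info section.  Its content is built in two
// pieces: _header (section header, common encodings array, personality array, first-level
// index, and LSDA index) and _pages (the second-level lookup pages).  Function addresses,
// FDE offsets, and image offsets are not known yet, so they are recorded as fixups to be
// resolved once the output is laid out.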
template <typename A>
class UnwindInfoAtom : public ld::Atom {
public:
	UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize);
	~UnwindInfoAtom();

	virtual const ld::File*			file() const			{ return NULL; }
	virtual const char*				name() const			{ return "compact unwind info"; }
	virtual uint64_t				size() const			{ return _headerSize+_pagesSize; }
	virtual uint64_t				objectAddress() const	{ return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const;
	virtual void					setScope(Scope)			{ }
	virtual ld::Fixup::iterator		fixupsBegin() const		{ return (ld::Fixup*)&_fixups[0]; }
	virtual ld::Fixup::iterator		fixupsEnd() const		{ return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
	typedef typename A::P P;
	typedef typename A::P::E E;
	typedef typename A::P::uint_t pint_t;

	typedef macho_unwind_info_compressed_second_level_page_header<P> CSLP;

	bool			encodingMeansUseDwarf(compact_unwind_encoding_t enc);
	void			compressDuplicates(const std::vector<UnwindEntry>& entries,
										std::vector<UnwindEntry>& uniqueEntries);
	void			makePersonalityIndexes(std::vector<UnwindEntry>& entries,
										std::map<const ld::Atom*, uint32_t>& personalityIndexMap);
	void			findCommonEncoding(const std::vector<UnwindEntry>& entries,
										std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings);
	void			makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex,
										std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap);
	unsigned int	makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
										const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
										uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd);
	unsigned int	makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
										unsigned int endIndex, uint8_t*& pageEnd);
	void			addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc);
	void			addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde);
	void			addRegularAddressFixup(uint32_t offset, const ld::Atom* func);
	void			addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde);
	void			addImageOffsetFixup(uint32_t offset, const ld::Atom* targ);
	void			addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend);

	uint8_t*				_pagesForDelete;
	uint8_t*				_pages;
	uint64_t				_pagesSize;
	uint8_t*				_header;
	uint64_t				_headerSize;
	std::vector<ld::Fixup>	_fixups;

	static bool				_s_log;
	static ld::Section		_s_section;
};

template <typename A>
bool UnwindInfoAtom<A>::_s_log = false;

template <typename A>
ld::Section UnwindInfoAtom<A>::_s_section("__TEXT", "__unwind_info", ld::Section::typeUnwindInfo);

template <typename A>
UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize)
	: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
				ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
				symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
		_pagesForDelete(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
{
	// build new compressed list by removing entries where next function has same encoding
	std::vector<UnwindEntry> uniqueEntries;
	compressDuplicates(entries, uniqueEntries);

	// reserve room so _fixups vector is not reallocated a bunch of times
	_fixups.reserve(uniqueEntries.size()*3);

	// build personality index, update encodings with personality index
	std::map<const ld::Atom*, uint32_t> personalityIndexMap;
	makePersonalityIndexes(uniqueEntries, personalityIndexMap);
	if ( personalityIndexMap.size() > 3 ) {
		warning("too many personality routines for compact unwind to encode");
		return;
	}

	// put the most common encodings into the common table, but at most 127 of them
	std::map<compact_unwind_encoding_t, unsigned int> commonEncodings;
	findCommonEncoding(uniqueEntries, commonEncodings);

	// build lsda index
	std::map<const ld::Atom*, uint32_t> lsdaIndexOffsetMap;
	std::vector<LSDAEntry> lsdaIndex;
	makeLsdaIndex(uniqueEntries, lsdaIndex, lsdaIndexOffsetMap);


	// calculate worst case size for all unwind info pages when allocating buffer
	const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
	assert(uniqueEntries.size() > 0);
	const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 1;
	_pagesForDelete = (uint8_t*)calloc(pageCount,4096);
	if ( _pagesForDelete == NULL ) {
		warning("could not allocate space for compact unwind info");
		return;
	}

	// make last second level page smaller so that all other second level pages can be page aligned
	uint32_t maxLastPageSize = 4096 - (ehFrameSize % 4096);
	uint32_t tailPad = 0;
	if ( maxLastPageSize < 128 ) {
		tailPad = maxLastPageSize;
		maxLastPageSize = 4096;
	}

	// fill in pages in reverse order
	const ld::Atom* secondLevelFirstFuncs[pageCount*3];
	uint8_t* secondLevelPagesStarts[pageCount*3];
	unsigned int endIndex = uniqueEntries.size();
	unsigned int secondLevelPageCount = 0;
	uint8_t* pageEnd = &_pagesForDelete[pageCount*4096];
	uint32_t pageSize = maxLastPageSize;
	while ( endIndex > 0 ) {
		endIndex = makeCompressedSecondLevelPage(uniqueEntries, commonEncodings, pageSize, endIndex, pageEnd);
		secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
		secondLevelFirstFuncs[secondLevelPageCount] = uniqueEntries[endIndex].func;
		++secondLevelPageCount;
		pageSize = 4096;  // last page can be odd size, make rest up to 4096 bytes in size
	}
	_pages = pageEnd;
	_pagesSize = &_pagesForDelete[pageCount*4096] - pageEnd;


	// calculate section layout
	const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
	const uint32_t commonEncodingsArrayCount = commonEncodings.size();
	const uint32_t commonEncodingsArraySize = commonEncodingsArrayCount * sizeof(compact_unwind_encoding_t);
	const uint32_t personalityArraySectionOffset = commonEncodingsArraySectionOffset + commonEncodingsArraySize;
	const uint32_t personalityArrayCount = personalityIndexMap.size();
	const uint32_t personalityArraySize = personalityArrayCount * sizeof(uint32_t);
	const uint32_t indexSectionOffset = personalityArraySectionOffset + personalityArraySize;
	const uint32_t indexCount = secondLevelPageCount+1;
	const uint32_t indexSize = indexCount * sizeof(macho_unwind_info_section_header_index_entry<P>);
	const uint32_t lsdaIndexArraySectionOffset = indexSectionOffset + indexSize;
	const uint32_t lsdaIndexArrayCount = lsdaIndex.size();
	const uint32_t lsdaIndexArraySize = lsdaIndexArrayCount * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
	const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;

	// now that we know the size of the header, slide all existing fixups on the pages
	const int32_t fixupSlide = headerEndSectionOffset + (_pagesForDelete - _pages);
	for(std::vector<ld::Fixup>::iterator it = _fixups.begin(); it != _fixups.end(); ++it) {
		it->offsetInAtom += fixupSlide;
	}

	// allocate and fill in section header
	_headerSize = headerEndSectionOffset;
	_header = new uint8_t[_headerSize];
	bzero(_header, _headerSize);
	macho_unwind_info_section_header<P>* sectionHeader = (macho_unwind_info_section_header<P>*)_header;
	sectionHeader->set_version(UNWIND_SECTION_VERSION);
	sectionHeader->set_commonEncodingsArraySectionOffset(commonEncodingsArraySectionOffset);
	sectionHeader->set_commonEncodingsArrayCount(commonEncodingsArrayCount);
	sectionHeader->set_personalityArraySectionOffset(personalityArraySectionOffset);
	sectionHeader->set_personalityArrayCount(personalityArrayCount);
	sectionHeader->set_indexSectionOffset(indexSectionOffset);
	sectionHeader->set_indexCount(indexCount);

	// copy common encodings
	uint32_t* commonEncodingsTable = (uint32_t*)&_header[commonEncodingsArraySectionOffset];
	for (std::map<uint32_t, unsigned int>::iterator it=commonEncodings.begin(); it != commonEncodings.end(); ++it)
		E::set32(commonEncodingsTable[it->second], it->first);

	// make references for personality entries
	uint32_t* personalityArray = (uint32_t*)&_header[sectionHeader->personalityArraySectionOffset()];
	for (std::map<const ld::Atom*, unsigned int>::iterator it=personalityIndexMap.begin(); it != personalityIndexMap.end(); ++it) {
		uint32_t offset = (uint8_t*)&personalityArray[it->second-1] - _header;
		this->addImageOffsetFixup(offset, it->first);
	}

	// build first level index and references
	macho_unwind_info_section_header_index_entry<P>* indexTable = (macho_unwind_info_section_header_index_entry<P>*)&_header[indexSectionOffset];
	uint32_t refOffset;
	for (unsigned int i=0; i < secondLevelPageCount; ++i) {
		unsigned int reverseIndex = secondLevelPageCount - 1 - i;
		indexTable[i].set_functionOffset(0);
		indexTable[i].set_secondLevelPagesSectionOffset(secondLevelPagesStarts[reverseIndex]-_pages+headerEndSectionOffset);
		indexTable[i].set_lsdaIndexArraySectionOffset(lsdaIndexOffsetMap[secondLevelFirstFuncs[reverseIndex]]+lsdaIndexArraySectionOffset);
		refOffset = (uint8_t*)&indexTable[i] - _header;
		this->addImageOffsetFixup(refOffset, secondLevelFirstFuncs[reverseIndex]);
	}
	indexTable[secondLevelPageCount].set_functionOffset(0);
	indexTable[secondLevelPageCount].set_secondLevelPagesSectionOffset(0);
	indexTable[secondLevelPageCount].set_lsdaIndexArraySectionOffset(lsdaIndexArraySectionOffset+lsdaIndexArraySize);
	refOffset = (uint8_t*)&indexTable[secondLevelPageCount] - _header;
	this->addImageOffsetFixupPlusAddend(refOffset, entries.back().func, entries.back().func->size()+1);

	// build lsda references
	uint32_t lsdaEntrySectionOffset = lsdaIndexArraySectionOffset;
	for (std::vector<LSDAEntry>::iterator it = lsdaIndex.begin(); it != lsdaIndex.end(); ++it) {
		this->addImageOffsetFixup(lsdaEntrySectionOffset, it->func);
		this->addImageOffsetFixup(lsdaEntrySectionOffset+4, it->lsda);
		lsdaEntrySectionOffset += sizeof(unwind_info_section_header_lsda_index_entry);
	}

}

template <typename A>
UnwindInfoAtom<A>::~UnwindInfoAtom()
{
	free(_pagesForDelete);
	delete [] _header;	// _header comes from new[]; _pagesForDelete comes from calloc()
}

template <typename A>
void UnwindInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
{
	// content is in two parts
	memcpy(buffer, _header, _headerSize);
	memcpy(&buffer[_headerSize], _pages, _pagesSize);
}

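// An encoding whose mode bits say "DWARF" carries no unwind instructions itself; the entry
// just points at an FDE in __eh_frame.  Such encodings are treated specially below: they are
// never merged with neighbors, never placed in the common table, and always get a
// page-specific slot whose low bits hold the FDE offset (filled in by fixup).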
template <>
bool UnwindInfoAtom<x86>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
	return ((enc & UNWIND_X86_MODE_MASK) == UNWIND_X86_MODE_DWARF);
}

template <>
bool UnwindInfoAtom<x86_64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
	return ((enc & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
}

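// Adjacent functions that share an encoding, have the same personality routine, and have no
// LSDA can share a single table entry, since a lookup only needs the encoding in effect at
// the faulting PC.  DWARF-style encodings are never merged because each refers to its own FDE.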
template <typename A>
void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
{
	// build new list removing entries where next function has same encoding
	uniqueEntries.reserve(entries.size());
	UnwindEntry last(NULL, 0, 0, NULL, NULL, NULL, 0xFFFFFFFF);
	for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
		const UnwindEntry& next = *it;
		bool newNeedsDwarf = encodingMeansUseDwarf(next.encoding);
		// remove entries which have same encoding and personalityPointer as last one
		if ( newNeedsDwarf || (next.encoding != last.encoding) || (next.personalityPointer != last.personalityPointer)
				|| (next.lsda != NULL) || (last.lsda != NULL) ) {
			uniqueEntries.push_back(next);
		}
		last = next;
	}
	if (_s_log) fprintf(stderr, "compressDuplicates() entries.size()=%lu, uniqueEntries.size()=%lu\n",
						entries.size(), uniqueEntries.size());
}

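// Personality routines are referenced through a small 1-based index stored in the
// UNWIND_PERSONALITY_MASK bits of each encoding, so only three distinct routines can be
// represented (the constructor warns and bails out if more are seen).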
template <typename A>
void UnwindInfoAtom<A>::makePersonalityIndexes(std::vector<UnwindEntry>& entries, std::map<const ld::Atom*, uint32_t>& personalityIndexMap)
{
	for(std::vector<UnwindEntry>::iterator it=entries.begin(); it != entries.end(); ++it) {
		if ( it->personalityPointer != NULL ) {
			std::map<const ld::Atom*, uint32_t>::iterator pos = personalityIndexMap.find(it->personalityPointer);
			if ( pos == personalityIndexMap.end() ) {
				const uint32_t nextIndex = personalityIndexMap.size() + 1;
				personalityIndexMap[it->personalityPointer] = nextIndex;
			}
			uint32_t personalityIndex = personalityIndexMap[it->personalityPointer];
			it->encoding |= (personalityIndex << (__builtin_ctz(UNWIND_PERSONALITY_MASK)) );
		}
	}
	if (_s_log) fprintf(stderr, "makePersonalityIndexes() %lu personality routines used\n", personalityIndexMap.size());
}

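// The common-encodings table is shared by every second-level page.  Compressed page entries
// select an encoding with an 8-bit index: values below the size of the common table refer to
// it, higher values refer to the page-specific table, so the common table is capped at 127
// entries and the most frequently used encodings are added first.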
template <typename A>
void UnwindInfoAtom<A>::findCommonEncoding(const std::vector<UnwindEntry>& entries,
											std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings)
{
	// scan infos to get frequency counts for each encoding
	std::map<compact_unwind_encoding_t, unsigned int> encodingsUsed;
	unsigned int mostCommonEncodingUsageCount = 0;
	for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
		// never put dwarf into common table
		if ( encodingMeansUseDwarf(it->encoding) )
			continue;
		std::map<compact_unwind_encoding_t, unsigned int>::iterator pos = encodingsUsed.find(it->encoding);
		if ( pos == encodingsUsed.end() ) {
			encodingsUsed[it->encoding] = 1;
		}
		else {
			encodingsUsed[it->encoding] += 1;
			if ( mostCommonEncodingUsageCount < encodingsUsed[it->encoding] )
				mostCommonEncodingUsageCount = encodingsUsed[it->encoding];
		}
	}
	// put the most common encodings into the common table, but at most 127 of them
	for(unsigned int usages=mostCommonEncodingUsageCount; usages > 1; --usages) {
		for (std::map<compact_unwind_encoding_t, unsigned int>::iterator euit=encodingsUsed.begin(); euit != encodingsUsed.end(); ++euit) {
			if ( euit->second == usages ) {
				unsigned int sz = commonEncodings.size();
				if ( sz < 127 ) {
					commonEncodings[euit->first] = sz;
				}
			}
		}
	}
	if (_s_log) fprintf(stderr, "findCommonEncoding() %lu common encodings found\n", commonEncodings.size());
}

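// The LSDA index is a flat array of (function image offset, LSDA image offset) pairs.  For
// every function, lsdaIndexOffsetMap records the byte offset into that array at which its
// entry would appear; the first-level index uses this to point each page at the slice of the
// LSDA index covering that page's functions.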
template <typename A>
void UnwindInfoAtom<A>::makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex, std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap)
{
	for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
		lsdaIndexOffsetMap[it->func] = lsdaIndex.size() * sizeof(unwind_info_section_header_lsda_index_entry);
		if ( it->lsda != NULL ) {
			LSDAEntry entry;
			entry.func = it->func;
			entry.lsda = it->lsda;
			lsdaIndex.push_back(entry);
		}
	}
	if (_s_log) fprintf(stderr, "makeLsdaIndex() %lu LSDAs found\n", lsdaIndex.size());
}

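// The helpers below record fixups rather than writing final bytes, because function addresses,
// image offsets, and FDE section offsets are only known after layout.  Compressed-page fields
// are stored in the low 24 bits of a 32-bit word (the top 8 bits hold the encoding index);
// regular-page and header fields are full 32-bit image offsets.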
template <>
void UnwindInfoAtom<x86>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}


template <>
void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
	_fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
	_fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}


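// A regular second-level page is the uncompressed fallback: each entry is a full 32-bit
// function image offset plus a full 32-bit encoding, so it is used when the function-offset
// deltas or the number of distinct encodings will not fit a compressed page.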
template <typename A>
unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
															unsigned int endIndex, uint8_t*& pageEnd)
{
	const unsigned int maxEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
	const unsigned int entriesToAdd = ((endIndex > maxEntriesPerPage) ? maxEntriesPerPage : endIndex);
	uint8_t* pageStart = pageEnd
						- entriesToAdd*sizeof(unwind_info_regular_second_level_entry)
						- sizeof(unwind_info_regular_second_level_page_header);
	macho_unwind_info_regular_second_level_page_header<P>* page = (macho_unwind_info_regular_second_level_page_header<P>*)pageStart;
	page->set_kind(UNWIND_SECOND_LEVEL_REGULAR);
	page->set_entryPageOffset(sizeof(macho_unwind_info_regular_second_level_page_header<P>));
	page->set_entryCount(entriesToAdd);
	macho_unwind_info_regular_second_level_entry<P>* entryTable = (macho_unwind_info_regular_second_level_entry<P>*)(pageStart + page->entryPageOffset());
	for (unsigned int i=0; i < entriesToAdd; ++i) {
		const UnwindEntry& info = uniqueInfos[endIndex-entriesToAdd+i];
		entryTable[i].set_functionOffset(0);
		entryTable[i].set_encoding(info.encoding);
		// add fixup for address part of entry
		uint32_t offset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
		this->addRegularAddressFixup(offset, info.func);
		if ( encodingMeansUseDwarf(info.encoding) ) {
			// add fixup for dwarf offset part of page specific encoding
			uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
			this->addRegularFDEOffsetFixup(encOffset, info.fde);
		}
	}
	if (_s_log) fprintf(stderr, "regular page with %u entries\n", entriesToAdd);
	pageEnd = pageStart;
	return endIndex - entriesToAdd;
}

template <typename A>
unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
															const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
															uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd)
{
	if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(pageSize=%u, endIndex=%u)\n", pageSize, endIndex);
	// first pass calculates how many compressed entries we could fit in this sized page
	// keep adding entries to page until:
	//  1) encoding table plus entry table plus header exceed page size
	//  2) the file offset delta from the first to last function > 24 bits
	//  3) custom encoding index reaches 255
	//  4) run out of uniqueInfos to encode
	std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
	uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
	std::vector<uint8_t> encodingIndexes;
	int index = endIndex-1;
	int entryCount = 0;
	uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
	bool canDo = true;
	while ( canDo && (index >= 0) ) {
		const UnwindEntry& info = uniqueInfos[index--];
		// compute encoding index
		unsigned int encodingIndex;
		std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
		if ( pos != commonEncodings.end() ) {
			encodingIndex = pos->second;
		}
		else {
			// no common entry, so add one on this page
			uint32_t encoding = info.encoding;
			if ( encodingMeansUseDwarf(encoding) ) {
				// make unique pseudo encoding so this dwarf FDE gets its own encoding entry slot
				encoding += (index+1);
			}
			std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
			if ( ppos != pageSpecificEncodings.end() ) {
				// reuse the page-specific slot already assigned to this encoding
				encodingIndex = ppos->second;
			}
			else {
				encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
				if ( encodingIndex <= 255 ) {
					pageSpecificEncodings[encoding] = encodingIndex;
				}
				else {
					canDo = false; // case 3)
					if (_s_log) fprintf(stderr, "end of compressed page with %u entries, %lu custom encodings because too many custom encodings\n",
										entryCount, pageSpecificEncodings.size());
				}
			}
		}
		if ( canDo )
			encodingIndexes.push_back(encodingIndex);
		// compute function offset
		uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
		if ( funcOffsetWithInPage > 0x00FFFF00 ) {
			// don't use 0x00FFFFFF because addresses may vary after atoms are laid out again
			canDo = false; // case 2)
			if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
		}
		else {
			++entryCount;
		}
		// check room for entry
		if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
			canDo = false; // case 1)
			--entryCount;
			if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
		}
		//if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
	}

	// check for cases where it would be better to use a regular (non-compressed) page
	const unsigned int compressPageUsed = sizeof(unwind_info_compressed_second_level_page_header)
								+ pageSpecificEncodings.size()*sizeof(uint32_t)
								+ entryCount*sizeof(uint32_t);
	if ( (compressPageUsed < (pageSize-4) && (index >= 0) ) ) {
		const int regularEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
		if ( entryCount < regularEntriesPerPage ) {
			return makeRegularSecondLevelPage(uniqueInfos, pageSize, endIndex, pageEnd);
		}
	}

	// check if we need any padding because adding another entry would take 8 bytes but only have room for 4
	uint32_t pad = 0;
	if ( compressPageUsed == (pageSize-4) )
		pad = 4;

	// second pass fills in page
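	// each compressed entry is one 32-bit word: the encoding index in the top 8 bits and,
	// via the fixup added below, the function's offset from the page's first function in
	// the low 24 bits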
	uint8_t* pageStart = pageEnd - compressPageUsed - pad;
	CSLP* page = (CSLP*)pageStart;
	page->set_kind(UNWIND_SECOND_LEVEL_COMPRESSED);
	page->set_entryPageOffset(sizeof(CSLP));
	page->set_entryCount(entryCount);
	page->set_encodingsPageOffset(page->entryPageOffset()+entryCount*sizeof(uint32_t));
	page->set_encodingsCount(pageSpecificEncodings.size());
	uint32_t* const encodingsArray = (uint32_t*)&pageStart[page->encodingsPageOffset()];
	// fill in entry table
	uint32_t* const entriesArray = (uint32_t*)&pageStart[page->entryPageOffset()];
	const ld::Atom* firstFunc = uniqueInfos[endIndex-entryCount].func;
	for(unsigned int i=endIndex-entryCount; i < endIndex; ++i) {
		const UnwindEntry& info = uniqueInfos[i];
		uint8_t encodingIndex;
		if ( encodingMeansUseDwarf(info.encoding) ) {
			// dwarf entries are always in page specific encodings
			encodingIndex = pageSpecificEncodings[info.encoding+i];
		}
		else {
			std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
			if ( pos != commonEncodings.end() )
				encodingIndex = pos->second;
			else
				encodingIndex = pageSpecificEncodings[info.encoding];
		}
		uint32_t entryIndex = i - endIndex + entryCount;
		E::set32(entriesArray[entryIndex], encodingIndex << 24);
		// add fixup for address part of entry
		uint32_t offset = (uint8_t*)(&entriesArray[entryIndex]) - _pagesForDelete;
		this->addCompressedAddressOffsetFixup(offset, info.func, firstFunc);
		if ( encodingMeansUseDwarf(info.encoding) ) {
			// add fixup for dwarf offset part of page specific encoding
			uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pagesForDelete;
			this->addCompressedEncodingFixup(encOffset, info.fde);
		}
	}
	// fill in encodings table
	for(std::map<uint32_t, unsigned int>::const_iterator it = pageSpecificEncodings.begin(); it != pageSpecificEncodings.end(); ++it) {
		E::set32(encodingsArray[it->second-commonEncodings.size()], it->first);
	}

	if (_s_log) fprintf(stderr, "compressed page with %u entries, %lu custom encodings\n", entryCount, pageSpecificEncodings.size());

	// update pageEnd
	pageEnd = pageStart;
	return endIndex-entryCount;  // endIndex for next page
}



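// __eh_frame is laid out after __unwind_info in the output; its size is needed up front so
// the last (odd-sized) second-level page can be chosen such that the other second-level pages
// stay page aligned (see the constructor above).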
static uint64_t calculateEHFrameSize(const ld::Internal& state)
{
	uint64_t size = 0;
	for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() == ld::Section::typeCFI ) {
			for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
				size += (*ait)->size();
			}
		}
	}
	return size;
}

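// Walk every atom, computing the tentative address each will have (mirroring the alignment
// rules the later layout pass applies) and collecting one UnwindEntry per unwind range.  The
// FDE, LSDA and personality atoms are discovered through the atom's group-subordinate fixups;
// if the personality is not directly referenced, it is pulled from the FDE's CIE.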
static void getAllUnwindInfos(const ld::Internal& state, std::vector<UnwindEntry>& entries)
{
	uint64_t address = 0;
	for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			// adjust address for atom alignment
			uint64_t alignment = 1 << atom->alignment().powerOf2;
			uint64_t currentModulus = (address % alignment);
			uint64_t requiredModulus = atom->alignment().modulus;
			if ( currentModulus != requiredModulus ) {
				if ( requiredModulus > currentModulus )
					address += requiredModulus-currentModulus;
				else
					address += requiredModulus+alignment-currentModulus;
			}

			if ( atom->beginUnwind() == atom->endUnwind() ) {
				// code atoms in the __TEXT segment that have no unwind info still get an entry, so the table records that fact
				if ( atom->section().type() == ld::Section::typeCode ) {
					entries.push_back(UnwindEntry(atom, address, 0, NULL, NULL, NULL, 0));
				}
			}
			else {
				// atom has unwind info(s), add entry for each
				const ld::Atom* fde = NULL;
				const ld::Atom* lsda = NULL;
				const ld::Atom* personalityPointer = NULL;
				for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
					switch ( fit->kind ) {
						case ld::Fixup::kindNoneGroupSubordinateFDE:
							assert(fit->binding == ld::Fixup::bindingDirectlyBound);
							fde = fit->u.target;
							break;
						case ld::Fixup::kindNoneGroupSubordinateLSDA:
							assert(fit->binding == ld::Fixup::bindingDirectlyBound);
							lsda = fit->u.target;
							break;
						case ld::Fixup::kindNoneGroupSubordinatePersonality:
							assert(fit->binding == ld::Fixup::bindingDirectlyBound);
							personalityPointer = fit->u.target;
							assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
							break;
						default:
							break;
					}
				}
				if ( fde != NULL ) {
					// find CIE for this FDE
					const ld::Atom* cie = NULL;
					for (ld::Fixup::iterator fit = fde->fixupsBegin(), end=fde->fixupsEnd(); fit != end; ++fit) {
						if ( fit->kind != ld::Fixup::kindSubtractTargetAddress )
							continue;
						if ( fit->binding != ld::Fixup::bindingDirectlyBound )
							continue;
						cie = fit->u.target;
						// the CIE is the only directly bound, subtracted target in an FDE
						assert(cie->section().type() == ld::Section::typeCFI);
						break;
					}
					if ( cie != NULL ) {
						// a CIE can have just one fixup - to the personality pointer
						for (ld::Fixup::iterator fit = cie->fixupsBegin(), end=cie->fixupsEnd(); fit != end; ++fit) {
							if ( fit->kind == ld::Fixup::kindSetTargetAddress ) {
								switch ( fit->binding ) {
									case ld::Fixup::bindingsIndirectlyBound:
										personalityPointer = state.indirectBindingTable[fit->u.bindingIndex];
										assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
										break;
									case ld::Fixup::bindingDirectlyBound:
										personalityPointer = fit->u.target;
										assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
										break;
									default:
										break;
								}
							}
						}
					}
				}
				for ( ld::Atom::UnwindInfo::iterator uit = atom->beginUnwind(); uit != atom->endUnwind(); ++uit ) {
					entries.push_back(UnwindEntry(atom, address, uit->startOffset, fde, lsda, personalityPointer, uit->unwindInfo));
				}
			}
			address += atom->size();
		}
	}
}


static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::Internal& state)
{
	// walk every atom and get its unwind info
	std::vector<UnwindEntry> entries;
	entries.reserve(64);
	getAllUnwindInfos(state, entries);

	// don't generate an __unwind_info section if there is no code in this linkage unit
	if ( entries.size() == 0 )
		return;

	// calculate size of __eh_frame section, so __unwind_info can go before it and page align
	uint64_t ehFrameSize = calculateEHFrameSize(state);

	// create atom that contains the whole compact unwind table
	switch ( opts.architecture() ) {
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			state.addAtom(*new UnwindInfoAtom<x86_64>(entries, ehFrameSize));
			break;
#endif
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			state.addAtom(*new UnwindInfoAtom<x86>(entries, ehFrameSize));
			break;
#endif
		default:
			assert(0 && "no compact unwind for arch");
	}
}


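// For -r (object file) output, the table is not built; instead each unwind range is emitted as
// one macho_compact_unwind_entry in a __LD,__compact_unwind section, preserving the encoding,
// range, personality and LSDA references so the information is still available at the final link.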
template <typename A>
class CompactUnwindAtom : public ld::Atom {
public:
	CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom,
						uint32_t startOffset, uint32_t len, uint32_t cui);
	~CompactUnwindAtom() {}

	virtual const ld::File*			file() const			{ return NULL; }
	virtual const char*				name() const			{ return "compact unwind info"; }
	virtual uint64_t				size() const			{ return sizeof(macho_compact_unwind_entry<P>); }
	virtual uint64_t				objectAddress() const	{ return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const;
	virtual void					setScope(Scope)			{ }
	virtual ld::Fixup::iterator		fixupsBegin() const		{ return (ld::Fixup*)&_fixups[0]; }
	virtual ld::Fixup::iterator		fixupsEnd() const		{ return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
	typedef typename A::P P;
	typedef typename A::P::E E;
	typedef typename A::P::uint_t pint_t;


	const ld::Atom*				_atom;
	const uint32_t				_startOffset;
	const uint32_t				_len;
	const uint32_t				_compactUnwindInfo;
	std::vector<ld::Fixup>		_fixups;

	static ld::Fixup::Kind		_s_pointerKind;
	static ld::Fixup::Kind		_s_pointerStoreKind;
	static ld::Section			_s_section;
};


template <typename A>
ld::Section CompactUnwindAtom<A>::_s_section("__LD", "__compact_unwind", ld::Section::typeDebug);

template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;

template <typename A>
CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom, uint32_t startOffset,
										uint32_t len, uint32_t cui)
	: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
				ld::Atom::scopeTranslationUnit, ld::Atom::typeUnclassified,
				symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
		_atom(funcAtom), _startOffset(startOffset), _len(len), _compactUnwindInfo(cui)
{
	_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, funcAtom));
	_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k2of3, ld::Fixup::kindAddAddend, _startOffset));
	_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k3of3, _s_pointerKind));
	// see if atom has subordinate personality function or lsda
	for (ld::Fixup::iterator fit = funcAtom->fixupsBegin(), end=funcAtom->fixupsEnd(); fit != end; ++fit) {
		switch ( fit->kind ) {
			case ld::Fixup::kindNoneGroupSubordinatePersonality:
				assert(fit->binding == ld::Fixup::bindingsIndirectlyBound);
				_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::personalityFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, state.indirectBindingTable[fit->u.bindingIndex]));
				break;
			case ld::Fixup::kindNoneGroupSubordinateLSDA:
				assert(fit->binding == ld::Fixup::bindingDirectlyBound);
				_fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::lsdaFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, fit->u.target));
				break;
			default:
				break;
		}
	}

}

template <typename A>
void CompactUnwindAtom<A>::copyRawContent(uint8_t buffer[]) const
{
	macho_compact_unwind_entry<P>* buf = (macho_compact_unwind_entry<P>*)buffer;
	buf->set_codeStart(0);
	buf->set_codeLen(_len);
	buf->set_compactUnwindInfo(_compactUnwindInfo);
	buf->set_personality(0);
	buf->set_lsda(0);
}


static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, const ld::Atom* atom,
									uint32_t startOffset, uint32_t endOffset, uint32_t cui)
{
	switch ( opts.architecture() ) {
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			state.addAtom(*new CompactUnwindAtom<x86_64>(state, atom, startOffset, endOffset-startOffset, cui));
			break;
#endif
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			state.addAtom(*new CompactUnwindAtom<x86>(state, atom, startOffset, endOffset-startOffset, cui));
			break;
#endif
	}
}

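// Object-file (-r) path: pre-scan for atoms that carry unwind info, then emit one
// CompactUnwindAtom per unwind range within each such atom (the final range extends to the
// end of the atom).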
static void makeRelocateableCompactUnwindSection(const Options& opts, ld::Internal& state)
{
	// can't add CompactUnwindAtom atoms while iterating, so pre-scan
	std::vector<const ld::Atom*> atomsWithUnwind;
	for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			if ( atom->beginUnwind() != atom->endUnwind() )
				atomsWithUnwind.push_back(atom);
		}
	}
	// make one CompactUnwindAtom for each compact unwind range in each atom
	for (std::vector<const ld::Atom*>::iterator it = atomsWithUnwind.begin(); it != atomsWithUnwind.end(); ++it) {
		const ld::Atom* atom = *it;
		uint32_t lastOffset = 0;
		uint32_t lastCUE = 0;
		bool first = true;
		for (ld::Atom::UnwindInfo::iterator uit=atom->beginUnwind(); uit != atom->endUnwind(); ++uit) {
			if ( !first ) {
				makeCompactUnwindAtom(opts, state, atom, lastOffset, uit->startOffset, lastCUE);
			}
			lastOffset = uit->startOffset;
			lastCUE = uit->unwindInfo;
			first = false;
		}
		makeCompactUnwindAtom(opts, state, atom, lastOffset, (uint32_t)atom->size(), lastCUE);
	}
}


void doPass(const Options& opts, ld::Internal& state)
{
	if ( opts.outputKind() == Options::kObjectFile )
		makeRelocateableCompactUnwindSection(opts, state);

	else if ( opts.needsUnwindInfoSection() )
		makeFinalLinkedImageCompactUnwindSection(opts, state);
}


} // namespace compact_unwind
} // namespace passes
} // namespace ld