1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2009 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <stdint.h>
27 #include <math.h>
28 #include <unistd.h>
29 #include <dlfcn.h>
30 #include <mach/machine.h>
31 #include <mach-o/compact_unwind_encoding.h>
32
33 #include <vector>
34 #include <map>
35
36 #include "ld.hpp"
37 #include "compact_unwind.h"
38 #include "Architectures.hpp"
39 #include "MachOFileAbstraction.hpp"
40
41
42 namespace ld {
43 namespace passes {
44 namespace compact_unwind {
45
46
47 struct UnwindEntry {
48 UnwindEntry(const ld::Atom* f, uint64_t a, uint32_t o, const ld::Atom* d,
49 const ld::Atom* l, const ld::Atom* p, uint32_t en)
50 : func(f), fde(d), lsda(l), personalityPointer(p), funcTentAddress(a),
51 functionOffset(o), encoding(en) { }
52 const ld::Atom* func;
53 const ld::Atom* fde;
54 const ld::Atom* lsda;
55 const ld::Atom* personalityPointer;
56 uint64_t funcTentAddress;
57 uint32_t functionOffset;
58 compact_unwind_encoding_t encoding;
59 };
60
61 struct LSDAEntry {
62 const ld::Atom* func;
63 const ld::Atom* lsda;
64 };
65
66
67 template <typename A>
68 class UnwindInfoAtom : public ld::Atom {
69 public:
70 UnwindInfoAtom(const std::vector<UnwindEntry>& entries,uint64_t ehFrameSize);
71 ~UnwindInfoAtom();
72
73 virtual const ld::File* file() const { return NULL; }
74 virtual const char* name() const { return "compact unwind info"; }
75 virtual uint64_t size() const { return _headerSize+_pagesSize; }
76 virtual uint64_t objectAddress() const { return 0; }
77 virtual void copyRawContent(uint8_t buffer[]) const;
78 virtual void setScope(Scope) { }
79 virtual ld::Fixup::iterator fixupsBegin() const { return (ld::Fixup*)&_fixups[0]; }
80 virtual ld::Fixup::iterator fixupsEnd() const { return (ld::Fixup*)&_fixups[_fixups.size()]; }
81
82 private:
83 typedef typename A::P P;
84 typedef typename A::P::E E;
85 typedef typename A::P::uint_t pint_t;
86
87 typedef macho_unwind_info_compressed_second_level_page_header<P> CSLP;
88
89 bool encodingMeansUseDwarf(compact_unwind_encoding_t enc);
90 void compressDuplicates(const std::vector<UnwindEntry>& entries,
91 std::vector<UnwindEntry>& uniqueEntries);
92 void makePersonalityIndexes(std::vector<UnwindEntry>& entries,
93 std::map<const ld::Atom*, uint32_t>& personalityIndexMap);
94 void findCommonEncoding(const std::vector<UnwindEntry>& entries,
95 std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings);
96 void makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex,
97 std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap);
98 unsigned int makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
99 const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
100 uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd);
101 unsigned int makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
102 unsigned int endIndex, uint8_t*& pageEnd);
103 void addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc);
104 void addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde);
105 void addRegularAddressFixup(uint32_t offset, const ld::Atom* func);
106 void addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde);
107 void addImageOffsetFixup(uint32_t offset, const ld::Atom* targ);
108 void addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend);
109
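	// Layout of the buffers built below: _pagesForDelete is the raw calloc'd allocation,
	// _pageAlignedPages is that pointer rounded up to a 4096-byte boundary, and _pages points
	// at the first byte actually used by the second-level pages (which are built from the end
	// of the buffer backwards).  _header is a separately allocated buffer for the first-level
	// section header; the atom's content is the header followed by the pages.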
110 uint8_t* _pagesForDelete;
111 uint8_t* _pageAlignedPages;
112 uint8_t* _pages;
113 uint64_t _pagesSize;
114 uint8_t* _header;
115 uint64_t _headerSize;
116 std::vector<ld::Fixup> _fixups;
117
118 static bool _s_log;
119 static ld::Section _s_section;
120 };
121
122 template <typename A>
123 bool UnwindInfoAtom<A>::_s_log = false;
124
125 template <typename A>
126 ld::Section UnwindInfoAtom<A>::_s_section("__TEXT", "__unwind_info", ld::Section::typeUnwindInfo);
127
128
129 template <typename A>
130 UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize)
131 : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
132 ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
133 symbolTableNotIn, false, false, false, ld::Atom::Alignment(2)),
134 _pagesForDelete(NULL), _pageAlignedPages(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
135 {
136 // build new compressed list by removing entries where next function has same encoding
137 std::vector<UnwindEntry> uniqueEntries;
138 compressDuplicates(entries, uniqueEntries);
139
140 	// reserve room so the _fixups vector is not repeatedly reallocated
141 _fixups.reserve(uniqueEntries.size()*3);
142
143 // build personality index, update encodings with personality index
144 std::map<const ld::Atom*, uint32_t> personalityIndexMap;
145 makePersonalityIndexes(uniqueEntries, personalityIndexMap);
146 if ( personalityIndexMap.size() > 3 ) {
147 warning("too many personality routines for compact unwind to encode");
148 return;
149 }
150
151 // put the most common encodings into the common table, but at most 127 of them
152 std::map<compact_unwind_encoding_t, unsigned int> commonEncodings;
153 findCommonEncoding(uniqueEntries, commonEncodings);
154
155 // build lsda index
156 std::map<const ld::Atom*, uint32_t> lsdaIndexOffsetMap;
157 std::vector<LSDAEntry> lsdaIndex;
158 makeLsdaIndex(uniqueEntries, lsdaIndex, lsdaIndexOffsetMap);
159
160 // calculate worst case size for all unwind info pages when allocating buffer
161 const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
162 assert(uniqueEntries.size() > 0);
163 const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 2;
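	// allocate one extra page so the buffer can be rounded up to a page boundary just below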
164 _pagesForDelete = (uint8_t*)calloc(pageCount+1,4096);
165 if ( _pagesForDelete == NULL ) {
166 warning("could not allocate space for compact unwind info");
167 return;
168 }
169 _pageAlignedPages = (uint8_t*)((((uintptr_t)_pagesForDelete) + 4095) & -4096);
170
171 // make last second level page smaller so that all other second level pages can be page aligned
172 uint32_t maxLastPageSize = 4096 - (ehFrameSize % 4096);
173 uint32_t tailPad = 0;
174 if ( maxLastPageSize < 128 ) {
175 tailPad = maxLastPageSize;
176 maxLastPageSize = 4096;
177 }
178
179 // fill in pages in reverse order
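	// (building backwards from the end of the buffer lets the possibly-short last page sit at
	// the very end while every earlier second-level page stays page aligned)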
180 const ld::Atom* secondLevelFirstFuncs[pageCount*3];
181 uint8_t* secondLevelPagesStarts[pageCount*3];
182 unsigned int endIndex = uniqueEntries.size();
183 unsigned int secondLevelPageCount = 0;
184 uint8_t* pageEnd = &_pageAlignedPages[pageCount*4096];
185 uint32_t pageSize = maxLastPageSize;
186 while ( endIndex > 0 ) {
187 endIndex = makeCompressedSecondLevelPage(uniqueEntries, commonEncodings, pageSize, endIndex, pageEnd);
188 secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
189 secondLevelFirstFuncs[secondLevelPageCount] = uniqueEntries[endIndex].func;
190 ++secondLevelPageCount;
191 // if this requires more than one page, align so that next starts on page boundary
192 if ( (pageSize != 4096) && (endIndex > 0) ) {
193 pageEnd = (uint8_t*)((uintptr_t)(pageEnd) & -4096);
194 			pageSize = 4096;  // the last page can be an odd size; all other pages are a full 4096 bytes
195 }
196 }
197 _pages = pageEnd;
198 _pagesSize = &_pageAlignedPages[pageCount*4096] - pageEnd;
199
200 // calculate section layout
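	// the finished atom is laid out as: section header, common encodings array, personality
	// pointer array, first-level index (one entry per second-level page plus a sentinel),
	// LSDA index array, then the second-level pages themselves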
201 const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
202 const uint32_t commonEncodingsArrayCount = commonEncodings.size();
203 const uint32_t commonEncodingsArraySize = commonEncodingsArrayCount * sizeof(compact_unwind_encoding_t);
204 const uint32_t personalityArraySectionOffset = commonEncodingsArraySectionOffset + commonEncodingsArraySize;
205 const uint32_t personalityArrayCount = personalityIndexMap.size();
206 const uint32_t personalityArraySize = personalityArrayCount * sizeof(uint32_t);
207 const uint32_t indexSectionOffset = personalityArraySectionOffset + personalityArraySize;
208 const uint32_t indexCount = secondLevelPageCount+1;
209 const uint32_t indexSize = indexCount * sizeof(macho_unwind_info_section_header_index_entry<P>);
210 const uint32_t lsdaIndexArraySectionOffset = indexSectionOffset + indexSize;
211 const uint32_t lsdaIndexArrayCount = lsdaIndex.size();
212 const uint32_t lsdaIndexArraySize = lsdaIndexArrayCount * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
213 const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;
214
215 // now that we know the size of the header, slide all existing fixups on the pages
216 const int32_t fixupSlide = headerEndSectionOffset + (_pageAlignedPages - _pages);
217 for(std::vector<ld::Fixup>::iterator it = _fixups.begin(); it != _fixups.end(); ++it) {
218 it->offsetInAtom += fixupSlide;
219 }
220
221 // allocate and fill in section header
222 _headerSize = headerEndSectionOffset;
223 _header = new uint8_t[_headerSize];
224 bzero(_header, _headerSize);
225 macho_unwind_info_section_header<P>* sectionHeader = (macho_unwind_info_section_header<P>*)_header;
226 sectionHeader->set_version(UNWIND_SECTION_VERSION);
227 sectionHeader->set_commonEncodingsArraySectionOffset(commonEncodingsArraySectionOffset);
228 sectionHeader->set_commonEncodingsArrayCount(commonEncodingsArrayCount);
229 sectionHeader->set_personalityArraySectionOffset(personalityArraySectionOffset);
230 sectionHeader->set_personalityArrayCount(personalityArrayCount);
231 sectionHeader->set_indexSectionOffset(indexSectionOffset);
232 sectionHeader->set_indexCount(indexCount);
233
234 // copy common encodings
235 uint32_t* commonEncodingsTable = (uint32_t*)&_header[commonEncodingsArraySectionOffset];
236 for (std::map<uint32_t, unsigned int>::iterator it=commonEncodings.begin(); it != commonEncodings.end(); ++it)
237 E::set32(commonEncodingsTable[it->second], it->first);
238
239 // make references for personality entries
240 uint32_t* personalityArray = (uint32_t*)&_header[sectionHeader->personalityArraySectionOffset()];
241 for (std::map<const ld::Atom*, unsigned int>::iterator it=personalityIndexMap.begin(); it != personalityIndexMap.end(); ++it) {
242 uint32_t offset = (uint8_t*)&personalityArray[it->second-1] - _header;
243 this->addImageOffsetFixup(offset, it->first);
244 }
245
246 // build first level index and references
247 macho_unwind_info_section_header_index_entry<P>* indexTable = (macho_unwind_info_section_header_index_entry<P>*)&_header[indexSectionOffset];
248 uint32_t refOffset;
249 for (unsigned int i=0; i < secondLevelPageCount; ++i) {
250 unsigned int reverseIndex = secondLevelPageCount - 1 - i;
251 indexTable[i].set_functionOffset(0);
252 indexTable[i].set_secondLevelPagesSectionOffset(secondLevelPagesStarts[reverseIndex]-_pages+headerEndSectionOffset);
253 indexTable[i].set_lsdaIndexArraySectionOffset(lsdaIndexOffsetMap[secondLevelFirstFuncs[reverseIndex]]+lsdaIndexArraySectionOffset);
254 refOffset = (uint8_t*)&indexTable[i] - _header;
255 this->addImageOffsetFixup(refOffset, secondLevelFirstFuncs[reverseIndex]);
256 }
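	// the final index entry is a sentinel: its target is just past the end of the last
	// function, and its LSDA offset marks the end of the LSDA index array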
257 indexTable[secondLevelPageCount].set_functionOffset(0);
258 indexTable[secondLevelPageCount].set_secondLevelPagesSectionOffset(0);
259 indexTable[secondLevelPageCount].set_lsdaIndexArraySectionOffset(lsdaIndexArraySectionOffset+lsdaIndexArraySize);
260 refOffset = (uint8_t*)&indexTable[secondLevelPageCount] - _header;
261 this->addImageOffsetFixupPlusAddend(refOffset, entries.back().func, entries.back().func->size()+1);
262
263 // build lsda references
264 uint32_t lsdaEntrySectionOffset = lsdaIndexArraySectionOffset;
265 for (std::vector<LSDAEntry>::iterator it = lsdaIndex.begin(); it != lsdaIndex.end(); ++it) {
266 this->addImageOffsetFixup(lsdaEntrySectionOffset, it->func);
267 this->addImageOffsetFixup(lsdaEntrySectionOffset+4, it->lsda);
268 lsdaEntrySectionOffset += sizeof(unwind_info_section_header_lsda_index_entry);
269 }
270
271 }
272
273 template <typename A>
274 UnwindInfoAtom<A>::~UnwindInfoAtom()
275 {
276 free(_pagesForDelete);
277 	delete [] _header;  // _header was allocated with new[]
278 }
279
280 template <typename A>
281 void UnwindInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
282 {
283 // content is in two parts
284 memcpy(buffer, _header, _headerSize);
285 memcpy(&buffer[_headerSize], _pages, _pagesSize);
286 }
287
288
289 template <>
290 bool UnwindInfoAtom<x86>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
291 {
292 return ((enc & UNWIND_X86_MODE_MASK) == UNWIND_X86_MODE_DWARF);
293 }
294
295 template <>
296 bool UnwindInfoAtom<x86_64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
297 {
298 return ((enc & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
299 }
300
301 template <>
302 bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
303 {
304 return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
305 }
306
307 template <typename A>
308 void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
309 {
310 // build new list removing entries where next function has same encoding
311 uniqueEntries.reserve(entries.size());
312 UnwindEntry last(NULL, 0, 0, NULL, NULL, NULL, 0xFFFFFFFF);
313 for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
314 const UnwindEntry& next = *it;
315 bool newNeedsDwarf = encodingMeansUseDwarf(next.encoding);
316 		// remove entries which have the same encoding and personalityPointer as the previous one
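		// entries that need dwarf are always kept (each references its own FDE), as are
		// entries with LSDAs (each needs its own row in the LSDA index)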
317 if ( newNeedsDwarf || (next.encoding != last.encoding) || (next.personalityPointer != last.personalityPointer)
318 || (next.lsda != NULL) || (last.lsda != NULL) ) {
319 uniqueEntries.push_back(next);
320 }
321 last = next;
322 }
323 if (_s_log) fprintf(stderr, "compressDuplicates() entries.size()=%lu, uniqueEntries.size()=%lu\n",
324 entries.size(), uniqueEntries.size());
325 }
326
327 template <typename A>
328 void UnwindInfoAtom<A>::makePersonalityIndexes(std::vector<UnwindEntry>& entries, std::map<const ld::Atom*, uint32_t>& personalityIndexMap)
329 {
330 for(std::vector<UnwindEntry>::iterator it=entries.begin(); it != entries.end(); ++it) {
331 if ( it->personalityPointer != NULL ) {
332 std::map<const ld::Atom*, uint32_t>::iterator pos = personalityIndexMap.find(it->personalityPointer);
333 if ( pos == personalityIndexMap.end() ) {
334 const uint32_t nextIndex = personalityIndexMap.size() + 1;
335 personalityIndexMap[it->personalityPointer] = nextIndex;
336 }
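			// store the 1-based personality index into the UNWIND_PERSONALITY_MASK bits of the encoding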
337 uint32_t personalityIndex = personalityIndexMap[it->personalityPointer];
338 it->encoding |= (personalityIndex << (__builtin_ctz(UNWIND_PERSONALITY_MASK)) );
339 }
340 }
341 if (_s_log) fprintf(stderr, "makePersonalityIndexes() %lu personality routines used\n", personalityIndexMap.size());
342 }
343
344
345 template <typename A>
346 void UnwindInfoAtom<A>::findCommonEncoding(const std::vector<UnwindEntry>& entries,
347 std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings)
348 {
349 // scan infos to get frequency counts for each encoding
350 std::map<compact_unwind_encoding_t, unsigned int> encodingsUsed;
351 unsigned int mostCommonEncodingUsageCount = 0;
352 for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
353 // never put dwarf into common table
354 if ( encodingMeansUseDwarf(it->encoding) )
355 continue;
356 std::map<compact_unwind_encoding_t, unsigned int>::iterator pos = encodingsUsed.find(it->encoding);
357 if ( pos == encodingsUsed.end() ) {
358 encodingsUsed[it->encoding] = 1;
359 }
360 else {
361 encodingsUsed[it->encoding] += 1;
362 if ( mostCommonEncodingUsageCount < encodingsUsed[it->encoding] )
363 mostCommonEncodingUsageCount = encodingsUsed[it->encoding];
364 }
365 }
366 // put the most common encodings into the common table, but at most 127 of them
367 for(unsigned int usages=mostCommonEncodingUsageCount; usages > 1; --usages) {
368 for (std::map<compact_unwind_encoding_t, unsigned int>::iterator euit=encodingsUsed.begin(); euit != encodingsUsed.end(); ++euit) {
369 if ( euit->second == usages ) {
370 unsigned int sz = commonEncodings.size();
371 if ( sz < 127 ) {
372 commonEncodings[euit->first] = sz;
373 }
374 }
375 }
376 }
377 if (_s_log) fprintf(stderr, "findCommonEncoding() %lu common encodings found\n", commonEncodings.size());
378 }
379
380
381 template <typename A>
382 void UnwindInfoAtom<A>::makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex, std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap)
383 {
384 for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
385 lsdaIndexOffsetMap[it->func] = lsdaIndex.size() * sizeof(unwind_info_section_header_lsda_index_entry);
386 if ( it->lsda != NULL ) {
387 LSDAEntry entry;
388 entry.func = it->func;
389 entry.lsda = it->lsda;
390 lsdaIndex.push_back(entry);
391 }
392 }
393 if (_s_log) fprintf(stderr, "makeLsdaIndex() %lu LSDAs found\n", lsdaIndex.size());
394 }
395
396
397 template <>
398 void UnwindInfoAtom<x86>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
399 {
400 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
401 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
402 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
403 }
404
405 template <>
406 void UnwindInfoAtom<x86_64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
407 {
408 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
409 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
410 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
411 }
412
413 template <>
414 void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
415 {
416 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
417 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
418 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
419 }
420
421 template <>
422 void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
423 {
424 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
425 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
426 }
427
428 template <>
429 void UnwindInfoAtom<x86_64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
430 {
431 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
432 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
433 }
434
435 template <>
436 void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
437 {
438 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
439 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
440 }
441
442 template <>
443 void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
444 {
445 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
446 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
447 }
448
449 template <>
450 void UnwindInfoAtom<x86_64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
451 {
452 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
453 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
454 }
455
456 template <>
457 void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
458 {
459 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
460 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
461 }
462
463 template <>
464 void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
465 {
466 _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
467 _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
468 }
469
470 template <>
471 void UnwindInfoAtom<x86_64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
472 {
473 _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
474 _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
475 }
476
477 template <>
478 void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
479 {
480 _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
481 _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
482 }
483
484 template <>
485 void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
486 {
487 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
488 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
489 }
490
491 template <>
492 void UnwindInfoAtom<x86_64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
493 {
494 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
495 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
496 }
497
498 template <>
499 void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
500 {
501 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
502 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
503 }
504
505 template <>
506 void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
507 {
508 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
509 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
510 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
511 }
512
513 template <>
514 void UnwindInfoAtom<x86_64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
515 {
516 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
517 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
518 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
519 }
520
521 template <>
522 void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
523 {
524 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
525 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
526 _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
527 }
528
529
530
531
532 template <typename A>
533 unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
534 unsigned int endIndex, uint8_t*& pageEnd)
535 {
536 const unsigned int maxEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
537 const unsigned int entriesToAdd = ((endIndex > maxEntriesPerPage) ? maxEntriesPerPage : endIndex);
538 uint8_t* pageStart = pageEnd
539 - entriesToAdd*sizeof(unwind_info_regular_second_level_entry)
540 - sizeof(unwind_info_regular_second_level_page_header);
541 macho_unwind_info_regular_second_level_page_header<P>* page = (macho_unwind_info_regular_second_level_page_header<P>*)pageStart;
542 page->set_kind(UNWIND_SECOND_LEVEL_REGULAR);
543 page->set_entryPageOffset(sizeof(macho_unwind_info_regular_second_level_page_header<P>));
544 page->set_entryCount(entriesToAdd);
545 macho_unwind_info_regular_second_level_entry<P>* entryTable = (macho_unwind_info_regular_second_level_entry<P>*)(pageStart + page->entryPageOffset());
546 for (unsigned int i=0; i < entriesToAdd; ++i) {
547 const UnwindEntry& info = uniqueInfos[endIndex-entriesToAdd+i];
548 entryTable[i].set_functionOffset(0);
549 entryTable[i].set_encoding(info.encoding);
550 // add fixup for address part of entry
551 uint32_t offset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
552 this->addRegularAddressFixup(offset, info.func);
553 if ( encodingMeansUseDwarf(info.encoding) ) {
554 // add fixup for dwarf offset part of page specific encoding
555 uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
556 this->addRegularFDEOffsetFixup(encOffset, info.fde);
557 }
558 }
559 if (_s_log) fprintf(stderr, "regular page with %u entries\n", entriesToAdd);
560 pageEnd = pageStart;
561 return endIndex - entriesToAdd;
562 }
563
564
565 template <typename A>
566 unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
567 const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
568 uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd)
569 {
570 if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(pageSize=%u, endIndex=%u)\n", pageSize, endIndex);
571 	// first pass calculates how many compressed entries will fit in a page of this size
572 // keep adding entries to page until:
573 // 1) encoding table plus entry table plus header exceed page size
574 // 2) the file offset delta from the first to last function > 24 bits
575 	//  3) custom encoding index reaches 255
576 // 4) run out of uniqueInfos to encode
577 std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
578 uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
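	// space4 is the number of 32-bit words left on the page after the header; entries and
	// page-specific encodings both come out of this budget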
579 std::vector<uint8_t> encodingIndexes;
580 int index = endIndex-1;
581 int entryCount = 0;
582 uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
583 bool canDo = true;
584 while ( canDo && (index >= 0) ) {
585 const UnwindEntry& info = uniqueInfos[index--];
586 // compute encoding index
587 unsigned int encodingIndex;
588 std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
589 if ( pos != commonEncodings.end() ) {
590 encodingIndex = pos->second;
591 }
592 else {
593 			// no common entry, so add one on this page
594 uint32_t encoding = info.encoding;
595 if ( encodingMeansUseDwarf(encoding) ) {
596 				// make a unique pseudo encoding so this dwarf entry gets its own encoding slot
597 encoding += (index+1);
598 }
599 std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
600 if ( ppos != pageSpecificEncodings.end() ) {
601 				encodingIndex = ppos->second;
602 }
603 else {
604 encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
605 if ( encodingIndex <= 255 ) {
606 pageSpecificEncodings[encoding] = encodingIndex;
607 if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): pageSpecificEncodings[%d]=0x%08X\n", encodingIndex, encoding);
608 }
609 else {
610 canDo = false; // case 3)
611 if (_s_log) fprintf(stderr, "end of compressed page with %u entries, %lu custom encodings because too many custom encodings\n",
612 entryCount, pageSpecificEncodings.size());
613 }
614 }
615 }
616 if ( canDo )
617 encodingIndexes.push_back(encodingIndex);
618 // compute function offset
619 uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
620 if ( funcOffsetWithInPage > 0x00FFFF00 ) {
621 // don't use 0x00FFFFFF because addresses may vary after atoms are laid out again
622 canDo = false; // case 2)
623 if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
624 }
625 else {
626 ++entryCount;
627 }
628 // check room for entry
629 if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
630 canDo = false; // case 1)
631 --entryCount;
632 if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
633 }
634 //if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
635 }
636
637 // check for cases where it would be better to use a regular (non-compressed) page
638 const unsigned int compressPageUsed = sizeof(unwind_info_compressed_second_level_page_header)
639 + pageSpecificEncodings.size()*sizeof(uint32_t)
640 + entryCount*sizeof(uint32_t);
641 if ( (compressPageUsed < (pageSize-4) && (index >= 0) ) ) {
642 const int regularEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
643 if ( entryCount < regularEntriesPerPage ) {
644 return makeRegularSecondLevelPage(uniqueInfos, pageSize, endIndex, pageEnd);
645 }
646 }
647
648 	// check if we need padding: adding another entry would have taken up to 8 bytes, but only 4 remain
649 uint32_t pad = 0;
650 if ( compressPageUsed == (pageSize-4) )
651 pad = 4;
652
653 // second pass fills in page
654 uint8_t* pageStart = pageEnd - compressPageUsed - pad;
655 CSLP* page = (CSLP*)pageStart;
656 page->set_kind(UNWIND_SECOND_LEVEL_COMPRESSED);
657 page->set_entryPageOffset(sizeof(CSLP));
658 page->set_entryCount(entryCount);
659 page->set_encodingsPageOffset(page->entryPageOffset()+entryCount*sizeof(uint32_t));
660 page->set_encodingsCount(pageSpecificEncodings.size());
661 uint32_t* const encodingsArray = (uint32_t*)&pageStart[page->encodingsPageOffset()];
662 // fill in entry table
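	// each compressed entry is one 32-bit word: the encoding index in the high 8 bits and the
	// function's offset from the page's first function in the low 24 bits (the offset is
	// filled in later by a Low24of32 fixup)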
663 	uint32_t* const entriesArray = (uint32_t*)&pageStart[page->entryPageOffset()];
664 const ld::Atom* firstFunc = uniqueInfos[endIndex-entryCount].func;
665 for(unsigned int i=endIndex-entryCount; i < endIndex; ++i) {
666 const UnwindEntry& info = uniqueInfos[i];
667 uint8_t encodingIndex;
668 if ( encodingMeansUseDwarf(info.encoding) ) {
669 // dwarf entries are always in page specific encodings
670 encodingIndex = pageSpecificEncodings[info.encoding+i];
671 }
672 else {
673 std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
674 if ( pos != commonEncodings.end() )
675 encodingIndex = pos->second;
676 else
677 encodingIndex = pageSpecificEncodings[info.encoding];
678 }
679 uint32_t entryIndex = i - endIndex + entryCount;
680 		E::set32(entriesArray[entryIndex], encodingIndex << 24);
681 // add fixup for address part of entry
682 		uint32_t offset = (uint8_t*)(&entriesArray[entryIndex]) - _pageAlignedPages;
683 this->addCompressedAddressOffsetFixup(offset, info.func, firstFunc);
684 if ( encodingMeansUseDwarf(info.encoding) ) {
685 // add fixup for dwarf offset part of page specific encoding
686 uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pageAlignedPages;
687 this->addCompressedEncodingFixup(encOffset, info.fde);
688 }
689 }
690 // fill in encodings table
691 for(std::map<uint32_t, unsigned int>::const_iterator it = pageSpecificEncodings.begin(); it != pageSpecificEncodings.end(); ++it) {
692 E::set32(encodingsArray[it->second-commonEncodings.size()], it->first);
693 }
694
695 if (_s_log) fprintf(stderr, "compressed page with %u entries, %lu custom encodings\n", entryCount, pageSpecificEncodings.size());
696
697 	// update pageEnd so the caller knows where this page starts
698 pageEnd = pageStart;
699 return endIndex-entryCount; // endIndex for next page
700 }
701
702
703
704
705
706
707 static uint64_t calculateEHFrameSize(const ld::Internal& state)
708 {
709 uint64_t size = 0;
710 for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
711 ld::Internal::FinalSection* sect = *sit;
712 if ( sect->type() == ld::Section::typeCFI ) {
713 for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
714 size += (*ait)->size();
715 }
716 }
717 }
718 return size;
719 }
720
721 static void getAllUnwindInfos(const ld::Internal& state, std::vector<UnwindEntry>& entries)
722 {
723 uint64_t address = 0;
724 for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
725 ld::Internal::FinalSection* sect = *sit;
726 for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
727 const ld::Atom* atom = *ait;
728 // adjust address for atom alignment
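			// advance 'address' so it satisfies the atom's (modulus, 2^powerOf2) alignment;
			// these are only tentative addresses, used to group functions into pages, and may
			// shift slightly when atoms are finally laid out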
729 uint64_t alignment = 1 << atom->alignment().powerOf2;
730 uint64_t currentModulus = (address % alignment);
731 uint64_t requiredModulus = atom->alignment().modulus;
732 if ( currentModulus != requiredModulus ) {
733 if ( requiredModulus > currentModulus )
734 address += requiredModulus-currentModulus;
735 else
736 address += requiredModulus+alignment-currentModulus;
737 }
738
739 if ( atom->beginUnwind() == atom->endUnwind() ) {
740 				// emit a zero-encoding entry so code in the __TEXT segment with no unwind info is still covered by the table
741 if ( (atom->section().type() == ld::Section::typeCode) && (atom->size() !=0) ) {
742 entries.push_back(UnwindEntry(atom, address, 0, NULL, NULL, NULL, 0));
743 }
744 }
745 else {
746 // atom has unwind info(s), add entry for each
747 const ld::Atom* fde = NULL;
748 const ld::Atom* lsda = NULL;
749 const ld::Atom* personalityPointer = NULL;
750 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
751 switch ( fit->kind ) {
752 case ld::Fixup::kindNoneGroupSubordinateFDE:
753 assert(fit->binding == ld::Fixup::bindingDirectlyBound);
754 fde = fit->u.target;
755 break;
756 case ld::Fixup::kindNoneGroupSubordinateLSDA:
757 assert(fit->binding == ld::Fixup::bindingDirectlyBound);
758 lsda = fit->u.target;
759 break;
760 case ld::Fixup::kindNoneGroupSubordinatePersonality:
761 assert(fit->binding == ld::Fixup::bindingDirectlyBound);
762 personalityPointer = fit->u.target;
763 assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
764 break;
765 default:
766 break;
767 }
768 }
769 if ( fde != NULL ) {
770 // find CIE for this FDE
771 const ld::Atom* cie = NULL;
772 for (ld::Fixup::iterator fit = fde->fixupsBegin(), end=fde->fixupsEnd(); fit != end; ++fit) {
773 if ( fit->kind != ld::Fixup::kindSubtractTargetAddress )
774 continue;
775 if ( fit->binding != ld::Fixup::bindingDirectlyBound )
776 continue;
777 cie = fit->u.target;
778 // CIE is only direct subtracted target in FDE
779 assert(cie->section().type() == ld::Section::typeCFI);
780 break;
781 }
782 if ( cie != NULL ) {
783 						// the CIE's only fixup, if any, is a reference to the personality pointer
784 for (ld::Fixup::iterator fit = cie->fixupsBegin(), end=cie->fixupsEnd(); fit != end; ++fit) {
785 if ( fit->kind == ld::Fixup::kindSetTargetAddress ) {
786 switch ( fit->binding ) {
787 case ld::Fixup::bindingsIndirectlyBound:
788 personalityPointer = state.indirectBindingTable[fit->u.bindingIndex];
789 assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
790 break;
791 case ld::Fixup::bindingDirectlyBound:
792 personalityPointer = fit->u.target;
793 assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
794 break;
795 default:
796 break;
797 }
798 }
799 }
800 }
801 }
802 for ( ld::Atom::UnwindInfo::iterator uit = atom->beginUnwind(); uit != atom->endUnwind(); ++uit ) {
803 entries.push_back(UnwindEntry(atom, address, uit->startOffset, fde, lsda, personalityPointer, uit->unwindInfo));
804 }
805 }
806 address += atom->size();
807 }
808 }
809 }
810
811
812 static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::Internal& state)
813 {
814 	// walk every atom and get its unwind info
815 std::vector<UnwindEntry> entries;
816 entries.reserve(64);
817 getAllUnwindInfos(state, entries);
818
819 // don't generate an __unwind_info section if there is no code in this linkage unit
820 if ( entries.size() == 0 )
821 return;
822
823 // calculate size of __eh_frame section, so __unwind_info can go before it and page align
824 uint64_t ehFrameSize = calculateEHFrameSize(state);
825
826 // create atom that contains the whole compact unwind table
827 switch ( opts.architecture() ) {
828 #if SUPPORT_ARCH_x86_64
829 case CPU_TYPE_X86_64:
830 state.addAtom(*new UnwindInfoAtom<x86_64>(entries, ehFrameSize));
831 break;
832 #endif
833 #if SUPPORT_ARCH_i386
834 case CPU_TYPE_I386:
835 state.addAtom(*new UnwindInfoAtom<x86>(entries, ehFrameSize));
836 break;
837 #endif
838 #if SUPPORT_ARCH_arm64
839 case CPU_TYPE_ARM64:
840 state.addAtom(*new UnwindInfoAtom<arm64>(entries, ehFrameSize));
841 break;
842 #endif
843 default:
844 assert(0 && "no compact unwind for arch");
845 }
846 }
847
848
849
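// For relocatable (-r) output, each function's unwind info is instead emitted as one
// fixed-size entry in a __LD,__compact_unwind section.  The entry records the function's
// start, length, and encoding, with the start address, personality, and LSDA expressed as
// fixups so the final link can rebuild the unwind table.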
850 template <typename A>
851 class CompactUnwindAtom : public ld::Atom {
852 public:
853 CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom,
854 uint32_t startOffset, uint32_t len, uint32_t cui);
855 ~CompactUnwindAtom() {}
856
857 virtual const ld::File* file() const { return NULL; }
858 virtual const char* name() const { return "compact unwind info"; }
859 virtual uint64_t size() const { return sizeof(macho_compact_unwind_entry<P>); }
860 virtual uint64_t objectAddress() const { return 0; }
861 virtual void copyRawContent(uint8_t buffer[]) const;
862 virtual void setScope(Scope) { }
863 virtual ld::Fixup::iterator fixupsBegin() const { return (ld::Fixup*)&_fixups[0]; }
864 virtual ld::Fixup::iterator fixupsEnd() const { return (ld::Fixup*)&_fixups[_fixups.size()]; }
865
866 private:
867 typedef typename A::P P;
868 typedef typename A::P::E E;
869 typedef typename A::P::uint_t pint_t;
870
871
872 const ld::Atom* _atom;
873 const uint32_t _startOffset;
874 const uint32_t _len;
875 const uint32_t _compactUnwindInfo;
876 std::vector<ld::Fixup> _fixups;
877
878 static ld::Fixup::Kind _s_pointerKind;
879 static ld::Fixup::Kind _s_pointerStoreKind;
880 static ld::Section _s_section;
881 };
882
883
884 template <typename A>
885 ld::Section CompactUnwindAtom<A>::_s_section("__LD", "__compact_unwind", ld::Section::typeDebug);
886
887 template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
888 template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
889 template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
890 template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
891 #if SUPPORT_ARCH_arm64
892 template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
893 template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
894 #endif
895
896 template <typename A>
897 CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state,const ld::Atom* funcAtom, uint32_t startOffset,
898 uint32_t len, uint32_t cui)
899 : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
900 ld::Atom::scopeTranslationUnit, ld::Atom::typeUnclassified,
901 symbolTableNotIn, false, false, false, ld::Atom::Alignment(log2(sizeof(pint_t)))),
902 _atom(funcAtom), _startOffset(startOffset), _len(len), _compactUnwindInfo(cui)
903 {
904 _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, funcAtom));
905 _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k2of3, ld::Fixup::kindAddAddend, _startOffset));
906 _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k3of3, _s_pointerKind));
907 // see if atom has subordinate personality function or lsda
908 for (ld::Fixup::iterator fit = funcAtom->fixupsBegin(), end=funcAtom->fixupsEnd(); fit != end; ++fit) {
909 switch ( fit->kind ) {
910 case ld::Fixup::kindNoneGroupSubordinatePersonality:
911 assert(fit->binding == ld::Fixup::bindingsIndirectlyBound);
912 _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::personalityFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, state.indirectBindingTable[fit->u.bindingIndex]));
913 break;
914 case ld::Fixup::kindNoneGroupSubordinateLSDA:
915 assert(fit->binding == ld::Fixup::bindingDirectlyBound);
916 _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::lsdaFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, fit->u.target));
917 break;
918 default:
919 break;
920 }
921 }
922
923 }
924
925 template <typename A>
926 void CompactUnwindAtom<A>::copyRawContent(uint8_t buffer[]) const
927 {
928 macho_compact_unwind_entry<P>* buf = (macho_compact_unwind_entry<P>*)buffer;
929 buf->set_codeStart(0);
930 buf->set_codeLen(_len);
931 buf->set_compactUnwindInfo(_compactUnwindInfo);
932 buf->set_personality(0);
933 buf->set_lsda(0);
934 }
935
936
937 static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, const ld::Atom* atom,
938 uint32_t startOffset, uint32_t endOffset, uint32_t cui)
939 {
940 switch ( opts.architecture() ) {
941 #if SUPPORT_ARCH_x86_64
942 case CPU_TYPE_X86_64:
943 state.addAtom(*new CompactUnwindAtom<x86_64>(state, atom, startOffset, endOffset-startOffset, cui));
944 break;
945 #endif
946 #if SUPPORT_ARCH_i386
947 case CPU_TYPE_I386:
948 state.addAtom(*new CompactUnwindAtom<x86>(state, atom, startOffset, endOffset-startOffset, cui));
949 break;
950 #endif
951 #if SUPPORT_ARCH_arm64
952 case CPU_TYPE_ARM64:
953 state.addAtom(*new CompactUnwindAtom<arm64>(state, atom, startOffset, endOffset-startOffset, cui));
954 break;
955 #endif
956 }
957 }
958
959 static void makeRelocateableCompactUnwindSection(const Options& opts, ld::Internal& state)
960 {
961 	// can't add CompactUnwindAtom atoms while iterating over sections, so pre-scan first
962 std::vector<const ld::Atom*> atomsWithUnwind;
963 for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
964 ld::Internal::FinalSection* sect = *sit;
965 for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
966 const ld::Atom* atom = *ait;
967 if ( atom->beginUnwind() != atom->endUnwind() )
968 atomsWithUnwind.push_back(atom);
969 }
970 }
971 // make one CompactUnwindAtom for each compact unwind range in each atom
972 for (std::vector<const ld::Atom*>::iterator it = atomsWithUnwind.begin(); it != atomsWithUnwind.end(); ++it) {
973 const ld::Atom* atom = *it;
974 uint32_t lastOffset = 0;
975 uint32_t lastCUE = 0;
976 bool first = true;
977 for (ld::Atom::UnwindInfo::iterator uit=atom->beginUnwind(); uit != atom->endUnwind(); ++uit) {
978 if ( !first ) {
979 makeCompactUnwindAtom(opts, state, atom, lastOffset, uit->startOffset, lastCUE);
980 }
981 lastOffset = uit->startOffset;
982 lastCUE = uit->unwindInfo;
983 first = false;
984 }
985 makeCompactUnwindAtom(opts, state, atom, lastOffset, (uint32_t)atom->size(), lastCUE);
986 }
987 }
988
989
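// Pass entry point: object files (-r) get per-function __compact_unwind entries; final
// linked images that need unwind info get the two-level __TEXT,__unwind_info table.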
990 void doPass(const Options& opts, ld::Internal& state)
991 {
992 if ( opts.outputKind() == Options::kObjectFile )
993 makeRelocateableCompactUnwindSection(opts, state);
994
995 else if ( opts.needsUnwindInfoSection() )
996 makeFinalLinkedImageCompactUnwindSection(opts, state);
997 }
998
999
1000 } // namespace compact_unwind
1001 } // namespace passes
1002 } // namespace ld