/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <dlfcn.h>
#include <mach/machine.h>
#include <mach-o/compact_unwind_encoding.h>

#include <vector>
#include <map>

#include "ld.hpp"
#include "compact_unwind.h"
#include "Architectures.hpp"
#include "MachOFileAbstraction.hpp"

namespace ld {
namespace passes {
namespace compact_unwind {


struct UnwindEntry {
    UnwindEntry(const ld::Atom* f, uint64_t a, uint32_t o, const ld::Atom* d,
                const ld::Atom* l, const ld::Atom* p, uint32_t en)
        : func(f), fde(d), lsda(l), personalityPointer(p), funcTentAddress(a),
          functionOffset(o), encoding(en) { }
    const ld::Atom*             func;
    const ld::Atom*             fde;
    const ld::Atom*             lsda;
    const ld::Atom*             personalityPointer;
    uint64_t                    funcTentAddress;
    uint32_t                    functionOffset;
    compact_unwind_encoding_t   encoding;
};

struct LSDAEntry {
    const ld::Atom*     func;
    const ld::Atom*     lsda;
};

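// UnwindInfoAtom synthesizes the entire __TEXT,__unwind_info section of a final linked
// image: a section header, a table of the most common encodings, a personality-pointer
// array, a first-level function-offset index, an LSDA index, and a series of regular or
// compressed second-level pages.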
template <typename A>
class UnwindInfoAtom : public ld::Atom {
public:
    UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize);
    ~UnwindInfoAtom();

    virtual const ld::File*         file() const                    { return NULL; }
    virtual const char*             name() const                    { return "compact unwind info"; }
    virtual uint64_t                size() const                    { return _headerSize + _pagesSize; }
    virtual uint64_t                objectAddress() const           { return 0; }
    virtual void                    copyRawContent(uint8_t buffer[]) const;
    virtual void                    setScope(Scope)                 { }
    virtual ld::Fixup::iterator     fixupsBegin() const             { return (ld::Fixup*)&_fixups[0]; }
    virtual ld::Fixup::iterator     fixupsEnd() const               { return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
    typedef typename A::P           P;
    typedef typename A::P::E        E;
    typedef typename A::P::uint_t   pint_t;

    typedef macho_unwind_info_compressed_second_level_page_header<P> CSLP;

    bool            encodingMeansUseDwarf(compact_unwind_encoding_t enc);
    void            compressDuplicates(const std::vector<UnwindEntry>& entries,
                                       std::vector<UnwindEntry>& uniqueEntries);
    void            makePersonalityIndexes(std::vector<UnwindEntry>& entries,
                                           std::map<const ld::Atom*, uint32_t>& personalityIndexMap);
    void            findCommonEncoding(const std::vector<UnwindEntry>& entries,
                                       std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings);
    void            makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex,
                                  std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap);
    unsigned int    makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
                                                  const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
                                                  uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd);
    unsigned int    makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
                                               unsigned int endIndex, uint8_t*& pageEnd);
    void            addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc);
    void            addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde);
    void            addRegularAddressFixup(uint32_t offset, const ld::Atom* func);
    void            addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde);
    void            addImageOffsetFixup(uint32_t offset, const ld::Atom* targ);
    void            addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend);

    uint8_t*                    _pagesForDelete;
    uint8_t*                    _pages;
    uint64_t                    _pagesSize;
    uint8_t*                    _header;
    uint64_t                    _headerSize;
    std::vector<ld::Fixup>      _fixups;

    static bool                 _s_log;
    static ld::Section          _s_section;
};

template <typename A>
bool UnwindInfoAtom<A>::_s_log = false;

template <typename A>
ld::Section UnwindInfoAtom<A>::_s_section("__TEXT", "__unwind_info", ld::Section::typeUnwindInfo);

template <typename A>
UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize)
    : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
               ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
               symbolTableNotIn, false, false, false, ld::Atom::Alignment(0)),
      _pagesForDelete(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
{
    // build new compressed list by removing entries where next function has same encoding
    std::vector<UnwindEntry> uniqueEntries;
    compressDuplicates(entries, uniqueEntries);

    // reserve room so _fixups vector is not reallocated a bunch of times
    _fixups.reserve(uniqueEntries.size()*3);

    // build personality index, update encodings with personality index
    std::map<const ld::Atom*, uint32_t> personalityIndexMap;
    makePersonalityIndexes(uniqueEntries, personalityIndexMap);
    if ( personalityIndexMap.size() > 3 ) {
        warning("too many personality routines for compact unwind to encode");
        return;
    }

    // put the most common encodings into the common table, but at most 127 of them
    std::map<compact_unwind_encoding_t, unsigned int> commonEncodings;
    findCommonEncoding(uniqueEntries, commonEncodings);

    // build lsda index
    std::map<const ld::Atom*, uint32_t> lsdaIndexOffsetMap;
    std::vector<LSDAEntry> lsdaIndex;
    makeLsdaIndex(uniqueEntries, lsdaIndex, lsdaIndexOffsetMap);

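    // Strategy: second-level pages are filled back to front into a worst-case sized
    // buffer, then the header (common encodings, personality array, first-level index,
    // LSDA index) is built separately; copyRawContent() emits the header followed by
    // the pages, and all page fixups are slid by the final header size.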
    // calculate worst case size for all unwind info pages when allocating buffer
    const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
    assert(uniqueEntries.size() > 0);
    const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 1;
    _pagesForDelete = (uint8_t*)calloc(pageCount,4096);
    if ( _pagesForDelete == NULL ) {
        warning("could not allocate space for compact unwind info");
        return;
    }

    // make last second level page smaller so that all other second level pages can be page aligned
    uint32_t maxLastPageSize = 4096 - (ehFrameSize % 4096);
    uint32_t tailPad = 0;
    if ( maxLastPageSize < 128 ) {
        tailPad = maxLastPageSize;
        maxLastPageSize = 4096;
    }

    // fill in pages in reverse order
    const ld::Atom* secondLevelFirstFuncs[pageCount*3];
    uint8_t* secondLevelPagesStarts[pageCount*3];
    unsigned int endIndex = uniqueEntries.size();
    unsigned int secondLevelPageCount = 0;
    uint8_t* pageEnd = &_pagesForDelete[pageCount*4096];
    uint32_t pageSize = maxLastPageSize;
    while ( endIndex > 0 ) {
        endIndex = makeCompressedSecondLevelPage(uniqueEntries, commonEncodings, pageSize, endIndex, pageEnd);
        secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
        secondLevelFirstFuncs[secondLevelPageCount] = uniqueEntries[endIndex].func;
        ++secondLevelPageCount;
        pageSize = 4096;  // last page can be odd size, make rest up to 4096 bytes in size
    }
    _pages = pageEnd;
    _pagesSize = &_pagesForDelete[pageCount*4096] - pageEnd;


    // calculate section layout
    const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
    const uint32_t commonEncodingsArrayCount = commonEncodings.size();
    const uint32_t commonEncodingsArraySize = commonEncodingsArrayCount * sizeof(compact_unwind_encoding_t);
    const uint32_t personalityArraySectionOffset = commonEncodingsArraySectionOffset + commonEncodingsArraySize;
    const uint32_t personalityArrayCount = personalityIndexMap.size();
    const uint32_t personalityArraySize = personalityArrayCount * sizeof(uint32_t);
    const uint32_t indexSectionOffset = personalityArraySectionOffset + personalityArraySize;
    const uint32_t indexCount = secondLevelPageCount+1;
    const uint32_t indexSize = indexCount * sizeof(macho_unwind_info_section_header_index_entry<P>);
    const uint32_t lsdaIndexArraySectionOffset = indexSectionOffset + indexSize;
    const uint32_t lsdaIndexArrayCount = lsdaIndex.size();
    const uint32_t lsdaIndexArraySize = lsdaIndexArrayCount * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
    const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;

    // now that we know the size of the header, slide all existing fixups on the pages
    const int32_t fixupSlide = headerEndSectionOffset + (_pagesForDelete - _pages);
    for(std::vector<ld::Fixup>::iterator it = _fixups.begin(); it != _fixups.end(); ++it) {
        it->offsetInAtom += fixupSlide;
    }

    // allocate and fill in section header
    _headerSize = headerEndSectionOffset;
    _header = new uint8_t[_headerSize];
    bzero(_header, _headerSize);
    macho_unwind_info_section_header<P>* sectionHeader = (macho_unwind_info_section_header<P>*)_header;
    sectionHeader->set_version(UNWIND_SECTION_VERSION);
    sectionHeader->set_commonEncodingsArraySectionOffset(commonEncodingsArraySectionOffset);
    sectionHeader->set_commonEncodingsArrayCount(commonEncodingsArrayCount);
    sectionHeader->set_personalityArraySectionOffset(personalityArraySectionOffset);
    sectionHeader->set_personalityArrayCount(personalityArrayCount);
    sectionHeader->set_indexSectionOffset(indexSectionOffset);
    sectionHeader->set_indexCount(indexCount);

    // copy common encodings
    uint32_t* commonEncodingsTable = (uint32_t*)&_header[commonEncodingsArraySectionOffset];
    for (std::map<uint32_t, unsigned int>::iterator it=commonEncodings.begin(); it != commonEncodings.end(); ++it)
        E::set32(commonEncodingsTable[it->second], it->first);

    // make references for personality entries
    uint32_t* personalityArray = (uint32_t*)&_header[sectionHeader->personalityArraySectionOffset()];
    for (std::map<const ld::Atom*, unsigned int>::iterator it=personalityIndexMap.begin(); it != personalityIndexMap.end(); ++it) {
        uint32_t offset = (uint8_t*)&personalityArray[it->second-1] - _header;
        this->addImageOffsetFixup(offset, it->first);
    }

    // build first level index and references
    macho_unwind_info_section_header_index_entry<P>* indexTable = (macho_unwind_info_section_header_index_entry<P>*)&_header[indexSectionOffset];
    uint32_t refOffset;
    for (unsigned int i=0; i < secondLevelPageCount; ++i) {
        unsigned int reverseIndex = secondLevelPageCount - 1 - i;
        indexTable[i].set_functionOffset(0);
        indexTable[i].set_secondLevelPagesSectionOffset(secondLevelPagesStarts[reverseIndex]-_pages+headerEndSectionOffset);
        indexTable[i].set_lsdaIndexArraySectionOffset(lsdaIndexOffsetMap[secondLevelFirstFuncs[reverseIndex]]+lsdaIndexArraySectionOffset);
        refOffset = (uint8_t*)&indexTable[i] - _header;
        this->addImageOffsetFixup(refOffset, secondLevelFirstFuncs[reverseIndex]);
    }
    indexTable[secondLevelPageCount].set_functionOffset(0);
    indexTable[secondLevelPageCount].set_secondLevelPagesSectionOffset(0);
    indexTable[secondLevelPageCount].set_lsdaIndexArraySectionOffset(lsdaIndexArraySectionOffset+lsdaIndexArraySize);
    refOffset = (uint8_t*)&indexTable[secondLevelPageCount] - _header;
    this->addImageOffsetFixupPlusAddend(refOffset, entries.back().func, entries.back().func->size()+1);

    // build lsda references
    uint32_t lsdaEntrySectionOffset = lsdaIndexArraySectionOffset;
    for (std::vector<LSDAEntry>::iterator it = lsdaIndex.begin(); it != lsdaIndex.end(); ++it) {
        this->addImageOffsetFixup(lsdaEntrySectionOffset, it->func);
        this->addImageOffsetFixup(lsdaEntrySectionOffset+4, it->lsda);
        lsdaEntrySectionOffset += sizeof(unwind_info_section_header_lsda_index_entry);
    }

}

template <typename A>
UnwindInfoAtom<A>::~UnwindInfoAtom()
{
    free(_pagesForDelete);
    delete [] _header;      // _header is allocated with new[], so it must not be free()d
}

template <typename A>
void UnwindInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
{
    // content is in two parts
    memcpy(buffer, _header, _headerSize);
    memcpy(&buffer[_headerSize], _pages, _pagesSize);
}


template <>
bool UnwindInfoAtom<x86>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
    return ((enc & UNWIND_X86_MODE_MASK) == UNWIND_X86_MODE_DWARF);
}

template <>
bool UnwindInfoAtom<x86_64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
    return ((enc & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
}

template <>
bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
    return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
}

template <typename A>
void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
{
    // build new list removing entries where next function has same encoding
    uniqueEntries.reserve(entries.size());
    UnwindEntry last(NULL, 0, 0, NULL, NULL, NULL, 0xFFFFFFFF);
    for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
        const UnwindEntry& next = *it;
        bool newNeedsDwarf = encodingMeansUseDwarf(next.encoding);
        // remove entries which have same encoding and personalityPointer as last one
        if ( newNeedsDwarf || (next.encoding != last.encoding) || (next.personalityPointer != last.personalityPointer)
                           || (next.lsda != NULL) || (last.lsda != NULL) ) {
            uniqueEntries.push_back(next);
        }
        last = next;
    }
    if (_s_log) fprintf(stderr, "compressDuplicates() entries.size()=%lu, uniqueEntries.size()=%lu\n",
                        entries.size(), uniqueEntries.size());
}

template <typename A>
void UnwindInfoAtom<A>::makePersonalityIndexes(std::vector<UnwindEntry>& entries, std::map<const ld::Atom*, uint32_t>& personalityIndexMap)
{
    for(std::vector<UnwindEntry>::iterator it=entries.begin(); it != entries.end(); ++it) {
        if ( it->personalityPointer != NULL ) {
            std::map<const ld::Atom*, uint32_t>::iterator pos = personalityIndexMap.find(it->personalityPointer);
            if ( pos == personalityIndexMap.end() ) {
                const uint32_t nextIndex = personalityIndexMap.size() + 1;
                personalityIndexMap[it->personalityPointer] = nextIndex;
            }
            uint32_t personalityIndex = personalityIndexMap[it->personalityPointer];
            it->encoding |= (personalityIndex << (__builtin_ctz(UNWIND_PERSONALITY_MASK)) );
        }
    }
    if (_s_log) fprintf(stderr, "makePersonalityIndexes() %lu personality routines used\n", personalityIndexMap.size());
}


template <typename A>
void UnwindInfoAtom<A>::findCommonEncoding(const std::vector<UnwindEntry>& entries,
                                           std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings)
{
    // scan infos to get frequency counts for each encoding
    std::map<compact_unwind_encoding_t, unsigned int> encodingsUsed;
    unsigned int mostCommonEncodingUsageCount = 0;
    for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
        // never put dwarf into common table
        if ( encodingMeansUseDwarf(it->encoding) )
            continue;
        std::map<compact_unwind_encoding_t, unsigned int>::iterator pos = encodingsUsed.find(it->encoding);
        if ( pos == encodingsUsed.end() ) {
            encodingsUsed[it->encoding] = 1;
        }
        else {
            encodingsUsed[it->encoding] += 1;
            if ( mostCommonEncodingUsageCount < encodingsUsed[it->encoding] )
                mostCommonEncodingUsageCount = encodingsUsed[it->encoding];
        }
    }
    // put the most common encodings into the common table, but at most 127 of them
    for(unsigned int usages=mostCommonEncodingUsageCount; usages > 1; --usages) {
        for (std::map<compact_unwind_encoding_t, unsigned int>::iterator euit=encodingsUsed.begin(); euit != encodingsUsed.end(); ++euit) {
            if ( euit->second == usages ) {
                unsigned int sz = commonEncodings.size();
                if ( sz < 127 ) {
                    commonEncodings[euit->first] = sz;
                }
            }
        }
    }
    if (_s_log) fprintf(stderr, "findCommonEncoding() %lu common encodings found\n", commonEncodings.size());
}

template <typename A>
void UnwindInfoAtom<A>::makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex, std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap)
{
    for(std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
        lsdaIndexOffsetMap[it->func] = lsdaIndex.size() * sizeof(unwind_info_section_header_lsda_index_entry);
        if ( it->lsda != NULL ) {
            LSDAEntry entry;
            entry.func = it->func;
            entry.lsda = it->lsda;
            lsdaIndex.push_back(entry);
        }
    }
    if (_s_log) fprintf(stderr, "makeLsdaIndex() %lu LSDAs found\n", lsdaIndex.size());
}

template <>
void UnwindInfoAtom<x86>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}



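// A regular second-level page stores one unwind_info_regular_second_level_entry (32-bit
// function offset plus 32-bit encoding) per function. A compressed page stores a single
// 32-bit word per function: the low 24 bits are the function's offset from the page's
// first function and the high 8 bits are an index into the encodings tables, so it packs
// roughly twice as many entries into a 4KB page.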
template <typename A>
unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
                                                           unsigned int endIndex, uint8_t*& pageEnd)
{
    const unsigned int maxEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
    const unsigned int entriesToAdd = ((endIndex > maxEntriesPerPage) ? maxEntriesPerPage : endIndex);
    uint8_t* pageStart = pageEnd
                         - entriesToAdd*sizeof(unwind_info_regular_second_level_entry)
                         - sizeof(unwind_info_regular_second_level_page_header);
    macho_unwind_info_regular_second_level_page_header<P>* page = (macho_unwind_info_regular_second_level_page_header<P>*)pageStart;
    page->set_kind(UNWIND_SECOND_LEVEL_REGULAR);
    page->set_entryPageOffset(sizeof(macho_unwind_info_regular_second_level_page_header<P>));
    page->set_entryCount(entriesToAdd);
    macho_unwind_info_regular_second_level_entry<P>* entryTable = (macho_unwind_info_regular_second_level_entry<P>*)(pageStart + page->entryPageOffset());
    for (unsigned int i=0; i < entriesToAdd; ++i) {
        const UnwindEntry& info = uniqueInfos[endIndex-entriesToAdd+i];
        entryTable[i].set_functionOffset(0);
        entryTable[i].set_encoding(info.encoding);
        // add fixup for address part of entry
        uint32_t offset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
        this->addRegularAddressFixup(offset, info.func);
        if ( encodingMeansUseDwarf(info.encoding) ) {
            // add fixup for dwarf offset part of page specific encoding
            uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pagesForDelete;
            this->addRegularFDEOffsetFixup(encOffset, info.fde);
        }
    }
    if (_s_log) fprintf(stderr, "regular page with %u entries\n", entriesToAdd);
    pageEnd = pageStart;
    return endIndex - entriesToAdd;
}

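// Encoding indexes used by compressed pages are drawn from a single 8-bit space: indexes
// below commonEncodings.size() refer to the section-wide common encodings table, and the
// rest refer to this page's own encodings array. That is why findCommonEncoding() caps
// the common table at 127 entries and makeCompressedSecondLevelPage() stops at index 255.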
template <typename A>
unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
                                                              const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
                                                              uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd)
{
    if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(pageSize=%u, endIndex=%u)\n", pageSize, endIndex);
    // first pass calculates how many compressed entries we could fit in this sized page
    // keep adding entries to page until:
    //  1) encoding table plus entry table plus header exceed page size
    //  2) the file offset delta from the first to last function > 24 bits
    //  3) custom encoding index reaches 255
    //  4) run out of uniqueInfos to encode
    std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
    uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
    std::vector<uint8_t> encodingIndexes;
    int index = endIndex-1;
    int entryCount = 0;
    uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
    bool canDo = true;
    while ( canDo && (index >= 0) ) {
        const UnwindEntry& info = uniqueInfos[index--];
        // compute encoding index
        unsigned int encodingIndex;
        std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
        if ( pos != commonEncodings.end() ) {
            encodingIndex = pos->second;
        }
        else {
            // no common entry, so add one on this page
            uint32_t encoding = info.encoding;
            if ( encodingMeansUseDwarf(encoding) ) {
                // make a unique pseudo encoding so this dwarf entry gets its own encoding slot
                encoding += (index+1);
            }
            std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
            if ( ppos != pageSpecificEncodings.end() ) {
                encodingIndex = ppos->second;   // reuse the page-specific encoding added earlier
            }
            else {
                encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
                if ( encodingIndex <= 255 ) {
                    pageSpecificEncodings[encoding] = encodingIndex;
                    if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): pageSpecificEncodings[%d]=0x%08X\n", encodingIndex, encoding);
                }
                else {
                    canDo = false; // case 3)
                    if (_s_log) fprintf(stderr, "end of compressed page with %u entries, %lu custom encodings because too many custom encodings\n",
                                        entryCount, pageSpecificEncodings.size());
                }
            }
        }
        if ( canDo )
            encodingIndexes.push_back(encodingIndex);
        // compute function offset
        uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
        if ( funcOffsetWithInPage > 0x00FFFF00 ) {
            // don't use 0x00FFFFFF because addresses may vary after atoms are laid out again
            canDo = false; // case 2)
            if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
        }
        else {
            ++entryCount;
        }
        // check room for entry
        if ( (pageSpecificEncodings.size()+entryCount) >= space4 ) {
            canDo = false; // case 1)
            --entryCount;
            if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
        }
        //if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
    }

    // check for cases where it would be better to use a regular (non-compressed) page
    const unsigned int compressPageUsed = sizeof(unwind_info_compressed_second_level_page_header)
                                          + pageSpecificEncodings.size()*sizeof(uint32_t)
                                          + entryCount*sizeof(uint32_t);
    if ( (compressPageUsed < (pageSize-4)) && (index >= 0) ) {
        const int regularEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
        if ( entryCount < regularEntriesPerPage ) {
            return makeRegularSecondLevelPage(uniqueInfos, pageSize, endIndex, pageEnd);
        }
    }

    // check if we need any padding because adding another entry would take 8 bytes but only have room for 4
    uint32_t pad = 0;
    if ( compressPageUsed == (pageSize-4) )
        pad = 4;

    // second pass fills in page
    uint8_t* pageStart = pageEnd - compressPageUsed - pad;
    CSLP* page = (CSLP*)pageStart;
    page->set_kind(UNWIND_SECOND_LEVEL_COMPRESSED);
    page->set_entryPageOffset(sizeof(CSLP));
    page->set_entryCount(entryCount);
    page->set_encodingsPageOffset(page->entryPageOffset()+entryCount*sizeof(uint32_t));
    page->set_encodingsCount(pageSpecificEncodings.size());
    uint32_t* const encodingsArray = (uint32_t*)&pageStart[page->encodingsPageOffset()];
    // fill in entry table
    uint32_t* const entriesArray = (uint32_t*)&pageStart[page->entryPageOffset()];
    const ld::Atom* firstFunc = uniqueInfos[endIndex-entryCount].func;
    for(unsigned int i=endIndex-entryCount; i < endIndex; ++i) {
        const UnwindEntry& info = uniqueInfos[i];
        uint8_t encodingIndex;
        if ( encodingMeansUseDwarf(info.encoding) ) {
            // dwarf entries are always in page specific encodings
            encodingIndex = pageSpecificEncodings[info.encoding+i];
        }
        else {
            std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
            if ( pos != commonEncodings.end() )
                encodingIndex = pos->second;
            else
                encodingIndex = pageSpecificEncodings[info.encoding];
        }
        uint32_t entryIndex = i - endIndex + entryCount;
        E::set32(entriesArray[entryIndex], encodingIndex << 24);
        // add fixup for address part of entry
        uint32_t offset = (uint8_t*)(&entriesArray[entryIndex]) - _pagesForDelete;
        this->addCompressedAddressOffsetFixup(offset, info.func, firstFunc);
        if ( encodingMeansUseDwarf(info.encoding) ) {
            // add fixup for dwarf offset part of page specific encoding
            uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pagesForDelete;
            this->addCompressedEncodingFixup(encOffset, info.fde);
        }
    }
    // fill in encodings table
    for(std::map<uint32_t, unsigned int>::const_iterator it = pageSpecificEncodings.begin(); it != pageSpecificEncodings.end(); ++it) {
        E::set32(encodingsArray[it->second-commonEncodings.size()], it->first);
    }

    if (_s_log) fprintf(stderr, "compressed page with %u entries, %lu custom encodings\n", entryCount, pageSpecificEncodings.size());

    // update pageEnd
    pageEnd = pageStart;
    return endIndex-entryCount;  // endIndex for next page
}



static uint64_t calculateEHFrameSize(const ld::Internal& state)
{
    uint64_t size = 0;
    for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() == ld::Section::typeCFI ) {
            for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
                size += (*ait)->size();
            }
        }
    }
    return size;
}

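// Walk every atom in layout order, assigning each a tentative address. These addresses
// are only used to estimate how far apart functions will land, so that
// makeCompressedSecondLevelPage() can tell when a 24-bit intra-page offset would overflow.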
static void getAllUnwindInfos(const ld::Internal& state, std::vector<UnwindEntry>& entries)
{
    uint64_t address = 0;
    for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            // adjust address for atom alignment
            uint64_t alignment = 1 << atom->alignment().powerOf2;
            uint64_t currentModulus = (address % alignment);
            uint64_t requiredModulus = atom->alignment().modulus;
            if ( currentModulus != requiredModulus ) {
                if ( requiredModulus > currentModulus )
                    address += requiredModulus-currentModulus;
                else
                    address += requiredModulus+alignment-currentModulus;
            }

            if ( atom->beginUnwind() == atom->endUnwind() ) {
                // add a no-unwind-info entry for code in the __TEXT segment that has no unwind info
                if ( (atom->section().type() == ld::Section::typeCode) && (atom->size() != 0) ) {
                    entries.push_back(UnwindEntry(atom, address, 0, NULL, NULL, NULL, 0));
                }
            }
            else {
                // atom has unwind info(s), add entry for each
                const ld::Atom* fde = NULL;
                const ld::Atom* lsda = NULL;
                const ld::Atom* personalityPointer = NULL;
                for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                    switch ( fit->kind ) {
                        case ld::Fixup::kindNoneGroupSubordinateFDE:
                            assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                            fde = fit->u.target;
                            break;
                        case ld::Fixup::kindNoneGroupSubordinateLSDA:
                            assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                            lsda = fit->u.target;
                            break;
                        case ld::Fixup::kindNoneGroupSubordinatePersonality:
                            assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                            personalityPointer = fit->u.target;
                            assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
                            break;
                        default:
                            break;
                    }
                }
                if ( fde != NULL ) {
                    // find CIE for this FDE
                    const ld::Atom* cie = NULL;
                    for (ld::Fixup::iterator fit = fde->fixupsBegin(), end=fde->fixupsEnd(); fit != end; ++fit) {
                        if ( fit->kind != ld::Fixup::kindSubtractTargetAddress )
                            continue;
                        if ( fit->binding != ld::Fixup::bindingDirectlyBound )
                            continue;
                        cie = fit->u.target;
                        // CIE is only direct subtracted target in FDE
                        assert(cie->section().type() == ld::Section::typeCFI);
                        break;
                    }
                    if ( cie != NULL ) {
                        // the CIE can have just one fixup - to the personality pointer
                        for (ld::Fixup::iterator fit = cie->fixupsBegin(), end=cie->fixupsEnd(); fit != end; ++fit) {
                            if ( fit->kind == ld::Fixup::kindSetTargetAddress ) {
                                switch ( fit->binding ) {
                                    case ld::Fixup::bindingsIndirectlyBound:
                                        personalityPointer = state.indirectBindingTable[fit->u.bindingIndex];
                                        assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
                                        break;
                                    case ld::Fixup::bindingDirectlyBound:
                                        personalityPointer = fit->u.target;
                                        assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
                                        break;
                                    default:
                                        break;
                                }
                            }
                        }
                    }
                }
                for ( ld::Atom::UnwindInfo::iterator uit = atom->beginUnwind(); uit != atom->endUnwind(); ++uit ) {
                    entries.push_back(UnwindEntry(atom, address, uit->startOffset, fde, lsda, personalityPointer, uit->unwindInfo));
                }
            }
            address += atom->size();
        }
    }
}

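// For a final linked image, every function's unwind entry is collected and folded into a
// single UnwindInfoAtom, which becomes the __TEXT,__unwind_info section.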
static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::Internal& state)
{
    // walk every atom and get its unwind info
    std::vector<UnwindEntry> entries;
    entries.reserve(64);
    getAllUnwindInfos(state, entries);

    // don't generate an __unwind_info section if there is no code in this linkage unit
    if ( entries.size() == 0 )
        return;

    // calculate size of __eh_frame section, so __unwind_info can go before it and page align
    uint64_t ehFrameSize = calculateEHFrameSize(state);

    // create atom that contains the whole compact unwind table
    switch ( opts.architecture() ) {
#if SUPPORT_ARCH_x86_64
        case CPU_TYPE_X86_64:
            state.addAtom(*new UnwindInfoAtom<x86_64>(entries, ehFrameSize));
            break;
#endif
#if SUPPORT_ARCH_i386
        case CPU_TYPE_I386:
            state.addAtom(*new UnwindInfoAtom<x86>(entries, ehFrameSize));
            break;
#endif
#if SUPPORT_ARCH_arm64
        case CPU_TYPE_ARM64:
            state.addAtom(*new UnwindInfoAtom<arm64>(entries, ehFrameSize));
            break;
#endif
        default:
            assert(0 && "no compact unwind for arch");
    }
}

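// In relocatable (-r) output the linker does not build __unwind_info. Instead each range
// of a function covered by one compact unwind encoding is emitted as a
// macho_compact_unwind_entry in a __LD,__compact_unwind section, with the function,
// personality and LSDA fields filled in via relocations, so a later final link can read
// them back.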
template <typename A>
class CompactUnwindAtom : public ld::Atom {
public:
    CompactUnwindAtom(ld::Internal& state, const ld::Atom* funcAtom,
                      uint32_t startOffset, uint32_t len, uint32_t cui);
    ~CompactUnwindAtom() {}

    virtual const ld::File*         file() const                    { return NULL; }
    virtual const char*             name() const                    { return "compact unwind info"; }
    virtual uint64_t                size() const                    { return sizeof(macho_compact_unwind_entry<P>); }
    virtual uint64_t                objectAddress() const           { return 0; }
    virtual void                    copyRawContent(uint8_t buffer[]) const;
    virtual void                    setScope(Scope)                 { }
    virtual ld::Fixup::iterator     fixupsBegin() const             { return (ld::Fixup*)&_fixups[0]; }
    virtual ld::Fixup::iterator     fixupsEnd() const               { return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
    typedef typename A::P           P;
    typedef typename A::P::E        E;
    typedef typename A::P::uint_t   pint_t;


    const ld::Atom*                 _atom;
    const uint32_t                  _startOffset;
    const uint32_t                  _len;
    const uint32_t                  _compactUnwindInfo;
    std::vector<ld::Fixup>          _fixups;

    static ld::Fixup::Kind          _s_pointerKind;
    static ld::Fixup::Kind          _s_pointerStoreKind;
    static ld::Section              _s_section;
};


template <typename A>
ld::Section CompactUnwindAtom<A>::_s_section("__LD", "__compact_unwind", ld::Section::typeDebug);

template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
#if SUPPORT_ARCH_arm64
template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
#endif

template <typename A>
CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state, const ld::Atom* funcAtom, uint32_t startOffset,
                                        uint32_t len, uint32_t cui)
    : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
               ld::Atom::scopeTranslationUnit, ld::Atom::typeUnclassified,
               symbolTableNotIn, false, false, false, ld::Atom::Alignment(log2(sizeof(pint_t)))),
      _atom(funcAtom), _startOffset(startOffset), _len(len), _compactUnwindInfo(cui)
{
    _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, funcAtom));
    _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k2of3, ld::Fixup::kindAddAddend, _startOffset));
    _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k3of3, _s_pointerKind));
    // see if atom has subordinate personality function or lsda
    for (ld::Fixup::iterator fit = funcAtom->fixupsBegin(), end=funcAtom->fixupsEnd(); fit != end; ++fit) {
        switch ( fit->kind ) {
            case ld::Fixup::kindNoneGroupSubordinatePersonality:
                assert(fit->binding == ld::Fixup::bindingsIndirectlyBound);
                _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::personalityFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, state.indirectBindingTable[fit->u.bindingIndex]));
                break;
            case ld::Fixup::kindNoneGroupSubordinateLSDA:
                assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::lsdaFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, fit->u.target));
                break;
            default:
                break;
        }
    }

}

template <typename A>
void CompactUnwindAtom<A>::copyRawContent(uint8_t buffer[]) const
{
    macho_compact_unwind_entry<P>* buf = (macho_compact_unwind_entry<P>*)buffer;
    buf->set_codeStart(0);
    buf->set_codeLen(_len);
    buf->set_compactUnwindInfo(_compactUnwindInfo);
    buf->set_personality(0);
    buf->set_lsda(0);
}

static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, const ld::Atom* atom,
                                  uint32_t startOffset, uint32_t endOffset, uint32_t cui)
{
    switch ( opts.architecture() ) {
#if SUPPORT_ARCH_x86_64
        case CPU_TYPE_X86_64:
            state.addAtom(*new CompactUnwindAtom<x86_64>(state, atom, startOffset, endOffset-startOffset, cui));
            break;
#endif
#if SUPPORT_ARCH_i386
        case CPU_TYPE_I386:
            state.addAtom(*new CompactUnwindAtom<x86>(state, atom, startOffset, endOffset-startOffset, cui));
            break;
#endif
#if SUPPORT_ARCH_arm64
        case CPU_TYPE_ARM64:
            state.addAtom(*new CompactUnwindAtom<arm64>(state, atom, startOffset, endOffset-startOffset, cui));
            break;
#endif
    }
}

static void makeRelocateableCompactUnwindSection(const Options& opts, ld::Internal& state)
{
    // can't add CompactUnwindAtom atoms while iterating, so pre-scan
    std::vector<const ld::Atom*> atomsWithUnwind;
    for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            if ( atom->beginUnwind() != atom->endUnwind() )
                atomsWithUnwind.push_back(atom);
        }
    }
    // make one CompactUnwindAtom for each compact unwind range in each atom
    for (std::vector<const ld::Atom*>::iterator it = atomsWithUnwind.begin(); it != atomsWithUnwind.end(); ++it) {
        const ld::Atom* atom = *it;
        uint32_t lastOffset = 0;
        uint32_t lastCUE = 0;
        bool first = true;
        for (ld::Atom::UnwindInfo::iterator uit=atom->beginUnwind(); uit != atom->endUnwind(); ++uit) {
            if ( !first ) {
                makeCompactUnwindAtom(opts, state, atom, lastOffset, uit->startOffset, lastCUE);
            }
            lastOffset = uit->startOffset;
            lastCUE = uit->unwindInfo;
            first = false;
        }
        makeCompactUnwindAtom(opts, state, atom, lastOffset, (uint32_t)atom->size(), lastCUE);
    }
}

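// Entry point for this pass: object-file (-r) output gets one __LD,__compact_unwind
// entry per unwind range, while a final linked image that needs unwind info gets a
// single synthesized __TEXT,__unwind_info section.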
void doPass(const Options& opts, ld::Internal& state)
{
    if ( opts.outputKind() == Options::kObjectFile )
        makeRelocateableCompactUnwindSection(opts, state);

    else if ( opts.needsUnwindInfoSection() )
        makeFinalLinkedImageCompactUnwindSection(opts, state);
}


} // namespace compact_unwind
} // namespace passes
} // namespace ld