/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <dlfcn.h>
#include <string.h>     // strcmp()
#include <mach/machine.h>
#include <mach-o/compact_unwind_encoding.h>

#include <vector>
#include <map>
#include <algorithm>    // std::remove()

#include "ld.hpp"
#include "compact_unwind.h"
#include "Architectures.hpp"
#include "MachOFileAbstraction.hpp"


namespace ld {
namespace passes {
namespace compact_unwind {

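//
// This pass builds compact unwind information. For a final linked image it
// synthesizes one __TEXT,__unwind_info atom from the per-function unwind
// encodings; for -r (object file) output it instead emits one
// __LD,__compact_unwind entry per unwind range (see doPass() at the bottom
// of this file).
//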
struct UnwindEntry {
    UnwindEntry(const ld::Atom* f, uint64_t a, uint32_t o, const ld::Atom* d,
                const ld::Atom* l, const ld::Atom* p, uint32_t en)
        : func(f), fde(d), lsda(l), personalityPointer(p), funcTentAddress(a),
          functionOffset(o), encoding(en) { }
    const ld::Atom*             func;
    const ld::Atom*             fde;
    const ld::Atom*             lsda;
    const ld::Atom*             personalityPointer;
    uint64_t                    funcTentAddress;
    uint32_t                    functionOffset;
    compact_unwind_encoding_t   encoding;
};

struct LSDAEntry {
    const ld::Atom*     func;
    const ld::Atom*     lsda;
};


template <typename A>
class UnwindInfoAtom : public ld::Atom {
public:
    UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize);
    ~UnwindInfoAtom();

    virtual const ld::File*         file() const            { return NULL; }
    virtual const char*             name() const            { return "compact unwind info"; }
    virtual uint64_t                size() const            { return _headerSize+_pagesSize; }
    virtual uint64_t                objectAddress() const   { return 0; }
    virtual void                    copyRawContent(uint8_t buffer[]) const;
    virtual void                    setScope(Scope)         { }
    virtual ld::Fixup::iterator     fixupsBegin() const     { return (ld::Fixup*)&_fixups[0]; }
    virtual ld::Fixup::iterator     fixupsEnd() const       { return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
    typedef typename A::P           P;
    typedef typename A::P::E        E;
    typedef typename A::P::uint_t   pint_t;

    typedef macho_unwind_info_compressed_second_level_page_header<P> CSLP;

    bool            encodingMeansUseDwarf(compact_unwind_encoding_t enc);
    void            compressDuplicates(const std::vector<UnwindEntry>& entries,
                                       std::vector<UnwindEntry>& uniqueEntries);
    void            makePersonalityIndexes(std::vector<UnwindEntry>& entries,
                                           std::map<const ld::Atom*, uint32_t>& personalityIndexMap);
    void            findCommonEncoding(const std::vector<UnwindEntry>& entries,
                                       std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings);
    void            makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex,
                                  std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap);
    unsigned int    makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
                                                  const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
                                                  uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd);
    unsigned int    makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
                                               unsigned int endIndex, uint8_t*& pageEnd);
    void            addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc);
    void            addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde);
    void            addRegularAddressFixup(uint32_t offset, const ld::Atom* func);
    void            addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde);
    void            addImageOffsetFixup(uint32_t offset, const ld::Atom* targ);
    void            addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend);

    uint8_t*                _pagesForDelete;
    uint8_t*                _pageAlignedPages;
    uint8_t*                _pages;
    uint64_t                _pagesSize;
    uint8_t*                _header;
    uint64_t                _headerSize;
    std::vector<ld::Fixup>  _fixups;

    static bool             _s_log;
    static ld::Section      _s_section;
};

template <typename A>
bool UnwindInfoAtom<A>::_s_log = false;

template <typename A>
ld::Section UnwindInfoAtom<A>::_s_section("__TEXT", "__unwind_info", ld::Section::typeUnwindInfo);


template <typename A>
UnwindInfoAtom<A>::UnwindInfoAtom(const std::vector<UnwindEntry>& entries, uint64_t ehFrameSize)
    : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
               ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
               symbolTableNotIn, false, false, false, ld::Atom::Alignment(2)),
      _pagesForDelete(NULL), _pageAlignedPages(NULL), _pages(NULL), _pagesSize(0), _header(NULL), _headerSize(0)
{
    // build new compressed list by removing entries where next function has same encoding
    std::vector<UnwindEntry> uniqueEntries;
    compressDuplicates(entries, uniqueEntries);

    // reserve room so _fixups vector is not reallocated a bunch of times
    _fixups.reserve(uniqueEntries.size()*3);

    // build personality index, update encodings with personality index
    std::map<const ld::Atom*, uint32_t> personalityIndexMap;
    makePersonalityIndexes(uniqueEntries, personalityIndexMap);
    if ( personalityIndexMap.size() > 3 ) {
        throw "too many personality routines for compact unwind to encode";
    }

    // put the most common encodings into the common table, but at most 127 of them
    std::map<compact_unwind_encoding_t, unsigned int> commonEncodings;
    findCommonEncoding(uniqueEntries, commonEncodings);

    // build lsda index
    std::map<const ld::Atom*, uint32_t> lsdaIndexOffsetMap;
    std::vector<LSDAEntry> lsdaIndex;
    makeLsdaIndex(uniqueEntries, lsdaIndex, lsdaIndexOffsetMap);

    // calculate worst case size for all unwind info pages when allocating buffer
    const unsigned int entriesPerRegularPage = (4096-sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
    assert(uniqueEntries.size() > 0);
    const unsigned int pageCount = ((uniqueEntries.size() - 1)/entriesPerRegularPage) + 2;
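    // Worked example (assuming the usual 8-byte regular entry and 8-byte regular page header):
    // entriesPerRegularPage is (4096-8)/8 = 511, so e.g. 1000 unique entries give
    // pageCount = (999/511)+2 = 3. One extra page is calloc'ed below so the buffer
    // can be rounded up to a page boundary.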
    _pagesForDelete = (uint8_t*)calloc(pageCount+1, 4096);
    if ( _pagesForDelete == NULL ) {
        warning("could not allocate space for compact unwind info");
        return;
    }
    _pageAlignedPages = (uint8_t*)((((uintptr_t)_pagesForDelete) + 4095) & -4096);

    // make last second level page smaller so that all other second level pages can be page aligned
    uint32_t maxLastPageSize = 4096 - (ehFrameSize % 4096);
    uint32_t tailPad = 0;
    if ( maxLastPageSize < 128 ) {
        tailPad = maxLastPageSize;
        maxLastPageSize = 4096;
    }
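    // Pages are filled from the end of the buffer backwards (see the loop below), so only
    // the final, highest-addressed page may be an odd size; if that odd size would be under
    // 128 bytes it is not worth keeping, so a full page is used and the remainder becomes
    // trailing pad.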

    // fill in pages in reverse order
    const ld::Atom* secondLevelFirstFuncs[pageCount*3];
    uint8_t* secondLevelPagesStarts[pageCount*3];
    unsigned int endIndex = uniqueEntries.size();
    unsigned int secondLevelPageCount = 0;
    uint8_t* pageEnd = &_pageAlignedPages[pageCount*4096];
    uint32_t pageSize = maxLastPageSize;
    while ( endIndex > 0 ) {
        endIndex = makeCompressedSecondLevelPage(uniqueEntries, commonEncodings, pageSize, endIndex, pageEnd);
        secondLevelPagesStarts[secondLevelPageCount] = pageEnd;
        secondLevelFirstFuncs[secondLevelPageCount] = uniqueEntries[endIndex].func;
        ++secondLevelPageCount;
        // if this requires more than one page, align so that next starts on page boundary
        if ( (pageSize != 4096) && (endIndex > 0) ) {
            pageEnd = (uint8_t*)((uintptr_t)(pageEnd) & -4096);
            pageSize = 4096;  // last page can be odd size, make rest up to 4096 bytes in size
        }
    }
    _pages = pageEnd;
    _pagesSize = &_pageAlignedPages[pageCount*4096] - pageEnd;

    // calculate section layout
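    // Resulting __unwind_info layout (offsets computed below):
    //   unwind_info_section_header
    //   common encodings array
    //   personality pointer array (image offsets)
    //   first-level index (one entry per second-level page, plus a terminating sentinel)
    //   LSDA index array
    //   second-level pages (regular and/or compressed)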
    const uint32_t commonEncodingsArraySectionOffset = sizeof(macho_unwind_info_section_header<P>);
    const uint32_t commonEncodingsArrayCount = commonEncodings.size();
    const uint32_t commonEncodingsArraySize = commonEncodingsArrayCount * sizeof(compact_unwind_encoding_t);
    const uint32_t personalityArraySectionOffset = commonEncodingsArraySectionOffset + commonEncodingsArraySize;
    const uint32_t personalityArrayCount = personalityIndexMap.size();
    const uint32_t personalityArraySize = personalityArrayCount * sizeof(uint32_t);
    const uint32_t indexSectionOffset = personalityArraySectionOffset + personalityArraySize;
    const uint32_t indexCount = secondLevelPageCount+1;
    const uint32_t indexSize = indexCount * sizeof(macho_unwind_info_section_header_index_entry<P>);
    const uint32_t lsdaIndexArraySectionOffset = indexSectionOffset + indexSize;
    const uint32_t lsdaIndexArrayCount = lsdaIndex.size();
    const uint32_t lsdaIndexArraySize = lsdaIndexArrayCount * sizeof(macho_unwind_info_section_header_lsda_index_entry<P>);
    const uint32_t headerEndSectionOffset = lsdaIndexArraySectionOffset + lsdaIndexArraySize;

    // now that we know the size of the header, slide all existing fixups on the pages
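    // Fixup offsets so far are relative to _pageAlignedPages, but the atom's content is the
    // header followed only by the used tail of the page buffer (_pages), so rebase each offset
    // by headerEndSectionOffset minus the unused prefix (_pages - _pageAlignedPages).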
    const int32_t fixupSlide = headerEndSectionOffset + (_pageAlignedPages - _pages);
    for (std::vector<ld::Fixup>::iterator it = _fixups.begin(); it != _fixups.end(); ++it) {
        it->offsetInAtom += fixupSlide;
    }

    // allocate and fill in section header
    _headerSize = headerEndSectionOffset;
    _header = new uint8_t[_headerSize];
    bzero(_header, _headerSize);
    macho_unwind_info_section_header<P>* sectionHeader = (macho_unwind_info_section_header<P>*)_header;
    sectionHeader->set_version(UNWIND_SECTION_VERSION);
    sectionHeader->set_commonEncodingsArraySectionOffset(commonEncodingsArraySectionOffset);
    sectionHeader->set_commonEncodingsArrayCount(commonEncodingsArrayCount);
    sectionHeader->set_personalityArraySectionOffset(personalityArraySectionOffset);
    sectionHeader->set_personalityArrayCount(personalityArrayCount);
    sectionHeader->set_indexSectionOffset(indexSectionOffset);
    sectionHeader->set_indexCount(indexCount);

    // copy common encodings
    uint32_t* commonEncodingsTable = (uint32_t*)&_header[commonEncodingsArraySectionOffset];
    for (std::map<uint32_t, unsigned int>::iterator it=commonEncodings.begin(); it != commonEncodings.end(); ++it)
        E::set32(commonEncodingsTable[it->second], it->first);

    // make references for personality entries
    uint32_t* personalityArray = (uint32_t*)&_header[sectionHeader->personalityArraySectionOffset()];
    for (std::map<const ld::Atom*, unsigned int>::iterator it=personalityIndexMap.begin(); it != personalityIndexMap.end(); ++it) {
        uint32_t offset = (uint8_t*)&personalityArray[it->second-1] - _header;
        this->addImageOffsetFixup(offset, it->first);
    }

    // build first level index and references
    macho_unwind_info_section_header_index_entry<P>* indexTable = (macho_unwind_info_section_header_index_entry<P>*)&_header[indexSectionOffset];
    uint32_t refOffset;
    for (unsigned int i=0; i < secondLevelPageCount; ++i) {
        unsigned int reverseIndex = secondLevelPageCount - 1 - i;
        indexTable[i].set_functionOffset(0);
        indexTable[i].set_secondLevelPagesSectionOffset(secondLevelPagesStarts[reverseIndex]-_pages+headerEndSectionOffset);
        indexTable[i].set_lsdaIndexArraySectionOffset(lsdaIndexOffsetMap[secondLevelFirstFuncs[reverseIndex]]+lsdaIndexArraySectionOffset);
        refOffset = (uint8_t*)&indexTable[i] - _header;
        this->addImageOffsetFixup(refOffset, secondLevelFirstFuncs[reverseIndex]);
    }
    indexTable[secondLevelPageCount].set_functionOffset(0);
    indexTable[secondLevelPageCount].set_secondLevelPagesSectionOffset(0);
    indexTable[secondLevelPageCount].set_lsdaIndexArraySectionOffset(lsdaIndexArraySectionOffset+lsdaIndexArraySize);
    refOffset = (uint8_t*)&indexTable[secondLevelPageCount] - _header;
    this->addImageOffsetFixupPlusAddend(refOffset, entries.back().func, entries.back().func->size()+1);

    // build lsda references
    uint32_t lsdaEntrySectionOffset = lsdaIndexArraySectionOffset;
    for (std::vector<LSDAEntry>::iterator it = lsdaIndex.begin(); it != lsdaIndex.end(); ++it) {
        this->addImageOffsetFixup(lsdaEntrySectionOffset, it->func);
        this->addImageOffsetFixup(lsdaEntrySectionOffset+4, it->lsda);
        lsdaEntrySectionOffset += sizeof(unwind_info_section_header_lsda_index_entry);
    }

}

template <typename A>
UnwindInfoAtom<A>::~UnwindInfoAtom()
{
    free(_pagesForDelete);
    delete [] _header;   // _header is allocated with new[] above, so pair it with delete[]
}

template <typename A>
void UnwindInfoAtom<A>::copyRawContent(uint8_t buffer[]) const
{
    // content is in two parts
    memcpy(buffer, _header, _headerSize);
    memcpy(&buffer[_headerSize], _pages, _pagesSize);
}


template <>
bool UnwindInfoAtom<x86>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
    return ((enc & UNWIND_X86_MODE_MASK) == UNWIND_X86_MODE_DWARF);
}

template <>
bool UnwindInfoAtom<x86_64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
    return ((enc & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_DWARF);
}

template <>
bool UnwindInfoAtom<arm64>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
    return ((enc & UNWIND_ARM64_MODE_MASK) == UNWIND_ARM64_MODE_DWARF);
}


template <>
bool UnwindInfoAtom<arm>::encodingMeansUseDwarf(compact_unwind_encoding_t enc)
{
    return ((enc & UNWIND_ARM_MODE_MASK) == UNWIND_ARM_MODE_DWARF);
}


template <typename A>
void UnwindInfoAtom<A>::compressDuplicates(const std::vector<UnwindEntry>& entries, std::vector<UnwindEntry>& uniqueEntries)
{
    // build new list removing entries where next function has same encoding
    uniqueEntries.reserve(entries.size());
    UnwindEntry last(NULL, 0, 0, NULL, NULL, NULL, 0xFFFFFFFF);
    for (std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
        const UnwindEntry& next = *it;
        bool newNeedsDwarf = encodingMeansUseDwarf(next.encoding);
        // remove entries which have same encoding and personalityPointer as last one
        if ( newNeedsDwarf || (next.encoding != last.encoding) || (next.personalityPointer != last.personalityPointer)
                           || (next.lsda != NULL) || (last.lsda != NULL) ) {
            uniqueEntries.push_back(next);
        }
        last = next;
    }
    if (_s_log) fprintf(stderr, "compressDuplicates() entries.size()=%lu, uniqueEntries.size()=%lu\n",
                        entries.size(), uniqueEntries.size());
}
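// Example of the folding above: several consecutive functions that share the same non-dwarf
// encoding and personality, with no LSDA on either side of a boundary, collapse into a single
// entry; entries needing dwarf or carrying an LSDA are always kept.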

template <typename A>
void UnwindInfoAtom<A>::makePersonalityIndexes(std::vector<UnwindEntry>& entries, std::map<const ld::Atom*, uint32_t>& personalityIndexMap)
{
    for (std::vector<UnwindEntry>::iterator it=entries.begin(); it != entries.end(); ++it) {
        if ( it->personalityPointer != NULL ) {
            std::map<const ld::Atom*, uint32_t>::iterator pos = personalityIndexMap.find(it->personalityPointer);
            if ( pos == personalityIndexMap.end() ) {
                const uint32_t nextIndex = personalityIndexMap.size() + 1;
                personalityIndexMap[it->personalityPointer] = nextIndex;
            }
            uint32_t personalityIndex = personalityIndexMap[it->personalityPointer];
            it->encoding |= (personalityIndex << (__builtin_ctz(UNWIND_PERSONALITY_MASK)) );
        }
    }
    if (_s_log) fprintf(stderr, "makePersonalityIndexes() %lu personality routines used\n", personalityIndexMap.size());
}
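// Note: personality indexes start at 1 (0 means "no personality"); the index is stored in the
// two-bit UNWIND_PERSONALITY_MASK field of the encoding, which is why the constructor rejects
// more than three distinct personality routines.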


template <typename A>
void UnwindInfoAtom<A>::findCommonEncoding(const std::vector<UnwindEntry>& entries,
                                           std::map<compact_unwind_encoding_t, unsigned int>& commonEncodings)
{
    // scan infos to get frequency counts for each encoding
    std::map<compact_unwind_encoding_t, unsigned int> encodingsUsed;
    unsigned int mostCommonEncodingUsageCount = 0;
    for (std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
        // never put dwarf into common table
        if ( encodingMeansUseDwarf(it->encoding) )
            continue;
        std::map<compact_unwind_encoding_t, unsigned int>::iterator pos = encodingsUsed.find(it->encoding);
        if ( pos == encodingsUsed.end() ) {
            encodingsUsed[it->encoding] = 1;
        }
        else {
            encodingsUsed[it->encoding] += 1;
            if ( mostCommonEncodingUsageCount < encodingsUsed[it->encoding] )
                mostCommonEncodingUsageCount = encodingsUsed[it->encoding];
        }
    }
    // put the most common encodings into the common table, but at most 127 of them
    for (unsigned int usages=mostCommonEncodingUsageCount; usages > 1; --usages) {
        for (std::map<compact_unwind_encoding_t, unsigned int>::iterator euit=encodingsUsed.begin(); euit != encodingsUsed.end(); ++euit) {
            if ( euit->second == usages ) {
                unsigned int sz = commonEncodings.size();
                if ( sz < 127 ) {
                    commonEncodings[euit->first] = sz;
                }
            }
        }
    }
    if (_s_log) fprintf(stderr, "findCommonEncoding() %lu common encodings found\n", commonEncodings.size());
}
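// The 127 cap leaves room in the 8-bit encoding index used by compressed pages: indices below
// the common-table size select this shared table, higher indices select page-specific encodings
// (makeCompressedSecondLevelPage() stops adding page-specific encodings once an index would
// exceed 255).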


template <typename A>
void UnwindInfoAtom<A>::makeLsdaIndex(const std::vector<UnwindEntry>& entries, std::vector<LSDAEntry>& lsdaIndex, std::map<const ld::Atom*, uint32_t>& lsdaIndexOffsetMap)
{
    for (std::vector<UnwindEntry>::const_iterator it=entries.begin(); it != entries.end(); ++it) {
        lsdaIndexOffsetMap[it->func] = lsdaIndex.size() * sizeof(unwind_info_section_header_lsda_index_entry);
        if ( it->lsda != NULL ) {
            LSDAEntry entry;
            entry.func = it->func;
            entry.lsda = it->lsda;
            lsdaIndex.push_back(entry);
        }
    }
    if (_s_log) fprintf(stderr, "makeLsdaIndex() %lu LSDAs found\n", lsdaIndex.size());
}


template <>
void UnwindInfoAtom<x86>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm64>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
}


template <>
void UnwindInfoAtom<arm>::addCompressedAddressOffsetFixup(uint32_t offset, const ld::Atom* func, const ld::Atom* fromFunc)
{
    if ( fromFunc->isThumb() ) {
        _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of4, ld::Fixup::kindSetTargetAddress, func));
        _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of4, ld::Fixup::kindSubtractTargetAddress, fromFunc));
        _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of4, ld::Fixup::kindSubtractAddend, 1));
        _fixups.push_back(ld::Fixup(offset, ld::Fixup::k4of4, ld::Fixup::kindStoreLittleEndianLow24of32));
    }
    else {
        _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, func));
        _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindSubtractTargetAddress, fromFunc));
        _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndianLow24of32));
    }
}

template <>
void UnwindInfoAtom<x86>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm64>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}


template <>
void UnwindInfoAtom<arm>::addCompressedEncodingFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm64>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}


template <>
void UnwindInfoAtom<arm>::addRegularAddressFixup(uint32_t offset, const ld::Atom* func)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, func));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86_64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<arm64>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}


template <>
void UnwindInfoAtom<arm>::addRegularFDEOffsetFixup(uint32_t offset, const ld::Atom* fde)
{
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetSectionOffset, fde));
    _fixups.push_back(ld::Fixup(offset+4, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndianLow24of32));
}

template <>
void UnwindInfoAtom<x86>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm64>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}


template <>
void UnwindInfoAtom<arm>::addImageOffsetFixup(uint32_t offset, const ld::Atom* targ)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of2, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of2, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<x86_64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}

template <>
void UnwindInfoAtom<arm64>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}


template <>
void UnwindInfoAtom<arm>::addImageOffsetFixupPlusAddend(uint32_t offset, const ld::Atom* targ, uint32_t addend)
{
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k1of3, ld::Fixup::kindSetTargetImageOffset, targ));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k2of3, ld::Fixup::kindAddAddend, addend));
    _fixups.push_back(ld::Fixup(offset, ld::Fixup::k3of3, ld::Fixup::kindStoreLittleEndian32));
}



template <typename A>
unsigned int UnwindInfoAtom<A>::makeRegularSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos, uint32_t pageSize,
                                                           unsigned int endIndex, uint8_t*& pageEnd)
{
    const unsigned int maxEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
    const unsigned int entriesToAdd = ((endIndex > maxEntriesPerPage) ? maxEntriesPerPage : endIndex);
    uint8_t* pageStart = pageEnd
                       - entriesToAdd*sizeof(unwind_info_regular_second_level_entry)
                       - sizeof(unwind_info_regular_second_level_page_header);
    macho_unwind_info_regular_second_level_page_header<P>* page = (macho_unwind_info_regular_second_level_page_header<P>*)pageStart;
    page->set_kind(UNWIND_SECOND_LEVEL_REGULAR);
    page->set_entryPageOffset(sizeof(macho_unwind_info_regular_second_level_page_header<P>));
    page->set_entryCount(entriesToAdd);
    macho_unwind_info_regular_second_level_entry<P>* entryTable = (macho_unwind_info_regular_second_level_entry<P>*)(pageStart + page->entryPageOffset());
    for (unsigned int i=0; i < entriesToAdd; ++i) {
        const UnwindEntry& info = uniqueInfos[endIndex-entriesToAdd+i];
        entryTable[i].set_functionOffset(0);
        entryTable[i].set_encoding(info.encoding);
        // add fixup for address part of entry
        uint32_t offset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
        this->addRegularAddressFixup(offset, info.func);
        if ( encodingMeansUseDwarf(info.encoding) ) {
            // add fixup for dwarf offset part of page specific encoding
            uint32_t encOffset = (uint8_t*)(&entryTable[i]) - _pageAlignedPages;
            this->addRegularFDEOffsetFixup(encOffset, info.fde);
        }
    }
    if (_s_log) fprintf(stderr, "regular page with %u entries\n", entriesToAdd);
    pageEnd = pageStart;
    return endIndex - entriesToAdd;
}


template <typename A>
unsigned int UnwindInfoAtom<A>::makeCompressedSecondLevelPage(const std::vector<UnwindEntry>& uniqueInfos,
                                                              const std::map<compact_unwind_encoding_t,unsigned int> commonEncodings,
                                                              uint32_t pageSize, unsigned int endIndex, uint8_t*& pageEnd)
{
    if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(pageSize=%u, endIndex=%u)\n", pageSize, endIndex);
    // first pass calculates how many compressed entries we could fit in this sized page
    // keep adding entries to page until:
    //   1) encoding table plus entry table plus header exceed page size
    //   2) the file offset delta from the first to last function > 24 bits
    //   3) custom encoding index reaches 255
    //   4) run out of uniqueInfos to encode
    std::map<compact_unwind_encoding_t, unsigned int> pageSpecificEncodings;
    uint32_t space4 = (pageSize - sizeof(unwind_info_compressed_second_level_page_header))/sizeof(uint32_t);
    int index = endIndex-1;
    int entryCount = 0;
    uint64_t lastEntryAddress = uniqueInfos[index].funcTentAddress;
    bool canDo = true;
    while ( canDo && (index >= 0) ) {
        const UnwindEntry& info = uniqueInfos[index--];
        // compute encoding index
        unsigned int encodingIndex;
        std::map<compact_unwind_encoding_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
        if ( pos != commonEncodings.end() ) {
            encodingIndex = pos->second;
            if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use commonEncodings[%d]=0x%08X\n", index, encodingIndex, info.encoding);
        }
        else {
            // no common entry, so add one on this page
            uint32_t encoding = info.encoding;
            if ( encodingMeansUseDwarf(encoding) ) {
                // make a unique pseudo encoding so this dwarf entry gets its own encoding slot
                encoding += (index+1);
            }
            std::map<compact_unwind_encoding_t, unsigned int>::iterator ppos = pageSpecificEncodings.find(encoding);
            if ( ppos != pageSpecificEncodings.end() ) {
                encodingIndex = ppos->second;   // re-use the page-specific slot (pos is commonEncodings.end() here)
                if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, re-use pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
            }
            else {
                encodingIndex = commonEncodings.size() + pageSpecificEncodings.size();
                if ( encodingIndex <= 255 ) {
                    pageSpecificEncodings[encoding] = encodingIndex;
                    if (_s_log) fprintf(stderr, "makeCompressedSecondLevelPage(): funcIndex=%d, pageSpecificEncodings[%d]=0x%08X\n", index, encodingIndex, encoding);
                }
                else {
                    canDo = false; // case 3)
                    if (_s_log) fprintf(stderr, "end of compressed page with %u entries, %lu custom encodings because too many custom encodings\n",
                                        entryCount, pageSpecificEncodings.size());
                }
            }
        }
        // compute function offset
        uint32_t funcOffsetWithInPage = lastEntryAddress - info.funcTentAddress;
        if ( funcOffsetWithInPage > 0x00FFFF00 ) {
            // don't use 0x00FFFFFF because addresses may vary after atoms are laid out again
            canDo = false; // case 2)
            if (_s_log) fprintf(stderr, "can't use compressed page with %u entries because function offset too big\n", entryCount);
        }
        // check room for entry
        if ( (pageSpecificEncodings.size()+entryCount) > space4 ) {
            canDo = false; // case 1)
            --entryCount;
            if (_s_log) fprintf(stderr, "end of compressed page with %u entries because full\n", entryCount);
        }
        //if (_s_log) fprintf(stderr, "space4=%d, pageSpecificEncodings.size()=%ld, entryCount=%d\n", space4, pageSpecificEncodings.size(), entryCount);
        if ( canDo ) {
            ++entryCount;
        }
    }

    // check for cases where it would be better to use a regular (non-compressed) page
    const unsigned int compressPageUsed = sizeof(unwind_info_compressed_second_level_page_header)
                                        + pageSpecificEncodings.size()*sizeof(uint32_t)
                                        + entryCount*sizeof(uint32_t);
    if ( (compressPageUsed < (pageSize-4)) && (index >= 0) ) {
        const int regularEntriesPerPage = (pageSize - sizeof(unwind_info_regular_second_level_page_header))/sizeof(unwind_info_regular_second_level_entry);
        if ( entryCount < regularEntriesPerPage ) {
            return makeRegularSecondLevelPage(uniqueInfos, pageSize, endIndex, pageEnd);
        }
    }

    // check if we need any padding because adding another entry would take 8 bytes but only have room for 4
    uint32_t pad = 0;
    if ( compressPageUsed == (pageSize-4) )
        pad = 4;

    // second pass fills in page
    uint8_t* pageStart = pageEnd - compressPageUsed - pad;
    CSLP* page = (CSLP*)pageStart;
    page->set_kind(UNWIND_SECOND_LEVEL_COMPRESSED);
    page->set_entryPageOffset(sizeof(CSLP));
    page->set_entryCount(entryCount);
    page->set_encodingsPageOffset(page->entryPageOffset()+entryCount*sizeof(uint32_t));
    page->set_encodingsCount(pageSpecificEncodings.size());
    uint32_t* const encodingsArray = (uint32_t*)&pageStart[page->encodingsPageOffset()];
    // fill in entry table
    uint32_t* const entiresArray = (uint32_t*)&pageStart[page->entryPageOffset()];
    const ld::Atom* firstFunc = uniqueInfos[endIndex-entryCount].func;
    for (unsigned int i=endIndex-entryCount; i < endIndex; ++i) {
        const UnwindEntry& info = uniqueInfos[i];
        uint8_t encodingIndex;
        if ( encodingMeansUseDwarf(info.encoding) ) {
            // dwarf entries are always in page specific encodings
            assert(pageSpecificEncodings.find(info.encoding+i) != pageSpecificEncodings.end());
            encodingIndex = pageSpecificEncodings[info.encoding+i];
        }
        else {
            std::map<uint32_t, unsigned int>::const_iterator pos = commonEncodings.find(info.encoding);
            if ( pos != commonEncodings.end() )
                encodingIndex = pos->second;
            else
                encodingIndex = pageSpecificEncodings[info.encoding];
        }
        uint32_t entryIndex = i - endIndex + entryCount;
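        // Each compressed entry is one 32-bit word: the top 8 bits hold the encoding index,
        // and the low 24 bits (filled in by the fixup added below) hold the function's
        // offset from the page's first function.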
        E::set32(entiresArray[entryIndex], encodingIndex << 24);
        // add fixup for address part of entry
        uint32_t offset = (uint8_t*)(&entiresArray[entryIndex]) - _pageAlignedPages;
        this->addCompressedAddressOffsetFixup(offset, info.func, firstFunc);
        if ( encodingMeansUseDwarf(info.encoding) ) {
            // add fixup for dwarf offset part of page specific encoding
            uint32_t encOffset = (uint8_t*)(&encodingsArray[encodingIndex-commonEncodings.size()]) - _pageAlignedPages;
            this->addCompressedEncodingFixup(encOffset, info.fde);
        }
    }
    // fill in encodings table
    for (std::map<uint32_t, unsigned int>::const_iterator it = pageSpecificEncodings.begin(); it != pageSpecificEncodings.end(); ++it) {
        E::set32(encodingsArray[it->second-commonEncodings.size()], it->first);
    }

    if (_s_log) fprintf(stderr, "compressed page with %u entries, %lu custom encodings\n", entryCount, pageSpecificEncodings.size());

    // update pageEnd
    pageEnd = pageStart;
    return endIndex-entryCount;  // endIndex for next page
}



static uint64_t calculateEHFrameSize(ld::Internal& state)
{
    bool allCIEs = true;
    uint64_t size = 0;
    for (ld::Internal::FinalSection* sect : state.sections) {
        if ( sect->type() == ld::Section::typeCFI ) {
            for (const ld::Atom* atom : sect->atoms) {
                size += atom->size();
                if ( strcmp(atom->name(), "CIE") != 0 )
                    allCIEs = false;
            }
            if ( allCIEs ) {
                // <rdar://problem/21427393> Linker generates eh_frame data even when there's only an unused CIEs in it
                sect->atoms.clear();
                state.sections.erase(std::remove(state.sections.begin(), state.sections.end(), sect), state.sections.end());
                return 0;
            }
        }
    }
    return size;
}

static void getAllUnwindInfos(const ld::Internal& state, std::vector<UnwindEntry>& entries)
{
    uint64_t address = 0;
    for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            // adjust address for atom alignment
            uint64_t alignment = 1 << atom->alignment().powerOf2;
            uint64_t currentModulus = (address % alignment);
            uint64_t requiredModulus = atom->alignment().modulus;
            if ( currentModulus != requiredModulus ) {
                if ( requiredModulus > currentModulus )
                    address += requiredModulus-currentModulus;
                else
                    address += requiredModulus+alignment-currentModulus;
            }
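            // Note: these are only tentative addresses (atoms are laid out again later); they
            // are used to estimate function-offset deltas when sizing compressed pages, which
            // is why makeCompressedSecondLevelPage() keeps a margin below the 24-bit limit.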

            if ( atom->beginUnwind() == atom->endUnwind() ) {
                // be sure to mark that we have no unwind info for stuff in the TEXT segment without unwind info
                if ( (atom->section().type() == ld::Section::typeCode) && (atom->size() != 0) ) {
                    entries.push_back(UnwindEntry(atom, address, 0, NULL, NULL, NULL, 0));
                }
            }
            else {
                // atom has unwind info(s), add entry for each
                const ld::Atom* fde = NULL;
                const ld::Atom* lsda = NULL;
                const ld::Atom* personalityPointer = NULL;
                for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                    switch ( fit->kind ) {
                        case ld::Fixup::kindNoneGroupSubordinateFDE:
                            assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                            fde = fit->u.target;
                            break;
                        case ld::Fixup::kindNoneGroupSubordinateLSDA:
                            assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                            lsda = fit->u.target;
                            break;
                        case ld::Fixup::kindNoneGroupSubordinatePersonality:
                            assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                            personalityPointer = fit->u.target;
                            assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
                            break;
                        default:
                            break;
                    }
                }
                if ( fde != NULL ) {
                    // find CIE for this FDE
                    const ld::Atom* cie = NULL;
                    for (ld::Fixup::iterator fit = fde->fixupsBegin(), end=fde->fixupsEnd(); fit != end; ++fit) {
                        if ( fit->kind != ld::Fixup::kindSubtractTargetAddress )
                            continue;
                        if ( fit->binding != ld::Fixup::bindingDirectlyBound )
                            continue;
                        cie = fit->u.target;
                        // CIE is only direct subtracted target in FDE
                        assert(cie->section().type() == ld::Section::typeCFI);
                        break;
                    }
                    if ( cie != NULL ) {
                        // a CIE can have just one fixup - to the personality pointer
                        for (ld::Fixup::iterator fit = cie->fixupsBegin(), end=cie->fixupsEnd(); fit != end; ++fit) {
                            if ( fit->kind == ld::Fixup::kindSetTargetAddress ) {
                                switch ( fit->binding ) {
                                    case ld::Fixup::bindingsIndirectlyBound:
                                        personalityPointer = state.indirectBindingTable[fit->u.bindingIndex];
                                        assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
                                        break;
                                    case ld::Fixup::bindingDirectlyBound:
                                        personalityPointer = fit->u.target;
                                        assert(personalityPointer->section().type() == ld::Section::typeNonLazyPointer);
                                        break;
                                    default:
                                        break;
                                }
                            }
                        }
                    }
                }
                for ( ld::Atom::UnwindInfo::iterator uit = atom->beginUnwind(); uit != atom->endUnwind(); ++uit ) {
                    entries.push_back(UnwindEntry(atom, address, uit->startOffset, fde, lsda, personalityPointer, uit->unwindInfo));
                }
            }
            address += atom->size();
        }
    }
}


static void makeFinalLinkedImageCompactUnwindSection(const Options& opts, ld::Internal& state)
{
    // walk every atom and get its unwind info
    std::vector<UnwindEntry> entries;
    entries.reserve(64);
    getAllUnwindInfos(state, entries);

    // don't generate an __unwind_info section if there is no code in this linkage unit
    if ( entries.size() == 0 )
        return;

    // calculate size of __eh_frame section so that __unwind_info can be placed before it and stay page aligned
    uint64_t ehFrameSize = calculateEHFrameSize(state);

    // create atom that contains the whole compact unwind table
    switch ( opts.architecture() ) {
#if SUPPORT_ARCH_x86_64
        case CPU_TYPE_X86_64:
            state.addAtom(*new UnwindInfoAtom<x86_64>(entries, ehFrameSize));
            break;
#endif
#if SUPPORT_ARCH_i386
        case CPU_TYPE_I386:
            state.addAtom(*new UnwindInfoAtom<x86>(entries, ehFrameSize));
            break;
#endif
#if SUPPORT_ARCH_arm64
        case CPU_TYPE_ARM64:
            state.addAtom(*new UnwindInfoAtom<arm64>(entries, ehFrameSize));
            break;
#endif
#if SUPPORT_ARCH_arm_any
        case CPU_TYPE_ARM:
            if ( opts.armUsesZeroCostExceptions() )
                state.addAtom(*new UnwindInfoAtom<arm>(entries, ehFrameSize));
            break;
#endif
        default:
            assert(0 && "no compact unwind for arch");
    }
}



template <typename A>
class CompactUnwindAtom : public ld::Atom {
public:
    CompactUnwindAtom(ld::Internal& state, const ld::Atom* funcAtom,
                      uint32_t startOffset, uint32_t len, uint32_t cui);
    ~CompactUnwindAtom() {}

    virtual const ld::File*         file() const            { return NULL; }
    virtual const char*             name() const            { return "compact unwind info"; }
    virtual uint64_t                size() const            { return sizeof(macho_compact_unwind_entry<P>); }
    virtual uint64_t                objectAddress() const   { return 0; }
    virtual void                    copyRawContent(uint8_t buffer[]) const;
    virtual void                    setScope(Scope)         { }
    virtual ld::Fixup::iterator     fixupsBegin() const     { return (ld::Fixup*)&_fixups[0]; }
    virtual ld::Fixup::iterator     fixupsEnd() const       { return (ld::Fixup*)&_fixups[_fixups.size()]; }

private:
    typedef typename A::P           P;
    typedef typename A::P::E        E;
    typedef typename A::P::uint_t   pint_t;


    const ld::Atom*                 _atom;
    const uint32_t                  _startOffset;
    const uint32_t                  _len;
    const uint32_t                  _compactUnwindInfo;
    std::vector<ld::Fixup>          _fixups;

    static ld::Fixup::Kind          _s_pointerKind;
    static ld::Fixup::Kind          _s_pointerStoreKind;
    static ld::Section              _s_section;
};
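// For -r (relocatable) output, each unwind range becomes one macho_compact_unwind_entry in a
// __LD,__compact_unwind section rather than being folded into __unwind_info; a later final
// link reads these entries back and builds __unwind_info from them.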


template <typename A>
ld::Section CompactUnwindAtom<A>::_s_section("__LD", "__compact_unwind", ld::Section::typeDebug);

template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<x86_64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
#if SUPPORT_ARCH_arm64
template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian64;
template <> ld::Fixup::Kind CompactUnwindAtom<arm64>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian64;
#endif
template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerKind = ld::Fixup::kindStoreLittleEndian32;
template <> ld::Fixup::Kind CompactUnwindAtom<arm>::_s_pointerStoreKind = ld::Fixup::kindStoreTargetAddressLittleEndian32;

template <typename A>
CompactUnwindAtom<A>::CompactUnwindAtom(ld::Internal& state, const ld::Atom* funcAtom, uint32_t startOffset,
                                        uint32_t len, uint32_t cui)
    : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
               ld::Atom::scopeTranslationUnit, ld::Atom::typeUnclassified,
               symbolTableNotIn, false, false, false, ld::Atom::Alignment(log2(sizeof(pint_t)))),
      _atom(funcAtom), _startOffset(startOffset), _len(len), _compactUnwindInfo(cui)
{
    _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k1of3, ld::Fixup::kindSetTargetAddress, funcAtom));
    _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k2of3, ld::Fixup::kindAddAddend, _startOffset));
    _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::codeStartFieldOffset(), ld::Fixup::k3of3, _s_pointerKind));
    // see if atom has subordinate personality function or lsda
    for (ld::Fixup::iterator fit = funcAtom->fixupsBegin(), end=funcAtom->fixupsEnd(); fit != end; ++fit) {
        switch ( fit->kind ) {
            case ld::Fixup::kindNoneGroupSubordinatePersonality:
                assert(fit->binding == ld::Fixup::bindingsIndirectlyBound);
                _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::personalityFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, state.indirectBindingTable[fit->u.bindingIndex]));
                break;
            case ld::Fixup::kindNoneGroupSubordinateLSDA:
                assert(fit->binding == ld::Fixup::bindingDirectlyBound);
                _fixups.push_back(ld::Fixup(macho_compact_unwind_entry<P>::lsdaFieldOffset(), ld::Fixup::k1of1, _s_pointerStoreKind, fit->u.target));
                break;
            default:
                break;
        }
    }

}

template <typename A>
void CompactUnwindAtom<A>::copyRawContent(uint8_t buffer[]) const
{
    macho_compact_unwind_entry<P>* buf = (macho_compact_unwind_entry<P>*)buffer;
    buf->set_codeStart(0);
    buf->set_codeLen(_len);
    buf->set_compactUnwindInfo(_compactUnwindInfo);
    buf->set_personality(0);
    buf->set_lsda(0);
}


static void makeCompactUnwindAtom(const Options& opts, ld::Internal& state, const ld::Atom* atom,
                                  uint32_t startOffset, uint32_t endOffset, uint32_t cui)
{
    switch ( opts.architecture() ) {
#if SUPPORT_ARCH_x86_64
        case CPU_TYPE_X86_64:
            state.addAtom(*new CompactUnwindAtom<x86_64>(state, atom, startOffset, endOffset-startOffset, cui));
            break;
#endif
#if SUPPORT_ARCH_i386
        case CPU_TYPE_I386:
            state.addAtom(*new CompactUnwindAtom<x86>(state, atom, startOffset, endOffset-startOffset, cui));
            break;
#endif
#if SUPPORT_ARCH_arm64
        case CPU_TYPE_ARM64:
            state.addAtom(*new CompactUnwindAtom<arm64>(state, atom, startOffset, endOffset-startOffset, cui));
            break;
#endif
        case CPU_TYPE_ARM:
            state.addAtom(*new CompactUnwindAtom<arm>(state, atom, startOffset, endOffset-startOffset, cui));
            break;
    }
}

static void makeRelocateableCompactUnwindSection(const Options& opts, ld::Internal& state)
{
    // can't add CompactUnwindAtom atoms while iterating, so pre-scan
    std::vector<const ld::Atom*> atomsWithUnwind;
    for (std::vector<ld::Internal::FinalSection*>::const_iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            if ( atom->beginUnwind() != atom->endUnwind() )
                atomsWithUnwind.push_back(atom);
        }
    }
    // make one CompactUnwindAtom for each compact unwind range in each atom
    for (std::vector<const ld::Atom*>::iterator it = atomsWithUnwind.begin(); it != atomsWithUnwind.end(); ++it) {
        const ld::Atom* atom = *it;
        uint32_t lastOffset = 0;
        uint32_t lastCUE = 0;
        bool first = true;
        for (ld::Atom::UnwindInfo::iterator uit=atom->beginUnwind(); uit != atom->endUnwind(); ++uit) {
            if ( !first ) {
                makeCompactUnwindAtom(opts, state, atom, lastOffset, uit->startOffset, lastCUE);
            }
            lastOffset = uit->startOffset;
            lastCUE = uit->unwindInfo;
            first = false;
        }
        makeCompactUnwindAtom(opts, state, atom, lastOffset, (uint32_t)atom->size(), lastCUE);
    }
}


void doPass(const Options& opts, ld::Internal& state)
{
    if ( opts.outputKind() == Options::kObjectFile )
        makeRelocateableCompactUnwindSection(opts, state);

    else if ( opts.needsUnwindInfoSection() )
        makeFinalLinkedImageCompactUnwindSection(opts, state);
}


} // namespace compact_unwind
} // namespace passes
} // namespace ld