]> git.saurik.com Git - apple/ld64.git/blob - src/ld/passes/branch_island.cpp
ld64-242.tar.gz
[apple/ld64.git] / src / ld / passes / branch_island.cpp
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2009 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <stdint.h>
27 #include <math.h>
28 #include <unistd.h>
29 #include <dlfcn.h>
30 #include <libkern/OSByteOrder.h>
31
32 #include <vector>
33 #include <map>
34
35 #include "MachOFileAbstraction.hpp"
36 #include "ld.hpp"
37 #include "branch_island.h"
38
39 namespace ld {
40 namespace passes {
41 namespace branch_island {
42
43
// Side table mapping each atom to its assigned final address.  Populated by
// buildAddressMap() and consulted only for -preload builds, where branch
// displacements must be measured between real addresses rather than
// within-section offsets.
static std::map<const Atom*, uint64_t> sAtomToAddress;
45
46
// A branch destination: the target atom plus an addend (offset into the atom).
struct TargetAndOffset { const ld::Atom* atom; uint32_t offset; };
48 class TargetAndOffsetComparor
49 {
50 public:
51 bool operator()(const TargetAndOffset& left, const TargetAndOffset& right) const
52 {
53 if ( left.atom != right.atom )
54 return ( left.atom < right.atom );
55 return ( left.offset < right.offset );
56 }
57 };
58
59
// set to true to get stderr logging of branch-island creation decisions
static bool _s_log = false;
// section into which regular (non-cross-section) branch islands are emitted
static ld::Section _s_text_section("__TEXT", "__text", ld::Section::typeCode);
62
63
64
// Branch island for an ARM-mode branch to an ARM-mode target: a single
// unconditional ARM `b` instruction.  _fixup1 patches the 24-bit displacement
// to 'target' (the next hop, which may itself be another island); _fixup2
// records the ultimate destination so later passes can follow island chains.
class ARMtoARMBranchIslandAtom : public ld::Atom {
public:
	// nm: heap-allocated symbol name (from asprintf in makeBranchIsland);
	// ownership is retained by this atom for its lifetime
	ARMtoARMBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
		: ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
					ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
					ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
			_name(nm),
			_fixup1(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressARMBranch24, target),
			_fixup2(0, ld::Fixup::k1of1, ld::Fixup::kindIslandTarget, finalTarget.atom) {
		if (_s_log) fprintf(stderr, "%p: ARM-to-ARM branch island to final target %s\n",
							this, finalTarget.atom->name());
	}

	virtual const ld::File*			file() const		{ return NULL; }
	virtual const char*				name() const		{ return _name; }
	virtual uint64_t				size() const		{ return 4; }
	virtual uint64_t				objectAddress() const { return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const {
		// 0xEA000000 is ARM `b` with a zero displacement placeholder;
		// the real displacement is written by _fixup1 at link time
		OSWriteLittleInt32(buffer, 0, 0xEA000000);
	}
	virtual void					setScope(Scope)		{ }
	// the two fixups are adjacent members, so they are exposed as a
	// contiguous [&_fixup1, &_fixup2 + 1) iterator range — do not reorder
	// the _fixup1/_fixup2 member declarations
	virtual ld::Fixup::iterator		fixupsBegin() const	{ return (ld::Fixup*)&_fixup1; }
	virtual ld::Fixup::iterator		fixupsEnd() const	{ return &((ld::Fixup*)&_fixup2)[1]; }

private:
	const char*						_name;
	ld::Fixup						_fixup1;
	ld::Fixup						_fixup2;
};
94
95
96
// Branch island for reaching a thumb1 target from ARM code in slidable output.
// Thumb1 has no long-displacement branch, so a 3-instruction ARM sequence plus
// a literal word computes the target pc-relatively and `bx`es to it.  Because
// the full 32-bit displacement reaches anywhere, this island always jumps
// straight to the final target (no island hopping), hence no fixups: content
// is computed from final addresses in copyRawContent().
class ARMtoThumb1BranchIslandAtom : public ld::Atom {
public:
	// 'target' (the next hop) is deliberately unused — see above; the
	// parameter is kept so all island constructors share one shape
	ARMtoThumb1BranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
		: ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
					ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
					ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
			_name(nm),
			_finalTarget(finalTarget) {
		if (_s_log) fprintf(stderr, "%p: ARM-to-thumb1 branch island to final target %s\n",
							this, finalTarget.atom->name());
	}

	virtual const ld::File*			file() const		{ return NULL; }
	virtual const char*				name() const		{ return _name; }
	virtual uint64_t				size() const		{ return 16; }
	virtual uint64_t				objectAddress() const { return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const {
		// There is no large displacement thumb1 branch instruction.
		// Instead use ARM instructions that can jump to thumb.
		// we use a 32-bit displacement, so we can directly jump to target which means no island hopping
		// The `add ip, pc, ip` at offset 4 reads pc as its own address + 8
		// (ARM pc-read semantics), i.e. finalAddress() + 12 — hence the +12.
		int64_t displacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - (this->finalAddress() + 12);
		if ( _finalTarget.atom->isThumb() )
			displacement |= 1;	// low bit set so `bx ip` switches to thumb state
		OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc004);	// ldr	ip, pc + 4
		OSWriteLittleInt32(&buffer[ 4], 0, 0xe08fc00c);	// add	ip, pc, ip
		OSWriteLittleInt32(&buffer[ 8], 0, 0xe12fff1c);	// bx	ip
		OSWriteLittleInt32(&buffer[12], 0, displacement);	// .long target-this
	}
	virtual void					setScope(Scope)		{ }

private:
	const char*						_name;
	TargetAndOffset					_finalTarget;
};
131
132
133
// Branch island for a thumb branch to a thumb target on thumb2-capable CPUs:
// a single thumb2 `b.w` instruction.  _fixup1 patches its displacement to
// 'target' (the next hop); _fixup2 records the ultimate destination.
class Thumb2toThumbBranchIslandAtom : public ld::Atom {
public:
	Thumb2toThumbBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
		// NOTE(review): unlike the ARM islands, the fourth boolean here is
		// `true` and alignment is 1 — presumably marking the atom itself as
		// thumb code; confirm against the ld::Atom constructor
		: ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
					ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
					ld::Atom::symbolTableIn, false, true, false, ld::Atom::Alignment(1)),
			_name(nm),
			_fixup1(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressThumbBranch22, target),
			_fixup2(0, ld::Fixup::k1of1, ld::Fixup::kindIslandTarget, finalTarget.atom) {
		if (_s_log) fprintf(stderr, "%p: Thumb-to-thumb branch island to final target %s\n",
							this, finalTarget.atom->name());
	}

	virtual const ld::File*			file() const		{ return NULL; }
	virtual const char*				name() const		{ return _name; }
	virtual uint64_t				size() const		{ return 4; }
	virtual uint64_t				objectAddress() const { return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const {
		// 0xf0008000 is thumb2 `b.w` with a zero displacement placeholder;
		// the real displacement is written by _fixup1 at link time
		OSWriteLittleInt32(buffer, 0, 0xf0008000);
	}
	virtual void					setScope(Scope)		{ }
	// fixups are adjacent members exposed as a contiguous iterator range;
	// do not reorder the _fixup1/_fixup2 declarations
	virtual ld::Fixup::iterator		fixupsBegin() const	{ return (ld::Fixup*)&_fixup1; }
	virtual ld::Fixup::iterator		fixupsEnd() const	{ return &((ld::Fixup*)&_fixup2)[1]; }

private:
	const char*						_name;
	ld::Fixup						_fixup1;
	ld::Fixup						_fixup2;
};
163
164
165
// Absolute-address branch island used for cross-section branches (-preload
// builds): materializes the full 32-bit target address in ip via movw/movt,
// then `bx ip`.  Unlike the pc-relative islands it is emitted into the
// branching atom's own section (inSect), and _fixup1.._fixup4 store the
// low/high 16 bits of the target address into the two mov instructions.
class Thumb2toThumbBranchAbsoluteIslandAtom : public ld::Atom {
public:
	Thumb2toThumbBranchAbsoluteIslandAtom(const char* nm, const ld::Section& inSect, TargetAndOffset finalTarget)
		: ld::Atom(inSect, ld::Atom::definitionRegular, ld::Atom::combineNever,
					ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
					ld::Atom::symbolTableIn, false, true, false, ld::Atom::Alignment(1)),
			_name(nm),
			_fixup1(0, ld::Fixup::k1of2, ld::Fixup::kindSetTargetAddress, finalTarget.atom),
			_fixup2(0, ld::Fixup::k2of2, ld::Fixup::kindStoreThumbLow16),
			_fixup3(4, ld::Fixup::k1of2, ld::Fixup::kindSetTargetAddress, finalTarget.atom),
			_fixup4(4, ld::Fixup::k2of2, ld::Fixup::kindStoreThumbHigh16),
			_fixup5(0, ld::Fixup::k1of1, ld::Fixup::kindIslandTarget, finalTarget.atom) {
		if (_s_log) fprintf(stderr, "%p: Thumb-to-thumb absolute branch island to final target %s\n",
							this, finalTarget.atom->name());
	}

	virtual const ld::File*			file() const		{ return NULL; }
	virtual const char*				name() const		{ return _name; }
	virtual uint64_t				size() const		{ return 10; }
	virtual uint64_t				objectAddress() const { return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const {
		// the #0x5678/#0x1234 immediates below are placeholders overwritten
		// by the Low16/High16 fixups at link time
		OSWriteLittleInt32(&buffer[0], 0, 0x0c00f240);	// movw	r12, #0x5678
		OSWriteLittleInt32(&buffer[4], 0, 0x0c00f2c0);	// movt	r12, #0x1234
		OSWriteLittleInt16(&buffer[8], 0, 0x4760);		// bx	r12
	}
	virtual void					setScope(Scope)		{ }
	// all five fixups are adjacent members exposed as one contiguous
	// iterator range; do not reorder the _fixup1.._fixup5 declarations
	virtual ld::Fixup::iterator		fixupsBegin() const	{ return (ld::Fixup*)&_fixup1; }
	virtual ld::Fixup::iterator		fixupsEnd() const	{ return &((ld::Fixup*)&_fixup5)[1]; }

private:
	const char*						_name;
	ld::Fixup						_fixup1;
	ld::Fixup						_fixup2;
	ld::Fixup						_fixup3;
	ld::Fixup						_fixup4;
	ld::Fixup						_fixup5;
};
203
204
205
// Branch island for reaching a thumb target from ARM code when the output is
// NOT slidable: `ldr pc, [pc, #-4]` loads the following literal word straight
// into pc.  The literal is the absolute final address (baked in at layout
// time, which is why this variant requires non-slidable output), so this
// island always reaches the final target directly — no island hopping, and
// no fixups: content is computed from final addresses in copyRawContent().
class NoPicARMtoThumbMBranchIslandAtom : public ld::Atom {
public:
	// 'target' (the next hop) is deliberately unused — see above; the
	// parameter is kept so all island constructors share one shape
	NoPicARMtoThumbMBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
		: ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
					ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
					ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
			_name(nm),
			_finalTarget(finalTarget) {
		if (_s_log) fprintf(stderr, "%p: NoPIC ARM-to-Thumb branch island to final target %s\n",
							this, finalTarget.atom->name());
	}

	virtual const ld::File*			file() const		{ return NULL; }
	virtual const char*				name() const		{ return _name; }
	virtual uint64_t				size() const		{ return 8; }
	virtual uint64_t				objectAddress() const { return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const {
		// There is no large displacement thumb1 branch instruction.
		// Instead use ARM instructions that can jump to thumb.
		// we use a 32-bit displacement, so we can directly jump to final target which means no island hopping
		// truncation to uint32_t is fine: ARM is a 32-bit architecture
		uint32_t targetAddr = _finalTarget.atom->finalAddress();
		if ( _finalTarget.atom->isThumb() )
			targetAddr |= 1;	// low bit set so loading into pc switches to thumb state
		OSWriteLittleInt32(&buffer[0], 0, 0xe51ff004);	// ldr	pc, [pc, #-4]
		OSWriteLittleInt32(&buffer[4], 0, targetAddr);	// .long target-this
	}
	virtual void					setScope(Scope)		{ }

private:
	const char*						_name;
	TargetAndOffset					_finalTarget;
};
238
239
240 static ld::Atom* makeBranchIsland(const Options& opts, ld::Fixup::Kind kind, int islandRegion, const ld::Atom* nextTarget,
241 TargetAndOffset finalTarget, const ld::Section& inSect, bool crossSectionBranch)
242 {
243 char* name;
244 if ( finalTarget.offset == 0 ) {
245 if ( islandRegion == 0 )
246 asprintf(&name, "%s.island", finalTarget.atom->name());
247 else
248 asprintf(&name, "%s.island.%d", finalTarget.atom->name(), islandRegion+1);
249 }
250 else {
251 asprintf(&name, "%s_plus_%d.island.%d", finalTarget.atom->name(), finalTarget.offset, islandRegion);
252 }
253
254 switch ( kind ) {
255 case ld::Fixup::kindStoreARMBranch24:
256 case ld::Fixup::kindStoreThumbBranch22:
257 case ld::Fixup::kindStoreTargetAddressARMBranch24:
258 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
259 if ( crossSectionBranch && opts.preferSubArchitecture() && opts.archSupportsThumb2() ) {
260 return new Thumb2toThumbBranchAbsoluteIslandAtom(name, inSect, finalTarget);
261 }
262 else if ( finalTarget.atom->isThumb() ) {
263 if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() ) {
264 return new Thumb2toThumbBranchIslandAtom(name, nextTarget, finalTarget);
265 }
266 else if ( opts.outputSlidable() ) {
267 return new ARMtoThumb1BranchIslandAtom(name, nextTarget, finalTarget);
268 }
269 else {
270 return new NoPicARMtoThumbMBranchIslandAtom(name, nextTarget, finalTarget);
271 }
272 }
273 else {
274 return new ARMtoARMBranchIslandAtom(name, nextTarget, finalTarget);
275 }
276 break;
277 default:
278 assert(0 && "unexpected branch kind");
279 break;
280 }
281 return NULL;
282 }
283
284
285 static uint64_t textSizeWhenMightNeedBranchIslands(const Options& opts, bool seenThumbBranch)
286 {
287 switch ( opts.architecture() ) {
288 case CPU_TYPE_ARM:
289 if ( ! seenThumbBranch )
290 return 32000000; // ARM can branch +/- 32MB
291 else if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() )
292 return 16000000; // thumb2 can branch +/- 16MB
293 else
294 return 4000000; // thumb1 can branch +/- 4MB
295 break;
296 }
297 assert(0 && "unexpected architecture");
298 return 0x100000000LL;
299 }
300
301
302 static uint64_t maxDistanceBetweenIslands(const Options& opts, bool seenThumbBranch)
303 {
304 switch ( opts.architecture() ) {
305 case CPU_TYPE_ARM:
306 if ( ! seenThumbBranch )
307 return 30*1024*1024; // 2MB of branch islands per 32MB
308 else if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() )
309 return 14*1024*1024; // 2MB of branch islands per 16MB
310 else
311 return 3500000; // 0.5MB of branch islands per 4MB
312 break;
313 }
314 assert(0 && "unexpected architecture");
315 return 0x100000000LL;
316 }
317
318
//
// ARM branch instructions have limited pc-relative reach: +/-32MB in ARM
// mode, +/-16MB for thumb2, +/-4MB for thumb1.  If a branch target is out
// of range, we insert one or more "branch islands" between the branch and
// its target that allow island hopping to the target.
//
// Branch Island Algorithm
//
// If the __TEXT section is smaller than the branch reach, no branch islands
// are needed.  Otherwise, at regular intervals (slightly less than the
// branch reach — see maxDistanceBetweenIslands) a region is added which can
// contain branch islands.  Every out-of-range branch instruction is checked.
// If it crosses a region, an island is added to that region with the same
// target and the branch is adjusted to target the island instead.
//
// In theory, if too many islands are added to one region, it could grow the
// __TEXT section enough that other previously in-range branches could be
// pushed out of range.  We reduce the probability this could happen by
// spacing the regions with headroom (e.g. every 14MB for a 16MB reach),
// which means a region would have to hold ~2MB of islands (hundreds of
// thousands) before any branch could be pushed out of range.
//

342
343 static void makeIslandsForSection(const Options& opts, ld::Internal& state, ld::Internal::FinalSection* textSection)
344 {
345 // assign section offsets to each atom in __text section, watch for thumb branches, and find total size
346 bool hasThumbBranches = false;
347 bool haveCrossSectionBranches = false;
348 const bool preload = (opts.outputKind() == Options::kPreload);
349 uint64_t offset = 0;
350 for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ++ait) {
351 const ld::Atom* atom = *ait;
352 // check for thumb branches and cross section branches
353 const ld::Atom* target = NULL;
354 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
355 if ( fit->firstInCluster() ) {
356 target = NULL;
357 }
358 switch ( fit->binding ) {
359 case ld::Fixup::bindingNone:
360 case ld::Fixup::bindingByNameUnbound:
361 break;
362 case ld::Fixup::bindingByContentBound:
363 case ld::Fixup::bindingDirectlyBound:
364 target = fit->u.target;
365 break;
366 case ld::Fixup::bindingsIndirectlyBound:
367 target = state.indirectBindingTable[fit->u.bindingIndex];
368 break;
369 }
370 bool haveBranch = false;
371 switch (fit->kind) {
372 case ld::Fixup::kindStoreThumbBranch22:
373 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
374 hasThumbBranches = true;
375 // fall into arm branch case
376 case ld::Fixup::kindStoreARMBranch24:
377 case ld::Fixup::kindStoreTargetAddressARMBranch24:
378 haveBranch = true;
379 break;
380 default:
381 break;
382 }
383 if ( haveBranch && (target->contentType() != ld::Atom::typeStub) ) {
384 // <rdar://problem/14792124> haveCrossSectionBranches only applies to -preload builds
385 if ( preload && (atom->section() != target->section()) )
386 haveCrossSectionBranches = true;
387 }
388 }
389 // align atom
390 ld::Atom::Alignment atomAlign = atom->alignment();
391 uint64_t atomAlignP2 = (1 << atomAlign.powerOf2);
392 uint64_t currentModulus = (offset % atomAlignP2);
393 if ( currentModulus != atomAlign.modulus ) {
394 if ( atomAlign.modulus > currentModulus )
395 offset += atomAlign.modulus-currentModulus;
396 else
397 offset += atomAlign.modulus+atomAlignP2-currentModulus;
398 }
399 (const_cast<ld::Atom*>(atom))->setSectionOffset(offset);
400 offset += atom->size();
401 }
402 uint64_t totalTextSize = offset;
403 if ( (totalTextSize < textSizeWhenMightNeedBranchIslands(opts, hasThumbBranches)) && !haveCrossSectionBranches )
404 return;
405 if (_s_log) fprintf(stderr, "ld: section %s size=%llu, might need branch islands\n", textSection->sectionName(), totalTextSize);
406
407 // Figure out how many regions of branch islands will be needed, and their locations.
408 // Construct a vector containing the atoms after which branch islands will be inserted,
409 // taking into account follow on fixups. No atom run without an island can exceed kBetweenRegions.
410 const uint64_t kBetweenRegions = maxDistanceBetweenIslands(opts, hasThumbBranches); // place regions of islands every 14MB in __text section
411 std::vector<const ld::Atom*> branchIslandInsertionPoints; // atoms in the atom list after which branch islands will be inserted
412 uint64_t previousIslandEndAddr = 0;
413 const ld::Atom *insertionPoint = NULL;
414 branchIslandInsertionPoints.reserve(totalTextSize/kBetweenRegions*2);
415 for (std::vector<const ld::Atom*>::iterator it=textSection->atoms.begin(); it != textSection->atoms.end(); it++) {
416 const ld::Atom* atom = *it;
417 // if we move past the next atom, will the run length exceed kBetweenRegions?
418 if ( atom->sectionOffset() + atom->size() > previousIslandEndAddr + kBetweenRegions ) {
419 // yes. Add the last known good location (atom) for inserting a branch island.
420 if ( insertionPoint == NULL )
421 throwf("Unable to insert branch island. No insertion point available.");
422 branchIslandInsertionPoints.push_back(insertionPoint);
423 previousIslandEndAddr = insertionPoint->sectionOffset()+insertionPoint->size();
424 insertionPoint = NULL;
425 }
426 // Can we insert an island after this atom? If so then keep track of it.
427 if ( !atom->hasFixupsOfKind(ld::Fixup::kindNoneFollowOn) )
428 insertionPoint = atom;
429 }
430 // add one more island after the last atom if close to limit
431 if ( (insertionPoint != NULL) && (insertionPoint->sectionOffset() + insertionPoint->size() > previousIslandEndAddr + (kBetweenRegions-0x100000)) )
432 branchIslandInsertionPoints.push_back(insertionPoint);
433 if ( haveCrossSectionBranches && branchIslandInsertionPoints.empty() ) {
434 branchIslandInsertionPoints.push_back(textSection->atoms.back());
435 }
436 const int kIslandRegionsCount = branchIslandInsertionPoints.size();
437
438 if (_s_log) fprintf(stderr, "ld: will use %u branch island regions\n", kIslandRegionsCount);
439 typedef std::map<TargetAndOffset,const ld::Atom*, TargetAndOffsetComparor> AtomToIsland;
440 AtomToIsland* regionsMap[kIslandRegionsCount];
441 uint64_t regionAddresses[kIslandRegionsCount];
442 std::vector<const ld::Atom*>* regionsIslands[kIslandRegionsCount];
443 for(int i=0; i < kIslandRegionsCount; ++i) {
444 regionsMap[i] = new AtomToIsland();
445 regionsIslands[i] = new std::vector<const ld::Atom*>();
446 regionAddresses[i] = branchIslandInsertionPoints[i]->sectionOffset() + branchIslandInsertionPoints[i]->size();
447 if (_s_log) fprintf(stderr, "ld: branch islands will be inserted at 0x%08llX after %s\n", regionAddresses[i], branchIslandInsertionPoints[i]->name());
448 }
449 unsigned int islandCount = 0;
450
451 // create islands for branches in __text that are out of range
452 for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ++ait) {
453 const ld::Atom* atom = *ait;
454 const ld::Atom* target = NULL;
455 uint64_t addend = 0;
456 ld::Fixup* fixupWithTarget = NULL;
457 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
458 if ( fit->firstInCluster() ) {
459 target = NULL;
460 fixupWithTarget = NULL;
461 addend = 0;
462 }
463 switch ( fit->binding ) {
464 case ld::Fixup::bindingNone:
465 case ld::Fixup::bindingByNameUnbound:
466 break;
467 case ld::Fixup::bindingByContentBound:
468 case ld::Fixup::bindingDirectlyBound:
469 target = fit->u.target;
470 fixupWithTarget = fit;
471 break;
472 case ld::Fixup::bindingsIndirectlyBound:
473 target = state.indirectBindingTable[fit->u.bindingIndex];
474 fixupWithTarget = fit;
475 break;
476 }
477 bool haveBranch = false;
478 switch (fit->kind) {
479 case ld::Fixup::kindAddAddend:
480 addend = fit->u.addend;
481 break;
482 case ld::Fixup::kindStoreARMBranch24:
483 case ld::Fixup::kindStoreThumbBranch22:
484 case ld::Fixup::kindStoreTargetAddressARMBranch24:
485 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
486 haveBranch = true;
487 break;
488 default:
489 break;
490 }
491 if ( haveBranch ) {
492 bool crossSectionBranch = ( preload && (atom->section() != target->section()) );
493 int64_t srcAddr = atom->sectionOffset() + fit->offsetInAtom;
494 int64_t dstAddr = target->sectionOffset() + addend;
495 if ( preload ) {
496 srcAddr = sAtomToAddress[atom] + fit->offsetInAtom;
497 dstAddr = sAtomToAddress[target] + addend;
498 }
499 if ( target->section().type() == ld::Section::typeStub )
500 dstAddr = totalTextSize;
501 int64_t displacement = dstAddr - srcAddr;
502 TargetAndOffset finalTargetAndOffset = { target, addend };
503 const int64_t kBranchLimit = kBetweenRegions;
504 if ( crossSectionBranch && ((displacement > kBranchLimit) || (displacement < (-kBranchLimit))) ) {
505 const ld::Atom* island;
506 AtomToIsland* region = regionsMap[0];
507 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
508 if ( pos == region->end() ) {
509 island = makeBranchIsland(opts, fit->kind, 0, target, finalTargetAndOffset, atom->section(), true);
510 (*region)[finalTargetAndOffset] = island;
511 if (_s_log) fprintf(stderr, "added absolute branching island %p %s, displacement=%lld\n",
512 island, island->name(), displacement);
513 ++islandCount;
514 regionsIslands[0]->push_back(island);
515 }
516 else {
517 island = pos->second;
518 }
519 if (_s_log) fprintf(stderr, "using island %p %s for branch to %s from %s\n", island, island->name(), target->name(), atom->name());
520 fixupWithTarget->u.target = island;
521 fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
522 }
523 else if ( displacement > kBranchLimit ) {
524 // create forward branch chain
525 const ld::Atom* nextTarget = target;
526 if (_s_log) fprintf(stderr, "need forward branching island srcAdr=0x%08llX, dstAdr=0x%08llX, target=%s\n",
527 srcAddr, dstAddr, target->name());
528 for (int i=kIslandRegionsCount-1; i >=0 ; --i) {
529 AtomToIsland* region = regionsMap[i];
530 int64_t islandRegionAddr = regionAddresses[i];
531 if ( (srcAddr < islandRegionAddr) && ((islandRegionAddr <= dstAddr)) ) {
532 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
533 if ( pos == region->end() ) {
534 ld::Atom* island = makeBranchIsland(opts, fit->kind, i, nextTarget, finalTargetAndOffset, atom->section(), false);
535 (*region)[finalTargetAndOffset] = island;
536 if (_s_log) fprintf(stderr, "added forward branching island %p %s to region %d for %s\n", island, island->name(), i, atom->name());
537 regionsIslands[i]->push_back(island);
538 ++islandCount;
539 nextTarget = island;
540 }
541 else {
542 nextTarget = pos->second;
543 }
544 }
545 }
546 if (_s_log) fprintf(stderr, "using island %p %s for branch to %s from %s\n", nextTarget, nextTarget->name(), target->name(), atom->name());
547 fixupWithTarget->u.target = nextTarget;
548 fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
549 }
550 else if ( displacement < (-kBranchLimit) ) {
551 // create back branching chain
552 const ld::Atom* prevTarget = target;
553 for (int i=0; i < kIslandRegionsCount ; ++i) {
554 AtomToIsland* region = regionsMap[i];
555 int64_t islandRegionAddr = regionAddresses[i];
556 if ( (dstAddr < islandRegionAddr) && (islandRegionAddr <= srcAddr) ) {
557 if (_s_log) fprintf(stderr, "need backward branching island srcAdr=0x%08llX, dstAdr=0x%08llX, target=%s\n", srcAddr, dstAddr, target->name());
558 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
559 if ( pos == region->end() ) {
560 ld::Atom* island = makeBranchIsland(opts, fit->kind, i, prevTarget, finalTargetAndOffset, atom->section(), false);
561 (*region)[finalTargetAndOffset] = island;
562 if (_s_log) fprintf(stderr, "added back branching island %p %s to region %d for %s\n", island, island->name(), i, atom->name());
563 regionsIslands[i]->push_back(island);
564 ++islandCount;
565 prevTarget = island;
566 }
567 else {
568 prevTarget = pos->second;
569 }
570 }
571 }
572 if (_s_log) fprintf(stderr, "using back island %p %s for %s\n", prevTarget, prevTarget->name(), atom->name());
573 fixupWithTarget->u.target = prevTarget;
574 fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
575 }
576 }
577 }
578 }
579
580
581 // insert islands into __text section and adjust section offsets
582 if ( islandCount > 0 ) {
583 if ( _s_log ) fprintf(stderr, "ld: %u branch islands required in %u regions\n", islandCount, kIslandRegionsCount);
584 std::vector<const ld::Atom*> newAtomList;
585 newAtomList.reserve(textSection->atoms.size()+islandCount);
586
587 int regionIndex = 0;
588 for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ait++) {
589 const ld::Atom* atom = *ait;
590 newAtomList.push_back(atom);
591 if ( (regionIndex < kIslandRegionsCount) && (atom == branchIslandInsertionPoints[regionIndex]) ) {
592 std::vector<const ld::Atom*>* islands = regionsIslands[regionIndex];
593 newAtomList.insert(newAtomList.end(), islands->begin(), islands->end());
594 ++regionIndex;
595 }
596 }
597 // swap in new list of atoms for __text section
598 textSection->atoms.clear();
599 textSection->atoms = newAtomList;
600 }
601
602 }
603
604
605 static void buildAddressMap(const Options& opts, ld::Internal& state) {
606 // Assign addresses to sections
607 state.setSectionSizesAndAlignments();
608 state.assignFileOffsets();
609
610 // Assign addresses to atoms in a side table
611 const bool log = false;
612 if ( log ) fprintf(stderr, "buildAddressMap()\n");
613 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
614 ld::Internal::FinalSection* sect = *sit;
615 uint16_t maxAlignment = 0;
616 uint64_t offset = 0;
617 if ( log ) fprintf(stderr, " section=%s/%s, address=0x%08llX\n", sect->segmentName(), sect->sectionName(), sect->address);
618 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
619 const ld::Atom* atom = *ait;
620 uint32_t atomAlignmentPowerOf2 = atom->alignment().powerOf2;
621 uint32_t atomModulus = atom->alignment().modulus;
622 if ( atomAlignmentPowerOf2 > maxAlignment )
623 maxAlignment = atomAlignmentPowerOf2;
624 // calculate section offset for this atom
625 uint64_t alignment = 1 << atomAlignmentPowerOf2;
626 uint64_t currentModulus = (offset % alignment);
627 uint64_t requiredModulus = atomModulus;
628 if ( currentModulus != requiredModulus ) {
629 if ( requiredModulus > currentModulus )
630 offset += requiredModulus-currentModulus;
631 else
632 offset += requiredModulus+alignment-currentModulus;
633 }
634
635 if ( log ) fprintf(stderr, " 0x%08llX atom=%p, name=%s\n", sect->address+offset, atom, atom->name());
636 sAtomToAddress[atom] = sect->address + offset;
637
638 offset += atom->size();
639 }
640 }
641
642
643 }
644
645 void doPass(const Options& opts, ld::Internal& state)
646 {
647 // only make branch islands in final linked images
648 if ( opts.outputKind() == Options::kObjectFile )
649 return;
650
651 // Allow user to disable branch island generation
652 if ( !opts.allowBranchIslands() )
653 return;
654
655 // only ARM needs branch islands
656 switch ( opts.architecture() ) {
657 case CPU_TYPE_ARM:
658 break;
659 default:
660 return;
661 }
662
663 if ( opts.outputKind() == Options::kPreload ) {
664 buildAddressMap(opts, state);
665 }
666
667 // scan sections and add island to each code section
668 for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
669 ld::Internal::FinalSection* sect = *sit;
670 if ( sect->type() == ld::Section::typeCode )
671 makeIslandsForSection(opts, state, sect);
672 }
673 }
674
675
676 } // namespace branch_island
677 } // namespace passes
678 } // namespace ld