/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <dlfcn.h>
#include <libkern/OSByteOrder.h>

#include <vector>
#include <map>

#include "MachOFileAbstraction.hpp"
#include "ld.hpp"
#include "branch_island.h"

namespace ld {
namespace passes {
namespace branch_island {




struct TargetAndOffset { const ld::Atom* atom; uint32_t offset; };
class TargetAndOffsetComparor
{
public:
    bool operator()(const TargetAndOffset& left, const TargetAndOffset& right) const
    {
        if ( left.atom != right.atom )
            return ( left.atom < right.atom );
        return ( left.offset < right.offset );
    }
};


static bool _s_log = false;
static ld::Section _s_text_section("__TEXT", "__text", ld::Section::typeCode);


class ARMtoARMBranchIslandAtom : public ld::Atom {
public:
    ARMtoARMBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
        : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
                   ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
          _name(nm),
          _target(target),
          _finalTarget(finalTarget) { }

    virtual const ld::File* file() const { return NULL; }
    virtual bool translationUnitSource(const char** dir, const char**) const
        { return false; }
    virtual const char* name() const { return _name; }
    virtual uint64_t size() const { return 4; }
    virtual uint64_t objectAddress() const { return 0; }
    virtual void copyRawContent(uint8_t buffer[]) const {
        int64_t displacement = _target->finalAddress() - this->finalAddress() - 8;
        if ( _target->contentType() == ld::Atom::typeBranchIsland ) {
            // An ARM branch can reach farther than a Thumb branch.  Branch island
            // generation was conservative and placed islands every Thumb branch
            // distance apart.  Check whether this is an island-hopping branch
            // that can be optimized to go directly to the final target.
            int64_t skipToFinalDisplacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - this->finalAddress() - 8;
            if ( (skipToFinalDisplacement < 33554428LL) && (skipToFinalDisplacement > (-33554432LL)) ) {
                // can skip branch island and jump straight to target
                if (_s_log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
                                    _target->name(), _finalTarget.atom->finalAddress(), this->finalAddress());
                displacement = skipToFinalDisplacement;
            }
            else {
                // ultimate target is too far, jump to island
                if (_s_log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n",
                                    _target->name(), _finalTarget.atom->finalAddress());
            }
        }
        uint32_t imm24 = (displacement >> 2) & 0x00FFFFFF;
        uint32_t branchInstruction = 0xEA000000 | imm24;
        OSWriteLittleInt32(buffer, 0, branchInstruction);
    }
    virtual void setScope(Scope) { }

private:
    const char* _name;
    const ld::Atom* _target;
    TargetAndOffset _finalTarget;
};
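
// Illustrative example (not part of the pass): how copyRawContent() above maps a byte
// displacement onto the 24-bit immediate of an ARM B instruction.  Assume the island
// sits at 0x00004000 and the next target at 0x00104000 (1MB ahead):
//     displacement = 0x00104000 - 0x00004000 - 8 = 0x000FFFF8
//     imm24        = (0x000FFFF8 >> 2) & 0x00FFFFFF = 0x0003FFFE
//     instruction  = 0xEA000000 | 0x0003FFFE = 0xEA03FFFE   (B <target>, always)
// The CPU computes PC(+8) + signextend(imm24)<<2 = 0x00004008 + 0x000FFFF8 = 0x00104000,
// the intended target.  The limits checked above (33554428 = 2^25-4, -33554432 = -2^25)
// are exactly the range representable by the sign-extended imm24 shifted left by 2.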


class ARMtoThumb1BranchIslandAtom : public ld::Atom {
public:
    ARMtoThumb1BranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
        : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
                   ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
          _name(nm),
          _target(target),
          _finalTarget(finalTarget) { }

    virtual const ld::File* file() const { return NULL; }
    virtual bool translationUnitSource(const char** dir, const char**) const
        { return false; }
    virtual const char* name() const { return _name; }
    virtual uint64_t size() const { return 16; }
    virtual uint64_t objectAddress() const { return 0; }
    virtual void copyRawContent(uint8_t buffer[]) const {
        // There is no large-displacement Thumb1 branch instruction.
        // Instead, use ARM instructions that can jump to Thumb.
        // A full 32-bit displacement is stored, so the island jumps directly
        // to the final target; no island hopping is needed.
        int64_t displacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - (this->finalAddress() + 12);
        if ( _finalTarget.atom->isThumb() )
            displacement |= 1;
        if (_s_log) fprintf(stderr, "%s: 4 ARM instruction jump to final target at 0x%08llX\n",
                            _target->name(), _finalTarget.atom->finalAddress());
        OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc004);    // ldr  ip, [pc, #4]
        OSWriteLittleInt32(&buffer[ 4], 0, 0xe08fc00c);    // add  ip, pc, ip
        OSWriteLittleInt32(&buffer[ 8], 0, 0xe12fff1c);    // bx   ip
        OSWriteLittleInt32(&buffer[12], 0, displacement);  // .long target-this
    }
    virtual void setScope(Scope) { }

private:
    const char* _name;
    const ld::Atom* _target;
    TargetAndOffset _finalTarget;
};
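
// Illustrative walk-through (not part of the pass) of the 4-word sequence emitted above,
// with A = the island's own address.  On ARM, reading pc yields the address of the
// current instruction plus 8:
//     A+0:  ldr ip, [pc, #4]   ; pc reads as A+8, so ip = word at A+12 (the stored displacement)
//     A+4:  add ip, pc, ip     ; pc reads as A+12, so ip = (A+12) + (target - (A+12)) = target
//     A+8:  bx  ip             ; branch, entering Thumb state because bit 0 of ip was set
//     A+12: .long target - (A+12)
// Because the full 32-bit displacement is added at run time, this island reaches any
// target and stays correct if the image slides, which is why it is used for slidable
// output (see makeBranchIsland() below).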


class Thumb2toThumbBranchIslandAtom : public ld::Atom {
public:
    Thumb2toThumbBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
        : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
                   ld::Atom::symbolTableIn, false, true, false, ld::Atom::Alignment(1)),
          _name(nm),
          _target(target),
          _finalTarget(finalTarget) { }

    virtual const ld::File* file() const { return NULL; }
    virtual bool translationUnitSource(const char** dir, const char**) const
        { return false; }
    virtual const char* name() const { return _name; }
    virtual uint64_t size() const { return 4; }
    virtual uint64_t objectAddress() const { return 0; }
    virtual void copyRawContent(uint8_t buffer[]) const {
        int64_t displacement = _target->finalAddress() - this->finalAddress() - 4;
        if ( _target->contentType() == ld::Atom::typeBranchIsland ) {
            // An ARM branch can reach farther than a Thumb branch.  Branch island
            // generation was conservative and placed islands every Thumb branch
            // distance apart.  Check whether this is an island-hopping branch
            // that can be optimized to go directly to the final target.
            int64_t skipToFinalDisplacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - this->finalAddress() - 4;
            if ( (skipToFinalDisplacement < 16777214LL) && (skipToFinalDisplacement > (-16777216LL)) ) {
                // can skip branch island and jump straight to target
                if (_s_log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
                                    _target->name(), _finalTarget.atom->finalAddress(), this->finalAddress());
                displacement = skipToFinalDisplacement;
            }
            else {
                // ultimate target is too far for thumb2 branch, jump to island
                if (_s_log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n",
                                    _target->name(), _finalTarget.atom->finalAddress());
            }
        }
        // The 32-bit instruction is really two 16-bit instructions:
        // the lower 16 bits are the first instruction (first in memory), which carry
        // the high 11 bits of the displacement; the upper 16 bits are the second
        // instruction, which carry the low 11 bits of the displacement as well as
        // the bits that differentiate b, bl and blx.
        uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
        uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
        uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
        uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
        uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
        uint32_t j1 = (i1 == s);
        uint32_t j2 = (i2 == s);
        uint32_t opcode = 0x9000F000;
        uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
        uint32_t firstDisp = (s << 10) | imm10;
        uint32_t newInstruction = opcode | (nextDisp << 16) | firstDisp;
        //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
        //  s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
        OSWriteLittleInt32(buffer, 0, newInstruction);
    }
    virtual void setScope(Scope) { }

private:
    const char* _name;
    const ld::Atom* _target;
    TargetAndOffset _finalTarget;
};
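
// Illustrative example (not part of the pass): the 32-bit Thumb2 branch built above for
// a displacement of +0x00100000 (1MB), computed relative to PC = island address + 4:
//     s = 0, i1 = 0, i2 = 0, imm10 = 0x100, imm11 = 0x000
//     j1 = (i1 == s) = 1, j2 = (i2 == s) = 1
//     firstDisp = 0x0100                     -> first halfword  0xF000 | 0x0100 = 0xF100
//     nextDisp  = (1 << 13) | (1 << 11) = 0x2800 -> second halfword 0x9000 | 0x2800 = 0xB800
//     newInstruction = 0x9000F000 | (0x2800 << 16) | 0x0100 = 0xB800F100
// OSWriteLittleInt32() stores this little-endian, so the halfwords land in memory in the
// order 0xF100, 0xB800 -- the two halfwords of a Thumb2 B.W instruction.  The decoder
// reconstructs I1 = !(J1 ^ S) = 0, I2 = !(J2 ^ S) = 0 and the offset S:I1:I2:imm10:imm11:'0'
// = 0x00100000, matching the +/-16MB limits (16777214 = 2^24-2, -16777216 = -2^24) above.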


class NoPicARMtoThumbMBranchIslandAtom : public ld::Atom {
public:
    NoPicARMtoThumbMBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
        : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
                   ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
          _name(nm),
          _target(target),
          _finalTarget(finalTarget) { }

    virtual const ld::File* file() const { return NULL; }
    virtual bool translationUnitSource(const char** dir, const char**) const
        { return false; }
    virtual const char* name() const { return _name; }
    virtual uint64_t size() const { return 8; }
    virtual uint64_t objectAddress() const { return 0; }
    virtual void copyRawContent(uint8_t buffer[]) const {
        // There is no large-displacement Thumb1 branch instruction.
        // Instead, use ARM instructions that can jump to Thumb.
        // The absolute 32-bit address is stored, so the island jumps directly
        // to the final target; no island hopping is needed.
        uint32_t targetAddr = _finalTarget.atom->finalAddress();
        if ( _finalTarget.atom->isThumb() )
            targetAddr |= 1;
        if (_s_log) fprintf(stderr, "%s: 2 ARM instruction jump to final target at 0x%08llX\n",
                            _target->name(), _finalTarget.atom->finalAddress());
        OSWriteLittleInt32(&buffer[0], 0, 0xe51ff004);   // ldr  pc, [pc, #-4]
        OSWriteLittleInt32(&buffer[4], 0, targetAddr);   // .long targetAddr (absolute address of final target)
    }
    virtual void setScope(Scope) { }

private:
    const char* _name;
    const ld::Atom* _target;
    TargetAndOffset _finalTarget;
};
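
// Illustrative note (not part of the pass): 0xE51FF004 is "ldr pc, [pc, #-4]".  At the
// island's address A, pc reads as A+8, so the load comes from A+4 -- the absolute address
// stored just above -- and writing it to pc branches there, entering Thumb state if bit 0
// is set.  Because an absolute address is baked into the island, this variant is only
// chosen when the output is not slidable (see the outputSlidable() check in
// makeBranchIsland() below).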


static ld::Atom* makeBranchIsland(const Options& opts, ld::Fixup::Kind kind, int islandRegion, const ld::Atom* nextTarget, TargetAndOffset finalTarget)
{
    char* name;
    if ( finalTarget.offset == 0 ) {
        if ( islandRegion == 0 )
            asprintf(&name, "%s.island", finalTarget.atom->name());
        else
            asprintf(&name, "%s.island.%d", finalTarget.atom->name(), islandRegion+1);
    }
    else {
        asprintf(&name, "%s_plus_%d.island.%d", finalTarget.atom->name(), finalTarget.offset, islandRegion);
    }

    switch ( kind ) {
        case ld::Fixup::kindStoreARMBranch24:
        case ld::Fixup::kindStoreThumbBranch22:
        case ld::Fixup::kindStoreTargetAddressARMBranch24:
        case ld::Fixup::kindStoreTargetAddressThumbBranch22:
            if ( finalTarget.atom->isThumb() ) {
                if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() ) {
                    return new Thumb2toThumbBranchIslandAtom(name, nextTarget, finalTarget);
                }
                else if ( opts.outputSlidable() ) {
                    return new ARMtoThumb1BranchIslandAtom(name, nextTarget, finalTarget);
                }
                else {
                    return new NoPicARMtoThumbMBranchIslandAtom(name, nextTarget, finalTarget);
                }
            }
            else {
                return new ARMtoARMBranchIslandAtom(name, nextTarget, finalTarget);
            }
            break;
        default:
            assert(0 && "unexpected branch kind");
            break;
    }
    return NULL;
}


static uint64_t textSizeWhenMightNeedBranchIslands(const Options& opts, bool seenThumbBranch)
{
    switch ( opts.architecture() ) {
        case CPU_TYPE_ARM:
            if ( ! seenThumbBranch )
                return 32000000;  // ARM can branch +/- 32MB
            else if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() )
                return 16000000;  // thumb2 can branch +/- 16MB
            else
                return 4000000;   // thumb1 can branch +/- 4MB
            break;
    }
    assert(0 && "unexpected architecture");
    return 0x100000000LL;
}


static uint64_t maxDistanceBetweenIslands(const Options& opts, bool seenThumbBranch)
{
    switch ( opts.architecture() ) {
        case CPU_TYPE_ARM:
            if ( ! seenThumbBranch )
                return 30*1024*1024;  // 2MB of branch islands per 32MB
            else if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() )
                return 14*1024*1024;  // 2MB of branch islands per 16MB
            else
                return 3500000;       // 0.5MB of branch islands per 4MB
            break;
    }
    assert(0 && "unexpected architecture");
    return 0x100000000LL;
}
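
// Note on the constants above: textSizeWhenMightNeedBranchIslands() uses decimal values
// (e.g. 32000000) slightly smaller than the true hardware limits (32MiB = 33554432
// bytes), so island generation is triggered conservatively.  The region spacing is the
// branch range minus a reserve for the islands themselves: 32MB - 2MB = 30MB,
// 16MB - 2MB = 14MB, and 4MB - 0.5MB = 3.5MB for Thumb1.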


//
// ARM branch instructions are PC-relative with a limited range:
// +/-32MB for ARM, +/-16MB for Thumb2, and +/-4MB for Thumb1.
// If a branch target is out of range, we insert one or more
// "branch islands" between the branch and its target that
// allow island hopping to the target.
//
// Branch Island Algorithm
//
// If the __TEXT segment is smaller than the branch range, no branch
// islands are needed.  Otherwise, a region that can contain branch
// islands is added at regular intervals in the __TEXT segment (every
// 14MB for Thumb2; see maxDistanceBetweenIslands() for the other
// cases).  Every out-of-range bl instruction is checked.  If it
// crosses a region, an island is added to that region with the same
// target and the bl is adjusted to target the island instead.
//
// In theory, if too many islands are added to one region, it
// could grow the __TEXT enough that other previously in-range
// bl branches could be pushed out of range.  We reduce the
// probability this could happen by spacing the regions 2MB
// (0.5MB for Thumb1) closer together than the branch range,
// so a region would have to accumulate roughly 2MB of islands
// (about 500,000 of them) before any branch could be pushed
// out of range.
//
void doPass(const Options& opts, ld::Internal& state)
{
    // only make branch islands in final linked images
    if ( opts.outputKind() == Options::kObjectFile )
        return;

    // only ARM needs branch islands
    switch ( opts.architecture() ) {
        case CPU_TYPE_ARM:
            break;
        default:
            return;
    }

    // scan to find __text section
    ld::Internal::FinalSection* textSection = NULL;
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( strcmp(sect->sectionName(), "__text") == 0 )
            textSection = sect;
    }
    if ( textSection == NULL )
        return;

    // assign section offsets to each atom in __text section, watch for thumb branches, and find total size
    const bool isARM = (opts.architecture() == CPU_TYPE_ARM);
    bool hasThumbBranches = false;
    uint64_t offset = 0;
    for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ++ait) {
        const ld::Atom* atom = *ait;
        // check for thumb branches
        if ( isARM && !hasThumbBranches ) {
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                switch ( fit->kind ) {
                    case ld::Fixup::kindStoreThumbBranch22:
                    case ld::Fixup::kindStoreTargetAddressThumbBranch22:
                        hasThumbBranches = true;
                        break;
                    default:
                        break;
                }
            }
        }
        // align atom
        ld::Atom::Alignment atomAlign = atom->alignment();
        uint64_t atomAlignP2 = (1 << atomAlign.powerOf2);
        uint64_t currentModulus = (offset % atomAlignP2);
        if ( currentModulus != atomAlign.modulus ) {
            if ( atomAlign.modulus > currentModulus )
                offset += atomAlign.modulus-currentModulus;
            else
                offset += atomAlign.modulus+atomAlignP2-currentModulus;
        }
        (const_cast<ld::Atom*>(atom))->setSectionOffset(offset);
        offset += atom->size();
    }
    uint64_t totalTextSize = offset;
    if ( totalTextSize < textSizeWhenMightNeedBranchIslands(opts, hasThumbBranches) )
        return;
    if (_s_log) fprintf(stderr, "ld: __text section size=%llu, might need branch islands\n", totalTextSize);

    // Figure out how many regions of branch islands will be needed, and their locations.
    // Construct a vector containing the atoms after which branch islands will be inserted,
    // taking into account follow-on fixups.  No atom run without an island can exceed kBetweenRegions.
    const uint64_t kBetweenRegions = maxDistanceBetweenIslands(opts, hasThumbBranches); // place regions of islands every 14MB in __text section
    std::vector<const ld::Atom*> branchIslandInsertionPoints; // atoms in the atom list after which branch islands will be inserted
    uint64_t previousIslandEndAddr = 0;
    const ld::Atom* insertionPoint = NULL;
    branchIslandInsertionPoints.reserve(totalTextSize/kBetweenRegions*2);
    for (std::vector<const ld::Atom*>::iterator it=textSection->atoms.begin(); it != textSection->atoms.end(); it++) {
        const ld::Atom* atom = *it;
        // if we move past the next atom, will the run length exceed kBetweenRegions?
        if ( atom->sectionOffset() + atom->size() - previousIslandEndAddr > kBetweenRegions ) {
            // yes. Add the last known good location (atom) for inserting a branch island.
            if ( insertionPoint == NULL )
                throwf("Unable to insert branch island. No insertion point available.");
            branchIslandInsertionPoints.push_back(insertionPoint);
            previousIslandEndAddr = insertionPoint->sectionOffset()+insertionPoint->size();
            insertionPoint = NULL;
        }
        // Can we insert an island after this atom? If so then keep track of it.
        if ( !atom->hasFixupsOfKind(ld::Fixup::kindNoneFollowOn) )
            insertionPoint = atom;
    }
    // add one more island after the last atom
    if (insertionPoint != NULL)
        branchIslandInsertionPoints.push_back(insertionPoint);
    const int kIslandRegionsCount = branchIslandInsertionPoints.size();
    if (_s_log) {
        fprintf(stderr, "ld: will use %d branch island regions\n", kIslandRegionsCount);
        for (std::vector<const ld::Atom*>::iterator it = branchIslandInsertionPoints.begin(); it != branchIslandInsertionPoints.end(); ++it) {
            const ld::Atom* atom = *it;
            const ld::File* file = atom->file();
            fprintf(stderr, "ld: branch island will be inserted at 0x%llx after %s", atom->sectionOffset()+atom->size(), atom->name());
            if (file) fprintf(stderr, " (%s)", atom->file()->path());
            fprintf(stderr, "\n");
        }
    }


    typedef std::map<TargetAndOffset,const ld::Atom*, TargetAndOffsetComparor> AtomToIsland;
    AtomToIsland* regionsMap[kIslandRegionsCount];
    std::vector<const ld::Atom*>* regionsIslands[kIslandRegionsCount];
    for (int i=0; i < kIslandRegionsCount; ++i) {
        regionsMap[i] = new AtomToIsland();
        regionsIslands[i] = new std::vector<const ld::Atom*>();
    }
    unsigned int islandCount = 0;

    // create islands for branches in __text that are out of range
    for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ++ait) {
        const ld::Atom* atom = *ait;
        const ld::Atom* target = NULL;
        uint64_t addend = 0;
        ld::Fixup* fixupWithTarget = NULL;
        for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
            if ( fit->firstInCluster() ) {
                target = NULL;
                fixupWithTarget = NULL;
                addend = 0;
            }
            switch ( fit->binding ) {
                case ld::Fixup::bindingNone:
                case ld::Fixup::bindingByNameUnbound:
                    break;
                case ld::Fixup::bindingByContentBound:
                case ld::Fixup::bindingDirectlyBound:
                    target = fit->u.target;
                    fixupWithTarget = fit;
                    break;
                case ld::Fixup::bindingsIndirectlyBound:
                    target = state.indirectBindingTable[fit->u.bindingIndex];
                    fixupWithTarget = fit;
                    break;
            }
            bool haveBranch = false;
            switch (fit->kind) {
                case ld::Fixup::kindAddAddend:
                    addend = fit->u.addend;
                    break;
                case ld::Fixup::kindStoreARMBranch24:
                case ld::Fixup::kindStoreThumbBranch22:
                case ld::Fixup::kindStoreTargetAddressARMBranch24:
                case ld::Fixup::kindStoreTargetAddressThumbBranch22:
                    haveBranch = true;
                    break;
                default:
                    break;
            }
            if ( haveBranch ) {
                int64_t srcAddr = atom->sectionOffset() + fit->offsetInAtom;
                int64_t dstAddr = target->sectionOffset() + addend;
                if ( target->section().type() == ld::Section::typeStub )
                    dstAddr = totalTextSize;
                int64_t displacement = dstAddr - srcAddr;
                TargetAndOffset finalTargetAndOffset = { target, addend };
                const int64_t kBranchLimit = kBetweenRegions;
                if ( displacement > kBranchLimit ) {
                    // create forward branch chain
                    const ld::Atom* nextTarget = target;
                    for (int i=kIslandRegionsCount-1; i >= 0; --i) {
                        AtomToIsland* region = regionsMap[i];
                        int64_t islandRegionAddr = kBetweenRegions * (i+1);
                        if ( (srcAddr < islandRegionAddr) && (islandRegionAddr <= dstAddr) ) {
                            AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
                            if ( pos == region->end() ) {
                                ld::Atom* island = makeBranchIsland(opts, fit->kind, i, nextTarget, finalTargetAndOffset);
                                (*region)[finalTargetAndOffset] = island;
                                if (_s_log) fprintf(stderr, "added island %s to region %d for %s\n", island->name(), i, atom->name());
                                regionsIslands[i]->push_back(island);
                                ++islandCount;
                                nextTarget = island;
                            }
                            else {
                                nextTarget = pos->second;
                            }
                        }
                    }
                    if (_s_log) fprintf(stderr, "using island %s for branch to %s from %s\n", nextTarget->name(), target->name(), atom->name());
                    fixupWithTarget->u.target = nextTarget;
                    fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
                }
                else if ( displacement < (-kBranchLimit) ) {
                    // create back branching chain
                    const ld::Atom* prevTarget = target;
                    for (int i=0; i < kIslandRegionsCount; ++i) {
                        AtomToIsland* region = regionsMap[i];
                        int64_t islandRegionAddr = kBetweenRegions * (i+1);
                        if ( (dstAddr <= islandRegionAddr) && (islandRegionAddr < srcAddr) ) {
                            AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
                            if ( pos == region->end() ) {
                                ld::Atom* island = makeBranchIsland(opts, fit->kind, i, prevTarget, finalTargetAndOffset);
                                (*region)[finalTargetAndOffset] = island;
                                if (_s_log) fprintf(stderr, "added back island %s to region %d for %s\n", island->name(), i, atom->name());
                                regionsIslands[i]->push_back(island);
                                ++islandCount;
                                prevTarget = island;
                            }
                            else {
                                prevTarget = pos->second;
                            }
                        }
                    }
                    if (_s_log) fprintf(stderr, "using back island %s for %s\n", prevTarget->name(), atom->name());
                    fixupWithTarget->u.target = prevTarget;
                    fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
                }
            }
        }
    }


    // insert islands into __text section and adjust section offsets
    if ( islandCount > 0 ) {
        if ( _s_log ) fprintf(stderr, "ld: %u branch islands required in %d regions\n", islandCount, kIslandRegionsCount);
        std::vector<const ld::Atom*> newAtomList;
        newAtomList.reserve(textSection->atoms.size()+islandCount);

        uint64_t regionIndex = 0;
        for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ait++) {
            newAtomList.push_back(*ait);
            // copy over atoms until we find an island insertion point
            // Note that the last insertion point is the last atom, so this loop never moves the iterator to atoms.end().
            while (*ait != branchIslandInsertionPoints[regionIndex]) {
                ait++;
                newAtomList.push_back(*ait);
            }

            // insert the branch island atoms after the insertion point atom
            std::vector<const ld::Atom*>* regionIslands = regionsIslands[regionIndex];
            for (std::vector<const ld::Atom*>::iterator rit=regionIslands->begin(); rit != regionIslands->end(); rit++) {
                const ld::Atom* islandAtom = *rit;
                newAtomList.push_back(islandAtom);
                if ( _s_log ) fprintf(stderr, "inserting island %s into __text section\n", islandAtom->name());
            }
            regionIndex++;
        }
        // swap in new list of atoms for __text section
        textSection->atoms.clear();
        textSection->atoms = newAtomList;
    }

}


} // namespace branch_island
} // namespace passes
} // namespace ld