/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <dlfcn.h>
#include <libkern/OSByteOrder.h>

#include <vector>
#include <map>

#include "MachOFileAbstraction.hpp"
#include "ld.hpp"
#include "branch_island.h"

namespace ld {
namespace passes {
namespace branch_island {


struct TargetAndOffset { const ld::Atom* atom; uint32_t offset; };
class TargetAndOffsetComparor
{
public:
    bool operator()(const TargetAndOffset& left, const TargetAndOffset& right) const
    {
        if ( left.atom != right.atom )
            return ( left.atom < right.atom );
        return ( left.offset < right.offset );
    }
};


static bool _s_log = false;
static ld::Section _s_text_section("__TEXT", "__text", ld::Section::typeCode);


class ARMtoARMBranchIslandAtom : public ld::Atom {
public:
    ARMtoARMBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
        : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
                   ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
          _name(nm),
          _target(target),
          _finalTarget(finalTarget) { }

    virtual const ld::File* file() const { return NULL; }
    virtual const char* name() const { return _name; }
    virtual uint64_t size() const { return 4; }
    virtual uint64_t objectAddress() const { return 0; }
    virtual void copyRawContent(uint8_t buffer[]) const {
        int64_t displacement = _target->finalAddress() - this->finalAddress() - 8;
        if ( _target->contentType() == ld::Atom::typeBranchIsland ) {
            // An ARM branch can reach farther than a Thumb branch, but island
            // generation was conservative and placed islands every Thumb branch
            // distance apart.  Check whether this is an island-hopping branch
            // that can be optimized to go directly to the target.
            int64_t skipToFinalDisplacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - this->finalAddress() - 8;
            if ( (skipToFinalDisplacement < 33554428LL) && (skipToFinalDisplacement > (-33554432LL)) ) {
                // can skip the branch island and jump straight to the target
                if (_s_log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
                                    _target->name(), _finalTarget.atom->finalAddress(), this->finalAddress());
                displacement = skipToFinalDisplacement;
            }
            else {
                // ultimate target is too far, jump to the island
                if (_s_log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n",
                                    _target->name(), _finalTarget.atom->finalAddress());
            }
        }
        uint32_t imm24 = (displacement >> 2) & 0x00FFFFFF;
        int32_t branchInstruction = 0xEA000000 | imm24;
        OSWriteLittleInt32(buffer, 0, branchInstruction);
    }
    virtual void setScope(Scope) { }

private:
    const char* _name;
    const ld::Atom* _target;
    TargetAndOffset _finalTarget;
};
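
// Illustrative sketch (not part of the original pass and not referenced by it):
// the arithmetic behind the ARM branch encoding emitted above.  The A1 "B"
// encoding holds a signed 24-bit word offset, so the reachable displacement is
// -33554432 .. +33554428 bytes measured from the branch address + 8 (the value
// the ARM pc reads as).  exampleEncodeARMBranch24() is a hypothetical helper
// added only for illustration.
static inline uint32_t exampleEncodeARMBranch24(uint64_t branchAddr, uint64_t targetAddr)
{
    int64_t displacement = (int64_t)targetAddr - (int64_t)branchAddr - 8;
    assert( (displacement <= 33554428LL) && (displacement >= -33554432LL) );
    assert( (displacement & 3) == 0 );  // ARM branch targets are 4-byte aligned
    uint32_t imm24 = (uint32_t)(displacement >> 2) & 0x00FFFFFF;
    return 0xEA000000 | imm24;          // unconditional B
}
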


class ARMtoThumb1BranchIslandAtom : public ld::Atom {
public:
    ARMtoThumb1BranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
        : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
                   ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
          _name(nm),
          _target(target),
          _finalTarget(finalTarget) { }

    virtual const ld::File* file() const { return NULL; }
    virtual const char* name() const { return _name; }
    virtual uint64_t size() const { return 16; }
    virtual uint64_t objectAddress() const { return 0; }
    virtual void copyRawContent(uint8_t buffer[]) const {
        // There is no large-displacement Thumb1 branch instruction, so use ARM
        // instructions that can jump to Thumb.  A full 32-bit displacement is
        // stored, so the island jumps directly to the final target and no
        // island hopping is needed.
        int64_t displacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - (this->finalAddress() + 12);
        if ( _finalTarget.atom->isThumb() )
            displacement |= 1;
        if (_s_log) fprintf(stderr, "%s: 4 ARM instruction jump to final target at 0x%08llX\n",
                            _target->name(), _finalTarget.atom->finalAddress());
        OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc004);   // ldr  ip, [pc, #4]
        OSWriteLittleInt32(&buffer[ 4], 0, 0xe08fc00c);   // add  ip, pc, ip
        OSWriteLittleInt32(&buffer[ 8], 0, 0xe12fff1c);   // bx   ip
        OSWriteLittleInt32(&buffer[12], 0, displacement); // .long target - (this + 12), bit 0 set if target is Thumb
    }
    virtual void setScope(Scope) { }

private:
    const char* _name;
    const ld::Atom* _target;
    TargetAndOffset _finalTarget;
};
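
// Illustrative sketch (not part of the original pass and not referenced by it):
// why the displacement above is computed relative to finalAddress() + 12.
// The "add ip, pc, ip" sits at island+4, and when it executes the ARM pc reads
// as that instruction's address + 8 = island+12, so storing
// (target - (island + 12)) in the literal word makes ip equal the target
// address.  Bit 0 is set for Thumb targets so "bx ip" switches to Thumb state.
// exampleARMtoThumb1Literal() is a hypothetical helper added only for
// illustration.
static inline uint32_t exampleARMtoThumb1Literal(uint64_t islandAddr, uint64_t targetAddr, bool targetIsThumb)
{
    int64_t displacement = (int64_t)targetAddr - (int64_t)(islandAddr + 12);
    if ( targetIsThumb )
        displacement |= 1;
    return (uint32_t)displacement;   // the literal word written at island+12
}
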


class Thumb2toThumbBranchIslandAtom : public ld::Atom {
public:
    Thumb2toThumbBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
        : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
                   ld::Atom::symbolTableIn, false, true, false, ld::Atom::Alignment(1)),
          _name(nm),
          _target(target),
          _finalTarget(finalTarget) { }

    virtual const ld::File* file() const { return NULL; }
    virtual const char* name() const { return _name; }
    virtual uint64_t size() const { return 4; }
    virtual uint64_t objectAddress() const { return 0; }
    virtual void copyRawContent(uint8_t buffer[]) const {
        int64_t displacement = _target->finalAddress() - this->finalAddress() - 4;
        if ( _target->contentType() == ld::Atom::typeBranchIsland ) {
            // An ARM branch can reach farther than a Thumb branch, but island
            // generation was conservative and placed islands every Thumb branch
            // distance apart.  Check whether this is an island-hopping branch
            // that can be optimized to go directly to the target.
            int64_t skipToFinalDisplacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - this->finalAddress() - 4;
            if ( (skipToFinalDisplacement < 16777214) && (skipToFinalDisplacement > (-16777216LL)) ) {
                // can skip the branch island and jump straight to the target
                if (_s_log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
                                    _target->name(), _finalTarget.atom->finalAddress(), this->finalAddress());
                displacement = skipToFinalDisplacement;
            }
            else {
                // ultimate target is too far for a Thumb2 branch, jump to the island
                if (_s_log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n",
                                    _target->name(), _finalTarget.atom->finalAddress());
            }
        }
        // The 32-bit value written below is really two 16-bit Thumb2 halfwords:
        // the low halfword is the first instruction and holds the sign bit S
        // plus imm10 (displacement bits 21-12); the high halfword is the second
        // instruction and holds J1, J2 (derived from displacement bits 23-22)
        // and imm11 (displacement bits 11-1).  Its 0x9000 bits select B.W
        // rather than BL/BLX.
        uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
        uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
        uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
        uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
        uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
        uint32_t j1 = (i1 == s);
        uint32_t j2 = (i2 == s);
        uint32_t opcode = 0x9000F000;
        uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
        uint32_t firstDisp = (s << 10) | imm10;
        uint32_t newInstruction = opcode | (nextDisp << 16) | firstDisp;
        //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
        //  s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
        OSWriteLittleInt32(buffer, 0, newInstruction);
    }
    virtual void setScope(Scope) { }

private:
    const char* _name;
    const ld::Atom* _target;
    TargetAndOffset _finalTarget;
};
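
// Illustrative sketch (not part of the original pass and not referenced by it):
// the Thumb2 B.W (encoding T4) bit packing used above, written as a
// stand-alone helper.  Per the ARM architecture manual, J1 = !(I1 ^ S) and
// J2 = !(I2 ^ S), and the first halfword sits in the low 16 bits so that a
// single OSWriteLittleInt32() lays the two halfwords out in instruction order.
// exampleEncodeThumb2Branch() is a hypothetical helper added only for
// illustration.
static inline uint32_t exampleEncodeThumb2Branch(int64_t displacement)
{
    uint32_t s     = (uint32_t)(displacement >> 24) & 0x1;
    uint32_t i1    = (uint32_t)(displacement >> 23) & 0x1;
    uint32_t i2    = (uint32_t)(displacement >> 22) & 0x1;
    uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
    uint32_t imm11 = (uint32_t)(displacement >>  1) & 0x7FF;
    uint32_t j1    = (i1 == s);
    uint32_t j2    = (i2 == s);
    uint32_t firstHalf  = 0xF000 | (s << 10) | imm10;               // B.W first halfword
    uint32_t secondHalf = 0x9000 | (j1 << 13) | (j2 << 11) | imm11; // B.W second halfword
    return (secondHalf << 16) | firstHalf;
}
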


class NoPicARMtoThumbMBranchIslandAtom : public ld::Atom {
public:
    NoPicARMtoThumbMBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
        : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
                   ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
          _name(nm),
          _target(target),
          _finalTarget(finalTarget) { }

    virtual const ld::File* file() const { return NULL; }
    virtual const char* name() const { return _name; }
    virtual uint64_t size() const { return 8; }
    virtual uint64_t objectAddress() const { return 0; }
    virtual void copyRawContent(uint8_t buffer[]) const {
        // There is no large-displacement Thumb1 branch instruction, so use ARM
        // instructions that can jump to Thumb.  Because the output cannot slide,
        // a 32-bit absolute address is stored, so the island jumps directly to
        // the final target and no island hopping is needed.
        uint32_t targetAddr = _finalTarget.atom->finalAddress();
        if ( _finalTarget.atom->isThumb() )
            targetAddr |= 1;
        if (_s_log) fprintf(stderr, "%s: 2 ARM instruction jump to final target at 0x%08llX\n",
                            _target->name(), _finalTarget.atom->finalAddress());
        OSWriteLittleInt32(&buffer[0], 0, 0xe51ff004);  // ldr  pc, [pc, #-4]
        OSWriteLittleInt32(&buffer[4], 0, targetAddr);  // .long absolute target address, bit 0 set if target is Thumb
    }
    virtual void setScope(Scope) { }

private:
    const char* _name;
    const ld::Atom* _target;
    TargetAndOffset _finalTarget;
};


static ld::Atom* makeBranchIsland(const Options& opts, ld::Fixup::Kind kind, int islandRegion, const ld::Atom* nextTarget, TargetAndOffset finalTarget)
{
    char* name;
    if ( finalTarget.offset == 0 ) {
        if ( islandRegion == 0 )
            asprintf(&name, "%s.island", finalTarget.atom->name());
        else
            asprintf(&name, "%s.island.%d", finalTarget.atom->name(), islandRegion+1);
    }
    else {
        asprintf(&name, "%s_plus_%d.island.%d", finalTarget.atom->name(), finalTarget.offset, islandRegion);
    }

    switch ( kind ) {
        case ld::Fixup::kindStoreARMBranch24:
        case ld::Fixup::kindStoreThumbBranch22:
        case ld::Fixup::kindStoreTargetAddressARMBranch24:
        case ld::Fixup::kindStoreTargetAddressThumbBranch22:
            if ( finalTarget.atom->isThumb() ) {
                if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() ) {
                    return new Thumb2toThumbBranchIslandAtom(name, nextTarget, finalTarget);
                }
                else if ( opts.outputSlidable() ) {
                    return new ARMtoThumb1BranchIslandAtom(name, nextTarget, finalTarget);
                }
                else {
                    return new NoPicARMtoThumbMBranchIslandAtom(name, nextTarget, finalTarget);
                }
            }
            else {
                return new ARMtoARMBranchIslandAtom(name, nextTarget, finalTarget);
            }
            break;
        default:
            assert(0 && "unexpected branch kind");
            break;
    }
    return NULL;
}


static uint64_t textSizeWhenMightNeedBranchIslands(const Options& opts, bool seenThumbBranch)
{
    switch ( opts.architecture() ) {
        case CPU_TYPE_ARM:
            if ( ! seenThumbBranch )
                return 32000000;  // ARM can branch +/- 32MB
            else if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() )
                return 16000000;  // thumb2 can branch +/- 16MB
            else
                return 4000000;   // thumb1 can branch +/- 4MB
            break;
    }
    assert(0 && "unexpected architecture");
    return 0x100000000LL;
}


static uint64_t maxDistanceBetweenIslands(const Options& opts, bool seenThumbBranch)
{
    switch ( opts.architecture() ) {
        case CPU_TYPE_ARM:
            if ( ! seenThumbBranch )
                return 30*1024*1024;  // 2MB of branch islands per 32MB
            else if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() )
                return 14*1024*1024;  // 2MB of branch islands per 16MB
            else
                return 3500000;       // 0.5MB of branch islands per 4MB
            break;
    }
    assert(0 && "unexpected architecture");
    return 0x100000000LL;
}


//
// ARM branch instructions have limited PC-relative reach: an ARM branch can
// reach +/-32MB, a Thumb2 branch +/-16MB, and a Thumb1 branch only +/-4MB.
// If a branch target is farther away than that, we insert one or more
// "branch islands" between the branch and its target that allow island
// hopping to the target.
//
// Branch Island Algorithm
//
// If the __TEXT segment is smaller than the branch range, no branch islands
// are needed.  Otherwise, at regular intervals into the __TEXT segment
// (for example, every 14MB when Thumb2 branches are present) a region is
// added which can contain branch islands.  Every out-of-range branch
// instruction is checked.  If it crosses a region, an island is added to
// that region with the same target and the branch is adjusted to target
// the island instead.
//
// In theory, if too many islands are added to one region, it could grow
// __TEXT enough that other previously in-range branches get pushed out of
// range.  We reduce the probability of that happening by placing the regions
// conservatively short of the branch range (e.g. every 14MB for a 16MB
// range), so a region would have to grow to 2MB (over 500,000 4-byte
// islands) before any branch could be pushed out of range.
//

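// Illustrative sketch (not part of the original pass and not referenced by it):
// rough region arithmetic for the algorithm described above.  With Thumb2
// branches (branch range 16MB, regions every 14MB), a hypothetical 40MB
// __text section is over the threshold and needs about
// ceil(40MB / 14MB) = 3 island regions.  The real pass derives the actual
// insertion points from atom boundaries and follow-on fixups, so this is
// only a ballpark estimate.  exampleRegionCountEstimate() is a hypothetical
// helper added only for illustration.
static inline unsigned int exampleRegionCountEstimate(uint64_t textSize, uint64_t betweenRegions)
{
    if ( betweenRegions == 0 )
        return 0;
    return (unsigned int)((textSize + betweenRegions - 1) / betweenRegions);  // ceiling division
}
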
void doPass(const Options& opts, ld::Internal& state)
{
    // only make branch islands in final linked images
    if ( opts.outputKind() == Options::kObjectFile )
        return;

    // only ARM needs branch islands
    switch ( opts.architecture() ) {
        case CPU_TYPE_ARM:
            break;
        default:
            return;
    }

    // scan to find __text section
    ld::Internal::FinalSection* textSection = NULL;
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( strcmp(sect->sectionName(), "__text") == 0 )
            textSection = sect;
    }
    if ( textSection == NULL )
        return;

    // assign section offsets to each atom in __text section, watch for thumb branches, and find total size
    const bool isARM = (opts.architecture() == CPU_TYPE_ARM);
    bool hasThumbBranches = false;
    uint64_t offset = 0;
    for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ++ait) {
        const ld::Atom* atom = *ait;
        // check for thumb branches
        if ( isARM && !hasThumbBranches ) {
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                switch ( fit->kind ) {
                    case ld::Fixup::kindStoreThumbBranch22:
                    case ld::Fixup::kindStoreTargetAddressThumbBranch22:
                        hasThumbBranches = true;
                        break;
                    default:
                        break;
                }
            }
        }
        // align atom
        ld::Atom::Alignment atomAlign = atom->alignment();
        uint64_t atomAlignP2 = (1 << atomAlign.powerOf2);
        uint64_t currentModulus = (offset % atomAlignP2);
        if ( currentModulus != atomAlign.modulus ) {
            if ( atomAlign.modulus > currentModulus )
                offset += atomAlign.modulus-currentModulus;
            else
                offset += atomAlign.modulus+atomAlignP2-currentModulus;
        }
        (const_cast<ld::Atom*>(atom))->setSectionOffset(offset);
        offset += atom->size();
    }
    uint64_t totalTextSize = offset;
    if ( totalTextSize < textSizeWhenMightNeedBranchIslands(opts, hasThumbBranches) )
        return;
    if (_s_log) fprintf(stderr, "ld: __text section size=%llu, might need branch islands\n", totalTextSize);

    // Figure out how many regions of branch islands will be needed, and their locations.
    // Construct a vector containing the atoms after which branch islands will be inserted,
    // taking into account follow-on fixups.  No atom run without an island can exceed kBetweenRegions.
    const uint64_t kBetweenRegions = maxDistanceBetweenIslands(opts, hasThumbBranches); // place regions of islands every 14MB in __text section
    std::vector<const ld::Atom*> branchIslandInsertionPoints; // atoms in the atom list after which branch islands will be inserted
    uint64_t previousIslandEndAddr = 0;
    const ld::Atom* insertionPoint = NULL;
    branchIslandInsertionPoints.reserve(totalTextSize/kBetweenRegions*2);
    for (std::vector<const ld::Atom*>::iterator it=textSection->atoms.begin(); it != textSection->atoms.end(); it++) {
        const ld::Atom* atom = *it;
        // if we move past the next atom, will the run length exceed kBetweenRegions?
        if ( atom->sectionOffset() + atom->size() - previousIslandEndAddr > kBetweenRegions ) {
            // yes. Add the last known good location (atom) for inserting a branch island.
            if ( insertionPoint == NULL )
                throwf("Unable to insert branch island. No insertion point available.");
            branchIslandInsertionPoints.push_back(insertionPoint);
            previousIslandEndAddr = insertionPoint->sectionOffset()+insertionPoint->size();
            insertionPoint = NULL;
        }
        // Can we insert an island after this atom? If so then keep track of it.
        if ( !atom->hasFixupsOfKind(ld::Fixup::kindNoneFollowOn) )
            insertionPoint = atom;
    }
    // add one final insertion point after the last eligible atom
    if ( insertionPoint != NULL )
        branchIslandInsertionPoints.push_back(insertionPoint);
    const int kIslandRegionsCount = branchIslandInsertionPoints.size();
    if (_s_log) {
        fprintf(stderr, "ld: will use %u branch island regions\n", kIslandRegionsCount);
        for (std::vector<const ld::Atom*>::iterator it = branchIslandInsertionPoints.begin(); it != branchIslandInsertionPoints.end(); ++it) {
            const ld::Atom* atom = *it;
            const ld::File* file = atom->file();
            fprintf(stderr, "ld: branch island will be inserted at 0x%llx after %s", atom->sectionOffset()+atom->size(), atom->name());
            if (file) fprintf(stderr, " (%s)", atom->file()->path());
            fprintf(stderr, "\n");
        }
    }


    typedef std::map<TargetAndOffset,const ld::Atom*, TargetAndOffsetComparor> AtomToIsland;
    AtomToIsland* regionsMap[kIslandRegionsCount];
    std::vector<const ld::Atom*>* regionsIslands[kIslandRegionsCount];
    for (int i=0; i < kIslandRegionsCount; ++i) {
        regionsMap[i] = new AtomToIsland();
        regionsIslands[i] = new std::vector<const ld::Atom*>();
    }
    unsigned int islandCount = 0;

    // create islands for branches in __text that are out of range
    for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ++ait) {
        const ld::Atom* atom = *ait;
        const ld::Atom* target = NULL;
        uint64_t addend = 0;
        ld::Fixup* fixupWithTarget = NULL;
        for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
            if ( fit->firstInCluster() ) {
                target = NULL;
                fixupWithTarget = NULL;
                addend = 0;
            }
            switch ( fit->binding ) {
                case ld::Fixup::bindingNone:
                case ld::Fixup::bindingByNameUnbound:
                    break;
                case ld::Fixup::bindingByContentBound:
                case ld::Fixup::bindingDirectlyBound:
                    target = fit->u.target;
                    fixupWithTarget = fit;
                    break;
                case ld::Fixup::bindingsIndirectlyBound:
                    target = state.indirectBindingTable[fit->u.bindingIndex];
                    fixupWithTarget = fit;
                    break;
            }
            bool haveBranch = false;
            switch (fit->kind) {
                case ld::Fixup::kindAddAddend:
                    addend = fit->u.addend;
                    break;
                case ld::Fixup::kindStoreARMBranch24:
                case ld::Fixup::kindStoreThumbBranch22:
                case ld::Fixup::kindStoreTargetAddressARMBranch24:
                case ld::Fixup::kindStoreTargetAddressThumbBranch22:
                    haveBranch = true;
                    break;
                default:
                    break;
            }
            if ( haveBranch ) {
                int64_t srcAddr = atom->sectionOffset() + fit->offsetInAtom;
                int64_t dstAddr = target->sectionOffset() + addend;
                if ( target->section().type() == ld::Section::typeStub )
                    dstAddr = totalTextSize;
                int64_t displacement = dstAddr - srcAddr;
                TargetAndOffset finalTargetAndOffset = { target, addend };
                const int64_t kBranchLimit = kBetweenRegions;
                if ( displacement > kBranchLimit ) {
                    // create forward branch chain
                    const ld::Atom* nextTarget = target;
                    for (int i=kIslandRegionsCount-1; i >= 0; --i) {
                        AtomToIsland* region = regionsMap[i];
                        int64_t islandRegionAddr = kBetweenRegions * (i+1);
                        if ( (srcAddr < islandRegionAddr) && (islandRegionAddr <= dstAddr) ) {
                            AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
                            if ( pos == region->end() ) {
                                ld::Atom* island = makeBranchIsland(opts, fit->kind, i, nextTarget, finalTargetAndOffset);
                                (*region)[finalTargetAndOffset] = island;
                                if (_s_log) fprintf(stderr, "added island %s to region %d for %s\n", island->name(), i, atom->name());
                                regionsIslands[i]->push_back(island);
                                ++islandCount;
                                nextTarget = island;
                            }
                            else {
                                nextTarget = pos->second;
                            }
                        }
                    }
                    if (_s_log) fprintf(stderr, "using island %s for branch to %s from %s\n", nextTarget->name(), target->name(), atom->name());
                    fixupWithTarget->u.target = nextTarget;
                    fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
                }
                else if ( displacement < (-kBranchLimit) ) {
                    // create back branching chain
                    const ld::Atom* prevTarget = target;
                    for (int i=0; i < kIslandRegionsCount; ++i) {
                        AtomToIsland* region = regionsMap[i];
                        int64_t islandRegionAddr = kBetweenRegions * (i+1);
                        if ( (dstAddr <= islandRegionAddr) && (islandRegionAddr < srcAddr) ) {
                            AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
                            if ( pos == region->end() ) {
                                ld::Atom* island = makeBranchIsland(opts, fit->kind, i, prevTarget, finalTargetAndOffset);
                                (*region)[finalTargetAndOffset] = island;
                                if (_s_log) fprintf(stderr, "added back island %s to region %d for %s\n", island->name(), i, atom->name());
                                regionsIslands[i]->push_back(island);
                                ++islandCount;
                                prevTarget = island;
                            }
                            else {
                                prevTarget = pos->second;
                            }
                        }
                    }
                    if (_s_log) fprintf(stderr, "using back island %s for %s\n", prevTarget->name(), atom->name());
                    fixupWithTarget->u.target = prevTarget;
                    fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
                }
            }
        }
    }


    // insert islands into __text section and adjust section offsets
    if ( islandCount > 0 ) {
        if ( _s_log ) fprintf(stderr, "ld: %u branch islands required in %u regions\n", islandCount, kIslandRegionsCount);
        std::vector<const ld::Atom*> newAtomList;
        newAtomList.reserve(textSection->atoms.size()+islandCount);

        uint64_t regionIndex = 0;
        for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ait++) {
            newAtomList.push_back(*ait);
            // copy over atoms until we find an island insertion point
            // Note that the last insertion point is the last atom, so this loop never moves the iterator to atoms.end().
            while (*ait != branchIslandInsertionPoints[regionIndex]) {
                ait++;
                newAtomList.push_back(*ait);
            }

            // insert the branch island atoms after the insertion point atom
            std::vector<const ld::Atom*>* regionIslands = regionsIslands[regionIndex];
            for (std::vector<const ld::Atom*>::iterator rit=regionIslands->begin(); rit != regionIslands->end(); rit++) {
                const ld::Atom* islandAtom = *rit;
                newAtomList.push_back(islandAtom);
                if ( _s_log ) fprintf(stderr, "inserting island %s into __text section\n", islandAtom->name());
            }
            regionIndex++;
        }
        // swap in new list of atoms for __text section
        textSection->atoms.clear();
        textSection->atoms = newAtomList;
    }

}


} // namespace branch_island
} // namespace passes
} // namespace ld