]> git.saurik.com Git - apple/ld64.git/blob - src/ld/passes/branch_island.cpp
99b3eb8f4b70eacb8b6d263f3ceba6219e2d57fe
[apple/ld64.git] / src / ld / passes / branch_island.cpp
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2009 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <stdint.h>
27 #include <math.h>
28 #include <unistd.h>
29 #include <dlfcn.h>
30 #include <libkern/OSByteOrder.h>
31
32 #include <vector>
33 #include <map>
34
35 #include "MachOFileAbstraction.hpp"
36 #include "ld.hpp"
37 #include "branch_island.h"
38
39 namespace ld {
40 namespace passes {
41 namespace branch_island {
42
43
44
45
// A branch destination: the target atom plus an addend (byte offset within it).
struct TargetAndOffset { const ld::Atom* atom; uint32_t offset; };
47 class TargetAndOffsetComparor
48 {
49 public:
50 bool operator()(const TargetAndOffset& left, const TargetAndOffset& right) const
51 {
52 if ( left.atom != right.atom )
53 return ( left.atom < right.atom );
54 return ( left.offset < right.offset );
55 }
56 };
57
58
// set to true for verbose island-placement tracing on stderr
static bool _s_log = false;
// all branch islands are emitted into the __TEXT,__text code section
static ld::Section _s_text_section("__TEXT", "__text", ld::Section::typeCode);
61
62 class PPCBranchIslandAtom : public ld::Atom {
63 public:
64 PPCBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
65 : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
66 ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
67 ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
68 _name(nm),
69 _target(target),
70 _finalTarget(finalTarget) { }
71
72 virtual const ld::File* file() const { return NULL; }
73 virtual bool translationUnitSource(const char** dir, const char**) const
74 { return false; }
75 virtual const char* name() const { return _name; }
76 virtual uint64_t size() const { return 4; }
77 virtual uint64_t objectAddress() const { return 0; }
78 virtual void copyRawContent(uint8_t buffer[]) const {
79 int64_t displacement = _target->finalAddress() - this->finalAddress();
80 const int64_t bl_sixteenMegLimit = 0x00FFFFFF;
81 if ( _target->contentType() == ld::Atom::typeBranchIsland ) {
82 // try optimizing away intermediate islands
83 int64_t skipToFinalDisplacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - this->finalAddress();
84 if ( (skipToFinalDisplacement > bl_sixteenMegLimit) && (skipToFinalDisplacement < (-bl_sixteenMegLimit)) ) {
85 displacement = skipToFinalDisplacement;
86 }
87 }
88 int32_t branchInstruction = 0x48000000 | ((uint32_t)displacement & 0x03FFFFFC);
89 OSWriteBigInt32(buffer, 0, branchInstruction);
90 }
91 virtual void setScope(Scope) { }
92
93 private:
94 const char* _name;
95 const ld::Atom* _target;
96 TargetAndOffset _finalTarget;
97 };
98
99
100 class ARMtoARMBranchIslandAtom : public ld::Atom {
101 public:
102 ARMtoARMBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
103 : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
104 ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
105 ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
106 _name(nm),
107 _target(target),
108 _finalTarget(finalTarget) { }
109
110 virtual const ld::File* file() const { return NULL; }
111 virtual bool translationUnitSource(const char** dir, const char**) const
112 { return false; }
113 virtual const char* name() const { return _name; }
114 virtual uint64_t size() const { return 4; }
115 virtual uint64_t objectAddress() const { return 0; }
116 virtual void copyRawContent(uint8_t buffer[]) const {
117 int64_t displacement = _target->finalAddress() - this->finalAddress() - 8;
118 if ( _target->contentType() == ld::Atom::typeBranchIsland ) {
119 // an ARM branch can branch farther than a thumb branch. The branch
120 // island generation was conservative and put islands every thumb
121 // branch distance apart. Check to see if this is a an island
122 // hopping branch that could be optimized to go directly to target.
123 int64_t skipToFinalDisplacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - this->finalAddress() - 8;
124 if ( (skipToFinalDisplacement < 33554428LL) && (skipToFinalDisplacement > (-33554432LL)) ) {
125 // can skip branch island and jump straight to target
126 if (_s_log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
127 _target->name(), _finalTarget.atom->finalAddress(), this->finalAddress());
128 displacement = skipToFinalDisplacement;
129 }
130 else {
131 // ultimate target is too far, jump to island
132 if (_s_log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n",
133 _target->name(), _finalTarget.atom->finalAddress());
134 }
135 }
136 uint32_t imm24 = (displacement >> 2) & 0x00FFFFFF;
137 int32_t branchInstruction = 0xEA000000 | imm24;
138 OSWriteLittleInt32(buffer, 0, branchInstruction);
139 }
140 virtual void setScope(Scope) { }
141
142 private:
143 const char* _name;
144 const ld::Atom* _target;
145 TargetAndOffset _finalTarget;
146 };
147
148
149
150 class ARMtoThumb1BranchIslandAtom : public ld::Atom {
151 public:
152 ARMtoThumb1BranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
153 : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
154 ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
155 ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
156 _name(nm),
157 _target(target),
158 _finalTarget(finalTarget) { }
159
160 virtual const ld::File* file() const { return NULL; }
161 virtual bool translationUnitSource(const char** dir, const char**) const
162 { return false; }
163 virtual const char* name() const { return _name; }
164 virtual uint64_t size() const { return 16; }
165 virtual uint64_t objectAddress() const { return 0; }
166 virtual void copyRawContent(uint8_t buffer[]) const {
167 // There is no large displacement thumb1 branch instruction.
168 // Instead use ARM instructions that can jump to thumb.
169 // we use a 32-bit displacement, so we can directly jump to target which means no island hopping
170 int64_t displacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - (this->finalAddress() + 12);
171 if ( _finalTarget.atom->isThumb() )
172 displacement |= 1;
173 if (_s_log) fprintf(stderr, "%s: 4 ARM instruction jump to final target at 0x%08llX\n",
174 _target->name(), _finalTarget.atom->finalAddress());
175 OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc004); // ldr ip, pc + 4
176 OSWriteLittleInt32(&buffer[ 4], 0, 0xe08fc00c); // add ip, pc, ip
177 OSWriteLittleInt32(&buffer[ 8], 0, 0xe12fff1c); // bx ip
178 OSWriteLittleInt32(&buffer[12], 0, displacement); // .long target-this
179 }
180 virtual void setScope(Scope) { }
181
182 private:
183 const char* _name;
184 const ld::Atom* _target;
185 TargetAndOffset _finalTarget;
186 };
187
188
189
//
// A thumb2-to-thumb branch island: one 32-bit thumb2 unconditional branch
// (reach +/- 16MB).  Like the ARM island, it skips intermediate islands
// when the ultimate target is within thumb2 branch range.
//
class Thumb2toThumbBranchIslandAtom : public ld::Atom {
public:
	Thumb2toThumbBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
		: ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
					ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
					ld::Atom::symbolTableIn, false, true, false, ld::Atom::Alignment(1)),
		_name(nm),
		_target(target),
		_finalTarget(finalTarget) { }

	virtual const ld::File*	file() const		{ return NULL; }
	virtual bool			translationUnitSource(const char** dir, const char**) const
												{ return false; }
	virtual const char*		name() const		{ return _name; }
	// one 32-bit thumb2 instruction
	virtual uint64_t		size() const		{ return 4; }
	virtual uint64_t		objectAddress() const { return 0; }
	virtual void			copyRawContent(uint8_t buffer[]) const {
		// thumb branches are pc-relative; pc reads as instruction address + 4
		int64_t displacement = _target->finalAddress() - this->finalAddress() - 4;
		if ( _target->contentType() == ld::Atom::typeBranchIsland ) {
			// an ARM branch can branch farther than a thumb branch. The branch
			// island generation was conservative and put islands every thumb
			// branch distance apart. Check to see if this is a an island
			// hopping branch that could be optimized to go directly to target.
			int64_t skipToFinalDisplacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - this->finalAddress() - 4;
			if ( (skipToFinalDisplacement < 16777214) && (skipToFinalDisplacement > (-16777216LL)) ) {
				// can skip branch island and jump straight to target
				if (_s_log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
					_target->name(), _finalTarget.atom->finalAddress(), this->finalAddress());
				displacement = skipToFinalDisplacement;
			}
			else {
				// ultimate target is too far for thumb2 branch, jump to island
				if (_s_log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n",
					_target->name(), _finalTarget.atom->finalAddress());
			}
		}
		// The instruction is really two instructions:
		// The lower 16 bits are the first instruction, which contains the high
		//   11 bits of the displacement.
		// The upper 16 bits are the second instruction, which contains the low
		//   11 bits of the displacement, as well as differentiating bl and blx.
		// Split the (half-word aligned) displacement into the T4-style
		// s/i1/i2/imm10/imm11 fields:
		uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
		uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
		uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
		uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
		uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
		// j1/j2 are the sign-extension helper bits (xor-encoded against s)
		uint32_t j1 = (i1 == s);
		uint32_t j2 = (i2 == s);
		uint32_t opcode = 0x9000F000;
		uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
		uint32_t firstDisp = (s << 10) | imm10;
		uint32_t newInstruction = opcode | (nextDisp << 16) | firstDisp;
		//warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
		//	s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
		OSWriteLittleInt32(buffer, 0, newInstruction);
	}
	virtual void			setScope(Scope)		{ }

private:
	const char*				_name;
	// next hop (may be another island)
	const ld::Atom*			_target;
	// ultimate branch destination
	TargetAndOffset			_finalTarget;
};
253
254
255 class NoPicARMtoThumbMBranchIslandAtom : public ld::Atom {
256 public:
257 NoPicARMtoThumbMBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
258 : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
259 ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
260 ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
261 _name(nm),
262 _target(target),
263 _finalTarget(finalTarget) { }
264
265 virtual const ld::File* file() const { return NULL; }
266 virtual bool translationUnitSource(const char** dir, const char**) const
267 { return false; }
268 virtual const char* name() const { return _name; }
269 virtual uint64_t size() const { return 8; }
270 virtual uint64_t objectAddress() const { return 0; }
271 virtual void copyRawContent(uint8_t buffer[]) const {
272 // There is no large displacement thumb1 branch instruction.
273 // Instead use ARM instructions that can jump to thumb.
274 // we use a 32-bit displacement, so we can directly jump to final target which means no island hopping
275 uint32_t targetAddr = _finalTarget.atom->finalAddress();
276 if ( _finalTarget.atom->isThumb() )
277 targetAddr |= 1;
278 if (_s_log) fprintf(stderr, "%s: 2 ARM instruction jump to final target at 0x%08llX\n",
279 _target->name(), _finalTarget.atom->finalAddress());
280 OSWriteLittleInt32(&buffer[0], 0, 0xe51ff004); // ldr pc, [pc, #-4]
281 OSWriteLittleInt32(&buffer[4], 0, targetAddr); // .long target-this
282 }
283 virtual void setScope(Scope) { }
284
285 private:
286 const char* _name;
287 const ld::Atom* _target;
288 TargetAndOffset _finalTarget;
289 };
290
291
292 static ld::Atom* makeBranchIsland(const Options& opts, ld::Fixup::Kind kind, int islandRegion, const ld::Atom* nextTarget, TargetAndOffset finalTarget)
293 {
294 char* name;
295 if ( finalTarget.offset == 0 ) {
296 if ( islandRegion == 0 )
297 asprintf(&name, "%s.island", finalTarget.atom->name());
298 else
299 asprintf(&name, "%s.island.%d", finalTarget.atom->name(), islandRegion+1);
300 }
301 else {
302 asprintf(&name, "%s_plus_%d.island.%d", finalTarget.atom->name(), finalTarget.offset, islandRegion);
303 }
304
305 switch ( kind ) {
306 case ld::Fixup::kindStorePPCBranch24:
307 case ld::Fixup::kindStoreTargetAddressPPCBranch24:
308 return new PPCBranchIslandAtom(name, nextTarget, finalTarget);
309 break;
310 case ld::Fixup::kindStoreARMBranch24:
311 case ld::Fixup::kindStoreThumbBranch22:
312 case ld::Fixup::kindStoreTargetAddressARMBranch24:
313 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
314 if ( finalTarget.atom->isThumb() ) {
315 if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() ) {
316 return new Thumb2toThumbBranchIslandAtom(name, nextTarget, finalTarget);
317 }
318 else if ( opts.outputSlidable() ) {
319 return new ARMtoThumb1BranchIslandAtom(name, nextTarget, finalTarget);
320 }
321 else {
322 return new NoPicARMtoThumbMBranchIslandAtom(name, nextTarget, finalTarget);
323 }
324 }
325 else {
326 return new ARMtoARMBranchIslandAtom(name, nextTarget, finalTarget);
327 }
328 break;
329 default:
330 assert(0 && "unexpected branch kind");
331 break;
332 }
333 return NULL;
334 }
335
336
337 static uint64_t textSizeWhenMightNeedBranchIslands(const Options& opts, bool seenThumbBranch)
338 {
339 switch ( opts.architecture() ) {
340 case CPU_TYPE_POWERPC:
341 case CPU_TYPE_POWERPC64:
342 return 16000000;
343 break;
344 case CPU_TYPE_ARM:
345 if ( ! seenThumbBranch )
346 return 32000000; // ARM can branch +/- 32MB
347 else if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() )
348 return 16000000; // thumb2 can branch +/- 16MB
349 else
350 return 4000000; // thumb1 can branch +/- 4MB
351 break;
352 }
353 assert(0 && "unexpected architecture");
354 return 0x100000000LL;
355 }
356
357
358 static uint64_t maxDistanceBetweenIslands(const Options& opts, bool seenThumbBranch)
359 {
360 switch ( opts.architecture() ) {
361 case CPU_TYPE_POWERPC:
362 case CPU_TYPE_POWERPC64:
363 return 14*1024*1024;
364 break;
365 case CPU_TYPE_ARM:
366 if ( ! seenThumbBranch )
367 return 30*1024*1024; // 2MB of branch islands per 32MB
368 else if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() )
369 return 14*1024*1024; // 2MB of branch islands per 16MB
370 else
371 return 3500000; // 0.5MB of branch islands per 4MB
372 break;
373 }
374 assert(0 && "unexpected architecture");
375 return 0x100000000LL;
376 }
377
378
379 //
380 // PowerPC can do PC relative branches as far as +/-16MB.
381 // If a branch target is >16MB then we insert one or more
382 // "branch islands" between the branch and its target that
383 // allows island hopping to the target.
384 //
385 // Branch Island Algorithm
386 //
387 // If the __TEXT segment < 16MB, then no branch islands needed
388 // Otherwise, every 14MB into the __TEXT segment a region is
389 // added which can contain branch islands. Every out-of-range
390 // bl instruction is checked. If it crosses a region, an island
391 // is added to that region with the same target and the bl is
392 // adjusted to target the island instead.
393 //
394 // In theory, if too many islands are added to one region, it
395 // could grow the __TEXT enough that other previously in-range
396 // bl branches could be pushed out of range. We reduce the
397 // probability this could happen by placing the ranges every
// 14MB which means the region would have to be 2MB (524,288 four-byte islands)
399 // before any branches could be pushed out of range.
400 //
401
402 void doPass(const Options& opts, ld::Internal& state)
403 {
404 // only make branch islands in final linked images
405 if ( opts.outputKind() == Options::kObjectFile )
406 return;
407
408 // only PowerPC and ARM need branch islands
409 switch ( opts.architecture() ) {
410 case CPU_TYPE_POWERPC:
411 case CPU_TYPE_POWERPC64:
412 case CPU_TYPE_ARM:
413 break;
414 default:
415 return;
416 }
417
418 // scan to find __text section
419 ld::Internal::FinalSection* textSection = NULL;
420 for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
421 ld::Internal::FinalSection* sect = *sit;
422 if ( strcmp(sect->sectionName(), "__text") == 0 )
423 textSection = sect;
424 }
425 if ( textSection == NULL )
426 return;
427
428 // assign section offsets to each atom in __text section, watch for thumb branches, and find total size
429 const bool isARM = (opts.architecture() == CPU_TYPE_ARM);
430 bool hasThumbBranches = false;
431 uint64_t offset = 0;
432 for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ++ait) {
433 const ld::Atom* atom = *ait;
434 // check for thumb branches
435 if ( isARM && ~hasThumbBranches ) {
436 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
437 switch ( fit->kind ) {
438 case ld::Fixup::kindStoreThumbBranch22:
439 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
440 hasThumbBranches = true;
441 break;
442 default:
443 break;
444 }
445 }
446 }
447 // align atom
448 ld::Atom::Alignment atomAlign = atom->alignment();
449 uint64_t atomAlignP2 = (1 << atomAlign.powerOf2);
450 uint64_t currentModulus = (offset % atomAlignP2);
451 if ( currentModulus != atomAlign.modulus ) {
452 if ( atomAlign.modulus > currentModulus )
453 offset += atomAlign.modulus-currentModulus;
454 else
455 offset += atomAlign.modulus+atomAlignP2-currentModulus;
456 }
457 (const_cast<ld::Atom*>(atom))->setSectionOffset(offset);
458 offset += atom->size();
459 }
460 uint64_t totalTextSize = offset;
461 if ( totalTextSize < textSizeWhenMightNeedBranchIslands(opts, hasThumbBranches) )
462 return;
463 if (_s_log) fprintf(stderr, "ld: __text section size=%llu, might need branch islands\n", totalTextSize);
464
465 // figure out how many regions of branch islands will be needed
466 const uint32_t kBetweenRegions = maxDistanceBetweenIslands(opts, hasThumbBranches); // place regions of islands every 14MB in __text section
467 const int kIslandRegionsCount = totalTextSize / kBetweenRegions;
468 typedef std::map<TargetAndOffset,const ld::Atom*, TargetAndOffsetComparor> AtomToIsland;
469 AtomToIsland* regionsMap[kIslandRegionsCount];
470 std::vector<const ld::Atom*>* regionsIslands[kIslandRegionsCount];
471 for(int i=0; i < kIslandRegionsCount; ++i) {
472 regionsMap[i] = new AtomToIsland();
473 regionsIslands[i] = new std::vector<const ld::Atom*>();
474 }
475 unsigned int islandCount = 0;
476 if (_s_log) fprintf(stderr, "ld: will use %u branch island regions\n", kIslandRegionsCount);
477
478 // create islands for branches in __text that are out of range
479 for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ++ait) {
480 const ld::Atom* atom = *ait;
481 const ld::Atom* target = NULL;
482 uint64_t addend = 0;
483 ld::Fixup* fixupWithTarget = NULL;
484 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
485 if ( fit->firstInCluster() ) {
486 target = NULL;
487 fixupWithTarget = NULL;
488 addend = 0;
489 }
490 switch ( fit->binding ) {
491 case ld::Fixup::bindingNone:
492 case ld::Fixup::bindingByNameUnbound:
493 break;
494 case ld::Fixup::bindingByContentBound:
495 case ld::Fixup::bindingDirectlyBound:
496 target = fit->u.target;
497 fixupWithTarget = fit;
498 break;
499 case ld::Fixup::bindingsIndirectlyBound:
500 target = state.indirectBindingTable[fit->u.bindingIndex];
501 fixupWithTarget = fit;
502 break;
503 }
504 bool haveBranch = false;
505 switch (fit->kind) {
506 case ld::Fixup::kindAddAddend:
507 addend = fit->u.addend;
508 break;
509 case ld::Fixup::kindStorePPCBranch24:
510 case ld::Fixup::kindStoreTargetAddressPPCBranch24:
511 case ld::Fixup::kindStoreARMBranch24:
512 case ld::Fixup::kindStoreThumbBranch22:
513 case ld::Fixup::kindStoreTargetAddressARMBranch24:
514 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
515 haveBranch = true;
516 break;
517 default:
518 break;
519 }
520 if ( haveBranch ) {
521 int64_t srcAddr = atom->sectionOffset() + fit->offsetInAtom;
522 int64_t dstAddr = target->sectionOffset() + addend;
523 if ( target->section().type() == ld::Section::typeStub )
524 dstAddr = totalTextSize;
525 int64_t displacement = dstAddr - srcAddr;
526 TargetAndOffset finalTargetAndOffset = { target, addend };
527 const int64_t kBranchLimit = kBetweenRegions;
528 if ( displacement > kBranchLimit ) {
529 // create forward branch chain
530 const ld::Atom* nextTarget = target;
531 for (int i=kIslandRegionsCount-1; i >=0 ; --i) {
532 AtomToIsland* region = regionsMap[i];
533 int64_t islandRegionAddr = kBetweenRegions * (i+1);
534 if ( (srcAddr < islandRegionAddr) && (islandRegionAddr <= dstAddr) ) {
535 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
536 if ( pos == region->end() ) {
537 ld::Atom* island = makeBranchIsland(opts, fit->kind, i, nextTarget, finalTargetAndOffset);
538 (*region)[finalTargetAndOffset] = island;
539 if (_s_log) fprintf(stderr, "added island %s to region %d for %s\n", island->name(), i, atom->name());
540 regionsIslands[i]->push_back(island);
541 ++islandCount;
542 nextTarget = island;
543 }
544 else {
545 nextTarget = pos->second;
546 }
547 }
548 }
549 if (_s_log) fprintf(stderr, "using island %s for branch to %s from %s\n", nextTarget->name(), target->name(), atom->name());
550 fixupWithTarget->u.target = nextTarget;
551 fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
552 }
553 else if ( displacement < (-kBranchLimit) ) {
554 // create back branching chain
555 const ld::Atom* prevTarget = target;
556 for (int i=0; i < kIslandRegionsCount ; ++i) {
557 AtomToIsland* region = regionsMap[i];
558 int64_t islandRegionAddr = kBetweenRegions * (i+1);
559 if ( (dstAddr <= islandRegionAddr) && (islandRegionAddr < srcAddr) ) {
560 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
561 if ( pos == region->end() ) {
562 ld::Atom* island = makeBranchIsland(opts, fit->kind, i, prevTarget, finalTargetAndOffset);
563 (*region)[finalTargetAndOffset] = island;
564 if (_s_log) fprintf(stderr, "added back island %s to region %d for %s\n", island->name(), i, atom->name());
565 regionsIslands[i]->push_back(island);
566 ++islandCount;
567 prevTarget = island;
568 }
569 else {
570 prevTarget = pos->second;
571 }
572 }
573 }
574 if (_s_log) fprintf(stderr, "using back island %s for %s\n", prevTarget->name(), atom->name());
575 fixupWithTarget->u.target = prevTarget;
576 fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
577 }
578 }
579 }
580 }
581
582
583 // insert islands into __text section and adjust section offsets
584 if ( islandCount > 0 ) {
585 if ( _s_log ) fprintf(stderr, "ld: %u branch islands required in %u regions\n", islandCount, kIslandRegionsCount);
586 std::vector<const ld::Atom*> newAtomList;
587 newAtomList.reserve(textSection->atoms.size()+islandCount);
588 uint64_t islandRegionAddr = kBetweenRegions;;
589 int regionIndex = 0;
590 for (std::vector<const ld::Atom*>::iterator it=textSection->atoms.begin(); it != textSection->atoms.end(); it++) {
591 const ld::Atom* atom = *it;
592 if ( (atom->sectionOffset()+atom->size()) > islandRegionAddr ) {
593 std::vector<const ld::Atom*>* regionIslands = regionsIslands[regionIndex];
594 for (std::vector<const ld::Atom*>::iterator rit=regionIslands->begin(); rit != regionIslands->end(); rit++) {
595 const ld::Atom* islandAtom = *rit;
596 newAtomList.push_back(islandAtom);
597 if ( _s_log ) fprintf(stderr, "inserting island %s into __text section\n", islandAtom->name());
598 }
599 ++regionIndex;
600 islandRegionAddr += kBetweenRegions;
601 }
602 newAtomList.push_back(atom);
603 }
604 // put any remaining islands at end of __text section
605 if ( regionIndex < kIslandRegionsCount ) {
606 std::vector<const ld::Atom*>* regionIslands = regionsIslands[regionIndex];
607 for (std::vector<const ld::Atom*>::iterator rit=regionIslands->begin(); rit != regionIslands->end(); rit++) {
608 const ld::Atom* islandAtom = *rit;
609 newAtomList.push_back(islandAtom);
610 if ( _s_log ) fprintf(stderr, "inserting island %s into __text section\n", islandAtom->name());
611 }
612 }
613 // swap in new list of atoms for __text section
614 textSection->atoms.clear();
615 textSection->atoms = newAtomList;
616 }
617
618 }
619
620
621 } // namespace branch_island
622 } // namespace passes
623 } // namespace ld