1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
3 * Copyright (c) 2009 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
30 #include <libkern/OSByteOrder.h>
35 #include "MachOFileAbstraction.hpp"
37 #include "branch_island.h"
41 namespace branch_island
{
46 struct TargetAndOffset
{ const ld::Atom
* atom
; uint32_t offset
; };
47 class TargetAndOffsetComparor
50 bool operator()(const TargetAndOffset
& left
, const TargetAndOffset
& right
) const
52 if ( left
.atom
!= right
.atom
)
53 return ( left
.atom
< right
.atom
);
54 return ( left
.offset
< right
.offset
);
59 static bool _s_log
= false;
60 static ld::Section
_s_text_section("__TEXT", "__text", ld::Section::typeCode
);
62 class PPCBranchIslandAtom
: public ld::Atom
{
64 PPCBranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
65 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
66 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
67 ld::Atom::symbolTableIn
, false, false, false, ld::Atom::Alignment(2)),
70 _finalTarget(finalTarget
) { }
72 virtual const ld::File
* file() const { return NULL
; }
73 virtual bool translationUnitSource(const char** dir
, const char**) const
75 virtual const char* name() const { return _name
; }
76 virtual uint64_t size() const { return 4; }
77 virtual uint64_t objectAddress() const { return 0; }
78 virtual void copyRawContent(uint8_t buffer
[]) const {
79 int64_t displacement
= _target
->finalAddress() - this->finalAddress();
80 const int64_t bl_sixteenMegLimit
= 0x00FFFFFF;
81 if ( _target
->contentType() == ld::Atom::typeBranchIsland
) {
82 // try optimizing away intermediate islands
83 int64_t skipToFinalDisplacement
= _finalTarget
.atom
->finalAddress() + _finalTarget
.offset
- this->finalAddress();
84 if ( (skipToFinalDisplacement
> bl_sixteenMegLimit
) && (skipToFinalDisplacement
< (-bl_sixteenMegLimit
)) ) {
85 displacement
= skipToFinalDisplacement
;
88 int32_t branchInstruction
= 0x48000000 | ((uint32_t)displacement
& 0x03FFFFFC);
89 OSWriteBigInt32(buffer
, 0, branchInstruction
);
91 virtual void setScope(Scope
) { }
95 const ld::Atom
* _target
;
96 TargetAndOffset _finalTarget
;
100 class ARMtoARMBranchIslandAtom
: public ld::Atom
{
102 ARMtoARMBranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
103 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
104 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
105 ld::Atom::symbolTableIn
, false, false, false, ld::Atom::Alignment(2)),
108 _finalTarget(finalTarget
) { }
110 virtual const ld::File
* file() const { return NULL
; }
111 virtual bool translationUnitSource(const char** dir
, const char**) const
113 virtual const char* name() const { return _name
; }
114 virtual uint64_t size() const { return 4; }
115 virtual uint64_t objectAddress() const { return 0; }
116 virtual void copyRawContent(uint8_t buffer
[]) const {
117 int64_t displacement
= _target
->finalAddress() - this->finalAddress() - 8;
118 if ( _target
->contentType() == ld::Atom::typeBranchIsland
) {
119 // an ARM branch can branch farther than a thumb branch. The branch
120 // island generation was conservative and put islands every thumb
121 // branch distance apart. Check to see if this is a an island
122 // hopping branch that could be optimized to go directly to target.
123 int64_t skipToFinalDisplacement
= _finalTarget
.atom
->finalAddress() + _finalTarget
.offset
- this->finalAddress() - 8;
124 if ( (skipToFinalDisplacement
< 33554428LL) && (skipToFinalDisplacement
> (-33554432LL)) ) {
125 // can skip branch island and jump straight to target
126 if (_s_log
) fprintf(stderr
, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
127 _target
->name(), _finalTarget
.atom
->finalAddress(), this->finalAddress());
128 displacement
= skipToFinalDisplacement
;
131 // ultimate target is too far, jump to island
132 if (_s_log
) fprintf(stderr
, "%s: jump to branch island at 0x%08llX\n",
133 _target
->name(), _finalTarget
.atom
->finalAddress());
136 uint32_t imm24
= (displacement
>> 2) & 0x00FFFFFF;
137 int32_t branchInstruction
= 0xEA000000 | imm24
;
138 OSWriteLittleInt32(buffer
, 0, branchInstruction
);
140 virtual void setScope(Scope
) { }
144 const ld::Atom
* _target
;
145 TargetAndOffset _finalTarget
;
150 class ARMtoThumb1BranchIslandAtom
: public ld::Atom
{
152 ARMtoThumb1BranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
153 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
154 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
155 ld::Atom::symbolTableIn
, false, false, false, ld::Atom::Alignment(2)),
158 _finalTarget(finalTarget
) { }
160 virtual const ld::File
* file() const { return NULL
; }
161 virtual bool translationUnitSource(const char** dir
, const char**) const
163 virtual const char* name() const { return _name
; }
164 virtual uint64_t size() const { return 16; }
165 virtual uint64_t objectAddress() const { return 0; }
166 virtual void copyRawContent(uint8_t buffer
[]) const {
167 // There is no large displacement thumb1 branch instruction.
168 // Instead use ARM instructions that can jump to thumb.
169 // we use a 32-bit displacement, so we can directly jump to target which means no island hopping
170 int64_t displacement
= _finalTarget
.atom
->finalAddress() + _finalTarget
.offset
- (this->finalAddress() + 12);
171 if ( _finalTarget
.atom
->isThumb() )
173 if (_s_log
) fprintf(stderr
, "%s: 4 ARM instruction jump to final target at 0x%08llX\n",
174 _target
->name(), _finalTarget
.atom
->finalAddress());
175 OSWriteLittleInt32(&buffer
[ 0], 0, 0xe59fc004); // ldr ip, pc + 4
176 OSWriteLittleInt32(&buffer
[ 4], 0, 0xe08fc00c); // add ip, pc, ip
177 OSWriteLittleInt32(&buffer
[ 8], 0, 0xe12fff1c); // bx ip
178 OSWriteLittleInt32(&buffer
[12], 0, displacement
); // .long target-this
180 virtual void setScope(Scope
) { }
184 const ld::Atom
* _target
;
185 TargetAndOffset _finalTarget
;
190 class Thumb2toThumbBranchIslandAtom
: public ld::Atom
{
192 Thumb2toThumbBranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
193 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
194 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
195 ld::Atom::symbolTableIn
, false, true, false, ld::Atom::Alignment(1)),
198 _finalTarget(finalTarget
) { }
200 virtual const ld::File
* file() const { return NULL
; }
201 virtual bool translationUnitSource(const char** dir
, const char**) const
203 virtual const char* name() const { return _name
; }
204 virtual uint64_t size() const { return 4; }
205 virtual uint64_t objectAddress() const { return 0; }
206 virtual void copyRawContent(uint8_t buffer
[]) const {
207 int64_t displacement
= _target
->finalAddress() - this->finalAddress() - 4;
208 if ( _target
->contentType() == ld::Atom::typeBranchIsland
) {
209 // an ARM branch can branch farther than a thumb branch. The branch
210 // island generation was conservative and put islands every thumb
211 // branch distance apart. Check to see if this is a an island
212 // hopping branch that could be optimized to go directly to target.
213 int64_t skipToFinalDisplacement
= _finalTarget
.atom
->finalAddress() + _finalTarget
.offset
- this->finalAddress() - 4;
214 if ( (skipToFinalDisplacement
< 16777214) && (skipToFinalDisplacement
> (-16777216LL)) ) {
215 // can skip branch island and jump straight to target
216 if (_s_log
) fprintf(stderr
, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
217 _target
->name(), _finalTarget
.atom
->finalAddress(), this->finalAddress());
218 displacement
= skipToFinalDisplacement
;
221 // ultimate target is too far for thumb2 branch, jump to island
222 if (_s_log
) fprintf(stderr
, "%s: jump to branch island at 0x%08llX\n",
223 _target
->name(), _finalTarget
.atom
->finalAddress());
226 // The instruction is really two instructions:
227 // The lower 16 bits are the first instruction, which contains the high
228 // 11 bits of the displacement.
229 // The upper 16 bits are the second instruction, which contains the low
230 // 11 bits of the displacement, as well as differentiating bl and blx.
231 uint32_t s
= (uint32_t)(displacement
>> 24) & 0x1;
232 uint32_t i1
= (uint32_t)(displacement
>> 23) & 0x1;
233 uint32_t i2
= (uint32_t)(displacement
>> 22) & 0x1;
234 uint32_t imm10
= (uint32_t)(displacement
>> 12) & 0x3FF;
235 uint32_t imm11
= (uint32_t)(displacement
>> 1) & 0x7FF;
236 uint32_t j1
= (i1
== s
);
237 uint32_t j2
= (i2
== s
);
238 uint32_t opcode
= 0x9000F000;
239 uint32_t nextDisp
= (j1
<< 13) | (j2
<< 11) | imm11
;
240 uint32_t firstDisp
= (s
<< 10) | imm10
;
241 uint32_t newInstruction
= opcode
| (nextDisp
<< 16) | firstDisp
;
242 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
243 // s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
244 OSWriteLittleInt32(buffer
, 0, newInstruction
);
246 virtual void setScope(Scope
) { }
250 const ld::Atom
* _target
;
251 TargetAndOffset _finalTarget
;
255 class NoPicARMtoThumbMBranchIslandAtom
: public ld::Atom
{
257 NoPicARMtoThumbMBranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
258 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
259 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
260 ld::Atom::symbolTableIn
, false, false, false, ld::Atom::Alignment(2)),
263 _finalTarget(finalTarget
) { }
265 virtual const ld::File
* file() const { return NULL
; }
266 virtual bool translationUnitSource(const char** dir
, const char**) const
268 virtual const char* name() const { return _name
; }
269 virtual uint64_t size() const { return 8; }
270 virtual uint64_t objectAddress() const { return 0; }
271 virtual void copyRawContent(uint8_t buffer
[]) const {
272 // There is no large displacement thumb1 branch instruction.
273 // Instead use ARM instructions that can jump to thumb.
274 // we use a 32-bit displacement, so we can directly jump to final target which means no island hopping
275 uint32_t targetAddr
= _finalTarget
.atom
->finalAddress();
276 if ( _finalTarget
.atom
->isThumb() )
278 if (_s_log
) fprintf(stderr
, "%s: 2 ARM instruction jump to final target at 0x%08llX\n",
279 _target
->name(), _finalTarget
.atom
->finalAddress());
280 OSWriteLittleInt32(&buffer
[0], 0, 0xe51ff004); // ldr pc, [pc, #-4]
281 OSWriteLittleInt32(&buffer
[4], 0, targetAddr
); // .long target-this
283 virtual void setScope(Scope
) { }
287 const ld::Atom
* _target
;
288 TargetAndOffset _finalTarget
;
292 static ld::Atom
* makeBranchIsland(const Options
& opts
, ld::Fixup::Kind kind
, int islandRegion
, const ld::Atom
* nextTarget
, TargetAndOffset finalTarget
)
295 if ( finalTarget
.offset
== 0 ) {
296 if ( islandRegion
== 0 )
297 asprintf(&name
, "%s.island", finalTarget
.atom
->name());
299 asprintf(&name
, "%s.island.%d", finalTarget
.atom
->name(), islandRegion
+1);
302 asprintf(&name
, "%s_plus_%d.island.%d", finalTarget
.atom
->name(), finalTarget
.offset
, islandRegion
);
306 case ld::Fixup::kindStorePPCBranch24
:
307 case ld::Fixup::kindStoreTargetAddressPPCBranch24
:
308 return new PPCBranchIslandAtom(name
, nextTarget
, finalTarget
);
310 case ld::Fixup::kindStoreARMBranch24
:
311 case ld::Fixup::kindStoreThumbBranch22
:
312 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
313 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
314 if ( finalTarget
.atom
->isThumb() ) {
315 if ( opts
.preferSubArchitecture() && opts
.subArchitecture() == CPU_SUBTYPE_ARM_V7
) {
316 return new Thumb2toThumbBranchIslandAtom(name
, nextTarget
, finalTarget
);
318 else if ( opts
.outputSlidable() ) {
319 return new ARMtoThumb1BranchIslandAtom(name
, nextTarget
, finalTarget
);
322 return new NoPicARMtoThumbMBranchIslandAtom(name
, nextTarget
, finalTarget
);
326 return new ARMtoARMBranchIslandAtom(name
, nextTarget
, finalTarget
);
330 assert(0 && "unexpected branch kind");
337 static uint64_t textSizeWhenMightNeedBranchIslands(const Options
& opts
, bool seenThumbBranch
)
339 switch ( opts
.architecture() ) {
340 case CPU_TYPE_POWERPC
:
341 case CPU_TYPE_POWERPC64
:
345 if ( ! seenThumbBranch
)
346 return 32000000; // ARM can branch +/- 32MB
347 else if ( opts
.preferSubArchitecture() && opts
.subArchitecture() == CPU_SUBTYPE_ARM_V7
)
348 return 16000000; // thumb2 can branch +/- 16MB
350 return 4000000; // thumb1 can branch +/- 4MB
353 assert(0 && "unexpected architecture");
354 return 0x100000000LL
;
358 static uint64_t maxDistanceBetweenIslands(const Options
& opts
, bool seenThumbBranch
)
360 switch ( opts
.architecture() ) {
361 case CPU_TYPE_POWERPC
:
362 case CPU_TYPE_POWERPC64
:
366 if ( ! seenThumbBranch
)
367 return 30*1024*1024; // 2MB of branch islands per 32MB
368 else if ( opts
.preferSubArchitecture() && opts
.subArchitecture() == CPU_SUBTYPE_ARM_V7
)
369 return 14*1024*1024; // 2MB of branch islands per 16MB
371 return 3500000; // 0.5MB of branch islands per 4MB
374 assert(0 && "unexpected architecture");
375 return 0x100000000LL
;
//
// PowerPC can do PC relative branches as far as +/-16MB.
// If a branch target is >16MB then we insert one or more
// "branch islands" between the branch and its target that
// allows island hopping to the target.
//
// Branch Island Algorithm
//
// If the __TEXT segment < 16MB, then no branch islands needed.
// Otherwise, every 14MB into the __TEXT segment a region is
// added which can contain branch islands.  Every out-of-range
// bl instruction is checked.  If it crosses a region, an island
// is added to that region with the same target and the bl is
// adjusted to target the island instead.
//
// In theory, if too many islands are added to one region, it
// could grow the __TEXT enough that other previously in-range
// bl branches could be pushed out of range.  We reduce the
// probability this could happen by placing the regions every
// 14MB, which means a region would have to grow to 2MB
// (512,000 islands) before any branches could be pushed out of range.
//
402 void doPass(const Options
& opts
, ld::Internal
& state
)
404 // only make branch islands in final linked images
405 if ( opts
.outputKind() == Options::kObjectFile
)
408 // only PowerPC and ARM need branch islands
409 switch ( opts
.architecture() ) {
410 case CPU_TYPE_POWERPC
:
411 case CPU_TYPE_POWERPC64
:
418 // scan to find __text section
419 ld::Internal::FinalSection
* textSection
= NULL
;
420 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
=state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
421 ld::Internal::FinalSection
* sect
= *sit
;
422 if ( strcmp(sect
->sectionName(), "__text") == 0 )
425 if ( textSection
== NULL
)
428 // assign section offsets to each atom in __text section, watch for thumb branches, and find total size
429 const bool isARM
= (opts
.architecture() == CPU_TYPE_ARM
);
430 bool hasThumbBranches
= false;
432 for (std::vector
<const ld::Atom
*>::iterator ait
=textSection
->atoms
.begin(); ait
!= textSection
->atoms
.end(); ++ait
) {
433 const ld::Atom
* atom
= *ait
;
434 // check for thumb branches
435 if ( isARM
&& ~hasThumbBranches
) {
436 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
437 switch ( fit
->kind
) {
438 case ld::Fixup::kindStoreThumbBranch22
:
439 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
440 hasThumbBranches
= true;
448 ld::Atom::Alignment atomAlign
= atom
->alignment();
449 uint64_t atomAlignP2
= (1 << atomAlign
.powerOf2
);
450 uint64_t currentModulus
= (offset
% atomAlignP2
);
451 if ( currentModulus
!= atomAlign
.modulus
) {
452 if ( atomAlign
.modulus
> currentModulus
)
453 offset
+= atomAlign
.modulus
-currentModulus
;
455 offset
+= atomAlign
.modulus
+atomAlignP2
-currentModulus
;
457 (const_cast<ld::Atom
*>(atom
))->setSectionOffset(offset
);
458 offset
+= atom
->size();
460 uint64_t totalTextSize
= offset
;
461 if ( totalTextSize
< textSizeWhenMightNeedBranchIslands(opts
, hasThumbBranches
) )
463 if (_s_log
) fprintf(stderr
, "ld: __text section size=%llu, might need branch islands\n", totalTextSize
);
465 // figure out how many regions of branch islands will be needed
466 const uint32_t kBetweenRegions
= maxDistanceBetweenIslands(opts
, hasThumbBranches
); // place regions of islands every 14MB in __text section
467 const int kIslandRegionsCount
= totalTextSize
/ kBetweenRegions
;
468 typedef std::map
<TargetAndOffset
,const ld::Atom
*, TargetAndOffsetComparor
> AtomToIsland
;
469 AtomToIsland
* regionsMap
[kIslandRegionsCount
];
470 std::vector
<const ld::Atom
*>* regionsIslands
[kIslandRegionsCount
];
471 for(int i
=0; i
< kIslandRegionsCount
; ++i
) {
472 regionsMap
[i
] = new AtomToIsland();
473 regionsIslands
[i
] = new std::vector
<const ld::Atom
*>();
475 unsigned int islandCount
= 0;
476 if (_s_log
) fprintf(stderr
, "ld: will use %u branch island regions\n", kIslandRegionsCount
);
478 // create islands for branches in __text that are out of range
479 for (std::vector
<const ld::Atom
*>::iterator ait
=textSection
->atoms
.begin(); ait
!= textSection
->atoms
.end(); ++ait
) {
480 const ld::Atom
* atom
= *ait
;
481 const ld::Atom
* target
= NULL
;
483 ld::Fixup
* fixupWithTarget
= NULL
;
484 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
485 if ( fit
->firstInCluster() ) {
487 fixupWithTarget
= NULL
;
490 switch ( fit
->binding
) {
491 case ld::Fixup::bindingNone
:
492 case ld::Fixup::bindingByNameUnbound
:
494 case ld::Fixup::bindingByContentBound
:
495 case ld::Fixup::bindingDirectlyBound
:
496 target
= fit
->u
.target
;
497 fixupWithTarget
= fit
;
499 case ld::Fixup::bindingsIndirectlyBound
:
500 target
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
501 fixupWithTarget
= fit
;
504 bool haveBranch
= false;
506 case ld::Fixup::kindAddAddend
:
507 addend
= fit
->u
.addend
;
509 case ld::Fixup::kindStorePPCBranch24
:
510 case ld::Fixup::kindStoreTargetAddressPPCBranch24
:
511 case ld::Fixup::kindStoreARMBranch24
:
512 case ld::Fixup::kindStoreThumbBranch22
:
513 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
514 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
521 int64_t srcAddr
= atom
->sectionOffset() + fit
->offsetInAtom
;
522 int64_t dstAddr
= target
->sectionOffset() + addend
;
523 if ( target
->section().type() == ld::Section::typeStub
)
524 dstAddr
= totalTextSize
;
525 int64_t displacement
= dstAddr
- srcAddr
;
526 TargetAndOffset finalTargetAndOffset
= { target
, addend
};
527 const int64_t kBranchLimit
= kBetweenRegions
;
528 if ( displacement
> kBranchLimit
) {
529 // create forward branch chain
530 const ld::Atom
* nextTarget
= target
;
531 for (int i
=kIslandRegionsCount
-1; i
>=0 ; --i
) {
532 AtomToIsland
* region
= regionsMap
[i
];
533 int64_t islandRegionAddr
= kBetweenRegions
* (i
+1);
534 if ( (srcAddr
< islandRegionAddr
) && (islandRegionAddr
<= dstAddr
) ) {
535 AtomToIsland::iterator pos
= region
->find(finalTargetAndOffset
);
536 if ( pos
== region
->end() ) {
537 ld::Atom
* island
= makeBranchIsland(opts
, fit
->kind
, i
, nextTarget
, finalTargetAndOffset
);
538 (*region
)[finalTargetAndOffset
] = island
;
539 if (_s_log
) fprintf(stderr
, "added island %s to region %d for %s\n", island
->name(), i
, atom
->name());
540 regionsIslands
[i
]->push_back(island
);
545 nextTarget
= pos
->second
;
549 if (_s_log
) fprintf(stderr
, "using island %s for branch to %s from %s\n", nextTarget
->name(), target
->name(), atom
->name());
550 fixupWithTarget
->u
.target
= nextTarget
;
551 fixupWithTarget
->binding
= ld::Fixup::bindingDirectlyBound
;
553 else if ( displacement
< (-kBranchLimit
) ) {
554 // create back branching chain
555 const ld::Atom
* prevTarget
= target
;
556 for (int i
=0; i
< kIslandRegionsCount
; ++i
) {
557 AtomToIsland
* region
= regionsMap
[i
];
558 int64_t islandRegionAddr
= kBetweenRegions
* (i
+1);
559 if ( (dstAddr
<= islandRegionAddr
) && (islandRegionAddr
< srcAddr
) ) {
560 AtomToIsland::iterator pos
= region
->find(finalTargetAndOffset
);
561 if ( pos
== region
->end() ) {
562 ld::Atom
* island
= makeBranchIsland(opts
, fit
->kind
, i
, prevTarget
, finalTargetAndOffset
);
563 (*region
)[finalTargetAndOffset
] = island
;
564 if (_s_log
) fprintf(stderr
, "added back island %s to region %d for %s\n", island
->name(), i
, atom
->name());
565 regionsIslands
[i
]->push_back(island
);
570 prevTarget
= pos
->second
;
574 if (_s_log
) fprintf(stderr
, "using back island %s for %s\n", prevTarget
->name(), atom
->name());
575 fixupWithTarget
->u
.target
= prevTarget
;
576 fixupWithTarget
->binding
= ld::Fixup::bindingDirectlyBound
;
583 // insert islands into __text section and adjust section offsets
584 if ( islandCount
> 0 ) {
585 if ( _s_log
) fprintf(stderr
, "ld: %u branch islands required in %u regions\n", islandCount
, kIslandRegionsCount
);
586 std::vector
<const ld::Atom
*> newAtomList
;
587 newAtomList
.reserve(textSection
->atoms
.size()+islandCount
);
588 uint64_t islandRegionAddr
= kBetweenRegions
;;
590 for (std::vector
<const ld::Atom
*>::iterator it
=textSection
->atoms
.begin(); it
!= textSection
->atoms
.end(); it
++) {
591 const ld::Atom
* atom
= *it
;
592 if ( (atom
->sectionOffset()+atom
->size()) > islandRegionAddr
) {
593 std::vector
<const ld::Atom
*>* regionIslands
= regionsIslands
[regionIndex
];
594 for (std::vector
<const ld::Atom
*>::iterator rit
=regionIslands
->begin(); rit
!= regionIslands
->end(); rit
++) {
595 const ld::Atom
* islandAtom
= *rit
;
596 newAtomList
.push_back(islandAtom
);
597 if ( _s_log
) fprintf(stderr
, "inserting island %s into __text section\n", islandAtom
->name());
600 islandRegionAddr
+= kBetweenRegions
;
602 newAtomList
.push_back(atom
);
604 // put any remaining islands at end of __text section
605 if ( regionIndex
< kIslandRegionsCount
) {
606 std::vector
<const ld::Atom
*>* regionIslands
= regionsIslands
[regionIndex
];
607 for (std::vector
<const ld::Atom
*>::iterator rit
=regionIslands
->begin(); rit
!= regionIslands
->end(); rit
++) {
608 const ld::Atom
* islandAtom
= *rit
;
609 newAtomList
.push_back(islandAtom
);
610 if ( _s_log
) fprintf(stderr
, "inserting island %s into __text section\n", islandAtom
->name());
613 // swap in new list of atoms for __text section
614 textSection
->atoms
.clear();
615 textSection
->atoms
= newAtomList
;
621 } // namespace branch_island
622 } // namespace passes