1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
3 * Copyright (c) 2009 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
30 #include <libkern/OSByteOrder.h>
35 #include "MachOFileAbstraction.hpp"
37 #include "branch_island.h"
41 namespace branch_island
{
// Side table mapping each atom to its assigned address.  Populated by
// buildAddressMap() for -preload builds and consulted when computing
// branch displacements in makeIslandsForSection().
static std::map
<const Atom
*, uint64_t> sAtomToAddress
;
47 struct TargetAndOffset
{ const ld::Atom
* atom
; uint32_t offset
; };
48 class TargetAndOffsetComparor
51 bool operator()(const TargetAndOffset
& left
, const TargetAndOffset
& right
) const
53 if ( left
.atom
!= right
.atom
)
54 return ( left
.atom
< right
.atom
);
55 return ( left
.offset
< right
.offset
);
// When true, every island created/used is reported on stderr (debug aid).
static bool _s_log
= false;
// Section into which non-preload branch islands are emitted (__TEXT,__text).
static ld::Section
_s_text_section("__TEXT", "__text", ld::Section::typeCode
);
65 class ARMtoARMBranchIslandAtom
: public ld::Atom
{
67 ARMtoARMBranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
68 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
69 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
70 ld::Atom::symbolTableIn
, false, false, false, ld::Atom::Alignment(2)),
72 _fixup1(0, ld::Fixup::k1of1
, ld::Fixup::kindStoreTargetAddressARMBranch24
, target
),
73 _fixup2(0, ld::Fixup::k1of1
, ld::Fixup::kindIslandTarget
, finalTarget
.atom
) {
74 if (_s_log
) fprintf(stderr
, "%p: ARM-to-ARM branch island to final target %s\n",
75 this, finalTarget
.atom
->name());
78 virtual const ld::File
* file() const { return NULL
; }
79 virtual const char* name() const { return _name
; }
80 virtual uint64_t size() const { return 4; }
81 virtual uint64_t objectAddress() const { return 0; }
82 virtual void copyRawContent(uint8_t buffer
[]) const {
83 OSWriteLittleInt32(buffer
, 0, 0xEA000000);
85 virtual void setScope(Scope
) { }
86 virtual ld::Fixup::iterator
fixupsBegin() const { return (ld::Fixup
*)&_fixup1
; }
87 virtual ld::Fixup::iterator
fixupsEnd() const { return &((ld::Fixup
*)&_fixup2
)[1]; }
97 class ARMtoThumb1BranchIslandAtom
: public ld::Atom
{
99 ARMtoThumb1BranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
100 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
101 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
102 ld::Atom::symbolTableIn
, false, false, false, ld::Atom::Alignment(2)),
104 _finalTarget(finalTarget
) {
105 if (_s_log
) fprintf(stderr
, "%p: ARM-to-thumb1 branch island to final target %s\n",
106 this, finalTarget
.atom
->name());
109 virtual const ld::File
* file() const { return NULL
; }
110 virtual const char* name() const { return _name
; }
111 virtual uint64_t size() const { return 16; }
112 virtual uint64_t objectAddress() const { return 0; }
113 virtual void copyRawContent(uint8_t buffer
[]) const {
114 // There is no large displacement thumb1 branch instruction.
115 // Instead use ARM instructions that can jump to thumb.
116 // we use a 32-bit displacement, so we can directly jump to target which means no island hopping
117 int64_t displacement
= _finalTarget
.atom
->finalAddress() + _finalTarget
.offset
- (this->finalAddress() + 12);
118 if ( _finalTarget
.atom
->isThumb() )
120 OSWriteLittleInt32(&buffer
[ 0], 0, 0xe59fc004); // ldr ip, pc + 4
121 OSWriteLittleInt32(&buffer
[ 4], 0, 0xe08fc00c); // add ip, pc, ip
122 OSWriteLittleInt32(&buffer
[ 8], 0, 0xe12fff1c); // bx ip
123 OSWriteLittleInt32(&buffer
[12], 0, displacement
); // .long target-this
125 virtual void setScope(Scope
) { }
129 TargetAndOffset _finalTarget
;
134 class Thumb2toThumbBranchIslandAtom
: public ld::Atom
{
136 Thumb2toThumbBranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
137 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
138 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
139 ld::Atom::symbolTableIn
, false, true, false, ld::Atom::Alignment(1)),
141 _fixup1(0, ld::Fixup::k1of1
, ld::Fixup::kindStoreTargetAddressThumbBranch22
, target
),
142 _fixup2(0, ld::Fixup::k1of1
, ld::Fixup::kindIslandTarget
, finalTarget
.atom
) {
143 if (_s_log
) fprintf(stderr
, "%p: Thumb-to-thumb branch island to final target %s\n",
144 this, finalTarget
.atom
->name());
147 virtual const ld::File
* file() const { return NULL
; }
148 virtual const char* name() const { return _name
; }
149 virtual uint64_t size() const { return 4; }
150 virtual uint64_t objectAddress() const { return 0; }
151 virtual void copyRawContent(uint8_t buffer
[]) const {
152 OSWriteLittleInt32(buffer
, 0, 0xf0008000);
154 virtual void setScope(Scope
) { }
155 virtual ld::Fixup::iterator
fixupsBegin() const { return (ld::Fixup
*)&_fixup1
; }
156 virtual ld::Fixup::iterator
fixupsEnd() const { return &((ld::Fixup
*)&_fixup2
)[1]; }
166 class Thumb2toThumbBranchAbsoluteIslandAtom
: public ld::Atom
{
168 Thumb2toThumbBranchAbsoluteIslandAtom(const char* nm
, const ld::Section
& inSect
, TargetAndOffset finalTarget
)
169 : ld::Atom(inSect
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
170 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
171 ld::Atom::symbolTableIn
, false, true, false, ld::Atom::Alignment(1)),
173 _fixup1(0, ld::Fixup::k1of2
, ld::Fixup::kindSetTargetAddress
, finalTarget
.atom
),
174 _fixup2(0, ld::Fixup::k2of2
, ld::Fixup::kindStoreThumbLow16
),
175 _fixup3(4, ld::Fixup::k1of2
, ld::Fixup::kindSetTargetAddress
, finalTarget
.atom
),
176 _fixup4(4, ld::Fixup::k2of2
, ld::Fixup::kindStoreThumbHigh16
),
177 _fixup5(0, ld::Fixup::k1of1
, ld::Fixup::kindIslandTarget
, finalTarget
.atom
) {
178 if (_s_log
) fprintf(stderr
, "%p: Thumb-to-thumb absolute branch island to final target %s\n",
179 this, finalTarget
.atom
->name());
182 virtual const ld::File
* file() const { return NULL
; }
183 virtual const char* name() const { return _name
; }
184 virtual uint64_t size() const { return 10; }
185 virtual uint64_t objectAddress() const { return 0; }
186 virtual void copyRawContent(uint8_t buffer
[]) const {
187 OSWriteLittleInt32(&buffer
[0], 0, 0x0c00f240); // movw r12, #0x5678
188 OSWriteLittleInt32(&buffer
[4], 0, 0x0c00f2c0); // movt r12, #0x1234
189 OSWriteLittleInt16(&buffer
[8], 0, 0x4760); // bx r12
191 virtual void setScope(Scope
) { }
192 virtual ld::Fixup::iterator
fixupsBegin() const { return (ld::Fixup
*)&_fixup1
; }
193 virtual ld::Fixup::iterator
fixupsEnd() const { return &((ld::Fixup
*)&_fixup5
)[1]; }
206 class NoPicARMtoThumbMBranchIslandAtom
: public ld::Atom
{
208 NoPicARMtoThumbMBranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
209 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
210 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
211 ld::Atom::symbolTableIn
, false, false, false, ld::Atom::Alignment(2)),
213 _finalTarget(finalTarget
) {
214 if (_s_log
) fprintf(stderr
, "%p: NoPIC ARM-to-Thumb branch island to final target %s\n",
215 this, finalTarget
.atom
->name());
218 virtual const ld::File
* file() const { return NULL
; }
219 virtual const char* name() const { return _name
; }
220 virtual uint64_t size() const { return 8; }
221 virtual uint64_t objectAddress() const { return 0; }
222 virtual void copyRawContent(uint8_t buffer
[]) const {
223 // There is no large displacement thumb1 branch instruction.
224 // Instead use ARM instructions that can jump to thumb.
225 // we use a 32-bit displacement, so we can directly jump to final target which means no island hopping
226 uint32_t targetAddr
= _finalTarget
.atom
->finalAddress();
227 if ( _finalTarget
.atom
->isThumb() )
229 OSWriteLittleInt32(&buffer
[0], 0, 0xe51ff004); // ldr pc, [pc, #-4]
230 OSWriteLittleInt32(&buffer
[4], 0, targetAddr
); // .long target-this
232 virtual void setScope(Scope
) { }
236 TargetAndOffset _finalTarget
;
240 static ld::Atom
* makeBranchIsland(const Options
& opts
, ld::Fixup::Kind kind
, int islandRegion
, const ld::Atom
* nextTarget
,
241 TargetAndOffset finalTarget
, const ld::Section
& inSect
, bool crossSectionBranch
)
244 if ( finalTarget
.offset
== 0 ) {
245 if ( islandRegion
== 0 )
246 asprintf(&name
, "%s.island", finalTarget
.atom
->name());
248 asprintf(&name
, "%s.island.%d", finalTarget
.atom
->name(), islandRegion
+1);
251 asprintf(&name
, "%s_plus_%d.island.%d", finalTarget
.atom
->name(), finalTarget
.offset
, islandRegion
);
255 case ld::Fixup::kindStoreARMBranch24
:
256 case ld::Fixup::kindStoreThumbBranch22
:
257 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
258 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
259 if ( crossSectionBranch
&& opts
.preferSubArchitecture() && opts
.archSupportsThumb2() ) {
260 return new Thumb2toThumbBranchAbsoluteIslandAtom(name
, inSect
, finalTarget
);
262 else if ( finalTarget
.atom
->isThumb() ) {
263 if ( opts
.preferSubArchitecture() && opts
.archSupportsThumb2() ) {
264 return new Thumb2toThumbBranchIslandAtom(name
, nextTarget
, finalTarget
);
266 else if ( opts
.outputSlidable() ) {
267 return new ARMtoThumb1BranchIslandAtom(name
, nextTarget
, finalTarget
);
270 return new NoPicARMtoThumbMBranchIslandAtom(name
, nextTarget
, finalTarget
);
274 return new ARMtoARMBranchIslandAtom(name
, nextTarget
, finalTarget
);
278 assert(0 && "unexpected branch kind");
285 static uint64_t textSizeWhenMightNeedBranchIslands(const Options
& opts
, bool seenThumbBranch
)
287 switch ( opts
.architecture() ) {
289 if ( ! seenThumbBranch
)
290 return 32000000; // ARM can branch +/- 32MB
291 else if ( opts
.preferSubArchitecture() && opts
.archSupportsThumb2() )
292 return 16000000; // thumb2 can branch +/- 16MB
294 return 4000000; // thumb1 can branch +/- 4MB
297 assert(0 && "unexpected architecture");
298 return 0x100000000LL
;
302 static uint64_t maxDistanceBetweenIslands(const Options
& opts
, bool seenThumbBranch
)
304 switch ( opts
.architecture() ) {
306 if ( ! seenThumbBranch
)
307 return 30*1024*1024; // 2MB of branch islands per 32MB
308 else if ( opts
.preferSubArchitecture() && opts
.archSupportsThumb2() )
309 return 14*1024*1024; // 2MB of branch islands per 16MB
311 return 3500000; // 0.5MB of branch islands per 4MB
314 assert(0 && "unexpected architecture");
315 return 0x100000000LL
;
320 // ARM/Thumb can do PC relative branches only a limited distance
321 // (+/-32MB for ARM, less for Thumb; see textSizeWhenMightNeedBranchIslands).
322 // If a branch target is out of range then we insert one or more
323 // "branch islands" between the branch and its target that
325 // Branch Island Algorithm
327 // If the __TEXT segment < 16MB, then no branch islands needed
328 // Otherwise, every 14MB into the __TEXT segment a region is
329 // added which can contain branch islands. Every out-of-range
330 // bl instruction is checked. If it crosses a region, an island
331 // is added to that region with the same target and the bl is
332 // adjusted to target the island instead.
334 // In theory, if too many islands are added to one region, it
335 // could grow the __TEXT enough that other previously in-range
336 // bl branches could be pushed out of range. We reduce the
337 // probability this could happen by placing the ranges every
338 // 14MB which means the region would have to be 2MB (512,000 islands)
339 // before any branches could be pushed out of range.
343 static void makeIslandsForSection(const Options
& opts
, ld::Internal
& state
, ld::Internal::FinalSection
* textSection
)
345 // assign section offsets to each atom in __text section, watch for thumb branches, and find total size
346 bool hasThumbBranches
= false;
347 bool haveCrossSectionBranches
= false;
348 const bool preload
= (opts
.outputKind() == Options::kPreload
);
350 for (std::vector
<const ld::Atom
*>::iterator ait
=textSection
->atoms
.begin(); ait
!= textSection
->atoms
.end(); ++ait
) {
351 const ld::Atom
* atom
= *ait
;
352 // check for thumb branches and cross section branches
353 const ld::Atom
* target
= NULL
;
354 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
355 if ( fit
->firstInCluster() ) {
358 switch ( fit
->binding
) {
359 case ld::Fixup::bindingNone
:
360 case ld::Fixup::bindingByNameUnbound
:
362 case ld::Fixup::bindingByContentBound
:
363 case ld::Fixup::bindingDirectlyBound
:
364 target
= fit
->u
.target
;
366 case ld::Fixup::bindingsIndirectlyBound
:
367 target
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
370 bool haveBranch
= false;
372 case ld::Fixup::kindStoreThumbBranch22
:
373 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
374 hasThumbBranches
= true;
375 // fall into arm branch case
376 case ld::Fixup::kindStoreARMBranch24
:
377 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
383 if ( haveBranch
&& (target
->contentType() != ld::Atom::typeStub
) ) {
384 // <rdar://problem/14792124> haveCrossSectionBranches only applies to -preload builds
385 if ( preload
&& (atom
->section() != target
->section()) )
386 haveCrossSectionBranches
= true;
390 ld::Atom::Alignment atomAlign
= atom
->alignment();
391 uint64_t atomAlignP2
= (1 << atomAlign
.powerOf2
);
392 uint64_t currentModulus
= (offset
% atomAlignP2
);
393 if ( currentModulus
!= atomAlign
.modulus
) {
394 if ( atomAlign
.modulus
> currentModulus
)
395 offset
+= atomAlign
.modulus
-currentModulus
;
397 offset
+= atomAlign
.modulus
+atomAlignP2
-currentModulus
;
399 (const_cast<ld::Atom
*>(atom
))->setSectionOffset(offset
);
400 offset
+= atom
->size();
402 uint64_t totalTextSize
= offset
;
403 if ( (totalTextSize
< textSizeWhenMightNeedBranchIslands(opts
, hasThumbBranches
)) && !haveCrossSectionBranches
)
405 if (_s_log
) fprintf(stderr
, "ld: section %s size=%llu, might need branch islands\n", textSection
->sectionName(), totalTextSize
);
407 // Figure out how many regions of branch islands will be needed, and their locations.
408 // Construct a vector containing the atoms after which branch islands will be inserted,
409 // taking into account follow on fixups. No atom run without an island can exceed kBetweenRegions.
410 const uint64_t kBetweenRegions
= maxDistanceBetweenIslands(opts
, hasThumbBranches
); // place regions of islands every 14MB in __text section
411 std::vector
<const ld::Atom
*> branchIslandInsertionPoints
; // atoms in the atom list after which branch islands will be inserted
412 uint64_t previousIslandEndAddr
= 0;
413 const ld::Atom
*insertionPoint
= NULL
;
414 branchIslandInsertionPoints
.reserve(totalTextSize
/kBetweenRegions
*2);
415 for (std::vector
<const ld::Atom
*>::iterator it
=textSection
->atoms
.begin(); it
!= textSection
->atoms
.end(); it
++) {
416 const ld::Atom
* atom
= *it
;
417 // if we move past the next atom, will the run length exceed kBetweenRegions?
418 if ( atom
->sectionOffset() + atom
->size() > previousIslandEndAddr
+ kBetweenRegions
) {
419 // yes. Add the last known good location (atom) for inserting a branch island.
420 if ( insertionPoint
== NULL
)
421 throwf("Unable to insert branch island. No insertion point available.");
422 branchIslandInsertionPoints
.push_back(insertionPoint
);
423 previousIslandEndAddr
= insertionPoint
->sectionOffset()+insertionPoint
->size();
424 insertionPoint
= NULL
;
426 // Can we insert an island after this atom? If so then keep track of it.
427 if ( !atom
->hasFixupsOfKind(ld::Fixup::kindNoneFollowOn
) )
428 insertionPoint
= atom
;
430 // add one more island after the last atom if close to limit
431 if ( (insertionPoint
!= NULL
) && (insertionPoint
->sectionOffset() + insertionPoint
->size() > previousIslandEndAddr
+ (kBetweenRegions
-0x100000)) )
432 branchIslandInsertionPoints
.push_back(insertionPoint
);
433 if ( haveCrossSectionBranches
&& branchIslandInsertionPoints
.empty() ) {
434 branchIslandInsertionPoints
.push_back(textSection
->atoms
.back());
436 const int kIslandRegionsCount
= branchIslandInsertionPoints
.size();
438 if (_s_log
) fprintf(stderr
, "ld: will use %u branch island regions\n", kIslandRegionsCount
);
439 typedef std::map
<TargetAndOffset
,const ld::Atom
*, TargetAndOffsetComparor
> AtomToIsland
;
440 AtomToIsland
* regionsMap
[kIslandRegionsCount
];
441 uint64_t regionAddresses
[kIslandRegionsCount
];
442 std::vector
<const ld::Atom
*>* regionsIslands
[kIslandRegionsCount
];
443 for(int i
=0; i
< kIslandRegionsCount
; ++i
) {
444 regionsMap
[i
] = new AtomToIsland();
445 regionsIslands
[i
] = new std::vector
<const ld::Atom
*>();
446 regionAddresses
[i
] = branchIslandInsertionPoints
[i
]->sectionOffset() + branchIslandInsertionPoints
[i
]->size();
447 if (_s_log
) fprintf(stderr
, "ld: branch islands will be inserted at 0x%08llX after %s\n", regionAddresses
[i
], branchIslandInsertionPoints
[i
]->name());
449 unsigned int islandCount
= 0;
451 // create islands for branches in __text that are out of range
452 for (std::vector
<const ld::Atom
*>::iterator ait
=textSection
->atoms
.begin(); ait
!= textSection
->atoms
.end(); ++ait
) {
453 const ld::Atom
* atom
= *ait
;
454 const ld::Atom
* target
= NULL
;
456 ld::Fixup
* fixupWithTarget
= NULL
;
457 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
458 if ( fit
->firstInCluster() ) {
460 fixupWithTarget
= NULL
;
463 switch ( fit
->binding
) {
464 case ld::Fixup::bindingNone
:
465 case ld::Fixup::bindingByNameUnbound
:
467 case ld::Fixup::bindingByContentBound
:
468 case ld::Fixup::bindingDirectlyBound
:
469 target
= fit
->u
.target
;
470 fixupWithTarget
= fit
;
472 case ld::Fixup::bindingsIndirectlyBound
:
473 target
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
474 fixupWithTarget
= fit
;
477 bool haveBranch
= false;
479 case ld::Fixup::kindAddAddend
:
480 addend
= fit
->u
.addend
;
482 case ld::Fixup::kindStoreARMBranch24
:
483 case ld::Fixup::kindStoreThumbBranch22
:
484 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
485 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
492 bool crossSectionBranch
= ( preload
&& (atom
->section() != target
->section()) );
493 int64_t srcAddr
= atom
->sectionOffset() + fit
->offsetInAtom
;
494 int64_t dstAddr
= target
->sectionOffset() + addend
;
496 srcAddr
= sAtomToAddress
[atom
] + fit
->offsetInAtom
;
497 dstAddr
= sAtomToAddress
[target
] + addend
;
499 if ( target
->section().type() == ld::Section::typeStub
)
500 dstAddr
= totalTextSize
;
501 int64_t displacement
= dstAddr
- srcAddr
;
502 TargetAndOffset finalTargetAndOffset
= { target
, addend
};
503 const int64_t kBranchLimit
= kBetweenRegions
;
504 if ( crossSectionBranch
&& ((displacement
> kBranchLimit
) || (displacement
< (-kBranchLimit
))) ) {
505 const ld::Atom
* island
;
506 AtomToIsland
* region
= regionsMap
[0];
507 AtomToIsland::iterator pos
= region
->find(finalTargetAndOffset
);
508 if ( pos
== region
->end() ) {
509 island
= makeBranchIsland(opts
, fit
->kind
, 0, target
, finalTargetAndOffset
, atom
->section(), true);
510 (*region
)[finalTargetAndOffset
] = island
;
511 if (_s_log
) fprintf(stderr
, "added absolute branching island %p %s, displacement=%lld\n",
512 island
, island
->name(), displacement
);
514 regionsIslands
[0]->push_back(island
);
517 island
= pos
->second
;
519 if (_s_log
) fprintf(stderr
, "using island %p %s for branch to %s from %s\n", island
, island
->name(), target
->name(), atom
->name());
520 fixupWithTarget
->u
.target
= island
;
521 fixupWithTarget
->binding
= ld::Fixup::bindingDirectlyBound
;
523 else if ( displacement
> kBranchLimit
) {
524 // create forward branch chain
525 const ld::Atom
* nextTarget
= target
;
526 if (_s_log
) fprintf(stderr
, "need forward branching island srcAdr=0x%08llX, dstAdr=0x%08llX, target=%s\n",
527 srcAddr
, dstAddr
, target
->name());
528 for (int i
=kIslandRegionsCount
-1; i
>=0 ; --i
) {
529 AtomToIsland
* region
= regionsMap
[i
];
530 int64_t islandRegionAddr
= regionAddresses
[i
];
531 if ( (srcAddr
< islandRegionAddr
) && ((islandRegionAddr
<= dstAddr
)) ) {
532 AtomToIsland::iterator pos
= region
->find(finalTargetAndOffset
);
533 if ( pos
== region
->end() ) {
534 ld::Atom
* island
= makeBranchIsland(opts
, fit
->kind
, i
, nextTarget
, finalTargetAndOffset
, atom
->section(), false);
535 (*region
)[finalTargetAndOffset
] = island
;
536 if (_s_log
) fprintf(stderr
, "added forward branching island %p %s to region %d for %s\n", island
, island
->name(), i
, atom
->name());
537 regionsIslands
[i
]->push_back(island
);
542 nextTarget
= pos
->second
;
546 if (_s_log
) fprintf(stderr
, "using island %p %s for branch to %s from %s\n", nextTarget
, nextTarget
->name(), target
->name(), atom
->name());
547 fixupWithTarget
->u
.target
= nextTarget
;
548 fixupWithTarget
->binding
= ld::Fixup::bindingDirectlyBound
;
550 else if ( displacement
< (-kBranchLimit
) ) {
551 // create back branching chain
552 const ld::Atom
* prevTarget
= target
;
553 for (int i
=0; i
< kIslandRegionsCount
; ++i
) {
554 AtomToIsland
* region
= regionsMap
[i
];
555 int64_t islandRegionAddr
= regionAddresses
[i
];
556 if ( (dstAddr
< islandRegionAddr
) && (islandRegionAddr
<= srcAddr
) ) {
557 if (_s_log
) fprintf(stderr
, "need backward branching island srcAdr=0x%08llX, dstAdr=0x%08llX, target=%s\n", srcAddr
, dstAddr
, target
->name());
558 AtomToIsland::iterator pos
= region
->find(finalTargetAndOffset
);
559 if ( pos
== region
->end() ) {
560 ld::Atom
* island
= makeBranchIsland(opts
, fit
->kind
, i
, prevTarget
, finalTargetAndOffset
, atom
->section(), false);
561 (*region
)[finalTargetAndOffset
] = island
;
562 if (_s_log
) fprintf(stderr
, "added back branching island %p %s to region %d for %s\n", island
, island
->name(), i
, atom
->name());
563 regionsIslands
[i
]->push_back(island
);
568 prevTarget
= pos
->second
;
572 if (_s_log
) fprintf(stderr
, "using back island %p %s for %s\n", prevTarget
, prevTarget
->name(), atom
->name());
573 fixupWithTarget
->u
.target
= prevTarget
;
574 fixupWithTarget
->binding
= ld::Fixup::bindingDirectlyBound
;
581 // insert islands into __text section and adjust section offsets
582 if ( islandCount
> 0 ) {
583 if ( _s_log
) fprintf(stderr
, "ld: %u branch islands required in %u regions\n", islandCount
, kIslandRegionsCount
);
584 std::vector
<const ld::Atom
*> newAtomList
;
585 newAtomList
.reserve(textSection
->atoms
.size()+islandCount
);
588 for (std::vector
<const ld::Atom
*>::iterator ait
=textSection
->atoms
.begin(); ait
!= textSection
->atoms
.end(); ait
++) {
589 const ld::Atom
* atom
= *ait
;
590 newAtomList
.push_back(atom
);
591 if ( (regionIndex
< kIslandRegionsCount
) && (atom
== branchIslandInsertionPoints
[regionIndex
]) ) {
592 std::vector
<const ld::Atom
*>* islands
= regionsIslands
[regionIndex
];
593 newAtomList
.insert(newAtomList
.end(), islands
->begin(), islands
->end());
597 // swap in new list of atoms for __text section
598 textSection
->atoms
.clear();
599 textSection
->atoms
= newAtomList
;
605 static void buildAddressMap(const Options
& opts
, ld::Internal
& state
) {
606 // Assign addresses to sections
607 state
.setSectionSizesAndAlignments();
608 state
.assignFileOffsets();
610 // Assign addresses to atoms in a side table
611 const bool log
= false;
612 if ( log
) fprintf(stderr
, "buildAddressMap()\n");
613 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
= state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
614 ld::Internal::FinalSection
* sect
= *sit
;
615 uint16_t maxAlignment
= 0;
617 if ( log
) fprintf(stderr
, " section=%s/%s, address=0x%08llX\n", sect
->segmentName(), sect
->sectionName(), sect
->address
);
618 for (std::vector
<const ld::Atom
*>::iterator ait
= sect
->atoms
.begin(); ait
!= sect
->atoms
.end(); ++ait
) {
619 const ld::Atom
* atom
= *ait
;
620 uint32_t atomAlignmentPowerOf2
= atom
->alignment().powerOf2
;
621 uint32_t atomModulus
= atom
->alignment().modulus
;
622 if ( atomAlignmentPowerOf2
> maxAlignment
)
623 maxAlignment
= atomAlignmentPowerOf2
;
624 // calculate section offset for this atom
625 uint64_t alignment
= 1 << atomAlignmentPowerOf2
;
626 uint64_t currentModulus
= (offset
% alignment
);
627 uint64_t requiredModulus
= atomModulus
;
628 if ( currentModulus
!= requiredModulus
) {
629 if ( requiredModulus
> currentModulus
)
630 offset
+= requiredModulus
-currentModulus
;
632 offset
+= requiredModulus
+alignment
-currentModulus
;
635 if ( log
) fprintf(stderr
, " 0x%08llX atom=%p, name=%s\n", sect
->address
+offset
, atom
, atom
->name());
636 sAtomToAddress
[atom
] = sect
->address
+ offset
;
638 offset
+= atom
->size();
645 void doPass(const Options
& opts
, ld::Internal
& state
)
647 // only make branch islands in final linked images
648 if ( opts
.outputKind() == Options::kObjectFile
)
651 // Allow user to disable branch island generation
652 if ( !opts
.allowBranchIslands() )
655 // only ARM needs branch islands
656 switch ( opts
.architecture() ) {
663 if ( opts
.outputKind() == Options::kPreload
) {
664 buildAddressMap(opts
, state
);
667 // scan sections and add island to each code section
668 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
=state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
669 ld::Internal::FinalSection
* sect
= *sit
;
670 if ( sect
->type() == ld::Section::typeCode
)
671 makeIslandsForSection(opts
, state
, sect
);
676 } // namespace branch_island
677 } // namespace passes