1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
3 * Copyright (c) 2009 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
30 #include <libkern/OSByteOrder.h>
35 #include "MachOFileAbstraction.hpp"
37 #include "branch_island.h"
41 namespace branch_island
{
46 struct TargetAndOffset
{ const ld::Atom
* atom
; uint32_t offset
; };
47 class TargetAndOffsetComparor
50 bool operator()(const TargetAndOffset
& left
, const TargetAndOffset
& right
) const
52 if ( left
.atom
!= right
.atom
)
53 return ( left
.atom
< right
.atom
);
54 return ( left
.offset
< right
.offset
);
59 static bool _s_log
= false;
60 static ld::Section
_s_text_section("__TEXT", "__text", ld::Section::typeCode
);
64 class ARMtoARMBranchIslandAtom
: public ld::Atom
{
66 ARMtoARMBranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
67 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
68 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
69 ld::Atom::symbolTableIn
, false, false, false, ld::Atom::Alignment(2)),
72 _finalTarget(finalTarget
) { }
74 virtual const ld::File
* file() const { return NULL
; }
75 virtual bool translationUnitSource(const char** dir
, const char**) const
77 virtual const char* name() const { return _name
; }
78 virtual uint64_t size() const { return 4; }
79 virtual uint64_t objectAddress() const { return 0; }
80 virtual void copyRawContent(uint8_t buffer
[]) const {
81 int64_t displacement
= _target
->finalAddress() - this->finalAddress() - 8;
82 if ( _target
->contentType() == ld::Atom::typeBranchIsland
) {
83 // an ARM branch can branch farther than a thumb branch. The branch
84 // island generation was conservative and put islands every thumb
85 // branch distance apart. Check to see if this is a an island
86 // hopping branch that could be optimized to go directly to target.
87 int64_t skipToFinalDisplacement
= _finalTarget
.atom
->finalAddress() + _finalTarget
.offset
- this->finalAddress() - 8;
88 if ( (skipToFinalDisplacement
< 33554428LL) && (skipToFinalDisplacement
> (-33554432LL)) ) {
89 // can skip branch island and jump straight to target
90 if (_s_log
) fprintf(stderr
, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
91 _target
->name(), _finalTarget
.atom
->finalAddress(), this->finalAddress());
92 displacement
= skipToFinalDisplacement
;
95 // ultimate target is too far, jump to island
96 if (_s_log
) fprintf(stderr
, "%s: jump to branch island at 0x%08llX\n",
97 _target
->name(), _finalTarget
.atom
->finalAddress());
100 uint32_t imm24
= (displacement
>> 2) & 0x00FFFFFF;
101 int32_t branchInstruction
= 0xEA000000 | imm24
;
102 OSWriteLittleInt32(buffer
, 0, branchInstruction
);
104 virtual void setScope(Scope
) { }
108 const ld::Atom
* _target
;
109 TargetAndOffset _finalTarget
;
114 class ARMtoThumb1BranchIslandAtom
: public ld::Atom
{
116 ARMtoThumb1BranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
117 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
118 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
119 ld::Atom::symbolTableIn
, false, false, false, ld::Atom::Alignment(2)),
122 _finalTarget(finalTarget
) { }
124 virtual const ld::File
* file() const { return NULL
; }
125 virtual bool translationUnitSource(const char** dir
, const char**) const
127 virtual const char* name() const { return _name
; }
128 virtual uint64_t size() const { return 16; }
129 virtual uint64_t objectAddress() const { return 0; }
130 virtual void copyRawContent(uint8_t buffer
[]) const {
131 // There is no large displacement thumb1 branch instruction.
132 // Instead use ARM instructions that can jump to thumb.
133 // we use a 32-bit displacement, so we can directly jump to target which means no island hopping
134 int64_t displacement
= _finalTarget
.atom
->finalAddress() + _finalTarget
.offset
- (this->finalAddress() + 12);
135 if ( _finalTarget
.atom
->isThumb() )
137 if (_s_log
) fprintf(stderr
, "%s: 4 ARM instruction jump to final target at 0x%08llX\n",
138 _target
->name(), _finalTarget
.atom
->finalAddress());
139 OSWriteLittleInt32(&buffer
[ 0], 0, 0xe59fc004); // ldr ip, pc + 4
140 OSWriteLittleInt32(&buffer
[ 4], 0, 0xe08fc00c); // add ip, pc, ip
141 OSWriteLittleInt32(&buffer
[ 8], 0, 0xe12fff1c); // bx ip
142 OSWriteLittleInt32(&buffer
[12], 0, displacement
); // .long target-this
144 virtual void setScope(Scope
) { }
148 const ld::Atom
* _target
;
149 TargetAndOffset _finalTarget
;
154 class Thumb2toThumbBranchIslandAtom
: public ld::Atom
{
156 Thumb2toThumbBranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
157 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
158 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
159 ld::Atom::symbolTableIn
, false, true, false, ld::Atom::Alignment(1)),
162 _finalTarget(finalTarget
) { }
164 virtual const ld::File
* file() const { return NULL
; }
165 virtual bool translationUnitSource(const char** dir
, const char**) const
167 virtual const char* name() const { return _name
; }
168 virtual uint64_t size() const { return 4; }
169 virtual uint64_t objectAddress() const { return 0; }
170 virtual void copyRawContent(uint8_t buffer
[]) const {
171 int64_t displacement
= _target
->finalAddress() - this->finalAddress() - 4;
172 if ( _target
->contentType() == ld::Atom::typeBranchIsland
) {
173 // an ARM branch can branch farther than a thumb branch. The branch
174 // island generation was conservative and put islands every thumb
175 // branch distance apart. Check to see if this is a an island
176 // hopping branch that could be optimized to go directly to target.
177 int64_t skipToFinalDisplacement
= _finalTarget
.atom
->finalAddress() + _finalTarget
.offset
- this->finalAddress() - 4;
178 if ( (skipToFinalDisplacement
< 16777214) && (skipToFinalDisplacement
> (-16777216LL)) ) {
179 // can skip branch island and jump straight to target
180 if (_s_log
) fprintf(stderr
, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
181 _target
->name(), _finalTarget
.atom
->finalAddress(), this->finalAddress());
182 displacement
= skipToFinalDisplacement
;
185 // ultimate target is too far for thumb2 branch, jump to island
186 if (_s_log
) fprintf(stderr
, "%s: jump to branch island at 0x%08llX\n",
187 _target
->name(), _finalTarget
.atom
->finalAddress());
190 // The instruction is really two instructions:
191 // The lower 16 bits are the first instruction, which contains the high
192 // 11 bits of the displacement.
193 // The upper 16 bits are the second instruction, which contains the low
194 // 11 bits of the displacement, as well as differentiating bl and blx.
195 uint32_t s
= (uint32_t)(displacement
>> 24) & 0x1;
196 uint32_t i1
= (uint32_t)(displacement
>> 23) & 0x1;
197 uint32_t i2
= (uint32_t)(displacement
>> 22) & 0x1;
198 uint32_t imm10
= (uint32_t)(displacement
>> 12) & 0x3FF;
199 uint32_t imm11
= (uint32_t)(displacement
>> 1) & 0x7FF;
200 uint32_t j1
= (i1
== s
);
201 uint32_t j2
= (i2
== s
);
202 uint32_t opcode
= 0x9000F000;
203 uint32_t nextDisp
= (j1
<< 13) | (j2
<< 11) | imm11
;
204 uint32_t firstDisp
= (s
<< 10) | imm10
;
205 uint32_t newInstruction
= opcode
| (nextDisp
<< 16) | firstDisp
;
206 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
207 // s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
208 OSWriteLittleInt32(buffer
, 0, newInstruction
);
210 virtual void setScope(Scope
) { }
214 const ld::Atom
* _target
;
215 TargetAndOffset _finalTarget
;
219 class NoPicARMtoThumbMBranchIslandAtom
: public ld::Atom
{
221 NoPicARMtoThumbMBranchIslandAtom(const char* nm
, const ld::Atom
* target
, TargetAndOffset finalTarget
)
222 : ld::Atom(_s_text_section
, ld::Atom::definitionRegular
, ld::Atom::combineNever
,
223 ld::Atom::scopeLinkageUnit
, ld::Atom::typeBranchIsland
,
224 ld::Atom::symbolTableIn
, false, false, false, ld::Atom::Alignment(2)),
227 _finalTarget(finalTarget
) { }
229 virtual const ld::File
* file() const { return NULL
; }
230 virtual bool translationUnitSource(const char** dir
, const char**) const
232 virtual const char* name() const { return _name
; }
233 virtual uint64_t size() const { return 8; }
234 virtual uint64_t objectAddress() const { return 0; }
235 virtual void copyRawContent(uint8_t buffer
[]) const {
236 // There is no large displacement thumb1 branch instruction.
237 // Instead use ARM instructions that can jump to thumb.
238 // we use a 32-bit displacement, so we can directly jump to final target which means no island hopping
239 uint32_t targetAddr
= _finalTarget
.atom
->finalAddress();
240 if ( _finalTarget
.atom
->isThumb() )
242 if (_s_log
) fprintf(stderr
, "%s: 2 ARM instruction jump to final target at 0x%08llX\n",
243 _target
->name(), _finalTarget
.atom
->finalAddress());
244 OSWriteLittleInt32(&buffer
[0], 0, 0xe51ff004); // ldr pc, [pc, #-4]
245 OSWriteLittleInt32(&buffer
[4], 0, targetAddr
); // .long target-this
247 virtual void setScope(Scope
) { }
251 const ld::Atom
* _target
;
252 TargetAndOffset _finalTarget
;
256 static ld::Atom
* makeBranchIsland(const Options
& opts
, ld::Fixup::Kind kind
, int islandRegion
, const ld::Atom
* nextTarget
, TargetAndOffset finalTarget
)
259 if ( finalTarget
.offset
== 0 ) {
260 if ( islandRegion
== 0 )
261 asprintf(&name
, "%s.island", finalTarget
.atom
->name());
263 asprintf(&name
, "%s.island.%d", finalTarget
.atom
->name(), islandRegion
+1);
266 asprintf(&name
, "%s_plus_%d.island.%d", finalTarget
.atom
->name(), finalTarget
.offset
, islandRegion
);
270 case ld::Fixup::kindStoreARMBranch24
:
271 case ld::Fixup::kindStoreThumbBranch22
:
272 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
273 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
274 if ( finalTarget
.atom
->isThumb() ) {
275 if ( opts
.preferSubArchitecture() && opts
.archSupportsThumb2() ) {
276 return new Thumb2toThumbBranchIslandAtom(name
, nextTarget
, finalTarget
);
278 else if ( opts
.outputSlidable() ) {
279 return new ARMtoThumb1BranchIslandAtom(name
, nextTarget
, finalTarget
);
282 return new NoPicARMtoThumbMBranchIslandAtom(name
, nextTarget
, finalTarget
);
286 return new ARMtoARMBranchIslandAtom(name
, nextTarget
, finalTarget
);
290 assert(0 && "unexpected branch kind");
297 static uint64_t textSizeWhenMightNeedBranchIslands(const Options
& opts
, bool seenThumbBranch
)
299 switch ( opts
.architecture() ) {
301 if ( ! seenThumbBranch
)
302 return 32000000; // ARM can branch +/- 32MB
303 else if ( opts
.preferSubArchitecture() && opts
.archSupportsThumb2() )
304 return 16000000; // thumb2 can branch +/- 16MB
306 return 4000000; // thumb1 can branch +/- 4MB
309 assert(0 && "unexpected architecture");
310 return 0x100000000LL
;
314 static uint64_t maxDistanceBetweenIslands(const Options
& opts
, bool seenThumbBranch
)
316 switch ( opts
.architecture() ) {
318 if ( ! seenThumbBranch
)
319 return 30*1024*1024; // 2MB of branch islands per 32MB
320 else if ( opts
.preferSubArchitecture() && opts
.archSupportsThumb2() )
321 return 14*1024*1024; // 2MB of branch islands per 16MB
323 return 3500000; // 0.5MB of branch islands per 4MB
326 assert(0 && "unexpected architecture");
327 return 0x100000000LL
;
// ARM instructions can do PC relative branches only so far (+/-32MB for ARM,
// +/-16MB for Thumb2, +/-4MB for Thumb1).
// If a branch target is farther than that, we insert one or more
// "branch islands" between the branch and its target that
// allows island hopping to the target.
337 // Branch Island Algorithm
339 // If the __TEXT segment < 16MB, then no branch islands needed
340 // Otherwise, every 14MB into the __TEXT segment a region is
341 // added which can contain branch islands. Every out-of-range
342 // bl instruction is checked. If it crosses a region, an island
343 // is added to that region with the same target and the bl is
344 // adjusted to target the island instead.
346 // In theory, if too many islands are added to one region, it
347 // could grow the __TEXT enough that other previously in-range
348 // bl branches could be pushed out of range. We reduce the
349 // probability this could happen by placing the ranges every
350 // 14MB which means the region would have to be 2MB (512,000 islands)
351 // before any branches could be pushed out of range.
354 void doPass(const Options
& opts
, ld::Internal
& state
)
356 // only make branch islands in final linked images
357 if ( opts
.outputKind() == Options::kObjectFile
)
360 // only ARM needs branch islands
361 switch ( opts
.architecture() ) {
368 // scan to find __text section
369 ld::Internal::FinalSection
* textSection
= NULL
;
370 for (std::vector
<ld::Internal::FinalSection
*>::iterator sit
=state
.sections
.begin(); sit
!= state
.sections
.end(); ++sit
) {
371 ld::Internal::FinalSection
* sect
= *sit
;
372 if ( strcmp(sect
->sectionName(), "__text") == 0 )
375 if ( textSection
== NULL
)
378 // assign section offsets to each atom in __text section, watch for thumb branches, and find total size
379 const bool isARM
= (opts
.architecture() == CPU_TYPE_ARM
);
380 bool hasThumbBranches
= false;
382 for (std::vector
<const ld::Atom
*>::iterator ait
=textSection
->atoms
.begin(); ait
!= textSection
->atoms
.end(); ++ait
) {
383 const ld::Atom
* atom
= *ait
;
384 // check for thumb branches
385 if ( isARM
&& ~hasThumbBranches
) {
386 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
387 switch ( fit
->kind
) {
388 case ld::Fixup::kindStoreThumbBranch22
:
389 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
390 hasThumbBranches
= true;
398 ld::Atom::Alignment atomAlign
= atom
->alignment();
399 uint64_t atomAlignP2
= (1 << atomAlign
.powerOf2
);
400 uint64_t currentModulus
= (offset
% atomAlignP2
);
401 if ( currentModulus
!= atomAlign
.modulus
) {
402 if ( atomAlign
.modulus
> currentModulus
)
403 offset
+= atomAlign
.modulus
-currentModulus
;
405 offset
+= atomAlign
.modulus
+atomAlignP2
-currentModulus
;
407 (const_cast<ld::Atom
*>(atom
))->setSectionOffset(offset
);
408 offset
+= atom
->size();
410 uint64_t totalTextSize
= offset
;
411 if ( totalTextSize
< textSizeWhenMightNeedBranchIslands(opts
, hasThumbBranches
) )
413 if (_s_log
) fprintf(stderr
, "ld: __text section size=%llu, might need branch islands\n", totalTextSize
);
415 // figure out how many regions of branch islands will be needed
416 const uint32_t kBetweenRegions
= maxDistanceBetweenIslands(opts
, hasThumbBranches
); // place regions of islands every 14MB in __text section
417 const int kIslandRegionsCount
= totalTextSize
/ kBetweenRegions
;
418 typedef std::map
<TargetAndOffset
,const ld::Atom
*, TargetAndOffsetComparor
> AtomToIsland
;
419 AtomToIsland
* regionsMap
[kIslandRegionsCount
];
420 std::vector
<const ld::Atom
*>* regionsIslands
[kIslandRegionsCount
];
421 for(int i
=0; i
< kIslandRegionsCount
; ++i
) {
422 regionsMap
[i
] = new AtomToIsland();
423 regionsIslands
[i
] = new std::vector
<const ld::Atom
*>();
425 unsigned int islandCount
= 0;
426 if (_s_log
) fprintf(stderr
, "ld: will use %u branch island regions\n", kIslandRegionsCount
);
428 // create islands for branches in __text that are out of range
429 for (std::vector
<const ld::Atom
*>::iterator ait
=textSection
->atoms
.begin(); ait
!= textSection
->atoms
.end(); ++ait
) {
430 const ld::Atom
* atom
= *ait
;
431 const ld::Atom
* target
= NULL
;
433 ld::Fixup
* fixupWithTarget
= NULL
;
434 for (ld::Fixup::iterator fit
= atom
->fixupsBegin(), end
=atom
->fixupsEnd(); fit
!= end
; ++fit
) {
435 if ( fit
->firstInCluster() ) {
437 fixupWithTarget
= NULL
;
440 switch ( fit
->binding
) {
441 case ld::Fixup::bindingNone
:
442 case ld::Fixup::bindingByNameUnbound
:
444 case ld::Fixup::bindingByContentBound
:
445 case ld::Fixup::bindingDirectlyBound
:
446 target
= fit
->u
.target
;
447 fixupWithTarget
= fit
;
449 case ld::Fixup::bindingsIndirectlyBound
:
450 target
= state
.indirectBindingTable
[fit
->u
.bindingIndex
];
451 fixupWithTarget
= fit
;
454 bool haveBranch
= false;
456 case ld::Fixup::kindAddAddend
:
457 addend
= fit
->u
.addend
;
459 case ld::Fixup::kindStoreARMBranch24
:
460 case ld::Fixup::kindStoreThumbBranch22
:
461 case ld::Fixup::kindStoreTargetAddressARMBranch24
:
462 case ld::Fixup::kindStoreTargetAddressThumbBranch22
:
469 int64_t srcAddr
= atom
->sectionOffset() + fit
->offsetInAtom
;
470 int64_t dstAddr
= target
->sectionOffset() + addend
;
471 if ( target
->section().type() == ld::Section::typeStub
)
472 dstAddr
= totalTextSize
;
473 int64_t displacement
= dstAddr
- srcAddr
;
474 TargetAndOffset finalTargetAndOffset
= { target
, addend
};
475 const int64_t kBranchLimit
= kBetweenRegions
;
476 if ( displacement
> kBranchLimit
) {
477 // create forward branch chain
478 const ld::Atom
* nextTarget
= target
;
479 for (int i
=kIslandRegionsCount
-1; i
>=0 ; --i
) {
480 AtomToIsland
* region
= regionsMap
[i
];
481 int64_t islandRegionAddr
= kBetweenRegions
* (i
+1);
482 if ( (srcAddr
< islandRegionAddr
) && (islandRegionAddr
<= dstAddr
) ) {
483 AtomToIsland::iterator pos
= region
->find(finalTargetAndOffset
);
484 if ( pos
== region
->end() ) {
485 ld::Atom
* island
= makeBranchIsland(opts
, fit
->kind
, i
, nextTarget
, finalTargetAndOffset
);
486 (*region
)[finalTargetAndOffset
] = island
;
487 if (_s_log
) fprintf(stderr
, "added island %s to region %d for %s\n", island
->name(), i
, atom
->name());
488 regionsIslands
[i
]->push_back(island
);
493 nextTarget
= pos
->second
;
497 if (_s_log
) fprintf(stderr
, "using island %s for branch to %s from %s\n", nextTarget
->name(), target
->name(), atom
->name());
498 fixupWithTarget
->u
.target
= nextTarget
;
499 fixupWithTarget
->binding
= ld::Fixup::bindingDirectlyBound
;
501 else if ( displacement
< (-kBranchLimit
) ) {
502 // create back branching chain
503 const ld::Atom
* prevTarget
= target
;
504 for (int i
=0; i
< kIslandRegionsCount
; ++i
) {
505 AtomToIsland
* region
= regionsMap
[i
];
506 int64_t islandRegionAddr
= kBetweenRegions
* (i
+1);
507 if ( (dstAddr
<= islandRegionAddr
) && (islandRegionAddr
< srcAddr
) ) {
508 AtomToIsland::iterator pos
= region
->find(finalTargetAndOffset
);
509 if ( pos
== region
->end() ) {
510 ld::Atom
* island
= makeBranchIsland(opts
, fit
->kind
, i
, prevTarget
, finalTargetAndOffset
);
511 (*region
)[finalTargetAndOffset
] = island
;
512 if (_s_log
) fprintf(stderr
, "added back island %s to region %d for %s\n", island
->name(), i
, atom
->name());
513 regionsIslands
[i
]->push_back(island
);
518 prevTarget
= pos
->second
;
522 if (_s_log
) fprintf(stderr
, "using back island %s for %s\n", prevTarget
->name(), atom
->name());
523 fixupWithTarget
->u
.target
= prevTarget
;
524 fixupWithTarget
->binding
= ld::Fixup::bindingDirectlyBound
;
531 // insert islands into __text section and adjust section offsets
532 if ( islandCount
> 0 ) {
533 if ( _s_log
) fprintf(stderr
, "ld: %u branch islands required in %u regions\n", islandCount
, kIslandRegionsCount
);
534 std::vector
<const ld::Atom
*> newAtomList
;
535 newAtomList
.reserve(textSection
->atoms
.size()+islandCount
);
536 uint64_t islandRegionAddr
= kBetweenRegions
;;
538 for (std::vector
<const ld::Atom
*>::iterator it
=textSection
->atoms
.begin(); it
!= textSection
->atoms
.end(); it
++) {
539 const ld::Atom
* atom
= *it
;
540 if ( (atom
->sectionOffset()+atom
->size()) > islandRegionAddr
) {
541 std::vector
<const ld::Atom
*>* regionIslands
= regionsIslands
[regionIndex
];
542 for (std::vector
<const ld::Atom
*>::iterator rit
=regionIslands
->begin(); rit
!= regionIslands
->end(); rit
++) {
543 const ld::Atom
* islandAtom
= *rit
;
544 newAtomList
.push_back(islandAtom
);
545 if ( _s_log
) fprintf(stderr
, "inserting island %s into __text section\n", islandAtom
->name());
548 islandRegionAddr
+= kBetweenRegions
;
550 newAtomList
.push_back(atom
);
552 // put any remaining islands at end of __text section
553 if ( regionIndex
< kIslandRegionsCount
) {
554 std::vector
<const ld::Atom
*>* regionIslands
= regionsIslands
[regionIndex
];
555 for (std::vector
<const ld::Atom
*>::iterator rit
=regionIslands
->begin(); rit
!= regionIslands
->end(); rit
++) {
556 const ld::Atom
* islandAtom
= *rit
;
557 newAtomList
.push_back(islandAtom
);
558 if ( _s_log
) fprintf(stderr
, "inserting island %s into __text section\n", islandAtom
->name());
561 // swap in new list of atoms for __text section
562 textSection
->atoms
.clear();
563 textSection
->atoms
= newAtomList
;
569 } // namespace branch_island
570 } // namespace passes