]> git.saurik.com Git - apple/ld64.git/blame_incremental - src/ld/passes/branch_island.cpp
ld64-128.2.tar.gz
[apple/ld64.git] / src / ld / passes / branch_island.cpp
... / ...
CommitLineData
1/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2009 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26#include <stdint.h>
27#include <math.h>
28#include <unistd.h>
29#include <dlfcn.h>
30#include <libkern/OSByteOrder.h>
31
32#include <vector>
33#include <map>
34
35#include "MachOFileAbstraction.hpp"
36#include "ld.hpp"
37#include "branch_island.h"
38
39namespace ld {
40namespace passes {
41namespace branch_island {
42
43
44
45
46struct TargetAndOffset { const ld::Atom* atom; uint32_t offset; };
47class TargetAndOffsetComparor
48{
49public:
50 bool operator()(const TargetAndOffset& left, const TargetAndOffset& right) const
51 {
52 if ( left.atom != right.atom )
53 return ( left.atom < right.atom );
54 return ( left.offset < right.offset );
55 }
56};
57
58
// set to true for verbose island-creation logging to stderr
static bool _s_log = false;
// all branch-island atoms are placed in the __TEXT,__text section
static ld::Section _s_text_section("__TEXT", "__text", ld::Section::typeCode);
61
62
63
64class ARMtoARMBranchIslandAtom : public ld::Atom {
65public:
66 ARMtoARMBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
67 : ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
68 ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
69 ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
70 _name(nm),
71 _target(target),
72 _finalTarget(finalTarget) { }
73
74 virtual const ld::File* file() const { return NULL; }
75 virtual bool translationUnitSource(const char** dir, const char**) const
76 { return false; }
77 virtual const char* name() const { return _name; }
78 virtual uint64_t size() const { return 4; }
79 virtual uint64_t objectAddress() const { return 0; }
80 virtual void copyRawContent(uint8_t buffer[]) const {
81 int64_t displacement = _target->finalAddress() - this->finalAddress() - 8;
82 if ( _target->contentType() == ld::Atom::typeBranchIsland ) {
83 // an ARM branch can branch farther than a thumb branch. The branch
84 // island generation was conservative and put islands every thumb
85 // branch distance apart. Check to see if this is a an island
86 // hopping branch that could be optimized to go directly to target.
87 int64_t skipToFinalDisplacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - this->finalAddress() - 8;
88 if ( (skipToFinalDisplacement < 33554428LL) && (skipToFinalDisplacement > (-33554432LL)) ) {
89 // can skip branch island and jump straight to target
90 if (_s_log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
91 _target->name(), _finalTarget.atom->finalAddress(), this->finalAddress());
92 displacement = skipToFinalDisplacement;
93 }
94 else {
95 // ultimate target is too far, jump to island
96 if (_s_log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n",
97 _target->name(), _finalTarget.atom->finalAddress());
98 }
99 }
100 uint32_t imm24 = (displacement >> 2) & 0x00FFFFFF;
101 int32_t branchInstruction = 0xEA000000 | imm24;
102 OSWriteLittleInt32(buffer, 0, branchInstruction);
103 }
104 virtual void setScope(Scope) { }
105
106private:
107 const char* _name;
108 const ld::Atom* _target;
109 TargetAndOffset _finalTarget;
110};
111
112
113
//
// Island for branching from ARM into Thumb1 code in a slidable (PIC) image.
// Thumb1 has no long-displacement branch, so the island is a 16-byte ARM
// sequence that computes the destination PC-relatively and uses BX to switch
// to Thumb mode; since it reaches anywhere, it never chains to another island.
//
class ARMtoThumb1BranchIslandAtom : public ld::Atom {
public:
	ARMtoThumb1BranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
		: ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
					ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
					ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
		_name(nm),
		_target(target),			// kept for logging only; jump goes to _finalTarget
		_finalTarget(finalTarget) { }

	virtual const ld::File* file() const { return NULL; }
	virtual bool translationUnitSource(const char** dir, const char**) const
		{ return false; }
	virtual const char* name() const { return _name; }
	virtual uint64_t size() const { return 16; }
	virtual uint64_t objectAddress() const { return 0; }
	virtual void copyRawContent(uint8_t buffer[]) const {
		// There is no large displacement thumb1 branch instruction.
		// Instead use ARM instructions that can jump to thumb.
		// we use a 32-bit displacement, so we can directly jump to target which means no island hopping
		// base is this+12: the "add" executes with PC reading as its own address + 8
		int64_t displacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - (this->finalAddress() + 12);
		if ( _finalTarget.atom->isThumb() )
			displacement |= 1;	// low bit set tells BX to enter Thumb state
		if (_s_log) fprintf(stderr, "%s: 4 ARM instruction jump to final target at 0x%08llX\n",
				_target->name(), _finalTarget.atom->finalAddress());
		OSWriteLittleInt32(&buffer[ 0], 0, 0xe59fc004);	// ldr ip, pc + 4
		OSWriteLittleInt32(&buffer[ 4], 0, 0xe08fc00c);	// add ip, pc, ip
		OSWriteLittleInt32(&buffer[ 8], 0, 0xe12fff1c);	// bx ip
		OSWriteLittleInt32(&buffer[12], 0, displacement);	// .long target-(this+12)  (truncated to 32 bits)
	}
	virtual void setScope(Scope) { }

private:
	const char* _name;
	const ld::Atom* _target;
	TargetAndOffset _finalTarget;
};
151
152
153
//
// Island for Thumb2 branches: a single 32-bit Thumb2 B.W instruction
// (+/-16MB reach).  If the next hop is another island but the ultimate
// target is in range, it branches straight to the final target.
//
class Thumb2toThumbBranchIslandAtom : public ld::Atom {
public:
	Thumb2toThumbBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
		: ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
					ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
					ld::Atom::symbolTableIn, false, true, false, ld::Atom::Alignment(1)),
		_name(nm),
		_target(target),			// next hop (may be another island)
		_finalTarget(finalTarget) { }	// ultimate branch destination

	virtual const ld::File* file() const { return NULL; }
	virtual bool translationUnitSource(const char** dir, const char**) const
		{ return false; }
	virtual const char* name() const { return _name; }
	virtual uint64_t size() const { return 4; }
	virtual uint64_t objectAddress() const { return 0; }
	virtual void copyRawContent(uint8_t buffer[]) const {
		// Thumb branch offsets are relative to PC, which reads as this-instruction + 4
		int64_t displacement = _target->finalAddress() - this->finalAddress() - 4;
		if ( _target->contentType() == ld::Atom::typeBranchIsland ) {
			// an ARM branch can branch farther than a thumb branch.  The branch
			// island generation was conservative and put islands every thumb
			// branch distance apart.  Check to see if this is a an island
			// hopping branch that could be optimized to go directly to target.
			int64_t skipToFinalDisplacement = _finalTarget.atom->finalAddress() + _finalTarget.offset - this->finalAddress() - 4;
			if ( (skipToFinalDisplacement < 16777214) && (skipToFinalDisplacement > (-16777216LL)) ) {
				// within Thumb2 +/-16MB reach: skip branch island and jump straight to target
				if (_s_log) fprintf(stderr, "%s: optimized jump to final target at 0x%08llX, thisAddr=0x%08llX\n",
						_target->name(), _finalTarget.atom->finalAddress(), this->finalAddress());
				displacement = skipToFinalDisplacement;
			}
			else {
				// ultimate target is too far for thumb2 branch, jump to island
				if (_s_log) fprintf(stderr, "%s: jump to branch island at 0x%08llX\n",
						_target->name(), _finalTarget.atom->finalAddress());
			}
		}
		// Encode a 32-bit Thumb2 unconditional branch, B.W (encoding T4).
		// The instruction is really two 16-bit halfwords stored low-half first:
		// the lower 16 bits hold the sign bit S and the high 10 displacement
		// bits (imm10); the upper 16 bits hold the low 11 bits (imm11) plus
		// J1/J2, which are the I1/I2 displacement bits XNOR'ed with S.
		// (opcode 0x9000F000 is B.W, not BL/BLX.)
		uint32_t s = (uint32_t)(displacement >> 24) & 0x1;
		uint32_t i1 = (uint32_t)(displacement >> 23) & 0x1;
		uint32_t i2 = (uint32_t)(displacement >> 22) & 0x1;
		uint32_t imm10 = (uint32_t)(displacement >> 12) & 0x3FF;
		uint32_t imm11 = (uint32_t)(displacement >> 1) & 0x7FF;
		uint32_t j1 = (i1 == s);
		uint32_t j2 = (i2 == s);
		uint32_t opcode = 0x9000F000;
		uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
		uint32_t firstDisp = (s << 10) | imm10;
		uint32_t newInstruction = opcode | (nextDisp << 16) | firstDisp;
		//warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, opcode=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
		//	s, j1, j2, imm10, imm11, opcode, firstDisp, nextDisp, newInstruction, displacement, inAtom->getDisplayName(), ref->getTarget().getDisplayName());
		OSWriteLittleInt32(buffer, 0, newInstruction);
	}
	virtual void setScope(Scope) { }

private:
	const char* _name;
	const ld::Atom* _target;
	TargetAndOffset _finalTarget;
};
217
218
//
// Island for branching from ARM into Thumb code in a NON-slidable image:
// loads the absolute target address (low bit set for Thumb) directly into PC.
// Only valid when the image cannot slide, since the address is not PC-relative.
//
class NoPicARMtoThumbMBranchIslandAtom : public ld::Atom {
public:
	NoPicARMtoThumbMBranchIslandAtom(const char* nm, const ld::Atom* target, TargetAndOffset finalTarget)
		: ld::Atom(_s_text_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
					ld::Atom::scopeLinkageUnit, ld::Atom::typeBranchIsland,
					ld::Atom::symbolTableIn, false, false, false, ld::Atom::Alignment(2)),
		_name(nm),
		_target(target),			// kept for logging only; jump goes to _finalTarget
		_finalTarget(finalTarget) { }

	virtual const ld::File* file() const { return NULL; }
	virtual bool translationUnitSource(const char** dir, const char**) const
		{ return false; }
	virtual const char* name() const { return _name; }
	virtual uint64_t size() const { return 8; }
	virtual uint64_t objectAddress() const { return 0; }
	virtual void copyRawContent(uint8_t buffer[]) const {
		// There is no large displacement thumb1 branch instruction.
		// Instead use ARM instructions that can jump to thumb.
		// we use a 32-bit displacement, so we can directly jump to final target which means no island hopping
		// finalAddress() is uint64_t; truncation to 32 bits is fine for 32-bit ARM images
		uint32_t targetAddr = _finalTarget.atom->finalAddress();
		if ( _finalTarget.atom->isThumb() )
			targetAddr |= 1;	// low bit set so the load into PC enters Thumb state
		if (_s_log) fprintf(stderr, "%s: 2 ARM instruction jump to final target at 0x%08llX\n",
				_target->name(), _finalTarget.atom->finalAddress());
		OSWriteLittleInt32(&buffer[0], 0, 0xe51ff004);	// ldr pc, [pc, #-4]
		OSWriteLittleInt32(&buffer[4], 0, targetAddr);	// .long target (absolute address, not a displacement)
	}
	virtual void setScope(Scope) { }

private:
	const char* _name;
	const ld::Atom* _target;
	TargetAndOffset _finalTarget;
};
254
255
256static ld::Atom* makeBranchIsland(const Options& opts, ld::Fixup::Kind kind, int islandRegion, const ld::Atom* nextTarget, TargetAndOffset finalTarget)
257{
258 char* name;
259 if ( finalTarget.offset == 0 ) {
260 if ( islandRegion == 0 )
261 asprintf(&name, "%s.island", finalTarget.atom->name());
262 else
263 asprintf(&name, "%s.island.%d", finalTarget.atom->name(), islandRegion+1);
264 }
265 else {
266 asprintf(&name, "%s_plus_%d.island.%d", finalTarget.atom->name(), finalTarget.offset, islandRegion);
267 }
268
269 switch ( kind ) {
270 case ld::Fixup::kindStoreARMBranch24:
271 case ld::Fixup::kindStoreThumbBranch22:
272 case ld::Fixup::kindStoreTargetAddressARMBranch24:
273 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
274 if ( finalTarget.atom->isThumb() ) {
275 if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() ) {
276 return new Thumb2toThumbBranchIslandAtom(name, nextTarget, finalTarget);
277 }
278 else if ( opts.outputSlidable() ) {
279 return new ARMtoThumb1BranchIslandAtom(name, nextTarget, finalTarget);
280 }
281 else {
282 return new NoPicARMtoThumbMBranchIslandAtom(name, nextTarget, finalTarget);
283 }
284 }
285 else {
286 return new ARMtoARMBranchIslandAtom(name, nextTarget, finalTarget);
287 }
288 break;
289 default:
290 assert(0 && "unexpected branch kind");
291 break;
292 }
293 return NULL;
294}
295
296
297static uint64_t textSizeWhenMightNeedBranchIslands(const Options& opts, bool seenThumbBranch)
298{
299 switch ( opts.architecture() ) {
300 case CPU_TYPE_ARM:
301 if ( ! seenThumbBranch )
302 return 32000000; // ARM can branch +/- 32MB
303 else if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() )
304 return 16000000; // thumb2 can branch +/- 16MB
305 else
306 return 4000000; // thumb1 can branch +/- 4MB
307 break;
308 }
309 assert(0 && "unexpected architecture");
310 return 0x100000000LL;
311}
312
313
314static uint64_t maxDistanceBetweenIslands(const Options& opts, bool seenThumbBranch)
315{
316 switch ( opts.architecture() ) {
317 case CPU_TYPE_ARM:
318 if ( ! seenThumbBranch )
319 return 30*1024*1024; // 2MB of branch islands per 32MB
320 else if ( opts.preferSubArchitecture() && opts.archSupportsThumb2() )
321 return 14*1024*1024; // 2MB of branch islands per 16MB
322 else
323 return 3500000; // 0.5MB of branch islands per 4MB
324 break;
325 }
326 assert(0 && "unexpected architecture");
327 return 0x100000000LL;
328}
329
330
//
// ARM can do PC relative branches as far as +/-32MB (Thumb2 +/-16MB,
// Thumb1 +/-4MB).  If a branch target is farther than that, we insert
// one or more "branch islands" between the branch and its target that
// allow island hopping to the target.
//
// Branch Island Algorithm
//
// If the __TEXT segment is smaller than the branch reach, then no branch
// islands are needed.  Otherwise, at fixed intervals into the __TEXT
// segment (slightly less than the branch reach) a region is added which
// can contain branch islands.  Every out-of-range branch instruction is
// checked.  If it crosses a region, an island is added to that region
// with the same target and the branch is adjusted to target the island
// instead.
//
// In theory, if too many islands are added to one region, it
// could grow the __TEXT enough that other previously in-range
// branches could be pushed out of range.  We reduce the
// probability this could happen by leaving head-room in each interval
// (e.g. regions every 14MB for a 16MB reach), which means a region would
// have to grow by 2MB (512,000 islands) before any branches could be
// pushed out of range.
//
354void doPass(const Options& opts, ld::Internal& state)
355{
356 // only make branch islands in final linked images
357 if ( opts.outputKind() == Options::kObjectFile )
358 return;
359
360 // only ARM needs branch islands
361 switch ( opts.architecture() ) {
362 case CPU_TYPE_ARM:
363 break;
364 default:
365 return;
366 }
367
368 // scan to find __text section
369 ld::Internal::FinalSection* textSection = NULL;
370 for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
371 ld::Internal::FinalSection* sect = *sit;
372 if ( strcmp(sect->sectionName(), "__text") == 0 )
373 textSection = sect;
374 }
375 if ( textSection == NULL )
376 return;
377
378 // assign section offsets to each atom in __text section, watch for thumb branches, and find total size
379 const bool isARM = (opts.architecture() == CPU_TYPE_ARM);
380 bool hasThumbBranches = false;
381 uint64_t offset = 0;
382 for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ++ait) {
383 const ld::Atom* atom = *ait;
384 // check for thumb branches
385 if ( isARM && ~hasThumbBranches ) {
386 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
387 switch ( fit->kind ) {
388 case ld::Fixup::kindStoreThumbBranch22:
389 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
390 hasThumbBranches = true;
391 break;
392 default:
393 break;
394 }
395 }
396 }
397 // align atom
398 ld::Atom::Alignment atomAlign = atom->alignment();
399 uint64_t atomAlignP2 = (1 << atomAlign.powerOf2);
400 uint64_t currentModulus = (offset % atomAlignP2);
401 if ( currentModulus != atomAlign.modulus ) {
402 if ( atomAlign.modulus > currentModulus )
403 offset += atomAlign.modulus-currentModulus;
404 else
405 offset += atomAlign.modulus+atomAlignP2-currentModulus;
406 }
407 (const_cast<ld::Atom*>(atom))->setSectionOffset(offset);
408 offset += atom->size();
409 }
410 uint64_t totalTextSize = offset;
411 if ( totalTextSize < textSizeWhenMightNeedBranchIslands(opts, hasThumbBranches) )
412 return;
413 if (_s_log) fprintf(stderr, "ld: __text section size=%llu, might need branch islands\n", totalTextSize);
414
415 // figure out how many regions of branch islands will be needed
416 const uint32_t kBetweenRegions = maxDistanceBetweenIslands(opts, hasThumbBranches); // place regions of islands every 14MB in __text section
417 const int kIslandRegionsCount = totalTextSize / kBetweenRegions;
418 typedef std::map<TargetAndOffset,const ld::Atom*, TargetAndOffsetComparor> AtomToIsland;
419 AtomToIsland* regionsMap[kIslandRegionsCount];
420 std::vector<const ld::Atom*>* regionsIslands[kIslandRegionsCount];
421 for(int i=0; i < kIslandRegionsCount; ++i) {
422 regionsMap[i] = new AtomToIsland();
423 regionsIslands[i] = new std::vector<const ld::Atom*>();
424 }
425 unsigned int islandCount = 0;
426 if (_s_log) fprintf(stderr, "ld: will use %u branch island regions\n", kIslandRegionsCount);
427
428 // create islands for branches in __text that are out of range
429 for (std::vector<const ld::Atom*>::iterator ait=textSection->atoms.begin(); ait != textSection->atoms.end(); ++ait) {
430 const ld::Atom* atom = *ait;
431 const ld::Atom* target = NULL;
432 uint64_t addend = 0;
433 ld::Fixup* fixupWithTarget = NULL;
434 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
435 if ( fit->firstInCluster() ) {
436 target = NULL;
437 fixupWithTarget = NULL;
438 addend = 0;
439 }
440 switch ( fit->binding ) {
441 case ld::Fixup::bindingNone:
442 case ld::Fixup::bindingByNameUnbound:
443 break;
444 case ld::Fixup::bindingByContentBound:
445 case ld::Fixup::bindingDirectlyBound:
446 target = fit->u.target;
447 fixupWithTarget = fit;
448 break;
449 case ld::Fixup::bindingsIndirectlyBound:
450 target = state.indirectBindingTable[fit->u.bindingIndex];
451 fixupWithTarget = fit;
452 break;
453 }
454 bool haveBranch = false;
455 switch (fit->kind) {
456 case ld::Fixup::kindAddAddend:
457 addend = fit->u.addend;
458 break;
459 case ld::Fixup::kindStoreARMBranch24:
460 case ld::Fixup::kindStoreThumbBranch22:
461 case ld::Fixup::kindStoreTargetAddressARMBranch24:
462 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
463 haveBranch = true;
464 break;
465 default:
466 break;
467 }
468 if ( haveBranch ) {
469 int64_t srcAddr = atom->sectionOffset() + fit->offsetInAtom;
470 int64_t dstAddr = target->sectionOffset() + addend;
471 if ( target->section().type() == ld::Section::typeStub )
472 dstAddr = totalTextSize;
473 int64_t displacement = dstAddr - srcAddr;
474 TargetAndOffset finalTargetAndOffset = { target, addend };
475 const int64_t kBranchLimit = kBetweenRegions;
476 if ( displacement > kBranchLimit ) {
477 // create forward branch chain
478 const ld::Atom* nextTarget = target;
479 for (int i=kIslandRegionsCount-1; i >=0 ; --i) {
480 AtomToIsland* region = regionsMap[i];
481 int64_t islandRegionAddr = kBetweenRegions * (i+1);
482 if ( (srcAddr < islandRegionAddr) && (islandRegionAddr <= dstAddr) ) {
483 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
484 if ( pos == region->end() ) {
485 ld::Atom* island = makeBranchIsland(opts, fit->kind, i, nextTarget, finalTargetAndOffset);
486 (*region)[finalTargetAndOffset] = island;
487 if (_s_log) fprintf(stderr, "added island %s to region %d for %s\n", island->name(), i, atom->name());
488 regionsIslands[i]->push_back(island);
489 ++islandCount;
490 nextTarget = island;
491 }
492 else {
493 nextTarget = pos->second;
494 }
495 }
496 }
497 if (_s_log) fprintf(stderr, "using island %s for branch to %s from %s\n", nextTarget->name(), target->name(), atom->name());
498 fixupWithTarget->u.target = nextTarget;
499 fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
500 }
501 else if ( displacement < (-kBranchLimit) ) {
502 // create back branching chain
503 const ld::Atom* prevTarget = target;
504 for (int i=0; i < kIslandRegionsCount ; ++i) {
505 AtomToIsland* region = regionsMap[i];
506 int64_t islandRegionAddr = kBetweenRegions * (i+1);
507 if ( (dstAddr <= islandRegionAddr) && (islandRegionAddr < srcAddr) ) {
508 AtomToIsland::iterator pos = region->find(finalTargetAndOffset);
509 if ( pos == region->end() ) {
510 ld::Atom* island = makeBranchIsland(opts, fit->kind, i, prevTarget, finalTargetAndOffset);
511 (*region)[finalTargetAndOffset] = island;
512 if (_s_log) fprintf(stderr, "added back island %s to region %d for %s\n", island->name(), i, atom->name());
513 regionsIslands[i]->push_back(island);
514 ++islandCount;
515 prevTarget = island;
516 }
517 else {
518 prevTarget = pos->second;
519 }
520 }
521 }
522 if (_s_log) fprintf(stderr, "using back island %s for %s\n", prevTarget->name(), atom->name());
523 fixupWithTarget->u.target = prevTarget;
524 fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
525 }
526 }
527 }
528 }
529
530
531 // insert islands into __text section and adjust section offsets
532 if ( islandCount > 0 ) {
533 if ( _s_log ) fprintf(stderr, "ld: %u branch islands required in %u regions\n", islandCount, kIslandRegionsCount);
534 std::vector<const ld::Atom*> newAtomList;
535 newAtomList.reserve(textSection->atoms.size()+islandCount);
536 uint64_t islandRegionAddr = kBetweenRegions;;
537 int regionIndex = 0;
538 for (std::vector<const ld::Atom*>::iterator it=textSection->atoms.begin(); it != textSection->atoms.end(); it++) {
539 const ld::Atom* atom = *it;
540 if ( (atom->sectionOffset()+atom->size()) > islandRegionAddr ) {
541 std::vector<const ld::Atom*>* regionIslands = regionsIslands[regionIndex];
542 for (std::vector<const ld::Atom*>::iterator rit=regionIslands->begin(); rit != regionIslands->end(); rit++) {
543 const ld::Atom* islandAtom = *rit;
544 newAtomList.push_back(islandAtom);
545 if ( _s_log ) fprintf(stderr, "inserting island %s into __text section\n", islandAtom->name());
546 }
547 ++regionIndex;
548 islandRegionAddr += kBetweenRegions;
549 }
550 newAtomList.push_back(atom);
551 }
552 // put any remaining islands at end of __text section
553 if ( regionIndex < kIslandRegionsCount ) {
554 std::vector<const ld::Atom*>* regionIslands = regionsIslands[regionIndex];
555 for (std::vector<const ld::Atom*>::iterator rit=regionIslands->begin(); rit != regionIslands->end(); rit++) {
556 const ld::Atom* islandAtom = *rit;
557 newAtomList.push_back(islandAtom);
558 if ( _s_log ) fprintf(stderr, "inserting island %s into __text section\n", islandAtom->name());
559 }
560 }
561 // swap in new list of atoms for __text section
562 textSection->atoms.clear();
563 textSection->atoms = newAtomList;
564 }
565
566}
567
568
569} // namespace branch_island
570} // namespace passes
571} // namespace ld