/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>

#include <vector>
#include <map>
#include <algorithm>

#include "MachOFileAbstraction.hpp"
#include "ld.hpp"
#include "got.h"
#include "configure.h"

namespace ld {
namespace passes {
namespace got {
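
// This pass scans all atoms for fixups that reference the GOT (global offset
// table).  References whose targets are local and non-interposable are rewritten
// in place into direct LEA-style fixups; every remaining GOT reference gets a
// non-lazy-pointer GOTEntryAtom in the __DATA,__got section and is re-bound to it.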

class File; // forward reference

class GOTEntryAtom : public ld::Atom {
public:
    GOTEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport, bool is64)
        : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
                   symbolTableNotIn, false, false, false, (is64 ? ld::Atom::Alignment(3) : ld::Atom::Alignment(2))),
          _fixup(0, ld::Fixup::k1of1, (is64 ? ld::Fixup::kindStoreTargetAddressLittleEndian64 : ld::Fixup::kindStoreTargetAddressLittleEndian32), target),
          _target(target), _is64(is64)
            { _fixup.weakImport = weakImport; internal.addAtom(*this); }

    virtual const ld::File*     file() const                    { return NULL; }
    virtual const char*         name() const                    { return _target->name(); }
    virtual uint64_t            size() const                    { return (_is64 ? 8 : 4); }
    virtual uint64_t            objectAddress() const           { return 0; }
    virtual void                copyRawContent(uint8_t buffer[]) const { }
    virtual void                setScope(Scope)                 { }
    virtual ld::Fixup::iterator fixupsBegin() const             { return &_fixup; }
    virtual ld::Fixup::iterator fixupsEnd() const               { return &((ld::Fixup*)&_fixup)[1]; }

private:
    mutable ld::Fixup           _fixup;
    const ld::Atom*             _target;
    bool                        _is64;

    static ld::Section          _s_section;
};

ld::Section GOTEntryAtom::_s_section("__DATA", "__got", ld::Section::typeNonLazyPointer);
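
// Returns true if 'fixup' is a GOT reference to 'targetOfGOT'.  On return,
// *optimizable tells the caller whether the GOT load can be rewritten as a
// direct LEA of the target (only possible when the target is local,
// non-interposable, not weak-exported, and not a resolver function).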
static bool gotFixup(const Options& opts, ld::Internal& internal, const ld::Atom* targetOfGOT, const ld::Fixup* fixup, bool* optimizable)
{
    switch (fixup->kind) {
        case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
#if SUPPORT_ARCH_arm64
        case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
        case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
#endif
            // start by assuming this can be optimized
            *optimizable = true;
            // cannot do LEA optimization if target is in another dylib
            if ( targetOfGOT->definition() == ld::Atom::definitionProxy )
                *optimizable = false;
            // cannot do LEA optimization if target in __huge section
            if ( internal.usingHugeSections && (targetOfGOT->size() > 1024*1024)
                    && (   (targetOfGOT->section().type() == ld::Section::typeZeroFill)
                        || (targetOfGOT->section().type() == ld::Section::typeTentativeDefs)) ) {
                *optimizable = false;
            }
            if ( targetOfGOT->scope() == ld::Atom::scopeGlobal ) {
                // cannot do LEA optimization if target is weak exported symbol
                if ( (targetOfGOT->definition() == ld::Atom::definitionRegular) && (targetOfGOT->combine() == ld::Atom::combineByName) ) {
                    switch ( opts.outputKind() ) {
                        case Options::kDynamicExecutable:
                        case Options::kDynamicLibrary:
                        case Options::kDynamicBundle:
                        case Options::kKextBundle:
                            *optimizable = false;
                            break;
                        case Options::kStaticExecutable:
                        case Options::kDyld:
                        case Options::kPreload:
                        case Options::kObjectFile:
                            break;
                    }
                }
                // cannot do LEA optimization if target is interposable
                if ( opts.interposable(targetOfGOT->name()) )
                    *optimizable = false;
                // cannot do LEA optimization if target is resolver function
                if ( targetOfGOT->contentType() == ld::Atom::typeResolver )
                    *optimizable = false;
                // cannot do LEA optimization for flat-namespace
                if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
                    *optimizable = false;
            }
            else if ( targetOfGOT->scope() == ld::Atom::scopeLinkageUnit ) {
                // <rdar://problem/12379969> don't do optimization if target is in custom segment
                if ( opts.sharedRegionEligible() ) {
                    const char* segName = targetOfGOT->section().segmentName();
                    if ( (strcmp(segName, "__TEXT") != 0) && (strcmp(segName, "__DATA") != 0) ) {
                        *optimizable = false;
                    }
                }
            }
            return true;
        case ld::Fixup::kindStoreX86PCRel32GOT:
#if SUPPORT_ARCH_arm64
        case ld::Fixup::kindStoreARM64PCRelToGOT:
#endif
            *optimizable = false;
            return true;
        case ld::Fixup::kindNoneGroupSubordinatePersonality:
            *optimizable = false;
            return true;
        default:
            break;
    }

    return false;
}

struct AtomByNameSorter
{
    bool operator()(const ld::Atom* left, const ld::Atom* right)
    {
        return (strcmp(left->name(), right->name()) < 0);
    }
};
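
// doPass() drives the GOT pass:
//   1. collect any pre-existing non-lazy-pointer atoms into gotMap
//   2. walk every fixup; GOT references to optimizable targets are rewritten
//      in place to LEA-style kinds, the rest are recorded in gotMap/weakImportMap
//   3. create a GOTEntryAtom for each recorded target that has no entry yet
//   4. re-bind the non-optimized references to their GOT entries
//   5. sort the __got section so the output link is deterministic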
void doPass(const Options& opts, ld::Internal& internal)
{
    const bool log = false;

    // only make got section in final linked images
    if ( opts.outputKind() == Options::kObjectFile )
        return;

    // pre-fill gotMap with existing non-lazy pointers
    std::map<const ld::Atom*, const ld::Atom*> gotMap;
    for (ld::Internal::FinalSection* sect : internal.sections) {
        if ( sect->type() != ld::Section::typeNonLazyPointer )
            continue;
        for (const ld::Atom* atom : sect->atoms) {
            const ld::Atom* target = NULL;
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                switch (fit->kind) {
                    case ld::Fixup::kindStoreTargetAddressLittleEndian64:
                    case ld::Fixup::kindStoreTargetAddressLittleEndian32:
                        switch ( fit->binding ) {
                            case ld::Fixup::bindingsIndirectlyBound:
                                target = internal.indirectBindingTable[fit->u.bindingIndex];
                                break;
                            case ld::Fixup::bindingDirectlyBound:
                                target = fit->u.target;
                                break;
                            default:
                                fprintf(stderr, "non-pointer is got entry\n");
                                break;
                        }
                        break;
                    default:
                        break;
                }
            }
            if ( target != NULL ) {
                if (log) fprintf(stderr, "found existing got entry to %s\n", target->name());
                gotMap[target] = atom;
            }
        }
    }

    // walk all atoms and fixups looking for GOT-able references
    // don't create GOT atoms during this loop because that could invalidate the sections iterator
    std::vector<const ld::Atom*> atomsReferencingGOT;
    std::map<const ld::Atom*,bool> weakImportMap;
    atomsReferencingGOT.reserve(128);
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            bool atomUsesGOT = false;
            const ld::Atom* targetOfGOT = NULL;
            bool targetIsWeakImport = false;
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                if ( fit->firstInCluster() )
                    targetOfGOT = NULL;
                switch ( fit->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                        targetOfGOT = internal.indirectBindingTable[fit->u.bindingIndex];
                        targetIsWeakImport = fit->weakImport;
                        break;
                    case ld::Fixup::bindingDirectlyBound:
                        targetOfGOT = fit->u.target;
                        targetIsWeakImport = fit->weakImport;
                        break;
                    default:
                        break;
                }
                bool optimizable;
                if ( !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
                    continue;
                if ( optimizable ) {
                    // change from load of GOT entry to lea of target
                    if ( log ) fprintf(stderr, "optimized GOT usage in %s to %s\n", atom->name(), targetOfGOT->name());
                    switch ( fit->binding ) {
                        case ld::Fixup::bindingsIndirectlyBound:
                        case ld::Fixup::bindingDirectlyBound:
                            fit->binding = ld::Fixup::bindingDirectlyBound;
                            fit->u.target = targetOfGOT;
                            switch ( fit->kind ) {
                                case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
                                    fit->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA;
                                    break;
#if SUPPORT_ARCH_arm64
                                case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
                                    fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21;
                                    break;
                                case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
                                    fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12;
                                    break;
#endif
                                default:
                                    assert(0 && "unsupported GOT reference kind");
                                    break;
                            }
                            break;
                        default:
                            assert(0 && "unsupported GOT reference");
                            break;
                    }
                }
                else {
                    // remember that we need to use GOT in this function
                    if ( log ) fprintf(stderr, "found GOT use in %s\n", atom->name());
                    if ( !atomUsesGOT ) {
                        atomsReferencingGOT.push_back(atom);
                        atomUsesGOT = true;
                    }
                    if ( gotMap.count(targetOfGOT) == 0 )
                        gotMap[targetOfGOT] = NULL;
                    // record weak_import attribute
                    std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(targetOfGOT);
                    if ( pos == weakImportMap.end() ) {
                        // target not in weakImportMap, so add
                        if ( log ) fprintf(stderr, "weakImportMap[%s] = %d\n", targetOfGOT->name(), targetIsWeakImport);
                        weakImportMap[targetOfGOT] = targetIsWeakImport;
                    }
                    else {
                        // target in weakImportMap, check for weakness mismatch
                        if ( pos->second != targetIsWeakImport ) {
                            // found mismatch
                            switch ( opts.weakReferenceMismatchTreatment() ) {
                                case Options::kWeakReferenceMismatchError:
                                    throwf("mismatching weak references for symbol: %s", targetOfGOT->name());
                                case Options::kWeakReferenceMismatchWeak:
                                    pos->second = true;
                                    break;
                                case Options::kWeakReferenceMismatchNonWeak:
                                    pos->second = false;
                                    break;
                            }
                        }
                    }
                }
            }
        }
    }

    // determine GOT entry size from the target architecture
    bool is64 = false;
    switch ( opts.architecture() ) {
#if SUPPORT_ARCH_i386
        case CPU_TYPE_I386:
            is64 = false;
            break;
#endif
#if SUPPORT_ARCH_x86_64
        case CPU_TYPE_X86_64:
            is64 = true;
            break;
#endif
#if SUPPORT_ARCH_arm_any
        case CPU_TYPE_ARM:
            is64 = false;
            break;
#endif
#if SUPPORT_ARCH_arm64
        case CPU_TYPE_ARM64:
            is64 = true;
            break;
#endif
    }

    // make GOT entries
    for (auto& entry : gotMap) {
        if ( entry.second == NULL ) {
            entry.second = new GOTEntryAtom(internal, entry.first, weakImportMap[entry.first], is64);
            if (log) fprintf(stderr, "making new GOT slot for %s, gotMap[%p] = %p\n", entry.first->name(), entry.first, entry.second);
        }
    }

    // update atoms to use GOT entries
    for (std::vector<const ld::Atom*>::iterator it=atomsReferencingGOT.begin(); it != atomsReferencingGOT.end(); ++it) {
        const ld::Atom* atom = *it;
        const ld::Atom* targetOfGOT = NULL;
        ld::Fixup::iterator fitThatSetTarget = NULL;
        for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
            if ( fit->firstInCluster() ) {
                targetOfGOT = NULL;
                fitThatSetTarget = NULL;
            }
            switch ( fit->binding ) {
                case ld::Fixup::bindingsIndirectlyBound:
                    targetOfGOT = internal.indirectBindingTable[fit->u.bindingIndex];
                    fitThatSetTarget = fit;
                    break;
                case ld::Fixup::bindingDirectlyBound:
                    targetOfGOT = fit->u.target;
                    fitThatSetTarget = fit;
                    break;
                default:
                    break;
            }
            bool optimizable;
            if ( (targetOfGOT == NULL) || !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
                continue;
            if ( !optimizable ) {
                // GOT use not optimized away, update to bind to GOT entry
                assert(fitThatSetTarget != NULL);
                switch ( fitThatSetTarget->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                    case ld::Fixup::bindingDirectlyBound:
                        if ( log ) fprintf(stderr, "updating GOT use in %s to %s\n", atom->name(), targetOfGOT->name());
                        fitThatSetTarget->binding = ld::Fixup::bindingDirectlyBound;
                        fitThatSetTarget->u.target = gotMap[targetOfGOT];
                        break;
                    default:
                        assert(0 && "unsupported GOT reference");
                        break;
                }
            }
        }
    }

    // sort new atoms so links are consistent
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() == ld::Section::typeNonLazyPointer ) {
            std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
        }
    }
}


} // namespace got
} // namespace passes
} // namespace ld