/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include <vector>
#include <map>
#include <algorithm>

#include "ld.hpp"
#include "got.h"
#include "MachOFileAbstraction.hpp"
#include "configure.h"

namespace ld {
namespace passes {
namespace got {
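
// This pass walks every atom's fixups looking for references that go through
// the global offset table.  Where the target is provably non-interposable
// within this linkage unit, the GOT load is rewritten in place to a direct
// (LEA-style) address computation; every remaining GOT target gets a
// GOTEntryAtom, a pointer-sized non-lazy-pointer slot in __DATA,__got.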
class File; // forward reference
class GOTEntryAtom : public ld::Atom
{
public:
    GOTEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport, bool is64)
        : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
                   symbolTableNotIn, false, false, false, (is64 ? ld::Atom::Alignment(3) : ld::Atom::Alignment(2))),
          _fixup(0, ld::Fixup::k1of1, (is64 ? ld::Fixup::kindStoreTargetAddressLittleEndian64 : ld::Fixup::kindStoreTargetAddressLittleEndian32), target),
          _target(target), _is64(is64)
        { _fixup.weakImport = weakImport; internal.addAtom(*this); }
    virtual const ld::File*     file() const                    { return NULL; }
    virtual const char*         name() const                    { return _target->name(); }
    virtual uint64_t            size() const                    { return (_is64 ? 8 : 4); }
    virtual uint64_t            objectAddress() const           { return 0; }
    virtual void                copyRawContent(uint8_t buffer[]) const { }
    virtual void                setScope(Scope)                 { }
    virtual ld::Fixup::iterator fixupsBegin() const             { return &_fixup; }
    virtual ld::Fixup::iterator fixupsEnd() const               { return &((ld::Fixup*)&_fixup)[1]; }
private:
    mutable ld::Fixup   _fixup;     // the one fixup that stores the target's address into this slot
    const ld::Atom*     _target;
    bool                _is64;

    static ld::Section  _s_section;
};
ld::Section GOTEntryAtom::_s_section("__DATA", "__got", ld::Section::typeNonLazyPointer);
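
// gotFixup() returns true if the fixup references the GOT.  For GOT-load
// kinds it also reports, via *optimizable, whether the load could instead be
// a direct address computation.  Illustrative encodings (an assumption about
// the eventual instruction rewrite, which happens later when fixups are
// applied, not in this pass):
//
//   x86_64:  movq _sym@GOTPCREL(%rip), %rax  =>  leaq _sym(%rip), %rax
//   arm64:   adrp x0, _sym@GOTPAGE           =>  adrp x0, _sym@PAGE
//            ldr  x0, [x0, _sym@GOTPAGEOFF]  =>  add  x0, x0, _sym@PAGEOFF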
static bool gotFixup(const Options& opts, ld::Internal& internal, const ld::Atom* targetOfGOT, const ld::Fixup* fixup, bool* optimizable)
{
    switch (fixup->kind) {
        case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
#if SUPPORT_ARCH_arm64
        case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
        case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
#endif
            // start by assuming this can be optimized
            *optimizable = true;
            // cannot do LEA optimization if target is in another dylib
            if ( targetOfGOT->definition() == ld::Atom::definitionProxy )
                *optimizable = false;
            // cannot do LEA optimization if target in __huge section
            if ( internal.usingHugeSections && (targetOfGOT->size() > 1024*1024)
                    && (   (targetOfGOT->section().type() == ld::Section::typeZeroFill)
                        || (targetOfGOT->section().type() == ld::Section::typeTentativeDefs)) ) {
                *optimizable = false;
            }
            if ( targetOfGOT->scope() == ld::Atom::scopeGlobal ) {
                // cannot do LEA optimization if target is weak exported symbol
                if ( (targetOfGOT->definition() == ld::Atom::definitionRegular) && (targetOfGOT->combine() == ld::Atom::combineByName) ) {
                    switch ( opts.outputKind() ) {
                        case Options::kDynamicExecutable:
                        case Options::kDynamicLibrary:
                        case Options::kDynamicBundle:
                        case Options::kKextBundle:
                            *optimizable = false;
                            break;
                        case Options::kStaticExecutable:
                        case Options::kDyld:
                        case Options::kPreload:
                        case Options::kObjectFile:
                            break;
                    }
                }
                // cannot do LEA optimization if target is interposable
                if ( opts.interposable(targetOfGOT->name()) )
                    *optimizable = false;
                // cannot do LEA optimization if target is resolver function
                if ( targetOfGOT->contentType() == ld::Atom::typeResolver )
                    *optimizable = false;
                // cannot do LEA optimization for flat-namespace
                if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
                    *optimizable = false;
            }
            else if ( targetOfGOT->scope() == ld::Atom::scopeLinkageUnit ) {
                // <rdar://problem/12379969> don't do optimization if target is in custom segment
                if ( opts.sharedRegionEligible() ) {
                    const char* segName = targetOfGOT->section().segmentName();
                    if ( (strcmp(segName, "__TEXT") != 0) && (strcmp(segName, "__DATA") != 0) ) {
                        *optimizable = false;
                    }
                }
            }
            return true;
        case ld::Fixup::kindStoreX86PCRel32GOT:
#if SUPPORT_ARCH_arm64
        case ld::Fixup::kindStoreARM64PCRelToGOT:
#endif
            *optimizable = false;
            return true;
        case ld::Fixup::kindNoneGroupSubordinatePersonality:
            *optimizable = false;
            return true;
        default:
            break;
    }

    return false;
}
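
// comparator used to give __got a deterministic, name-sorted atom order, so
// that relinking the same inputs yields byte-identical output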
struct AtomByNameSorter
{
    bool operator()(const ld::Atom* left, const ld::Atom* right)
    {
        return (strcmp(left->name(), right->name()) < 0);
    }
};
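
// doPass() runs in two phases.  Phase one scans every fixup: GOT loads that
// can be optimized are converted in place to LEA-style fixup kinds, and the
// remaining GOT targets are collected in gotMap (their weak_import attribute
// in weakImportMap).  Phase two creates one GOTEntryAtom per unique target,
// then retargets the recorded fixups at the new atoms.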
void doPass(const Options& opts, ld::Internal& internal)
{
    const bool log = false;

    // only make got section in final linked images
    if ( opts.outputKind() == Options::kObjectFile )
        return;
    // walk all atoms and fixups looking for GOT-able references
    // don't create GOT atoms during this loop because that could invalidate the sections iterator
    std::vector<const ld::Atom*>         atomsReferencingGOT;
    std::map<const ld::Atom*, ld::Atom*> gotMap;
    std::map<const ld::Atom*, bool>      weakImportMap;
    atomsReferencingGOT.reserve(128);
    for (std::vector<ld::Internal::FinalSection*>::iterator sit = internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            bool atomUsesGOT = false;
            const ld::Atom* targetOfGOT = NULL;
            bool targetIsWeakImport = false;
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end = atom->fixupsEnd(); fit != end; ++fit) {
                if ( fit->firstInCluster() )
                    targetOfGOT = NULL;
                switch ( fit->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                        targetOfGOT = internal.indirectBindingTable[fit->u.bindingIndex];
                        targetIsWeakImport = fit->weakImport;
                        break;
                    case ld::Fixup::bindingDirectlyBound:
                        targetOfGOT = fit->u.target;
                        targetIsWeakImport = fit->weakImport;
                        break;
                    default:
                        break;
                }
                bool optimizable;
                if ( !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
                    continue;
                if ( optimizable ) {
                    // change from load of GOT entry to lea of target
                    if ( log ) fprintf(stderr, "optimized GOT usage in %s to %s\n", atom->name(), targetOfGOT->name());
                    switch ( fit->binding ) {
                        case ld::Fixup::bindingsIndirectlyBound:
                        case ld::Fixup::bindingDirectlyBound:
                            fit->binding = ld::Fixup::bindingDirectlyBound;
                            fit->u.target = targetOfGOT;
                            switch ( fit->kind ) {
                                case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
                                    fit->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA;
                                    break;
#if SUPPORT_ARCH_arm64
                                case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
                                    fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21;
                                    break;
                                case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
                                    fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12;
                                    break;
#endif
                                default:
                                    assert(0 && "unsupported GOT reference kind");
                                    break;
                            }
                            break;
                        default:
                            assert(0 && "unsupported GOT reference");
                            break;
                    }
                }
                else {
                    // remember that we need to use GOT in this function
                    if ( log ) fprintf(stderr, "found GOT use in %s to %s\n", atom->name(), targetOfGOT->name());
                    if ( !atomUsesGOT ) {
                        atomsReferencingGOT.push_back(atom);
                        atomUsesGOT = true;
                    }
                    gotMap[targetOfGOT] = NULL;
                    // record weak_import attribute
                    std::map<const ld::Atom*, bool>::iterator pos = weakImportMap.find(targetOfGOT);
                    if ( pos == weakImportMap.end() ) {
                        // target not in weakImportMap, so add
                        if ( log ) fprintf(stderr, "weakImportMap[%s] = %d\n", targetOfGOT->name(), targetIsWeakImport);
                        weakImportMap[targetOfGOT] = targetIsWeakImport;
                    }
                    else {
                        // target in weakImportMap, check for weakness mismatch
                        if ( pos->second != targetIsWeakImport ) {
                            // found mismatch
                            switch ( opts.weakReferenceMismatchTreatment() ) {
                                case Options::kWeakReferenceMismatchError:
                                    throwf("mismatching weak references for symbol: %s", targetOfGOT->name());
                                case Options::kWeakReferenceMismatchWeak:
                                    pos->second = true;
                                    break;
                                case Options::kWeakReferenceMismatchNonWeak:
                                    pos->second = false;
                                    break;
                            }
                        }
                    }
                }
            }
        }
    }
    // determine pointer size for this architecture's GOT slots
    bool is64 = false;
    switch ( opts.architecture() ) {
#if SUPPORT_ARCH_i386
        case CPU_TYPE_I386:
            is64 = false;
            break;
#endif
#if SUPPORT_ARCH_x86_64
        case CPU_TYPE_X86_64:
            is64 = true;
            break;
#endif
#if SUPPORT_ARCH_arm_any
        case CPU_TYPE_ARM:
            is64 = false;
            break;
#endif
#if SUPPORT_ARCH_arm64
        case CPU_TYPE_ARM64:
            is64 = true;
            break;
#endif
    }
    // make GOT entries
    for (std::map<const ld::Atom*, ld::Atom*>::iterator it = gotMap.begin(); it != gotMap.end(); ++it) {
        it->second = new GOTEntryAtom(internal, it->first, weakImportMap[it->first], is64);
    }
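
    // note: GOTEntryAtom's constructor calls internal.addAtom(), so each new
    // entry is already part of the __got section at this point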
    // update atoms to use GOT entries
    for (std::vector<const ld::Atom*>::iterator it = atomsReferencingGOT.begin(); it != atomsReferencingGOT.end(); ++it) {
        const ld::Atom* atom = *it;
        const ld::Atom* targetOfGOT = NULL;
        ld::Fixup::iterator fitThatSetTarget = NULL;
        for (ld::Fixup::iterator fit = atom->fixupsBegin(), end = atom->fixupsEnd(); fit != end; ++fit) {
            if ( fit->firstInCluster() ) {
                targetOfGOT = NULL;
                fitThatSetTarget = NULL;
            }
            switch ( fit->binding ) {
                case ld::Fixup::bindingsIndirectlyBound:
                    targetOfGOT = internal.indirectBindingTable[fit->u.bindingIndex];
                    fitThatSetTarget = fit;
                    break;
                case ld::Fixup::bindingDirectlyBound:
                    targetOfGOT = fit->u.target;
                    fitThatSetTarget = fit;
                    break;
                default:
                    break;
            }
            bool optimizable;
            if ( (targetOfGOT == NULL) || !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
                continue;
            if ( !optimizable ) {
                // GOT use not optimized away, update to bind to GOT entry
                assert(fitThatSetTarget != NULL);
                switch ( fitThatSetTarget->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                    case ld::Fixup::bindingDirectlyBound:
                        fitThatSetTarget->binding = ld::Fixup::bindingDirectlyBound;
                        fitThatSetTarget->u.target = gotMap[targetOfGOT];
                        break;
                    default:
                        assert(0 && "unsupported GOT reference");
                        break;
                }
            }
        }
    }
    // sort new atoms so links are consistent
    for (std::vector<ld::Internal::FinalSection*>::iterator sit = internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() == ld::Section::typeNonLazyPointer ) {
            std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
        }
    }
}

} // namespace got
} // namespace passes
} // namespace ld