/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>

#include <vector>
#include <map>
#include <ext/hash_map>

#include "ld.hpp"
#include "tlvp.h"

namespace ld {
namespace passes {
namespace tlvp {
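// This pass handles thread-local variable (TLV) references.  It scans every
// atom's fixups for TLV-style loads, synthesizes 8-byte TLV pointer slots in
// __DATA,__thread_ptrs for references that must be resolved at load time, and
// rewrites loads whose target is known at link time into direct LEAs.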
class File; // forward reference
class TLVEntryAtom : public ld::Atom
{
public:
                        TLVEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport)
            : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                        ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
                        symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)),
            _fixup(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressLittleEndian64, target),
            _target(target)
                { _fixup.weakImport = weakImport; internal.addAtom(*this); }
    virtual const ld::File*     file() const                    { return NULL; }
    virtual bool                translationUnitSource(const char** dir, const char**) const
                                                                { return false; }
    virtual const char*         name() const                    { return _target->name(); }
    virtual uint64_t            size() const                    { return 8; }
    virtual uint64_t            objectAddress() const           { return 0; }
    virtual void                copyRawContent(uint8_t buffer[]) const { }
    virtual void                setScope(Scope)                 { }
    virtual ld::Fixup::iterator fixupsBegin() const             { return &_fixup; }
    virtual ld::Fixup::iterator fixupsEnd() const               { return &((ld::Fixup*)&_fixup)[1]; }
private:
    mutable ld::Fixup           _fixup;
    const ld::Atom*             _target;

    static ld::Section          _s_section;
};

ld::Section TLVEntryAtom::_s_section("__DATA", "__thread_ptrs", ld::Section::typeTLVPointers);
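// Each TLVEntryAtom is one 8-byte slot in __DATA,__thread_ptrs.  Its single
// fixup stores the address of the target thread variable, so for a dylib
// target the slot is bound by dyld at load time, much like a non-lazy pointer.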
static bool optimizable(const Options& opts, const ld::Atom* targetOfTLV)
{
    // cannot do LEA optimization if target is in another dylib
    if ( targetOfTLV->definition() == ld::Atom::definitionProxy )
        return false;
    if ( targetOfTLV->scope() == ld::Atom::scopeGlobal ) {
        // cannot do LEA optimization if target is weak exported symbol
        if ( (targetOfTLV->definition() == ld::Atom::definitionRegular) && (targetOfTLV->combine() == ld::Atom::combineByName) )
            return false;
        // cannot do LEA optimization if target is interposable
        if ( opts.interposable(targetOfTLV->name()) )
            return false;
        // cannot do LEA optimization for flat-namespace
        if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
            return false;
    }
    return true;
}
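// For context: on x86_64 the compiler emits a general TLV access roughly as
// follows (a sketch of the Mach-O TLV convention, not verbatim compiler output):
//
//     movq  _var@TLVP(%rip), %rdi   // load pointer to the TLV descriptor
//     callq *(%rdi)                 // call its thunk; address returned in %rax
//
// When optimizable() returns true, the descriptor lives in this linkage unit
// and cannot be interposed, so the movq through a __thread_ptrs slot can be
// rewritten as a leaq of the descriptor itself (the ...TLVLoadNowLEA fixup
// kinds applied later in doPass()).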
struct AtomByNameSorter
{
    bool operator()(const ld::Atom* left, const ld::Atom* right)
    {
        return (strcmp(left->name(), right->name()) < 0);
    }
};
struct TlVReferenceCluster
{
    const ld::Atom* targetOfTLV;
    ld::Fixup*      fixupWithTarget;
    ld::Fixup*      fixupWithTLVStore;
    bool            optimizable;
};
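// Fixups on an atom arrive in clusters (bracketed by firstInCluster() and
// lastInCluster()).  A TLV reference cluster carries one fixup that names the
// target atom and, when the reference is a TLV-style load, one fixup whose
// kind marks the load instruction to patch.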
void doPass(const Options& opts, ld::Internal& internal)
{
    const bool log = false;

    // only make tlv section in final linked images
    if ( opts.outputKind() == Options::kObjectFile )
        return;
    // walk all atoms and fixups looking for TLV references and add them to list
    std::vector<TlVReferenceCluster> references;
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            TlVReferenceCluster ref;
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                if ( fit->firstInCluster() ) {
                    ref.targetOfTLV = NULL;
                    ref.fixupWithTarget = NULL;
                    ref.fixupWithTLVStore = NULL;
                }
                switch ( fit->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                        ref.targetOfTLV = internal.indirectBindingTable[fit->u.bindingIndex];
                        ref.fixupWithTarget = fit;
                        break;
                    case ld::Fixup::bindingDirectlyBound:
                        ref.targetOfTLV = fit->u.target;
                        ref.fixupWithTarget = fit;
                        break;
                    default:
                        break;
                }
                switch ( fit->kind ) {
                    case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
                    case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
                    case ld::Fixup::kindStoreX86PCRel32TLVLoad:
                    case ld::Fixup::kindStoreX86Abs32TLVLoad:
                        ref.fixupWithTLVStore = fit;
                        break;
                    default:
                        break;
                }
                if ( fit->lastInCluster() && (ref.fixupWithTLVStore != NULL) ) {
                    ref.optimizable = optimizable(opts, ref.targetOfTLV);
                    if (log) fprintf(stderr, "found reference to TLV at %s+0x%X to %s\n",
                                    atom->name(), ref.fixupWithTLVStore->offsetInAtom, ref.targetOfTLV->name());
                    if ( ! opts.canUseThreadLocalVariables() ) {
                        throwf("targeted OS version does not support use of thread local variables in %s", atom->name());
                    }
                    references.push_back(ref);
                }
            }
        }
    }
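    // A weak_import reference allows the symbol to be missing at runtime (dyld
    // binds the pointer to NULL instead of refusing to load the image).  Mixed
    // weak and non-weak references to the same TLV are reconciled below
    // according to the -weak_reference_mismatches setting.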
    // compute which TLV references will be weak_imports
    std::map<const ld::Atom*,bool> weakImportMap;
    for (std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
        if ( !it->optimizable ) {
            // record weak_import attribute
            std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(it->targetOfTLV);
            if ( pos == weakImportMap.end() ) {
                // target not in weakImportMap, so add
                weakImportMap[it->targetOfTLV] = it->fixupWithTarget->weakImport;
                // <rdar://problem/5529626> If only weak_import symbols are used, linker should use LD_LOAD_WEAK_DYLIB
                const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(it->targetOfTLV->file());
                if ( dylib != NULL ) {
                    if ( it->fixupWithTarget->weakImport )
                        (const_cast<ld::dylib::File*>(dylib))->setUsingWeakImportedSymbols();
                    else
                        (const_cast<ld::dylib::File*>(dylib))->setUsingNonWeakImportedSymbols();
                }
            }
            else {
                // target in weakImportMap, check for weakness mismatch
                if ( pos->second != it->fixupWithTarget->weakImport ) {
                    // found mismatch
                    switch ( opts.weakReferenceMismatchTreatment() ) {
                        case Options::kWeakReferenceMismatchError:
                            throwf("mismatching weak references for symbol: %s", it->targetOfTLV->name());
                        case Options::kWeakReferenceMismatchWeak:
                            pos->second = true;
                            break;
                        case Options::kWeakReferenceMismatchNonWeak:
                            pos->second = false;
                            break;
                    }
                }
            }
        }
    }
    // create TLV pointers for TLV references that cannot be optimized
    std::map<const ld::Atom*,ld::Atom*> variableToPointerMap;
    for (std::map<const ld::Atom*,bool>::iterator it=weakImportMap.begin(); it != weakImportMap.end(); ++it) {
        std::map<const ld::Atom*,ld::Atom*>::iterator pos = variableToPointerMap.find(it->first);
        if ( pos == variableToPointerMap.end() ) {
            if (log) fprintf(stderr, "make TLV pointer for %s\n", it->first->name());
            if ( it->first->contentType() != ld::Atom::typeTLV )
                throwf("illegal thread local variable reference to regular symbol %s", it->first->name());
            TLVEntryAtom* tlvp = new TLVEntryAtom(internal, it->first, it->second);
            variableToPointerMap[it->first] = tlvp;
        }
    }
    // sort the new tlvp atoms by name so repeated links produce identical output
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() == ld::Section::typeTLVPointers ) {
            std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
        }
    }
    // update references to use TLV pointers or TLV object directly
    for (std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
        if ( it->optimizable ) {
            // change store to be an LEA instead of a load (mov)
            if (log) fprintf(stderr, "optimizing load of TLV to %s into an LEA\n", it->targetOfTLV->name());
            it->fixupWithTLVStore->binding = ld::Fixup::bindingDirectlyBound;
            it->fixupWithTLVStore->u.target = it->targetOfTLV;
            switch ( it->fixupWithTLVStore->kind ) {
                case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA;
                    break;
                case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA;
                    break;
                case ld::Fixup::kindStoreX86PCRel32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA;
                    break;
                case ld::Fixup::kindStoreX86Abs32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA;
                    break;
                default:
                    assert(0 && "bad store kind for TLV optimization");
            }
        }
        else {
            // change target to be new TLV pointer atom
            if (log) fprintf(stderr, "updating load of TLV to %s to load from TLV pointer\n", it->targetOfTLV->name());
            const ld::Atom* tlvpAtom = variableToPointerMap[it->targetOfTLV];
            assert(tlvpAtom != NULL);
            it->fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
            it->fixupWithTarget->u.target = tlvpAtom;
        }
    }
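    // A thread variable definition in a typeTLVDefs section is a descriptor of
    // three pointer-sized fields (in dyld's runtime, roughly { thunk, key,
    // offset }).  The third field is emitted as the address of the variable's
    // initial content; the loop below retargets those fixups so the field is
    // written instead as an offset into the TLV template (the
    // typeTLVInitialValue / typeTLVZeroFill data).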
    // alter tlv definitions to have an offset as third field
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() != ld::Section::typeTLVDefs )
            continue;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                if ( fit->offsetInAtom != 0 ) {
                    assert(fit->binding == ld::Fixup::bindingDirectlyBound && "thread variable def contains pointer to global");
                    switch ( fit->u.target->contentType() ) {
                        case ld::Atom::typeTLVZeroFill:
                        case ld::Atom::typeTLVInitialValue:
                            switch ( fit->kind ) {
                                case ld::Fixup::kindSetTargetAddress:
                                    fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffset;
                                    break;
                                case ld::Fixup::kindStoreTargetAddressLittleEndian32:
                                    fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32;
                                    break;
                                case ld::Fixup::kindStoreTargetAddressLittleEndian64:
                                    fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64;
                                    break;
                                default:
                                    assert(0 && "bad kind for target in tlv defs");
                            }
                            break;
                        default:
                            assert(0 && "wrong content type for target in tlv defs");
                    }
                }
            }
        }
    }
}
} // namespace tlvp
} // namespace passes
} // namespace ld