/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include <algorithm>
#include <vector>
#include <map>

#include "ld.hpp"
#include "tlvp.h"

namespace ld {
namespace passes {
namespace tlvp {

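//
// This pass handles thread-local variable (TLV) references. It scans every
// atom's fixups for TLV loads, decides per reference whether the indirection
// can be optimized away, synthesizes __thread_ptrs entries for the references
// that must stay indirect, and rewrites TLV variable definitions to use
// template offsets.
//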
class File;  // forward reference
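
//
// A TLVEntryAtom is a synthesized 8-byte slot in the __DATA,__thread_ptrs
// section holding the address of one thread-local variable. The single
// LittleEndian64 fixup below stores the target's address into the slot; for
// targets in other dylibs that address is presumably bound in at load time,
// which is why the fixup carries a weakImport attribute.
//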
class TLVEntryAtom : public ld::Atom {
public:
                                TLVEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport)
                : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                            ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
                            symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)),
                _fixup(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressLittleEndian64, target),
                _target(target)
                    { _fixup.weakImport = weakImport; internal.addAtom(*this); }

    virtual const ld::File*     file() const                    { return NULL; }
    virtual const char*         name() const                    { return _target->name(); }
    virtual uint64_t            size() const                    { return 8; }
    virtual uint64_t            objectAddress() const           { return 0; }
    virtual void                copyRawContent(uint8_t buffer[]) const { }
    virtual void                setScope(Scope)                 { }
    virtual ld::Fixup::iterator fixupsBegin() const             { return &_fixup; }
    virtual ld::Fixup::iterator fixupsEnd() const               { return &((ld::Fixup*)&_fixup)[1]; }

private:
    mutable ld::Fixup           _fixup;
    const ld::Atom*             _target;

    static ld::Section          _s_section;
};
ld::Section TLVEntryAtom::_s_section("__DATA", "__thread_ptrs", ld::Section::typeTLVPointers);

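//
// The "LEA optimization": code normally reaches a thread-local variable by
// loading (mov) its descriptor address from a __thread_ptrs slot. When the
// target's address is fixed within this linkage unit, the linker can turn
// that load into an lea of the variable itself and skip the pointer slot
// entirely. optimizable() rejects every case where the address could change
// underneath us: targets in other dylibs, weak (coalescable) exports,
// interposable symbols, and flat-namespace lookups.
//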
static bool optimizable(const Options& opts, const ld::Atom* targetOfTLV)
{
    // cannot do LEA optimization if target is in another dylib
    if ( targetOfTLV->definition() == ld::Atom::definitionProxy )
        return false;
    if ( targetOfTLV->scope() == ld::Atom::scopeGlobal ) {
        // cannot do LEA optimization if target is weak exported symbol
        if ( (targetOfTLV->definition() == ld::Atom::definitionRegular) && (targetOfTLV->combine() == ld::Atom::combineByName) )
            return false;
        // cannot do LEA optimization if target is interposable
        if ( opts.interposable(targetOfTLV->name()) )
            return false;
        // cannot do LEA optimization for flat-namespace
        if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
            return false;
    }
    return true;
}
struct AtomByNameSorter
{
    bool operator()(const ld::Atom* left, const ld::Atom* right)
    {
        return (strcmp(left->name(), right->name()) < 0);
    }
};
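
//
// Fixups attached to an atom come in clusters that together describe one
// reference; firstInCluster()/lastInCluster() delimit a cluster. For each
// TLV reference this records which fixup names the target and which fixup
// is the TLV load that may be rewritten.
//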
struct TlVReferenceCluster
{
    const ld::Atom*  targetOfTLV;
    ld::Fixup*       fixupWithTarget;
    ld::Fixup*       fixupWithTLVStore;
    bool             optimizable;
};

void doPass(const Options& opts, ld::Internal& internal)
{
    const bool log = false;

    // only make tlv section in final linked images
    if ( opts.outputKind() == Options::kObjectFile )
        return;
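    // Everything below therefore runs only when producing a final linked
    // image; relocatable (-r) output keeps its TLV fixups as-is.
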
    // walk all atoms and fixups looking for TLV references and add them to list
    std::vector<TlVReferenceCluster> references;
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            TlVReferenceCluster ref;
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                if ( fit->firstInCluster() ) {
                    ref.targetOfTLV = NULL;
                    ref.fixupWithTarget = NULL;
                    ref.fixupWithTLVStore = NULL;
                }
                switch ( fit->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                        ref.targetOfTLV = internal.indirectBindingTable[fit->u.bindingIndex];
                        ref.fixupWithTarget = fit;
                        break;
                    case ld::Fixup::bindingDirectlyBound:
                        ref.targetOfTLV = fit->u.target;
                        ref.fixupWithTarget = fit;
                        break;
                    default:
                        break;
                }
                switch ( fit->kind ) {
                    case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
                    case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
                    case ld::Fixup::kindStoreX86PCRel32TLVLoad:
                    case ld::Fixup::kindStoreX86Abs32TLVLoad:
#if SUPPORT_ARCH_arm64
                    case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
                    case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
#endif
                        ref.fixupWithTLVStore = fit;
                        break;
                    default:
                        break;
                }
                if ( fit->lastInCluster() && (ref.fixupWithTLVStore != NULL) ) {
                    ref.optimizable = optimizable(opts, ref.targetOfTLV);
                    if (log) fprintf(stderr, "found reference to TLV at %s+0x%X to %s\n",
                                    atom->name(), ref.fixupWithTLVStore->offsetInAtom, ref.targetOfTLV->name());
                    if ( ! opts.canUseThreadLocalVariables() ) {
                        throwf("targeted OS version does not support use of thread local variables in %s", atom->name());
                    }
                    references.push_back(ref);
                }
            }
        }
    }
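
    // A TLV pointer slot that binds to another dylib may be a weak import
    // (resolving to NULL when the symbol is absent), so the weak_import
    // attribute must agree across all non-optimizable references to the same
    // target; mismatches are handled per the -weak_reference_mismatches option.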
    // compute which TLV references will be weak_imports
    std::map<const ld::Atom*,bool> weakImportMap;
    for(std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
        if ( !it->optimizable ) {
            // record weak_import attribute
            std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(it->targetOfTLV);
            if ( pos == weakImportMap.end() ) {
                // target not in weakImportMap, so add
                weakImportMap[it->targetOfTLV] = it->fixupWithTarget->weakImport;
            }
            else {
                // target in weakImportMap, check for weakness mismatch
                if ( pos->second != it->fixupWithTarget->weakImport ) {
                    // found mismatch
                    switch ( opts.weakReferenceMismatchTreatment() ) {
                        case Options::kWeakReferenceMismatchError:
                            throwf("mismatching weak references for symbol: %s", it->targetOfTLV->name());
                        case Options::kWeakReferenceMismatchWeak:
                            pos->second = true;
                            break;
                        case Options::kWeakReferenceMismatchNonWeak:
                            pos->second = false;
                            break;
                    }
                }
            }
        }
    }
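
    // Each non-optimizable target gets exactly one pointer slot; all
    // references to the same variable share it via variableToPointerMap.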
    // create TLV pointers for TLV references that cannot be optimized
    std::map<const ld::Atom*,ld::Atom*> variableToPointerMap;
    for(std::map<const ld::Atom*,bool>::iterator it=weakImportMap.begin(); it != weakImportMap.end(); ++it) {
        std::map<const ld::Atom*,ld::Atom*>::iterator pos = variableToPointerMap.find(it->first);
        if ( pos == variableToPointerMap.end() ) {
            if (log) fprintf(stderr, "make TLV pointer for %s\n", it->first->name());
            if ( it->first->contentType() != ld::Atom::typeTLV )
                throwf("illegal thread local variable reference to regular symbol %s", it->first->name());
            TLVEntryAtom* tlvp = new TLVEntryAtom(internal, it->first, it->second);
            variableToPointerMap[it->first] = tlvp;
        }
    }
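
    // weakImportMap is keyed by atom address, so the TLVEntryAtoms above were
    // created in pointer order, which can vary run to run; sorting by name
    // keeps the section ordering stable across links.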
    // sort new tlvp atoms so links are consistent
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() == ld::Section::typeTLVPointers ) {
            std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
        }
    }
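
    // Two rewrites follow: an optimizable reference has its load fixup
    // retargeted directly at the TLV and switched to a ...NowLEA kind; a
    // non-optimizable reference keeps its load but is redirected at the
    // TLVEntryAtom created above.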
    // update references to use TLV pointers or TLV object directly
    for(std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
        if ( it->optimizable ) {
            // change store to be LEA instead of load (mov)
            if (log) fprintf(stderr, "optimizing load of TLV to %s into an LEA\n", it->targetOfTLV->name());
            it->fixupWithTLVStore->binding = ld::Fixup::bindingDirectlyBound;
            it->fixupWithTLVStore->u.target = it->targetOfTLV;
            switch ( it->fixupWithTLVStore->kind ) {
                case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA;
                    break;
                case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA;
                    break;
                case ld::Fixup::kindStoreX86PCRel32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA;
                    break;
                case ld::Fixup::kindStoreX86Abs32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA;
                    break;
#if SUPPORT_ARCH_arm64
                case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21;
                    break;
                case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12;
                    break;
#endif
                default:
                    assert(0 && "bad store kind for TLV optimization");
            }
        }
        else {
            // change target to be new TLV pointer atom
            if (log) fprintf(stderr, "updating load of TLV to %s to load from TLV pointer\n", it->targetOfTLV->name());
            const ld::Atom* tlvpAtom = variableToPointerMap[it->targetOfTLV];
            assert(tlvpAtom != NULL);
            it->fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
            it->fixupWithTarget->u.target = tlvpAtom;
        }
    }
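
    // A Mach-O thread-local definition is a three-word descriptor
    // (thunk, key, offset). The fixups at offsetInAtom != 0 fill the third
    // word, so their kinds are converted below from absolute addresses to
    // offsets within the TLV template (the zero-fill/initial-value data).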
    // alter tlv definitions to have an offset as third field
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() != ld::Section::typeTLVDefs )
            continue;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            if ( ! opts.canUseThreadLocalVariables() ) {
                throwf("targeted OS version does not support use of thread local variables in %s", atom->name());
            }
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                if ( fit->offsetInAtom != 0 ) {
                    assert(fit->binding == ld::Fixup::bindingDirectlyBound && "thread variable def contains pointer to global");
                    switch( fit->u.target->contentType() ) {
                        case ld::Atom::typeTLVZeroFill:
                        case ld::Atom::typeTLVInitialValue:
                            switch ( fit->kind ) {
                                case ld::Fixup::kindSetTargetAddress:
                                    fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffset;
                                    break;
                                case ld::Fixup::kindStoreTargetAddressLittleEndian32:
                                    fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32;
                                    break;
                                case ld::Fixup::kindStoreTargetAddressLittleEndian64:
                                    fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64;
                                    break;
                                default:
                                    assert(0 && "bad kind for target in tlv defs");
                            }
                            break;
                        default:
                            assert(0 && "wrong content type for target in tlv defs");
                    }
                }
            }
        }
    }
}

} // namespace tlvp
} // namespace passes
} // namespace ld