/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include <vector>
#include <map>
#include <algorithm>
#include <ext/hash_map>

#include "ld.hpp"
#include "tlvp.h"

namespace ld {
namespace passes {
namespace tlvp {
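//
// This pass gathers all thread-local-variable (TLV) references in the image.
// References whose target can be resolved at static link time are rewritten
// in place (the TLV-load instruction becomes an LEA); all others get an
// 8-byte entry in a __thread_ptrs section that dyld binds at load time,
// much like a non-lazy (GOT) pointer.
//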
class File; // forward reference
// One pointer-sized slot in __thread_ptrs, bound by dyld to the target's
// TLV descriptor.
class TLVEntryAtom : public ld::Atom
{
public:
	TLVEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport)
		: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
					ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
					symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)),
		  _fixup(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressLittleEndian64, target),
		  _target(target)
			{ _fixup.weakImport = weakImport; internal.addAtom(*this); }

	virtual const ld::File*			file() const					{ return NULL; }
	virtual bool					translationUnitSource(const char** dir, const char**) const { return false; }
	virtual const char*				name() const					{ return _target->name(); }
	virtual uint64_t				size() const					{ return 8; }
	virtual uint64_t				objectAddress() const			{ return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const { }
	virtual void					setScope(Scope)					{ }
	virtual ld::Fixup::iterator		fixupsBegin() const				{ return &_fixup; }
	virtual ld::Fixup::iterator		fixupsEnd() const				{ return &((ld::Fixup*)&_fixup)[1]; }

private:
	mutable ld::Fixup				_fixup;
	const ld::Atom*					_target;

	static ld::Section				_s_section;
};
ld::Section TLVEntryAtom::_s_section("__DATA", "__thread_ptrs", ld::Section::typeTLVPointers);
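//
// Background for optimizable() below: on x86-64, the compiler references a
// thread-local variable through a movq that loads a descriptor pointer, e.g.
//
//     movq   _gVar@TLVP(%rip), %rdi      // load pointer to _gVar's TLV descriptor
//     callq  *(%rdi)                     // call its thunk; address comes back in %rax
//
// When the definition lives in this linkage unit and cannot change at load
// time (not a dylib proxy, not weak/coalesced, not interposable, two-level
// namespace), the linker can rewrite the movq into a leaq of the descriptor
// itself and drop the __thread_ptrs indirection. (_gVar is an illustrative
// name, not something this file defines.)
//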
static bool optimizable(const Options& opts, const ld::Atom* targetOfTLV)
{
	// cannot do LEA optimization if target is in another dylib
	if ( targetOfTLV->definition() == ld::Atom::definitionProxy )
		return false;
	if ( targetOfTLV->scope() == ld::Atom::scopeGlobal ) {
		// cannot do LEA optimization if target is weak exported symbol
		if ( (targetOfTLV->definition() == ld::Atom::definitionRegular) && (targetOfTLV->combine() == ld::Atom::combineByName) )
			return false;
		// cannot do LEA optimization if target is interposable
		if ( opts.interposable(targetOfTLV->name()) )
			return false;
		// cannot do LEA optimization for flat-namespace
		if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
			return false;
	}
	return true;
}
struct AtomByNameSorter
{
	bool operator()(const ld::Atom* left, const ld::Atom* right)
	{
		return (strcmp(left->name(), right->name()) < 0);
	}
};
struct TlVReferenceCluster
{
	const ld::Atom*	targetOfTLV;
	ld::Fixup*		fixupWithTarget;
	ld::Fixup*		fixupWithTLVStore;
	bool			optimizable;
};
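//
// Each reference inside an atom is expressed as a cluster of fixups
// (firstInCluster .. lastInCluster). The scan below records, per cluster,
// the fixup naming the TLV target and the fixup holding the TLV-load
// instruction, then decides whether that load can be rewritten as an LEA.
//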
void doPass(const Options& opts, ld::Internal& internal)
{
	const bool log = false;

	// only make tlv section in final linked images
	if ( opts.outputKind() == Options::kObjectFile )
		return;

	// walk all atoms and fixups looking for TLV references and add them to list
	std::vector<TlVReferenceCluster> references;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			TlVReferenceCluster ref;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->firstInCluster() ) {
					ref.targetOfTLV = NULL;
					ref.fixupWithTarget = NULL;
					ref.fixupWithTLVStore = NULL;
				}
				switch ( fit->binding ) {
					case ld::Fixup::bindingsIndirectlyBound:
						ref.targetOfTLV = internal.indirectBindingTable[fit->u.bindingIndex];
						ref.fixupWithTarget = fit;
						break;
					case ld::Fixup::bindingDirectlyBound:
						ref.targetOfTLV = fit->u.target;
						ref.fixupWithTarget = fit;
						break;
					default:
						break;
				}
				switch ( fit->kind ) {
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
					case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreX86Abs32TLVLoad:
						ref.fixupWithTLVStore = fit;
						break;
					default:
						break;
				}
				if ( fit->lastInCluster() && (ref.fixupWithTLVStore != NULL) ) {
					ref.optimizable = optimizable(opts, ref.targetOfTLV);
					if (log) fprintf(stderr, "found reference to TLV at %s+0x%X to %s\n",
									atom->name(), ref.fixupWithTLVStore->offsetInAtom, ref.targetOfTLV->name());
					if ( ! opts.canUseThreadLocalVariables() ) {
						throwf("targeted OS version does not support use of thread local variables in %s", atom->name());
					}
					references.push_back(ref);
				}
			}
		}
	}
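	//
	// A TLV reference is weak_import when the referencing code tolerates the
	// variable being absent at runtime. All non-optimizable references to a
	// given variable must agree on this attribute; a mismatch is resolved
	// according to the -weak_reference_mismatches setting below.
	//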
	// compute which TLV references will be weak_imports
	std::map<const ld::Atom*,bool> weakImportMap;
	for (std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
		if ( !it->optimizable ) {
			// record weak_import attribute
			std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(it->targetOfTLV);
			if ( pos == weakImportMap.end() ) {
				// target not in weakImportMap, so add
				weakImportMap[it->targetOfTLV] = it->fixupWithTarget->weakImport;
			}
			else {
				// target in weakImportMap, check for weakness mismatch
				if ( pos->second != it->fixupWithTarget->weakImport ) {
					switch ( opts.weakReferenceMismatchTreatment() ) {
						case Options::kWeakReferenceMismatchError:
							throwf("mismatching weak references for symbol: %s", it->targetOfTLV->name());
						case Options::kWeakReferenceMismatchWeak:
							pos->second = true;
							break;
						case Options::kWeakReferenceMismatchNonWeak:
							pos->second = false;
							break;
					}
				}
			}
		}
	}
	// create TLV pointers for TLV references that cannot be optimized
	std::map<const ld::Atom*,ld::Atom*> variableToPointerMap;
	for (std::map<const ld::Atom*,bool>::iterator it=weakImportMap.begin(); it != weakImportMap.end(); ++it) {
		std::map<const ld::Atom*,ld::Atom*>::iterator pos = variableToPointerMap.find(it->first);
		if ( pos == variableToPointerMap.end() ) {
			if (log) fprintf(stderr, "make TLV pointer for %s\n", it->first->name());
			if ( it->first->contentType() != ld::Atom::typeTLV )
				throwf("illegal thread local variable reference to regular symbol %s", it->first->name());
			TLVEntryAtom* tlvp = new TLVEntryAtom(internal, it->first, it->second);
			variableToPointerMap[it->first] = tlvp;
		}
	}
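	//
	// Each TLVEntryAtom constructed above registered itself in the
	// __thread_ptrs section via internal.addAtom(); its single fixup makes
	// dyld store the address of the target's TLV descriptor there at bind
	// time, honoring the weak_import attribute computed above.
	//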
	// sort new tlvp atoms so links are consistent
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() == ld::Section::typeTLVPointers ) {
			std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
		}
	}
	// update references to use TLV pointers or TLV object directly
	for (std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
		if ( it->optimizable ) {
			// change store to be LEA instead of load (mov)
			if (log) fprintf(stderr, "optimizing load of TLV to %s into an LEA\n", it->targetOfTLV->name());
			it->fixupWithTLVStore->binding = ld::Fixup::bindingDirectlyBound;
			it->fixupWithTLVStore->u.target = it->targetOfTLV;
			switch ( it->fixupWithTLVStore->kind ) {
				case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreX86Abs32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA;
					break;
				default:
					assert(0 && "bad store kind for TLV optimization");
			}
		}
		else {
			// change target to be new TLV pointer atom
			if (log) fprintf(stderr, "updating load of TLV to %s to load from TLV pointer\n", it->targetOfTLV->name());
			const ld::Atom* tlvpAtom = variableToPointerMap[it->targetOfTLV];
			assert(tlvpAtom != NULL);
			it->fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
			it->fixupWithTarget->u.target = tlvpAtom;
		}
	}
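	//
	// A thread variable definition in a typeTLVDefs section is a three-word
	// descriptor, roughly:
	//
	//     struct TLVDescriptor {
	//         void*         (*thunk)(struct TLVDescriptor*);  // filled in by dyld
	//         unsigned long key;
	//         unsigned long offset;
	//     };
	//
	// (field names are illustrative, from dyld's runtime layout). The loop
	// below rewrites the fixup at the third field so it stores the variable's
	// offset within the TLV initial-value template instead of an absolute
	// address.
	//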
	// alter tlv definitions to have an offset as third field
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() != ld::Section::typeTLVDefs )
			continue;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->offsetInAtom != 0 ) {
					assert(fit->binding == ld::Fixup::bindingDirectlyBound && "thread variable def contains pointer to global");
					switch ( fit->u.target->contentType() ) {
						case ld::Atom::typeTLVZeroFill:
						case ld::Atom::typeTLVInitialValue:
							switch ( fit->kind ) {
								case ld::Fixup::kindSetTargetAddress:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffset;
									break;
								case ld::Fixup::kindStoreTargetAddressLittleEndian32:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32;
									break;
								case ld::Fixup::kindStoreTargetAddressLittleEndian64:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64;
									break;
								default:
									assert(0 && "bad kind for target in tlv defs");
							}
							break;
						default:
							assert(0 && "wrong content type for target in tlv defs");
					}
				}
			}
		}
	}
}


} // namespace tlvp
} // namespace passes
} // namespace ld