/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>

#include <vector>
#include <map>
#include <algorithm>
#include <ext/hash_map>

#include "ld.hpp"

namespace ld {
namespace passes {
namespace tlvp {
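//
// This pass handles x86_64 thread-local-variable (TLV) references. In outline:
// compiled code reaches a __thread variable through its TLV descriptor, and
// each instruction that does so carries a "TLVLoad" fixup. For every such
// reference this pass decides whether the load can be rewritten as an LEA of
// the descriptor itself (target fully bound at link time) or must instead go
// through a linker-synthesized pointer slot in __DATA,__thread_ptrs that dyld
// binds at load time.
//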
class File; // forward reference
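//
// A TLVEntryAtom is one 8-byte slot in __DATA,__thread_ptrs. Its single fixup
// stores the 64-bit address of the target thread-local variable's descriptor,
// so dyld can bind the slot when the image is loaded.
//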
class TLVEntryAtom : public ld::Atom {
public:
									TLVEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport)
				: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
							ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
							symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)),
				_fixup(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressLittleEndian64, target),
				_target(target)
					{ _fixup.weakImport = weakImport; internal.addAtom(*this); }

	virtual const ld::File*			file() const					{ return NULL; }
	virtual const char*				name() const					{ return _target->name(); }
	virtual uint64_t				size() const					{ return 8; }
	virtual uint64_t				objectAddress() const			{ return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const { }
	virtual void					setScope(Scope)					{ }
	virtual ld::Fixup::iterator		fixupsBegin() const				{ return &_fixup; }
	virtual ld::Fixup::iterator		fixupsEnd() const				{ return &((ld::Fixup*)&_fixup)[1]; }

private:
	mutable ld::Fixup				_fixup;
	const ld::Atom*					_target;

	static ld::Section				_s_section;
};
ld::Section TLVEntryAtom::_s_section("__DATA", "__thread_ptrs", ld::Section::typeTLVPointers);
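//
// The LEA optimization is only safe when the address of the TLV descriptor is
// fully known at link time: the target must live in this image and must not
// be replaceable at load time (weak export, interposing, flat namespace).
//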
static bool optimizable(const Options& opts, const ld::Atom* targetOfTLV)
{
	// cannot do LEA optimization if target is in another dylib
	if ( targetOfTLV->definition() == ld::Atom::definitionProxy )
		return false;
	if ( targetOfTLV->scope() == ld::Atom::scopeGlobal ) {
		// cannot do LEA optimization if target is weak exported symbol
		if ( (targetOfTLV->definition() == ld::Atom::definitionRegular) && (targetOfTLV->combine() == ld::Atom::combineByName) )
			return false;
		// cannot do LEA optimization if target is interposable
		if ( opts.interposable(targetOfTLV->name()) )
			return false;
		// cannot do LEA optimization for flat-namespace
		if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
			return false;
	}
	return true;
}
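//
// Roughly, for a reference the compiler emits as
//     movq  _x@TLVP(%rip), %rdi     // load address of _x's TLV descriptor
//     callq *(%rdi)                 // thunk returns address of _x
// an optimizable target lets the movq become a leaq of the descriptor itself;
// otherwise the movq is redirected to a __thread_ptrs slot that dyld fills in.
//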
struct AtomByNameSorter
{
	bool operator()(const ld::Atom* left, const ld::Atom* right)
	{
		return (strcmp(left->name(), right->name()) < 0);
	}
};
struct TlVReferenceCluster
{
	const ld::Atom*		targetOfTLV;
	ld::Fixup*			fixupWithTarget;
	ld::Fixup*			fixupWithTLVStore;
	bool				optimizable;
};
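//
// Fixups arrive in clusters: the fixups from firstInCluster() through
// lastInCluster() together describe one reference in one instruction. A
// TlVReferenceCluster records, for one such reference, which atom is the
// thread-local target, which fixup binds to it, and which fixup performs the
// TLV load that may be rewritten.
//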
void doPass(const Options& opts, ld::Internal& internal)
{
	const bool log = false;

	// only make tlv section in final linked images
	if ( opts.outputKind() == Options::kObjectFile )
		return;
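	// (For -r object-file output the fixups are preserved as relocations in
	// the output, so TLV handling is deferred to the final link.)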
	// walk all atoms and fixups looking for TLV references and add them to list
	std::vector<TlVReferenceCluster>	references;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			TlVReferenceCluster ref;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->firstInCluster() ) {
					ref.targetOfTLV = NULL;
					ref.fixupWithTarget = NULL;
					ref.fixupWithTLVStore = NULL;
				}
				switch ( fit->binding ) {
					case ld::Fixup::bindingsIndirectlyBound:
						ref.targetOfTLV = internal.indirectBindingTable[fit->u.bindingIndex];
						ref.fixupWithTarget = fit;
						break;
					case ld::Fixup::bindingDirectlyBound:
						ref.targetOfTLV = fit->u.target;
						ref.fixupWithTarget = fit;
						break;
					default:
						break;
				}
				switch ( fit->kind ) {
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
					case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreX86Abs32TLVLoad:
						ref.fixupWithTLVStore = fit;
						break;
					default:
						break;
				}
				if ( fit->lastInCluster() && (ref.fixupWithTLVStore != NULL) ) {
					ref.optimizable = optimizable(opts, ref.targetOfTLV);
					if (log) fprintf(stderr, "found reference to TLV at %s+0x%X to %s\n",
									atom->name(), ref.fixupWithTLVStore->offsetInAtom, ref.targetOfTLV->name());
					if ( ! opts.canUseThreadLocalVariables() ) {
						throwf("targeted OS version does not support use of thread local variables in %s", atom->name());
					}
					references.push_back(ref);
				}
			}
		}
	}
	// compute which TLV references will be weak_imports
	std::map<const ld::Atom*,bool>		weakImportMap;
	for (std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
		if ( !it->optimizable ) {
			// record weak_import attribute
			std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(it->targetOfTLV);
			if ( pos == weakImportMap.end() ) {
				// target not in weakImportMap, so add
				weakImportMap[it->targetOfTLV] = it->fixupWithTarget->weakImport;
			}
			else {
				// target in weakImportMap, check for weakness mismatch
				if ( pos->second != it->fixupWithTarget->weakImport ) {
					switch ( opts.weakReferenceMismatchTreatment() ) {
						case Options::kWeakReferenceMismatchError:
							throwf("mismatching weak references for symbol: %s", it->targetOfTLV->name());
						case Options::kWeakReferenceMismatchWeak:
							pos->second = true;
							break;
						case Options::kWeakReferenceMismatchNonWeak:
							pos->second = false;
							break;
					}
				}
			}
		}
	}
	// create TLV pointers for TLV references that cannot be optimized
	std::map<const ld::Atom*,ld::Atom*> variableToPointerMap;
	for (std::map<const ld::Atom*,bool>::iterator it=weakImportMap.begin(); it != weakImportMap.end(); ++it) {
		std::map<const ld::Atom*,ld::Atom*>::iterator pos = variableToPointerMap.find(it->first);
		if ( pos == variableToPointerMap.end() ) {
			if (log) fprintf(stderr, "make TLV pointer for %s\n", it->first->name());
			if ( it->first->contentType() != ld::Atom::typeTLV )
				throwf("illegal thread local variable reference to regular symbol %s", it->first->name());
			TLVEntryAtom* tlvp = new TLVEntryAtom(internal, it->first, it->second);
			variableToPointerMap[it->first] = tlvp;
		}
	}
	// sort new tlvp atoms so links are consistent
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() == ld::Section::typeTLVPointers ) {
			std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
		}
	}
	// update references to use TLV pointers or TLV object directly
	for (std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
		if ( it->optimizable ) {
			// change store to be LEA instead of load (mov)
			if (log) fprintf(stderr, "optimizing load of TLV to %s into an LEA\n", it->targetOfTLV->name());
			it->fixupWithTLVStore->binding = ld::Fixup::bindingDirectlyBound;
			it->fixupWithTLVStore->u.target = it->targetOfTLV;
			switch ( it->fixupWithTLVStore->kind ) {
				case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreX86Abs32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA;
					break;
				default:
					assert(0 && "bad store kind for TLV optimization");
			}
		}
		else {
			// change target to be new TLV pointer atom
			if (log) fprintf(stderr, "updating load of TLV to %s to load from TLV pointer\n", it->targetOfTLV->name());
			const ld::Atom* tlvpAtom = variableToPointerMap[it->targetOfTLV];
			assert(tlvpAtom != NULL);
			it->fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
			it->fixupWithTarget->u.target = tlvpAtom;
		}
	}
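	// A thread-local variable definition is a TLV descriptor holding a
	// resolver thunk, a key, and a third field that must end up as an offset
	// into the image's TLV template (__thread_data/__thread_bss), not as an
	// absolute address. The loop below converts those address fixups into
	// TLV-template-offset fixups.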
	// alter tlv definitions to have an offset as third field
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() != ld::Section::typeTLVDefs )
			continue;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->offsetInAtom != 0 ) {
					assert(fit->binding == ld::Fixup::bindingDirectlyBound && "thread variable def contains pointer to global");
					switch ( fit->u.target->contentType() ) {
						case ld::Atom::typeTLVZeroFill:
						case ld::Atom::typeTLVInitialValue:
							switch ( fit->kind ) {
								case ld::Fixup::kindSetTargetAddress:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffset;
									break;
								case ld::Fixup::kindStoreTargetAddressLittleEndian32:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32;
									break;
								case ld::Fixup::kindStoreTargetAddressLittleEndian64:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64;
									break;
								default:
									assert(0 && "bad kind for target in tlv defs");
							}
							break;
						default:
							assert(0 && "wrong content type for target in tlv defs");
					}
				}
			}
		}
	}
}


} // namespace tlvp
} // namespace passes
} // namespace ld