/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include <vector>
#include <map>
#include <algorithm>

#include "Options.h"
#include "ld.hpp"
#include "tlvp.h"

namespace ld {
namespace passes {
namespace tlvp {

class File; // forward reference
class TLVEntryAtom : public ld::Atom {
public:
							TLVEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport)
				: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
							ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
							symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)),
					_fixup(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressLittleEndian64, target),
					_target(target)
						{ _fixup.weakImport = weakImport; internal.addAtom(*this); }

	virtual const ld::File*			file() const					{ return NULL; }
	virtual const char*				name() const					{ return _target->name(); }
	virtual uint64_t				size() const					{ return 8; }
	virtual uint64_t				objectAddress() const			{ return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const { }
	virtual void					setScope(Scope)					{ }
	virtual ld::Fixup::iterator		fixupsBegin() const				{ return &_fixup; }
	virtual ld::Fixup::iterator		fixupsEnd() const				{ return &((ld::Fixup*)&_fixup)[1]; }

private:
	mutable ld::Fixup				_fixup;
	const ld::Atom*					_target;

	static ld::Section				_s_section;
};

ld::Section TLVEntryAtom::_s_section("__DATA", "__thread_ptrs", ld::Section::typeTLVPointers);
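
// Note on how these __thread_ptrs entries are used (a sketch, assuming clang's
// usual x86_64 Darwin TLV code generation; "_var" is a hypothetical symbol and
// the instructions are illustrative, not part of this file):
//
//     movq   _var@TLVP(%rip), %rdi    // load address of _var's TLV descriptor
//     callq  *(%rdi)                  // descriptor's thunk returns &var in %rax
//
// When _var is defined in another linkage unit, the @TLVP reference is pointed
// at one of these 8-byte TLVEntryAtom slots, which dyld binds to the real
// descriptor at load time.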
static bool optimizable(const Options& opts, const ld::Atom* targetOfTLV)
{
	// cannot do LEA optimization if target is in another dylib
	if ( targetOfTLV->definition() == ld::Atom::definitionProxy )
		return false;
	if ( targetOfTLV->scope() == ld::Atom::scopeGlobal ) {
		// cannot do LEA optimization if target is weak exported symbol
		if ( (targetOfTLV->definition() == ld::Atom::definitionRegular) && (targetOfTLV->combine() == ld::Atom::combineByName) )
			return false;
		// cannot do LEA optimization if target is interposable
		if ( opts.interposable(targetOfTLV->name()) )
			return false;
		// cannot do LEA optimization for flat-namespace
		if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
			return false;
	}
	return true;
}
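
// When optimizable() returns true, the pass below rewrites the TLV load fixup
// into its ...NowLEA form. Roughly (illustrative only, assuming the same
// codegen as sketched above), the instruction
//
//     movq   _var@TLVP(%rip), %rdi    // load descriptor address from __thread_ptrs
//
// becomes
//
//     leaq   _var(%rip), %rdi         // compute descriptor address directly
//
// so no __thread_ptrs slot is needed for that reference.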
struct AtomByNameSorter
{
	bool operator()(const ld::Atom* left, const ld::Atom* right)
	{
		return (strcmp(left->name(), right->name()) < 0);
	}
};
struct TlVReferenceCluster
{
	const ld::Atom*		targetOfTLV;
	ld::Fixup*			fixupWithTarget;
	ld::Fixup*			fixupWithTLVStore;
	bool				optimizable;
};
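
// Each TLV reference arrives as a cluster of fixups on the referencing atom
// (firstInCluster() .. lastInCluster()). The walk in doPass() records, per
// cluster, which fixup names the TLV target and which one stores the TLV load,
// then decides whether that reference can use the LEA optimization.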
void doPass(const Options& opts, ld::Internal& internal)
{
	const bool log = false;

	// only make tlv section in final linked images
	if ( opts.outputKind() == Options::kObjectFile )
		return;
	// walk all atoms and fixups looking for TLV references and add them to list
	std::vector<TlVReferenceCluster> references;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			TlVReferenceCluster ref;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->firstInCluster() ) {
					ref.targetOfTLV = NULL;
					ref.fixupWithTarget = NULL;
					ref.fixupWithTLVStore = NULL;
				}
				switch ( fit->binding ) {
					case ld::Fixup::bindingsIndirectlyBound:
						ref.targetOfTLV = internal.indirectBindingTable[fit->u.bindingIndex];
						ref.fixupWithTarget = fit;
						break;
					case ld::Fixup::bindingDirectlyBound:
						ref.targetOfTLV = fit->u.target;
						ref.fixupWithTarget = fit;
						break;
					default:
						break;
				}
				switch ( fit->kind ) {
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
					case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreX86Abs32TLVLoad:
						ref.fixupWithTLVStore = fit;
						break;
					default:
						break;
				}
				if ( fit->lastInCluster() && (ref.fixupWithTLVStore != NULL) ) {
					ref.optimizable = optimizable(opts, ref.targetOfTLV);
					if (log) fprintf(stderr, "found reference to TLV at %s+0x%X to %s\n",
										atom->name(), ref.fixupWithTLVStore->offsetInAtom, ref.targetOfTLV->name());
					if ( ! opts.canUseThreadLocalVariables() ) {
						throwf("targeted OS version does not support use of thread local variables in %s", atom->name());
					}
					references.push_back(ref);
				}
			}
		}
	}
	// compute which TLV references will be weak_imports
	std::map<const ld::Atom*,bool> weakImportMap;
	for (std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
		if ( !it->optimizable ) {
			// record weak_import attribute
			std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(it->targetOfTLV);
			if ( pos == weakImportMap.end() ) {
				// target not in weakImportMap, so add
				weakImportMap[it->targetOfTLV] = it->fixupWithTarget->weakImport;
			}
			else {
				// target in weakImportMap, check for weakness mismatch
				if ( pos->second != it->fixupWithTarget->weakImport ) {
					switch ( opts.weakReferenceMismatchTreatment() ) {
						case Options::kWeakReferenceMismatchError:
							throwf("mismatching weak references for symbol: %s", it->targetOfTLV->name());
						case Options::kWeakReferenceMismatchWeak:
							pos->second = true;
							break;
						case Options::kWeakReferenceMismatchNonWeak:
							pos->second = false;
							break;
					}
				}
			}
		}
	}
	// create TLV pointers for TLV references that cannot be optimized
	std::map<const ld::Atom*,ld::Atom*> variableToPointerMap;
	for (std::map<const ld::Atom*,bool>::iterator it=weakImportMap.begin(); it != weakImportMap.end(); ++it) {
		std::map<const ld::Atom*,ld::Atom*>::iterator pos = variableToPointerMap.find(it->first);
		if ( pos == variableToPointerMap.end() ) {
			if (log) fprintf(stderr, "make TLV pointer for %s\n", it->first->name());
			if ( it->first->contentType() != ld::Atom::typeTLV )
				throwf("illegal thread local variable reference to regular symbol %s", it->first->name());
			TLVEntryAtom* tlvp = new TLVEntryAtom(internal, it->first, it->second);
			variableToPointerMap[it->first] = tlvp;
		}
	}
	// sort new tlvp atoms so links are consistent
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() == ld::Section::typeTLVPointers ) {
			std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
		}
	}
	// update references to use TLV pointers or TLV object directly
	for (std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
		if ( it->optimizable ) {
			// change store to be an LEA instead of a load (mov)
			if (log) fprintf(stderr, "optimizing load of TLV to %s into an LEA\n", it->targetOfTLV->name());
			it->fixupWithTLVStore->binding = ld::Fixup::bindingDirectlyBound;
			it->fixupWithTLVStore->u.target = it->targetOfTLV;
			switch ( it->fixupWithTLVStore->kind ) {
				case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreX86Abs32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA;
					break;
				default:
					assert(0 && "bad store kind for TLV optimization");
			}
		}
		else {
			// change target to be new TLV pointer atom
			if (log) fprintf(stderr, "updating load of TLV to %s to load from TLV pointer\n", it->targetOfTLV->name());
			const ld::Atom* tlvpAtom = variableToPointerMap[it->targetOfTLV];
			assert(tlvpAtom != NULL);
			it->fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
			it->fixupWithTarget->u.target = tlvpAtom;
		}
	}
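
	// Background for the loop below (a sketch; the descriptor layout is defined
	// by dyld's runtime, not by this file): each thread-local variable definition
	// in __thread_vars is a three-word descriptor, roughly
	//
	//     struct TLVDescriptor { void* (*thunk)(TLVDescriptor*); unsigned long key; unsigned long offset; };
	//
	// In the object file the third field is recorded as an address of the
	// variable's backing storage; the fixup-kind changes below turn it into an
	// offset within the thread-local template instead.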
	// alter tlv definitions to have an offset as third field
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() != ld::Section::typeTLVDefs )
			continue;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->offsetInAtom != 0 ) {
					assert(fit->binding == ld::Fixup::bindingDirectlyBound && "thread variable def contains pointer to global");
					switch ( fit->u.target->contentType() ) {
						case ld::Atom::typeTLVZeroFill:
						case ld::Atom::typeTLVInitialValue:
							switch ( fit->kind ) {
								case ld::Fixup::kindSetTargetAddress:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffset;
									break;
								case ld::Fixup::kindStoreTargetAddressLittleEndian32:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32;
									break;
								case ld::Fixup::kindStoreTargetAddressLittleEndian64:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64;
									break;
								default:
									assert(0 && "bad kind for target in tlv defs");
									break;
							}
							break;
						default:
							assert(0 && "wrong content type for target in tlv defs");
							break;
					}
				}
			}
		}
	}
}
} // namespace tlvp
} // namespace passes
} // namespace ld