// Source: apple/ld64 (release ld64-136), src/ld/passes/tlvp.cpp
// (obtained via git.saurik.com mirror of the apple/ld64 repository)
/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
25
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <dlfcn.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>
#include <map>
#include <vector>

#include "ld.hpp"
#include "tlvp.h"
36
37namespace ld {
38namespace passes {
39namespace tlvp {
40
41class File; // forward reference
42
// A linker-synthesized pointer-sized slot placed in the __DATA,__thread_ptrs
// section (typeTLVPointers).  References to thread-local variables that
// cannot be rewritten into an LEA are redirected to load through one of
// these slots instead; the slot carries a store-target-address fixup so a
// later phase / the loader can fill in the variable's descriptor address.
class TLVEntryAtom : public ld::Atom {
public:
	// Constructing the atom registers it with the current link state
	// (internal.addAtom), so callers just `new` it and drop the pointer.
	// weakImport is recorded on the fixup so a weakly-imported variable
	// produces a weak binding for this slot.
											TLVEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport)
				: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
							ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
							symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)),
				_fixup(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressLittleEndian64, target),
				_target(target)
					{ _fixup.weakImport = weakImport; internal.addAtom(*this); }

	virtual const ld::File*					file() const					{ return NULL; }
	// Named after the variable it points at, so sorting these atoms by name
	// (see AtomByNameSorter in this file) gives a deterministic layout.
	virtual const char*						name() const					{ return _target->name(); }
	virtual uint64_t						size() const					{ return 8; }	// one 64-bit pointer
	virtual uint64_t						objectAddress() const			{ return 0; }
	// Raw content is all zeros; the real value comes from _fixup.
	virtual void							copyRawContent(uint8_t buffer[]) const { }
	virtual void							setScope(Scope)					{ }
	// Expose the single embedded fixup as a one-element range.  _fixup is
	// `mutable` because these accessors are const yet later passes edit the
	// fixup in place through the returned iterator.
	virtual ld::Fixup::iterator				fixupsBegin() const				{ return &_fixup; }
	virtual ld::Fixup::iterator				fixupsEnd() const				{ return &((ld::Fixup*)&_fixup)[1]; }

private:
	mutable ld::Fixup						_fixup;		// stores _target's address into this slot
	const ld::Atom*							_target;	// the thread-local variable referenced

	static ld::Section						_s_section;
};

ld::Section TLVEntryAtom::_s_section("__DATA", "__thread_ptrs", ld::Section::typeTLVPointers);
70
71
72static bool optimizable(const Options& opts, const ld::Atom* targetOfTLV)
73{
74 // cannot do LEA optimization if target is in another dylib
75 if ( targetOfTLV->definition() == ld::Atom::definitionProxy )
76 return false;
77 if ( targetOfTLV->scope() == ld::Atom::scopeGlobal ) {
78 // cannot do LEA optimization if target is weak exported symbol
79 if ( (targetOfTLV->definition() == ld::Atom::definitionRegular) && (targetOfTLV->combine() == ld::Atom::combineByName) )
80 return false;
81 // cannot do LEA optimization if target is interposable
82 if ( opts.interposable(targetOfTLV->name()) )
83 return false;
84 // cannot do LEA optimization for flat-namespace
85 if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
86 return false;
87 }
88 return true;
89}
90
91struct AtomByNameSorter
92{
93 bool operator()(const ld::Atom* left, const ld::Atom* right)
94 {
95 return (strcmp(left->name(), right->name()) < 0);
96 }
97};
98
// One fixup cluster that references a thread-local variable: which variable
// is referenced, the fixup that names the target, the fixup whose kind is a
// TLV-load store (the instruction that may be rewritten), and whether the
// load can be turned into an LEA (see optimizable() above).
struct TlVReferenceCluster
{
	const ld::Atom*	targetOfTLV;		// the thread-local variable referenced
	ld::Fixup*		fixupWithTarget;	// fixup carrying the target binding
	ld::Fixup*		fixupWithTLVStore;	// fixup with a *TLVLoad store kind
	bool			optimizable;		// true => rewrite the load as an LEA
};
106
// The TLV pass for final linked images.  Three phases:
//   1. scan every atom's fixup clusters for references to thread-local
//      variables (identified by *TLVLoad store kinds);
//   2. for references that cannot be LEA-optimized, synthesize one
//      TLVEntryAtom pointer per variable (merging weak-import attributes)
//      and retarget the reference through it; optimizable references have
//      their load fixup kind rewritten to the corresponding *NowLEA kind;
//   3. inside TLV definitions (typeTLVDefs), convert address fixups that
//      point at the zero-fill/initial-value template into template-offset
//      fixup kinds, so the descriptor's third field is an offset.
void doPass(const Options& opts, ld::Internal& internal)
{
	const bool log = false;

	// only make tlv section in final linked images
	if ( opts.outputKind() == Options::kObjectFile )
		return;

	// walk all atoms and fixups looking for TLV references and add them to list
	std::vector<TlVReferenceCluster> references;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			TlVReferenceCluster ref;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				// fixups come in clusters; reset accumulated state at each cluster head
				if ( fit->firstInCluster() ) {
					ref.targetOfTLV = NULL;
					ref.fixupWithTarget = NULL;
					ref.fixupWithTLVStore = NULL;
				}
				// note which fixup in the cluster names the target...
				switch ( fit->binding ) {
					case ld::Fixup::bindingsIndirectlyBound:
						ref.targetOfTLV = internal.indirectBindingTable[fit->u.bindingIndex];
						ref.fixupWithTarget = fit;
						break;
					case ld::Fixup::bindingDirectlyBound:
						ref.targetOfTLV = fit->u.target;
						ref.fixupWithTarget = fit;
						break;
					default:
						break;
				}
				// ...and which one (if any) is the TLV-load store instruction
				switch ( fit->kind ) {
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
					case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreX86Abs32TLVLoad:
						ref.fixupWithTLVStore = fit;
						break;
					default:
						break;
				}
				if ( fit->lastInCluster() && (ref.fixupWithTLVStore != NULL) ) {
					// NOTE(review): assumes any cluster containing a TLV-load fixup
					// also carries a target binding (ref.targetOfTLV != NULL) —
					// otherwise optimizable() would dereference NULL; confirm that
					// invariant holds for all producers of these fixup kinds.
					ref.optimizable = optimizable(opts, ref.targetOfTLV);
					if (log) fprintf(stderr, "found reference to TLV at %s+0x%X to %s\n",
							atom->name(), ref.fixupWithTLVStore->offsetInAtom, ref.targetOfTLV->name());
					if ( ! opts.canUseThreadLocalVariables() ) {
						throwf("targeted OS version does not support use of thread local variables in %s", atom->name());
					}
					references.push_back(ref);
				}
			}
		}
	}

	// compute which TLV references will be weak_imports
	std::map<const ld::Atom*,bool> weakImportMap;
	for(std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
		if ( !it->optimizable ) {
			// record weak_import attribute
			std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(it->targetOfTLV);
			if ( pos == weakImportMap.end() ) {
				// target not in weakImportMap, so add
				weakImportMap[it->targetOfTLV] = it->fixupWithTarget->weakImport;
			}
			else {
				// target in weakImportMap, check for weakness mismatch
				// (one reference weak, another strong): resolve per option
				if ( pos->second != it->fixupWithTarget->weakImport ) {
					// found mismatch
					switch ( opts.weakReferenceMismatchTreatment() ) {
						case Options::kWeakReferenceMismatchError:
							throwf("mismatching weak references for symbol: %s", it->targetOfTLV->name());
						case Options::kWeakReferenceMismatchWeak:
							pos->second = true;
							break;
						case Options::kWeakReferenceMismatchNonWeak:
							pos->second = false;
							break;
					}
				}
			}
		}
	}

	// create TLV pointers for TLV references that cannot be optimized
	// (one TLVEntryAtom per distinct variable; weakImportMap's keys are
	// exactly the non-optimizable targets)
	std::map<const ld::Atom*,ld::Atom*> variableToPointerMap;
	for(std::map<const ld::Atom*,bool>::iterator it=weakImportMap.begin(); it != weakImportMap.end(); ++it) {
		std::map<const ld::Atom*,ld::Atom*>::iterator pos = variableToPointerMap.find(it->first);
		if ( pos == variableToPointerMap.end() ) {
			if (log) fprintf(stderr, "make TLV pointer for %s\n", it->first->name());
			// a TLV-style load of something that is not a thread-local is a
			// user error (e.g. mismatched declarations)
			if ( it->first->contentType() != ld::Atom::typeTLV )
				throwf("illegal thread local variable reference to regular symbol %s", it->first->name());
			TLVEntryAtom* tlvp = new TLVEntryAtom(internal, it->first, it->second);
			variableToPointerMap[it->first] = tlvp;
		}
	}

	// sort new tvlp atoms so links are consistent
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() == ld::Section::typeTLVPointers ) {
			std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
		}
	}

	// update references to use TLV pointers or TLV object directly
	for(std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
		if ( it->optimizable ) {
			// change store to be LEA instead load (mov): retarget the store
			// fixup directly at the variable and swap its kind to *NowLEA
			if (log) fprintf(stderr, "optimizing load of TLV to %s into an LEA\n", it->targetOfTLV->name());
			it->fixupWithTLVStore->binding = ld::Fixup::bindingDirectlyBound;
			it->fixupWithTLVStore->u.target = it->targetOfTLV;
			switch ( it->fixupWithTLVStore->kind ) {
				case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreX86Abs32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA;
					break;
				default:
					assert(0 && "bad store kind for TLV optimization");
			}
		}
		else {
			// change target to be new TLV pointer atom
			if (log) fprintf(stderr, "updating load of TLV to %s to load from TLV pointer\n", it->targetOfTLV->name());
			const ld::Atom* tlvpAtom = variableToPointerMap[it->targetOfTLV];
			assert(tlvpAtom != NULL);
			it->fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
			it->fixupWithTarget->u.target = tlvpAtom;
		}
	}



	// alter tlv definitions to have an offset as third field
	// (fixups at offset 0 are left alone; only the pointer-to-template
	// fields are converted to template-offset kinds)
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() != ld::Section::typeTLVDefs )
			continue;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->offsetInAtom != 0 ) {
					assert(fit->binding == ld::Fixup::bindingDirectlyBound && "thread variable def contains pointer to global");
					switch( fit->u.target->contentType() ) {
						case ld::Atom::typeTLVZeroFill:
						case ld::Atom::typeTLVInitialValue:
							switch ( fit->kind ) {
								case ld::Fixup::kindSetTargetAddress:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffset;
									break;
								case ld::Fixup::kindStoreTargetAddressLittleEndian32:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32;
									break;
								case ld::Fixup::kindStoreTargetAddressLittleEndian64:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64;
									break;
								default:
									assert(0 && "bad kind for target in tlv defs");
									break;
							}
							break;
						default:
							assert(0 && "wrong content type for target in tlv defs");
							break;
					}
				}
			}
		}
	}

}
287
288
289
290} // namespace tlvp
291} // namespace passes
292} // namespace ld