/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <dlfcn.h>

#include <vector>
#include <map>
#include <ext/hash_map>

#include "ld.hpp"
#include "tlvp.h"

namespace ld {
namespace passes {
namespace tlvp {

class File; // forward reference

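// A TLVEntryAtom is a pointer-sized (8-byte) entry in the __DATA,__thread_ptrs
// section. It carries a single fixup naming the thread-local variable it points
// to, so the pointer can be bound (weakly, if requested) at load time.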
class TLVEntryAtom : public ld::Atom {
public:
    TLVEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport)
        : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
                   symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)),
          _fixup(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressLittleEndian64, target),
          _target(target)
        { _fixup.weakImport = weakImport; internal.addAtom(*this); }

    virtual const ld::File*      file() const                      { return NULL; }
    virtual bool                 translationUnitSource(const char** dir, const char**) const
                                                                   { return false; }
    virtual const char*          name() const                      { return _target->name(); }
    virtual uint64_t             size() const                      { return 8; }
    virtual uint64_t             objectAddress() const             { return 0; }
    virtual void                 copyRawContent(uint8_t buffer[]) const { }
    virtual void                 setScope(Scope)                   { }
    virtual ld::Fixup::iterator  fixupsBegin() const               { return &_fixup; }
    virtual ld::Fixup::iterator  fixupsEnd() const                 { return &((ld::Fixup*)&_fixup)[1]; }

private:
    mutable ld::Fixup            _fixup;
    const ld::Atom*              _target;

    static ld::Section           _s_section;
};

ld::Section TLVEntryAtom::_s_section("__DATA", "__thread_ptrs", ld::Section::typeTLVPointers);


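// A TLV reference may be rewritten into a direct LEA only when the target is
// guaranteed to resolve within this linkage unit at link time: not a proxy from
// another dylib, not a coalescable (weak) export, not interposable, and not
// linked flat-namespace.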
static bool optimizable(const Options& opts, const ld::Atom* targetOfTLV)
{
    // cannot do LEA optimization if target is in another dylib
    if ( targetOfTLV->definition() == ld::Atom::definitionProxy )
        return false;
    if ( targetOfTLV->scope() == ld::Atom::scopeGlobal ) {
        // cannot do LEA optimization if target is weak exported symbol
        if ( (targetOfTLV->definition() == ld::Atom::definitionRegular) && (targetOfTLV->combine() == ld::Atom::combineByName) )
            return false;
        // cannot do LEA optimization if target is interposable
        if ( opts.interposable(targetOfTLV->name()) )
            return false;
        // cannot do LEA optimization for flat-namespace
        if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
            return false;
    }
    return true;
}

struct AtomByNameSorter
{
    bool operator()(const ld::Atom* left, const ld::Atom* right)
    {
        return (strcmp(left->name(), right->name()) < 0);
    }
};

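// One TLV reference site gathered from a fixup cluster: the fixup that names the
// thread-local target, the fixup that performs the load through the TLV pointer,
// and whether that load can be turned into an LEA.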
struct TlVReferenceCluster
{
    const ld::Atom*  targetOfTLV;
    ld::Fixup*       fixupWithTarget;
    ld::Fixup*       fixupWithTLVStore;
    bool             optimizable;
};

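// Scan every atom for references to thread-local variables. References whose
// target is optimizable are rewritten from a load through a TLV pointer into a
// direct LEA of the variable; the rest are pointed at new TLVEntryAtoms in
// __thread_ptrs. Finally, fixups inside TLV definitions are converted to store
// offsets into the TLV template rather than absolute addresses.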
void doPass(const Options& opts, ld::Internal& internal)
{
    const bool log = false;

    // only make tlv section in final linked images
    if ( opts.outputKind() == Options::kObjectFile )
        return;

    // walk all atoms and fixups looking for TLV references and add them to list
    std::vector<TlVReferenceCluster> references;
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            TlVReferenceCluster ref;
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                if ( fit->firstInCluster() ) {
                    ref.targetOfTLV = NULL;
                    ref.fixupWithTarget = NULL;
                    ref.fixupWithTLVStore = NULL;
                }
                switch ( fit->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                        ref.targetOfTLV = internal.indirectBindingTable[fit->u.bindingIndex];
                        ref.fixupWithTarget = fit;
                        break;
                    case ld::Fixup::bindingDirectlyBound:
                        ref.targetOfTLV = fit->u.target;
                        ref.fixupWithTarget = fit;
                        break;
                    default:
                        break;
                }
                switch ( fit->kind ) {
                    case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
                    case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
                    case ld::Fixup::kindStoreX86PCRel32TLVLoad:
                    case ld::Fixup::kindStoreX86Abs32TLVLoad:
                        ref.fixupWithTLVStore = fit;
                        break;
                    default:
                        break;
                }
                if ( fit->lastInCluster() && (ref.fixupWithTLVStore != NULL) ) {
                    ref.optimizable = optimizable(opts, ref.targetOfTLV);
                    if (log) fprintf(stderr, "found reference to TLV at %s+0x%X to %s\n",
                                     atom->name(), ref.fixupWithTLVStore->offsetInAtom, ref.targetOfTLV->name());
                    if ( ! opts.canUseThreadLocalVariables() ) {
                        throwf("targeted OS version does not support use of thread local variables in %s", atom->name());
                    }
                    references.push_back(ref);
                }
            }
        }
    }

    // compute which TLV references will be weak_imports
    std::map<const ld::Atom*,bool> weakImportMap;
    for(std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
        if ( !it->optimizable ) {
            // record weak_import attribute
            std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(it->targetOfTLV);
            if ( pos == weakImportMap.end() ) {
                // target not in weakImportMap, so add
                weakImportMap[it->targetOfTLV] = it->fixupWithTarget->weakImport;
                // <rdar://problem/5529626> If only weak_import symbols are used, linker should use LD_LOAD_WEAK_DYLIB
                const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(it->targetOfTLV->file());
                if ( dylib != NULL ) {
                    if ( it->fixupWithTarget->weakImport )
                        (const_cast<ld::dylib::File*>(dylib))->setUsingWeakImportedSymbols();
                    else
                        (const_cast<ld::dylib::File*>(dylib))->setUsingNonWeakImportedSymbols();
                }
            }
            else {
                // target in weakImportMap, check for weakness mismatch
                if ( pos->second != it->fixupWithTarget->weakImport ) {
                    // found mismatch
                    switch ( opts.weakReferenceMismatchTreatment() ) {
                        case Options::kWeakReferenceMismatchError:
                            throwf("mismatching weak references for symbol: %s", it->targetOfTLV->name());
                        case Options::kWeakReferenceMismatchWeak:
                            pos->second = true;
                            break;
                        case Options::kWeakReferenceMismatchNonWeak:
                            pos->second = false;
                            break;
                    }
                }
            }
        }
    }

    // create TLV pointers for TLV references that cannot be optimized
    std::map<const ld::Atom*,ld::Atom*> variableToPointerMap;
    for(std::map<const ld::Atom*,bool>::iterator it=weakImportMap.begin(); it != weakImportMap.end(); ++it) {
        std::map<const ld::Atom*,ld::Atom*>::iterator pos = variableToPointerMap.find(it->first);
        if ( pos == variableToPointerMap.end() ) {
            if (log) fprintf(stderr, "make TLV pointer for %s\n", it->first->name());
            if ( it->first->contentType() != ld::Atom::typeTLV )
                throwf("illegal thread local variable reference to regular symbol %s", it->first->name());
            TLVEntryAtom* tlvp = new TLVEntryAtom(internal, it->first, it->second);
            variableToPointerMap[it->first] = tlvp;
        }
    }

    // sort new tlvp atoms so links are consistent
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() == ld::Section::typeTLVPointers ) {
            std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
        }
    }

    // update references to use TLV pointers or TLV object directly
    for(std::vector<TlVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
        if ( it->optimizable ) {
            // change store to be an LEA instead of a load (mov)
            if (log) fprintf(stderr, "optimizing load of TLV to %s into an LEA\n", it->targetOfTLV->name());
            it->fixupWithTLVStore->binding = ld::Fixup::bindingDirectlyBound;
            it->fixupWithTLVStore->u.target = it->targetOfTLV;
            switch ( it->fixupWithTLVStore->kind ) {
                case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA;
                    break;
                case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA;
                    break;
                case ld::Fixup::kindStoreX86PCRel32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA;
                    break;
                case ld::Fixup::kindStoreX86Abs32TLVLoad:
                    it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA;
                    break;
                default:
                    assert(0 && "bad store kind for TLV optimization");
            }
        }
        else {
            // change target to be new TLV pointer atom
            if (log) fprintf(stderr, "updating load of TLV to %s to load from TLV pointer\n", it->targetOfTLV->name());
            const ld::Atom* tlvpAtom = variableToPointerMap[it->targetOfTLV];
            assert(tlvpAtom != NULL);
            it->fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
            it->fixupWithTarget->u.target = tlvpAtom;
        }
    }


    // alter tlv definitions to have an offset as third field
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() != ld::Section::typeTLVDefs )
            continue;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                if ( fit->offsetInAtom != 0 ) {
                    assert(fit->binding == ld::Fixup::bindingDirectlyBound && "thread variable def contains pointer to global");
                    switch( fit->u.target->contentType() ) {
                        case ld::Atom::typeTLVZeroFill:
                        case ld::Atom::typeTLVInitialValue:
                            switch ( fit->kind ) {
                                case ld::Fixup::kindSetTargetAddress:
                                    fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffset;
                                    break;
                                case ld::Fixup::kindStoreTargetAddressLittleEndian32:
                                    fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32;
                                    break;
                                case ld::Fixup::kindStoreTargetAddressLittleEndian64:
                                    fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64;
                                    break;
                                default:
                                    assert(0 && "bad kind for target in tlv defs");
                                    break;
                            }
                            break;
                        default:
                            assert(0 && "wrong content type for target in tlv defs");
                            break;
                    }
                }
            }
        }
    }

}


} // namespace tlvp
} // namespace passes
} // namespace ld