/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <dlfcn.h>
#include <string.h>		// strcmp()
#include <stdio.h>		// fprintf()
#include <assert.h>		// assert()

#include <vector>
#include <map>
#include <algorithm>	// std::sort()

#include "ld.hpp"
#include "tlvp.h"

namespace ld {
namespace passes {
namespace tlvp {

class File; // forward reference

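// A TLVEntryAtom is a synthesized 8-byte pointer slot in the __thread_ptrs
// section.  Its single fixup stores the address of the target thread-local
// variable into the slot; non-optimizable TLV loads are retargeted to read
// through this slot.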
class TLVEntryAtom : public ld::Atom {
public:
	TLVEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport)
		: ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
				ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
				symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)),
		  _fixup(0, ld::Fixup::k1of1, ld::Fixup::kindStoreTargetAddressLittleEndian64, target),
		  _target(target)
			{ _fixup.weakImport = weakImport; internal.addAtom(*this); }

	virtual const ld::File*			file() const			{ return NULL; }
	virtual const char*				name() const			{ return _target->name(); }
	virtual uint64_t				size() const			{ return 8; }
	virtual uint64_t				objectAddress() const	{ return 0; }
	virtual void					copyRawContent(uint8_t buffer[]) const { }
	virtual void					setScope(Scope)			{ }
	virtual ld::Fixup::iterator		fixupsBegin() const		{ return &_fixup; }
	virtual ld::Fixup::iterator		fixupsEnd() const		{ return &_fixup + 1; }

private:
	mutable ld::Fixup				_fixup;
	const ld::Atom*					_target;

	static ld::Section				_s_section;
};

ld::Section TLVEntryAtom::_s_section("__DATA", "__thread_ptrs", ld::Section::typeTLVPointers);

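// Background for the "LEA optimization" tested below: on x86_64 the compiler
// emits a thread-local access as (roughly)
//
//		movq	_var@TLVP(%rip), %rdi	// load descriptor address from a __thread_ptrs slot
//		callq	*(%rdi)					// call the descriptor's thunk; address returned in %rax
//
// When the target is definitely bound at static link time (defined in this
// linkage unit, not weak, not interposable, two-level namespace), the movq can
// be rewritten into an leaq of the descriptor itself, so no __thread_ptrs slot
// is needed.  The arm64 adrp/ldr pair is rewritten to adrp/add the same way.
// (Instruction sequences shown for illustration only.)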
static bool optimizable(const Options& opts, const ld::Atom* targetOfTLV)
{
	// cannot do LEA optimization if target is in another dylib
	if ( targetOfTLV->definition() == ld::Atom::definitionProxy )
		return false;
	if ( targetOfTLV->scope() == ld::Atom::scopeGlobal ) {
		// cannot do LEA optimization if target is a weak exported symbol
		if ( (targetOfTLV->definition() == ld::Atom::definitionRegular) && (targetOfTLV->combine() == ld::Atom::combineByName) )
			return false;
		// cannot do LEA optimization if target is interposable
		if ( opts.interposable(targetOfTLV->name()) )
			return false;
		// cannot do LEA optimization with flat namespace
		if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
			return false;
	}
	return true;
}

struct AtomByNameSorter
{
	bool operator()(const ld::Atom* left, const ld::Atom* right) const
	{
		return (strcmp(left->name(), right->name()) < 0);
	}
};

struct TLVReferenceCluster
{
	const ld::Atom*		targetOfTLV;		// thread-local variable being referenced
	ld::Fixup*			fixupWithTarget;	// fixup that names the target atom
	ld::Fixup*			fixupWithTLVStore;	// fixup whose kind is one of the TLV load kinds
	bool				optimizable;		// true if the load can be turned into an LEA
};

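// doPass() scans every atom's fixups for TLV loads, synthesizes __thread_ptrs
// slots for references that cannot be optimized, rewrites the optimizable
// loads into LEAs of the TLV definition, and finally converts the pointer in
// each TLV definition's third field into an offset into the TLV template.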
void doPass(const Options& opts, ld::Internal& internal)
{
	const bool log = false;

	// only make tlv section in final linked images
	if ( opts.outputKind() == Options::kObjectFile )
		return;

	// walk all atoms and fixups looking for TLV references and add them to list
	std::vector<TLVReferenceCluster> references;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			TLVReferenceCluster ref;
			// fixups come in clusters; reset the gathered state at each cluster start
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->firstInCluster() ) {
					ref.targetOfTLV = NULL;
					ref.fixupWithTarget = NULL;
					ref.fixupWithTLVStore = NULL;
				}
				switch ( fit->binding ) {
					case ld::Fixup::bindingsIndirectlyBound:
						ref.targetOfTLV = internal.indirectBindingTable[fit->u.bindingIndex];
						ref.fixupWithTarget = fit;
						break;
					case ld::Fixup::bindingDirectlyBound:
						ref.targetOfTLV = fit->u.target;
						ref.fixupWithTarget = fit;
						break;
					default:
						break;
				}
				switch ( fit->kind ) {
					case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
					case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					case ld::Fixup::kindStoreX86Abs32TLVLoad:
#if SUPPORT_ARCH_arm64
					case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
					case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
#endif
						ref.fixupWithTLVStore = fit;
						break;
					default:
						break;
				}
				if ( fit->lastInCluster() && (ref.fixupWithTLVStore != NULL) ) {
					ref.optimizable = optimizable(opts, ref.targetOfTLV);
					if (log) fprintf(stderr, "found reference to TLV at %s+0x%X to %s\n",
									atom->name(), ref.fixupWithTLVStore->offsetInAtom, ref.targetOfTLV->name());
					if ( ! opts.canUseThreadLocalVariables() ) {
						throwf("targeted OS version does not support use of thread local variables in %s", atom->name());
					}
					references.push_back(ref);
				}
			}
		}
	}

	// compute which TLV references will be weak_imports
	std::map<const ld::Atom*,bool> weakImportMap;
	for(std::vector<TLVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
		if ( !it->optimizable ) {
			// record weak_import attribute
			std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(it->targetOfTLV);
			if ( pos == weakImportMap.end() ) {
				// target not in weakImportMap, so add
				weakImportMap[it->targetOfTLV] = it->fixupWithTarget->weakImport;
			}
			else {
				// target in weakImportMap, check for weakness mismatch
				if ( pos->second != it->fixupWithTarget->weakImport ) {
					// found mismatch
					switch ( opts.weakReferenceMismatchTreatment() ) {
						case Options::kWeakReferenceMismatchError:
							throwf("mismatching weak references for symbol: %s", it->targetOfTLV->name());
						case Options::kWeakReferenceMismatchWeak:
							pos->second = true;
							break;
						case Options::kWeakReferenceMismatchNonWeak:
							pos->second = false;
							break;
					}
				}
			}
		}
	}

	// create TLV pointers for TLV references that cannot be optimized
	std::map<const ld::Atom*,ld::Atom*> variableToPointerMap;
	for(std::map<const ld::Atom*,bool>::iterator it=weakImportMap.begin(); it != weakImportMap.end(); ++it) {
		std::map<const ld::Atom*,ld::Atom*>::iterator pos = variableToPointerMap.find(it->first);
		if ( pos == variableToPointerMap.end() ) {
			if (log) fprintf(stderr, "make TLV pointer for %s\n", it->first->name());
			if ( it->first->contentType() != ld::Atom::typeTLV )
				throwf("illegal thread local variable reference to regular symbol %s", it->first->name());
			TLVEntryAtom* tlvp = new TLVEntryAtom(internal, it->first, it->second);
			variableToPointerMap[it->first] = tlvp;
		}
	}

	// sort new tlvp atoms so links are consistent
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() == ld::Section::typeTLVPointers ) {
			std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
		}
	}

	// update references to use TLV pointers or TLV object directly
	for(std::vector<TLVReferenceCluster>::iterator it=references.begin(); it != references.end(); ++it) {
		if ( it->optimizable ) {
			// change store to be LEA instead of load (mov)
			if (log) fprintf(stderr, "optimizing load of TLV to %s into an LEA\n", it->targetOfTLV->name());
			it->fixupWithTLVStore->binding = ld::Fixup::bindingDirectlyBound;
			it->fixupWithTLVStore->u.target = it->targetOfTLV;
			switch ( it->fixupWithTLVStore->kind ) {
				case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreX86PCRel32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA;
					break;
				case ld::Fixup::kindStoreX86Abs32TLVLoad:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA;
					break;
#if SUPPORT_ARCH_arm64
				case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21;
					break;
				case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
					it->fixupWithTLVStore->kind = ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12;
					break;
#endif
				default:
					assert(0 && "bad store kind for TLV optimization");
			}
		}
		else {
			// change target to be new TLV pointer atom
			if (log) fprintf(stderr, "updating load of TLV to %s to load from TLV pointer\n", it->targetOfTLV->name());
			const ld::Atom* tlvpAtom = variableToPointerMap[it->targetOfTLV];
			assert(tlvpAtom != NULL);
			it->fixupWithTarget->binding = ld::Fixup::bindingDirectlyBound;
			it->fixupWithTarget->u.target = tlvpAtom;
		}
	}

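	// For reference, each atom in a typeTLVDefs section is a thread-local
	// variable descriptor.  Its layout (paraphrased from dyld; shown for
	// illustration only) is roughly:
	//
	//		struct TLVDescriptor {
	//			void*			(*thunk)(struct TLVDescriptor*);
	//			unsigned long	key;		// pthread key for this image's TLV storage
	//			unsigned long	offset;		// offset of the variable in the TLV template
	//		};
	//
	// The compiler emits the third field as a pointer to the variable's initial
	// content; the loop below switches those fixups to kindSetTargetTLVTemplateOffset*
	// so the field becomes an offset into the __thread_data/__thread_bss template.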
	// alter tlv definitions to have an offset as third field
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() != ld::Section::typeTLVDefs )
			continue;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			if ( ! opts.canUseThreadLocalVariables() ) {
				throwf("targeted OS version does not support use of thread local variables in %s", atom->name());
			}
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->offsetInAtom != 0 ) {
					assert(fit->binding == ld::Fixup::bindingDirectlyBound && "thread variable def contains pointer to global");
					switch( fit->u.target->contentType() ) {
						case ld::Atom::typeTLVZeroFill:
						case ld::Atom::typeTLVInitialValue:
							switch ( fit->kind ) {
								case ld::Fixup::kindSetTargetAddress:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffset;
									break;
								case ld::Fixup::kindStoreTargetAddressLittleEndian32:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32;
									break;
								case ld::Fixup::kindStoreTargetAddressLittleEndian64:
									fit->kind = ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64;
									break;
								default:
									assert(0 && "bad kind for target in tlv defs");
									break;
							}
							break;
						default:
							assert(0 && "wrong content type for target in tlv defs");
							break;
					}
				}
			}
		}
	}
}


} // namespace tlvp
} // namespace passes
} // namespace ld