/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <dlfcn.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>
#include <vector>
#include <map>

#include "MachOFileAbstraction.hpp"
#include "ld.hpp"
#include "got.h"
#include "configure.h"

namespace ld {
namespace passes {
namespace got {

class File; // forward reference

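// A GOT entry synthesized by this pass: a pointer-sized atom placed in the
// __DATA,__got section whose single fixup stores the target atom's address.
// The atom has no raw content of its own; the pointer value is written
// entirely by the fixup at layout time.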
class GOTEntryAtom : public ld::Atom {
public:
    GOTEntryAtom(ld::Internal& internal, const ld::Atom* target, bool weakImport, bool is64)
        : ld::Atom(_s_section, ld::Atom::definitionRegular, ld::Atom::combineNever,
                   ld::Atom::scopeLinkageUnit, ld::Atom::typeNonLazyPointer,
                   symbolTableNotIn, false, false, false, (is64 ? ld::Atom::Alignment(3) : ld::Atom::Alignment(2))),
          _fixup(0, ld::Fixup::k1of1, (is64 ? ld::Fixup::kindStoreTargetAddressLittleEndian64 : ld::Fixup::kindStoreTargetAddressLittleEndian32), target),
          _target(target),
          _is64(is64)
        { _fixup.weakImport = weakImport; internal.addAtom(*this); }

    virtual const ld::File*         file() const            { return NULL; }
    virtual const char*             name() const            { return _target->name(); }
    virtual uint64_t                size() const            { return (_is64 ? 8 : 4); }
    virtual uint64_t                objectAddress() const   { return 0; }
    virtual void                    copyRawContent(uint8_t buffer[]) const { }
    virtual void                    setScope(Scope)         { }
    virtual ld::Fixup::iterator     fixupsBegin() const     { return &_fixup; }
    virtual ld::Fixup::iterator     fixupsEnd() const       { return &((ld::Fixup*)&_fixup)[1]; }

private:
    mutable ld::Fixup       _fixup;
    const ld::Atom*         _target;
    bool                    _is64;

    static ld::Section      _s_section;
};

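// all GOT entries are placed in the __DATA,__got non-lazy-pointer section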
ld::Section GOTEntryAtom::_s_section("__DATA", "__got", ld::Section::typeNonLazyPointer);

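// Returns true if 'fixup' is a GOT reference to 'targetOfGOT', false otherwise.
// On a true return, *optimizable tells the caller whether the GOT load can be
// rewritten as a direct address computation (LEA); for example, on x86_64:
//
//     movq  _foo@GOTPCREL(%rip), %rax   // load _foo's address from its GOT slot
// can become
//     leaq  _foo(%rip), %rax            // compute _foo's address directly
//
// which is only safe when the target's final address is known at link time.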
static bool gotFixup(const Options& opts, ld::Internal& internal, const ld::Atom* targetOfGOT, const ld::Fixup* fixup, bool* optimizable)
{
    switch (fixup->kind) {
        case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
#if SUPPORT_ARCH_arm64
        case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
        case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
#endif
            // start by assuming this can be optimized
            *optimizable = true;
            // cannot do LEA optimization if target is in another dylib
            if ( targetOfGOT->definition() == ld::Atom::definitionProxy )
                *optimizable = false;
            // cannot do LEA optimization if target is in __huge section
            if ( internal.usingHugeSections && (targetOfGOT->size() > 1024*1024)
                 && ( (targetOfGOT->section().type() == ld::Section::typeZeroFill)
                   || (targetOfGOT->section().type() == ld::Section::typeTentativeDefs)) ) {
                *optimizable = false;
            }
            if ( targetOfGOT->scope() == ld::Atom::scopeGlobal ) {
                // cannot do LEA optimization if target is weak exported symbol
                if ( (targetOfGOT->definition() == ld::Atom::definitionRegular) && (targetOfGOT->combine() == ld::Atom::combineByName) ) {
                    switch ( opts.outputKind() ) {
                        case Options::kDynamicExecutable:
                        case Options::kDynamicLibrary:
                        case Options::kDynamicBundle:
                        case Options::kKextBundle:
                            *optimizable = false;
                            break;
                        case Options::kStaticExecutable:
                        case Options::kDyld:
                        case Options::kPreload:
                        case Options::kObjectFile:
                            break;
                    }
                }
                // cannot do LEA optimization if target is interposable
                if ( opts.interposable(targetOfGOT->name()) )
                    *optimizable = false;
                // cannot do LEA optimization if target is resolver function
                if ( targetOfGOT->contentType() == ld::Atom::typeResolver )
                    *optimizable = false;
                // cannot do LEA optimization for flat-namespace
                if ( opts.nameSpace() != Options::kTwoLevelNameSpace )
                    *optimizable = false;
            }
            else if ( targetOfGOT->scope() == ld::Atom::scopeLinkageUnit ) {
                // <rdar://problem/12379969> don't do optimization if target is in custom segment
                if ( opts.sharedRegionEligible() ) {
                    const char* segName = targetOfGOT->section().segmentName();
                    if ( (strcmp(segName, "__TEXT") != 0) && (strcmp(segName, "__DATA") != 0) ) {
                        *optimizable = false;
                    }
                }
            }
            return true;
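        // these fixup kinds use the address of the GOT slot itself,
        // so a GOT entry is always required and never optimized away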
        case ld::Fixup::kindStoreX86PCRel32GOT:
#if SUPPORT_ARCH_arm64
        case ld::Fixup::kindStoreARM64PCRelToGOT:
#endif
            *optimizable = false;
            return true;
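        // personality function references always go through a GOT entry
        // and are never optimized down to a direct reference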
        case ld::Fixup::kindNoneGroupSubordinatePersonality:
            *optimizable = false;
            return true;
        default:
            break;
    }

    return false;
}

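// comparator that orders atoms by name, so the __got section gets the same
// layout on every link regardless of how the gotMap keys happen to be ordered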
struct AtomByNameSorter
{
    bool operator()(const ld::Atom* left, const ld::Atom* right)
    {
        return (strcmp(left->name(), right->name()) < 0);
    }
};

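// The GOT pass runs in two phases: first walk every atom's fixups to find GOT
// references, rewriting loads to LEAs where gotFixup() says that is safe and
// collecting the targets that genuinely need a GOT slot; then create one
// GOTEntryAtom per such target and re-point the remaining fixups at it.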
void doPass(const Options& opts, ld::Internal& internal)
{
    const bool log = false;

    // only make got section in final linked images
    if ( opts.outputKind() == Options::kObjectFile )
        return;

    // walk all atoms and fixups looking for GOT-able references
    // don't create GOT atoms during this loop because that could invalidate the sections iterator
    std::vector<const ld::Atom*> atomsReferencingGOT;
    std::map<const ld::Atom*,ld::Atom*> gotMap;
    std::map<const ld::Atom*,bool> weakImportMap;
    atomsReferencingGOT.reserve(128);
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
            const ld::Atom* atom = *ait;
            bool atomUsesGOT = false;
            const ld::Atom* targetOfGOT = NULL;
            bool targetIsWeakImport = false;
            for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
                if ( fit->firstInCluster() )
                    targetOfGOT = NULL;
                switch ( fit->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                        targetOfGOT = internal.indirectBindingTable[fit->u.bindingIndex];
                        targetIsWeakImport = fit->weakImport;
                        break;
                    case ld::Fixup::bindingDirectlyBound:
                        targetOfGOT = fit->u.target;
                        targetIsWeakImport = fit->weakImport;
                        break;
                    default:
                        break;
                }
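                // ignore fixups that are not references through the GOT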
                bool optimizable;
                if ( !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
                    continue;
                if ( optimizable ) {
                    // change from load of GOT entry to lea of target
                    if ( log ) fprintf(stderr, "optimized GOT usage in %s to %s\n", atom->name(), targetOfGOT->name());
                    switch ( fit->binding ) {
                        case ld::Fixup::bindingsIndirectlyBound:
                        case ld::Fixup::bindingDirectlyBound:
                            fit->binding = ld::Fixup::bindingDirectlyBound;
                            fit->u.target = targetOfGOT;
                            switch ( fit->kind ) {
                                case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
                                    fit->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA;
                                    break;
#if SUPPORT_ARCH_arm64
                                case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
                                    fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21;
                                    break;
                                case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
                                    fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12;
                                    break;
#endif
                                default:
                                    assert(0 && "unsupported GOT reference kind");
                                    break;
                            }
                            break;
                        default:
                            assert(0 && "unsupported GOT reference");
                            break;
                    }
                }
                else {
                    // remember that we need to use GOT in this function
                    if ( log ) fprintf(stderr, "found GOT use in %s to %s\n", atom->name(), targetOfGOT->name());
                    if ( !atomUsesGOT ) {
                        atomsReferencingGOT.push_back(atom);
                        atomUsesGOT = true;
                    }
                    gotMap[targetOfGOT] = NULL;
                    // record weak_import attribute
                    std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(targetOfGOT);
                    if ( pos == weakImportMap.end() ) {
                        // target not in weakImportMap, so add
                        if ( log ) fprintf(stderr, "weakImportMap[%s] = %d\n", targetOfGOT->name(), targetIsWeakImport);
                        weakImportMap[targetOfGOT] = targetIsWeakImport;
                    }
                    else {
                        // target in weakImportMap, check for weakness mismatch
                        if ( pos->second != targetIsWeakImport ) {
                            // found mismatch
                            switch ( opts.weakReferenceMismatchTreatment() ) {
                                case Options::kWeakReferenceMismatchError:
                                    throwf("mismatching weak references for symbol: %s", targetOfGOT->name());
                                case Options::kWeakReferenceMismatchWeak:
                                    pos->second = true;
                                    break;
                                case Options::kWeakReferenceMismatchNonWeak:
                                    pos->second = false;
                                    break;
                            }
                        }
                    }
                }
            }
        }
    }

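    // GOT entries are pointer-sized, so choose 8 or 4 bytes based on the target architecture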
    bool is64 = false;
    switch ( opts.architecture() ) {
#if SUPPORT_ARCH_i386
        case CPU_TYPE_I386:
            is64 = false;
            break;
#endif
#if SUPPORT_ARCH_x86_64
        case CPU_TYPE_X86_64:
            is64 = true;
            break;
#endif
#if SUPPORT_ARCH_arm_any
        case CPU_TYPE_ARM:
            is64 = false;
            break;
#endif
#if SUPPORT_ARCH_arm64
        case CPU_TYPE_ARM64:
            is64 = true;
            break;
#endif
    }

    // make GOT entries
    for (std::map<const ld::Atom*,ld::Atom*>::iterator it = gotMap.begin(); it != gotMap.end(); ++it) {
        it->second = new GOTEntryAtom(internal, it->first, weakImportMap[it->first], is64);
    }

    // update atoms to use GOT entries
    for (std::vector<const ld::Atom*>::iterator it=atomsReferencingGOT.begin(); it != atomsReferencingGOT.end(); ++it) {
        const ld::Atom* atom = *it;
        const ld::Atom* targetOfGOT = NULL;
        ld::Fixup::iterator fitThatSetTarget = NULL;
        for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
            if ( fit->firstInCluster() ) {
                targetOfGOT = NULL;
                fitThatSetTarget = NULL;
            }
            switch ( fit->binding ) {
                case ld::Fixup::bindingsIndirectlyBound:
                    targetOfGOT = internal.indirectBindingTable[fit->u.bindingIndex];
                    fitThatSetTarget = fit;
                    break;
                case ld::Fixup::bindingDirectlyBound:
                    targetOfGOT = fit->u.target;
                    fitThatSetTarget = fit;
                    break;
                default:
                    break;
            }
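            // ignore fixups that are not references through the GOT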
            bool optimizable;
            if ( (targetOfGOT == NULL) || !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
                continue;
            if ( !optimizable ) {
                // GOT use was not optimized away, so bind it to the GOT entry instead
                assert(fitThatSetTarget != NULL);
                switch ( fitThatSetTarget->binding ) {
                    case ld::Fixup::bindingsIndirectlyBound:
                    case ld::Fixup::bindingDirectlyBound:
                        fitThatSetTarget->binding = ld::Fixup::bindingDirectlyBound;
                        fitThatSetTarget->u.target = gotMap[targetOfGOT];
                        break;
                    default:
                        assert(0 && "unsupported GOT reference");
                        break;
                }
            }
        }
    }

    // sort new atoms so the output is consistent from link to link
    for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
        ld::Internal::FinalSection* sect = *sit;
        if ( sect->type() == ld::Section::typeNonLazyPointer ) {
            std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
        }
    }
}


} // namespace got
} // namespace passes
} // namespace ld