/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <dirent.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <assert.h>
#include <ctype.h>      // isdigit()
#include <stdio.h>      // asprintf()
#include <stdlib.h>     // free()
#include <string.h>     // strncmp(), strlen(), memcpy(), memmove()

#include "DyldSharedCache.h"
#include "Diagnostics.h"
#include "CacheBuilder.h"
#include "FileAbstraction.hpp"
#include "MachOFileAbstraction.hpp"
#include "MachOLoaded.h"

// Scan a C++ or Swift length-mangled field.
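// For example, scanning "5Hello3Foo" leaves field pointing at "Hello",
// sets length to 5, and advances string past "Hello" to "3Foo".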
static bool scanMangledField(const char *&string, const char *end,
                             const char *&field, int& length)
{
    // Leading zero not allowed.
    if (*string == '0') return false;

    length = 0;
    field = string;
    while (field < end) {
        char c = *field;
        if (!isdigit(c)) break;
        field++;
        if (__builtin_smul_overflow(length, 10, &length)) return false;
        if (__builtin_sadd_overflow(length, c - '0', &length)) return false;
    }

    string = field + length;
    return length > 0 && string <= end;
}


// copySwiftDemangledName
// Returns the pretty form of the given Swift-mangled class or protocol name.
// Returns nullptr if the string doesn't look like a mangled Swift name.
// The result must be freed with free().
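// For example, "_TtC5MyApp7MyClass" demangles to "MyApp.MyClass", and
// "_TtP5MyApp5Proto_" (with isProtocol) demangles to "MyApp.Proto".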
static char *copySwiftDemangledName(const char *string, bool isProtocol = false)
{
    if (!string) return nullptr;

    // Swift mangling prefix.
    if (strncmp(string, isProtocol ? "_TtP" : "_TtC", 4) != 0) return nullptr;
    string += 4;

    const char *end = string + strlen(string);

    // Module name.
    const char *prefix;
    int prefixLength;
    if (string[0] == 's') {
        // "s" is the Swift module.
        prefix = "Swift";
        prefixLength = 5;
        string += 1;
    } else {
        if (! scanMangledField(string, end, prefix, prefixLength)) return nullptr;
    }

    // Class or protocol name.
    const char *suffix;
    int suffixLength;
    if (! scanMangledField(string, end, suffix, suffixLength)) return nullptr;

    if (isProtocol) {
        // Remainder must be "_".
        if (strcmp(string, "_") != 0) return nullptr;
    } else {
        // Remainder must be empty.
        if (string != end) return nullptr;
    }

    char *result;
    asprintf(&result, "%.*s.%.*s", prefixLength, prefix, suffixLength, suffix);
    return result;
}


class ContentAccessor {
public:
    ContentAccessor(const DyldSharedCache* cache, Diagnostics& diag)
        : _diagnostics(diag)
    {
        _cacheStart       = (uint8_t*)cache;
        _cacheUnslideAddr = cache->unslidLoadAddress();
        _slide            = (uint64_t)cache - _cacheUnslideAddr;
#if SUPPORT_ARCH_arm64e
        _chainedFixups    = (strcmp(cache->archName(), "arm64e") == 0);
#else
        _chainedFixups    = false;
#endif
    }

    // Converts an on-disk vmAddr to the real vmAddr.
    // For a chained fixup this decodes the chain; for a non-chained fixup it does nothing.
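    // On arm64e the on-disk value is a chained-fixup union: an authenticated
    // rebase stores a target offset from the cache base address, while a
    // plain rebase stores a sign-extended target address.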
    uint64_t vmAddrForOnDiskVMAddr(uint64_t vmaddr) {
        if ( _chainedFixups ) {
            dyld3::MachOLoaded::ChainedFixupPointerOnDisk ptr;
            ptr.raw = vmaddr;
            assert(ptr.authRebase.bind == 0);
            if ( ptr.authRebase.auth ) {
                vmaddr = _cacheUnslideAddr + ptr.authRebase.target;
            }
            else {
                vmaddr = ptr.plainRebase.signExtendedTarget();
            }
        }
        return vmaddr;
    }

    void* contentForVMAddr(uint64_t vmaddr) {
        vmaddr = vmAddrForOnDiskVMAddr(vmaddr);
        if ( vmaddr != 0 ) {
            uint64_t offset = vmaddr - _cacheUnslideAddr;
            return _cacheStart + offset;
        } else
            return nullptr;
    }

    uint64_t vmAddrForContent(const void* content) {
        if ( content != nullptr )
            return _cacheUnslideAddr + ((uint8_t*)content - _cacheStart);
        else
            return 0;
    }

    Diagnostics& diagnostics() { return _diagnostics; }

private:
    Diagnostics& _diagnostics;
    uint64_t     _slide;
    uint64_t     _cacheUnslideAddr;
    uint8_t*     _cacheStart;
    bool         _chainedFixups;
};


// Access a section containing a list of pointers
template <typename P, typename T>
class PointerSection
{
    typedef typename P::uint_t pint_t;
public:
    PointerSection(ContentAccessor* cache, const macho_header<P>* mh,
                   const char* segname, const char* sectname)
        : _cache(cache),
          _section(mh->getSection(segname, sectname)),
          _base(_section ? (pint_t*)cache->contentForVMAddr(_section->addr()) : 0),
          _count(_section ? (pint_t)(_section->size() / sizeof(pint_t)) : 0) {
    }

    pint_t count() const { return _count; }

    pint_t getVMAddress(pint_t index) const {
        if ( index >= _count ) {
            _cache->diagnostics().error("index out of range in section %s", _section->sectname());
            return 0;
        }
        return (pint_t)P::getP(_base[index]);
    }

    pint_t getSectionVMAddress() const {
        return (pint_t)_section->addr();
    }

    T get(pint_t index) const {
        return (T)_cache->contentForVMAddr(getVMAddress(index));
    }

    void setVMAddress(pint_t index, pint_t value) {
        if ( index >= _count ) {
            _cache->diagnostics().error("index out of range in section %s", _section->sectname());
            return;
        }
        P::setP(_base[index], value);
    }

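    // Compacts the list in place, sliding non-null entries down over null
    // ones, then shrinks the section size to match the new count.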
    void removeNulls() {
        pint_t shift = 0;
        for (pint_t i = 0; i < _count; i++) {
            pint_t value = _base[i];
            if (value) {
                _base[i-shift] = value;
            } else {
                shift++;
            }
        }
        _count -= shift;
        const_cast<macho_section<P>*>(_section)->set_size(_count * sizeof(pint_t));
    }

private:
    ContentAccessor* const        _cache;
    const macho_section<P>* const _section;
    pint_t* const                 _base;
    pint_t                        _count;   // not const: removeNulls() updates it
};


// Access a section containing an array of structures
template <typename P, typename T>
class ArraySection
{
public:
    ArraySection(ContentAccessor* cache, const macho_header<P>* mh,
                 const char *segname, const char *sectname)
        : _cache(cache),
          _section(mh->getSection(segname, sectname)),
          _base(_section ? (T *)cache->contentForVMAddr(_section->addr()) : 0),
          _count(_section ? _section->size() / sizeof(T) : 0) {
    }

    uint64_t count() const { return _count; }

    T& get(uint64_t index) const {
        if (index >= _count) {
            _cache->diagnostics().error("index out of range in section %s", _section->sectname());
        }
        return _base[index];
    }

private:
    ContentAccessor* const        _cache;
    const macho_section<P>* const _section;
    T * const                     _base;
    uint64_t const                _count;
};


#define SELOPT_WRITE
#include "objc-shared-cache.h"
#include "ObjC1Abstraction.hpp"
#include "ObjC2Abstraction.hpp"


namespace {


template <typename P>
class ObjCSelectorUniquer
{
public:
    typedef typename P::uint_t pint_t;

    ObjCSelectorUniquer(ContentAccessor* cache) : _cache(cache) { }

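    // Returns the canonical vmAddr for the selector string at oldValue.
    // The first occurrence of each selector name wins; later duplicates
    // are redirected to the address recorded for the first.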
    pint_t visit(pint_t oldValue)
    {
        _count++;
        const char *s = (const char *)_cache->contentForVMAddr(oldValue);
        oldValue = (pint_t)_cache->vmAddrForOnDiskVMAddr(oldValue);
        objc_opt::string_map::iterator element =
            _selectorStrings.insert(objc_opt::string_map::value_type(s, oldValue)).first;
        return (pint_t)element->second;
    }

    objc_opt::string_map& strings() {
        return _selectorStrings;
    }

    size_t count() const { return _count; }

private:
    objc_opt::string_map _selectorStrings;
    ContentAccessor*     _cache;
    size_t               _count = 0;
};


template <typename P>
class ClassListBuilder
{
private:
    objc_opt::string_map _classNames;
    objc_opt::class_map  _classes;
    size_t               _count = 0;
    HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& _hInfos;

public:

    ClassListBuilder(HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& hinfos) : _hInfos(hinfos) { }

    void visitClass(ContentAccessor* cache,
                    const macho_header<P>* header,
                    objc_class_t<P>* cls)
    {
        if (cls->isMetaClass(cache)) return;

        const char *name = cls->getName(cache);
        uint64_t name_vmaddr = cache->vmAddrForContent((void*)name);
        uint64_t cls_vmaddr = cache->vmAddrForContent(cls);
        uint64_t hinfo_vmaddr = cache->vmAddrForContent(_hInfos.hinfoForHeader(cache, header));
        _classNames.insert(objc_opt::string_map::value_type(name, name_vmaddr));
        _classes.insert(objc_opt::class_map::value_type(name, std::pair<uint64_t, uint64_t>(cls_vmaddr, hinfo_vmaddr)));
        _count++;
    }

    objc_opt::string_map& classNames() {
        return _classNames;
    }

    objc_opt::class_map& classes() {
        return _classes;
    }

    size_t count() const { return _count; }
};

template <typename P>
class ProtocolOptimizer
{
private:
    typedef typename P::uint_t pint_t;

    objc_opt::string_map   _protocolNames;
    objc_opt::protocol_map _protocols;
    size_t                 _protocolCount;
    size_t                 _protocolReferenceCount;
    Diagnostics&           _diagnostics;

    friend class ProtocolReferenceWalker<P, ProtocolOptimizer<P>>;

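    // Callback for ProtocolReferenceWalker: rewrites one protocol reference
    // to point at the canonical (uniqued) protocol for that name.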
    pint_t visitProtocolReference(ContentAccessor* cache, pint_t oldValue)
    {
        objc_protocol_t<P>* proto = (objc_protocol_t<P>*)
            cache->contentForVMAddr(oldValue);
        pint_t newValue = (pint_t)_protocols[proto->getName(cache)];
        if (oldValue != newValue) _protocolReferenceCount++;
        return newValue;
    }

public:

    ProtocolOptimizer(Diagnostics& diag)
        : _protocolCount(0), _protocolReferenceCount(0), _diagnostics(diag) {
    }

    void addProtocols(ContentAccessor* cache, const macho_header<P>* header)
    {
        PointerSection<P, objc_protocol_t<P> *>
            protocols(cache, header, "__DATA", "__objc_protolist");

        for (pint_t i = 0; i < protocols.count(); i++) {
            objc_protocol_t<P> *proto = protocols.get(i);

            const char *name = proto->getName(cache);
            if (_protocolNames.count(name) == 0) {
                if (proto->getSize() > sizeof(objc_protocol_t<P>)) {
                    _diagnostics.error("objc protocol is too big");
                    return;
                }

                uint64_t name_vmaddr = cache->vmAddrForContent((void*)name);
                uint64_t proto_vmaddr = cache->vmAddrForContent(proto);
                _protocolNames.insert(objc_opt::string_map::value_type(name, name_vmaddr));
                _protocols.insert(objc_opt::protocol_map::value_type(name, proto_vmaddr));
                _protocolCount++;
            }
        }
    }

    const char *writeProtocols(ContentAccessor* cache,
                               uint8_t *& rwdest, size_t& rwremaining,
                               uint8_t *& rodest, size_t& roremaining,
                               CacheBuilder::ASLR_Tracker& aslrTracker,
                               pint_t protocolClassVMAddr)
    {
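        // Canonical protocol objects are carved out of libobjc's read-write
        // arena (rwdest); any newly generated demangled-name strings are
        // carved out of the read-only arena (rodest).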
        if (_protocolCount == 0) return nullptr;

        if (protocolClassVMAddr == 0) {
            return "libobjc's Protocol class symbol not found (metadata not optimized)";
        }

        size_t rwrequired = _protocolCount * sizeof(objc_protocol_t<P>);
        if (rwremaining < rwrequired) {
            return "libobjc's read-write section is too small (metadata not optimized)";
        }

        for (objc_opt::protocol_map::iterator iter = _protocols.begin();
             iter != _protocols.end();
             ++iter)
        {
            objc_protocol_t<P>* oldProto = (objc_protocol_t<P>*)
                cache->contentForVMAddr(iter->second);

            // Create a new protocol object.
            objc_protocol_t<P>* proto = (objc_protocol_t<P>*)rwdest;
            rwdest += sizeof(*proto);
            rwremaining -= sizeof(*proto);

            // Initialize it.
            uint32_t oldSize = oldProto->getSize();
            memcpy(proto, oldProto, oldSize);
            if (!proto->getIsaVMAddr()) {
                proto->setIsaVMAddr(protocolClassVMAddr);
            }
            if (oldSize < sizeof(*proto)) {
                // Protocol object is old. Populate new fields.
                proto->setSize(sizeof(objc_protocol_t<P>));
                // missing extendedMethodTypes is already nil
            }
            // Some protocol objects are big enough to have the
            // demangledName field but don't initialize it.
            // Initialize it here if it is not already set.
            if (!proto->getDemangledName(cache)) {
                const char *roName = proto->getName(cache);
                char *demangledName = copySwiftDemangledName(roName, true);
                if (demangledName) {
                    size_t length = 1 + strlen(demangledName);
                    if (roremaining < length) {
                        return "libobjc's read-only section is too small (metadata not optimized)";
                    }

                    memmove(rodest, demangledName, length);
                    roName = (const char *)rodest;
                    rodest += length;
                    roremaining -= length;

                    free(demangledName);
                }
                proto->setDemangledName(cache, roName, _diagnostics);
            }
            proto->setFixedUp();

            // Redirect the protocol table at our new object.
            iter->second = cache->vmAddrForContent(proto);

            // Add new rebase entries.
            proto->addPointers(cache, aslrTracker);
        }

        return nullptr;
    }

    void updateReferences(ContentAccessor* cache, const macho_header<P>* header)
    {
        ProtocolReferenceWalker<P, ProtocolOptimizer<P>> refs(*this);
        refs.walk(cache, header);
    }

    objc_opt::string_map& protocolNames() {
        return _protocolNames;
    }

    objc_opt::protocol_map& protocols() {
        return _protocols;
    }

    size_t protocolCount() const { return _protocolCount; }
    size_t protocolReferenceCount() const { return _protocolReferenceCount; }
};


static int percent(size_t num, size_t denom) {
    if (denom)
        return (int)(num / (double)denom * 100);
    else
        return 100;
}


template <typename P>
void doOptimizeObjC(DyldSharedCache* cache, bool forProduction, CacheBuilder::ASLR_Tracker& aslrTracker,
                    CacheBuilder::LOH_Tracker& lohTracker,
                    const std::map<void*, std::string>& missingWeakImports, Diagnostics& diag)
{
    typedef typename P::E E;
    typedef typename P::uint_t pint_t;

    diag.verbose("Optimizing objc metadata:\n");
    diag.verbose("  cache type is %s\n", forProduction ? "production" : "development");

    ContentAccessor cacheAccessor(cache, diag);

    size_t headerSize = P::round_up(sizeof(objc_opt::objc_opt_t));
    if (headerSize != sizeof(objc_opt::objc_opt_t)) {
        diag.warning("libobjc's optimization structure size is wrong (metadata not optimized)");
    }

    //
    // Find libobjc's empty sections and build list of images with objc metadata
    //
    __block const macho_section<P> *optROSection = nullptr;
    __block const macho_section<P> *optRWSection = nullptr;
    __block const macho_section<P> *optPointerListSection = nullptr;
    __block std::vector<const macho_header<P>*> objcDylibs;
    cache->forEachImage(^(const mach_header* machHeader, const char* installName) {
        const macho_header<P>* mh = (const macho_header<P>*)machHeader;
        if ( strstr(installName, "/libobjc.") != nullptr ) {
            optROSection          = mh->getSection("__TEXT", "__objc_opt_ro");
            optRWSection          = mh->getSection("__DATA", "__objc_opt_rw");
            optPointerListSection = mh->getSection("__DATA", "__objc_opt_ptrs");
        }
        if ( mh->getSection("__DATA", "__objc_imageinfo") || mh->getSection("__OBJC", "__image_info") ) {
            objcDylibs.push_back(mh);
        }
        // log("installName %s at mhdr 0x%016lx", installName, (uintptr_t)cacheAccessor.vmAddrForContent((void*)mh));
    });
    if ( optROSection == nullptr ) {
        diag.warning("libobjc's read-only section missing (metadata not optimized)");
        return;
    }
    if ( optRWSection == nullptr ) {
        diag.warning("libobjc's read/write section missing (metadata not optimized)");
        return;
    }
    if ( optPointerListSection == nullptr ) {
        diag.warning("libobjc's pointer list section missing (metadata not optimized)");
        return;
    }

    uint8_t* optROData = (uint8_t*)cacheAccessor.contentForVMAddr(optROSection->addr());
    if ( optROData == nullptr ) {
        diag.warning("libobjc's read-only section has bad content");
        return;
    }
    size_t optRORemaining = optROSection->size();
    uint8_t* optRWData = (uint8_t*)cacheAccessor.contentForVMAddr(optRWSection->addr());
    size_t optRWRemaining = optRWSection->size();
    if (optRORemaining < headerSize) {
        diag.warning("libobjc's read-only section is too small (metadata not optimized)");
        return;
    }
    objc_opt::objc_opt_t* optROHeader = (objc_opt::objc_opt_t *)optROData;
    optROData += headerSize;
    optRORemaining -= headerSize;
    if (E::get32(optROHeader->version) != objc_opt::VERSION) {
        diag.warning("libobjc's read-only section version is unrecognized (metadata not optimized)");
        return;
    }

    if (optPointerListSection->size() < sizeof(objc_opt::objc_opt_pointerlist_tt<pint_t>)) {
        diag.warning("libobjc's pointer list section is too small (metadata not optimized)");
        return;
    }
    const objc_opt::objc_opt_pointerlist_tt<pint_t> *optPointerList = (const objc_opt::objc_opt_pointerlist_tt<pint_t> *)cacheAccessor.contentForVMAddr(optPointerListSection->addr());

    // Write nothing to optROHeader until everything else is written.
    // If something fails below, libobjc will not use the section.


    //
    // Make copy of objcList and sort that list.
    //
    std::vector<const macho_header<P>*> addressSortedDylibs = objcDylibs;
    std::sort(addressSortedDylibs.begin(), addressSortedDylibs.end(), [](const macho_header<P>* lmh, const macho_header<P>* rmh) -> bool {
        return lmh < rmh;
    });

    //
    // Build HeaderInfo list in cache
    //
    // First the RO header info
    // log("writing out %d RO dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optROSection->size() - optRORemaining));
    uint64_t hinfoROVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    HeaderInfoOptimizer<P, objc_header_info_ro_t<P>> hinfoROOptimizer;
    const char* err = hinfoROOptimizer.init((uint32_t)objcDylibs.size(), optROData, optRORemaining);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    else {
        for (const macho_header<P>* mh : addressSortedDylibs) {
            hinfoROOptimizer.update(&cacheAccessor, mh, aslrTracker);
        }
    }

    // Then the RW header info
    // log("writing out %d RW dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optRWSection->size() - optRWRemaining));
    uint64_t hinfoRWVMAddr = (uint64_t)optRWSection->addr() + (uint64_t)optRWSection->size() - optRWRemaining;
    HeaderInfoOptimizer<P, objc_header_info_rw_t<P>> hinfoRWOptimizer;
    err = hinfoRWOptimizer.init((uint32_t)objcDylibs.size(), optRWData, optRWRemaining);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    else {
        for (const macho_header<P>* mh : addressSortedDylibs) {
            hinfoRWOptimizer.update(&cacheAccessor, mh, aslrTracker);
        }
    }

    //
    // Update selector references and build selector list
    //
    // This is SAFE: if we run out of room for the selector table,
    // the modified binaries are still usable.
    //
    // Heuristic: choose selectors from libraries with more selector cstring data first.
    // This tries to localize selector cstring memory.
    //
    ObjCSelectorUniquer<P> uniq(&cacheAccessor);
    std::vector<const macho_header<P>*> sizeSortedDylibs = objcDylibs;
    std::sort(sizeSortedDylibs.begin(), sizeSortedDylibs.end(), [](const macho_header<P>* lmh, const macho_header<P>* rmh) -> bool {
        const macho_section<P>* lSection = lmh->getSection("__TEXT", "__objc_methname");
        const macho_section<P>* rSection = rmh->getSection("__TEXT", "__objc_methname");
        uint64_t lSelectorSize = (lSection ? lSection->size() : 0);
        uint64_t rSelectorSize = (rSection ? rSection->size() : 0);
        return lSelectorSize > rSelectorSize;
    });

    SelectorOptimizer<P, ObjCSelectorUniquer<P> > selOptimizer(uniq);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        LegacySelectorUpdater<P, ObjCSelectorUniquer<P>>::update(&cacheAccessor, mh, uniq);
        selOptimizer.optimize(&cacheAccessor, mh);
    }

    diag.verbose("  uniqued %6lu selectors\n", uniq.strings().size());
    diag.verbose("  updated %6lu selector references\n", uniq.count());

    uint64_t seloptVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    objc_opt::objc_selopt_t *selopt = new(optROData) objc_opt::objc_selopt_t;
    err = selopt->write(seloptVMAddr, optRORemaining, uniq.strings());
    if (err) {
        diag.warning("%s", err);
        return;
    }
    optROData += selopt->size();
    optRORemaining -= selopt->size();
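    // Read the table stats now; after the byteswap below the table is in
    // target byte order and must not be read through this pointer again.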
    uint32_t seloptCapacity = selopt->capacity;
    uint32_t seloptOccupied = selopt->occupied;
    selopt->byteswap(E::little_endian);
    selopt = nullptr;

    diag.verbose("  selector table occupancy %u/%u (%u%%)\n",
                 seloptOccupied, seloptCapacity,
                 (unsigned)(seloptOccupied/(double)seloptCapacity*100));


    //
    // Detect classes that have missing weak-import superclasses.
    //
    // Production only. Development cache does not do this: a replacement
    // library could omit a class at runtime that was present during
    // cache construction.
    //
    // This is SAFE: the binaries themselves are unmodified.
    bool noMissingWeakSuperclasses = false; // dev cache can't promise otherwise
    if (forProduction) {
        WeakClassDetector<P> weakopt;
        noMissingWeakSuperclasses =
            weakopt.noMissingWeakSuperclasses(&cacheAccessor, missingWeakImports, sizeSortedDylibs);

        // Shared cache does not currently support unbound weak references.
        // Here we assert that there are none. If support is added later then
        // this assertion needs to be removed and this path needs to be tested.
        if (!noMissingWeakSuperclasses) {
            diag.error("Some Objective-C class has a superclass that is "
                       "weak-import and missing from the cache.");
        }
    }


    //
    // Build class table.
    //
    // This is SAFE: the binaries themselves are unmodified.
    ClassListBuilder<P> classes(hinfoROOptimizer);
    ClassWalker<P, ClassListBuilder<P>> classWalker(classes);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        classWalker.walk(&cacheAccessor, mh);
    }

    diag.verbose("  recorded % 6ld classes\n", classes.classNames().size());

    uint64_t clsoptVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    objc_opt::objc_clsopt_t *clsopt = new(optROData) objc_opt::objc_clsopt_t;
    err = clsopt->write(clsoptVMAddr, optRORemaining,
                        classes.classNames(), classes.classes(), false);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    optROData += clsopt->size();
    optRORemaining -= clsopt->size();
    size_t duplicateCount = clsopt->duplicateCount();
    uint32_t clsoptCapacity = clsopt->capacity;
    uint32_t clsoptOccupied = clsopt->occupied;
    clsopt->byteswap(E::little_endian);
    clsopt = nullptr;

    diag.verbose("  found    % 6ld duplicate classes\n",
                 duplicateCount);
    diag.verbose("  class table occupancy %u/%u (%u%%)\n",
                 clsoptOccupied, clsoptCapacity,
                 (unsigned)(clsoptOccupied/(double)clsoptCapacity*100));


    //
    // Sort method lists.
    //
    // This is SAFE: modified binaries are still usable as unsorted lists.
    // This must be done AFTER uniquing selectors.
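    // (Method lists are sorted by selector address, so selectors must
    // already have their final, uniqued values.)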
    MethodListSorter<P> methodSorter;
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        methodSorter.optimize(&cacheAccessor, mh);
    }

    diag.verbose("  sorted   % 6ld method lists\n", methodSorter.optimized());


    // Unique protocols and build protocol table.

    // This is SAFE: no protocol references are updated yet.
    // This must be done AFTER updating method lists.

    ProtocolOptimizer<P> protocolOptimizer(diag);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        protocolOptimizer.addProtocols(&cacheAccessor, mh);
    }

    diag.verbose("  uniqued  % 6ld protocols\n",
                 protocolOptimizer.protocolCount());

    pint_t protocolClassVMAddr = (pint_t)P::getP(optPointerList->protocolClass);
    err = protocolOptimizer.writeProtocols(&cacheAccessor,
                                           optRWData, optRWRemaining,
                                           optROData, optRORemaining,
                                           aslrTracker, protocolClassVMAddr);
    if (err) {
        diag.warning("%s", err);
        return;
    }

    uint64_t protocoloptVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    objc_opt::objc_protocolopt_t *protocolopt = new (optROData) objc_opt::objc_protocolopt_t;
    err = protocolopt->write(protocoloptVMAddr, optRORemaining,
                             protocolOptimizer.protocolNames(),
                             protocolOptimizer.protocols(), true);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    optROData += protocolopt->size();
    optRORemaining -= protocolopt->size();
    uint32_t protocoloptCapacity = protocolopt->capacity;
    uint32_t protocoloptOccupied = protocolopt->occupied;
    protocolopt->byteswap(E::little_endian);
    protocolopt = nullptr;

    diag.verbose("  protocol table occupancy %u/%u (%u%%)\n",
                 protocoloptOccupied, protocoloptCapacity,
                 (unsigned)(protocoloptOccupied/(double)protocoloptCapacity*100));


    // Redirect protocol references to the uniqued protocols.

    // This is SAFE: the new protocol objects are still usable as-is.
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        protocolOptimizer.updateReferences(&cacheAccessor, mh);
    }

    diag.verbose("  updated  % 6ld protocol references\n", protocolOptimizer.protocolReferenceCount());


    //
    // Repair ivar offsets.
    //
    // This is SAFE: the runtime always validates ivar offsets at runtime.
    IvarOffsetOptimizer<P> ivarOffsetOptimizer;
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        ivarOffsetOptimizer.optimize(&cacheAccessor, mh);
    }

    diag.verbose("  updated  % 6ld ivar offsets\n", ivarOffsetOptimizer.optimized());


    // Collect flags.
    uint32_t headerFlags = 0;
    if (forProduction) {
        headerFlags |= objc_opt::IsProduction;
    }
    if (noMissingWeakSuperclasses) {
        headerFlags |= objc_opt::NoMissingWeakSuperclasses;
    }


    // Success. Mark dylibs as optimized.
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        const macho_section<P>* imageInfoSection = mh->getSection("__DATA", "__objc_imageinfo");
        if (!imageInfoSection) {
            imageInfoSection = mh->getSection("__OBJC", "__image_info");
        }
        if (imageInfoSection) {
            objc_image_info<P>* info = (objc_image_info<P>*)cacheAccessor.contentForVMAddr(imageInfoSection->addr());
            info->setOptimizedByDyld();
        }
    }


    // Success. Update RO header last.
    E::set32(optROHeader->flags, headerFlags);
    E::set32(optROHeader->selopt_offset, (uint32_t)(seloptVMAddr - optROSection->addr()));
    E::set32(optROHeader->clsopt_offset, (uint32_t)(clsoptVMAddr - optROSection->addr()));
    E::set32(optROHeader->protocolopt_offset, (uint32_t)(protocoloptVMAddr - optROSection->addr()));
    E::set32(optROHeader->headeropt_ro_offset, (uint32_t)(hinfoROVMAddr - optROSection->addr()));
    E::set32(optROHeader->headeropt_rw_offset, (uint32_t)(hinfoRWVMAddr - optROSection->addr()));

    // Log statistics.
    size_t roSize = optROSection->size() - optRORemaining;
    size_t rwSize = optRWSection->size() - optRWRemaining;
    diag.verbose("  %lu/%llu bytes (%d%%) used in libobjc read-only optimization section\n",
                 roSize, optROSection->size(), percent(roSize, optROSection->size()));
    diag.verbose("  %lu/%llu bytes (%d%%) used in libobjc read/write optimization section\n",
                 rwSize, optRWSection->size(), percent(rwSize, optRWSection->size()));
    diag.verbose("  wrote objc metadata optimization version %d\n", objc_opt::VERSION);

    // Now that objc has uniqued the selector references, we can apply the
    // LOHs (Linker Optimization Hints): ADRP/LDR pairs that load a selector
    // pointer become ADRP/ADD pairs that form the selector string's address
    // directly.
    if (forProduction) {
        uint64_t lohADRPCount = 0;
        uint64_t lohLDRCount = 0;

        for (auto& targetAndInstructions : lohTracker) {
            uint64_t targetVMAddr = targetAndInstructions.first;
            if (!selOptimizer.isSelectorRefAddress((pint_t)targetVMAddr))
                continue;

            std::set<void*>& instructions = targetAndInstructions.second;
            // We make two passes over the instructions: the first validates
            // them, the second actually updates them.
            for (unsigned pass = 0; pass != 2; ++pass) {
                uint32_t adrpCount = 0;
                uint32_t ldrCount = 0;
                for (void* instructionAddress : instructions) {
                    uint32_t& instruction = *(uint32_t*)instructionAddress;
                    uint64_t instructionVMAddr = cacheAccessor.vmAddrForContent(&instruction);
                    uint64_t selRefContent = *(uint64_t*)cacheAccessor.contentForVMAddr(targetVMAddr);
                    const char* selectorString = (const char*)cacheAccessor.contentForVMAddr(selRefContent);
                    uint64_t selectorStringVMAddr = cacheAccessor.vmAddrForContent(selectorString);

                    if ( (instruction & 0x9F000000) == 0x90000000 ) {
                        // ADRP
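                        // ADRP Xd, imm: bits 31 and 28:24 carry the opcode
                        // (mask 0x9F000000, value 0x90000000); the signed
                        // 21-bit page offset is split into immlo (bits 30:29)
                        // and immhi (bits 23:5).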
                        int64_t pageDistance = ((selectorStringVMAddr & ~0xFFF) - (instructionVMAddr & ~0xFFF));
                        int64_t newPage21 = pageDistance >> 12;

                        if (pass == 0) {
                            if ( (newPage21 > 2097151) || (newPage21 < -2097151) ) {
                                diag.verbose("Out of bounds ADRP selector reference target\n");
                                instructions.clear();
                                break;
                            }
                            ++adrpCount;
                        }

                        if (pass == 1) {
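                            // Keep the opcode and Rd bits (0x9F00001F), then
                            // insert immlo (low 2 bits of newPage21 into bits
                            // 30:29) and immhi (remaining 19 bits into bits
                            // 23:5).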
                            instruction = (instruction & 0x9F00001F) | ((newPage21 << 29) & 0x60000000) | ((newPage21 << 3) & 0x00FFFFE0);
                            ++lohADRPCount;
                        }
                        continue;
                    }

                    if ( (instruction & 0x3B000000) == 0x39000000 ) {
                        // LDR/STR. STR shouldn't be possible as this is a selref!
                        if (pass == 0) {
                            if ( (instruction & 0xC0C00000) != 0xC0400000 ) {
                                // Not a load, or dest reg isn't xN, or uses sign extension
                                diag.verbose("Bad LDR for selector reference optimisation\n");
                                instructions.clear();
                                break;
                            }
                            if ( (instruction & 0x04000000) != 0 ) {
                                // Loading a float
                                diag.verbose("Bad LDR for selector reference optimisation\n");
                                instructions.clear();
                                break;
                            }
                            ++ldrCount;
                        }

                        if (pass == 1) {
                            uint32_t ldrDestReg = (instruction & 0x1F);
                            uint32_t ldrBaseReg = ((instruction >> 5) & 0x1F);

                            // Convert the LDR to an ADD
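                            // The paired ADRP already set the register to the
                            // string's page; adding the low 12 bits of its
                            // address yields the selector string pointer
                            // without any memory load.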
                            instruction = 0x91000000;
                            instruction |= ldrDestReg;
                            instruction |= ldrBaseReg << 5;
                            instruction |= (selectorStringVMAddr & 0xFFF) << 10;

                            ++lohLDRCount;
                        }
                        continue;
                    }

                    if ( (instruction & 0xFFC00000) == 0x91000000 ) {
                        // ADD imm12
                        // We don't support ADDs.
                        diag.verbose("Bad ADD for selector reference optimisation\n");
                        instructions.clear();
                        break;
                    }

                    diag.verbose("Unknown instruction for selref optimisation\n");
                    instructions.clear();
                    break;
                }
                if (pass == 0) {
                    // If we didn't see at least one ADRP/LDR in pass one then don't optimize this location
                    if ((adrpCount == 0) || (ldrCount == 0)) {
                        instructions.clear();
                        break;
                    }
                }
            }
        }

        diag.verbose("  Optimized %lld ADRP LOHs\n", lohADRPCount);
        diag.verbose("  Optimized %lld LDR LOHs\n", lohLDRCount);
    }
}


} // anon namespace

void CacheBuilder::optimizeObjC()
{
    if ( _archLayout->is64 )
        doOptimizeObjC<Pointer64<LittleEndian>>((DyldSharedCache*)_readExecuteRegion.buffer, _options.optimizeStubs, _aslrTracker, _lohTracker, _missingWeakImports, _diagnostics);
    else
        doOptimizeObjC<Pointer32<LittleEndian>>((DyldSharedCache*)_readExecuteRegion.buffer, _options.optimizeStubs, _aslrTracker, _lohTracker, _missingWeakImports, _diagnostics);
}