/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include "mega-dylib-utils.h"
#include "Logging.h"
#include "MachOFileAbstraction.hpp"


#include <dirent.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <assert.h>


// Scan a C++ or Swift length-mangled field.
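// A mangled field is a decimal length immediately followed by that many
// characters: e.g. "5Swift" scans as the 5-character field "Swift".
// On success, `string` is advanced past the scanned field.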
static bool scanMangledField(const char *&string, const char *end,
                             const char *&field, int& length)
{
    // Leading zero not allowed.
    if (*string == '0') return false;

    length = 0;
    field = string;
    while (field < end) {
        char c = *field;
        if (!isdigit(c)) break;
        field++;
        if (__builtin_smul_overflow(length, 10, &length)) return false;
        if (__builtin_sadd_overflow(length, c - '0', &length)) return false;
    }

    string = field + length;
    return length > 0 && string <= end;
}


// copySwiftDemangledName
// Returns the pretty form of the given Swift-mangled class or protocol name.
// Returns nullptr if the string doesn't look like a mangled Swift name.
// The result must be freed with free().
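// For example, "_TtC6Module5Class" demangles to "Module.Class", and with
// isProtocol=true, "_TtP6Module5Proto_" demangles to "Module.Proto"
// (protocol manglings carry a trailing "_").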
static char *copySwiftDemangledName(const char *string, bool isProtocol = false)
{
    if (!string) return nullptr;

    // Swift mangling prefix.
    if (strncmp(string, isProtocol ? "_TtP" : "_TtC", 4) != 0) return nullptr;
    string += 4;

    const char *end = string + strlen(string);

    // Module name.
    const char *prefix;
    int prefixLength;
    if (string[0] == 's') {
        // "s" is the Swift module.
        prefix = "Swift";
        prefixLength = 5;
        string += 1;
    } else {
        if (!scanMangledField(string, end, prefix, prefixLength)) return nullptr;
    }

    // Class or protocol name.
    const char *suffix;
    int suffixLength;
    if (!scanMangledField(string, end, suffix, suffixLength)) return nullptr;

    if (isProtocol) {
        // Remainder must be "_".
        if (strcmp(string, "_") != 0) return nullptr;
    } else {
        // Remainder must be empty.
        if (string != end) return nullptr;
    }

    char *result;
    asprintf(&result, "%.*s.%.*s", prefixLength, prefix, suffixLength, suffix);
    return result;
}


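// Maps between VM addresses in the shared cache and the addresses where
// the cache's regions are currently mapped in this process's buffer.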
class ContentAccessor {
public:
    ContentAccessor(SharedCache& cache) {
        cache.forEachRegion([&] (void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
            Info info = { (uint8_t*)content, (uint8_t*)content+size, vmAddr, vmAddr+size };
            _regions.push_back(info);
        });
    }

    void* contentForVMAddr(uint64_t vmaddr) {
        for (Info& info : _regions) {
            if ( (info.startAddr <= vmaddr) && (vmaddr < info.endAddr) )
                return (void*)(info.contentStart + vmaddr - info.startAddr);
        }
        if ( vmaddr == 0 )
            return nullptr;
        terminate("contentForVMAddr(0x%0llX) invalid vmaddr in ObjC data", vmaddr);
    }

    uint64_t vmAddrForContent(const void* content) {
        for (Info& info : _regions) {
            if ( (info.contentStart <= content) && (content < info.contentEnd) )
                return info.startAddr + ((uint8_t*)content - (uint8_t*)info.contentStart);
        }
        terminate("vmAddrForContent(%p) invalid content pointer in ObjC data", content);
    }

private:
    struct Info { uint8_t* contentStart; uint8_t* contentEnd; uint64_t startAddr; uint64_t endAddr; };
    std::vector<Info> _regions;
};


// Access a section containing a list of pointers
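// For example, an image's protocol list is read below as:
//     PointerSection<P, objc_protocol_t<P> *>
//         protocols(cache, header, "__DATA", "__objc_protolist");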
template <typename P, typename T>
class PointerSection
{
    typedef typename P::uint_t pint_t;
public:
    PointerSection(ContentAccessor* cache, const macho_header<P>* mh,
                   const char* segname, const char* sectname)
        : _cache(cache),
          _section(mh->getSection(segname, sectname)),
          _base(_section ? (pint_t*)cache->contentForVMAddr(_section->addr()) : 0),
          _count(_section ? (pint_t)(_section->size() / sizeof(pint_t)) : 0) {
    }

    pint_t count() const { return _count; }

    pint_t getVMAddress(pint_t index) const {
        if ( index >= _count )
            terminate("index out of range in section %s", _section->sectname());
        return (pint_t)P::getP(_base[index]);
    }

    T get(pint_t index) const {
        return (T)_cache->contentForVMAddr(getVMAddress(index));
    }

    void setVMAddress(pint_t index, pint_t value) {
        if (index >= _count)
            terminate("index out of range in section %s", _section->sectname());
        P::setP(_base[index], value);
    }

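    // Compact the section in place by dropping null entries, then shrink
    // the recorded section size to match the new count.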
    void removeNulls() {
        pint_t shift = 0;
        for (pint_t i = 0; i < _count; i++) {
            pint_t value = _base[i];
            if (value) {
                _base[i-shift] = value;
            } else {
                shift++;
            }
        }
        _count -= shift;
        const_cast<macho_section<P>*>(_section)->set_size(_count * sizeof(pint_t));
    }

private:
    ContentAccessor* const _cache;
    const macho_section<P>* const _section;
    pint_t* const _base;
    pint_t _count;  // not const: removeNulls() shrinks the count
};


// Access a section containing an array of structures
template <typename P, typename T>
class ArraySection
{
public:
    ArraySection(ContentAccessor* cache, const macho_header<P>* mh,
                 const char *segname, const char *sectname)
        : _cache(cache),
          _section(mh->getSection(segname, sectname)),
          _base(_section ? (T *)cache->contentForVMAddr(_section->addr()) : 0),
          _count(_section ? _section->size() / sizeof(T) : 0) {
    }

    uint64_t count() const { return _count; }

    T& get(uint64_t index) const {
        if (index >= _count)
            terminate("index out of range in section %s", _section->sectname());
        return _base[index];
    }

private:
    ContentAccessor* const _cache;
    const macho_section<P>* const _section;
    T * const _base;
    uint64_t const _count;
};


#define SELOPT_WRITE
#include "objc-shared-cache.h"
#include "ObjC1Abstraction.hpp"
#include "ObjC2Abstraction.hpp"


namespace {


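// Uniques selector references: the first selector string visited with a
// given spelling becomes canonical, and visit() returns the canonical
// string's VM address for every subsequent reference to that spelling.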
template <typename P>
class ObjCSelectorUniquer
{
public:
    typedef typename P::uint_t pint_t;

    ObjCSelectorUniquer(ContentAccessor* cache) : _cache(cache) { }

    pint_t visit(pint_t oldValue)
    {
        _count++;
        const char *s = (const char *)_cache->contentForVMAddr(oldValue);
        objc_opt::string_map::iterator element =
            _selectorStrings.insert(objc_opt::string_map::value_type(s, oldValue)).first;
        return (pint_t)element->second;
    }

    objc_opt::string_map& strings() {
        return _selectorStrings;
    }

    size_t count() const { return _count; }

private:
    objc_opt::string_map _selectorStrings;
    ContentAccessor* _cache;
    size_t _count = 0;
};


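// Records the name and address of every non-metaclass class, paired with
// the header_info of the image that defines it, for the class table.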
template <typename P>
class ClassListBuilder
{
private:
    objc_opt::string_map _classNames;
    objc_opt::class_map _classes;
    size_t _count = 0;
    HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& _hInfos;

public:

    ClassListBuilder(HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& hinfos) : _hInfos(hinfos) { }

    void visitClass(ContentAccessor* cache,
                    const macho_header<P>* header,
                    objc_class_t<P>* cls)
    {
        if (cls->isMetaClass(cache)) return;

        const char *name = cls->getName(cache);
        uint64_t name_vmaddr = cache->vmAddrForContent((void*)name);
        uint64_t cls_vmaddr = cache->vmAddrForContent(cls);
        uint64_t hinfo_vmaddr = cache->vmAddrForContent(_hInfos.hinfoForHeader(cache, header));
        _classNames.insert(objc_opt::string_map::value_type(name, name_vmaddr));
        _classes.insert(objc_opt::class_map::value_type(name, std::pair<uint64_t, uint64_t>(cls_vmaddr, hinfo_vmaddr)));
        _count++;
    }

    objc_opt::string_map& classNames() {
        return _classNames;
    }

    objc_opt::class_map& classes() {
        return _classes;
    }

    size_t count() const { return _count; }
};

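// Uniques protocol objects by name: a single canonical copy of each
// protocol is written into libobjc's read-write section, and protocol
// references are later redirected to those copies.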
template <typename P>
class ProtocolOptimizer
{
private:
    typedef typename P::uint_t pint_t;

    objc_opt::string_map _protocolNames;
    objc_opt::protocol_map _protocols;
    size_t _protocolCount;
    size_t _protocolReferenceCount;

    friend class ProtocolReferenceWalker<P, ProtocolOptimizer<P>>;

    pint_t visitProtocolReference(ContentAccessor* cache, pint_t oldValue)
    {
        objc_protocol_t<P>* proto = (objc_protocol_t<P>*)
            cache->contentForVMAddr(oldValue);
        pint_t newValue = (pint_t)_protocols[proto->getName(cache)];
        if (oldValue != newValue) _protocolReferenceCount++;
        return newValue;
    }

public:

    ProtocolOptimizer()
        : _protocolNames()
        , _protocols()
        , _protocolCount(0)
        , _protocolReferenceCount(0)
    { }

    void addProtocols(ContentAccessor* cache,
                      const macho_header<P>* header)
    {
        PointerSection<P, objc_protocol_t<P> *>
            protocols(cache, header, "__DATA", "__objc_protolist");

        for (pint_t i = 0; i < protocols.count(); i++) {
            objc_protocol_t<P> *proto = protocols.get(i);

            const char *name = proto->getName(cache);
            if (_protocolNames.count(name) == 0) {
                if (proto->getSize() > sizeof(objc_protocol_t<P>)) {
                    terminate("objc protocol is too big");
                }

                uint64_t name_vmaddr = cache->vmAddrForContent((void*)name);
                uint64_t proto_vmaddr = cache->vmAddrForContent(proto);
                _protocolNames.insert(objc_opt::string_map::value_type(name, name_vmaddr));
                _protocols.insert(objc_opt::protocol_map::value_type(name, proto_vmaddr));
                _protocolCount++;
            }
        }
    }

    const char *writeProtocols(ContentAccessor* cache,
                               uint8_t *& rwdest, size_t& rwremaining,
                               uint8_t *& rodest, size_t& roremaining,
                               std::vector<void*>& pointersInData,
                               pint_t protocolClassVMAddr)
    {
        if (_protocolCount == 0) return NULL;

        if (protocolClassVMAddr == 0) {
            return "libobjc's Protocol class symbol not found (metadata not optimized)";
        }

        size_t rwrequired = _protocolCount * sizeof(objc_protocol_t<P>);
        if (rwremaining < rwrequired) {
            return "libobjc's read-write section is too small (metadata not optimized)";
        }

        for (objc_opt::protocol_map::iterator iter = _protocols.begin();
             iter != _protocols.end();
             ++iter)
        {
            objc_protocol_t<P>* oldProto = (objc_protocol_t<P>*)
                cache->contentForVMAddr(iter->second);

            // Create a new protocol object.
            objc_protocol_t<P>* proto = (objc_protocol_t<P>*)rwdest;
            rwdest += sizeof(*proto);
            rwremaining -= sizeof(*proto);

            // Initialize it.
            uint32_t oldSize = oldProto->getSize();
            memcpy(proto, oldProto, oldSize);
            if (!proto->getIsaVMAddr()) {
                proto->setIsaVMAddr(protocolClassVMAddr);
            }
            if (oldSize < sizeof(*proto)) {
                // Protocol object is old. Populate new fields.
                proto->setSize(sizeof(objc_protocol_t<P>));
                // missing extendedMethodTypes is already nil
            }
            // Some protocol objects are big enough to have the
            // demangledName field but don't initialize it.
            // Initialize it here if it is not already set.
            if (!proto->getDemangledName(cache)) {
                const char *roName = proto->getName(cache);
                char *demangledName = copySwiftDemangledName(roName, true);
                if (demangledName) {
                    size_t length = 1 + strlen(demangledName);
                    if (roremaining < length) {
                        return "libobjc's read-only section is too small (metadata not optimized)";
                    }

                    memmove(rodest, demangledName, length);
                    roName = (const char *)rodest;
                    rodest += length;
                    roremaining -= length;

                    free(demangledName);
                }
                proto->setDemangledName(cache, roName);
            }
            proto->setFixedUp();

            // Redirect the protocol table at our new object.
            iter->second = cache->vmAddrForContent(proto);

            // Add new rebase entries.
            proto->addPointers(pointersInData);
        }

        return NULL;
    }

    void updateReferences(ContentAccessor* cache, const macho_header<P>* header)
    {
        ProtocolReferenceWalker<P, ProtocolOptimizer<P>> refs(*this);
        refs.walk(cache, header);
    }

    objc_opt::string_map& protocolNames() {
        return _protocolNames;
    }

    objc_opt::protocol_map& protocols() {
        return _protocols;
    }

    size_t protocolCount() const { return _protocolCount; }
    size_t protocolReferenceCount() const { return _protocolReferenceCount; }
};


static int percent(size_t num, size_t denom) {
    if (denom)
        return (int)(num / (double)denom * 100);
    else
        return 100;
}


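// Builds libobjc's precomputed metadata (selector, class, and protocol
// tables plus per-image header info lists) inside the shared cache,
// writing the results into the placeholder sections that libobjc
// reserves for them (__objc_opt_ro / __objc_opt_rw).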
template <typename P>
void optimizeObjC(SharedCache& cache, std::vector<void*>& pointersForASLR, bool forProduction)
{
    typedef typename P::E E;
    typedef typename P::uint_t pint_t;

    verboseLog("Optimizing objc metadata:");
    verboseLog(" cache type is %s",
               forProduction ? "production" : "development");

    ContentAccessor cacheAccessor(cache);

    size_t headerSize = P::round_up(sizeof(objc_opt::objc_opt_t));
    if (headerSize != sizeof(objc_opt::objc_opt_t)) {
        warning("libobjc's optimization structure size is wrong (metadata not optimized)");
    }

    //
    // Find libobjc's empty sections and build list of images with objc metadata
    //
    const macho_section<P> *optROSection = nullptr;
    const macho_section<P> *optRWSection = nullptr;
    const macho_section<P> *optPointerListSection = nullptr;
    std::vector<const macho_header<P>*> objcDylibs;
    cache.forEachImage([&](const void* machHeader, const char* installName,
                           time_t, ino_t, const std::vector<MachOProxySegment>& segments) {
        const macho_header<P>* mh = (const macho_header<P>*)machHeader;
        if ( strstr(installName, "/libobjc.") != nullptr ) {
            optROSection = mh->getSection("__TEXT", "__objc_opt_ro");
            optRWSection = mh->getSection("__DATA", "__objc_opt_rw");
            optPointerListSection = mh->getSection("__DATA", "__objc_opt_ptrs");
        }
        if ( mh->getSection("__DATA", "__objc_imageinfo") || mh->getSection("__OBJC", "__image_info") ) {
            objcDylibs.push_back(mh);
        }
        // log("installName %s at mhdr 0x%016lx", installName, (uintptr_t)cacheAccessor.vmAddrForContent((void*)mh));
    });
    if ( optROSection == nullptr ) {
        warning("libobjc's read-only section missing (metadata not optimized)");
        return;
    }
    if ( optRWSection == nullptr ) {
        warning("libobjc's read/write section missing (metadata not optimized)");
        return;
    }
    if ( optPointerListSection == nullptr ) {
        warning("libobjc's pointer list section missing (metadata not optimized)");
        return;
    }

    uint8_t* optROData = (uint8_t*)cacheAccessor.contentForVMAddr(optROSection->addr());
    size_t optRORemaining = optROSection->size();
    uint8_t* optRWData = (uint8_t*)cacheAccessor.contentForVMAddr(optRWSection->addr());
    size_t optRWRemaining = optRWSection->size();
    if (optRORemaining < headerSize) {
        warning("libobjc's read-only section is too small (metadata not optimized)");
        return;
    }
    objc_opt::objc_opt_t* optROHeader = (objc_opt::objc_opt_t *)optROData;
    optROData += headerSize;
    optRORemaining -= headerSize;
    if (E::get32(optROHeader->version) != objc_opt::VERSION) {
        warning("libobjc's read-only section version is unrecognized (metadata not optimized)");
        return;
    }

    if (optPointerListSection->size() < sizeof(objc_opt::objc_opt_pointerlist_tt<pint_t>)) {
        warning("libobjc's pointer list section is too small (metadata not optimized)");
        return;
    }
    const objc_opt::objc_opt_pointerlist_tt<pint_t> *optPointerList = (const objc_opt::objc_opt_pointerlist_tt<pint_t> *)cacheAccessor.contentForVMAddr(optPointerListSection->addr());

    // Write nothing to optROHeader until everything else is written.
    // If something fails below, libobjc will not use the section.


    //
    // Make copy of objcList and sort that list.
    //
    std::vector<const macho_header<P>*> addressSortedDylibs = objcDylibs;
    std::sort(addressSortedDylibs.begin(), addressSortedDylibs.end(), [](const macho_header<P>* lmh, const macho_header<P>* rmh) -> bool {
        return lmh < rmh;
    });

    //
    // Build HeaderInfo list in cache
    //
    // First the RO header info
    // log("writing out %d RO dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optROSection->size() - optRORemaining));
    uint64_t hinfoROVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    HeaderInfoOptimizer<P, objc_header_info_ro_t<P>> hinfoROOptimizer;
    const char* err = hinfoROOptimizer.init((uint32_t)objcDylibs.size(), optROData, optRORemaining);
    if (err) {
        warning("%s", err);
        return;
    }
    else {
        for (const macho_header<P>* mh : addressSortedDylibs) {
            hinfoROOptimizer.update(&cacheAccessor, mh, pointersForASLR);
        }
    }

    // Then the RW header info
    // log("writing out %d RW dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optRWSection->size() - optRWRemaining));
    uint64_t hinfoRWVMAddr = (uint64_t)optRWSection->addr() + (uint64_t)optRWSection->size() - optRWRemaining;
    HeaderInfoOptimizer<P, objc_header_info_rw_t<P>> hinfoRWOptimizer;
    err = hinfoRWOptimizer.init((uint32_t)objcDylibs.size(), optRWData, optRWRemaining);
    if (err) {
        warning("%s", err);
        return;
    }
    else {
        for (const macho_header<P>* mh : addressSortedDylibs) {
            hinfoRWOptimizer.update(&cacheAccessor, mh, pointersForASLR);
        }
    }

    //
    // Update selector references and build selector list
    //
    // This is SAFE: if we run out of room for the selector table,
    // the modified binaries are still usable.
    //
    // Heuristic: choose selectors from libraries with more selector cstring data first.
    // This tries to localize selector cstring memory.
    //
    ObjCSelectorUniquer<P> uniq(&cacheAccessor);
    std::vector<const macho_header<P>*> sizeSortedDylibs = objcDylibs;
    std::sort(sizeSortedDylibs.begin(), sizeSortedDylibs.end(), [](const macho_header<P>* lmh, const macho_header<P>* rmh) -> bool {
        const macho_section<P>* lSection = lmh->getSection("__TEXT", "__objc_methname");
        const macho_section<P>* rSection = rmh->getSection("__TEXT", "__objc_methname");
        uint64_t lSelectorSize = (lSection ? lSection->size() : 0);
        uint64_t rSelectorSize = (rSection ? rSection->size() : 0);
        return lSelectorSize > rSelectorSize;
    });

    SelectorOptimizer<P, ObjCSelectorUniquer<P> > selOptimizer(uniq);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        LegacySelectorUpdater<P, ObjCSelectorUniquer<P>>::update(&cacheAccessor, mh, uniq);
        selOptimizer.optimize(&cacheAccessor, mh);
    }

    verboseLog(" uniqued % 6ld selectors",
               uniq.strings().size());
    verboseLog(" updated % 6ld selector references",
               uniq.count());

    uint64_t seloptVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    objc_opt::objc_selopt_t *selopt = new(optROData) objc_opt::objc_selopt_t;
    err = selopt->write(seloptVMAddr, optRORemaining, uniq.strings());
    if (err) {
        warning("%s", err);
        return;
    }
    optROData += selopt->size();
    optRORemaining -= selopt->size();
    uint32_t seloptCapacity = selopt->capacity;
    uint32_t seloptOccupied = selopt->occupied;
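    // Capture occupancy statistics before the table is byteswapped to the
    // target byte order; the pointer is nulled afterwards so the swapped
    // table cannot be accidentally reused.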
    selopt->byteswap(E::little_endian);
    selopt = nullptr;

    verboseLog(" selector table occupancy %u/%u (%u%%)",
               seloptOccupied, seloptCapacity,
               (unsigned)(seloptOccupied/(double)seloptCapacity*100));


    //
    // Detect classes that have missing weak-import superclasses.
    //
    // Production only. Development cache does not do this: a replacement
    // library could omit a class at runtime that was present during
    // cache construction.
    //
    // This is SAFE: the binaries themselves are unmodified.
    bool noMissingWeakSuperclasses = false; // dev cache can't promise otherwise
    if (forProduction) {
        WeakClassDetector<P> weakopt;
        noMissingWeakSuperclasses =
            weakopt.noMissingWeakSuperclasses(&cacheAccessor, sizeSortedDylibs);

        // Shared cache does not currently support unbound weak references.
        // Here we assert that there are none. If support is added later then
        // this assertion needs to be removed and this path needs to be tested.
        if (!noMissingWeakSuperclasses) {
            terminate("Some Objective-C class has a superclass that is "
                      "weak-import and missing from the cache.");
        }
    }


    //
    // Build class table.
    //
    // This is SAFE: the binaries themselves are unmodified.
    ClassListBuilder<P> classes(hinfoROOptimizer);
    ClassWalker<P, ClassListBuilder<P>> classWalker(classes);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        classWalker.walk(&cacheAccessor, mh);
    }

    verboseLog(" recorded % 6ld classes",
               classes.classNames().size());

    uint64_t clsoptVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    objc_opt::objc_clsopt_t *clsopt = new(optROData) objc_opt::objc_clsopt_t;
    err = clsopt->write(clsoptVMAddr, optRORemaining,
                        classes.classNames(), classes.classes(), false);
    if (err) {
        warning("%s", err);
        return;
    }
    optROData += clsopt->size();
    optRORemaining -= clsopt->size();
    size_t duplicateCount = clsopt->duplicateCount();
    uint32_t clsoptCapacity = clsopt->capacity;
    uint32_t clsoptOccupied = clsopt->occupied;
    clsopt->byteswap(E::little_endian);
    clsopt = nullptr;

    verboseLog(" found % 6ld duplicate classes",
               duplicateCount);
    verboseLog(" class table occupancy %u/%u (%u%%)",
               clsoptOccupied, clsoptCapacity,
               (unsigned)(clsoptOccupied/(double)clsoptCapacity*100));


    //
    // Sort method lists.
    //
    // This is SAFE: modified binaries are still usable as unsorted lists.
    // This must be done AFTER uniquing selectors.
    MethodListSorter<P> methodSorter;
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        methodSorter.optimize(&cacheAccessor, mh);
    }

    verboseLog(" sorted % 6ld method lists",
               methodSorter.optimized());


    //
    // Unique protocols and build protocol table.
    //
    // This is SAFE: no protocol references are updated yet
    // This must be done AFTER updating method lists.
    //
    ProtocolOptimizer<P> protocolOptimizer;
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        protocolOptimizer.addProtocols(&cacheAccessor, mh);
    }

    verboseLog(" uniqued % 6ld protocols",
               protocolOptimizer.protocolCount());

    pint_t protocolClassVMAddr = (pint_t)P::getP(optPointerList->protocolClass);
    err = protocolOptimizer.writeProtocols(&cacheAccessor,
                                           optRWData, optRWRemaining,
                                           optROData, optRORemaining,
                                           pointersForASLR, protocolClassVMAddr);
    if (err) {
        warning("%s", err);
        return;
    }

    uint64_t protocoloptVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    objc_opt::objc_protocolopt_t *protocolopt = new (optROData) objc_opt::objc_protocolopt_t;
    err = protocolopt->write(protocoloptVMAddr, optRORemaining,
                             protocolOptimizer.protocolNames(),
                             protocolOptimizer.protocols(), true);
    if (err) {
        warning("%s", err);
        return;
    }
    optROData += protocolopt->size();
    optRORemaining -= protocolopt->size();
    uint32_t protocoloptCapacity = protocolopt->capacity;
    uint32_t protocoloptOccupied = protocolopt->occupied;
    protocolopt->byteswap(E::little_endian);
    protocolopt = nullptr;

    verboseLog(" protocol table occupancy %u/%u (%u%%)",
               protocoloptOccupied, protocoloptCapacity,
               (unsigned)(protocoloptOccupied/(double)protocoloptCapacity*100));


    //
    // Redirect protocol references to the uniqued protocols.
    //
    // This is SAFE: the new protocol objects are still usable as-is.
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        protocolOptimizer.updateReferences(&cacheAccessor, mh);
    }

    verboseLog(" updated % 6ld protocol references",
               protocolOptimizer.protocolReferenceCount());


    //
    // Repair ivar offsets.
    //
    // This is SAFE: the runtime always validates ivar offsets itself.
    IvarOffsetOptimizer<P> ivarOffsetOptimizer;
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        ivarOffsetOptimizer.optimize(&cacheAccessor, mh);
    }

    verboseLog(" updated % 6ld ivar offsets",
               ivarOffsetOptimizer.optimized());


    // Collect flags.
    uint32_t headerFlags = 0;
    if (forProduction) {
        headerFlags |= objc_opt::IsProduction;
    }
    if (noMissingWeakSuperclasses) {
        headerFlags |= objc_opt::NoMissingWeakSuperclasses;
    }


    // Success. Mark dylibs as optimized.
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        const macho_section<P>* imageInfoSection = mh->getSection("__DATA", "__objc_imageinfo");
        if (!imageInfoSection) {
            imageInfoSection = mh->getSection("__OBJC", "__image_info");
        }
        if (imageInfoSection) {
            objc_image_info<P>* info = (objc_image_info<P>*)cacheAccessor.contentForVMAddr(imageInfoSection->addr());
            info->setOptimizedByDyld();
        }
    }


    // Success. Update RO header last.
    E::set32(optROHeader->flags, headerFlags);
    E::set32(optROHeader->selopt_offset, (uint32_t)(seloptVMAddr - optROSection->addr()));
    E::set32(optROHeader->clsopt_offset, (uint32_t)(clsoptVMAddr - optROSection->addr()));
    E::set32(optROHeader->protocolopt_offset, (uint32_t)(protocoloptVMAddr - optROSection->addr()));
    E::set32(optROHeader->headeropt_ro_offset, (uint32_t)(hinfoROVMAddr - optROSection->addr()));
    E::set32(optROHeader->headeropt_rw_offset, (uint32_t)(hinfoRWVMAddr - optROSection->addr()));

    // Log statistics.
    size_t roSize = optROSection->size() - optRORemaining;
    size_t rwSize = optRWSection->size() - optRWRemaining;
    verboseLog(" %zu/%llu bytes "
               "(%d%%) used in libobjc read-only optimization section",
               roSize, optROSection->size(),
               percent(roSize, optROSection->size()));
    verboseLog(" %zu/%llu bytes "
               "(%d%%) used in libobjc read/write optimization section",
               rwSize, optRWSection->size(),
               percent(rwSize, optRWSection->size()));
    verboseLog(" wrote objc metadata optimization version %d",
               objc_opt::VERSION);
}


} // anon namespace


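// Entry point: selects the pointer-size abstraction matching the cache's
// architecture and runs the optimizer above.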
void SharedCache::optimizeObjC(bool forProduction)
{
    switch ( _arch.arch ) {
        case CPU_TYPE_ARM:
        case CPU_TYPE_I386:
            ::optimizeObjC<Pointer32<LittleEndian>>(*this, _pointersForASLR, forProduction);
            break;
        case CPU_TYPE_X86_64:
        case CPU_TYPE_ARM64:
            ::optimizeObjC<Pointer64<LittleEndian>>(*this, _pointersForASLR, forProduction);
            break;
        default:
            terminate("unsupported arch 0x%08X", _arch.arch);
    }
}