1 /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
2 *
3 * Copyright (c) 2014 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <dirent.h>
27 #include <sys/errno.h>
28 #include <sys/fcntl.h>
29 #include <mach-o/loader.h>
30 #include <mach-o/fat.h>
31 #include <assert.h>
32
33 #include "DyldSharedCache.h"
34 #include "Diagnostics.h"
35 #include "SharedCacheBuilder.h"
36 #include "FileAbstraction.hpp"
37 #include "MachOFileAbstraction.hpp"
38 #include "MachOLoaded.h"
39 #include "MachOAnalyzer.h"
40 #include "MachOAnalyzerSet.h"
41
42 #ifndef MH_HAS_OBJC
43 #define MH_HAS_OBJC 0x40000000
44 #endif
45
46 // Scan a C++ or Swift length-mangled field.
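// Illustrative example (not from the original source): for the input "5Gizmo7MyClass",
// one call consumes "5Gizmo", returning field -> "Gizmo..." with length = 5 and leaving
// string pointing at "7MyClass" for the next call.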
47 static bool scanMangledField(const char *&string, const char *end,
48 const char *&field, int& length)
49 {
50 // Leading zero not allowed.
51 if (*string == '0') return false;
52
53 length = 0;
54 field = string;
55 while (field < end) {
56 char c = *field;
57 if (!isdigit(c)) break;
58 field++;
59 if (__builtin_smul_overflow(length, 10, &length)) return false;
60 if (__builtin_sadd_overflow(length, c - '0', &length)) return false;
61 }
62
63 string = field + length;
64 return length > 0 && string <= end;
65 }
66
67
68 // copySwiftDemangledName
69 // Returns the pretty form of the given Swift-mangled class or protocol name.
70 // Returns nullptr if the string doesn't look like a mangled Swift name.
71 // The result must be freed with free().
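// Illustrative examples (assumed from the mangling rules handled below):
//   "_TtC5Gizmo7MyClass"  -> "Gizmo.MyClass"   (class)
//   "_TtP5Gizmo7MyProto_" -> "Gizmo.MyProto"   (protocol; trailing "_")
//   "_TtCs6Object"        -> "Swift.Object"    ("s" abbreviates the Swift module)
// Anything without the "_TtC"/"_TtP" prefix yields nullptr.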
72 static char *copySwiftDemangledName(const char *string, bool isProtocol = false)
73 {
74 if (!string) return nullptr;
75
76 // Swift mangling prefix.
77 if (strncmp(string, isProtocol ? "_TtP" : "_TtC", 4) != 0) return nullptr;
78 string += 4;
79
80 const char *end = string + strlen(string);
81
82 // Module name.
83 const char *prefix;
84 int prefixLength;
85 if (string[0] == 's') {
86 // "s" is the Swift module.
87 prefix = "Swift";
88 prefixLength = 5;
89 string += 1;
90 } else {
91 if (! scanMangledField(string, end, prefix, prefixLength)) return nullptr;
92 }
93
94 // Class or protocol name.
95 const char *suffix;
96 int suffixLength;
97 if (! scanMangledField(string, end, suffix, suffixLength)) return nullptr;
98
99 if (isProtocol) {
100 // Remainder must be "_".
101 if (strcmp(string, "_") != 0) return nullptr;
102 } else {
103 // Remainder must be empty.
104 if (string != end) return nullptr;
105 }
106
107 char *result;
108 asprintf(&result, "%.*s.%.*s", prefixLength, prefix, suffixLength, suffix);
109 return result;
110 }
111
112
113 class ContentAccessor {
114 public:
115 ContentAccessor(const DyldSharedCache* cache, Diagnostics& diag)
116 : _diagnostics(diag)
117 {
118 _cacheStart = (uint8_t*)cache;
119 _cacheUnslideAddr = cache->unslidLoadAddress();
120 _slide = (uint64_t)cache - _cacheUnslideAddr;
121 }
122
123 // Converts from an on-disk vmAddr to the real vmAddr.
124 // That is, for a chained fixup it decodes the chain; for a non-chained fixup it does nothing.
125 uint64_t vmAddrForOnDiskVMAddr(uint64_t vmaddr) {
126 return vmaddr;
127 }
128
129 void* contentForVMAddr(uint64_t vmaddr) {
130 vmaddr = vmAddrForOnDiskVMAddr(vmaddr);
131 if ( vmaddr != 0 ) {
132 uint64_t offset = vmaddr - _cacheUnslideAddr;
133 return _cacheStart + offset;
134 } else
135 return nullptr;
136 }
137
138 uint64_t vmAddrForContent(const void* content) {
139 if ( content != nullptr )
140 return _cacheUnslideAddr + ((uint8_t*)content - _cacheStart);
141 else
142 return 0;
143 }
144
145 Diagnostics& diagnostics() { return _diagnostics; }
146
147 private:
148 Diagnostics& _diagnostics;
149 uint64_t _slide;
150 uint64_t _cacheUnslideAddr;
151 uint8_t* _cacheStart;
152 };
153
154
155 // Access a section containing a list of pointers
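// Typical use (as seen later in this file): walking one image's protocol list, e.g.
//   PointerSection<P, objc_protocol_t<P>*> protocols(cache, mh, "__DATA", "__objc_protolist");
//   for (pint_t i = 0; i < protocols.count(); i++) { objc_protocol_t<P>* proto = protocols.get(i); ... }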
156 template <typename P, typename T>
157 class PointerSection
158 {
159 typedef typename P::uint_t pint_t;
160 public:
161 PointerSection(ContentAccessor* cache, const macho_header<P>* mh,
162 const char* segname, const char* sectname)
163 : _cache(cache),
164 _section(mh->getSection(segname, sectname)),
165 _base(_section ? (pint_t*)cache->contentForVMAddr(_section->addr()) : 0),
166 _count(_section ? (pint_t)(_section->size() / sizeof(pint_t)) : 0) {
167 }
168
169 pint_t count() const { return _count; }
170
171 pint_t getVMAddress(pint_t index) const {
172 if ( index >= _count ) {
173 _cache->diagnostics().error("index out of range in section %s", _section->sectname());
174 return 0;
175 }
176 return (pint_t)P::getP(_base[index]);
177 }
178
179 pint_t getSectionVMAddress() const {
180 return (pint_t)_section->addr();
181 }
182
183 T get(pint_t index) const {
184 return (T)_cache->contentForVMAddr(getVMAddress(index));
185 }
186
187 void setVMAddress(pint_t index, pint_t value) {
188 if ( index >= _count ) {
189 _cache->diagnostics().error("index out of range in section %s", _section->sectname());
190 return;
191 }
192 P::setP(_base[index], value);
193 }
194
195 void removeNulls() {
196 pint_t shift = 0;
197 for (pint_t i = 0; i < _count; i++) {
198 pint_t value = _base[i];
199 if (value) {
200 _base[i-shift] = value;
201 } else {
202 shift++;
203 }
204 }
205 _count -= shift;
206 const_cast<macho_section<P>*>(_section)->set_size(_count * sizeof(pint_t));
207 }
208
209 private:
210 ContentAccessor* const _cache;
211 const macho_section<P>* const _section;
212 pint_t* const _base;
213 pint_t const _count;
214 };
215
216
217 // Access a section containing an array of structures
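// Sketch of intended use (hypothetical section and element type, for illustration only):
//   ArraySection<P, some_entry_t<P>> entries(cache, mh, "__DATA", "__objc_somelist");
//   for (uint64_t i = 0; i < entries.count(); i++) { some_entry_t<P>& entry = entries.get(i); ... }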
218 template <typename P, typename T>
219 class ArraySection
220 {
221 public:
222 ArraySection(ContentAccessor* cache, const macho_header<P>* mh,
223 const char *segname, const char *sectname)
224 : _cache(cache),
225 _section(mh->getSection(segname, sectname)),
226 _base(_section ? (T *)cache->contentForVMAddr(_section->addr()) : 0),
227 _count(_section ? _section->size() / sizeof(T) : 0) {
228 }
229
230 uint64_t count() const { return _count; }
231
232 T& get(uint64_t index) const {
233 if (index >= _count) {
234 _cache->diagnostics().error("index out of range in section %s", _section->sectname());
235 }
236 return _base[index];
237 }
238
239 private:
240 ContentAccessor* const _cache;
241 const macho_section<P>* const _section;
242 T * const _base;
243 uint64_t const _count;
244 };
245
246
247 #define SELOPT_WRITE
248 #include "objc-shared-cache.h"
249 #include "ObjC1Abstraction.hpp"
250 #include "ObjC2Abstraction.hpp"
251
252
253 namespace {
254
255
256
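// Uniques selector strings: visit() maps each selector-reference value to the vmAddr of a
// single canonical copy of that string (the first copy seen, or the coalesced __objc_methname
// copy registered via visitCoalescedStrings()).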
257 template <typename P>
258 class ObjCSelectorUniquer
259 {
260 public:
261 typedef typename P::uint_t pint_t;
262
263 ObjCSelectorUniquer(ContentAccessor* cache) : _cache(cache) { }
264
265 pint_t visit(pint_t oldValue)
266 {
267 _count++;
268 const char *s = (const char *)_cache->contentForVMAddr(oldValue);
269 oldValue = (pint_t)_cache->vmAddrForOnDiskVMAddr(oldValue);
270 objc_opt::string_map::iterator element =
271 _selectorStrings.insert(objc_opt::string_map::value_type(s, oldValue)).first;
272 return (pint_t)element->second;
273 }
274
275 void visitCoalescedStrings(const CacheBuilder::CacheCoalescedText& coalescedText) {
276 const CacheBuilder::CacheCoalescedText::StringSection& methodNames = coalescedText.getSectionData("__objc_methname");
277 for (const auto& stringAndOffset : methodNames.stringsToOffsets) {
278 uint64_t vmAddr = methodNames.bufferVMAddr + stringAndOffset.second;
279 _selectorStrings[stringAndOffset.first.data()] = vmAddr;
280 }
281 }
282
283 objc_opt::string_map& strings() {
284 return _selectorStrings;
285 }
286
287 size_t count() const { return _count; }
288
289 private:
290 objc_opt::string_map _selectorStrings;
291 ContentAccessor* _cache;
292 size_t _count = 0;
293 };
294
295
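// Collects, for every non-metaclass class, the entries needed for the shared class table:
// class name -> (class vmAddr, header_info vmAddr of the image defining it).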
296 template <typename P>
297 class ClassListBuilder
298 {
299 private:
300 objc_opt::string_map _classNames;
301 objc_opt::class_map _classes;
302 size_t _count = 0;
303 HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& _hInfos;
304
305 public:
306
307 ClassListBuilder(HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& hinfos) : _hInfos(hinfos) { }
308
309 void visitClass(ContentAccessor* cache,
310 const macho_header<P>* header,
311 objc_class_t<P>* cls)
312 {
313 if (cls->isMetaClass(cache)) return;
314
315 const char* name = cls->getName(cache);
316 uint64_t name_vmaddr = cache->vmAddrForContent((void*)name);
317 uint64_t cls_vmaddr = cache->vmAddrForContent(cls);
318 uint64_t hinfo_vmaddr = cache->vmAddrForContent(_hInfos.hinfoForHeader(cache, header));
319 _classNames.insert(objc_opt::string_map::value_type(name, name_vmaddr));
320 _classes.insert(objc_opt::class_map::value_type(name, std::pair<uint64_t, uint64_t>(cls_vmaddr, hinfo_vmaddr)));
321 _count++;
322 }
323
324 objc_opt::string_map& classNames() {
325 return _classNames;
326 }
327
328 objc_opt::class_map& classes() {
329 return _classes;
330 }
331
332 size_t count() const { return _count; }
333 };
334
335
336 /// Builds a map from (install name, class name, method name) to actual IMPs
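/// For illustration (hypothetical entry): the key
///   { "/usr/lib/libobjc.A.dylib", "NSObject", "init", isInstanceMethod = true }
/// would map to the vmAddr of that -[NSObject init] implementation.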
337 template <typename P>
338 class IMPMapBuilder
339 {
340 private:
341 typedef typename P::uint_t pint_t;
342
343 public:
344
345 struct MapKey {
346 std::string_view installName;
347 std::string_view className;
348 std::string_view methodName;
349 bool isInstanceMethod;
350
351 bool operator==(const MapKey& other) const {
352 return isInstanceMethod == other.isInstanceMethod &&
353 installName == other.installName &&
354 className == other.className &&
355 methodName == other.methodName;
356 }
357
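// boost::hash_combine-style mixing; 0x9e3779b9 is the usual golden-ratio constant.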
358 size_t hash() const {
359 std::size_t seed = 0;
360 seed ^= std::hash<std::string_view>()(installName) + 0x9e3779b9 + (seed<<6) + (seed>>2);
361 seed ^= std::hash<std::string_view>()(className) + 0x9e3779b9 + (seed<<6) + (seed>>2);
362 seed ^= std::hash<std::string_view>()(methodName) + 0x9e3779b9 + (seed<<6) + (seed>>2);
363 seed ^= std::hash<bool>()(isInstanceMethod) + 0x9e3779b9 + (seed<<6) + (seed>>2);
364 return seed;
365 }
366 };
367
368 struct MapKeyHasher {
369 size_t operator()(const MapKey& k) const {
370 return k.hash();
371 }
372 };
373
374 std::unordered_map<MapKey, pint_t, MapKeyHasher> impMap;
375 bool relativeMethodListSelectorsAreDirect;
376
377 IMPMapBuilder(bool relativeMethodListSelectorsAreDirect)
378 : relativeMethodListSelectorsAreDirect(relativeMethodListSelectorsAreDirect) { }
379
380 void visitClass(ContentAccessor* cache,
381 const macho_header<P>* header,
382 objc_class_t<P>* cls)
383 {
384 objc_method_list_t<P> *methodList = cls->getMethodList(cache);
385 if (methodList == nullptr) return;
386
387 const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)header;
388 bool isInstanceMethod = !cls->isMetaClass(cache);
389 const char* className = cls->getName(cache);
390 const char* installName = ma->installName();
391
392 for (uint32_t n = 0; n < methodList->getCount(); n++) {
393 // do not clobber an existing entry, if any, because categories win
394 impMap.try_emplace(MapKey{
395 .installName = installName,
396 .className = className,
397 .methodName = methodList->getStringName(cache, n, relativeMethodListSelectorsAreDirect),
398 .isInstanceMethod = isInstanceMethod
399 }, methodList->getImp(n, cache));
400 }
401 }
402
403 void visit(ContentAccessor* cache, const macho_header<P>* header) {
404 const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)header;
405
406 // Method lists from categories
407 PointerSection<P, objc_category_t<P> *>
408 cats(cache, header, "__DATA", "__objc_catlist");
409 for (pint_t i = 0; i < cats.count(); i++) {
410 objc_category_t<P> *cat = cats.get(i);
411 objc_class_t<P>* cls = cat->getClass(cache);
412 if (cls == nullptr)
413 continue;
414
415 objc_method_list_t<P> *instanceMethods = cat->getInstanceMethods(cache);
416 if (instanceMethods != nullptr) {
417 for (uint32_t n = 0; n < instanceMethods->getCount(); n++) {
418 MapKey k {
419 .installName = ma->installName(),
420 .className = cls->getName(cache),
421 .methodName = instanceMethods->getStringName(cache, n, relativeMethodListSelectorsAreDirect),
422 .isInstanceMethod = true
423 };
424 //printf("Adding %s %s %s %d cat %s\n", k.installName.data(), k.className.data(), k.methodName.data(), k.isInstanceMethod, k.catName->data());
425 impMap[k] = instanceMethods->getImp(n, cache);
426 }
427 }
428 objc_method_list_t<P> *classMethods = cat->getClassMethods(cache);
429 if (classMethods != nullptr) {
430 for (uint32_t n = 0; n < classMethods->getCount(); n++) {
431 MapKey k {
432 .installName = ma->installName(),
433 .className = cls->getName(cache),
434 .methodName = classMethods->getStringName(cache, n, relativeMethodListSelectorsAreDirect),
435 .isInstanceMethod = false
436 };
437 //printf("Adding %s %s %s %d cat %s\n", k.installName.data(), k.className.data(), k.methodName.data(), k.isInstanceMethod, k.catName->data());
438 impMap[k] = classMethods->getImp(n, cache);
439 }
440 }
441 }
442 }
443 };
444
445 // List of offsets in libobjc that the shared cache optimization needs to use.
446 template <typename T>
447 struct objc_opt_imp_caches_pointerlist_tt {
448 T selectorStringVMAddrStart;
449 T selectorStringVMAddrEnd;
450 T inlinedSelectorsVMAddrStart;
451 T inlinedSelectorsVMAddrEnd;
452 };
453
454 template <typename P>
455 class IMPCachesEmitter
456 {
457 typedef typename P::uint_t pint_t;
458
459 private:
460 Diagnostics& diag;
461 const IMPMapBuilder<P>& impMapBuilder;
462 uint64_t selectorStringVMAddr;
463 uint8_t*& readOnlyBuffer;
464 size_t& readOnlyBufferSize;
465 uint8_t*& readWriteBuffer;
466 size_t& readWriteBufferSize;
467 CacheBuilder::ASLR_Tracker& aslrTracker;
468
469 std::map<std::string_view, const CacheBuilder::DylibInfo*> _dylibInfos;
470 std::map<std::string_view, const macho_header<P>*> _dylibs;
471 const std::vector<const IMPCaches::Selector*> inlinedSelectors;
472
473 struct ImpCacheHeader {
474 int32_t fallback_class_offset;
475 uint32_t cache_shift : 5;
476 uint32_t cache_mask : 11;
477 uint32_t occupied : 14;
478 uint32_t has_inlines : 1;
479 uint32_t bit_one : 1;
480 };
481
482 struct ImpCacheEntry {
483 uint32_t selOffset;
484 uint32_t impOffset;
485 };
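// As encoded by ImpCacheContents::write() below: selOffset is relative to the start of the
// coalesced selector strings and impOffset is stored as (class vmAddr - imp vmAddr), so a
// reader can reconstruct sel = selectorStringsBase + selOffset and imp = clsVMAddr - impOffset.
// An empty bucket is marked with selOffset == 0xFFFFFFFF.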
486
487 public:
488
489 static size_t sizeForImpCacheWithCount(int entries) {
490 return sizeof(ImpCacheHeader) + entries * sizeof(ImpCacheEntry);
491 }
492
493 struct ImpCacheContents {
494 struct bucket_t {
495 uint32_t sel_offset = 0;
496 uint64_t imp = 0;
497 };
498 std::vector<bucket_t> buckets;
499 uint64_t occupiedBuckets = 0;
500 bool hasInlines = false;
501
502 uint64_t capacity() const
503 {
504 return buckets.size();
505 }
506
507 uint64_t occupied() const {
508 return occupiedBuckets;
509 }
510
511 void incrementOccupied() {
512 ++occupiedBuckets;
513 }
514
515 void insert(uint64_t slot, uint64_t selOffset, uint64_t imp) {
516 bucket_t& b = buckets[slot];
517 assert(b.imp == 0);
518
519 if (!b.imp) incrementOccupied();
520 assert((uint32_t)selOffset == selOffset);
521 b.sel_offset = (uint32_t)selOffset;
522 b.imp = imp;
523 }
524
525 void fillBuckets(const IMPCaches::ClassData* classData, bool metaclass, const IMPMapBuilder<P> & classRecorder) {
526 const std::vector<IMPCaches::ClassData::Method> & methods = classData->methods;
527 buckets.resize(classData->modulo());
528 for (const IMPCaches::ClassData::Method& method : methods) {
529 typename IMPMapBuilder<P>::MapKey k {
530 .installName = method.installName,
531 .className = method.className,
532 .methodName = method.selector->name,
533 .isInstanceMethod = !metaclass
534 };
535
536 pint_t imp = classRecorder.impMap.at(k);
537 int slot = (method.selector->inProgressBucketIndex >> classData->shift) & classData->mask();
538 insert(slot, method.selector->offset, imp);
539 hasInlines |= (method.wasInlined && !method.fromFlattening);
540 }
541 }
542
543 std::pair<uint64_t, uint64_t>
544 write(ContentAccessor* cache,
545 uint64_t cacheSelectorStringVMAddr, uint64_t clsVMAddr,
546 uint8_t*& buf, size_t& bufSize, Diagnostics& diags) {
547 constexpr bool log = false;
548 uint64_t spaceRequired = sizeof(ImpCacheEntry) * capacity();
549
550 if (spaceRequired > bufSize) {
551 diags.error("Not enough space for imp cache");
552 return { 0, 0 };
553 }
554
555 // Convert from addresses to offsets and write out
556 ImpCacheEntry* offsetBuckets = (ImpCacheEntry*)buf;
557 // printf("Buckets: 0x%08llx\n", cache->vmAddrForContent(offsetBuckets));
558 for (uint64_t index = 0; index != buckets.size(); ++index) {
559 bucket_t bucket = buckets[index];
560 if (bucket.sel_offset == 0 && bucket.imp == 0) {
561 // Empty bucket
562 offsetBuckets[index].selOffset = 0xFFFFFFFF;
563 offsetBuckets[index].impOffset = 0;
564 } else {
565 int64_t selOffset = (int64_t)bucket.sel_offset;
566 int64_t impOffset = clsVMAddr - bucket.imp;
567 assert((int32_t)impOffset == impOffset);
568 assert((int32_t)selOffset == selOffset);
569 offsetBuckets[index].selOffset = (int32_t)selOffset;
570 offsetBuckets[index].impOffset = (int32_t)impOffset;
571 if (log) {
572 diags.verbose("[IMP Caches] Coder[%lld]: %#08llx (sel: %#08x, imp %#08x) %s\n", index,
573 cache->vmAddrForOnDiskVMAddr(bucket.imp),
574 (int32_t)selOffset, (int32_t)impOffset,
575 (const char*)cache->contentForVMAddr(cacheSelectorStringVMAddr + bucket.sel_offset));
576 }
577 }
578 }
579
580 buf += spaceRequired;
581 bufSize -= spaceRequired;
582
583 return { cache->vmAddrForContent(offsetBuckets), (uint64_t)buckets.size() };
584 }
585 };
586
587 IMPCachesEmitter(Diagnostics& diags, const IMPMapBuilder<P>& builder, uint64_t selectorStringVMAddr, uint8_t*& roBuf, size_t& roBufSize, uint8_t* &rwBuf, size_t& rwBufSize, const std::vector<CacheBuilder::DylibInfo> & dylibInfos, const std::vector<const macho_header<P>*> & dylibs, CacheBuilder::ASLR_Tracker& tracker)
588 : diag(diags), impMapBuilder(builder), selectorStringVMAddr(selectorStringVMAddr), readOnlyBuffer(roBuf), readOnlyBufferSize(roBufSize), readWriteBuffer(rwBuf), readWriteBufferSize(rwBufSize), aslrTracker(tracker) {
589 for (const CacheBuilder::DylibInfo& d : dylibInfos) {
590 _dylibInfos[d.dylibID] = &d;
591 }
592 for (const macho_header<P>* d : dylibs) {
593 const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*) d;
594 _dylibs[ma->installName()] = d;
595 }
596 }
597
598 // Returns true if we should filter this class out from getting an imp cache
599 bool filter(ContentAccessor* cache, const dyld3::MachOAnalyzer* ma, const objc_class_t<P>* cls) {
600 const CacheBuilder::DylibInfo* d = _dylibInfos[ma->installName()];
601 IMPCaches::ClassKey key {
602 .name = cls->getName(cache),
603 .metaclass = cls->isMetaClass(cache)
604 };
605 return (d->impCachesClassData.find(key) == d->impCachesClassData.end());
606 }
607
608 void visitClass(ContentAccessor* cache,
609 const macho_header<P>* header,
610 objc_class_t<P>* cls)
611 {
612 // If we ran out of space then don't try to optimize more
613 if (diag.hasError())
614 return;
615
616 const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*) header;
617 if (filter(cache, ma, cls)) {
618 *cls->getVTableAddress() = 0;
619 return;
620 }
621
622 const char* className = cls->getName(cache);
623
624 if (cls->getVTable(cache) != 0) {
625 diag.error("Class '%s' has non-zero vtable\n", className);
626 return;
627 }
628
629 const CacheBuilder::DylibInfo* d = _dylibInfos[ma->installName()];
630 IMPCaches::ClassKey key {
631 .name = cls->getName(cache),
632 .metaclass = cls->isMetaClass(cache)
633 };
634 IMPCaches::ClassData* data = (d->impCachesClassData.at(key)).get();
635 #if 0
636 for (const objc_method_t<P>& method : methods) {
637 printf(" 0x%llx: 0x%llx (%s)\n", method.getImp(), method.getName(),
638 (const char*)cache->contentForVMAddr(method.getName()));
639 }
640 #endif
641
642 uint64_t clsVMAddr = cache->vmAddrForContent(cls);
643
644 if (data->mask() > 0x7ff) {
645 diag.verbose("Cache for class %s (%#08llx) is too large (mask: %#x)\n",
646 className, clsVMAddr, data->mask());
647 return;
648 }
649
650 ImpCacheContents impCache;
651 impCache.fillBuckets(data, cls->isMetaClass(cache), impMapBuilder);
652
653 constexpr bool log = false;
654 if (log) {
655 printf("Writing cache for %sclass %s (%#08llx)\n", cls->isMetaClass(cache) ? "meta" : "", className, clsVMAddr);
656 }
657
658 struct ImpCacheHeader {
659 int32_t fallback_class_offset;
660 uint32_t cache_shift : 5;
661 uint32_t cache_mask : 11;
662 uint32_t occupied : 14;
663 uint32_t has_inlines : 1;
664 uint32_t bit_one : 1;
665 };
666 pint_t* vtableAddr = cls->getVTableAddress();
667
668 // the alignment of ImpCaches to 16 bytes is only needed for arm64_32.
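// (Presumably a log2 alignment: 2^4 == 16 bytes when pointers are 4 bytes wide (arm64_32),
// 2^3 == 8 bytes otherwise.)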
669 ImpCacheHeader* cachePtr = (ImpCacheHeader*)align_buffer(readOnlyBuffer, sizeof(pint_t) == 4 ? 4 : 3);
670
671 assert(readOnlyBufferSize > sizeof(ImpCacheHeader));
672
673 uint64_t occupied = impCache.occupied();
674 int64_t fallback_class_offset = *(cls->getSuperClassAddress()) - clsVMAddr;
675
676 if (data->flatteningRootSuperclass) {
677 // If we are a class being flattened (inheriting all the selectors of
678 // its superclasses up to and including the flattening root), the fallback class
679 // should be the first superclass which is not flattened.
680
681 // Find the VMAddr of that superclass, given its segment index and offset
682 // in the source dylib.
683 const auto & superclass = *(data->flatteningRootSuperclass);
684 const macho_header<P> * d = _dylibs[superclass.installName];
685 __block uint64_t superclassVMAddr = 0;
686 const dyld3::MachOAnalyzer *ma = (const dyld3::MachOAnalyzer *)d;
687 ma->forEachSegment(^(const dyld3::MachOAnalyzer::SegmentInfo &info, bool &stop) {
688 if (info.segIndex == superclass.segmentIndex) {
689 superclassVMAddr = info.vmAddr + superclass.segmentOffset;
690 stop = true;
691 }
692 });
693
694 assert(superclassVMAddr > 0);
695 fallback_class_offset = superclassVMAddr - clsVMAddr;
696 }
697
698 assert((int32_t)fallback_class_offset == fallback_class_offset);
699 assert((uint32_t)occupied == occupied);
700
701 *cachePtr = (ImpCacheHeader){
702 .fallback_class_offset = (int32_t)fallback_class_offset,
703 .cache_shift = (uint32_t)(data->shift + 7),
704 .cache_mask = (uint32_t)data->mask(),
705 .occupied = (uint32_t)occupied,
706 .has_inlines = impCache.hasInlines,
707 .bit_one = 1, // obj-c plays HORRENDOUS games here
708 };
709
710 // is this right?
711 int64_t vmaddr = cache->vmAddrForContent(readOnlyBuffer);
712 assert((pint_t)vmaddr == (uint64_t)vmaddr);
713 *vtableAddr = (pint_t)cache->vmAddrForContent(readOnlyBuffer);
714 aslrTracker.add(vtableAddr);
715 readOnlyBuffer += sizeof(ImpCacheHeader);
716 readOnlyBufferSize -= sizeof(ImpCacheHeader);
717
718 impCache.write(cache, selectorStringVMAddr, clsVMAddr, readOnlyBuffer, readOnlyBufferSize, diag);
719 }
720
721 void emitInlinedSelectors(const std::vector<const IMPCaches::Selector*> selectors) {
722 // FIXME: this should be in constant memory
723 for (const IMPCaches::Selector* s : selectors) {
724 assert(readWriteBufferSize >= sizeof(pint_t));
725 *(pint_t*)readWriteBuffer = (pint_t)(selectorStringVMAddr + s->offset);
726 aslrTracker.add(readWriteBuffer);
727 readWriteBuffer += sizeof(pint_t);
728 readWriteBufferSize -= sizeof(pint_t);
729 }
730 }
731 };
732
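// Uniques Objective-C protocols: chooses one canonical definition per protocol name, copies it
// into libobjc's read-write optimization region in writeProtocols(), and later redirects every
// protocol reference at the canonical copy via updateReferences().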
733 template <typename P>
734 class ProtocolOptimizer
735 {
736 private:
737 typedef typename P::uint_t pint_t;
738
739 objc_opt::string_map _protocolNames;
740 objc_opt::legacy_protocol_map _protocols;
741 objc_opt::protocol_map _protocolsAndHeaders;
742 size_t _protocolCount;
743 size_t _protocolReferenceCount;
744 Diagnostics& _diagnostics;
745 HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& _hInfos;
746
747 friend class ProtocolReferenceWalker<P, ProtocolOptimizer<P>>;
748
749 pint_t visitProtocolReference(ContentAccessor* cache, pint_t oldValue)
750 {
751 objc_protocol_t<P>* proto = (objc_protocol_t<P>*)
752 cache->contentForVMAddr(oldValue);
753 pint_t newValue = (pint_t)_protocols[proto->getName(cache)];
754 if (oldValue != newValue) _protocolReferenceCount++;
755 return newValue;
756 }
757
758 public:
759
760 ProtocolOptimizer(Diagnostics& diag, HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& hinfos)
761 : _protocolCount(0), _protocolReferenceCount(0), _diagnostics(diag), _hInfos(hinfos) {
762 }
763
764 void addProtocols(ContentAccessor* cache, const macho_header<P>* header)
765 {
766 PointerSection<P, objc_protocol_t<P> *>
767 protocols(cache, header, "__DATA", "__objc_protolist");
768
769 for (pint_t i = 0; i < protocols.count(); i++) {
770 objc_protocol_t<P> *proto = protocols.get(i);
771
772 const char* name = proto->getName(cache);
773 if (_protocolNames.count(name) == 0) {
774 if (proto->getSize() > sizeof(objc_protocol_t<P>)) {
775 _diagnostics.error("objc protocol is too big");
776 return;
777 }
778 uint64_t name_vmaddr = cache->vmAddrForContent((void*)name);
779 uint64_t proto_vmaddr = cache->vmAddrForContent(proto);
780 _protocolNames.insert(objc_opt::string_map::value_type(name, name_vmaddr));
781 _protocols.insert(objc_opt::legacy_protocol_map::value_type(name, proto_vmaddr));
782 _protocolCount++;
783 }
784
785 // Note down which header this protocol came from. We'll fill in the proto_vmaddr here later
786 // once we've chosen a single definition for the protocol with this name.
787 uint64_t hinfo_vmaddr = cache->vmAddrForContent(_hInfos.hinfoForHeader(cache, header));
788 _protocolsAndHeaders.insert(objc_opt::class_map::value_type(name, std::pair<uint64_t, uint64_t>(0, hinfo_vmaddr)));
789 }
790 }
791
792 const char *writeProtocols(ContentAccessor* cache,
793 uint8_t *& rwdest, size_t& rwremaining,
794 uint8_t *& rodest, size_t& roremaining,
795 CacheBuilder::ASLR_Tracker& aslrTracker,
796 pint_t protocolClassVMAddr,
797 const dyld3::MachOAnalyzerSet::PointerMetaData& PMD)
798 {
799 if (_protocolCount == 0) return NULL;
800
801 if (protocolClassVMAddr == 0) {
802 return "libobjc's Protocol class symbol not found (metadata not optimized)";
803 }
804
805 size_t rwrequired = _protocolCount * sizeof(objc_protocol_t<P>);
806 if (rwremaining < rwrequired) {
807 return "libobjc's read-write section is too small (metadata not optimized)";
808 }
809
810 for (auto iter = _protocols.begin(); iter != _protocols.end(); ++iter)
811 {
812 objc_protocol_t<P>* oldProto = (objc_protocol_t<P>*)
813 cache->contentForVMAddr(iter->second);
814
815 // Create a new protocol object.
816 objc_protocol_t<P>* proto = (objc_protocol_t<P>*)rwdest;
817 rwdest += sizeof(*proto);
818 rwremaining -= sizeof(*proto);
819
820 // Initialize it.
821 uint32_t oldSize = oldProto->getSize();
822 memcpy(proto, oldProto, oldSize);
823 if (!proto->getIsaVMAddr()) {
824 proto->setIsaVMAddr(protocolClassVMAddr);
825 }
826
827 // If the objc runtime signed the Protocol ISA, then we need to too
828 if ( PMD.authenticated ) {
829 aslrTracker.setAuthData(proto->getISALocation(), PMD.diversity, PMD.usesAddrDiversity, PMD.key);
830 }
831
832 if (oldSize < sizeof(*proto)) {
833 // Protocol object is old. Populate new fields.
834 proto->setSize(sizeof(objc_protocol_t<P>));
835 // missing extendedMethodTypes is already nil
836 }
837 // Some protocol objects are big enough to have the
838 // demangledName field but don't initialize it.
839 // Initialize it here if it is not already set.
840 if (!proto->getDemangledName(cache)) {
841 const char *roName = proto->getName(cache);
842 char *demangledName = copySwiftDemangledName(roName, true);
843 if (demangledName) {
844 size_t length = 1 + strlen(demangledName);
845 if (roremaining < length) {
846 return "libobjc's read-only section is too small (metadata not optimized)";
847 }
848
849 memmove(rodest, demangledName, length);
850 roName = (const char *)rodest;
851 rodest += length;
852 roremaining -= length;
853
854 free(demangledName);
855 }
856 proto->setDemangledName(cache, roName, _diagnostics);
857 }
858 proto->setFixedUp();
859 proto->setIsCanonical();
860
861 // Redirect the protocol table at our new object.
862 iter->second = cache->vmAddrForContent(proto);
863
864 // Add new rebase entries.
865 proto->addPointers(cache, aslrTracker);
866 }
867
868 // Now that we've chosen the canonical protocols, set the duplicate headers to
869 // point to their protocols.
870 for (auto iter = _protocolsAndHeaders.begin(); iter != _protocolsAndHeaders.end(); ++iter) {
871 iter->second.first = _protocols[iter->first];
872 }
873
874 return NULL;
875 }
876
877 void updateReferences(ContentAccessor* cache, const macho_header<P>* header)
878 {
879 ProtocolReferenceWalker<P, ProtocolOptimizer<P>> refs(*this);
880 refs.walk(cache, header);
881 }
882
883 objc_opt::string_map& protocolNames() {
884 return _protocolNames;
885 }
886
887 objc_opt::legacy_protocol_map& protocols() {
888 return _protocols;
889 }
890
891 objc_opt::protocol_map& protocolsAndHeaders() {
892 return _protocolsAndHeaders;
893 }
894
895 size_t protocolCount() const { return _protocolCount; }
896 size_t protocolReferenceCount() const { return _protocolReferenceCount; }
897 };
898
899
900 static int percent(size_t num, size_t denom) {
901 if (denom)
902 return (int)(num / (double)denom * 100);
903 else
904 return 100;
905 }
906
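// Adds two synthetic segment load commands (__OBJC_RO and __OBJC_RW) to libobjc.dylib, inserted
// just before its __LINKEDIT segment command, so that the optimization regions allocated by the
// cache builder are covered by libobjc's own segments.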
907 template <typename P>
908 void addObjcSegments(Diagnostics& diag, DyldSharedCache* cache, const mach_header* libobjcMH,
909 uint8_t* objcReadOnlyBuffer, uint64_t objcReadOnlyBufferSizeAllocated,
910 uint8_t* objcReadWriteBuffer, uint64_t objcReadWriteBufferSizeAllocated,
911 uint32_t objcRwFileOffset)
912 {
913 // validate there is enough free space to add the load commands
914 const dyld3::MachOAnalyzer* libobjcMA = ((dyld3::MachOAnalyzer*)libobjcMH);
915 uint32_t freeSpace = libobjcMA->loadCommandsFreeSpace();
916 const uint32_t segSize = sizeof(macho_segment_command<P>);
917 if ( freeSpace < 2*segSize ) {
918 diag.warning("not enough space in libojbc.dylib to add load commands for objc optimization regions");
919 return;
920 }
921
922 // find location of LINKEDIT LC_SEGMENT load command, we need to insert new segments before it
923 __block uint8_t* linkeditSeg = nullptr;
924 libobjcMA->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
925 if ( strcmp(info.segName, "__LINKEDIT") == 0 )
926 linkeditSeg = (uint8_t*)libobjcMH + info.loadCommandOffset;
927 });
928 if ( linkeditSeg == nullptr ) {
929 diag.warning("__LINKEDIT not found in libojbc.dylib");
930 return;
931 }
932
933 // move load commands to make room to insert two new ones before LINKEDIT segment load command
934 uint8_t* endOfLoadCommands = (uint8_t*)libobjcMH + sizeof(macho_header<P>) + libobjcMH->sizeofcmds;
935 uint32_t remainingSize = (uint32_t)(endOfLoadCommands - linkeditSeg);
936 memmove(linkeditSeg+2*segSize, linkeditSeg, remainingSize);
937
938 // insert new segments
939 macho_segment_command<P>* roSeg = (macho_segment_command<P>*)(linkeditSeg);
940 macho_segment_command<P>* rwSeg = (macho_segment_command<P>*)(linkeditSeg+sizeof(macho_segment_command<P>));
941 roSeg->set_cmd(macho_segment_command<P>::CMD);
942 roSeg->set_cmdsize(segSize);
943 roSeg->set_segname("__OBJC_RO");
944 roSeg->set_vmaddr(cache->unslidLoadAddress() + objcReadOnlyBuffer - (uint8_t*)cache);
945 roSeg->set_vmsize(objcReadOnlyBufferSizeAllocated);
946 roSeg->set_fileoff(objcReadOnlyBuffer - (uint8_t*)cache);
947 roSeg->set_filesize(objcReadOnlyBufferSizeAllocated);
948 roSeg->set_maxprot(VM_PROT_READ);
949 roSeg->set_initprot(VM_PROT_READ);
950 roSeg->set_nsects(0);
951 roSeg->set_flags(0);
952 rwSeg->set_cmd(macho_segment_command<P>::CMD);
953 rwSeg->set_cmdsize(segSize);
954 rwSeg->set_segname("__OBJC_RW");
955 rwSeg->set_vmaddr(cache->unslidLoadAddress() + objcReadWriteBuffer - (uint8_t*)cache);
956 rwSeg->set_vmsize(objcReadWriteBufferSizeAllocated);
957 rwSeg->set_fileoff(objcRwFileOffset);
958 rwSeg->set_filesize(objcReadWriteBufferSizeAllocated);
959 rwSeg->set_maxprot(VM_PROT_WRITE|VM_PROT_READ);
960 rwSeg->set_initprot(VM_PROT_WRITE|VM_PROT_READ);
961 rwSeg->set_nsects(0);
962 rwSeg->set_flags(0);
963
964 // update mach_header to account for new load commands
965 macho_header<P>* mh = (macho_header<P>*)libobjcMH;
966 mh->set_sizeofcmds(mh->sizeofcmds() + 2*segSize);
967 mh->set_ncmds(mh->ncmds()+2);
968
969 // fix up table at start of dyld cache that has pointer into install name for libobjc
970 dyld_cache_image_info* images = (dyld_cache_image_info*)((uint8_t*)cache + cache->header.imagesOffset);
971 uint64_t libobjcUnslidAddress = cache->unslidLoadAddress() + ((uint8_t*)libobjcMH - (uint8_t*)cache);
972 for (uint32_t i=0; i < cache->header.imagesCount; ++i) {
973 if ( images[i].address == libobjcUnslidAddress ) {
974 images[i].pathFileOffset += (2*segSize);
975 break;
976 }
977 }
978 }
979
980 template <typename P> static inline void emitIMPCaches(ContentAccessor& cacheAccessor,
981 std::vector<CacheBuilder::DylibInfo> & allDylibs,
982 std::vector<const macho_header<P>*> & sizeSortedDylibs,
983 bool relativeMethodListSelectorsAreDirect,
984 uint64_t selectorStringVMAddr,
985 uint8_t* optROData, size_t& optRORemaining,
986 uint8_t* optRWData, size_t& optRWRemaining,
987 CacheBuilder::ASLR_Tracker& aslrTracker,
988 const std::vector<const IMPCaches::Selector*> & inlinedSelectors,
989 uint8_t* &inlinedSelectorsStart,
990 uint8_t* &inlinedSelectorsEnd,
991 Diagnostics& diag,
992 TimeRecorder& timeRecorder) {
993 diag.verbose("[IMP caches] computing IMP map\n");
994
995 IMPMapBuilder<P> classRecorder(relativeMethodListSelectorsAreDirect);
996 for (const macho_header<P>* mh : sizeSortedDylibs) {
997 ClassWalker<P, IMPMapBuilder<P>> classWalker(classRecorder, ClassWalkerMode::ClassAndMetaclasses);
998 classWalker.walk(&cacheAccessor, mh);
999 classRecorder.visit(&cacheAccessor, mh);
1000 }
1001
1002 timeRecorder.recordTime("compute IMP map");
1003 diag.verbose("[IMP caches] emitting IMP caches\n");
1004
1005 IMPCachesEmitter<P> impCachesEmitter(diag, classRecorder, selectorStringVMAddr, optROData, optRORemaining, optRWData, optRWRemaining, allDylibs, sizeSortedDylibs, aslrTracker);
1006 ClassWalker<P, IMPCachesEmitter<P>> impEmitterClassWalker(impCachesEmitter, ClassWalkerMode::ClassAndMetaclasses);
1007 for (const macho_header<P>* mh : sizeSortedDylibs) {
1008 impEmitterClassWalker.walk(&cacheAccessor, mh);
1009 if (diag.hasError())
1010 return;
1011 }
1012
1013 inlinedSelectorsStart = optRWData;
1014 impCachesEmitter.emitInlinedSelectors(inlinedSelectors);
1015 inlinedSelectorsEnd = optRWData;
1016 }
1017
1018 template <typename P>
1019 void doOptimizeObjC(DyldSharedCache* cache, bool forProduction, CacheBuilder::ASLR_Tracker& aslrTracker,
1020 CacheBuilder::LOH_Tracker& lohTracker, const CacheBuilder::CacheCoalescedText& coalescedText,
1021 const std::map<void*, std::string>& missingWeakImports, Diagnostics& diag,
1022 uint8_t* objcReadOnlyBuffer, uint64_t objcReadOnlyBufferSizeUsed, uint64_t objcReadOnlyBufferSizeAllocated,
1023 uint8_t* objcReadWriteBuffer, uint64_t objcReadWriteBufferSizeAllocated,
1024 uint32_t objcRwFileOffset,
1025 std::vector<CacheBuilder::DylibInfo> & allDylibs,
1026 const std::vector<const IMPCaches::Selector*> & inlinedSelectors,
1027 bool impCachesSuccess,
1028 TimeRecorder& timeRecorder)
1029 {
1030 typedef typename P::E E;
1031 typedef typename P::uint_t pint_t;
1032
1033 diag.verbose("Optimizing objc metadata:\n");
1034 diag.verbose(" cache type is %s\n", forProduction ? "production" : "development");
1035
1036 ContentAccessor cacheAccessor(cache, diag);
1037
1038 size_t headerSize = P::round_up(sizeof(objc_opt::objc_opt_t));
1039 if (headerSize != sizeof(objc_opt::objc_opt_t)) {
1040 diag.warning("libobjc's optimization structure size is wrong (metadata not optimized)");
1041 }
1042
1043 //
1044 // Find libobjc's empty sections and build list of images with objc metadata
1045 //
1046 __block const mach_header* libobjcMH = nullptr;
1047 __block const macho_section<P> *optROSection = nullptr;
1048 __block const macho_section<P> *optPointerListSection = nullptr;
1049 __block const macho_section<P> *optImpCachesPointerSection = nullptr;
1050 __block std::vector<const macho_header<P>*> objcDylibs;
1051 cache->forEachImage(^(const mach_header* machHeader, const char* installName) {
1052 const macho_header<P>* mh = (const macho_header<P>*)machHeader;
1053 if ( strstr(installName, "/libobjc.") != nullptr ) {
1054 libobjcMH = (mach_header*)mh;
1055 optROSection = mh->getSection("__TEXT", "__objc_opt_ro");
1056 optPointerListSection = mh->getSection("__DATA", "__objc_opt_ptrs");
1057 if ( optPointerListSection == nullptr )
1058 optPointerListSection = mh->getSection("__AUTH", "__objc_opt_ptrs");
1059 optImpCachesPointerSection = mh->getSection("__DATA_CONST", "__objc_scoffs");
1060 }
1061 if ( mh->getSection("__DATA", "__objc_imageinfo") || mh->getSection("__OBJC", "__image_info") ) {
1062 objcDylibs.push_back(mh);
1063 }
1064 // log("installName %s at mhdr 0x%016lx", installName, (uintptr_t)cacheAccessor.vmAddrForContent((void*)mh));
1065 });
1066 if ( optROSection == nullptr ) {
1067 diag.warning("libobjc's read-only section missing (metadata not optimized)");
1068 return;
1069 }
1070 if ( optPointerListSection == nullptr ) {
1071 diag.warning("libobjc's pointer list section missing (metadata not optimized)");
1072 return;
1073 }
1074 if ( optImpCachesPointerSection == nullptr ) {
1075 diag.warning("libobjc's magical shared cache offsets list section missing (metadata not optimized)");
1076 }
1077 // point optROData into space allocated in dyld cache
1078 uint8_t* optROData = objcReadOnlyBuffer + objcReadOnlyBufferSizeUsed;
1079 size_t optRORemaining = objcReadOnlyBufferSizeAllocated - objcReadOnlyBufferSizeUsed;
1080 if ( optROData == nullptr ) {
1081 diag.warning("libobjc's read-only section has bad content");
1082 return;
1083 }
1084 *((uint32_t*)optROData) = objc_opt::VERSION;
1085
1086 uint8_t* optRWData = objcReadWriteBuffer;
1087 size_t optRWRemaining = objcReadWriteBufferSizeAllocated;
1088 if (optRORemaining < headerSize) {
1089 diag.warning("libobjc's read-only section is too small (metadata not optimized)");
1090 return;
1091 }
1092 objc_opt::objc_opt_t* optROHeader = (objc_opt::objc_opt_t *)optROData;
1093 optROData += headerSize;
1094 optRORemaining -= headerSize;
1095 if (E::get32(optROHeader->version) != objc_opt::VERSION) {
1096 diag.warning("libobjc's read-only section version is unrecognized (metadata not optimized)");
1097 return;
1098 }
1099
1100 if (optPointerListSection->size() < sizeof(objc_opt::objc_opt_pointerlist_tt<pint_t>)) {
1101 diag.warning("libobjc's pointer list section is too small (metadata not optimized)");
1102 return;
1103 }
1104 const objc_opt::objc_opt_pointerlist_tt<pint_t> *optPointerList = (const objc_opt::objc_opt_pointerlist_tt<pint_t> *)cacheAccessor.contentForVMAddr(optPointerListSection->addr());
1105
1106 // Write nothing to optROHeader until everything else is written.
1107 // If something fails below, libobjc will not use the section.
1108
1109
1110 //
1111 // Make a copy of the objc dylib list and sort it.
1112 //
1113 std::vector<const macho_header<P>*> addressSortedDylibs = objcDylibs;
1114 std::sort(addressSortedDylibs.begin(), addressSortedDylibs.end(), [](const macho_header<P>* lmh, const macho_header<P>* rmh) -> bool {
1115 return lmh < rmh;
1116 });
1117
1118 //
1119 // Build HeaderInfo list in cache
1120 //
1121 // First the RO header info
1122 // log("writing out %d RO dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optROSection->size() - optRORemaining));
1123 uint64_t hinfoROVMAddr = cacheAccessor.vmAddrForContent(optROData);
1124 HeaderInfoOptimizer<P, objc_header_info_ro_t<P>> hinfoROOptimizer;
1125 const char* err = hinfoROOptimizer.init((uint32_t)objcDylibs.size(), optROData, optRORemaining);
1126 if (err) {
1127 diag.warning("%s", err);
1128 return;
1129 }
1130 else {
1131 for (const macho_header<P>* mh : addressSortedDylibs) {
1132 hinfoROOptimizer.update(&cacheAccessor, mh, aslrTracker);
1133 }
1134 }
1135
1136 // Then the RW header info
1137 // log("writing out %d RW dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optRWSection->size() - optRWRemaining));
1138 uint64_t hinfoRWVMAddr = cacheAccessor.vmAddrForContent(optRWData);
1139 HeaderInfoOptimizer<P, objc_header_info_rw_t<P>> hinfoRWOptimizer;
1140 err = hinfoRWOptimizer.init((uint32_t)objcDylibs.size(), optRWData, optRWRemaining);
1141 if (err) {
1142 diag.warning("%s", err);
1143 return;
1144 }
1145 else {
1146 for (const macho_header<P>* mh : addressSortedDylibs) {
1147 hinfoRWOptimizer.update(&cacheAccessor, mh, aslrTracker);
1148 }
1149 }
1150
1151 //
1152 // Update selector references and build selector list
1153 //
1154 // This is SAFE: if we run out of room for the selector table,
1155 // the modified binaries are still usable.
1156 //
1157 // Heuristic: choose selectors from libraries with more selector cstring data first.
1158 // This tries to localize selector cstring memory.
1159 //
1160 ObjCSelectorUniquer<P> uniq(&cacheAccessor);
1161 std::vector<const macho_header<P>*> sizeSortedDylibs = objcDylibs;
1162 std::sort(sizeSortedDylibs.begin(), sizeSortedDylibs.end(), [](const macho_header<P>* lmh, const macho_header<P>* rmh) -> bool {
1163 // Sort a select few heavy hitters first.
1164 auto getPriority = [](const char* installName) -> int {
1165 if (!strcmp(installName, "/usr/lib/libobjc.A.dylib"))
1166 return 0;
1167 if (!strcmp(installName, "/System/Library/Frameworks/Foundation.framework/Versions/C/Foundation") ||
1168 !strcmp(installName, "/System/Library/Frameworks/Foundation.framework/Foundation"))
1169 return 1;
1170 if (!strcmp(installName, "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation") ||
1171 !strcmp(installName, "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation"))
1172 return 2;
1173 // Note we don't sort iOSMac UIKitCore early as we want iOSMac after macOS.
1174 if (!strcmp(installName, "/System/Library/PrivateFrameworks/UIKitCore.framework/UIKitCore"))
1175 return 3;
1176 if (!strcmp(installName, "/System/Library/Frameworks/AppKit.framework/Versions/C/AppKit"))
1177 return 4;
1178 if (!strcmp(installName, "/System/Library/Frameworks/CFNetwork.framework/Versions/A/CFNetwork") ||
1179 !strcmp(installName, "/System/Library/Frameworks/CFNetwork.framework/CFNetwork"))
1180 return 5;
1181 return INT_MAX;
1182 };
1183
1184 // Sort by priority first
1185 int priorityA = getPriority(((const dyld3::MachOFile*)lmh)->installName());
1186 int priorityB = getPriority(((const dyld3::MachOFile*)rmh)->installName());
1187 if (priorityA != priorityB)
1188 return priorityA < priorityB;
1189
1190 // Sort mac before iOSMac
1191 bool isIOSMacA = strncmp(((const dyld3::MachOFile*)lmh)->installName(), "/System/iOSSupport/", 19) == 0;
1192 bool isIOSMacB = strncmp(((const dyld3::MachOFile*)rmh)->installName(), "/System/iOSSupport/", 19) == 0;
1193 if (isIOSMacA != isIOSMacB)
1194 return !isIOSMacA;
1195
1196 const macho_section<P>* lSection = lmh->getSection("__TEXT", "__objc_methname");
1197 const macho_section<P>* rSection = rmh->getSection("__TEXT", "__objc_methname");
1198 uint64_t lSelectorSize = (lSection ? lSection->size() : 0);
1199 uint64_t rSelectorSize = (rSection ? rSection->size() : 0);
1200 return lSelectorSize > rSelectorSize;
1201 });
1202
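// Rounds a pointer up to the next 8-byte boundary, e.g. ...0x1001 -> ...0x1008;
// already-aligned pointers are returned unchanged.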
1203 auto alignPointer = [](uint8_t* ptr) -> uint8_t* {
1204 return (uint8_t*)(((uintptr_t)ptr + 0x7) & ~0x7);
1205 };
1206
1207 // Relative method list names are initially offsets to selector references.
1208 // Eventually we'll update them to offsets directly to the selector strings.
1209 bool relativeMethodListSelectorsAreDirect = false;
1210
1211 SelectorOptimizer<P, ObjCSelectorUniquer<P> > selOptimizer(uniq, relativeMethodListSelectorsAreDirect);
1212 selOptimizer.visitCoalescedStrings(coalescedText);
1213 for (const macho_header<P>* mh : sizeSortedDylibs) {
1214 LegacySelectorUpdater<P, ObjCSelectorUniquer<P>>::update(&cacheAccessor, mh, uniq);
1215 selOptimizer.optimize(&cacheAccessor, mh);
1216 }
1217
1218 diag.verbose(" uniqued %6lu selectors\n", uniq.strings().size());
1219 diag.verbose(" updated %6lu selector references\n", uniq.count());
1220
1221 uint64_t seloptVMAddr = cacheAccessor.vmAddrForContent(optROData);
1222 objc_opt::objc_selopt_t *selopt = new(optROData) objc_opt::objc_selopt_t;
1223 err = selopt->write(seloptVMAddr, optRORemaining, uniq.strings());
1224 if (err) {
1225 diag.warning("%s", err);
1226 return;
1227 }
1228 optROData += selopt->size();
1229 optROData = alignPointer(optROData);
1230 optRORemaining -= selopt->size();
1231 uint32_t seloptCapacity = selopt->capacity;
1232 uint32_t seloptOccupied = selopt->occupied;
1233 selopt->byteswap(E::little_endian), selopt = nullptr;
1234
1235 diag.verbose(" selector table occupancy %u/%u (%u%%)\n",
1236 seloptOccupied, seloptCapacity,
1237 (unsigned)(seloptOccupied/(double)seloptCapacity*100));
1238
1239
1240 //
1241 // Detect classes that have missing weak-import superclasses.
1242 //
1243 // Production shared caches don't support roots so we can set this and know
1244 // there will definitely not be missing weak superclasses at runtime.
1245 // Development shared caches can set this bit as the objc runtime only trusts
1246 // this bit if there are no roots at runtime.
1247 //
1248 // This is SAFE: the binaries themselves are unmodified.
1249 WeakClassDetector<P> weakopt;
1250 bool noMissingWeakSuperclasses = weakopt.noMissingWeakSuperclasses(&cacheAccessor,
1251 missingWeakImports,
1252 sizeSortedDylibs);
1253
1254 if (forProduction) {
1255 // Shared cache does not currently support unbound weak references.
1256 // Here we assert that there are none. If support is added later then
1257 // this assertion needs to be removed and this path needs to be tested.
1258 // FIXME: The internal cache also isn't going to notice that an on-disk
1259 // dylib could resolve a weak bind from the shared cache. Should we just
1260 // error on all caches, regardless of dev/customer?
1261 if (!noMissingWeakSuperclasses) {
1262 diag.error("Some Objective-C class has a superclass that is "
1263 "weak-import and missing from the cache.");
1264 }
1265 }
1266
1267
1268 //
1269 // Build class table.
1270 //
1271 // This is SAFE: the binaries themselves are unmodified.
1272 ClassListBuilder<P> classes(hinfoROOptimizer);
1273 ClassWalker<P, ClassListBuilder<P>> classWalker(classes);
1274 for (const macho_header<P>* mh : sizeSortedDylibs) {
1275 classWalker.walk(&cacheAccessor, mh);
1276 }
1277
1278 diag.verbose(" recorded % 6ld classes\n", classes.classNames().size());
1279
1280 uint64_t clsoptVMAddr = cacheAccessor.vmAddrForContent(optROData);
1281 objc_opt::objc_clsopt_t *clsopt = new(optROData) objc_opt::objc_clsopt_t;
1282 err = clsopt->write(clsoptVMAddr, optRORemaining,
1283 classes.classNames(), classes.classes(), false);
1284 if (err) {
1285 diag.warning("%s", err);
1286 return;
1287 }
1288 optROData += clsopt->size();
1289 optROData = alignPointer(optROData);
1290 optRORemaining -= clsopt->size();
1291 size_t duplicateCount = clsopt->duplicateCount();
1292 uint32_t clsoptCapacity = clsopt->capacity;
1293 uint32_t clsoptOccupied = clsopt->occupied;
1294 clsopt->byteswap(E::little_endian);
1295 clsopt = nullptr;
1296
1297 diag.verbose(" found % 6ld duplicate classes\n",
1298 duplicateCount);
1299 diag.verbose(" class table occupancy %u/%u (%u%%)\n",
1300 clsoptOccupied, clsoptCapacity,
1301 (unsigned)(clsoptOccupied/(double)clsoptCapacity*100));
1302
1303
1304 //
1305 // Sort method lists.
1306 //
1307 // This is SAFE: modified binaries are still usable as unsorted lists.
1308 // This must be done AFTER uniquing selectors.
1309 MethodListSorter<P> methodSorter(relativeMethodListSelectorsAreDirect);
1310 for (const macho_header<P>* mh : sizeSortedDylibs) {
1311 methodSorter.optimize(&cacheAccessor, mh);
1312 }
1313
1314 diag.verbose(" sorted % 6ld method lists\n", methodSorter.optimized());
1315
1316
1317 // Unique protocols and build protocol table.
1318
1319 // This is SAFE: no protocol references are updated yet
1320 // This must be done AFTER updating method lists.
1321
1322 ProtocolOptimizer<P> protocolOptimizer(diag, hinfoROOptimizer);
1323 for (const macho_header<P>* mh : sizeSortedDylibs) {
1324 protocolOptimizer.addProtocols(&cacheAccessor, mh);
1325 }
1326
1327 diag.verbose(" uniqued % 6ld protocols\n",
1328 protocolOptimizer.protocolCount());
1329
1330 pint_t protocolClassVMAddr = (pint_t)P::getP(optPointerList->protocolClass);
1331
1332 // Get the pointer metadata from the magic protocolClassVMAddr symbol
1333 // We'll transfer it over to the ISA on all the objc protocols when we set their ISAs
1334 dyld3::MachOAnalyzerSet::PointerMetaData protocolClassPMD;
1335 uint16_t protocolClassAuthDiversity = 0;
1336 bool protocolClassAuthIsAddr = false;
1337 uint8_t protocolClassAuthKey = 0;
1338 if ( aslrTracker.hasAuthData((void*)&optPointerList->protocolClass, &protocolClassAuthDiversity, &protocolClassAuthIsAddr, &protocolClassAuthKey) ) {
1339 protocolClassPMD.diversity = protocolClassAuthDiversity;
1340 protocolClassPMD.high8 = 0;
1341 protocolClassPMD.authenticated = 1;
1342 protocolClassPMD.key = protocolClassAuthKey;
1343 protocolClassPMD.usesAddrDiversity = protocolClassAuthIsAddr;
1344 }
1345
1346 err = protocolOptimizer.writeProtocols(&cacheAccessor,
1347 optRWData, optRWRemaining,
1348 optROData, optRORemaining,
1349 aslrTracker, protocolClassVMAddr,
1350 protocolClassPMD);
1351 if (err) {
1352 diag.warning("%s", err);
1353 return;
1354 }
1355
1356 // Align the buffer again. The new protocols may have added an odd number of name characters
1357 optROData = alignPointer(optROData);
1358
1359 // New protocol table which tracks loaded images.
1360 uint64_t protocoloptVMAddr = cacheAccessor.vmAddrForContent(optROData);
1361 objc_opt::objc_protocolopt2_t *protocolopt = new (optROData) objc_opt::objc_protocolopt2_t;
1362 err = protocolopt->write(protocoloptVMAddr, optRORemaining,
1363 protocolOptimizer.protocolNames(),
1364 protocolOptimizer.protocolsAndHeaders(), false);
1365 if (err) {
1366 diag.warning("%s", err);
1367 return;
1368 }
1369 optROData += protocolopt->size();
1370 optROData = alignPointer(optROData);
1371 optRORemaining -= protocolopt->size();
1372 uint32_t protocoloptCapacity = protocolopt->capacity;
1373 uint32_t protocoloptOccupied = protocolopt->occupied;
1374 protocolopt->byteswap(E::little_endian), protocolopt = NULL;
1375
1376 diag.verbose(" protocol table occupancy %u/%u (%u%%)\n",
1377 protocoloptOccupied, protocoloptCapacity,
1378 (unsigned)(protocoloptOccupied/(double)protocoloptCapacity*100));
1379
1380
1381 // Redirect protocol references to the uniqued protocols.
1382
1383 // This is SAFE: the new protocol objects are still usable as-is.
1384 for (const macho_header<P>* mh : sizeSortedDylibs) {
1385 protocolOptimizer.updateReferences(&cacheAccessor, mh);
1386 }
1387
1388 diag.verbose(" updated % 6ld protocol references\n", protocolOptimizer.protocolReferenceCount());
1389
1390
1391 //
1392 // Repair ivar offsets.
1393 //
1394 // This is SAFE: the runtime always validates ivar offsets at runtime.
1395 IvarOffsetOptimizer<P> ivarOffsetOptimizer;
1396 for (const macho_header<P>* mh : sizeSortedDylibs) {
1397 ivarOffsetOptimizer.optimize(&cacheAccessor, mh);
1398 }
1399
1400 diag.verbose(" updated % 6ld ivar offsets\n", ivarOffsetOptimizer.optimized());
1401
1402 //
1403 // Build imp caches
1404 //
1405 // Objc has a magic section of imp cache base pointers. We need these as the base
1406 // addresses that everything else is offset from.
1407 const CacheBuilder::CacheCoalescedText::StringSection& methodNames = coalescedText.getSectionData("__objc_methname");
1408 uint64_t selectorStringVMAddr = methodNames.bufferVMAddr;
1409 uint64_t selectorStringVMSize = methodNames.bufferSize;
1410 uint64_t impCachesVMSize = 0; // We'll calculate this later
1411
1412 uint64_t optRODataRemainingBeforeImpCaches = optRORemaining;
1413
1414 timeRecorder.pushTimedSection();
1415
1416 uint8_t* inlinedSelectorsStart = optRWData;
1417 uint8_t* inlinedSelectorsEnd = optRWData;
1418
1419 if (impCachesSuccess) {
1420 emitIMPCaches<P>(cacheAccessor, allDylibs, sizeSortedDylibs, relativeMethodListSelectorsAreDirect,
1421 selectorStringVMAddr, optROData, optRORemaining, optRWData, optRWRemaining,
1422 aslrTracker, inlinedSelectors, inlinedSelectorsStart, inlinedSelectorsEnd, diag, timeRecorder);
1423 }
1424
1425 uint8_t* alignedROData = alignPointer(optROData);
1426 optRORemaining -= (alignedROData - optROData);
1427 optROData = alignedROData;
1428
1429 impCachesVMSize = optRODataRemainingBeforeImpCaches - optRORemaining;
1430 timeRecorder.recordTime("emit IMP caches");
1431 timeRecorder.popTimedSection();
1432
1433 diag.verbose("[IMP Caches] Imp caches size: %'lld bytes\n\n", impCachesVMSize);
1434
1435 // Update the pointers in the pointer list section
1436 if (optImpCachesPointerSection) {
1437 if (optImpCachesPointerSection->size() < sizeof(objc_opt::objc_opt_pointerlist_tt<pint_t>)) {
1438 diag.warning("libobjc's pointer list section is too small (metadata not optimized)");
1439 return;
1440 }
1441 auto *impCachePointers = (objc_opt_imp_caches_pointerlist_tt<pint_t> *)cacheAccessor.contentForVMAddr(optImpCachesPointerSection->addr());
1442 impCachePointers->selectorStringVMAddrStart = (pint_t)selectorStringVMAddr;
1443 impCachePointers->selectorStringVMAddrEnd = (pint_t)(selectorStringVMAddr + selectorStringVMSize);
1444 impCachePointers->inlinedSelectorsVMAddrStart = (pint_t)cacheAccessor.vmAddrForContent(inlinedSelectorsStart);
1445 impCachePointers->inlinedSelectorsVMAddrEnd = (pint_t)cacheAccessor.vmAddrForContent(inlinedSelectorsEnd);
1446
1447 aslrTracker.add(&impCachePointers->selectorStringVMAddrStart);
1448 aslrTracker.add(&impCachePointers->selectorStringVMAddrEnd);
1449 aslrTracker.add(&impCachePointers->inlinedSelectorsVMAddrStart);
1450 aslrTracker.add(&impCachePointers->inlinedSelectorsVMAddrEnd);
1451 }
1452
1453 // Collect flags.
1454 uint32_t headerFlags = 0;
1455 if (forProduction) {
1456 headerFlags |= objc_opt::IsProduction;
1457 }
1458 if (noMissingWeakSuperclasses) {
1459 headerFlags |= objc_opt::NoMissingWeakSuperclasses;
1460 }
1461
1462
1463 // Success. Mark dylibs as optimized.
1464 for (const macho_header<P>* mh : sizeSortedDylibs) {
1465 const macho_section<P>* imageInfoSection = mh->getSection("__DATA", "__objc_imageinfo");
1466 if (!imageInfoSection) {
1467 imageInfoSection = mh->getSection("__OBJC", "__image_info");
1468 }
1469 if (imageInfoSection) {
1470 objc_image_info<P>* info = (objc_image_info<P>*)cacheAccessor.contentForVMAddr(imageInfoSection->addr());
1471 info->setOptimizedByDyld();
1472 }
1473 }
1474
1475
1476 // Success. Update __objc_opt_ro section in libobjc.dylib to contain offsets to generated optimization structures
1477 objc_opt::objc_opt_t* libROHeader = (objc_opt::objc_opt_t *)cacheAccessor.contentForVMAddr(optROSection->addr());
1478 E::set32(libROHeader->flags, headerFlags);
1479 E::set32(libROHeader->selopt_offset, (uint32_t)(seloptVMAddr - optROSection->addr()));
1480 E::set32(libROHeader->clsopt_offset, (uint32_t)(clsoptVMAddr - optROSection->addr()));
1481 E::set32(libROHeader->unused_protocolopt_offset, 0);
1482 E::set32(libROHeader->headeropt_ro_offset, (uint32_t)(hinfoROVMAddr - optROSection->addr()));
1483 E::set32(libROHeader->headeropt_rw_offset, (uint32_t)(hinfoRWVMAddr - optROSection->addr()));
1484 E::set32(libROHeader->protocolopt_offset, (uint32_t)(protocoloptVMAddr - optROSection->addr()));
1485
1486 // Log statistics.
1487 size_t roSize = objcReadOnlyBufferSizeAllocated - optRORemaining;
1488 size_t rwSize = objcReadWriteBufferSizeAllocated - optRWRemaining;
1489 diag.verbose(" %lu/%llu bytes (%d%%) used in shared cache read-only optimization region\n",
1490 roSize, objcReadOnlyBufferSizeAllocated, percent(roSize, objcReadOnlyBufferSizeAllocated));
1491 diag.verbose(" %lu/%llu bytes (%d%%) used in shared cache read/write optimization region\n",
1492 rwSize, objcReadWriteBufferSizeAllocated, percent(rwSize, objcReadWriteBufferSizeAllocated));
1493 diag.verbose(" wrote objc metadata optimization version %d\n", objc_opt::VERSION);
1494
1495 // Add segments to libobjc.dylib that cover cache builder allocated r/o and r/w regions
1496 addObjcSegments<P>(diag, cache, libobjcMH, objcReadOnlyBuffer, objcReadOnlyBufferSizeAllocated, objcReadWriteBuffer, objcReadWriteBufferSizeAllocated, objcRwFileOffset);
1497
1498
1499 // Now that objc has uniqued the selector references, we can apply the LOHs so that ADRP/LDR -> ADRP/ADD
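    // For illustration only (register names are hypothetical), a typical pair
    //     adrp x8, _selRef@PAGE
    //     ldr  x1, [x8, _selRef@PAGEOFF]     ; load selector string through the selref slot
    // is rewritten so the ADRP targets the page of the uniqued selector string and the
    // LDR becomes an ADD of its page offset:
    //     adrp x8, _selString@PAGE
    //     add  x1, x8, _selString@PAGEOFF    ; materialize the selector string address directly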
1500 {
1501 const bool logSelectors = false;
1502 uint64_t lohADRPCount = 0;
1503 uint64_t lohLDRCount = 0;
1504
1505 for (auto& targetAndInstructions : lohTracker) {
1506 uint64_t targetVMAddr = targetAndInstructions.first;
1507 if (!selOptimizer.isSelectorRefAddress((pint_t)targetVMAddr))
1508 continue;
1509
1510 std::set<void*>& instructions = targetAndInstructions.second;
1511 // We do 2 passes over the instructions. The first to validate them and the second
1512 // to actually update them.
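            // If any instruction fails validation, the whole set for this selector reference
            // is dropped (instructions.clear()), so no location is ever partially rewritten.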
1513 for (unsigned pass = 0; pass != 2; ++pass) {
1514 uint32_t adrpCount = 0;
1515 uint32_t ldrCount = 0;
1516 for (void* instructionAddress : instructions) {
1517 uint32_t& instruction = *(uint32_t*)instructionAddress;
1518 uint64_t instructionVMAddr = cacheAccessor.vmAddrForContent(&instruction);
1519 uint64_t selRefContent = *(uint64_t*)cacheAccessor.contentForVMAddr(targetVMAddr);
1520 const char* selectorString = (const char*)cacheAccessor.contentForVMAddr(selRefContent);
1521 uint64_t selectorStringVMAddr = cacheAccessor.vmAddrForContent(selectorString);
1522
1523 if ( (instruction & 0x9F000000) == 0x90000000 ) {
1524 // ADRP
1525 int64_t pageDistance = ((selectorStringVMAddr & ~0xFFF) - (instructionVMAddr & ~0xFFF));
1526 int64_t newPage21 = pageDistance >> 12;
1527
1528 if (pass == 0) {
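                            // The new page delta must fit in ADRP's signed 21-bit immediate
                            // (this check is slightly conservative at the negative end).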
1529 if ( (newPage21 > 2097151) || (newPage21 < -2097151) ) {
1530 if (logSelectors)
1531 fprintf(stderr, "Out of bounds ADRP selector reference target\n");
1532 instructions.clear();
1533 break;
1534 }
1535 ++adrpCount;
1536 }
1537
1538 if (pass == 1) {
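                            // Re-encode the page immediate: bits [1:0] of newPage21 go into immlo
                            // (instruction bits 30:29) and bits [20:2] into immhi (bits 23:5).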
1539 instruction = (instruction & 0x9F00001F) | ((newPage21 << 29) & 0x60000000) | ((newPage21 << 3) & 0x00FFFFE0);
1540 ++lohADRPCount;
1541 }
1542 continue;
1543 }
1544
1545 if ( (instruction & 0x3B000000) == 0x39000000 ) {
1546 // LDR/STR. STR shouldn't be possible as this is a selref!
1547 if (pass == 0) {
1548 if ( (instruction & 0xC0C00000) != 0xC0400000 ) {
1549 // Not a load, or dest reg isn't xN, or uses sign extension
1550 if (logSelectors)
1551 fprintf(stderr, "Bad LDR for selector reference optimisation\n");
1552 instructions.clear();
1553 break;
1554 }
1555 if ( (instruction & 0x04000000) != 0 ) {
1556 // Loading a float
1557 if (logSelectors)
1558 fprintf(stderr, "Bad LDR for selector reference optimisation\n");
1559 instructions.clear();
1560 break;
1561 }
1562 ++ldrCount;
1563 }
1564
1565 if (pass == 1) {
1566 uint32_t ldrDestReg = (instruction & 0x1F);
1567 uint32_t ldrBaseReg = ((instruction >> 5) & 0x1F);
1568
1569 // Convert the LDR to an ADD
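                            // 0x91000000 is ADD Xd, Xn, #imm12 (64-bit, LSL #0); keep the original
                            // destination and base registers and place the selector string's low
                            // 12 bits (its page offset) in the imm12 field at bits 21:10.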
1570 instruction = 0x91000000;
1571 instruction |= ldrDestReg;
1572 instruction |= ldrBaseReg << 5;
1573 instruction |= (selectorStringVMAddr & 0xFFF) << 10;
1574
1575 ++lohLDRCount;
1576 }
1577 continue;
1578 }
1579
1580 if ( (instruction & 0xFFC00000) == 0x91000000 ) {
1581 // ADD imm12
1582 // We don't support ADDs.
1583 if (logSelectors)
1584 fprintf(stderr, "Bad ADD for selector reference optimisation\n");
1585 instructions.clear();
1586 break;
1587 }
1588
1589 if (logSelectors)
1590 fprintf(stderr, "Unknown instruction for selref optimisation\n");
1591 instructions.clear();
1592 break;
1593 }
1594 if (pass == 0) {
1595                     // If we didn't see at least one ADRP and at least one LDR in the validation pass then don't optimize this location
1596 if ((adrpCount == 0) || (ldrCount == 0)) {
1597 instructions.clear();
1598 break;
1599 }
1600 }
1601 }
1602 }
1603
1604 diag.verbose(" Optimized %lld ADRP LOHs\n", lohADRPCount);
1605 diag.verbose(" Optimized %lld LDR LOHs\n", lohLDRCount);
1606 }
1607 }
1608
1609
1610 } // anon namespace
1611
1612 size_t IMPCaches::sizeForImpCacheWithCount(int count) {
1613     // The architecture should not be relevant here, as the computation involves only offsets and fixed-size integer fields.
1614     // This was simply the most convenient place to host this function.
1615
1616 size_t size64 = IMPCachesEmitter<Pointer64<LittleEndian>>::sizeForImpCacheWithCount(count);
1617 size_t size32 = IMPCachesEmitter<Pointer32<LittleEndian>>::sizeForImpCacheWithCount(count);
1618 assert(size64 == size32);
1619
1620 return size64;
1621 }
1622
1623 void SharedCacheBuilder::optimizeObjC(bool impCachesSuccess, const std::vector<const IMPCaches::Selector*> & inlinedSelectors)
1624 {
1625 // FIXME: Can we move the objc RW content to the __DATA_CONST region?
1626 // For now, it is always at the end of the last region
1627 const Region* readWriteRegion = lastDataRegion();
1628 uint32_t objcRwFileOffset = (uint32_t)((_objcReadWriteBuffer - readWriteRegion->buffer) + readWriteRegion->cacheFileOffset);
1629 if ( _archLayout->is64 )
1630 doOptimizeObjC<Pointer64<LittleEndian>>((DyldSharedCache*)_readExecuteRegion.buffer,
1631 _options.optimizeStubs,
1632 _aslrTracker, _lohTracker,
1633 _coalescedText,
1634 _missingWeakImports, _diagnostics,
1635 _objcReadOnlyBuffer,
1636 _objcReadOnlyBufferSizeUsed,
1637 _objcReadOnlyBufferSizeAllocated,
1638 _objcReadWriteBuffer, _objcReadWriteBufferSizeAllocated,
1639 objcRwFileOffset, _sortedDylibs, inlinedSelectors, impCachesSuccess, _timeRecorder);
1640 else
1641 doOptimizeObjC<Pointer32<LittleEndian>>((DyldSharedCache*)_readExecuteRegion.buffer,
1642 _options.optimizeStubs,
1643 _aslrTracker, _lohTracker,
1644 _coalescedText,
1645 _missingWeakImports, _diagnostics,
1646 _objcReadOnlyBuffer,
1647 _objcReadOnlyBufferSizeUsed,
1648 _objcReadOnlyBufferSizeAllocated,
1649 _objcReadWriteBuffer, _objcReadWriteBufferSizeAllocated,
1650 objcRwFileOffset, _sortedDylibs, inlinedSelectors, impCachesSuccess, _timeRecorder);
1651 }
1652
1653 static uint32_t hashTableSize(uint32_t maxElements, uint32_t perElementData)
1654 {
1655 uint32_t elementsWithPadding = maxElements*11/10; // if close to power of 2, perfect hash may fail, so don't get within 10% of that
1656 uint32_t powTwoCapacity = 1 << (32 - __builtin_clz(elementsWithPadding - 1));
1657 uint32_t headerSize = 4*(8+256);
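    // Rough accounting (a sketch, not the exact objc table layout): a fixed header plus a
    // 256-entry scramble table, then powTwoCapacity/2 tab bytes, powTwoCapacity check bytes,
    // and powTwoCapacity*perElementData bytes of payload. For example, maxElements = 1000
    // with perElementData = 5 gives powTwoCapacity = 2048 and a total of 14368 bytes.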
1658 return headerSize + powTwoCapacity/2 + powTwoCapacity + powTwoCapacity*perElementData;
1659 }
1660
1661 // The goal here is to allocate space in the dyld shared cache (while it is being laid out) that will contain
1662 // the objc structures that previously were in the __objc_opt_ro section.
1663 uint32_t SharedCacheBuilder::computeReadOnlyObjC(uint32_t selRefCount, uint32_t classDefCount, uint32_t protocolDefCount)
1664 {
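    // 0xA000 is a fixed allowance (presumably for the objc_opt_t header and other small
    // read-only structures); each table gets per-bucket payload of 5 bytes for selectors,
    // 12 for classes and 8 for protocols.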
1665 return 0xA000 + hashTableSize(selRefCount, 5) + hashTableSize(classDefCount, 12) + hashTableSize(protocolDefCount, 8);
1666 }
1667
1668 // Space to replace the __objc_opt_rw section.
1669 uint32_t SharedCacheBuilder::computeReadWriteObjC(uint32_t imageCount, uint32_t protocolDefCount)
1670 {
1671 uint8_t pointerSize = _archLayout->is64 ? 8 : 4;
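    // Rough accounting (assumptions, not exact struct sizes): ~8 bytes of per-image header
    // info, ~12 pointers per canonical protocol object, and one pointer per selector
    // inlined into IMP caches.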
1672 return 8*imageCount
1673 + protocolDefCount*12*pointerSize
1674 + (int)_impCachesBuilder->inlinedSelectors.size() * pointerSize;
1675 }