/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <dirent.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <assert.h>
#include <ctype.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "DyldSharedCache.h"
#include "Diagnostics.h"
#include "CacheBuilder.h"
#include "FileAbstraction.hpp"
#include "MachOFileAbstraction.hpp"
#include "MachOLoaded.h"
#include "MachOAnalyzer.h"

#ifndef MH_HAS_OBJC
#define MH_HAS_OBJC 0x40000000
#endif

// Scan a C++ or Swift length-mangled field.
static bool scanMangledField(const char *&string, const char *end,
                             const char *&field, int& length)
{
    // Leading zero not allowed.
    if (*string == '0') return false;

    length = 0;
    field = string;
    while (field < end) {
        char c = *field;
        if (!isdigit(c)) break;
        field++;
        if (__builtin_smul_overflow(length, 10, &length)) return false;
        if (__builtin_sadd_overflow(length, c - '0', &length)) return false;
    }

    string = field + length;
    return length > 0 && string <= end;
}
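// Example: given string = "5Hello7MyClass" (end pointing at the trailing NUL),
// one call yields length = 5 with field pointing at "Hello7MyClass", and
// advances string past the scanned field to "7MyClass".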


// copySwiftDemangledName
// Returns the pretty form of the given Swift-mangled class or protocol name.
// Returns nullptr if the string doesn't look like a mangled Swift name.
// The result must be freed with free().
static char *copySwiftDemangledName(const char *string, bool isProtocol = false)
{
    if (!string) return nullptr;

    // Swift mangling prefix.
    if (strncmp(string, isProtocol ? "_TtP" : "_TtC", 4) != 0) return nullptr;
    string += 4;

    const char *end = string + strlen(string);

    // Module name.
    const char *prefix;
    int prefixLength;
    if (string[0] == 's') {
        // "s" is the Swift module.
        prefix = "Swift";
        prefixLength = 5;
        string += 1;
    } else {
        if (!scanMangledField(string, end, prefix, prefixLength)) return nullptr;
    }

    // Class or protocol name.
    const char *suffix;
    int suffixLength;
    if (!scanMangledField(string, end, suffix, suffixLength)) return nullptr;

    if (isProtocol) {
        // Remainder must be "_".
        if (strcmp(string, "_") != 0) return nullptr;
    } else {
        // Remainder must be empty.
        if (string != end) return nullptr;
    }

    char *result;
    asprintf(&result, "%.*s.%.*s", prefixLength, prefix, suffixLength, suffix);
    return result;
}
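// Examples: "_TtC5Hello7MyClass" demangles to "Hello.MyClass";
// with isProtocol=true, "_TtP5Hello4Impl_" demangles to "Hello.Impl".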


class ContentAccessor {
public:
    ContentAccessor(const DyldSharedCache* cache, Diagnostics& diag)
        : _diagnostics(diag)
    {
        _cacheStart       = (uint8_t*)cache;
        _cacheUnslideAddr = cache->unslidLoadAddress();
        _slide            = (uint64_t)cache - _cacheUnslideAddr;
#if SUPPORT_ARCH_arm64e
        _chainedFixups    = (strcmp(cache->archName(), "arm64e") == 0);
#else
        _chainedFixups    = false;
#endif
    }

    // Converts an on-disk vmAddr to the real vmAddr.
    // That is, for a chained fixup it decodes the chain; for a non-chained fixup it does nothing.
    uint64_t vmAddrForOnDiskVMAddr(uint64_t vmaddr) {
        if ( _chainedFixups ) {
            dyld3::MachOLoaded::ChainedFixupPointerOnDisk ptr;
            ptr.raw64 = vmaddr;
            assert(ptr.arm64e.authRebase.bind == 0);
            if ( ptr.arm64e.authRebase.auth ) {
                vmaddr = _cacheUnslideAddr + ptr.arm64e.authRebase.target;
            }
            else {
                vmaddr = ptr.arm64e.unpackTarget();
            }
        }
        return vmaddr;
    }

    void* contentForVMAddr(uint64_t vmaddr) {
        vmaddr = vmAddrForOnDiskVMAddr(vmaddr);
        if ( vmaddr != 0 ) {
            uint64_t offset = vmaddr - _cacheUnslideAddr;
            return _cacheStart + offset;
        } else
            return nullptr;
    }

    uint64_t vmAddrForContent(const void* content) {
        if ( content != nullptr )
            return _cacheUnslideAddr + ((uint8_t*)content - _cacheStart);
        else
            return 0;
    }

    Diagnostics& diagnostics() { return _diagnostics; }

private:
    Diagnostics&    _diagnostics;
    uint64_t        _slide;
    uint64_t        _cacheUnslideAddr;
    uint8_t*        _cacheStart;
    bool            _chainedFixups;
};


// Access a section containing a list of pointers
template <typename P, typename T>
class PointerSection
{
    typedef typename P::uint_t pint_t;
public:
    PointerSection(ContentAccessor* cache, const macho_header<P>* mh,
                   const char* segname, const char* sectname)
        : _cache(cache),
          _section(mh->getSection(segname, sectname)),
          _base(_section ? (pint_t*)cache->contentForVMAddr(_section->addr()) : 0),
          _count(_section ? (pint_t)(_section->size() / sizeof(pint_t)) : 0) {
    }

    pint_t count() const { return _count; }

    pint_t getVMAddress(pint_t index) const {
        if ( index >= _count ) {
            _cache->diagnostics().error("index out of range in section %s", _section->sectname());
            return 0;
        }
        return (pint_t)P::getP(_base[index]);
    }

    pint_t getSectionVMAddress() const {
        return (pint_t)_section->addr();
    }

    T get(pint_t index) const {
        return (T)_cache->contentForVMAddr(getVMAddress(index));
    }

    void setVMAddress(pint_t index, pint_t value) {
        if ( index >= _count ) {
            _cache->diagnostics().error("index out of range in section %s", _section->sectname());
            return;
        }
        P::setP(_base[index], value);
    }

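    // Compact the section in place: keep non-null entries, drop nulls,
    // and shrink both _count and the section's recorded size to match.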
    void removeNulls() {
        pint_t shift = 0;
        for (pint_t i = 0; i < _count; i++) {
            pint_t value = _base[i];
            if (value) {
                _base[i-shift] = value;
            } else {
                shift++;
            }
        }
        _count -= shift;
        const_cast<macho_section<P>*>(_section)->set_size(_count * sizeof(pint_t));
    }

private:
    ContentAccessor* const         _cache;
    const macho_section<P>* const  _section;
    pint_t* const                  _base;
    pint_t                         _count;    // mutated by removeNulls(), so must not be const
};


// Access a section containing an array of structures
template <typename P, typename T>
class ArraySection
{
public:
    ArraySection(ContentAccessor* cache, const macho_header<P>* mh,
                 const char *segname, const char *sectname)
        : _cache(cache),
          _section(mh->getSection(segname, sectname)),
          _base(_section ? (T *)cache->contentForVMAddr(_section->addr()) : 0),
          _count(_section ? _section->size() / sizeof(T) : 0) {
    }

    uint64_t count() const { return _count; }

    T& get(uint64_t index) const {
        if (index >= _count) {
            _cache->diagnostics().error("index out of range in section %s", _section->sectname());
        }
        return _base[index];
    }

private:
    ContentAccessor* const         _cache;
    const macho_section<P>* const  _section;
    T * const                      _base;
    uint64_t const                 _count;
};


#define SELOPT_WRITE
#include "objc-shared-cache.h"
#include "ObjC1Abstraction.hpp"
#include "ObjC2Abstraction.hpp"


namespace {


template <typename P>
class ObjCSelectorUniquer
{
public:
    typedef typename P::uint_t pint_t;

    ObjCSelectorUniquer(ContentAccessor* cache) : _cache(cache) { }

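    // Canonicalize one selector reference: record the first string seen for
    // each selector name and return that canonical string's vmAddr.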
    pint_t visit(pint_t oldValue)
    {
        _count++;
        const char *s = (const char *)_cache->contentForVMAddr(oldValue);
        oldValue = (pint_t)_cache->vmAddrForOnDiskVMAddr(oldValue);
        objc_opt::string_map::iterator element =
            _selectorStrings.insert(objc_opt::string_map::value_type(s, oldValue)).first;
        return (pint_t)element->second;
    }

    void visitCoalescedStrings(const CacheBuilder::CacheCoalescedText& coalescedText) {
        const CacheBuilder::CacheCoalescedText::StringSection& methodNames = coalescedText.getSectionData("__objc_methname");
        for (const auto& stringAndOffset : methodNames.stringsToOffsets) {
            uint64_t vmAddr = methodNames.bufferVMAddr + stringAndOffset.second;
            _selectorStrings[stringAndOffset.first.data()] = vmAddr;
        }
    }

    objc_opt::string_map& strings() {
        return _selectorStrings;
    }

    size_t count() const { return _count; }

private:
    objc_opt::string_map _selectorStrings;
    ContentAccessor*     _cache;
    size_t               _count = 0;
};


template <typename P>
class ClassListBuilder
{
private:
    objc_opt::string_map _classNames;
    objc_opt::class_map  _classes;
    size_t               _count = 0;
    HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& _hInfos;

public:

    ClassListBuilder(HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& hinfos) : _hInfos(hinfos) { }

    void visitClass(ContentAccessor* cache,
                    const macho_header<P>* header,
                    objc_class_t<P>* cls)
    {
        if (cls->isMetaClass(cache)) return;

        const char *name = cls->getName(cache);
        uint64_t name_vmaddr = cache->vmAddrForContent((void*)name);
        uint64_t cls_vmaddr = cache->vmAddrForContent(cls);
        uint64_t hinfo_vmaddr = cache->vmAddrForContent(_hInfos.hinfoForHeader(cache, header));
        _classNames.insert(objc_opt::string_map::value_type(name, name_vmaddr));
        _classes.insert(objc_opt::class_map::value_type(name, std::pair<uint64_t, uint64_t>(cls_vmaddr, hinfo_vmaddr)));
        _count++;
    }

    objc_opt::string_map& classNames() {
        return _classNames;
    }

    objc_opt::class_map& classes() {
        return _classes;
    }

    size_t count() const { return _count; }
};

template <typename P>
class ProtocolOptimizer
{
private:
    typedef typename P::uint_t pint_t;

    objc_opt::string_map          _protocolNames;
    objc_opt::legacy_protocol_map _protocols;
    objc_opt::protocol_map        _protocolsAndHeaders;
    size_t                        _protocolCount;
    size_t                        _protocolReferenceCount;
    Diagnostics&                  _diagnostics;
    HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& _hInfos;

    friend class ProtocolReferenceWalker<P, ProtocolOptimizer<P>>;

    pint_t visitProtocolReference(ContentAccessor* cache, pint_t oldValue)
    {
        objc_protocol_t<P>* proto = (objc_protocol_t<P>*)
            cache->contentForVMAddr(oldValue);
        pint_t newValue = (pint_t)_protocols[proto->getName(cache)];
        if (oldValue != newValue) _protocolReferenceCount++;
        return newValue;
    }

public:

    ProtocolOptimizer(Diagnostics& diag, HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& hinfos)
        : _protocolCount(0), _protocolReferenceCount(0), _diagnostics(diag), _hInfos(hinfos) {
    }

    void addProtocols(ContentAccessor* cache, const macho_header<P>* header)
    {
        PointerSection<P, objc_protocol_t<P> *>
            protocols(cache, header, "__DATA", "__objc_protolist");

        for (pint_t i = 0; i < protocols.count(); i++) {
            objc_protocol_t<P> *proto = protocols.get(i);

            const char *name = proto->getName(cache);
            if (_protocolNames.count(name) == 0) {
                if (proto->getSize() > sizeof(objc_protocol_t<P>)) {
                    _diagnostics.error("objc protocol is too big");
                    return;
                }
                uint64_t name_vmaddr = cache->vmAddrForContent((void*)name);
                uint64_t proto_vmaddr = cache->vmAddrForContent(proto);
                _protocolNames.insert(objc_opt::string_map::value_type(name, name_vmaddr));
                _protocols.insert(objc_opt::legacy_protocol_map::value_type(name, proto_vmaddr));
                _protocolCount++;
            }

            // Note down which header this protocol came from. We'll fill in the proto_vmaddr later,
            // once we've chosen a single definition for the protocol with this name.
            uint64_t hinfo_vmaddr = cache->vmAddrForContent(_hInfos.hinfoForHeader(cache, header));
            _protocolsAndHeaders.insert(objc_opt::class_map::value_type(name, std::pair<uint64_t, uint64_t>(0, hinfo_vmaddr)));
        }
    }

    const char *writeProtocols(ContentAccessor* cache,
                               uint8_t *& rwdest, size_t& rwremaining,
                               uint8_t *& rodest, size_t& roremaining,
                               CacheBuilder::ASLR_Tracker& aslrTracker,
                               pint_t protocolClassVMAddr)
    {
        if (_protocolCount == 0) return NULL;

        if (protocolClassVMAddr == 0) {
            return "libobjc's Protocol class symbol not found (metadata not optimized)";
        }

        size_t rwrequired = _protocolCount * sizeof(objc_protocol_t<P>);
        if (rwremaining < rwrequired) {
            return "libobjc's read-write section is too small (metadata not optimized)";
        }

        for (auto iter = _protocols.begin(); iter != _protocols.end(); ++iter)
        {
            objc_protocol_t<P>* oldProto = (objc_protocol_t<P>*)
                cache->contentForVMAddr(iter->second);

            // Create a new protocol object.
            objc_protocol_t<P>* proto = (objc_protocol_t<P>*)rwdest;
            rwdest += sizeof(*proto);
            rwremaining -= sizeof(*proto);

            // Initialize it.
            uint32_t oldSize = oldProto->getSize();
            memcpy(proto, oldProto, oldSize);
            if (!proto->getIsaVMAddr()) {
                proto->setIsaVMAddr(protocolClassVMAddr);
            }
            if (oldSize < sizeof(*proto)) {
                // Protocol object is old. Populate new fields.
                proto->setSize(sizeof(objc_protocol_t<P>));
                // missing extendedMethodTypes is already nil
            }
            // Some protocol objects are big enough to have the
            // demangledName field but don't initialize it.
            // Initialize it here if it is not already set.
            if (!proto->getDemangledName(cache)) {
                const char *roName = proto->getName(cache);
                char *demangledName = copySwiftDemangledName(roName, true);
                if (demangledName) {
                    size_t length = 1 + strlen(demangledName);
                    if (roremaining < length) {
                        return "libobjc's read-only section is too small (metadata not optimized)";
                    }

                    memmove(rodest, demangledName, length);
                    roName = (const char *)rodest;
                    rodest += length;
                    roremaining -= length;

                    free(demangledName);
                }
                proto->setDemangledName(cache, roName, _diagnostics);
            }
            proto->setFixedUp();
            proto->setIsCanonical();

            // Redirect the protocol table entry to our new object.
            iter->second = cache->vmAddrForContent(proto);

            // Add new rebase entries.
            proto->addPointers(cache, aslrTracker);
        }

        // Now that we've chosen the canonical protocols, make the duplicate headers
        // point to their canonical protocols.
        for (auto iter = _protocolsAndHeaders.begin(); iter != _protocolsAndHeaders.end(); ++iter) {
            iter->second.first = _protocols[iter->first];
        }

        return NULL;
    }

    void updateReferences(ContentAccessor* cache, const macho_header<P>* header)
    {
        ProtocolReferenceWalker<P, ProtocolOptimizer<P>> refs(*this);
        refs.walk(cache, header);
    }

    objc_opt::string_map& protocolNames() {
        return _protocolNames;
    }

    objc_opt::legacy_protocol_map& protocols() {
        return _protocols;
    }

    objc_opt::protocol_map& protocolsAndHeaders() {
        return _protocolsAndHeaders;
    }

    size_t protocolCount() const { return _protocolCount; }
    size_t protocolReferenceCount() const { return _protocolReferenceCount; }
};


static int percent(size_t num, size_t denom) {
    if (denom)
        return (int)(num / (double)denom * 100);
    else
        return 100;
}

template <typename P>
void addObjcSegments(Diagnostics& diag, DyldSharedCache* cache, const mach_header* libobjcMH,
                     uint8_t* objcReadOnlyBuffer, uint64_t objcReadOnlyBufferSizeAllocated,
                     uint8_t* objcReadWriteBuffer, uint64_t objcReadWriteBufferSizeAllocated,
                     uint32_t objcRwFileOffset)
{
    // validate there is enough free space to add the load commands
    const dyld3::MachOAnalyzer* libobjcMA = ((dyld3::MachOAnalyzer*)libobjcMH);
    uint32_t freeSpace = libobjcMA->loadCommandsFreeSpace();
    const uint32_t segSize = sizeof(macho_segment_command<P>);
    if ( freeSpace < 2*segSize ) {
        diag.warning("not enough space in libobjc.dylib to add load commands for objc optimization regions");
        return;
    }

    // find location of the LINKEDIT LC_SEGMENT load command; we need to insert new segments before it
    __block uint8_t* linkeditSeg = nullptr;
    libobjcMA->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
        if ( strcmp(info.segName, "__LINKEDIT") == 0 )
            linkeditSeg = (uint8_t*)libobjcMH + info.loadCommandOffset;
    });
    if ( linkeditSeg == nullptr ) {
        diag.warning("__LINKEDIT not found in libobjc.dylib");
        return;
    }

    // move load commands to make room to insert two new ones before LINKEDIT segment load command
    uint8_t* endOfLoadCommands = (uint8_t*)libobjcMH + sizeof(macho_header<P>) + libobjcMH->sizeofcmds;
    uint32_t remainingSize = (uint32_t)(endOfLoadCommands - linkeditSeg);
    memmove(linkeditSeg+2*segSize, linkeditSeg, remainingSize);

    // insert new segments
    macho_segment_command<P>* roSeg = (macho_segment_command<P>*)(linkeditSeg);
    macho_segment_command<P>* rwSeg = (macho_segment_command<P>*)(linkeditSeg+sizeof(macho_segment_command<P>));
    roSeg->set_cmd(macho_segment_command<P>::CMD);
    roSeg->set_cmdsize(segSize);
    roSeg->set_segname("__OBJC_RO");
    roSeg->set_vmaddr(cache->unslidLoadAddress() + objcReadOnlyBuffer - (uint8_t*)cache);
    roSeg->set_vmsize(objcReadOnlyBufferSizeAllocated);
    roSeg->set_fileoff(objcReadOnlyBuffer - (uint8_t*)cache);
    roSeg->set_filesize(objcReadOnlyBufferSizeAllocated);
    roSeg->set_maxprot(VM_PROT_READ);
    roSeg->set_initprot(VM_PROT_READ);
    roSeg->set_nsects(0);
    roSeg->set_flags(0);
    rwSeg->set_cmd(macho_segment_command<P>::CMD);
    rwSeg->set_cmdsize(segSize);
    rwSeg->set_segname("__OBJC_RW");
    rwSeg->set_vmaddr(cache->unslidLoadAddress() + objcReadWriteBuffer - (uint8_t*)cache);
    rwSeg->set_vmsize(objcReadWriteBufferSizeAllocated);
    rwSeg->set_fileoff(objcRwFileOffset);
    rwSeg->set_filesize(objcReadWriteBufferSizeAllocated);
    rwSeg->set_maxprot(VM_PROT_WRITE|VM_PROT_READ);
    rwSeg->set_initprot(VM_PROT_WRITE|VM_PROT_READ);
    rwSeg->set_nsects(0);
    rwSeg->set_flags(0);

    // update mach_header to account for new load commands
    macho_header<P>* mh = (macho_header<P>*)libobjcMH;
    mh->set_sizeofcmds(mh->sizeofcmds() + 2*segSize);
    mh->set_ncmds(mh->ncmds()+2);

    // fix up table at start of dyld cache that has pointer into install name for libobjc
    dyld_cache_image_info* images = (dyld_cache_image_info*)((uint8_t*)cache + cache->header.imagesOffset);
    uint64_t libobjcUnslidAddress = cache->unslidLoadAddress() + ((uint8_t*)libobjcMH - (uint8_t*)cache);
    for (uint32_t i=0; i < cache->header.imagesCount; ++i) {
        if ( images[i].address == libobjcUnslidAddress ) {
            images[i].pathFileOffset += (2*segSize);
            break;
        }
    }
}


template <typename P>
void doOptimizeObjC(DyldSharedCache* cache, bool forProduction, CacheBuilder::ASLR_Tracker& aslrTracker,
                    CacheBuilder::LOH_Tracker& lohTracker, const CacheBuilder::CacheCoalescedText& coalescedText,
                    const std::map<void*, std::string>& missingWeakImports, Diagnostics& diag,
                    uint8_t* objcReadOnlyBuffer, uint64_t objcReadOnlyBufferSizeUsed, uint64_t objcReadOnlyBufferSizeAllocated,
                    uint8_t* objcReadWriteBuffer, uint64_t objcReadWriteBufferSizeAllocated,
                    uint32_t objcRwFileOffset)
{
    typedef typename P::E E;
    typedef typename P::uint_t pint_t;

    diag.verbose("Optimizing objc metadata:\n");
    diag.verbose("  cache type is %s\n", forProduction ? "production" : "development");

    ContentAccessor cacheAccessor(cache, diag);

    size_t headerSize = P::round_up(sizeof(objc_opt::objc_opt_t));
    if (headerSize != sizeof(objc_opt::objc_opt_t)) {
        diag.warning("libobjc's optimization structure size is wrong (metadata not optimized)");
    }

    //
    // Find libobjc's empty sections and build list of images with objc metadata
    //
    __block const mach_header* libobjcMH = nullptr;
    __block const macho_section<P> *optROSection = nullptr;
    __block const macho_section<P> *optPointerListSection = nullptr;
    __block std::vector<const macho_header<P>*> objcDylibs;
    cache->forEachImage(^(const mach_header* machHeader, const char* installName) {
        const macho_header<P>* mh = (const macho_header<P>*)machHeader;
        if ( strstr(installName, "/libobjc.") != nullptr ) {
            libobjcMH = (mach_header*)mh;
            optROSection = mh->getSection("__TEXT", "__objc_opt_ro");
            optPointerListSection = mh->getSection("__DATA", "__objc_opt_ptrs");
        }
        if ( mh->getSection("__DATA", "__objc_imageinfo") || mh->getSection("__OBJC", "__image_info") ) {
            objcDylibs.push_back(mh);
        }
        // log("installName %s at mhdr 0x%016lx", installName, (uintptr_t)cacheAccessor.vmAddrForContent((void*)mh));
    });
    if ( optROSection == nullptr ) {
        diag.warning("libobjc's read-only section missing (metadata not optimized)");
        return;
    }
    if ( optPointerListSection == nullptr ) {
        diag.warning("libobjc's pointer list section missing (metadata not optimized)");
        return;
    }
    // point optROData into space allocated in dyld cache
    uint8_t* optROData = objcReadOnlyBuffer + objcReadOnlyBufferSizeUsed;
    size_t optRORemaining = objcReadOnlyBufferSizeAllocated - objcReadOnlyBufferSizeUsed;
    if ( optROData == nullptr ) {
        diag.warning("libobjc's read-only section has bad content");
        return;
    }
    *((uint32_t*)optROData) = objc_opt::VERSION;

    uint8_t* optRWData = objcReadWriteBuffer;
    size_t optRWRemaining = objcReadWriteBufferSizeAllocated;
    if (optRORemaining < headerSize) {
        diag.warning("libobjc's read-only section is too small (metadata not optimized)");
        return;
    }
    objc_opt::objc_opt_t* optROHeader = (objc_opt::objc_opt_t *)optROData;
    optROData += headerSize;
    optRORemaining -= headerSize;
    if (E::get32(optROHeader->version) != objc_opt::VERSION) {
        diag.warning("libobjc's read-only section version is unrecognized (metadata not optimized)");
        return;
    }

    if (optPointerListSection->size() < sizeof(objc_opt::objc_opt_pointerlist_tt<pint_t>)) {
        diag.warning("libobjc's pointer list section is too small (metadata not optimized)");
        return;
    }
    const objc_opt::objc_opt_pointerlist_tt<pint_t> *optPointerList = (const objc_opt::objc_opt_pointerlist_tt<pint_t> *)cacheAccessor.contentForVMAddr(optPointerListSection->addr());

    // Write nothing to optROHeader until everything else is written.
    // If something fails below, libobjc will not use the section.


    //
    // Make a copy of objcDylibs and sort that list by address.
    //
    std::vector<const macho_header<P>*> addressSortedDylibs = objcDylibs;
    std::sort(addressSortedDylibs.begin(), addressSortedDylibs.end(), [](const macho_header<P>* lmh, const macho_header<P>* rmh) -> bool {
        return lmh < rmh;
    });

    //
    // Build HeaderInfo list in cache
    //
    // First the RO header info
    // log("writing out %d RO dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optROSection->size() - optRORemaining));
    uint64_t hinfoROVMAddr = cacheAccessor.vmAddrForContent(optROData);
    HeaderInfoOptimizer<P, objc_header_info_ro_t<P>> hinfoROOptimizer;
    const char* err = hinfoROOptimizer.init((uint32_t)objcDylibs.size(), optROData, optRORemaining);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    else {
        for (const macho_header<P>* mh : addressSortedDylibs) {
            hinfoROOptimizer.update(&cacheAccessor, mh, aslrTracker);
        }
    }

    // Then the RW header info
    // log("writing out %d RW dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optRWSection->size() - optRWRemaining));
    uint64_t hinfoRWVMAddr = cacheAccessor.vmAddrForContent(optRWData);
    HeaderInfoOptimizer<P, objc_header_info_rw_t<P>> hinfoRWOptimizer;
    err = hinfoRWOptimizer.init((uint32_t)objcDylibs.size(), optRWData, optRWRemaining);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    else {
        for (const macho_header<P>* mh : addressSortedDylibs) {
            hinfoRWOptimizer.update(&cacheAccessor, mh, aslrTracker);
        }
    }

    //
    // Update selector references and build selector list
    //
    // This is SAFE: if we run out of room for the selector table,
    // the modified binaries are still usable.
    //
    // Heuristic: choose selectors from libraries with more selector cstring data first.
    // This tries to localize selector cstring memory.
    //
    ObjCSelectorUniquer<P> uniq(&cacheAccessor);
    std::vector<const macho_header<P>*> sizeSortedDylibs = objcDylibs;
    std::sort(sizeSortedDylibs.begin(), sizeSortedDylibs.end(), [](const macho_header<P>* lmh, const macho_header<P>* rmh) -> bool {
        // Sort a select few heavy hitters first.
        auto getPriority = [](const char* installName) -> int {
            if (!strcmp(installName, "/usr/lib/libobjc.A.dylib"))
                return 0;
            if (!strcmp(installName, "/System/Library/Frameworks/Foundation.framework/Versions/C/Foundation") ||
                !strcmp(installName, "/System/Library/Frameworks/Foundation.framework/Foundation"))
                return 1;
            if (!strcmp(installName, "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation") ||
                !strcmp(installName, "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation"))
                return 2;
            // Note we don't sort iOSMac UIKitCore early as we want iOSMac after macOS.
            if (!strcmp(installName, "/System/Library/PrivateFrameworks/UIKitCore.framework/UIKitCore"))
                return 3;
            if (!strcmp(installName, "/System/Library/Frameworks/AppKit.framework/Versions/C/AppKit"))
                return 4;
            if (!strcmp(installName, "/System/Library/Frameworks/CFNetwork.framework/Versions/A/CFNetwork") ||
                !strcmp(installName, "/System/Library/Frameworks/CFNetwork.framework/CFNetwork"))
                return 5;
            return INT_MAX;
        };

        // Sort by priority first
        int priorityA = getPriority(((const dyld3::MachOFile*)lmh)->installName());
        int priorityB = getPriority(((const dyld3::MachOFile*)rmh)->installName());
        if (priorityA != priorityB)
            return priorityA < priorityB;

        // Sort mac before iOSMac
        bool isIOSMacA = strncmp(((const dyld3::MachOFile*)lmh)->installName(), "/System/iOSSupport/", 19) == 0;
        bool isIOSMacB = strncmp(((const dyld3::MachOFile*)rmh)->installName(), "/System/iOSSupport/", 19) == 0;
        if (isIOSMacA != isIOSMacB)
            return !isIOSMacA;

        const macho_section<P>* lSection = lmh->getSection("__TEXT", "__objc_methname");
        const macho_section<P>* rSection = rmh->getSection("__TEXT", "__objc_methname");
        uint64_t lSelectorSize = (lSection ? lSection->size() : 0);
        uint64_t rSelectorSize = (rSection ? rSection->size() : 0);
        return lSelectorSize > rSelectorSize;
    });

    auto alignPointer = [](uint8_t* ptr) -> uint8_t* {
        return (uint8_t*)(((uintptr_t)ptr + 0x7) & ~0x7);
    };

    SelectorOptimizer<P, ObjCSelectorUniquer<P> > selOptimizer(uniq);
    selOptimizer.visitCoalescedStrings(coalescedText);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        LegacySelectorUpdater<P, ObjCSelectorUniquer<P>>::update(&cacheAccessor, mh, uniq);
        selOptimizer.optimize(&cacheAccessor, mh);
    }

    diag.verbose("  uniqued  %6lu selectors\n", uniq.strings().size());
    diag.verbose("  updated  %6lu selector references\n", uniq.count());

    uint64_t seloptVMAddr = cacheAccessor.vmAddrForContent(optROData);
    objc_opt::objc_selopt_t *selopt = new(optROData) objc_opt::objc_selopt_t;
    err = selopt->write(seloptVMAddr, optRORemaining, uniq.strings());
    if (err) {
        diag.warning("%s", err);
        return;
    }
    optROData += selopt->size();
    optROData = alignPointer(optROData);
    optRORemaining -= selopt->size();
    uint32_t seloptCapacity = selopt->capacity;
    uint32_t seloptOccupied = selopt->occupied;
    selopt->byteswap(E::little_endian);
    selopt = nullptr;

    diag.verbose("  selector table occupancy %u/%u (%u%%)\n",
                 seloptOccupied, seloptCapacity,
                 (unsigned)(seloptOccupied/(double)seloptCapacity*100));


    //
    // Detect classes that have missing weak-import superclasses.
    //
    // Production shared caches don't support roots, so we can set this bit and know
    // there will definitely not be missing weak superclasses at runtime.
    // Development shared caches can also set this bit, as the objc runtime only trusts
    // it if there are no roots at runtime.
    //
    // This is SAFE: the binaries themselves are unmodified.
    WeakClassDetector<P> weakopt;
    bool noMissingWeakSuperclasses = weakopt.noMissingWeakSuperclasses(&cacheAccessor,
                                                                       missingWeakImports,
                                                                       sizeSortedDylibs);

    if (forProduction) {
        // Shared cache does not currently support unbound weak references.
        // Here we assert that there are none. If support is added later then
        // this assertion needs to be removed and this path needs to be tested.
        // FIXME: The internal cache also isn't going to notice that an on-disk
        // dylib could resolve a weak bind from the shared cache. Should we just
        // error on all caches, regardless of dev/customer?
        if (!noMissingWeakSuperclasses) {
            diag.error("Some Objective-C class has a superclass that is "
                       "weak-import and missing from the cache.");
        }
    }


    //
    // Build class table.
    //
    // This is SAFE: the binaries themselves are unmodified.
    ClassListBuilder<P> classes(hinfoROOptimizer);
    ClassWalker<P, ClassListBuilder<P>> classWalker(classes);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        classWalker.walk(&cacheAccessor, mh);
    }

    diag.verbose("  recorded % 6ld classes\n", classes.classNames().size());

    uint64_t clsoptVMAddr = cacheAccessor.vmAddrForContent(optROData);
    objc_opt::objc_clsopt_t *clsopt = new(optROData) objc_opt::objc_clsopt_t;
    err = clsopt->write(clsoptVMAddr, optRORemaining,
                        classes.classNames(), classes.classes(), false);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    optROData += clsopt->size();
    optROData = alignPointer(optROData);
    optRORemaining -= clsopt->size();
    size_t duplicateCount = clsopt->duplicateCount();
    uint32_t clsoptCapacity = clsopt->capacity;
    uint32_t clsoptOccupied = clsopt->occupied;
    clsopt->byteswap(E::little_endian);
    clsopt = nullptr;

    diag.verbose("  found    % 6ld duplicate classes\n",
                 duplicateCount);
    diag.verbose("  class table occupancy %u/%u (%u%%)\n",
                 clsoptOccupied, clsoptCapacity,
                 (unsigned)(clsoptOccupied/(double)clsoptCapacity*100));


    //
    // Sort method lists.
    //
    // This is SAFE: modified binaries are still usable as unsorted lists.
    // This must be done AFTER uniquing selectors.
    MethodListSorter<P> methodSorter;
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        methodSorter.optimize(&cacheAccessor, mh);
    }

    diag.verbose("  sorted   % 6ld method lists\n", methodSorter.optimized());


    //
    // Unique protocols and build protocol table.
    //
    // This is SAFE: no protocol references are updated yet.
    // This must be done AFTER updating method lists.
    //
    ProtocolOptimizer<P> protocolOptimizer(diag, hinfoROOptimizer);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        protocolOptimizer.addProtocols(&cacheAccessor, mh);
    }

    diag.verbose("  uniqued  % 6ld protocols\n",
                 protocolOptimizer.protocolCount());

    pint_t protocolClassVMAddr = (pint_t)P::getP(optPointerList->protocolClass);
    err = protocolOptimizer.writeProtocols(&cacheAccessor,
                                           optRWData, optRWRemaining,
                                           optROData, optRORemaining,
                                           aslrTracker, protocolClassVMAddr);
    if (err) {
        diag.warning("%s", err);
        return;
    }

    // Align the buffer again. The new protocols may have added an odd number of name characters.
    optROData = alignPointer(optROData);

    // New protocol table which tracks loaded images.
    uint64_t protocoloptVMAddr = cacheAccessor.vmAddrForContent(optROData);
    objc_opt::objc_protocolopt2_t *protocolopt = new (optROData) objc_opt::objc_protocolopt2_t;
    err = protocolopt->write(protocoloptVMAddr, optRORemaining,
                             protocolOptimizer.protocolNames(),
                             protocolOptimizer.protocolsAndHeaders(), false);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    optROData += protocolopt->size();
    optROData = alignPointer(optROData);
    optRORemaining -= protocolopt->size();
    uint32_t protocoloptCapacity = protocolopt->capacity;
    uint32_t protocoloptOccupied = protocolopt->occupied;
    protocolopt->byteswap(E::little_endian);
    protocolopt = nullptr;

    diag.verbose("  protocol table occupancy %u/%u (%u%%)\n",
                 protocoloptOccupied, protocoloptCapacity,
                 (unsigned)(protocoloptOccupied/(double)protocoloptCapacity*100));


    //
    // Redirect protocol references to the uniqued protocols.
    //
    // This is SAFE: the new protocol objects are still usable as-is.
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        protocolOptimizer.updateReferences(&cacheAccessor, mh);
    }

    diag.verbose("  updated  % 6ld protocol references\n", protocolOptimizer.protocolReferenceCount());


    //
    // Repair ivar offsets.
    //
    // This is SAFE: the runtime always validates ivar offsets.
    IvarOffsetOptimizer<P> ivarOffsetOptimizer;
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        ivarOffsetOptimizer.optimize(&cacheAccessor, mh);
    }

    diag.verbose("  updated  % 6ld ivar offsets\n", ivarOffsetOptimizer.optimized());


    // Collect flags.
    uint32_t headerFlags = 0;
    if (forProduction) {
        headerFlags |= objc_opt::IsProduction;
    }
    if (noMissingWeakSuperclasses) {
        headerFlags |= objc_opt::NoMissingWeakSuperclasses;
    }


    // Success. Mark dylibs as optimized.
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        const macho_section<P>* imageInfoSection = mh->getSection("__DATA", "__objc_imageinfo");
        if (!imageInfoSection) {
            imageInfoSection = mh->getSection("__OBJC", "__image_info");
        }
        if (imageInfoSection) {
            objc_image_info<P>* info = (objc_image_info<P>*)cacheAccessor.contentForVMAddr(imageInfoSection->addr());
            info->setOptimizedByDyld();
        }
    }


    // Success. Update the __objc_opt_ro section in libobjc.dylib to contain offsets to the generated optimization structures.
    objc_opt::objc_opt_t* libROHeader = (objc_opt::objc_opt_t *)cacheAccessor.contentForVMAddr(optROSection->addr());
    E::set32(libROHeader->flags, headerFlags);
    E::set32(libROHeader->selopt_offset, (uint32_t)(seloptVMAddr - optROSection->addr()));
    E::set32(libROHeader->clsopt_offset, (uint32_t)(clsoptVMAddr - optROSection->addr()));
    E::set32(libROHeader->unused_protocolopt_offset, 0);
    E::set32(libROHeader->headeropt_ro_offset, (uint32_t)(hinfoROVMAddr - optROSection->addr()));
    E::set32(libROHeader->headeropt_rw_offset, (uint32_t)(hinfoRWVMAddr - optROSection->addr()));
    E::set32(libROHeader->protocolopt_offset, (uint32_t)(protocoloptVMAddr - optROSection->addr()));

    // Log statistics.
    size_t roSize = objcReadOnlyBufferSizeAllocated - optRORemaining;
    size_t rwSize = objcReadWriteBufferSizeAllocated - optRWRemaining;
    diag.verbose("  %lu/%llu bytes (%d%%) used in shared cache read-only optimization region\n",
                 roSize, objcReadOnlyBufferSizeAllocated, percent(roSize, objcReadOnlyBufferSizeAllocated));
    diag.verbose("  %lu/%llu bytes (%d%%) used in shared cache read/write optimization region\n",
                 rwSize, objcReadWriteBufferSizeAllocated, percent(rwSize, objcReadWriteBufferSizeAllocated));
    diag.verbose("  wrote objc metadata optimization version %d\n", objc_opt::VERSION);

    // Add segments to libobjc.dylib that cover the cache builder's allocated r/o and r/w regions.
    addObjcSegments<P>(diag, cache, libobjcMH, objcReadOnlyBuffer, objcReadOnlyBufferSizeAllocated, objcReadWriteBuffer, objcReadWriteBufferSizeAllocated, objcRwFileOffset);


    // Now that objc has uniqued the selector references, we can apply the LOHs so that ADRP/LDR -> ADRP/ADD.
    if (forProduction) {
        const bool logSelectors = false;
        uint64_t lohADRPCount = 0;
        uint64_t lohLDRCount = 0;

        for (auto& targetAndInstructions : lohTracker) {
            uint64_t targetVMAddr = targetAndInstructions.first;
            if (!selOptimizer.isSelectorRefAddress((pint_t)targetVMAddr))
                continue;

            std::set<void*>& instructions = targetAndInstructions.second;
            // We do 2 passes over the instructions: the first to validate them and the second
            // to actually update them.
            for (unsigned pass = 0; pass != 2; ++pass) {
                uint32_t adrpCount = 0;
                uint32_t ldrCount = 0;
                for (void* instructionAddress : instructions) {
                    uint32_t& instruction = *(uint32_t*)instructionAddress;
                    uint64_t instructionVMAddr = cacheAccessor.vmAddrForContent(&instruction);
                    uint64_t selRefContent = *(uint64_t*)cacheAccessor.contentForVMAddr(targetVMAddr);
                    const char* selectorString = (const char*)cacheAccessor.contentForVMAddr(selRefContent);
                    uint64_t selectorStringVMAddr = cacheAccessor.vmAddrForContent(selectorString);

                    if ( (instruction & 0x9F000000) == 0x90000000 ) {
                        // ADRP
                        int64_t pageDistance = ((selectorStringVMAddr & ~0xFFF) - (instructionVMAddr & ~0xFFF));
                        int64_t newPage21 = pageDistance >> 12;

                        if (pass == 0) {
                            if ( (newPage21 > 2097151) || (newPage21 < -2097151) ) {
                                if (logSelectors)
                                    fprintf(stderr, "Out of bounds ADRP selector reference target\n");
                                instructions.clear();
                                break;
                            }
                            ++adrpCount;
                        }

                        if (pass == 1) {
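                            // Rewrite the ADRP's 21-bit page immediate in place: the mask
                            // 0x9F00001F keeps the opcode bits and Rd, bits 30:29 take the
                            // low 2 bits of the immediate (immlo), and bits 23:5 take the
                            // high 19 bits (immhi).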
                            instruction = (instruction & 0x9F00001F) | ((newPage21 << 29) & 0x60000000) | ((newPage21 << 3) & 0x00FFFFE0);
                            ++lohADRPCount;
                        }
                        continue;
                    }

                    if ( (instruction & 0x3B000000) == 0x39000000 ) {
                        // LDR/STR. STR shouldn't be possible as this is a selref!
                        if (pass == 0) {
                            if ( (instruction & 0xC0C00000) != 0xC0400000 ) {
                                // Not a load, or dest reg isn't xN, or uses sign extension
                                if (logSelectors)
                                    fprintf(stderr, "Bad LDR for selector reference optimisation\n");
                                instructions.clear();
                                break;
                            }
                            if ( (instruction & 0x04000000) != 0 ) {
                                // Loading a float
                                if (logSelectors)
                                    fprintf(stderr, "Bad LDR for selector reference optimisation\n");
                                instructions.clear();
                                break;
                            }
                            ++ldrCount;
                        }

                        if (pass == 1) {
                            uint32_t ldrDestReg = (instruction & 0x1F);
                            uint32_t ldrBaseReg = ((instruction >> 5) & 0x1F);

                            // Convert the LDR to an ADD
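                            // 0x91000000 is ADD Xd, Xn, #imm (64-bit, LSL #0); the 12-bit
                            // immediate sits in bits 21:10, so the selector string's
                            // page offset becomes the add's immediate.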
                            instruction = 0x91000000;
                            instruction |= ldrDestReg;
                            instruction |= ldrBaseReg << 5;
                            instruction |= (selectorStringVMAddr & 0xFFF) << 10;

                            ++lohLDRCount;
                        }
                        continue;
                    }

                    if ( (instruction & 0xFFC00000) == 0x91000000 ) {
                        // ADD imm12
                        // We don't support ADDs.
                        if (logSelectors)
                            fprintf(stderr, "Bad ADD for selector reference optimisation\n");
                        instructions.clear();
                        break;
                    }

                    if (logSelectors)
                        fprintf(stderr, "Unknown instruction for selref optimisation\n");
                    instructions.clear();
                    break;
                }
                if (pass == 0) {
                    // If we didn't see at least one ADRP and one LDR in the first pass then don't optimize this location.
                    if ((adrpCount == 0) || (ldrCount == 0)) {
                        instructions.clear();
                        break;
                    }
                }
            }
        }

        diag.verbose("  Optimized %lld ADRP LOHs\n", lohADRPCount);
        diag.verbose("  Optimized %lld LDR LOHs\n", lohLDRCount);
    }
}


} // anon namespace

void CacheBuilder::optimizeObjC()
{
    uint32_t objcRwFileOffset = (uint32_t)((_objcReadWriteBuffer - _readWriteRegion.buffer) + _readWriteRegion.cacheFileOffset);
    if ( _archLayout->is64 )
        doOptimizeObjC<Pointer64<LittleEndian>>((DyldSharedCache*)_readExecuteRegion.buffer, _options.optimizeStubs, _aslrTracker, _lohTracker,
                                                _coalescedText, _missingWeakImports,
                                                _diagnostics, _objcReadOnlyBuffer, _objcReadOnlyBufferSizeUsed, _objcReadOnlyBufferSizeAllocated,
                                                _objcReadWriteBuffer, _objcReadWriteBufferSizeAllocated, objcRwFileOffset);
    else
        doOptimizeObjC<Pointer32<LittleEndian>>((DyldSharedCache*)_readExecuteRegion.buffer, _options.optimizeStubs, _aslrTracker, _lohTracker,
                                                _coalescedText, _missingWeakImports,
                                                _diagnostics, _objcReadOnlyBuffer, _objcReadOnlyBufferSizeUsed, _objcReadOnlyBufferSizeAllocated,
                                                _objcReadWriteBuffer, _objcReadWriteBufferSizeAllocated, objcRwFileOffset);
}

static uint32_t hashTableSize(uint32_t maxElements, uint32_t perElementData)
{
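    // A sizing sketch (assuming objc's perfect-hash table layout): the header
    // accounts for roughly 8 uint32 fields plus a 256-entry uint32 scramble
    // table; on top of that come the tab array (~capacity/2 bytes), one
    // checkbyte per slot, and perElementData bytes of payload per slot.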
    uint32_t elementsWithPadding = maxElements*11/10; // if close to power of 2, perfect hash may fail, so don't get within 10% of that
    uint32_t powTwoCapacity = 1 << (32 - __builtin_clz(elementsWithPadding - 1));
    uint32_t headerSize = 4*(8+256);
    return headerSize + powTwoCapacity/2 + powTwoCapacity + powTwoCapacity*perElementData;
}

// The goal here is to allocate space in the dyld shared cache (while it is being laid out) that will contain
// the objc structures that previously were in the __objc_opt_ro section.
uint32_t CacheBuilder::computeReadOnlyObjC(uint32_t selRefCount, uint32_t classDefCount, uint32_t protocolDefCount)
{
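    // 0xA000 appears to be fixed overhead (objc_opt_t header, header-info
    // arrays, and alignment slack); the per-element byte counts below (5, 12,
    // 8) are the builder's estimates for the selector, class, and protocol
    // tables respectively.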
    return 0xA000 + hashTableSize(selRefCount, 5) + hashTableSize(classDefCount, 12) + hashTableSize(protocolDefCount, 8);
}

// Space to replace the __objc_opt_rw section.
uint32_t CacheBuilder::computeReadWriteObjC(uint32_t imageCount, uint32_t protocolDefCount)
{
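    // A rough budget: 8 bytes of header-info per image, plus one rewritten
    // protocol object per protocol definition (taken here as 12 pointer-sized
    // words, roughly sizeof(objc_protocol_t)).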
    return 8*imageCount + protocolDefCount*12*(_archLayout->is64 ? 8 : 4);
}