/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
 *
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <ctype.h>      // isdigit()
#include <string.h>     // strncmp(), strcmp(), strlen(), strstr(), memcpy(), memmove()
#include <stdio.h>      // asprintf()
#include <stdlib.h>     // free()
#include <new>          // placement new
#include <vector>
#include <algorithm>    // std::sort()

#include "DyldSharedCache.h"
#include "Diagnostics.h"
#include "CacheBuilder.h"
#include "FileAbstraction.hpp"
#include "MachOFileAbstraction.hpp"
// Scan a C++ or Swift length-mangled field.
static bool scanMangledField(const char *&string, const char *end,
                             const char *&field, int& length)
{
    // Leading zero not allowed.
    if (*string == '0') return false;

    length = 0;
    field = string;
    while (field < end) {
        char c = *field;
        if (!isdigit(c)) break;
        field++;
        if (__builtin_smul_overflow(length, 10, &length)) return false;
        if (__builtin_sadd_overflow(length, c - '0', &length)) return false;
    }

    string = field + length;
    return length > 0  &&  string <= end;
}
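
// Example (illustrative): scanning "10ModuleName9ClassName" succeeds with
// length == 10 and field pointing at the start of "ModuleName"; string is
// left at the "9ClassName" suffix, ready for the next scan.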
// copySwiftDemangledName
// Returns the pretty form of the given Swift-mangled class or protocol name.
// Returns nullptr if the string doesn't look like a mangled Swift name.
// The result must be freed with free().
static char *copySwiftDemangledName(const char *string, bool isProtocol = false)
{
    if (!string) return nullptr;

    // Swift mangling prefix.
    if (strncmp(string, isProtocol ? "_TtP" : "_TtC", 4) != 0) return nullptr;
    string += 4;

    const char *end = string + strlen(string);

    // Module name.
    const char *prefix;
    int prefixLength;
    if (string[0] == 's') {
        // "s" is the Swift module.
        prefix = "Swift";
        prefixLength = 5;
        string += 1;
    } else {
        if (! scanMangledField(string, end, prefix, prefixLength)) return nullptr;
    }

    // Class or protocol name.
    const char *suffix;
    int suffixLength;
    if (! scanMangledField(string, end, suffix, suffixLength)) return nullptr;

    if (isProtocol) {
        // Remainder must be "_".
        if (strcmp(string, "_") != 0) return nullptr;
    } else {
        // Remainder must be empty.
        if (string != end) return nullptr;
    }

    char *result;
    asprintf(&result, "%.*s.%.*s", prefixLength, prefix, suffixLength, suffix);
    return result;
}
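
// Examples (illustrative):
//   copySwiftDemangledName("_TtC10ModuleName9ClassName")  -> "ModuleName.ClassName"
//   copySwiftDemangledName("_TtP5Bingo6Bongos_", true)    -> "Bingo.Bongos"
//   copySwiftDemangledName("NSObject")                    -> nullptr (not Swift-mangled)

// ContentAccessor maps between the cache's VM addresses and the buffer
// addresses where the cache content currently sits in the builder's own
// memory, using the regions reported by DyldSharedCache::forEachRegion().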
class ContentAccessor {
public:
    ContentAccessor(const DyldSharedCache* cache, Diagnostics& diag)
        : _diagnostics(diag)
    {
        __block int index = 0;
        cache->forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions) {
            _regions[index++] = { (uint8_t*)content, (uint8_t*)content+size, vmAddr, vmAddr+size };
        });
    }

    void* contentForVMAddr(uint64_t vmaddr) {
        for (const Info& info : _regions) {
            if ( (info.startAddr <= vmaddr) && (vmaddr < info.endAddr) )
                return (void*)(info.contentStart + vmaddr - info.startAddr);
        }
        if ( vmaddr == 0 )
            return nullptr;
        _diagnostics.error("invalid vmaddr 0x%0llX in ObjC data", vmaddr);
        return nullptr;
    }

    uint64_t vmAddrForContent(const void* content) {
        for (const Info& info : _regions) {
            if ( (info.contentStart <= content) && (content < info.contentEnd) )
                return info.startAddr + ((uint8_t*)content - (uint8_t*)info.contentStart);
        }
        _diagnostics.error("invalid content pointer %p in ObjC data", content);
        return 0;
    }

    Diagnostics& diagnostics() { return _diagnostics; }

private:
    struct Info { uint8_t* contentStart; uint8_t* contentEnd; uint64_t startAddr; uint64_t endAddr; };
    Diagnostics&    _diagnostics;
    Info            _regions[3];    // one entry per cache region
};
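
// Illustrative round trip (assuming `accessor` wraps a built cache): the two
// lookups are inverses for any address inside a cache region:
//   void*    p  = accessor.contentForVMAddr(vmAddr);
//   uint64_t va = accessor.vmAddrForContent(p);    // va == vmAddr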
// Access a section containing a list of pointers
template <typename P, typename T>
class PointerSection
{
    typedef typename P::uint_t pint_t;
public:
    PointerSection(ContentAccessor* cache, const macho_header<P>* mh,
                   const char* segname, const char* sectname)
        : _cache(cache),
          _section(mh->getSection(segname, sectname)),
          _base(_section ? (pint_t*)cache->contentForVMAddr(_section->addr()) : 0),
          _count(_section ? (pint_t)(_section->size() / sizeof(pint_t)) : 0) {
    }

    pint_t count() const { return _count; }

    pint_t getVMAddress(pint_t index) const {
        if ( index >= _count ) {
            _cache->diagnostics().error("index out of range in section %s", _section->sectname());
            return 0;
        }
        return (pint_t)P::getP(_base[index]);
    }

    T get(pint_t index) const {
        return (T)_cache->contentForVMAddr(getVMAddress(index));
    }

    void setVMAddress(pint_t index, pint_t value) {
        if ( index >= _count ) {
            _cache->diagnostics().error("index out of range in section %s", _section->sectname());
            return;
        }
        P::setP(_base[index], value);
    }

    // Compact away null entries and shrink the section to match.
    void removeNulls() {
        pint_t shift = 0;
        for (pint_t i = 0; i < _count; i++) {
            pint_t value = _base[i];
            if (value) {
                _base[i-shift] = value;
            } else {
                shift++;
            }
        }
        _count -= shift;
        const_cast<macho_section<P>*>(_section)->set_size(_count * sizeof(pint_t));
    }

private:
    ContentAccessor* const _cache;
    const macho_section<P>* const _section;
    pint_t * const _base;
    pint_t _count;
};
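
// Illustrative use, mirroring ProtocolOptimizer::addProtocols() below: read a
// dylib's protocol list section as typed pointers:
//   PointerSection<P, objc_protocol_t<P>*> protolist(cache, mh, "__DATA", "__objc_protolist");
//   for (pint_t i = 0; i < protolist.count(); i++) {
//       objc_protocol_t<P>* proto = protolist.get(i);
//       ...
//   }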
// Access a section containing an array of structures
template <typename P, typename T>
class ArraySection
{
public:
    ArraySection(ContentAccessor* cache, const macho_header<P>* mh,
                 const char *segname, const char *sectname)
        : _cache(cache),
          _section(mh->getSection(segname, sectname)),
          _base(_section ? (T *)cache->contentForVMAddr(_section->addr()) : 0),
          _count(_section ? _section->size() / sizeof(T) : 0) {
    }

    uint64_t count() const { return _count; }

    T& get(uint64_t index) const {
        if (index >= _count) {
            _cache->diagnostics().error("index out of range in section %s", _section->sectname());
        }
        return _base[index];
    }

private:
    ContentAccessor* const _cache;
    const macho_section<P>* const _section;
    T * const _base;
    uint64_t const _count;
};
#define SELOPT_WRITE    // enable the table-writing side of objc-shared-cache.h
#include "objc-shared-cache.h"
#include "ObjC1Abstraction.hpp"
#include "ObjC2Abstraction.hpp"
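
// ObjCSelectorUniquer is the visitor used by SelectorOptimizer below:
// visit() maps a selector string's old VM address to the canonical address of
// the first occurrence of that string, building the uniqued selector table
// as a side effect.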
template <typename P>
class ObjCSelectorUniquer
{
public:
    typedef typename P::uint_t pint_t;

    ObjCSelectorUniquer(ContentAccessor* cache) : _cache(cache) { }

    pint_t visit(pint_t oldValue)
    {
        _count++;
        const char *s = (const char *)_cache->contentForVMAddr(oldValue);
        objc_opt::string_map::iterator element =
            _selectorStrings.insert(objc_opt::string_map::value_type(s, oldValue)).first;
        return (pint_t)element->second;
    }

    objc_opt::string_map& strings() {
        return _selectorStrings;
    }

    size_t count() const { return _count; }

private:
    objc_opt::string_map _selectorStrings;
    ContentAccessor* _cache;
    size_t _count = 0;
};
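
// ClassListBuilder is the visitor used by ClassWalker: for every non-metaclass
// it records the class name plus the (class, header info) VM address pair from
// which the class hash table is later built.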
template <typename P>
class ClassListBuilder
{
private:
    objc_opt::string_map _classNames;
    objc_opt::class_map _classes;
    size_t _count = 0;
    HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& _hInfos;

public:
    ClassListBuilder(HeaderInfoOptimizer<P, objc_header_info_ro_t<P>>& hinfos) : _hInfos(hinfos) { }

    void visitClass(ContentAccessor* cache,
                    const macho_header<P>* header,
                    objc_class_t<P>* cls)
    {
        if (cls->isMetaClass(cache)) return;

        const char *name = cls->getName(cache);
        uint64_t name_vmaddr = cache->vmAddrForContent((void*)name);
        uint64_t cls_vmaddr = cache->vmAddrForContent(cls);
        uint64_t hinfo_vmaddr = cache->vmAddrForContent(_hInfos.hinfoForHeader(cache, header));
        _classNames.insert(objc_opt::string_map::value_type(name, name_vmaddr));
        _classes.insert(objc_opt::class_map::value_type(name, std::pair<uint64_t, uint64_t>(cls_vmaddr, hinfo_vmaddr)));
        _count++;
    }

    objc_opt::string_map& classNames() {
        return _classNames;
    }

    objc_opt::class_map& classes() {
        return _classes;
    }

    size_t count() const { return _count; }
};
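
// ProtocolOptimizer uniques protocol objects across all dylibs in the cache:
// addProtocols() picks one canonical copy per protocol name, writeProtocols()
// re-emits the canonical copies into libobjc's read-write section, and
// updateReferences() redirects every protocol reference to the new copies.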
template <typename P>
class ProtocolOptimizer
{
private:
    typedef typename P::uint_t pint_t;

    objc_opt::string_map _protocolNames;
    objc_opt::protocol_map _protocols;
    size_t _protocolCount;
    size_t _protocolReferenceCount;
    Diagnostics& _diagnostics;

    friend class ProtocolReferenceWalker<P, ProtocolOptimizer<P>>;
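
    // Called back by ProtocolReferenceWalker for each protocol reference.
    // Returns the canonical protocol's VM address and counts how many
    // references actually changed.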
    pint_t visitProtocolReference(ContentAccessor* cache, pint_t oldValue)
    {
        objc_protocol_t<P>* proto = (objc_protocol_t<P>*)
            cache->contentForVMAddr(oldValue);
        pint_t newValue = (pint_t)_protocols[proto->getName(cache)];
        if (oldValue != newValue) _protocolReferenceCount++;
        return newValue;
    }
public:

    ProtocolOptimizer(Diagnostics& diag)
        : _protocolCount(0), _protocolReferenceCount(0), _diagnostics(diag) {
    }
    void addProtocols(ContentAccessor* cache, const macho_header<P>* header)
    {
        PointerSection<P, objc_protocol_t<P> *>
            protocols(cache, header, "__DATA", "__objc_protolist");

        for (pint_t i = 0; i < protocols.count(); i++) {
            objc_protocol_t<P> *proto = protocols.get(i);

            const char *name = proto->getName(cache);
            if (_protocolNames.count(name) == 0) {
                if (proto->getSize() > sizeof(objc_protocol_t<P>)) {
                    _diagnostics.error("objc protocol is too big");
                    return;
                }

                uint64_t name_vmaddr = cache->vmAddrForContent((void*)name);
                uint64_t proto_vmaddr = cache->vmAddrForContent(proto);
                _protocolNames.insert(objc_opt::string_map::value_type(name, name_vmaddr));
                _protocols.insert(objc_opt::protocol_map::value_type(name, proto_vmaddr));
                _protocolCount++;
            }
        }
    }
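
    // Copies each canonical protocol into libobjc's read-write section,
    // filling in missing isa, size, and demangled-name fields as it goes.
    // Returns NULL on success, or a static error string if optimization
    // must be abandoned.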
    const char *writeProtocols(ContentAccessor* cache,
                               uint8_t *& rwdest, size_t& rwremaining,
                               uint8_t *& rodest, size_t& roremaining,
                               std::vector<void*>& pointersInData,
                               pint_t protocolClassVMAddr)
    {
        if (_protocolCount == 0) return NULL;

        if (protocolClassVMAddr == 0) {
            return "libobjc's Protocol class symbol not found (metadata not optimized)";
        }

        size_t rwrequired = _protocolCount * sizeof(objc_protocol_t<P>);
        if (rwremaining < rwrequired) {
            return "libobjc's read-write section is too small (metadata not optimized)";
        }

        for (objc_opt::protocol_map::iterator iter = _protocols.begin();
             iter != _protocols.end();
             ++iter)
        {
            objc_protocol_t<P>* oldProto = (objc_protocol_t<P>*)
                cache->contentForVMAddr(iter->second);

            // Create a new protocol object.
            objc_protocol_t<P>* proto = (objc_protocol_t<P>*)rwdest;
            rwdest += sizeof(*proto);
            rwremaining -= sizeof(*proto);

            // Initialize it.
            uint32_t oldSize = oldProto->getSize();
            memcpy(proto, oldProto, oldSize);
            if (!proto->getIsaVMAddr()) {
                proto->setIsaVMAddr(protocolClassVMAddr);
            }
            if (oldSize < sizeof(*proto)) {
                // Protocol object is old. Populate new fields.
                proto->setSize(sizeof(objc_protocol_t<P>));
                // missing extendedMethodTypes is already nil
            }
            // Some protocol objects are big enough to have the
            // demangledName field but don't initialize it.
            // Initialize it here if it is not already set.
            if (!proto->getDemangledName(cache)) {
                const char *roName = proto->getName(cache);
                char *demangledName = copySwiftDemangledName(roName, true);
                if (demangledName) {
                    size_t length = 1 + strlen(demangledName);
                    if (roremaining < length) {
                        return "libobjc's read-only section is too small (metadata not optimized)";
                    }

                    memmove(rodest, demangledName, length);
                    roName = (const char *)rodest;
                    rodest += length;
                    roremaining -= length;

                    free(demangledName);
                }
                proto->setDemangledName(cache, roName, _diagnostics);
            }

            // Redirect the protocol table at our new object.
            iter->second = cache->vmAddrForContent(proto);

            // Add new rebase entries.
            proto->addPointers(pointersInData);
        }

        return NULL;
    }
    void updateReferences(ContentAccessor* cache, const macho_header<P>* header)
    {
        ProtocolReferenceWalker<P, ProtocolOptimizer<P>> refs(*this);
        refs.walk(cache, header);
    }

    objc_opt::string_map& protocolNames() {
        return _protocolNames;
    }

    objc_opt::protocol_map& protocols() {
        return _protocols;
    }

    size_t protocolCount() const { return _protocolCount; }
    size_t protocolReferenceCount() const { return _protocolReferenceCount; }
};
static int percent(size_t num, size_t denom) {
    if (denom)
        return (int)(num / (double)denom * 100);
    else
        return 100;
}
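
// optimizeObjC rewrites the cache's Objective-C metadata in place:
// build header-info lists, unique selectors, detect missing weak-import
// superclasses (production caches only), build the class and protocol hash
// tables in libobjc's __objc_opt_ro/__objc_opt_rw sections, sort method
// lists, unique protocols, and repair ivar offsets. Each pass below notes
// whether a partial failure leaves the modified binaries usable.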
template <typename P>
void optimizeObjC(DyldSharedCache* cache, bool forProduction, std::vector<void*>& pointersForASLR, Diagnostics& diag)
{
    typedef typename P::E           E;
    typedef typename P::uint_t      pint_t;

    diag.verbose("Optimizing objc metadata:\n");
    diag.verbose("  cache type is %s\n", forProduction ? "production" : "development");

    ContentAccessor cacheAccessor(cache, diag);

    size_t headerSize = P::round_up(sizeof(objc_opt::objc_opt_t));
    if (headerSize != sizeof(objc_opt::objc_opt_t)) {
        diag.warning("libobjc's optimization structure size is wrong (metadata not optimized)");
    }
    //
    // Find libobjc's empty sections and build list of images with objc metadata
    //
    __block const macho_section<P> *optROSection = nullptr;
    __block const macho_section<P> *optRWSection = nullptr;
    __block const macho_section<P> *optPointerListSection = nullptr;
    __block std::vector<const macho_header<P>*> objcDylibs;
    cache->forEachImage(^(const mach_header* machHeader, const char* installName) {
        const macho_header<P>* mh = (const macho_header<P>*)machHeader;
        if ( strstr(installName, "/libobjc.") != nullptr ) {
            optROSection = mh->getSection("__TEXT", "__objc_opt_ro");
            optRWSection = mh->getSection("__DATA", "__objc_opt_rw");
            optPointerListSection = mh->getSection("__DATA", "__objc_opt_ptrs");
        }
        if ( mh->getSection("__DATA", "__objc_imageinfo") || mh->getSection("__OBJC", "__image_info") ) {
            objcDylibs.push_back(mh);
        }
        // log("installName %s at mhdr 0x%016lx", installName, (uintptr_t)cacheAccessor.vmAddrForContent((void*)mh));
    });
    if ( optROSection == nullptr ) {
        diag.warning("libobjc's read-only section missing (metadata not optimized)");
        return;
    }
    if ( optRWSection == nullptr ) {
        diag.warning("libobjc's read/write section missing (metadata not optimized)");
        return;
    }
    if ( optPointerListSection == nullptr ) {
        diag.warning("libobjc's pointer list section missing (metadata not optimized)");
        return;
    }
    uint8_t* optROData = (uint8_t*)cacheAccessor.contentForVMAddr(optROSection->addr());
    if ( optROData == nullptr ) {
        diag.warning("libobjc's read-only section has bad content");
        return;
    }
    size_t optRORemaining = optROSection->size();
    uint8_t* optRWData = (uint8_t*)cacheAccessor.contentForVMAddr(optRWSection->addr());
    size_t optRWRemaining = optRWSection->size();
    if (optRORemaining < headerSize) {
        diag.warning("libobjc's read-only section is too small (metadata not optimized)");
        return;
    }
    objc_opt::objc_opt_t* optROHeader = (objc_opt::objc_opt_t *)optROData;
    optROData += headerSize;
    optRORemaining -= headerSize;
    if (E::get32(optROHeader->version) != objc_opt::VERSION) {
        diag.warning("libobjc's read-only section version is unrecognized (metadata not optimized)");
        return;
    }

    if (optPointerListSection->size() < sizeof(objc_opt::objc_opt_pointerlist_tt<pint_t>)) {
        diag.warning("libobjc's pointer list section is too small (metadata not optimized)");
        return;
    }
    const objc_opt::objc_opt_pointerlist_tt<pint_t> *optPointerList = (const objc_opt::objc_opt_pointerlist_tt<pint_t> *)cacheAccessor.contentForVMAddr(optPointerListSection->addr());
    // Write nothing to optROHeader until everything else is written.
    // If something fails below, libobjc will not use the section.


    //
    // Make copy of objcList and sort that list.
    //
    std::vector<const macho_header<P>*> addressSortedDylibs = objcDylibs;
    std::sort(addressSortedDylibs.begin(), addressSortedDylibs.end(), [](const macho_header<P>* lmh, const macho_header<P>* rmh) -> bool {
        return lmh < rmh;
    });
    //
    // Build HeaderInfo list in cache
    //
    // First the RO header info
    // log("writing out %d RO dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optROSection->size() - optRORemaining));
    uint64_t hinfoROVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    HeaderInfoOptimizer<P, objc_header_info_ro_t<P>> hinfoROOptimizer;
    const char* err = hinfoROOptimizer.init((uint32_t)objcDylibs.size(), optROData, optRORemaining);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    else {
        for (const macho_header<P>* mh : addressSortedDylibs) {
            hinfoROOptimizer.update(&cacheAccessor, mh, pointersForASLR);
        }
    }

    // Then the RW header info
    // log("writing out %d RW dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optRWSection->size() - optRWRemaining));
    uint64_t hinfoRWVMAddr = (uint64_t)optRWSection->addr() + (uint64_t)optRWSection->size() - optRWRemaining;
    HeaderInfoOptimizer<P, objc_header_info_rw_t<P>> hinfoRWOptimizer;
    err = hinfoRWOptimizer.init((uint32_t)objcDylibs.size(), optRWData, optRWRemaining);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    else {
        for (const macho_header<P>* mh : addressSortedDylibs) {
            hinfoRWOptimizer.update(&cacheAccessor, mh, pointersForASLR);
        }
    }
    //
    // Update selector references and build selector list
    //
    // This is SAFE: if we run out of room for the selector table,
    // the modified binaries are still usable.
    //
    // Heuristic: choose selectors from libraries with more selector cstring data first.
    // This tries to localize selector cstring memory.
    //
    ObjCSelectorUniquer<P> uniq(&cacheAccessor);
    std::vector<const macho_header<P>*> sizeSortedDylibs = objcDylibs;
    std::sort(sizeSortedDylibs.begin(), sizeSortedDylibs.end(), [](const macho_header<P>* lmh, const macho_header<P>* rmh) -> bool {
        const macho_section<P>* lSection = lmh->getSection("__TEXT", "__objc_methname");
        const macho_section<P>* rSection = rmh->getSection("__TEXT", "__objc_methname");
        uint64_t lSelectorSize = (lSection ? lSection->size() : 0);
        uint64_t rSelectorSize = (rSection ? rSection->size() : 0);
        return lSelectorSize > rSelectorSize;
    });

    SelectorOptimizer<P, ObjCSelectorUniquer<P> > selOptimizer(uniq);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        LegacySelectorUpdater<P, ObjCSelectorUniquer<P>>::update(&cacheAccessor, mh, uniq);
        selOptimizer.optimize(&cacheAccessor, mh);
    }

    diag.verbose("  uniqued  %6lu selectors\n", uniq.strings().size());
    diag.verbose("  updated  %6lu selector references\n", uniq.count());

    uint64_t seloptVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    objc_opt::objc_selopt_t *selopt = new(optROData) objc_opt::objc_selopt_t;
    err = selopt->write(seloptVMAddr, optRORemaining, uniq.strings());
    if (err) {
        diag.warning("%s", err);
        return;
    }
    optROData += selopt->size();
    optRORemaining -= selopt->size();
    uint32_t seloptCapacity = selopt->capacity;
    uint32_t seloptOccupied = selopt->occupied;
    selopt->byteswap(E::little_endian), selopt = nullptr;

    diag.verbose("  selector table occupancy %u/%u (%u%%)\n",
                 seloptOccupied, seloptCapacity,
                 (unsigned)(seloptOccupied/(double)seloptCapacity*100));
    //
    // Detect classes that have missing weak-import superclasses.
    //
    // Production only. Development cache does not do this: a replacement
    // library could omit a class at runtime that was present during
    // cache construction.
    //
    // This is SAFE: the binaries themselves are unmodified.
    bool noMissingWeakSuperclasses = false; // dev cache can't promise otherwise
    if (forProduction) {
        WeakClassDetector<P> weakopt;
        noMissingWeakSuperclasses =
            weakopt.noMissingWeakSuperclasses(&cacheAccessor, sizeSortedDylibs);

        // Shared cache does not currently support unbound weak references.
        // Here we assert that there are none. If support is added later then
        // this assertion needs to be removed and this path needs to be tested.
        if (!noMissingWeakSuperclasses) {
            diag.error("Some Objective-C class has a superclass that is "
                       "weak-import and missing from the cache.");
        }
    }
    //
    // Build class table.
    //
    // This is SAFE: the binaries themselves are unmodified.
    ClassListBuilder<P> classes(hinfoROOptimizer);
    ClassWalker<P, ClassListBuilder<P>> classWalker(classes);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        classWalker.walk(&cacheAccessor, mh);
    }

    diag.verbose("  recorded % 6ld classes\n", classes.classNames().size());

    uint64_t clsoptVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    objc_opt::objc_clsopt_t *clsopt = new(optROData) objc_opt::objc_clsopt_t;
    err = clsopt->write(clsoptVMAddr, optRORemaining,
                        classes.classNames(), classes.classes(), false);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    optROData += clsopt->size();
    optRORemaining -= clsopt->size();
    size_t duplicateCount = clsopt->duplicateCount();
    uint32_t clsoptCapacity = clsopt->capacity;
    uint32_t clsoptOccupied = clsopt->occupied;
    clsopt->byteswap(E::little_endian);
    clsopt = nullptr;

    diag.verbose("  found    % 6ld duplicate classes\n",
                 duplicateCount);
    diag.verbose("  class table occupancy %u/%u (%u%%)\n",
                 clsoptOccupied, clsoptCapacity,
                 (unsigned)(clsoptOccupied/(double)clsoptCapacity*100));
    //
    // Sort method lists.
    //
    // This is SAFE: modified binaries are still usable as unsorted lists.
    // This must be done AFTER uniquing selectors.
    MethodListSorter<P> methodSorter;
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        methodSorter.optimize(&cacheAccessor, mh);
    }

    diag.verbose("  sorted   % 6ld method lists\n", methodSorter.optimized());
    //
    // Unique protocols and build protocol table.
    //
    // This is SAFE: no protocol references are updated yet
    // This must be done AFTER updating method lists.
    ProtocolOptimizer<P> protocolOptimizer(diag);
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        protocolOptimizer.addProtocols(&cacheAccessor, mh);
    }

    diag.verbose("  uniqued  % 6ld protocols\n",
                 protocolOptimizer.protocolCount());

    pint_t protocolClassVMAddr = (pint_t)P::getP(optPointerList->protocolClass);
    err = protocolOptimizer.writeProtocols(&cacheAccessor,
                                           optRWData, optRWRemaining,
                                           optROData, optRORemaining,
                                           pointersForASLR, protocolClassVMAddr);
    if (err) {
        diag.warning("%s", err);
        return;
    }

    uint64_t protocoloptVMAddr = optROSection->addr() + optROSection->size() - optRORemaining;
    objc_opt::objc_protocolopt_t *protocolopt = new (optROData) objc_opt::objc_protocolopt_t;
    err = protocolopt->write(protocoloptVMAddr, optRORemaining,
                             protocolOptimizer.protocolNames(),
                             protocolOptimizer.protocols(), true);
    if (err) {
        diag.warning("%s", err);
        return;
    }
    optROData += protocolopt->size();
    optRORemaining -= protocolopt->size();
    uint32_t protocoloptCapacity = protocolopt->capacity;
    uint32_t protocoloptOccupied = protocolopt->occupied;
    protocolopt->byteswap(E::little_endian), protocolopt = NULL;

    diag.verbose("  protocol table occupancy %u/%u (%u%%)\n",
                 protocoloptOccupied, protocoloptCapacity,
                 (unsigned)(protocoloptOccupied/(double)protocoloptCapacity*100));
    // Redirect protocol references to the uniqued protocols.
    //
    // This is SAFE: the new protocol objects are still usable as-is.
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        protocolOptimizer.updateReferences(&cacheAccessor, mh);
    }

    diag.verbose("  updated  % 6ld protocol references\n", protocolOptimizer.protocolReferenceCount());
    //
    // Repair ivar offsets.
    //
    // This is SAFE: the runtime always validates ivar offsets at runtime.
    IvarOffsetOptimizer<P> ivarOffsetOptimizer;
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        ivarOffsetOptimizer.optimize(&cacheAccessor, mh);
    }

    diag.verbose("  updated  % 6ld ivar offsets\n", ivarOffsetOptimizer.optimized());
    // Collect flags.
    uint32_t headerFlags = 0;
    if (forProduction) {
        headerFlags |= objc_opt::IsProduction;
    }
    if (noMissingWeakSuperclasses) {
        headerFlags |= objc_opt::NoMissingWeakSuperclasses;
    }


    // Success. Mark dylibs as optimized.
    for (const macho_header<P>* mh : sizeSortedDylibs) {
        const macho_section<P>* imageInfoSection = mh->getSection("__DATA", "__objc_imageinfo");
        if (!imageInfoSection) {
            imageInfoSection = mh->getSection("__OBJC", "__image_info");
        }
        if (imageInfoSection) {
            objc_image_info<P>* info = (objc_image_info<P>*)cacheAccessor.contentForVMAddr(imageInfoSection->addr());
            info->setOptimizedByDyld();
        }
    }
    // Success. Update RO header last.
    E::set32(optROHeader->flags, headerFlags);
    E::set32(optROHeader->selopt_offset, (uint32_t)(seloptVMAddr - optROSection->addr()));
    E::set32(optROHeader->clsopt_offset, (uint32_t)(clsoptVMAddr - optROSection->addr()));
    E::set32(optROHeader->protocolopt_offset, (uint32_t)(protocoloptVMAddr - optROSection->addr()));
    E::set32(optROHeader->headeropt_ro_offset, (uint32_t)(hinfoROVMAddr - optROSection->addr()));
    E::set32(optROHeader->headeropt_rw_offset, (uint32_t)(hinfoRWVMAddr - optROSection->addr()));

    // Log statistics.
    size_t roSize = optROSection->size() - optRORemaining;
    size_t rwSize = optRWSection->size() - optRWRemaining;
    diag.verbose("  %lu/%llu bytes (%d%%) used in libobjc read-only optimization section\n",
                 roSize, optROSection->size(), percent(roSize, optROSection->size()));
    diag.verbose("  %lu/%llu bytes (%d%%) used in libobjc read/write optimization section\n",
                 rwSize, optRWSection->size(), percent(rwSize, optRWSection->size()));
    diag.verbose("  wrote objc metadata optimization version %d\n", objc_opt::VERSION);
}
void optimizeObjC(DyldSharedCache* cache, bool is64, bool customerCache, std::vector<void*>& pointersForASLR, Diagnostics& diag)
{
    if ( is64 )
        optimizeObjC<Pointer64<LittleEndian>>(cache, customerCache, pointersForASLR, diag);
    else
        optimizeObjC<Pointer32<LittleEndian>>(cache, customerCache, pointersForASLR, diag);
}
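
// Illustrative call site (assumed; the real invocation lives in the cache
// builder and its exact arguments may differ):
//   std::vector<void*> aslrPointers;
//   Diagnostics diag;
//   optimizeObjC(cache, /*is64=*/true, /*customerCache=*/true, aslrPointers, diag);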