1 /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
3 * Copyright (c) 2014 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
26 #include "mega-dylib-utils.h"
28 #include "MachOFileAbstraction.hpp"
32 #include <sys/errno.h>
33 #include <sys/fcntl.h>
34 #include <mach-o/loader.h>
35 #include <mach-o/fat.h>
// Scan a C++ or Swift length-mangled field ("<decimal length><name>").
// On success:
//   field  points at the first character of the name (just past the digits)
//   length is the decoded decimal length
//   string is advanced past the digits AND the name (field + length)
// Returns false on a leading zero, on arithmetic overflow while decoding
// the length, or if the decoded name would run past `end`.
static bool scanMangledField(const char *&string, const char *end,
                             const char *&field, int& length)
{
    // Leading zero not allowed.
    if (*string == '0') return false;

    length = 0;
    field = string;
    while (field < end) {
        char c = *field;
        if (!isdigit(c)) break;
        field++;
        // Overflow-checked length = length*10 + (c - '0').
        if (__builtin_smul_overflow(length, 10, &length)) return false;
        if (__builtin_sadd_overflow(length, c - '0', &length)) return false;
    }

    string = field + length;
    return length > 0 && string <= end;
}
61 // copySwiftDemangledName
62 // Returns the pretty form of the given Swift-mangled class or protocol name.
63 // Returns nullptr if the string doesn't look like a mangled Swift name.
64 // The result must be freed with free().
65 static char *copySwiftDemangledName(const char *string
, bool isProtocol
= false)
67 if (!string
) return nullptr;
69 // Swift mangling prefix.
70 if (strncmp(string
, isProtocol
? "_TtP" : "_TtC", 4) != 0) return nullptr;
73 const char *end
= string
+ strlen(string
);
78 if (string
[0] == 's') {
79 // "s" is the Swift module.
84 if (! scanMangledField(string
, end
, prefix
, prefixLength
)) return nullptr;
87 // Class or protocol name.
90 if (! scanMangledField(string
, end
, suffix
, suffixLength
)) return nullptr;
93 // Remainder must be "_".
94 if (strcmp(string
, "_") != 0) return nullptr;
96 // Remainder must be empty.
97 if (string
!= end
) return nullptr;
101 asprintf(&result
, "%.*s.%.*s", prefixLength
,prefix
, suffixLength
,suffix
);
106 class ContentAccessor
{
108 ContentAccessor(SharedCache
& cache
) {
109 cache
.forEachRegion([&] (void* content
, uint64_t vmAddr
, uint64_t size
, uint32_t permissions
) {
110 Info info
= { (uint8_t*)content
, (uint8_t*)content
+size
, vmAddr
, vmAddr
+size
};
111 _regions
.push_back(info
);
115 void* contentForVMAddr(uint64_t vmaddr
) {
116 for (Info
& info
: _regions
) {
117 if ( (info
.startAddr
<= vmaddr
) && (vmaddr
< info
.endAddr
) )
118 return (void*)(info
.contentStart
+ vmaddr
- info
.startAddr
);
122 terminate("contentForVMAddr(0x%0llX) invalid vmaddr in ObjC data", vmaddr
);
125 uint64_t vmAddrForContent(const void* content
) {
126 for (Info
& info
: _regions
) {
127 if ( (info
.contentStart
<= content
) && (content
< info
.contentEnd
) )
128 return info
.startAddr
+ ((uint8_t*)content
- (uint8_t*)info
.contentStart
);
130 terminate("vmAddrForContent(%p) invalid content pointer in ObjC data", content
);
134 struct Info
{ uint8_t* contentStart
; uint8_t* contentEnd
; uint64_t startAddr
; uint64_t endAddr
; };
135 std::vector
<Info
> _regions
;
139 // Access a section containing a list of pointers
140 template <typename P
, typename T
>
143 typedef typename
P::uint_t pint_t
;
145 PointerSection(ContentAccessor
* cache
, const macho_header
<P
>* mh
,
146 const char* segname
, const char* sectname
)
148 _section(mh
->getSection(segname
, sectname
)),
149 _base(_section
? (pint_t
*)cache
->contentForVMAddr(_section
->addr()) : 0),
150 _count(_section
? (pint_t
)(_section
->size() / sizeof(pint_t
)) : 0) {
153 pint_t
count() const { return _count
; }
155 pint_t
getVMAddress(pint_t index
) const {
156 if ( index
>= _count
)
157 terminate("index out of range in section %s", _section
->sectname());
158 return (pint_t
)P::getP(_base
[index
]);
161 T
get(pint_t index
) const {
162 return (T
)_cache
->contentForVMAddr(getVMAddress(index
));
165 void setVMAddress(pint_t index
, pint_t value
) {
167 terminate("index out of range in section %s", _section
->sectname());
168 P::setP(_base
[index
], value
);
173 for (pint_t i
= 0; i
< _count
; i
++) {
174 pint_t value
= _base
[i
];
176 _base
[i
-shift
] = value
;
182 const_cast<macho_section
<P
>*>(_section
)->set_size(_count
* sizeof(pint_t
));
186 ContentAccessor
* const _cache
;
187 const macho_section
<P
>* const _section
;
193 // Access a section containing an array of structures
194 template <typename P
, typename T
>
198 ArraySection(ContentAccessor
* cache
, const macho_header
<P
>* mh
,
199 const char *segname
, const char *sectname
)
201 _section(mh
->getSection(segname
, sectname
)),
202 _base(_section
? (T
*)cache
->contentForVMAddr(_section
->addr()) : 0),
203 _count(_section
? _section
->size() / sizeof(T
) : 0) {
206 uint64_t count() const { return _count
; }
208 T
& get(uint64_t index
) const {
210 terminate("index out of range in section %s", _section
->sectname());
215 ContentAccessor
* const _cache
;
216 const macho_section
<P
>* const _section
;
218 uint64_t const _count
;
223 #include "objc-shared-cache.h"
224 #include "ObjC1Abstraction.hpp"
225 #include "ObjC2Abstraction.hpp"
232 template <typename P
>
233 class ObjCSelectorUniquer
236 typedef typename
P::uint_t pint_t
;
238 ObjCSelectorUniquer(ContentAccessor
* cache
) : _cache(cache
) { }
240 pint_t
visit(pint_t oldValue
)
243 const char *s
= (const char *)_cache
->contentForVMAddr(oldValue
);
244 objc_opt::string_map::iterator element
=
245 _selectorStrings
.insert(objc_opt::string_map::value_type(s
, oldValue
)).first
;
246 return (pint_t
)element
->second
;
249 objc_opt::string_map
& strings() {
250 return _selectorStrings
;
253 size_t count() const { return _count
; }
256 objc_opt::string_map _selectorStrings
;
257 ContentAccessor
* _cache
;
262 template <typename P
>
263 class ClassListBuilder
266 objc_opt::string_map _classNames
;
267 objc_opt::class_map _classes
;
269 HeaderInfoOptimizer
<P
, objc_header_info_ro_t
<P
>>& _hInfos
;
273 ClassListBuilder(HeaderInfoOptimizer
<P
, objc_header_info_ro_t
<P
>>& hinfos
) : _hInfos(hinfos
) { }
275 void visitClass(ContentAccessor
* cache
,
276 const macho_header
<P
>* header
,
277 objc_class_t
<P
>* cls
)
279 if (cls
->isMetaClass(cache
)) return;
281 const char *name
= cls
->getName(cache
);
282 uint64_t name_vmaddr
= cache
->vmAddrForContent((void*)name
);
283 uint64_t cls_vmaddr
= cache
->vmAddrForContent(cls
);
284 uint64_t hinfo_vmaddr
= cache
->vmAddrForContent(_hInfos
.hinfoForHeader(cache
, header
));
285 _classNames
.insert(objc_opt::string_map::value_type(name
, name_vmaddr
));
286 _classes
.insert(objc_opt::class_map::value_type(name
, std::pair
<uint64_t, uint64_t>(cls_vmaddr
, hinfo_vmaddr
)));
290 objc_opt::string_map
& classNames() {
294 objc_opt::class_map
& classes() {
298 size_t count() const { return _count
; }
301 template <typename P
>
302 class ProtocolOptimizer
305 typedef typename
P::uint_t pint_t
;
307 objc_opt::string_map _protocolNames
;
308 objc_opt::protocol_map _protocols
;
309 size_t _protocolCount
;
310 size_t _protocolReferenceCount
;
312 friend class ProtocolReferenceWalker
<P
, ProtocolOptimizer
<P
>>;
314 pint_t
visitProtocolReference(ContentAccessor
* cache
, pint_t oldValue
)
316 objc_protocol_t
<P
>* proto
= (objc_protocol_t
<P
>*)
317 cache
->contentForVMAddr(oldValue
);
318 pint_t newValue
= (pint_t
)_protocols
[proto
->getName(cache
)];
319 if (oldValue
!= newValue
) _protocolReferenceCount
++;
329 , _protocolReferenceCount(0)
332 void addProtocols(ContentAccessor
* cache
,
333 const macho_header
<P
>* header
)
335 PointerSection
<P
, objc_protocol_t
<P
> *>
336 protocols(cache
, header
, "__DATA", "__objc_protolist");
338 for (pint_t i
= 0; i
< protocols
.count(); i
++) {
339 objc_protocol_t
<P
> *proto
= protocols
.get(i
);
341 const char *name
= proto
->getName(cache
);
342 if (_protocolNames
.count(name
) == 0) {
343 if (proto
->getSize() > sizeof(objc_protocol_t
<P
>)) {
344 terminate("objc protocol is too big");
347 uint64_t name_vmaddr
= cache
->vmAddrForContent((void*)name
);
348 uint64_t proto_vmaddr
= cache
->vmAddrForContent(proto
);
349 _protocolNames
.insert(objc_opt::string_map::value_type(name
, name_vmaddr
));
350 _protocols
.insert(objc_opt::protocol_map::value_type(name
, proto_vmaddr
));
356 const char *writeProtocols(ContentAccessor
* cache
,
357 uint8_t *& rwdest
, size_t& rwremaining
,
358 uint8_t *& rodest
, size_t& roremaining
,
359 std::vector
<void*>& pointersInData
,
360 pint_t protocolClassVMAddr
)
362 if (_protocolCount
== 0) return NULL
;
364 if (protocolClassVMAddr
== 0) {
365 return "libobjc's Protocol class symbol not found (metadata not optimized)";
368 size_t rwrequired
= _protocolCount
* sizeof(objc_protocol_t
<P
>);
369 if (rwremaining
< rwrequired
) {
370 return "libobjc's read-write section is too small (metadata not optimized)";
373 for (objc_opt::protocol_map::iterator iter
= _protocols
.begin();
374 iter
!= _protocols
.end();
377 objc_protocol_t
<P
>* oldProto
= (objc_protocol_t
<P
>*)
378 cache
->contentForVMAddr(iter
->second
);
380 // Create a new protocol object.
381 objc_protocol_t
<P
>* proto
= (objc_protocol_t
<P
>*)rwdest
;
382 rwdest
+= sizeof(*proto
);
383 rwremaining
-= sizeof(*proto
);
386 uint32_t oldSize
= oldProto
->getSize();
387 memcpy(proto
, oldProto
, oldSize
);
388 if (!proto
->getIsaVMAddr()) {
389 proto
->setIsaVMAddr(protocolClassVMAddr
);
391 if (oldSize
< sizeof(*proto
)) {
392 // Protocol object is old. Populate new fields.
393 proto
->setSize(sizeof(objc_protocol_t
<P
>));
394 // missing extendedMethodTypes is already nil
396 // Some protocol objects are big enough to have the
397 // demangledName field but don't initialize it.
398 // Initialize it here if it is not already set.
399 if (!proto
->getDemangledName(cache
)) {
400 const char *roName
= proto
->getName(cache
);
401 char *demangledName
= copySwiftDemangledName(roName
, true);
403 size_t length
= 1 + strlen(demangledName
);
404 if (roremaining
< length
) {
405 return "libobjc's read-only section is too small (metadata not optimized)";
408 memmove(rodest
, demangledName
, length
);
409 roName
= (const char *)rodest
;
411 roremaining
-= length
;
415 proto
->setDemangledName(cache
, roName
);
419 // Redirect the protocol table at our new object.
420 iter
->second
= cache
->vmAddrForContent(proto
);
422 // Add new rebase entries.
423 proto
->addPointers(pointersInData
);
429 void updateReferences(ContentAccessor
* cache
, const macho_header
<P
>* header
)
431 ProtocolReferenceWalker
<P
, ProtocolOptimizer
<P
>> refs(*this);
432 refs
.walk(cache
, header
);
435 objc_opt::string_map
& protocolNames() {
436 return _protocolNames
;
439 objc_opt::protocol_map
& protocols() {
443 size_t protocolCount() const { return _protocolCount
; }
444 size_t protocolReferenceCount() const { return _protocolReferenceCount
; }
// Returns num/denom as a truncated integer percentage.
// Guards against division by zero: a zero denominator yields 0.
static int percent(size_t num, size_t denom) {
    if (denom == 0)
        return 0;
    return (int)(num / (double)denom * 100);
}
456 template <typename P
>
457 void optimizeObjC(SharedCache
& cache
, std::vector
<void*>& pointersForASLR
, bool forProduction
)
459 typedef typename
P::E E
;
460 typedef typename
P::uint_t pint_t
;
462 verboseLog("Optimizing objc metadata:");
463 verboseLog(" cache type is %s",
464 forProduction
? "production" : "development");
466 ContentAccessor
cacheAccessor(cache
);
468 size_t headerSize
= P::round_up(sizeof(objc_opt::objc_opt_t
));
469 if (headerSize
!= sizeof(objc_opt::objc_opt_t
)) {
470 warning("libobjc's optimization structure size is wrong (metadata not optimized)");
474 // Find libobjc's empty sections and build list of images with objc metadata
476 const macho_section
<P
> *optROSection
= nullptr;
477 const macho_section
<P
> *optRWSection
= nullptr;
478 const macho_section
<P
> *optPointerListSection
= nullptr;
479 std::vector
<const macho_header
<P
>*> objcDylibs
;
480 cache
.forEachImage([&](const void* machHeader
, const char* installName
,
481 time_t, ino_t
, const std::vector
<MachOProxySegment
>& segments
) {
482 const macho_header
<P
>* mh
= (const macho_header
<P
>*)machHeader
;
483 if ( strstr(installName
, "/libobjc.") != nullptr ) {
484 optROSection
= mh
->getSection("__TEXT", "__objc_opt_ro");
485 optRWSection
= mh
->getSection("__DATA", "__objc_opt_rw");
486 optPointerListSection
= mh
->getSection("__DATA", "__objc_opt_ptrs");
488 if ( mh
->getSection("__DATA", "__objc_imageinfo") || mh
->getSection("__OBJC", "__image_info") ) {
489 objcDylibs
.push_back(mh
);
491 // log("installName %s at mhdr 0x%016lx", installName, (uintptr_t)cacheAccessor.vmAddrForContent((void*)mh));
493 if ( optROSection
== nullptr ) {
494 warning("libobjc's read-only section missing (metadata not optimized)");
497 if ( optRWSection
== nullptr ) {
498 warning("libobjc's read/write section missing (metadata not optimized)");
501 if ( optPointerListSection
== nullptr ) {
502 warning("libobjc's pointer list section missing (metadata not optimized)");
506 uint8_t* optROData
= (uint8_t*)cacheAccessor
.contentForVMAddr(optROSection
->addr());
507 size_t optRORemaining
= optROSection
->size();
508 uint8_t* optRWData
= (uint8_t*)cacheAccessor
.contentForVMAddr(optRWSection
->addr());
509 size_t optRWRemaining
= optRWSection
->size();
510 if (optRORemaining
< headerSize
) {
511 warning("libobjc's read-only section is too small (metadata not optimized)");
514 objc_opt::objc_opt_t
* optROHeader
= (objc_opt::objc_opt_t
*)optROData
;
515 optROData
+= headerSize
;
516 optRORemaining
-= headerSize
;
517 if (E::get32(optROHeader
->version
) != objc_opt::VERSION
) {
518 warning("libobjc's read-only section version is unrecognized (metadata not optimized)");
522 if (optPointerListSection
->size() < sizeof(objc_opt::objc_opt_pointerlist_tt
<pint_t
>)) {
523 warning("libobjc's pointer list section is too small (metadata not optimized)");
526 const objc_opt::objc_opt_pointerlist_tt
<pint_t
> *optPointerList
= (const objc_opt::objc_opt_pointerlist_tt
<pint_t
> *)cacheAccessor
.contentForVMAddr(optPointerListSection
->addr());
528 // Write nothing to optROHeader until everything else is written.
529 // If something fails below, libobjc will not use the section.
533 // Make copy of objcList and sort that list.
535 std::vector
<const macho_header
<P
>*> addressSortedDylibs
= objcDylibs
;
536 std::sort(addressSortedDylibs
.begin(), addressSortedDylibs
.end(), [](const macho_header
<P
>* lmh
, const macho_header
<P
>* rmh
) -> bool {
541 // Build HeaderInfo list in cache
543 // First the RO header info
544 // log("writing out %d RO dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optROSection->size() - optRORemaining));
545 uint64_t hinfoROVMAddr
= optROSection
->addr() + optROSection
->size() - optRORemaining
;
546 HeaderInfoOptimizer
<P
, objc_header_info_ro_t
<P
>> hinfoROOptimizer
;
547 const char* err
= hinfoROOptimizer
.init((uint32_t)objcDylibs
.size(), optROData
, optRORemaining
);
553 for (const macho_header
<P
>* mh
: addressSortedDylibs
) {
554 hinfoROOptimizer
.update(&cacheAccessor
, mh
, pointersForASLR
);
558 // Then the RW header info
559 // log("writing out %d RW dylibs at offset %d", (uint32_t)objcDylibs.size(), (uint32_t)(optRWSection->size() - optRWRemaining));
560 uint64_t hinfoRWVMAddr
= (uint64_t)optRWSection
->addr() + (uint64_t)optRWSection
->size() - optRWRemaining
;
561 HeaderInfoOptimizer
<P
, objc_header_info_rw_t
<P
>> hinfoRWOptimizer
;
562 err
= hinfoRWOptimizer
.init((uint32_t)objcDylibs
.size(), optRWData
, optRWRemaining
);
568 for (const macho_header
<P
>* mh
: addressSortedDylibs
) {
569 hinfoRWOptimizer
.update(&cacheAccessor
, mh
, pointersForASLR
);
574 // Update selector references and build selector list
576 // This is SAFE: if we run out of room for the selector table,
577 // the modified binaries are still usable.
579 // Heuristic: choose selectors from libraries with more selector cstring data first.
580 // This tries to localize selector cstring memory.
582 ObjCSelectorUniquer
<P
> uniq(&cacheAccessor
);
583 std::vector
<const macho_header
<P
>*> sizeSortedDylibs
= objcDylibs
;
584 std::sort(sizeSortedDylibs
.begin(), sizeSortedDylibs
.end(), [](const macho_header
<P
>* lmh
, const macho_header
<P
>* rmh
) -> bool {
585 const macho_section
<P
>* lSection
= lmh
->getSection("__TEXT", "__objc_methname");
586 const macho_section
<P
>* rSection
= rmh
->getSection("__TEXT", "__objc_methname");
587 uint64_t lSelectorSize
= (lSection
? lSection
->size() : 0);
588 uint64_t rSelectorSize
= (rSection
? rSection
->size() : 0);
589 return lSelectorSize
> rSelectorSize
;
592 SelectorOptimizer
<P
, ObjCSelectorUniquer
<P
> > selOptimizer(uniq
);
593 for (const macho_header
<P
>* mh
: sizeSortedDylibs
) {
594 LegacySelectorUpdater
<P
, ObjCSelectorUniquer
<P
>>::update(&cacheAccessor
, mh
, uniq
);
595 selOptimizer
.optimize(&cacheAccessor
, mh
);
598 verboseLog(" uniqued % 6ld selectors",
599 uniq
.strings().size());
600 verboseLog(" updated % 6ld selector references",
603 uint64_t seloptVMAddr
= optROSection
->addr() + optROSection
->size() - optRORemaining
;
604 objc_opt::objc_selopt_t
*selopt
= new(optROData
) objc_opt::objc_selopt_t
;
605 err
= selopt
->write(seloptVMAddr
, optRORemaining
, uniq
.strings());
610 optROData
+= selopt
->size();
611 optRORemaining
-= selopt
->size();
612 uint32_t seloptCapacity
= selopt
->capacity
;
613 uint32_t seloptOccupied
= selopt
->occupied
;
614 selopt
->byteswap(E::little_endian
), selopt
= nullptr;
616 verboseLog(" selector table occupancy %u/%u (%u%%)",
617 seloptOccupied
, seloptCapacity
,
618 (unsigned)(seloptOccupied
/(double)seloptCapacity
*100));
622 // Detect classes that have missing weak-import superclasses.
624 // Production only. Development cache does not do this: a replacement
625 // library could omit a class at runtime that was present during
626 // cache construction.
628 // This is SAFE: the binaries themselves are unmodified.
629 bool noMissingWeakSuperclasses
= false; // dev cache can't promise otherwise
631 WeakClassDetector
<P
> weakopt
;
632 noMissingWeakSuperclasses
=
633 weakopt
.noMissingWeakSuperclasses(&cacheAccessor
, sizeSortedDylibs
);
635 // Shared cache does not currently support unbound weak references.
636 // Here we assert that there are none. If support is added later then
637 // this assertion needs to be removed and this path needs to be tested.
638 if (!noMissingWeakSuperclasses
) {
639 terminate("Some Objective-C class has a superclass that is "
640 "weak-import and missing from the cache.");
646 // Build class table.
648 // This is SAFE: the binaries themselves are unmodified.
649 ClassListBuilder
<P
> classes(hinfoROOptimizer
);
650 ClassWalker
<P
, ClassListBuilder
<P
>> classWalker(classes
);
651 for (const macho_header
<P
>* mh
: sizeSortedDylibs
) {
652 classWalker
.walk(&cacheAccessor
, mh
);
655 verboseLog(" recorded % 6ld classes",
656 classes
.classNames().size());
658 uint64_t clsoptVMAddr
= optROSection
->addr() + optROSection
->size() - optRORemaining
;
659 objc_opt::objc_clsopt_t
*clsopt
= new(optROData
) objc_opt::objc_clsopt_t
;
660 err
= clsopt
->write(clsoptVMAddr
, optRORemaining
,
661 classes
.classNames(), classes
.classes(), false);
666 optROData
+= clsopt
->size();
667 optRORemaining
-= clsopt
->size();
668 size_t duplicateCount
= clsopt
->duplicateCount();
669 uint32_t clsoptCapacity
= clsopt
->capacity
;
670 uint32_t clsoptOccupied
= clsopt
->occupied
;
671 clsopt
->byteswap(E::little_endian
);
674 verboseLog(" found % 6ld duplicate classes",
676 verboseLog(" class table occupancy %u/%u (%u%%)",
677 clsoptOccupied
, clsoptCapacity
,
678 (unsigned)(clsoptOccupied
/(double)clsoptCapacity
*100));
682 // Sort method lists.
684 // This is SAFE: modified binaries are still usable as unsorted lists.
685 // This must be done AFTER uniquing selectors.
686 MethodListSorter
<P
> methodSorter
;
687 for (const macho_header
<P
>* mh
: sizeSortedDylibs
) {
688 methodSorter
.optimize(&cacheAccessor
, mh
);
691 verboseLog(" sorted % 6ld method lists",
692 methodSorter
.optimized());
695 // Unique protocols and build protocol table.
697 // This is SAFE: no protocol references are updated yet
698 // This must be done AFTER updating method lists.
700 ProtocolOptimizer
<P
> protocolOptimizer
;
701 for (const macho_header
<P
>* mh
: sizeSortedDylibs
) {
702 protocolOptimizer
.addProtocols(&cacheAccessor
, mh
);
705 verboseLog(" uniqued % 6ld protocols",
706 protocolOptimizer
.protocolCount());
708 pint_t protocolClassVMAddr
= (pint_t
)P::getP(optPointerList
->protocolClass
);
709 err
= protocolOptimizer
.writeProtocols(&cacheAccessor
,
710 optRWData
, optRWRemaining
,
711 optROData
, optRORemaining
,
712 pointersForASLR
, protocolClassVMAddr
);
718 uint64_t protocoloptVMAddr
= optROSection
->addr() + optROSection
->size() - optRORemaining
;
719 objc_opt::objc_protocolopt_t
*protocolopt
= new (optROData
) objc_opt::objc_protocolopt_t
;
720 err
= protocolopt
->write(protocoloptVMAddr
, optRORemaining
,
721 protocolOptimizer
.protocolNames(),
722 protocolOptimizer
.protocols(), true);
727 optROData
+= protocolopt
->size();
728 optRORemaining
-= protocolopt
->size();
729 uint32_t protocoloptCapacity
= protocolopt
->capacity
;
730 uint32_t protocoloptOccupied
= protocolopt
->occupied
;
731 protocolopt
->byteswap(E::little_endian
), protocolopt
= NULL
;
733 verboseLog(" protocol table occupancy %u/%u (%u%%)",
734 protocoloptOccupied
, protocoloptCapacity
,
735 (unsigned)(protocoloptOccupied
/(double)protocoloptCapacity
*100));
738 // Redirect protocol references to the uniqued protocols.
740 // This is SAFE: the new protocol objects are still usable as-is.
741 for (const macho_header
<P
>* mh
: sizeSortedDylibs
) {
742 protocolOptimizer
.updateReferences(&cacheAccessor
, mh
);
745 verboseLog(" updated % 6ld protocol references",
746 protocolOptimizer
.protocolReferenceCount());
750 // Repair ivar offsets.
752 // This is SAFE: the runtime always validates ivar offsets at runtime.
753 IvarOffsetOptimizer
<P
> ivarOffsetOptimizer
;
754 for (const macho_header
<P
>* mh
: sizeSortedDylibs
) {
755 ivarOffsetOptimizer
.optimize(&cacheAccessor
, mh
);
758 verboseLog(" updated % 6ld ivar offsets",
759 ivarOffsetOptimizer
.optimized());
763 uint32_t headerFlags
= 0;
765 headerFlags
|= objc_opt::IsProduction
;
767 if (noMissingWeakSuperclasses
) {
768 headerFlags
|= objc_opt::NoMissingWeakSuperclasses
;
772 // Success. Mark dylibs as optimized.
773 for (const macho_header
<P
>* mh
: sizeSortedDylibs
) {
774 const macho_section
<P
>* imageInfoSection
= mh
->getSection("__DATA", "__objc_imageinfo");
775 if (!imageInfoSection
) {
776 imageInfoSection
= mh
->getSection("__OBJC", "__image_info");
778 if (imageInfoSection
) {
779 objc_image_info
<P
>* info
= (objc_image_info
<P
>*)cacheAccessor
.contentForVMAddr(imageInfoSection
->addr());
780 info
->setOptimizedByDyld();
785 // Success. Update RO header last.
786 E::set32(optROHeader
->flags
, headerFlags
);
787 E::set32(optROHeader
->selopt_offset
, (uint32_t)(seloptVMAddr
- optROSection
->addr()));
788 E::set32(optROHeader
->clsopt_offset
, (uint32_t)(clsoptVMAddr
- optROSection
->addr()));
789 E::set32(optROHeader
->protocolopt_offset
, (uint32_t)(protocoloptVMAddr
- optROSection
->addr()));
790 E::set32(optROHeader
->headeropt_ro_offset
, (uint32_t)(hinfoROVMAddr
- optROSection
->addr()));
791 E::set32(optROHeader
->headeropt_rw_offset
, (uint32_t)(hinfoRWVMAddr
- optROSection
->addr()));
794 size_t roSize
= optROSection
->size() - optRORemaining
;
795 size_t rwSize
= optRWSection
->size() - optRWRemaining
;
796 verboseLog(" %zu/%llu bytes "
797 "(%d%%) used in libobjc read-only optimization section",
798 roSize
, optROSection
->size(),
799 percent(roSize
, optROSection
->size()));
800 verboseLog(" %zu/%llu bytes "
801 "(%d%%) used in libobjc read/write optimization section",
802 rwSize
, optRWSection
->size(),
803 percent(rwSize
, optRWSection
->size()));
804 verboseLog(" wrote objc metadata optimization version %d",
812 void SharedCache::optimizeObjC(bool forProduction
)
814 switch ( _arch
.arch
) {
817 ::optimizeObjC
<Pointer32
<LittleEndian
>>(*this, _pointersForASLR
, forProduction
);
819 case CPU_TYPE_X86_64
:
821 ::optimizeObjC
<Pointer64
<LittleEndian
>>(*this, _pointersForASLR
, forProduction
);
824 terminate("unsupported arch 0x%08X", _arch
.arch
);