]> git.saurik.com Git - apple/dyld.git/blob - dyld3/shared-cache/IMPCaches.cpp
dyld-832.7.3.tar.gz
[apple/dyld.git] / dyld3 / shared-cache / IMPCaches.cpp
1 //
2 // IMPCaches.cpp
3 // dyld_shared_cache_builder
4 //
5 // Created by Thomas Deniau on 18/12/2019.
6 //
7
8 #include "FileAbstraction.hpp"
9 #include "IMPCaches.hpp"
10 #include "IMPCachesBuilder.hpp"
11 #include "JSONReader.h"
12
13 #include <unordered_map>
14 #include <vector>
15 #include <string>
16 #include <iostream>
17 #include <sstream>
18 #include <iomanip>
19 #include <numeric>
20 #include <random>
21 #include <map>
22
23 namespace IMPCaches {
24
25 std::string ClassData::description() const
26 {
27 std::stringstream ss;
28 ss << name << " modulo:" << modulo();
29 if (isMetaclass) ss << " (metaclass)";
30 return ss.str();
31 }
32
33 std::string ClassData::PlacementAttempt::description() const
34 {
35 std::stringstream stream;
36 stream << "needed bits: " << neededBits << ", shift: " << shift;
37 return stream.str();
38 }
39
40 typename ClassData::PlacementAttempt ClassData::attemptForShift(int shiftToTry, int neededBitsToTry) const
41 {
42 int totalNumberOfBitsToSet = 0;
43 int mask = (1 << neededBitsToTry) - 1;
44 for (const Method& method : methods) {
45 totalNumberOfBitsToSet += method.selector->numberOfBitsToSet(shiftToTry, mask);
46 }
47
48 return ClassData::PlacementAttempt(totalNumberOfBitsToSet, shiftToTry, neededBitsToTry);
49 }
50
51 std::vector<typename ClassData::PlacementAttempt> ClassData::attempts() const
52 {
53 // We have 26 MB of selectors, and among them only ~ 7MB are deemed
54 // "interesting" to include in our hash tables.
55
56 // So we should be able to fit on 24 bits of address space (~ 16 MB of selectors),
57 // but need to keep the low 7 bits available so that we
58 // don't have to worry about selector length and so that
59 // we don't have to worry about collisions. (i.e. multiple selectors
60 // end up at the same address with this algorithm and then we play
61 // in another step with
62 // the low 7 bits to give them all an unique address).
63 // Then if there are holes given this 128-byte alignment, we can
64 // fill the holes with selectors excluded from the algorithm.
65
66 // Let us grow the hash tables to one more bit if needed
67 // as the full problem is too difficult.
68 std::vector<int> allowedNeededBits { neededBits, neededBits + 1 };
69 std::vector<PlacementAttempt> attempts;
70 for (auto i : allowedNeededBits) {
71 // Go thrugh all the possible shifts, starting at 0, knowing that
72 // shift+neededBits needs to fit on 17 bits.
73
74 for (int shiftToTry = 0; shiftToTry <= 17 - i; shiftToTry++) {
75 attempts.push_back(attemptForShift(shiftToTry, i));
76 }
77 }
78 sort(attempts.begin(), attempts.end());
79 return attempts;
80 }
81
// Clear every hash-table slot back to "empty" so a fresh placement attempt
// can be evaluated. In DEBUG_SLOTS builds each slot records which selector
// occupies it (so assertions can name the collider); otherwise a plain
// occupancy flag is enough.
void ClassData::resetSlots()
{
#if DEBUG_SLOTS
    slots.assign(slots.size(), nullptr);
#else
    slots.assign(slots.size(), false);
#endif
}
90
// Undo the selector-address assignments performed by a successful
// applyAttempt() call, restoring each selector's in-progress bucket index
// and fixed-bits mask, plus this class's shift/neededBits.
void ClassData::backtrack(typename ClassData::PlacementAttempt::Result& resultToBacktrackFrom)
{
    if (!resultToBacktrackFrom.success) {
        // We backtrack from a failure if we decided to skip a class that was too difficult to place.
        // In this case there's nothing to do: a failed attempt never mutated
        // any selector state.

        return;
    }

    // Restore the addresses and masks we had in place before we did the step that led to resultToBacktrackFrom
    typename PlacementAttempt::PreviousState previousState = resultToBacktrackFrom.previousState;
    for (const Method& method : methods) {
        Selector* selector = method.selector;
        // operator[] is safe here: a successful applyAttempt() recorded an
        // entry for every selector of this class.
        typename PlacementAttempt::PreviousMethodAddress previousMethod = previousState.methods[selector];
        selector->inProgressBucketIndex = previousMethod.address;
        selector->fixedBitsMask = previousMethod.fixedBitsMask;
    }

    shift = previousState.shift;
    neededBits = previousState.neededBits;
}
112
113 // Compute the number of needed bits for the hash table now that all the methods have been added
114 void ClassData::didFinishAddingMethods()
115 {
116 if (methods.size() == 0) {
117 neededBits = 0;
118 } else {
119 neededBits = (int)ceil(log2(double(methods.size())));
120 }
121 }
122
123 bool ClassData::hadToIncreaseSize() const
124 {
125 if (methods.size() == 0) return false;
126 return neededBits > (int)(ceil(log2((double)(methods.size()))));
127 }
128
/// Try to see if we can make the shift and mask in @param attempt work.
/// On success, fixes the corresponding address bits on every selector of this
/// class and returns the previous state so the caller can backtrack().
/// On failure, returns {success = false} without mutating any selector.
typename ClassData::PlacementAttempt::Result ClassData::applyAttempt(const PlacementAttempt& attempt, std::minstd_rand & randomNumberGenerator)
{
    std::vector<Selector*> sortedMethods;
    sortedMethods.reserve(methods.size());
    for (const Method& method : methods) {
        sortedMethods.push_back(method.selector);
    }

    // Solve from most constrained to least constrained
    // (selectors with fewer bits left to set have fewer placement options).
    std::sort(sortedMethods.begin(), sortedMethods.end(), [attempt](Selector* m1, Selector* m2) {
        return m1->numberOfBitsToSet(attempt.shift, attempt.mask()) < m2->numberOfBitsToSet(attempt.shift, attempt.mask());
    });

    // Grow the slot array if this attempt uses a bigger table than any
    // previous one; resetSlots() below clears whatever is in it.
    if (slots.size() < (1 << attempt.neededBits)) {
#if DEBUG_SLOTS
        slots.resize(1 << attempt.neededBits, nullptr);
#else
        slots.resize(1 << attempt.neededBits, false);
#endif
    }

    resetSlots();

    // For each selector (in sortedMethods order): the value that will be
    // OR'ed into its bucket index, shifted by attempt.shift.
    std::vector<int> addresses;
    for (auto m : sortedMethods) {
        bool found = false;

        // Check if all the bits are already assigned
        int shiftedMask = attempt.mask() << attempt.shift;
        if ((m->fixedBitsMask & shiftedMask) == shiftedMask) {
            // Fully constrained: the slot is already determined; fail if taken.
            int index = (m->inProgressBucketIndex >> attempt.shift) & attempt.mask();
            if (slots[index]) {
                typename ClassData::PlacementAttempt::Result result;
                result.success = false;
                return result;
            }
#if DEBUG_SLOTS
            slots[index] = m;
#else
            slots[index] = true;
#endif
            found = true;
            // OR-ing these bits back in later is a no-op since they are
            // already set; pushing `index` keeps addresses[] aligned.
            addresses.push_back(index);
        } else {
            // Some bits are not assigned yet, so try to find an address that would be compatible
            // with the existing bits for this method.

            int attemptModulo = 1 << attempt.neededBits;

            // possibleAddresses = 0..<attemptModulo
            std::vector<int> possibleAddresses(attemptModulo);
            std::iota(possibleAddresses.begin(), possibleAddresses.end(), 0);

            // We randomize the addresses to try so that two random selectors
            // have as many ranges of different bits as possible, in order
            // to find a satisfying shift for every class.

            std::shuffle(possibleAddresses.begin(), possibleAddresses.end(), randomNumberGenerator);
            for (auto i : possibleAddresses) {
                int futureAddress = m->inProgressBucketIndex | (i << attempt.shift);
                int slot = (futureAddress >> attempt.shift) & attempt.mask();

                // Make sure the new address is compatible with the existing bits
                bool addressesMatch = (futureAddress & m->fixedBitsMask) == (m->inProgressBucketIndex & m->fixedBitsMask);
                if (addressesMatch && !slots[slot]) {
#if DEBUG_SLOTS
                    slots[slot] = m;
#else
                    slots[slot] = true;
#endif
                    found = true;
                    addresses.push_back(i);
                    break;
                }
            }
            if (!found) {
                // No free compatible slot for this selector: the attempt fails.
                typename ClassData::PlacementAttempt::Result result;
                result.success = false;
                return result;
            }
        }
    }

    // We succeeded, record the state so that we can backtrack if needed
    std::unordered_map<Selector*, typename PlacementAttempt::PreviousMethodAddress> previousMethods;
    for (unsigned long i = 0; i < sortedMethods.size(); i++) {
        Selector* m = sortedMethods[i];
        int previousAddress = m->inProgressBucketIndex;
        int previousMask = m->fixedBitsMask;
        m->inProgressBucketIndex |= addresses[i] << attempt.shift;
        m->fixedBitsMask |= attempt.mask() << attempt.shift;
        previousMethods[m] = typename PlacementAttempt::PreviousMethodAddress {
            .address = previousAddress,
            .fixedBitsMask = previousMask
        };
    }

    typename PlacementAttempt::PreviousState previousState {
        .neededBits = neededBits,
        .shift = shift,
        .methods = previousMethods
    };
    shift = attempt.shift;
    neededBits = attempt.neededBits;

    typename PlacementAttempt::Result result {
        .success = true,
        .previousState = previousState
    };

    return result;
}
242
// Sanity check: with the current shift/mask, verify that no two methods of
// this class hash to the same slot. Returns false on a collision.
bool ClassData::checkConsistency() {
    resetSlots();
    for (const Method& method : methods) {
        const Selector* s = method.selector;
        int slotIndex = (s->inProgressBucketIndex >> shift) & mask();
        if (slots[slotIndex]) {
            // Two selectors landed in the same slot.
            return false;
        }
#if DEBUG_SLOTS
        slots[slotIndex] = s;
#else
        slots[slotIndex] = true;
#endif
    }
    return true;
}
259
// Given the current placement of all the *other* methods of this class,
// compute the constraint on `method`'s selector address: the set of values
// of (address >> shift) & mask() that would land it in a still-empty slot.
Constraint ClassData::constraintForMethod(const Selector* method) {
    resetSlots();
    allowedValues.clear();

    // Fill the slots with all our methods except `method`
    for (const Method& m : methods) {
        const Selector* s = m.selector;
        if (s == method) {
            continue;
        }

        int slotIndex = (s->inProgressBucketIndex >> shift) & mask();
        // The other methods were already placed collision-free.
#if DEBUG_SLOTS
        assert(slots[slotIndex] == nullptr);
        slots[slotIndex] = s;
#else
        assert(!slots[slotIndex]);
        slots[slotIndex] = true;
#endif
    }

    // What are the remaining empty slots in which we could put `method`?
    int max = 1 << neededBits;
    for (int i = 0 ; i < max ; i++) {
        if (!slots[i]) {
            allowedValues.push_back(i);
        }
    }

    auto allowedSet = std::unordered_set<int>(allowedValues.begin(), allowedValues.end());
    return Constraint {
        .mask = mask(),
        .shift = shift,
        .allowedValues = allowedSet
    };
}
296
// Size in bytes of the IMP cache that will be emitted for this class in the
// shared cache (a table of modulo() buckets).
size_t ClassData::sizeInSharedCache() const {
    return IMPCaches::sizeForImpCacheWithCount(modulo());
}
300
301 int AddressSpace::sizeAtIndex(int idx) const
302 {
303 if (sizes.find(idx) != sizes.end()) {
304 return sizes.at(idx);
305 } else {
306 return 0;
307 }
308 }
309
310 void AddressSpace::removeUninterestingSelectors() {
311 for (auto& [k,v] : methodsByIndex) {
312 v.erase(std::remove_if(v.begin(), v.end(), [](const Selector* s){
313 return s->classes.size() == 0;
314 }), v.end());
315 }
316 }
317
// Free space (in bytes) starting at bucket `idx`: the slack left in `idx`
// itself, plus every following completely-empty bucket, stopping at the
// next bucket that already contains selectors (or at maximumIndex).
int AddressSpace::sizeAvailableAfterIndex(int idx) const
{
    int availableAfterThisAddress = bagSizeAtIndex(idx) - sizeAtIndex(idx);
    for (int j = idx + 1; j < maximumIndex; j++) {
        if (methodsByIndex.find(j) != methodsByIndex.end()) {
            // Bucket j is occupied: stop accumulating.
            break;
        } else {
            availableAfterThisAddress += bagSizeAtIndex(j);
        }
    }

    return availableAfterThisAddress;
}
331
// Because some selectors are longer than 128 bytes, we have to sometimes let
// them overflow into the next 128-byte bucket. This method tells you if you
// can place a method in a bucket without colliding with an overflowing selector
// from one of the previous buckets.
bool AddressSpace::canPlaceWithoutFillingOverflowCellAtIndex(int idx) const
{
    // Bucket 0 has nothing before it to overflow from; a non-empty bucket
    // already proved it isn't swallowed by an earlier overflow.
    if ((idx == 0) || (sizeAtIndex(idx) > 0)) {
        return true;
    }

    // Walk back over the run of empty buckets ending at idx, summing their
    // capacity; the first non-empty bucket before the run may overflow into it.
    int j = idx;
    int availableOnOrBefore = 0;

    while (j > 0 && (sizeAtIndex(j) == 0)) {
        availableOnOrBefore += bagSizeAtIndex(j);
        j -= 1;
    }

    // Safe only if that bucket's content cannot spill all the way into idx.
    int sizeOfFirstNonEmptyCellBefore = sizeAtIndex(j);
    return (sizeOfFirstNonEmptyCellBefore < availableOnOrBefore);
}
353
// Can `method` be placed in 128-byte bucket `idx`? Requires that the bucket
// is not consumed by an earlier selector's overflow, and that either the
// bucket itself has room, or the (long) selector can spill into the free
// buckets that follow.
bool AddressSpace::canPlaceMethodAtIndex(const Selector* method, int idx) const
{
    int existingSize = sizeAtIndex(idx);
    bool canPlaceWithoutFillingOverflow = canPlaceWithoutFillingOverflowCellAtIndex(idx);

    if (!canPlaceWithoutFillingOverflow) {
        return false;
    }

    int available = bagSizeAtIndex(idx) - existingSize;
    int methodSize = method->size();
    bool enoughRoom = available > methodSize;

    if (enoughRoom) {
        return true;
    }

    // Long selectors (> 64 bytes) may start in this bucket as long as it has
    // some room and the following empty buckets absorb the rest.
    bool tooBigButOverflowExists = (methodSize > 64) && (available > 0) && (sizeAvailableAfterIndex(idx) > methodSize);

    return tooBigButOverflowExists;
}
375
376 void AddressSpace::placeMethodAtIndex(Selector* method, int idx)
377 {
378 auto [it, success] = methodsByIndex.try_emplace(idx, std::vector<Selector*>());
379 it->second.push_back(method);
380
381 auto [it2, success2] = sizes.try_emplace(idx, 0);
382 it2->second += method->size();
383 }
384
// At this point selected are already sorted into 128-byte buckets.
// Now fill in the low 7 bits of each address, and return a list
// of intervals [ ..... selector data....][ ...... hole ....][.... selector data...]
// so that we can stuff in selectors that don't participate in
// static IMP caches
void AddressSpace::computeLowBits(HoleMap& selectors) const {
    // Skip past the magic selector (plus its NUL terminator) at the start.
    int currentEndOffset = magicSelector.length() + 1;

    std::set<IMPCaches::Hole> & holes = selectors.holes;
    holes.clear();

    // Visit buckets in ascending index order (methodsByIndex is unordered).
    std::vector<int> orderedIndices;
    for (const auto& [index, selectors] : methodsByIndex) {
        orderedIndices.push_back(index);
    }
    std::sort(orderedIndices.begin(), orderedIndices.end());
    for (int index : orderedIndices) {
        const std::vector<Selector*> & selectorsAtThisIndex = methodsByIndex.at(index);
        // Byte offset where this bucket begins (index * bucket size).
        int bucketOffset = index << bagSizeShift;
        if (bucketOffset > currentEndOffset) {
            // Gap between the previous bucket's end and this bucket: record
            // it so non-participating selectors can be packed into it later.
            holes.insert(Hole {
                .startAddress = currentEndOffset,
                .endAddress = bucketOffset,
            });
            currentEndOffset = bucketOffset;
        }
        // Lay the bucket's selectors out back to back, assigning final offsets.
        for (Selector* s : selectorsAtThisIndex) {
            s->offset = currentEndOffset;
            currentEndOffset += s->size();
        }
    }

    selectors.endAddress = currentEndOffset;
}
419
// Allocate `size` bytes for a selector string: reuse the smallest tracked
// hole that fits (holes are ordered so lower_bound finds it), otherwise
// bump-allocate at the end. Returns the assigned start address.
int HoleMap::addStringOfSize(unsigned size) {
    // static int i = 0;
    // if (i++ % 1000 == 0) {
    //     printf("Inserted 1000 more strings, number of holes = %lu\n", holes.size());
    // }
    Hole neededHole = Hole {
        .startAddress = 0,
        .endAddress = static_cast<int>(size)
    };
    std::set<Hole>::iterator it = holes.lower_bound(neededHole);
    if (it == holes.end()) {
        // No hole is big enough: insert at the end
        int end = endAddress;
        endAddress += size;
        return end;
    } else {
        // Remove this hole and insert a smaller one instead

        int address = it->startAddress;
        Hole updatedHole = *it;
        updatedHole.startAddress += size;
        holes.erase(it);

        // Don't insert if the hole is empty or won't fit any selector
        // (even a one-character selector needs 2 bytes with its NUL).
        if (updatedHole.size() > 1) {
            holes.insert(updatedHole);
        }
        return address;
    }
}
450
451 void HoleMap::clear() {
452 holes.clear();
453 endAddress = 0;
454 }
455
456 unsigned long HoleMap::totalHoleSize() const {
457 unsigned long result = 0;
458 for (const Hole& hole : holes) {
459 result += hole.size();
460 }
461 return result;
462 }
463
464 std::ostream& operator<<(std::ostream& o, const HoleMap& m) {
465 int size = 0;
466 int count = 0;
467 for (const Hole& h : m.holes) {
468 if (h.size() == size) {
469 count++;
470 } else {
471 o << count << " holes of size " << size << std::endl;
472 size = h.size();
473 count = 1;
474 }
475 }
476 return o;
477 }
478
479 std::ostream& operator<<(std::ostream& o, const AddressSpace& a)
480 {
481 int maximumIndex = 0;
482 for (const auto& kvp : a.methodsByIndex) {
483 maximumIndex = std::max(maximumIndex, kvp.first);
484 }
485
486 std::vector<double> lengths;
487 std::map<int, std::vector<Selector*>> sortedMethodsByAddress;
488 sortedMethodsByAddress.insert(a.methodsByIndex.begin(), a.methodsByIndex.end());
489
490 std::map<int, int> sortedSizes;
491 sortedSizes.insert(a.sizes.begin(), a.sizes.end());
492
493 for (const auto& kvp : sortedSizes) {
494 lengths.push_back(kvp.second);
495 }
496
497 for (const auto& kvp : sortedMethodsByAddress) {
498 o << std::setw(5) << kvp.first << ": ";
499 for (Selector* m : kvp.second) {
500 o << m->name << " ";
501 }
502 o << "\n";
503 }
504
505 o << "Max address " << maximumIndex << "\n";
506 o << "Average length " << (double)std::accumulate(lengths.begin(), lengths.end(), 0) / lengths.size() << "\n";
507
508 return o;
509 }
510
// Returns a constraint that is the intersection of "this" and "other", i.e. a constraint for which the allowed values
// are the intersection of the allowed values of "this" and "other" (taking into account shift and mask)
Constraint Constraint::intersecting(const Constraint& other) const
{
    if ((mask == other.mask) && (shift == other.shift)) {
        // fast path: same bit window, so the result is a plain set
        // intersection of the allowed values.
        std::unordered_set<int> intersection;
        for (int allowedValue : allowedValues) {
            if (other.allowedValues.find(allowedValue) != other.allowedValues.end()) {
                intersection.insert(allowedValue);
            }
        }

        return Constraint {
            .mask = mask,
            .shift = shift,
            .allowedValues = intersection
        };
    }

    int shiftedMask = mask << shift;
    int otherShift = other.shift;
    int otherMask = other.mask;
    int otherShiftedMask = other.mask << otherShift;
    int intersectionMask = shiftedMask & otherShiftedMask;
    const std::unordered_set<int>& otherAllowedValues = other.allowedValues;

    // Always make sure we start with the left-most mask as self
    if (shiftedMask < otherShiftedMask) {
        return other.intersecting(*this);
    }

    // If there are no real constraints on our side, just return the other
    // (mask 0 with the single allowed value 0 constrains nothing).
    if ((mask == 0) && (allowedValues.size() == 1) && (*(allowedValues.begin()) == 0)) {
        return other;
    }

    // If there are no real constraints on the other side, just return our constraint
    if ((otherMask == 0) && (otherAllowedValues.size() == 1) && (*(otherAllowedValues.begin()) == 0)) {
        return *this;
    }

    if (otherShift >= shift) {
        // [self..[other]..self]  — other's window is fully inside ours.
        // Restrict the allowed values to make sure they have the right bits.
        int shiftDifference = otherShift - shift;
        std::unordered_set<int> combinedAllowedValues;
        for (int v : allowedValues) {
            int val = (v >> shiftDifference) & otherMask;
            if (otherAllowedValues.find(val) != otherAllowedValues.end()) {
                combinedAllowedValues.insert(v);
            }
        }

        return Constraint {
            .mask = mask,
            .shift = shift,
            .allowedValues = combinedAllowedValues
        };
    }

    // fls() returns 1-based index of the highest set bit (BSD).
    int highestBit = (int)fls(shiftedMask) - 1;
    int otherHighestBit = (int)fls(otherShiftedMask) - 1;
    int otherMaskLength = fls(otherMask + 1) - 1;

    if (otherShiftedMask < (1 << shift)) {
        // [self]....[other]  — disjoint windows, self strictly above other.
        // Start by shifting all the allowed values in self
        int numberOfUnconstrainedBits = shift - otherHighestBit - 1;
        int maxUnconstrained = 1 << numberOfUnconstrainedBits;
        std::set<int> includingUnrestrictedBits;

        if (numberOfUnconstrainedBits > 0) {
            // Bits between the two windows are free: enumerate every
            // combination of them between self's and other's bits.
            for (const int allowed : allowedValues) {
                int shifted = allowed << numberOfUnconstrainedBits;
                for (int unconstrained = 0; unconstrained < maxUnconstrained; unconstrained++) {
                    // Mix in unrestricted bits, then shift by [other]'s length
                    includingUnrestrictedBits.insert((shifted | unconstrained) << otherMaskLength);
                }
            };
        } else {
            for (const int allowed : allowedValues) {
                // Shift all the values by [other]'s length
                includingUnrestrictedBits.insert(allowed << otherMaskLength);
            }
        }

        // Or in the values for [other] (cross product of both value sets)
        std::unordered_set<int> finalAllowedValues;
        for (const int allowed : includingUnrestrictedBits) {
            for (const int otherValue : otherAllowedValues) {
                finalAllowedValues.insert(allowed | otherValue);
            }
        }

        return Constraint {
            .mask = ((1 << (highestBit + 1)) - 1) >> otherShift,
            .shift = otherShift,
            .allowedValues = finalAllowedValues
        };

    } else {
        // Overlap.
        // [self....[other....self].....other].......
        // We need to
        // * determine the set of bits allowed in the intersection
        // * filter each set of values to keep only these
        // * do the cross-product

        // Bits in the intersection

        int shiftDifference = shift - otherShift;
        std::set<int> selfIntersectingBits;
        for (const int v : allowedValues) {
            selfIntersectingBits.insert(((v << shift) & intersectionMask) >> shift);
        }
        std::set<int> otherIntersectingBits;
        for (const int v : otherAllowedValues) {
            otherIntersectingBits.insert(((v << otherShift) & intersectionMask) >> shift);
        }

        std::set<int> intersectingBits;
        std::set_intersection(selfIntersectingBits.begin(), selfIntersectingBits.end(), otherIntersectingBits.begin(), otherIntersectingBits.end(), std::inserter(intersectingBits, intersectingBits.begin()));

        std::unordered_set<int> values;
        // Swift here was constructing a list of values for self and other
        // filtered on which elements had the right values for intersectionMask
        // This would avoid the n^3 loop at the expense of some storage... FIXME
        for (const int intersecting : intersectingBits) {
            int intersectingShifted = intersecting << shift;
            for (int selfAllowed : allowedValues) {
                if (((selfAllowed << shift) & intersectionMask) == intersectingShifted) {
                    for (int otherAllowed : otherAllowedValues) {
                        if (((otherAllowed << otherShift) & intersectionMask) == intersectingShifted) {
                            values.insert((selfAllowed << shiftDifference) | otherAllowed);
                        }
                    }
                }
            }
        }

        return Constraint {
            .mask = (shiftedMask | otherShiftedMask) >> otherShift,
            .shift = otherShift,
            .allowedValues = values
        };
    }
}
659
// Print the set as "{a, b, ... }" (iteration order is unspecified).
std::ostream& operator << (std::ostream& o, const std::unordered_set<int> & s) {
    o << "{";
    for (const int value : s) {
        o << value << ", ";
    }
    o << "}";
    return o;
}
668
669 std::ostream& operator << (std::ostream& o, const Constraint& c) {
670 o << "(x >> " << c.shift << " & " << c.mask << " == " << c.allowedValues;
671 return o;
672 }
673
674 bool ConstraintSet::add(const Constraint& c) {
675 if (constraints.find(c) != constraints.end()) {
676 return false;
677 }
678 constraints.insert(c);
679 if (mergedConstraint.has_value()) {
680 mergedConstraint = mergedConstraint->intersecting(c);
681 } else {
682 mergedConstraint = c;
683 }
684 return true;
685 }
686
687 void ConstraintSet::clear() {
688 constraints.clear();
689 mergedConstraint.reset();
690 }
691
692 bool IMPCachesBuilder::isClassInteresting(const ObjCClass& theClass) const {
693 std::string_view classNameStr(theClass.className);
694 if (theClass.isMetaClass) {
695 return neededMetaclasses.find(classNameStr) != neededMetaclasses.end();
696 } else {
697 return neededClasses.find(classNameStr) != neededClasses.end();
698 }
699 }
700
701 bool IMPCachesBuilder::isClassInterestingOrTracked(const ObjCClass& theClass) const {
702 std::string_view classNameStr(theClass.className);
703 auto& neededArray = theClass.isMetaClass ? neededMetaclasses : neededClasses;
704 auto& trackedArray = theClass.isMetaClass ? trackedMetaclasses : trackedClasses;
705
706 return (neededArray.find(classNameStr) != neededArray.end()) ||
707 (trackedArray.find(classNameStr) != trackedArray.end());
708 }
709
710 void IMPCachesBuilder::addMethod(IMPCaches::ClassData* classDataPtr, const char* methodName, const char* installName, const char* className, const char* catName, bool inlined, bool fromFlattening) {
711 std::string_view methodNameView(methodName);
712
713 auto [selectorIterator, success] = selectors.map.try_emplace(methodNameView, std::make_unique<Selector>());
714 IMPCaches::Selector* thisSelectorData = selectorIterator->second.get();
715 if (success) {
716 thisSelectorData->name = methodName;
717 }
718
719 std::vector<ClassData::Method> & methods = classDataPtr->methods;
720 // Check in the existing methods to see if the method already exists...
721 bool exists = false;
722 for (const ClassData::Method& m : methods) {
723 if (m.selector == thisSelectorData) {
724 // printf("Preventing insertion of duplicate %s.%s\n", classDataPtr->name, methodName);
725 exists = true;
726 break;
727 }
728 }
729
730 if (!exists) {
731 ClassData::Method m {
732 .installName = installName,
733 .className = className,
734 .categoryName = catName,
735 .selector = thisSelectorData,
736 .wasInlined = inlined,
737 .fromFlattening = fromFlattening
738 };
739
740 thisSelectorData->classes.push_back(classDataPtr);
741 classDataPtr->methods.push_back(m);
742 }
743 }
744
745 void IMPCachesBuilder::inlineMethodIfNeeded(IMPCaches::ClassData* classToInlineIn, const char* classToInlineFrom, const char* catToInlineFrom, const char* installNameToInlineFrom, const char* name, std::set<Selector*>& seenSelectors, bool isFlattening) {
746 std::string_view nameView(name);
747
748 if ((nameView == ".cxx_construct") || (nameView == ".cxx_destruct")) {
749 // These selectors should never be inherited.
750 // object_cxxConstructFromClass / object_cxxDestructFromClass walk the class hierarchy and call them all.
751
752 return;
753 }
754
755 bool shouldInline = isFlattening || (selectorsToInline.find(nameView) != selectorsToInline.end());
756 if (shouldInline) {
757 // The selector hasn't necessarily been seen at this point: eg. we don't build an IMP cache for UIView, so we haven't seen -[UIView superview] yet.
758
759 auto [it, inserted] = selectors.map.try_emplace(nameView, std::make_unique<Selector>());
760 IMPCaches::Selector* thisSelectorData = it->second.get();
761
762 if (inserted) {
763 thisSelectorData->name = name;
764 }
765
766 if (inserted || seenSelectors.find(thisSelectorData) == seenSelectors.end()) {
767 seenSelectors.insert(thisSelectorData);
768 const bool inlined = true;
769 addMethod(classToInlineIn, name, installNameToInlineFrom, classToInlineFrom, catToInlineFrom, inlined, isFlattening);
770 }
771 }
772 }
773
// This goes through all the superclasses of the interesting classes so that
// we can track their methods for inlining.
// Since it goes through the superclasses, we also take this opportunity
// to add subclassses of duplicate classes to the duplicate classes set.
void IMPCachesBuilder::buildTrackedClasses(CacheBuilder::DylibInfo& dylib, const dyld3::MachOAnalyzer* ma) {
    dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(false);
    ma->forEachObjCClass(_diagnostics, vmAddrConverter, ^(Diagnostics& diag, uint64_t classVMAddr, uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr, const dyld3::MachOAnalyzer::ObjCClassInfo& objcClass, bool isMetaClass) {
        // Translate the class's VM address into a pointer inside the mapped file.
        const uint8_t* classLocation = (classVMAddr - ma->preferredLoadAddress()) + (uint8_t*)ma;

        // The class might not be in the map as we exclude classes with missing weak superclasses
        auto classIt = objcClasses.find(classLocation);
        if ( classIt == objcClasses.end() )
            return;
        const auto& theClass = classIt->second;
        ClassKey theClassKey {
            .name = theClass.className,
            .metaclass = theClass.isMetaClass
        };

        if (!isClassInteresting(theClass)) return;

        // Go through superclasses and add them to the tracked set.
        const IMPCaches::IMPCachesBuilder::ObjCClass* currentClass = &theClass;
        bool rootClass = false;
        do {
            ClassKey k {
                .name = currentClass->className,
                .metaclass = currentClass->isMetaClass
            };

            // If one of the superclasses of theClass is in the duplicate classes set,
            // add theClass to the duplicate classes as well.
            if (duplicateClasses.find(k) != duplicateClasses.end()) {
                duplicateClasses.insert(theClassKey);
            }

            if (currentClass->isMetaClass) {
                trackedMetaclasses.insert(currentClass->className);
            } else {
                trackedClasses.insert(currentClass->className);
            }
            rootClass = currentClass->isRootClass;
            if (!rootClass) {
                // The superclass might not be in the map as we exclude classes with missing weak superclasses
                auto superclassIt = objcClasses.find(currentClass->superclass);
                if ( superclassIt == objcClasses.end() )
                    break;
                currentClass = &superclassIt->second;
            }
        } while (!rootClass);
    });
}
826
/// Parses the method lists of all the classes in @param dylib so that we populate the methods we want in each IMP cache skeleton.
/// @param duplicateClassCount incremented for every class found to be part of a duplicate-name set.
void IMPCachesBuilder::populateMethodLists(CacheBuilder::DylibInfo& dylib, const dyld3::MachOAnalyzer* ma, int* duplicateClassCount) {
    const uint32_t pointerSize = ma->pointerSize();
    dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(false);

    ma->forEachObjCClass(_diagnostics, vmAddrConverter, ^(Diagnostics& diag, uint64_t classVMAddr, uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr, const dyld3::MachOAnalyzer::ObjCClassInfo& objcClass, bool isMetaClass) {

        const uint8_t* classLocation = (classVMAddr - ma->preferredLoadAddress()) + (uint8_t*)ma;

        // The class might not be in the map as we exclude classes with missing weak superclasses
        auto classIt = objcClasses.find(classLocation);
        if ( classIt == objcClasses.end() )
            return;
        const auto& theClass = classIt->second;

        if (!isClassInterestingOrTracked(theClass)) return;
        bool interesting = isClassInteresting(theClass);

        // Build an (initially empty) IMP-cache skeleton for this class.
        // Tracked-but-not-interesting classes get a skeleton too (their
        // methods matter for inlining) but no generated cache.
        std::string_view classNameString(theClass.className);
        std::unique_ptr<IMPCaches::ClassData> thisData = std::make_unique<ClassData>();
        IMPCaches::ClassData* thisDataPtr = thisData.get();
        thisData->name = theClass.className;
        thisData->isMetaclass = isMetaClass;
        thisData->shouldGenerateImpCache = interesting;

        // Record every method from the class's own method list whose name
        // we can read; unprintable names are skipped.
        ma->forEachObjCMethod(objcClass.baseMethodsVMAddr(pointerSize), vmAddrConverter, ^(uint64_t methodVMAddr, const dyld3::MachOAnalyzer::ObjCMethod& method) {
            dyld3::MachOAnalyzer::PrintableStringResult printableStringResult;
            const char* methodName = ma->getPrintableString(method.nameVMAddr, printableStringResult);
            if (printableStringResult != dyld3::MachOAnalyzer::PrintableStringResult::CanPrint) {
                return;
            }

            const bool inlined = false;
            const bool fromFlattening = false;
            addMethod(thisDataPtr, methodName, ma->installName(), theClass.className, NULL, inlined, fromFlattening);
        });

        ClassKey key {
            .name = classNameString,
            .metaclass = isMetaClass
        };
        // Each (name, metaclass) pair must be seen at most once per dylib.
        assert(dylib.impCachesClassData.find(key) == dylib.impCachesClassData.end());

        if (duplicateClasses.find(key) != duplicateClasses.end()) {
            // We can't just set shouldGenerateImpCache to false ; we do it later
            // when we have built the flattening hierarchies in order to drop
            // any related classes as well.

            thisData->isPartOfDuplicateSet = true;
            *duplicateClassCount += 1;
        }

        dylib.impCachesClassData[key] = std::move(thisData);
    });
}
882
/// Parses all the categories within the same image as a class so that we can add the corresponding methods to the IMP cache skeletons, too.
/// Cross-image categories are skipped (the config is expected to avoid them).
void IMPCachesBuilder::attachCategories(CacheBuilder::DylibInfo& dylib, const dyld3::MachOAnalyzer* ma) {
    dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(false);
    ma->forEachObjCCategory(_diagnostics, vmAddrConverter, ^(Diagnostics& diag, uint64_t categoryVMAddr, const dyld3::MachOAnalyzer::ObjCCategory& objcCategory) {

        const uint8_t* categoryLocation = (categoryVMAddr - ma->preferredLoadAddress()) + (uint8_t*)ma;
        ObjCCategory previouslyFoundCategory = objcCategories.at(categoryLocation);

        __block dyld3::MachOAnalyzer::PrintableStringResult printableStringResult;
        const char* catName = ma->getPrintableString(objcCategory.nameVMAddr, printableStringResult);

        if (previouslyFoundCategory.classMA != ma) {
            // Cross-image category: the class lives in a different image.
            return;
        }

        // The class might not be in the map as we exclude classes with missing weak superclasses
        auto classIt = objcClasses.find(previouslyFoundCategory.cls);
        if ( classIt == objcClasses.end() )
            return;
        const auto& theClass = classIt->second;

        auto& theMetaClass = objcClasses[theClass.metaClass];
        auto classNameString = std::string_view(theClass.className);

        // Instance methods from the category go into the class's cache...
        if (isClassInterestingOrTracked(theClass)) {
            ClassKey key {
                .name = classNameString,
                .metaclass = false
            };
            ClassData* clsData = dylib.impCachesClassData.at(key).get();

            ma->forEachObjCMethod(objcCategory.instanceMethodsVMAddr, vmAddrConverter, ^(uint64_t methodVMAddr, const dyld3::MachOAnalyzer::ObjCMethod& method) {
                // The config file should specify only classes without cross-image categories, so we should have found a class here
                assert(clsData != NULL);
                const char* methodName = ma->getPrintableString(method.nameVMAddr, printableStringResult);
                const bool inlined = false;
                const bool fromFlattening = false;
                addMethod(clsData, methodName, ma->installName(), theClass.className, catName, inlined, fromFlattening);
            });
        }
        // ...and class methods go into the metaclass's cache.
        if (isClassInterestingOrTracked(theMetaClass)) {
            ClassKey key {
                .name = classNameString,
                .metaclass = true
            };
            ClassData* metaclsData = dylib.impCachesClassData.at(key).get();

            ma->forEachObjCMethod(objcCategory.classMethodsVMAddr, vmAddrConverter, ^(uint64_t methodVMAddr, const dyld3::MachOAnalyzer::ObjCMethod& method) {
                assert(metaclsData != NULL);
                const char* methodName = ma->getPrintableString(method.nameVMAddr, printableStringResult);
                const bool inlined = false;
                const bool fromFlattening = false;
                addMethod(metaclsData, methodName, ma->installName(), theMetaClass.className, catName, inlined, fromFlattening);
            });
        }
    });
}
941
942 struct FlatteningRootLookupResult {
943 bool isInFlatteningHierarchy;
944
945 const uint8_t * flatteningRootSuperclassLocation;
946 ClassData::ClassLocator flatteningRootSuperclass;
947 std::set<std::string_view> superclassesInFlatteningHierarchy;
948 const char * flatteningRootName;
949 };
950
951 static FlatteningRootLookupResult findFlatteningRoot(const uint8_t* classLocation,
952 const std::unordered_map<const uint8_t*, IMPCachesBuilder::ObjCClass> & objcClasses,
953 const std::unordered_set<std::string_view> & classHierarchiesToFlatten,
954 const std::unordered_set<std::string_view> & metaclassHierarchiesToFlatten,
955 bool storeSuperclasses) {
956 FlatteningRootLookupResult result;
957 const uint8_t* superclassLocation = classLocation;
958 __block bool rootClass = false;
959 bool success = false;
960
961 while (!rootClass) {
962 const auto it = objcClasses.find(superclassLocation);
963 if (it == objcClasses.end()) {
964 break;
965 }
966 const IMPCachesBuilder::ObjCClass& iteratedClass = it->second;
967 rootClass = iteratedClass.isRootClass;
968 superclassLocation = iteratedClass.superclass;
969
970 if (storeSuperclasses) {
971 result.superclassesInFlatteningHierarchy.insert(iteratedClass.className);
972 }
973
974 bool metaClassBeingFlattened = iteratedClass.isMetaClass && metaclassHierarchiesToFlatten.find(iteratedClass.className) != metaclassHierarchiesToFlatten.end();
975 bool classBeingFlattened = !iteratedClass.isMetaClass && classHierarchiesToFlatten.find(iteratedClass.className) != classHierarchiesToFlatten.end();
976
977 if (metaClassBeingFlattened || classBeingFlattened) {
978 result.flatteningRootName = iteratedClass.className;
979 result.flatteningRootSuperclassLocation = iteratedClass.superclass;
980 result.flatteningRootSuperclass = iteratedClass.superclassLocator();
981 success = true;
982 break;
983 }
984 }
985
986 result.isInFlatteningHierarchy = success;
987
988 return result;
989 }
/// Inline selectors from parent classes into child classes for performance.
/// For each interesting class, walks its superclass chain and copies eligible
/// superclass methods into the class's own IMP cache skeleton; classes inside a
/// "flattened" hierarchy inline everything up to the flattening root.
void IMPCachesBuilder::inlineSelectors(CacheBuilder::DylibInfo& dylib, std::unordered_map<std::string_view, CacheBuilder::DylibInfo*> & dylibsByInstallName, const dyld3::MachOAnalyzer* ma) {
    dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(false);
    ma->forEachObjCClass(_diagnostics, vmAddrConverter, ^(Diagnostics& diag, uint64_t classVMAddr, uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr, const dyld3::MachOAnalyzer::ObjCClassInfo& objcClass, bool isMetaClass) {

        const uint8_t* classLocation = (classVMAddr - ma->preferredLoadAddress()) + (uint8_t*)ma;

        // The class might not be in the map as we exclude classes with missing weak superclasses
        auto classIt = objcClasses.find(classLocation);
        if ( classIt == objcClasses.end() )
            return;
        const auto& theClass = classIt->second;

        // Only classes that will get an IMP cache need inlining work.
        if (!isClassInteresting(theClass)) {
            return;
        }

        std::string_view classNameString(theClass.className);
        ClassKey key {
            .name = classNameString,
            .metaclass = isMetaClass
        };

        // populateMethodLists() must already have created a skeleton for this class.
        IMPCaches::ClassData* thisDataPtr = dylib.impCachesClassData.at(key).get();
        assert(thisDataPtr != NULL);

        // Selectors already present on the class itself; inlineMethodIfNeeded() uses
        // this to avoid overriding them with superclass methods.
        __block std::set<Selector*> seenSelectors;
        for (const ClassData::Method& method : thisDataPtr->methods) {
            seenSelectors.insert(method.selector);
        };

        // Check the superclass hierarchy to see if we're in a flattened hierarchy
        // (meaning we should inline all of the selectors up to the flattening root)
        FlatteningRootLookupResult flatteningInfo = findFlatteningRoot(classLocation, objcClasses, classHierarchiesToFlatten, metaclassHierarchiesToFlatten, false);

        if (flatteningInfo.isInFlatteningHierarchy) {
            // try again and record superclasses this time
            // (maybe premature optimization, but given the small number of classes where flattening
            // is actually happening I did not want to gather this set every time)

            flatteningInfo = findFlatteningRoot(classLocation, objcClasses, classHierarchiesToFlatten, metaclassHierarchiesToFlatten, true);

            assert(flatteningInfo.isInFlatteningHierarchy);

            thisDataPtr->flatteningRootSuperclass = flatteningInfo.flatteningRootSuperclass;
            thisDataPtr->flatteningRootName = flatteningInfo.flatteningRootName;
            thisDataPtr->flattenedSuperclasses = flatteningInfo.superclassesInFlatteningHierarchy;
        }

        // Iterate again to actually flatten/inline the selectors
        const uint8_t *superclassLocation = classLocation;
        __block const dyld3::MachOAnalyzer* currentMA = ma;
        bool isRootClass = false;
        bool isFlattening = flatteningInfo.isInFlatteningHierarchy;

        while (!isRootClass) {
            const auto it = objcClasses.find(superclassLocation);
            if (it == objcClasses.end()) {
                break;
            }
            ObjCClass& iteratedClass = it->second;
            isRootClass = iteratedClass.isRootClass;

            // Look up the iterated (super)class's skeleton in the dylib that defines it.
            CacheBuilder::DylibInfo* classDylib = dylibsByInstallName.at(currentMA->installName());
            ClassKey keyForIteratedClass {
                .name = std::string_view(iteratedClass.className),
                .metaclass = iteratedClass.isMetaClass
            };
            auto classDataIt = classDylib->impCachesClassData.find(keyForIteratedClass);

            // We should have added this class to our data in populateMethodLists()
            assert(classDataIt != classDylib->impCachesClassData.end());

            IMPCaches::ClassData* classData = classDataIt->second.get();

            for (const ClassData::Method& m : classData->methods) {
                // If the method found in the superclass was inlined from a further superclass, we'll inline it
                // when we reach that class (otherwise the install name / class name the method is coming from will be wrong)
                if (!m.wasInlined) {
                    inlineMethodIfNeeded(thisDataPtr, m.className, m.categoryName, currentMA->installName(), m.selector->name, seenSelectors, isFlattening);
                }
            }

            // Follow the superclass link into (possibly) another image.
            currentMA = iteratedClass.superclassMA;
            assert(isRootClass || (currentMA != nullptr));
            superclassLocation = iteratedClass.superclass;

            if (isFlattening && (iteratedClass.superclass == flatteningInfo.flatteningRootSuperclassLocation)) {
                // we reached the flattening root, turn flattening off
                isFlattening = false;
            }
        }
    });
}
1084
/// Parses all the source dylibs to fill the IMP cache skeletons with all the methods we want to have there.
/// Phases: track classes -> populate method lists -> attach categories -> inline selectors ->
/// drop uninteresting classes -> finalize and sort inlined selectors.
/// @return false if the total selector size exceeds the 16 MB budget assumed by the
///         placement algorithm (in which case all IMP caches are dropped).
bool IMPCachesBuilder::parseDylibs(Diagnostics& diag) {
    std::unordered_map<std::string_view, CacheBuilder::DylibInfo*> dylibsByInstallName;

    for (CacheBuilder::DylibInfo& d : dylibs) {
        const DyldSharedCache::MappedMachO& mapped = d.input->mappedFile;
        const dyld3::MachOAnalyzer* ma = mapped.mh;

        dylibsByInstallName.insert(std::make_pair(std::string_view(ma->installName()), &d));

        // Build the set of tracked classes (interesting classes + their superclasses)
        buildTrackedClasses(d, ma);
    }

    int totalDuplicateClassCount = 0;

    for (CacheBuilder::DylibInfo& d : dylibs) {
        const DyldSharedCache::MappedMachO& mapped = d.input->mappedFile;
        const dyld3::MachOAnalyzer* ma = mapped.mh;

        int duplicateClassCount = 0;
        // First, go through all classes and populate their method lists.
        populateMethodLists(d, ma, &duplicateClassCount);
        totalDuplicateClassCount += duplicateClassCount;

        // Now go through all categories and attach them as well
        attachCategories(d, ma);
    }

    _diagnostics.verbose("[IMP caches] Not generating caches for %d duplicate classes or children of duplicate classes\n", totalDuplicateClassCount);

    // Ensure that all the selectors will fit on 16 MB as that's the constant
    // embedded in the placement algorithm
    uint32_t totalSize = 0;
    for (const auto& [k,v] : selectors.map) {
        totalSize += v->size();
    }
    if (totalSize >= (1 << 24)) {
        diag.warning("Dropping all IMP caches ; too many selectors\n");
        return false;
    }

    for (CacheBuilder::DylibInfo& d : dylibs) {
        const DyldSharedCache::MappedMachO& mapped = d.input->mappedFile;
        const dyld3::MachOAnalyzer* ma = mapped.mh;

        // Now that all categories are attached, handle any selector inheritance if needed
        // (Do this after category attachment so that inlined selectors don't override categories)

        inlineSelectors(d, dylibsByInstallName, ma);
    }

    removeUninterestingClasses();

    unsigned count = 0;
    for (CacheBuilder::DylibInfo& d : dylibs) {
        count += d.impCachesClassData.size();
    }

    // Flip to true locally when debugging to dump every cache's contents.
    constexpr bool logAllSelectors = false;

    diag.verbose("[IMP Caches] parsed %u classes\n", count);
    for (CacheBuilder::DylibInfo& d : dylibs) {
        for (auto it = d.impCachesClassData.begin() ; it != d.impCachesClassData.end() ; it++) {
            auto& c = it->second;
            // Let each class compute its final per-class state now that methods are final.
            c->didFinishAddingMethods();
            if (logAllSelectors) {
                printf("%s\n", c->description().c_str());
                std::vector<ClassData::Method> sortedMethods(c->methods.begin(), c->methods.end());
                std::sort(sortedMethods.begin(), sortedMethods.end(), [](const ClassData::Method& a, const ClassData::Method& b) {
                    return strcmp(a.selector->name, b.selector->name) < 0;
                });
                for (const ClassData::Method& m : sortedMethods) {
                    const Selector* s = m.selector;
                    printf("    %s", s->name);
                    if (m.categoryName != nullptr) {
                        printf(" (from %s::%s+%s)\n", m.installName, m.className, m.categoryName);
                    } else if (m.className != c->name) {
                        printf(" (from %s::%s)\n", m.installName, m.className);
                    } else {
                        printf("\n");
                    }
                }
            }
        }
    }

    // Resolve the configured selectorsToInline against the selectors we actually saw.
    for (auto& s : selectorsToInline) {
        auto selectorsIt = selectors.map.find(s);
        if (selectorsIt == selectors.map.end()) {
            diag.warning("Requested selector to inline not found in any classes: %s\n", s.data());
            continue;
        }
        inlinedSelectors.push_back(selectors.map.at(s).get());
    }
    // Keep inlined selectors in address order for deterministic output.
    std::sort(inlinedSelectors.begin(), inlinedSelectors.end(), [](const Selector* a, const Selector *b) {
        return a->offset < b->offset;
    });

    return true;
}
1186
1187 IMPCachesBuilder::TargetClassFindingResult IMPCachesBuilder::findTargetClass(const dyld3::MachOAnalyzer* ma, uint64_t targetClassVMAddr, uint64_t targetClassPointerVMAddr,const char* logContext, const std::unordered_map<uint64_t, uint64_t> & bindLocations, std::vector<BindTarget> & bindTargets, std::unordered_map<std::string, DylibAndDeps> &dylibMap) {
1188 constexpr bool log = false;
1189 uint64_t loadAddress = ma->preferredLoadAddress();
1190
1191 uint64_t superclassRuntimeOffset = targetClassPointerVMAddr - loadAddress;
1192 auto bindIt = bindLocations.find(superclassRuntimeOffset);
1193
1194 if ( bindIt == bindLocations.end() ) {
1195 if (targetClassVMAddr != 0) {
1196 // A rebase, ie, a fixup to a class in this dylib
1197 if ( log )
1198 printf("%s: %s -> this dylib\n", ma->installName(), logContext);
1199 superclassRuntimeOffset = targetClassVMAddr - loadAddress;
1200 return TargetClassFindingResult {
1201 .success = true,
1202 .foundInDylib = ma,
1203 .location = (const uint8_t*)ma + superclassRuntimeOffset
1204 };
1205 } else {
1206 // A bind, ie, a fixup to a class in another dylib
1207 return TargetClassFindingResult { .success = false };
1208 }
1209 }
1210
1211 const BindTarget& bindTarget = bindTargets[bindIt->second];
1212 if ( log )
1213 printf("%s: %s -> %s: %s\n", ma->installName(), logContext,
1214 bindTarget.targetDylib->installName(), bindTarget.symbolName.c_str());
1215
1216 dyld3::MachOLoaded::DependentToMachOLoaded finder = ^(const dyld3::MachOLoaded* mh, uint32_t depIndex) {
1217 auto dylibIt = dylibMap.find(mh->installName());
1218 if ( dylibIt == dylibMap.end() ) {
1219 // Missing weak dylib?
1220 return (const dyld3::MachOLoaded*)nullptr;
1221 }
1222
1223 if ( depIndex >= dylibIt->second.dependentLibraries.size() ) {
1224 // Out of bounds dependent
1225 assert(false);
1226 }
1227
1228 auto depIt = dylibMap.find(dylibIt->second.dependentLibraries[depIndex]);
1229 if ( depIt == dylibMap.end() ) {
1230 // Missing weak dylib?
1231 return (const dyld3::MachOLoaded*)nullptr;
1232 }
1233 return (const dyld3::MachOLoaded*)depIt->second.ma;
1234 };
1235
1236 if (bindTarget.targetDylib != nullptr) {
1237 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
1238 bool found = bindTarget.targetDylib->findExportedSymbol(_diagnostics, bindTarget.symbolName.c_str(),
1239 false, foundInfo, finder);
1240 if ( !found ) {
1241 if ( !bindTarget.isWeakImport ) {
1242 _diagnostics.verbose("Could not find non-weak target class: '%s'\n", bindTarget.symbolName.c_str());
1243 }
1244 // We couldn't find the target class. This is an error if the symbol is not a weak-import, but
1245 // we'll let symbol resolution work that out later if we really have a missing symbol which matters
1246 // For IMP caches, we'll just ignore this class.
1247 return TargetClassFindingResult { .success = false };
1248 }
1249 assert(found);
1250 assert(foundInfo.kind == dyld3::MachOAnalyzer::FoundSymbol::Kind::headerOffset);
1251 if (foundInfo.foundInDylib == nullptr) {
1252 if ( log )
1253 printf("null foundInDylib\n");
1254 }
1255 return TargetClassFindingResult {
1256 .success = true,
1257 .foundInDylib = foundInfo.foundInDylib,
1258 .location = (uint8_t*)foundInfo.foundInDylib + foundInfo.value,
1259 };
1260 } else {
1261 if ( log )
1262 printf("No target dylib found for %s\n", logContext);
1263 return TargetClassFindingResult { .success = false };
1264 }
1265 }
1266
/// Builds objcClasses and objcCategories: for every dylib, parses bind/rebase fixups,
/// resolves each class's superclass and each category's class (possibly cross-image),
/// and records duplicate class names so they can be excluded later.
void IMPCachesBuilder::buildClassesMap(Diagnostics& diag) {
    // IMP caches are only built for 64-bit shared caches.
    const uint32_t pointerSize = 8;

    __block std::unordered_map<std::string, DylibAndDeps> dylibMap;

    // Pass 1: index every dylib by install name, with its dependent library paths.
    for (CacheBuilder::DylibInfo& dylib : dylibs) {
        const dyld3::MachOAnalyzer* ma = dylib.input->mappedFile.mh;
        __block std::vector<std::string> dependentLibraries;
        ma->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport,
                                    bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool &stop) {
            dependentLibraries.push_back(loadPath);
        });
        dylibMap[ma->installName()] = { ma, dependentLibraries };
    }

    const bool log = false;

    // Used to detect classes with the same (name, metaclass-ness) in multiple images.
    __block ClassSet seenClasses;

    // Pass 2: per dylib, gather bind targets + bind locations, then walk classes and categories.
    for (CacheBuilder::DylibInfo& dylib : dylibs) {
        const dyld3::MachOAnalyzer* ma = dylib.input->mappedFile.mh;
        __block std::vector<const dyld3::MachOAnalyzer*> dependentLibraries;
        ma->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport,
                                    bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
            auto it = dylibMap.find(loadPath);
            if (it == dylibMap.end()) {
                char resolvedSymlinkPath[PATH_MAX];

                // The dylib may link on a dylib which has moved and has a symlink to a new path.
                if (_fileSystem.getRealPath(loadPath, resolvedSymlinkPath)) {
                    it = dylibMap.find(resolvedSymlinkPath);
                }
            }
            if (it == dylibMap.end()) {
                // Only a missing weak dependency is tolerated; keep index alignment with a null slot.
                assert(isWeak);
                dependentLibraries.push_back(nullptr);
            } else {
                dependentLibraries.push_back(it->second.ma);
            }
        });
        __block std::vector<BindTarget> bindTargets;

        // Map from vmOffsets in this binary to which bind target they point to
        __block std::unordered_map<uint64_t, uint64_t> bindLocations;
        if ( ma->hasChainedFixups() ) {
            // arm64e binaries
            ma->forEachChainedFixupTarget(diag, ^(int libOrdinal, const char* symbolName, uint64_t addend, bool weakImport, bool &stop) {
                if (libOrdinal == BIND_SPECIAL_DYLIB_SELF) {
                    bindTargets.push_back({ symbolName, ma, weakImport });
                } else if ( libOrdinal < 0 ) {
                    // A special ordinal such as weak. Just put in a placeholder for now
                    bindTargets.push_back({ symbolName, nullptr, weakImport });
                } else {
                    assert(libOrdinal <= (int)dependentLibraries.size());
                    const dyld3::MachOAnalyzer *target = dependentLibraries[libOrdinal-1];
                    assert(weakImport || (target != nullptr));
                    bindTargets.push_back({ symbolName, target, weakImport });
                }
            });
            ma->withChainStarts(diag, 0, ^(const dyld_chained_starts_in_image* startsInfo) {
                ma->forEachFixupInAllChains(diag, startsInfo, false, ^(dyld3::MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc,
                                                                      const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
                    uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)ma;
                    uint32_t bindOrdinal;
                    int64_t  addend;
                    if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal, addend) ) {
                        if ( bindOrdinal < bindTargets.size() ) {
                            bindLocations[fixupOffset] = bindOrdinal;
                        }
                        else {
                            diag.error("out of range bind ordinal %d (max %lu)", bindOrdinal, bindTargets.size());
                            fixupsStop = true;
                        }
                    }
                });
            });
        } else {
            // Non-arm64e (for now...)
            ma->forEachBind(diag, ^(uint64_t runtimeOffset, int libOrdinal, const char* symbolName,
                                    bool weakImport, bool lazyBind, uint64_t addend, bool &stop) {
                if ( log )
                    printf("0x%llx %s: %s\n", runtimeOffset, ma->installName(), symbolName);
                if ( libOrdinal < 0 ) {
                    // A special ordinal such as weak. Just put in a placeholder for now
                    bindTargets.push_back({ symbolName, nullptr, weakImport });
                } else if (libOrdinal == BIND_SPECIAL_DYLIB_SELF) {
                    bindTargets.push_back({ symbolName, ma, weakImport });
                } else {
                    assert(libOrdinal <= (int)dependentLibraries.size());
                    bindTargets.push_back({ symbolName, dependentLibraries[libOrdinal - 1], weakImport });
                }
                bindLocations[runtimeOffset] = bindTargets.size() - 1;
            }, ^(const char* symbolName) {
                // We don't need this as its only for weak def coalescing
            });
        }
        const uint64_t loadAddress = ma->preferredLoadAddress();
        dyld3::MachOAnalyzer::VMAddrConverter vmAddrConverter = ma->makeVMAddrConverter(false);
        // Record every ObjC class whose superclass we can resolve (or which is a root class).
        ma->forEachObjCClass(diag, vmAddrConverter, ^(Diagnostics& maDiag, uint64_t classVMAddr,
                                                      uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
                                                      const dyld3::MachOAnalyzer::ObjCClassInfo& objcClass, bool isMetaClass) {
            uint64_t classNameVMAddr = objcClass.nameVMAddr(pointerSize);
            dyld3::MachOAnalyzer::PrintableStringResult classNameResult = dyld3::MachOAnalyzer::PrintableStringResult::UnknownSection;
            const char* className = ma->getPrintableString(classNameVMAddr, classNameResult);
            assert(classNameResult == dyld3::MachOAnalyzer::PrintableStringResult::CanPrint);
            if ( log )
                printf("%s: %s\n", ma->installName(), className);

            // RO_ROOT flag in the class_ro_t flags marks a root class.
            const uint32_t RO_ROOT = (1<<1);
            bool isRootClass = (objcClass.flags(pointerSize) & RO_ROOT) != 0;
            auto result = findTargetClass(ma, objcClass.superclassVMAddr, classSuperclassVMAddr, className, bindLocations, bindTargets, dylibMap);

            if (result.success || isRootClass) {
                uint64_t classRuntimeOffset = classVMAddr - loadAddress;
                const uint8_t* classLocation = (const uint8_t*)ma + classRuntimeOffset;
                objcClasses[classLocation] = ObjCClass {
                    .superclassMA = (const dyld3::MachOAnalyzer*) result.foundInDylib,
                    // A metaclass has no separate metaclass pointer worth tracking here.
                    .metaClass = isMetaClass ? nullptr : ((const uint8_t*)ma + objcClass.isaVMAddr - loadAddress),
                    .superclass = result.location,
                    .methodListVMaddr = objcClass.baseMethodsVMAddr(pointerSize),
                    .className = className,
                    .isRootClass = isRootClass,
                    .isMetaClass = isMetaClass,
                };

                ClassKey k {
                    .name = className,
                    .metaclass = isMetaClass
                };

                // Track duplicate (name, metaclass) pairs across all images.
                auto [it, success] = seenClasses.insert(k);
                if (!success) {
                    duplicateClasses.insert(k);
                }
            }
        });

        // Record every category with the class it targets (null for unresolvable weak classes).
        ma->forEachObjCCategory(diag, vmAddrConverter, ^(Diagnostics& maDiag, uint64_t categoryVMAddr, const dyld3::MachOAnalyzer::ObjCCategory& objcCategory) {
            dyld3::MachOAnalyzer::PrintableStringResult catNameResult = dyld3::MachOAnalyzer::PrintableStringResult::UnknownSection;
            const char *catName = ma->getPrintableString(objcCategory.nameVMAddr, catNameResult);

            // The category's class pointer slot is right after its name pointer.
            uint64_t clsVMAddr = categoryVMAddr + pointerSize;
            TargetClassFindingResult result = findTargetClass(ma, objcCategory.clsVMAddr, clsVMAddr, catName, bindLocations, bindTargets, dylibMap);
            uint64_t catRuntimeOffset = categoryVMAddr - loadAddress;
            const uint8_t *catLocation = (const uint8_t*)ma + catRuntimeOffset;
            if (result.success) {
                objcCategories[catLocation] = ObjCCategory {
                    .classMA = (const dyld3::MachOAnalyzer*)result.foundInDylib,
                    .cls = result.location
                };
            } else {
                // This happens for categories on weak classes that may be missing.
                objcCategories[catLocation] = ObjCCategory {
                    .classMA = (const dyld3::MachOAnalyzer*)nullptr,
                    .cls = nullptr
                };
            }
        });
    }

    // Print the class hierarchy just to see that we found everything
    if (log) {
        for (const auto& [location, theClass] : objcClasses) {
            printf("%p %c%s", location, theClass.isMetaClass ? '+' : '-', theClass.className);
            bool isRoot = theClass.isRootClass;
            const uint8_t* superclass = theClass.superclass;
            while ( !isRoot ) {
                auto it = objcClasses.find(superclass);
                // assert(it != objcClasses.end());
                if (it == objcClasses.end()) {
                    printf(": missing");
                    break;
                }
                printf(" : %c%s", it->second.isMetaClass ? '+' : '-', it->second.className);
                isRoot = it->second.isRootClass;
                superclass = it->second.superclass;
            }
            printf("\n");
        }
    }
}
1448
1449 const std::string * IMPCachesBuilder::nameAndIsMetaclassPairFromNode(const dyld3::json::Node & node, bool* metaclass) {
1450 const dyld3::json::Node& metaclassNode = dyld3::json::getRequiredValue(_diagnostics, node, "metaclass");
1451 if (_diagnostics.hasError()) return nullptr;
1452
1453 if (metaclass != nullptr) *metaclass = dyld3::json::parseRequiredInt(_diagnostics, metaclassNode) != 0;
1454 const dyld3::json::Node& nameNode = dyld3::json::getRequiredValue(_diagnostics, node, "name");
1455 if (_diagnostics.hasError()) return nullptr;
1456
1457 return &(dyld3::json::parseRequiredString(_diagnostics, nameNode));
1458 }
1459
1460 IMPCachesBuilder::IMPCachesBuilder(std::vector<CacheBuilder::DylibInfo>& allDylibs, const dyld3::json::Node& optimizerConfiguration, Diagnostics& diag, TimeRecorder& timeRecorder, const dyld3::closure::FileSystem& fileSystem) : dylibs(allDylibs), _diagnostics(diag), _timeRecorder(timeRecorder), _fileSystem(fileSystem) {
1461 const dyld3::json::Node * version = dyld3::json::getOptionalValue(diag, optimizerConfiguration, "version");
1462 int64_t versionInt = (version != NULL) ? dyld3::json::parseRequiredInt(diag, *version) : 1;
1463 if (versionInt == 2) {
1464 // v2 has a single neededClasses array, with a key to know if it's a metaclass
1465 // or class. This lets us order them by importance so that we handle the important
1466 // cases first in the algorithm, while it's still easy to place things (as we process
1467 // more classes, constraints build up and we risk dropping difficult classes)
1468
1469 const dyld3::json::Node& classes = dyld3::json::getRequiredValue(diag, optimizerConfiguration, "neededClasses");
1470 if (diag.hasError()) return;
1471
1472 int i = 0;
1473 for (const dyld3::json::Node& n : classes.array) {
1474 bool metaclass = false;
1475 const std::string *name = nameAndIsMetaclassPairFromNode(n, &metaclass);
1476 if (name != nullptr) {
1477 if (metaclass) {
1478 neededMetaclasses[*name] = i++;
1479 } else {
1480 neededClasses[*name] = i++;
1481 }
1482 } else {
1483 // nameAndIsMetaclassPairFromNode already logs an error in this case,
1484 // so nothing to do here
1485 }
1486 }
1487 } else {
1488 auto metaclasses = optimizerConfiguration.map.find("neededMetaclasses");
1489 int i = 0;
1490
1491 if (metaclasses != optimizerConfiguration.map.cend()) {
1492 for (const dyld3::json::Node& n : metaclasses->second.array) {
1493 neededMetaclasses[n.value] = i++;
1494 }
1495 }
1496
1497 auto classes = optimizerConfiguration.map.find("neededClasses");
1498
1499 if (classes != optimizerConfiguration.map.cend()) {
1500 for (const dyld3::json::Node& n : classes->second.array) {
1501 neededClasses[n.value] = i++;
1502 }
1503 }
1504 }
1505
1506 auto sels = optimizerConfiguration.map.find("selectorsToInline");
1507 if (sels != optimizerConfiguration.map.cend()) {
1508 for (const dyld3::json::Node& n : sels->second.array) {
1509 selectorsToInline.insert(n.value);
1510 }
1511 }
1512
1513 const dyld3::json::Node* classHierarchiesToFlattenNode = dyld3::json::getOptionalValue(diag, optimizerConfiguration, "flatteningRoots");
1514 if (classHierarchiesToFlattenNode != nullptr) {
1515 for (const dyld3::json::Node& n : classHierarchiesToFlattenNode->array) {
1516 bool metaclass = false;
1517 const std::string *name = nameAndIsMetaclassPairFromNode(n, &metaclass);
1518 if (metaclass) {
1519 metaclassHierarchiesToFlatten.insert(*name);
1520 } else {
1521 classHierarchiesToFlatten.insert(*name);
1522 }
1523 }
1524 } else {
1525 // For old files, we assume we should flatten OS_object, this was implied
1526 // before we decided to extend this set.
1527
1528 metaclassHierarchiesToFlatten.insert("OS_object");
1529 classHierarchiesToFlatten.insert("OS_object");
1530 }
1531 }
1532
1533 struct BacktrackingState {
1534 // Index into the next array that we are currently trying
1535 int currentAttemptIndex;
1536
1537 // Possible placement attempts for this class
1538 std::vector<typename IMPCaches::ClassData::PlacementAttempt> attempts;
1539
1540 // What we had to modify to attempt the current placement. This needs
1541 // to be reversed if we backtrack.
1542 std::optional<IMPCaches::ClassData::PlacementAttempt::Result> result;
1543
1544 // We also need to store the state of the random number generator,
1545 // because when reverting to a snapshot we need to apply exactly the same
1546 // steps as last time.
1547 std::minstd_rand randomNumberGenerator;
1548
1549 bool operator== (const BacktrackingState & other) {
1550 // Don't test attempts, they will be the same as long as the class index is
1551 // the same, and we never compare states for different indices
1552 return (currentAttemptIndex == other.currentAttemptIndex) && (randomNumberGenerator == other.randomNumberGenerator);
1553 }
1554
1555 bool operator!= (const BacktrackingState & other) {
1556 return !(*this == other);
1557 }
1558 };
1559
1560 static void backtrack(std::vector<BacktrackingState>& backtrackingStack, int& numberOfDroppedClasses, std::vector<IMPCaches::ClassData*>& allClasses) {
1561 int i = (int)backtrackingStack.size() - 1;
1562 assert(i>0);
1563 BacktrackingState & last = backtrackingStack.back();
1564 if (!last.result) {
1565 // backtracking over a skipped class
1566 numberOfDroppedClasses--;
1567 }
1568 allClasses[i]->backtrack(*(last.result));
1569 backtrackingStack.pop_back();
1570 };
1571
1572 template <typename Func>
1573 static void forEachClassInFlatteningHierarchy(const IMPCaches::ClassData *parentClass, const std::vector<IMPCaches::ClassData*>& allClasses, const Func &callback) {
1574 if (!parentClass->flatteningRootSuperclass.has_value()) {
1575 return;
1576 }
1577 for (IMPCaches::ClassData * c : allClasses) {
1578 // If c has parentClass in its flattening hierarchy
1579 if ((c != parentClass)
1580 && c->flatteningRootSuperclass.has_value()
1581 && (*(c->flatteningRootSuperclass) == *(parentClass->flatteningRootSuperclass))
1582 && (c->flatteningRootName == parentClass->flatteningRootName)
1583 && (c->flattenedSuperclasses.find(parentClass->name) != c->flattenedSuperclasses.end())) {
1584 callback(c);
1585 }
1586 }
1587 }
1588
1589 static void dropClass(Diagnostics& diagnostics, unsigned long& currentClassIndex, int& numberOfDroppedClasses, std::vector<BacktrackingState>& backtrackingStack, std::minstd_rand& randomNumberGenerator, std::vector<IMPCaches::ClassData*>& allClasses, const char* reason) {
1590 IMPCaches::ClassData* droppedClass = allClasses[currentClassIndex];
1591
1592 diagnostics.verbose("%lu: dropping class %s (%s) because %s\n", currentClassIndex, droppedClass->name, droppedClass->isMetaclass ? "metaclass" : "class", reason);
1593 droppedClass->shouldGenerateImpCache = false;
1594
1595 // If we are inside a flattened hierarchy, we need to also drop any classes inheriting from us,
1596 // as objc relies on all classes inside a flattened hierarchy having constant caches to do invalidation
1597 // properly.
1598 forEachClassInFlatteningHierarchy(droppedClass, allClasses, [&numberOfDroppedClasses, &diagnostics, currentClassIndex](IMPCaches::ClassData *c){
1599 // Drop it as well.
1600 // We could undrop them if we undrop droppedClass while backtracking or restoring
1601 // a snapshot, but it's not worth the effort.
1602
1603 if (c->shouldGenerateImpCache) {
1604 numberOfDroppedClasses++;
1605 c->shouldGenerateImpCache = false;
1606 c->droppedBecauseFlatteningSuperclassWasDropped = true;
1607 diagnostics.verbose("%lu: also dropping %s (%s) in the same flattening hierarchy\n", currentClassIndex, c->name, c->isMetaclass ? "metaclass" : "class");
1608 }
1609 });
1610
1611 currentClassIndex++;
1612 numberOfDroppedClasses++;
1613
1614 BacktrackingState state = {
1615 .currentAttemptIndex = 0,
1616 .randomNumberGenerator = randomNumberGenerator
1617 };
1618
1619 backtrackingStack.push_back(state);
1620 };
1621
/// Rewinds the backtracking stack to the last point where it agrees with
/// bestSolutionSnapshot, then replays the snapshot's remaining steps so that the
/// placement state (selector constraints, RNG sequence) exactly matches the snapshot.
static void resetToSnapshot(std::vector<BacktrackingState>& backtrackingStack, std::vector<BacktrackingState>& bestSolutionSnapshot, std::vector<IMPCaches::ClassData*>& allClasses, int& numberOfDroppedClasses) {

    // First, backtrack if needed until we reach the first different step.
    int firstDifferentStep = -1;
    for (int i = 0 ; i < backtrackingStack.size() && i < bestSolutionSnapshot.size() ; i++) {
        if (backtrackingStack[i] != bestSolutionSnapshot[i]) {
            firstDifferentStep = i;
            break;
        }
    }

    // No divergence found within the common prefix: the shorter stack is a prefix
    // of the longer one, so the first "different" step is right past it.
    if (firstDifferentStep == -1) {
        firstDifferentStep = MIN((int)backtrackingStack.size(), (int)bestSolutionSnapshot.size());
    }

    // Undo steps until only the common prefix remains.
    while (backtrackingStack.size() > firstDifferentStep) {
        backtrack(backtrackingStack, numberOfDroppedClasses, allClasses);
    }

    // Then apply the steps needed to get to the snapshot.
    if (firstDifferentStep < bestSolutionSnapshot.size()) {
        for (int i = (int)backtrackingStack.size() ; i < bestSolutionSnapshot.size() ; i++) {
            BacktrackingState & state = bestSolutionSnapshot[i];

            // Make a copy in order not to mutate it should we need to go back...
            std::minstd_rand stateRandomNumberGenerator = state.randomNumberGenerator;
            if (state.result) {
                // This step placed the class: re-apply the exact same attempt with
                // the exact same RNG state, which must succeed again.
                assert(state.currentAttemptIndex < state.attempts.size());
                typename IMPCaches::ClassData::PlacementAttempt::Result result = allClasses[i]->applyAttempt(state.attempts[state.currentAttemptIndex], stateRandomNumberGenerator);
                assert(result.success);

                if (!allClasses[i]->droppedBecauseFlatteningSuperclassWasDropped) {
                    // shouldGenerateImpCache might have been flipped to false during backtracking
                    // we're restoring to a snapshot where we did place this class, so restore
                    // the success bit...
                    // ... unless we had decided to drop it because other classes were dropped
                    // (in that case give up and don't attempt to generate a cache for it,
                    // but still apply the attempt above in order to set the right constraints
                    // on each selector, which is necessary for snapshot reproducibility)

                    allClasses[i]->shouldGenerateImpCache = true;
                }
            } else {
                // This step had skipped (dropped) the class; account for it again.
                numberOfDroppedClasses++;
            }

            backtrackingStack.push_back(state);
        }
    }
}
1672
/// Finds a shift and mask for each class, and start assigning the bits of the selector addresses
///
/// Core constraint-solving pass. Each class is given a PlacementAttempt (a
/// shift/mask choice) which fixes some bits of the addresses of the selectors
/// it uses. When a class cannot be placed, we backtrack — possibly several
/// classes at a time — and when backtracking stops making progress we restore
/// the best snapshot seen so far and drop the problematic class instead.
///
/// Returns the number of classes that were dropped (no IMP cache generated).
int IMPCachesBuilder::findShiftsAndMasks() {
    // Always seed the random number generator with 0 to get reproducibility.
    // Note: in overflow scenarios, findShiftsAndMasks can be called more than once,
    // so make sure to always use the same value when we enter this method.
    std::minstd_rand randomNumberGenerator(0);

    // This is a backtracking algorithm, so we need a stack to store our state
    // (It goes too deep to do it recursively)
    std::vector<BacktrackingState> backtrackingStack;

    // Index of the class we're currently looking at.
    unsigned long currentClassIndex = 0;

    // This lets us backtrack by more than one step, going back eg. 4 classes at a time.
    // Yes, this means we're not exploring the full solution space, but it's OK because
    // there are many solutions out there and we prefer dropping a few classes here and
    // there rather than take hours to find the perfect solution.
    unsigned long backtrackingLength = 1;

    // Indices of the attempt we had chosen for each class last time we reached the maximum
    // number of classes placed so far.
    std::vector<BacktrackingState> bestSolutionSnapshot;

#if 0
    // Debugging facilities where we store the full state corresponding
    // to bestSolutionSnapshot to make sure restoring snapshots works.
    std::vector<IMPCaches::ClassData> bestSolutionSnapshotClasses;
    std::unordered_map<std::string_view, IMPCaches::Selector> bestSolutionSnapshotSelectors;
#endif

    // Number of times we have backtracked. When this becomes too high, we go back to the
    // previous snapshot and drop the faulty class.
    unsigned long backtrackingAttempts = 0;

    // Go through all the classes and find a shift and mask for each,
    // backtracking if needed.
    std::vector<IMPCaches::ClassData*> allClasses;
    fillAllClasses(allClasses);

    int numberOfDroppedClasses = 0;

    while (currentClassIndex < allClasses.size()) {
        /* for (int i = 0 ; i < backtrackingStack.size() ; i++) {
            assert(backtrackingStack[i].attempts.size() > 0 || !allClasses[i]->shouldGenerateImpCache);
        } */

        assert(// Either we are adding a new state...
               (currentClassIndex == backtrackingStack.size())
               // Or we are backtracking and building on the last state recorded
               || (currentClassIndex == (backtrackingStack.size() - 1)));

        IMPCaches::ClassData* c = allClasses[currentClassIndex];

        if (!c->shouldGenerateImpCache) {
            // We have decided to drop this one before, so don't waste time.
            // (dropClass advances currentClassIndex and pushes a stack entry.)
            dropClass(_diagnostics, currentClassIndex, numberOfDroppedClasses, backtrackingStack, randomNumberGenerator, allClasses, "we have dropped it before");
            continue;
        }

        if (c->isPartOfDuplicateSet) {
            dropClass(_diagnostics, currentClassIndex, numberOfDroppedClasses, backtrackingStack, randomNumberGenerator, allClasses, "it is part of a duplicate set");
            continue;
        }

        if (currentClassIndex >= backtrackingStack.size()) {
            // We're at the top of the stack. Make a fresh state.

            BacktrackingState state;
            state.attempts = c->attempts();
            state.currentAttemptIndex = 0;
            state.randomNumberGenerator = randomNumberGenerator;
            backtrackingStack.push_back(state);
        } else {
            // We are backtracking ; don't retry the attempt we tried before, use the next one.
            backtrackingStack[currentClassIndex].currentAttemptIndex++;

            // Note that we do not reset randomNumberGenerator to state.randomNumberGenerator
            // here, because when backtracking we want to explore a different set of
            // possibilities, so let's try other placements.
        }

        assert(backtrackingStack.size() == currentClassIndex + 1);

        BacktrackingState& state = backtrackingStack[currentClassIndex];

        bool placed = false;

        // Go through all the possible placement attempts for this class.
        // If one succeeds, place the next class, and if needed we'll backtrack and try the next attempt, etc.
        // This is basically an iterative backtracking because
        // we don't want the stack to get too deep.
        std::vector<typename IMPCaches::ClassData::PlacementAttempt> & attempts = state.attempts;
        for (int operationIndex = state.currentAttemptIndex ; operationIndex < (int)attempts.size() ; operationIndex++) {
            // Save the state of the random number generator so that we can save its
            // state before applying the attempt in the backtracking stack if needed.
            std::minstd_rand maybeSuccessfulRNG = randomNumberGenerator;
            typename IMPCaches::ClassData::PlacementAttempt::Result result = c->applyAttempt(attempts[operationIndex], randomNumberGenerator);
            if (result.success) {
                if (currentClassIndex % 1000 == 0) {
                    _diagnostics.verbose("[IMP Caches] Placed %lu / %lu classes\n", currentClassIndex, allClasses.size());
                }

                //fprintf(stderr, "%lu / %lu: placed %s with operation %d/%lu (%s)\n", currentClassIndex, allClasses.size(), c->description().c_str(), operationIndex, attempts.size(), attempts[operationIndex].description().c_str());
                placed = true;
                state.result = result;
                state.currentAttemptIndex = operationIndex;
                // Remember the pre-attempt RNG state so a snapshot replay
                // (resetToSnapshot) reproduces this exact placement.
                state.randomNumberGenerator = maybeSuccessfulRNG;
                break;
            }
        }

        if (placed) {
            currentClassIndex += 1;
        } else {
            // Remove the current state, which has just failed and does not matter.
            // (It was never applied).
            backtrackingStack.pop_back();

            backtrackingAttempts++;
            if (backtrackingAttempts > 10) {
                // Reset to the best snapshot and drop the next class

                resetToSnapshot(backtrackingStack, bestSolutionSnapshot, allClasses, numberOfDroppedClasses);

#if 0
                // Expensive book-keeping to make sure that resetting to the snapshot worked.
                for (const auto & [k,v] : selectors.map) {
                    const IMPCaches::Selector & theoretical = bestSolutionSnapshotSelectors[k];
                    const IMPCaches::Selector & actual = *v;

                    if (theoretical != actual) {
                        fprintf(stderr, "Failed to restore snapshot of %lu classes; method %s differs, (%x, %x) vs (%x, %x)\n", bestSolutionSnapshot.size(), k.data(), theoretical.inProgressBucketIndex, theoretical.fixedBitsMask, actual.inProgressBucketIndex, actual.fixedBitsMask);
                        assert(theoretical == actual);
                    }
                }
#endif

                _diagnostics.verbose("*** SNAPSHOT: successfully reset to snapshot of size %lu\n", bestSolutionSnapshot.size());

                // The class right after the restored snapshot is the one that
                // was too hard to place; give up on it and move on.
                currentClassIndex = backtrackingStack.size();
                dropClass(_diagnostics, currentClassIndex, numberOfDroppedClasses, backtrackingStack, randomNumberGenerator, allClasses, "it's too difficult to place");

                // FIXME: we should consider resetting backtrackingLength to the value it had when we snapshotted here (the risk makes this not worth trying at this point in the release).

                backtrackingAttempts = 0;
                continue;
            } else {
                // Record a new best snapshot if we got further than ever before.
                if (currentClassIndex > bestSolutionSnapshot.size()) {
                    _diagnostics.verbose("*** SNAPSHOT *** %lu / %lu (%s)\n", currentClassIndex, allClasses.size(), c->description().c_str());
                    bestSolutionSnapshot = backtrackingStack;

#if 0
                    // Make a full copy of the state so that we can debug resetting to snapshots
                    bestSolutionSnapshotClasses.clear();
                    bestSolutionSnapshotSelectors.clear();
                    for (const auto & [k,v] : selectors.map) {
                        bestSolutionSnapshotSelectors[k] = *v;
                    }
#endif
                }

                _diagnostics.verbose("%lu / %lu (%s): backtracking\n", currentClassIndex, allClasses.size(), c->description().c_str());
                assert(currentClassIndex != 0); // Backtracked all the way to the beginning, no solution

                for (unsigned long j = 0 ; j < backtrackingLength ; j++) {
                    backtrack(backtrackingStack, numberOfDroppedClasses, allClasses);
                    currentClassIndex--;
                }

                // Exponential backoff: double the backtracking distance each time,
                // capped at 1024 and at the number of classes placed so far.
                backtrackingLength = std::max(1ul,std::min(std::min(currentClassIndex, backtrackingLength * 2), 1024ul));
            }
        }
    }

    if (numberOfDroppedClasses > 0) {
        _diagnostics.verbose("Dropped %d classes that were too difficult to place\n", numberOfDroppedClasses);
    }

    return numberOfDroppedClasses;
}
1854
1855 void IMPCachesBuilder::fillAllClasses(std::vector<IMPCaches::ClassData*> & allClasses) {
1856 for (const CacheBuilder::DylibInfo & d : dylibs) {
1857 typedef typename decltype(d.impCachesClassData)::value_type classpair;
1858
1859 for (const auto& [key, thisClassData]: d.impCachesClassData) {
1860 if (thisClassData->methods.size() > 0 && thisClassData->shouldGenerateImpCache) {
1861 allClasses.push_back(thisClassData.get());
1862 }
1863 }
1864 }
1865
1866
1867 // Only include the classes for which there is actual work to do,
1868 // otherwise we have classes with only 1 choice which makes our
1869 // partial backtracking more difficult.
1870
1871 std::sort(allClasses.begin(), allClasses.end(), [this](IMPCaches::ClassData* a, IMPCaches::ClassData* b) {
1872 int indexA = a->isMetaclass ? neededMetaclasses[a->name] : neededClasses[a->name];
1873 int indexB = b->isMetaclass ? neededMetaclasses[b->name] : neededClasses[b->name];
1874
1875 return (indexA < indexB);
1876 });
1877 }
1878
1879 void IMPCachesBuilder::removeUninterestingClasses() {
1880 // Remove any empty classes and classes for which we don't generate IMP caches now that we've inlined all selectors
1881 // (These classes were just used for inlining purposes)
1882
1883 for (CacheBuilder::DylibInfo& d : dylibs) {
1884 for (auto it = d.impCachesClassData.begin() ; it != d.impCachesClassData.end() ; /* no increment here */) {
1885 auto& c = it->second;
1886 if (((c->methods.size() == 0) && !(c->flatteningRootSuperclass.has_value()))
1887 || !c->shouldGenerateImpCache) {
1888 // Remove this useless class: delete it from the selectors, and from the master class map
1889 // Note that it is not useless if it is in a flattening hierarchy: all classes in a flattening
1890 // hierarchy must have preopt caches so that objc correctly invalidates the caches on children
1891 // when you attach a category to one of the classes in a flattening hierarchy
1892
1893 for (auto& m : c->methods) {
1894 auto& classes = m.selector->classes;
1895 classes.erase(std::remove(classes.begin(), classes.end(), c.get()), classes.end());
1896 }
1897
1898 it = d.impCachesClassData.erase(it);
1899 } else {
1900 it++;
1901 }
1902 }
1903 }
1904
1905
1906 // Now remove from the selector map any selectors that are not used by any classes
1907
1908 addressSpace.removeUninterestingSelectors();
1909 for (auto it = selectors.map.begin() ; it != selectors.map.end() ; /* no increment */ ) {
1910 Selector & s = *(it->second);
1911
1912 if ((s.classes.size() == 0) && (it->first != magicSelector)) {
1913 it = selectors.map.erase(it);
1914 } else {
1915 it++;
1916 }
1917 }
1918 }
1919
1920 void IMPCachesBuilder::fillAllMethods(std::vector<IMPCaches::Selector*> & allMethods) {
1921 typedef typename decltype(selectors.map)::value_type methodpair;
1922 for (auto& [name, selectorData] : selectors.map) {
1923 // Remove all non-interesting classes that were added only for inlining tracking.
1924 if (selectorData->classes.size() > 0) {
1925 allMethods.push_back(selectorData.get());
1926 }
1927 }
1928 }
1929
1930 // Main entry point of the algorithm, chaining all the steps.
1931 void IMPCachesBuilder::buildPerfectHashes(IMPCaches::HoleMap& holeMap, Diagnostics& diag) {
1932 _timeRecorder.pushTimedSection();
1933 int droppedClasses = findShiftsAndMasks();
1934 _timeRecorder.recordTime("find shifts and masks");
1935
1936 if (droppedClasses > 0) {
1937 removeUninterestingClasses();
1938 }
1939
1940 droppedClasses = solveGivenShiftsAndMasks();
1941
1942 if (droppedClasses > 0) {
1943 removeUninterestingClasses();
1944 }
1945
1946 computeLowBits(holeMap);
1947
1948 _timeRecorder.recordTime("assign selector addresses");
1949 _timeRecorder.popTimedSection();
1950 }
1951
1952 size_t IMPCachesBuilder::totalIMPCachesSize() const {
1953 size_t size = 0;
1954 for (CacheBuilder::DylibInfo& d : dylibs) {
1955 for (const auto& [k,v] : d.impCachesClassData) {
1956 assert(v->shouldGenerateImpCache);
1957 size += v->sizeInSharedCache();
1958 }
1959 }
1960 return size;
1961 }
1962
// Resets the hole map, then delegates to the address space to compute the low
// bits of each selector's address; the hole map is repopulated as a side
// effect of addressSpace.computeLowBits().
void IMPCachesBuilder::computeLowBits(IMPCaches::HoleMap& holeMap) {
    // Start from a clean map: this can run after classes/selectors were
    // dropped, so any previously recorded content would be stale.
    holeMap.clear();
    addressSpace.computeLowBits(holeMap);
}
1967
// Shuffles selectors around to satisfy size constraints
//
// Second solving phase: each class already has a shift and mask; this pass
// assigns a concrete bucket index to every selector, satisfying the merged
// constraints of all the classes that use it. Classes whose selectors cannot
// be placed are dropped (together with their flattening hierarchy).
//
// Returns the number of classes dropped during this phase.
int IMPCachesBuilder::solveGivenShiftsAndMasks() {
    std::vector<IMPCaches::ClassData*> allClasses;
    fillAllClasses(allClasses);

    int hadToIncreaseSizeCount = 0;
    int droppedClasses = 0;

    // Sanity check: all methods should have a fixed bits mask
    // that at least encompasses the masks of all the classes they are in.
    for (const IMPCaches::ClassData* c : allClasses) {
        for (const ClassData::Method& m : c->methods) {
            assert(((m.selector->fixedBitsMask >> c->shift) & c->mask()) == c->mask());
        }
        if (c->hadToIncreaseSize()) {
            hadToIncreaseSizeCount++;
        }
    }

    // Sanity check: all classes should have a valid shift and mask.
    for (IMPCaches::ClassData* c : allClasses) {
        assert(c->checkConsistency());
    }


    // Now that everything is placed, try to adjust placement within the
    // constraints so that we can respect alignment

    _diagnostics.verbose("[IMP Caches] Placed %lu classes, increasing hash table size for %d\n", allClasses.size(), hadToIncreaseSizeCount);

    std::vector<IMPCaches::Selector*> methodsSortedByNumberOfFixedBits;
    fillAllMethods(methodsSortedByNumberOfFixedBits);

    std::sort(methodsSortedByNumberOfFixedBits.begin(), methodsSortedByNumberOfFixedBits.end(), [](const IMPCaches::Selector* a, const IMPCaches::Selector* b) -> bool {

        // Place the methods with the greatest number of fixed bits first
        // as they will have the most constraints

        // If we have the same number of fixed bits, place the methods
        // in the largest number of classes first, as they will likely
        // have more constraints on their bits

        // (Name is the final tie-breaker so the order is fully deterministic.)
        std::tuple<int, int, std::string_view> ta = std::make_tuple(a->numberOfSetBits(), a->classes.size(), std::string_view(a->name));
        std::tuple<int, int, std::string_view> tb = std::make_tuple(b->numberOfSetBits(), b->classes.size(), std::string_view(b->name));

        return ta > tb;
    });

    std::default_random_engine generator;

    _diagnostics.verbose("[IMP Caches] Rearranging selectors in 128-byte buckets…\n");

    IMPCaches::ConstraintSet cs;
    for (unsigned long methodIndex = 0 ; methodIndex < methodsSortedByNumberOfFixedBits.size() ; methodIndex++) {
        IMPCaches::Selector* m = methodsSortedByNumberOfFixedBits[methodIndex];
        if (addressSpace.canPlaceMethodAtIndex(m, m->inProgressBucketIndex)) {
            // The bucket chosen during the first phase still works; keep it.
            addressSpace.placeMethodAtIndex(m, m->inProgressBucketIndex);
        } else {
            // Try to find another address for m
            cs.clear();

#if DEBUG
            std::vector<IMPCaches::ClassData*> sortedClasses = m->classes;

            // Sort the classes so that we can always debug the same thing.
            std::sort(sortedClasses.begin(), sortedClasses.end(), [](const IMPCaches::ClassData* a, const IMPCaches::ClassData* b) {
                return *a < *b;
            });

            std::vector<IMPCaches::ClassData*> & classes = sortedClasses;
#else
            std::vector<IMPCaches::ClassData*> & classes = m->classes;
#endif

            bool atLeastOneConstraint = false;

            // Go through all the classes the method is used in and add constraints
            for (IMPCaches::ClassData* c : classes) {
                if (!c->shouldGenerateImpCache) continue;
                atLeastOneConstraint = true;
                IMPCaches::Constraint constraint = c->constraintForMethod(m);
                cs.add(constraint);
            }

            if (!atLeastOneConstraint) {
                // This method is only used in classes we have just dropped.
                continue;
            }

            // Last resort when no bucket satisfies the constraints: drop every
            // class using this method (and their flattening hierarchies).
            auto dropClassesWithThisMethod = [this, &classes, &allClasses, &droppedClasses](){
                for (IMPCaches::ClassData* c : classes) {
                    c->shouldGenerateImpCache = false;
                    _diagnostics.verbose("Dropping class %s, selectors too difficult to place\n", c->name);
                    droppedClasses++;
                    forEachClassInFlatteningHierarchy(c, allClasses, [this](IMPCaches::ClassData *toDrop) {
                        if (toDrop->shouldGenerateImpCache) {
                            toDrop->shouldGenerateImpCache = false;
                            toDrop->droppedBecauseFlatteningSuperclassWasDropped = true;
                            _diagnostics.verbose("Dropping class %s in the same flattening hierarchy\n", toDrop->name);
                        }
                    });
                }
            };

            IMPCaches::Constraint& mergedConstraint = *(cs.mergedConstraint);

            if (mergedConstraint.allowedValues.size() == 0) {
                // The classes' constraints are mutually incompatible.
                dropClassesWithThisMethod();
                continue;
            }

            bool foundValue = false;
            std::unordered_set<int> & allowedValues = mergedConstraint.allowedValues;
            int modulo = mergedConstraint.mask + 1;
            int multiplier = 1 << mergedConstraint.shift;
            // We want to go through:
            // [((0 + allowedValues) << shift) + k, ((modulo + allowedValues) << shift) + k, ((2*modulo + allowedValue) << shift) + k, ....] etc.
            // but we want to randomize this so that we don't completely
            // fill up the small addresses. If we do, and we end up with a
            // constraint that forces us to zero the high bits, we'll fail
            // to find room for the selector.

            // Range for the multiplier of the modulo above
            int addressesCount = std::max(((addressSpace.maximumIndex + 1) >> mergedConstraint.shift) / modulo, 1);

            // Fill "addresses" with [0, addressesCount[ so that we can shuffle it below
            std::vector<int> addresses(addressesCount);
            std::iota(addresses.begin(), addresses.end(), 0);

            for (int i = 0 ; i < addressesCount ; i++) {
                if (foundValue) {
                    break;
                }

                // Manual Fisher-Yates:
                // Pick a random element in [i, end[. Swap it with the i^th element. Repeat if the random element didn't work.
                // We don't do std::shuffle because it wastes time to shuffle the whole range if we find happiness in the beginning.
                std::uniform_int_distribution<int> distribution(i,addressesCount-1);
                int rd = distribution(generator);
                int baseAddress = addresses[rd];
                std::swap(addresses[i], addresses[rd]);

                for (int j : allowedValues) {
                    if (foundValue) {
                        break;
                    }

                    // The low `shift` bits are unconstrained; try each of them.
                    for (int k = 0 ; k < multiplier; k++) {
                        int bucketIndex = ((baseAddress * modulo + j) << mergedConstraint.shift) | k;
                        if (bucketIndex >= addressSpace.maximumIndex) {
                            continue;
                        }

                        bool canPlace = addressSpace.canPlaceMethodAtIndex(m, bucketIndex);
                        if (!canPlace) {
                            continue;
                        }

                        foundValue = true;
                        //std::cerr << methodIndex << "/" << methodsSortedByNumberOfFixedBits.size() << " Moving " << m->name << " to " << address << "\n";
                        m->inProgressBucketIndex = bucketIndex;
                        addressSpace.placeMethodAtIndex(m, bucketIndex);
                        break;
                    }
                }
            }

            if (!foundValue) {
                _diagnostics.verbose("Failed to place %s\n", m->name);
                dropClassesWithThisMethod();
            }
        }

        // Periodic progress logging.
        if (methodIndex % 1000 == 0) {
            _diagnostics.verbose(" %lu/%lu…\n", methodIndex, methodsSortedByNumberOfFixedBits.size());
        }

    }

    if (droppedClasses == 0) {
        _diagnostics.verbose("[IMP Caches] Placed all methods\n");
    } else {
        _diagnostics.verbose("[IMP Caches] Finished placing methods, dropping %d classes\n", droppedClasses);
    }

    constexpr bool log = false;
    if (log) {
        std::cerr << addressSpace << "\n";
    }

    return droppedClasses;
}
2160
2161 SelectorMap::SelectorMap() {
2162 std::unique_ptr<IMPCaches::Selector> magicSelectorStruct = std::make_unique<IMPCaches::Selector>();
2163 magicSelectorStruct->name = magicSelector.data();
2164 magicSelectorStruct->offset = 0;
2165 map[magicSelector] = std::move(magicSelectorStruct);
2166 }
2167
// Account for the magic selector string up front (its length plus the NUL
// terminator), matching the entry SelectorMap's constructor creates.
HoleMap::HoleMap() {
    addStringOfSize(magicSelector.length() + 1);
}
2171
2172 bool ClassData::ClassLocator::operator==(const ClassData::ClassLocator & other) {
2173 return (segmentIndex == other.segmentIndex)
2174 && (segmentOffset == other.segmentOffset)
2175 && (strcmp(installName, other.installName) == 0);
2176 }
2177
2178 bool ClassData::operator==(const ClassData & other) {
2179 if (strcmp(name, other.name) != 0) {
2180 return false;
2181 }
2182 if (methods.size() != other.methods.size()) {
2183 return false;
2184 }
2185 for (unsigned i = 0 ; i < methods.size() ; i++) {
2186 if (*(methods[i].selector) != *(other.methods[i].selector)) {
2187 return false;
2188 }
2189 }
2190
2191 return true;
2192 }
2193
2194 };