+void cache_t::initializeToEmpty()
+{
+    _bucketsAndMaybeMask.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
+    _originalPreoptCache.store(nullptr, std::memory_order_relaxed);
+}
+
+#if CONFIG_USE_PREOPT_CACHES
+/*
+ * The shared cache builder will sometimes have prebuilt an IMP cache
+ * for the class and left a `preopt_cache_t` pointer in _originalPreoptCache.
+ *
+ * However, we have this tension:
+ * - when the class is realized it has to have a cache that can't resolve any
+ *   selector until the class is properly initialized, so that every
+ *   caller falls into the slow path and synchronizes with the class initialization,
+ * - we need to remember that cache pointer, and we have no spare field to store it in.
+ *
+ * The caches are designed so that preopt_cache_t::bit_one is set to 1,
+ * so we "disguise" the pointer so that it looks like a cache of capacity 1
+ * where that bit one aliases with where the top bit of a SEL in the bucket_t
+ * would live:
+ *
+ *  +----------------+----------------+
+ *  |      IMP       |       SEL      | << a bucket_t
+ *  +----------------+----------------+--------------...
+ *  preopt_cache_t >>|               1| ...
+ *  +----------------+--------------...
+ *
+ * The shared cache guarantees that there's valid memory to read under "IMP".
+ *
+ * This lets us encode the original preoptimized cache pointer during
+ * initialization, and we can reconstruct its original address and install
+ * it back later.
+ */
+void cache_t::initializeToPreoptCacheInDisguise(const preopt_cache_t *cache)
+{
+    // preopt_cache_t::bit_one is 1, which sets the top bit
+    // and is never set on any valid selector.
+
+    uintptr_t value = (uintptr_t)cache + sizeof(preopt_cache_t) -
+            (bucket_t::offsetOfSel() + sizeof(SEL));
+
+    _originalPreoptCache.store(nullptr, std::memory_order_relaxed);
+    setBucketsAndMask((bucket_t *)value, 0);
+    _occupied = cache->occupied;
+}
+
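+/*
+ * If the cache currently holds a disguised preoptimized cache pointer,
+ * either install the real preoptimized buckets, or drop back to the empty
+ * buckets when the class disallows preoptimized caches (or disallows
+ * inlined selectors and the prebuilt cache has inlines).
+ */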
+void cache_t::maybeConvertToPreoptimized()
+{
+    const preopt_cache_t *cache = disguised_preopt_cache();
+
+    if (cache == nil) {
+        return;
+    }
+
+    if (!cls()->allowsPreoptCaches() ||
+            (cache->has_inlines && !cls()->allowsPreoptInlinedSels())) {
+        if (PrintCaches) {
+            _objc_inform("CACHES: %sclass %s: dropping cache (from %s)",
+                         cls()->isMetaClass() ? "meta" : "",
+                         cls()->nameForLogging(), "setInitialized");
+        }
+        return setBucketsAndMask(emptyBuckets(), 0);
+    }
+
+    uintptr_t value = (uintptr_t)&cache->entries;
+#if __has_feature(ptrauth_calls)
+    value = (uintptr_t)ptrauth_sign_unauthenticated((void *)value,
+            ptrauth_key_process_dependent_data, (uintptr_t)cls());
+#endif
+    value |= preoptBucketsHashParams(cache) | preoptBucketsMarker;
+    _bucketsAndMaybeMask.store(value, memory_order_relaxed);
+    _occupied = cache->occupied;
+}
+
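+/*
+ * For classes outside the shared cache, initialize to the empty cache; if
+ * some shared cache image has been overridden by a root, additionally
+ * disallow preoptimized caches for this class and its subclasses (see the
+ * rdar reference below). For classes inside the shared cache, install the
+ * prebuilt cache, if any, in disguise.
+ */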
+void cache_t::initializeToEmptyOrPreoptimizedInDisguise()
+{
+    if (os_fastpath(!DisablePreoptCaches)) {
+        if (!objc::dataSegmentsRanges.inSharedCache((uintptr_t)this)) {
+            if (dyld_shared_cache_some_image_overridden()) {
+                // If the system has roots, then we must disable preoptimized
+                // caches completely. If a class in another image has a
+                // superclass in the root, the offset to the superclass will
+                // be wrong. rdar://problem/61601961
+                cls()->setDisallowPreoptCachesRecursively("roots");
+            }
+            return initializeToEmpty();
+        }
+
+        auto cache = _originalPreoptCache.load(memory_order_relaxed);
+        if (cache) {
+            return initializeToPreoptCacheInDisguise(cache);
+        }
+    }
+
+    return initializeToEmpty();
+}
+
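+/*
+ * Recover the preopt_cache_t from a cache installed by
+ * maybeConvertToPreoptimized(): mask off the hash parameters and the marker
+ * bit, undo the ptrauth signature on the entries pointer when applicable,
+ * then step back over the preopt_cache_t header that precedes the entries.
+ */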
+const preopt_cache_t *cache_t::preopt_cache() const
+{
+    auto addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
+    addr &= preoptBucketsMask;
+#if __has_feature(ptrauth_calls)
+#if __BUILDING_OBJCDT__
+    addr = (uintptr_t)ptrauth_strip((preopt_cache_entry_t *)addr,
+            ptrauth_key_process_dependent_data);
+#else
+    addr = (uintptr_t)ptrauth_auth_data((preopt_cache_entry_t *)addr,
+            ptrauth_key_process_dependent_data, (uintptr_t)cls());
+#endif
+#endif
+    return (preopt_cache_t *)(addr - sizeof(preopt_cache_t));
+}
+
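+/*
+ * A disguised cache looks like a cache of capacity 1 whose single bucket's
+ * SEL slot has its top bit set (it aliases preopt_cache_t::bit_one), which
+ * no valid selector ever has. Given the bucket pointer b installed by
+ * initializeToPreoptCacheInDisguise():
+ *
+ *     (uintptr_t)b + bucket_t::offsetOfSel() + sizeof(SEL)
+ *         == (uintptr_t)cache + sizeof(preopt_cache_t)
+ *
+ * so stepping back over the preopt_cache_t header recovers the original
+ * cache pointer.
+ */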
+const preopt_cache_t *cache_t::disguised_preopt_cache() const
+{
+    bucket_t *b = buckets();
+    if ((intptr_t)b->sel() >= 0) return nil;
+
+    uintptr_t value = (uintptr_t)b + bucket_t::offsetOfSel() + sizeof(SEL);
+    return (preopt_cache_t *)(value - sizeof(preopt_cache_t));
+}
+
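+// The preoptimized cache stores its fallback class as an offset from the
+// class owning the cache.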
+Class cache_t::preoptFallbackClass() const
+{
+    return (Class)((uintptr_t)cls() + preopt_cache()->fallback_class_offset);
+}
+
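+/*
+ * strict == true matches only caches installed by maybeConvertToPreoptimized()
+ * (the marker bit is set). With strict == false, disguised preoptimized
+ * caches also match: they have a zero mask but are not the empty cache.
+ */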
+bool cache_t::isConstantOptimizedCache(bool strict, uintptr_t empty_addr) const
+{
+    uintptr_t addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
+    if (addr & preoptBucketsMarker) {
+        return true;
+    }
+    if (strict) {
+        return false;
+    }
+    return mask() == 0 && addr != empty_addr;
+}
+
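+/*
+ * For a disguised preoptimized cache we can check membership directly:
+ * entries store the selector as an offset from @selector(🤯) and the
+ * implementation as an offset back from the class, so the cache only
+ * needs flushing if that exact (sel, imp) pair is present in it.
+ */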
+bool cache_t::shouldFlush(SEL sel, IMP imp) const
+{
+    // This test isn't backwards: disguised caches aren't "strict"
+    // constant optimized caches.
+    if (!isConstantOptimizedCache(/*strict*/true)) {
+        const preopt_cache_t *cache = disguised_preopt_cache();
+        if (cache) {
+            uintptr_t offs = (uintptr_t)sel - (uintptr_t)@selector(🤯);
+            uintptr_t slot = ((offs >> cache->shift) & cache->mask);
+            auto &entry = cache->entries[slot];
+
+            return entry.sel_offs == offs &&
+                    (uintptr_t)cls() - entry.imp_offs ==
+                    (uintptr_t)ptrauth_strip(imp, ptrauth_key_function_pointer);
+        }
+    }
+
+    return cache_getImp(cls(), sel) == imp;
+}
+
+bool cache_t::isConstantOptimizedCacheWithInlinedSels() const
+{
+    return isConstantOptimizedCache(/* strict */true) && preopt_cache()->has_inlines;
+}
+#endif // CONFIG_USE_PREOPT_CACHES
+