2 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 /***********************************************************************
26 * Method cache management
28 * Cache garbage collection
29 * Cache instrumentation
30 * Dedicated allocator for large caches
31 **********************************************************************/
34 /***********************************************************************
35 * Method cache locking (GrP 2001-1-14)
37 * For speed, objc_msgSend does not acquire any locks when it reads
38 * method caches. Instead, all cache changes are performed so that any
39 * objc_msgSend running concurrently with the cache mutator will not
40 * crash or hang or get an incorrect result from the cache.
42 * When cache memory becomes unused (e.g. the old cache after cache
43 * expansion), it is not immediately freed, because a concurrent
44 * objc_msgSend could still be using it. Instead, the memory is
45 * disconnected from the data structures and placed on a garbage list.
46 * The memory is now only accessible to instances of objc_msgSend that
47 * were running when the memory was disconnected; any further calls to
48 * objc_msgSend will not see the garbage memory because the other data
49 * structures don't point to it anymore. The collecting_in_critical
50 * function checks the PC of all threads and returns FALSE when all threads
51 * are found to be outside objc_msgSend. This means any call to objc_msgSend
52 * that could have had access to the garbage has finished or moved past the
53 * cache lookup stage, so it is safe to free the memory.
55 * All functions that modify cache data or structures must acquire the
56 * cacheUpdateLock to prevent interference from concurrent modifications.
57 * The function that frees cache garbage must acquire the cacheUpdateLock
58 * and use collecting_in_critical() to flush out cache readers.
59 * The cacheUpdateLock is also used to protect the custom allocator used
60 * for large method cache blocks.
 * Cache readers (PC-checked by collecting_in_critical())
 * objc_msgSend*
 * cache_getImp
66 * Cache readers/writers (hold cacheUpdateLock during access; not PC-checked)
67 * cache_t::copyCacheNolock (caller must hold the lock)
68 * cache_t::eraseNolock (caller must hold the lock)
69 * cache_t::collectNolock (caller must hold the lock)
70 * cache_t::insert (acquires lock)
71 * cache_t::destroy (acquires lock)
73 * UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
75 * _class_printMethodCaches
76 * _class_printDuplicateCacheEntries
77 * _class_printMethodCacheStatistics
79 ***********************************************************************/
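// A minimal standalone sketch of the deferred-free protocol described above,
// using std::mutex and a hypothetical reader_is_in_lookup() in place of
// cacheUpdateLock and _collecting_in_critical(). Not the runtime's actual
// implementation; kept in a disabled block for illustration only.
#if 0
#include <cstdlib>
#include <mutex>
#include <vector>

static std::mutex gCacheUpdateLock;       // stand-in for cacheUpdateLock
static std::vector<void *> gGarbage;      // disconnected but not yet freed

extern bool reader_is_in_lookup(void);    // hypothetical PC check

static void retire_old_buckets(void *oldBuckets)
{
    std::lock_guard<std::mutex> lock(gCacheUpdateLock);
    gGarbage.push_back(oldBuckets);       // 1. disconnect, don't free yet
    if (reader_is_in_lookup()) return;    // 2. a reader may still see it
    for (void *p : gGarbage) free(p);     // 3. no readers left: free it all
    gGarbage.clear();
}
#endif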
84 #include "objc-private.h"
87 #include <Cambria/Traps.h>
88 #include <Cambria/Cambria.h>
91 #if __arm__ || __x86_64__ || __i386__
93 // objc_msgSend has few registers available.
94 // Cache scan increments and wraps at special end-marking bucket.
95 #define CACHE_END_MARKER 1
97 // Historical fill ratio of 75% (since the new objc runtime was introduced).
98 static inline mask_t cache_fill_ratio(mask_t capacity) {
99 return capacity * 3 / 4;
102 #elif __arm64__ && !__LP64__
104 // objc_msgSend has lots of registers available.
105 // Cache scan decrements. No end marker needed.
106 #define CACHE_END_MARKER 0
108 // Historical fill ratio of 75% (since the new objc runtime was introduced).
109 static inline mask_t cache_fill_ratio(mask_t capacity) {
110 return capacity * 3 / 4;
113 #elif __arm64__ && __LP64__
115 // objc_msgSend has lots of registers available.
116 // Cache scan decrements. No end marker needed.
117 #define CACHE_END_MARKER 0
119 // Allow 87.5% fill ratio in the fast path for all cache sizes.
120 // Increasing the cache fill ratio reduces the fragmentation and wasted space
121 // in imp-caches at the cost of potentially increasing the average lookup of
122 // a selector in imp-caches by increasing collision chains. Another potential
123 // change is that cache table resizes / resets happen at different moments.
124 static inline mask_t cache_fill_ratio(mask_t capacity) {
125 return capacity * 7 / 8;
128 // Allow 100% cache utilization for smaller cache sizes. This has the same
129 // advantages and disadvantages as the fill ratio. A very large percentage
130 // of caches end up with very few entries and the worst case of collision
131 // chains in small tables is relatively small.
132 // NOTE: objc_msgSend properly handles a cache lookup with a full cache.
133 #define CACHE_ALLOW_FULL_UTILIZATION 1
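// Worked example of the two thresholds above (sketch only, using toy copies
// of the constants; the real values are cache_fill_ratio() and
// FULL_UTILIZATION_CACHE_SIZE defined in this file).
#if 0
#include <cassert>
#include <cstdint>
typedef uint32_t mask_t;

static mask_t toy_fill_ratio_7_8(mask_t capacity) { return capacity * 7 / 8; }

static void toy_fill_ratio_examples(void)
{
    // 7/8 fast-path ratio: a 32-slot cache grows when the 29th entry arrives.
    assert(toy_fill_ratio_7_8(32) == 28);
    // For small caches (capacity <= 8 here) full utilization is allowed, so
    // an 8-slot cache may hold 8 entries even though 8 * 7 / 8 is only 7.
    assert(toy_fill_ratio_7_8(8) == 7);
}
#endif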
136 #error unknown architecture
139 /* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
141 #if CACHE_END_MARKER || (__arm64__ && !__LP64__)
// When we have a cache end marker it fills a bucket slot, so an initial
// cache size of 2 buckets would not be efficient when one of the slots is
// always filled with the end marker. So start with a cache size of 4 buckets.
146 INIT_CACHE_SIZE_LOG2 = 2,
// Allow an initial cache size of 2 buckets, since a large number of
149 // classes, especially metaclasses, have very few imps, and we support
150 // the ability to fill 100% of the cache before resizing.
151 INIT_CACHE_SIZE_LOG2 = 1,
153 INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2),
154 MAX_CACHE_SIZE_LOG2 = 16,
155 MAX_CACHE_SIZE = (1 << MAX_CACHE_SIZE_LOG2),
156 FULL_UTILIZATION_CACHE_SIZE_LOG2 = 3,
157 FULL_UTILIZATION_CACHE_SIZE = (1 << FULL_UTILIZATION_CACHE_SIZE_LOG2),
160 static int _collecting_in_critical(void);
161 static void _garbage_make_room(void);
163 #if DEBUG_TASK_THREADS
164 static kern_return_t objc_task_threads
167 thread_act_array_t *act_list,
168 mach_msg_type_number_t *act_listCnt
172 #if DEBUG_TASK_THREADS
173 #undef HAVE_TASK_RESTARTABLE_RANGES
176 /***********************************************************************
177 * Cache statistics for OBJC_PRINT_CACHE_SETUP
178 **********************************************************************/
179 static unsigned int cache_counts[16];
180 static size_t cache_allocations;
181 static size_t cache_collections;
183 static void recordNewCache(mask_t capacity)
185 size_t bucket = log2u(capacity);
186 if (bucket < countof(cache_counts)) {
187 cache_counts[bucket]++;
192 static void recordDeadCache(mask_t capacity)
194 size_t bucket = log2u(capacity);
195 if (bucket < countof(cache_counts)) {
196 cache_counts[bucket]--;
200 /***********************************************************************
201 * Pointers used by compiled class objects
202 * These use asm to avoid conflicts with the compiler's internal declarations
203 **********************************************************************/
205 // EMPTY_BYTES includes space for a cache end marker bucket.
206 // This end marker doesn't actually have the wrap-around pointer
207 // because cache scans always find an empty bucket before they might wrap.
208 // 1024 buckets is fairly common.
210 // Use a smaller size to exercise heap-allocated empty caches.
211 # define EMPTY_BYTES ((8+1)*16)
213 # define EMPTY_BYTES ((1024+1)*16)
216 #define stringize(x) #x
217 #define stringize2(x) stringize(x)
219 // "cache" is cache->buckets; "vtable" is cache->mask/occupied
220 // hack to avoid conflicts with compiler's internal declaration
221 asm("\n .section __TEXT,__const"
222 "\n .globl __objc_empty_vtable"
223 "\n .set __objc_empty_vtable, 0"
224 "\n .globl __objc_empty_cache"
225 #if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
227 "\n L__objc_empty_cache: .space " stringize2(EMPTY_BYTES)
228 "\n .set __objc_empty_cache, L__objc_empty_cache + 0xf"
231 "\n __objc_empty_cache: .space " stringize2(EMPTY_BYTES)
235 #if CONFIG_USE_PREOPT_CACHES
236 __attribute__((used, section("__DATA_CONST,__objc_scoffs")))
237 uintptr_t objc_opt_offsets[__OBJC_OPT_OFFSETS_COUNT];
#if CACHE_END_MARKER
static inline mask_t cache_next(mask_t i, mask_t mask) { return (i+1) & mask; }
#elif __arm64__
static inline mask_t cache_next(mask_t i, mask_t mask) { return i ? i-1 : mask; }
#else
#error unexpected configuration
#endif
253 // mega_barrier doesn't really work, but it works enough on ARM that
254 // we leave well enough alone and keep using it there.
256 #define mega_barrier() \
257 __asm__ __volatile__( \
265 // Pointer-size register prefix for inline asm
267 # define p "x" // true arm64
269 # define p "w" // arm64_32
272 // Use atomic double-word instructions to update cache entries.
273 // This requires cache buckets not cross cache line boundaries.
274 static ALWAYS_INLINE void
275 stp(uintptr_t onep, uintptr_t twop, void *destp)
277 __asm__ ("stp %" p "[one], %" p "[two], [%x[dest]]"
278 : "=m" (((uintptr_t *)(destp))[0]),
279 "=m" (((uintptr_t *)(destp))[1])
287 static ALWAYS_INLINE void __unused
288 ldp(uintptr_t& onep, uintptr_t& twop, const void *srcp)
290 __asm__ ("ldp %" p "[one], %" p "[two], [%x[src]]"
293 : "m" (((const uintptr_t *)(srcp))[0]),
294 "m" (((const uintptr_t *)(srcp))[1]),
304 // Class points to cache. SEL is key. Cache buckets store SEL+IMP.
305 // Caches are never built in the dyld shared cache.
307 static inline mask_t cache_hash(SEL sel, mask_t mask)
309 uintptr_t value = (uintptr_t)sel;
310 #if CONFIG_USE_PREOPT_CACHES
313 return (mask_t)(value & mask);
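// A simplified sketch of how cache_hash() and cache_next() combine during a
// lookup: hash is "sel & mask", collisions walk the table until the selector
// or an empty slot is found. Toy types and names; the real fast path is the
// objc_msgSend assembly.
#if 0
#include <cstdint>
typedef uint32_t mask_t;

struct toy_bucket { uintptr_t sel; uintptr_t imp; };

static uintptr_t toy_lookup(const toy_bucket *buckets, mask_t mask, uintptr_t sel)
{
    mask_t begin = (mask_t)(sel & mask);
    mask_t i = begin;
    do {
        if (buckets[i].sel == sel) return buckets[i].imp;  // hit
        if (buckets[i].sel == 0) return 0;                 // miss: empty slot
        i = i ? i - 1 : mask;       // arm64-style decrementing cache_next()
    } while (i != begin);
    return 0;                       // scanned a completely full table
}
#endif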
318 template<Atomicity atomicity, IMPEncoding impEncoding>
319 void bucket_t::set(bucket_t *base, SEL newSel, IMP newImp, Class cls)
321 ASSERT(_sel.load(memory_order_relaxed) == 0 ||
322 _sel.load(memory_order_relaxed) == newSel);
324 static_assert(offsetof(bucket_t,_imp) == 0 &&
325 offsetof(bucket_t,_sel) == sizeof(void *),
326 "bucket_t layout doesn't match arm64 bucket_t::set()");
328 uintptr_t encodedImp = (impEncoding == Encoded
329 ? encodeImp(base, newImp, newSel, cls)
330 : (uintptr_t)newImp);
332 // LDP/STP guarantees that all observers get
333 // either imp/sel or newImp/newSel
334 stp(encodedImp, (uintptr_t)newSel, this);
339 template<Atomicity atomicity, IMPEncoding impEncoding>
340 void bucket_t::set(bucket_t *base, SEL newSel, IMP newImp, Class cls)
342 ASSERT(_sel.load(memory_order_relaxed) == 0 ||
343 _sel.load(memory_order_relaxed) == newSel);
345 // objc_msgSend uses sel and imp with no locks.
346 // It is safe for objc_msgSend to see new imp but NULL sel
347 // (It will get a cache miss but not dispatch to the wrong place.)
348 // It is unsafe for objc_msgSend to see old imp and new sel.
349 // Therefore we write new imp, wait a lot, then write new sel.
351 uintptr_t newIMP = (impEncoding == Encoded
352 ? encodeImp(base, newImp, newSel, cls)
353 : (uintptr_t)newImp);
355 if (atomicity == Atomic) {
356 _imp.store(newIMP, memory_order_relaxed);
358 if (_sel.load(memory_order_relaxed) != newSel) {
361 _sel.store(newSel, memory_order_relaxed);
362 #elif __x86_64__ || __i386__
363 _sel.store(newSel, memory_order_release);
365 #error Don't know how to do bucket_t::set on this architecture.
369 _imp.store(newIMP, memory_order_relaxed);
370 _sel.store(newSel, memory_order_relaxed);
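// The write ordering above, restated as a standalone sketch with std::atomic.
// Toy types; the real code also encodes/signs the IMP and uses explicit
// barriers or LDP/STP depending on the architecture.
#if 0
#include <atomic>
#include <cstdint>

struct toy_bucket {
    std::atomic<uintptr_t> imp;
    std::atomic<uintptr_t> sel;
};

// Writer: publish imp first, then sel, so any reader that observes the new
// sel is guaranteed to also observe the new imp.
static void toy_set(toy_bucket &b, uintptr_t newSel, uintptr_t newImp)
{
    b.imp.store(newImp, std::memory_order_relaxed);
    b.sel.store(newSel, std::memory_order_release);
}

// Reader: check sel first, then load imp. A zero or mismatched sel is just
// a cache miss, never a dispatch to the wrong place.
static uintptr_t toy_get(toy_bucket &b, uintptr_t sel)
{
    if (b.sel.load(std::memory_order_acquire) != sel) return 0;
    return b.imp.load(std::memory_order_relaxed);
}
#endif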
376 void cache_t::initializeToEmpty()
378 _bucketsAndMaybeMask.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
379 _originalPreoptCache.store(nullptr, std::memory_order_relaxed);
382 #if CONFIG_USE_PREOPT_CACHES
384 * The shared cache builder will sometimes have prebuilt an IMP cache
385 * for the class and left a `preopt_cache_t` pointer in _originalPreoptCache.
387 * However we have this tension:
388 * - when the class is realized it has to have a cache that can't resolve any
389 * selector until the class is properly initialized so that every
390 * caller falls in the slowpath and synchronizes with the class initializing,
391 * - we need to remember that cache pointer and we have no space for that.
 * The caches are designed so that preopt_cache::bit_one is set to 1,
 * so we "disguise" the pointer so that it looks like a cache of capacity 1
 * where that bit one aliases with where the top bit of a SEL in the bucket_t
 * would live:
 *          +----------------+----------------+
 *          |       IMP      |       SEL      | << a bucket_t
 *          +----------------+----------------+--------------...
 *          preopt_cache_t >>|               1| ...
 *                           +----------------+--------------...
 * The shared cache guarantees that there's valid memory to read under "IMP".
 *
 * This lets us encode the original preoptimized cache pointer during
 * initialization, and we can reconstruct its original address and install
 * it back later.
410 void cache_t::initializeToPreoptCacheInDisguise(const preopt_cache_t *cache)
412 // preopt_cache_t::bit_one is 1 which sets the top bit
413 // and is never set on any valid selector
415 uintptr_t value = (uintptr_t)cache + sizeof(preopt_cache_t) -
416 (bucket_t::offsetOfSel() + sizeof(SEL));
418 _originalPreoptCache.store(nullptr, std::memory_order_relaxed);
419 setBucketsAndMask((bucket_t *)value, 0);
420 _occupied = cache->occupied;
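// The disguise arithmetic as a standalone sketch: shift the pointer so the
// end of the preopt_cache_t header lines up just past the fake bucket's sel,
// then undo the same offsets to recover it. Toy structs; the real offsets
// come from bucket_t and preopt_cache_t.
#if 0
#include <cassert>
#include <cstddef>
#include <cstdint>

struct toy_preopt_cache { uint64_t header; /* entries follow */ };
struct toy_bucket { uintptr_t imp; uintptr_t sel; };

static void toy_disguise_roundtrip(const toy_preopt_cache *cache)
{
    // Forward: what initializeToPreoptCacheInDisguise() computes.
    uintptr_t fake_buckets = (uintptr_t)cache + sizeof(toy_preopt_cache)
                           - (offsetof(toy_bucket, sel) + sizeof(uintptr_t));

    // Reverse: what disguised_preopt_cache() computes.
    uintptr_t recovered = fake_buckets + offsetof(toy_bucket, sel)
                        + sizeof(uintptr_t) - sizeof(toy_preopt_cache);
    assert(recovered == (uintptr_t)cache);
}
#endif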
423 void cache_t::maybeConvertToPreoptimized()
425 const preopt_cache_t *cache = disguised_preopt_cache();
431 if (!cls()->allowsPreoptCaches() ||
432 (cache->has_inlines && !cls()->allowsPreoptInlinedSels())) {
434 _objc_inform("CACHES: %sclass %s: dropping cache (from %s)",
435 cls()->isMetaClass() ? "meta" : "",
436 cls()->nameForLogging(), "setInitialized");
438 return setBucketsAndMask(emptyBuckets(), 0);
441 uintptr_t value = (uintptr_t)&cache->entries;
442 #if __has_feature(ptrauth_calls)
443 value = (uintptr_t)ptrauth_sign_unauthenticated((void *)value,
444 ptrauth_key_process_dependent_data, (uintptr_t)cls());
446 value |= preoptBucketsHashParams(cache) | preoptBucketsMarker;
447 _bucketsAndMaybeMask.store(value, memory_order_relaxed);
448 _occupied = cache->occupied;
451 void cache_t::initializeToEmptyOrPreoptimizedInDisguise()
453 if (os_fastpath(!DisablePreoptCaches)) {
454 if (!objc::dataSegmentsRanges.inSharedCache((uintptr_t)this)) {
455 if (dyld_shared_cache_some_image_overridden()) {
456 // If the system has roots, then we must disable preoptimized
457 // caches completely. If a class in another image has a
458 // superclass in the root, the offset to the superclass will
459 // be wrong. rdar://problem/61601961
460 cls()->setDisallowPreoptCachesRecursively("roots");
462 return initializeToEmpty();
465 auto cache = _originalPreoptCache.load(memory_order_relaxed);
467 return initializeToPreoptCacheInDisguise(cache);
471 return initializeToEmpty();
474 const preopt_cache_t *cache_t::preopt_cache() const
476 auto addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
477 addr &= preoptBucketsMask;
478 #if __has_feature(ptrauth_calls)
479 #if __BUILDING_OBJCDT__
480 addr = (uintptr_t)ptrauth_strip((preopt_cache_entry_t *)addr,
481 ptrauth_key_process_dependent_data);
483 addr = (uintptr_t)ptrauth_auth_data((preopt_cache_entry_t *)addr,
484 ptrauth_key_process_dependent_data, (uintptr_t)cls());
487 return (preopt_cache_t *)(addr - sizeof(preopt_cache_t));
490 const preopt_cache_t *cache_t::disguised_preopt_cache() const
492 bucket_t *b = buckets();
493 if ((intptr_t)b->sel() >= 0) return nil;
495 uintptr_t value = (uintptr_t)b + bucket_t::offsetOfSel() + sizeof(SEL);
496 return (preopt_cache_t *)(value - sizeof(preopt_cache_t));
499 Class cache_t::preoptFallbackClass() const
501 return (Class)((uintptr_t)cls() + preopt_cache()->fallback_class_offset);
504 bool cache_t::isConstantOptimizedCache(bool strict, uintptr_t empty_addr) const
506 uintptr_t addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
507 if (addr & preoptBucketsMarker) {
513 return mask() == 0 && addr != empty_addr;
516 bool cache_t::shouldFlush(SEL sel, IMP imp) const
518 // This test isn't backwards: disguised caches aren't "strict"
519 // constant optimized caches
520 if (!isConstantOptimizedCache(/*strict*/true)) {
521 const preopt_cache_t *cache = disguised_preopt_cache();
523 uintptr_t offs = (uintptr_t)sel - (uintptr_t)@selector(🤯);
524 uintptr_t slot = ((offs >> cache->shift) & cache->mask);
525 auto &entry = cache->entries[slot];
527 return entry.sel_offs == offs &&
528 (uintptr_t)cls() - entry.imp_offs ==
529 (uintptr_t)ptrauth_strip(imp, ptrauth_key_function_pointer);
533 return cache_getImp(cls(), sel) == imp;
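// The preoptimized-cache slot computation used above, as a sketch. Selectors
// in the shared cache are contiguous, so "sel - base" is a small offset and
// hashing is a shift-and-mask of that offset. Toy names; the real shift and
// mask come from the shared cache builder.
#if 0
#include <cstdint>

struct toy_preopt_entry { uint32_t sel_offs; uint32_t imp_offs; };
struct toy_preopt_cache {
    uint32_t shift;
    uint32_t mask;
    toy_preopt_entry entries[1];   // really mask+1 entries
};

static bool toy_preopt_hit(const toy_preopt_cache *c,
                           uintptr_t sel, uintptr_t sel_base)
{
    uintptr_t offs = sel - sel_base;
    uintptr_t slot = (offs >> c->shift) & c->mask;
    return c->entries[slot].sel_offs == offs;
}
#endif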
536 bool cache_t::isConstantOptimizedCacheWithInlinedSels() const
538 return isConstantOptimizedCache(/* strict */true) && preopt_cache()->has_inlines;
540 #endif // CONFIG_USE_PREOPT_CACHES
542 #if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
544 void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
546 // objc_msgSend uses mask and buckets with no locks.
547 // It is safe for objc_msgSend to see new buckets but old mask.
548 // (It will get a cache miss but not overrun the buckets' bounds).
549 // It is unsafe for objc_msgSend to see old buckets and new mask.
550 // Therefore we write new buckets, wait a lot, then write new mask.
551 // objc_msgSend reads mask first, then buckets.
554 // ensure other threads see buckets contents before buckets pointer
557 _bucketsAndMaybeMask.store((uintptr_t)newBuckets, memory_order_relaxed);
559 // ensure other threads see new buckets before new mask
562 _maybeMask.store(newMask, memory_order_relaxed);
#elif __x86_64__ || __i386__
565 // ensure other threads see buckets contents before buckets pointer
566 _bucketsAndMaybeMask.store((uintptr_t)newBuckets, memory_order_release);
568 // ensure other threads see new buckets before new mask
569 _maybeMask.store(newMask, memory_order_release);
572 #error Don't know how to do setBucketsAndMask on this architecture.
576 mask_t cache_t::mask() const
578 return _maybeMask.load(memory_order_relaxed);
581 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 || CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
583 void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
585 uintptr_t buckets = (uintptr_t)newBuckets;
586 uintptr_t mask = (uintptr_t)newMask;
588 ASSERT(buckets <= bucketsMask);
589 ASSERT(mask <= maxMask);
591 _bucketsAndMaybeMask.store(((uintptr_t)newMask << maskShift) | (uintptr_t)newBuckets, memory_order_relaxed);
595 mask_t cache_t::mask() const
597 uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed);
598 return maskAndBuckets >> maskShift;
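// Sketch of the HIGH_16 packing above: mask in the top 16 bits, buckets
// pointer in the rest. maskShift is assumed to be 48 here; the real value
// is defined with the cache_t constants.
#if 0
#include <cassert>
#include <cstdint>

enum : uintptr_t {
    toy_maskShift   = 48,
    toy_bucketsMask = ((uintptr_t)1 << toy_maskShift) - 1,
};

static void toy_high16_roundtrip(uintptr_t buckets, uint16_t mask)
{
    assert(buckets <= toy_bucketsMask);
    uintptr_t packed = ((uintptr_t)mask << toy_maskShift) | buckets;
    assert((packed & toy_bucketsMask) == buckets);   // buckets() decode
    assert((packed >> toy_maskShift) == mask);       // mask() decode
}
#endif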
601 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
603 void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
605 uintptr_t buckets = (uintptr_t)newBuckets;
606 unsigned mask = (unsigned)newMask;
608 ASSERT(buckets == (buckets & bucketsMask));
609 ASSERT(mask <= 0xffff);
611 _bucketsAndMaybeMask.store(buckets | objc::mask16ShiftBits(mask), memory_order_relaxed);
614 ASSERT(this->buckets() == newBuckets);
615 ASSERT(this->mask() == newMask);
618 mask_t cache_t::mask() const
620 uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed);
621 uintptr_t maskShift = (maskAndBuckets & maskMask);
622 return 0xffff >> maskShift;
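// Sketch of the LOW_4 scheme above: because the mask is always 2^n - 1, it
// can be stored in the pointer's low 4 bits as the shift that recreates it
// from 0xffff. This mirrors mask16ShiftBits() as understood here and is an
// assumption, not the runtime's exact helper.
#if 0
#include <cassert>
#include <cstdint>

static unsigned toy_mask16ShiftBits(unsigned mask)
{
    unsigned shift = 0;
    while ((0xffffu >> shift) != mask) shift++;   // mask must be 2^n - 1, n >= 1
    return shift;
}

static void toy_low4_roundtrip(uintptr_t buckets /* 16-byte aligned */)
{
    unsigned mask = 7;                                    // capacity 8
    uintptr_t packed = buckets | toy_mask16ShiftBits(mask);
    assert((0xffffu >> (packed & 0xf)) == mask);          // mask() decode
    assert((packed & ~(uintptr_t)0xf) == buckets);        // buckets() decode
}
#endif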
626 #error Unknown cache mask storage type.
629 struct bucket_t *cache_t::buckets() const
631 uintptr_t addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
632 return (bucket_t *)(addr & bucketsMask);
635 mask_t cache_t::occupied() const
640 void cache_t::incrementOccupied()
645 unsigned cache_t::capacity() const
647 return mask() ? mask()+1 : 0;
650 Class cache_t::cls() const
652 return (Class)((uintptr_t)this - offsetof(objc_class, cache));
655 size_t cache_t::bytesForCapacity(uint32_t cap)
657 return sizeof(bucket_t) * cap;
662 bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap)
664 return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1;
667 bucket_t *cache_t::allocateBuckets(mask_t newCapacity)
669 // Allocate one extra bucket to mark the end of the list.
670 // This can't overflow mask_t because newCapacity is a power of 2.
671 bucket_t *newBuckets = (bucket_t *)calloc(bytesForCapacity(newCapacity), 1);
673 bucket_t *end = endMarker(newBuckets, newCapacity);
676 // End marker's sel is 1 and imp points BEFORE the first bucket.
677 // This saves an instruction in objc_msgSend.
678 end->set<NotAtomic, Raw>(newBuckets, (SEL)(uintptr_t)1, (IMP)(newBuckets - 1), nil);
680 // End marker's sel is 1 and imp points to the first bucket.
681 end->set<NotAtomic, Raw>(newBuckets, (SEL)(uintptr_t)1, (IMP)newBuckets, nil);
684 if (PrintCaches) recordNewCache(newCapacity);
691 bucket_t *cache_t::allocateBuckets(mask_t newCapacity)
693 if (PrintCaches) recordNewCache(newCapacity);
695 return (bucket_t *)calloc(bytesForCapacity(newCapacity), 1);
700 struct bucket_t *cache_t::emptyBuckets()
702 return (bucket_t *)((uintptr_t)&_objc_empty_cache & bucketsMask);
705 bucket_t *cache_t::emptyBucketsForCapacity(mask_t capacity, bool allocate)
707 #if CONFIG_USE_CACHE_LOCK
708 cacheUpdateLock.assertLocked();
710 runtimeLock.assertLocked();
713 size_t bytes = bytesForCapacity(capacity);
// Use _objc_empty_cache if the bucket array is small enough.
716 if (bytes <= EMPTY_BYTES) {
717 return emptyBuckets();
720 // Use shared empty buckets allocated on the heap.
721 static bucket_t **emptyBucketsList = nil;
722 static mask_t emptyBucketsListCount = 0;
724 mask_t index = log2u(capacity);
726 if (index >= emptyBucketsListCount) {
727 if (!allocate) return nil;
729 mask_t newListCount = index + 1;
730 bucket_t *newBuckets = (bucket_t *)calloc(bytes, 1);
731 emptyBucketsList = (bucket_t**)
732 realloc(emptyBucketsList, newListCount * sizeof(bucket_t *));
733 // Share newBuckets for every un-allocated size smaller than index.
734 // The array is therefore always fully populated.
735 for (mask_t i = emptyBucketsListCount; i < newListCount; i++) {
736 emptyBucketsList[i] = newBuckets;
738 emptyBucketsListCount = newListCount;
741 _objc_inform("CACHES: new empty buckets at %p (capacity %zu)",
742 newBuckets, (size_t)capacity);
746 return emptyBucketsList[index];
749 bool cache_t::isConstantEmptyCache() const
    return occupied() == 0  &&
        buckets() == emptyBucketsForCapacity(capacity(), false);
756 bool cache_t::canBeFreed() const
758 return !isConstantEmptyCache() && !isConstantOptimizedCache();
762 void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld)
764 bucket_t *oldBuckets = buckets();
765 bucket_t *newBuckets = allocateBuckets(newCapacity);
767 // Cache's old contents are not propagated.
768 // This is thought to save cache memory at the cost of extra cache fills.
769 // fixme re-measure this
771 ASSERT(newCapacity > 0);
772 ASSERT((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);
774 setBucketsAndMask(newBuckets, newCapacity - 1);
777 collect_free(oldBuckets, oldCapacity);
782 void cache_t::bad_cache(id receiver, SEL sel)
784 // Log in separate steps in case the logging itself causes a crash.
785 _objc_inform_now_and_on_crash
786 ("Method cache corrupted. This may be a message to an "
787 "invalid object, or a memory error somewhere else.");
788 #if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
789 bucket_t *b = buckets();
790 _objc_inform_now_and_on_crash
791 ("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
792 "mask 0x%x, occupied 0x%x",
793 receiver ? "receiver" : "unused", receiver,
795 _maybeMask.load(memory_order_relaxed),
797 _objc_inform_now_and_on_crash
798 ("%s %zu bytes, buckets %zu bytes",
799 receiver ? "receiver" : "unused", malloc_size(receiver),
801 #elif (CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 || \
802 CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS || \
803 CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4)
804 uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed);
805 _objc_inform_now_and_on_crash
806 ("%s %p, SEL %p, isa %p, cache %p, buckets and mask 0x%lx, "
808 receiver ? "receiver" : "unused", receiver,
809 sel, cls(), this, maskAndBuckets, _occupied);
810 _objc_inform_now_and_on_crash
811 ("%s %zu bytes, buckets %zu bytes",
812 receiver ? "receiver" : "unused", malloc_size(receiver),
813 malloc_size(buckets()));
815 #error Unknown cache mask storage type.
817 _objc_inform_now_and_on_crash
818 ("selector '%s'", sel_getName(sel));
819 _objc_inform_now_and_on_crash
820 ("isa '%s'", cls()->nameForLogging());
822 ("Method cache corrupted. This may be a message to an "
823 "invalid object, or a memory error somewhere else.");
826 void cache_t::insert(SEL sel, IMP imp, id receiver)
828 runtimeLock.assertLocked();
830 // Never cache before +initialize is done
831 if (slowpath(!cls()->isInitialized())) {
835 if (isConstantOptimizedCache()) {
836 _objc_fatal("cache_t::insert() called with a preoptimized cache for %s",
837 cls()->nameForLogging());
840 #if DEBUG_TASK_THREADS
841 return _collecting_in_critical();
843 #if CONFIG_USE_CACHE_LOCK
844 mutex_locker_t lock(cacheUpdateLock);
847 ASSERT(sel != 0 && cls()->isInitialized());
// Use the cache as-is until we exceed our expected fill ratio.
850 mask_t newOccupied = occupied() + 1;
851 unsigned oldCapacity = capacity(), capacity = oldCapacity;
852 if (slowpath(isConstantEmptyCache())) {
853 // Cache is read-only. Replace it.
854 if (!capacity) capacity = INIT_CACHE_SIZE;
855 reallocate(oldCapacity, capacity, /* freeOld */false);
857 else if (fastpath(newOccupied + CACHE_END_MARKER <= cache_fill_ratio(capacity))) {
858 // Cache is less than 3/4 or 7/8 full. Use it as-is.
860 #if CACHE_ALLOW_FULL_UTILIZATION
861 else if (capacity <= FULL_UTILIZATION_CACHE_SIZE && newOccupied + CACHE_END_MARKER <= capacity) {
862 // Allow 100% cache utilization for small buckets. Use it as-is.
866 capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;
867 if (capacity > MAX_CACHE_SIZE) {
868 capacity = MAX_CACHE_SIZE;
870 reallocate(oldCapacity, capacity, true);
873 bucket_t *b = buckets();
874 mask_t m = capacity - 1;
875 mask_t begin = cache_hash(sel, m);
878 // Scan for the first unused slot and insert there.
879 // There is guaranteed to be an empty slot.
881 if (fastpath(b[i].sel() == 0)) {
883 b[i].set<Atomic, Encoded>(b, sel, imp, cls());
886 if (b[i].sel() == sel) {
887 // The entry was added to the cache by some other thread
888 // before we grabbed the cacheUpdateLock.
891 } while (fastpath((i = cache_next(i, m)) != begin));
893 bad_cache(receiver, (SEL)sel);
894 #endif // !DEBUG_TASK_THREADS
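// The growth policy in insert(), reduced to a sketch with toy constants
// (the real INIT_CACHE_SIZE, MAX_CACHE_SIZE and fill ratios are defined
// earlier in this file; the end marker is ignored here).
#if 0
#include <cstdint>
typedef uint32_t mask_t;

enum {
    TOY_INIT_CACHE_SIZE             = 8,
    TOY_MAX_CACHE_SIZE              = 1 << 16,
    TOY_FULL_UTILIZATION_CACHE_SIZE = 8,
};

// Decide the capacity to use when one more entry must be inserted. Growth
// always doubles (old contents are discarded, not rehashed) and is clamped
// at the maximum size.
static mask_t toy_capacity_for_insert(mask_t capacity, mask_t occupied)
{
    mask_t newOccupied = occupied + 1;
    if (capacity == 0) return TOY_INIT_CACHE_SIZE;            // empty read-only cache
    if (newOccupied <= capacity * 7 / 8) return capacity;     // under the fill ratio
    if (capacity <= TOY_FULL_UTILIZATION_CACHE_SIZE &&
        newOccupied <= capacity) return capacity;             // small caches: allow 100%
    capacity *= 2;                                            // grow by doubling
    return capacity > TOY_MAX_CACHE_SIZE ? TOY_MAX_CACHE_SIZE : capacity;
}
#endif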
897 void cache_t::copyCacheNolock(objc_imp_cache_entry *buffer, int len)
899 #if CONFIG_USE_CACHE_LOCK
900 cacheUpdateLock.assertLocked();
902 runtimeLock.assertLocked();
906 #if CONFIG_USE_PREOPT_CACHES
907 if (isConstantOptimizedCache()) {
908 auto cache = preopt_cache();
909 auto mask = cache->mask;
910 uintptr_t sel_base = objc_opt_offsets[OBJC_OPT_METHODNAME_START];
911 uintptr_t imp_base = (uintptr_t)&cache->entries;
913 for (uintptr_t index = 0; index <= mask && wpos < len; index++) {
914 auto &ent = cache->entries[index];
916 buffer[wpos].sel = (SEL)(sel_base + ent.sel_offs);
917 buffer[wpos].imp = (IMP)(imp_base - ent.imp_offs);
925 bucket_t *buckets = this->buckets();
926 uintptr_t count = capacity();
928 for (uintptr_t index = 0; index < count && wpos < len; index++) {
929 if (buckets[index].sel()) {
930 buffer[wpos].imp = buckets[index].imp(buckets, cls());
931 buffer[wpos].sel = buckets[index].sel();
938 // Reset this entire cache to the uncached lookup by reallocating it.
939 // This must not shrink the cache - that breaks the lock-free scheme.
940 void cache_t::eraseNolock(const char *func)
942 #if CONFIG_USE_CACHE_LOCK
943 cacheUpdateLock.assertLocked();
945 runtimeLock.assertLocked();
948 if (isConstantOptimizedCache()) {
951 _objc_inform("CACHES: %sclass %s: dropping and disallowing preopt cache (from %s)",
952 c->isMetaClass() ? "meta" : "",
953 c->nameForLogging(), func);
955 setBucketsAndMask(emptyBuckets(), 0);
956 c->setDisallowPreoptCaches();
957 } else if (occupied() > 0) {
958 auto capacity = this->capacity();
959 auto oldBuckets = buckets();
960 auto buckets = emptyBucketsForCapacity(capacity);
962 setBucketsAndMask(buckets, capacity - 1); // also clears occupied
963 collect_free(oldBuckets, capacity);
968 void cache_t::destroy()
970 #if CONFIG_USE_CACHE_LOCK
971 mutex_locker_t lock(cacheUpdateLock);
973 runtimeLock.assertLocked();
976 if (PrintCaches) recordDeadCache(capacity());
/***********************************************************************
* Cache collection.
**********************************************************************/
988 // A sentinel (magic value) to report bad thread_get_state status.
989 // Must not be a valid PC.
990 // Must not be zero - thread_get_state() on a new thread returns PC == 0.
991 #define PC_SENTINEL 1
993 static uintptr_t _get_pc_for_thread(thread_t thread)
994 #if defined(__i386__)
996 i386_thread_state_t state;
997 unsigned int count = i386_THREAD_STATE_COUNT;
998 kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
999 return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
1001 #elif defined(__x86_64__)
1003 x86_thread_state64_t state;
1004 unsigned int count = x86_THREAD_STATE64_COUNT;
1005 kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
1006 return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
1008 #elif defined(__arm__)
1010 arm_thread_state_t state;
1011 unsigned int count = ARM_THREAD_STATE_COUNT;
1012 kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
1013 return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
1015 #elif defined(__arm64__)
1017 arm_thread_state64_t state;
1018 unsigned int count = ARM_THREAD_STATE64_COUNT;
1019 kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count);
1020 return (okay == KERN_SUCCESS) ? (uintptr_t)arm_thread_state64_get_pc(state) : PC_SENTINEL;
1024 #error _get_pc_for_thread () not implemented for this architecture
1030 /***********************************************************************
1031 * _collecting_in_critical.
1032 * Returns TRUE if some thread is currently executing a cache-reading
1033 * function. Collection of cache garbage is not allowed when a cache-
1034 * reading function is in progress because it might still be using
1035 * the garbage memory.
1036 **********************************************************************/
1037 #if HAVE_TASK_RESTARTABLE_RANGES
1038 #include <kern/restartable.h>
1042 unsigned short length;
1043 unsigned short recovery_offs;
1045 } task_restartable_range_t;
1048 extern "C" task_restartable_range_t objc_restartableRanges[];
1050 #if HAVE_TASK_RESTARTABLE_RANGES
1051 static bool shouldUseRestartableRanges = true;
1054 void cache_t::init()
1056 #if HAVE_TASK_RESTARTABLE_RANGES
1057 mach_msg_type_number_t count = 0;
    while (objc_restartableRanges[count].location) {
        count++;
    }
1064 kr = task_restartable_ranges_register(mach_task_self(),
1065 objc_restartableRanges, count);
1066 if (kr == KERN_SUCCESS) return;
1067 _objc_fatal("task_restartable_ranges_register failed (result 0x%x: %s)",
1068 kr, mach_error_string(kr));
1069 #endif // HAVE_TASK_RESTARTABLE_RANGES
1072 static int _collecting_in_critical(void)
1076 #elif HAVE_TASK_RESTARTABLE_RANGES
1077 // Only use restartable ranges if we registered them earlier.
1078 if (shouldUseRestartableRanges) {
1079 kern_return_t kr = task_restartable_ranges_synchronize(mach_task_self());
1080 if (kr == KERN_SUCCESS) return FALSE;
1081 _objc_fatal("task_restartable_ranges_synchronize failed (result 0x%x: %s)",
1082 kr, mach_error_string(kr));
1084 #endif // !HAVE_TASK_RESTARTABLE_RANGES
1086 // Fallthrough if we didn't use restartable ranges.
1088 thread_act_port_array_t threads;
1094 mach_port_t mythread = pthread_mach_thread_np(objc_thread_self());
1096 // Get a list of all the threads in the current task
1097 #if !DEBUG_TASK_THREADS
1098 ret = task_threads(mach_task_self(), &threads, &number);
1100 ret = objc_task_threads(mach_task_self(), &threads, &number);
1103 if (ret != KERN_SUCCESS) {
1104 // See DEBUG_TASK_THREADS below to help debug this.
1105 _objc_fatal("task_threads failed (result 0x%x)\n", ret);
1108 // Check whether any thread is in the cache lookup code
1110 for (count = 0; count < number; count++)
1115 // Don't bother checking ourselves
1116 if (threads[count] == mythread)
1119 // Find out where thread is executing
1121 if (oah_is_current_process_translated()) {
1122 kern_return_t ret = objc_thread_get_rip(threads[count], (uint64_t*)&pc);
1123 if (ret != KERN_SUCCESS) {
1127 pc = _get_pc_for_thread (threads[count]);
1130 pc = _get_pc_for_thread (threads[count]);
// Check for bad status, and if so, assume the worst (can't collect)
1134 if (pc == PC_SENTINEL)
1140 // Check whether it is in the cache lookup code
1141 for (region = 0; objc_restartableRanges[region].location != 0; region++)
1143 uint64_t loc = objc_restartableRanges[region].location;
1145 (pc - loc < (uint64_t)objc_restartableRanges[region].length))
1154 // Deallocate the port rights for the threads
1155 for (count = 0; count < number; count++) {
1156 mach_port_deallocate(mach_task_self (), threads[count]);
1159 // Deallocate the thread list
1160 vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);
1162 // Return our finding
1167 /***********************************************************************
1168 * _garbage_make_room. Ensure that there is enough room for at least
1169 * one more ref in the garbage.
1170 **********************************************************************/
1172 // amount of memory represented by all refs in the garbage
1173 static size_t garbage_byte_size = 0;
1175 // do not empty the garbage until garbage_byte_size gets at least this big
1176 static size_t garbage_threshold = 32*1024;
1178 // table of refs to free
1179 static bucket_t **garbage_refs = 0;
1181 // current number of refs in garbage_refs
1182 static size_t garbage_count = 0;
1184 // capacity of current garbage_refs
1185 static size_t garbage_max = 0;
1187 // capacity of initial garbage_refs
1189 INIT_GARBAGE_COUNT = 128
1192 static void _garbage_make_room(void)
1194 static int first = 1;
1196 // Create the collection table the first time it is needed
1200 garbage_refs = (bucket_t**)
1201 malloc(INIT_GARBAGE_COUNT * sizeof(void *));
1202 garbage_max = INIT_GARBAGE_COUNT;
1205 // Double the table if it is full
1206 else if (garbage_count == garbage_max)
1208 garbage_refs = (bucket_t**)
1209 realloc(garbage_refs, garbage_max * 2 * sizeof(void *));
1215 /***********************************************************************
* cache_t::collect_free. Add the specified malloc'd memory to the list
* of garbage to free at some later point.
* The capacity is used for the collection threshold. It does not have to
* correspond precisely to the block's size.
1220 * Cache locks: cacheUpdateLock must be held by the caller.
1221 **********************************************************************/
1222 void cache_t::collect_free(bucket_t *data, mask_t capacity)
1224 #if CONFIG_USE_CACHE_LOCK
1225 cacheUpdateLock.assertLocked();
1227 runtimeLock.assertLocked();
1230 if (PrintCaches) recordDeadCache(capacity);
1232 _garbage_make_room ();
1233 garbage_byte_size += cache_t::bytesForCapacity(capacity);
1234 garbage_refs[garbage_count++] = data;
1235 cache_t::collectNolock(false);
1239 /***********************************************************************
* cache_t::collectNolock. Try to free accumulated dead caches.
1241 * collectALot tries harder to free memory.
1242 * Cache locks: cacheUpdateLock must be held by the caller.
1243 **********************************************************************/
1244 void cache_t::collectNolock(bool collectALot)
1246 #if CONFIG_USE_CACHE_LOCK
1247 cacheUpdateLock.assertLocked();
1249 runtimeLock.assertLocked();
1252 // Done if the garbage is not full
1253 if (garbage_byte_size < garbage_threshold && !collectALot) {
1257 // Synchronize collection with objc_msgSend and other cache readers
1259 if (_collecting_in_critical ()) {
1260 // objc_msgSend (or other cache reader) is currently looking in
1261 // the cache and might still be using some garbage.
1263 _objc_inform ("CACHES: not collecting; "
1264 "objc_msgSend in progress");
1271 while (_collecting_in_critical())
1275 // No cache readers in progress - garbage is now deletable
1279 cache_collections++;
1280 _objc_inform ("CACHES: COLLECTING %zu bytes (%zu allocations, %zu collections)", garbage_byte_size, cache_allocations, cache_collections);
1283 // Dispose all refs now in the garbage
1284 // Erase each entry so debugging tools don't see stale pointers.
1285 while (garbage_count--) {
1286 auto dead = garbage_refs[garbage_count];
1287 garbage_refs[garbage_count] = nil;
1291 // Clear the garbage count and total size indicator
1293 garbage_byte_size = 0;
1297 size_t total_count = 0;
1298 size_t total_size = 0;
1300 for (i = 0; i < countof(cache_counts); i++) {
1301 int count = cache_counts[i];
1303 size_t size = count * slots * sizeof(bucket_t);
1305 if (!count) continue;
1307 _objc_inform("CACHES: %4d slots: %4d caches, %6zu bytes",
1308 slots, count, size);
1310 total_count += count;
1314 _objc_inform("CACHES: total: %4zu caches, %6zu bytes",
1315 total_count, total_size);
1320 /***********************************************************************
1322 * Replacement for task_threads(). Define DEBUG_TASK_THREADS to debug
1323 * crashes when task_threads() is failing.
1325 * A failure in task_threads() usually means somebody has botched their
1326 * Mach or MIG traffic. For example, somebody's error handling was wrong
1327 * and they left a message queued on the MIG reply port for task_threads()
1330 * The code below is a modified version of task_threads(). It logs
1331 * the msgh_id of the reply message. The msgh_id can identify the sender
1332 * of the message, which can help pinpoint the faulty code.
1333 * DEBUG_TASK_THREADS also calls collecting_in_critical() during every
1334 * message dispatch, which can increase reproducibility of bugs.
1336 * This code can be regenerated by running
1337 * `mig /usr/include/mach/task.defs`.
1338 **********************************************************************/
1339 #if DEBUG_TASK_THREADS
1341 #include <mach/mach.h>
1342 #include <mach/message.h>
1343 #include <mach/mig.h>
1345 #define __MIG_check__Reply__task_subsystem__ 1
1346 #define mig_internal static inline
1347 #define __DeclareSendRpc(a, b)
1348 #define __BeforeSendRpc(a, b)
1349 #define __AfterSendRpc(a, b)
1350 #define msgh_request_port msgh_remote_port
1351 #define msgh_reply_port msgh_local_port
1353 #ifndef __MachMsgErrorWithTimeout
1354 #define __MachMsgErrorWithTimeout(_R_) { \
1356 case MACH_SEND_INVALID_DATA: \
1357 case MACH_SEND_INVALID_DEST: \
1358 case MACH_SEND_INVALID_HEADER: \
1359 mig_put_reply_port(InP->Head.msgh_reply_port); \
1361 case MACH_SEND_TIMED_OUT: \
1362 case MACH_RCV_TIMED_OUT: \
1364 mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
1367 #endif /* __MachMsgErrorWithTimeout */
1369 #ifndef __MachMsgErrorWithoutTimeout
1370 #define __MachMsgErrorWithoutTimeout(_R_) { \
1372 case MACH_SEND_INVALID_DATA: \
1373 case MACH_SEND_INVALID_DEST: \
1374 case MACH_SEND_INVALID_HEADER: \
1375 mig_put_reply_port(InP->Head.msgh_reply_port); \
1378 mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
1381 #endif /* __MachMsgErrorWithoutTimeout */
1384 #if ( __MigTypeCheck )
1385 #if __MIG_check__Reply__task_subsystem__
1386 #if !defined(__MIG_check__Reply__task_threads_t__defined)
1387 #define __MIG_check__Reply__task_threads_t__defined
1389 mig_internal kern_return_t __MIG_check__Reply__task_threads_t(__Reply__task_threads_t *Out0P)
1392 typedef __Reply__task_threads_t __Reply;
1393 boolean_t msgh_simple;
1395 unsigned int msgh_size;
1396 #endif /* __MigTypeCheck */
1397 if (Out0P->Head.msgh_id != 3502) {
1398 if (Out0P->Head.msgh_id == MACH_NOTIFY_SEND_ONCE)
1399 { return MIG_SERVER_DIED; }
1401 { return MIG_REPLY_MISMATCH; }
1404 msgh_simple = !(Out0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX);
1406 msgh_size = Out0P->Head.msgh_size;
1408 if ((msgh_simple || Out0P->msgh_body.msgh_descriptor_count != 1 ||
1409 msgh_size != (mach_msg_size_t)sizeof(__Reply)) &&
1410 (!msgh_simple || msgh_size != (mach_msg_size_t)sizeof(mig_reply_error_t) ||
1411 ((mig_reply_error_t *)Out0P)->RetCode == KERN_SUCCESS))
1412 { return MIG_TYPE_ERROR ; }
1413 #endif /* __MigTypeCheck */
1416 return ((mig_reply_error_t *)Out0P)->RetCode;
1420 if (Out0P->act_list.type != MACH_MSG_OOL_PORTS_DESCRIPTOR ||
1421 Out0P->act_list.disposition != 17) {
1422 return MIG_TYPE_ERROR;
1424 #endif /* __MigTypeCheck */
1426 return MACH_MSG_SUCCESS;
1428 #endif /* !defined(__MIG_check__Reply__task_threads_t__defined) */
1429 #endif /* __MIG_check__Reply__task_subsystem__ */
1430 #endif /* ( __MigTypeCheck ) */
1433 /* Routine task_threads */
1434 static kern_return_t objc_task_threads
1437 thread_act_array_t *act_list,
1438 mach_msg_type_number_t *act_listCnt
1442 #ifdef __MigPackStructs
1446 mach_msg_header_t Head;
1448 #ifdef __MigPackStructs
1452 #ifdef __MigPackStructs
1456 mach_msg_header_t Head;
1457 /* start of the kernel processed data */
1458 mach_msg_body_t msgh_body;
1459 mach_msg_ool_ports_descriptor_t act_list;
1460 /* end of the kernel processed data */
1462 mach_msg_type_number_t act_listCnt;
1463 mach_msg_trailer_t trailer;
1465 #ifdef __MigPackStructs
1469 #ifdef __MigPackStructs
1473 mach_msg_header_t Head;
1474 /* start of the kernel processed data */
1475 mach_msg_body_t msgh_body;
1476 mach_msg_ool_ports_descriptor_t act_list;
1477 /* end of the kernel processed data */
1479 mach_msg_type_number_t act_listCnt;
1481 #ifdef __MigPackStructs
1486 * mach_msg_header_t Head;
1488 * kern_return_t RetCode;
1489 * } mig_reply_error_t;
1497 Request *InP = &Mess.In;
1498 Reply *Out0P = &Mess.Out;
1500 mach_msg_return_t msg_result;
1502 #ifdef __MIG_check__Reply__task_threads_t__defined
1503 kern_return_t check_result;
1504 #endif /* __MIG_check__Reply__task_threads_t__defined */
1506 __DeclareSendRpc(3402, "task_threads")
1508 InP->Head.msgh_bits =
1509 MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
1510 /* msgh_size passed as argument */
1511 InP->Head.msgh_request_port = target_task;
1512 InP->Head.msgh_reply_port = mig_get_reply_port();
1513 InP->Head.msgh_id = 3402;
1515 __BeforeSendRpc(3402, "task_threads")
1516 msg_result = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_reply_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1517 __AfterSendRpc(3402, "task_threads")
1518 if (msg_result != MACH_MSG_SUCCESS) {
1519 _objc_inform("task_threads received unexpected reply msgh_id 0x%zx",
1520 (size_t)Out0P->Head.msgh_id);
1521 __MachMsgErrorWithoutTimeout(msg_result);
1522 { return msg_result; }
1526 #if defined(__MIG_check__Reply__task_threads_t__defined)
1527 check_result = __MIG_check__Reply__task_threads_t((__Reply__task_threads_t *)Out0P);
1528 if (check_result != MACH_MSG_SUCCESS)
1529 { return check_result; }
1530 #endif /* defined(__MIG_check__Reply__task_threads_t__defined) */
1532 *act_list = (thread_act_array_t)(Out0P->act_list.address);
1533 *act_listCnt = Out0P->act_listCnt;
1535 return KERN_SUCCESS;
1538 // DEBUG_TASK_THREADS
1541 OBJC_EXPORT bucket_t * objc_cache_buckets(const cache_t * cache) {
1542 return cache->buckets();
1545 #if CONFIG_USE_PREOPT_CACHES
1547 OBJC_EXPORT const preopt_cache_t * _Nonnull objc_cache_preoptCache(const cache_t * _Nonnull cache) {
1548 return cache->preopt_cache();
1551 OBJC_EXPORT bool objc_cache_isConstantOptimizedCache(const cache_t * _Nonnull cache, bool strict, uintptr_t empty_addr) {
1552 return cache->isConstantOptimizedCache(strict, empty_addr);
1555 OBJC_EXPORT unsigned objc_cache_preoptCapacity(const cache_t * _Nonnull cache) {
1556 return cache->preopt_cache()->capacity();
1559 OBJC_EXPORT Class _Nonnull objc_cache_preoptFallbackClass(const cache_t * _Nonnull cache) {
1560 return cache->preoptFallbackClass();
1565 OBJC_EXPORT size_t objc_cache_bytesForCapacity(uint32_t cap) {
1566 return cache_t::bytesForCapacity(cap);
1569 OBJC_EXPORT uint32_t objc_cache_occupied(const cache_t * _Nonnull cache) {
1570 return cache->occupied();
1573 OBJC_EXPORT unsigned objc_cache_capacity(const struct cache_t * _Nonnull cache) {
1574 return cache->capacity();