/*
 * Copyright (c) 1999-2007 Apple Inc.  All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
* Method cache management
* Cache garbage collection
* Cache instrumentation
* Dedicated allocator for large caches
**********************************************************************/
/***********************************************************************
* Method cache locking (GrP 2001-1-14)
*
* For speed, objc_msgSend does not acquire any locks when it reads
* method caches. Instead, all cache changes are performed so that any
* objc_msgSend running concurrently with the cache mutator will not
* crash or hang or get an incorrect result from the cache.
*
* When cache memory becomes unused (e.g. the old cache after cache
* expansion), it is not immediately freed, because a concurrent
* objc_msgSend could still be using it. Instead, the memory is
* disconnected from the data structures and placed on a garbage list.
* The memory is now only accessible to instances of objc_msgSend that
* were running when the memory was disconnected; any further calls to
* objc_msgSend will not see the garbage memory because the other data
* structures don't point to it anymore. The collecting_in_critical
* function checks the PC of all threads and returns FALSE when all threads
* are found to be outside objc_msgSend. This means any call to objc_msgSend
* that could have had access to the garbage has finished or moved past the
* cache lookup stage, so it is safe to free the memory.
*
* All functions that modify cache data or structures must acquire the
* cacheUpdateLock to prevent interference from concurrent modifications.
* The function that frees cache garbage must acquire the cacheUpdateLock
* and use collecting_in_critical() to flush out cache readers.
* The cacheUpdateLock is also used to protect the custom allocator used
* for large method cache blocks.
*
* Cache readers (PC-checked by collecting_in_critical())
* objc_msgSend*
* cache_getImp
*
* Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
* cache_fill          (acquires lock)
* cache_expand        (only called from cache_fill)
* cache_create        (only called from cache_expand)
* bcopy               (only called from instrumented cache_expand)
* flush_caches        (acquires lock)
* cache_flush         (only called from cache_fill and flush_caches)
* cache_collect_free  (only called from cache_expand and cache_flush)
*
* UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
* _class_printMethodCaches
* _class_printDuplicateCacheEntries
* _class_printMethodCacheStatistics
*
***********************************************************************/
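// A compressed sketch of the writer-side sequence described above
// (illustrative only; the real code paths are cache_t::reallocate(),
// cache_collect_free(), and cache_collect() later in this file):
//
//     mutex_locker_t lock(cacheUpdateLock);           // writers serialize
//     bucket_t *old = cache->buckets();               // concurrent readers may hold this
//     cache->setBucketsAndMask(newBuckets, newMask);  // publish the replacement
//     cache_collect_free(old, oldCapacity);           // retire old memory to the garbage list
//     cache_collect(false);                           // free garbage only once no thread's PC
//                                                     // is inside the cache-reading code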
#include "objc-private.h"
#include "objc-cache.h"
/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
enum {
    INIT_CACHE_SIZE_LOG2 = 2,
    INIT_CACHE_SIZE      = (1 << INIT_CACHE_SIZE_LOG2)
};
static void cache_collect_free(struct bucket_t *data, mask_t capacity);
static int _collecting_in_critical(void);
static void _garbage_make_room(void);
/***********************************************************************
* Cache statistics for OBJC_PRINT_CACHE_SETUP
**********************************************************************/
static unsigned int cache_counts[16];
static size_t cache_allocations;
static size_t cache_collections;

static void recordNewCache(mask_t capacity)
{
    size_t bucket = log2u(capacity);
    if (bucket < countof(cache_counts)) {
        cache_counts[bucket]++;
    }
    cache_allocations++;
}

static void recordDeadCache(mask_t capacity)
{
    size_t bucket = log2u(capacity);
    if (bucket < countof(cache_counts)) {
        cache_counts[bucket]--;
    }
}
/***********************************************************************
* Pointers used by compiled class objects
* These use asm to avoid conflicts with the compiler's internal declarations
**********************************************************************/

// EMPTY_BYTES includes space for a cache end marker bucket.
// This end marker doesn't actually have the wrap-around pointer
// because cache scans always find an empty bucket before they might wrap.
// 1024 buckets is fairly common.
#if DEBUG
    // Use a smaller size to exercise heap-allocated empty caches.
#   define EMPTY_BYTES ((8+1)*16)
#else
#   define EMPTY_BYTES ((1024+1)*16)
#endif
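// Worked example, assuming 16-byte buckets (two pointer-sized fields on a
// 64-bit system): the release value (1024+1)*16 == 16400 bytes holds 1024
// buckets plus the end marker, so any cache of up to 1024 buckets can point
// at the shared _objc_empty_cache instead of a heap-allocated empty bucket
// array (see emptyBucketsForCapacity() below).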
#define stringize(x) #x
#define stringize2(x) stringize(x)

// "cache" is cache->buckets; "vtable" is cache->mask/occupied
// hack to avoid conflicts with compiler's internal declaration
asm("\n .section __TEXT,__const"
    "\n .globl __objc_empty_vtable"
    "\n .set __objc_empty_vtable, 0"
    "\n .globl __objc_empty_cache"
    "\n .align 3"
    "\n __objc_empty_cache: .space " stringize2(EMPTY_BYTES)
    );
#if __arm__  ||  __x86_64__  ||  __i386__
// objc_msgSend has few registers available.
// Cache scan increments and wraps at special end-marking bucket.
#define CACHE_END_MARKER 1
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return (i+1) & mask;
}

#elif __arm64__
// objc_msgSend has lots of registers available.
// Cache scan decrements. No end marker needed.
#define CACHE_END_MARKER 0
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return i ? i-1 : mask;
}

#else
#error unknown architecture
#endif
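// Illustrative sanity check, not part of the runtime: the helper name
// example_probe_covers_all_buckets is hypothetical. It demonstrates that
// whichever direction cache_next() scans, a probe starting anywhere visits
// every bucket exactly once before wrapping back to its starting point.
static bool __unused example_probe_covers_all_buckets(void)
{
    const mask_t mask = 3;                        // capacity 4, buckets 0..3
    bool seen[4] = {false, false, false, false};
    mask_t begin = 2;                             // arbitrary starting bucket
    mask_t i = begin;
    do {
        seen[i] = true;
    } while ((i = cache_next(i, mask)) != begin);
    return seen[0] && seen[1] && seen[2] && seen[3];
}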
// copied from dispatch_atomic_maximally_synchronizing_barrier
// fixme verify that this barrier hack does in fact work here
#if __x86_64__
#define mega_barrier() \
    do { unsigned long _clbr; __asm__ __volatile__( \
        "cpuid" \
        : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \
                                    ); } while(0)

#elif __i386__
#define mega_barrier() \
    do { unsigned long _clbr; __asm__ __volatile__( \
        "cpuid" \
        : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \
                                    ); } while(0)

#elif __arm__  ||  __arm64__
#define mega_barrier() \
    __asm__ __volatile__( \
        "dsb    ish" \
        : : : "memory")

#else
#error unknown architecture
#endif
#if __arm64__

// Pointer-size register prefix for inline asm
#if __LP64__
#   define p "x"  // true arm64
#else
#   define p "w"  // arm64_32
#endif

// Use atomic double-word instructions to update cache entries.
// This requires cache buckets not cross cache line boundaries.
static ALWAYS_INLINE void
stp(uintptr_t onep, uintptr_t twop, void *destp)
{
    __asm__ ("stp %" p "[one], %" p "[two], [%x[dest]]"
             : "=m" (((uintptr_t *)(destp))[0]),
               "=m" (((uintptr_t *)(destp))[1])
             : [one] "r" (onep), [two] "r" (twop), [dest] "r" (destp));
}

static ALWAYS_INLINE void __unused
ldp(uintptr_t& onep, uintptr_t& twop, const void *srcp)
{
    __asm__ ("ldp %" p "[one], %" p "[two], [%x[src]]"
             : [one] "=r" (onep), [two] "=r" (twop)
             : "m" (((const uintptr_t *)(srcp))[0]),
               "m" (((const uintptr_t *)(srcp))[1]),
               [src] "r" (srcp));
}

#undef p
#endif
// Class points to cache. SEL is key. Cache buckets store SEL+IMP.
// Caches are never built in the dyld shared cache.

static inline mask_t cache_hash(cache_key_t key, mask_t mask)
{
    return (mask_t)(key & mask);
}

cache_t *getCache(Class cls)
{
    assert(cls);
    return &cls->cache;
}

cache_key_t getKey(SEL sel)
{
    assert(sel);
    return (cache_key_t)sel;
}
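// Illustrative sketch, not used by the runtime: the helper name
// example_cache_lookup is hypothetical. It shows how the pieces above
// combine into a lookup. objc_msgSend does the same probe in assembly;
// cache_t::find() below is the closest C analogue.
static IMP __unused example_cache_lookup(Class cls, SEL sel)
{
    cache_t *cache = getCache(cls);
    cache_key_t key = getKey(sel);
    bucket_t *b = cache->buckets();
    mask_t m = cache->mask();
    mask_t begin = cache_hash(key, m);
    mask_t i = begin;
    do {
        if (b[i].key() == key) return b[i].imp();  // hit
        if (b[i].key() == 0) return nullptr;       // miss: empty slot ends the scan
    } while ((i = cache_next(i, m)) != begin);
    return nullptr;                                // full scan, no match
}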
#if __arm64__

void bucket_t::set(cache_key_t newKey, IMP newImp)
{
    assert(_key == 0  ||  _key == newKey);

    static_assert(offsetof(bucket_t,_imp) == 0 && offsetof(bucket_t,_key) == sizeof(void *),
                  "bucket_t doesn't match arm64 bucket_t::set()");

#if __has_feature(ptrauth_calls)
    // Authenticate as a C function pointer and re-sign for the cache bucket.
    uintptr_t signedImp = _imp.prepareWrite(newImp);
#else
    // No function pointer signing.
    uintptr_t signedImp = (uintptr_t)newImp;
#endif

    // Write to the bucket.
    // LDP/STP guarantees that all observers get
    // either imp/key or newImp/newKey
    stp(signedImp, newKey, this);
}

#else

void bucket_t::set(cache_key_t newKey, IMP newImp)
{
    assert(_key == 0  ||  _key == newKey);

    // objc_msgSend uses key and imp with no locks.
    // It is safe for objc_msgSend to see new imp but NULL key
    // (It will get a cache miss but not dispatch to the wrong place.)
    // It is unsafe for objc_msgSend to see old imp and new key.
    // Therefore we write new imp, wait a lot, then write new key.

    _imp = newImp;

    if (_key != newKey) {
        mega_barrier();
        _key = newKey;
    }
}

#endif
void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
    // objc_msgSend uses mask and buckets with no locks.
    // It is safe for objc_msgSend to see new buckets but old mask.
    // (It will get a cache miss but not overrun the buckets' bounds).
    // It is unsafe for objc_msgSend to see old buckets and new mask.
    // Therefore we write new buckets, wait a lot, then write new mask.
    // objc_msgSend reads mask first, then buckets.

    // ensure other threads see buckets contents before buckets pointer
    mega_barrier();

    _buckets = newBuckets;

    // ensure other threads see new buckets before new mask
    mega_barrier();

    _mask = newMask;
    _occupied = 0;
}


struct bucket_t *cache_t::buckets() { return _buckets; }
mask_t cache_t::mask() { return _mask; }
mask_t cache_t::occupied() { return _occupied; }
void cache_t::incrementOccupied() { _occupied++; }

void cache_t::initializeToEmpty()
{
    bzero(this, sizeof(*this));
    _buckets = (bucket_t *)&_objc_empty_cache;
}


mask_t cache_t::capacity()
{
    return mask() ? mask()+1 : 0;
}
#if CACHE_END_MARKER

size_t cache_t::bytesForCapacity(uint32_t cap)
{
    // fixme put end marker inline when capacity+1 malloc is inefficient
    return sizeof(bucket_t) * (cap + 1);
}

bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap)
{
    // bytesForCapacity() chooses whether the end marker is inline or not
    return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1;
}

bucket_t *allocateBuckets(mask_t newCapacity)
{
    // Allocate one extra bucket to mark the end of the list.
    // This can't overflow mask_t because newCapacity is a power of 2.
    // fixme instead put the end mark inline when +1 is malloc-inefficient
    bucket_t *newBuckets = (bucket_t *)
        calloc(cache_t::bytesForCapacity(newCapacity), 1);

    bucket_t *end = cache_t::endMarker(newBuckets, newCapacity);

#if __arm__
    // End marker's key is 1 and imp points BEFORE the first bucket.
    // This saves an instruction in objc_msgSend.
    end->setKey((cache_key_t)(uintptr_t)1);
    end->setImp((IMP)(newBuckets - 1));
#else
    // End marker's key is 1 and imp points to the first bucket.
    end->setKey((cache_key_t)(uintptr_t)1);
    end->setImp((IMP)newBuckets);
#endif

    if (PrintCaches) recordNewCache(newCapacity);

    return newBuckets;
}

#else

size_t cache_t::bytesForCapacity(uint32_t cap)
{
    return sizeof(bucket_t) * cap;
}

bucket_t *allocateBuckets(mask_t newCapacity)
{
    if (PrintCaches) recordNewCache(newCapacity);

    return (bucket_t *)calloc(cache_t::bytesForCapacity(newCapacity), 1);
}

#endif
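// Worked example, assuming 64-bit pointers so sizeof(bucket_t) == 16: a new
// cache at INIT_CACHE_SIZE == 4 occupies bytesForCapacity(4) == (4+1)*16 ==
// 80 bytes when CACHE_END_MARKER is set (four data buckets plus the end
// marker), and 4*16 == 64 bytes on arm64, whose decrementing scan needs no
// end marker.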
bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true)
{
    cacheUpdateLock.assertLocked();

    size_t bytes = cache_t::bytesForCapacity(capacity);

    // Use _objc_empty_cache if the buckets are small enough.
    if (bytes <= EMPTY_BYTES) {
        return (bucket_t *)&_objc_empty_cache;
    }

    // Use shared empty buckets allocated on the heap.
    static bucket_t **emptyBucketsList = nil;
    static mask_t emptyBucketsListCount = 0;

    mask_t index = log2u(capacity);

    if (index >= emptyBucketsListCount) {
        if (!allocate) return nil;

        mask_t newListCount = index + 1;
        bucket_t *newBuckets = (bucket_t *)calloc(bytes, 1);
        emptyBucketsList = (bucket_t**)
            realloc(emptyBucketsList, newListCount * sizeof(bucket_t *));
        // Share newBuckets for every un-allocated size smaller than index.
        // The array is therefore always fully populated.
        for (mask_t i = emptyBucketsListCount; i < newListCount; i++) {
            emptyBucketsList[i] = newBuckets;
        }
        emptyBucketsListCount = newListCount;

        if (PrintCaches) {
            _objc_inform("CACHES: new empty buckets at %p (capacity %zu)",
                         newBuckets, (size_t)capacity);
        }
    }

    return emptyBucketsList[index];
}
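// For example, with the release EMPTY_BYTES value of (1024+1)*16 bytes,
// every capacity up to 1024 resolves to _objc_empty_cache; a 2048-bucket
// cache (log2u == 11) gets a heap-allocated empty array that is then shared
// by every later request for that size.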
bool cache_t::isConstantEmptyCache()
{
    return
        occupied() == 0  &&
        buckets() == emptyBucketsForCapacity(capacity(), false);
}

bool cache_t::canBeFreed()
{
    return !isConstantEmptyCache();
}
void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity)
{
    bool freeOld = canBeFreed();

    bucket_t *oldBuckets = buckets();
    bucket_t *newBuckets = allocateBuckets(newCapacity);

    // Cache's old contents are not propagated.
    // This is thought to save cache memory at the cost of extra cache fills.
    // fixme re-measure this

    assert(newCapacity > 0);
    assert((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);

    setBucketsAndMask(newBuckets, newCapacity - 1);

    if (freeOld) {
        cache_collect_free(oldBuckets, oldCapacity);
        cache_collect(false);
    }
}
void cache_t::bad_cache(id receiver, SEL sel, Class isa)
{
    // Log in separate steps in case the logging itself causes a crash.
    _objc_inform_now_and_on_crash
        ("Method cache corrupted. This may be a message to an "
         "invalid object, or a memory error somewhere else.");
    cache_t *cache = &isa->cache;
    _objc_inform_now_and_on_crash
        ("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
         "mask 0x%x, occupied 0x%x",
         receiver ? "receiver" : "unused", receiver,
         sel, isa, cache, cache->_buckets,
         cache->_mask, cache->_occupied);
    _objc_inform_now_and_on_crash
        ("%s %zu bytes, buckets %zu bytes",
         receiver ? "receiver" : "unused", malloc_size(receiver),
         malloc_size(cache->_buckets));
    _objc_inform_now_and_on_crash
        ("selector '%s'", sel_getName(sel));
    _objc_inform_now_and_on_crash
        ("isa '%s'", isa->nameForLogging());
    _objc_fatal
        ("Method cache corrupted. This may be a message to an "
         "invalid object, or a memory error somewhere else.");
}
bucket_t * cache_t::find(cache_key_t k, id receiver)
{
    assert(k != 0);

    bucket_t *b = buckets();
    mask_t m = mask();
    mask_t begin = cache_hash(k, m);
    mask_t i = begin;
    do {
        if (b[i].key() == 0  ||  b[i].key() == k) {
            return &b[i];
        }
    } while ((i = cache_next(i, m)) != begin);

    // hack
    Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache));
    cache_t::bad_cache(receiver, (SEL)k, cls);
}
void cache_t::expand()
{
    cacheUpdateLock.assertLocked();

    uint32_t oldCapacity = capacity();
    uint32_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE;

    if ((uint32_t)(mask_t)newCapacity != newCapacity) {
        // mask overflow - can't grow further
        // fixme this wastes one bit of mask
        newCapacity = oldCapacity;
    }

    reallocate(oldCapacity, newCapacity);
}
static void cache_fill_nolock(Class cls, SEL sel, IMP imp, id receiver)
{
    cacheUpdateLock.assertLocked();

    // Never cache before +initialize is done
    if (!cls->isInitialized()) return;

    // Make sure the entry wasn't added to the cache by some other thread
    // before we grabbed the cacheUpdateLock.
    if (cache_getImp(cls, sel)) return;

    cache_t *cache = getCache(cls);
    cache_key_t key = getKey(sel);

    // Use the cache as-is if it is less than 3/4 full
    mask_t newOccupied = cache->occupied() + 1;
    mask_t capacity = cache->capacity();
    if (cache->isConstantEmptyCache()) {
        // Cache is read-only. Replace it.
        cache->reallocate(capacity, capacity ?: INIT_CACHE_SIZE);
    }
    else if (newOccupied <= capacity / 4 * 3) {
        // Cache is less than 3/4 full. Use it as-is.
    }
    else {
        // Cache is too full. Expand it.
        cache->expand();
    }

    // Scan for the first unused slot and insert there.
    // There is guaranteed to be an empty slot because the
    // minimum size is 4 and we resized at 3/4 full.
    bucket_t *bucket = cache->find(key, receiver);
    if (bucket->key() == 0) cache->incrementOccupied();
    bucket->set(key, imp);
}
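// Worked example of the growth policy: a class's first cached send replaces
// the constant empty cache with INIT_CACHE_SIZE == 4 buckets. Fills proceed
// in place until an insertion would push occupancy past 3/4 of capacity
// (3 of 4, then 6 of 8, ...), at which point expand() doubles the bucket
// count and the old, now-unreferenced buckets go onto the garbage list.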
void cache_fill(Class cls, SEL sel, IMP imp, id receiver)
{
#if !DEBUG_TASK_THREADS
    mutex_locker_t lock(cacheUpdateLock);
    cache_fill_nolock(cls, sel, imp, receiver);
#else
    _collecting_in_critical();
    return;
#endif
}
// Reset this entire cache to the uncached lookup by reallocating it.
// This must not shrink the cache - that breaks the lock-free scheme.
void cache_erase_nolock(Class cls)
{
    cacheUpdateLock.assertLocked();

    cache_t *cache = getCache(cls);

    mask_t capacity = cache->capacity();
    if (capacity > 0  &&  cache->occupied() > 0) {
        auto oldBuckets = cache->buckets();
        auto buckets = emptyBucketsForCapacity(capacity);
        cache->setBucketsAndMask(buckets, capacity - 1); // also clears occupied

        cache_collect_free(oldBuckets, capacity);
        cache_collect(false);
    }
}
void cache_delete(Class cls)
{
    mutex_locker_t lock(cacheUpdateLock);
    if (cls->cache.canBeFreed()) {
        if (PrintCaches) recordDeadCache(cls->cache.capacity());
        free(cls->cache.buckets());
    }
}
/***********************************************************************
* Cache collection.
**********************************************************************/

// A sentinel (magic value) to report bad thread_get_state status.
// Must not be a valid PC.
// Must not be zero - thread_get_state() on a new thread returns PC == 0.
#define PC_SENTINEL  1
static uintptr_t _get_pc_for_thread(thread_t thread)
#if defined(__i386__)
{
    i386_thread_state_t state;
    unsigned int count = i386_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
}
#elif defined(__x86_64__)
{
    x86_thread_state64_t state;
    unsigned int count = x86_THREAD_STATE64_COUNT;
    kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
}
#elif defined(__arm__)
{
    arm_thread_state_t state;
    unsigned int count = ARM_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
}
#elif defined(__arm64__)
{
    arm_thread_state64_t state;
    unsigned int count = ARM_THREAD_STATE64_COUNT;
    kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? arm_thread_state64_get_pc(state) : PC_SENTINEL;
}
#else
{
#error _get_pc_for_thread() not implemented for this architecture
}
#endif
/***********************************************************************
* _collecting_in_critical.
* Returns TRUE if some thread is currently executing a cache-reading
* function. Collection of cache garbage is not allowed when a cache-
* reading function is in progress because it might still be using
* the garbage memory.
**********************************************************************/
extern "C" uintptr_t objc_entryPoints[];
extern "C" uintptr_t objc_exitPoints[];
static int _collecting_in_critical(void)
{
    thread_act_port_array_t threads;
    unsigned number;
    unsigned count;
    kern_return_t ret;
    int result;

    mach_port_t mythread = pthread_mach_thread_np(pthread_self());

    // Get a list of all the threads in the current task
#if !DEBUG_TASK_THREADS
    ret = task_threads(mach_task_self(), &threads, &number);
#else
    ret = objc_task_threads(mach_task_self(), &threads, &number);
#endif

    if (ret != KERN_SUCCESS) {
        // See DEBUG_TASK_THREADS below to help debug this.
        _objc_fatal("task_threads failed (result 0x%x)\n", ret);
    }

    // Check whether any thread is in the cache lookup code
    result = FALSE;
    for (count = 0; count < number; count++)
    {
        int region;
        uintptr_t pc;

        // Don't bother checking ourselves
        if (threads[count] == mythread)
            continue;

        // Find out where thread is executing
        pc = _get_pc_for_thread (threads[count]);

        // Check for bad status, and if so, assume the worst (can't collect)
        if (pc == PC_SENTINEL)
        {
            result = TRUE;
            goto done;
        }

        // Check whether it is in the cache lookup code
        for (region = 0; objc_entryPoints[region] != 0; region++)
        {
            if ((pc >= objc_entryPoints[region]) &&
                (pc <= objc_exitPoints[region]))
            {
                result = TRUE;
                goto done;
            }
        }
    }

 done:
    // Deallocate the port rights for the threads
    for (count = 0; count < number; count++) {
        mach_port_deallocate(mach_task_self (), threads[count]);
    }

    // Deallocate the thread list
    vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);

    // Return our finding
    return result;
}
/***********************************************************************
* _garbage_make_room.  Ensure that there is enough room for at least
* one more ref in the garbage.
**********************************************************************/

// amount of memory represented by all refs in the garbage
static size_t garbage_byte_size = 0;

// do not empty the garbage until garbage_byte_size gets at least this big
static size_t garbage_threshold = 32*1024;

// table of refs to free
static bucket_t **garbage_refs = 0;

// current number of refs in garbage_refs
static size_t garbage_count = 0;

// capacity of current garbage_refs
static size_t garbage_max = 0;

// capacity of initial garbage_refs
enum {
    INIT_GARBAGE_COUNT = 128
};
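// For scale: assuming 16-byte buckets, the 32KB threshold corresponds to
// roughly 2048 retired buckets (for example, sixteen old 128-bucket arrays)
// piling up before cache_collect() even attempts to free anything.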
static void _garbage_make_room(void)
{
    static int first = 1;

    // Create the collection table the first time it is needed
    if (first)
    {
        first = 0;
        garbage_refs = (bucket_t**)
            malloc(INIT_GARBAGE_COUNT * sizeof(void *));
        garbage_max = INIT_GARBAGE_COUNT;
    }

    // Double the table if it is full
    else if (garbage_count == garbage_max)
    {
        garbage_refs = (bucket_t**)
            realloc(garbage_refs, garbage_max * 2 * sizeof(void *));
        garbage_max *= 2;
    }
}
/***********************************************************************
* cache_collect_free.  Add the specified malloc'd memory to the list
* of blocks to free at some later point.
* size is used for the collection threshold. It does not have to be
* precisely the block's size.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static void cache_collect_free(bucket_t *data, mask_t capacity)
{
    cacheUpdateLock.assertLocked();

    if (PrintCaches) recordDeadCache(capacity);

    _garbage_make_room ();
    garbage_byte_size += cache_t::bytesForCapacity(capacity);
    garbage_refs[garbage_count++] = data;
}
/***********************************************************************
* cache_collect.  Try to free accumulated dead caches.
* collectALot tries harder to free memory.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
void cache_collect(bool collectALot)
{
    cacheUpdateLock.assertLocked();

    // Done if the garbage is not full
    if (garbage_byte_size < garbage_threshold  &&  !collectALot) {
        return;
    }

    // Synchronize collection with objc_msgSend and other cache readers
    if (!collectALot) {
        if (_collecting_in_critical ()) {
            // objc_msgSend (or other cache reader) is currently looking in
            // the cache and might still be using some garbage.
            if (PrintCaches) {
                _objc_inform ("CACHES: not collecting; "
                              "objc_msgSend in progress");
            }
            return;
        }
    }
    else {
        // No excuses.
        while (_collecting_in_critical())
            ;
    }

    // No cache readers in progress - garbage is now deletable

    // Log our progress
    if (PrintCaches) {
        cache_collections++;
        _objc_inform ("CACHES: COLLECTING %zu bytes (%zu allocations, %zu collections)", garbage_byte_size, cache_allocations, cache_collections);
    }

    // Dispose all refs now in the garbage
    // Erase each entry so debugging tools don't see stale pointers.
    while (garbage_count--) {
        auto dead = garbage_refs[garbage_count];
        garbage_refs[garbage_count] = nil;
        free(dead);
    }

    // Clear the garbage count and total size indicator
    garbage_count = 0;
    garbage_byte_size = 0;

    if (PrintCaches) {
        size_t i;
        size_t total_count = 0;
        size_t total_size = 0;

        for (i = 0; i < countof(cache_counts); i++) {
            int count = cache_counts[i];
            int slots = 1 << i;
            size_t size = count * slots * sizeof(bucket_t);

            if (!count) continue;

            _objc_inform("CACHES: %4d slots: %4d caches, %6zu bytes",
                         slots, count, size);

            total_count += count;
            total_size += size;
        }

        _objc_inform("CACHES: total: %4zu caches, %6zu bytes",
                     total_count, total_size);
    }
}
/***********************************************************************
* objc_task_threads
* Replacement for task_threads(). Define DEBUG_TASK_THREADS to debug
* crashes when task_threads() is failing.
*
* A failure in task_threads() usually means somebody has botched their
* Mach or MIG traffic. For example, somebody's error handling was wrong
* and they left a message queued on the MIG reply port for task_threads()
* to trip over.
*
* The code below is a modified version of task_threads(). It logs
* the msgh_id of the reply message. The msgh_id can identify the sender
* of the message, which can help pinpoint the faulty code.
* DEBUG_TASK_THREADS also calls collecting_in_critical() during every
* message dispatch, which can increase reproducibility of bugs.
*
* This code can be regenerated by running
* `mig /usr/include/mach/task.defs`.
**********************************************************************/
#if DEBUG_TASK_THREADS

#include <mach/mach.h>
#include <mach/message.h>
#include <mach/mig.h>

#define __MIG_check__Reply__task_subsystem__ 1
#define mig_internal static inline
#define __DeclareSendRpc(a, b)
#define __BeforeSendRpc(a, b)
#define __AfterSendRpc(a, b)
#define msgh_request_port       msgh_remote_port
#define msgh_reply_port         msgh_local_port
#ifndef __MachMsgErrorWithTimeout
#define __MachMsgErrorWithTimeout(_R_) { \
        switch (_R_) { \
        case MACH_SEND_INVALID_DATA: \
        case MACH_SEND_INVALID_DEST: \
        case MACH_SEND_INVALID_HEADER: \
            mig_put_reply_port(InP->Head.msgh_reply_port); \
            break; \
        case MACH_SEND_TIMED_OUT: \
        case MACH_RCV_TIMED_OUT: \
        default: \
            mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
        } \
    }
#endif  /* __MachMsgErrorWithTimeout */

#ifndef __MachMsgErrorWithoutTimeout
#define __MachMsgErrorWithoutTimeout(_R_) { \
        switch (_R_) { \
        case MACH_SEND_INVALID_DATA: \
        case MACH_SEND_INVALID_DEST: \
        case MACH_SEND_INVALID_HEADER: \
            mig_put_reply_port(InP->Head.msgh_reply_port); \
            break; \
        default: \
            mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
        } \
    }
#endif  /* __MachMsgErrorWithoutTimeout */
#if ( __MigTypeCheck )
#if __MIG_check__Reply__task_subsystem__
#if !defined(__MIG_check__Reply__task_threads_t__defined)
#define __MIG_check__Reply__task_threads_t__defined

mig_internal kern_return_t __MIG_check__Reply__task_threads_t(__Reply__task_threads_t *Out0P)
{
    typedef __Reply__task_threads_t __Reply;
    boolean_t msgh_simple;
#if __MigTypeCheck
    unsigned int msgh_size;
#endif  /* __MigTypeCheck */
    if (Out0P->Head.msgh_id != 3502) {
        if (Out0P->Head.msgh_id == MACH_NOTIFY_SEND_ONCE)
        { return MIG_SERVER_DIED; }
        else
        { return MIG_REPLY_MISMATCH; }
    }

    msgh_simple = !(Out0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX);
#if __MigTypeCheck
    msgh_size = Out0P->Head.msgh_size;

    if ((msgh_simple || Out0P->msgh_body.msgh_descriptor_count != 1 ||
        msgh_size != (mach_msg_size_t)sizeof(__Reply)) &&
        (!msgh_simple || msgh_size != (mach_msg_size_t)sizeof(mig_reply_error_t) ||
        ((mig_reply_error_t *)Out0P)->RetCode == KERN_SUCCESS))
        { return MIG_TYPE_ERROR ; }
#endif  /* __MigTypeCheck */

    if (msgh_simple) {
        return ((mig_reply_error_t *)Out0P)->RetCode;
    }

#if __MigTypeCheck
    if (Out0P->act_list.type != MACH_MSG_OOL_PORTS_DESCRIPTOR ||
        Out0P->act_list.disposition != 17) {
        return MIG_TYPE_ERROR;
    }
#endif  /* __MigTypeCheck */

    return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Reply__task_threads_t__defined) */
#endif /* __MIG_check__Reply__task_subsystem__ */
#endif /* ( __MigTypeCheck ) */
/* Routine task_threads */
static kern_return_t objc_task_threads
(
    task_t target_task,
    thread_act_array_t *act_list,
    mach_msg_type_number_t *act_listCnt
)
{

#ifdef  __MigPackStructs
#pragma pack(4)
#endif
    typedef struct {
        mach_msg_header_t Head;
    } Request;
#ifdef  __MigPackStructs
#pragma pack()
#endif

#ifdef  __MigPackStructs
#pragma pack(4)
#endif
    typedef struct {
        mach_msg_header_t Head;
        /* start of the kernel processed data */
        mach_msg_body_t msgh_body;
        mach_msg_ool_ports_descriptor_t act_list;
        /* end of the kernel processed data */
        mach_msg_type_number_t act_listCnt;
        mach_msg_trailer_t trailer;
    } Reply;
#ifdef  __MigPackStructs
#pragma pack()
#endif

#ifdef  __MigPackStructs
#pragma pack(4)
#endif
    typedef struct {
        mach_msg_header_t Head;
        /* start of the kernel processed data */
        mach_msg_body_t msgh_body;
        mach_msg_ool_ports_descriptor_t act_list;
        /* end of the kernel processed data */
        mach_msg_type_number_t act_listCnt;
    } __Reply;
#ifdef  __MigPackStructs
#pragma pack()
#endif

    /*
     * typedef struct {
     *     mach_msg_header_t Head;
     *     NDR_record_t NDR;
     *     kern_return_t RetCode;
     * } mig_reply_error_t;
     */
    union {
        Request In;
        Reply Out;
    } Mess;

    Request *InP = &Mess.In;
    Reply *Out0P = &Mess.Out;

    mach_msg_return_t msg_result;

#ifdef  __MIG_check__Reply__task_threads_t__defined
    kern_return_t check_result;
#endif  /* __MIG_check__Reply__task_threads_t__defined */

    __DeclareSendRpc(3402, "task_threads")

    InP->Head.msgh_bits =
        MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
    /* msgh_size passed as argument */
    InP->Head.msgh_request_port = target_task;
    InP->Head.msgh_reply_port = mig_get_reply_port();
    InP->Head.msgh_id = 3402;

    __BeforeSendRpc(3402, "task_threads")
    msg_result = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_reply_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    __AfterSendRpc(3402, "task_threads")

    if (msg_result != MACH_MSG_SUCCESS) {
        _objc_inform("task_threads received unexpected reply msgh_id 0x%zx",
                     (size_t)Out0P->Head.msgh_id);
        __MachMsgErrorWithoutTimeout(msg_result);
        { return msg_result; }
    }

#if defined(__MIG_check__Reply__task_threads_t__defined)
    check_result = __MIG_check__Reply__task_threads_t((__Reply__task_threads_t *)Out0P);
    if (check_result != MACH_MSG_SUCCESS)
        { return check_result; }
#endif  /* defined(__MIG_check__Reply__task_threads_t__defined) */

    *act_list = (thread_act_array_t)(Out0P->act_list.address);
    *act_listCnt = Out0P->act_listCnt;

    return KERN_SUCCESS;
}
#endif  // DEBUG_TASK_THREADS