 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
/***********************************************************************
* Method cache management
* Cache garbage collection
* Cache instrumentation
* Dedicated allocator for large caches
**********************************************************************/


/***********************************************************************
* Method cache locking (GrP 2001-1-14)
*
* For speed, objc_msgSend does not acquire any locks when it reads
* method caches. Instead, all cache changes are performed so that any
* objc_msgSend running concurrently with the cache mutator will not
* crash or hang or get an incorrect result from the cache.
*
* When cache memory becomes unused (e.g. the old cache after cache
* expansion), it is not immediately freed, because a concurrent
* objc_msgSend could still be using it. Instead, the memory is
* disconnected from the data structures and placed on a garbage list.
* The memory is now only accessible to instances of objc_msgSend that
* were running when the memory was disconnected; any further calls to
* objc_msgSend will not see the garbage memory because the other data
* structures don't point to it anymore. The collecting_in_critical
* function checks the PC of all threads and returns FALSE when all threads
* are found to be outside objc_msgSend. This means any call to objc_msgSend
* that could have had access to the garbage has finished or moved past the
* cache lookup stage, so it is safe to free the memory.
*
* All functions that modify cache data or structures must acquire the
* cacheUpdateLock to prevent interference from concurrent modifications.
* The function that frees cache garbage must acquire the cacheUpdateLock
* and use collecting_in_critical() to flush out cache readers.
* The cacheUpdateLock is also used to protect the custom allocator used
* for large method cache blocks.
*
* Cache readers (PC-checked by collecting_in_critical())
*
* Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
* cache_fill         (acquires lock)
* cache_expand       (only called from cache_fill)
* cache_create       (only called from cache_expand)
* bcopy              (only called from instrumented cache_expand)
* flush_caches       (acquires lock)
* cache_flush        (only called from cache_fill and flush_caches)
* cache_collect_free (only called from cache_expand and cache_flush)
*
* UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
* _class_printMethodCaches
* _class_printDuplicateCacheEntries
* _class_printMethodCacheStatistics
*
***********************************************************************/
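// Illustrative sketch (not compiled; helper name is hypothetical): the
// deferred-free protocol described above, reduced to its essentials.
// Writers serialize on cacheUpdateLock; old bucket memory is merely
// disconnected and queued, and is freed only once no thread's PC is inside
// the assembly cache readers.
#if 0
static void example_replace_cache(cache_t *cache,
                                  bucket_t *newBuckets, mask_t newMask)
{
    mutex_lock(&cacheUpdateLock);

    bucket_t *oldBuckets = cache->buckets();
    size_t oldBytes = cache->capacity() * sizeof(bucket_t);

    cache->setBucketsAndMask(newBuckets, newMask);  // disconnect old memory
    cache_collect_free(oldBuckets, oldBytes);       // queue it as garbage
    cache_collect(false);                           // free only when safe

    mutex_unlock(&cacheUpdateLock);
}
#endif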
#include "objc-private.h"
#include "objc-cache.h"


/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
    INIT_CACHE_SIZE_LOG2 = 2,
    INIT_CACHE_SIZE      = (1 << INIT_CACHE_SIZE_LOG2)

static size_t log2u(size_t x)

static void cache_collect_free(struct bucket_t *data, size_t size);
static int _collecting_in_critical(void);
static void _garbage_make_room(void);


/***********************************************************************
* Cache statistics for OBJC_PRINT_CACHE_SETUP
**********************************************************************/
static unsigned int cache_counts[16];
static size_t cache_allocations;
static size_t cache_collections;


/***********************************************************************
* Pointers used by compiled class objects
* These use asm to avoid conflicts with the compiler's internal declarations
**********************************************************************/

// "cache" is cache->buckets; "vtable" is cache->mask/occupied
// hack to avoid conflicts with compiler's internal declaration
asm("\n .section __TEXT,__const"
    "\n .globl __objc_empty_cache"
    "\n __objc_empty_cache: .quad 0"
    "\n __objc_empty_cache: .long 0"
    "\n .globl __objc_empty_vtable"
    "\n .set __objc_empty_vtable, 0"


// objc_msgSend has few registers available.
// Cache scan increments and wraps at special end-marking bucket.
#define CACHE_END_MARKER 1
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return (i+1) & mask;
}
#elif __i386__ || __x86_64__ || __arm64__
// objc_msgSend has lots of registers and/or memory operands available.
// Cache scan decrements. No end marker needed.
#define CACHE_END_MARKER 0
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return i ? i-1 : mask;
}
#error unknown architecture
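// Illustrative sketch (not compiled; loop shape mirrors cache_t::find below):
// probe order produced by the two cache_next() variants for a 4-entry cache
// (mask == 3) starting at index 2. Both wrap around the table; the end
// marker defined above additionally lets the assembly scan wrap without an
// extra compare-and-branch.
//
//   incrementing variant: 2, 3, 0, 1
//   decrementing variant: 2, 1, 0, 3
#if 0
static void probe_order_example(void)
{
    mask_t mask = 3;
    mask_t begin = 2;
    mask_t i = begin;
    do {
        // visit bucket i
    } while ((i = cache_next(i, mask)) != begin);
}
#endif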
// cannot mix sel-side caches with ignored selector constant
// ignored selector constant also not implemented for class-side caches here
#if SUPPORT_IGNORED_SELECTOR_CONSTANT


// copied from dispatch_atomic_maximally_synchronizing_barrier
// fixme verify that this barrier hack does in fact work here
#define mega_barrier() \
    do { unsigned long _clbr; __asm__ __volatile__( \
        : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \

#define mega_barrier() \
    do { unsigned long _clbr; __asm__ __volatile__( \
        : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \

#define mega_barrier() \
    __asm__ __volatile__( \

// Use atomic double-word updates instead.
// This requires cache buckets not cross cache line boundaries.

#define stp(onep, twop, destp) \
    __asm__ ("stp %[one], %[two], [%[dest]]" \
             : "=m" (((uint64_t *)(destp))[0]), \
               "=m" (((uint64_t *)(destp))[1]) \
             : [one] "r" (onep), \
             : /* no clobbers */ \

#define ldp(onep, twop, srcp) \
    __asm__ ("ldp %[one], %[two], [%[src]]" \
             : [one] "=r" (onep), \
             : "m" (((uint64_t *)(srcp))[0]), \
               "m" (((uint64_t *)(srcp))[1]) \
             : /* no clobbers */ \

#error unknown architecture
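// Illustrative sketch (not compiled; names are hypothetical): with the
// ldp/stp macros above, a writer stores both 64-bit words of a bucket in one
// instruction and a reader loads both in one instruction, so the reader
// observes either the old {key, imp} pair or the new one, never a mix.
#if 0
static void atomic_pair_example(bucket_t *b, cache_key_t newKey, IMP newImp)
{
    // Writer: publish both words at once.
    stp(newKey, newImp, b);

    // Reader: fetch both words at once; k and i form a consistent pair.
    uintptr_t k, i;
    ldp(k, i, b);
}
#endif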
// Class points to cache. SEL is key. Cache buckets store SEL+IMP.
// Caches are never built in the dyld shared cache.

static inline mask_t cache_hash(cache_key_t key, mask_t mask)
    return (mask_t)(key & mask);

cache_t *getCache(Class cls, SEL sel __unused)

cache_key_t getKey(Class cls __unused, SEL sel)
    return (cache_key_t)sel;


void bucket_t::set(cache_key_t newKey, IMP newImp)
    assert(_key == 0 || _key == newKey);

    // LDP/STP guarantees that all observers get
    // either key/imp or newKey/newImp
    stp(newKey, newImp, this);

void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
    // ensure other threads see buckets contents before buckets pointer
    // see Barrier Litmus Tests and Cookbook,
    // "Address Dependency with object construction"
    __sync_synchronize();

    // LDP/STP guarantees that all observers get
    // old mask/buckets or new mask/buckets

    mask_t newOccupied = 0;
    uint64_t mask_and_occupied =
        (uint64_t)newMask | ((uint64_t)newOccupied << 32);
    stp(newBuckets, mask_and_occupied, this);


void bucket_t::set(cache_key_t newKey, IMP newImp)
    assert(_key == 0 || _key == newKey);

    // objc_msgSend uses key and imp with no locks.
    // It is safe for objc_msgSend to see new imp but NULL key
    // (It will get a cache miss but not dispatch to the wrong place.)
    // It is unsafe for objc_msgSend to see old imp and new key.
    // Therefore we write new imp, wait a lot, then write new key.

    if (_key != newKey) {
void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
    // objc_msgSend uses mask and buckets with no locks.
    // It is safe for objc_msgSend to see new buckets but old mask.
    // (It will get a cache miss but not overrun the buckets' bounds).
    // It is unsafe for objc_msgSend to see old buckets and new mask.
    // Therefore we write new buckets, wait a lot, then write new mask.
    // objc_msgSend reads mask first, then buckets.

    // ensure other threads see buckets contents before buckets pointer
    _buckets = newBuckets;

    // ensure other threads see new buckets before new mask


struct bucket_t *cache_t::buckets()

mask_t cache_t::mask()

mask_t cache_t::occupied()

void cache_t::incrementOccupied()

void cache_t::setEmpty()
    bzero(this, sizeof(*this));
    _buckets = (bucket_t *)&_objc_empty_cache;

mask_t cache_t::capacity()
    return mask() ? mask()+1 : 0;


size_t cache_t::bytesForCapacity(uint32_t cap)
    // fixme put end marker inline when capacity+1 malloc is inefficient
    return sizeof(cache_t) * (cap + 1);

bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap)
    // bytesForCapacity() chooses whether the end marker is inline or not
    return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1;

bucket_t *allocateBuckets(mask_t newCapacity)
    // Allocate one extra bucket to mark the end of the list.
    // This can't overflow mask_t because newCapacity is a power of 2.
    // fixme instead put the end mark inline when +1 is malloc-inefficient
    bucket_t *newBuckets = (bucket_t *)
        _calloc_internal(cache_t::bytesForCapacity(newCapacity), 1);

    bucket_t *end = cache_t::endMarker(newBuckets, newCapacity);

    // End marker's key is 1 and imp points BEFORE the first bucket.
    // This saves an instruction in objc_msgSend.
    end->setKey((cache_key_t)(uintptr_t)1);
    end->setImp((IMP)(newBuckets - 1));
# error unknown architecture
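// Illustrative layout (capacity 4) for the end-marker scheme above:
//
//   index:  [0]    [1]    [2]    [3]    [4]  <- end marker
//   key:    sel/0  sel/0  sel/0  sel/0  1
//   imp:    ...    ...    ...    ...    (IMP)&newBuckets[-1]
//
// When the incrementing scan reaches the marker, the marker's imp field
// supplies a pointer just before bucket 0, so the scan's normal "advance to
// the next bucket" step wraps back to the start of the table.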
bucket_t *allocateBuckets(mask_t newCapacity)
    return (bucket_t *)_calloc_internal(newCapacity, sizeof(bucket_t));


bool cache_t::canBeFreed()
    return buckets() != (bucket_t *)&_objc_empty_cache;


void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity)
    size_t bucket = log2u(newCapacity);
    if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
        cache_counts[bucket]++;

    bucket = log2u(oldCapacity);
    if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
        cache_counts[bucket]--;

    bool freeOld = canBeFreed();

    bucket_t *oldBuckets = buckets();
    bucket_t *newBuckets = allocateBuckets(newCapacity);

    // Cache's old contents are not propagated.
    // This is thought to save cache memory at the cost of extra cache fills.
    // fixme re-measure this

    assert(newCapacity > 0);
    assert((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);

    setBucketsAndMask(newBuckets, newCapacity - 1);

        cache_collect_free(oldBuckets, oldCapacity * sizeof(bucket_t));
        cache_collect(false);
// called by objc_msgSend
void objc_msgSend_corrupt_cache_error(id receiver, SEL sel, Class isa)
    cache_t::bad_cache(receiver, sel, isa);

void cache_getImp_corrupt_cache_error(id receiver, SEL sel, Class isa)
    cache_t::bad_cache(receiver, sel, isa);

void cache_t::bad_cache(id receiver, SEL sel, Class isa)
    // Log in separate steps in case the logging itself causes a crash.
    _objc_inform_now_and_on_crash
        ("Method cache corrupted. This may be a message to an "
         "invalid object, or a memory error somewhere else.");
    cache_t *cache = &isa->cache;
    _objc_inform_now_and_on_crash
        ("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
         "mask 0x%x, occupied 0x%x",
         receiver ? "receiver" : "unused", receiver,
         sel, isa, cache, cache->_buckets,
         cache->_mask, cache->_occupied);
    _objc_inform_now_and_on_crash
        ("%s %zu bytes, buckets %zu bytes",
         receiver ? "receiver" : "unused", malloc_size(receiver),
         malloc_size(cache->_buckets));
    _objc_inform_now_and_on_crash
        ("selector '%s'", sel_getName(sel));
    _objc_inform_now_and_on_crash
        ("isa '%s'", isa->nameForLogging());
    _objc_fatal
        ("Method cache corrupted.");

bucket_t * cache_t::find(cache_key_t k)
    bucket_t *b = buckets();
    mask_t begin = cache_hash(k, m);
        if (b[i].key() == 0 || b[i].key() == k) {
    } while ((i = cache_next(i, m)) != begin);

    Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache));
    cache_t::bad_cache(nil, (SEL)k, cls);
void cache_t::expand()
    mutex_assert_locked(&cacheUpdateLock);

    uint32_t oldCapacity = capacity();
    uint32_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE;

    if ((uint32_t)(mask_t)newCapacity != newCapacity) {
        // mask overflow - can't grow further
        // fixme this wastes one bit of mask
        newCapacity = oldCapacity;

    reallocate(oldCapacity, newCapacity);


static void cache_fill_nolock(Class cls, SEL sel, IMP imp)
    mutex_assert_locked(&cacheUpdateLock);

    // Never cache before +initialize is done
    if (!cls->isInitialized()) return;

    // Make sure the entry wasn't added to the cache by some other thread
    // before we grabbed the cacheUpdateLock.
    if (cache_getImp(cls, sel)) return;

    cache_t *cache = getCache(cls, sel);
    cache_key_t key = getKey(cls, sel);

    // Use the cache as-is if it is less than 3/4 full
    mask_t newOccupied = cache->occupied() + 1;
    if ((newOccupied * 4) <= (cache->mask() + 1) * 3) {
        // Cache is less than 3/4 full.
        // Cache is too full. Expand it.
    // Scan for the first unused slot (or the slot already used for this key)
    // and insert there.
    // There is guaranteed to be an empty slot because the
    // minimum size is 4 and we resized at 3/4 full.
    bucket_t *bucket = cache->find(key);
    if (bucket->key() == 0) cache->incrementOccupied();
    bucket->set(key, imp);
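
// Worked example of the occupancy check above (values illustrative): with
// capacity 4 (mask 3) and occupied 2, newOccupied is 3 and
// 3*4 = 12 <= (3+1)*3 = 12, so the cache is used as-is. With occupied 3,
// newOccupied is 4 and 4*4 = 16 > 12, so the cache expands to capacity 8
// before the new entry is inserted.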
void cache_fill(Class cls, SEL sel, IMP imp)
#if !DEBUG_TASK_THREADS
    mutex_lock(&cacheUpdateLock);
    cache_fill_nolock(cls, sel, imp);
    mutex_unlock(&cacheUpdateLock);
    _collecting_in_critical();


// Reset any entry for cls/sel to the uncached lookup
static void cache_eraseMethod_nolock(Class cls, SEL sel)
    mutex_assert_locked(&cacheUpdateLock);

    cache_t *cache = getCache(cls, sel);
    cache_key_t key = getKey(cls, sel);

    bucket_t *bucket = cache->find(key);
    if (bucket->key() == key) {
        bucket->setImp(_objc_msgSend_uncached_impcache);


// Resets cache entries for all methods in mlist for cls and its subclasses.
void cache_eraseMethods(Class cls, method_list_t *mlist)
    rwlock_assert_writing(&runtimeLock);
    mutex_lock(&cacheUpdateLock);

    foreach_realized_class_and_subclass(cls, ^(Class c){
        for (uint32_t m = 0; m < mlist->count; m++) {
            SEL sel = mlist->get(m).name;
            cache_eraseMethod_nolock(c, sel);

    mutex_unlock(&cacheUpdateLock);


// Reset any copies of imp in this cache to the uncached lookup
void cache_eraseImp_nolock(Class cls, SEL sel, IMP imp)
    mutex_assert_locked(&cacheUpdateLock);

    cache_t *cache = getCache(cls, sel);

    bucket_t *b = cache->buckets();
    mask_t count = cache->capacity();
    for (mask_t i = 0; i < count; i++) {
        if (b[i].imp() == imp) {
            b[i].setImp(_objc_msgSend_uncached_impcache);


void cache_eraseImp(Class cls, SEL sel, IMP imp)
    mutex_lock(&cacheUpdateLock);
    cache_eraseImp_nolock(cls, sel, imp);
    mutex_unlock(&cacheUpdateLock);
// Reset this entire cache to the uncached lookup by reallocating it.
// This must not shrink the cache - that breaks the lock-free scheme:
// a concurrent objc_msgSend that already read the old (larger) mask could
// index past the end of a smaller bucket array.
void cache_erase_nolock(cache_t *cache)
    mutex_assert_locked(&cacheUpdateLock);

    mask_t capacity = cache->capacity();
    if (capacity > 0 && cache->occupied() > 0) {
        cache->reallocate(capacity, capacity);
/***********************************************************************
* Cache collection.
**********************************************************************/
// A sentinel (magic value) to report bad thread_get_state status.
// Must not be a valid PC.
// Must not be zero - thread_get_state() on a new thread returns PC == 0.
#define PC_SENTINEL 1

static uintptr_t _get_pc_for_thread(thread_t thread)
#if defined(__i386__)
    i386_thread_state_t state;
    unsigned int count = i386_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
#elif defined(__x86_64__)
    x86_thread_state64_t state;
    unsigned int count = x86_THREAD_STATE64_COUNT;
    kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
#elif defined(__arm__)
    arm_thread_state_t state;
    unsigned int count = ARM_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
#elif defined(__arm64__)
    arm_thread_state64_t state;
    unsigned int count = ARM_THREAD_STATE64_COUNT;
    kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
#error _get_pc_for_thread () not implemented for this architecture


/***********************************************************************
* _collecting_in_critical.
* Returns TRUE if some thread is currently executing a cache-reading
* function. Collection of cache garbage is not allowed when a cache-
* reading function is in progress because it might still be using
* the garbage memory.
**********************************************************************/
OBJC_EXPORT uintptr_t objc_entryPoints[];
OBJC_EXPORT uintptr_t objc_exitPoints[];
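// Illustrative sketch (contents hypothetical): objc_entryPoints and
// objc_exitPoints are parallel, zero-terminated arrays holding the start and
// end addresses of the assembly cache readers (the objc_msgSend family and
// cache_getImp), roughly:
//
//   objc_entryPoints[] = { (uintptr_t)&objc_msgSend, ..., 0 };
//   objc_exitPoints[]  = { /* end address of each reader */ ..., 0 };
//
// A thread whose PC falls inside any [entry, exit] range may still be
// reading a cache, so collection must wait.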
static int _collecting_in_critical(void)
    thread_act_port_array_t threads;

    mach_port_t mythread = pthread_mach_thread_np(pthread_self());

    // Get a list of all the threads in the current task
#if !DEBUG_TASK_THREADS
    ret = task_threads(mach_task_self(), &threads, &number);
    ret = objc_task_threads(mach_task_self(), &threads, &number);

    if (ret != KERN_SUCCESS) {
        // See DEBUG_TASK_THREADS below to help debug this.
        _objc_fatal("task_threads failed (result 0x%x)\n", ret);

    // Check whether any thread is in the cache lookup code
    for (count = 0; count < number; count++)
        // Don't bother checking ourselves
        if (threads[count] == mythread)

        // Find out where thread is executing
        pc = _get_pc_for_thread (threads[count]);
        // Check for bad status, and if so, assume the worst (can't collect)
        if (pc == PC_SENTINEL)

        // Check whether it is in the cache lookup code
        for (region = 0; objc_entryPoints[region] != 0; region++)
            if ((pc >= objc_entryPoints[region]) &&
                (pc <= objc_exitPoints[region]))

    // Deallocate the port rights for the threads
    for (count = 0; count < number; count++) {
        mach_port_deallocate(mach_task_self (), threads[count]);

    // Deallocate the thread list
    vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);

    // Return our finding


/***********************************************************************
* _garbage_make_room. Ensure that there is enough room for at least
* one more ref in the garbage.
**********************************************************************/

// amount of memory represented by all refs in the garbage
static size_t garbage_byte_size = 0;

// do not empty the garbage until garbage_byte_size gets at least this big
static size_t garbage_threshold = 32*1024;

// table of refs to free
static bucket_t **garbage_refs = 0;

// current number of refs in garbage_refs
static size_t garbage_count = 0;

// capacity of current garbage_refs
static size_t garbage_max = 0;

// capacity of initial garbage_refs
    INIT_GARBAGE_COUNT = 128

static void _garbage_make_room(void)
    static int first = 1;

    // Create the collection table the first time it is needed
        garbage_refs = (bucket_t**)
            _malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
        garbage_max = INIT_GARBAGE_COUNT;

    // Double the table if it is full
    else if (garbage_count == garbage_max)
        garbage_refs = (bucket_t**)
            _realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));


/***********************************************************************
* cache_collect_free. Add the specified malloc'd memory to the list
* of blocks to free at some later point.
* size is used for the collection threshold. It does not have to be
* precisely the block's size.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static void cache_collect_free(bucket_t *data, size_t size)
    mutex_assert_locked(&cacheUpdateLock);

    _garbage_make_room ();
    garbage_byte_size += size;
    garbage_refs[garbage_count++] = data;


/***********************************************************************
* cache_collect. Try to free accumulated dead caches.
* collectALot tries harder to free memory.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
void cache_collect(bool collectALot)
    mutex_assert_locked(&cacheUpdateLock);

    // Done if the garbage is not full
    if (garbage_byte_size < garbage_threshold && !collectALot) {

    // Synchronize collection with objc_msgSend and other cache readers
    if (_collecting_in_critical ()) {
        // objc_msgSend (or other cache reader) is currently looking in
        // the cache and might still be using some garbage.
            _objc_inform ("CACHES: not collecting; "
                          "objc_msgSend in progress");

        while (_collecting_in_critical())

    // No cache readers in progress - garbage is now deletable

        _objc_inform ("CACHES: COLLECTING %zu bytes (%zu allocations, %zu collections)", garbage_byte_size, cache_allocations, cache_collections);

    // Dispose all refs now in the garbage
    while (garbage_count--) {
        free(garbage_refs[garbage_count]);

    // Clear the garbage count and total size indicator
    garbage_byte_size = 0;

        size_t total_count = 0;
        size_t total_size = 0;

        for (i = 0; i < sizeof(cache_counts) / sizeof(cache_counts[0]); i++) {
            int count = cache_counts[i];
            size_t size = count * slots * sizeof(bucket_t);

            if (!count) continue;

            _objc_inform("CACHES: %4d slots: %4d caches, %6zu bytes",

            total_count += count;

        _objc_inform("CACHES: total: %4zu caches, %6zu bytes",
                     total_count, total_size);
/***********************************************************************
* Replacement for task_threads(). Define DEBUG_TASK_THREADS to debug
* crashes when task_threads() is failing.
*
* A failure in task_threads() usually means somebody has botched their
* Mach or MIG traffic. For example, somebody's error handling was wrong
* and they left a message queued on the MIG reply port for task_threads()
*
* The code below is a modified version of task_threads(). It logs
* the msgh_id of the reply message. The msgh_id can identify the sender
* of the message, which can help pinpoint the faulty code.
* DEBUG_TASK_THREADS also calls collecting_in_critical() during every
* message dispatch, which can increase reproducibility of bugs.
*
* This code can be regenerated by running
* `mig /usr/include/mach/task.defs`.
**********************************************************************/
#if DEBUG_TASK_THREADS

#include <mach/mach.h>
#include <mach/message.h>
#include <mach/mig.h>

#define __MIG_check__Reply__task_subsystem__ 1
#define mig_internal static inline
#define __DeclareSendRpc(a, b)
#define __BeforeSendRpc(a, b)
#define __AfterSendRpc(a, b)
#define msgh_request_port msgh_remote_port
#define msgh_reply_port msgh_local_port

#ifndef __MachMsgErrorWithTimeout
#define __MachMsgErrorWithTimeout(_R_) { \
        case MACH_SEND_INVALID_DATA: \
        case MACH_SEND_INVALID_DEST: \
        case MACH_SEND_INVALID_HEADER: \
            mig_put_reply_port(InP->Head.msgh_reply_port); \
        case MACH_SEND_TIMED_OUT: \
        case MACH_RCV_TIMED_OUT: \
            mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
#endif /* __MachMsgErrorWithTimeout */

#ifndef __MachMsgErrorWithoutTimeout
#define __MachMsgErrorWithoutTimeout(_R_) { \
        case MACH_SEND_INVALID_DATA: \
        case MACH_SEND_INVALID_DEST: \
        case MACH_SEND_INVALID_HEADER: \
            mig_put_reply_port(InP->Head.msgh_reply_port); \
            mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
#endif /* __MachMsgErrorWithoutTimeout */

#if ( __MigTypeCheck )
#if __MIG_check__Reply__task_subsystem__
#if !defined(__MIG_check__Reply__task_threads_t__defined)
#define __MIG_check__Reply__task_threads_t__defined

mig_internal kern_return_t __MIG_check__Reply__task_threads_t(__Reply__task_threads_t *Out0P)
    typedef __Reply__task_threads_t __Reply;
    boolean_t msgh_simple;
    unsigned int msgh_size;
#endif /* __MigTypeCheck */
    if (Out0P->Head.msgh_id != 3502) {
        if (Out0P->Head.msgh_id == MACH_NOTIFY_SEND_ONCE)
            { return MIG_SERVER_DIED; }
            { return MIG_REPLY_MISMATCH; }

    msgh_simple = !(Out0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX);
    msgh_size = Out0P->Head.msgh_size;

    if ((msgh_simple || Out0P->msgh_body.msgh_descriptor_count != 1 ||
         msgh_size != (mach_msg_size_t)sizeof(__Reply)) &&
        (!msgh_simple || msgh_size != (mach_msg_size_t)sizeof(mig_reply_error_t) ||
         ((mig_reply_error_t *)Out0P)->RetCode == KERN_SUCCESS))
        { return MIG_TYPE_ERROR ; }
#endif /* __MigTypeCheck */

        return ((mig_reply_error_t *)Out0P)->RetCode;

    if (Out0P->act_list.type != MACH_MSG_OOL_PORTS_DESCRIPTOR ||
        Out0P->act_list.disposition != 17) {
        return MIG_TYPE_ERROR;
#endif /* __MigTypeCheck */

    return MACH_MSG_SUCCESS;
#endif /* !defined(__MIG_check__Reply__task_threads_t__defined) */
#endif /* __MIG_check__Reply__task_subsystem__ */
#endif /* ( __MigTypeCheck ) */
/* Routine task_threads */
static kern_return_t objc_task_threads
    thread_act_array_t *act_list,
    mach_msg_type_number_t *act_listCnt

#ifdef __MigPackStructs
        mach_msg_header_t Head;
#ifdef __MigPackStructs

#ifdef __MigPackStructs
        mach_msg_header_t Head;
        /* start of the kernel processed data */
        mach_msg_body_t msgh_body;
        mach_msg_ool_ports_descriptor_t act_list;
        /* end of the kernel processed data */
        mach_msg_type_number_t act_listCnt;
        mach_msg_trailer_t trailer;
#ifdef __MigPackStructs

#ifdef __MigPackStructs
        mach_msg_header_t Head;
        /* start of the kernel processed data */
        mach_msg_body_t msgh_body;
        mach_msg_ool_ports_descriptor_t act_list;
        /* end of the kernel processed data */
        mach_msg_type_number_t act_listCnt;
#ifdef __MigPackStructs

     * mach_msg_header_t Head;
     * kern_return_t RetCode;
     * } mig_reply_error_t;

    Request *InP = &Mess.In;
    Reply *Out0P = &Mess.Out;

    mach_msg_return_t msg_result;

#ifdef __MIG_check__Reply__task_threads_t__defined
    kern_return_t check_result;
#endif /* __MIG_check__Reply__task_threads_t__defined */

    __DeclareSendRpc(3402, "task_threads")

    InP->Head.msgh_bits =
        MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
    /* msgh_size passed as argument */
    InP->Head.msgh_request_port = target_task;
    InP->Head.msgh_reply_port = mig_get_reply_port();
    InP->Head.msgh_id = 3402;

    __BeforeSendRpc(3402, "task_threads")
    msg_result = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_reply_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    __AfterSendRpc(3402, "task_threads")
    if (msg_result != MACH_MSG_SUCCESS) {
        _objc_inform("task_threads received unexpected reply msgh_id 0x%zx",
                     (size_t)Out0P->Head.msgh_id);
        __MachMsgErrorWithoutTimeout(msg_result);
        { return msg_result; }

#if defined(__MIG_check__Reply__task_threads_t__defined)
    check_result = __MIG_check__Reply__task_threads_t((__Reply__task_threads_t *)Out0P);
    if (check_result != MACH_MSG_SUCCESS)
        { return check_result; }
#endif /* defined(__MIG_check__Reply__task_threads_t__defined) */

    *act_list = (thread_act_array_t)(Out0P->act_list.address);
    *act_listCnt = Out0P->act_listCnt;

    return KERN_SUCCESS;

// DEBUG_TASK_THREADS