runtime/objc-cache.mm (apple/objc4, objc4-551.1)
1 /*
2 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /***********************************************************************
25 * objc-cache.mm
26 * Method cache management
27 * Cache flushing
28 * Cache garbage collection
29 * Cache instrumentation
30 * Dedicated allocator for large caches
31 **********************************************************************/
32
33
34 /***********************************************************************
35 * Method cache locking (GrP 2001-1-14)
36 *
37 * For speed, objc_msgSend does not acquire any locks when it reads
38 * method caches. Instead, all cache changes are performed so that any
39 * objc_msgSend running concurrently with the cache mutator will not
40 * crash or hang or get an incorrect result from the cache.
41 *
42 * When cache memory becomes unused (e.g. the old cache after cache
43 * expansion), it is not immediately freed, because a concurrent
44 * objc_msgSend could still be using it. Instead, the memory is
45 * disconnected from the data structures and placed on a garbage list.
46 * The memory is now only accessible to instances of objc_msgSend that
47 * were running when the memory was disconnected; any further calls to
48 * objc_msgSend will not see the garbage memory because the other data
49 * structures don't point to it anymore. The collecting_in_critical
50 * function checks the PC of all threads and returns FALSE when all threads
51 * are found to be outside objc_msgSend. This means any call to objc_msgSend
52 * that could have had access to the garbage has finished or moved past the
53 * cache lookup stage, so it is safe to free the memory.
54 *
55 * All functions that modify cache data or structures must acquire the
56 * cacheUpdateLock to prevent interference from concurrent modifications.
57 * The function that frees cache garbage must acquire the cacheUpdateLock
58 * and use collecting_in_critical() to flush out cache readers.
59 * The cacheUpdateLock is also used to protect the custom allocator used
60 * for large method cache blocks.
61 *
62 * Cache readers (PC-checked by collecting_in_critical())
63 * objc_msgSend*
64 * cache_getImp
65 * cache_getMethod
66 *
67 * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
68 * cache_fill (acquires lock)
69 * cache_expand (only called from cache_fill)
70 * cache_create (only called from cache_expand)
71 * bcopy (only called from instrumented cache_expand)
72 * flush_caches (acquires lock)
73 * cache_flush (only called from cache_fill and flush_caches)
74 * cache_collect_free (only called from cache_expand and cache_flush)
75 *
76 * UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
77 * cache_print
78 * _class_printMethodCaches
79 * _class_printDuplicateCacheEntries
80 * _class_printMethodCacheStatistics
81 *
82 * _class_lookupMethodAndLoadCache is a special case. It may read a
83 * method triplet out of one cache and store it in another cache. This
84 * is unsafe if the method triplet is a forward:: entry, because the
85 * triplet itself could be freed unless _class_lookupMethodAndLoadCache
86 * were PC-checked or used a lock. Additionally, storing the method
87 * triplet in both caches would result in double-freeing if both caches
88 * were flushed or expanded. The solution is for cache_getMethod to
89 * ignore all entries whose implementation is _objc_msgForward_impcache,
90 * so _class_lookupMethodAndLoadCache cannot look at a forward:: entry
91 * unsafely or place it in multiple caches.
92 ***********************************************************************/
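/***********************************************************************
 * Illustrative summary of the scheme above; not normative. The
 * authoritative rules are the comments in bucket_t::set(),
 * cache_t::reallocate(), and cache_collect() below. Some names in the
 * overview (cache_create, cache_expand, flush_caches, cache_getMethod,
 * _class_lookupMethodAndLoadCache) appear to predate the __OBJC2__
 * implementation in this file, which uses cache_fill, cache_t::expand,
 * and the cache_erase* functions instead.
 *
 *   writer (holds cacheUpdateLock), as in reallocate():
 *       newBuckets = calloc(...);              // fresh, empty buckets
 *       mega_barrier();                        // publish bucket contents first
 *       buckets = newBuckets;
 *       mega_barrier();                        // publish buckets before the mask
 *       setCapacity(newCapacity);              // updates shiftmask
 *       cache_collect_free(oldBuckets, size);  // old memory goes to the garbage list
 *
 *   reader (objc_msgSend, lock-free): reads shiftmask first, then buckets,
 *       so it can observe (old mask, old buckets), (old mask, new buckets),
 *       which is a harmless miss, or (new mask, new buckets), but never a
 *       mask that is too large for the buckets it is scanning.
 *
 *   collector: frees the garbage list only after _collecting_in_critical()
 *       reports that no thread's PC is inside the cache-reading routines.
 **********************************************************************/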
93
94
95 #if __OBJC2__
96
97 #include "objc-private.h"
98 #include "objc-cache.h"
99
100
101 /* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
102 enum {
103 INIT_CACHE_SIZE_LOG2 = 2,
104 INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2)
105 };
106
107 static size_t log2u(size_t x)
108 {
109 unsigned int log;
110
111 log = 0;
112 while (x >>= 1)
113 log += 1;
114
115 return log;
116 }
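// log2u(x) is floor(log2(x)) for x > 0 and returns 0 for x == 0.
// It is used only to bucket cache capacities for the OBJC_PRINT_CACHE_SETUP
// statistics (cache_counts[] below is indexed by log2 of the capacity).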
117
118 static void cache_collect_free(struct bucket_t *data, size_t size);
119 static int _collecting_in_critical(void);
120 static void _garbage_make_room(void);
121
122
123 /***********************************************************************
124 * Cache statistics for OBJC_PRINT_CACHE_SETUP
125 **********************************************************************/
126 static unsigned int cache_counts[16];
127 static size_t cache_allocations;
128 static size_t cache_collections;
129
130
131 /***********************************************************************
132 * Pointers used by compiled class objects
133 * These use asm to avoid conflicts with the compiler's internal declarations
134 **********************************************************************/
135
136 // "cache" is cache->buckets; "vtable" is cache->mask/occupied
137 // hack to avoid conflicts with compiler's internal declaration
138 asm("\n .section __TEXT,__const"
139 "\n .globl __objc_empty_cache"
140 #if __LP64__
141 "\n .align 3"
142 "\n __objc_empty_cache: .quad 0"
143 #else
144 "\n .align 2"
145 "\n __objc_empty_cache: .long 0"
146 #endif
147 "\n .globl __objc_empty_vtable"
148 "\n .set __objc_empty_vtable, 0"
149 );
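// __objc_empty_cache is a single zero pointer-sized word in __TEXT,__const,
// and __objc_empty_vtable is the absolute address 0. Compiled class objects
// presumably point at these until the runtime installs real cache storage,
// so a lookup in a never-filled cache misses and falls through to the slow path.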
150
151
152 #if __i386__ || __arm__
153 // objc_msgSend has few registers available.
154 // Cache scan increments and wraps at special end-marking bucket.
155 #define CACHE_END_MARKER 1
156 static inline mask_t cache_next(mask_t i, mask_t mask) {
157 return (i+1) & mask;
158 }
159
160 #elif __x86_64__
161 // objc_msgSend has lots of registers and/or memory operands available.
162 // Cache scan decrements. No end marker needed.
163 #define CACHE_END_MARKER 0
164 static inline mask_t cache_next(mask_t i, mask_t mask) {
165 return i ? i-1 : mask;
166 }
167
168 #else
169 #error unknown architecture
170 #endif
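// Example with mask 3 (a 4-bucket cache), starting the scan at index 2:
//   i386/arm:  forward scan visits 2, 3, 0, 1 (the assembly fast path wraps
//              at the end-marker bucket; this C helper wraps via the mask)
//   x86_64:    backward scan visits 2, 1, 0, 3 (no end marker needed)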
171
172
173 // cannot mix sel-side caches with ignored selector constant
174 // ignored selector constant also not implemented for class-side caches here
175 #if SUPPORT_IGNORED_SELECTOR_CONSTANT
176 #error sorry
177 #endif
178
179
180 // copied from dispatch_atomic_maximally_synchronizing_barrier
181 // fixme verify that this barrier hack does in fact work here
182 #if __x86_64__
183 #define mega_barrier() \
184 do { unsigned long _clbr; __asm__ __volatile__( \
185 "cpuid" \
186 : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \
187 ); } while(0)
188 #elif __i386__
189 #define mega_barrier() \
190 do { unsigned long _clbr; __asm__ __volatile__( \
191 "cpuid" \
192 : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \
193 ); } while(0)
194 #elif __arm__
195 #define mega_barrier() \
196 __asm__ __volatile__( \
197 "dsb ish" \
198 : : : "memory")
199 #else
200 #error unknown architecture
201 #endif
202
203
204 static inline mask_t cache_hash(cache_key_t key, mask_t mask)
205 {
206 return (mask_t)((key >> MASK_SHIFT) & mask);
207 }
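// Note: the key's low MASK_SHIFT bits are dropped before masking. Elsewhere
// in this file the stored field is named shiftmask, bad_cache() prints it as
// (shiftmask >> MASK_SHIFT), and expand() refuses to grow once the mask no
// longer survives (mask << MASK_SHIFT) >> MASK_SHIFT, so shiftmask appears to
// hold the mask pre-shifted left by MASK_SHIFT (presumably so the assembly
// fast path can index buckets with fewer instructions).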
208
209
210 // Class points to cache. Cache buckets store SEL+IMP.
211 cache_t *getCache(Class cls, SEL sel __unused)
212 {
213 assert(cls);
214 return &cls->cache;
215 }
216 cache_key_t getKey(Class cls __unused, SEL sel)
217 {
218 assert(sel);
219 return (cache_key_t)sel;
220 }
221
222
223 struct bucket_t {
224 cache_key_t key;
225 IMP imp;
226
227 void set(cache_key_t newKey, IMP newImp)
228 {
229 // objc_msgSend uses key and imp with no locks.
230 // It is safe for objc_msgSend to see new imp but NULL key
231 // (It will get a cache miss but not dispatch to the wrong place.)
232 // It is unsafe for objc_msgSend to see old imp and new key.
233 // Therefore we write new imp, wait a lot, then write new key.
234
235 assert(key == 0 || key == newKey);
236
237 imp = newImp;
238
239 if (key != newKey) {
240 mega_barrier();
241 key = newKey;
242 }
243 }
244 };
245
246
247 void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity)
248 {
249 if (PrintCaches) {
250 size_t bucket = log2u(newCapacity);
251 if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
252 cache_counts[bucket]++;
253 }
254 cache_allocations++;
255
256 if (oldCapacity) {
257 bucket = log2u(oldCapacity);
258 if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
259 cache_counts[bucket]--;
260 }
261 }
262 }
263
264 // objc_msgSend uses shiftmask and buckets with no locks.
265 // It is safe for objc_msgSend to see new buckets but old shiftmask.
266 // (It will get a cache miss but not overrun the buckets' bounds).
267 // It is unsafe for objc_msgSend to see old buckets and new shiftmask.
268 // Therefore we write new buckets, wait a lot, then write new shiftmask.
269 // objc_msgSend reads shiftmask first, then buckets.
270
271 bucket_t *oldBuckets = buckets;
272
273 #if CACHE_END_MARKER
274 // Allocate one extra bucket to mark the end of the list.
275 // fixme instead put the end mark inline when +1 is malloc-inefficient
276 bucket_t *newBuckets =
277 (bucket_t *)_calloc_internal(newCapacity + 1, sizeof(bucket_t));
278
279 // End marker's key is 1 and imp points to the first bucket.
280 newBuckets[newCapacity].key = (cache_key_t)(uintptr_t)1;
281 # if __arm__
282 // Point before the first bucket instead to save an instruction in msgSend
283 newBuckets[newCapacity].imp = (IMP)(newBuckets - 1);
284 # else
285 newBuckets[newCapacity].imp = (IMP)newBuckets;
286 # endif
287 #else
288 bucket_t *newBuckets =
289 (bucket_t *)_calloc_internal(newCapacity, sizeof(bucket_t));
290 #endif
291
292 // Cache's old contents are not propagated.
293 // This is thought to save cache memory at the cost of extra cache fills.
294 // fixme re-measure this
295
296 // ensure other threads see buckets contents before buckets pointer
297 mega_barrier();
298
299 buckets = newBuckets;
300
301 // ensure other threads see new buckets before new shiftmask
302 mega_barrier();
303
304 setCapacity(newCapacity);
305 occupied = 0;
306
307 if (oldCapacity > 0) {
308 cache_collect_free(oldBuckets, oldCapacity * sizeof(bucket_t));
309 cache_collect(false);
310 }
311 }
312
313
314 // called by objc_msgSend
315 extern "C"
316 void objc_msgSend_corrupt_cache_error(id receiver, SEL sel, Class isa,
317 bucket_t *bucket)
318 {
319 cache_t::bad_cache(receiver, sel, isa, bucket);
320 }
321
322 extern "C"
323 void cache_getImp_corrupt_cache_error(id receiver, SEL sel, Class isa,
324 bucket_t *bucket)
325 {
326 cache_t::bad_cache(receiver, sel, isa, bucket);
327 }
328
329 void cache_t::bad_cache(id receiver, SEL sel, Class isa, bucket_t *bucket)
330 {
331 // Log in separate steps in case the logging itself causes a crash.
332 _objc_inform_now_and_on_crash
333 ("Method cache corrupted. This may be a message to an "
334 "invalid object, or a memory error somewhere else.");
335 cache_t *cache = &isa->cache;
336 _objc_inform_now_and_on_crash
337 ("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
338 "mask 0x%x, occupied 0x%x, wrap bucket %p",
339 receiver ? "receiver" : "unused", receiver,
340 sel, isa, cache, cache->buckets,
341 cache->shiftmask >> MASK_SHIFT, cache->occupied, bucket);
342 _objc_inform_now_and_on_crash
343 ("%s %zu bytes, buckets %zu bytes",
344 receiver ? "receiver" : "unused", malloc_size(receiver),
345 malloc_size(cache->buckets));
346 _objc_inform_now_and_on_crash
347 ("selector '%s'", sel_getName(sel));
348 _objc_inform_now_and_on_crash
349 ("isa '%s'", isa->getName());
350 _objc_fatal
351 ("Method cache corrupted.");
352 }
353
354
355 bucket_t * cache_t::find(cache_key_t k)
356 {
357 mask_t m = mask();
358 mask_t begin = cache_hash(k, m);
359 mask_t i = begin;
360 do {
361 if (buckets[i].key == 0 || buckets[i].key == k) {
362 return &buckets[i];
363 }
364 } while ((i = cache_next(i, m)) != begin);
365
366 // hack
367 Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache));
368 cache_t::bad_cache(nil, (SEL)k, cls, nil);
369 }
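// Note on termination: cache_fill_nolock() expands at 3/4 occupancy, so a
// well-formed cache always contains at least one empty bucket and the loop
// above returns before wrapping around. If every bucket holds some other key,
// the cache is corrupt and bad_cache() aborts via _objc_fatal(), which is why
// no return statement follows it.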
370
371
372 void cache_t::expand()
373 {
374 mutex_assert_locked(&cacheUpdateLock);
375
376 mask_t oldCapacity = capacity();
377 mask_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE;
378
379 if ((((newCapacity-1) << MASK_SHIFT) >> MASK_SHIFT) != newCapacity-1) {
380 // shiftmask overflow - can't grow further
381 newCapacity = oldCapacity;
382 }
383
384 reallocate(oldCapacity, newCapacity);
385 }
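// Capacity grows by doubling: 4, 8, 16, ... buckets (INIT_CACHE_SIZE == 4).
// Once the doubled mask can no longer be encoded in shiftmask, reallocate()
// is called with the old capacity instead, which keeps the size but drops the
// cache's contents (reallocate() never copies old entries).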
386
387
388 static void cache_fill_nolock(Class cls, SEL sel, IMP imp)
389 {
390 mutex_assert_locked(&cacheUpdateLock);
391
392 // Never cache before +initialize is done
393 if (!cls->isInitialized()) return;
394
395 // Make sure the entry wasn't added to the cache by some other thread
396 // before we grabbed the cacheUpdateLock.
397 if (cache_getImp(cls, sel)) return;
398
399 cache_t *cache = getCache(cls, sel);
400 cache_key_t key = getKey(cls, sel);
401
402 // Use the cache as-is if it is less than 3/4 full
403 mask_t newOccupied = cache->occupied + 1;
404 if ((newOccupied * 4) <= (cache->mask() + 1) * 3) {
405 // Cache is less than 3/4 full.
406 } else {
407 // Cache is too full. Expand it.
408 cache->expand();
409 }
410
411 // Scan for the first unused slot (or the slot already used by this key) and insert there.
412 // There is guaranteed to be an empty slot because the
413 // minimum size is 4 and we resized at 3/4 full.
414 bucket_t *bucket = cache->find(key);
415 if (bucket->key == 0) cache->occupied++;
416 bucket->set(key, imp);
417 }
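// Worked example of the 3/4 check above for a 4-bucket cache (mask == 3):
//   occupied 0..2: (occupied+1)*4 <= (3+1)*3 == 12, so the entry is added in place.
//   occupied 3:    4*4 == 16 > 12, so expand() doubles to 8 buckets; since
//                  reallocate() discards old entries, the new entry then lands
//                  in an otherwise-empty cache.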
418
419 void cache_fill(Class cls, SEL sel, IMP imp)
420 {
421 #if !DEBUG_TASK_THREADS
422 mutex_lock(&cacheUpdateLock);
423 cache_fill_nolock(cls, sel, imp);
424 mutex_unlock(&cacheUpdateLock);
425 #else
426 _collecting_in_critical();
427 return;
428 #endif
429 }
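// Illustrative call-site sketch only; resolveImp() is a hypothetical
// placeholder for the runtime's slow-path method search, not a real function:
//
//   IMP imp = resolveImp(cls, sel);       // walk method lists, try resolvers, ...
//   if (imp) cache_fill(cls, sel, imp);   // later objc_msgSends hit the cache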
430
431
432 // Reset any entry for cls/sel to the uncached lookup
433 static void cache_eraseMethod_nolock(Class cls, SEL sel)
434 {
435 mutex_assert_locked(&cacheUpdateLock);
436
437 cache_t *cache = getCache(cls, sel);
438 cache_key_t key = getKey(cls, sel);
439
440 bucket_t *bucket = cache->find(key);
441 if (bucket->key == key) {
442 bucket->imp = _objc_msgSend_uncached_impcache;
443 }
444 }
445
446
447 // Resets cache entries for all methods in mlist for cls and its subclasses.
448 void cache_eraseMethods(Class cls, method_list_t *mlist)
449 {
450 rwlock_assert_writing(&runtimeLock);
451 mutex_lock(&cacheUpdateLock);
452
453 FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls, {
454 for (uint32_t m = 0; m < mlist->count; m++) {
455 SEL sel = mlist->get(m).name;
456 cache_eraseMethod_nolock(c, sel);
457 }
458 });
459
460 mutex_unlock(&cacheUpdateLock);
461 }
462
463
464 // Reset any copies of imp in this cache to the uncached lookup
465 void cache_eraseImp_nolock(Class cls, SEL sel, IMP imp)
466 {
467 mutex_assert_locked(&cacheUpdateLock);
468
469 cache_t *cache = getCache(cls, sel);
470
471 bucket_t *buckets = cache->buckets;
472 mask_t count = cache->capacity();
473 for (mask_t i = 0; i < count; i++) {
474 if (buckets[i].imp == imp) {
475 buckets[i].imp = _objc_msgSend_uncached_impcache;
476 }
477 }
478 }
479
480
481 void cache_eraseImp(Class cls, SEL sel, IMP imp)
482 {
483 mutex_lock(&cacheUpdateLock);
484 cache_eraseImp_nolock(cls, sel, imp);
485 mutex_unlock(&cacheUpdateLock);
486 }
487
488
489 // Reset this entire cache to the uncached lookup by reallocating it.
490 // This must not shrink the cache - that breaks the lock-free scheme: a reader that already loaded the old, larger mask could index past the end of a smaller bucket array.
491 void cache_erase_nolock(cache_t *cache)
492 {
493 mutex_assert_locked(&cacheUpdateLock);
494
495 mask_t capacity = cache->capacity();
496 if (capacity > 0 && cache->occupied > 0) {
497 cache->reallocate(capacity, capacity);
498 }
499 }
500
501
502 /***********************************************************************
503 * cache collection.
504 **********************************************************************/
505
506 #if !TARGET_OS_WIN32
507
508 // A sentinel (magic value) to report bad thread_get_state status.
509 // Must not be a valid PC.
510 // Must not be zero - thread_get_state() on a new thread returns PC == 0.
511 #define PC_SENTINEL 1
512
513 static uintptr_t _get_pc_for_thread(thread_t thread)
514 #if defined(__i386__)
515 {
516 i386_thread_state_t state;
517 unsigned int count = i386_THREAD_STATE_COUNT;
518 kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
519 return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
520 }
521 #elif defined(__x86_64__)
522 {
523 x86_thread_state64_t state;
524 unsigned int count = x86_THREAD_STATE64_COUNT;
525 kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
526 return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
527 }
528 #elif defined(__arm__)
529 {
530 arm_thread_state_t state;
531 unsigned int count = ARM_THREAD_STATE_COUNT;
532 kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
533 return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
534 }
535 #else
536 {
537 #error _get_pc_for_thread () not implemented for this architecture
538 }
539 #endif
540
541 #endif
542
543 /***********************************************************************
544 * _collecting_in_critical.
545 * Returns TRUE if some thread is currently executing a cache-reading
546 * function. Collection of cache garbage is not allowed when a cache-
547 * reading function is in progress because it might still be using
548 * the garbage memory.
549 **********************************************************************/
550 OBJC_EXPORT uintptr_t objc_entryPoints[];
551 OBJC_EXPORT uintptr_t objc_exitPoints[];
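// objc_entryPoints[] and objc_exitPoints[] are expected to hold the start and
// end addresses of each cache-reading routine (objc_msgSend and friends),
// presumably defined alongside those routines in the assembly sources.
// The entry list is 0-terminated; the scan loop below relies on that.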
552
553 static int _collecting_in_critical(void)
554 {
555 #if TARGET_OS_WIN32
556 return TRUE;
557 #else
558 thread_act_port_array_t threads;
559 unsigned number;
560 unsigned count;
561 kern_return_t ret;
562 int result;
563
564 mach_port_t mythread = pthread_mach_thread_np(pthread_self());
565
566 // Get a list of all the threads in the current task
567 #if !DEBUG_TASK_THREADS
568 ret = task_threads(mach_task_self(), &threads, &number);
569 #else
570 ret = objc_task_threads(mach_task_self(), &threads, &number);
571 #endif
572
573 if (ret != KERN_SUCCESS) {
574 // See DEBUG_TASK_THREADS below to help debug this.
575 _objc_fatal("task_threads failed (result 0x%x)\n", ret);
576 }
577
578 // Check whether any thread is in the cache lookup code
579 result = FALSE;
580 for (count = 0; count < number; count++)
581 {
582 int region;
583 uintptr_t pc;
584
585 // Don't bother checking ourselves
586 if (threads[count] == mythread)
587 continue;
588
589 // Find out where thread is executing
590 pc = _get_pc_for_thread (threads[count]);
591
592 // Check for bad status, and if so, assume the worst (can't collect)
593 if (pc == PC_SENTINEL)
594 {
595 result = TRUE;
596 goto done;
597 }
598
599 // Check whether it is in the cache lookup code
600 for (region = 0; objc_entryPoints[region] != 0; region++)
601 {
602 if ((pc >= objc_entryPoints[region]) &&
603 (pc <= objc_exitPoints[region]))
604 {
605 result = TRUE;
606 goto done;
607 }
608 }
609 }
610
611 done:
612 // Deallocate the port rights for the threads
613 for (count = 0; count < number; count++) {
614 mach_port_deallocate(mach_task_self (), threads[count]);
615 }
616
617 // Deallocate the thread list
618 vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);
619
620 // Return our finding
621 return result;
622 #endif
623 }
624
625
626 /***********************************************************************
627 * _garbage_make_room. Ensure that there is enough room for at least
628 * one more ref in the garbage.
629 **********************************************************************/
630
631 // amount of memory represented by all refs in the garbage
632 static size_t garbage_byte_size = 0;
633
634 // do not empty the garbage until garbage_byte_size gets at least this big
635 static size_t garbage_threshold = 32*1024;
636
637 // table of refs to free
638 static bucket_t **garbage_refs = 0;
639
640 // current number of refs in garbage_refs
641 static size_t garbage_count = 0;
642
643 // capacity of current garbage_refs
644 static size_t garbage_max = 0;
645
646 // capacity of initial garbage_refs
647 enum {
648 INIT_GARBAGE_COUNT = 128
649 };
650
651 static void _garbage_make_room(void)
652 {
653 static int first = 1;
654
655 // Create the collection table the first time it is needed
656 if (first)
657 {
658 first = 0;
659 garbage_refs = (bucket_t**)
660 _malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
661 garbage_max = INIT_GARBAGE_COUNT;
662 }
663
664 // Double the table if it is full
665 else if (garbage_count == garbage_max)
666 {
667 garbage_refs = (bucket_t**)
668 _realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));
669 garbage_max *= 2;
670 }
671 }
672
673
674 /***********************************************************************
675 * cache_collect_free. Add the specified malloc'd memory to the list
676 * of memory to be freed at some later point.
677 * size is used for the collection threshold. It does not have to be
678 * precisely the block's size.
679 * Cache locks: cacheUpdateLock must be held by the caller.
680 **********************************************************************/
681 static void cache_collect_free(bucket_t *data, size_t size)
682 {
683 mutex_assert_locked(&cacheUpdateLock);
684
685 _garbage_make_room ();
686 garbage_byte_size += size;
687 garbage_refs[garbage_count++] = data;
688 }
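// Note: this only queues the memory. The caller is expected to follow up with
// cache_collect(), as reallocate() does, once it is safe to actually free the
// garbage (i.e. when no cache reader may still be scanning it).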
689
690
691 /***********************************************************************
692 * cache_collect. Try to free accumulated dead caches.
693 * collectALot tries harder to free memory.
694 * Cache locks: cacheUpdateLock must be held by the caller.
695 **********************************************************************/
696 void cache_collect(bool collectALot)
697 {
698 mutex_assert_locked(&cacheUpdateLock);
699
700 // Done if the garbage is not full
701 if (garbage_byte_size < garbage_threshold && !collectALot) {
702 return;
703 }
704
705 // Synchronize collection with objc_msgSend and other cache readers
706 if (!collectALot) {
707 if (_collecting_in_critical ()) {
708 // objc_msgSend (or other cache reader) is currently looking in
709 // the cache and might still be using some garbage.
710 if (PrintCaches) {
711 _objc_inform ("CACHES: not collecting; "
712 "objc_msgSend in progress");
713 }
714 return;
715 }
716 }
717 else {
718 // No excuses.
719 while (_collecting_in_critical())
720 ;
721 }
722
723 // No cache readers in progress - garbage is now deletable
724
725 // Log our progress
726 if (PrintCaches) {
727 cache_collections++;
728 _objc_inform ("CACHES: COLLECTING %zu bytes (%zu allocations, %zu collections)", garbage_byte_size, cache_allocations, cache_collections);
729 }
730
731 // Dispose all refs now in the garbage
732 while (garbage_count--) {
733 free(garbage_refs[garbage_count]);
734 }
735
736 // Clear the garbage count and total size indicator
737 garbage_count = 0;
738 garbage_byte_size = 0;
739
740 if (PrintCaches) {
741 size_t i;
742 size_t total_count = 0;
743 size_t total_size = 0;
744
745 for (i = 0; i < sizeof(cache_counts) / sizeof(cache_counts[0]); i++) {
746 int count = cache_counts[i];
747 int slots = 1 << i;
748 size_t size = count * slots * sizeof(bucket_t);
749
750 if (!count) continue;
751
752 _objc_inform("CACHES: %4d slots: %4d caches, %6zu bytes",
753 slots, count, size);
754
755 total_count += count;
756 total_size += size;
757 }
758
759 _objc_inform("CACHES: total: %4zu caches, %6zu bytes",
760 total_count, total_size);
761 }
762 }
763
764
765 /***********************************************************************
766 * objc_task_threads
767 * Replacement for task_threads(). Define DEBUG_TASK_THREADS to debug
768 * crashes when task_threads() is failing.
769 *
770 * A failure in task_threads() usually means somebody has botched their
771 * Mach or MIG traffic. For example, somebody's error handling was wrong
772 * and they left a message queued on the MIG reply port for task_threads()
773 * to trip over.
774 *
775 * The code below is a modified version of task_threads(). It logs
776 * the msgh_id of the reply message. The msgh_id can identify the sender
777 * of the message, which can help pinpoint the faulty code.
778 * DEBUG_TASK_THREADS also makes cache_fill() call _collecting_in_critical()
779 * instead of filling the cache, which can increase reproducibility of bugs.
780 *
781 * This code can be regenerated by running
782 * `mig /usr/include/mach/task.defs`.
783 **********************************************************************/
784 #if DEBUG_TASK_THREADS
785
786 #include <mach/mach.h>
787 #include <mach/message.h>
788 #include <mach/mig.h>
789
790 #define __MIG_check__Reply__task_subsystem__ 1
791 #define mig_internal static inline
792 #define __DeclareSendRpc(a, b)
793 #define __BeforeSendRpc(a, b)
794 #define __AfterSendRpc(a, b)
795 #define msgh_request_port msgh_remote_port
796 #define msgh_reply_port msgh_local_port
797
798 #ifndef __MachMsgErrorWithTimeout
799 #define __MachMsgErrorWithTimeout(_R_) { \
800 switch (_R_) { \
801 case MACH_SEND_INVALID_DATA: \
802 case MACH_SEND_INVALID_DEST: \
803 case MACH_SEND_INVALID_HEADER: \
804 mig_put_reply_port(InP->Head.msgh_reply_port); \
805 break; \
806 case MACH_SEND_TIMED_OUT: \
807 case MACH_RCV_TIMED_OUT: \
808 default: \
809 mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
810 } \
811 }
812 #endif /* __MachMsgErrorWithTimeout */
813
814 #ifndef __MachMsgErrorWithoutTimeout
815 #define __MachMsgErrorWithoutTimeout(_R_) { \
816 switch (_R_) { \
817 case MACH_SEND_INVALID_DATA: \
818 case MACH_SEND_INVALID_DEST: \
819 case MACH_SEND_INVALID_HEADER: \
820 mig_put_reply_port(InP->Head.msgh_reply_port); \
821 break; \
822 default: \
823 mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
824 } \
825 }
826 #endif /* __MachMsgErrorWithoutTimeout */
827
828
829 #if ( __MigTypeCheck )
830 #if __MIG_check__Reply__task_subsystem__
831 #if !defined(__MIG_check__Reply__task_threads_t__defined)
832 #define __MIG_check__Reply__task_threads_t__defined
833
834 mig_internal kern_return_t __MIG_check__Reply__task_threads_t(__Reply__task_threads_t *Out0P)
835 {
836
837 typedef __Reply__task_threads_t __Reply;
838 boolean_t msgh_simple;
839 #if __MigTypeCheck
840 unsigned int msgh_size;
841 #endif /* __MigTypeCheck */
842 if (Out0P->Head.msgh_id != 3502) {
843 if (Out0P->Head.msgh_id == MACH_NOTIFY_SEND_ONCE)
844 { return MIG_SERVER_DIED; }
845 else
846 { return MIG_REPLY_MISMATCH; }
847 }
848
849 msgh_simple = !(Out0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX);
850 #if __MigTypeCheck
851 msgh_size = Out0P->Head.msgh_size;
852
853 if ((msgh_simple || Out0P->msgh_body.msgh_descriptor_count != 1 ||
854 msgh_size != (mach_msg_size_t)sizeof(__Reply)) &&
855 (!msgh_simple || msgh_size != (mach_msg_size_t)sizeof(mig_reply_error_t) ||
856 ((mig_reply_error_t *)Out0P)->RetCode == KERN_SUCCESS))
857 { return MIG_TYPE_ERROR ; }
858 #endif /* __MigTypeCheck */
859
860 if (msgh_simple) {
861 return ((mig_reply_error_t *)Out0P)->RetCode;
862 }
863
864 #if __MigTypeCheck
865 if (Out0P->act_list.type != MACH_MSG_OOL_PORTS_DESCRIPTOR ||
866 Out0P->act_list.disposition != 17) {
867 return MIG_TYPE_ERROR;
868 }
869 #endif /* __MigTypeCheck */
870
871 return MACH_MSG_SUCCESS;
872 }
873 #endif /* !defined(__MIG_check__Reply__task_threads_t__defined) */
874 #endif /* __MIG_check__Reply__task_subsystem__ */
875 #endif /* ( __MigTypeCheck ) */
876
877
878 /* Routine task_threads */
879 static kern_return_t objc_task_threads
880 (
881 task_t target_task,
882 thread_act_array_t *act_list,
883 mach_msg_type_number_t *act_listCnt
884 )
885 {
886
887 #ifdef __MigPackStructs
888 #pragma pack(4)
889 #endif
890 typedef struct {
891 mach_msg_header_t Head;
892 } Request;
893 #ifdef __MigPackStructs
894 #pragma pack()
895 #endif
896
897 #ifdef __MigPackStructs
898 #pragma pack(4)
899 #endif
900 typedef struct {
901 mach_msg_header_t Head;
902 /* start of the kernel processed data */
903 mach_msg_body_t msgh_body;
904 mach_msg_ool_ports_descriptor_t act_list;
905 /* end of the kernel processed data */
906 NDR_record_t NDR;
907 mach_msg_type_number_t act_listCnt;
908 mach_msg_trailer_t trailer;
909 } Reply;
910 #ifdef __MigPackStructs
911 #pragma pack()
912 #endif
913
914 #ifdef __MigPackStructs
915 #pragma pack(4)
916 #endif
917 typedef struct {
918 mach_msg_header_t Head;
919 /* start of the kernel processed data */
920 mach_msg_body_t msgh_body;
921 mach_msg_ool_ports_descriptor_t act_list;
922 /* end of the kernel processed data */
923 NDR_record_t NDR;
924 mach_msg_type_number_t act_listCnt;
925 } __Reply;
926 #ifdef __MigPackStructs
927 #pragma pack()
928 #endif
929 /*
930 * typedef struct {
931 * mach_msg_header_t Head;
932 * NDR_record_t NDR;
933 * kern_return_t RetCode;
934 * } mig_reply_error_t;
935 */
936
937 union {
938 Request In;
939 Reply Out;
940 } Mess;
941
942 Request *InP = &Mess.In;
943 Reply *Out0P = &Mess.Out;
944
945 mach_msg_return_t msg_result;
946
947 #ifdef __MIG_check__Reply__task_threads_t__defined
948 kern_return_t check_result;
949 #endif /* __MIG_check__Reply__task_threads_t__defined */
950
951 __DeclareSendRpc(3402, "task_threads")
952
953 InP->Head.msgh_bits =
954 MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
955 /* msgh_size passed as argument */
956 InP->Head.msgh_request_port = target_task;
957 InP->Head.msgh_reply_port = mig_get_reply_port();
958 InP->Head.msgh_id = 3402;
959
960 __BeforeSendRpc(3402, "task_threads")
961 msg_result = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_reply_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
962 __AfterSendRpc(3402, "task_threads")
963 if (msg_result != MACH_MSG_SUCCESS) {
964 _objc_inform("task_threads received unexpected reply msgh_id 0x%zx",
965 (size_t)Out0P->Head.msgh_id);
966 __MachMsgErrorWithoutTimeout(msg_result);
967 { return msg_result; }
968 }
969
970
971 #if defined(__MIG_check__Reply__task_threads_t__defined)
972 check_result = __MIG_check__Reply__task_threads_t((__Reply__task_threads_t *)Out0P);
973 if (check_result != MACH_MSG_SUCCESS)
974 { return check_result; }
975 #endif /* defined(__MIG_check__Reply__task_threads_t__defined) */
976
977 *act_list = (thread_act_array_t)(Out0P->act_list.address);
978 *act_listCnt = Out0P->act_listCnt;
979
980 return KERN_SUCCESS;
981 }
982
983 // DEBUG_TASK_THREADS
984 #endif
985
986
987 // __OBJC2__
988 #endif