1 /*
2 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /***********************************************************************
25 * objc-cache.mm
26 * Method cache management
27 * Cache flushing
28 * Cache garbage collection
29 * Cache instrumentation
30 * Dedicated allocator for large caches
31 **********************************************************************/
32
33
34 /***********************************************************************
35 * Method cache locking (GrP 2001-1-14)
36 *
37 * For speed, objc_msgSend does not acquire any locks when it reads
38 * method caches. Instead, all cache changes are performed so that any
39 * objc_msgSend running concurrently with the cache mutator will not
40 * crash or hang or get an incorrect result from the cache.
41 *
42 * When cache memory becomes unused (e.g. the old cache after cache
43 * expansion), it is not immediately freed, because a concurrent
44 * objc_msgSend could still be using it. Instead, the memory is
45 * disconnected from the data structures and placed on a garbage list.
46 * The memory is now only accessible to instances of objc_msgSend that
47 * were running when the memory was disconnected; any further calls to
48 * objc_msgSend will not see the garbage memory because the other data
49 * structures don't point to it anymore. The collecting_in_critical
50 * function checks the PC of all threads and returns FALSE when all threads
51 * are found to be outside objc_msgSend. This means any call to objc_msgSend
52 * that could have had access to the garbage has finished or moved past the
53 * cache lookup stage, so it is safe to free the memory.
54 *
55 * All functions that modify cache data or structures must acquire the
56 * cacheUpdateLock to prevent interference from concurrent modifications.
57 * The function that frees cache garbage must acquire the cacheUpdateLock
58 * and use collecting_in_critical() to flush out cache readers.
59 * The cacheUpdateLock is also used to protect the custom allocator used
60 * for large method cache blocks.
61 *
62 * Cache readers (PC-checked by collecting_in_critical())
63 * objc_msgSend*
64 * cache_getImp
65 *
66 * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
67 * cache_fill (acquires lock)
68 * cache_expand (only called from cache_fill)
69 * cache_create (only called from cache_expand)
70 * bcopy (only called from instrumented cache_expand)
71 * flush_caches (acquires lock)
72 * cache_flush (only called from cache_fill and flush_caches)
73 * cache_collect_free (only called from cache_expand and cache_flush)
74 *
75 * UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
76 * cache_print
77 * _class_printMethodCaches
78 * _class_printDuplicateCacheEntries
79 * _class_printMethodCacheStatistics
80 *
81 ***********************************************************************/
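/*
 * A minimal sketch (illustration only) of the writer-side sequence described
 * above; the real code paths are cache_fill, cache_t::reallocate,
 * cache_collect_free, and cache_collect below.
 *
 *     mutex_lock(&cacheUpdateLock);
 *     bucket_t *old = cache->buckets();              // still visible to readers
 *     cache->setBucketsAndMask(newBuckets, newMask); // disconnect old memory
 *     cache_collect_free(old, oldCapacity * sizeof(bucket_t)); // queue as garbage
 *     cache_collect(false);                          // free garbage only if no
 *                                                    // thread's PC is inside a
 *                                                    // cache-reading routine
 *     mutex_unlock(&cacheUpdateLock);
 */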
82
83
84 #if __OBJC2__
85
86 #include "objc-private.h"
87 #include "objc-cache.h"
88
89
90 /* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
91 enum {
92 INIT_CACHE_SIZE_LOG2 = 2,
93 INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2)
94 };
95
96 static size_t log2u(size_t x)
97 {
98 unsigned int log;
99
100 log = 0;
101 while (x >>= 1)
102 log += 1;
103
104 return log;
105 }
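// For example, log2u(8) == 3 and log2u(7) == 2: this is the floor of log2.
// The cache-size histogram below indexes cache_counts with it; capacities
// are powers of two, so the result is exact for them.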
106
107 static void cache_collect_free(struct bucket_t *data, size_t size);
108 static int _collecting_in_critical(void);
109 static void _garbage_make_room(void);
110
111
112 /***********************************************************************
113 * Cache statistics for OBJC_PRINT_CACHE_SETUP
114 **********************************************************************/
115 static unsigned int cache_counts[16];
116 static size_t cache_allocations;
117 static size_t cache_collections;
118
119
120 /***********************************************************************
121 * Pointers used by compiled class objects
122 * These use asm to avoid conflicts with the compiler's internal declarations
123 **********************************************************************/
124
125 // "cache" is cache->buckets; "vtable" is cache->mask/occupied
126 // hack to avoid conflicts with compiler's internal declaration
127 asm("\n .section __TEXT,__const"
128 "\n .globl __objc_empty_cache"
129 #if __LP64__
130 "\n .align 3"
131 "\n __objc_empty_cache: .quad 0"
132 #else
133 "\n .align 2"
134 "\n __objc_empty_cache: .long 0"
135 #endif
136 "\n .globl __objc_empty_vtable"
137 "\n .set __objc_empty_vtable, 0"
138 );
139
140
141 #if __arm__
142 // objc_msgSend has few registers available.
143 // Cache scan increments and wraps at special end-marking bucket.
144 #define CACHE_END_MARKER 1
145 static inline mask_t cache_next(mask_t i, mask_t mask) {
146 return (i+1) & mask;
147 }
148
149 #elif __i386__ || __x86_64__ || __arm64__
150 // objc_msgSend has lots of registers and/or memory operands available.
151 // Cache scan decrements. No end marker needed.
152 #define CACHE_END_MARKER 0
153 static inline mask_t cache_next(mask_t i, mask_t mask) {
154 return i ? i-1 : mask;
155 }
156
157 #else
158 #error unknown architecture
159 #endif
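/*
 * Probe-order example: with mask == 3 and a starting index of 2, the __arm__
 * variant visits buckets 2, 3, 0, 1 (incrementing and wrapping toward the
 * end marker), while the i386/x86_64/arm64 variant visits 2, 1, 0, 3
 * (decrementing, no end marker needed). Either way every bucket is visited
 * exactly once before the scan returns to its starting point.
 */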
160
161
162 // cannot mix sel-side caches with ignored selector constant
163 // ignored selector constant also not implemented for class-side caches here
164 #if SUPPORT_IGNORED_SELECTOR_CONSTANT
165 #error sorry
166 #endif
167
168
169 // copied from dispatch_atomic_maximally_synchronizing_barrier
170 // fixme verify that this barrier hack does in fact work here
171 #if __x86_64__
172 #define mega_barrier() \
173 do { unsigned long _clbr; __asm__ __volatile__( \
174 "cpuid" \
175 : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \
176 ); } while(0)
177
178 #elif __i386__
179 #define mega_barrier() \
180 do { unsigned long _clbr; __asm__ __volatile__( \
181 "cpuid" \
182 : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \
183 ); } while(0)
184
185 #elif __arm__
186 #define mega_barrier() \
187 __asm__ __volatile__( \
188 "dsb ish" \
189 : : : "memory")
190
191 #elif __arm64__
192 // Use atomic double-word updates instead.
193 // This requires that cache buckets not cross cache line boundaries.
194 #undef mega_barrier
195 #define stp(onep, twop, destp) \
196 __asm__ ("stp %[one], %[two], [%[dest]]" \
197 : "=m" (((uint64_t *)(destp))[0]), \
198 "=m" (((uint64_t *)(destp))[1]) \
199 : [one] "r" (onep), \
200 [two] "r" (twop), \
201 [dest] "r" (destp) \
202 : /* no clobbers */ \
203 )
204 #define ldp(onep, twop, srcp) \
205 __asm__ ("ldp %[one], %[two], [%[src]]" \
206 : [one] "=r" (onep), \
207 [two] "=r" (twop) \
208 : "m" (((uint64_t *)(srcp))[0]), \
209 "m" (((uint64_t *)(srcp))[1]) \
210 [src] "r" (srcp) \
211 : /* no clobbers */ \
212 )
213
214 #else
215 #error unknown architecture
216 #endif
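/*
 * For illustration, a consistent (key, imp) pair could be loaded with the
 * ldp() macro above in a single double-word access, so an observer sees
 * either the old pair or the new pair, never a mix:
 *
 *     uintptr_t key, imp;
 *     ldp(key, imp, bucketp);   // bucketp: pointer to a bucket_t (hypothetical)
 *
 * The actual cache reader is the objc_msgSend assembly, which issues its own
 * ldp; bucket_t::set below uses the stp() counterpart for the store side.
 */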
217
218
219 // Class points to cache. SEL is key. Cache buckets store SEL+IMP.
220 // Caches are never built in the dyld shared cache.
221
222 static inline mask_t cache_hash(cache_key_t key, mask_t mask)
223 {
224 return (mask_t)(key & mask);
225 }
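/*
 * Worked example (hypothetical SEL address): with mask == 7, a key of
 * 0x10c0a4df0 hashes to 0x10c0a4df0 & 7 == 0, so probing starts at bucket 0.
 * Only the low bits of the SEL pointer choose the starting bucket.
 */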
226
227 cache_t *getCache(Class cls, SEL sel __unused)
228 {
229 assert(cls);
230 return &cls->cache;
231 }
232
233 cache_key_t getKey(Class cls __unused, SEL sel)
234 {
235 assert(sel);
236 return (cache_key_t)sel;
237 }
238
239
240
241 #if __arm64__
242
243 void bucket_t::set(cache_key_t newKey, IMP newImp)
244 {
245 assert(_key == 0 || _key == newKey);
246
247 // LDP/STP guarantees that all observers get
248 // either key/imp or newKey/newImp
249 stp(newKey, newImp, this);
250 }
251
252 void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
253 {
254 // ensure other threads see buckets contents before buckets pointer
255 // see Barrier Litmus Tests and Cookbook,
256 // "Address Dependency with object construction"
257 __sync_synchronize();
258
259 // LDP/STP guarantees that all observers get
260 // old mask/buckets or new mask/buckets
261
262 mask_t newOccupied = 0;
263 uint64_t mask_and_occupied =
264 (uint64_t)newMask | ((uint64_t)newOccupied << 32);
265 stp(newBuckets, mask_and_occupied, this);
266 }
267
268 // arm64
269 #else
270 // not arm64
271
272 void bucket_t::set(cache_key_t newKey, IMP newImp)
273 {
274 assert(_key == 0 || _key == newKey);
275
276 // objc_msgSend uses key and imp with no locks.
277 // It is safe for objc_msgSend to see new imp but NULL key
278 // (It will get a cache miss but not dispatch to the wrong place.)
279 // It is unsafe for objc_msgSend to see old imp and new key.
280 // Therefore we write new imp, wait a lot, then write new key.
281
282 _imp = newImp;
283
284 if (_key != newKey) {
285 mega_barrier();
286 _key = newKey;
287 }
288 }
289
290 void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
291 {
292 // objc_msgSend uses mask and buckets with no locks.
293 // It is safe for objc_msgSend to see new buckets but old mask.
294 // (It will get a cache miss but not overrun the buckets' bounds).
295 // It is unsafe for objc_msgSend to see old buckets and new mask.
296 // Therefore we write new buckets, wait a lot, then write new mask.
297 // objc_msgSend reads mask first, then buckets.
298
299 // ensure other threads see buckets contents before buckets pointer
300 mega_barrier();
301
302 _buckets = newBuckets;
303
304 // ensure other threads see new buckets before new mask
305 mega_barrier();
306
307 _mask = newMask;
308 _occupied = 0;
309 }
310
311 // not arm64
312 #endif
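/*
 * For illustration, a C rendition of the lock-free reader that these write
 * orderings are paired with (the real reader is the objc_msgSend assembly):
 *
 *     mask_t m    = cache->_mask;      // mask is read first ...
 *     bucket_t *b = cache->_buckets;   // ... then buckets (see above)
 *     mask_t begin = cache_hash(key, m);
 *     mask_t i = begin;
 *     do {
 *         if (b[i]._key == key) return b[i]._imp;  // imp was stored before key
 *         if (b[i]._key == 0) break;               // miss
 *     } while ((i = cache_next(i, m)) != begin);
 *
 * A racing reader may see new buckets with an old mask (a harmless miss) but
 * never old buckets with a new mask, and may see a new imp with a zero key
 * but never a new key with an old imp, so the worst case is a spurious cache
 * miss rather than an out-of-bounds index or a wrong IMP.
 */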
313
314 struct bucket_t *cache_t::buckets()
315 {
316 return _buckets;
317 }
318
319 mask_t cache_t::mask()
320 {
321 return _mask;
322 }
323
324 mask_t cache_t::occupied()
325 {
326 return _occupied;
327 }
328
329 void cache_t::incrementOccupied()
330 {
331 _occupied++;
332 }
333
334 void cache_t::setEmpty()
335 {
336 bzero(this, sizeof(*this));
337 _buckets = (bucket_t *)&_objc_empty_cache;
338 }
339
340
341 mask_t cache_t::capacity()
342 {
343 return mask() ? mask()+1 : 0;
344 }
345
346
347 #if CACHE_END_MARKER
348
349 size_t cache_t::bytesForCapacity(uint32_t cap)
350 {
351 // fixme put end marker inline when capacity+1 malloc is inefficient
352 return sizeof(cache_t) * (cap + 1);
353 }
354
355 bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap)
356 {
357 // bytesForCapacity() chooses whether the end marker is inline or not
358 return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1;
359 }
360
361 bucket_t *allocateBuckets(mask_t newCapacity)
362 {
363 // Allocate one extra bucket to mark the end of the list.
364 // This can't overflow mask_t because newCapacity is a power of 2.
365 // fixme instead put the end mark inline when +1 is malloc-inefficient
366 bucket_t *newBuckets = (bucket_t *)
367 _calloc_internal(cache_t::bytesForCapacity(newCapacity), 1);
368
369 bucket_t *end = cache_t::endMarker(newBuckets, newCapacity);
370
371 #if __arm__
372 // End marker's key is 1 and imp points BEFORE the first bucket.
373 // This saves an instruction in objc_msgSend.
374 end->setKey((cache_key_t)(uintptr_t)1);
375 end->setImp((IMP)(newBuckets - 1));
376 #else
377 # error unknown architecture
378 #endif
379
380 return newBuckets;
381 }
382
383 #else
384
385 bucket_t *allocateBuckets(mask_t newCapacity)
386 {
387 return (bucket_t *)_calloc_internal(newCapacity, sizeof(bucket_t));
388 }
389
390 #endif
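/*
 * End-marker layout sketch (32-bit ARM is the only configuration that uses
 * CACHE_END_MARKER here): for newCapacity == 4 the allocation holds one
 * extra slot; buckets[0..3] form the cache proper and buckets[4] is the
 * marker, with key == 1 and imp holding the address just before buckets[0],
 * which is what lets the objc_msgSend scan wrap around with one less
 * instruction.
 */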
391
392
393 bool cache_t::canBeFreed()
394 {
395 return buckets() != (bucket_t *)&_objc_empty_cache;
396 }
397
398
399 void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity)
400 {
401 if (PrintCaches) {
402 size_t bucket = log2u(newCapacity);
403 if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
404 cache_counts[bucket]++;
405 }
406 cache_allocations++;
407
408 if (oldCapacity) {
409 bucket = log2u(oldCapacity);
410 if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
411 cache_counts[bucket]--;
412 }
413 }
414 }
415
416 bool freeOld = canBeFreed();
417
418 bucket_t *oldBuckets = buckets();
419 bucket_t *newBuckets = allocateBuckets(newCapacity);
420
421 // Cache's old contents are not propagated.
422 // This is thought to save cache memory at the cost of extra cache fills.
423 // fixme re-measure this
424
425 assert(newCapacity > 0);
426 assert((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);
427
428 setBucketsAndMask(newBuckets, newCapacity - 1);
429
430 if (freeOld) {
431 cache_collect_free(oldBuckets, oldCapacity * sizeof(bucket_t));
432 cache_collect(false);
433 }
434 }
435
436
437 // called by objc_msgSend
438 extern "C"
439 void objc_msgSend_corrupt_cache_error(id receiver, SEL sel, Class isa)
440 {
441 cache_t::bad_cache(receiver, sel, isa);
442 }
443
444 extern "C"
445 void cache_getImp_corrupt_cache_error(id receiver, SEL sel, Class isa)
446 {
447 cache_t::bad_cache(receiver, sel, isa);
448 }
449
450 void cache_t::bad_cache(id receiver, SEL sel, Class isa)
451 {
452 // Log in separate steps in case the logging itself causes a crash.
453 _objc_inform_now_and_on_crash
454 ("Method cache corrupted. This may be a message to an "
455 "invalid object, or a memory error somewhere else.");
456 cache_t *cache = &isa->cache;
457 _objc_inform_now_and_on_crash
458 ("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
459 "mask 0x%x, occupied 0x%x",
460 receiver ? "receiver" : "unused", receiver,
461 sel, isa, cache, cache->_buckets,
462 cache->_mask, cache->_occupied);
463 _objc_inform_now_and_on_crash
464 ("%s %zu bytes, buckets %zu bytes",
465 receiver ? "receiver" : "unused", malloc_size(receiver),
466 malloc_size(cache->_buckets));
467 _objc_inform_now_and_on_crash
468 ("selector '%s'", sel_getName(sel));
469 _objc_inform_now_and_on_crash
470 ("isa '%s'", isa->nameForLogging());
471 _objc_fatal
472 ("Method cache corrupted.");
473 }
474
475
476 bucket_t * cache_t::find(cache_key_t k)
477 {
478 assert(k != 0);
479
480 bucket_t *b = buckets();
481 mask_t m = mask();
482 mask_t begin = cache_hash(k, m);
483 mask_t i = begin;
484 do {
485 if (b[i].key() == 0 || b[i].key() == k) {
486 return &b[i];
487 }
488 } while ((i = cache_next(i, m)) != begin);
489
490 // hack
491 Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache));
492 cache_t::bad_cache(nil, (SEL)k, cls);
493 }
494
495
496 void cache_t::expand()
497 {
498 mutex_assert_locked(&cacheUpdateLock);
499
500 uint32_t oldCapacity = capacity();
501 uint32_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE;
502
503 if ((uint32_t)(mask_t)newCapacity != newCapacity) {
504 // mask overflow - can't grow further
505 // fixme this wastes one bit of mask
506 newCapacity = oldCapacity;
507 }
508
509 reallocate(oldCapacity, newCapacity);
510 }
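/*
 * Growth example: capacities go INIT_CACHE_SIZE (4), 8, 16, ... doubling on
 * each expansion. If mask_t is 16 bits wide, doubling 0x8000 would produce
 * 0x10000, which no longer round-trips through mask_t; the check above then
 * keeps newCapacity at 0x8000, so the cache is reallocated (and emptied) at
 * the same size instead of growing - the wasted mask bit noted in the fixme.
 */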
511
512
513 static void cache_fill_nolock(Class cls, SEL sel, IMP imp)
514 {
515 mutex_assert_locked(&cacheUpdateLock);
516
517 // Never cache before +initialize is done
518 if (!cls->isInitialized()) return;
519
520 // Make sure the entry wasn't added to the cache by some other thread
521 // before we grabbed the cacheUpdateLock.
522 if (cache_getImp(cls, sel)) return;
523
524 cache_t *cache = getCache(cls, sel);
525 cache_key_t key = getKey(cls, sel);
526
527 // Use the cache as-is if it is less than 3/4 full
528 mask_t newOccupied = cache->occupied() + 1;
529 if ((newOccupied * 4) <= (cache->mask() + 1) * 3) {
530 // Cache is less than 3/4 full.
531 } else {
532 // Cache is too full. Expand it.
533 cache->expand();
534 }
535
536 // Scan for the first unused slot (or the slot already used by this selector) and insert there
537 // There is guaranteed to be an empty slot because the
538 // minimum size is 4 and we resized at 3/4 full.
539 bucket_t *bucket = cache->find(key);
540 if (bucket->key() == 0) cache->incrementOccupied();
541 bucket->set(key, imp);
542 }
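/*
 * Worked example of the 3/4 rule: for a capacity-4 cache, mask() is 3, so the
 * limit is (3+1)*3 == 12. Adding a 3rd entry gives newOccupied*4 == 12 <= 12
 * and the cache is used as-is; adding a 4th gives 16 > 12, so expand() first
 * doubles (and empties) the cache and the new entry lands in the fresh 8-slot
 * table. A never-filled cache has mask() == 0, so its first fill always
 * expands it to INIT_CACHE_SIZE.
 */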
543
544 void cache_fill(Class cls, SEL sel, IMP imp)
545 {
546 #if !DEBUG_TASK_THREADS
547 mutex_lock(&cacheUpdateLock);
548 cache_fill_nolock(cls, sel, imp);
549 mutex_unlock(&cacheUpdateLock);
550 #else
551 _collecting_in_critical();
552 return;
553 #endif
554 }
555
556
557 // Reset any entry for cls/sel to the uncached lookup
558 static void cache_eraseMethod_nolock(Class cls, SEL sel)
559 {
560 mutex_assert_locked(&cacheUpdateLock);
561
562 cache_t *cache = getCache(cls, sel);
563 cache_key_t key = getKey(cls, sel);
564
565 bucket_t *bucket = cache->find(key);
566 if (bucket->key() == key) {
567 bucket->setImp(_objc_msgSend_uncached_impcache);
568 }
569 }
570
571
572 // Resets cache entries for all methods in mlist for cls and its subclasses.
573 void cache_eraseMethods(Class cls, method_list_t *mlist)
574 {
575 rwlock_assert_writing(&runtimeLock);
576 mutex_lock(&cacheUpdateLock);
577
578 foreach_realized_class_and_subclass(cls, ^(Class c){
579 for (uint32_t m = 0; m < mlist->count; m++) {
580 SEL sel = mlist->get(m).name;
581 cache_eraseMethod_nolock(c, sel);
582 }
583 });
584
585 mutex_unlock(&cacheUpdateLock);
586 }
587
588
589 // Reset any copies of imp in this cache to the uncached lookup
590 void cache_eraseImp_nolock(Class cls, SEL sel, IMP imp)
591 {
592 mutex_assert_locked(&cacheUpdateLock);
593
594 cache_t *cache = getCache(cls, sel);
595
596 bucket_t *b = cache->buckets();
597 mask_t count = cache->capacity();
598 for (mask_t i = 0; i < count; i++) {
599 if (b[i].imp() == imp) {
600 b[i].setImp(_objc_msgSend_uncached_impcache);
601 }
602 }
603 }
604
605
606 void cache_eraseImp(Class cls, SEL sel, IMP imp)
607 {
608 mutex_lock(&cacheUpdateLock);
609 cache_eraseImp_nolock(cls, sel, imp);
610 mutex_unlock(&cacheUpdateLock);
611 }
612
613
614 // Reset this entire cache to the uncached lookup by reallocating it.
615 // This must not shrink the cache - that breaks the lock-free scheme:
616 // a concurrent reader could pair the old, larger mask with the new, smaller buckets array and index past its end.
616 void cache_erase_nolock(cache_t *cache)
617 {
618 mutex_assert_locked(&cacheUpdateLock);
619
620 mask_t capacity = cache->capacity();
621 if (capacity > 0 && cache->occupied() > 0) {
622 cache->reallocate(capacity, capacity);
623 }
624 }
625
626
627 /***********************************************************************
628 * cache collection.
629 **********************************************************************/
630
631 #if !TARGET_OS_WIN32
632
633 // A sentinel (magic value) to report bad thread_get_state status.
634 // Must not be a valid PC.
635 // Must not be zero - thread_get_state() on a new thread returns PC == 0.
636 #define PC_SENTINEL 1
637
638 static uintptr_t _get_pc_for_thread(thread_t thread)
639 #if defined(__i386__)
640 {
641 i386_thread_state_t state;
642 unsigned int count = i386_THREAD_STATE_COUNT;
643 kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
644 return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
645 }
646 #elif defined(__x86_64__)
647 {
648 x86_thread_state64_t state;
649 unsigned int count = x86_THREAD_STATE64_COUNT;
650 kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
651 return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
652 }
653 #elif defined(__arm__)
654 {
655 arm_thread_state_t state;
656 unsigned int count = ARM_THREAD_STATE_COUNT;
657 kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
658 return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
659 }
660 #elif defined(__arm64__)
661 {
662 arm_thread_state64_t state;
663 unsigned int count = ARM_THREAD_STATE64_COUNT;
664 kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count);
665 return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
666 }
667 #else
668 {
669 #error _get_pc_for_thread () not implemented for this architecture
670 }
671 #endif
672
673 #endif
674
675 /***********************************************************************
676 * _collecting_in_critical.
677 * Returns TRUE if some thread is currently executing a cache-reading
678 * function. Collection of cache garbage is not allowed when a cache-
679 * reading function is in progress because it might still be using
680 * the garbage memory.
681 **********************************************************************/
682 OBJC_EXPORT uintptr_t objc_entryPoints[];
683 OBJC_EXPORT uintptr_t objc_exitPoints[];
684
685 static int _collecting_in_critical(void)
686 {
687 #if TARGET_OS_WIN32
688 return TRUE;
689 #else
690 thread_act_port_array_t threads;
691 unsigned number;
692 unsigned count;
693 kern_return_t ret;
694 int result;
695
696 mach_port_t mythread = pthread_mach_thread_np(pthread_self());
697
698 // Get a list of all the threads in the current task
699 #if !DEBUG_TASK_THREADS
700 ret = task_threads(mach_task_self(), &threads, &number);
701 #else
702 ret = objc_task_threads(mach_task_self(), &threads, &number);
703 #endif
704
705 if (ret != KERN_SUCCESS) {
706 // See DEBUG_TASK_THREADS below to help debug this.
707 _objc_fatal("task_threads failed (result 0x%x)\n", ret);
708 }
709
710 // Check whether any thread is in the cache lookup code
711 result = FALSE;
712 for (count = 0; count < number; count++)
713 {
714 int region;
715 uintptr_t pc;
716
717 // Don't bother checking ourselves
718 if (threads[count] == mythread)
719 continue;
720
721 // Find out where thread is executing
722 pc = _get_pc_for_thread (threads[count]);
723
724 // Check for bad status, and if so, assume the worst (can't collect)
725 if (pc == PC_SENTINEL)
726 {
727 result = TRUE;
728 goto done;
729 }
730
731 // Check whether it is in the cache lookup code
732 for (region = 0; objc_entryPoints[region] != 0; region++)
733 {
734 if ((pc >= objc_entryPoints[region]) &&
735 (pc <= objc_exitPoints[region]))
736 {
737 result = TRUE;
738 goto done;
739 }
740 }
741 }
742
743 done:
744 // Deallocate the port rights for the threads
745 for (count = 0; count < number; count++) {
746 mach_port_deallocate(mach_task_self (), threads[count]);
747 }
748
749 // Deallocate the thread list
750 vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);
751
752 // Return our finding
753 return result;
754 #endif
755 }
756
757
758 /***********************************************************************
759 * _garbage_make_room. Ensure that there is enough room for at least
760 * one more ref in the garbage.
761 **********************************************************************/
762
763 // amount of memory represented by all refs in the garbage
764 static size_t garbage_byte_size = 0;
765
766 // do not empty the garbage until garbage_byte_size gets at least this big
767 static size_t garbage_threshold = 32*1024;
768
769 // table of refs to free
770 static bucket_t **garbage_refs = 0;
771
772 // current number of refs in garbage_refs
773 static size_t garbage_count = 0;
774
775 // capacity of current garbage_refs
776 static size_t garbage_max = 0;
777
778 // capacity of initial garbage_refs
779 enum {
780 INIT_GARBAGE_COUNT = 128
781 };
782
783 static void _garbage_make_room(void)
784 {
785 static int first = 1;
786
787 // Create the collection table the first time it is needed
788 if (first)
789 {
790 first = 0;
791 garbage_refs = (bucket_t**)
792 _malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
793 garbage_max = INIT_GARBAGE_COUNT;
794 }
795
796 // Double the table if it is full
797 else if (garbage_count == garbage_max)
798 {
799 garbage_refs = (bucket_t**)
800 _realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));
801 garbage_max *= 2;
802 }
803 }
804
805
806 /***********************************************************************
807 * cache_collect_free. Add the specified malloc'd memory to the list
808 * of garbage to be freed at some later point.
809 * size is used for the collection threshold. It does not have to be
810 * precisely the block's size.
811 * Cache locks: cacheUpdateLock must be held by the caller.
812 **********************************************************************/
813 static void cache_collect_free(bucket_t *data, size_t size)
814 {
815 mutex_assert_locked(&cacheUpdateLock);
816
817 _garbage_make_room ();
818 garbage_byte_size += size;
819 garbage_refs[garbage_count++] = data;
820 }
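/*
 * Size example: bucket_t is two pointer-sized words, so on an LP64 build a
 * discarded 2048-slot bucket array contributes 2048 * 16 == 32768 bytes -
 * enough by itself to reach garbage_threshold and let the next
 * cache_collect() call attempt a collection.
 */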
821
822
823 /***********************************************************************
824 * cache_collect. Try to free accumulated dead caches.
825 * collectALot tries harder to free memory.
826 * Cache locks: cacheUpdateLock must be held by the caller.
827 **********************************************************************/
828 void cache_collect(bool collectALot)
829 {
830 mutex_assert_locked(&cacheUpdateLock);
831
832 // Done if the garbage is not full
833 if (garbage_byte_size < garbage_threshold && !collectALot) {
834 return;
835 }
836
837 // Synchronize collection with objc_msgSend and other cache readers
838 if (!collectALot) {
839 if (_collecting_in_critical ()) {
840 // objc_msgSend (or other cache reader) is currently looking in
841 // the cache and might still be using some garbage.
842 if (PrintCaches) {
843 _objc_inform ("CACHES: not collecting; "
844 "objc_msgSend in progress");
845 }
846 return;
847 }
848 }
849 else {
850 // No excuses.
851 while (_collecting_in_critical())
852 ;
853 }
854
855 // No cache readers in progress - garbage is now deletable
856
857 // Log our progress
858 if (PrintCaches) {
859 cache_collections++;
860 _objc_inform ("CACHES: COLLECTING %zu bytes (%zu allocations, %zu collections)", garbage_byte_size, cache_allocations, cache_collections);
861 }
862
863 // Dispose all refs now in the garbage
864 while (garbage_count--) {
865 free(garbage_refs[garbage_count]);
866 }
867
868 // Clear the garbage count and total size indicator
869 garbage_count = 0;
870 garbage_byte_size = 0;
871
872 if (PrintCaches) {
873 size_t i;
874 size_t total_count = 0;
875 size_t total_size = 0;
876
877 for (i = 0; i < sizeof(cache_counts) / sizeof(cache_counts[0]); i++) {
878 int count = cache_counts[i];
879 int slots = 1 << i;
880 size_t size = count * slots * sizeof(bucket_t);
881
882 if (!count) continue;
883
884 _objc_inform("CACHES: %4d slots: %4d caches, %6zu bytes",
885 slots, count, size);
886
887 total_count += count;
888 total_size += size;
889 }
890
891 _objc_inform("CACHES: total: %4zu caches, %6zu bytes",
892 total_count, total_size);
893 }
894 }
895
896
897 /***********************************************************************
898 * objc_task_threads
899 * Replacement for task_threads(). Define DEBUG_TASK_THREADS to debug
900 * crashes when task_threads() is failing.
901 *
902 * A failure in task_threads() usually means somebody has botched their
903 * Mach or MIG traffic. For example, somebody's error handling was wrong
904 * and they left a message queued on the MIG reply port for task_threads()
905 * to trip over.
906 *
907 * The code below is a modified version of task_threads(). It logs
908 * the msgh_id of the reply message. The msgh_id can identify the sender
909 * of the message, which can help pinpoint the faulty code.
910 * DEBUG_TASK_THREADS also calls collecting_in_critical() during every
911 * message dispatch, which can increase reproducibility of bugs.
912 *
913 * This code can be regenerated by running
914 * `mig /usr/include/mach/task.defs`.
915 **********************************************************************/
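/*
 * Usage sketch: building the runtime with DEBUG_TASK_THREADS defined routes
 * _collecting_in_critical() through objc_task_threads() below, and makes
 * cache_fill() (above) call _collecting_in_critical() instead of filling the
 * cache, which is what increases the reproducibility mentioned above.
 */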
916 #if DEBUG_TASK_THREADS
917
918 #include <mach/mach.h>
919 #include <mach/message.h>
920 #include <mach/mig.h>
921
922 #define __MIG_check__Reply__task_subsystem__ 1
923 #define mig_internal static inline
924 #define __DeclareSendRpc(a, b)
925 #define __BeforeSendRpc(a, b)
926 #define __AfterSendRpc(a, b)
927 #define msgh_request_port msgh_remote_port
928 #define msgh_reply_port msgh_local_port
929
930 #ifndef __MachMsgErrorWithTimeout
931 #define __MachMsgErrorWithTimeout(_R_) { \
932 switch (_R_) { \
933 case MACH_SEND_INVALID_DATA: \
934 case MACH_SEND_INVALID_DEST: \
935 case MACH_SEND_INVALID_HEADER: \
936 mig_put_reply_port(InP->Head.msgh_reply_port); \
937 break; \
938 case MACH_SEND_TIMED_OUT: \
939 case MACH_RCV_TIMED_OUT: \
940 default: \
941 mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
942 } \
943 }
944 #endif /* __MachMsgErrorWithTimeout */
945
946 #ifndef __MachMsgErrorWithoutTimeout
947 #define __MachMsgErrorWithoutTimeout(_R_) { \
948 switch (_R_) { \
949 case MACH_SEND_INVALID_DATA: \
950 case MACH_SEND_INVALID_DEST: \
951 case MACH_SEND_INVALID_HEADER: \
952 mig_put_reply_port(InP->Head.msgh_reply_port); \
953 break; \
954 default: \
955 mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
956 } \
957 }
958 #endif /* __MachMsgErrorWithoutTimeout */
959
960
961 #if ( __MigTypeCheck )
962 #if __MIG_check__Reply__task_subsystem__
963 #if !defined(__MIG_check__Reply__task_threads_t__defined)
964 #define __MIG_check__Reply__task_threads_t__defined
965
966 mig_internal kern_return_t __MIG_check__Reply__task_threads_t(__Reply__task_threads_t *Out0P)
967 {
968
969 typedef __Reply__task_threads_t __Reply;
970 boolean_t msgh_simple;
971 #if __MigTypeCheck
972 unsigned int msgh_size;
973 #endif /* __MigTypeCheck */
974 if (Out0P->Head.msgh_id != 3502) {
975 if (Out0P->Head.msgh_id == MACH_NOTIFY_SEND_ONCE)
976 { return MIG_SERVER_DIED; }
977 else
978 { return MIG_REPLY_MISMATCH; }
979 }
980
981 msgh_simple = !(Out0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX);
982 #if __MigTypeCheck
983 msgh_size = Out0P->Head.msgh_size;
984
985 if ((msgh_simple || Out0P->msgh_body.msgh_descriptor_count != 1 ||
986 msgh_size != (mach_msg_size_t)sizeof(__Reply)) &&
987 (!msgh_simple || msgh_size != (mach_msg_size_t)sizeof(mig_reply_error_t) ||
988 ((mig_reply_error_t *)Out0P)->RetCode == KERN_SUCCESS))
989 { return MIG_TYPE_ERROR ; }
990 #endif /* __MigTypeCheck */
991
992 if (msgh_simple) {
993 return ((mig_reply_error_t *)Out0P)->RetCode;
994 }
995
996 #if __MigTypeCheck
997 if (Out0P->act_list.type != MACH_MSG_OOL_PORTS_DESCRIPTOR ||
998 Out0P->act_list.disposition != 17) {
999 return MIG_TYPE_ERROR;
1000 }
1001 #endif /* __MigTypeCheck */
1002
1003 return MACH_MSG_SUCCESS;
1004 }
1005 #endif /* !defined(__MIG_check__Reply__task_threads_t__defined) */
1006 #endif /* __MIG_check__Reply__task_subsystem__ */
1007 #endif /* ( __MigTypeCheck ) */
1008
1009
1010 /* Routine task_threads */
1011 static kern_return_t objc_task_threads
1012 (
1013 task_t target_task,
1014 thread_act_array_t *act_list,
1015 mach_msg_type_number_t *act_listCnt
1016 )
1017 {
1018
1019 #ifdef __MigPackStructs
1020 #pragma pack(4)
1021 #endif
1022 typedef struct {
1023 mach_msg_header_t Head;
1024 } Request;
1025 #ifdef __MigPackStructs
1026 #pragma pack()
1027 #endif
1028
1029 #ifdef __MigPackStructs
1030 #pragma pack(4)
1031 #endif
1032 typedef struct {
1033 mach_msg_header_t Head;
1034 /* start of the kernel processed data */
1035 mach_msg_body_t msgh_body;
1036 mach_msg_ool_ports_descriptor_t act_list;
1037 /* end of the kernel processed data */
1038 NDR_record_t NDR;
1039 mach_msg_type_number_t act_listCnt;
1040 mach_msg_trailer_t trailer;
1041 } Reply;
1042 #ifdef __MigPackStructs
1043 #pragma pack()
1044 #endif
1045
1046 #ifdef __MigPackStructs
1047 #pragma pack(4)
1048 #endif
1049 typedef struct {
1050 mach_msg_header_t Head;
1051 /* start of the kernel processed data */
1052 mach_msg_body_t msgh_body;
1053 mach_msg_ool_ports_descriptor_t act_list;
1054 /* end of the kernel processed data */
1055 NDR_record_t NDR;
1056 mach_msg_type_number_t act_listCnt;
1057 } __Reply;
1058 #ifdef __MigPackStructs
1059 #pragma pack()
1060 #endif
1061 /*
1062 * typedef struct {
1063 * mach_msg_header_t Head;
1064 * NDR_record_t NDR;
1065 * kern_return_t RetCode;
1066 * } mig_reply_error_t;
1067 */
1068
1069 union {
1070 Request In;
1071 Reply Out;
1072 } Mess;
1073
1074 Request *InP = &Mess.In;
1075 Reply *Out0P = &Mess.Out;
1076
1077 mach_msg_return_t msg_result;
1078
1079 #ifdef __MIG_check__Reply__task_threads_t__defined
1080 kern_return_t check_result;
1081 #endif /* __MIG_check__Reply__task_threads_t__defined */
1082
1083 __DeclareSendRpc(3402, "task_threads")
1084
1085 InP->Head.msgh_bits =
1086 MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
1087 /* msgh_size passed as argument */
1088 InP->Head.msgh_request_port = target_task;
1089 InP->Head.msgh_reply_port = mig_get_reply_port();
1090 InP->Head.msgh_id = 3402;
1091
1092 __BeforeSendRpc(3402, "task_threads")
1093 msg_result = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_reply_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1094 __AfterSendRpc(3402, "task_threads")
1095 if (msg_result != MACH_MSG_SUCCESS) {
1096 _objc_inform("task_threads received unexpected reply msgh_id 0x%zx",
1097 (size_t)Out0P->Head.msgh_id);
1098 __MachMsgErrorWithoutTimeout(msg_result);
1099 { return msg_result; }
1100 }
1101
1102
1103 #if defined(__MIG_check__Reply__task_threads_t__defined)
1104 check_result = __MIG_check__Reply__task_threads_t((__Reply__task_threads_t *)Out0P);
1105 if (check_result != MACH_MSG_SUCCESS)
1106 { return check_result; }
1107 #endif /* defined(__MIG_check__Reply__task_threads_t__defined) */
1108
1109 *act_list = (thread_act_array_t)(Out0P->act_list.address);
1110 *act_listCnt = Out0P->act_listCnt;
1111
1112 return KERN_SUCCESS;
1113 }
1114
1115 // DEBUG_TASK_THREADS
1116 #endif
1117
1118
1119 // __OBJC2__
1120 #endif