apple/objc4.git (objc4-680) / runtime / objc-cache.mm
1 /*
2 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /***********************************************************************
25 * objc-cache.mm
26 * Method cache management
27 * Cache flushing
28 * Cache garbage collection
29 * Cache instrumentation
30 * Dedicated allocator for large caches
31 **********************************************************************/
32
33
34 /***********************************************************************
35 * Method cache locking (GrP 2001-1-14)
36 *
37 * For speed, objc_msgSend does not acquire any locks when it reads
38 * method caches. Instead, all cache changes are performed so that any
39 * objc_msgSend running concurrently with the cache mutator will not
40 * crash or hang or get an incorrect result from the cache.
41 *
42 * When cache memory becomes unused (e.g. the old cache after cache
43 * expansion), it is not immediately freed, because a concurrent
44 * objc_msgSend could still be using it. Instead, the memory is
45 * disconnected from the data structures and placed on a garbage list.
46 * The memory is now only accessible to instances of objc_msgSend that
47 * were running when the memory was disconnected; any further calls to
48 * objc_msgSend will not see the garbage memory because the other data
49 * structures don't point to it anymore. The collecting_in_critical
50 * function checks the PC of all threads and returns FALSE when all threads
51 * are found to be outside objc_msgSend. This means any call to objc_msgSend
52 * that could have had access to the garbage has finished or moved past the
53 * cache lookup stage, so it is safe to free the memory.
54 *
55 * All functions that modify cache data or structures must acquire the
56 * cacheUpdateLock to prevent interference from concurrent modifications.
57 * The function that frees cache garbage must acquire the cacheUpdateLock
58 * and use collecting_in_critical() to flush out cache readers.
59 * The cacheUpdateLock is also used to protect the custom allocator used
60 * for large method cache blocks.
61 *
62 * Cache readers (PC-checked by collecting_in_critical())
63 * objc_msgSend*
64 * cache_getImp
65 *
66 * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
67 * cache_fill (acquires lock)
68 * cache_expand (only called from cache_fill)
69 * cache_create (only called from cache_expand)
70 * bcopy (only called from instrumented cache_expand)
71 * flush_caches (acquires lock)
72 * cache_flush (only called from cache_fill and flush_caches)
73 * cache_collect_free (only called from cache_expand and cache_flush)
74 *
75 * UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
76 * cache_print
77 * _class_printMethodCaches
78 * _class_printDuplicateCacheEntries
79 * _class_printMethodCacheStatistics
80 *
81 ***********************************************************************/
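
// A minimal sketch of the disconnect-then-collect pattern described above,
// mirroring what cache_t::reallocate() does later in this file. The lock,
// garbage list, and PC check are cacheUpdateLock, cache_collect_free(),
// and _collecting_in_critical(); the caller shown here is illustrative only.
//
//   mutex_locker_t lock(cacheUpdateLock);          // writers serialize
//   bucket_t *oldBuckets = cache->buckets();
//   cache->setBucketsAndMask(newBuckets, newMask); // readers switch over
//   cache_collect_free(oldBuckets, oldCapacity);   // park old memory as garbage
//   cache_collect(false);                          // freed only when no thread's
//                                                  // PC is inside a cache reader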
82
83
84 #if __OBJC2__
85
86 #include "objc-private.h"
87 #include "objc-cache.h"
88
89
90 /* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
91 enum {
92 INIT_CACHE_SIZE_LOG2 = 2,
93 INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2)
94 };
95
96 static void cache_collect_free(struct bucket_t *data, mask_t capacity);
97 static int _collecting_in_critical(void);
98 static void _garbage_make_room(void);
99
100
101 /***********************************************************************
102 * Cache statistics for OBJC_PRINT_CACHE_SETUP
103 **********************************************************************/
104 static unsigned int cache_counts[16];
105 static size_t cache_allocations;
106 static size_t cache_collections;
107
108 static void recordNewCache(mask_t capacity)
109 {
110 size_t bucket = log2u(capacity);
111 if (bucket < countof(cache_counts)) {
112 cache_counts[bucket]++;
113 }
114 cache_allocations++;
115 }
116
117 static void recordDeadCache(mask_t capacity)
118 {
119 size_t bucket = log2u(capacity);
120 if (bucket < countof(cache_counts)) {
121 cache_counts[bucket]--;
122 }
123 }
124
125 /***********************************************************************
126 * Pointers used by compiled class objects
127 * These use asm to avoid conflicts with the compiler's internal declarations
128 **********************************************************************/
129
130 // EMPTY_BYTES includes space for a cache end marker bucket.
131 // This end marker doesn't actually have the wrap-around pointer
132 // because cache scans always find an empty bucket before they might wrap.
133 // 1024 buckets is fairly common.
134 #if DEBUG
135 // Use a smaller size to exercise heap-allocated empty caches.
136 # define EMPTY_BYTES ((8+1)*16)
137 #else
138 # define EMPTY_BYTES ((1024+1)*16)
139 #endif
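
// Worked size, assuming the 64-bit bucket_t layout (8-byte key + 8-byte IMP
// = 16 bytes per bucket): the release value is (1024+1)*16 = 16400 bytes,
// i.e. a 1024-bucket empty cache plus the end-marker slot. The DEBUG value
// (8+1)*16 = 144 bytes deliberately pushes larger empty caches onto the
// heap-allocated path in emptyBucketsForCapacity() below.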
140
141 #define stringize(x) #x
142 #define stringize2(x) stringize(x)
143
144 // "cache" is cache->buckets; "vtable" is cache->mask/occupied
145 // hack to avoid conflicts with compiler's internal declaration
146 asm("\n .section __TEXT,__const"
147 "\n .globl __objc_empty_vtable"
148 "\n .set __objc_empty_vtable, 0"
149 "\n .globl __objc_empty_cache"
150 "\n .align 3"
151 "\n __objc_empty_cache: .space " stringize2(EMPTY_BYTES)
152 );
153
154
155 #if __arm__ || __x86_64__ || __i386__
156 // objc_msgSend has few registers available.
157 // Cache scan increments and wraps at special end-marking bucket.
158 #define CACHE_END_MARKER 1
159 static inline mask_t cache_next(mask_t i, mask_t mask) {
160 return (i+1) & mask;
161 }
162
163 #elif __arm64__
164 // objc_msgSend has lots of registers available.
165 // Cache scan decrements. No end marker needed.
166 #define CACHE_END_MARKER 0
167 static inline mask_t cache_next(mask_t i, mask_t mask) {
168 return i ? i-1 : mask;
169 }
170
171 #else
172 #error unknown architecture
173 #endif
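
// Worked example for mask = 3 (capacity 4), probe starting at index 2:
//   __arm__ / __x86_64__ / __i386__ : 2 -> 3 -> 0 -> 1  (increment, wrap via mask)
//   __arm64__                       : 2 -> 1 -> 0 -> 3  (decrement, wrap to mask)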
174
175
176 #if SUPPORT_IGNORED_SELECTOR_CONSTANT
177 #error sorry not implemented
178 #endif
179
180
181 // copied from dispatch_atomic_maximally_synchronizing_barrier
182 // fixme verify that this barrier hack does in fact work here
183 #if __x86_64__
184 #define mega_barrier() \
185 do { unsigned long _clbr; __asm__ __volatile__( \
186 "cpuid" \
187 : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \
188 ); } while(0)
189
190 #elif __i386__
191 #define mega_barrier() \
192 do { unsigned long _clbr; __asm__ __volatile__( \
193 "cpuid" \
194 : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \
195 ); } while(0)
196
197 #elif __arm__ || __arm64__
198 #define mega_barrier() \
199 __asm__ __volatile__( \
200 "dsb ish" \
201 : : : "memory")
202
203 #else
204 #error unknown architecture
205 #endif
206
207 #if __arm64__
208
209 // Use atomic double-word instructions to update cache entries.
210 // This requires that cache buckets not cross cache line boundaries.
211 #define stp(onep, twop, destp) \
212 __asm__ ("stp %[one], %[two], [%[dest]]" \
213 : "=m" (((uint64_t *)(destp))[0]), \
214 "=m" (((uint64_t *)(destp))[1]) \
215 : [one] "r" (onep), \
216 [two] "r" (twop), \
217 [dest] "r" (destp) \
218 : /* no clobbers */ \
219 )
220 #define ldp(onep, twop, srcp) \
221 __asm__ ("ldp %[one], %[two], [%[src]]" \
222 : [one] "=r" (onep), \
223 [two] "=r" (twop) \
224 : "m" (((uint64_t *)(srcp))[0]), \
225 "m" (((uint64_t *)(srcp))[1]), \
226 [src] "r" (srcp) \
227 : /* no clobbers */ \
228 )
229
230 #endif
231
232
233 // Class points to cache. SEL is key. Cache buckets store SEL+IMP.
234 // Caches are never built in the dyld shared cache.
235
236 static inline mask_t cache_hash(cache_key_t key, mask_t mask)
237 {
238 return (mask_t)(key & mask);
239 }
240
241 cache_t *getCache(Class cls)
242 {
243 assert(cls);
244 return &cls->cache;
245 }
246
247 cache_key_t getKey(SEL sel)
248 {
249 assert(sel);
250 return (cache_key_t)sel;
251 }
252
253 #if __arm64__
254
255 void bucket_t::set(cache_key_t newKey, IMP newImp)
256 {
257 assert(_key == 0 || _key == newKey);
258
259 // LDP/STP guarantees that all observers get
260 // either key/imp or newKey/newImp
261 stp(newKey, newImp, this);
262 }
263
264 #else
265
266 void bucket_t::set(cache_key_t newKey, IMP newImp)
267 {
268 assert(_key == 0 || _key == newKey);
269
270 // objc_msgSend uses key and imp with no locks.
271 // It is safe for objc_msgSend to see new imp but NULL key
272 // (It will get a cache miss but not dispatch to the wrong place.)
273 // It is unsafe for objc_msgSend to see old imp and new key.
274 // Therefore we write new imp, wait a lot, then write new key.
275
276 _imp = newImp;
277
278 if (_key != newKey) {
279 mega_barrier();
280 _key = newKey;
281 }
282 }
283
284 #endif
285
286 void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
287 {
288 // objc_msgSend uses mask and buckets with no locks.
289 // It is safe for objc_msgSend to see new buckets but old mask.
290 // (It will get a cache miss but not overrun the buckets' bounds).
291 // It is unsafe for objc_msgSend to see old buckets and new mask.
292 // Therefore we write new buckets, wait a lot, then write new mask.
293 // objc_msgSend reads mask first, then buckets.
294
295 // ensure other threads see buckets contents before buckets pointer
296 mega_barrier();
297
298 _buckets = newBuckets;
299
300 // ensure other threads see new buckets before new mask
301 mega_barrier();
302
303 _mask = newMask;
304 _occupied = 0;
305 }
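
// A C-level sketch of the reader ordering described in the comments above.
// The real readers are the objc_msgSend family in assembly; this loop is
// illustrative only and just shows the mask-before-buckets read order:
//
//   mask_t m = cache->mask();                 // read mask first
//   bucket_t *b = cache->buckets();           // then buckets
//   mask_t begin = cache_hash(key, m), i = begin;
//   do {
//       if (b[i].key() == key) goto hit;      // jump to the cached imp
//       if (b[i].key() == 0)   goto miss;     // empty slot: take the slow path
//   } while ((i = cache_next(i, m)) != begin);
//
// Seeing new buckets with a stale (smaller) mask only confines the probe to
// a prefix of the new, larger array, so the worst case is a spurious miss.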
306
307
308 struct bucket_t *cache_t::buckets()
309 {
310 return _buckets;
311 }
312
313 mask_t cache_t::mask()
314 {
315 return _mask;
316 }
317
318 mask_t cache_t::occupied()
319 {
320 return _occupied;
321 }
322
323 void cache_t::incrementOccupied()
324 {
325 _occupied++;
326 }
327
328 void cache_t::initializeToEmpty()
329 {
330 bzero(this, sizeof(*this));
331 _buckets = (bucket_t *)&_objc_empty_cache;
332 }
333
334
335 mask_t cache_t::capacity()
336 {
337 return mask() ? mask()+1 : 0;
338 }
339
340
341 #if CACHE_END_MARKER
342
343 size_t cache_t::bytesForCapacity(uint32_t cap)
344 {
345 // fixme put end marker inline when capacity+1 malloc is inefficient
346 return sizeof(bucket_t) * (cap + 1);
347 }
348
349 bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap)
350 {
351 // bytesForCapacity() chooses whether the end marker is inline or not
352 return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1;
353 }
354
355 bucket_t *allocateBuckets(mask_t newCapacity)
356 {
357 // Allocate one extra bucket to mark the end of the list.
358 // This can't overflow mask_t because newCapacity is a power of 2.
359 // fixme instead put the end mark inline when +1 is malloc-inefficient
360 bucket_t *newBuckets = (bucket_t *)
361 calloc(cache_t::bytesForCapacity(newCapacity), 1);
362
363 bucket_t *end = cache_t::endMarker(newBuckets, newCapacity);
364
365 #if __arm__
366 // End marker's key is 1 and imp points BEFORE the first bucket.
367 // This saves an instruction in objc_msgSend.
368 end->setKey((cache_key_t)(uintptr_t)1);
369 end->setImp((IMP)(newBuckets - 1));
370 #else
371 // End marker's key is 1 and imp points to the first bucket.
372 end->setKey((cache_key_t)(uintptr_t)1);
373 end->setImp((IMP)newBuckets);
374 #endif
375
376 if (PrintCaches) recordNewCache(newCapacity);
377
378 return newBuckets;
379 }
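
// How the end marker is consumed (a sketch of the cache scan under the
// non-__arm__ convention; the real code lives in the objc_msgSend assembly):
//
//   bucket_t *b = &buckets[cache_hash(key, mask)];
//   for (;;) {
//       if (b->key() == key) goto hit;                  // cached imp found
//       if (b->key() == 0)   goto miss;                 // empty slot: slow path
//       if (b->key() == 1)   b = (bucket_t *)b->imp();  // end marker: wrap to start
//       else                 b++;                       // keep scanning forward
//   }
//
// Key 1 can never equal a registered SEL, and the 3/4 occupancy limit
// guarantees the scan reaches an empty slot before it could wrap a second
// time. On __arm__ the marker's imp instead points one bucket before the
// start, which saves an instruction in objc_msgSend (see the #if above).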
380
381 #else
382
383 size_t cache_t::bytesForCapacity(uint32_t cap)
384 {
385 return sizeof(bucket_t) * cap;
386 }
387
388 bucket_t *allocateBuckets(mask_t newCapacity)
389 {
390 if (PrintCaches) recordNewCache(newCapacity);
391
392 return (bucket_t *)calloc(cache_t::bytesForCapacity(newCapacity), 1);
393 }
394
395 #endif
396
397
398 bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true)
399 {
400 cacheUpdateLock.assertLocked();
401
402 size_t bytes = cache_t::bytesForCapacity(capacity);
403
404 // Use _objc_empty_cache if the bucket array is small enough.
405 if (bytes <= EMPTY_BYTES) {
406 return (bucket_t *)&_objc_empty_cache;
407 }
408
409 // Use shared empty buckets allocated on the heap.
410 static bucket_t **emptyBucketsList = nil;
411 static mask_t emptyBucketsListCount = 0;
412
413 mask_t index = log2u(capacity);
414
415 if (index >= emptyBucketsListCount) {
416 if (!allocate) return nil;
417
418 mask_t newListCount = index + 1;
419 bucket_t *newBuckets = (bucket_t *)calloc(bytes, 1);
420 emptyBucketsList = (bucket_t**)
421 realloc(emptyBucketsList, newListCount * sizeof(bucket_t *));
422 // Share newBuckets for every un-allocated size up to and including index.
423 // The array is therefore always fully populated.
424 for (mask_t i = emptyBucketsListCount; i < newListCount; i++) {
425 emptyBucketsList[i] = newBuckets;
426 }
427 emptyBucketsListCount = newListCount;
428
429 if (PrintCaches) {
430 _objc_inform("CACHES: new empty buckets at %p (capacity %zu)",
431 newBuckets, (size_t)capacity);
432 }
433 }
434
435 return emptyBucketsList[index];
436 }
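
// Worked example (release EMPTY_BYTES, 16-byte buckets assumed): capacities
// up to 1024 satisfy bytes <= EMPTY_BYTES and reuse the static
// _objc_empty_cache. Erasing a 2048-bucket cache computes index
// log2u(2048) == 11, allocates one zeroed block on first use, stores it in
// emptyBucketsList[0..11], and hands that same shared block to every later
// eraser of that size class.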
437
438
439 bool cache_t::isConstantEmptyCache()
440 {
441 return
442 occupied() == 0 &&
443 buckets() == emptyBucketsForCapacity(capacity(), false);
444 }
445
446 bool cache_t::canBeFreed()
447 {
448 return !isConstantEmptyCache();
449 }
450
451
452 void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity)
453 {
454 bool freeOld = canBeFreed();
455
456 bucket_t *oldBuckets = buckets();
457 bucket_t *newBuckets = allocateBuckets(newCapacity);
458
459 // Cache's old contents are not propagated.
460 // This is thought to save cache memory at the cost of extra cache fills.
461 // fixme re-measure this
462
463 assert(newCapacity > 0);
464 assert((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);
465
466 setBucketsAndMask(newBuckets, newCapacity - 1);
467
468 if (freeOld) {
469 cache_collect_free(oldBuckets, oldCapacity);
470 cache_collect(false);
471 }
472 }
473
474
475 void cache_t::bad_cache(id receiver, SEL sel, Class isa)
476 {
477 // Log in separate steps in case the logging itself causes a crash.
478 _objc_inform_now_and_on_crash
479 ("Method cache corrupted. This may be a message to an "
480 "invalid object, or a memory error somewhere else.");
481 cache_t *cache = &isa->cache;
482 _objc_inform_now_and_on_crash
483 ("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
484 "mask 0x%x, occupied 0x%x",
485 receiver ? "receiver" : "unused", receiver,
486 sel, isa, cache, cache->_buckets,
487 cache->_mask, cache->_occupied);
488 _objc_inform_now_and_on_crash
489 ("%s %zu bytes, buckets %zu bytes",
490 receiver ? "receiver" : "unused", malloc_size(receiver),
491 malloc_size(cache->_buckets));
492 _objc_inform_now_and_on_crash
493 ("selector '%s'", sel_getName(sel));
494 _objc_inform_now_and_on_crash
495 ("isa '%s'", isa->nameForLogging());
496 _objc_fatal
497 ("Method cache corrupted.");
498 }
499
500
501 bucket_t * cache_t::find(cache_key_t k, id receiver)
502 {
503 assert(k != 0);
504
505 bucket_t *b = buckets();
506 mask_t m = mask();
507 mask_t begin = cache_hash(k, m);
508 mask_t i = begin;
509 do {
510 if (b[i].key() == 0 || b[i].key() == k) {
511 return &b[i];
512 }
513 } while ((i = cache_next(i, m)) != begin);
514
515 // hack
516 Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache));
517 cache_t::bad_cache(receiver, (SEL)k, cls);
518 }
519
520
521 void cache_t::expand()
522 {
523 cacheUpdateLock.assertLocked();
524
525 uint32_t oldCapacity = capacity();
526 uint32_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE;
527
528 if ((uint32_t)(mask_t)newCapacity != newCapacity) {
529 // mask overflow - can't grow further
530 // fixme this wastes one bit of mask
531 newCapacity = oldCapacity;
532 }
533
534 reallocate(oldCapacity, newCapacity);
535 }
536
537
538 static void cache_fill_nolock(Class cls, SEL sel, IMP imp, id receiver)
539 {
540 cacheUpdateLock.assertLocked();
541
542 // Never cache before +initialize is done
543 if (!cls->isInitialized()) return;
544
545 // Make sure the entry wasn't added to the cache by some other thread
546 // before we grabbed the cacheUpdateLock.
547 if (cache_getImp(cls, sel)) return;
548
549 cache_t *cache = getCache(cls);
550 cache_key_t key = getKey(sel);
551
552 // Use the cache as-is if it is less than 3/4 full
553 mask_t newOccupied = cache->occupied() + 1;
554 mask_t capacity = cache->capacity();
555 if (cache->isConstantEmptyCache()) {
556 // Cache is read-only. Replace it.
557 cache->reallocate(capacity, capacity ?: INIT_CACHE_SIZE);
558 }
559 else if (newOccupied <= capacity / 4 * 3) {
560 // Cache is less than 3/4 full. Use it as-is.
561 }
562 else {
563 // Cache is too full. Expand it.
564 cache->expand();
565 }
566
567 // Scan for the first unused slot and insert there.
568 // There is guaranteed to be an empty slot because the
569 // minimum size is 4 and we resized at 3/4 full.
570 bucket_t *bucket = cache->find(key, receiver);
571 if (bucket->key() == 0) cache->incrementOccupied();
572 bucket->set(key, imp);
573 }
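
// Worked example of the growth policy above: the first fill of a class
// replaces the read-only empty cache with INIT_CACHE_SIZE == 4 buckets.
// With capacity 4 the 3/4 threshold is 3, so three entries fit; the fill
// that would make newOccupied == 4 expands to capacity 8 (threshold 6),
// then 16 (threshold 12), and so on. Each expansion starts from an empty
// table because reallocate() drops the old contents rather than rehashing.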
574
575 void cache_fill(Class cls, SEL sel, IMP imp, id receiver)
576 {
577 #if !DEBUG_TASK_THREADS
578 mutex_locker_t lock(cacheUpdateLock);
579 cache_fill_nolock(cls, sel, imp, receiver);
580 #else
581 _collecting_in_critical();
582 return;
583 #endif
584 }
585
586
587 // Reset this entire cache to the uncached lookup by reallocating it.
588 // This must not shrink the cache - that breaks the lock-free scheme.
589 void cache_erase_nolock(Class cls)
590 {
591 cacheUpdateLock.assertLocked();
592
593 cache_t *cache = getCache(cls);
594
595 mask_t capacity = cache->capacity();
596 if (capacity > 0 && cache->occupied() > 0) {
597 auto oldBuckets = cache->buckets();
598 auto buckets = emptyBucketsForCapacity(capacity);
599 cache->setBucketsAndMask(buckets, capacity - 1); // also clears occupied
600
601 cache_collect_free(oldBuckets, capacity);
602 cache_collect(false);
603 }
604 }
605
606
607 void cache_delete(Class cls)
608 {
609 mutex_locker_t lock(cacheUpdateLock);
610 if (cls->cache.canBeFreed()) {
611 if (PrintCaches) recordDeadCache(cls->cache.capacity());
612 free(cls->cache.buckets());
613 }
614 }
615
616
617 /***********************************************************************
618 * cache collection.
619 **********************************************************************/
620
621 #if !TARGET_OS_WIN32
622
623 // A sentinel (magic value) to report bad thread_get_state status.
624 // Must not be a valid PC.
625 // Must not be zero - thread_get_state() on a new thread returns PC == 0.
626 #define PC_SENTINEL 1
627
628 static uintptr_t _get_pc_for_thread(thread_t thread)
629 #if defined(__i386__)
630 {
631 i386_thread_state_t state;
632 unsigned int count = i386_THREAD_STATE_COUNT;
633 kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
634 return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
635 }
636 #elif defined(__x86_64__)
637 {
638 x86_thread_state64_t state;
639 unsigned int count = x86_THREAD_STATE64_COUNT;
640 kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
641 return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
642 }
643 #elif defined(__arm__)
644 {
645 arm_thread_state_t state;
646 unsigned int count = ARM_THREAD_STATE_COUNT;
647 kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
648 return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
649 }
650 #elif defined(__arm64__)
651 {
652 arm_thread_state64_t state;
653 unsigned int count = ARM_THREAD_STATE64_COUNT;
654 kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count);
655 return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
656 }
657 #else
658 {
659 #error _get_pc_for_thread () not implemented for this architecture
660 }
661 #endif
662
663 #endif
664
665 /***********************************************************************
666 * _collecting_in_critical.
667 * Returns TRUE if some thread is currently executing a cache-reading
668 * function. Collection of cache garbage is not allowed when a cache-
669 * reading function is in progress because it might still be using
670 * the garbage memory.
671 **********************************************************************/
672 OBJC_EXPORT uintptr_t objc_entryPoints[];
673 OBJC_EXPORT uintptr_t objc_exitPoints[];
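
// These tables are emitted by the objc_msgSend assembly sources as parallel
// arrays of routine start and end addresses, terminated by a 0 entry (the
// loop below relies on that terminator). Conceptually:
//
//   objc_entryPoints[] = { <start of objc_msgSend>, <start of cache_getImp>, ..., 0 };
//   objc_exitPoints[]  = { <end of objc_msgSend>,   <end of cache_getImp>,   ..., 0 };
//
// A thread whose PC satisfies entry <= pc <= exit for any pair is treated
// as a cache reader still in progress.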
674
675 static int _collecting_in_critical(void)
676 {
677 #if TARGET_OS_WIN32
678 return TRUE;
679 #else
680 thread_act_port_array_t threads;
681 unsigned number;
682 unsigned count;
683 kern_return_t ret;
684 int result;
685
686 mach_port_t mythread = pthread_mach_thread_np(pthread_self());
687
688 // Get a list of all the threads in the current task
689 #if !DEBUG_TASK_THREADS
690 ret = task_threads(mach_task_self(), &threads, &number);
691 #else
692 ret = objc_task_threads(mach_task_self(), &threads, &number);
693 #endif
694
695 if (ret != KERN_SUCCESS) {
696 // See DEBUG_TASK_THREADS below to help debug this.
697 _objc_fatal("task_threads failed (result 0x%x)\n", ret);
698 }
699
700 // Check whether any thread is in the cache lookup code
701 result = FALSE;
702 for (count = 0; count < number; count++)
703 {
704 int region;
705 uintptr_t pc;
706
707 // Don't bother checking ourselves
708 if (threads[count] == mythread)
709 continue;
710
711 // Find out where thread is executing
712 pc = _get_pc_for_thread (threads[count]);
713
714 // Check for bad status, and if so, assume the worst (can't collect)
715 if (pc == PC_SENTINEL)
716 {
717 result = TRUE;
718 goto done;
719 }
720
721 // Check whether it is in the cache lookup code
722 for (region = 0; objc_entryPoints[region] != 0; region++)
723 {
724 if ((pc >= objc_entryPoints[region]) &&
725 (pc <= objc_exitPoints[region]))
726 {
727 result = TRUE;
728 goto done;
729 }
730 }
731 }
732
733 done:
734 // Deallocate the port rights for the threads
735 for (count = 0; count < number; count++) {
736 mach_port_deallocate(mach_task_self (), threads[count]);
737 }
738
739 // Deallocate the thread list
740 vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);
741
742 // Return our finding
743 return result;
744 #endif
745 }
746
747
748 /***********************************************************************
749 * _garbage_make_room. Ensure that there is enough room for at least
750 * one more ref in the garbage.
751 **********************************************************************/
752
753 // amount of memory represented by all refs in the garbage
754 static size_t garbage_byte_size = 0;
755
756 // do not empty the garbage until garbage_byte_size gets at least this big
757 static size_t garbage_threshold = 32*1024;
758
759 // table of refs to free
760 static bucket_t **garbage_refs = 0;
761
762 // current number of refs in garbage_refs
763 static size_t garbage_count = 0;
764
765 // capacity of current garbage_refs
766 static size_t garbage_max = 0;
767
768 // capacity of initial garbage_refs
769 enum {
770 INIT_GARBAGE_COUNT = 128
771 };
772
773 static void _garbage_make_room(void)
774 {
775 static int first = 1;
776
777 // Create the collection table the first time it is needed
778 if (first)
779 {
780 first = 0;
781 garbage_refs = (bucket_t**)
782 malloc(INIT_GARBAGE_COUNT * sizeof(void *));
783 garbage_max = INIT_GARBAGE_COUNT;
784 }
785
786 // Double the table if it is full
787 else if (garbage_count == garbage_max)
788 {
789 garbage_refs = (bucket_t**)
790 realloc(garbage_refs, garbage_max * 2 * sizeof(void *));
791 garbage_max *= 2;
792 }
793 }
794
795
796 /***********************************************************************
797 * cache_collect_free. Add the specified malloc'd memory to the list
798 * of garbage to free at some later point.
799 * size is used for the collection threshold. It does not have to be
800 * precisely the block's size.
801 * Cache locks: cacheUpdateLock must be held by the caller.
802 **********************************************************************/
803 static void cache_collect_free(bucket_t *data, mask_t capacity)
804 {
805 cacheUpdateLock.assertLocked();
806
807 if (PrintCaches) recordDeadCache(capacity);
808
809 _garbage_make_room ();
810 garbage_byte_size += cache_t::bytesForCapacity(capacity);
811 garbage_refs[garbage_count++] = data;
812 }
813
814
815 /***********************************************************************
816 * cache_collect. Try to free accumulated dead caches.
817 * collectALot tries harder to free memory.
818 * Cache locks: cacheUpdateLock must be held by the caller.
819 **********************************************************************/
820 void cache_collect(bool collectALot)
821 {
822 cacheUpdateLock.assertLocked();
823
824 // Done if the garbage is not full
825 if (garbage_byte_size < garbage_threshold && !collectALot) {
826 return;
827 }
828
829 // Synchronize collection with objc_msgSend and other cache readers
830 if (!collectALot) {
831 if (_collecting_in_critical ()) {
832 // objc_msgSend (or other cache reader) is currently looking in
833 // the cache and might still be using some garbage.
834 if (PrintCaches) {
835 _objc_inform ("CACHES: not collecting; "
836 "objc_msgSend in progress");
837 }
838 return;
839 }
840 }
841 else {
842 // No excuses.
843 while (_collecting_in_critical())
844 ;
845 }
846
847 // No cache readers in progress - garbage is now deletable
848
849 // Log our progress
850 if (PrintCaches) {
851 cache_collections++;
852 _objc_inform ("CACHES: COLLECTING %zu bytes (%zu allocations, %zu collections)", garbage_byte_size, cache_allocations, cache_collections);
853 }
854
855 // Dispose all refs now in the garbage
856 while (garbage_count--) {
857 free(garbage_refs[garbage_count]);
858 }
859
860 // Clear the garbage count and total size indicator
861 garbage_count = 0;
862 garbage_byte_size = 0;
863
864 if (PrintCaches) {
865 size_t i;
866 size_t total_count = 0;
867 size_t total_size = 0;
868
869 for (i = 0; i < countof(cache_counts); i++) {
870 int count = cache_counts[i];
871 int slots = 1 << i;
872 size_t size = count * slots * sizeof(bucket_t);
873
874 if (!count) continue;
875
876 _objc_inform("CACHES: %4d slots: %4d caches, %6zu bytes",
877 slots, count, size);
878
879 total_count += count;
880 total_size += size;
881 }
882
883 _objc_inform("CACHES: total: %4zu caches, %6zu bytes",
884 total_count, total_size);
885 }
886 }
887
888
889 /***********************************************************************
890 * objc_task_threads
891 * Replacement for task_threads(). Define DEBUG_TASK_THREADS to debug
892 * crashes when task_threads() is failing.
893 *
894 * A failure in task_threads() usually means somebody has botched their
895 * Mach or MIG traffic. For example, somebody's error handling was wrong
896 * and they left a message queued on the MIG reply port for task_threads()
897 * to trip over.
898 *
899 * The code below is a modified version of task_threads(). It logs
900 * the msgh_id of the reply message. The msgh_id can identify the sender
901 * of the message, which can help pinpoint the faulty code.
902 * DEBUG_TASK_THREADS also calls collecting_in_critical() during every
903 * message dispatch, which can increase reproducibility of bugs.
904 *
905 * This code can be regenerated by running
906 * `mig /usr/include/mach/task.defs`.
907 **********************************************************************/
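
// Usage note: DEBUG_TASK_THREADS is an ordinary preprocessor flag checked
// with #if, so enabling it means defining it before this file is compiled
// (for example -DDEBUG_TASK_THREADS=1; the exact build mechanism is an
// assumption, not stated in this file). When enabled, cache_fill() above
// skips filling and instead calls _collecting_in_critical() on every cache
// miss, and _collecting_in_critical() uses objc_task_threads() below in
// place of task_threads().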
908 #if DEBUG_TASK_THREADS
909
910 #include <mach/mach.h>
911 #include <mach/message.h>
912 #include <mach/mig.h>
913
914 #define __MIG_check__Reply__task_subsystem__ 1
915 #define mig_internal static inline
916 #define __DeclareSendRpc(a, b)
917 #define __BeforeSendRpc(a, b)
918 #define __AfterSendRpc(a, b)
919 #define msgh_request_port msgh_remote_port
920 #define msgh_reply_port msgh_local_port
921
922 #ifndef __MachMsgErrorWithTimeout
923 #define __MachMsgErrorWithTimeout(_R_) { \
924 switch (_R_) { \
925 case MACH_SEND_INVALID_DATA: \
926 case MACH_SEND_INVALID_DEST: \
927 case MACH_SEND_INVALID_HEADER: \
928 mig_put_reply_port(InP->Head.msgh_reply_port); \
929 break; \
930 case MACH_SEND_TIMED_OUT: \
931 case MACH_RCV_TIMED_OUT: \
932 default: \
933 mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
934 } \
935 }
936 #endif /* __MachMsgErrorWithTimeout */
937
938 #ifndef __MachMsgErrorWithoutTimeout
939 #define __MachMsgErrorWithoutTimeout(_R_) { \
940 switch (_R_) { \
941 case MACH_SEND_INVALID_DATA: \
942 case MACH_SEND_INVALID_DEST: \
943 case MACH_SEND_INVALID_HEADER: \
944 mig_put_reply_port(InP->Head.msgh_reply_port); \
945 break; \
946 default: \
947 mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
948 } \
949 }
950 #endif /* __MachMsgErrorWithoutTimeout */
951
952
953 #if ( __MigTypeCheck )
954 #if __MIG_check__Reply__task_subsystem__
955 #if !defined(__MIG_check__Reply__task_threads_t__defined)
956 #define __MIG_check__Reply__task_threads_t__defined
957
958 mig_internal kern_return_t __MIG_check__Reply__task_threads_t(__Reply__task_threads_t *Out0P)
959 {
960
961 typedef __Reply__task_threads_t __Reply;
962 boolean_t msgh_simple;
963 #if __MigTypeCheck
964 unsigned int msgh_size;
965 #endif /* __MigTypeCheck */
966 if (Out0P->Head.msgh_id != 3502) {
967 if (Out0P->Head.msgh_id == MACH_NOTIFY_SEND_ONCE)
968 { return MIG_SERVER_DIED; }
969 else
970 { return MIG_REPLY_MISMATCH; }
971 }
972
973 msgh_simple = !(Out0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX);
974 #if __MigTypeCheck
975 msgh_size = Out0P->Head.msgh_size;
976
977 if ((msgh_simple || Out0P->msgh_body.msgh_descriptor_count != 1 ||
978 msgh_size != (mach_msg_size_t)sizeof(__Reply)) &&
979 (!msgh_simple || msgh_size != (mach_msg_size_t)sizeof(mig_reply_error_t) ||
980 ((mig_reply_error_t *)Out0P)->RetCode == KERN_SUCCESS))
981 { return MIG_TYPE_ERROR ; }
982 #endif /* __MigTypeCheck */
983
984 if (msgh_simple) {
985 return ((mig_reply_error_t *)Out0P)->RetCode;
986 }
987
988 #if __MigTypeCheck
989 if (Out0P->act_list.type != MACH_MSG_OOL_PORTS_DESCRIPTOR ||
990 Out0P->act_list.disposition != 17) {
991 return MIG_TYPE_ERROR;
992 }
993 #endif /* __MigTypeCheck */
994
995 return MACH_MSG_SUCCESS;
996 }
997 #endif /* !defined(__MIG_check__Reply__task_threads_t__defined) */
998 #endif /* __MIG_check__Reply__task_subsystem__ */
999 #endif /* ( __MigTypeCheck ) */
1000
1001
1002 /* Routine task_threads */
1003 static kern_return_t objc_task_threads
1004 (
1005 task_t target_task,
1006 thread_act_array_t *act_list,
1007 mach_msg_type_number_t *act_listCnt
1008 )
1009 {
1010
1011 #ifdef __MigPackStructs
1012 #pragma pack(4)
1013 #endif
1014 typedef struct {
1015 mach_msg_header_t Head;
1016 } Request;
1017 #ifdef __MigPackStructs
1018 #pragma pack()
1019 #endif
1020
1021 #ifdef __MigPackStructs
1022 #pragma pack(4)
1023 #endif
1024 typedef struct {
1025 mach_msg_header_t Head;
1026 /* start of the kernel processed data */
1027 mach_msg_body_t msgh_body;
1028 mach_msg_ool_ports_descriptor_t act_list;
1029 /* end of the kernel processed data */
1030 NDR_record_t NDR;
1031 mach_msg_type_number_t act_listCnt;
1032 mach_msg_trailer_t trailer;
1033 } Reply;
1034 #ifdef __MigPackStructs
1035 #pragma pack()
1036 #endif
1037
1038 #ifdef __MigPackStructs
1039 #pragma pack(4)
1040 #endif
1041 typedef struct {
1042 mach_msg_header_t Head;
1043 /* start of the kernel processed data */
1044 mach_msg_body_t msgh_body;
1045 mach_msg_ool_ports_descriptor_t act_list;
1046 /* end of the kernel processed data */
1047 NDR_record_t NDR;
1048 mach_msg_type_number_t act_listCnt;
1049 } __Reply;
1050 #ifdef __MigPackStructs
1051 #pragma pack()
1052 #endif
1053 /*
1054 * typedef struct {
1055 * mach_msg_header_t Head;
1056 * NDR_record_t NDR;
1057 * kern_return_t RetCode;
1058 * } mig_reply_error_t;
1059 */
1060
1061 union {
1062 Request In;
1063 Reply Out;
1064 } Mess;
1065
1066 Request *InP = &Mess.In;
1067 Reply *Out0P = &Mess.Out;
1068
1069 mach_msg_return_t msg_result;
1070
1071 #ifdef __MIG_check__Reply__task_threads_t__defined
1072 kern_return_t check_result;
1073 #endif /* __MIG_check__Reply__task_threads_t__defined */
1074
1075 __DeclareSendRpc(3402, "task_threads")
1076
1077 InP->Head.msgh_bits =
1078 MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
1079 /* msgh_size passed as argument */
1080 InP->Head.msgh_request_port = target_task;
1081 InP->Head.msgh_reply_port = mig_get_reply_port();
1082 InP->Head.msgh_id = 3402;
1083
1084 __BeforeSendRpc(3402, "task_threads")
1085 msg_result = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_reply_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1086 __AfterSendRpc(3402, "task_threads")
1087 if (msg_result != MACH_MSG_SUCCESS) {
1088 _objc_inform("task_threads received unexpected reply msgh_id 0x%zx",
1089 (size_t)Out0P->Head.msgh_id);
1090 __MachMsgErrorWithoutTimeout(msg_result);
1091 { return msg_result; }
1092 }
1093
1094
1095 #if defined(__MIG_check__Reply__task_threads_t__defined)
1096 check_result = __MIG_check__Reply__task_threads_t((__Reply__task_threads_t *)Out0P);
1097 if (check_result != MACH_MSG_SUCCESS)
1098 { return check_result; }
1099 #endif /* defined(__MIG_check__Reply__task_threads_t__defined) */
1100
1101 *act_list = (thread_act_array_t)(Out0P->act_list.address);
1102 *act_listCnt = Out0P->act_listCnt;
1103
1104 return KERN_SUCCESS;
1105 }
1106
1107 // DEBUG_TASK_THREADS
1108 #endif
1109
1110
1111 // __OBJC2__
1112 #endif