/*
 * Copyright (c) 1999-2007 Apple Inc.  All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* objc-cache.mm
* Method cache management
* Cache flushing
* Cache garbage collection
* Cache instrumentation
* Dedicated allocator for large caches
**********************************************************************/


/***********************************************************************
* Method cache locking (GrP 2001-1-14)
*
* For speed, objc_msgSend does not acquire any locks when it reads
* method caches. Instead, all cache changes are performed so that any
* objc_msgSend running concurrently with the cache mutator will not
* crash or hang or get an incorrect result from the cache.
*
* When cache memory becomes unused (e.g. the old cache after cache
* expansion), it is not immediately freed, because a concurrent
* objc_msgSend could still be using it. Instead, the memory is
* disconnected from the data structures and placed on a garbage list.
* The memory is now only accessible to instances of objc_msgSend that
* were running when the memory was disconnected; any further calls to
* objc_msgSend will not see the garbage memory because the other data
* structures don't point to it anymore. The collecting_in_critical
* function checks the PC of all threads and returns FALSE when all threads
* are found to be outside objc_msgSend. This means any call to objc_msgSend
* that could have had access to the garbage has finished or moved past the
* cache lookup stage, so it is safe to free the memory.
*
* All functions that modify cache data or structures must acquire the
* cacheUpdateLock to prevent interference from concurrent modifications.
* The function that frees cache garbage must acquire the cacheUpdateLock
* and use collecting_in_critical() to flush out cache readers.
* The cacheUpdateLock is also used to protect the custom allocator used
* for large method cache blocks.
*
* Cache readers (PC-checked by collecting_in_critical())
* objc_msgSend*
* cache_getImp
*
* Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
* cache_fill         (acquires lock)
* cache_expand       (only called from cache_fill)
* cache_create       (only called from cache_expand)
* bcopy              (only called from instrumented cache_expand)
* flush_caches       (acquires lock)
* cache_flush        (only called from cache_fill and flush_caches)
* cache_collect_free (only called from cache_expand and cache_flush)
*
* UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
* cache_print
* _class_printMethodCaches
* _class_printDuplicateCacheEntries
* _class_printMethodCacheStatistics
*
***********************************************************************/
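

/***********************************************************************
* Illustrative timeline of the scheme above (exposition only).
* Assumes one writer holding cacheUpdateLock and one lock-free reader
* inside objc_msgSend; the interleaving shown is hypothetical.
*
*   writer (cache_fill etc.)             reader (objc_msgSend)
*   ------------------------             ---------------------
*                                        loads buckets pointer (old)
*   allocates bigger buckets
*   publishes new buckets and mask
*   cache_collect_free(old buckets)      still probing old buckets
*   cache_collect(false):
*     _collecting_in_critical() finds
*     the reader's PC inside the
*     objc_msgSend range -> defer free
*                                        returns from objc_msgSend
*   some later cache_collect() finds
*   no PCs in any cache-reading range
*   -> free(old buckets) is now safe
**********************************************************************/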


#if __OBJC2__

#include "objc-private.h"
#include "objc-cache.h"


/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
enum {
    INIT_CACHE_SIZE_LOG2 = 2,
    INIT_CACHE_SIZE      = (1 << INIT_CACHE_SIZE_LOG2)
};

static void cache_collect_free(struct bucket_t *data, mask_t capacity);
static int _collecting_in_critical(void);
static void _garbage_make_room(void);


/***********************************************************************
* Cache statistics for OBJC_PRINT_CACHE_SETUP
**********************************************************************/
static unsigned int cache_counts[16];
static size_t cache_allocations;
static size_t cache_collections;

static void recordNewCache(mask_t capacity)
{
    size_t bucket = log2u(capacity);
    if (bucket < countof(cache_counts)) {
        cache_counts[bucket]++;
    }
    cache_allocations++;
}

static void recordDeadCache(mask_t capacity)
{
    size_t bucket = log2u(capacity);
    if (bucket < countof(cache_counts)) {
        cache_counts[bucket]--;
    }
}

/***********************************************************************
* Pointers used by compiled class objects
* These use asm to avoid conflicts with the compiler's internal declarations
**********************************************************************/

// EMPTY_BYTES includes space for a cache end marker bucket.
// This end marker doesn't actually have the wrap-around pointer
// because cache scans always find an empty bucket before they might wrap.
// 1024 buckets is fairly common.
#if DEBUG
    // Use a smaller size to exercise heap-allocated empty caches.
#   define EMPTY_BYTES ((8+1)*16)
#else
#   define EMPTY_BYTES ((1024+1)*16)
#endif
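
// Arithmetic check (exposition, not upstream commentary): on LP64
// targets a bucket_t is one SEL plus one IMP, i.e. 16 bytes, so
// (1024+1)*16 reserves 1024 buckets plus the end marker, letting
// __objc_empty_cache stand in for any empty cache of up to 1024
// entries; 32-bit targets simply over-reserve. The DEBUG value
// (8+1)*16 shrinks the threshold so the heap-allocated path in
// emptyBucketsForCapacity() gets exercised.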

#define stringize(x) #x
#define stringize2(x) stringize(x)

// "cache" is cache->buckets; "vtable" is cache->mask/occupied
// hack to avoid conflicts with compiler's internal declaration
asm("\n .section __TEXT,__const"
    "\n .globl __objc_empty_vtable"
    "\n .set __objc_empty_vtable, 0"
    "\n .globl __objc_empty_cache"
    "\n .align 3"
    "\n __objc_empty_cache: .space " stringize2(EMPTY_BYTES)
    );


#if __arm__  ||  __x86_64__  ||  __i386__
// objc_msgSend has few registers available.
// Cache scan increments and wraps at special end-marking bucket.
#define CACHE_END_MARKER 1
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return (i+1) & mask;
}

#elif __arm64__
// objc_msgSend has lots of registers available.
// Cache scan decrements. No end marker needed.
#define CACHE_END_MARKER 0
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return i ? i-1 : mask;
}

#else
#error unknown architecture
#endif
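
// Worked example (exposition only): with mask == 7, a selector that
// hashes to bucket 3 probes in this order on collisions:
//   __arm__/__x86_64__/__i386__:  3, 4, 5, 6, 7, 0, 1, 2
//   __arm64__:                    3, 2, 1, 0, 7, 6, 5, 4
// Either order visits every bucket exactly once before returning to
// the starting index, which cache_t::find() below relies on to detect
// a full (and therefore corrupt) scan.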


// copied from dispatch_atomic_maximally_synchronizing_barrier
// fixme verify that this barrier hack does in fact work here
#if __x86_64__
#define mega_barrier() \
    do { unsigned long _clbr; __asm__ __volatile__( \
        "cpuid" \
        : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \
        ); } while(0)

#elif __i386__
#define mega_barrier() \
    do { unsigned long _clbr; __asm__ __volatile__( \
        "cpuid" \
        : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \
        ); } while(0)

#elif __arm__  ||  __arm64__
#define mega_barrier() \
    __asm__ __volatile__( \
        "dsb ish" \
        : : : "memory")

#else
#error unknown architecture
#endif

#if __arm64__

// Pointer-size register prefix for inline asm
# if __LP64__
#   define p "x"  // true arm64
# else
#   define p "w"  // arm64_32
# endif

// Use atomic double-word instructions to update cache entries.
// This requires that cache buckets not cross cache line boundaries.
static ALWAYS_INLINE void
stp(uintptr_t onep, uintptr_t twop, void *destp)
{
    __asm__ ("stp %" p "[one], %" p "[two], [%x[dest]]"
             : "=m" (((uintptr_t *)(destp))[0]),
               "=m" (((uintptr_t *)(destp))[1])
             : [one] "r" (onep),
               [two] "r" (twop),
               [dest] "r" (destp)
             : /* no clobbers */
             );
}

static ALWAYS_INLINE void __unused
ldp(uintptr_t& onep, uintptr_t& twop, const void *srcp)
{
    __asm__ ("ldp %" p "[one], %" p "[two], [%x[src]]"
             : [one] "=r" (onep),
               [two] "=r" (twop)
             : "m" (((const uintptr_t *)(srcp))[0]),
               "m" (((const uintptr_t *)(srcp))[1]),
               [src] "r" (srcp)
             : /* no clobbers */
             );
}

#undef p
#endif


// Class points to cache. SEL is key. Cache buckets store SEL+IMP.
// Caches are never built in the dyld shared cache.

static inline mask_t cache_hash(SEL sel, mask_t mask)
{
    return (mask_t)(uintptr_t)sel & mask;
}
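
// Example (exposition only): with mask == 3 (capacity 4), a SEL whose
// address ends in 0x6 lands in bucket 0x6 & 3 == 2. Because capacities
// are powers of two, mask == capacity-1 and the hash is a single AND;
// collisions are handled by cache_next() probing.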

cache_t *getCache(Class cls)
{
    assert(cls);
    return &cls->cache;
}

#if __arm64__

template<Atomicity atomicity>
void bucket_t::set(SEL newSel, IMP newImp)
{
    assert(_sel == 0  ||  _sel == newSel);

    static_assert(offsetof(bucket_t,_imp) == 0 &&
                  offsetof(bucket_t,_sel) == sizeof(void *),
                  "bucket_t layout doesn't match arm64 bucket_t::set()");

    uintptr_t signedImp = signIMP(newImp, newSel);

    if (atomicity == Atomic) {
        // LDP/STP guarantees that all observers get
        // either imp/sel or newImp/newSel
        stp(signedImp, (uintptr_t)newSel, this);
    } else {
        _sel = newSel;
        _imp = signedImp;
    }
}

#else

template<Atomicity atomicity>
void bucket_t::set(SEL newSel, IMP newImp)
{
    assert(_sel == 0  ||  _sel == newSel);

    // objc_msgSend uses sel and imp with no locks.
    // It is safe for objc_msgSend to see new imp but NULL sel
    // (It will get a cache miss but not dispatch to the wrong place.)
    // It is unsafe for objc_msgSend to see old imp and new sel.
    // Therefore we write new imp, wait a lot, then write new sel.

    _imp = (uintptr_t)newImp;

    if (_sel != newSel) {
        if (atomicity == Atomic) {
            mega_barrier();
        }
        _sel = newSel;
    }
}

#endif
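
// The interleavings this store order tolerates, as a sketch
// (exposition only). "old" means the bucket's values before set():
//
//   concurrent reader sees     outcome
//   ----------------------     --------------------------------------
//   old sel (0), old imp       miss; falls through to slow lookup. OK.
//   old sel (0), new imp       miss (sel doesn't match). OK.
//   new sel, new imp           hit; correct dispatch. OK.
//   new sel, old imp           WRONG dispatch - exactly the case the
//                              imp-before-sel order and barrier forbid.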

void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
    // objc_msgSend uses mask and buckets with no locks.
    // It is safe for objc_msgSend to see new buckets but old mask.
    // (It will get a cache miss but not overrun the buckets' bounds).
    // It is unsafe for objc_msgSend to see old buckets and new mask.
    // Therefore we write new buckets, wait a lot, then write new mask.
    // objc_msgSend reads mask first, then buckets.

    // ensure other threads see buckets contents before buckets pointer
    mega_barrier();

    _buckets = newBuckets;

    // ensure other threads see new buckets before new mask
    mega_barrier();

    _mask = newMask;
    _occupied = 0;
}
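
// Reader-side counterpart as a minimal C sketch (exposition only; the
// real reader is hand-written assembly in objc-msg-*.s, and imp() is a
// hypothetical accessor). Reading mask before buckets pairs with the
// write order above: the worst interleaving is old mask + new buckets,
// and since caches never shrink in place, the old (smaller) mask keeps
// every probe inside the new allocation.
#if 0
static IMP cache_lookup_sketch(cache_t *cache, SEL sel)
{
    mask_t m = cache->mask();           // read mask first ...
    bucket_t *b = cache->buckets();     // ... then buckets
    mask_t begin = cache_hash(sel, m);
    mask_t i = begin;
    do {
        if (b[i].sel() == sel) return b[i].imp();  // hit
        if (b[i].sel() == 0) return nil;           // empty slot: miss
    } while ((i = cache_next(i, m)) != begin);
    return nil;                                    // wrapped: miss
}
#endif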


struct bucket_t *cache_t::buckets()
{
    return _buckets;
}

mask_t cache_t::mask()
{
    return _mask;
}

mask_t cache_t::occupied()
{
    return _occupied;
}

void cache_t::incrementOccupied()
{
    _occupied++;
}

void cache_t::initializeToEmpty()
{
    bzero(this, sizeof(*this));
    _buckets = (bucket_t *)&_objc_empty_cache;
}


mask_t cache_t::capacity()
{
    return mask() ? mask()+1 : 0;
}


#if CACHE_END_MARKER

size_t cache_t::bytesForCapacity(uint32_t cap)
{
    // fixme put end marker inline when capacity+1 malloc is inefficient
    return sizeof(bucket_t) * (cap + 1);
}

bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap)
{
    // bytesForCapacity() chooses whether the end marker is inline or not
    return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1;
}

bucket_t *allocateBuckets(mask_t newCapacity)
{
    // Allocate one extra bucket to mark the end of the list.
    // This can't overflow mask_t because newCapacity is a power of 2.
    // fixme instead put the end mark inline when +1 is malloc-inefficient
    bucket_t *newBuckets = (bucket_t *)
        calloc(cache_t::bytesForCapacity(newCapacity), 1);

    bucket_t *end = cache_t::endMarker(newBuckets, newCapacity);

#if __arm__
    // End marker's sel is 1 and imp points BEFORE the first bucket.
    // This saves an instruction in objc_msgSend.
    end->set<NotAtomic>((SEL)(uintptr_t)1, (IMP)(newBuckets - 1));
#else
    // End marker's sel is 1 and imp points to the first bucket.
    end->set<NotAtomic>((SEL)(uintptr_t)1, (IMP)newBuckets);
#endif

    if (PrintCaches) recordNewCache(newCapacity);

    return newBuckets;
}

#else

size_t cache_t::bytesForCapacity(uint32_t cap)
{
    return sizeof(bucket_t) * cap;
}

bucket_t *allocateBuckets(mask_t newCapacity)
{
    if (PrintCaches) recordNewCache(newCapacity);

    return (bucket_t *)calloc(cache_t::bytesForCapacity(newCapacity), 1);
}

#endif
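
// Layout with CACHE_END_MARKER, for a capacity-4 allocation
// (exposition only):
//
//   [0] [1] [2] [3] [end marker]
//                    sel = 1
//                    imp = &buckets[0]   (&buckets[-1] on __arm__)
//
// The assembly scan walks forward and, on reaching the sel==1 marker,
// reloads its probe pointer from the marker's imp instead of masking
// the index on every step.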


bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true)
{
    cacheUpdateLock.assertLocked();

    size_t bytes = cache_t::bytesForCapacity(capacity);

    // Use _objc_empty_cache if the bucket array is small enough.
    if (bytes <= EMPTY_BYTES) {
        return (bucket_t *)&_objc_empty_cache;
    }

    // Use shared empty buckets allocated on the heap.
    static bucket_t **emptyBucketsList = nil;
    static mask_t emptyBucketsListCount = 0;

    mask_t index = log2u(capacity);

    if (index >= emptyBucketsListCount) {
        if (!allocate) return nil;

        mask_t newListCount = index + 1;
        bucket_t *newBuckets = (bucket_t *)calloc(bytes, 1);
        emptyBucketsList = (bucket_t**)
            realloc(emptyBucketsList, newListCount * sizeof(bucket_t *));
        // Share newBuckets for every un-allocated size smaller than index.
        // The array is therefore always fully populated.
        for (mask_t i = emptyBucketsListCount; i < newListCount; i++) {
            emptyBucketsList[i] = newBuckets;
        }
        emptyBucketsListCount = newListCount;

        if (PrintCaches) {
            _objc_inform("CACHES: new empty buckets at %p (capacity %zu)",
                         newBuckets, (size_t)capacity);
        }
    }

    return emptyBucketsList[index];
}
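
// Worked example (exposition only, non-DEBUG build): bytesForCapacity(1024)
// is at most (1024+1)*16 == EMPTY_BYTES, so erased caches of capacity
// <= 1024 all return __objc_empty_cache above. The first heap request,
// say capacity 2048, takes index = log2u(2048) = 11 and leaves slots
// 0..11 of emptyBucketsList all pointing at one shared zero-filled
// block that is large enough for any of those capacities.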


bool cache_t::isConstantEmptyCache()
{
    return
        occupied() == 0  &&
        buckets() == emptyBucketsForCapacity(capacity(), false);
}

bool cache_t::canBeFreed()
{
    return !isConstantEmptyCache();
}


void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity)
{
    bool freeOld = canBeFreed();

    bucket_t *oldBuckets = buckets();
    bucket_t *newBuckets = allocateBuckets(newCapacity);

    // Cache's old contents are not propagated.
    // This is thought to save cache memory at the cost of extra cache fills.
    // fixme re-measure this

    assert(newCapacity > 0);
    assert((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);

    setBucketsAndMask(newBuckets, newCapacity - 1);

    if (freeOld) {
        cache_collect_free(oldBuckets, oldCapacity);
        cache_collect(false);
    }
}


void cache_t::bad_cache(id receiver, SEL sel, Class isa)
{
    // Log in separate steps in case the logging itself causes a crash.
    _objc_inform_now_and_on_crash
        ("Method cache corrupted. This may be a message to an "
         "invalid object, or a memory error somewhere else.");
    cache_t *cache = &isa->cache;
    _objc_inform_now_and_on_crash
        ("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
         "mask 0x%x, occupied 0x%x",
         receiver ? "receiver" : "unused", receiver,
         sel, isa, cache, cache->_buckets,
         cache->_mask, cache->_occupied);
    _objc_inform_now_and_on_crash
        ("%s %zu bytes, buckets %zu bytes",
         receiver ? "receiver" : "unused", malloc_size(receiver),
         malloc_size(cache->_buckets));
    _objc_inform_now_and_on_crash
        ("selector '%s'", sel_getName(sel));
    _objc_inform_now_and_on_crash
        ("isa '%s'", isa->nameForLogging());
    _objc_fatal
        ("Method cache corrupted. This may be a message to an "
         "invalid object, or a memory error somewhere else.");
}


bucket_t * cache_t::find(SEL s, id receiver)
{
    assert(s != 0);

    bucket_t *b = buckets();
    mask_t m = mask();
    mask_t begin = cache_hash(s, m);
    mask_t i = begin;
    do {
        if (b[i].sel() == 0  ||  b[i].sel() == s) {
            return &b[i];
        }
    } while ((i = cache_next(i, m)) != begin);

    // hack
    Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache));
    cache_t::bad_cache(receiver, (SEL)s, cls);
}


void cache_t::expand()
{
    cacheUpdateLock.assertLocked();

    uint32_t oldCapacity = capacity();
    uint32_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE;

    if ((uint32_t)(mask_t)newCapacity != newCapacity) {
        // mask overflow - can't grow further
        // fixme this wastes one bit of mask
        newCapacity = oldCapacity;
    }

    reallocate(oldCapacity, newCapacity);
}
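
// Growth sequence (exposition only): capacities run 4, 8, 16, ... by
// doubling, so mask stays capacity-1 and cache_hash() stays a single
// AND. Once the doubled value no longer fits in mask_t, reallocate()
// is called with the old capacity: the entries are still discarded,
// but the table stops growing.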


static void cache_fill_nolock(Class cls, SEL sel, IMP imp, id receiver)
{
    cacheUpdateLock.assertLocked();

    // Never cache before +initialize is done
    if (!cls->isInitialized()) return;

    // Make sure the entry wasn't added to the cache by some other thread
    // before we grabbed the cacheUpdateLock.
    if (cache_getImp(cls, sel)) return;

    cache_t *cache = getCache(cls);

    // Use the cache as-is if it is less than 3/4 full
    mask_t newOccupied = cache->occupied() + 1;
    mask_t capacity = cache->capacity();
    if (cache->isConstantEmptyCache()) {
        // Cache is read-only. Replace it.
        cache->reallocate(capacity, capacity ?: INIT_CACHE_SIZE);
    }
    else if (newOccupied <= capacity / 4 * 3) {
        // Cache is less than 3/4 full. Use it as-is.
    }
    else {
        // Cache is too full. Expand it.
        cache->expand();
    }

    // Scan for the first unused slot and insert there.
    // There is guaranteed to be an empty slot because the
    // minimum size is 4 and we resized at 3/4 full.
    bucket_t *bucket = cache->find(sel, receiver);
    if (bucket->sel() == 0) cache->incrementOccupied();
    bucket->set<Atomic>(sel, imp);
}
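
// Worked example of the 3/4 rule (exposition only): with capacity 8,
// capacity / 4 * 3 == 6, so a fill that would make occupied() reach 7
// expands to 16 first (discarding the old entries). Hence after any
// fill at most 3/4 of the slots are used, and find() is guaranteed to
// hit an empty bucket before wrapping.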

void cache_fill(Class cls, SEL sel, IMP imp, id receiver)
{
#if !DEBUG_TASK_THREADS
    mutex_locker_t lock(cacheUpdateLock);
    cache_fill_nolock(cls, sel, imp, receiver);
#else
    _collecting_in_critical();
    return;
#endif
}


// Reset this entire cache to the uncached lookup by reallocating it.
// This must not shrink the cache - that breaks the lock-free scheme.
void cache_erase_nolock(Class cls)
{
    cacheUpdateLock.assertLocked();

    cache_t *cache = getCache(cls);

    mask_t capacity = cache->capacity();
    if (capacity > 0  &&  cache->occupied() > 0) {
        auto oldBuckets = cache->buckets();
        auto buckets = emptyBucketsForCapacity(capacity);
        cache->setBucketsAndMask(buckets, capacity - 1); // also clears occupied

        cache_collect_free(oldBuckets, capacity);
        cache_collect(false);
    }
}


void cache_delete(Class cls)
{
    mutex_locker_t lock(cacheUpdateLock);
    if (cls->cache.canBeFreed()) {
        if (PrintCaches) recordDeadCache(cls->cache.capacity());
        free(cls->cache.buckets());
    }
}


/***********************************************************************
* cache collection.
**********************************************************************/

#if !TARGET_OS_WIN32

// A sentinel (magic value) to report bad thread_get_state status.
// Must not be a valid PC.
// Must not be zero - thread_get_state() on a new thread returns PC == 0.
#define PC_SENTINEL  1

static uintptr_t _get_pc_for_thread(thread_t thread)
#if defined(__i386__)
{
    i386_thread_state_t state;
    unsigned int count = i386_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
}
#elif defined(__x86_64__)
{
    x86_thread_state64_t state;
    unsigned int count = x86_THREAD_STATE64_COUNT;
    kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
}
#elif defined(__arm__)
{
    arm_thread_state_t state;
    unsigned int count = ARM_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
}
#elif defined(__arm64__)
{
    arm_thread_state64_t state;
    unsigned int count = ARM_THREAD_STATE64_COUNT;
    kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? arm_thread_state64_get_pc(state) : PC_SENTINEL;
}
#else
{
#error _get_pc_for_thread () not implemented for this architecture
}
#endif

#endif

/***********************************************************************
* _collecting_in_critical.
* Returns TRUE if some thread is currently executing a cache-reading
* function. Collection of cache garbage is not allowed when a cache-
* reading function is in progress because it might still be using
* the garbage memory.
**********************************************************************/
extern "C" uintptr_t objc_entryPoints[];
extern "C" uintptr_t objc_exitPoints[];
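
// Exposition: objc_entryPoints/objc_exitPoints are parallel,
// zero-terminated tables emitted by the assembly dispatch code
// (objc-msg-*.s); each pair brackets one cache-reading routine such as
// objc_msgSend or cache_getImp. A PC p is "in a critical region"
// exactly when objc_entryPoints[i] <= p <= objc_exitPoints[i] for some
// i, which is the test the loop below performs.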

static int _collecting_in_critical(void)
{
#if TARGET_OS_WIN32
    return TRUE;
#else
    thread_act_port_array_t threads;
    unsigned number;
    unsigned count;
    kern_return_t ret;
    int result;

    mach_port_t mythread = pthread_mach_thread_np(pthread_self());

    // Get a list of all the threads in the current task
#if !DEBUG_TASK_THREADS
    ret = task_threads(mach_task_self(), &threads, &number);
#else
    ret = objc_task_threads(mach_task_self(), &threads, &number);
#endif

    if (ret != KERN_SUCCESS) {
        // See DEBUG_TASK_THREADS below to help debug this.
        _objc_fatal("task_threads failed (result 0x%x)\n", ret);
    }

    // Check whether any thread is in the cache lookup code
    result = FALSE;
    for (count = 0; count < number; count++)
    {
        int region;
        uintptr_t pc;

        // Don't bother checking ourselves
        if (threads[count] == mythread)
            continue;

        // Find out where thread is executing
        pc = _get_pc_for_thread (threads[count]);

        // Check for bad status, and if so, assume the worst (can't collect)
        if (pc == PC_SENTINEL)
        {
            result = TRUE;
            goto done;
        }

        // Check whether it is in the cache lookup code
        for (region = 0; objc_entryPoints[region] != 0; region++)
        {
            if ((pc >= objc_entryPoints[region]) &&
                (pc <= objc_exitPoints[region]))
            {
                result = TRUE;
                goto done;
            }
        }
    }

 done:
    // Deallocate the port rights for the threads
    for (count = 0; count < number; count++) {
        mach_port_deallocate(mach_task_self (), threads[count]);
    }

    // Deallocate the thread list
    vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);

    // Return our finding
    return result;
#endif
}


/***********************************************************************
* _garbage_make_room.  Ensure that there is enough room for at least
* one more ref in the garbage.
**********************************************************************/

// amount of memory represented by all refs in the garbage
static size_t garbage_byte_size = 0;

// do not empty the garbage until garbage_byte_size gets at least this big
static size_t garbage_threshold = 32*1024;

// table of refs to free
static bucket_t **garbage_refs = 0;

// current number of refs in garbage_refs
static size_t garbage_count = 0;

// capacity of current garbage_refs
static size_t garbage_max = 0;

// capacity of initial garbage_refs
enum {
    INIT_GARBAGE_COUNT = 128
};

static void _garbage_make_room(void)
{
    static int first = 1;

    // Create the collection table the first time it is needed
    if (first)
    {
        first = 0;
        garbage_refs = (bucket_t**)
            malloc(INIT_GARBAGE_COUNT * sizeof(void *));
        garbage_max = INIT_GARBAGE_COUNT;
    }

    // Double the table if it is full
    else if (garbage_count == garbage_max)
    {
        garbage_refs = (bucket_t**)
            realloc(garbage_refs, garbage_max * 2 * sizeof(void *));
        garbage_max *= 2;
    }
}


/***********************************************************************
* cache_collect_free.  Add the specified malloc'd memory to the list
* of them to free at some later point.
* size is used for the collection threshold. It does not have to be
* precisely the block's size.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static void cache_collect_free(bucket_t *data, mask_t capacity)
{
    cacheUpdateLock.assertLocked();

    if (PrintCaches) recordDeadCache(capacity);

    _garbage_make_room ();
    garbage_byte_size += cache_t::bytesForCapacity(capacity);
    garbage_refs[garbage_count++] = data;
}


/***********************************************************************
* cache_collect.  Try to free accumulated dead caches.
* collectALot tries harder to free memory.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
void cache_collect(bool collectALot)
{
    cacheUpdateLock.assertLocked();

    // Done if the garbage is not full
    if (garbage_byte_size < garbage_threshold  &&  !collectALot) {
        return;
    }

    // Synchronize collection with objc_msgSend and other cache readers
    if (!collectALot) {
        if (_collecting_in_critical ()) {
            // objc_msgSend (or other cache reader) is currently looking in
            // the cache and might still be using some garbage.
            if (PrintCaches) {
                _objc_inform ("CACHES: not collecting; "
                              "objc_msgSend in progress");
            }
            return;
        }
    }
    else {
        // No excuses.
        while (_collecting_in_critical())
            ;
    }

    // No cache readers in progress - garbage is now deletable

    // Log our progress
    if (PrintCaches) {
        cache_collections++;
        _objc_inform ("CACHES: COLLECTING %zu bytes (%zu allocations, %zu collections)", garbage_byte_size, cache_allocations, cache_collections);
    }

    // Dispose all refs now in the garbage
    // Erase each entry so debugging tools don't see stale pointers.
    while (garbage_count--) {
        auto dead = garbage_refs[garbage_count];
        garbage_refs[garbage_count] = nil;
        free(dead);
    }

    // Clear the garbage count and total size indicator
    garbage_count = 0;
    garbage_byte_size = 0;

    if (PrintCaches) {
        size_t i;
        size_t total_count = 0;
        size_t total_size = 0;

        for (i = 0; i < countof(cache_counts); i++) {
            int count = cache_counts[i];
            int slots = 1 << i;
            size_t size = count * slots * sizeof(bucket_t);

            if (!count) continue;

            _objc_inform("CACHES: %4d slots: %4d caches, %6zu bytes",
                         slots, count, size);

            total_count += count;
            total_size += size;
        }

        _objc_inform("CACHES: total: %4zu caches, %6zu bytes",
                     total_count, total_size);
    }
}


/***********************************************************************
* objc_task_threads
* Replacement for task_threads(). Define DEBUG_TASK_THREADS to debug
* crashes when task_threads() is failing.
*
* A failure in task_threads() usually means somebody has botched their
* Mach or MIG traffic. For example, somebody's error handling was wrong
* and they left a message queued on the MIG reply port for task_threads()
* to trip over.
*
* The code below is a modified version of task_threads(). It logs
* the msgh_id of the reply message. The msgh_id can identify the sender
* of the message, which can help pinpoint the faulty code.
* DEBUG_TASK_THREADS also calls collecting_in_critical() during every
* message dispatch, which can increase reproducibility of bugs.
*
* This code can be regenerated by running
* `mig /usr/include/mach/task.defs`.
**********************************************************************/
#if DEBUG_TASK_THREADS

#include <mach/mach.h>
#include <mach/message.h>
#include <mach/mig.h>

#define __MIG_check__Reply__task_subsystem__ 1
#define mig_internal static inline
#define __DeclareSendRpc(a, b)
#define __BeforeSendRpc(a, b)
#define __AfterSendRpc(a, b)
#define msgh_request_port       msgh_remote_port
#define msgh_reply_port         msgh_local_port

#ifndef __MachMsgErrorWithTimeout
#define __MachMsgErrorWithTimeout(_R_) { \
        switch (_R_) { \
        case MACH_SEND_INVALID_DATA: \
        case MACH_SEND_INVALID_DEST: \
        case MACH_SEND_INVALID_HEADER: \
            mig_put_reply_port(InP->Head.msgh_reply_port); \
            break; \
        case MACH_SEND_TIMED_OUT: \
        case MACH_RCV_TIMED_OUT: \
        default: \
            mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
        } \
    }
#endif  /* __MachMsgErrorWithTimeout */

#ifndef __MachMsgErrorWithoutTimeout
#define __MachMsgErrorWithoutTimeout(_R_) { \
        switch (_R_) { \
        case MACH_SEND_INVALID_DATA: \
        case MACH_SEND_INVALID_DEST: \
        case MACH_SEND_INVALID_HEADER: \
            mig_put_reply_port(InP->Head.msgh_reply_port); \
            break; \
        default: \
            mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
        } \
    }
#endif  /* __MachMsgErrorWithoutTimeout */


#if ( __MigTypeCheck )
#if __MIG_check__Reply__task_subsystem__
#if !defined(__MIG_check__Reply__task_threads_t__defined)
#define __MIG_check__Reply__task_threads_t__defined

mig_internal kern_return_t __MIG_check__Reply__task_threads_t(__Reply__task_threads_t *Out0P)
{

    typedef __Reply__task_threads_t __Reply;
    boolean_t msgh_simple;
#if __MigTypeCheck
    unsigned int msgh_size;
#endif  /* __MigTypeCheck */
    if (Out0P->Head.msgh_id != 3502) {
        if (Out0P->Head.msgh_id == MACH_NOTIFY_SEND_ONCE)
        { return MIG_SERVER_DIED; }
        else
        { return MIG_REPLY_MISMATCH; }
    }

    msgh_simple = !(Out0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX);
#if __MigTypeCheck
    msgh_size = Out0P->Head.msgh_size;

    if ((msgh_simple || Out0P->msgh_body.msgh_descriptor_count != 1 ||
         msgh_size != (mach_msg_size_t)sizeof(__Reply)) &&
        (!msgh_simple || msgh_size != (mach_msg_size_t)sizeof(mig_reply_error_t) ||
         ((mig_reply_error_t *)Out0P)->RetCode == KERN_SUCCESS))
    { return MIG_TYPE_ERROR ; }
#endif  /* __MigTypeCheck */

    if (msgh_simple) {
        return ((mig_reply_error_t *)Out0P)->RetCode;
    }

#if __MigTypeCheck
    if (Out0P->act_list.type != MACH_MSG_OOL_PORTS_DESCRIPTOR ||
        Out0P->act_list.disposition != 17) {
        return MIG_TYPE_ERROR;
    }
#endif  /* __MigTypeCheck */

    return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Reply__task_threads_t__defined) */
#endif /* __MIG_check__Reply__task_subsystem__ */
#endif /* ( __MigTypeCheck ) */


/* Routine task_threads */
static kern_return_t objc_task_threads
(
    task_t target_task,
    thread_act_array_t *act_list,
    mach_msg_type_number_t *act_listCnt
)
{

#ifdef  __MigPackStructs
#pragma pack(4)
#endif
    typedef struct {
        mach_msg_header_t Head;
    } Request;
#ifdef  __MigPackStructs
#pragma pack()
#endif

#ifdef  __MigPackStructs
#pragma pack(4)
#endif
    typedef struct {
        mach_msg_header_t Head;
        /* start of the kernel processed data */
        mach_msg_body_t msgh_body;
        mach_msg_ool_ports_descriptor_t act_list;
        /* end of the kernel processed data */
        NDR_record_t NDR;
        mach_msg_type_number_t act_listCnt;
        mach_msg_trailer_t trailer;
    } Reply;
#ifdef  __MigPackStructs
#pragma pack()
#endif

#ifdef  __MigPackStructs
#pragma pack(4)
#endif
    typedef struct {
        mach_msg_header_t Head;
        /* start of the kernel processed data */
        mach_msg_body_t msgh_body;
        mach_msg_ool_ports_descriptor_t act_list;
        /* end of the kernel processed data */
        NDR_record_t NDR;
        mach_msg_type_number_t act_listCnt;
    } __Reply;
#ifdef  __MigPackStructs
#pragma pack()
#endif
    /*
     * typedef struct {
     *     mach_msg_header_t Head;
     *     NDR_record_t NDR;
     *     kern_return_t RetCode;
     * } mig_reply_error_t;
     */

    union {
        Request In;
        Reply Out;
    } Mess;

    Request *InP = &Mess.In;
    Reply *Out0P = &Mess.Out;

    mach_msg_return_t msg_result;

#ifdef  __MIG_check__Reply__task_threads_t__defined
    kern_return_t check_result;
#endif  /* __MIG_check__Reply__task_threads_t__defined */

    __DeclareSendRpc(3402, "task_threads")

    InP->Head.msgh_bits =
        MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
    /* msgh_size passed as argument */
    InP->Head.msgh_request_port = target_task;
    InP->Head.msgh_reply_port = mig_get_reply_port();
    InP->Head.msgh_id = 3402;

    __BeforeSendRpc(3402, "task_threads")
    msg_result = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_reply_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    __AfterSendRpc(3402, "task_threads")
    if (msg_result != MACH_MSG_SUCCESS) {
        _objc_inform("task_threads received unexpected reply msgh_id 0x%zx",
                     (size_t)Out0P->Head.msgh_id);
        __MachMsgErrorWithoutTimeout(msg_result);
        { return msg_result; }
    }


#if defined(__MIG_check__Reply__task_threads_t__defined)
    check_result = __MIG_check__Reply__task_threads_t((__Reply__task_threads_t *)Out0P);
    if (check_result != MACH_MSG_SUCCESS)
    { return check_result; }
#endif  /* defined(__MIG_check__Reply__task_threads_t__defined) */

    *act_list = (thread_act_array_t)(Out0P->act_list.address);
    *act_listCnt = Out0P->act_listCnt;

    return KERN_SUCCESS;
}

// DEBUG_TASK_THREADS
#endif


// __OBJC2__
#endif