// apple/objc4 (objc4-706), runtime/objc-cache-old.mm
1 /*
2 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /***********************************************************************
25 * objc-cache-old.mm
26 * Method cache management
27 * Cache flushing
28 * Cache garbage collection
29 * Cache instrumentation
30 * Dedicated allocator for large caches
31 **********************************************************************/
32
33
34 /***********************************************************************
35 * Method cache locking (GrP 2001-1-14)
36 *
37 * For speed, objc_msgSend does not acquire any locks when it reads
38 * method caches. Instead, all cache changes are performed so that any
39 * objc_msgSend running concurrently with the cache mutator will not
40 * crash or hang or get an incorrect result from the cache.
41 *
42 * When cache memory becomes unused (e.g. the old cache after cache
43 * expansion), it is not immediately freed, because a concurrent
44 * objc_msgSend could still be using it. Instead, the memory is
45 * disconnected from the data structures and placed on a garbage list.
46 * The memory is now only accessible to instances of objc_msgSend that
47 * were running when the memory was disconnected; any further calls to
48 * objc_msgSend will not see the garbage memory because the other data
49 * structures don't point to it anymore. The collecting_in_critical
50 * function checks the PC of all threads and returns FALSE when all threads
51 * are found to be outside objc_msgSend. This means any call to objc_msgSend
52 * that could have had access to the garbage has finished or moved past the
53 * cache lookup stage, so it is safe to free the memory.
54 *
55 * All functions that modify cache data or structures must acquire the
56 * cacheUpdateLock to prevent interference from concurrent modifications.
57 * The function that frees cache garbage must acquire the cacheUpdateLock
58 * and use collecting_in_critical() to flush out cache readers.
59 * The cacheUpdateLock is also used to protect the custom allocator used
60 * for large method cache blocks.
61 *
62 * Cache readers (PC-checked by collecting_in_critical())
63 * objc_msgSend*
64 * _cache_getImp
65 * _cache_getMethod
66 *
67 * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
68 * _cache_fill (acquires lock)
69 * _cache_expand (only called from cache_fill)
70 * _cache_create (only called from cache_expand)
71 * bcopy (only called from instrumented cache_expand)
72 * flush_caches (acquires lock)
73 * _cache_flush (only called from cache_fill and flush_caches)
74 * _cache_collect_free (only called from cache_expand and cache_flush)
75 *
76 * UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
77 * _cache_print
78 * _class_printMethodCaches
79 * _class_printDuplicateCacheEntries
80 * _class_printMethodCacheStatistics
81 *
82 * _class_lookupMethodAndLoadCache is a special case. It may read a
83 * method triplet out of one cache and store it in another cache. This
84 * is unsafe if the method triplet is a forward:: entry, because the
85 * triplet itself could be freed unless _class_lookupMethodAndLoadCache
86 * were PC-checked or used a lock. Additionally, storing the method
87 * triplet in both caches would result in double-freeing if both caches
88 * were flushed or expanded. The solution is for _cache_getMethod to
89 * ignore all entries whose implementation is _objc_msgForward_impcache,
90 * so _class_lookupMethodAndLoadCache cannot look at a forward:: entry
91 * unsafely or place it in multiple caches.
92 ***********************************************************************/
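/***********************************************************************
 * Illustrative outline (editor's sketch of the scheme above; the real
 * code is in _cache_expand, _cache_collect_free and _cache_collect below):
 *
 *   1. A cache writer takes cacheUpdateLock and builds a replacement cache.
 *   2. It repoints cls->cache at the new block, so no new objc_msgSend
 *      can reach the old block; only reads already in flight may still
 *      hold a pointer to it.
 *   3. The old block is queued with _cache_collect_free(), never free()d
 *      directly.
 *   4. _cache_collect() frees the queued garbage only once
 *      _collecting_in_critical() sees no thread PC inside the
 *      objc_entryPoints/objc_exitPoints ranges (objc_msgSend and friends).
 **********************************************************************/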
93
94 #if !__OBJC2__
95
96 #include "objc-private.h"
97 #include "objc-cache-old.h"
98 #include "hashtable2.h"
99
100 typedef struct {
101 SEL name; // same layout as struct old_method
102 void *unused;
103 IMP imp; // same layout as struct old_method
104 } cache_entry;
105
106
107 /* When _class_slow_grow is non-zero, any given cache is actually grown
108 * only on the odd-numbered times it becomes full; on the even-numbered
109 * times, it is simply emptied and re-used. When this flag is zero,
110 * caches are grown every time. */
111 static const int _class_slow_grow = 1;
112
113 /* For min cache size: clear_cache=1, slow_grow=1
114 For max cache size: clear_cache=0, slow_grow=0 */
115
116 /* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
117 enum {
118 INIT_CACHE_SIZE_LOG2 = 2,
119 INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2)
120 };
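/* Worked example of the growth policy (derived from _cache_create,
 * _cache_expand and the 3/4-full check in _cache_fill below, with the
 * default _class_slow_grow = 1): the first fill replaces the empty cache
 * with a 4-slot cache; an expansion is triggered whenever an insertion
 * would leave the cache more than 3/4 full (the 4th entry for 4 slots,
 * the 7th for 8 slots). Successive expansions then alternate:
 * 4 slots -> emptied and reused -> 8 slots -> reused -> 16 slots -> ... */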
121
122
123 /* Amount of space required for `count` hash table buckets, knowing that
124 * one entry is embedded in the cache structure itself. */
125 #define TABLE_SIZE(count) ((count - 1) * sizeof(cache_entry *))
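/* Example: TABLE_SIZE(INIT_CACHE_SIZE) is 3 * sizeof(cache_entry *); the
 * fourth bucket is the one already embedded in struct objc_cache. */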
126
127
128 #if !TARGET_OS_WIN32
129 # define CACHE_ALLOCATOR
130 #endif
131
132 /* Custom cache allocator parameters.
133 * CACHE_REGION_SIZE must be a multiple of CACHE_QUANTUM. */
134 #define CACHE_ALLOCATOR_MIN 512
135 #define CACHE_QUANTUM (CACHE_ALLOCATOR_MIN+sizeof(struct objc_cache)-sizeof(cache_entry*))
136 #define CACHE_REGION_SIZE ((128*1024 / CACHE_QUANTUM) * CACHE_QUANTUM)
137 // #define CACHE_REGION_SIZE ((256*1024 / CACHE_QUANTUM) * CACHE_QUANTUM)
138
139 static uintptr_t cache_allocator_mask_for_size(size_t size)
140 {
141 return (size - sizeof(struct objc_cache)) / sizeof(cache_entry *);
142 }
143
144 static size_t cache_allocator_size_for_mask(uintptr_t mask)
145 {
146 size_t requested = sizeof(struct objc_cache) + TABLE_SIZE(mask+1);
147 size_t actual = CACHE_QUANTUM;
148 while (actual < requested) actual += CACHE_QUANTUM;
149 return actual;
150 }
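/* Worked sizes, assuming the 32-bit layout this allocator targets
 * (sizeof(struct objc_cache) == 12, sizeof(cache_entry *) == 4):
 *   CACHE_QUANTUM     = 512 + 12 - 4         = 520 bytes
 *   CACHE_REGION_SIZE = (131072 / 520) * 520 = 131040 bytes (252 quanta)
 *   128 slots: requested 12 + 127*4 =  520 -> allocated  520 (1 quantum)
 *   256 slots: requested 12 + 255*4 = 1032 -> allocated 1040 (2 quanta)
 * which matches the ideal/allocator size table in the allocator comment
 * further below. */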
151
152
153 /* Cache instrumentation data. Immediately follows the cache block itself. */
154 #ifdef OBJC_INSTRUMENTED
155 typedef struct
156 {
157 unsigned int hitCount; // cache lookup success tally
158 unsigned int hitProbes; // sum entries checked to hit
159 unsigned int maxHitProbes; // max entries checked to hit
160 unsigned int missCount; // cache lookup no-find tally
161 unsigned int missProbes; // sum entries checked to miss
162 unsigned int maxMissProbes; // max entries checked to miss
163 unsigned int flushCount; // cache flush tally
164 unsigned int flushedEntries; // sum cache entries flushed
165 unsigned int maxFlushedEntries; // max cache entries flushed
166 } CacheInstrumentation;
167
168 #define CACHE_INSTRUMENTATION(cache) (CacheInstrumentation *) &cache->buckets[cache->mask + 1];
169 #endif
170
171 /* Cache filling and flushing instrumentation */
172
173 static int totalCacheFills = 0;
174
175 #ifdef OBJC_INSTRUMENTED
176 unsigned int LinearFlushCachesCount = 0;
177 unsigned int LinearFlushCachesVisitedCount = 0;
178 unsigned int MaxLinearFlushCachesVisitedCount = 0;
179 unsigned int NonlinearFlushCachesCount = 0;
180 unsigned int NonlinearFlushCachesClassCount = 0;
181 unsigned int NonlinearFlushCachesVisitedCount = 0;
182 unsigned int MaxNonlinearFlushCachesVisitedCount = 0;
183 unsigned int IdealFlushCachesCount = 0;
184 unsigned int MaxIdealFlushCachesCount = 0;
185 #endif
186
187
188 /***********************************************************************
189 * A static empty cache. All classes initially point at this cache.
190 * When the first message is sent it misses in the cache, and when
191 * the cache is grown it checks for this case and uses malloc rather
192 * than realloc. This avoids the need to check for NULL caches in the
193 * messenger.
194 ***********************************************************************/
195
196 struct objc_cache _objc_empty_cache =
197 {
198 0, // mask
199 0, // occupied
200 { NULL } // buckets
201 };
202 #ifdef OBJC_INSTRUMENTED
203 CacheInstrumentation emptyCacheInstrumentation = {0};
204 #endif
205
206
207 /* Local prototypes */
208
209 static bool _cache_isEmpty(Cache cache);
210 static Cache _cache_malloc(uintptr_t slotCount);
211 static Cache _cache_create(Class cls);
212 static Cache _cache_expand(Class cls);
213
214 static int _collecting_in_critical(void);
215 static void _garbage_make_room(void);
216 static void _cache_collect_free(void *data, size_t size);
217
218 #if defined(CACHE_ALLOCATOR)
219 static bool cache_allocator_is_block(void *block);
220 static Cache cache_allocator_calloc(size_t size);
221 static void cache_allocator_free(void *block);
222 #endif
223
224 /***********************************************************************
225 * Cache statistics for OBJC_PRINT_CACHE_SETUP
226 **********************************************************************/
227 static unsigned int cache_counts[16];
228 static size_t cache_allocations;
229 static size_t cache_collections;
230 static size_t cache_allocator_regions;
231
232 static size_t log2u(size_t x)
233 {
234 unsigned int log;
235
236 log = 0;
237 while (x >>= 1)
238 log += 1;
239
240 return log;
241 }
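/* log2u() is floor(log2(x)) for x >= 1 (log2u(0) returns 0), e.g.
 * log2u(4) == 2, log2u(7) == 2, log2u(8) == 3. It is used to bucket
 * power-of-two cache sizes for the PrintCaches statistics below. */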
242
243
244 /***********************************************************************
245 * _cache_isEmpty.
246 * Returns YES if the given cache is some empty cache.
247 * Empty caches should never be allocated on the heap.
248 **********************************************************************/
249 static bool _cache_isEmpty(Cache cache)
250 {
251 return (cache == NULL || cache == (Cache)&_objc_empty_cache || cache->mask == 0);
252 }
253
254
255 /***********************************************************************
256 * _cache_malloc.
257 *
258 * Called from _cache_create() and cache_expand()
259 * Cache locks: cacheUpdateLock must be held by the caller.
260 **********************************************************************/
261 static Cache _cache_malloc(uintptr_t slotCount)
262 {
263 Cache new_cache;
264 size_t size;
265
266 cacheUpdateLock.assertLocked();
267
268 // Allocate table (why not check for failure?)
269 size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount);
270 #if defined(OBJC_INSTRUMENTED)
271 // Custom cache allocator can't handle instrumentation.
272 size += sizeof(CacheInstrumentation);
273 new_cache = (Cache)calloc(size, 1);
274 new_cache->mask = slotCount - 1;
275 #elif !defined(CACHE_ALLOCATOR)
276 // fixme cache allocator implementation isn't 64-bit clean
277 new_cache = (Cache)calloc(size, 1);
278 new_cache->mask = (unsigned int)(slotCount - 1);
279 #else
280 if (size < CACHE_ALLOCATOR_MIN) {
281 new_cache = (Cache)calloc(size, 1);
282 new_cache->mask = slotCount - 1;
283 // occupied and buckets and instrumentation are all zero
284 } else {
285 new_cache = cache_allocator_calloc(size);
286 // mask is already set
287 // occupied and buckets and instrumentation are all zero
288 }
289 #endif
290
291 if (PrintCaches) {
292 size_t bucket = log2u(slotCount);
293 if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
294 cache_counts[bucket]++;
295 }
296 cache_allocations++;
297 }
298
299 return new_cache;
300 }
301
302 /***********************************************************************
303 * _cache_free_block.
304 *
305 * Called from _cache_free() and _cache_collect_free().
306 * block may be a cache or a forward:: entry.
307 * If block is a cache, forward:: entries it points to will NOT be freed.
308 * Cache locks: cacheUpdateLock must be held by the caller.
309 **********************************************************************/
310 static inline int isPowerOf2(unsigned long l) { return 1 == __builtin_popcountl(l); }
311 static void _cache_free_block(void *block)
312 {
313 cacheUpdateLock.assertLocked();
314
315 #if !TARGET_OS_WIN32
316 if (PrintCaches) {
317 Cache cache = (Cache)block;
318 size_t slotCount = cache->mask + 1;
319 if (isPowerOf2(slotCount)) {
320 size_t bucket = log2u(slotCount);
321 if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
322 cache_counts[bucket]--;
323 }
324 }
325 }
326 #endif
327
328 #if defined(CACHE_ALLOCATOR)
329 if (cache_allocator_is_block(block)) {
330 cache_allocator_free(block);
331 } else
332 #endif
333 {
334 free(block);
335 }
336 }
337
338
339 /***********************************************************************
340 * _cache_free.
341 *
342 * Called from _objc_remove_classes_in_image().
343 * forward:: entries in the cache ARE freed.
344 * Cache locks: cacheUpdateLock must NOT be held by the caller.
345 **********************************************************************/
346 void _cache_free(Cache cache)
347 {
348 unsigned int i;
349
350 mutex_locker_t lock(cacheUpdateLock);
351
352 for (i = 0; i < cache->mask + 1; i++) {
353 cache_entry *entry = (cache_entry *)cache->buckets[i];
354 if (entry && entry->imp == _objc_msgForward_impcache) {
355 _cache_free_block(entry);
356 }
357 }
358
359 _cache_free_block(cache);
360 }
361
362
363 /***********************************************************************
364 * _cache_create.
365 *
366 * Called from _cache_expand().
367 * Cache locks: cacheUpdateLock must be held by the caller.
368 **********************************************************************/
369 static Cache _cache_create(Class cls)
370 {
371 Cache new_cache;
372
373 cacheUpdateLock.assertLocked();
374
375 // Allocate new cache block
376 new_cache = _cache_malloc(INIT_CACHE_SIZE);
377
378 // Install the cache
379 cls->cache = new_cache;
380
381 // Clear the grow flag so that we will re-use the current storage,
382 // rather than actually grow the cache, when expanding the cache
383 // for the first time
384 if (_class_slow_grow) {
385 cls->setShouldGrowCache(false);
386 }
387
388 // Return our creation
389 return new_cache;
390 }
391
392
393 /***********************************************************************
394 * _cache_expand.
395 *
396 * Called from _cache_fill ()
397 * Cache locks: cacheUpdateLock must be held by the caller.
398 **********************************************************************/
399 static Cache _cache_expand(Class cls)
400 {
401 Cache old_cache;
402 Cache new_cache;
403 uintptr_t slotCount;
404 uintptr_t index;
405
406 cacheUpdateLock.assertLocked();
407
408 // First growth goes from empty cache to a real one
409 old_cache = cls->cache;
410 if (_cache_isEmpty(old_cache))
411 return _cache_create (cls);
412
413 if (_class_slow_grow) {
414 // Cache grows every other time only.
415 if (cls->shouldGrowCache()) {
416 // Grow the cache this time. Don't grow next time.
417 cls->setShouldGrowCache(false);
418 }
419 else {
420 // Reuse the current cache storage this time. Do grow next time.
421 cls->setShouldGrowCache(true);
422
423 // Clear the valid-entry counter
424 old_cache->occupied = 0;
425
426 // Invalidate all the cache entries
427 for (index = 0; index < old_cache->mask + 1; index += 1)
428 {
429 // Remember what this entry was, so we can possibly
430 // deallocate it after the bucket has been invalidated
431 cache_entry *oldEntry = (cache_entry *)old_cache->buckets[index];
432
433 // Skip invalid entry
434 if (!oldEntry)
435 continue;
436
437 // Invalidate this entry
438 old_cache->buckets[index] = NULL;
439
440 // Deallocate "forward::" entry
441 if (oldEntry->imp == _objc_msgForward_impcache) {
442 _cache_collect_free (oldEntry, sizeof(cache_entry));
443 }
444 }
445
446 // Return the same old cache, freshly emptied
447 return old_cache;
448 }
449 }
450
451 // Double the cache size
452 slotCount = (old_cache->mask + 1) << 1;
453
454 new_cache = _cache_malloc(slotCount);
455
456 #ifdef OBJC_INSTRUMENTED
457 // Propagate the instrumentation data
458 {
459 CacheInstrumentation *oldCacheData;
460 CacheInstrumentation *newCacheData;
461
462 oldCacheData = CACHE_INSTRUMENTATION(old_cache);
463 newCacheData = CACHE_INSTRUMENTATION(new_cache);
464 bcopy ((const char *)oldCacheData, (char *)newCacheData, sizeof(CacheInstrumentation));
465 }
466 #endif
467
468 // Deallocate "forward::" entries from the old cache
469 for (index = 0; index < old_cache->mask + 1; index++) {
470 cache_entry *entry = (cache_entry *)old_cache->buckets[index];
471 if (entry && entry->imp == _objc_msgForward_impcache) {
472 _cache_collect_free (entry, sizeof(cache_entry));
473 }
474 }
475
476 // Install new cache
477 cls->cache = new_cache;
478
479 // Deallocate old cache, try freeing all the garbage
480 _cache_collect_free (old_cache, old_cache->mask * sizeof(cache_entry *));
481 _cache_collect(false);
482
483 return new_cache;
484 }
485
486
487 /***********************************************************************
488 * _cache_fill. Add the specified method to the specified class' cache.
489 * Returns NO if the cache entry wasn't added: cache was busy,
490 * class is still being initialized, new entry is a duplicate.
491 *
492 * Called only from _class_lookupMethodAndLoadCache and
493 * class_respondsToMethod and _cache_addForwardEntry.
494 *
495 * Cache locks: cacheUpdateLock must not be held.
496 **********************************************************************/
497 bool _cache_fill(Class cls, Method smt, SEL sel)
498 {
499 uintptr_t newOccupied;
500 uintptr_t index;
501 cache_entry **buckets;
502 cache_entry *entry;
503 Cache cache;
504
505 cacheUpdateLock.assertUnlocked();
506
507 // Never cache before +initialize is done
508 if (!cls->isInitialized()) {
509 return NO;
510 }
511
512 // Keep tally of cache additions
513 totalCacheFills += 1;
514
515 mutex_locker_t lock(cacheUpdateLock);
516
517 entry = (cache_entry *)smt;
518
519 cache = cls->cache;
520
521 // Make sure the entry wasn't added to the cache by some other thread
522 // before we grabbed the cacheUpdateLock.
523 // Don't use _cache_getMethod() because _cache_getMethod() doesn't
524 // return forward:: entries.
525 if (_cache_getImp(cls, sel)) {
526 return NO; // entry is already cached, didn't add new one
527 }
528
529 // Use the cache as-is if it is less than 3/4 full
530 newOccupied = cache->occupied + 1;
531 if ((newOccupied * 4) <= (cache->mask + 1) * 3) {
532 // Cache is less than 3/4 full.
533 cache->occupied = (unsigned int)newOccupied;
534 } else {
535 // Cache is too full. Expand it.
536 cache = _cache_expand (cls);
537
538 // Account for the addition
539 cache->occupied += 1;
540 }
541
542 // Scan for the first unused slot and insert there.
543 // There is guaranteed to be an empty slot because the
544 // minimum size is 4 and we resized at 3/4 full.
545 buckets = (cache_entry **)cache->buckets;
546 for (index = CACHE_HASH(sel, cache->mask);
547 buckets[index] != NULL;
548 index = (index+1) & cache->mask)
549 {
550 // empty
551 }
552 buckets[index] = entry;
553
554 return YES; // successfully added new cache entry
555 }
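/* Reader-side sketch (hypothetical; the real readers are the assembly
 * objc_msgSend family and _cache_getImp, which are PC-checked rather than
 * locked). The probe sequence mirrors the writer loop above and assumes
 * CACHE_HASH is the same hash the writer used. */
#if 0
static IMP example_cache_lookup(Class cls, SEL sel)
{
    Cache cache = cls->cache;           // possibly _objc_empty_cache
    cache_entry **buckets = (cache_entry **)cache->buckets;
    uintptr_t index;

    for (index = CACHE_HASH(sel, cache->mask);
         buckets[index] != NULL;
         index = (index+1) & cache->mask)
    {
        if (buckets[index]->name == sel) return buckets[index]->imp;
    }
    return NULL;  // miss: caller falls back to the method lists
}
#endif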
556
557
558 /***********************************************************************
559 * _cache_addForwardEntry
560 * Add a forward:: entry for the given selector to cls's method cache.
561 * Does nothing if the cache addition fails for any reason.
562 * Called from class_respondsToMethod and _class_lookupMethodAndLoadCache.
563 * Cache locks: cacheUpdateLock must not be held.
564 **********************************************************************/
565 void _cache_addForwardEntry(Class cls, SEL sel)
566 {
567 cache_entry *smt;
568
569 smt = (cache_entry *)malloc(sizeof(cache_entry));
570 smt->name = sel;
571 smt->imp = _objc_msgForward_impcache;
572 if (! _cache_fill(cls, (Method)smt, sel)) { // fixme hack
573 // Entry not added to cache. Don't leak the method struct.
574 free(smt);
575 }
576 }
577
578
579 /***********************************************************************
580 * _cache_flush. Invalidate all valid entries in the given class' cache.
581 *
582 * Called from flush_caches() and _cache_fill()
583 * Cache locks: cacheUpdateLock must be held by the caller.
584 **********************************************************************/
585 void _cache_flush(Class cls)
586 {
587 Cache cache;
588 unsigned int index;
589
590 cacheUpdateLock.assertLocked();
591
592 // Locate cache. Ignore unused cache.
593 cache = cls->cache;
594 if (_cache_isEmpty(cache)) return;
595
596 #ifdef OBJC_INSTRUMENTED
597 {
598 CacheInstrumentation *cacheData;
599
600 // Tally this flush
601 cacheData = CACHE_INSTRUMENTATION(cache);
602 cacheData->flushCount += 1;
603 cacheData->flushedEntries += cache->occupied;
604 if (cache->occupied > cacheData->maxFlushedEntries)
605 cacheData->maxFlushedEntries = cache->occupied;
606 }
607 #endif
608
609 // Traverse the cache
610 for (index = 0; index <= cache->mask; index += 1)
611 {
612 // Remember what this entry was, so we can possibly
613 // deallocate it after the bucket has been invalidated
614 cache_entry *oldEntry = (cache_entry *)cache->buckets[index];
615
616 // Invalidate this entry
617 cache->buckets[index] = NULL;
618
619 // Deallocate "forward::" entry
620 if (oldEntry && oldEntry->imp == _objc_msgForward_impcache)
621 _cache_collect_free (oldEntry, sizeof(cache_entry));
622 }
623
624 // Clear the valid-entry counter
625 cache->occupied = 0;
626 }
627
628
629 /***********************************************************************
630 * flush_cache. Flushes the instance method cache for class cls only.
631 * Use flush_caches() if cls might have in-use subclasses.
632 **********************************************************************/
633 void flush_cache(Class cls)
634 {
635 if (cls) {
636 mutex_locker_t lock(cacheUpdateLock);
637 _cache_flush(cls);
638 }
639 }
640
641
642 /***********************************************************************
643 * cache collection.
644 **********************************************************************/
645
646 #if !TARGET_OS_WIN32
647
648 // A sentinel (magic value) to report bad thread_get_state status.
649 // Must not be a valid PC.
650 // Must not be zero - thread_get_state() on a new thread returns PC == 0.
651 #define PC_SENTINEL 1
652
653 // UNIX03 compliance hack (4508809)
654 #if !__DARWIN_UNIX03
655 #define __srr0 srr0
656 #define __eip eip
657 #endif
658
659 static uintptr_t _get_pc_for_thread(thread_t thread)
660 #if defined(__i386__)
661 {
662 i386_thread_state_t state;
663 unsigned int count = i386_THREAD_STATE_COUNT;
664 kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
665 return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
666 }
667 #elif defined(__x86_64__)
668 {
669 x86_thread_state64_t state;
670 unsigned int count = x86_THREAD_STATE64_COUNT;
671 kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
672 return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
673 }
674 #elif defined(__arm__)
675 {
676 arm_thread_state_t state;
677 unsigned int count = ARM_THREAD_STATE_COUNT;
678 kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
679 return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
680 }
681 #else
682 {
683 #error _get_pc_for_thread () not implemented for this architecture
684 }
685 #endif
686
687 #endif
688
689 /***********************************************************************
690 * _collecting_in_critical.
691 * Returns TRUE if some thread is currently executing a cache-reading
692 * function. Collection of cache garbage is not allowed when a cache-
693 * reading function is in progress because it might still be using
694 * the garbage memory.
695 **********************************************************************/
696 OBJC_EXPORT uintptr_t objc_entryPoints[];
697 OBJC_EXPORT uintptr_t objc_exitPoints[];
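// These tables bracket the cache-reading routines listed in the locking
// comment at the top of this file (the objc_msgSend family plus
// _cache_getImp and _cache_getMethod); each entryPoints[i]/exitPoints[i]
// pair covers one routine, and the arrays end with a 0 entry, as the
// scan below assumes.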
698
699 static int _collecting_in_critical(void)
700 {
701 #if TARGET_OS_WIN32
702 return TRUE;
703 #else
704 thread_act_port_array_t threads;
705 unsigned number;
706 unsigned count;
707 kern_return_t ret;
708 int result;
709
710 mach_port_t mythread = pthread_mach_thread_np(pthread_self());
711
712 // Get a list of all the threads in the current task
713 ret = task_threads (mach_task_self (), &threads, &number);
714 if (ret != KERN_SUCCESS)
715 {
716 _objc_fatal("task_threads failed (result %d)\n", ret);
717 }
718
719 // Check whether any thread is in the cache lookup code
720 result = FALSE;
721 for (count = 0; count < number; count++)
722 {
723 int region;
724 uintptr_t pc;
725
726 // Don't bother checking ourselves
727 if (threads[count] == mythread)
728 continue;
729
730 // Find out where thread is executing
731 pc = _get_pc_for_thread (threads[count]);
732
733 // Check for bad status, and if so, assume the worst (can't collect)
734 if (pc == PC_SENTINEL)
735 {
736 result = TRUE;
737 goto done;
738 }
739
740 // Check whether it is in the cache lookup code
741 for (region = 0; objc_entryPoints[region] != 0; region++)
742 {
743 if ((pc >= objc_entryPoints[region]) &&
744 (pc <= objc_exitPoints[region]))
745 {
746 result = TRUE;
747 goto done;
748 }
749 }
750 }
751
752 done:
753 // Deallocate the port rights for the threads
754 for (count = 0; count < number; count++) {
755 mach_port_deallocate(mach_task_self (), threads[count]);
756 }
757
758 // Deallocate the thread list
759 vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);
760
761 // Return our finding
762 return result;
763 #endif
764 }
765
766
767 /***********************************************************************
768 * _garbage_make_room. Ensure that there is enough room for at least
769 * one more ref in the garbage.
770 **********************************************************************/
771
772 // amount of memory represented by all refs in the garbage
773 static size_t garbage_byte_size = 0;
774
775 // do not empty the garbage until garbage_byte_size gets at least this big
776 static size_t garbage_threshold = 1024;
777
778 // table of refs to free
779 static void **garbage_refs = 0;
780
781 // current number of refs in garbage_refs
782 static size_t garbage_count = 0;
783
784 // capacity of current garbage_refs
785 static size_t garbage_max = 0;
786
787 // capacity of initial garbage_refs
788 enum {
789 INIT_GARBAGE_COUNT = 128
790 };
791
792 static void _garbage_make_room(void)
793 {
794 static int first = 1;
795
796 // Create the collection table the first time it is needed
797 if (first)
798 {
799 first = 0;
800 garbage_refs = (void**)
801 malloc(INIT_GARBAGE_COUNT * sizeof(void *));
802 garbage_max = INIT_GARBAGE_COUNT;
803 }
804
805 // Double the table if it is full
806 else if (garbage_count == garbage_max)
807 {
808 garbage_refs = (void**)
809 realloc(garbage_refs, garbage_max * 2 * sizeof(void *));
810 garbage_max *= 2;
811 }
812 }
813
814
815 /***********************************************************************
816 * _cache_collect_free. Add the specified malloc'd memory to the list
817 * of garbage to free at some later point.
818 * size is used for the collection threshold. It does not have to be
819 * precisely the block's size.
820 * Cache locks: cacheUpdateLock must be held by the caller.
821 **********************************************************************/
822 static void _cache_collect_free(void *data, size_t size)
823 {
824 cacheUpdateLock.assertLocked();
825
826 _garbage_make_room ();
827 garbage_byte_size += size;
828 garbage_refs[garbage_count++] = data;
829 }
830
831
832 /***********************************************************************
833 * _cache_collect. Try to free accumulated dead caches.
834 * collectALot tries harder to free memory.
835 * Cache locks: cacheUpdateLock must be held by the caller.
836 **********************************************************************/
837 void _cache_collect(bool collectALot)
838 {
839 cacheUpdateLock.assertLocked();
840
841 // Done if the garbage is not full
842 if (garbage_byte_size < garbage_threshold && !collectALot) {
843 return;
844 }
845
846 // Synchronize collection with objc_msgSend and other cache readers
847 if (!collectALot) {
848 if (_collecting_in_critical ()) {
849 // objc_msgSend (or other cache reader) is currently looking in
850 // the cache and might still be using some garbage.
851 if (PrintCaches) {
852 _objc_inform ("CACHES: not collecting; "
853 "objc_msgSend in progress");
854 }
855 return;
856 }
857 }
858 else {
859 // No excuses.
860 while (_collecting_in_critical())
861 ;
862 }
863
864 // No cache readers in progress - garbage is now deletable
865
866 // Log our progress
867 if (PrintCaches) {
868 cache_collections++;
869 _objc_inform ("CACHES: COLLECTING %zu bytes (%zu regions, %zu allocations, %zu collections)", garbage_byte_size, cache_allocator_regions, cache_allocations, cache_collections);
870 }
871
872 // Dispose all refs now in the garbage
873 while (garbage_count--) {
874 _cache_free_block(garbage_refs[garbage_count]);
875 }
876
877 // Clear the garbage count and total size indicator
878 garbage_count = 0;
879 garbage_byte_size = 0;
880
881 if (PrintCaches) {
882 size_t i;
883 size_t total = 0;
884 size_t ideal_total = 0;
885 size_t malloc_total = 0;
886 size_t local_total = 0;
887
888 for (i = 0; i < sizeof(cache_counts) / sizeof(cache_counts[0]); i++) {
889 int count = cache_counts[i];
890 int slots = 1 << i;
891 size_t size = sizeof(struct objc_cache) + TABLE_SIZE(slots);
892 size_t ideal = size;
893 #if TARGET_OS_WIN32
894 size_t malloc = size;
895 #else
896 size_t malloc = malloc_good_size(size);
897 #endif
898 size_t local = size < CACHE_ALLOCATOR_MIN ? malloc : cache_allocator_size_for_mask(cache_allocator_mask_for_size(size));
899
900 if (!count) continue;
901
902 _objc_inform("CACHES: %4d slots: %4d caches, %6zu / %6zu / %6zu bytes ideal/malloc/local, %6zu / %6zu bytes wasted malloc/local", slots, count, ideal*count, malloc*count, local*count, malloc*count-ideal*count, local*count-ideal*count);
903
904 total += count;
905 ideal_total += ideal*count;
906 malloc_total += malloc*count;
907 local_total += local*count;
908 }
909
910 _objc_inform("CACHES: total: %4zu caches, %6zu / %6zu / %6zu bytes ideal/malloc/local, %6zu / %6zu bytes wasted malloc/local", total, ideal_total, malloc_total, local_total, malloc_total-ideal_total, local_total-ideal_total);
911 }
912 }
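/* Usage sketch (hypothetical caller, mirroring what _cache_expand above
 * already does when it retires an old cache): */
#if 0
static void example_retire_cache(Cache dead)
{
    mutex_locker_t lock(cacheUpdateLock);
    // Queue the block; readers running right now may still be using it.
    _cache_collect_free(dead, (dead->mask + 1) * sizeof(cache_entry *));
    // Free the queue only if no thread is inside the cache-reading code.
    _cache_collect(false);
}
#endif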
913
914
915
916
917
918 #if defined(CACHE_ALLOCATOR)
919
920 /***********************************************************************
921 * Custom method cache allocator.
922 * Method cache block sizes are 2^slots+2 words, which is a pessimal
923 * case for the system allocator. It wastes 504 bytes per cache block
924 * with 128 or more slots, which adds up to tens of KB for an AppKit process.
925 * To save memory, the custom cache allocator below is used.
926 *
927 * The cache allocator uses 128 KB allocation regions. Few processes will
928 * require a second region. Within a region, allocation is address-ordered
929 * first fit.
930 *
931 * The cache allocator uses a quantum of 520.
932 * Cache block ideal sizes: 520, 1032, 2056, 4104
933 * Cache allocator sizes: 520, 1040, 2080, 4160
934 *
935 * Because all blocks are known to be genuine method caches, the ordinary
936 * cache->mask and cache->occupied fields are used as block headers.
937 * No out-of-band headers are maintained. The number of blocks will
938 * almost always be fewer than 200, so for simplicity there is no free
939 * list or other optimization.
940 *
941 * Block in use: mask != 0, occupied != -1 (mask indicates block size)
942 * Block free: mask != 0, occupied == -1 (mask is precisely block size)
943 *
944 * No cache allocator functions take any locks. Instead, the caller
945 * must hold the cacheUpdateLock.
946 *
947 * fixme with 128 KB regions and 520 B min block size, an allocation
948 * bitmap would be only 32 bytes - better than free list?
949 **********************************************************************/
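/* Arithmetic behind the fixme above: a region is (131072/520)*520 = 131040
 * bytes, i.e. at most 252 quantum-sized blocks, so a one-bit-per-quantum
 * allocation bitmap would be 252 bits, about 32 bytes. The block header
 * below reuses the cache header fields: size overlays Cache->mask and
 * state overlays Cache->occupied (see cache_region_calloc). */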
950
951 typedef struct cache_allocator_block {
952 uintptr_t size;
953 uintptr_t state;
954 struct cache_allocator_block *nextFree;
955 } cache_allocator_block;
956
957 typedef struct cache_allocator_region {
958 cache_allocator_block *start;
959 cache_allocator_block *end; // first non-block address
960 cache_allocator_block *freeList;
961 struct cache_allocator_region *next;
962 } cache_allocator_region;
963
964 static cache_allocator_region *cacheRegion = NULL;
965
966
967 /***********************************************************************
968 * cache_allocator_add_region
969 * Allocates and returns a new region that can hold at least size
970 * bytes of large method caches.
971 * The actual size will be rounded up to a CACHE_QUANTUM boundary,
972 * with a minimum of CACHE_REGION_SIZE.
973 * The new region is lowest-priority for new allocations. Callers that
974 * know the other regions are already full should allocate directly
975 * into the returned region.
976 **********************************************************************/
977 static cache_allocator_region *cache_allocator_add_region(size_t size)
978 {
979 vm_address_t addr;
980 cache_allocator_block *b;
981 cache_allocator_region **rgnP;
982 cache_allocator_region *newRegion = (cache_allocator_region *)
983 calloc(1, sizeof(cache_allocator_region));
984
985 // Round size up to quantum boundary, and apply the minimum size.
986 size += CACHE_QUANTUM - (size % CACHE_QUANTUM);
987 if (size < CACHE_REGION_SIZE) size = CACHE_REGION_SIZE;
988
989 // Allocate the region
990 addr = (vm_address_t)calloc(size, 1);
991 newRegion->start = (cache_allocator_block *)addr;
992 newRegion->end = (cache_allocator_block *)(addr + size);
993
994 // Mark the first block: free and covers the entire region
995 b = newRegion->start;
996 b->size = size;
997 b->state = (uintptr_t)-1;
998 b->nextFree = NULL;
999 newRegion->freeList = b;
1000
1001 // Add to end of the linked list of regions.
1002 // Other regions should be re-used before this one is touched.
1003 newRegion->next = NULL;
1004 rgnP = &cacheRegion;
1005 while (*rgnP) {
1006 rgnP = &(**rgnP).next;
1007 }
1008 *rgnP = newRegion;
1009
1010 cache_allocator_regions++;
1011
1012 return newRegion;
1013 }
1014
1015
1016 /***********************************************************************
1017 * cache_allocator_coalesce
1018 * Attempts to coalesce a free block with the single free block following
1019 * it in the free list, if any.
1020 **********************************************************************/
1021 static void cache_allocator_coalesce(cache_allocator_block *block)
1022 {
1023 if (block->size + (uintptr_t)block == (uintptr_t)block->nextFree) {
1024 block->size += block->nextFree->size;
1025 block->nextFree = block->nextFree->nextFree;
1026 }
1027 }
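/* Example: if a free block at address A has size 1040 and its nextFree
 * starts exactly at A+1040, the two merge into one free block whose size
 * is the sum; non-adjacent free blocks are left alone, so each call
 * performs at most one merge. */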
1028
1029
1030 /***********************************************************************
1031 * cache_region_calloc
1032 * Attempt to allocate a size-byte block in the given region.
1033 * Allocation is first-fit. The free list is already fully coalesced.
1034 * Returns NULL if there is not enough room in the region for the block.
1035 **********************************************************************/
1036 static void *cache_region_calloc(cache_allocator_region *rgn, size_t size)
1037 {
1038 cache_allocator_block **blockP;
1039 uintptr_t mask;
1040
1041 // Save mask for allocated block, then round size
1042 // up to CACHE_QUANTUM boundary
1043 mask = cache_allocator_mask_for_size(size);
1044 size = cache_allocator_size_for_mask(mask);
1045
1046 // Search the free list for a sufficiently large free block.
1047
1048 for (blockP = &rgn->freeList;
1049 *blockP != NULL;
1050 blockP = &(**blockP).nextFree)
1051 {
1052 cache_allocator_block *block = *blockP;
1053 if (block->size < size) continue; // not big enough
1054
1055 // block is now big enough. Allocate from it.
1056
1057 // Slice off unneeded fragment of block, if any,
1058 // and reconnect the free list around block.
1059 if (block->size - size >= CACHE_QUANTUM) {
1060 cache_allocator_block *leftover =
1061 (cache_allocator_block *)(size + (uintptr_t)block);
1062 leftover->size = block->size - size;
1063 leftover->state = (uintptr_t)-1;
1064 leftover->nextFree = block->nextFree;
1065 *blockP = leftover;
1066 } else {
1067 *blockP = block->nextFree;
1068 }
1069
1070 // block is now exactly the right size.
1071
1072 bzero(block, size);
1073 block->size = mask; // Cache->mask
1074 block->state = 0; // Cache->occupied
1075
1076 return block;
1077 }
1078
1079 // No room in this region.
1080 return NULL;
1081 }
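/* Worked example: a 1040-byte request carved from a 2080-byte free block
 * leaves a 1040-byte leftover that takes the original block's place in the
 * free list. Since every block size here is a multiple of CACHE_QUANTUM,
 * the no-split branch is taken only on an exact fit. */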
1082
1083
1084 /***********************************************************************
1085 * cache_allocator_calloc
1086 * Custom allocator for large method caches (128+ slots)
1087 * The returned cache block already has cache->mask set.
1088 * cache->occupied and the cache contents are zero.
1089 * Cache locks: cacheUpdateLock must be held by the caller
1090 **********************************************************************/
1091 static Cache cache_allocator_calloc(size_t size)
1092 {
1093 cache_allocator_region *rgn;
1094
1095 cacheUpdateLock.assertLocked();
1096
1097 for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
1098 void *p = cache_region_calloc(rgn, size);
1099 if (p) {
1100 return (Cache)p;
1101 }
1102 }
1103
1104 // No regions or all regions full - make a region and try one more time
1105 // In the unlikely case of a cache bigger than a region, it will get its own region, sized to fit.
1106 return (Cache)cache_region_calloc(cache_allocator_add_region(size), size);
1107 }
1108
1109
1110 /***********************************************************************
1111 * cache_allocator_region_for_block
1112 * Returns the cache allocator region that ptr points into, or NULL.
1113 **********************************************************************/
1114 static cache_allocator_region *cache_allocator_region_for_block(cache_allocator_block *block)
1115 {
1116 cache_allocator_region *rgn;
1117 for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
1118 if (block >= rgn->start && block < rgn->end) return rgn;
1119 }
1120 return NULL;
1121 }
1122
1123
1124 /***********************************************************************
1125 * cache_allocator_is_block
1126 * If ptr is a live block from the cache allocator, return YES
1127 * If ptr is a block from some other allocator, return NO.
1128 * If ptr is a dead block from the cache allocator, result is undefined.
1129 * Cache locks: cacheUpdateLock must be held by the caller
1130 **********************************************************************/
1131 static bool cache_allocator_is_block(void *ptr)
1132 {
1133 cacheUpdateLock.assertLocked();
1134 return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL);
1135 }
1136
1137 /***********************************************************************
1138 * cache_allocator_free
1139 * Frees a block allocated by the cache allocator.
1140 * Cache locks: cacheUpdateLock must be held by the caller.
1141 **********************************************************************/
1142 static void cache_allocator_free(void *ptr)
1143 {
1144 cache_allocator_block *dead = (cache_allocator_block *)ptr;
1145 cache_allocator_block *cur;
1146 cache_allocator_region *rgn;
1147
1148 cacheUpdateLock.assertLocked();
1149
1150 if (! (rgn = cache_allocator_region_for_block(dead))) {
1151 // free of non-pointer
1152 _objc_inform("cache_allocator_free of non-pointer %p", dead);
1153 return;
1154 }
1155
1156 dead->size = cache_allocator_size_for_mask(dead->size);
1157 dead->state = (uintptr_t)-1;
1158
1159 if (!rgn->freeList || rgn->freeList > dead) {
1160 // dead block belongs at front of free list
1161 dead->nextFree = rgn->freeList;
1162 rgn->freeList = dead;
1163 cache_allocator_coalesce(dead);
1164 return;
1165 }
1166
1167 // dead block belongs in the middle or end of free list
1168 for (cur = rgn->freeList; cur != NULL; cur = cur->nextFree) {
1169 cache_allocator_block *ahead = cur->nextFree;
1170
1171 if (!ahead || ahead > dead) {
1172 // cur and ahead straddle dead, OR dead belongs at end of free list
1173 cur->nextFree = dead;
1174 dead->nextFree = ahead;
1175
1176 // coalesce into dead first in case both succeed
1177 cache_allocator_coalesce(dead);
1178 cache_allocator_coalesce(cur);
1179 return;
1180 }
1181 }
1182
1183 // uh-oh
1184 _objc_inform("cache_allocator_free of non-pointer %p", ptr);
1185 }
1186
1187 // defined(CACHE_ALLOCATOR)
1188 #endif
1189
1190 /***********************************************************************
1191 * Cache instrumentation and debugging
1192 **********************************************************************/
1193
1194 #ifdef OBJC_INSTRUMENTED
1195 enum {
1196 CACHE_HISTOGRAM_SIZE = 512
1197 };
1198
1199 unsigned int CacheHitHistogram [CACHE_HISTOGRAM_SIZE];
1200 unsigned int CacheMissHistogram [CACHE_HISTOGRAM_SIZE];
1201 #endif
1202
1203
1204 /***********************************************************************
1205 * _cache_print.
1206 **********************************************************************/
1207 static void _cache_print(Cache cache)
1208 {
1209 uintptr_t index;
1210 uintptr_t count;
1211
1212 count = cache->mask + 1;
1213 for (index = 0; index < count; index += 1) {
1214 cache_entry *entry = (cache_entry *)cache->buckets[index];
1215 if (entry) {
1216 if (entry->imp == _objc_msgForward_impcache)
1217 printf ("does not recognize: \n");
1218 printf ("%s\n", sel_getName(entry->name));
1219 }
1220 }
1221 }
1222
1223
1224 /***********************************************************************
1225 * _class_printMethodCaches.
1226 **********************************************************************/
1227 void _class_printMethodCaches(Class cls)
1228 {
1229 if (_cache_isEmpty(cls->cache)) {
1230 printf("no instance-method cache for class %s\n",cls->nameForLogging());
1231 } else {
1232 printf("instance-method cache for class %s:\n", cls->nameForLogging());
1233 _cache_print(cls->cache);
1234 }
1235
1236 if (_cache_isEmpty(cls->ISA()->cache)) {
1237 printf("no class-method cache for class %s\n", cls->nameForLogging());
1238 } else {
1239 printf ("class-method cache for class %s:\n", cls->nameForLogging());
1240 _cache_print(cls->ISA()->cache);
1241 }
1242 }
1243
1244
1245 #if 0
1246 #warning fixme
1247
1248
1249 /***********************************************************************
1250 * _class_printDuplicateCacheEntries.
1251 **********************************************************************/
1252 void _class_printDuplicateCacheEntries(bool detail)
1253 {
1254 NXHashState state;
1255 Class cls;
1256 unsigned int duplicates;
1257 unsigned int index1;
1258 unsigned int index2;
1259 unsigned int mask;
1260 unsigned int count;
1261 unsigned int isMeta;
1262 Cache cache;
1263
1264
1265 printf ("Checking for duplicate cache entries \n");
1266
1267 // Outermost loop - iterate over all classes
1268 state = NXInitHashState (class_hash);
1269 duplicates = 0;
1270 while (NXNextHashState (class_hash, &state, (void **) &cls))
1271 {
1272 // Control loop - do given class' cache, then its isa's cache
1273 for (isMeta = 0; isMeta <= 1; isMeta += 1)
1274 {
1275 // Select cache of interest and make sure it exists
1276 cache = (isMeta ? cls->ISA() : cls)->cache;
1277 if (_cache_isEmpty(cache))
1278 continue;
1279
1280 // Middle loop - check each entry in the given cache
1281 mask = cache->mask;
1282 count = mask + 1;
1283 for (index1 = 0; index1 < count; index1 += 1)
1284 {
1285 // Skip invalid entry
1286 if (!cache->buckets[index1])
1287 continue;
1288
1289 // Inner loop - check that given entry matches no later entry
1290 for (index2 = index1 + 1; index2 < count; index2 += 1)
1291 {
1292 // Skip invalid entry
1293 if (!cache->buckets[index2])
1294 continue;
1295
1296 // Check for duplication by method name comparison
1297 if (strcmp ((char *) cache->buckets[index1]->name,
1298 (char *) cache->buckets[index2]->name) == 0)
1299 {
1300 if (detail)
1301 printf ("%s %s\n", cls->nameForLogging(), sel_getName(cache->buckets[index1]->name));
1302 duplicates += 1;
1303 break;
1304 }
1305 }
1306 }
1307 }
1308 }
1309
1310 // Log the findings
1311 printf ("duplicates = %d\n", duplicates);
1312 printf ("total cache fills = %d\n", totalCacheFills);
1313 }
1314
1315
1316 /***********************************************************************
1317 * PrintCacheHeader.
1318 **********************************************************************/
1319 static void PrintCacheHeader(void)
1320 {
1321 #ifdef OBJC_INSTRUMENTED
1322 printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS TotalD AvgD MaxD TotalD AvgD MaxD TotD AvgD MaxD\n");
1323 printf ("Size Count Used Used Used Hit Hit Miss Miss Hits Prbs Prbs Misses Prbs Prbs Flsh Flsh Flsh\n");
1324 printf ("----- ----- ----- ----- ---- ---- ---- ---- ---- ------- ---- ---- ------- ---- ---- ---- ---- ----\n");
1325 #else
1326 printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS\n");
1327 printf ("Size Count Used Used Used Hit Hit Miss Miss\n");
1328 printf ("----- ----- ----- ----- ---- ---- ---- ---- ----\n");
1329 #endif
1330 }
1331
1332
1333 /***********************************************************************
1334 * PrintCacheInfo.
1335 **********************************************************************/
1336 static void PrintCacheInfo(unsigned int cacheSize,
1337 unsigned int cacheCount,
1338 unsigned int slotsUsed,
1339 float avgUsed, unsigned int maxUsed,
1340 float avgSHit, unsigned int maxSHit,
1341 float avgSMiss, unsigned int maxSMiss
1342 #ifdef OBJC_INSTRUMENTED
1343 , unsigned int totDHits,
1344 float avgDHit,
1345 unsigned int maxDHit,
1346 unsigned int totDMisses,
1347 float avgDMiss,
1348 unsigned int maxDMiss,
1349 unsigned int totDFlsh,
1350 float avgDFlsh,
1351 unsigned int maxDFlsh
1352 #endif
1353 )
1354 {
1355 #ifdef OBJC_INSTRUMENTED
1356 printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u %7u %4.1f %4u %7u %4.1f %4u %4u %4.1f %4u\n",
1357 #else
1358 printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u\n",
1359 #endif
1360 cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed, avgSHit, maxSHit, avgSMiss, maxSMiss
1361 #ifdef OBJC_INSTRUMENTED
1362 , totDHits, avgDHit, maxDHit, totDMisses, avgDMiss, maxDMiss, totDFlsh, avgDFlsh, maxDFlsh
1363 #endif
1364 );
1365
1366 }
1367
1368
1369 #ifdef OBJC_INSTRUMENTED
1370 /***********************************************************************
1371 * PrintCacheHistogram. Show the non-zero entries from the specified
1372 * cache histogram.
1373 **********************************************************************/
1374 static void PrintCacheHistogram(char *title,
1375 unsigned int *firstEntry,
1376 unsigned int entryCount)
1377 {
1378 unsigned int index;
1379 unsigned int *thisEntry;
1380
1381 printf ("%s\n", title);
1382 printf (" Probes Tally\n");
1383 printf (" ------ -----\n");
1384 for (index = 0, thisEntry = firstEntry;
1385 index < entryCount;
1386 index += 1, thisEntry += 1)
1387 {
1388 if (*thisEntry == 0)
1389 continue;
1390
1391 printf (" %6d %5d\n", index, *thisEntry);
1392 }
1393 }
1394 #endif
1395
1396
1397 /***********************************************************************
1398 * _class_printMethodCacheStatistics.
1399 **********************************************************************/
1400
1401 #define MAX_LOG2_SIZE 32
1402 #define MAX_CHAIN_SIZE 100
1403
1404 void _class_printMethodCacheStatistics(void)
1405 {
1406 unsigned int isMeta;
1407 unsigned int index;
1408 NXHashState state;
1409 Class cls;
1410 unsigned int totalChain;
1411 unsigned int totalMissChain;
1412 unsigned int maxChain;
1413 unsigned int maxMissChain;
1414 unsigned int classCount;
1415 unsigned int negativeEntryCount;
1416 unsigned int cacheExpandCount;
1417 unsigned int cacheCountBySize[2][MAX_LOG2_SIZE] = {{0}};
1418 unsigned int totalEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
1419 unsigned int maxEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
1420 unsigned int totalChainBySize[2][MAX_LOG2_SIZE] = {{0}};
1421 unsigned int totalMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
1422 unsigned int totalMaxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
1423 unsigned int totalMaxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
1424 unsigned int maxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
1425 unsigned int maxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
1426 unsigned int chainCount[MAX_CHAIN_SIZE] = {0};
1427 unsigned int missChainCount[MAX_CHAIN_SIZE] = {0};
1428 #ifdef OBJC_INSTRUMENTED
1429 unsigned int hitCountBySize[2][MAX_LOG2_SIZE] = {{0}};
1430 unsigned int hitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
1431 unsigned int maxHitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
1432 unsigned int missCountBySize[2][MAX_LOG2_SIZE] = {{0}};
1433 unsigned int missProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
1434 unsigned int maxMissProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
1435 unsigned int flushCountBySize[2][MAX_LOG2_SIZE] = {{0}};
1436 unsigned int flushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
1437 unsigned int maxFlushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
1438 #endif
1439
1440 printf ("Printing cache statistics\n");
1441
1442 // Outermost loop - iterate over all classes
1443 state = NXInitHashState (class_hash);
1444 classCount = 0;
1445 negativeEntryCount = 0;
1446 cacheExpandCount = 0;
1447 while (NXNextHashState (class_hash, &state, (void **) &cls))
1448 {
1449 // Tally classes
1450 classCount += 1;
1451
1452 // Control loop - do given class' cache, then its isa's cache
1453 for (isMeta = 0; isMeta <= 1; isMeta += 1)
1454 {
1455 Cache cache;
1456 unsigned int mask;
1457 unsigned int log2Size;
1458 unsigned int entryCount;
1459
1460 // Select cache of interest
1461 cache = (isMeta ? cls->ISA() : cls)->cache;
1462
1463 // Ignore empty cache... should we?
1464 if (_cache_isEmpty(cache))
1465 continue;
1466
1467 // Middle loop - do each entry in the given cache
1468 mask = cache->mask;
1469 entryCount = 0;
1470 totalChain = 0;
1471 totalMissChain = 0;
1472 maxChain = 0;
1473 maxMissChain = 0;
1474 for (index = 0; index < mask + 1; index += 1)
1475 {
1476 cache_entry **buckets;
1477 cache_entry *entry;
1478 unsigned int hash;
1479 unsigned int methodChain;
1480 unsigned int methodMissChain;
1481 unsigned int index2;
1482
1483 // If entry is invalid, the only item of
1484 // interest is that future inserts that hash
1485 // to this slot can use it directly.
1486 buckets = (cache_entry **)cache->buckets;
1487 if (!buckets[index])
1488 {
1489 missChainCount[0] += 1;
1490 continue;
1491 }
1492
1493 entry = buckets[index];
1494
1495 // Tally valid entries
1496 entryCount += 1;
1497
1498 // Tally "forward::" entries
1499 if (entry->imp == _objc_msgForward_impcache)
1500 negativeEntryCount += 1;
1501
1502 // Calculate search distance (chain length) for this method
1503 // The chain may wrap around to the beginning of the table.
1504 hash = CACHE_HASH(entry->name, mask);
1505 if (index >= hash) methodChain = index - hash;
1506 else methodChain = (mask+1) + index - hash;
1507
1508 // Tally chains of this length
1509 if (methodChain < MAX_CHAIN_SIZE)
1510 chainCount[methodChain] += 1;
1511
1512 // Keep sum of all chain lengths
1513 totalChain += methodChain;
1514
1515 // Record greatest chain length
1516 if (methodChain > maxChain)
1517 maxChain = methodChain;
1518
1519 // Calculate search distance for miss that hashes here
1520 index2 = index;
1521 while (buckets[index2])
1522 {
1523 index2 += 1;
1524 index2 &= mask;
1525 }
1526 methodMissChain = ((index2 - index) & mask);
1527
1528 // Tally miss chains of this length
1529 if (methodMissChain < MAX_CHAIN_SIZE)
1530 missChainCount[methodMissChain] += 1;
1531
1532 // Keep sum of all miss chain lengths in this class
1533 totalMissChain += methodMissChain;
1534
1535 // Record greatest miss chain length
1536 if (methodMissChain > maxMissChain)
1537 maxMissChain = methodMissChain;
1538 }
1539
1540 // Factor this cache into statistics about caches of the same
1541 // type and size (all caches are a power of two in size)
1542 log2Size = log2u (mask + 1);
1543 cacheCountBySize[isMeta][log2Size] += 1;
1544 totalEntriesBySize[isMeta][log2Size] += entryCount;
1545 if (entryCount > maxEntriesBySize[isMeta][log2Size])
1546 maxEntriesBySize[isMeta][log2Size] = entryCount;
1547 totalChainBySize[isMeta][log2Size] += totalChain;
1548 totalMissChainBySize[isMeta][log2Size] += totalMissChain;
1549 totalMaxChainBySize[isMeta][log2Size] += maxChain;
1550 totalMaxMissChainBySize[isMeta][log2Size] += maxMissChain;
1551 if (maxChain > maxChainBySize[isMeta][log2Size])
1552 maxChainBySize[isMeta][log2Size] = maxChain;
1553 if (maxMissChain > maxMissChainBySize[isMeta][log2Size])
1554 maxMissChainBySize[isMeta][log2Size] = maxMissChain;
1555 #ifdef OBJC_INSTRUMENTED
1556 {
1557 CacheInstrumentation *cacheData;
1558
1559 cacheData = CACHE_INSTRUMENTATION(cache);
1560 hitCountBySize[isMeta][log2Size] += cacheData->hitCount;
1561 hitProbesBySize[isMeta][log2Size] += cacheData->hitProbes;
1562 if (cacheData->maxHitProbes > maxHitProbesBySize[isMeta][log2Size])
1563 maxHitProbesBySize[isMeta][log2Size] = cacheData->maxHitProbes;
1564 missCountBySize[isMeta][log2Size] += cacheData->missCount;
1565 missProbesBySize[isMeta][log2Size] += cacheData->missProbes;
1566 if (cacheData->maxMissProbes > maxMissProbesBySize[isMeta][log2Size])
1567 maxMissProbesBySize[isMeta][log2Size] = cacheData->maxMissProbes;
1568 flushCountBySize[isMeta][log2Size] += cacheData->flushCount;
1569 flushedEntriesBySize[isMeta][log2Size] += cacheData->flushedEntries;
1570 if (cacheData->maxFlushedEntries > maxFlushedEntriesBySize[isMeta][log2Size])
1571 maxFlushedEntriesBySize[isMeta][log2Size] = cacheData->maxFlushedEntries;
1572 }
1573 #endif
1574 // Caches start with a power of two number of entries, and grow by doubling, so
1575 // we can calculate the number of times this cache has expanded
1576 cacheExpandCount += log2Size - INIT_CACHE_SIZE_LOG2;
1577 }
1578 }
1579
1580 {
1581 unsigned int cacheCountByType[2] = {0};
1582 unsigned int totalCacheCount = 0;
1583 unsigned int totalEntries = 0;
1584 unsigned int maxEntries = 0;
1585 unsigned int totalSlots = 0;
1586 #ifdef OBJC_INSTRUMENTED
1587 unsigned int totalHitCount = 0;
1588 unsigned int totalHitProbes = 0;
1589 unsigned int maxHitProbes = 0;
1590 unsigned int totalMissCount = 0;
1591 unsigned int totalMissProbes = 0;
1592 unsigned int maxMissProbes = 0;
1593 unsigned int totalFlushCount = 0;
1594 unsigned int totalFlushedEntries = 0;
1595 unsigned int maxFlushedEntries = 0;
1596 #endif
1597
1598 totalChain = 0;
1599 maxChain = 0;
1600 totalMissChain = 0;
1601 maxMissChain = 0;
1602
1603 // Sum information over all caches
1604 for (isMeta = 0; isMeta <= 1; isMeta += 1)
1605 {
1606 for (index = 0; index < MAX_LOG2_SIZE; index += 1)
1607 {
1608 cacheCountByType[isMeta] += cacheCountBySize[isMeta][index];
1609 totalEntries += totalEntriesBySize[isMeta][index];
1610 totalSlots += cacheCountBySize[isMeta][index] * (1 << index);
1611 totalChain += totalChainBySize[isMeta][index];
1612 if (maxEntriesBySize[isMeta][index] > maxEntries)
1613 maxEntries = maxEntriesBySize[isMeta][index];
1614 if (maxChainBySize[isMeta][index] > maxChain)
1615 maxChain = maxChainBySize[isMeta][index];
1616 totalMissChain += totalMissChainBySize[isMeta][index];
1617 if (maxMissChainBySize[isMeta][index] > maxMissChain)
1618 maxMissChain = maxMissChainBySize[isMeta][index];
1619 #ifdef OBJC_INSTRUMENTED
1620 totalHitCount += hitCountBySize[isMeta][index];
1621 totalHitProbes += hitProbesBySize[isMeta][index];
1622 if (maxHitProbesBySize[isMeta][index] > maxHitProbes)
1623 maxHitProbes = maxHitProbesBySize[isMeta][index];
1624 totalMissCount += missCountBySize[isMeta][index];
1625 totalMissProbes += missProbesBySize[isMeta][index];
1626 if (maxMissProbesBySize[isMeta][index] > maxMissProbes)
1627 maxMissProbes = maxMissProbesBySize[isMeta][index];
1628 totalFlushCount += flushCountBySize[isMeta][index];
1629 totalFlushedEntries += flushedEntriesBySize[isMeta][index];
1630 if (maxFlushedEntriesBySize[isMeta][index] > maxFlushedEntries)
1631 maxFlushedEntries = maxFlushedEntriesBySize[isMeta][index];
1632 #endif
1633 }
1634
1635 totalCacheCount += cacheCountByType[isMeta];
1636 }
1637
1638 // Log our findings
1639 printf ("There are %u classes\n", classCount);
1640
1641 for (isMeta = 0; isMeta <= 1; isMeta += 1)
1642 {
1643 // Number of this type of class
1644 printf ("\nThere are %u %s-method caches, broken down by size (slot count):\n",
1645 cacheCountByType[isMeta],
1646 isMeta ? "class" : "instance");
1647
1648 // Print header
1649 PrintCacheHeader ();
1650
1651 // Keep format consistent even if there are no caches of this kind
1652 if (cacheCountByType[isMeta] == 0)
1653 {
1654 printf ("(none)\n");
1655 continue;
1656 }
1657
1658 // Usage information by cache size
1659 for (index = 0; index < MAX_LOG2_SIZE; index += 1)
1660 {
1661 unsigned int cacheCount;
1662 unsigned int cacheSlotCount;
1663 unsigned int cacheEntryCount;
1664
1665 // Get number of caches of this type and size
1666 cacheCount = cacheCountBySize[isMeta][index];
1667 if (cacheCount == 0)
1668 continue;
1669
1670 // Get the cache slot count and the total number of valid entries
1671 cacheSlotCount = (1 << index);
1672 cacheEntryCount = totalEntriesBySize[isMeta][index];
1673
1674 // Give the analysis
1675 PrintCacheInfo (cacheSlotCount,
1676 cacheCount,
1677 cacheEntryCount,
1678 (float) cacheEntryCount / (float) cacheCount,
1679 maxEntriesBySize[isMeta][index],
1680 (float) totalChainBySize[isMeta][index] / (float) cacheEntryCount,
1681 maxChainBySize[isMeta][index],
1682 (float) totalMissChainBySize[isMeta][index] / (float) (cacheCount * cacheSlotCount),
1683 maxMissChainBySize[isMeta][index]
1684 #ifdef OBJC_INSTRUMENTED
1685 , hitCountBySize[isMeta][index],
1686 hitCountBySize[isMeta][index] ?
1687 (float) hitProbesBySize[isMeta][index] / (float) hitCountBySize[isMeta][index] : 0.0,
1688 maxHitProbesBySize[isMeta][index],
1689 missCountBySize[isMeta][index],
1690 missCountBySize[isMeta][index] ?
1691 (float) missProbesBySize[isMeta][index] / (float) missCountBySize[isMeta][index] : 0.0,
1692 maxMissProbesBySize[isMeta][index],
1693 flushCountBySize[isMeta][index],
1694 flushCountBySize[isMeta][index] ?
1695 (float) flushedEntriesBySize[isMeta][index] / (float) flushCountBySize[isMeta][index] : 0.0,
1696 maxFlushedEntriesBySize[isMeta][index]
1697 #endif
1698 );
1699 }
1700 }
1701
1702 // Give overall numbers
1703 printf ("\nCumulative:\n");
1704 PrintCacheHeader ();
1705 PrintCacheInfo (totalSlots,
1706 totalCacheCount,
1707 totalEntries,
1708 (float) totalEntries / (float) totalCacheCount,
1709 maxEntries,
1710 (float) totalChain / (float) totalEntries,
1711 maxChain,
1712 (float) totalMissChain / (float) totalSlots,
1713 maxMissChain
1714 #ifdef OBJC_INSTRUMENTED
1715 , totalHitCount,
1716 totalHitCount ?
1717 (float) totalHitProbes / (float) totalHitCount : 0.0,
1718 maxHitProbes,
1719 totalMissCount,
1720 totalMissCount ?
1721 (float) totalMissProbes / (float) totalMissCount : 0.0,
1722 maxMissProbes,
1723 totalFlushCount,
1724 totalFlushCount ?
1725 (float) totalFlushedEntries / (float) totalFlushCount : 0.0,
1726 maxFlushedEntries
1727 #endif
1728 );
1729
1730 printf ("\nNumber of \"forward::\" entries: %d\n", negativeEntryCount);
1731 printf ("Number of cache expansions: %d\n", cacheExpandCount);
1732 #ifdef OBJC_INSTRUMENTED
1733 printf ("flush_caches: total calls total visits average visits max visits total classes visits/class\n");
1734 printf (" ----------- ------------ -------------- ---------- ------------- -------------\n");
1735 printf (" linear %11u %12u %14.1f %10u %13u %12.2f\n",
1736 LinearFlushCachesCount,
1737 LinearFlushCachesVisitedCount,
1738 LinearFlushCachesCount ?
1739 (float) LinearFlushCachesVisitedCount / (float) LinearFlushCachesCount : 0.0,
1740 MaxLinearFlushCachesVisitedCount,
1741 LinearFlushCachesVisitedCount,
1742 1.0);
1743 printf (" nonlinear %11u %12u %14.1f %10u %13u %12.2f\n",
1744 NonlinearFlushCachesCount,
1745 NonlinearFlushCachesVisitedCount,
1746 NonlinearFlushCachesCount ?
1747 (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesCount : 0.0,
1748 MaxNonlinearFlushCachesVisitedCount,
1749 NonlinearFlushCachesClassCount,
1750 NonlinearFlushCachesClassCount ?
1751 (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesClassCount : 0.0);
1752 printf (" ideal %11u %12u %14.1f %10u %13u %12.2f\n",
1753 LinearFlushCachesCount + NonlinearFlushCachesCount,
1754 IdealFlushCachesCount,
1755 LinearFlushCachesCount + NonlinearFlushCachesCount ?
1756 (float) IdealFlushCachesCount / (float) (LinearFlushCachesCount + NonlinearFlushCachesCount) : 0.0,
1757 MaxIdealFlushCachesCount,
1758 LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount,
1759 LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount ?
1760 (float) IdealFlushCachesCount / (float) (LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount) : 0.0);
1761
1762 PrintCacheHistogram ("\nCache hit histogram:", &CacheHitHistogram[0], CACHE_HISTOGRAM_SIZE);
1763 PrintCacheHistogram ("\nCache miss histogram:", &CacheMissHistogram[0], CACHE_HISTOGRAM_SIZE);
1764 #endif
1765
1766 #if 0
1767 printf ("\nLookup chains:");
1768 for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
1769 {
1770 if (chainCount[index] != 0)
1771 printf (" %u:%u", index, chainCount[index]);
1772 }
1773
1774 printf ("\nMiss chains:");
1775 for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
1776 {
1777 if (missChainCount[index] != 0)
1778 printf (" %u:%u", index, missChainCount[index]);
1779 }
1780
1781 printf ("\nTotal memory usage for cache data structures: %lu bytes\n",
1782 totalCacheCount * (sizeof(struct objc_cache) - sizeof(cache_entry *)) +
1783 totalSlots * sizeof(cache_entry *) +
1784 negativeEntryCount * sizeof(cache_entry));
1785 #endif
1786 }
1787 }
1788
1789 #endif
1790
1791
1792 // !__OBJC2__
1793 #endif