/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* Copyright 1988-1997, Apple Computer, Inc.
**********************************************************************/
/***********************************************************************
* Method cache locking (GrP 2001-1-14)
*
* For speed, objc_msgSend does not acquire any locks when it reads
* method caches. Instead, all cache changes are performed so that any
* objc_msgSend running concurrently with the cache mutator will not
* crash or hang or get an incorrect result from the cache.
*
* When cache memory becomes unused (e.g. the old cache after cache
* expansion), it is not immediately freed, because a concurrent
* objc_msgSend could still be using it. Instead, the memory is
* disconnected from the data structures and placed on a garbage list.
* The memory is now only accessible to instances of objc_msgSend that
* were running when the memory was disconnected; any further calls to
* objc_msgSend will not see the garbage memory because the other data
* structures don't point to it anymore. The collecting_in_critical
* function checks the PC of all threads and returns FALSE when all threads
* are found to be outside objc_msgSend. This means any call to objc_msgSend
* that could have had access to the garbage has finished or moved past the
* cache lookup stage, so it is safe to free the memory.
*
* All functions that modify cache data or structures must acquire the
* cacheUpdateLock to prevent interference from concurrent modifications.
* The function that frees cache garbage must acquire the cacheUpdateLock
* and use collecting_in_critical() to flush out cache readers.
* The cacheUpdateLock is also used to protect the custom allocator used
* for large method cache blocks.
*
* Cache readers (PC-checked by collecting_in_critical())
* objc_msgSend*
* _cache_getImp
* _cache_getMethod
*
* Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
* _cache_fill (acquires lock)
* _cache_expand (only called from cache_fill)
* _cache_create (only called from cache_expand)
* bcopy (only called from instrumented cache_expand)
* flush_caches (acquires lock)
* _cache_flush (only called from cache_fill and flush_caches)
* _cache_collect_free (only called from cache_expand and cache_flush)
*
* UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
* _class_printMethodCaches
* _class_printDuplicateCacheEntries
* _class_printMethodCacheStatistics
*
* _class_lookupMethodAndLoadCache is a special case. It may read a
* method triplet out of one cache and store it in another cache. This
* is unsafe if the method triplet is a forward:: entry, because the
* triplet itself could be freed unless _class_lookupMethodAndLoadCache
* were PC-checked or used a lock. Additionally, storing the method
* triplet in both caches would result in double-freeing if both caches
* were flushed or expanded. The solution is for _cache_getMethod to
* ignore all entries whose implementation is _objc_msgForward, so
* _class_lookupMethodAndLoadCache cannot look at a forward:: entry
* unsafely or place it in multiple caches.
***********************************************************************/
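/***********************************************************************
* Illustrative sketch (not part of the runtime): the shape of the
* garbage-collection step described above. The function name is an
* assumption for exposition only; the real collector also manages the
* garbage list itself and the custom cache allocator.
*
*   static void example_collect_cache_garbage(void)
*   {
*       OBJC_LOCK(&cacheUpdateLock);
*       // Old caches may be freed only once no thread's PC is inside
*       // objc_msgSend or another PC-checked cache reader.
*       if (!_collecting_in_critical()) {
*           // ...free everything on the garbage list here...
*       }
*       OBJC_UNLOCK(&cacheUpdateLock);
*   }
**********************************************************************/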
/***********************************************************************
* Lazy method list arrays and method list locking (2004-10-19)
*
* cls->methodLists may be in one of three forms:
* 1. NULL: The class has no methods.
* 2. non-NULL, with CLS_NO_METHOD_ARRAY set: cls->methodLists points
*    to a single method list, which is the class's only method list.
* 3. non-NULL, with CLS_NO_METHOD_ARRAY clear: cls->methodLists points to
*    an array of method list pointers. The end of the array's block
*    is set to -1. If the actual number of method lists is smaller
*    than that, the rest of the array is NULL.
*
* Attaching categories and adding and removing classes may change
* the form of the class's method lists. In addition, individual method
* lists may be reallocated when fixed up.
*
* Classes are initially read as #1 or #2. If a category is attached
* or other methods added, the class is changed to #3. Once in form #3,
* the class is never downgraded to #1 or #2, even if methods are removed.
* Classes added with objc_addClass are initially either #1 or #3.
*
* Accessing and manipulating a class's method lists is synchronized,
* to prevent races when one thread restructures the list. However,
* if the class is not yet in use (i.e. not in class_hash), then the
* thread loading the class may access its method lists without locking.
*
* The following functions acquire methodListLock:
* class_getInstanceMethod
* class_getClassMethod
* class_nextMethodList
* class_addMethods
* class_removeMethods
* class_respondsToMethod
* _class_lookupMethodAndLoadCache
* lookupMethodInClassAndLoadCache
* _objc_add_category_flush_caches
*
* The following functions don't acquire methodListLock because they
* only access method lists during class load and unload:
* _objc_register_category
* _resolve_categories_for_class (calls _objc_add_category)
* add_class_to_loadable_list
* _objc_remove_classes_in_image
*
* The following functions use method lists without holding methodListLock.
* The caller must either hold methodListLock, or be loading the class.
* _getMethod (called by class_getInstanceMethod, class_getClassMethod,
*   and class_respondsToMethod)
* _findMethodInClass (called by _class_lookupMethodAndLoadCache,
*   lookupMethodInClassAndLoadCache, _getMethod)
* _findMethodInList (called by _findMethodInClass)
* nextMethodList (called by _findMethodInClass and class_nextMethodList)
* fixupSelectorsInMethodList (called by nextMethodList)
* _objc_add_category (called by _objc_add_category_flush_caches,
*   resolve_categories_for_class and _objc_register_category)
* _objc_insertMethods (called by class_addMethods and _objc_add_category)
* _objc_removeMethods (called by class_removeMethods)
* _objcTweakMethodListPointerForClass (called by _objc_insertMethods)
* get_base_method_list (called by add_class_to_loadable_list)
* lookupNamedMethodInMethodList (called by add_class_to_loadable_list)
***********************************************************************/
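/***********************************************************************
* Illustrative sketch (not part of the runtime): distinguishing the
* three methodLists forms described above. This mirrors the first-call
* branch of nextMethodList() below; the helper name is hypothetical
* and the selector-fixup step is deliberately omitted.
*
*   // Caller holds methodListLock, or is still loading the class.
*   static struct objc_method_list *
*   example_first_mlist(struct objc_class *cls)
*   {
*       if (!cls->methodLists) {
*           return NULL;                // form #1: no methods
*       } else if (cls->info & CLS_NO_METHOD_ARRAY) {
*           // form #2: methodLists IS the single method list
*           return (struct objc_method_list *)cls->methodLists;
*       } else {
*           // form #3: array of list pointers, terminated by
*           // END_OF_METHODS_LIST (-1), possibly padded with NULLs
*           struct objc_method_list *first = cls->methodLists[0];
*           if (!first || first == END_OF_METHODS_LIST) return NULL;
*           return first;
*       }
*   }
**********************************************************************/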
/***********************************************************************
* Thread-safety of class info bits (2004-10-19)
*
* Some class info bits are used to store mutable runtime state.
* Modifications of the info bits at particular times need to be
* synchronized to prevent races.
*
* Three thread-safe modification functions are provided:
* _class_setInfo()     // atomically sets some bits
* _class_clearInfo()   // atomically clears some bits
* _class_changeInfo()  // atomically sets some bits and clears others
* These replace CLS_SETINFO() for the multithreaded cases.
*
* Three modification windows are defined:
* - compile time
* - class construction or image load (before +load) in one thread
* - multi-threaded messaging and method caches
*
* Info bit modification at compile time and class construction do not
* need to be locked, because only one thread is manipulating the class.
* Info bit modification during messaging needs to be locked, because
* there may be other threads simultaneously messaging or otherwise
* manipulating the class.
*
* Modification windows for each flag:
*
* CLS_CLASS: compile-time and class load
* CLS_META: compile-time and class load
* CLS_INITIALIZED: +initialize
* CLS_POSING: messaging
* CLS_MAPPED: compile-time
* CLS_FLUSH_CACHE: messaging
* CLS_GROW_CACHE: messaging
* CLS_NEED_BIND: unused
* CLS_METHOD_ARRAY: unused
* CLS_JAVA_HYBRID: JavaBridge only
* CLS_JAVA_CLASS: JavaBridge only
* CLS_INITIALIZING: messaging
* CLS_FROM_BUNDLE: class load
* CLS_HAS_CXX_STRUCTORS: compile-time and class load
* CLS_NO_METHOD_ARRAY: class load and messaging
* CLS_HAS_LOAD_METHOD: class load
*
* CLS_INITIALIZED and CLS_INITIALIZING have additional thread-safety
* constraints to support thread-safe +initialize. See "Thread safety
* during class initialization" for details.
*
* CLS_JAVA_HYBRID and CLS_JAVA_CLASS are set immediately after JavaBridge
* calls objc_addClass(). The JavaBridge does not use an atomic update,
* but the modification counts as "class construction" unless some other
* thread quickly finds the class via the class list. This race is
* small and unlikely in well-behaved code.
*
* Most info bits that may be modified during messaging are also never
* read without a lock. There is no general read lock for the info bits;
* the lock protecting each such bit is:
* CLS_INITIALIZED: classInitLock
* CLS_FLUSH_CACHE: cacheUpdateLock
* CLS_GROW_CACHE: cacheUpdateLock
* CLS_NO_METHOD_ARRAY: methodListLock
* CLS_INITIALIZING: classInitLock
***********************************************************************/
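/***********************************************************************
* Illustrative sketch (not part of the runtime): the kind of atomic
* read-modify-write that _class_changeInfo() must perform. The
* compare-and-swap primitive used here (OSAtomicCompareAndSwapLong from
* <libkern/OSAtomic.h>) is an assumption for exposition; the real
* implementation may use a lock or a different atomic primitive.
*
*   static void example_changeInfo(struct objc_class *cls,
*                                  long set, long clear)
*   {
*       long oldinfo, newinfo;
*       do {
*           oldinfo = cls->info;
*           newinfo = (oldinfo | set) & ~clear;
*       } while (!OSAtomicCompareAndSwapLong(oldinfo, newinfo,
*                                            (volatile long *)&cls->info));
*   }
**********************************************************************/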
/***********************************************************************
* Thread-safety during class initialization (GrP 2001-9-24)
*
* Initial state: CLS_INITIALIZING and CLS_INITIALIZED both clear.
* During initialization: CLS_INITIALIZING is set.
* After initialization: CLS_INITIALIZING clear and CLS_INITIALIZED set.
* CLS_INITIALIZING and CLS_INITIALIZED are never set at the same time.
* CLS_INITIALIZED is never cleared once set.
*
* Only one thread is allowed to actually initialize a class and send
* +initialize. Enforced by allowing only one thread to set CLS_INITIALIZING.
*
* Additionally, threads trying to send messages to a class must wait for
* +initialize to finish. During initialization of a class, that class's
* method cache is kept empty. objc_msgSend will revert to
* _class_lookupMethodAndLoadCache, which checks CLS_INITIALIZED before
* messaging. If CLS_INITIALIZED is clear but CLS_INITIALIZING is set,
* the thread must block, unless it is the thread that started
* initializing the class in the first place.
*
* Each thread keeps a list of classes it's initializing.
* The global classInitLock is used to synchronize changes to CLS_INITIALIZED
* and CLS_INITIALIZING: the transition to CLS_INITIALIZING must be
* an atomic test-and-set with respect to itself and the transition
* to CLS_INITIALIZED.
* The global classInitWaitCond is used to block threads waiting for an
* initialization to complete. The classInitLock synchronizes
* condition checking and the condition variable.
**********************************************************************/
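/***********************************************************************
* Illustrative sketch (not part of the runtime): how a messaging thread
* waits for another thread's +initialize to finish, using classInitLock
* and classInitWaitCond as described above. Direct pthread calls and
* the function name are assumptions for exposition; OBJC_DECLARE_LOCK
* may not expand to a plain pthread mutex.
*
*   // Caller does not hold classInitLock.
*   static void example_wait_for_initialize(struct objc_class *cls)
*   {
*       pthread_mutex_lock(&classInitLock);
*       while ((cls->info & CLS_INITIALIZING) /* and this thread is not
*              the one initializing cls (per-thread list check omitted) */)
*       {
*           // classInitWaitCond is signalled whenever any class finishes
*           // initializing; re-check our class each time we wake up.
*           pthread_cond_wait(&classInitWaitCond, &classInitLock);
*       }
*       pthread_mutex_unlock(&classInitLock);
*   }
**********************************************************************/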
/***********************************************************************
* +initialize deadlock case when a class is marked initializing while
* its superclass is still initializing. Solved by completely initializing
* superclasses before beginning to initialize a class.
*
* OmniWeb class hierarchy:
*   OBObject
*       OFObject
*           OWAddressEntry   OWController
*                                OWConsoleController
*
* Thread 1 (evil testing thread):
* initialize OWAddressEntry
* super init OFObject
* super init OBObject
* [OBObject initialize] runs OBPostLoader, which inits lots of classes...
* initialize OWConsoleController
* super init OWController - wait for Thread 2 to finish OWController init
*
* Thread 2 (normal OmniWeb thread):
* initialize OWController
* super init OFObject - wait for Thread 1 to finish OFObject init
*
* deadlock!
*
* Solution: fully initialize super classes before beginning to initialize
* a subclass. Then the initializing+initialized part of the class hierarchy
* will be a contiguous subtree starting at the root, so other threads
* can't jump into the middle between two initializing classes, and we won't
* get stuck while a superclass waits for its subclass which waits for the
* superclass.
**********************************************************************/
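/***********************************************************************
* Illustrative sketch (not part of the runtime): the superclass-first
* rule that prevents the deadlock above. class_initialize() is declared
* in this file; the recursion shown here is a simplified assumption of
* its structure, not its actual implementation.
*
*   static void example_initialize(struct objc_class *cls)
*   {
*       // Fully initialize the superclass chain first, so the set of
*       // initializing+initialized classes is always a contiguous
*       // subtree rooted at the root class.
*       if (cls->super_class && !ISINITIALIZED(cls->super_class)) {
*           example_initialize(cls->super_class);
*       }
*       // ...then set CLS_INITIALIZING, send +initialize, and finally
*       // set CLS_INITIALIZED and signal classInitWaitCond.
*   }
**********************************************************************/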
/***********************************************************************
**********************************************************************/

#import <mach/mach_interface.h>
#include <mach-o/ldsyms.h>
#include <mach-o/dyld.h>

#include <sys/types.h>
#include <sys/fcntl.h>

#import "objc-class.h"
#import <objc/Object.h>
#import <objc/objc-runtime.h>
#import "objc-private.h"
#import "hashtable2.h"

// Needed functions not in any header file
size_t malloc_size (const void * ptr);

// Needed kernel interface
#import <mach/mach.h>
#import <mach/thread_status.h>
/***********************************************************************
**********************************************************************/

// Define PRELOAD_SUPERCLASS_CACHES to cause method lookups to add the
// method to the appropriate superclass caches, in addition to the normal
// encaching in the subclass where the method was messaged. Doing so
// will speed up messaging the same method from instances of the
// superclasses, but also uses up valuable cache space for a speculative
// win.
// See radar 2364264 about incorrectly propagating _objc_forward entries
// and double-freeing them; check that first before turning this on!
// (Radar 2364264 is now "inactive".)
// Double-freeing is also a potential problem when this is off. See
// note about _class_lookupMethodAndLoadCache in "Method cache locking".

//#define PRELOAD_SUPERCLASS_CACHES
#ifdef OBJC_INSTRUMENTED
    CACHE_HISTOGRAM_SIZE = 512

unsigned int CacheHitHistogram [CACHE_HISTOGRAM_SIZE];
unsigned int CacheMissHistogram [CACHE_HISTOGRAM_SIZE];

/***********************************************************************
* Constants and macros internal to this module.
**********************************************************************/

// INIT_CACHE_SIZE and INIT_META_CACHE_SIZE must be powers of two
    INIT_CACHE_SIZE_LOG2      = 2,
    INIT_META_CACHE_SIZE_LOG2 = 2,
    INIT_CACHE_SIZE           = (1 << INIT_CACHE_SIZE_LOG2),
    INIT_META_CACHE_SIZE      = (1 << INIT_META_CACHE_SIZE_LOG2)

// Amount of space required for `count` hash table buckets, knowing that
// one entry is embedded in the cache structure itself
#define TABLE_SIZE(count)  ((count - 1) * sizeof(Method))

// A sentinel (magic value) to report bad thread_get_state status
#define PC_SENTINAL  0
/***********************************************************************
* Types internal to this module.
**********************************************************************/

#ifdef OBJC_INSTRUMENTED
struct CacheInstrumentation
    unsigned int hitCount;           // cache lookup success tally
    unsigned int hitProbes;          // sum entries checked to hit
    unsigned int maxHitProbes;       // max entries checked to hit
    unsigned int missCount;          // cache lookup no-find tally
    unsigned int missProbes;         // sum entries checked to miss
    unsigned int maxMissProbes;      // max entries checked to miss
    unsigned int flushCount;         // cache flush tally
    unsigned int flushedEntries;     // sum cache entries flushed
    unsigned int maxFlushedEntries;  // max cache entries flushed
typedef struct CacheInstrumentation CacheInstrumentation;

// Cache instrumentation data follows table, so it is most compatible
#define CACHE_INSTRUMENTATION(cache)  (CacheInstrumentation *) &cache->buckets[cache->mask + 1];
/***********************************************************************
* Function prototypes internal to this module.
**********************************************************************/

static Ivar class_getVariable (Class cls, const char * name);
static void flush_caches (Class cls, BOOL flush_meta);
static struct objc_method_list *nextMethodList(struct objc_class *cls, void **it);
static void addClassToOriginalClass (Class posingClass, Class originalClass);
static void _objc_addOrigClass (Class origClass);
static void _freedHandler (id self, SEL sel);
static void _nonexistentHandler (id self, SEL sel);
static void class_initialize (Class cls);
static Cache _cache_expand (Class cls);
static int LogObjCMessageSend (BOOL isClassMethod, const char * objectsClass, const char * implementingClass, SEL selector);
static BOOL _cache_fill (Class cls, Method smt, SEL sel);
static void _cache_addForwardEntry(Class cls, SEL sel);
static void _cache_flush (Class cls);
static IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);
static int SubtypeUntil (const char * type, char end);
static const char * SkipFirstType (const char * type);

static unsigned long _get_pc_for_thread (mach_port_t thread);
static int _collecting_in_critical (void);
static void _garbage_make_room (void);
static void _cache_collect_free (void * data, size_t size, BOOL tryCollect);

static BOOL cache_allocator_is_block(void *block);
static void *cache_allocator_calloc(size_t size);
static void cache_allocator_free(void *block);

static void _cache_print (Cache cache);
static unsigned int log2 (unsigned int x);
static void PrintCacheHeader (void);
#ifdef OBJC_INSTRUMENTED
static void PrintCacheHistogram (char * title, unsigned int * firstEntry, unsigned int entryCount);

/***********************************************************************
* Static data internal to this module.
**********************************************************************/
// When _class_uncache is non-zero, cache growth copies the existing
// entries into the new (larger) cache. When this flag is zero, new
// (larger) caches start out empty.
static int _class_uncache = 1;

// When _class_slow_grow is non-zero, any given cache is actually grown
// only on the odd-numbered times it becomes full; on the even-numbered
// times, it is simply emptied and re-used. When this flag is zero,
// caches are grown every time.
static int _class_slow_grow = 1;

// Lock for cache access.
// Held when modifying a cache in place.
// Held when installing a new cache on a class.
// Held when adding to the cache garbage list.
// Held when disposing cache garbage.
// See "Method cache locking" above for notes about cache locking.
static OBJC_DECLARE_LOCK(cacheUpdateLock);

// classInitLock protects classInitWaitCond and examination and modification
// of CLS_INITIALIZED and CLS_INITIALIZING.
OBJC_DECLARE_LOCK(classInitLock);
// classInitWaitCond is signalled when any class is done initializing.
// Threads that are waiting for a class to finish initializing wait on this.
pthread_cond_t classInitWaitCond = PTHREAD_COND_INITIALIZER;

// Lock for method list access and modification.
// Protects methodLists fields, method arrays, and CLS_NO_METHOD_ARRAY bits.
// Classes not yet in use do not need to take this lock.
OBJC_DECLARE_LOCK(methodListLock);

// When traceDuplicates is non-zero, _cacheFill checks whether the method
// being encached is already there. The number of times it finds a match
// is tallied in cacheFillDuplicates. When traceDuplicatesVerbose is
// non-zero, each duplication is logged when found in this way.
static int traceDuplicates = 0;
static int traceDuplicatesVerbose = 0;
static int cacheFillDuplicates = 0;

// Custom cache allocator parameters
// CACHE_REGION_SIZE must be a multiple of CACHE_QUANTUM.
#define CACHE_QUANTUM 520
#define CACHE_REGION_SIZE 131040     // quantized just under 128KB (131072)
// #define CACHE_REGION_SIZE 262080  // quantized just under 256KB (262144)
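// Informational check of the quantization above: 131040 == 252 * 520 and
// 262080 == 504 * 520, so both region sizes are exact multiples of
// CACHE_QUANTUM, as required.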
#ifdef OBJC_INSTRUMENTED
static unsigned int LinearFlushCachesCount = 0;
static unsigned int LinearFlushCachesVisitedCount = 0;
static unsigned int MaxLinearFlushCachesVisitedCount = 0;
static unsigned int NonlinearFlushCachesCount = 0;
static unsigned int NonlinearFlushCachesClassCount = 0;
static unsigned int NonlinearFlushCachesVisitedCount = 0;
static unsigned int MaxNonlinearFlushCachesVisitedCount = 0;
static unsigned int IdealFlushCachesCount = 0;
static unsigned int MaxIdealFlushCachesCount = 0;

// Method call logging
typedef int (*ObjCLogProc)(BOOL, const char *, const char *, SEL);

static int totalCacheFills NOBSS = 0;
static int objcMsgLogFD = (-1);
static ObjCLogProc objcMsgLogProc = &LogObjCMessageSend;
static int objcMsgLogEnabled = 0;

static const char
    _errNoMem[] = "failed -- out of memory(%s, %u)",
    _errAllocNil[] = "allocating nil object",
    _errFreedObject[] = "message %s sent to freed object=0x%lx",
    _errNonExistentObject[] = "message %s sent to non-existent object=0x%lx",
    _errBadSel[] = "invalid selector %s",
    _errNotSuper[] = "[%s poseAs:%s]: target not immediate superclass",
    _errNewVars[] = "[%s poseAs:%s]: %s defines new instance variables";
/***********************************************************************
* Information about multi-thread support:
*
* Since we do not lock many operations which walk the superclass, method
* and ivar chains, these chains must remain intact once a class is published
* by inserting it into the class hashtable. All modifications must be
* atomic so that someone walking these chains will always get a valid
* result.
***********************************************************************/

/***********************************************************************
* A static empty cache. All classes initially point at this cache.
* When the first message is sent it misses in the cache, and when
* the cache is grown it checks for this case and uses malloc rather
* than realloc. This avoids the need to check for NULL caches in the
* messenger.
***********************************************************************/
#ifndef OBJC_INSTRUMENTED
const struct objc_cache emptyCache =

// OBJC_INSTRUMENTED requires writable data immediately following emptyCache.
struct objc_cache emptyCache =

CacheInstrumentation emptyCacheInstrumentation = {0};

// Freed objects have their isa set to point to this dummy class.
// This avoids the need to check for Nil classes in the messenger.
static const struct objc_class freedObjectClass =
    (Cache) &emptyCache,              // cache

static const struct objc_class nonexistentObjectClass =
    "NONEXISTENT(id)",                // name
    (Cache) &emptyCache,              // cache

/***********************************************************************
* object_getClassName.
**********************************************************************/
const char * object_getClassName (id obj)
    // Even nil objects have a class name, sort of

    // Retrieve name from object's class
    return ((struct objc_class *) obj->isa)->name;

/***********************************************************************
* object_getIndexedIvars.
**********************************************************************/
void * object_getIndexedIvars (id obj)
    // ivars are tacked onto the end of the object
    return ((char *) obj) + ((struct objc_class *) obj->isa)->instance_size;
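/***********************************************************************
* Illustrative sketch (not part of the runtime): the relationship
* between the extra "indexed ivar" bytes requested at allocation time
* and object_getIndexedIvars(). The class name is a placeholder.
*
*   id obj = class_createInstance(someClass, 16);    // 16 indexed bytes
*   char *extra = (char *)object_getIndexedIvars(obj);
*   // extra points just past the declared instance variables
*   // (obj + instance_size), i.e. at the 16 requested bytes.
**********************************************************************/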
/***********************************************************************
* object_cxxDestructFromClass.
* Call C++ destructors on obj, starting with cls's
* dtor method (if any) followed by superclasses' dtors (if any),
* stopping at cls's dtor (if any).
* Uses methodListLock and cacheUpdateLock. The caller must hold neither.
**********************************************************************/
static void object_cxxDestructFromClass(id obj, Class cls)
    // Call cls's dtor first, then superclasses' dtors.

    for ( ; cls != NULL; cls = cls->super_class) {
        if (!(cls->info & CLS_HAS_CXX_STRUCTORS)) continue;
            lookupMethodInClassAndLoadCache(cls, cxx_destruct_sel);
        if (dtor != (void(*)(id))&_objc_msgForward) {
                _objc_inform("CXX: calling C++ destructors for class %s",

/***********************************************************************
* object_cxxDestruct.
* Call C++ destructors on obj, if any.
* Uses methodListLock and cacheUpdateLock. The caller must hold neither.
**********************************************************************/
void object_cxxDestruct(id obj)
    object_cxxDestructFromClass(obj, obj->isa);
/***********************************************************************
* object_cxxConstructFromClass.
* Recursively call C++ constructors on obj, starting with base class's
* ctor method (if any) followed by subclasses' ctors (if any), stopping
* at cls's ctor (if any).
* Returns YES if construction succeeded.
* Returns NO if some constructor threw an exception. The exception is
* caught and discarded. Any partial construction is destructed.
* Uses methodListLock and cacheUpdateLock. The caller must hold neither.
*
* .cxx_construct returns id. This really means:
* return self: construction succeeded
* return nil: construction failed because a C++ constructor threw an exception
**********************************************************************/
static BOOL object_cxxConstructFromClass(id obj, Class cls)
    // Call superclasses' ctors first, if any.
    if (cls->super_class) {
        BOOL ok = object_cxxConstructFromClass(obj, cls->super_class);
        if (!ok) return NO;  // some superclass's ctor failed - give up

    // Find this class's ctor, if any.
    if (!(cls->info & CLS_HAS_CXX_STRUCTORS)) return YES;  // no ctor - ok
    ctor = (id(*)(id))lookupMethodInClassAndLoadCache(cls, cxx_construct_sel);
    if (ctor == (id(*)(id))&_objc_msgForward) return YES;  // no ctor - ok

    // Call this class's ctor.
        _objc_inform("CXX: calling C++ constructors for class %s", cls->name);
    if ((*ctor)(obj)) return YES;  // ctor called and succeeded - ok

    // This class's ctor was called and failed.
    // Call superclasses' dtors to clean up.
    if (cls->super_class) object_cxxDestructFromClass(obj, cls->super_class);

/***********************************************************************
* object_cxxConstruct.
* Call C++ constructors on obj, if any.
* Returns YES if construction succeeded.
* Returns NO if some constructor threw an exception. The exception is
* caught and discarded. Any partial construction is destructed.
* Uses methodListLock and cacheUpdateLock. The caller must hold neither.
**********************************************************************/
BOOL object_cxxConstruct(id obj)
    if (!obj) return YES;
    return object_cxxConstructFromClass(obj, obj->isa);
/***********************************************************************
* _internal_class_createInstanceFromZone. Allocate an instance of the
* specified class with the specified number of bytes for indexed
* variables, in the specified zone. The isa field is set to the
* class, C++ default constructors are called, and all other fields are zeroed.
**********************************************************************/
static id _internal_class_createInstanceFromZone (Class aClass,
    register unsigned byteCount;

    // Can't create something for nothing
        __objc_error ((id) aClass, _errAllocNil, 0);

    // Allocate and initialize
    byteCount = ((struct objc_class *) aClass)->instance_size + nIvarBytes;
    obj = (id) malloc_zone_calloc (z, 1, byteCount);
        __objc_error ((id) aClass, _errNoMem, ((struct objc_class *) aClass)->name, nIvarBytes);

    // Set the isa pointer

    // Call C++ constructors, if any.
    if (!object_cxxConstruct(obj)) {
        // Some C++ constructor threw an exception.
        malloc_zone_free(z, obj);

/***********************************************************************
* _internal_class_createInstance. Allocate an instance of the specified
* class with the specified number of bytes for indexed variables, in
* the default zone, using _internal_class_createInstanceFromZone.
**********************************************************************/
static id _internal_class_createInstance (Class aClass,
    return _internal_class_createInstanceFromZone (aClass,
                                                   malloc_default_zone ());

id (*_poseAs)() = (id (*)())class_poseAs;
id (*_alloc)(Class, unsigned) = _internal_class_createInstance;
id (*_zoneAlloc)(Class, unsigned, void *) = _internal_class_createInstanceFromZone;
/***********************************************************************
* class_createInstanceFromZone. Allocate an instance of the specified
* class with the specified number of bytes for indexed variables, in
* the specified zone, using _zoneAlloc.
**********************************************************************/
id class_createInstanceFromZone (Class aClass,
    // _zoneAlloc can be overridden, but is initially set to
    // _internal_class_createInstanceFromZone
    return (*_zoneAlloc) (aClass, nIvarBytes, z);

/***********************************************************************
* class_createInstance. Allocate an instance of the specified class with
* the specified number of bytes for indexed variables, using _alloc.
**********************************************************************/
id class_createInstance (Class aClass,
    // _alloc can be overridden, but is initially set to
    // _internal_class_createInstance
    return (*_alloc) (aClass, nIvarBytes);
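/***********************************************************************
* Usage sketch (illustrative): allocating instances through the
* overridable hooks above. The class name and replacement allocator are
* hypothetical; the only facts assumed are the signatures shown in this
* file.
*
*   id obj  = class_createInstance(someClass, 0);             // via *_alloc
*   id zobj = class_createInstanceFromZone(someClass, 0,
*                                          malloc_default_zone());
*
*   // A replacement allocator must match _alloc's type:
*   //   id my_alloc(Class cls, unsigned nIvarBytes);
*   //   _alloc = &my_alloc;
**********************************************************************/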
/***********************************************************************
* class_setVersion. Record the specified version with the class.
**********************************************************************/
void class_setVersion (Class aClass,
    ((struct objc_class *) aClass)->version = version;

/***********************************************************************
* class_getVersion. Return the version recorded with the class.
**********************************************************************/
int class_getVersion (Class aClass)
    return ((struct objc_class *) aClass)->version;
static inline Method _findNamedMethodInList(struct objc_method_list * mlist, const char *meth_name) {
    if (!mlist) return NULL;
    for (i = 0; i < mlist->method_count; i++) {
        Method m = &mlist->method_list[i];
        if (*((const char *)m->method_name) == *meth_name && 0 == strcmp((const char *)(m->method_name), meth_name)) {

/***********************************************************************
* fixupSelectorsInMethodList
* Uniques selectors in the given method list.
* The given method list must be non-NULL and not already fixed-up.
* If the class was loaded from a bundle:
*   fixes up the given list in place with heap-allocated selector strings
* If the class was not from a bundle:
*   allocates a copy of the method list, fixes up the copy, and returns
*   the copy. The given list is unmodified.
*
* If cls is already in use, methodListLock must be held by the caller.
**********************************************************************/
// Fixed-up method lists get mlist->obsolete = _OBJC_FIXED_UP.
#define _OBJC_FIXED_UP ((void *)1771)
static struct objc_method_list *fixupSelectorsInMethodList(Class cls, struct objc_method_list *mlist)
    struct objc_method_list *old_mlist;

    if ( ! mlist ) return (struct objc_method_list *)0;
    if ( mlist->obsolete != _OBJC_FIXED_UP ) {
        BOOL isBundle = CLS_GETINFO(cls, CLS_FROM_BUNDLE) ? YES : NO;
            size = sizeof(struct objc_method_list) - sizeof(struct objc_method) + old_mlist->method_count * sizeof(struct objc_method);
            mlist = _malloc_internal(size);
            memmove(mlist, old_mlist, size);
            // Mach-O bundles are fixed up in place.
            // This prevents leaks when a bundle is unloaded.
        for ( i = 0; i < mlist->method_count; i += 1 ) {
            method = &mlist->method_list[i];
            method->method_name =
                sel_registerNameNoLock((const char *)method->method_name, isBundle);  // Always copy selector data from bundles.
        mlist->obsolete = _OBJC_FIXED_UP;
/***********************************************************************
* nextMethodList
* Returns successive method lists from the given class.
* Method lists are returned in method search order (i.e. highest-priority
* implementations first).
* All necessary method list fixups are performed, so the
* returned method list is fully-constructed.
*
* If cls is already in use, methodListLock must be held by the caller.
* For full thread-safety, methodListLock must be continuously held by the
* caller across all calls to nextMethodList(). If the lock is released,
* the bad results listed in class_nextMethodList() may occur.
*
* void *iterator = NULL;
* struct objc_method_list *mlist;
* OBJC_LOCK(&methodListLock);
* while ((mlist = nextMethodList(cls, &iterator))) {
*     // do something with mlist
* }
* OBJC_UNLOCK(&methodListLock);
**********************************************************************/
static struct objc_method_list *nextMethodList(struct objc_class *cls,
    uintptr_t index = *(uintptr_t *)it;
    struct objc_method_list **resultp;

        // First call to nextMethodList.
        if (!cls->methodLists) {
        } else if (cls->info & CLS_NO_METHOD_ARRAY) {
            resultp = (struct objc_method_list **)&cls->methodLists;
            resultp = &cls->methodLists[0];
            if (!*resultp || *resultp == END_OF_METHODS_LIST) {
        // Subsequent call to nextMethodList.
        if (!cls->methodLists) {
        } else if (cls->info & CLS_NO_METHOD_ARRAY) {
            resultp = &cls->methodLists[index];
            if (!*resultp || *resultp == END_OF_METHODS_LIST) {

    // resultp now is NULL, meaning there are no more method lists,
    // OR the address of the method list pointer to fix up and return.
    if (*resultp && (*resultp)->obsolete != _OBJC_FIXED_UP) {
        *resultp = fixupSelectorsInMethodList(cls, *resultp);
    *it = (void *)(index + 1);
/* These next three functions are the heart of ObjC method lookup.
 * If the class is currently in use, methodListLock must be held by the caller.
 */
static inline Method _findMethodInList(struct objc_method_list * mlist, SEL sel) {
    if (!mlist) return NULL;
    for (i = 0; i < mlist->method_count; i++) {
        Method m = &mlist->method_list[i];
        if (m->method_name == sel) {

static inline Method _findMethodInClass(Class cls, SEL sel) __attribute__((always_inline));
static inline Method _findMethodInClass(Class cls, SEL sel) {
    // Flattened version of nextMethodList(). The optimizer doesn't
    // do a good job with hoisting the conditionals out of the loop.
    // Conceptually, this looks like:
    // while ((mlist = nextMethodList(cls, &iterator))) {
    //     Method m = _findMethodInList(mlist, sel);

    if (!cls->methodLists) {
    else if (cls->info & CLS_NO_METHOD_ARRAY) {
        struct objc_method_list **mlistp;
        mlistp = (struct objc_method_list **)&cls->methodLists;
        if ((*mlistp)->obsolete != _OBJC_FIXED_UP) {
            *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
        return _findMethodInList(*mlistp, sel);
        // Multiple method lists.
        struct objc_method_list **mlistp;
        for (mlistp = cls->methodLists;
             *mlistp != NULL && *mlistp != END_OF_METHODS_LIST;
            if ((*mlistp)->obsolete != _OBJC_FIXED_UP) {
                *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
            m = _findMethodInList(*mlistp, sel);

static inline Method _getMethod(Class cls, SEL sel) {
    for (; cls; cls = cls->super_class) {
        m = _findMethodInClass(cls, sel);

// fixme for gc debugging temporary use
__private_extern__ IMP findIMPInClass(Class cls, SEL sel)
    Method m = _findMethodInClass(cls, sel);
    if (m) return m->method_imp;
/***********************************************************************
* class_getInstanceMethod. Return the instance method for the
* specified class and selector.
**********************************************************************/
Method class_getInstanceMethod (Class aClass,
    // Need both a class and a selector
    if (!aClass || !aSelector)

    OBJC_LOCK(&methodListLock);
    result = _getMethod (aClass, aSelector);
    OBJC_UNLOCK(&methodListLock);

/***********************************************************************
* class_getClassMethod. Return the class method for the specified
* class and selector.
**********************************************************************/
Method class_getClassMethod (Class aClass,
    // Need both a class and a selector
    if (!aClass || !aSelector)

    // Go to the class or isa
    OBJC_LOCK(&methodListLock);
    result = _getMethod (GETMETA(aClass), aSelector);
    OBJC_UNLOCK(&methodListLock);
/***********************************************************************
* class_getVariable. Return the named instance variable.
**********************************************************************/
static Ivar class_getVariable (Class cls,
    struct objc_class * thisCls;

    // Outer loop - search the class and its superclasses
    for (thisCls = cls; thisCls != Nil; thisCls = ((struct objc_class *) thisCls)->super_class)
        // Skip class having no ivars
        if (!thisCls->ivars)

        // Inner loop - search the given class
        thisIvar = &thisCls->ivars->ivar_list[0];
        for (index = 0; index < thisCls->ivars->ivar_count; index += 1)
            // Check this ivar's name. Be careful because the
            // compiler generates ivar entries with NULL ivar_name
            // (e.g. for anonymous bit fields).
            if ((thisIvar->ivar_name) &&
                (strcmp (name, thisIvar->ivar_name) == 0))

            // Move to next ivar

/***********************************************************************
* class_getInstanceVariable. Return the named instance variable.
*
* Someday add class_getClassVariable ().
**********************************************************************/
Ivar class_getInstanceVariable (Class aClass,
    // Must have a class and a name
    if (!aClass || !name)

    return class_getVariable (aClass, name);
/***********************************************************************
* flush_caches. Flush the instance and optionally class method caches
* of cls and all its subclasses.
*
* Specifying Nil for the class means "all classes."
**********************************************************************/
static void flush_caches(Class cls, BOOL flush_meta)
    int numClasses = 0, newNumClasses;
    struct objc_class * * classes = NULL;
    struct objc_class * clsObject;
#ifdef OBJC_INSTRUMENTED
    unsigned int classesVisited;
    unsigned int subclassCount;

    // Do nothing if class has no cache
    // This check is safe to do without any cache locks.
    if (cls && !((struct objc_class *) cls)->cache)

    newNumClasses = objc_getClassList((Class *)NULL, 0);
    while (numClasses < newNumClasses) {
        numClasses = newNumClasses;
        classes = _realloc_internal(classes, sizeof(Class) * numClasses);
        newNumClasses = objc_getClassList((Class *)classes, numClasses);
    numClasses = newNumClasses;

    OBJC_LOCK(&cacheUpdateLock);

    // Handle nil and root instance class specially: flush all
    // instance and class method caches. Nice that this
    // loop is linear vs the N-squared loop just below.
    if (!cls || !((struct objc_class *) cls)->super_class)
#ifdef OBJC_INSTRUMENTED
        LinearFlushCachesCount += 1;

        // Traverse all classes in the hash table
        for (i = 0; i < numClasses; i++)
            struct objc_class * metaClsObject;
#ifdef OBJC_INSTRUMENTED
            classesVisited += 1;
            clsObject = classes[i];

            // Skip class that is known not to be a subclass of this root
            // (the isa pointer of any meta class points to the meta class
            // of the root class)
            // NOTE: When is an isa pointer of a hash tabled class ever nil?
            metaClsObject = clsObject->isa;
            if (cls && metaClsObject && cls->isa != metaClsObject->isa)

#ifdef OBJC_INSTRUMENTED
            _cache_flush (clsObject);
            if (flush_meta && metaClsObject != NULL) {
                _cache_flush (metaClsObject);

#ifdef OBJC_INSTRUMENTED
        LinearFlushCachesVisitedCount += classesVisited;
        if (classesVisited > MaxLinearFlushCachesVisitedCount)
            MaxLinearFlushCachesVisitedCount = classesVisited;
        IdealFlushCachesCount += subclassCount;
        if (subclassCount > MaxIdealFlushCachesCount)
            MaxIdealFlushCachesCount = subclassCount;

        OBJC_UNLOCK(&cacheUpdateLock);
        _free_internal(classes);
    // Outer loop - flush any cache that could now get a method from
    // cls (i.e. the cache associated with cls and any of its subclasses).
#ifdef OBJC_INSTRUMENTED
    NonlinearFlushCachesCount += 1;
    for (i = 0; i < numClasses; i++)
        struct objc_class * clsIter;

#ifdef OBJC_INSTRUMENTED
        NonlinearFlushCachesClassCount += 1;
        clsObject = classes[i];

        // Inner loop - Process a given class
        clsIter = clsObject;
#ifdef OBJC_INSTRUMENTED
            classesVisited += 1;
            // Flush clsObject instance method cache if
            // clsObject is a subclass of cls, or is cls itself
            // Flush the class method cache if that was asked for
#ifdef OBJC_INSTRUMENTED
                _cache_flush (clsObject);
                    _cache_flush (clsObject->isa);

            // Flush clsObject class method cache if cls is
            // the meta class of clsObject or of one
            // of clsObject's superclasses
            else if (clsIter->isa == cls)
#ifdef OBJC_INSTRUMENTED
                _cache_flush (clsObject->isa);

            // Move up superclass chain
            else if (ISINITIALIZED(clsIter))
                clsIter = clsIter->super_class;

            // clsIter is not initialized, so its cache
            // must be empty. This happens only when
            // clsIter == clsObject, because
            // superclasses are initialized before
            // subclasses, and this loop traverses
            // from sub- to super- classes.

#ifdef OBJC_INSTRUMENTED
    NonlinearFlushCachesVisitedCount += classesVisited;
    if (classesVisited > MaxNonlinearFlushCachesVisitedCount)
        MaxNonlinearFlushCachesVisitedCount = classesVisited;
    IdealFlushCachesCount += subclassCount;
    if (subclassCount > MaxIdealFlushCachesCount)
        MaxIdealFlushCachesCount = subclassCount;

    OBJC_UNLOCK(&cacheUpdateLock);
    _free_internal(classes);
/***********************************************************************
* _objc_flush_caches. Flush the caches of the specified class and any
* of its subclasses. If cls is a meta-class, only meta-class (i.e.
* class method) caches are flushed. If cls is an instance-class, both
* instance-class and meta-class caches are flushed.
**********************************************************************/
void _objc_flush_caches (Class cls)
    flush_caches (cls, YES);

/***********************************************************************
* do_not_remove_this_dummy_function.
**********************************************************************/
void do_not_remove_this_dummy_function (void)
    (void) class_nextMethodList (NULL, NULL);

/***********************************************************************
* class_nextMethodList.
* External version of nextMethodList().
*
* This function is not fully thread-safe. A series of calls to
* class_nextMethodList() may fail if methods are added to or removed
* from the class between calls.
* If methods are added between calls to class_nextMethodList(), it may
* return previously-returned method lists again, and may fail to return
* newly-added lists.
* If methods are removed between calls to class_nextMethodList(), it may
* omit surviving method lists or simply crash.
**********************************************************************/
OBJC_EXPORT struct objc_method_list * class_nextMethodList (Class cls,
    struct objc_method_list *result;
    OBJC_LOCK(&methodListLock);
    result = nextMethodList(cls, it);
    OBJC_UNLOCK(&methodListLock);
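/***********************************************************************
* Usage sketch (illustrative): enumerating a class's method lists with
* the external API above. The class name is a placeholder. Because the
* caller cannot hold methodListLock, the caveats listed above apply;
* do not add or remove methods while iterating.
*
*   void *iterator = NULL;
*   struct objc_method_list *mlist;
*   while ((mlist = class_nextMethodList(someClass, &iterator))) {
*       int i;
*       for (i = 0; i < mlist->method_count; i++) {
*           Method m = &mlist->method_list[i];
*           // inspect m->method_name / m->method_imp here
*       }
*   }
**********************************************************************/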
/***********************************************************************
**********************************************************************/
    (void) class_nextMethodList (Nil, NULL);
/***********************************************************************
* class_addMethods.
*
* Formerly class_addInstanceMethods ()
**********************************************************************/
void class_addMethods (Class cls,
                       struct objc_method_list * meths)
    OBJC_LOCK(&methodListLock);
    _objc_insertMethods(cls, meths);
    OBJC_UNLOCK(&methodListLock);

    // Must flush when dynamically adding methods. No need to flush
    // all the class method caches. If cls is a meta class, though,
    // this will still flush it and any of its sub-meta classes.
    flush_caches (cls, NO);

/***********************************************************************
* class_addClassMethods.
*
* Obsolete (for binary compatibility only).
**********************************************************************/
void class_addClassMethods (Class cls,
                            struct objc_method_list * meths)
    class_addMethods (((struct objc_class *) cls)->isa, meths);

/***********************************************************************
* class_removeMethods.
**********************************************************************/
void class_removeMethods (Class cls,
                          struct objc_method_list * meths)
    // Remove the methods
    OBJC_LOCK(&methodListLock);
    _objc_removeMethods(cls, meths);
    OBJC_UNLOCK(&methodListLock);

    // Must flush when dynamically removing methods. No need to flush
    // all the class method caches. If cls is a meta class, though,
    // this will still flush it and any of its sub-meta classes.
    flush_caches (cls, NO);
/***********************************************************************
* addClassToOriginalClass. Add to a hash table of classes involved in
* a posing situation. We use this when we need to get to the "original"
* class for some particular name through the function objc_getOrigClass.
* For instance, the implementation of [super ...] will use this to be
* sure that it gets hold of the correct super class, so that no infinite
* loops will occur if the class it appears in is involved in posing.
*
* We use the classLock to guard the hash table.
*
* See tracker bug #51856.
**********************************************************************/

static NXMapTable * posed_class_hash = NULL;
static NXMapTable * posed_class_to_original_class_hash = NULL;

static void addClassToOriginalClass (Class posingClass,
                                     Class originalClass)
    // Install hash table when it is first needed
    if (!posed_class_to_original_class_hash)
        posed_class_to_original_class_hash =
            NXCreateMapTableFromZone (NXPtrValueMapPrototype,
                                      _objc_internal_zone ());

    // Add pose to hash table
    NXMapInsert (posed_class_to_original_class_hash,

/***********************************************************************
* getOriginalClassForPosingClass.
**********************************************************************/
Class getOriginalClassForPosingClass (Class posingClass)
    return NXMapGet (posed_class_to_original_class_hash, posingClass);

/***********************************************************************
* objc_getOrigClass.
**********************************************************************/
Class objc_getOrigClass (const char * name)
    struct objc_class * ret;

    // Look for class among the posers
    OBJC_LOCK(&classLock);
    if (posed_class_hash)
        ret = (Class) NXMapGet (posed_class_hash, name);
    OBJC_UNLOCK(&classLock);

    // Not a poser. Do a normal lookup.
    ret = objc_getClass (name);
        _objc_inform ("class `%s' not linked into application", name);

/***********************************************************************
* _objc_addOrigClass. This function is only used from class_poseAs.
* Registers the original class names, before they get obscured by
* posing, so that [super ..] will work correctly from categories
* in posing classes and in categories in classes being posed for.
**********************************************************************/
static void _objc_addOrigClass (Class origClass)
    OBJC_LOCK(&classLock);

    // Create the poser's hash table on first use
    if (!posed_class_hash)
        posed_class_hash = NXCreateMapTableFromZone (NXStrValueMapPrototype,
                                                     _objc_internal_zone ());

    // Add the named class iff it is not already there (or collides?)
    if (NXMapGet (posed_class_hash, ((struct objc_class *)origClass)->name) == 0)
        NXMapInsert (posed_class_hash, ((struct objc_class *)origClass)->name, origClass);

    OBJC_UNLOCK(&classLock);

/***********************************************************************
* class_poseAs.
*
* !!! class_poseAs () does not currently flush any caches.
**********************************************************************/
Class class_poseAs (Class imposter,
    struct objc_class * clsObject;
    char * imposterNamePtr;
    NXHashTable * class_hash;
    struct objc_class * copy;
#ifdef OBJC_CLASS_REFS
    header_info * hInfo;

    // Trivial case is easy
    if (imposter == original)

    // Imposter must be an immediate subclass of the original
    if (((struct objc_class *)imposter)->super_class != original) {
        __objc_error(imposter, _errNotSuper, ((struct objc_class *)imposter)->name, ((struct objc_class *)original)->name);

    // Can't pose when you have instance variables (how could it work?)
    if (((struct objc_class *)imposter)->ivars) {
        __objc_error(imposter, _errNewVars, ((struct objc_class *)imposter)->name, ((struct objc_class *)original)->name, ((struct objc_class *)imposter)->name);

    // Build a string to use to replace the name of the original class.
#define imposterNamePrefix "_%"
    imposterNamePtr = _malloc_internal(strlen(((struct objc_class *)original)->name) + strlen(imposterNamePrefix) + 1);
    strcpy(imposterNamePtr, imposterNamePrefix);
    strcat(imposterNamePtr, ((struct objc_class *)original)->name);
#undef imposterNamePrefix

    // We lock the class hashtable, so we are thread safe with respect to
    // calls to objc_getClass (). However, the class names are not
    // changed atomically, nor are all of the subclasses updated
    // atomically. I have ordered the operations so that you will
    // never crash, but you may get inconsistent results....

    // Register the original class so that [super ..] knows
    // exactly which classes are the "original" classes.
    _objc_addOrigClass (original);
    _objc_addOrigClass (imposter);

    // Copy the imposter, so that the imposter can continue
    // its normal life in addition to changing the behavior of
    // the original. As a hack we don't bother to copy the metaclass.
    // For some reason we modify the original rather than the copy.
    copy = (*_zoneAlloc)(imposter->isa, sizeof(struct objc_class), _objc_internal_zone());
    memmove(copy, imposter, sizeof(struct objc_class));

    OBJC_LOCK(&classLock);

    class_hash = objc_getClasses ();

    // Remove both the imposter and the original class.
    NXHashRemove (class_hash, imposter);
    NXHashRemove (class_hash, original);

    NXHashInsert (class_hash, copy);
    addClassToOriginalClass (imposter, copy);

    // Mark the imposter as such
    _class_setInfo(imposter, CLS_POSING);
    _class_setInfo(imposter->isa, CLS_POSING);

    // Change the name of the imposter to that of the original class.
    ((struct objc_class *)imposter)->name      = ((struct objc_class *)original)->name;
    ((struct objc_class *)imposter)->isa->name = ((struct objc_class *)original)->isa->name;

    // Also copy the version field to avoid archiving problems.
    ((struct objc_class *)imposter)->version = ((struct objc_class *)original)->version;

    // Change all subclasses of the original to point to the imposter.
    state = NXInitHashState (class_hash);
    while (NXNextHashState (class_hash, &state, (void **) &clsObject))
        while ((clsObject) && (clsObject != imposter) &&
               (clsObject != copy))
            if (clsObject->super_class == original)
                clsObject->super_class = imposter;
                clsObject->isa->super_class = ((struct objc_class *)imposter)->isa;
                // We must flush caches here!

            clsObject = clsObject->super_class;

#ifdef OBJC_CLASS_REFS
    // Replace the original with the imposter in all class refs
    // Major loop - process all headers
    for (hInfo = _objc_headerStart(); hInfo != NULL; hInfo = hInfo->next)
        unsigned int refCount;

        // Get refs associated with this header
        cls_refs = (Class *) _getObjcClassRefs ((headerType *) hInfo->mhdr, &refCount);
        if (!cls_refs || !refCount)

        // Minor loop - process this header's refs
        cls_refs = (Class *) ((unsigned long) cls_refs + hInfo->image_slide);
        for (index = 0; index < refCount; index += 1)
            if (cls_refs[index] == original)
                cls_refs[index] = imposter;
#endif // OBJC_CLASS_REFS

    // Change the name of the original class.
    ((struct objc_class *)original)->name      = imposterNamePtr + 1;
    ((struct objc_class *)original)->isa->name = imposterNamePtr;

    // Restore the imposter and the original class with their new names.
    NXHashInsert (class_hash, imposter);
    NXHashInsert (class_hash, original);

    OBJC_UNLOCK(&classLock);
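/***********************************************************************
* Usage sketch (illustrative): posing is normally reached through the
* +poseAs:/+poseAsClass: class methods rather than by calling
* class_poseAs() directly. The class names are hypothetical; the
* imposter must be an immediate subclass of the original and must not
* declare new instance variables.
*
*   // [MyImposter poseAsClass: [OriginalClass class]];
*   // ...is roughly equivalent to:
*   class_poseAs((Class)objc_getClass("MyImposter"),
*                (Class)objc_getClass("OriginalClass"));
**********************************************************************/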
/***********************************************************************
* _freedHandler.
**********************************************************************/
static void _freedHandler (id self,
    __objc_error (self, _errFreedObject, SELNAME(sel), self);

/***********************************************************************
* _nonexistentHandler.
**********************************************************************/
static void _nonexistentHandler (id self,
    __objc_error (self, _errNonExistentObject, SELNAME(sel), self);

/***********************************************************************
* class_respondsToMethod.
*
* Called from -[Object respondsTo:] and +[Object instancesRespondTo:]
**********************************************************************/
BOOL class_respondsToMethod (Class cls,
    // No one responds to zero!

    imp = _cache_getImp(cls, sel);
        // Found method in cache.
        // If the cache entry is forward::, the class does not respond to sel.
        return (imp != &_objc_msgForward);

    // Handle cache miss
    OBJC_LOCK(&methodListLock);
    meth = _getMethod(cls, sel);
    OBJC_UNLOCK(&methodListLock);
        _cache_fill(cls, meth, sel);

    // Not implemented. Use _objc_msgForward.
    _cache_addForwardEntry(cls, sel);
/***********************************************************************
* class_lookupMethod.
*
* Called from -[Object methodFor:] and +[Object instanceMethodFor:]
**********************************************************************/
IMP class_lookupMethod (Class cls,
    // No one responds to zero!
        __objc_error(cls, _errBadSel, sel);

    imp = _cache_getImp(cls, sel);
    if (imp) return imp;

    // Handle cache miss
    return _class_lookupMethodAndLoadCache (cls, sel);

/***********************************************************************
* lookupNamedMethodInMethodList
* Only called to find +load/-.cxx_construct/-.cxx_destruct methods,
* without fixing up the entire method list.
* The class is not yet in use, so methodListLock is not taken.
**********************************************************************/
__private_extern__ IMP lookupNamedMethodInMethodList(struct objc_method_list *mlist, const char *meth_name)
    Method m = meth_name ? _findNamedMethodInList(mlist, meth_name) : NULL;
    return (m ? m->method_imp : NULL);
1669 /***********************************************************************
1672 * Called from _cache_create() and cache_expand()
1673 * Cache locks: cacheUpdateLock must be held by the caller.
1674 **********************************************************************/
1675 static Cache _cache_malloc(int slotCount)
1680 // Allocate table (why not check for failure?)
1681 size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount);
1682 #ifdef OBJC_INSTRUMENTED
1683 // Custom cache allocator can't handle instrumentation.
1684 size += sizeof(CacheInstrumentation);
1685 new_cache = _calloc_internal(size, 1);
1686 new_cache->mask = slotCount - 1;
1688 if (size < CACHE_QUANTUM || UseInternalZone) {
1689 new_cache = _calloc_internal(size, 1);
1690 new_cache->mask = slotCount - 1;
1691 // occupied and buckets and instrumentation are all zero
1693 new_cache = cache_allocator_calloc(size);
1694 // mask is already set
1695 // occupied and buckets and instrumentation are all zero
1703 /***********************************************************************
1706 * Called from _cache_expand().
1707 * Cache locks: cacheUpdateLock must be held by the caller.
1708 **********************************************************************/
1709 Cache _cache_create (Class cls)
1714 // Select appropriate size
1715 slotCount = (ISMETA(cls)) ? INIT_META_CACHE_SIZE : INIT_CACHE_SIZE;
1717 new_cache = _cache_malloc(slotCount);
1719 // Install the cache
1720 ((struct objc_class *)cls)->cache = new_cache;
1722 // Clear the cache flush flag so that we will not flush this cache
1723 // before expanding it for the first time.
1724 _class_clearInfo(cls, CLS_FLUSH_CACHE);
1726 // Clear the grow flag so that we will re-use the current storage,
1727 // rather than actually grow the cache, when expanding the cache
1728 // for the first time
1729 if (_class_slow_grow)
1730 _class_clearInfo(cls, CLS_GROW_CACHE);
1732 // Return our creation
1736 /***********************************************************************
1739 * Called from _cache_fill ()
1740 * Cache locks: cacheUpdateLock must be held by the caller.
1741 **********************************************************************/
1742 static Cache _cache_expand (Class cls)
1746 unsigned int slotCount;
1749 // First growth goes from emptyCache to a real one
1750 old_cache = ((struct objc_class *)cls)->cache;
1751 if (old_cache == &emptyCache)
1752 return _cache_create (cls);
1754 // iff _class_slow_grow, trade off actual cache growth with re-using
1755 // the current one, so that growth only happens every odd time
1756 if (_class_slow_grow)
1758 // CLS_GROW_CACHE controls every-other-time behavior. If it
1759 // is non-zero, let the cache grow this time, but clear the
1760 // flag so the cache is reused next time
1761 if ((((struct objc_class * )cls)->info & CLS_GROW_CACHE) != 0)
1762 _class_clearInfo(cls, CLS_GROW_CACHE);
1764 // Reuse the current cache storage this time
1767 // Clear the valid-entry counter
1768 old_cache->occupied = 0;
1770 // Invalidate all the cache entries
1771 for (index = 0; index < old_cache->mask + 1; index += 1)
1773 // Remember what this entry was, so we can possibly
1774 // deallocate it after the bucket has been invalidated
1775 Method oldEntry = old_cache->buckets[index];
1776 // Skip invalid entry
1777 if (!CACHE_BUCKET_VALID(old_cache->buckets[index]))
1780 // Invalidate this entry
1781 CACHE_BUCKET_VALID(old_cache->buckets[index]) = NULL;
1783 // Deallocate "forward::" entry
1784 if (CACHE_BUCKET_IMP(oldEntry) == &_objc_msgForward)
1786 _cache_collect_free (oldEntry, sizeof(struct objc_method), NO);
1790 // Set the slow growth flag so the cache is next grown
1791 _class_setInfo(cls, CLS_GROW_CACHE);
1793 // Return the same old cache, freshly emptied
1799 // Double the cache size
1800 slotCount = (old_cache->mask + 1) << 1;
1802 new_cache = _cache_malloc(slotCount);
1804 #ifdef OBJC_INSTRUMENTED
1805 // Propagate the instrumentation data
1807 CacheInstrumentation * oldCacheData;
1808 CacheInstrumentation * newCacheData;
1810 oldCacheData = CACHE_INSTRUMENTATION(old_cache);
1811 newCacheData = CACHE_INSTRUMENTATION(new_cache);
1812 bcopy ((const char *)oldCacheData, (char *)newCacheData, sizeof(CacheInstrumentation));
1816 // iff _class_uncache, copy old cache entries into the new cache
1817 if (_class_uncache == 0)
1821 newMask = new_cache->mask;
1823 // Look at all entries in the old cache
1824 for (index = 0; index < old_cache->mask + 1; index += 1)
1828 // Skip invalid entry
1829 if (!CACHE_BUCKET_VALID(old_cache->buckets[index]))
1832 // Hash the old entry into the new table
1833 index2 = CACHE_HASH(CACHE_BUCKET_NAME(old_cache->buckets[index]),
1836 // Find an available spot, at or following the hashed spot;
1837 // Guaranteed not to loop forever, because the table has grown
1840 if (!CACHE_BUCKET_VALID(new_cache->buckets[index2]))
1842 new_cache->buckets[index2] = old_cache->buckets[index];
1850 // Account for the addition
1851 new_cache->occupied += 1;
1854 // Set the cache flush flag so that we will flush this cache
1855 // before expanding it again.
1856 _class_setInfo(cls, CLS_FLUSH_CACHE);
1859 // Deallocate "forward::" entries from the old cache
1862 for (index = 0; index < old_cache->mask + 1; index += 1)
1864 if (CACHE_BUCKET_VALID(old_cache->buckets[index]) &&
1865 CACHE_BUCKET_IMP(old_cache->buckets[index]) == &_objc_msgForward)
1867 _cache_collect_free (old_cache->buckets[index], sizeof(struct objc_method), NO);
1872 // Install new cache
1873 ((struct objc_class *)cls)->cache = new_cache;
1875 // Deallocate old cache, try freeing all the garbage
1876 _cache_collect_free (old_cache, old_cache->mask * sizeof(Method), YES);
1880 /***********************************************************************
1881 * instrumentObjcMessageSends/logObjcMessageSends.
1882 **********************************************************************/
1883 static int LogObjCMessageSend (BOOL isClassMethod,
1884 const char * objectsClass,
1885 const char * implementingClass,
1890 // Create/open the log file
1891 if (objcMsgLogFD == (-1))
1893 snprintf (buf, sizeof(buf), "/tmp/msgSends-%d", (int) getpid ());
1894 objcMsgLogFD = secure_open (buf, O_WRONLY | O_CREAT, geteuid());
1895 if (objcMsgLogFD < 0) {
1896 // no log file - disable logging
1897 objcMsgLogEnabled = 0;
1903 // Make the log entry
1904 snprintf(buf, sizeof(buf), "%c %s %s %s\n",
1905 isClassMethod ? '+' : '-',
1910 write (objcMsgLogFD, buf, strlen(buf));
1912 // Tell the caller not to cache the method
1916 void instrumentObjcMessageSends (BOOL flag)
1918 int enabledValue = (flag) ? 1 : 0;
1921 if (objcMsgLogEnabled == enabledValue)
1924 // If enabling, flush all method caches so we get some traces
1926 flush_caches (Nil, YES);
1928 // Sync our log file
1929 if (objcMsgLogFD != (-1))
1930 fsync (objcMsgLogFD);
1932 objcMsgLogEnabled = enabledValue;
1935 void logObjcMessageSends (ObjCLogProc logProc)
1939 objcMsgLogProc = logProc;
1940 objcMsgLogEnabled = 1;
1944 objcMsgLogProc = logProc;
1945 objcMsgLogEnabled = 0;
1948 if (objcMsgLogFD != (-1))
1949 fsync (objcMsgLogFD);
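/***********************************************************************
* Editorial usage sketch (not part of the original source).
* Typical way to drive the logger above from client code: bracket the
* region of interest, then inspect /tmp/msgSends-<pid>, whose lines are
* produced by LogObjCMessageSend() above. Names below are hypothetical.
**********************************************************************/
#if 0
extern void instrumentObjcMessageSends(BOOL flag);   // defined in this file

static void toy_trace_region(void (*work)(void))
{
    instrumentObjcMessageSends(YES);   // also flushes caches so sends are seen
    work();                            // exercise the code being traced
    instrumentObjcMessageSends(NO);    // stop logging; the log file is fsync'd
}
#endif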
1953 /***********************************************************************
1954 * _cache_fill. Add the specified method to the specified class' cache.
1955 * Returns NO if the cache entry wasn't added: cache was busy,
1956 * class is still being initialized, new entry is a duplicate.
1958 * Called only from _class_lookupMethodAndLoadCache and
1959 * class_respondsToMethod and _cache_addForwardEntry.
1961 * Cache locks: cacheUpdateLock must not be held.
1962 **********************************************************************/
1963 static BOOL _cache_fill(Class cls, Method smt, SEL sel)
1965 unsigned int newOccupied;
1970 // Never cache before +initialize is done
1971 if (!ISINITIALIZED(cls)) {
1975 // Keep tally of cache additions
1976 totalCacheFills += 1;
1978 OBJC_LOCK(&cacheUpdateLock);
1980 cache = ((struct objc_class *)cls)->cache;
1982 // Check for duplicate entries, if we're in the mode
1983 if (traceDuplicates)
1986 arith_t mask = cache->mask;
1987 buckets = cache->buckets;
1990 for (index2 = 0; index2 < mask + 1; index2 += 1)
1992 // Skip invalid or non-duplicate entry
1993 if ((!CACHE_BUCKET_VALID(buckets[index2])) ||
1994 (strcmp ((char *) CACHE_BUCKET_NAME(buckets[index2]), (char *) smt->method_name) != 0))
1997 // Tally duplication, but report iff wanted
1998 cacheFillDuplicates += 1;
1999 if (traceDuplicatesVerbose)
2001 _objc_inform ("Cache fill duplicate #%d: found %x adding %x: %s\n",
2002 cacheFillDuplicates,
2003 (unsigned int) CACHE_BUCKET_NAME(buckets[index2]),
2004 (unsigned int) smt->method_name,
2005 (char *) smt->method_name);
2010 // Make sure the entry wasn't added to the cache by some other thread
2011 // before we grabbed the cacheUpdateLock.
2012 // Don't use _cache_getMethod() because _cache_getMethod() doesn't
2013 // return forward:: entries.
2014 if (_cache_getImp(cls, sel)) {
2015 OBJC_UNLOCK(&cacheUpdateLock);
2016 return NO; // entry is already cached, didn't add new one
2019 // Use the cache as-is if it is less than 3/4 full
2020 newOccupied = cache->occupied + 1;
2021 if ((newOccupied * 4) <= (cache->mask + 1) * 3) {
2022 // Cache is less than 3/4 full.
2023 cache->occupied = newOccupied;
2025 // Cache is too full. Flush it or expand it.
2026 if ((((struct objc_class * )cls)->info & CLS_FLUSH_CACHE) != 0) {
2029 cache = _cache_expand (cls);
2032 // Account for the addition
2033 cache->occupied += 1;
2036 // Insert the new entry. This can be done by either:
2037 // (a) Scanning for the first unused spot. Easy!
2038 // (b) Opening up an unused spot by sliding existing
2039 // entries down by one. The benefit of this
2040 // extra work is that it puts the most recently
2041 // loaded entries closest to where the selector
2042 // hash starts the search.
2044 // The loop is a little more complicated because there
2045 // are two kinds of entries, so there have to be two ways
2047 buckets = cache->buckets;
2048 index = CACHE_HASH(sel, cache->mask);
2051 // Slide existing entries down by one
2054 // Copy current entry to a local
2055 saveMethod = buckets[index];
2057 // Copy previous entry (or new entry) to current slot
2058 buckets[index] = smt;
2060 // Done if current slot had been invalid
2061 if (saveMethod == NULL)
2064 // Prepare to copy saved value into next slot
2067 // Move on to next slot
2069 index &= cache->mask;
2072 OBJC_UNLOCK(&cacheUpdateLock);
2074 return YES; // successfully added new cache entry
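/***********************************************************************
* Editorial sketch (not part of the original source).
* A minimal, self-contained model of the slide-down insertion performed
* above: hash the key, then displace existing entries forward by one slot
* so the newest entry sits closest to where probing starts. Assumes a
* power-of-two table that is never full, as _cache_fill guarantees.
* All names below are hypothetical.
**********************************************************************/
#if 0
#include <stddef.h>

typedef struct { const char *key; void *value; } toy_bucket;

static void toy_cache_insert(toy_bucket *buckets, unsigned mask,
                             const char *key, void *value)
{
    unsigned index = (unsigned)((unsigned long)key >> 2) & mask;  // toy hash
    toy_bucket incoming = { key, value };
    for (;;) {
        toy_bucket saved = buckets[index];   // remember current occupant
        buckets[index] = incoming;           // drop the incoming entry here
        if (saved.key == NULL) return;       // slot was empty - done
        incoming = saved;                    // keep sliding the displaced one
        index = (index + 1) & mask;          // next slot, wrapping around
    }
}
#endif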
2078 /***********************************************************************
2079 * _cache_addForwardEntry
2080 * Add a forward:: entry for the given selector to cls's method cache.
2081 * Does nothing if the cache addition fails for any reason.
2082 * Called from class_respondsToMethod and _class_lookupMethodAndLoadCache.
2083 * Cache locks: cacheUpdateLock must not be held.
2084 **********************************************************************/
2085 static void _cache_addForwardEntry(Class cls, SEL sel)
2089 smt = _malloc_internal(sizeof(struct objc_method));
2090 smt->method_name = sel;
2091 smt->method_types = "";
2092 smt->method_imp = &_objc_msgForward;
2093 if (! _cache_fill(cls, smt, sel)) {
2094 // Entry not added to cache. Don't leak the method struct.
2095 _free_internal(smt);
2100 /***********************************************************************
2101 * _cache_flush. Invalidate all valid entries in the given class' cache,
2102 * and clear the CLS_FLUSH_CACHE in the cls->info.
2104 * Called from flush_caches() and _cache_fill()
2105 * Cache locks: cacheUpdateLock must be held by the caller.
2106 **********************************************************************/
2107 static void _cache_flush (Class cls)
2112 // Locate cache. Ignore unused cache.
2113 cache = ((struct objc_class *)cls)->cache;
2114 if (cache == NULL || cache == &emptyCache)
2117 #ifdef OBJC_INSTRUMENTED
2119 CacheInstrumentation * cacheData;
2122 cacheData = CACHE_INSTRUMENTATION(cache);
2123 cacheData->flushCount += 1;
2124 cacheData->flushedEntries += cache->occupied;
2125 if (cache->occupied > cacheData->maxFlushedEntries)
2126 cacheData->maxFlushedEntries = cache->occupied;
2130 // Traverse the cache
2131 for (index = 0; index <= cache->mask; index += 1)
2133 // Remember what this entry was, so we can possibly
2134 // deallocate it after the bucket has been invalidated
2135 Method oldEntry = cache->buckets[index];
2137 // Invalidate this entry
2138 CACHE_BUCKET_VALID(cache->buckets[index]) = NULL;
2140 // Deallocate "forward::" entry
2141 if (oldEntry && oldEntry->method_imp == &_objc_msgForward)
2142 _cache_collect_free (oldEntry, sizeof(struct objc_method), NO);
2145 // Clear the valid-entry counter
2146 cache->occupied = 0;
2148 // Clear the cache flush flag so that we will not flush this cache
2149 // before expanding it again.
2150 _class_clearInfo(cls, CLS_FLUSH_CACHE);
2153 /***********************************************************************
2154 * _objc_getFreedObjectClass. Return a pointer to the dummy freed
2155 * object class. Freed objects get their isa pointers replaced with
2156 * a pointer to the freedObjectClass, so that we can catch usages of
2158 **********************************************************************/
2159 Class _objc_getFreedObjectClass (void)
2161 return (Class) &freedObjectClass;
2164 /***********************************************************************
2165 * _objc_getNonexistentClass. Return a pointer to the dummy nonexistent
2166 * object class. This is used when, for example, mapping the class
2167 * refs for an image and the class cannot be found, so that we can
2168 * catch later uses of the non-existent class.
2169 **********************************************************************/
2170 Class _objc_getNonexistentClass (void)
2172 return (Class) &nonexistentObjectClass;
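/***********************************************************************
* Editorial sketch (not part of the original source).
* How a deallocation path can exploit the dummy classes above: poisoning a
* dead object's isa makes any later message to it land in _freedHandler
* instead of silently corrupting memory. The function name is hypothetical.
**********************************************************************/
#if 0
static void toy_poison_freed_object(id obj)
{
    ((struct objc_object *)obj)->isa = _objc_getFreedObjectClass();
    // The storage may now be recycled; a stale [obj message] will be
    // dispatched through freedObjectClass and reported by _freedHandler.
}
#endif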
2176 /***********************************************************************
2177 * struct _objc_initializing_classes
2178 * Per-thread list of classes currently being initialized by that thread.
2179 * During initialization, that thread is allowed to send messages to that
2180 * class, but other threads have to wait.
2181 * The list is a simple array of metaclasses (the metaclass stores
2182 * the initialization state).
2183 **********************************************************************/
2184 typedef struct _objc_initializing_classes {
2185 int classesAllocated;
2186 struct objc_class** metaclasses;
2187 } _objc_initializing_classes;
2190 /***********************************************************************
2191 * _fetchInitializingClassList
2192 * Return the list of classes being initialized by this thread.
2193 * If create == YES, create the list when no classes are being initialized by this thread.
2194 * If create == NO, return NULL when no classes are being initialized by this thread.
2195 **********************************************************************/
2196 static _objc_initializing_classes *_fetchInitializingClassList(BOOL create)
2198 _objc_pthread_data *data;
2199 _objc_initializing_classes *list;
2200 struct objc_class **classes;
2202 data = pthread_getspecific(_objc_pthread_key);
2207 data = _calloc_internal(1, sizeof(_objc_pthread_data));
2208 pthread_setspecific(_objc_pthread_key, data);
2212 list = data->initializingClasses;
2217 list = _calloc_internal(1, sizeof(_objc_initializing_classes));
2218 data->initializingClasses = list;
2222 classes = list->metaclasses;
2223 if (classes == NULL) {
2224 // If _objc_initializing_classes exists, allocate metaclass array,
2225 // even if create == NO.
2226 // Allow 4 simultaneous class inits on this thread before realloc.
2227 list->classesAllocated = 4;
2228 classes = _calloc_internal(list->classesAllocated, sizeof(struct objc_class *));
2229 list->metaclasses = classes;
2235 /***********************************************************************
2236 * _destroyInitializingClassList
2237 * Deallocate memory used by the given initialization list.
2238 * Any part of the list may be NULL.
2239 * Called from _objc_pthread_destroyspecific().
2240 **********************************************************************/
2241 void _destroyInitializingClassList(_objc_initializing_classes *list)
2244 if (list->metaclasses != NULL) {
2245 _free_internal(list->metaclasses);
2247 _free_internal(list);
2252 /***********************************************************************
2253 * _thisThreadIsInitializingClass
2254 * Return TRUE if this thread is currently initializing the given class.
2255 **********************************************************************/
2256 static BOOL _thisThreadIsInitializingClass(struct objc_class *cls)
2260 _objc_initializing_classes *list = _fetchInitializingClassList(NO);
2263 for (i = 0; i < list->classesAllocated; i++) {
2264 if (cls == list->metaclasses[i]) return YES;
2268 // no list or not found in list
2273 /***********************************************************************
2274 * _setThisThreadIsInitializingClass
2275 * Record that this thread is currently initializing the given class.
2276 * This thread will be allowed to send messages to the class, but
2277 * other threads will have to wait.
2278 **********************************************************************/
2279 static void _setThisThreadIsInitializingClass(struct objc_class *cls)
2282 _objc_initializing_classes *list = _fetchInitializingClassList(YES);
2285 // paranoia: explicitly disallow duplicates
2286 for (i = 0; i < list->classesAllocated; i++) {
2287 if (cls == list->metaclasses[i]) {
2288 _objc_fatal("thread is already initializing this class!");
2289 return; // already the initializer
2293 for (i = 0; i < list->classesAllocated; i++) {
2294 if (0 == list->metaclasses[i]) {
2295 list->metaclasses[i] = cls;
2300 // class list is full - reallocate
2301 list->classesAllocated = list->classesAllocated * 2 + 1;
2302 list->metaclasses = _realloc_internal(list->metaclasses, list->classesAllocated * sizeof(struct objc_class *));
2303 // zero out the new entries
2304 list->metaclasses[i++] = cls;
2305 for ( ; i < list->classesAllocated; i++) {
2306 list->metaclasses[i] = NULL;
2311 /***********************************************************************
2312 * _setThisThreadIsNotInitializingClass
2313 * Record that this thread is no longer initializing the given class.
2314 **********************************************************************/
2315 static void _setThisThreadIsNotInitializingClass(struct objc_class *cls)
2319 _objc_initializing_classes *list = _fetchInitializingClassList(NO);
2322 for (i = 0; i < list->classesAllocated; i++) {
2323 if (cls == list->metaclasses[i]) {
2324 list->metaclasses[i] = NULL;
2330 // no list or not found in list
2331 _objc_fatal("thread is not initializing this class!");
2335 /***********************************************************************
2336 * class_initialize. Send the '+initialize' message on demand to any
2337 * uninitialized class. Force initialization of superclasses first.
2339 * Called only from _class_lookupMethodAndLoadCache (or itself).
2340 **********************************************************************/
2341 static void class_initialize(struct objc_class *cls)
2343 struct objc_class *infoCls = GETMETA(cls);
2344 BOOL reallyInitialize = NO;
2346 // Get the real class from the metaclass. The superclass chain
2347 // hangs off the real class only.
2350 if (strncmp(cls->name, "_%", 2) == 0) {
2351 // Posee's meta's name is smashed and isn't in the class_hash,
2352 // so objc_getClass doesn't work.
2353 char *baseName = strchr(cls->name, '%'); // get posee's real name
2354 cls = objc_getClass(baseName);
2356 cls = objc_getClass(cls->name);
2360 // Make sure super is done initializing BEFORE beginning to initialize cls.
2361 // See note about deadlock above.
2362 if (cls->super_class && !ISINITIALIZED(cls->super_class)) {
2363 class_initialize(cls->super_class);
2366 // Try to atomically set CLS_INITIALIZING.
2367 pthread_mutex_lock(&classInitLock);
2368 if (!ISINITIALIZED(cls) && !ISINITIALIZING(cls)) {
2369 _class_setInfo(infoCls, CLS_INITIALIZING);
2370 reallyInitialize = YES;
2372 pthread_mutex_unlock(&classInitLock);
2374 if (reallyInitialize) {
2375 // We successfully set the CLS_INITIALIZING bit. Initialize the class.
2377 // Record that we're initializing this class so we can message it.
2378 _setThisThreadIsInitializingClass(cls);
2380 // Send the +initialize message.
2381 // Note that +initialize is sent to the superclass (again) if
2382 // this class doesn't implement +initialize. 2157218
2383 [(id)cls initialize];
2385 // Done initializing. Update the info bits and notify waiting threads.
2386 pthread_mutex_lock(&classInitLock);
2387 _class_changeInfo(infoCls, CLS_INITIALIZED, CLS_INITIALIZING);
2388 pthread_cond_broadcast(&classInitWaitCond);
2389 pthread_mutex_unlock(&classInitLock);
2390 _setThisThreadIsNotInitializingClass(cls);
2394 else if (ISINITIALIZING(cls)) {
2395 // We couldn't set INITIALIZING because INITIALIZING was already set.
2396 // If this thread set it earlier, continue normally.
2397 // If some other thread set it, block until initialize is done.
2398 // It's ok if INITIALIZING changes to INITIALIZED while we're here,
2399 // because we safely check for INITIALIZED inside the lock
2401 if (_thisThreadIsInitializingClass(cls)) {
2404 pthread_mutex_lock(&classInitLock);
2405 while (!ISINITIALIZED(cls)) {
2406 pthread_cond_wait(&classInitWaitCond, &classInitLock);
2408 pthread_mutex_unlock(&classInitLock);
2413 else if (ISINITIALIZED(cls)) {
2414 // Set CLS_INITIALIZING failed because someone else already
2415 // initialized the class. Continue normally.
2416 // NOTE this check must come AFTER the ISINITIALIZING case.
2417 // Otherwise: Another thread is initializing this class. ISINITIALIZED
2418 // is false. Skip this clause. Then the other thread finishes
2419 // initialization and sets INITIALIZING=no and INITIALIZED=yes.
2420 // Skip the ISINITIALIZING clause. Die horribly.
2425 // We shouldn't be here.
2426 _objc_fatal("thread-safe class init in objc runtime is buggy!");
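/***********************************************************************
* Editorial sketch (not part of the original source).
* The thread-safe initialize-once pattern used by class_initialize above,
* reduced to a generic mutex + condition variable skeleton. It omits the
* per-thread "initializing" list that lets the initializing thread message
* the class re-entrantly, which is the extra wrinkle the real code handles.
* All names below are hypothetical.
**********************************************************************/
#if 0
#include <pthread.h>

enum { TOY_IDLE, TOY_INITIALIZING, TOY_INITIALIZED };
static pthread_mutex_t toy_init_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  toy_init_cond = PTHREAD_COND_INITIALIZER;

static void toy_init_once(int *state, void (*do_init)(void))
{
    int iAmTheInitializer = 0;

    // Try to atomically claim the INITIALIZING state.
    pthread_mutex_lock(&toy_init_lock);
    if (*state == TOY_IDLE) {
        *state = TOY_INITIALIZING;
        iAmTheInitializer = 1;
    }
    pthread_mutex_unlock(&toy_init_lock);

    if (iAmTheInitializer) {
        do_init();                               // run without holding the lock
        pthread_mutex_lock(&toy_init_lock);
        *state = TOY_INITIALIZED;
        pthread_cond_broadcast(&toy_init_cond);  // wake any waiting threads
        pthread_mutex_unlock(&toy_init_lock);
    } else {
        // Someone else is (or was) initializing: wait until they finish.
        pthread_mutex_lock(&toy_init_lock);
        while (*state != TOY_INITIALIZED)
            pthread_cond_wait(&toy_init_cond, &toy_init_lock);
        pthread_mutex_unlock(&toy_init_lock);
    }
}
#endif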
2431 /***********************************************************************
2432 * _class_lookupMethodAndLoadCache.
2434 * Called only from objc_msgSend, objc_msgSendSuper and class_lookupMethod.
2435 **********************************************************************/
2436 IMP _class_lookupMethodAndLoadCache (Class cls,
2439 struct objc_class * curClass;
2441 IMP methodPC = NULL;
2443 trace(0xb300, 0, 0, 0);
2445 // Check for freed class
2446 if (cls == &freedObjectClass)
2447 return (IMP) _freedHandler;
2449 // Check for nonexistent class
2450 if (cls == &nonexistentObjectClass)
2451 return (IMP) _nonexistentHandler;
2453 trace(0xb301, 0, 0, 0);
2455 if (!ISINITIALIZED(cls)) {
2456 class_initialize ((struct objc_class *)cls);
2457 // If sel == initialize, class_initialize will send +initialize and
2458 // then the messenger will send +initialize again after this
2459 // procedure finishes. Of course, if this is not being called
2460 // from the messenger then it won't happen. 2778172
2463 trace(0xb302, 0, 0, 0);
2465 // Outer loop - search the caches and method lists of the
2466 // class and its super-classes
2467 for (curClass = cls; curClass; curClass = ((struct objc_class * )curClass)->super_class)
2469 #ifdef PRELOAD_SUPERCLASS_CACHES
2470 struct objc_class *curClass2;
2473 trace(0xb303, 0, 0, 0);
2475 // Beware of thread-unsafety and double-freeing of forward::
2476 // entries here! See note in "Method cache locking" above.
2477 // The upshot is that _cache_getMethod() will return NULL
2478 // instead of returning a forward:: entry.
2479 meth = _cache_getMethod(curClass, sel, &_objc_msgForward);
2481 // Found the method in this class or a superclass.
2482 // Cache the method in this class, unless we just found it in
2483 // this class's cache.
2484 if (curClass != cls) {
2485 #ifdef PRELOAD_SUPERCLASS_CACHES
2486 for (curClass2 = cls; curClass2 != curClass; curClass2 = curClass2->super_class)
2487 _cache_fill (curClass2, meth, sel);
2488 _cache_fill (curClass, meth, sel);
2490 _cache_fill (cls, meth, sel);
2494 methodPC = meth->method_imp;
2498 trace(0xb304, (int)methodPC, 0, 0);
2500 // Cache scan failed. Search method list.
2502 OBJC_LOCK(&methodListLock);
2503 meth = _findMethodInClass(curClass, sel);
2504 OBJC_UNLOCK(&methodListLock);
2506 // If logging is enabled, log the message send and let
2507 // the logger decide whether to encache the method.
2508 if ((objcMsgLogEnabled == 0) ||
2509 (objcMsgLogProc (CLS_GETINFO(((struct objc_class * )curClass),
2510 CLS_META) ? YES : NO,
2511 ((struct objc_class *)cls)->name,
2512 curClass->name, sel)))
2514 // Cache the method implementation
2515 #ifdef PRELOAD_SUPERCLASS_CACHES
2516 for (curClass2 = cls; curClass2 != curClass; curClass2 = curClass2->super_class)
2517 _cache_fill (curClass2, meth, sel);
2518 _cache_fill (curClass, meth, sel);
2520 _cache_fill (cls, meth, sel);
2524 methodPC = meth->method_imp;
2528 trace(0xb305, (int)methodPC, 0, 0);
2531 trace(0xb306, (int)methodPC, 0, 0);
2533 if (methodPC == NULL)
2535 // Class and superclasses do not respond -- use forwarding
2536 _cache_addForwardEntry(cls, sel);
2537 methodPC = &_objc_msgForward;
2540 trace(0xb30f, (int)methodPC, 0, 0);
2546 /***********************************************************************
2547 * lookupMethodInClassAndLoadCache.
2548 * Like _class_lookupMethodAndLoadCache, but does not search superclasses.
2549 * Caches and returns objc_msgForward if the method is not found in the class.
2550 **********************************************************************/
2551 static IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel)
2556 // Search cache first.
2557 imp = _cache_getImp(cls, sel);
2558 if (imp) return imp;
2560 // Cache miss. Search method list.
2562 OBJC_LOCK(&methodListLock);
2563 meth = _findMethodInClass(cls, sel);
2564 OBJC_UNLOCK(&methodListLock);
2567 // Hit in method list. Cache it.
2568 _cache_fill(cls, meth, sel);
2569 return meth->method_imp;
2571 // Miss in method list. Cache objc_msgForward.
2572 _cache_addForwardEntry(cls, sel);
2573 return &_objc_msgForward;
2579 /***********************************************************************
2581 * Atomically sets and clears some bits in cls's info field.
2582 * set and clear must not overlap.
2583 **********************************************************************/
2584 static pthread_mutex_t infoLock = PTHREAD_MUTEX_INITIALIZER;
2585 __private_extern__ void _class_changeInfo(struct objc_class *cls,
2586 long set, long clear)
2588 pthread_mutex_lock(&infoLock);
2589 cls->info = (cls->info | set) & ~clear;
2590 pthread_mutex_unlock(&infoLock);
2594 /***********************************************************************
2596 * Atomically sets some bits in cls's info field.
2597 **********************************************************************/
2598 __private_extern__ void _class_setInfo(struct objc_class *cls, long set)
2600 _class_changeInfo(cls, set, 0);
2604 /***********************************************************************
2606 * Atomically clears some bits in cls's info field.
2607 **********************************************************************/
2608 __private_extern__ void _class_clearInfo(struct objc_class *cls, long clear)
2610 _class_changeInfo(cls, 0, clear);
2614 /***********************************************************************
2618 **********************************************************************/
2619 static int SubtypeUntil (const char * type,
2623 const char * head = type;
2628 if (!*type || (!level && (*type == end)))
2629 return (int)(type - head);
2633 case ']': case '}': case ')': level--; break;
2634 case '[': case '{': case '(': level += 1; break;
2640 _objc_fatal ("Object: SubtypeUntil: end of type encountered prematurely\n");
2644 /***********************************************************************
2646 **********************************************************************/
2647 static const char * SkipFirstType (const char * type)
2653 case 'O': /* bycopy */
2656 case 'N': /* inout */
2657 case 'r': /* const */
2658 case 'V': /* oneway */
2659 case '^': /* pointers */
2664 while ((*type >= '0') && (*type <= '9'))
2666 return type + SubtypeUntil (type, ']') + 1;
2670 return type + SubtypeUntil (type, '}') + 1;
2674 return type + SubtypeUntil (type, ')') + 1;
2683 /***********************************************************************
2684 * method_getNumberOfArguments.
2685 **********************************************************************/
2686 unsigned method_getNumberOfArguments (Method method)
2688 const char * typedesc;
2691 // First, skip the return type
2692 typedesc = method->method_types;
2693 typedesc = SkipFirstType (typedesc);
2695 // Next, skip stack size
2696 while ((*typedesc >= '0') && (*typedesc <= '9'))
2699 // Now, we have the arguments - count how many
2703 // Traverse argument type
2704 typedesc = SkipFirstType (typedesc);
2706 // Skip GNU runtime's register parameter hint
2707 if (*typedesc == '+') typedesc++;
2709 // Traverse (possibly negative) argument offset
2710 if (*typedesc == '-')
2712 while ((*typedesc >= '0') && (*typedesc <= '9'))
2715 // Made it past an argument
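/***********************************************************************
* Editorial worked example (not part of the original source).
* On a 32-bit build, -(void)setWidth:(int)w typically encodes as
*     "v12@0:4i8"
* and the parsing above reads it as:
*     'v'       return type (skipped by SkipFirstType)
*     "12"      total argument bytes (skipped digit by digit)
*     '@' "0"   self   at offset 0  -> argument #1
*     ':' "4"   _cmd   at offset 4  -> argument #2
*     'i' "8"   int w  at offset 8  -> argument #3
* so method_getNumberOfArguments() reports 3, self and _cmd included.
* The offsets and the stack-size figure are examples only; they vary by
* architecture and compiler.
**********************************************************************/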
2722 /***********************************************************************
2723 * method_getSizeOfArguments.
2724 **********************************************************************/
2726 unsigned method_getSizeOfArguments (Method method)
2728 const char * typedesc;
2729 unsigned stack_size;
2730 #if defined(__ppc__) || defined(ppc)
2731 unsigned trueBaseOffset;
2732 unsigned foundBaseOffset;
2735 // Get our starting points
2737 typedesc = method->method_types;
2739 // Skip the return type
2740 #if defined (__ppc__) || defined(ppc)
2741 // Struct returns cause the parameters to be bumped
2742 // by a register, so the offset to the receiver is
2743 // 4 instead of the normal 0.
2744 trueBaseOffset = (*typedesc == '{') ? 4 : 0;
2746 typedesc = SkipFirstType (typedesc);
2748 // Convert ASCII number string to integer
2749 while ((*typedesc >= '0') && (*typedesc <= '9'))
2750 stack_size = (stack_size * 10) + (*typedesc++ - '0');
2751 #if defined (__ppc__) || defined(ppc)
2752 // NOTE: This is a temporary measure pending a compiler fix.
2753 // Work around PowerPC compiler bug wherein the method argument
2754 // string contains an incorrect value for the "stack size."
2755 // Generally, the size is reported 4 bytes too small, so we apply
2756 // that fudge factor. Unfortunately, there is at least one case
2757 // where the error is something other than -4: when the last
2758 // parameter is a double, the reported stack is much too high
2759 // (about 32 bytes). We do not attempt to detect that case.
2760 // The result of returning a too-high value is that objc_msgSendv
2761 // can bus error if the destination of the marg_list copying
2762 // butts up against excluded memory.
2763 // This fix disables itself when it sees a correctly built
2764 // type string (i.e. the offset for the Id is correct). This
2765 // keeps us out of lockstep with the compiler.
2767 // skip the '@' marking the Id field
2768 typedesc = SkipFirstType (typedesc);
2770 // Skip GNU runtime's register parameter hint
2771 if (*typedesc == '+') typedesc++;
2773 // pick up the offset for the Id field
2774 foundBaseOffset = 0;
2775 while ((*typedesc >= '0') && (*typedesc <= '9'))
2776 foundBaseOffset = (foundBaseOffset * 10) + (*typedesc++ - '0');
2778 // add fudge factor iff the Id field offset was wrong
2779 if (foundBaseOffset != trueBaseOffset)
2787 // XXX Getting the size of a type is done all over the place
2788 // (Here, Foundation, remote project)! - Should unify
2790 unsigned int getSizeOfType (const char * type, unsigned int * alignPtr);
2792 unsigned method_getSizeOfArguments (Method method)
2799 unsigned stack_size;
2802 nargs = method_getNumberOfArguments (method);
2803 stack_size = (*method->method_types == '{') ? sizeof(void *) : 0;
2805 for (index = 0; index < nargs; index += 1)
2807 (void) method_getArgumentInfo (method, index, &type, &offset);
2808 size = getSizeOfType (type, &align);
2809 stack_size += ((size + 7) & ~7);
2816 /***********************************************************************
2817 * method_getArgumentInfo.
2818 **********************************************************************/
2819 unsigned method_getArgumentInfo (Method method,
2824 const char * typedesc = method->method_types;
2826 unsigned self_offset = 0;
2827 BOOL offset_is_negative = NO;
2829 // First, skip the return type
2830 typedesc = SkipFirstType (typedesc);
2832 // Next, skip stack size
2833 while ((*typedesc >= '0') && (*typedesc <= '9'))
2836 // Now, we have the arguments - position typedesc to the appropriate argument
2837 while (*typedesc && nargs != arg)
2840 // Skip argument type
2841 typedesc = SkipFirstType (typedesc);
2845 // Skip GNU runtime's register parameter hint
2846 if (*typedesc == '+') typedesc++;
2848 // Skip negative sign in offset
2849 if (*typedesc == '-')
2851 offset_is_negative = YES;
2855 offset_is_negative = NO;
2857 while ((*typedesc >= '0') && (*typedesc <= '9'))
2858 self_offset = self_offset * 10 + (*typedesc++ - '0');
2859 if (offset_is_negative)
2860 self_offset = -(self_offset);
2866 // Skip GNU runtime's register parameter hint
2867 if (*typedesc == '+') typedesc++;
2869 // Skip (possibly negative) argument offset
2870 if (*typedesc == '-')
2872 while ((*typedesc >= '0') && (*typedesc <= '9'))
2881 unsigned arg_offset = 0;
2884 typedesc = SkipFirstType (typedesc);
2889 *offset = -sizeof(id);
2897 // Skip GNU register parameter hint
2898 if (*typedesc == '+') typedesc++;
2900 // Pick up (possibly negative) argument offset
2901 if (*typedesc == '-')
2903 offset_is_negative = YES;
2907 offset_is_negative = NO;
2909 while ((*typedesc >= '0') && (*typedesc <= '9'))
2910 arg_offset = arg_offset * 10 + (*typedesc++ - '0');
2911 if (offset_is_negative)
2912 arg_offset = - arg_offset;
2915 // For stacks which grow up, since margs points
2916 // to the top of the stack or the END of the args,
2917 // the first offset is at -sizeof(id) rather than 0.
2918 self_offset += sizeof(id);
2920 *offset = arg_offset - self_offset;
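/***********************************************************************
* Editorial usage sketch (not part of the original source).
* Walking a method's arguments with the accessors defined above, assuming
* the classic (method, index, &type, &offset) signature; 'm' stands for a
* Method obtained elsewhere from the runtime, and the function name is
* hypothetical.
**********************************************************************/
#if 0
#include <stdio.h>

static void toy_dump_arguments(Method m)
{
    unsigned int i, n = method_getNumberOfArguments(m);
    for (i = 0; i < n; i++) {
        const char *type = NULL;
        int offset = 0;
        method_getArgumentInfo(m, i, &type, &offset);
        printf("arg %u: type '%s' at offset %d\n", i, type ? type : "?", offset);
    }
}
#endif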
2934 /***********************************************************************
2935 * _objc_create_zone.
2936 **********************************************************************/
2938 void * _objc_create_zone (void)
2940 return malloc_default_zone();
2944 /***********************************************************************
2945 * _objc_internal_zone.
2946 * Malloc zone for internal runtime data.
2947 * By default this is the default malloc zone, but a dedicated zone is
2948 * used if environment variable OBJC_USE_INTERNAL_ZONE is set.
2949 **********************************************************************/
2950 __private_extern__ malloc_zone_t *_objc_internal_zone(void)
2952 static malloc_zone_t *z = (malloc_zone_t *)-1;
2953 if (z == (malloc_zone_t *)-1) {
2954 if (UseInternalZone) {
2955 z = malloc_create_zone(vm_page_size, 0);
2956 malloc_set_zone_name(z, "ObjC");
2958 z = malloc_default_zone();
2965 /***********************************************************************
2971 * Convenience functions for the internal malloc zone.
2972 **********************************************************************/
2973 __private_extern__ void *_malloc_internal(size_t size)
2975 return malloc_zone_malloc(_objc_internal_zone(), size);
2978 __private_extern__ void *_calloc_internal(size_t count, size_t size)
2980 return malloc_zone_calloc(_objc_internal_zone(), count, size);
2983 __private_extern__ void *_realloc_internal(void *ptr, size_t size)
2985 return malloc_zone_realloc(_objc_internal_zone(), ptr, size);
2988 __private_extern__ char *_strdup_internal(const char *str)
2990 size_t len = strlen(str);
2991 char *dup = malloc_zone_malloc(_objc_internal_zone(), len + 1);
2992 memcpy(dup, str, len + 1);
2996 __private_extern__ void _free_internal(void *ptr)
2998 malloc_zone_free(_objc_internal_zone(), ptr);
3003 /***********************************************************************
3005 **********************************************************************/
3007 static unsigned long _get_pc_for_thread (mach_port_t thread)
3010 struct hp_pa_frame_thread_state state;
3011 unsigned int count = HPPA_FRAME_THREAD_STATE_COUNT;
3012 kern_return_t okay = thread_get_state (thread, HPPA_FRAME_THREAD_STATE, (thread_state_t)&state, &count);
3013 return (okay == KERN_SUCCESS) ? state.ts_pcoq_front : PC_SENTINAL;
3015 #elif defined(sparc)
3017 struct sparc_thread_state_regs state;
3018 unsigned int count = SPARC_THREAD_STATE_REGS_COUNT;
3019 kern_return_t okay = thread_get_state (thread, SPARC_THREAD_STATE_REGS, (thread_state_t)&state, &count);
3020 return (okay == KERN_SUCCESS) ? state.regs.r_pc : PC_SENTINAL;
3022 #elif defined(__i386__) || defined(i386)
3024 i386_thread_state_t state;
3025 unsigned int count = i386_THREAD_STATE_COUNT;
3026 kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
3027 return (okay == KERN_SUCCESS) ? state.eip : PC_SENTINAL;
3031 struct m68k_thread_state_regs state;
3032 unsigned int count = M68K_THREAD_STATE_REGS_COUNT;
3033 kern_return_t okay = thread_get_state (thread, M68K_THREAD_STATE_REGS, (thread_state_t)&state, &count);
3034 return (okay == KERN_SUCCESS) ? state.pc : PC_SENTINAL;
3036 #elif defined(__ppc__) || defined(ppc)
3038 struct ppc_thread_state state;
3039 unsigned int count = PPC_THREAD_STATE_COUNT;
3040 kern_return_t okay = thread_get_state (thread, PPC_THREAD_STATE, (thread_state_t)&state, &count);
3041 return (okay == KERN_SUCCESS) ? state.srr0 : PC_SENTINAL;
3045 #error _get_pc_for_thread () not implemented for this architecture
3049 /***********************************************************************
3050 * _collecting_in_critical.
3051 * Returns TRUE if some thread is currently executing a cache-reading
3052 * function. Collection of cache garbage is not allowed when a cache-
3053 * reading function is in progress because it might still be using
3054 * the garbage memory.
3055 **********************************************************************/
3056 OBJC_EXPORT unsigned long objc_entryPoints[];
3057 OBJC_EXPORT unsigned long objc_exitPoints[];
3059 static int _collecting_in_critical (void)
3061 thread_act_port_array_t threads;
3067 mach_port_t mythread = pthread_mach_thread_np(pthread_self());
3069 // Get a list of all the threads in the current task
3070 ret = task_threads (mach_task_self (), &threads, &number);
3071 if (ret != KERN_SUCCESS)
3073 _objc_fatal("task_threads failed (result %d)\n", ret);
3076 // Check whether any thread is in the cache lookup code
3078 for (count = 0; count < number; count++)
3083 // Don't bother checking ourselves
3084 if (threads[count] == mythread)
3087 // Find out where thread is executing
3088 pc = _get_pc_for_thread (threads[count]);
3090 // Check for bad status, and if so, assume the worst (can't collect)
3091 if (pc == PC_SENTINAL)
3097 // Check whether it is in the cache lookup code
3098 for (region = 0; objc_entryPoints[region] != 0; region++)
3100 if ((pc >= objc_entryPoints[region]) &&
3101 (pc <= objc_exitPoints[region]))
3110 // Deallocate the port rights for the threads
3111 for (count = 0; count < number; count++) {
3112 mach_port_deallocate(mach_task_self (), threads[count]);
3115 // Deallocate the thread list
3116 vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);
3118 // Return our finding
3122 /***********************************************************************
3123 * _garbage_make_room. Ensure that there is enough room for at least
3124 * one more ref in the garbage.
3125 **********************************************************************/
3127 // amount of memory represented by all refs in the garbage
3128 static int garbage_byte_size = 0;
3130 // do not empty the garbage until garbage_byte_size gets at least this big
3131 static int garbage_threshold = 1024;
3133 // table of refs to free
3134 static void **garbage_refs = 0;
3136 // current number of refs in garbage_refs
3137 static int garbage_count = 0;
3139 // capacity of current garbage_refs
3140 static int garbage_max = 0;
3142 // capacity of initial garbage_refs
3144 INIT_GARBAGE_COUNT = 128
3147 static void _garbage_make_room (void)
3149 static int first = 1;
3150 volatile void * tempGarbage;
3152 // Create the collection table the first time it is needed
3156 garbage_refs = _malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
3157 garbage_max = INIT_GARBAGE_COUNT;
3160 // Double the table if it is full
3161 else if (garbage_count == garbage_max)
3163 tempGarbage = _realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));
3164 garbage_refs = (void **) tempGarbage;
3169 /***********************************************************************
3170 * _cache_collect_free. Add the specified malloc'd memory to the list
3171 * of them to free at some later point.
3172 * size is used for the collection threshold. It does not have to be
3173 * precisely the block's size.
3174 * Cache locks: cacheUpdateLock must be held by the caller.
3175 **********************************************************************/
3176 static void _cache_collect_free(void *data, size_t size, BOOL tryCollect)
3178 static char *report_garbage = (char *)0xffffffff;
3180 if ((char *)0xffffffff == report_garbage) {
3181 // Check whether to log our activity
3182 report_garbage = getenv ("OBJC_REPORT_GARBAGE");
3185 // Insert new element in garbage list
3186 // Note that we do this even if we end up free'ing everything
3187 _garbage_make_room ();
3188 garbage_byte_size += size;
3189 garbage_refs[garbage_count++] = data;
3192 if (tryCollect && report_garbage)
3193 _objc_inform ("total of %d bytes of garbage ...", garbage_byte_size);
3195 // Done if caller says not to empty or the garbage is not full
3196 if (!tryCollect || (garbage_byte_size < garbage_threshold))
3198 if (tryCollect && report_garbage)
3199 _objc_inform ("couldn't collect cache garbage: below threshold\n");
3204 // tryCollect is guaranteed to be true after this point
3206 // Synchronize garbage collection with objc_msgSend and other cache readers
3207 if (!_collecting_in_critical ()) {
3208 // No cache readers in progress - garbage is now deletable
3212 _objc_inform ("collecting!\n");
3214 // Dispose all refs now in the garbage
3215 while (garbage_count--) {
3216 if (cache_allocator_is_block(garbage_refs[garbage_count])) {
3217 cache_allocator_free(garbage_refs[garbage_count]);
3219 free(garbage_refs[garbage_count]);
3223 // Clear the garbage count and total size indicator
3225 garbage_byte_size = 0;
3228 // objc_msgSend (or other cache reader) is currently looking in the
3229 // cache and might still be using some garbage.
3230 if (report_garbage) {
3231 _objc_inform ("couldn't collect cache garbage: objc_msgSend in progress\n");
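/***********************************************************************
* Editorial sketch (not part of the original source).
* The deferred-free pattern used by _cache_collect_free above, reduced to
* its essentials: park stale blocks on a list, and only release them once
* no reader can still be inside the lock-free lookup path. All names here
* are hypothetical; the real code keys the safety check off thread PCs.
**********************************************************************/
#if 0
#include <stdlib.h>

typedef struct toy_garbage { void *ptr; struct toy_garbage *next; } toy_garbage;
static toy_garbage *toy_pending = NULL;

static void toy_defer_free(void *ptr)             // caller holds the update lock
{
    toy_garbage *g = malloc(sizeof(*g));
    g->ptr = ptr;
    g->next = toy_pending;
    toy_pending = g;
}

static void toy_drain_if_safe(int (*reader_in_critical)(void))
{
    if (reader_in_critical()) return;             // a reader may still see garbage
    while (toy_pending) {
        toy_garbage *g = toy_pending;
        toy_pending = g->next;
        free(g->ptr);                             // safe now: no readers remain
        free(g);
    }
}
#endif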
3238 /***********************************************************************
3239 * Custom method cache allocator.
3240 * Method cache block sizes are 2 + 2^n words (a two-word header plus a power-of-two bucket array), which is a pessimal
3241 * case for the system allocator. It wastes 504 bytes per cache block
3242 * with 128 or more slots, which adds up to tens of KB for an AppKit process.
3243 * To save memory, the custom cache allocator below is used.
3245 * The cache allocator uses 128 KB allocation regions. Few processes will
3246 * require a second region. Within a region, allocation is address-ordered
3249 * The cache allocator uses a quantum of 520 bytes.
3250 * Cache block ideal sizes: 520, 1032, 2056, 4104
3251 * Cache allocator sizes: 520, 1040, 2080, 4160
3253 * Because all blocks are known to be genuine method caches, the ordinary
3254 * cache->mask and cache->occupied fields are used as block headers.
3255 * No out-of-band headers are maintained. The number of blocks will
3256 * almost always be fewer than 200, so the only optimization is a simple
3257 * address-ordered free list per region, kept coalesced by the code below.
3259 * Block in use: mask != 0, occupied != -1 (mask indicates block size)
3260 * Block free: mask != 0, occupied == -1 (mask is precisely block size)
3262 * No cache allocator functions take any locks. Instead, the caller
3263 * must hold the cacheUpdateLock.
3264 **********************************************************************/
3266 typedef struct cache_allocator_block {
3269 struct cache_allocator_block *nextFree;
3270 } cache_allocator_block;
3272 typedef struct cache_allocator_region {
3273 cache_allocator_block *start;
3274 cache_allocator_block *end; // first non-block address
3275 cache_allocator_block *freeList;
3276 struct cache_allocator_region *next;
3277 } cache_allocator_region;
3279 static cache_allocator_region *cacheRegion = NULL;
3282 static unsigned int cache_allocator_mask_for_size(size_t size)
3284 return (size - sizeof(struct objc_cache)) / sizeof(Method);
3287 static size_t cache_allocator_size_for_mask(unsigned int mask)
3289 size_t requested = sizeof(struct objc_cache) + TABLE_SIZE(mask+1);
3290 size_t actual = CACHE_QUANTUM;
3291 while (actual < requested) actual += CACHE_QUANTUM;
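/***********************************************************************
* Editorial worked example (not part of the original source).
* Assuming the 32-bit sizes the comment above implies (an 8-byte cache
* header, 4-byte buckets, 520-byte quantum), the rounding performed by
* cache_allocator_size_for_mask() reproduces the table above:
*      128 slots:  8 +  128*4 =  520  ->  1 * 520 =  520   (no waste)
*      256 slots:  8 +  256*4 = 1032  ->  2 * 520 = 1040   (8 bytes waste)
*      512 slots:  8 +  512*4 = 2056  ->  4 * 520 = 2080   (24 bytes waste)
*     1024 slots:  8 + 1024*4 = 4104  ->  8 * 520 = 4160   (56 bytes waste)
* versus the system allocator, which would round a 520-byte request up to
* 1024 and waste roughly 504 bytes per large cache block.
**********************************************************************/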
3295 /***********************************************************************
3296 * cache_allocator_add_region
3297 * Allocates and returns a new region that can hold at least size
3298 * bytes of large method caches.
3299 * The actual size will be rounded up to a CACHE_QUANTUM boundary,
3300 * with a minimum of CACHE_REGION_SIZE.
3301 * The new region is lowest-priority for new allocations. Callers that
3302 * know the other regions are already full should allocate directly
3303 * into the returned region.
3304 **********************************************************************/
3305 static cache_allocator_region *cache_allocator_add_region(size_t size)
3308 cache_allocator_block *b;
3309 cache_allocator_region **rgnP;
3310 cache_allocator_region *newRegion =
3311 _calloc_internal(1, sizeof(cache_allocator_region));
3313 // Round size up to quantum boundary, and apply the minimum size.
3314 size += CACHE_QUANTUM - (size % CACHE_QUANTUM);
3315 if (size < CACHE_REGION_SIZE) size = CACHE_REGION_SIZE;
3317 // Allocate the region
3319 vm_allocate(mach_task_self(), &addr, size, 1);
3320 newRegion->start = (cache_allocator_block *)addr;
3321 newRegion->end = (cache_allocator_block *)(addr + size);
3323 // Mark the first block: free and covers the entire region
3324 b = newRegion->start;
3326 b->state = (unsigned int)-1;
3328 newRegion->freeList = b;
3330 // Add to end of the linked list of regions.
3331 // Other regions should be re-used before this one is touched.
3332 newRegion->next = NULL;
3333 rgnP = &cacheRegion;
3335 rgnP = &(**rgnP).next;
3343 /***********************************************************************
3344 * cache_allocator_coalesce
3345 * Attempts to coalesce a free block with the free block that follows it
3346 * in the free list, if the two are physically adjacent in memory.
3347 **********************************************************************/
3348 static void cache_allocator_coalesce(cache_allocator_block *block)
3350 if (block->size + (uintptr_t)block == (uintptr_t)block->nextFree) {
3351 block->size += block->nextFree->size;
3352 block->nextFree = block->nextFree->nextFree;
3357 /***********************************************************************
3358 * cache_region_calloc
3359 * Attempt to allocate a size-byte block in the given region.
3360 * Allocation is first-fit. The free list is already fully coalesced.
3361 * Returns NULL if there is not enough room in the region for the block.
3362 **********************************************************************/
3363 static void *cache_region_calloc(cache_allocator_region *rgn, size_t size)
3365 cache_allocator_block **blockP;
3368 // Save mask for allocated block, then round size
3369 // up to CACHE_QUANTUM boundary
3370 mask = cache_allocator_mask_for_size(size);
3371 size = cache_allocator_size_for_mask(mask);
3373 // Search the free list for a sufficiently large free block.
3375 for (blockP = &rgn->freeList;
3377 blockP = &(**blockP).nextFree)
3379 cache_allocator_block *block = *blockP;
3380 if (block->size < size) continue; // not big enough
3382 // block is now big enough. Allocate from it.
3384 // Slice off unneeded fragment of block, if any,
3385 // and reconnect the free list around block.
3386 if (block->size - size >= CACHE_QUANTUM) {
3387 cache_allocator_block *leftover =
3388 (cache_allocator_block *)(size + (uintptr_t)block);
3389 leftover->size = block->size - size;
3390 leftover->state = (unsigned int)-1;
3391 leftover->nextFree = block->nextFree;
3394 *blockP = block->nextFree;
3397 // block is now exactly the right size.
3400 block->size = mask; // Cache->mask
3401 block->state = 0; // Cache->occupied
3406 // No room in this region.
3411 /***********************************************************************
3412 * cache_allocator_calloc
3413 * Custom allocator for large method caches (128+ slots)
3414 * The returned cache block already has cache->mask set.
3415 * cache->occupied and the cache contents are zero.
3416 * Cache locks: cacheUpdateLock must be held by the caller
3417 **********************************************************************/
3418 static void *cache_allocator_calloc(size_t size)
3420 cache_allocator_region *rgn;
3422 for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
3423 void *p = cache_region_calloc(rgn, size);
3429 // No regions or all regions full - make a region and try one more time
3430 // In the unlikely case of a cache bigger than a whole region, it will get its own region.
3431 return cache_region_calloc(cache_allocator_add_region(size), size);
3435 /***********************************************************************
3436 * cache_allocator_region_for_block
3437 * Returns the cache allocator region that ptr points into, or NULL.
3438 **********************************************************************/
3439 static cache_allocator_region *cache_allocator_region_for_block(cache_allocator_block *block)
3441 cache_allocator_region *rgn;
3442 for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
3443 if (block >= rgn->start && block < rgn->end) return rgn;
3449 /***********************************************************************
3450 * cache_allocator_is_block
3451 * If ptr is a live block from the cache allocator, return YES
3452 * If ptr is a block from some other allocator, return NO.
3453 * If ptr is a dead block from the cache allocator, result is undefined.
3454 * Cache locks: cacheUpdateLock must be held by the caller
3455 **********************************************************************/
3456 static BOOL cache_allocator_is_block(void *ptr)
3458 return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL);
3461 /***********************************************************************
3462 * cache_allocator_free
3463 * Frees a block allocated by the cache allocator.
3464 * Cache locks: cacheUpdateLock must be held by the caller.
3465 **********************************************************************/
3466 static void cache_allocator_free(void *ptr)
3468 cache_allocator_block *dead = (cache_allocator_block *)ptr;
3469 cache_allocator_block *cur;
3470 cache_allocator_region *rgn;
3472 if (! (rgn = cache_allocator_region_for_block(ptr))) {
3473 // free of non-pointer
3474 _objc_inform("cache_allocator_free of non-pointer %p", ptr);
3478 dead->size = cache_allocator_size_for_mask(dead->size);
3479 dead->state = (unsigned int)-1;
3481 if (!rgn->freeList || rgn->freeList > dead) {
3482 // dead block belongs at front of free list
3483 dead->nextFree = rgn->freeList;
3484 rgn->freeList = dead;
3485 cache_allocator_coalesce(dead);
3489 // dead block belongs in the middle or end of free list
3490 for (cur = rgn->freeList; cur != NULL; cur = cur->nextFree) {
3491 cache_allocator_block *ahead = cur->nextFree;
3493 if (!ahead || ahead > dead) {
3494 // cur and ahead straddle dead, OR dead belongs at end of free list
3495 cur->nextFree = dead;
3496 dead->nextFree = ahead;
3498 // coalesce into dead first in case both succeed
3499 cache_allocator_coalesce(dead);
3500 cache_allocator_coalesce(cur);
3506 _objc_inform("cache_allocator_free of non-pointer %p", ptr);
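/***********************************************************************
* Editorial worked example (not part of the original source).
* Address-ordered freeing plus cache_allocator_coalesce() keeps each
* region's free list from fragmenting. For instance (hypothetical
* addresses, 520-byte quantum):
*     free list:  [A, size 520] -> [A+1040, size 520]
*     now free the 520-byte block at A+520:
*       it is linked between the two (address order), then
*       coalesce(dead): A+520 + 520 == A+1040, so it absorbs the block ahead
*       coalesce(cur):  A + 520 == A+520, so the front block absorbs dead
*     result:     [A, size 1560]
**********************************************************************/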
3510 /***********************************************************************
3512 **********************************************************************/
3513 static void _cache_print (Cache cache)
3518 count = cache->mask + 1;
3519 for (index = 0; index < count; index += 1)
3520 if (CACHE_BUCKET_VALID(cache->buckets[index]))
3522 if (CACHE_BUCKET_IMP(cache->buckets[index]) == &_objc_msgForward)
3523 printf ("does not recognize: ");
3524 printf ("%s\n", (const char *) CACHE_BUCKET_NAME(cache->buckets[index]));
3528 /***********************************************************************
3529 * _class_printMethodCaches.
3530 **********************************************************************/
3531 void _class_printMethodCaches (Class cls)
3533 if (((struct objc_class *)cls)->cache == &emptyCache)
3534 printf ("no instance-method cache for class %s\n", ((struct objc_class *)cls)->name);
3538 printf ("instance-method cache for class %s:\n", ((struct objc_class *)cls)->name);
3539 _cache_print (((struct objc_class *)cls)->cache);
3542 if (((struct objc_class * )((struct objc_class * )cls)->isa)->cache == &emptyCache)
3543 printf ("no class-method cache for class %s\n", ((struct objc_class *)cls)->name);
3547 printf ("class-method cache for class %s:\n", ((struct objc_class *)cls)->name);
3548 _cache_print (((struct objc_class * )((struct objc_class * )cls)->isa)->cache);
3552 /***********************************************************************
3554 **********************************************************************/
3555 static unsigned int log2 (unsigned int x)
3566 /***********************************************************************
3567 * _class_printDuplicateCacheEntries.
3568 **********************************************************************/
3569 void _class_printDuplicateCacheEntries (BOOL detail)
3571 NXHashTable * class_hash;
3573 struct objc_class * cls;
3574 unsigned int duplicates;
3575 unsigned int index1;
3576 unsigned int index2;
3579 unsigned int isMeta;
3583 printf ("Checking for duplicate cache entries \n");
3585 // Outermost loop - iterate over all classes
3586 class_hash = objc_getClasses ();
3587 state = NXInitHashState (class_hash);
3589 while (NXNextHashState (class_hash, &state, (void **) &cls))
3591 // Control loop - do given class' cache, then its isa's cache
3592 for (isMeta = 0; isMeta <= 1; isMeta += 1)
3594 // Select cache of interest and make sure it exists
3595 cache = isMeta ? cls->isa->cache : ((struct objc_class *)cls)->cache;
3596 if (cache == &emptyCache)
3599 // Middle loop - check each entry in the given cache
3602 for (index1 = 0; index1 < count; index1 += 1)
3604 // Skip invalid entry
3605 if (!CACHE_BUCKET_VALID(cache->buckets[index1]))
3608 // Inner loop - check that given entry matches no later entry
3609 for (index2 = index1 + 1; index2 < count; index2 += 1)
3611 // Skip invalid entry
3612 if (!CACHE_BUCKET_VALID(cache->buckets[index2]))
3615 // Check for duplication by method name comparison
3616 if (strcmp ((char *) CACHE_BUCKET_NAME(cache->buckets[index1]),
3617 (char *) CACHE_BUCKET_NAME(cache->buckets[index2])) == 0)
3620 printf ("%s %s\n", ((struct objc_class *)cls)->name, (char *) CACHE_BUCKET_NAME(cache->buckets[index1]));
3630 printf ("duplicates = %d\n", duplicates);
3631 printf ("total cache fills = %d\n", totalCacheFills);
/***********************************************************************
* PrintCacheHeader.
**********************************************************************/
static void PrintCacheHeader (void)
{
#ifdef OBJC_INSTRUMENTED
    printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS TotalD AvgD MaxD TotalD AvgD MaxD TotD AvgD MaxD\n");
    printf ("Size Count Used Used Used Hit Hit Miss Miss Hits Prbs Prbs Misses Prbs Prbs Flsh Flsh Flsh\n");
    printf ("----- ----- ----- ----- ---- ---- ---- ---- ---- ------- ---- ---- ------- ---- ---- ---- ---- ----\n");
#else
    printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS\n");
    printf ("Size Count Used Used Used Hit Hit Miss Miss\n");
    printf ("----- ----- ----- ----- ---- ---- ---- ---- ----\n");
#endif
}
/***********************************************************************
* PrintCacheInfo.
**********************************************************************/
static void PrintCacheInfo (unsigned int cacheSize,
                            unsigned int cacheCount,
                            unsigned int slotsUsed,
                            float avgUsed,
                            unsigned int maxUsed,
                            float avgSHit,
                            unsigned int maxSHit,
                            float avgSMiss,
                            unsigned int maxSMiss
#ifdef OBJC_INSTRUMENTED
                            , unsigned int totDHits,
                            float avgDHit,
                            unsigned int maxDHit,
                            unsigned int totDMisses,
                            float avgDMiss,
                            unsigned int maxDMiss,
                            unsigned int totDFlsh,
                            float avgDFlsh,
                            unsigned int maxDFlsh
#endif
                            )
{
#ifdef OBJC_INSTRUMENTED
    printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u %7u %4.1f %4u %7u %4.1f %4u %4u %4.1f %4u\n",
#else
    printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u\n",
#endif
            cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed, avgSHit, maxSHit, avgSMiss, maxSMiss
#ifdef OBJC_INSTRUMENTED
            , totDHits, avgDHit, maxDHit, totDMisses, avgDMiss, maxDMiss, totDFlsh, avgDFlsh, maxDFlsh
#endif
            );
}
#ifdef OBJC_INSTRUMENTED
/***********************************************************************
* PrintCacheHistogram. Show the non-zero entries from the specified
* histogram.
**********************************************************************/
static void PrintCacheHistogram (char * title,
                                 unsigned int * firstEntry,
                                 unsigned int entryCount)
{
    unsigned int index;
    unsigned int * thisEntry;

    printf ("%s\n", title);
    printf (" Probes Tally\n");
    printf (" ------ -----\n");
    for (index = 0, thisEntry = firstEntry;
         index < entryCount;
         index += 1, thisEntry += 1)
    {
        if (*thisEntry == 0)
            continue;

        printf (" %6d %5d\n", index, *thisEntry);
    }
}
/***********************************************************************
* _class_printMethodCacheStatistics.
**********************************************************************/

#define MAX_LOG2_SIZE 32
#define MAX_CHAIN_SIZE 100

void _class_printMethodCacheStatistics (void)
{
    unsigned int isMeta;
    unsigned int index;
    NXHashTable * class_hash;
    NXHashState state;
    struct objc_class * cls;
    unsigned int totalChain;
    unsigned int totalMissChain;
    unsigned int maxChain;
    unsigned int maxMissChain;
    unsigned int classCount;
    unsigned int negativeEntryCount;
    unsigned int cacheExpandCount;
    unsigned int cacheCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMaxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMaxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int chainCount[MAX_CHAIN_SIZE] = {0};
    unsigned int missChainCount[MAX_CHAIN_SIZE] = {0};
#ifdef OBJC_INSTRUMENTED
    unsigned int hitCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int hitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxHitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int missCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int missProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxMissProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int flushCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int flushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxFlushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
#endif
    printf ("Printing cache statistics\n");

    // Outermost loop - iterate over all classes
    class_hash = objc_getClasses ();
    state = NXInitHashState (class_hash);
    classCount = 0;
    negativeEntryCount = 0;
    cacheExpandCount = 0;
    while (NXNextHashState (class_hash, &state, (void **) &cls))
    {
        // Tally classes
        classCount += 1;

        // Control loop - do given class' cache, then its isa's cache
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            Cache cache;
            unsigned int mask;
            unsigned int log2Size;
            unsigned int entryCount;

            // Select cache of interest
            cache = isMeta ? cls->isa->cache : ((struct objc_class *)cls)->cache;

            // Ignore empty cache... should we?
            if (cache == &emptyCache)
                continue;

            // Middle loop - do each entry in the given cache
            mask = cache->mask;
            entryCount = 0;
            totalChain = 0;
            totalMissChain = 0;
            maxChain = 0;
            maxMissChain = 0;
            for (index = 0; index < mask + 1; index += 1)
            {
                Method * buckets;
                Method method;
                uarith_t hash;
                uarith_t methodChain;
                uarith_t methodMissChain;
                uarith_t index2;

                // If entry is invalid, the only item of
                // interest is that future insert hashes
                // to this entry can use it directly.
                buckets = cache->buckets;
                if (!CACHE_BUCKET_VALID(buckets[index]))
                {
                    missChainCount[0] += 1;
                    continue;
                }

                method = buckets[index];

                // Tally valid entries
                entryCount += 1;

                // Tally "forward::" entries
                if (CACHE_BUCKET_IMP(method) == &_objc_msgForward)
                    negativeEntryCount += 1;

                // Calculate search distance (chain length) for this method.
                // The chain may wrap around to the beginning of the table.
                hash = CACHE_HASH(CACHE_BUCKET_NAME(method), mask);
                if (index >= hash) methodChain = index - hash;
                else methodChain = (mask+1) + index - hash;
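                // (Worked example, added for clarity: with mask == 7, a
                // method whose selector hashes to slot 6 but sits in slot 1
                // has methodChain == (7+1) + 1 - 6 == 3, i.e. the probe
                // wrapped past the end of the 8-slot table.)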
                // Tally chains of this length
                if (methodChain < MAX_CHAIN_SIZE)
                    chainCount[methodChain] += 1;

                // Keep sum of all chain lengths
                totalChain += methodChain;

                // Record greatest chain length
                if (methodChain > maxChain)
                    maxChain = methodChain;

                // Calculate search distance for a miss that hashes here:
                // step forward (wrapping) until the first invalid slot.
                index2 = index;
                while (CACHE_BUCKET_VALID(buckets[index2]))
                {
                    index2 += 1;
                    index2 &= mask;
                }
                methodMissChain = ((index2 - index) & mask);
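                // (Worked example, added for clarity: if slots 3, 4 and 5
                // are occupied and slot 6 is free, a lookup that hashes to
                // slot 3 and misses probes until slot 6, so
                // methodMissChain == (6 - 3) & mask == 3.)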
                // Tally miss chains of this length
                if (methodMissChain < MAX_CHAIN_SIZE)
                    missChainCount[methodMissChain] += 1;

                // Keep sum of all miss chain lengths in this class
                totalMissChain += methodMissChain;

                // Record greatest miss chain length
                if (methodMissChain > maxMissChain)
                    maxMissChain = methodMissChain;
            }

            // Factor this cache into statistics about caches of the same
            // type and size (all caches are a power of two in size)
            log2Size = log2 (mask + 1);
            cacheCountBySize[isMeta][log2Size] += 1;
            totalEntriesBySize[isMeta][log2Size] += entryCount;
            if (entryCount > maxEntriesBySize[isMeta][log2Size])
                maxEntriesBySize[isMeta][log2Size] = entryCount;
            totalChainBySize[isMeta][log2Size] += totalChain;
            totalMissChainBySize[isMeta][log2Size] += totalMissChain;
            totalMaxChainBySize[isMeta][log2Size] += maxChain;
            totalMaxMissChainBySize[isMeta][log2Size] += maxMissChain;
            if (maxChain > maxChainBySize[isMeta][log2Size])
                maxChainBySize[isMeta][log2Size] = maxChain;
            if (maxMissChain > maxMissChainBySize[isMeta][log2Size])
                maxMissChainBySize[isMeta][log2Size] = maxMissChain;
#ifdef OBJC_INSTRUMENTED
            {
                CacheInstrumentation * cacheData;

                cacheData = CACHE_INSTRUMENTATION(cache);
                hitCountBySize[isMeta][log2Size] += cacheData->hitCount;
                hitProbesBySize[isMeta][log2Size] += cacheData->hitProbes;
                if (cacheData->maxHitProbes > maxHitProbesBySize[isMeta][log2Size])
                    maxHitProbesBySize[isMeta][log2Size] = cacheData->maxHitProbes;
                missCountBySize[isMeta][log2Size] += cacheData->missCount;
                missProbesBySize[isMeta][log2Size] += cacheData->missProbes;
                if (cacheData->maxMissProbes > maxMissProbesBySize[isMeta][log2Size])
                    maxMissProbesBySize[isMeta][log2Size] = cacheData->maxMissProbes;
                flushCountBySize[isMeta][log2Size] += cacheData->flushCount;
                flushedEntriesBySize[isMeta][log2Size] += cacheData->flushedEntries;
                if (cacheData->maxFlushedEntries > maxFlushedEntriesBySize[isMeta][log2Size])
                    maxFlushedEntriesBySize[isMeta][log2Size] = cacheData->maxFlushedEntries;
            }
#endif
            // Caches start with a power of two number of entries, and grow by doubling, so
            // we can calculate the number of times this cache has expanded
            if (isMeta)
                cacheExpandCount += log2Size - INIT_META_CACHE_SIZE_LOG2;
            else
                cacheExpandCount += log2Size - INIT_CACHE_SIZE_LOG2;
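            // (Added note: since slot counts only ever double, the
            // difference between log2Size and the initial log2 size is
            // exactly the number of expansions; e.g. a cache that grew
            // from 4 slots to 32 slots has expanded 3 times.)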
        }
    }

    {
        unsigned int cacheCountByType[2] = {0};
        unsigned int totalCacheCount = 0;
        unsigned int totalEntries = 0;
        unsigned int maxEntries = 0;
        unsigned int totalSlots = 0;
#ifdef OBJC_INSTRUMENTED
        unsigned int totalHitCount = 0;
        unsigned int totalHitProbes = 0;
        unsigned int maxHitProbes = 0;
        unsigned int totalMissCount = 0;
        unsigned int totalMissProbes = 0;
        unsigned int maxMissProbes = 0;
        unsigned int totalFlushCount = 0;
        unsigned int totalFlushedEntries = 0;
        unsigned int maxFlushedEntries = 0;
#endif

        totalChain = 0;
        maxChain = 0;
        totalMissChain = 0;
        maxMissChain = 0;

        // Sum information over all caches
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            for (index = 0; index < MAX_LOG2_SIZE; index += 1)
            {
                cacheCountByType[isMeta] += cacheCountBySize[isMeta][index];
                totalEntries += totalEntriesBySize[isMeta][index];
                totalSlots += cacheCountBySize[isMeta][index] * (1 << index);
                totalChain += totalChainBySize[isMeta][index];
                if (maxEntriesBySize[isMeta][index] > maxEntries)
                    maxEntries = maxEntriesBySize[isMeta][index];
                if (maxChainBySize[isMeta][index] > maxChain)
                    maxChain = maxChainBySize[isMeta][index];
                totalMissChain += totalMissChainBySize[isMeta][index];
                if (maxMissChainBySize[isMeta][index] > maxMissChain)
                    maxMissChain = maxMissChainBySize[isMeta][index];
#ifdef OBJC_INSTRUMENTED
                totalHitCount += hitCountBySize[isMeta][index];
                totalHitProbes += hitProbesBySize[isMeta][index];
                if (maxHitProbesBySize[isMeta][index] > maxHitProbes)
                    maxHitProbes = maxHitProbesBySize[isMeta][index];
                totalMissCount += missCountBySize[isMeta][index];
                totalMissProbes += missProbesBySize[isMeta][index];
                if (maxMissProbesBySize[isMeta][index] > maxMissProbes)
                    maxMissProbes = maxMissProbesBySize[isMeta][index];
                totalFlushCount += flushCountBySize[isMeta][index];
                totalFlushedEntries += flushedEntriesBySize[isMeta][index];
                if (maxFlushedEntriesBySize[isMeta][index] > maxFlushedEntries)
                    maxFlushedEntries = maxFlushedEntriesBySize[isMeta][index];
#endif
            }

            totalCacheCount += cacheCountByType[isMeta];
        }
        // Log our findings
        printf ("There are %u classes\n", classCount);

        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            // Number of this type of class
            printf ("\nThere are %u %s-method caches, broken down by size (slot count):\n",
                    cacheCountByType[isMeta],
                    isMeta ? "class" : "instance");

            // Print header
            PrintCacheHeader ();

            // Keep format consistent even if there are no caches of this kind
            if (cacheCountByType[isMeta] == 0)
            {
                printf ("(none)\n");
                continue;
            }

            // Usage information by cache size
            for (index = 0; index < MAX_LOG2_SIZE; index += 1)
            {
                unsigned int cacheCount;
                unsigned int cacheSlotCount;
                unsigned int cacheEntryCount;

                // Get number of caches of this type and size
                cacheCount = cacheCountBySize[isMeta][index];
                if (cacheCount == 0)
                    continue;

                // Get the cache slot count and the total number of valid entries
                cacheSlotCount = (1 << index);
                cacheEntryCount = totalEntriesBySize[isMeta][index];

                // Give the analysis
                PrintCacheInfo (cacheSlotCount,
                                cacheCount,
                                cacheEntryCount,
                                (float) cacheEntryCount / (float) cacheCount,
                                maxEntriesBySize[isMeta][index],
                                (float) totalChainBySize[isMeta][index] / (float) cacheEntryCount,
                                maxChainBySize[isMeta][index],
                                (float) totalMissChainBySize[isMeta][index] / (float) (cacheCount * cacheSlotCount),
                                maxMissChainBySize[isMeta][index]
#ifdef OBJC_INSTRUMENTED
                                , hitCountBySize[isMeta][index],
                                hitCountBySize[isMeta][index] ?
                                    (float) hitProbesBySize[isMeta][index] / (float) hitCountBySize[isMeta][index] : 0.0,
                                maxHitProbesBySize[isMeta][index],
                                missCountBySize[isMeta][index],
                                missCountBySize[isMeta][index] ?
                                    (float) missProbesBySize[isMeta][index] / (float) missCountBySize[isMeta][index] : 0.0,
                                maxMissProbesBySize[isMeta][index],
                                flushCountBySize[isMeta][index],
                                flushCountBySize[isMeta][index] ?
                                    (float) flushedEntriesBySize[isMeta][index] / (float) flushCountBySize[isMeta][index] : 0.0,
                                maxFlushedEntriesBySize[isMeta][index]
#endif
                               );
            }
        }
        // Give overall numbers
        printf ("\nCumulative:\n");
        PrintCacheHeader ();
        PrintCacheInfo (totalSlots, totalCacheCount, totalEntries,
                        (float) totalEntries / (float) totalCacheCount,
                        maxEntries,
                        (float) totalChain / (float) totalEntries,
                        maxChain,
                        (float) totalMissChain / (float) totalSlots,
                        maxMissChain
#ifdef OBJC_INSTRUMENTED
                        , totalHitCount,
                        totalHitCount ? (float) totalHitProbes / (float) totalHitCount : 0.0,
                        maxHitProbes,
                        totalMissCount,
                        totalMissCount ? (float) totalMissProbes / (float) totalMissCount : 0.0,
                        maxMissProbes,
                        totalFlushCount,
                        totalFlushCount ? (float) totalFlushedEntries / (float) totalFlushCount : 0.0,
                        maxFlushedEntries
#endif
                       );
        printf ("\nNumber of \"forward::\" entries: %d\n", negativeEntryCount);
        printf ("Number of cache expansions: %d\n", cacheExpandCount);
#ifdef OBJC_INSTRUMENTED
        printf ("flush_caches: total calls total visits average visits max visits total classes visits/class\n");
        printf (" ----------- ------------ -------------- ---------- ------------- -------------\n");
        printf (" linear %11u %12u %14.1f %10u %13u %12.2f\n",
                LinearFlushCachesCount,
                LinearFlushCachesVisitedCount,
                LinearFlushCachesCount ?
                    (float) LinearFlushCachesVisitedCount / (float) LinearFlushCachesCount : 0.0,
                MaxLinearFlushCachesVisitedCount,
                LinearFlushCachesVisitedCount,
                1.0);
        printf (" nonlinear %11u %12u %14.1f %10u %13u %12.2f\n",
                NonlinearFlushCachesCount,
                NonlinearFlushCachesVisitedCount,
                NonlinearFlushCachesCount ?
                    (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesCount : 0.0,
                MaxNonlinearFlushCachesVisitedCount,
                NonlinearFlushCachesClassCount,
                NonlinearFlushCachesClassCount ?
                    (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesClassCount : 0.0);
        printf (" ideal %11u %12u %14.1f %10u %13u %12.2f\n",
                LinearFlushCachesCount + NonlinearFlushCachesCount,
                IdealFlushCachesCount,
                LinearFlushCachesCount + NonlinearFlushCachesCount ?
                    (float) IdealFlushCachesCount / (float) (LinearFlushCachesCount + NonlinearFlushCachesCount) : 0.0,
                MaxIdealFlushCachesCount,
                LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount,
                LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount ?
                    (float) IdealFlushCachesCount / (float) (LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount) : 0.0);

        PrintCacheHistogram ("\nCache hit histogram:", &CacheHitHistogram[0], CACHE_HISTOGRAM_SIZE);
        PrintCacheHistogram ("\nCache miss histogram:", &CacheMissHistogram[0], CACHE_HISTOGRAM_SIZE);
#endif
        printf ("\nLookup chains:");
        for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
        {
            if (chainCount[index] != 0)
                printf (" %u:%u", index, chainCount[index]);
        }

        printf ("\nMiss chains:");
        for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
        {
            if (missChainCount[index] != 0)
                printf (" %u:%u", index, missChainCount[index]);
        }

        printf ("\nTotal memory usage for cache data structures: %lu bytes\n",
                totalCacheCount * (sizeof(struct objc_cache) - sizeof(Method)) +
                totalSlots * sizeof(Method) +
                negativeEntryCount * sizeof(struct objc_method));
    }
}
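/* (Added note on the byte count above: each cache contributes a fixed
 * header -- sizeof(struct objc_cache) minus the single Method slot declared
 * inline in the struct -- plus one Method pointer per slot; the
 * negativeEntryCount term assumes, as elsewhere in this file, that every
 * "forward::" entry owns a separately allocated struct objc_method.) */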
/***********************************************************************
* checkUniqueness.
**********************************************************************/
void checkUniqueness (SEL s1, SEL s2)
{
    if (s1 == s2)
        return;
    if (s1 && s2 && (strcmp ((const char *) s1, (const char *) s2) == 0))
        _objc_inform ("%p != %p but !strcmp (%s, %s)\n", s1, s2, (char *) s1, (char *) s2);
}
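/* Editor's note: a hedged usage sketch, not in the original source. It
 * shows the condition checkUniqueness reports - two SELs that compare equal
 * as strings without being pointer-equal, which is also what would surface
 * as duplicate cache entries above. sel_registerName is the public runtime
 * call assumed here.
 *
 *     SEL a = sel_registerName ("description");
 *     SEL b = sel_registerName ("description");
 *     checkUniqueness (a, b);    // silent if the runtime uniqued them to one SEL
 */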