2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
25 /***********************************************************************
27 * Copyright 1988-1997, Apple Computer, Inc.
29 **********************************************************************/
32 /***********************************************************************
33 * Method cache locking (GrP 2001-1-14)
35 * For speed, objc_msgSend does not acquire any locks when it reads
36 * method caches. Instead, all cache changes are performed so that any
37 * objc_msgSend running concurrently with the cache mutator will not
38 * crash or hang or get an incorrect result from the cache.
40 * When cache memory becomes unused (e.g. the old cache after cache
41 * expansion), it is not immediately freed, because a concurrent
42 * objc_msgSend could still be using it. Instead, the memory is
43 * disconnected from the data structures and placed on a garbage list.
44 * The memory is now only accessible to instances of objc_msgSend that
45 * were running when the memory was disconnected; any further calls to
46 * objc_msgSend will not see the garbage memory because the other data
47 * structures don't point to it anymore. The collecting_in_critical
48 * function checks the PC of all threads and returns FALSE when all threads
49 * are found to be outside objc_msgSend. This means any call to objc_msgSend
50 * that could have had access to the garbage has finished or moved past the
51 * cache lookup stage, so it is safe to free the memory.
53 * All functions that modify cache data or structures must acquire the
54 * cacheUpdateLock to prevent interference from concurrent modifications.
55 * The function that frees cache garbage must acquire the cacheUpdateLock
56 * and use collecting_in_critical() to flush out cache readers.
57 * The cacheUpdateLock is also used to protect the custom allocator used
58 * for large method cache blocks.
 * Cache readers (PC-checked by collecting_in_critical())
 * objc_msgSend*
 * _cache_getImp
 * _cache_getMethod
65 * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
66 * _cache_fill (acquires lock)
67 * _cache_expand (only called from cache_fill)
68 * _cache_create (only called from cache_expand)
69 * bcopy (only called from instrumented cache_expand)
70 * flush_caches (acquires lock)
71 * _cache_flush (only called from cache_fill and flush_caches)
72 * _cache_collect_free (only called from cache_expand and cache_flush)
74 * UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
76 * _class_printMethodCaches
77 * _class_printDuplicateCacheEntries
78 * _class_printMethodCacheStatistics
80 * _class_lookupMethodAndLoadCache is a special case. It may read a
81 * method triplet out of one cache and store it in another cache. This
82 * is unsafe if the method triplet is a forward:: entry, because the
83 * triplet itself could be freed unless _class_lookupMethodAndLoadCache
84 * were PC-checked or used a lock. Additionally, storing the method
85 * triplet in both caches would result in double-freeing if both caches
86 * were flushed or expanded. The solution is for _cache_getMethod to
87 * ignore all entries whose implementation is _objc_msgForward, so
88 * _class_lookupMethodAndLoadCache cannot look at a forward:: entry
89 * unsafely or place it in multiple caches.
90 ***********************************************************************/
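
/* A minimal sketch of the deferred-free discipline described above.
 * The garbage list shown here is illustrative only; the real bookkeeping
 * is in _garbage_make_room() and _cache_collect_free() later in this
 * file, and free() stands in for the real deallocation (which may go
 * through the custom cache allocator).
 *
 *   static void *example_garbage[32];        // hypothetical garbage list
 *   static int example_garbage_count = 0;
 *
 *   // Called with cacheUpdateLock held. The retired cache is only
 *   // disconnected here; it is freed later, once no thread's PC is
 *   // inside objc_msgSend.
 *   static void example_retire_cache(void *oldCache, BOOL tryCollect)
 *   {
 *       example_garbage[example_garbage_count++] = oldCache;
 *       if (tryCollect  &&  !_collecting_in_critical()) {
 *           while (example_garbage_count > 0) {
 *               free(example_garbage[--example_garbage_count]);
 *           }
 *       }
 *   }
 */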
92 /***********************************************************************
93 * Lazy method list arrays and method list locking (2004-10-19)
95 * cls->methodLists may be in one of three forms:
96 * 1. NULL: The class has no methods.
97 * 2. non-NULL, with CLS_NO_METHOD_ARRAY set: cls->methodLists points
98 * to a single method list, which is the class's only method list.
99 * 3. non-NULL, with CLS_NO_METHOD_ARRAY clear: cls->methodLists points to
100 * an array of method list pointers. The end of the array's block
101 * is set to -1. If the actual number of method lists is smaller
102 * than that, the rest of the array is NULL.
104 * Attaching categories and adding and removing classes may change
105 * the form of the class list. In addition, individual method lists
106 * may be reallocated when fixed up.
108 * Classes are initially read as #1 or #2. If a category is attached
109 * or other methods added, the class is changed to #3. Once in form #3,
110 * the class is never downgraded to #1 or #2, even if methods are removed.
111 * Classes added with objc_addClass are initially either #1 or #3.
113 * Accessing and manipulating a class's method lists are synchronized,
114 * to prevent races when one thread restructures the list. However,
115 * if the class is not yet in use (i.e. not in class_hash), then the
116 * thread loading the class may access its method lists without locking.
118 * The following functions acquire methodListLock:
119 * class_getInstanceMethod
120 * class_getClassMethod
 * class_nextMethodList
 * class_addMethods
123 * class_removeMethods
124 * class_respondsToMethod
125 * _class_lookupMethodAndLoadCache
126 * lookupMethodInClassAndLoadCache
127 * _objc_add_category_flush_caches
129 * The following functions don't acquire methodListLock because they
130 * only access method lists during class load and unload:
131 * _objc_register_category
132 * _resolve_categories_for_class (calls _objc_add_category)
133 * add_class_to_loadable_list
135 * _objc_remove_classes_in_image
137 * The following functions use method lists without holding methodListLock.
138 * The caller must either hold methodListLock, or be loading the class.
139 * _getMethod (called by class_getInstanceMethod, class_getClassMethod,
140 * and class_respondsToMethod)
141 * _findMethodInClass (called by _class_lookupMethodAndLoadCache,
142 * lookupMethodInClassAndLoadCache, _getMethod)
143 * _findMethodInList (called by _findMethodInClass)
 * nextMethodList (called by _findMethodInClass and class_nextMethodList)
145 * fixupSelectorsInMethodList (called by nextMethodList)
146 * _objc_add_category (called by _objc_add_category_flush_caches,
147 * resolve_categories_for_class and _objc_register_category)
148 * _objc_insertMethods (called by class_addMethods and _objc_add_category)
149 * _objc_removeMethods (called by class_removeMethods)
150 * _objcTweakMethodListPointerForClass (called by _objc_insertMethods)
151 * get_base_method_list (called by add_class_to_loadable_list)
152 * lookupNamedMethodInMethodList (called by add_class_to_loadable_list)
153 ***********************************************************************/
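
/* Illustrative helper (not part of the runtime) showing how the three
 * methodLists forms above are told apart. See nextMethodList() below for
 * the real iterator, which also fixes up selectors as it goes.
 *
 *   static struct objc_method_list *example_first_method_list(struct objc_class *cls)
 *   {
 *       if (!cls->methodLists) return NULL;                  // form 1: no methods
 *       if (cls->info & CLS_NO_METHOD_ARRAY)                 // form 2: single list
 *           return (struct objc_method_list *)cls->methodLists;
 *       if (!cls->methodLists[0]  ||  cls->methodLists[0] == END_OF_METHODS_LIST)
 *           return NULL;                                     // form 3: empty array
 *       return cls->methodLists[0];                          // form 3: first list
 *   }
 */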
155 /***********************************************************************
156 * Thread-safety of class info bits (2004-10-19)
158 * Some class info bits are used to store mutable runtime state.
159 * Modifications of the info bits at particular times need to be
160 * synchronized to prevent races.
162 * Three thread-safe modification functions are provided:
163 * _class_setInfo() // atomically sets some bits
164 * _class_clearInfo() // atomically clears some bits
165 * _class_changeInfo() // atomically sets some bits and clears others
166 * These replace CLS_SETINFO() for the multithreaded cases.
 * Three modification windows are defined:
 * - compile time
170 * - class construction or image load (before +load) in one thread
171 * - multi-threaded messaging and method caches
173 * Info bit modification at compile time and class construction do not
174 * need to be locked, because only one thread is manipulating the class.
175 * Info bit modification during messaging needs to be locked, because
176 * there may be other threads simultaneously messaging or otherwise
177 * manipulating the class.
179 * Modification windows for each flag:
181 * CLS_CLASS: compile-time and class load
182 * CLS_META: compile-time and class load
183 * CLS_INITIALIZED: +initialize
184 * CLS_POSING: messaging
185 * CLS_MAPPED: compile-time
186 * CLS_FLUSH_CACHE: messaging
187 * CLS_GROW_CACHE: messaging
188 * CLS_NEED_BIND: unused
189 * CLS_METHOD_ARRAY: unused
190 * CLS_JAVA_HYBRID: JavaBridge only
191 * CLS_JAVA_CLASS: JavaBridge only
192 * CLS_INITIALIZING: messaging
193 * CLS_FROM_BUNDLE: class load
194 * CLS_HAS_CXX_STRUCTORS: compile-time and class load
195 * CLS_NO_METHOD_ARRAY: class load and messaging
197 * CLS_INITIALIZED and CLS_INITIALIZING have additional thread-safety
198 * constraints to support thread-safe +initialize. See "Thread safety
199 * during class initialization" for details.
201 * CLS_JAVA_HYBRID and CLS_JAVA_CLASS are set immediately after JavaBridge
202 * calls objc_addClass(). The JavaBridge does not use an atomic update,
203 * but the modification counts as "class construction" unless some other
204 * thread quickly finds the class via the class list. This race is
205 * small and unlikely in well-behaved code.
207 * Most info bits that may be modified during messaging are also never
208 * read without a lock. There is no general read lock for the info bits.
209 * CLS_INITIALIZED: classInitLock
210 * CLS_FLUSH_CACHE: cacheUpdateLock
211 * CLS_GROW_CACHE: cacheUpdateLock
212 * CLS_NO_METHOD_ARRAY: methodListLock
213 * CLS_INITIALIZING: classInitLock
214 ***********************************************************************/
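
/* Sketch (not the runtime's actual code) of how _class_changeInfo() can
 * set and clear info bits atomically with a compare-and-swap loop.
 * OSAtomicCompareAndSwapLong from <libkern/OSAtomic.h> is used purely
 * for illustration, and the example assumes cls->info is a long.
 *
 *   static void example_changeInfo(struct objc_class *cls, long set, long clear)
 *   {
 *       long oldinfo, newinfo;
 *       do {
 *           oldinfo = cls->info;
 *           newinfo = (oldinfo | set) & ~clear;
 *       } while (!OSAtomicCompareAndSwapLong(oldinfo, newinfo, &cls->info));
 *   }
 */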
216 /***********************************************************************
217 * Thread-safety during class initialization (GrP 2001-9-24)
219 * Initial state: CLS_INITIALIZING and CLS_INITIALIZED both clear.
220 * During initialization: CLS_INITIALIZING is set
221 * After initialization: CLS_INITIALIZING clear and CLS_INITIALIZED set.
222 * CLS_INITIALIZING and CLS_INITIALIZED are never set at the same time.
223 * CLS_INITIALIZED is never cleared once set.
225 * Only one thread is allowed to actually initialize a class and send
226 * +initialize. Enforced by allowing only one thread to set CLS_INITIALIZING.
228 * Additionally, threads trying to send messages to a class must wait for
229 * +initialize to finish. During initialization of a class, that class's
230 * method cache is kept empty. objc_msgSend will revert to
231 * class_lookupMethodAndLoadCache, which checks CLS_INITIALIZED before
232 * messaging. If CLS_INITIALIZED is clear but CLS_INITIALIZING is set,
233 * the thread must block, unless it is the thread that started
234 * initializing the class in the first place.
236 * Each thread keeps a list of classes it's initializing.
237 * The global classInitLock is used to synchronize changes to CLS_INITIALIZED
238 * and CLS_INITIALIZING: the transition to CLS_INITIALIZING must be
239 * an atomic test-and-set with respect to itself and the transition
240 * to CLS_INITIALIZED.
241 * The global classInitWaitCond is used to block threads waiting for an
242 * initialization to complete. The classInitLock synchronizes
243 * condition checking and the condition variable.
244 **********************************************************************/
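
/* Sketch of the waiting discipline described above; the real code lives in
 * the class initialization path, not in this excerpt. It assumes that
 * classInitLock is a pthread mutex usable with pthread_cond_wait(), and it
 * omits the check that lets the thread currently running +initialize proceed.
 *
 *   OBJC_LOCK(&classInitLock);
 *   while ((cls->info & CLS_INITIALIZING)  &&  !(cls->info & CLS_INITIALIZED)) {
 *       // pthread_cond_wait drops classInitLock while sleeping and reacquires
 *       // it before returning; classInitWaitCond is broadcast whenever any
 *       // class finishes initializing.
 *       pthread_cond_wait(&classInitWaitCond, &classInitLock);
 *   }
 *   OBJC_UNLOCK(&classInitLock);
 */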
246 /***********************************************************************
247 * +initialize deadlock case when a class is marked initializing while
248 * its superclass is initialized. Solved by completely initializing
249 * superclasses before beginning to initialize a class.
251 * OmniWeb class hierarchy:
 *                 OBObject
 *                     |
 *                 OFObject
 *                /        \
 *   OWAddressEntry       OWController
 *                            |
 *                  OWConsoleController
260 * Thread 1 (evil testing thread):
261 * initialize OWAddressEntry
262 * super init OFObject
263 * super init OBObject
264 * [OBObject initialize] runs OBPostLoader, which inits lots of classes...
265 * initialize OWConsoleController
266 * super init OWController - wait for Thread 2 to finish OWController init
268 * Thread 2 (normal OmniWeb thread):
269 * initialize OWController
 * super init OFObject - wait for Thread 1 to finish OFObject init
 *
 * deadlock!
274 * Solution: fully initialize super classes before beginning to initialize
275 * a subclass. Then the initializing+initialized part of the class hierarchy
276 * will be a contiguous subtree starting at the root, so other threads
277 * can't jump into the middle between two initializing classes, and we won't
 * get stuck while a superclass waits for its subclass which waits for the
 * superclass.
 **********************************************************************/
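
/* Sketch of that solution (illustrative; example_initialize is not a real
 * runtime function): recurse on the superclass before touching the class
 * itself, so the initializing+initialized part of the hierarchy always
 * forms a contiguous subtree rooted at the root class.
 *
 *   static void example_initialize(struct objc_class *cls)
 *   {
 *       if (!cls  ||  (cls->info & CLS_INITIALIZED)) return;
 *       example_initialize(cls->super_class);   // superclasses finish first
 *       // ...then set CLS_INITIALIZING on cls under classInitLock,
 *       // send +initialize, and finally flip the bit to CLS_INITIALIZED.
 *   }
 */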
284 /***********************************************************************
286 **********************************************************************/
288 #import <mach/mach_interface.h>
289 #include <mach-o/ldsyms.h>
290 #include <mach-o/dyld.h>
292 #include <sys/types.h>
296 #include <sys/fcntl.h>
298 #import "objc-class.h"
300 #import <objc/Object.h>
301 #import <objc/objc-runtime.h>
302 #import "objc-private.h"
303 #import "hashtable2.h"
306 #include <sys/types.h>
308 // Needed functions not in any header file
309 size_t malloc_size (const void * ptr);
311 // Needed kernel interface
312 #import <mach/mach.h>
313 #import <mach/thread_status.h>
316 /***********************************************************************
318 **********************************************************************/
// Define PRELOAD_SUPERCLASS_CACHES to cause method lookups to add the
// method to the appropriate superclass caches, in addition to the normal
// encaching in the subclass where the method was messaged. Doing so
// will speed up messaging the same method from instances of the
// superclasses, but also uses up valuable cache space for a speculative
// win.
// See radar 2364264 about incorrectly propagating _objc_forward entries
// and double freeing them, first, before turning this on!
328 // (Radar 2364264 is now "inactive".)
329 // Double-freeing is also a potential problem when this is off. See
330 // note about _class_lookupMethodAndLoadCache in "Method cache locking".
331 //#define PRELOAD_SUPERCLASS_CACHES
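
// Illustrative sketch (hypothetical, not the actual lookup code) of what
// enabling PRELOAD_SUPERCLASS_CACHES would do after a method is found.
// cls, meth, sel, and implementingClass stand for the lookup's local state.
//
//   _cache_fill (cls, meth, sel);                 // normal encaching
//   #ifdef PRELOAD_SUPERCLASS_CACHES
//   {
//       struct objc_class *c;
//       for (c = cls->super_class; c; c = c->super_class) {
//           _cache_fill (c, meth, sel);           // speculative encaching
//           if (c == implementingClass) break;    // stop at the implementer
//       }
//   }
//   #endif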
333 /***********************************************************************
335 **********************************************************************/
337 #ifdef OBJC_INSTRUMENTED
339 CACHE_HISTOGRAM_SIZE = 512
342 unsigned int CacheHitHistogram [CACHE_HISTOGRAM_SIZE];
343 unsigned int CacheMissHistogram [CACHE_HISTOGRAM_SIZE];
346 /***********************************************************************
347 * Constants and macros internal to this module.
348 **********************************************************************/
350 // INIT_CACHE_SIZE and INIT_META_CACHE_SIZE must be a power of two
352 INIT_CACHE_SIZE_LOG2 = 2,
353 INIT_META_CACHE_SIZE_LOG2 = 2,
354 INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2),
355 INIT_META_CACHE_SIZE = (1 << INIT_META_CACHE_SIZE_LOG2)
358 // Amount of space required for count hash table buckets, knowing that
359 // one entry is embedded in the cache structure itself
360 #define TABLE_SIZE(count) ((count - 1) * sizeof(Method))
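
// For example, TABLE_SIZE(INIT_CACHE_SIZE) with INIT_CACHE_SIZE == 4 is
// 3 * sizeof(Method), because the first bucket is the one already embedded
// in struct objc_cache.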
// A sentinel (magic value) to report bad thread_get_state status
363 #define PC_SENTINAL 0
366 /***********************************************************************
367 * Types internal to this module.
368 **********************************************************************/
370 #ifdef OBJC_INSTRUMENTED
371 struct CacheInstrumentation
373 unsigned int hitCount; // cache lookup success tally
374 unsigned int hitProbes; // sum entries checked to hit
375 unsigned int maxHitProbes; // max entries checked to hit
376 unsigned int missCount; // cache lookup no-find tally
377 unsigned int missProbes; // sum entries checked to miss
378 unsigned int maxMissProbes; // max entries checked to miss
379 unsigned int flushCount; // cache flush tally
380 unsigned int flushedEntries; // sum cache entries flushed
381 unsigned int maxFlushedEntries; // max cache entries flushed
383 typedef struct CacheInstrumentation CacheInstrumentation;
385 // Cache instrumentation data follows table, so it is most compatible
386 #define CACHE_INSTRUMENTATION(cache) (CacheInstrumentation *) &cache->buckets[cache->mask + 1];
389 /***********************************************************************
390 * Function prototypes internal to this module.
391 **********************************************************************/
393 static Ivar class_getVariable (Class cls, const char * name);
394 static void flush_caches (Class cls, BOOL flush_meta);
395 static struct objc_method_list *nextMethodList(struct objc_class *cls, void **it);
396 static void addClassToOriginalClass (Class posingClass, Class originalClass);
397 static void _objc_addOrigClass (Class origClass);
398 static void _freedHandler (id self, SEL sel);
399 static void _nonexistentHandler (id self, SEL sel);
400 static void class_initialize (Class cls);
401 static Cache _cache_expand (Class cls);
402 static int LogObjCMessageSend (BOOL isClassMethod, const char * objectsClass, const char * implementingClass, SEL selector);
403 static BOOL _cache_fill (Class cls, Method smt, SEL sel);
404 static void _cache_addForwardEntry(Class cls, SEL sel);
405 static void _cache_flush (Class cls);
406 static IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);
407 static int SubtypeUntil (const char * type, char end);
408 static const char * SkipFirstType (const char * type);
410 static unsigned long _get_pc_for_thread (mach_port_t thread);
411 static int _collecting_in_critical (void);
412 static void _garbage_make_room (void);
413 static void _cache_collect_free (void * data, size_t size, BOOL tryCollect);
415 static BOOL cache_allocator_is_block(void *block);
416 static void *cache_allocator_calloc(size_t size);
417 static void cache_allocator_free(void *block);
419 static void _cache_print (Cache cache);
420 static unsigned int log2 (unsigned int x);
421 static void PrintCacheHeader (void);
422 #ifdef OBJC_INSTRUMENTED
423 static void PrintCacheHistogram (char * title, unsigned int * firstEntry, unsigned int entryCount);
426 /***********************************************************************
427 * Static data internal to this module.
428 **********************************************************************/
430 // When _class_uncache is non-zero, cache growth copies the existing
431 // entries into the new (larger) cache. When this flag is zero, new
432 // (larger) caches start out empty.
433 static int _class_uncache = 1;
435 // When _class_slow_grow is non-zero, any given cache is actually grown
436 // only on the odd-numbered times it becomes full; on the even-numbered
437 // times, it is simply emptied and re-used. When this flag is zero,
438 // caches are grown every time.
439 static int _class_slow_grow = 1;
441 // Lock for cache access.
442 // Held when modifying a cache in place.
443 // Held when installing a new cache on a class.
444 // Held when adding to the cache garbage list.
445 // Held when disposing cache garbage.
446 // See "Method cache locking" above for notes about cache locking.
447 static OBJC_DECLARE_LOCK(cacheUpdateLock);
449 // classInitLock protects classInitWaitCond and examination and modification
450 // of CLS_INITIALIZED and CLS_INITIALIZING.
451 OBJC_DECLARE_LOCK(classInitLock);
452 // classInitWaitCond is signalled when any class is done initializing.
453 // Threads that are waiting for a class to finish initializing wait on this.
454 pthread_cond_t classInitWaitCond = PTHREAD_COND_INITIALIZER;
456 // Lock for method list access and modification.
457 // Protects methodLists fields, method arrays, and CLS_NO_METHOD_ARRAY bits.
458 // Classes not yet in use do not need to take this lock.
459 OBJC_DECLARE_LOCK(methodListLock);
461 // When traceDuplicates is non-zero, _cacheFill checks whether the method
462 // being encached is already there. The number of times it finds a match
463 // is tallied in cacheFillDuplicates. When traceDuplicatesVerbose is
464 // non-zero, each duplication is logged when found in this way.
465 static int traceDuplicates = 0;
466 static int traceDuplicatesVerbose = 0;
467 static int cacheFillDuplicates = 0;
469 // Custom cache allocator parameters
470 // CACHE_REGION_SIZE must be a multiple of CACHE_QUANTUM.
471 #define CACHE_QUANTUM 520
472 #define CACHE_REGION_SIZE 131040 // quantized just under 128KB (131072)
473 // #define CACHE_REGION_SIZE 262080 // quantized just under 256KB (262144)
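
// For reference: 131040 == 252 * CACHE_QUANTUM (520), the largest multiple
// of the quantum that still fits under 128KB; 262080 == 504 * 520 likewise
// fits just under 256KB.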
475 #ifdef OBJC_INSTRUMENTED
477 static unsigned int LinearFlushCachesCount = 0;
478 static unsigned int LinearFlushCachesVisitedCount = 0;
479 static unsigned int MaxLinearFlushCachesVisitedCount = 0;
480 static unsigned int NonlinearFlushCachesCount = 0;
481 static unsigned int NonlinearFlushCachesClassCount = 0;
482 static unsigned int NonlinearFlushCachesVisitedCount = 0;
483 static unsigned int MaxNonlinearFlushCachesVisitedCount = 0;
484 static unsigned int IdealFlushCachesCount = 0;
485 static unsigned int MaxIdealFlushCachesCount = 0;
488 // Method call logging
489 typedef int (*ObjCLogProc)(BOOL, const char *, const char *, SEL);
491 static int totalCacheFills NOBSS = 0;
492 static int objcMsgLogFD = (-1);
493 static ObjCLogProc objcMsgLogProc = &LogObjCMessageSend;
494 static int objcMsgLogEnabled = 0;
498 _errNoMem[] = "failed -- out of memory(%s, %u)",
499 _errAllocNil[] = "allocating nil object",
500 _errFreedObject[] = "message %s sent to freed object=0x%lx",
501 _errNonExistentObject[] = "message %s sent to non-existent object=0x%lx",
502 _errBadSel[] = "invalid selector %s",
503 _errNotSuper[] = "[%s poseAs:%s]: target not immediate superclass",
504 _errNewVars[] = "[%s poseAs:%s]: %s defines new instance variables";
506 /***********************************************************************
507 * Information about multi-thread support:
509 * Since we do not lock many operations which walk the superclass, method
510 * and ivar chains, these chains must remain intact once a class is published
511 * by inserting it into the class hashtable. All modifications must be
 * atomic so that someone walking these chains will always get a valid
 * result.
514 ***********************************************************************/
515 /***********************************************************************
516 * A static empty cache. All classes initially point at this cache.
517 * When the first message is sent it misses in the cache, and when
518 * the cache is grown it checks for this case and uses malloc rather
 * than realloc. This avoids the need to check for NULL caches in the
 * messenger.
521 ***********************************************************************/
523 #ifndef OBJC_INSTRUMENTED
524 const struct objc_cache emptyCache =
531 // OBJC_INSTRUMENTED requires writable data immediately following emptyCache.
532 struct objc_cache emptyCache =
538 CacheInstrumentation emptyCacheInstrumentation = {0};
542 // Freed objects have their isa set to point to this dummy class.
543 // This avoids the need to check for Nil classes in the messenger.
544 static const struct objc_class freedObjectClass =
554 (Cache) &emptyCache, // cache
558 static const struct objc_class nonexistentObjectClass =
562 "NONEXISTENT(id)", // name
568 (Cache) &emptyCache, // cache
572 /***********************************************************************
573 * object_getClassName.
574 **********************************************************************/
575 const char * object_getClassName (id obj)
577 // Even nil objects have a class name, sort of
581 // Retrieve name from object's class
582 return ((struct objc_class *) obj->isa)->name;
585 /***********************************************************************
586 * object_getIndexedIvars.
587 **********************************************************************/
588 void * object_getIndexedIvars (id obj)
590 // ivars are tacked onto the end of the object
591 return ((char *) obj) + ((struct objc_class *) obj->isa)->instance_size;
595 /***********************************************************************
596 * object_cxxDestructFromClass.
597 * Call C++ destructors on obj, starting with cls's
598 * dtor method (if any) followed by superclasses' dtors (if any),
599 * stopping at cls's dtor (if any).
600 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
601 **********************************************************************/
602 static void object_cxxDestructFromClass(id obj, Class cls)
606 // Call cls's dtor first, then superclasses's dtors.
608 for ( ; cls != NULL; cls = cls->super_class) {
609 if (!(cls->info & CLS_HAS_CXX_STRUCTORS)) continue;
611 lookupMethodInClassAndLoadCache(cls, cxx_destruct_sel);
612 if (dtor != (void(*)(id))&_objc_msgForward) {
614 _objc_inform("CXX: calling C++ destructors for class %s",
623 /***********************************************************************
624 * object_cxxDestruct.
625 * Call C++ destructors on obj, if any.
626 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
627 **********************************************************************/
628 void object_cxxDestruct(id obj)
631 object_cxxDestructFromClass(obj, obj->isa);
635 /***********************************************************************
636 * object_cxxConstructFromClass.
637 * Recursively call C++ constructors on obj, starting with base class's
638 * ctor method (if any) followed by subclasses' ctors (if any), stopping
639 * at cls's ctor (if any).
640 * Returns YES if construction succeeded.
641 * Returns NO if some constructor threw an exception. The exception is
642 * caught and discarded. Any partial construction is destructed.
643 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
645 * .cxx_construct returns id. This really means:
646 * return self: construction succeeded
647 * return nil: construction failed because a C++ constructor threw an exception
648 **********************************************************************/
649 static BOOL object_cxxConstructFromClass(id obj, Class cls)
653 // Call superclasses' ctors first, if any.
654 if (cls->super_class) {
655 BOOL ok = object_cxxConstructFromClass(obj, cls->super_class);
656 if (!ok) return NO; // some superclass's ctor failed - give up
659 // Find this class's ctor, if any.
660 if (!(cls->info & CLS_HAS_CXX_STRUCTORS)) return YES; // no ctor - ok
661 ctor = (id(*)(id))lookupMethodInClassAndLoadCache(cls, cxx_construct_sel);
662 if (ctor == (id(*)(id))&_objc_msgForward) return YES; // no ctor - ok
664 // Call this class's ctor.
666 _objc_inform("CXX: calling C++ constructors for class %s", cls->name);
668 if ((*ctor)(obj)) return YES; // ctor called and succeeded - ok
670 // This class's ctor was called and failed.
671 // Call superclasses's dtors to clean up.
672 if (cls->super_class) object_cxxDestructFromClass(obj, cls->super_class);
677 /***********************************************************************
678 * object_cxxConstructFromClass.
679 * Call C++ constructors on obj, if any.
680 * Returns YES if construction succeeded.
681 * Returns NO if some constructor threw an exception. The exception is
682 * caught and discarded. Any partial construction is destructed.
683 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
684 **********************************************************************/
685 BOOL object_cxxConstruct(id obj)
687 if (!obj) return YES;
688 return object_cxxConstructFromClass(obj, obj->isa);
692 /***********************************************************************
693 * _internal_class_createInstanceFromZone. Allocate an instance of the
694 * specified class with the specified number of bytes for indexed
695 * variables, in the specified zone. The isa field is set to the
696 * class, C++ default constructors are called, and all other fields are zeroed.
697 **********************************************************************/
698 static id _internal_class_createInstanceFromZone (Class aClass,
703 register unsigned byteCount;
705 // Can't create something for nothing
708 __objc_error ((id) aClass, _errAllocNil, 0);
712 // Allocate and initialize
713 byteCount = ((struct objc_class *) aClass)->instance_size + nIvarBytes;
714 obj = (id) malloc_zone_calloc (z, 1, byteCount);
717 __objc_error ((id) aClass, _errNoMem, ((struct objc_class *) aClass)->name, nIvarBytes);
721 // Set the isa pointer
724 // Call C++ constructors, if any.
725 if (!object_cxxConstruct(obj)) {
726 // Some C++ constructor threw an exception.
727 malloc_zone_free(z, obj);
734 /***********************************************************************
735 * _internal_class_createInstance. Allocate an instance of the specified
736 * class with the specified number of bytes for indexed variables, in
737 * the default zone, using _internal_class_createInstanceFromZone.
738 **********************************************************************/
739 static id _internal_class_createInstance (Class aClass,
742 return _internal_class_createInstanceFromZone (aClass,
744 malloc_default_zone ());
747 id (*_poseAs)() = (id (*)())class_poseAs;
748 id (*_alloc)(Class, unsigned) = _internal_class_createInstance;
749 id (*_zoneAlloc)(Class, unsigned, void *) = _internal_class_createInstanceFromZone;
751 /***********************************************************************
752 * class_createInstanceFromZone. Allocate an instance of the specified
753 * class with the specified number of bytes for indexed variables, in
754 * the specified zone, using _zoneAlloc.
755 **********************************************************************/
756 id class_createInstanceFromZone (Class aClass,
760 // _zoneAlloc can be overridden, but is initially set to
761 // _internal_class_createInstanceFromZone
762 return (*_zoneAlloc) (aClass, nIvarBytes, z);
765 /***********************************************************************
766 * class_createInstance. Allocate an instance of the specified class with
767 * the specified number of bytes for indexed variables, using _alloc.
768 **********************************************************************/
769 id class_createInstance (Class aClass,
772 // _alloc can be overridden, but is initially set to
773 // _internal_class_createInstance
774 return (*_alloc) (aClass, nIvarBytes);
777 /***********************************************************************
778 * class_setVersion. Record the specified version with the class.
779 **********************************************************************/
780 void class_setVersion (Class aClass,
783 ((struct objc_class *) aClass)->version = version;
786 /***********************************************************************
787 * class_getVersion. Return the version recorded with the class.
788 **********************************************************************/
789 int class_getVersion (Class aClass)
791 return ((struct objc_class *) aClass)->version;
795 static inline Method _findNamedMethodInList(struct objc_method_list * mlist, const char *meth_name) {
    int i;
    if (!mlist) return NULL;
    for (i = 0; i < mlist->method_count; i++) {
        Method m = &mlist->method_list[i];
        if (*((const char *)m->method_name) == *meth_name  &&  0 == strcmp((const char *)(m->method_name), meth_name)) {
            return m;
        }
    }
    return NULL;
}
808 /***********************************************************************
809 * fixupSelectorsInMethodList
810 * Uniques selectors in the given method list.
811 * The given method list must be non-NULL and not already fixed-up.
812 * If the class was loaded from a bundle:
813 * fixes up the given list in place with heap-allocated selector strings
814 * If the class was not from a bundle:
815 * allocates a copy of the method list, fixes up the copy, and returns
816 * the copy. The given list is unmodified.
818 * If cls is already in use, methodListLock must be held by the caller.
819 **********************************************************************/
820 // Fixed-up method lists get mlist->obsolete = _OBJC_FIXED_UP.
821 #define _OBJC_FIXED_UP ((void *)1771)
823 static struct objc_method_list *fixupSelectorsInMethodList(Class cls, struct objc_method_list *mlist)
827 struct objc_method_list *old_mlist;
829 if ( ! mlist ) return (struct objc_method_list *)0;
830 if ( mlist->obsolete != _OBJC_FIXED_UP ) {
831 BOOL isBundle = CLS_GETINFO(cls, CLS_FROM_BUNDLE) ? YES : NO;
834 size = sizeof(struct objc_method_list) - sizeof(struct objc_method) + old_mlist->method_count * sizeof(struct objc_method);
835 mlist = _malloc_internal(size);
836 memmove(mlist, old_mlist, size);
838 // Mach-O bundles are fixed up in place.
839 // This prevents leaks when a bundle is unloaded.
842 for ( i = 0; i < mlist->method_count; i += 1 ) {
843 method = &mlist->method_list[i];
844 method->method_name =
845 sel_registerNameNoLock((const char *)method->method_name, isBundle); // Always copy selector data from bundles.
848 mlist->obsolete = _OBJC_FIXED_UP;
854 /***********************************************************************
856 * Returns successive method lists from the given class.
857 * Method lists are returned in method search order (i.e. highest-priority
858 * implementations first).
859 * All necessary method list fixups are performed, so the
860 * returned method list is fully-constructed.
862 * If cls is already in use, methodListLock must be held by the caller.
863 * For full thread-safety, methodListLock must be continuously held by the
864 * caller across all calls to nextMethodList(). If the lock is released,
865 * the bad results listed in class_nextMethodList() may occur.
867 * void *iterator = NULL;
868 * struct objc_method_list *mlist;
869 * OBJC_LOCK(&methodListLock);
870 * while ((mlist = nextMethodList(cls, &iterator))) {
871 * // do something with mlist
873 * OBJC_UNLOCK(&methodListLock);
874 **********************************************************************/
875 static struct objc_method_list *nextMethodList(struct objc_class *cls,
878 uintptr_t index = *(uintptr_t *)it;
879 struct objc_method_list **resultp;
882 // First call to nextMethodList.
883 if (!cls->methodLists) {
885 } else if (cls->info & CLS_NO_METHOD_ARRAY) {
886 resultp = (struct objc_method_list **)&cls->methodLists;
888 resultp = &cls->methodLists[0];
889 if (!*resultp || *resultp == END_OF_METHODS_LIST) {
894 // Subsequent call to nextMethodList.
895 if (!cls->methodLists) {
897 } else if (cls->info & CLS_NO_METHOD_ARRAY) {
900 resultp = &cls->methodLists[index];
901 if (!*resultp || *resultp == END_OF_METHODS_LIST) {
907 // resultp now is NULL, meaning there are no more method lists,
908 // OR the address of the method list pointer to fix up and return.
911 if (*resultp && (*resultp)->obsolete != _OBJC_FIXED_UP) {
912 *resultp = fixupSelectorsInMethodList(cls, *resultp);
914 *it = (void *)(index + 1);
923 /* These next three functions are the heart of ObjC method lookup.
 * If the class is currently in use, methodListLock must be held by the caller.
 */
926 static inline Method _findMethodInList(struct objc_method_list * mlist, SEL sel) {
    int i;
    if (!mlist) return NULL;
    for (i = 0; i < mlist->method_count; i++) {
        Method m = &mlist->method_list[i];
        if (m->method_name == sel) {
            return m;
        }
    }
    return NULL;
}
938 static inline Method _findMethodInClass(Class cls, SEL sel) __attribute__((always_inline));
939 static inline Method _findMethodInClass(Class cls, SEL sel) {
940 // Flattened version of nextMethodList(). The optimizer doesn't
941 // do a good job with hoisting the conditionals out of the loop.
942 // Conceptually, this looks like:
943 // while ((mlist = nextMethodList(cls, &iterator))) {
944 // Method m = _findMethodInList(mlist, sel);
948 if (!cls->methodLists) {
952 else if (cls->info & CLS_NO_METHOD_ARRAY) {
954 struct objc_method_list **mlistp;
955 mlistp = (struct objc_method_list **)&cls->methodLists;
956 if ((*mlistp)->obsolete != _OBJC_FIXED_UP) {
957 *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
959 return _findMethodInList(*mlistp, sel);
962 // Multiple method lists.
963 struct objc_method_list **mlistp;
964 for (mlistp = cls->methodLists;
965 *mlistp != NULL && *mlistp != END_OF_METHODS_LIST;
969 if ((*mlistp)->obsolete != _OBJC_FIXED_UP) {
970 *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
972 m = _findMethodInList(*mlistp, sel);
979 static inline Method _getMethod(Class cls, SEL sel) {
980 for (; cls; cls = cls->super_class) {
        Method m = _findMethodInClass(cls, sel);
        if (m) return m;
    }
    return NULL;
}
989 // fixme for gc debugging temporary use
990 __private_extern__ IMP findIMPInClass(Class cls, SEL sel)
992 Method m = _findMethodInClass(cls, sel);
993 if (m) return m->method_imp;
997 /***********************************************************************
998 * class_getInstanceMethod. Return the instance method for the
999 * specified class and selector.
1000 **********************************************************************/
1001 Method class_getInstanceMethod (Class aClass,
1006 // Need both a class and a selector
1007 if (!aClass || !aSelector)
1011 OBJC_LOCK(&methodListLock);
1012 result = _getMethod (aClass, aSelector);
1013 OBJC_UNLOCK(&methodListLock);
1017 /***********************************************************************
1018 * class_getClassMethod. Return the class method for the specified
1019 * class and selector.
1020 **********************************************************************/
1021 Method class_getClassMethod (Class aClass,
1026 // Need both a class and a selector
1027 if (!aClass || !aSelector)
1030 // Go to the class or isa
1031 OBJC_LOCK(&methodListLock);
1032 result = _getMethod (GETMETA(aClass), aSelector);
1033 OBJC_UNLOCK(&methodListLock);
1037 /***********************************************************************
1038 * class_getVariable. Return the named instance variable.
1039 **********************************************************************/
1040 static Ivar class_getVariable (Class cls,
1043 struct objc_class * thisCls;
1045 // Outer loop - search the class and its superclasses
1046 for (thisCls = cls; thisCls != Nil; thisCls = ((struct objc_class *) thisCls)->super_class)
1051 // Skip class having no ivars
1052 if (!thisCls->ivars)
1055 // Inner loop - search the given class
1056 thisIvar = &thisCls->ivars->ivar_list[0];
1057 for (index = 0; index < thisCls->ivars->ivar_count; index += 1)
1059 // Check this ivar's name. Be careful because the
1060 // compiler generates ivar entries with NULL ivar_name
1061 // (e.g. for anonymous bit fields).
1062 if ((thisIvar->ivar_name) &&
1063 (strcmp (name, thisIvar->ivar_name) == 0))
1066 // Move to next ivar
1075 /***********************************************************************
1076 * class_getInstanceVariable. Return the named instance variable.
1078 * Someday add class_getClassVariable ().
1079 **********************************************************************/
1080 Ivar class_getInstanceVariable (Class aClass,
1083 // Must have a class and a name
1084 if (!aClass || !name)
1088 return class_getVariable (aClass, name);
1091 /***********************************************************************
1092 * flush_caches. Flush the instance and optionally class method caches
1093 * of cls and all its subclasses.
 * Specifying Nil for the class means "all classes."
1096 **********************************************************************/
1097 static void flush_caches(Class cls, BOOL flush_meta)
1099 int numClasses = 0, newNumClasses;
1100 struct objc_class * * classes = NULL;
1102 struct objc_class * clsObject;
1103 #ifdef OBJC_INSTRUMENTED
1104 unsigned int classesVisited;
1105 unsigned int subclassCount;
1108 // Do nothing if class has no cache
1109 // This check is safe to do without any cache locks.
1110 if (cls && !((struct objc_class *) cls)->cache)
1113 newNumClasses = objc_getClassList((Class *)NULL, 0);
1114 while (numClasses < newNumClasses) {
1115 numClasses = newNumClasses;
1116 classes = _realloc_internal(classes, sizeof(Class) * numClasses);
1117 newNumClasses = objc_getClassList((Class *)classes, numClasses);
1119 numClasses = newNumClasses;
1121 OBJC_LOCK(&cacheUpdateLock);
1123 // Handle nil and root instance class specially: flush all
1124 // instance and class method caches. Nice that this
1125 // loop is linear vs the N-squared loop just below.
1126 if (!cls || !((struct objc_class *) cls)->super_class)
1128 #ifdef OBJC_INSTRUMENTED
1129 LinearFlushCachesCount += 1;
1133 // Traverse all classes in the hash table
1134 for (i = 0; i < numClasses; i++)
1136 struct objc_class * metaClsObject;
1137 #ifdef OBJC_INSTRUMENTED
1138 classesVisited += 1;
1140 clsObject = classes[i];
1142 // Skip class that is known not to be a subclass of this root
// (the isa pointer of any meta class points to the meta class
// of the root class).
1145 // NOTE: When is an isa pointer of a hash tabled class ever nil?
1146 metaClsObject = clsObject->isa;
1147 if (cls && metaClsObject && cls->isa != metaClsObject->isa)
1152 #ifdef OBJC_INSTRUMENTED
1156 _cache_flush (clsObject);
1157 if (flush_meta && metaClsObject != NULL) {
1158 _cache_flush (metaClsObject);
1161 #ifdef OBJC_INSTRUMENTED
1162 LinearFlushCachesVisitedCount += classesVisited;
1163 if (classesVisited > MaxLinearFlushCachesVisitedCount)
1164 MaxLinearFlushCachesVisitedCount = classesVisited;
1165 IdealFlushCachesCount += subclassCount;
1166 if (subclassCount > MaxIdealFlushCachesCount)
1167 MaxIdealFlushCachesCount = subclassCount;
1170 OBJC_UNLOCK(&cacheUpdateLock);
1171 _free_internal(classes);
1175 // Outer loop - flush any cache that could now get a method from
1176 // cls (i.e. the cache associated with cls and any of its subclasses).
1177 #ifdef OBJC_INSTRUMENTED
1178 NonlinearFlushCachesCount += 1;
1182 for (i = 0; i < numClasses; i++)
1184 struct objc_class * clsIter;
1186 #ifdef OBJC_INSTRUMENTED
1187 NonlinearFlushCachesClassCount += 1;
1189 clsObject = classes[i];
1191 // Inner loop - Process a given class
1192 clsIter = clsObject;
1196 #ifdef OBJC_INSTRUMENTED
1197 classesVisited += 1;
1199 // Flush clsObject instance method cache if
1200 // clsObject is a subclass of cls, or is cls itself
1201 // Flush the class method cache if that was asked for
1204 #ifdef OBJC_INSTRUMENTED
1207 _cache_flush (clsObject);
1209 _cache_flush (clsObject->isa);
1215 // Flush clsObject class method cache if cls is
1216 // the meta class of clsObject or of one
1217 // of clsObject's superclasses
1218 else if (clsIter->isa == cls)
1220 #ifdef OBJC_INSTRUMENTED
1223 _cache_flush (clsObject->isa);
1227 // Move up superclass chain
1228 else if (ISINITIALIZED(clsIter))
1229 clsIter = clsIter->super_class;
1231 // clsIter is not initialized, so its cache
1232 // must be empty. This happens only when
1233 // clsIter == clsObject, because
1234 // superclasses are initialized before
1235 // subclasses, and this loop traverses
1236 // from sub- to super- classes.
1241 #ifdef OBJC_INSTRUMENTED
1242 NonlinearFlushCachesVisitedCount += classesVisited;
1243 if (classesVisited > MaxNonlinearFlushCachesVisitedCount)
1244 MaxNonlinearFlushCachesVisitedCount = classesVisited;
1245 IdealFlushCachesCount += subclassCount;
1246 if (subclassCount > MaxIdealFlushCachesCount)
1247 MaxIdealFlushCachesCount = subclassCount;
1250 OBJC_UNLOCK(&cacheUpdateLock);
1251 _free_internal(classes);
1254 /***********************************************************************
1255 * _objc_flush_caches. Flush the caches of the specified class and any
1256 * of its subclasses. If cls is a meta-class, only meta-class (i.e.
1257 * class method) caches are flushed. If cls is an instance-class, both
1258 * instance-class and meta-class caches are flushed.
1259 **********************************************************************/
1260 void _objc_flush_caches (Class cls)
1262 flush_caches (cls, YES);
1265 /***********************************************************************
1266 * do_not_remove_this_dummy_function.
1267 **********************************************************************/
1268 void do_not_remove_this_dummy_function (void)
1270 (void) class_nextMethodList (NULL, NULL);
1274 /***********************************************************************
1275 * class_nextMethodList.
1276 * External version of nextMethodList().
1278 * This function is not fully thread-safe. A series of calls to
1279 * class_nextMethodList() may fail if methods are added to or removed
1280 * from the class between calls.
1281 * If methods are added between calls to class_nextMethodList(), it may
1282 * return previously-returned method lists again, and may fail to return
1283 * newly-added lists.
1284 * If methods are removed between calls to class_nextMethodList(), it may
1285 * omit surviving method lists or simply crash.
1286 **********************************************************************/
1287 OBJC_EXPORT struct objc_method_list * class_nextMethodList (Class cls,
1290 struct objc_method_list *result;
1291 OBJC_LOCK(&methodListLock);
1292 result = nextMethodList(cls, it);
1293 OBJC_UNLOCK(&methodListLock);
1297 /***********************************************************************
1299 **********************************************************************/
1302 (void) class_nextMethodList (Nil, NULL);
1305 /***********************************************************************
1308 * Formerly class_addInstanceMethods ()
1309 **********************************************************************/
1310 void class_addMethods (Class cls,
1311 struct objc_method_list * meths)
1314 OBJC_LOCK(&methodListLock);
1315 _objc_insertMethods(cls, meths);
1316 OBJC_UNLOCK(&methodListLock);
1318 // Must flush when dynamically adding methods. No need to flush
1319 // all the class method caches. If cls is a meta class, though,
1320 // this will still flush it and any of its sub-meta classes.
1321 flush_caches (cls, NO);
1324 /***********************************************************************
1325 * class_addClassMethods.
1327 * Obsolete (for binary compatibility only).
1328 **********************************************************************/
1329 void class_addClassMethods (Class cls,
1330 struct objc_method_list * meths)
1332 class_addMethods (((struct objc_class *) cls)->isa, meths);
1335 /***********************************************************************
1336 * class_removeMethods.
1337 **********************************************************************/
1338 void class_removeMethods (Class cls,
1339 struct objc_method_list * meths)
1341 // Remove the methods
1342 OBJC_LOCK(&methodListLock);
1343 _objc_removeMethods(cls, meths);
1344 OBJC_UNLOCK(&methodListLock);
1346 // Must flush when dynamically removing methods. No need to flush
1347 // all the class method caches. If cls is a meta class, though,
1348 // this will still flush it and any of its sub-meta classes.
1349 flush_caches (cls, NO);
1352 /***********************************************************************
1353 * addClassToOriginalClass. Add to a hash table of classes involved in
1354 * a posing situation. We use this when we need to get to the "original"
1355 * class for some particular name through the function objc_getOrigClass.
1356 * For instance, the implementation of [super ...] will use this to be
1357 * sure that it gets hold of the correct super class, so that no infinite
1358 * loops will occur if the class it appears in is involved in posing.
1360 * We use the classLock to guard the hash table.
1362 * See tracker bug #51856.
1363 **********************************************************************/
1365 static NXMapTable * posed_class_hash = NULL;
1366 static NXMapTable * posed_class_to_original_class_hash = NULL;
1368 static void addClassToOriginalClass (Class posingClass,
1369 Class originalClass)
1371 // Install hash table when it is first needed
1372 if (!posed_class_to_original_class_hash)
1374 posed_class_to_original_class_hash =
1375 NXCreateMapTableFromZone (NXPtrValueMapPrototype,
1377 _objc_internal_zone ());
1380 // Add pose to hash table
1381 NXMapInsert (posed_class_to_original_class_hash,
1386 /***********************************************************************
1387 * getOriginalClassForPosingClass.
1388 **********************************************************************/
1389 Class getOriginalClassForPosingClass (Class posingClass)
1391 return NXMapGet (posed_class_to_original_class_hash, posingClass);
1394 /***********************************************************************
1395 * objc_getOrigClass.
1396 **********************************************************************/
1397 Class objc_getOrigClass (const char * name)
1399 struct objc_class * ret;
1401 // Look for class among the posers
1403 OBJC_LOCK(&classLock);
1404 if (posed_class_hash)
1405 ret = (Class) NXMapGet (posed_class_hash, name);
1406 OBJC_UNLOCK(&classLock);
1410 // Not a poser. Do a normal lookup.
1411 ret = objc_getClass (name);
1413 _objc_inform ("class `%s' not linked into application", name);
1418 /***********************************************************************
1419 * _objc_addOrigClass. This function is only used from class_poseAs.
1420 * Registers the original class names, before they get obscured by
1421 * posing, so that [super ..] will work correctly from categories
1422 * in posing classes and in categories in classes being posed for.
1423 **********************************************************************/
1424 static void _objc_addOrigClass (Class origClass)
1426 OBJC_LOCK(&classLock);
1428 // Create the poser's hash table on first use
1429 if (!posed_class_hash)
1431 posed_class_hash = NXCreateMapTableFromZone (NXStrValueMapPrototype,
1433 _objc_internal_zone ());
1436 // Add the named class iff it is not already there (or collides?)
1437 if (NXMapGet (posed_class_hash, ((struct objc_class *)origClass)->name) == 0)
1438 NXMapInsert (posed_class_hash, ((struct objc_class *)origClass)->name, origClass);
1440 OBJC_UNLOCK(&classLock);
1443 /***********************************************************************
1446 * !!! class_poseAs () does not currently flush any caches.
1447 **********************************************************************/
1448 Class class_poseAs (Class imposter,
1451 struct objc_class * clsObject;
1452 char * imposterNamePtr;
1453 NXHashTable * class_hash;
1455 struct objc_class * copy;
1456 #ifdef OBJC_CLASS_REFS
1457 header_info * hInfo;
1460 // Trivial case is easy
1461 if (imposter == original)
1464 // Imposter must be an immediate subclass of the original
1465 if (((struct objc_class *)imposter)->super_class != original) {
1466 __objc_error(imposter, _errNotSuper, ((struct objc_class *)imposter)->name, ((struct objc_class *)original)->name);
1469 // Can't pose when you have instance variables (how could it work?)
1470 if (((struct objc_class *)imposter)->ivars) {
1471 __objc_error(imposter, _errNewVars, ((struct objc_class *)imposter)->name, ((struct objc_class *)original)->name, ((struct objc_class *)imposter)->name);
1474 // Build a string to use to replace the name of the original class.
1475 #define imposterNamePrefix "_%"
1476 imposterNamePtr = _malloc_internal(strlen(((struct objc_class *)original)->name) + strlen(imposterNamePrefix) + 1);
1477 strcpy(imposterNamePtr, imposterNamePrefix);
1478 strcat(imposterNamePtr, ((struct objc_class *)original)->name);
1479 #undef imposterNamePrefix
1481 // We lock the class hashtable, so we are thread safe with respect to
1482 // calls to objc_getClass (). However, the class names are not
1483 // changed atomically, nor are all of the subclasses updated
1484 // atomically. I have ordered the operations so that you will
1485 // never crash, but you may get inconsistent results....
1487 // Register the original class so that [super ..] knows
1488 // exactly which classes are the "original" classes.
1489 _objc_addOrigClass (original);
1490 _objc_addOrigClass (imposter);
1492 // Copy the imposter, so that the imposter can continue
1493 // its normal life in addition to changing the behavior of
1494 // the original. As a hack we don't bother to copy the metaclass.
1495 // For some reason we modify the original rather than the copy.
1496 copy = (*_zoneAlloc)(imposter->isa, sizeof(struct objc_class), _objc_internal_zone());
1497 memmove(copy, imposter, sizeof(struct objc_class));
1499 OBJC_LOCK(&classLock);
1501 class_hash = objc_getClasses ();
1503 // Remove both the imposter and the original class.
1504 NXHashRemove (class_hash, imposter);
1505 NXHashRemove (class_hash, original);
1507 NXHashInsert (class_hash, copy);
1508 addClassToOriginalClass (imposter, copy);
1510 // Mark the imposter as such
1511 _class_setInfo(imposter, CLS_POSING);
1512 _class_setInfo(imposter->isa, CLS_POSING);
1514 // Change the name of the imposter to that of the original class.
1515 ((struct objc_class *)imposter)->name = ((struct objc_class *)original)->name;
1516 ((struct objc_class *)imposter)->isa->name = ((struct objc_class *)original)->isa->name;
1518 // Also copy the version field to avoid archiving problems.
1519 ((struct objc_class *)imposter)->version = ((struct objc_class *)original)->version;
1521 // Change all subclasses of the original to point to the imposter.
1522 state = NXInitHashState (class_hash);
1523 while (NXNextHashState (class_hash, &state, (void **) &clsObject))
1525 while ((clsObject) && (clsObject != imposter) &&
1526 (clsObject != copy))
1528 if (clsObject->super_class == original)
1530 clsObject->super_class = imposter;
1531 clsObject->isa->super_class = ((struct objc_class *)imposter)->isa;
1532 // We must flush caches here!
1536 clsObject = clsObject->super_class;
1540 #ifdef OBJC_CLASS_REFS
1541 // Replace the original with the imposter in all class refs
1542 // Major loop - process all headers
1543 for (hInfo = _objc_headerStart(); hInfo != NULL; hInfo = hInfo->next)
1546 unsigned int refCount;
1549 // Get refs associated with this header
1550 cls_refs = (Class *) _getObjcClassRefs ((headerType *) hInfo->mhdr, &refCount);
1551 if (!cls_refs || !refCount)
1554 // Minor loop - process this header's refs
1555 cls_refs = (Class *) ((unsigned long) cls_refs + hInfo->image_slide);
1556 for (index = 0; index < refCount; index += 1)
1558 if (cls_refs[index] == original)
1559 cls_refs[index] = imposter;
1562 #endif // OBJC_CLASS_REFS
1564 // Change the name of the original class.
1565 ((struct objc_class *)original)->name = imposterNamePtr + 1;
1566 ((struct objc_class *)original)->isa->name = imposterNamePtr;
1568 // Restore the imposter and the original class with their new names.
1569 NXHashInsert (class_hash, imposter);
1570 NXHashInsert (class_hash, original);
1572 OBJC_UNLOCK(&classLock);
1577 /***********************************************************************
1579 **********************************************************************/
1580 static void _freedHandler (id self,
1583 __objc_error (self, _errFreedObject, SELNAME(sel), self);
1586 /***********************************************************************
1587 * _nonexistentHandler.
1588 **********************************************************************/
1589 static void _nonexistentHandler (id self,
1592 __objc_error (self, _errNonExistentObject, SELNAME(sel), self);
1595 /***********************************************************************
1596 * class_respondsToMethod.
1598 * Called from -[Object respondsTo:] and +[Object instancesRespondTo:]
1599 **********************************************************************/
1600 BOOL class_respondsToMethod (Class cls,
1606 // No one responds to zero!
1610 imp = _cache_getImp(cls, sel);
1612 // Found method in cache.
1613 // If the cache entry is forward::, the class does not respond to sel.
1614 return (imp != &_objc_msgForward);
1617 // Handle cache miss
1618 OBJC_LOCK(&methodListLock);
1619 meth = _getMethod(cls, sel);
1620 OBJC_UNLOCK(&methodListLock);
1622 _cache_fill(cls, meth, sel);
1626 // Not implemented. Use _objc_msgForward.
1627 _cache_addForwardEntry(cls, sel);
1633 /***********************************************************************
1634 * class_lookupMethod.
1636 * Called from -[Object methodFor:] and +[Object instanceMethodFor:]
1637 **********************************************************************/
1638 IMP class_lookupMethod (Class cls,
1643 // No one responds to zero!
1645 __objc_error(cls, _errBadSel, sel);
1648 imp = _cache_getImp(cls, sel);
1649 if (imp) return imp;
1651 // Handle cache miss
1652 return _class_lookupMethodAndLoadCache (cls, sel);
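/* Usage sketch (illustration only; "MyClass" is a hypothetical class name):
 *
 *   SEL sel = sel_getUid("description");
 *   IMP imp = class_lookupMethod(objc_getClass("MyClass"), sel);
 *
 * The result is never NULL for a valid selector: when neither the class nor
 * its superclasses implement the method, _class_lookupMethodAndLoadCache
 * caches and returns &_objc_msgForward instead.
 */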
1655 /***********************************************************************
1656 * lookupNamedMethodInMethodList
1657 * Only called to find +load/-.cxx_construct/-.cxx_destruct methods,
1658 * without fixing up the entire method list.
1659 * The class is not yet in use, so methodListLock is not taken.
1660 **********************************************************************/
1661 __private_extern__ IMP lookupNamedMethodInMethodList(struct objc_method_list *mlist, const char *meth_name)
1663 Method m = meth_name ? _findNamedMethodInList(mlist, meth_name) : NULL;
1664 return (m ? m->method_imp : NULL);
1668 /***********************************************************************
1669 * _cache_malloc.
1671 * Called from _cache_create() and cache_expand()
1672 * Cache locks: cacheUpdateLock must be held by the caller.
1673 **********************************************************************/
1674 static Cache _cache_malloc(int slotCount)
1679 // Allocate table (why not check for failure?)
1680 size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount);
1681 #ifdef OBJC_INSTRUMENTED
1682 // Custom cache allocator can't handle instrumentation.
1683 size += sizeof(CacheInstrumentation);
1684 new_cache = _calloc_internal(size, 1);
1685 new_cache->mask = slotCount - 1;
1687 if (size < CACHE_QUANTUM || UseInternalZone) {
1688 new_cache = _calloc_internal(size, 1);
1689 new_cache->mask = slotCount - 1;
1690 // occupied and buckets and instrumentation are all zero
1692 new_cache = cache_allocator_calloc(size);
1693 // mask is already set
1694 // occupied and buckets and instrumentation are all zero
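// Sizing note (illustration, assuming 4-byte pointers as in the allocator
// comments later in this file): a cache reaches CACHE_QUANTUM (520 bytes) at
// 128 slots, i.e. mask + occupied plus 128 buckets = 130 words. Smaller caches
// therefore come from _calloc_internal above, while caches of 128 or more
// slots fall through to cache_allocator_calloc, the custom allocator defined
// further below.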
1702 /***********************************************************************
1703 * _cache_create.
1705 * Called from _cache_expand().
1706 * Cache locks: cacheUpdateLock must be held by the caller.
1707 **********************************************************************/
1708 Cache _cache_create (Class cls)
1713 // Select appropriate size
1714 slotCount = (ISMETA(cls)) ? INIT_META_CACHE_SIZE : INIT_CACHE_SIZE;
1716 new_cache = _cache_malloc(slotCount);
1718 // Install the cache
1719 ((struct objc_class *)cls)->cache = new_cache;
1721 // Clear the cache flush flag so that we will not flush this cache
1722 // before expanding it for the first time.
1723 _class_clearInfo(cls, CLS_FLUSH_CACHE);
1725 // Clear the grow flag so that we will re-use the current storage,
1726 // rather than actually grow the cache, when expanding the cache
1727 // for the first time
1728 if (_class_slow_grow)
1729 _class_clearInfo(cls, CLS_GROW_CACHE);
1731 // Return our creation
1735 /***********************************************************************
1736 * _cache_expand.
1738 * Called from _cache_fill ()
1739 * Cache locks: cacheUpdateLock must be held by the caller.
1740 **********************************************************************/
1741 static Cache _cache_expand (Class cls)
1745 unsigned int slotCount;
1748 // First growth goes from emptyCache to a real one
1749 old_cache = ((struct objc_class *)cls)->cache;
1750 if (old_cache == &emptyCache)
1751 return _cache_create (cls);
1753 // iff _class_slow_grow, trade off actual cache growth with re-using
1754 // the current one, so that growth only happens every odd time
1755 if (_class_slow_grow)
1757 // CLS_GROW_CACHE controls every-other-time behavior. If it
1758 // is non-zero, let the cache grow this time, but clear the
1759 // flag so the cache is reused next time
1760 if ((((struct objc_class * )cls)->info & CLS_GROW_CACHE) != 0)
1761 _class_clearInfo(cls, CLS_GROW_CACHE);
1763 // Reuse the current cache storage this time
1766 // Clear the valid-entry counter
1767 old_cache->occupied = 0;
1769 // Invalidate all the cache entries
1770 for (index = 0; index < old_cache->mask + 1; index += 1)
1772 // Remember what this entry was, so we can possibly
1773 // deallocate it after the bucket has been invalidated
1774 Method oldEntry = old_cache->buckets[index];
1775 // Skip invalid entry
1776 if (!CACHE_BUCKET_VALID(old_cache->buckets[index]))
1779 // Invalidate this entry
1780 CACHE_BUCKET_VALID(old_cache->buckets[index]) = NULL;
1782 // Deallocate "forward::" entry
1783 if (CACHE_BUCKET_IMP(oldEntry) == &_objc_msgForward)
1785 _cache_collect_free (oldEntry, sizeof(struct objc_method), NO);
1789 // Set the slow-growth flag so the cache is actually grown next time
1790 _class_setInfo(cls, CLS_GROW_CACHE);
1792 // Return the same old cache, freshly emptied
1798 // Double the cache size
1799 slotCount = (old_cache->mask + 1) << 1;
1801 new_cache = _cache_malloc(slotCount);
1803 #ifdef OBJC_INSTRUMENTED
1804 // Propagate the instrumentation data
1806 CacheInstrumentation * oldCacheData;
1807 CacheInstrumentation * newCacheData;
1809 oldCacheData = CACHE_INSTRUMENTATION(old_cache);
1810 newCacheData = CACHE_INSTRUMENTATION(new_cache);
1811 bcopy ((const char *)oldCacheData, (char *)newCacheData, sizeof(CacheInstrumentation));
1815 // iff _class_uncache, copy old cache entries into the new cache
1816 if (_class_uncache == 0)
1820 newMask = new_cache->mask;
1822 // Look at all entries in the old cache
1823 for (index = 0; index < old_cache->mask + 1; index += 1)
1827 // Skip invalid entry
1828 if (!CACHE_BUCKET_VALID(old_cache->buckets[index]))
1831 // Hash the old entry into the new table
1832 index2 = CACHE_HASH(CACHE_BUCKET_NAME(old_cache->buckets[index]),
1835 // Find an available spot, at or following the hashed spot;
1836 // guaranteed not to loop forever, because the table has grown.
1839 if (!CACHE_BUCKET_VALID(new_cache->buckets[index2]))
1841 new_cache->buckets[index2] = old_cache->buckets[index];
1849 // Account for the addition
1850 new_cache->occupied += 1;
1853 // Set the cache flush flag so that we will flush this cache
1854 // before expanding it again.
1855 _class_setInfo(cls, CLS_FLUSH_CACHE);
1858 // Deallocate "forward::" entries from the old cache
1861 for (index = 0; index < old_cache->mask + 1; index += 1)
1863 if (CACHE_BUCKET_VALID(old_cache->buckets[index]) &&
1864 CACHE_BUCKET_IMP(old_cache->buckets[index]) == &_objc_msgForward)
1866 _cache_collect_free (old_cache->buckets[index], sizeof(struct objc_method), NO);
1871 // Install new cache
1872 ((struct objc_class *)cls)->cache = new_cache;
1874 // Deallocate old cache, try freeing all the garbage
1875 _cache_collect_free (old_cache, old_cache->mask * sizeof(Method), YES);
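/* Illustration only: the linear-probe move performed when old entries are
 * rehashed into the doubled table above. A plain NULL test stands in for
 * CACHE_BUCKET_VALID, and startIndex stands in for CACHE_HASH(name, newMask).
 * Termination is guaranteed because the new table always has more slots than
 * the old table had occupied entries.
 */
static void rehash_one_bucket_sketch(Method *newBuckets, uintptr_t newMask,
                                     Method entry, uintptr_t startIndex)
{
    uintptr_t i = startIndex;
    while (newBuckets[i] != NULL)       // probe forward until a free slot appears
        i = (i + 1) & newMask;          // wrap around the power-of-two table
    newBuckets[i] = entry;
}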
1879 /***********************************************************************
1880 * instrumentObjcMessageSends/logObjcMessageSends.
1881 **********************************************************************/
1882 static int LogObjCMessageSend (BOOL isClassMethod,
1883 const char * objectsClass,
1884 const char * implementingClass,
1889 // Create/open the log file
1890 if (objcMsgLogFD == (-1))
1892 snprintf (buf, sizeof(buf), "/tmp/msgSends-%d", (int) getpid ());
1893 objcMsgLogFD = secure_open (buf, O_WRONLY | O_CREAT, geteuid());
1894 if (objcMsgLogFD < 0) {
1895 // no log file - disable logging
1896 objcMsgLogEnabled = 0;
1902 // Make the log entry
1903 snprintf(buf, sizeof(buf), "%c %s %s %s\n",
1904 isClassMethod ? '+' : '-',
1909 write (objcMsgLogFD, buf, strlen(buf));
1911 // Tell caller to not cache the method
1915 void instrumentObjcMessageSends (BOOL flag)
1917 int enabledValue = (flag) ? 1 : 0;
1920 if (objcMsgLogEnabled == enabledValue)
1923 // If enabling, flush all method caches so we get some traces
1925 flush_caches (Nil, YES);
1927 // Sync our log file
1928 if (objcMsgLogFD != (-1))
1929 fsync (objcMsgLogFD);
1931 objcMsgLogEnabled = enabledValue;
1934 void logObjcMessageSends (ObjCLogProc logProc)
1938 objcMsgLogProc = logProc;
1939 objcMsgLogEnabled = 1;
1943 objcMsgLogProc = logProc;
1944 objcMsgLogEnabled = 0;
1947 if (objcMsgLogFD != (-1))
1948 fsync (objcMsgLogFD);
1952 /***********************************************************************
1953 * _cache_fill. Add the specified method to the specified class' cache.
1954 * Returns NO if the cache entry wasn't added: cache was busy,
1955 * class is still being initialized, new entry is a duplicate.
1957 * Called only from _class_lookupMethodAndLoadCache and
1958 * class_respondsToMethod and _cache_addForwardEntry.
1960 * Cache locks: cacheUpdateLock must not be held.
1961 **********************************************************************/
1962 static BOOL _cache_fill(Class cls, Method smt, SEL sel)
1964 unsigned int newOccupied;
1969 // Never cache before +initialize is done
1970 if (!ISINITIALIZED(cls)) {
1974 // Keep tally of cache additions
1975 totalCacheFills += 1;
1977 OBJC_LOCK(&cacheUpdateLock);
1979 cache = ((struct objc_class *)cls)->cache;
1981 // Check for duplicate entries, if duplicate tracing is enabled
1982 if (traceDuplicates)
1985 arith_t mask = cache->mask;
1986 buckets = cache->buckets;
1989 for (index2 = 0; index2 < mask + 1; index2 += 1)
1991 // Skip invalid or non-duplicate entry
1992 if ((!CACHE_BUCKET_VALID(buckets[index2])) ||
1993 (strcmp ((char *) CACHE_BUCKET_NAME(buckets[index2]), (char *) smt->method_name) != 0))
1996 // Tally duplication, but report iff wanted
1997 cacheFillDuplicates += 1;
1998 if (traceDuplicatesVerbose)
2000 _objc_inform ("Cache fill duplicate #%d: found %x adding %x: %s\n",
2001 cacheFillDuplicates,
2002 (unsigned int) CACHE_BUCKET_NAME(buckets[index2]),
2003 (unsigned int) smt->method_name,
2004 (char *) smt->method_name);
2009 // Make sure the entry wasn't added to the cache by some other thread
2010 // before we grabbed the cacheUpdateLock.
2011 // Don't use _cache_getMethod() because _cache_getMethod() doesn't
2012 // return forward:: entries.
2013 if (_cache_getImp(cls, sel)) {
2014 OBJC_UNLOCK(&cacheUpdateLock);
2015 return NO; // entry is already cached, didn't add new one
2018 // Use the cache as-is if it is less than 3/4 full
2019 newOccupied = cache->occupied + 1;
2020 if ((newOccupied * 4) <= (cache->mask + 1) * 3) {
2021 // Cache is less than 3/4 full.
2022 cache->occupied = newOccupied;
2024 // Cache is too full. Flush it or expand it.
2025 if ((((struct objc_class * )cls)->info & CLS_FLUSH_CACHE) != 0) {
2028 cache = _cache_expand (cls);
2031 // Account for the addition
2032 cache->occupied += 1;
2035 // Insert the new entry. This can be done by either:
2036 // (a) Scanning for the first unused spot. Easy!
2037 // (b) Opening up an unused spot by sliding existing
2038 // entries down by one. The benefit of this
2039 // extra work is that it puts the most recently
2040 // loaded entries closest to where the selector
2041 // hash starts the search.
2043 // The loop is a little more complicated because there
2044 // are two kinds of entries, so there have to be two ways
2046 buckets = cache->buckets;
2047 index = CACHE_HASH(sel, cache->mask);
2050 // Slide existing entries down by one
2053 // Copy current entry to a local
2054 saveMethod = buckets[index];
2056 // Copy previous entry (or new entry) to current slot
2057 buckets[index] = smt;
2059 // Done if current slot had been invalid
2060 if (saveMethod == NULL)
2063 // Prepare to copy saved value into next slot
2066 // Move on to next slot
2068 index &= cache->mask;
2071 OBJC_UNLOCK(&cacheUpdateLock);
2073 return YES; // successfully added new cache entry
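/* Illustration only: the "slide entries down" insertion used above, written as
 * a standalone sketch. The new entry is written at its hash slot; whatever was
 * there moves one slot forward, and so on until an empty slot absorbs the
 * displaced chain. This keeps the most recently added methods closest to where
 * the selector hash starts probing. A NULL test stands in for CACHE_BUCKET_VALID.
 */
static void insert_by_sliding_sketch(Method *buckets, uintptr_t mask,
                                     Method newEntry, uintptr_t startIndex)
{
    uintptr_t index = startIndex;           // == CACHE_HASH(sel, mask)
    Method incoming = newEntry;

    while (incoming != NULL) {
        Method displaced = buckets[index];  // remember the current occupant
        buckets[index] = incoming;          // write the incoming entry here
        incoming = displaced;               // occupant slides to the next slot
        index = (index + 1) & mask;
    }
}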
2077 /***********************************************************************
2078 * _cache_addForwardEntry
2079 * Add a forward:: entry for the given selector to cls's method cache.
2080 * Does nothing if the cache addition fails for any reason.
2081 * Called from class_respondsToMethod and _class_lookupMethodAndLoadCache.
2082 * Cache locks: cacheUpdateLock must not be held.
2083 **********************************************************************/
2084 static void _cache_addForwardEntry(Class cls, SEL sel)
2088 smt = _malloc_internal(sizeof(struct objc_method));
2089 smt->method_name = sel;
2090 smt->method_types = "";
2091 smt->method_imp = &_objc_msgForward;
2092 if (! _cache_fill(cls, smt, sel)) {
2093 // Entry not added to cache. Don't leak the method struct.
2094 _free_internal(smt);
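// Note: these forward:: entries are the only cache entries whose Method struct
// is owned by the cache itself, which is why _cache_flush and _cache_expand
// check for &_objc_msgForward before handing a bucket to _cache_collect_free.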
2099 /***********************************************************************
2100 * _cache_flush. Invalidate all valid entries in the given class' cache,
2101 * and clear the CLS_FLUSH_CACHE in the cls->info.
2103 * Called from flush_caches() and _cache_fill()
2104 * Cache locks: cacheUpdateLock must be held by the caller.
2105 **********************************************************************/
2106 static void _cache_flush (Class cls)
2111 // Locate cache. Ignore unused cache.
2112 cache = ((struct objc_class *)cls)->cache;
2113 if (cache == NULL || cache == &emptyCache)
2116 #ifdef OBJC_INSTRUMENTED
2118 CacheInstrumentation * cacheData;
2121 cacheData = CACHE_INSTRUMENTATION(cache);
2122 cacheData->flushCount += 1;
2123 cacheData->flushedEntries += cache->occupied;
2124 if (cache->occupied > cacheData->maxFlushedEntries)
2125 cacheData->maxFlushedEntries = cache->occupied;
2129 // Traverse the cache
2130 for (index = 0; index <= cache->mask; index += 1)
2132 // Remember what this entry was, so we can possibly
2133 // deallocate it after the bucket has been invalidated
2134 Method oldEntry = cache->buckets[index];
2136 // Invalidate this entry
2137 CACHE_BUCKET_VALID(cache->buckets[index]) = NULL;
2139 // Deallocate "forward::" entry
2140 if (oldEntry && oldEntry->method_imp == &_objc_msgForward)
2141 _cache_collect_free (oldEntry, sizeof(struct objc_method), NO);
2144 // Clear the valid-entry counter
2145 cache->occupied = 0;
2147 // Clear the cache flush flag so that we will not flush this cache
2148 // before expanding it again.
2149 _class_clearInfo(cls, CLS_FLUSH_CACHE);
2152 /***********************************************************************
2153 * _objc_getFreedObjectClass. Return a pointer to the dummy freed
2154 * object class. Freed objects get their isa pointers replaced with
2155 * a pointer to the freedObjectClass, so that we can catch usages of
2157 **********************************************************************/
2158 Class _objc_getFreedObjectClass (void)
2160 return (Class) &freedObjectClass;
2163 /***********************************************************************
2164 * _objc_getNonexistentClass. Return a pointer to the dummy nonexistent
2165 * object class. This is used when, for example, mapping the class
2166 * refs for an image, and the class can not be found, so that we can
2167 * catch later uses of the non-existent class.
2168 **********************************************************************/
2169 Class _objc_getNonexistentClass (void)
2171 return (Class) &nonexistentObjectClass;
2175 /***********************************************************************
2176 * struct _objc_initializing_classes
2177 * Per-thread list of classes currently being initialized by that thread.
2178 * During initialization, that thread is allowed to send messages to that
2179 * class, but other threads have to wait.
2180 * The list is a simple array of metaclasses (the metaclass stores
2181 * the initialization state).
2182 **********************************************************************/
2183 typedef struct _objc_initializing_classes {
2184 int classesAllocated;
2185 struct objc_class** metaclasses;
2186 } _objc_initializing_classes;
2189 /***********************************************************************
2190 * _fetchInitializingClassList
2191 * Return the list of classes being initialized by this thread.
2192 * If create == YES, create the list when no classes are being initialized by this thread.
2193 * If create == NO, return NULL when no classes are being initialized by this thread.
2194 **********************************************************************/
2195 static _objc_initializing_classes *_fetchInitializingClassList(BOOL create)
2197 _objc_pthread_data *data;
2198 _objc_initializing_classes *list;
2199 struct objc_class **classes;
2201 data = pthread_getspecific(_objc_pthread_key);
2206 data = _calloc_internal(1, sizeof(_objc_pthread_data));
2207 pthread_setspecific(_objc_pthread_key, data);
2211 list = data->initializingClasses;
2216 list = _calloc_internal(1, sizeof(_objc_initializing_classes));
2217 data->initializingClasses = list;
2221 classes = list->metaclasses;
2222 if (classes == NULL) {
2223 // If _objc_initializing_classes exists, allocate metaclass array,
2224 // even if create == NO.
2225 // Allow 4 simultaneous class inits on this thread before realloc.
2226 list->classesAllocated = 4;
2227 classes = _calloc_internal(list->classesAllocated, sizeof(struct objc_class *));
2228 list->metaclasses = classes;
2234 /***********************************************************************
2235 * _destroyInitializingClassList
2236 * Deallocate memory used by the given initialization list.
2237 * Any part of the list may be NULL.
2238 * Called from _objc_pthread_destroyspecific().
2239 **********************************************************************/
2240 void _destroyInitializingClassList(_objc_initializing_classes *list)
2243 if (list->metaclasses != NULL) {
2244 _free_internal(list->metaclasses);
2246 _free_internal(list);
2251 /***********************************************************************
2252 * _thisThreadIsInitializingClass
2253 * Return TRUE if this thread is currently initializing the given class.
2254 **********************************************************************/
2255 static BOOL _thisThreadIsInitializingClass(struct objc_class *cls)
2259 _objc_initializing_classes *list = _fetchInitializingClassList(NO);
2262 for (i = 0; i < list->classesAllocated; i++) {
2263 if (cls == list->metaclasses[i]) return YES;
2267 // no list or not found in list
2272 /***********************************************************************
2273 * _setThisThreadIsInitializingClass
2274 * Record that this thread is currently initializing the given class.
2275 * This thread will be allowed to send messages to the class, but
2276 * other threads will have to wait.
2277 **********************************************************************/
2278 static void _setThisThreadIsInitializingClass(struct objc_class *cls)
2281 _objc_initializing_classes *list = _fetchInitializingClassList(YES);
2284 // paranoia: explicitly disallow duplicates
2285 for (i = 0; i < list->classesAllocated; i++) {
2286 if (cls == list->metaclasses[i]) {
2287 _objc_fatal("thread is already initializing this class!");
2288 return; // already the initializer
2292 for (i = 0; i < list->classesAllocated; i++) {
2293 if (0 == list->metaclasses[i]) {
2294 list->metaclasses[i] = cls;
2299 // class list is full - reallocate
2300 list->classesAllocated = list->classesAllocated * 2 + 1;
2301 list->metaclasses = _realloc_internal(list->metaclasses, list->classesAllocated * sizeof(struct objc_class *));
2302 // zero out the new entries
2303 list->metaclasses[i++] = cls;
2304 for ( ; i < list->classesAllocated; i++) {
2305 list->metaclasses[i] = NULL;
2310 /***********************************************************************
2311 * _setThisThreadIsNotInitializingClass
2312 * Record that this thread is no longer initializing the given class.
2313 **********************************************************************/
2314 static void _setThisThreadIsNotInitializingClass(struct objc_class *cls)
2318 _objc_initializing_classes *list = _fetchInitializingClassList(NO);
2321 for (i = 0; i < list->classesAllocated; i++) {
2322 if (cls == list->metaclasses[i]) {
2323 list->metaclasses[i] = NULL;
2329 // no list or not found in list
2330 _objc_fatal("thread is not initializing this class!");
2334 /***********************************************************************
2335 * class_initialize. Send the '+initialize' message on demand to any
2336 * uninitialized class. Force initialization of superclasses first.
2338 * Called only from _class_lookupMethodAndLoadCache (or itself).
2339 **********************************************************************/
2340 static void class_initialize(struct objc_class *cls)
2342 struct objc_class *infoCls = GETMETA(cls);
2343 BOOL reallyInitialize = NO;
2345 // Get the real class from the metaclass. The superclass chain
2346 // hangs off the real class only.
2349 if (strncmp(cls->name, "_%", 2) == 0) {
2350 // Posee's meta's name is smashed and isn't in the class_hash,
2351 // so objc_getClass doesn't work.
2352 char *baseName = strchr(cls->name, '%'); // get posee's real name
2353 cls = objc_getClass(baseName);
2355 cls = objc_getClass(cls->name);
2359 // Make sure super is done initializing BEFORE beginning to initialize cls.
2360 // See note about deadlock above.
2361 if (cls->super_class && !ISINITIALIZED(cls->super_class)) {
2362 class_initialize(cls->super_class);
2365 // Try to atomically set CLS_INITIALIZING.
2366 pthread_mutex_lock(&classInitLock);
2367 if (!ISINITIALIZED(cls) && !ISINITIALIZING(cls)) {
2368 _class_setInfo(infoCls, CLS_INITIALIZING);
2369 reallyInitialize = YES;
2371 pthread_mutex_unlock(&classInitLock);
2373 if (reallyInitialize) {
2374 // We successfully set the CLS_INITIALIZING bit. Initialize the class.
2376 // Record that we're initializing this class so we can message it.
2377 _setThisThreadIsInitializingClass(cls);
2379 // Send the +initialize message.
2380 // Note that +initialize is sent to the superclass (again) if
2381 // this class doesn't implement +initialize. 2157218
2382 [(id)cls initialize];
2384 // Done initializing. Update the info bits and notify waiting threads.
2385 pthread_mutex_lock(&classInitLock);
2386 _class_changeInfo(infoCls, CLS_INITIALIZED, CLS_INITIALIZING);
2387 pthread_cond_broadcast(&classInitWaitCond);
2388 pthread_mutex_unlock(&classInitLock);
2389 _setThisThreadIsNotInitializingClass(cls);
2393 else if (ISINITIALIZING(cls)) {
2394 // We couldn't set INITIALIZING because INITIALIZING was already set.
2395 // If this thread set it earlier, continue normally.
2396 // If some other thread set it, block until initialize is done.
2397 // It's ok if INITIALIZING changes to INITIALIZED while we're here,
2398 // because we safely check for INITIALIZED inside the lock
2400 if (_thisThreadIsInitializingClass(cls)) {
2403 pthread_mutex_lock(&classInitLock);
2404 while (!ISINITIALIZED(cls)) {
2405 pthread_cond_wait(&classInitWaitCond, &classInitLock);
2407 pthread_mutex_unlock(&classInitLock);
2412 else if (ISINITIALIZED(cls)) {
2413 // Set CLS_INITIALIZING failed because someone else already
2414 // initialized the class. Continue normally.
2415 // NOTE this check must come AFTER the ISINITIALIZING case.
2416 // Otherwise: Another thread is initializing this class. ISINITIALIZED
2417 // is false. Skip this clause. Then the other thread finishes
2418 // initialization and sets INITIALIZING=no and INITIALIZED=yes.
2419 // Skip the ISINITIALIZING clause. Die horribly.
2424 // We shouldn't be here.
2425 _objc_fatal("thread-safe class init in objc runtime is buggy!");
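/* Summary of the handshake above (informational only):
 *   - exactly one thread wins the race to set CLS_INITIALIZING and sends
 *     +initialize;
 *   - that thread may freely message the class while +initialize runs,
 *     via the per-thread initializing-class list;
 *   - every other thread blocks on classInitWaitCond until CLS_INITIALIZED
 *     appears, then proceeds.
 */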
2430 /***********************************************************************
2431 * _class_lookupMethodAndLoadCache.
2433 * Called only from objc_msgSend, objc_msgSendSuper and class_lookupMethod.
2434 **********************************************************************/
2435 IMP _class_lookupMethodAndLoadCache (Class cls,
2438 struct objc_class * curClass;
2440 IMP methodPC = NULL;
2442 trace(0xb300, 0, 0, 0);
2444 // Check for freed class
2445 if (cls == &freedObjectClass)
2446 return (IMP) _freedHandler;
2448 // Check for nonexistent class
2449 if (cls == &nonexistentObjectClass)
2450 return (IMP) _nonexistentHandler;
2452 trace(0xb301, 0, 0, 0);
2454 if (!ISINITIALIZED(cls)) {
2455 class_initialize ((struct objc_class *)cls);
2456 // If sel == initialize, class_initialize will send +initialize and
2457 // then the messenger will send +initialize again after this
2458 // procedure finishes. Of course, if this is not being called
2459 // from the messenger then it won't happen. 2778172
2462 trace(0xb302, 0, 0, 0);
2464 // Outer loop - search the caches and method lists of the
2465 // class and its super-classes
2466 for (curClass = cls; curClass; curClass = ((struct objc_class * )curClass)->super_class)
2468 #ifdef PRELOAD_SUPERCLASS_CACHES
2469 struct objc_class *curClass2;
2472 trace(0xb303, 0, 0, 0);
2474 // Beware of thread-unsafety and double-freeing of forward::
2475 // entries here! See note in "Method cache locking" above.
2476 // The upshot is that _cache_getMethod() will return NULL
2477 // instead of returning a forward:: entry.
2478 meth = _cache_getMethod(curClass, sel, &_objc_msgForward);
2480 // Found the method in this class or a superclass.
2481 // Cache the method in this class, unless we just found it in
2482 // this class's cache.
2483 if (curClass != cls) {
2484 #ifdef PRELOAD_SUPERCLASS_CACHES
2485 for (curClass2 = cls; curClass2 != curClass; curClass2 = curClass2->super_class)
2486 _cache_fill (curClass2, meth, sel);
2487 _cache_fill (curClass, meth, sel);
2489 _cache_fill (cls, meth, sel);
2493 methodPC = meth->method_imp;
2497 trace(0xb304, (int)methodPC, 0, 0);
2499 // Cache scan failed. Search method list.
2501 OBJC_LOCK(&methodListLock);
2502 meth = _findMethodInClass(curClass, sel);
2503 OBJC_UNLOCK(&methodListLock);
2505 // If logging is enabled, log the message send and let
2506 // the logger decide whether to encache the method.
2507 if ((objcMsgLogEnabled == 0) ||
2508 (objcMsgLogProc (CLS_GETINFO(((struct objc_class * )curClass),
2509 CLS_META) ? YES : NO,
2510 ((struct objc_class *)cls)->name,
2511 curClass->name, sel)))
2513 // Cache the method implementation
2514 #ifdef PRELOAD_SUPERCLASS_CACHES
2515 for (curClass2 = cls; curClass2 != curClass; curClass2 = curClass2->super_class)
2516 _cache_fill (curClass2, meth, sel);
2517 _cache_fill (curClass, meth, sel);
2519 _cache_fill (cls, meth, sel);
2523 methodPC = meth->method_imp;
2527 trace(0xb305, (int)methodPC, 0, 0);
2530 trace(0xb306, (int)methodPC, 0, 0);
2532 if (methodPC == NULL)
2534 // Class and superclasses do not respond -- use forwarding
2535 _cache_addForwardEntry(cls, sel);
2536 methodPC = &_objc_msgForward;
2539 trace(0xb30f, (int)methodPC, 0, 0);
2545 /***********************************************************************
2546 * lookupMethodInClassAndLoadCache.
2547 * Like _class_lookupMethodAndLoadCache, but does not search superclasses.
2548 * Caches and returns objc_msgForward if the method is not found in the class.
2549 **********************************************************************/
2550 static IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel)
2555 // Search cache first.
2556 imp = _cache_getImp(cls, sel);
2557 if (imp) return imp;
2559 // Cache miss. Search method list.
2561 OBJC_LOCK(&methodListLock);
2562 meth = _findMethodInClass(cls, sel);
2563 OBJC_UNLOCK(&methodListLock);
2566 // Hit in method list. Cache it.
2567 _cache_fill(cls, meth, sel);
2568 return meth->method_imp;
2570 // Miss in method list. Cache objc_msgForward.
2571 _cache_addForwardEntry(cls, sel);
2572 return &_objc_msgForward;
2578 /***********************************************************************
2580 * Atomically sets and clears some bits in cls's info field.
2581 * set and clear must not overlap.
2582 **********************************************************************/
2583 static pthread_mutex_t infoLock = PTHREAD_MUTEX_INITIALIZER;
2584 __private_extern__ void _class_changeInfo(struct objc_class *cls,
2585 long set, long clear)
2587 pthread_mutex_lock(&infoLock);
2588 cls->info = (cls->info | set) & ~clear;
2589 pthread_mutex_unlock(&infoLock);
2593 /***********************************************************************
2595 * Atomically sets some bits in cls's info field.
2596 **********************************************************************/
2597 __private_extern__ void _class_setInfo(struct objc_class *cls, long set)
2599 _class_changeInfo(cls, set, 0);
2603 /***********************************************************************
2605 * Atomically clears some bits in cls's info field.
2606 **********************************************************************/
2607 __private_extern__ void _class_clearInfo(struct objc_class *cls, long clear)
2609 _class_changeInfo(cls, 0, clear);
2613 /***********************************************************************
2614 * SubtypeUntil.
2617 **********************************************************************/
2618 static int SubtypeUntil (const char * type,
2622 const char * head = type;
2627 if (!*type || (!level && (*type == end)))
2628 return (int)(type - head);
2632 case ']': case '}': case ')': level--; break;
2633 case '[': case '{': case '(': level += 1; break;
2639 _objc_fatal ("Object: SubtypeUntil: end of type encountered prematurely\n");
2643 /***********************************************************************
2644 * SkipFirstType.
2645 **********************************************************************/
2646 static const char * SkipFirstType (const char * type)
2652 case 'O': /* bycopy */
2655 case 'N': /* inout */
2656 case 'r': /* const */
2657 case 'V': /* oneway */
2658 case '^': /* pointers */
2663 while ((*type >= '0') && (*type <= '9'))
2665 return type + SubtypeUntil (type, ']') + 1;
2669 return type + SubtypeUntil (type, '}') + 1;
2673 return type + SubtypeUntil (type, ')') + 1;
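/* Worked example (illustration only, hypothetical type string): given
 * "{_example=ii}d", SkipFirstType consumes the leading '{', SubtypeUntil
 * counts the 11 characters "_example=ii" up to the matching '}', and the
 * function returns a pointer to the trailing "d". Nested aggregates work the
 * same way because SubtypeUntil tracks bracket depth in "level".
 */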
2682 /***********************************************************************
2683 * method_getNumberOfArguments.
2684 **********************************************************************/
2685 unsigned method_getNumberOfArguments (Method method)
2687 const char * typedesc;
2690 // First, skip the return type
2691 typedesc = method->method_types;
2692 typedesc = SkipFirstType (typedesc);
2694 // Next, skip stack size
2695 while ((*typedesc >= '0') && (*typedesc <= '9'))
2698 // Now, we have the arguments - count how many
2702 // Traverse argument type
2703 typedesc = SkipFirstType (typedesc);
2705 // Skip GNU runtime's register parameter hint
2706 if (*typedesc == '+') typedesc++;
2708 // Traverse (possibly negative) argument offset
2709 if (*typedesc == '-')
2711 while ((*typedesc >= '0') && (*typedesc <= '9'))
2714 // Made it past an argument
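/* Worked example (illustration only, hypothetical encoding): for a method
 * typed "v12@0:4i8" (void return, 12-byte argument frame, self at offset 0,
 * _cmd at offset 4, an int at offset 8), the loop above counts three
 * arguments: self, _cmd, and the int.
 */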
2721 /***********************************************************************
2722 * method_getSizeOfArguments.
2723 **********************************************************************/
2725 unsigned method_getSizeOfArguments (Method method)
2727 const char * typedesc;
2728 unsigned stack_size;
2729 #if defined(__ppc__) || defined(ppc)
2730 unsigned trueBaseOffset;
2731 unsigned foundBaseOffset;
2734 // Get our starting points
2736 typedesc = method->method_types;
2738 // Skip the return type
2739 #if defined (__ppc__) || defined(ppc)
2740 // Struct returns cause the parameters to be bumped
2741 // by a register, so the offset to the receiver is
2742 // 4 instead of the normal 0.
2743 trueBaseOffset = (*typedesc == '{') ? 4 : 0;
2745 typedesc = SkipFirstType (typedesc);
2747 // Convert ASCII number string to integer
2748 while ((*typedesc >= '0') && (*typedesc <= '9'))
2749 stack_size = (stack_size * 10) + (*typedesc++ - '0');
2750 #if defined (__ppc__) || defined(ppc)
2751 // NOTE: This is a temporary measure pending a compiler fix.
2752 // Work around PowerPC compiler bug wherein the method argument
2753 // string contains an incorrect value for the "stack size."
2754 // Generally, the size is reported 4 bytes too small, so we apply
2755 // that fudge factor. Unfortunately, there is at least one case
2756 // where the error is something other than -4: when the last
2757 // parameter is a double, the reported stack is much too high
2758 // (about 32 bytes). We do not attempt to detect that case.
2759 // The result of returning a too-high value is that objc_msgSendv
2760 // can bus error if the destination of the marg_list copying
2761 // butts up against excluded memory.
2762 // This fix disables itself when it sees a correctly built
2763 // type string (i.e. the offset for the Id is correct). This
2764 // keeps us out of lockstep with the compiler.
2766 // skip the '@' marking the Id field
2767 typedesc = SkipFirstType (typedesc);
2769 // Skip GNU runtime's register parameter hint
2770 if (*typedesc == '+') typedesc++;
2772 // pick up the offset for the Id field
2773 foundBaseOffset = 0;
2774 while ((*typedesc >= '0') && (*typedesc <= '9'))
2775 foundBaseOffset = (foundBaseOffset * 10) + (*typedesc++ - '0');
2777 // add fudge factor iff the Id field offset was wrong
2778 if (foundBaseOffset != trueBaseOffset)
2786 // XXX Getting the size of a type is done all over the place
2787 // (Here, Foundation, remote project)! - Should unify
2789 unsigned int getSizeOfType (const char * type, unsigned int * alignPtr);
2791 unsigned method_getSizeOfArguments (Method method)
2798 unsigned stack_size;
2801 nargs = method_getNumberOfArguments (method);
2802 stack_size = (*method->method_types == '{') ? sizeof(void *) : 0;
2804 for (index = 0; index < nargs; index += 1)
2806 (void) method_getArgumentInfo (method, index, &type, &offset);
2807 size = getSizeOfType (type, &align);
2808 stack_size += ((size + 7) & ~7);
2815 /***********************************************************************
2816 * method_getArgumentInfo.
2817 **********************************************************************/
2818 unsigned method_getArgumentInfo (Method method,
2823 const char * typedesc = method->method_types;
2825 unsigned self_offset = 0;
2826 BOOL offset_is_negative = NO;
2828 // First, skip the return type
2829 typedesc = SkipFirstType (typedesc);
2831 // Next, skip stack size
2832 while ((*typedesc >= '0') && (*typedesc <= '9'))
2835 // Now, we have the arguments - position typedesc to the appropriate argument
2836 while (*typedesc && nargs != arg)
2839 // Skip argument type
2840 typedesc = SkipFirstType (typedesc);
2844 // Skip GNU runtime's register parameter hint
2845 if (*typedesc == '+') typedesc++;
2847 // Skip negative sign in offset
2848 if (*typedesc == '-')
2850 offset_is_negative = YES;
2854 offset_is_negative = NO;
2856 while ((*typedesc >= '0') && (*typedesc <= '9'))
2857 self_offset = self_offset * 10 + (*typedesc++ - '0');
2858 if (offset_is_negative)
2859 self_offset = -(self_offset);
2865 // Skip GNU runtime's register parameter hint
2866 if (*typedesc == '+') typedesc++;
2868 // Skip (possibly negative) argument offset
2869 if (*typedesc == '-')
2871 while ((*typedesc >= '0') && (*typedesc <= '9'))
2880 unsigned arg_offset = 0;
2883 typedesc = SkipFirstType (typedesc);
2888 *offset = -sizeof(id);
2896 // Skip GNU register parameter hint
2897 if (*typedesc == '+') typedesc++;
2899 // Pick up (possibly negative) argument offset
2900 if (*typedesc == '-')
2902 offset_is_negative = YES;
2906 offset_is_negative = NO;
2908 while ((*typedesc >= '0') && (*typedesc <= '9'))
2909 arg_offset = arg_offset * 10 + (*typedesc++ - '0');
2910 if (offset_is_negative)
2911 arg_offset = - arg_offset;
2914 // For stacks which grow up, since margs points
2915 // to the top of the stack or the END of the args,
2916 // the first offset is at -sizeof(id) rather than 0.
2917 self_offset += sizeof(id);
2919 *offset = arg_offset - self_offset;
2933 /***********************************************************************
2934 * _objc_create_zone.
2935 **********************************************************************/
2937 void * _objc_create_zone (void)
2939 return malloc_default_zone();
2943 /***********************************************************************
2944 * _objc_internal_zone.
2945 * Malloc zone for internal runtime data.
2946 * By default this is the default malloc zone, but a dedicated zone is
2947 * used if environment variable OBJC_USE_INTERNAL_ZONE is set.
2948 **********************************************************************/
2949 __private_extern__ malloc_zone_t *_objc_internal_zone(void)
2951 static malloc_zone_t *z = (malloc_zone_t *)-1;
2952 if (z == (malloc_zone_t *)-1) {
2953 if (UseInternalZone) {
2954 z = malloc_create_zone(vm_page_size, 0);
2955 malloc_set_zone_name(z, "ObjC");
2957 z = malloc_default_zone();
2964 /***********************************************************************
2970 * Convenience functions for the internal malloc zone.
2971 **********************************************************************/
2972 __private_extern__ void *_malloc_internal(size_t size)
2974 return malloc_zone_malloc(_objc_internal_zone(), size);
2977 __private_extern__ void *_calloc_internal(size_t count, size_t size)
2979 return malloc_zone_calloc(_objc_internal_zone(), count, size);
2982 __private_extern__ void *_realloc_internal(void *ptr, size_t size)
2984 return malloc_zone_realloc(_objc_internal_zone(), ptr, size);
2987 __private_extern__ char *_strdup_internal(const char *str)
2989 size_t len = strlen(str);
2990 char *dup = malloc_zone_malloc(_objc_internal_zone(), len + 1);
2991 memcpy(dup, str, len + 1);
2995 __private_extern__ void _free_internal(void *ptr)
2997 malloc_zone_free(_objc_internal_zone(), ptr);
3002 /***********************************************************************
3003 * _get_pc_for_thread.
3004 **********************************************************************/
3006 static unsigned long _get_pc_for_thread (mach_port_t thread)
3009 struct hp_pa_frame_thread_state state;
3010 unsigned int count = HPPA_FRAME_THREAD_STATE_COUNT;
3011 kern_return_t okay = thread_get_state (thread, HPPA_FRAME_THREAD_STATE, (thread_state_t)&state, &count);
3012 return (okay == KERN_SUCCESS) ? state.ts_pcoq_front : PC_SENTINAL;
3014 #elif defined(sparc)
3016 struct sparc_thread_state_regs state;
3017 unsigned int count = SPARC_THREAD_STATE_REGS_COUNT;
3018 kern_return_t okay = thread_get_state (thread, SPARC_THREAD_STATE_REGS, (thread_state_t)&state, &count);
3019 return (okay == KERN_SUCCESS) ? state.regs.r_pc : PC_SENTINAL;
3021 #elif defined(__i386__) || defined(i386)
3023 i386_thread_state_t state;
3024 unsigned int count = i386_THREAD_STATE_COUNT;
3025 kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
3026 return (okay == KERN_SUCCESS) ? state.eip : PC_SENTINAL;
3030 struct m68k_thread_state_regs state;
3031 unsigned int count = M68K_THREAD_STATE_REGS_COUNT;
3032 kern_return_t okay = thread_get_state (thread, M68K_THREAD_STATE_REGS, (thread_state_t)&state, &count);
3033 return (okay == KERN_SUCCESS) ? state.pc : PC_SENTINAL;
3035 #elif defined(__ppc__) || defined(ppc)
3037 struct ppc_thread_state state;
3038 unsigned int count = PPC_THREAD_STATE_COUNT;
3039 kern_return_t okay = thread_get_state (thread, PPC_THREAD_STATE, (thread_state_t)&state, &count);
3040 return (okay == KERN_SUCCESS) ? state.srr0 : PC_SENTINAL;
3044 #error _get_pc_for_thread () not implemented for this architecture
3048 /***********************************************************************
3049 * _collecting_in_critical.
3050 * Returns TRUE if some thread is currently executing a cache-reading
3051 * function. Collection of cache garbage is not allowed when a cache-
3052 * reading function is in progress because it might still be using
3053 * the garbage memory.
3054 **********************************************************************/
3055 OBJC_EXPORT unsigned long objc_entryPoints[];
3056 OBJC_EXPORT unsigned long objc_exitPoints[];
3058 static int _collecting_in_critical (void)
3060 thread_act_port_array_t threads;
3066 mach_port_t mythread = pthread_mach_thread_np(pthread_self());
3068 // Get a list of all the threads in the current task
3069 ret = task_threads (mach_task_self (), &threads, &number);
3070 if (ret != KERN_SUCCESS)
3072 _objc_fatal("task_threads failed (result %d)\n", ret);
3075 // Check whether any thread is in the cache lookup code
3077 for (count = 0; count < number; count++)
3082 // Don't bother checking ourselves
3083 if (threads[count] == mythread)
3086 // Find out where thread is executing
3087 pc = _get_pc_for_thread (threads[count]);
3089 // Check for bad status, and if so, assume the worst (can't collect)
3090 if (pc == PC_SENTINAL)
3096 // Check whether it is in the cache lookup code
3097 for (region = 0; objc_entryPoints[region] != 0; region++)
3099 if ((pc >= objc_entryPoints[region]) &&
3100 (pc <= objc_exitPoints[region]))
3109 // Deallocate the port rights for the threads
3110 for (count = 0; count < number; count++) {
3111 mach_port_deallocate(mach_task_self (), threads[count]);
3114 // Deallocate the thread list
3115 vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads) * number);
3117 // Return our finding
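/* Illustration only: the per-thread test performed in the loop above, factored
 * into a helper. A thread counts as "inside a cache reader" when its PC lies
 * within any [objc_entryPoints[i], objc_exitPoints[i]] range exported by the
 * messenger routines.
 */
static int pc_is_in_cache_reader_sketch(unsigned long pc)
{
    int region;
    for (region = 0; objc_entryPoints[region] != 0; region++) {
        if (pc >= objc_entryPoints[region] && pc <= objc_exitPoints[region])
            return 1;
    }
    return 0;
}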
3121 /***********************************************************************
3122 * _garbage_make_room. Ensure that there is enough room for at least
3123 * one more ref in the garbage.
3124 **********************************************************************/
3126 // amount of memory represented by all refs in the garbage
3127 static int garbage_byte_size = 0;
3129 // do not empty the garbage until garbage_byte_size gets at least this big
3130 static int garbage_threshold = 1024;
3132 // table of refs to free
3133 static void **garbage_refs = 0;
3135 // current number of refs in garbage_refs
3136 static int garbage_count = 0;
3138 // capacity of current garbage_refs
3139 static int garbage_max = 0;
3141 // capacity of initial garbage_refs
3143 INIT_GARBAGE_COUNT = 128
3146 static void _garbage_make_room (void)
3148 static int first = 1;
3149 volatile void * tempGarbage;
3151 // Create the collection table the first time it is needed
3155 garbage_refs = _malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
3156 garbage_max = INIT_GARBAGE_COUNT;
3159 // Double the table if it is full
3160 else if (garbage_count == garbage_max)
3162 tempGarbage = _realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));
3163 garbage_refs = (void **) tempGarbage;
3168 /***********************************************************************
3169 * _cache_collect_free. Add the specified malloc'd memory to the list
3170 * of blocks to free at some later point.
3171 * size is used for the collection threshold. It does not have to be
3172 * precisely the block's size.
3173 * Cache locks: cacheUpdateLock must be held by the caller.
3174 **********************************************************************/
3175 static void _cache_collect_free(void *data, size_t size, BOOL tryCollect)
3177 static char *report_garbage = (char *)0xffffffff;
3179 if ((char *)0xffffffff == report_garbage) {
3180 // Check whether to log our activity
3181 report_garbage = getenv ("OBJC_REPORT_GARBAGE");
3184 // Insert new element in garbage list
3185 // Note that we do this even if we end up free'ing everything
3186 _garbage_make_room ();
3187 garbage_byte_size += size;
3188 garbage_refs[garbage_count++] = data;
3191 if (tryCollect && report_garbage)
3192 _objc_inform ("total of %d bytes of garbage ...", garbage_byte_size);
3194 // Done if caller says not to empty or the garbage is not full
3195 if (!tryCollect || (garbage_byte_size < garbage_threshold))
3197 if (tryCollect && report_garbage)
3198 _objc_inform ("couldn't collect cache garbage: below threshold\n");
3203 // tryCollect is guaranteed to be true after this point
3205 // Synchronize garbage collection with objc_msgSend and other cache readers
3206 if (!_collecting_in_critical ()) {
3207 // No cache readers in progress - garbage is now deletable
3211 _objc_inform ("collecting!\n");
3213 // Dispose all refs now in the garbage
3214 while (garbage_count--) {
3215 if (cache_allocator_is_block(garbage_refs[garbage_count])) {
3216 cache_allocator_free(garbage_refs[garbage_count]);
3218 free(garbage_refs[garbage_count]);
3222 // Clear the garbage count and total size indicator
3224 garbage_byte_size = 0;
3227 // objc_msgSend (or other cache reader) is currently looking in the
3228 // cache and might still be using some garbage.
3229 if (report_garbage) {
3230 _objc_inform ("couldn't collect cache garbage: objc_msgSend in progress\n");
3237 /***********************************************************************
3238 * Custom method cache allocator.
3239 * Method cache block sizes are a power-of-two slot count plus 2 words, which is a pessimal
3240 * case for the system allocator. It wastes 504 bytes per cache block
3241 * with 128 or more slots, which adds up to tens of KB for an AppKit process.
3242 * To save memory, the custom cache allocator below is used.
3244 * The cache allocator uses 128 KB allocation regions. Few processes will
3245 * require a second region. Within a region, allocation is address-ordered
3246 * first fit.
3248 * The cache allocator uses a quantum of 520 bytes.
3249 * Cache block ideal sizes: 520, 1032, 2056, 4104
3250 * Cache allocator sizes: 520, 1040, 2080, 4160
3252 * Because all blocks are known to be genuine method caches, the ordinary
3253 * cache->mask and cache->occupied fields are used as block headers.
3254 * No out-of-band headers are maintained. The number of blocks will
3255 * almost always be fewer than 200, so for simplicity there is no segregated
3256 * free list or other optimization.
3258 * Block in use: mask != 0, occupied != -1 (mask indicates block size)
3259 * Block free: mask != 0, occupied == -1 (mask is precisely block size)
3261 * No cache allocator functions take any locks. Instead, the caller
3262 * must hold the cacheUpdateLock.
3263 **********************************************************************/
3265 typedef struct cache_allocator_block {
3268 struct cache_allocator_block *nextFree;
3269 } cache_allocator_block;
3271 typedef struct cache_allocator_region {
3272 cache_allocator_block *start;
3273 cache_allocator_block *end; // first non-block address
3274 cache_allocator_block *freeList;
3275 struct cache_allocator_region *next;
3276 } cache_allocator_region;
3278 static cache_allocator_region *cacheRegion = NULL;
3281 static unsigned int cache_allocator_mask_for_size(size_t size)
3283 return (size - sizeof(struct objc_cache)) / sizeof(Method);
3286 static size_t cache_allocator_size_for_mask(unsigned int mask)
3288 size_t requested = sizeof(struct objc_cache) + TABLE_SIZE(mask+1);
3289 size_t actual = CACHE_QUANTUM;
3290 while (actual < requested) actual += CACHE_QUANTUM;
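/* Worked example (illustration, assuming 4-byte pointers as in the size table
 * above): a 128-slot cache needs (128+2)*4 = 520 bytes, exactly one quantum.
 * A 256-slot cache needs 1032 bytes and is rounded up to 1040 (two quanta);
 * 512 slots -> 2056 -> 2080; 1024 slots -> 4104 -> 4160. These are the
 * "ideal" and "allocator" sizes listed in the block comment above.
 */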
3294 /***********************************************************************
3295 * cache_allocator_add_region
3296 * Allocates and returns a new region that can hold at least size
3297 * bytes of large method caches.
3298 * The actual size will be rounded up to a CACHE_QUANTUM boundary,
3299 * with a minimum of CACHE_REGION_SIZE.
3300 * The new region is lowest-priority for new allocations. Callers that
3301 * know the other regions are already full should allocate directly
3302 * into the returned region.
3303 **********************************************************************/
3304 static cache_allocator_region *cache_allocator_add_region(size_t size)
3307 cache_allocator_block *b;
3308 cache_allocator_region **rgnP;
3309 cache_allocator_region *newRegion =
3310 _calloc_internal(1, sizeof(cache_allocator_region));
3312 // Round size up to quantum boundary, and apply the minimum size.
3313 size += CACHE_QUANTUM - (size % CACHE_QUANTUM);
3314 if (size < CACHE_REGION_SIZE) size = CACHE_REGION_SIZE;
3316 // Allocate the region
3318 vm_allocate(mach_task_self(), &addr, size, 1);
3319 newRegion->start = (cache_allocator_block *)addr;
3320 newRegion->end = (cache_allocator_block *)(addr + size);
3322 // Mark the first block: free and covers the entire region
3323 b = newRegion->start;
3325 b->state = (unsigned int)-1;
3327 newRegion->freeList = b;
3329 // Add to end of the linked list of regions.
3330 // Other regions should be re-used before this one is touched.
3331 newRegion->next = NULL;
3332 rgnP = &cacheRegion;
3334 rgnP = &(**rgnP).next;
3342 /***********************************************************************
3343 * cache_allocator_coalesce
3344 * Attempts to coalesce a free block with the single free block following
3345 * it in the free list, if any.
3346 **********************************************************************/
3347 static void cache_allocator_coalesce(cache_allocator_block *block)
3349 if (block->size + (uintptr_t)block == (uintptr_t)block->nextFree) {
3350 block->size += block->nextFree->size;
3351 block->nextFree = block->nextFree->nextFree;
3356 /***********************************************************************
3357 * cache_region_calloc
3358 * Attempt to allocate a size-byte block in the given region.
3359 * Allocation is first-fit. The free list is already fully coalesced.
3360 * Returns NULL if there is not enough room in the region for the block.
3361 **********************************************************************/
3362 static void *cache_region_calloc(cache_allocator_region *rgn, size_t size)
3364 cache_allocator_block **blockP;
3367 // Save mask for allocated block, then round size
3368 // up to CACHE_QUANTUM boundary
3369 mask = cache_allocator_mask_for_size(size);
3370 size = cache_allocator_size_for_mask(mask);
3372 // Search the free list for a sufficiently large free block.
3374 for (blockP = &rgn->freeList;
3376 blockP = &(**blockP).nextFree)
3378 cache_allocator_block *block = *blockP;
3379 if (block->size < size) continue; // not big enough
3381 // block is now big enough. Allocate from it.
3383 // Slice off unneeded fragment of block, if any,
3384 // and reconnect the free list around block.
3385 if (block->size - size >= CACHE_QUANTUM) {
3386 cache_allocator_block *leftover =
3387 (cache_allocator_block *)(size + (uintptr_t)block);
3388 leftover->size = block->size - size;
3389 leftover->state = (unsigned int)-1;
3390 leftover->nextFree = block->nextFree;
3393 *blockP = block->nextFree;
3396 // block is now exactly the right size.
3399 block->size = mask; // Cache->mask
3400 block->state = 0; // Cache->occupied
3405 // No room in this region.
3410 /***********************************************************************
3411 * cache_allocator_calloc
3412 * Custom allocator for large method caches (128+ slots)
3413 * The returned cache block already has cache->mask set.
3414 * cache->occupied and the cache contents are zero.
3415 * Cache locks: cacheUpdateLock must be held by the caller
3416 **********************************************************************/
3417 static void *cache_allocator_calloc(size_t size)
3419 cache_allocator_region *rgn;
3421 for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
3422 void *p = cache_region_calloc(rgn, size);
3428 // No regions or all regions full - make a region and try one more time
3429 // In the unlikely case of a cache over 256KB, it will get its own region.
3430 return cache_region_calloc(cache_allocator_add_region(size), size);
3434 /***********************************************************************
3435 * cache_allocator_region_for_block
3436 * Returns the cache allocator region that ptr points into, or NULL.
3437 **********************************************************************/
3438 static cache_allocator_region *cache_allocator_region_for_block(cache_allocator_block *block)
3440 cache_allocator_region *rgn;
3441 for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
3442 if (block >= rgn->start && block < rgn->end) return rgn;
3448 /***********************************************************************
3449 * cache_allocator_is_block
3450 * If ptr is a live block from the cache allocator, return YES
3451 * If ptr is a block from some other allocator, return NO.
3452 * If ptr is a dead block from the cache allocator, result is undefined.
3453 * Cache locks: cacheUpdateLock must be held by the caller
3454 **********************************************************************/
3455 static BOOL cache_allocator_is_block(void *ptr)
3457 return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL);
3460 /***********************************************************************
3461 * cache_allocator_free
3462 * Frees a block allocated by the cache allocator.
3463 * Cache locks: cacheUpdateLock must be held by the caller.
3464 **********************************************************************/
3465 static void cache_allocator_free(void *ptr)
3467 cache_allocator_block *dead = (cache_allocator_block *)ptr;
3468 cache_allocator_block *cur;
3469 cache_allocator_region *rgn;
3471 if (! (rgn = cache_allocator_region_for_block(ptr))) {
3472 // free of non-pointer
3473 _objc_inform("cache_allocator_free of non-pointer %p", ptr);
3477 dead->size = cache_allocator_size_for_mask(dead->size);
3478 dead->state = (unsigned int)-1;
3480 if (!rgn->freeList || rgn->freeList > dead) {
3481 // dead block belongs at front of free list
3482 dead->nextFree = rgn->freeList;
3483 rgn->freeList = dead;
3484 cache_allocator_coalesce(dead);
3488 // dead block belongs in the middle or end of free list
3489 for (cur = rgn->freeList; cur != NULL; cur = cur->nextFree) {
3490 cache_allocator_block *ahead = cur->nextFree;
3492 if (!ahead || ahead > dead) {
3493 // cur and ahead straddle dead, OR dead belongs at end of free list
3494 cur->nextFree = dead;
3495 dead->nextFree = ahead;
3497 // coalesce into dead first in case both succeed
3498 cache_allocator_coalesce(dead);
3499 cache_allocator_coalesce(cur);
3505 _objc_inform("cache_allocator_free of non-pointer %p", ptr);
3509 /***********************************************************************
3510 * _cache_print.
3511 **********************************************************************/
3512 static void _cache_print (Cache cache)
3517 count = cache->mask + 1;
3518 for (index = 0; index < count; index += 1)
3519 if (CACHE_BUCKET_VALID(cache->buckets[index]))
3521 if (CACHE_BUCKET_IMP(cache->buckets[index]) == &_objc_msgForward)
3522 printf ("does not recognize: \n");
3523 printf ("%s\n", (const char *) CACHE_BUCKET_NAME(cache->buckets[index]));
3527 /***********************************************************************
3528 * _class_printMethodCaches.
3529 **********************************************************************/
3530 void _class_printMethodCaches (Class cls)
3532 if (((struct objc_class *)cls)->cache == &emptyCache)
3533 printf ("no instance-method cache for class %s\n", ((struct objc_class *)cls)->name);
3537 printf ("instance-method cache for class %s:\n", ((struct objc_class *)cls)->name);
3538 _cache_print (((struct objc_class *)cls)->cache);
3541 if (((struct objc_class * )((struct objc_class * )cls)->isa)->cache == &emptyCache)
3542 printf ("no class-method cache for class %s\n", ((struct objc_class *)cls)->name);
3546 printf ("class-method cache for class %s:\n", ((struct objc_class *)cls)->name);
3547 _cache_print (((struct objc_class * )((struct objc_class * )cls)->isa)->cache);
3551 /***********************************************************************
3552 * log2.
3553 **********************************************************************/
3554 static unsigned int log2 (unsigned int x)
3565 /***********************************************************************
3566 * _class_printDuplicateCacheEntries.
3567 **********************************************************************/
3568 void _class_printDuplicateCacheEntries (BOOL detail)
3570 NXHashTable * class_hash;
3572 struct objc_class * cls;
3573 unsigned int duplicates;
3574 unsigned int index1;
3575 unsigned int index2;
3578 unsigned int isMeta;
3582 printf ("Checking for duplicate cache entries \n");
3584 // Outermost loop - iterate over all classes
3585 class_hash = objc_getClasses ();
3586 state = NXInitHashState (class_hash);
3588 while (NXNextHashState (class_hash, &state, (void **) &cls))
3590 // Control loop - do given class' cache, then its isa's cache
3591 for (isMeta = 0; isMeta <= 1; isMeta += 1)
3593 // Select cache of interest and make sure it exists
3594 cache = isMeta ? cls->isa->cache : ((struct objc_class *)cls)->cache;
3595 if (cache == &emptyCache)
3598 // Middle loop - check each entry in the given cache
3601 for (index1 = 0; index1 < count; index1 += 1)
3603 // Skip invalid entry
3604 if (!CACHE_BUCKET_VALID(cache->buckets[index1]))
3607 // Inner loop - check that given entry matches no later entry
3608 for (index2 = index1 + 1; index2 < count; index2 += 1)
3610 // Skip invalid entry
3611 if (!CACHE_BUCKET_VALID(cache->buckets[index2]))
3614 // Check for duplication by method name comparison
3615 if (strcmp ((char *) CACHE_BUCKET_NAME(cache->buckets[index1]),
3616 (char *) CACHE_BUCKET_NAME(cache->buckets[index2])) == 0)
3619 printf ("%s %s\n", ((struct objc_class *)cls)->name, (char *) CACHE_BUCKET_NAME(cache->buckets[index1]));
3629 printf ("duplicates = %d\n", duplicates);
3630 printf ("total cache fills = %d\n", totalCacheFills);
3633 /***********************************************************************
3634 * PrintCacheHeader.
3635 **********************************************************************/
3636 static void PrintCacheHeader (void)
3638 #ifdef OBJC_INSTRUMENTED
3639 printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS TotalD AvgD MaxD TotalD AvgD MaxD TotD AvgD MaxD\n");
3640 printf ("Size Count Used Used Used Hit Hit Miss Miss Hits Prbs Prbs Misses Prbs Prbs Flsh Flsh Flsh\n");
3641 printf ("----- ----- ----- ----- ---- ---- ---- ---- ---- ------- ---- ---- ------- ---- ---- ---- ---- ----\n");
3642 #else
3643 printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS\n");
3644 printf ("Size Count Used Used Used Hit Hit Miss Miss\n");
3645 printf ("----- ----- ----- ----- ---- ---- ---- ---- ----\n");
3646 #endif
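// Reading the header (abbreviations inferred from the PrintCacheInfo
// parameters below): "Size" is the slot count of a cache, "Count" the number
// of caches of that size, "Slots Used" the valid entries they hold, and
// "Avg/Max Used" the per-cache average and maximum.  The "S" (static) columns
// report hit and miss probe distances derived from the current table layout,
// while the "D" (dynamic) columns, present only under OBJC_INSTRUMENTED,
// report counters accumulated while messaging: total hits, misses and
// flushes, plus the average and maximum probes or flushed entries per event.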
3649 /***********************************************************************
3650 * PrintCacheInfo.
3651 **********************************************************************/
3652 static void PrintCacheInfo (unsigned int cacheSize,
3653 unsigned int cacheCount,
3654 unsigned int slotsUsed,
3655 float avgUsed,
3656 unsigned int maxUsed,
3657 float avgSHit,
3658 unsigned int maxSHit,
3659 float avgSMiss,
3660 unsigned int maxSMiss
3661 #ifdef OBJC_INSTRUMENTED
3662 , unsigned int totDHits,
3663 float avgDHit,
3664 unsigned int maxDHit,
3665 unsigned int totDMisses,
3666 float avgDMiss,
3667 unsigned int maxDMiss,
3668 unsigned int totDFlsh,
3669 float avgDFlsh,
3670 unsigned int maxDFlsh
3671 #endif
3672 )
3674 #ifdef OBJC_INSTRUMENTED
3675 printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u %7u %4.1f %4u %7u %4.1f %4u %4u %4.1f %4u\n",
3676 #else
3677 printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u\n",
3678 #endif
3679 cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed, avgSHit, maxSHit, avgSMiss, maxSMiss
3680 #ifdef OBJC_INSTRUMENTED
3681 , totDHits, avgDHit, maxDHit, totDMisses, avgDMiss, maxDMiss, totDFlsh, avgDFlsh, maxDFlsh
3682 #endif
3683 );
3687 #ifdef OBJC_INSTRUMENTED
3688 /***********************************************************************
3689 * PrintCacheHistogram. Show the non-zero entries from the specified
3690 * histogram.
3691 **********************************************************************/
3692 static void PrintCacheHistogram (char * title,
3693 unsigned int * firstEntry,
3694 unsigned int entryCount)
3697 unsigned int * thisEntry;
3699 printf ("%s\n", title);
3700 printf (" Probes Tally\n");
3701 printf (" ------ -----\n");
3702 for (index = 0, thisEntry = firstEntry;
3703 index < entryCount;
3704 index += 1, thisEntry += 1)
3706 if (*thisEntry == 0)
3709 printf (" %6d %5d\n", index, *thisEntry);
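// Shape of the resulting output (values are made up for illustration):
//
//      Probes Tally
//      ------ -----
//           1 40231
//           2  1873
//           5     4
//
// Rows whose tally is zero are skipped, so sparse histograms stay compact.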
3714 /***********************************************************************
3715 * _class_printMethodCacheStatistics.
3716 **********************************************************************/
3718 #define MAX_LOG2_SIZE 32
3719 #define MAX_CHAIN_SIZE 100
3721 void _class_printMethodCacheStatistics (void)
3723 unsigned int isMeta;
3725 NXHashTable * class_hash;
3727 struct objc_class * cls;
3728 unsigned int totalChain;
3729 unsigned int totalMissChain;
3730 unsigned int maxChain;
3731 unsigned int maxMissChain;
3732 unsigned int classCount;
3733 unsigned int negativeEntryCount;
3734 unsigned int cacheExpandCount;
3735 unsigned int cacheCountBySize[2][MAX_LOG2_SIZE] = {{0}};
3736 unsigned int totalEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
3737 unsigned int maxEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
3738 unsigned int totalChainBySize[2][MAX_LOG2_SIZE] = {{0}};
3739 unsigned int totalMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
3740 unsigned int totalMaxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
3741 unsigned int totalMaxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
3742 unsigned int maxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
3743 unsigned int maxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
3744 unsigned int chainCount[MAX_CHAIN_SIZE] = {0};
3745 unsigned int missChainCount[MAX_CHAIN_SIZE] = {0};
3746 #ifdef OBJC_INSTRUMENTED
3747 unsigned int hitCountBySize[2][MAX_LOG2_SIZE] = {{0}};
3748 unsigned int hitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
3749 unsigned int maxHitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
3750 unsigned int missCountBySize[2][MAX_LOG2_SIZE] = {{0}};
3751 unsigned int missProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
3752 unsigned int maxMissProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
3753 unsigned int flushCountBySize[2][MAX_LOG2_SIZE] = {{0}};
3754 unsigned int flushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
3755 unsigned int maxFlushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
3758 printf ("Printing cache statistics\n");
3760 // Outermost loop - iterate over all classes
3761 class_hash = objc_getClasses ();
3762 state = NXInitHashState (class_hash);
3764 negativeEntryCount = 0;
3765 cacheExpandCount = 0;
3766 while (NXNextHashState (class_hash, &state, (void **) &cls))
3771 // Control loop - do given class' cache, then its isa's cache
3772 for (isMeta = 0; isMeta <= 1; isMeta += 1)
3776 unsigned int log2Size;
3777 unsigned int entryCount;
3779 // Select cache of interest
3780 cache = isMeta ? cls->isa->cache : ((struct objc_class *)cls)->cache;
3782 // Ignore empty cache... should we?
3783 if (cache == &emptyCache)
3786 // Middle loop - do each entry in the given cache
3793 for (index = 0; index < mask + 1; index += 1)
3798 uarith_t methodChain;
3799 uarith_t methodMissChain;
3802 // If the entry is invalid, the only item of
3803 // interest is that a future insert hashing
3804 // to this slot can use it directly.
3805 buckets = cache->buckets;
3806 if (!CACHE_BUCKET_VALID(buckets[index]))
3808 missChainCount[0] += 1;
3812 method = buckets[index];
3814 // Tally valid entries
3817 // Tally "forward::" entries
3818 if (CACHE_BUCKET_IMP(method) == &_objc_msgForward)
3819 negativeEntryCount += 1;
3821 // Calculate search distance (chain length) for this method
3822 // The chain may wrap around to the beginning of the table.
3823 hash = CACHE_HASH(CACHE_BUCKET_NAME(method), mask);
3824 if (index >= hash) methodChain = index - hash;
3825 else methodChain = (mask+1) + index - hash;
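// Worked example (illustrative figures): in a 16-slot cache, mask == 15.
// A method whose selector hashes to slot 14 but is stored in slot 2 has
// index < hash, so the chain wrapped past the end of the table:
// methodChain = (mask + 1) + index - hash = 16 + 2 - 14 = 4, meaning the
// lookup probed slots 14, 15, 0 and 1 before finding the entry in slot 2.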
3827 // Tally chains of this length
3828 if (methodChain < MAX_CHAIN_SIZE)
3829 chainCount[methodChain] += 1;
3831 // Keep sum of all chain lengths
3832 totalChain += methodChain;
3834 // Record greatest chain length
3835 if (methodChain > maxChain)
3836 maxChain = methodChain;
3838 // Calculate search distance for a miss that hashes here
3839 index2 = index;
3840 while (CACHE_BUCKET_VALID(buckets[index2]))
3841 {
3842 index2 += 1;
3843 index2 &= mask;
3844 }
3845 methodMissChain = ((index2 - index) & mask);
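// Illustrative figures for the masked subtraction: with mask == 7, a probe
// hashing to slot 6 that finds slots 6 and 7 occupied stops at the first
// invalid slot, index2 == 0, and ((0 - 6) & 7) == 2: two occupied slots were
// scanned before the miss became certain.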
3847 // Tally miss chains of this length
3848 if (methodMissChain < MAX_CHAIN_SIZE)
3849 missChainCount[methodMissChain] += 1;
3851 // Keep sum of all miss chain lengths in this class
3852 totalMissChain += methodMissChain;
3854 // Record greatest miss chain length
3855 if (methodMissChain > maxMissChain)
3856 maxMissChain = methodMissChain;
3859 // Factor this cache into statistics about caches of the same
3860 // type and size (all caches are a power of two in size)
3861 log2Size = log2 (mask + 1);
3862 cacheCountBySize[isMeta][log2Size] += 1;
3863 totalEntriesBySize[isMeta][log2Size] += entryCount;
3864 if (entryCount > maxEntriesBySize[isMeta][log2Size])
3865 maxEntriesBySize[isMeta][log2Size] = entryCount;
3866 totalChainBySize[isMeta][log2Size] += totalChain;
3867 totalMissChainBySize[isMeta][log2Size] += totalMissChain;
3868 totalMaxChainBySize[isMeta][log2Size] += maxChain;
3869 totalMaxMissChainBySize[isMeta][log2Size] += maxMissChain;
3870 if (maxChain > maxChainBySize[isMeta][log2Size])
3871 maxChainBySize[isMeta][log2Size] = maxChain;
3872 if (maxMissChain > maxMissChainBySize[isMeta][log2Size])
3873 maxMissChainBySize[isMeta][log2Size] = maxMissChain;
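// For example, an instance-method cache with mask == 31 has 32 slots, so
// log2Size == 5 and its entry count and chain figures are accumulated into
// the [0][5] elements of the "BySize" arrays (meta-class caches use row 1).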
3874 #ifdef OBJC_INSTRUMENTED
3876 CacheInstrumentation * cacheData;
3878 cacheData = CACHE_INSTRUMENTATION(cache);
3879 hitCountBySize[isMeta][log2Size] += cacheData->hitCount;
3880 hitProbesBySize[isMeta][log2Size] += cacheData->hitProbes;
3881 if (cacheData->maxHitProbes > maxHitProbesBySize[isMeta][log2Size])
3882 maxHitProbesBySize[isMeta][log2Size] = cacheData->maxHitProbes;
3883 missCountBySize[isMeta][log2Size] += cacheData->missCount;
3884 missProbesBySize[isMeta][log2Size] += cacheData->missProbes;
3885 if (cacheData->maxMissProbes > maxMissProbesBySize[isMeta][log2Size])
3886 maxMissProbesBySize[isMeta][log2Size] = cacheData->maxMissProbes;
3887 flushCountBySize[isMeta][log2Size] += cacheData->flushCount;
3888 flushedEntriesBySize[isMeta][log2Size] += cacheData->flushedEntries;
3889 if (cacheData->maxFlushedEntries > maxFlushedEntriesBySize[isMeta][log2Size])
3890 maxFlushedEntriesBySize[isMeta][log2Size] = cacheData->maxFlushedEntries;
3893 // Caches start with a power of two number of entries, and grow by doubling, so
3894 // we can calculate the number of times this cache has expanded
3895 if (isMeta)
3896 cacheExpandCount += log2Size - INIT_META_CACHE_SIZE_LOG2;
3897 else
3898 cacheExpandCount += log2Size - INIT_CACHE_SIZE_LOG2;
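// Illustrative arithmetic (assuming, for the example only, that
// INIT_CACHE_SIZE_LOG2 is 2, i.e. a 4-slot initial cache): an instance cache
// that has grown to 64 slots has log2Size == 6 and therefore contributes
// 6 - 2 = 4 doublings to cacheExpandCount.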
3904 unsigned int cacheCountByType[2] = {0};
3905 unsigned int totalCacheCount = 0;
3906 unsigned int totalEntries = 0;
3907 unsigned int maxEntries = 0;
3908 unsigned int totalSlots = 0;
3909 #ifdef OBJC_INSTRUMENTED
3910 unsigned int totalHitCount = 0;
3911 unsigned int totalHitProbes = 0;
3912 unsigned int maxHitProbes = 0;
3913 unsigned int totalMissCount = 0;
3914 unsigned int totalMissProbes = 0;
3915 unsigned int maxMissProbes = 0;
3916 unsigned int totalFlushCount = 0;
3917 unsigned int totalFlushedEntries = 0;
3918 unsigned int maxFlushedEntries = 0;
3926 // Sum information over all caches
3927 for (isMeta = 0; isMeta <= 1; isMeta += 1)
3929 for (index = 0; index < MAX_LOG2_SIZE; index += 1)
3931 cacheCountByType[isMeta] += cacheCountBySize[isMeta][index];
3932 totalEntries += totalEntriesBySize[isMeta][index];
3933 totalSlots += cacheCountBySize[isMeta][index] * (1 << index);
3934 totalChain += totalChainBySize[isMeta][index];
3935 if (maxEntriesBySize[isMeta][index] > maxEntries)
3936 maxEntries = maxEntriesBySize[isMeta][index];
3937 if (maxChainBySize[isMeta][index] > maxChain)
3938 maxChain = maxChainBySize[isMeta][index];
3939 totalMissChain += totalMissChainBySize[isMeta][index];
3940 if (maxMissChainBySize[isMeta][index] > maxMissChain)
3941 maxMissChain = maxMissChainBySize[isMeta][index];
3942 #ifdef OBJC_INSTRUMENTED
3943 totalHitCount += hitCountBySize[isMeta][index];
3944 totalHitProbes += hitProbesBySize[isMeta][index];
3945 if (maxHitProbesBySize[isMeta][index] > maxHitProbes)
3946 maxHitProbes = maxHitProbesBySize[isMeta][index];
3947 totalMissCount += missCountBySize[isMeta][index];
3948 totalMissProbes += missProbesBySize[isMeta][index];
3949 if (maxMissProbesBySize[isMeta][index] > maxMissProbes)
3950 maxMissProbes = maxMissProbesBySize[isMeta][index];
3951 totalFlushCount += flushCountBySize[isMeta][index];
3952 totalFlushedEntries += flushedEntriesBySize[isMeta][index];
3953 if (maxFlushedEntriesBySize[isMeta][index] > maxFlushedEntries)
3954 maxFlushedEntries = maxFlushedEntriesBySize[isMeta][index];
3958 totalCacheCount += cacheCountByType[isMeta];
3962 printf ("There are %u classes\n", classCount);
3964 for (isMeta = 0; isMeta <= 1; isMeta += 1)
3966 // Number of caches of this type
3967 printf ("\nThere are %u %s-method caches, broken down by size (slot count):\n",
3968 cacheCountByType[isMeta],
3969 isMeta ? "class" : "instance");
3972 PrintCacheHeader ();
3974 // Keep format consistent even if there are no caches of this kind
3975 if (cacheCountByType[isMeta] == 0)
3977 printf ("(none)\n");
3981 // Usage information by cache size
3982 for (index = 0; index < MAX_LOG2_SIZE; index += 1)
3984 unsigned int cacheCount;
3985 unsigned int cacheSlotCount;
3986 unsigned int cacheEntryCount;
3988 // Get number of caches of this type and size
3989 cacheCount = cacheCountBySize[isMeta][index];
3990 if (cacheCount == 0)
3993 // Get the cache slot count and the total number of valid entries
3994 cacheSlotCount = (1 << index);
3995 cacheEntryCount = totalEntriesBySize[isMeta][index];
3997 // Give the analysis
3998 PrintCacheInfo (cacheSlotCount,
3999 cacheCount,
4000 cacheEntryCount,
4001 (float) cacheEntryCount / (float) cacheCount,
4002 maxEntriesBySize[isMeta][index],
4003 (float) totalChainBySize[isMeta][index] / (float) cacheEntryCount,
4004 maxChainBySize[isMeta][index],
4005 (float) totalMissChainBySize[isMeta][index] / (float) (cacheCount * cacheSlotCount),
4006 maxMissChainBySize[isMeta][index]
4007 #ifdef OBJC_INSTRUMENTED
4008 , hitCountBySize[isMeta][index],
4009 hitCountBySize[isMeta][index] ?
4010 (float) hitProbesBySize[isMeta][index] / (float) hitCountBySize[isMeta][index] : 0.0,
4011 maxHitProbesBySize[isMeta][index],
4012 missCountBySize[isMeta][index],
4013 missCountBySize[isMeta][index] ?
4014 (float) missProbesBySize[isMeta][index] / (float) missCountBySize[isMeta][index] : 0.0,
4015 maxMissProbesBySize[isMeta][index],
4016 flushCountBySize[isMeta][index],
4017 flushCountBySize[isMeta][index] ?
4018 (float) flushedEntriesBySize[isMeta][index] / (float) flushCountBySize[isMeta][index] : 0.0,
4019 maxFlushedEntriesBySize[isMeta][index]
4020 #endif
4021 );
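// Note on the averages passed above: hit-chain lengths are averaged over the
// valid entries of this size class (each stored method contributed one chain
// measurement), while miss-chain lengths are averaged over
// cacheCount * cacheSlotCount because the scan earlier measured exactly one
// miss chain per slot of every cache.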
4025 // Give overall numbers
4026 printf ("\nCumulative:\n");
4027 PrintCacheHeader ();
4028 PrintCacheInfo (totalSlots,
4029 totalCacheCount,
4030 totalEntries,
4031 (float) totalEntries / (float) totalCacheCount,
4032 maxEntries,
4033 (float) totalChain / (float) totalEntries,
4034 maxChain,
4035 (float) totalMissChain / (float) totalSlots,
4036 maxMissChain
4037 #ifdef OBJC_INSTRUMENTED
4038 , totalHitCount,
4039 totalHitCount ?
4040 (float) totalHitProbes / (float) totalHitCount : 0.0,
4041 maxHitProbes,
4042 totalMissCount,
4043 totalMissCount ?
4044 (float) totalMissProbes / (float) totalMissCount : 0.0,
4045 maxMissProbes,
4046 totalFlushCount,
4047 totalFlushCount ?
4048 (float) totalFlushedEntries / (float) totalFlushCount : 0.0,
4049 maxFlushedEntries
4050 #endif
4051 );
4053 printf ("\nNumber of \"forward::\" entries: %d\n", negativeEntryCount);
4054 printf ("Number of cache expansions: %d\n", cacheExpandCount);
4055 #ifdef OBJC_INSTRUMENTED
4056 printf ("flush_caches: total calls total visits average visits max visits total classes visits/class\n");
4057 printf (" ----------- ------------ -------------- ---------- ------------- -------------\n");
4058 printf (" linear %11u %12u %14.1f %10u %13u %12.2f\n",
4059 LinearFlushCachesCount,
4060 LinearFlushCachesVisitedCount,
4061 LinearFlushCachesCount ?
4062 (float) LinearFlushCachesVisitedCount / (float) LinearFlushCachesCount : 0.0,
4063 MaxLinearFlushCachesVisitedCount,
4064 LinearFlushCachesVisitedCount,
4065 1.0);
4066 printf (" nonlinear %11u %12u %14.1f %10u %13u %12.2f\n",
4067 NonlinearFlushCachesCount,
4068 NonlinearFlushCachesVisitedCount,
4069 NonlinearFlushCachesCount ?
4070 (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesCount : 0.0,
4071 MaxNonlinearFlushCachesVisitedCount,
4072 NonlinearFlushCachesClassCount,
4073 NonlinearFlushCachesClassCount ?
4074 (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesClassCount : 0.0);
4075 printf (" ideal %11u %12u %14.1f %10u %13u %12.2f\n",
4076 LinearFlushCachesCount + NonlinearFlushCachesCount,
4077 IdealFlushCachesCount,
4078 LinearFlushCachesCount + NonlinearFlushCachesCount ?
4079 (float) IdealFlushCachesCount / (float) (LinearFlushCachesCount + NonlinearFlushCachesCount) : 0.0,
4080 MaxIdealFlushCachesCount,
4081 LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount,
4082 LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount ?
4083 (float) IdealFlushCachesCount / (float) (LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount) : 0.0);
4085 PrintCacheHistogram ("\nCache hit histogram:", &CacheHitHistogram[0], CACHE_HISTOGRAM_SIZE);
4086 PrintCacheHistogram ("\nCache miss histogram:", &CacheMissHistogram[0], CACHE_HISTOGRAM_SIZE);
4090 printf ("\nLookup chains:");
4091 for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
4093 if (chainCount[index] != 0)
4094 printf (" %u:%u", index, chainCount[index]);
4097 printf ("\nMiss chains:");
4098 for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
4100 if (missChainCount[index] != 0)
4101 printf (" %u:%u", index, missChainCount[index]);
4104 printf ("\nTotal memory usage for cache data structures: %lu bytes\n",
4105 totalCacheCount * (sizeof(struct objc_cache) - sizeof(Method)) +
4106 totalSlots * sizeof(Method) +
4107 negativeEntryCount * sizeof(struct objc_method));
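// Illustrative arithmetic (assuming 4-byte pointers, so sizeof(Method) == 4,
// sizeof(struct objc_cache) == 12 and sizeof(struct objc_method) == 12):
// 100 caches holding 1600 slots in total, with 50 "forward::" entries, would
// report 100*(12-4) + 1600*4 + 50*12 = 800 + 6400 + 600 = 7800 bytes.
// The first term counts the cache headers (mask and occupied fields), the
// second the bucket pointers themselves, and the third the method structures
// allocated to represent the negative ("forward::") cache entries.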
4112 /***********************************************************************
4113 * checkUniqueness.
4114 **********************************************************************/
4115 void checkUniqueness (SEL s1,
4116 SEL s2)
4121 if (s1 && s2 && (strcmp ((const char *) s1, (const char *) s2) == 0))
4122 _objc_inform ("%p != %p but !strcmp (%s, %s)\n", s1, s2, (char *) s1, (char *) s2);
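// Illustrative use (hypothetical snippet, not part of this file): selectors
// obtained through the runtime should be uniqued, so the call below stays
// silent in the normal case.
//
//     SEL a = sel_registerName("frobnicate:");
//     SEL b = sel_getUid("frobnicate:");
//     checkUniqueness(a, b);    // warns only if a != b yet the names match
//
// A report from _objc_inform here indicates two distinct SEL values for the
// same method name, which breaks the pointer-equality comparisons the method
// cache relies on.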