]> git.saurik.com Git - apple/objc4.git/blob - runtime/objc-class.m
objc4-266.tar.gz
[apple/objc4.git] / runtime / objc-class.m
1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /***********************************************************************
26 * objc-class.m
27 * Copyright 1988-1997, Apple Computer, Inc.
28 * Author: s. naroff
29 **********************************************************************/
30
31
32 /***********************************************************************
33 * Method cache locking (GrP 2001-1-14)
34 *
35 * For speed, objc_msgSend does not acquire any locks when it reads
36 * method caches. Instead, all cache changes are performed so that any
37 * objc_msgSend running concurrently with the cache mutator will not
38 * crash or hang or get an incorrect result from the cache.
39 *
40 * When cache memory becomes unused (e.g. the old cache after cache
41 * expansion), it is not immediately freed, because a concurrent
42 * objc_msgSend could still be using it. Instead, the memory is
43 * disconnected from the data structures and placed on a garbage list.
44 * The memory is now only accessible to instances of objc_msgSend that
45 * were running when the memory was disconnected; any further calls to
46 * objc_msgSend will not see the garbage memory because the other data
47 * structures don't point to it anymore. The collecting_in_critical
48 * function checks the PC of all threads and returns FALSE when all threads
49 * are found to be outside objc_msgSend. This means any call to objc_msgSend
50 * that could have had access to the garbage has finished or moved past the
51 * cache lookup stage, so it is safe to free the memory.
52 *
53 * All functions that modify cache data or structures must acquire the
54 * cacheUpdateLock to prevent interference from concurrent modifications.
55 * The function that frees cache garbage must acquire the cacheUpdateLock
56 * and use collecting_in_critical() to flush out cache readers.
57 * The cacheUpdateLock is also used to protect the custom allocator used
58 * for large method cache blocks.
59 *
60 * Cache readers (PC-checked by collecting_in_critical())
61 * objc_msgSend*
62 * _cache_getImp
63 * _cache_getMethod
64 *
65 * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
66 * _cache_fill (acquires lock)
67 * _cache_expand (only called from cache_fill)
68 * _cache_create (only called from cache_expand)
69 * bcopy (only called from instrumented cache_expand)
70 * flush_caches (acquires lock)
71 * _cache_flush (only called from cache_fill and flush_caches)
72 * _cache_collect_free (only called from cache_expand and cache_flush)
73 *
74 * UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
75 * _cache_print
76 * _class_printMethodCaches
77 * _class_printDuplicateCacheEntries
78 * _class_printMethodCacheStatistics
79 *
80 * _class_lookupMethodAndLoadCache is a special case. It may read a
81 * method triplet out of one cache and store it in another cache. This
82 * is unsafe if the method triplet is a forward:: entry, because the
83 * triplet itself could be freed unless _class_lookupMethodAndLoadCache
84 * were PC-checked or used a lock. Additionally, storing the method
85 * triplet in both caches would result in double-freeing if both caches
86 * were flushed or expanded. The solution is for _cache_getMethod to
87 * ignore all entries whose implementation is _objc_msgForward, so
88 * _class_lookupMethodAndLoadCache cannot look at a forward:: entry
89 * unsafely or place it in multiple caches.
90 ***********************************************************************/
91
92 /***********************************************************************
93 * Lazy method list arrays and method list locking (2004-10-19)
94 *
95 * cls->methodLists may be in one of three forms:
96 * 1. NULL: The class has no methods.
97 * 2. non-NULL, with CLS_NO_METHOD_ARRAY set: cls->methodLists points
98 * to a single method list, which is the class's only method list.
99 * 3. non-NULL, with CLS_NO_METHOD_ARRAY clear: cls->methodLists points to
100 * an array of method list pointers. The end of the array's block
101 * is set to -1. If the actual number of method lists is smaller
102 * than that, the rest of the array is NULL.
103 *
104 * Attaching categories and adding and removing classes may change
105 * the form of the class list. In addition, individual method lists
106 * may be reallocated when fixed up.
107 *
108 * Classes are initially read as #1 or #2. If a category is attached
109 * or other methods added, the class is changed to #3. Once in form #3,
110 * the class is never downgraded to #1 or #2, even if methods are removed.
111 * Classes added with objc_addClass are initially either #1 or #3.
112 *
113 * Accessing and manipulating a class's method lists are synchronized,
114 * to prevent races when one thread restructures the list. However,
115 * if the class is not yet in use (i.e. not in class_hash), then the
116 * thread loading the class may access its method lists without locking.
117 *
118 * The following functions acquire methodListLock:
119 * class_getInstanceMethod
120 * class_getClassMethod
121 * class_nextMethodList
122 * class_addMethods
123 * class_removeMethods
124 * class_respondsToMethod
125 * _class_lookupMethodAndLoadCache
126 * lookupMethodInClassAndLoadCache
127 * _objc_add_category_flush_caches
128 *
129 * The following functions don't acquire methodListLock because they
130 * only access method lists during class load and unload:
131 * _objc_register_category
132 * _resolve_categories_for_class (calls _objc_add_category)
133 * add_class_to_loadable_list
134 * _objc_addClass
135 * _objc_remove_classes_in_image
136 *
137 * The following functions use method lists without holding methodListLock.
138 * The caller must either hold methodListLock, or be loading the class.
139 * _getMethod (called by class_getInstanceMethod, class_getClassMethod,
140 * and class_respondsToMethod)
141 * _findMethodInClass (called by _class_lookupMethodAndLoadCache,
142 * lookupMethodInClassAndLoadCache, _getMethod)
143 * _findMethodInList (called by _findMethodInClass)
144 *   nextMethodList (called by _findMethodInClass and class_nextMethodList)
145 * fixupSelectorsInMethodList (called by nextMethodList)
146 * _objc_add_category (called by _objc_add_category_flush_caches,
147 * resolve_categories_for_class and _objc_register_category)
148 * _objc_insertMethods (called by class_addMethods and _objc_add_category)
149 * _objc_removeMethods (called by class_removeMethods)
150 * _objcTweakMethodListPointerForClass (called by _objc_insertMethods)
151 * get_base_method_list (called by add_class_to_loadable_list)
152 * lookupNamedMethodInMethodList (called by add_class_to_loadable_list)
153 ***********************************************************************/
154
155 /***********************************************************************
156 * Thread-safety of class info bits (2004-10-19)
157 *
158 * Some class info bits are used to store mutable runtime state.
159 * Modifications of the info bits at particular times need to be
160 * synchronized to prevent races.
161 *
162 * Three thread-safe modification functions are provided:
163 * _class_setInfo() // atomically sets some bits
164 * _class_clearInfo() // atomically clears some bits
165 * _class_changeInfo() // atomically sets some bits and clears others
166 * These replace CLS_SETINFO() for the multithreaded cases.
167 *
168 * Three modification windows are defined:
169 * - compile time
170 * - class construction or image load (before +load) in one thread
171 * - multi-threaded messaging and method caches
172 *
173 * Info bit modification at compile time and class construction do not
174 * need to be locked, because only one thread is manipulating the class.
175 * Info bit modification during messaging needs to be locked, because
176 * there may be other threads simultaneously messaging or otherwise
177 * manipulating the class.
178 *
179 * Modification windows for each flag:
180 *
181 * CLS_CLASS: compile-time and class load
182 * CLS_META: compile-time and class load
183 * CLS_INITIALIZED: +initialize
184 * CLS_POSING: messaging
185 * CLS_MAPPED: compile-time
186 * CLS_FLUSH_CACHE: messaging
187 * CLS_GROW_CACHE: messaging
188 * CLS_NEED_BIND: unused
189 * CLS_METHOD_ARRAY: unused
190 * CLS_JAVA_HYBRID: JavaBridge only
191 * CLS_JAVA_CLASS: JavaBridge only
192 * CLS_INITIALIZING: messaging
193 * CLS_FROM_BUNDLE: class load
194 * CLS_HAS_CXX_STRUCTORS: compile-time and class load
195 * CLS_NO_METHOD_ARRAY: class load and messaging
196 *
197 * CLS_INITIALIZED and CLS_INITIALIZING have additional thread-safety
198 * constraints to support thread-safe +initialize. See "Thread safety
199 * during class initialization" for details.
200 *
201 * CLS_JAVA_HYBRID and CLS_JAVA_CLASS are set immediately after JavaBridge
202 * calls objc_addClass(). The JavaBridge does not use an atomic update,
203 * but the modification counts as "class construction" unless some other
204 * thread quickly finds the class via the class list. This race is
205 * small and unlikely in well-behaved code.
206 *
207 * Most info bits that may be modified during messaging are also never
208 * read without a lock. There is no general read lock for the info bits.
209 * CLS_INITIALIZED: classInitLock
210 * CLS_FLUSH_CACHE: cacheUpdateLock
211 * CLS_GROW_CACHE: cacheUpdateLock
212 * CLS_NO_METHOD_ARRAY: methodListLock
213 * CLS_INITIALIZING: classInitLock
214 ***********************************************************************/
215
216 /***********************************************************************
217 * Thread-safety during class initialization (GrP 2001-9-24)
218 *
219 * Initial state: CLS_INITIALIZING and CLS_INITIALIZED both clear.
220 * During initialization: CLS_INITIALIZING is set
221 * After initialization: CLS_INITIALIZING clear and CLS_INITIALIZED set.
222 * CLS_INITIALIZING and CLS_INITIALIZED are never set at the same time.
223 * CLS_INITIALIZED is never cleared once set.
224 *
225 * Only one thread is allowed to actually initialize a class and send
226 * +initialize. Enforced by allowing only one thread to set CLS_INITIALIZING.
227 *
228 * Additionally, threads trying to send messages to a class must wait for
229 * +initialize to finish. During initialization of a class, that class's
230 * method cache is kept empty. objc_msgSend will revert to
231 * class_lookupMethodAndLoadCache, which checks CLS_INITIALIZED before
232 * messaging. If CLS_INITIALIZED is clear but CLS_INITIALIZING is set,
233 * the thread must block, unless it is the thread that started
234 * initializing the class in the first place.
235 *
236 * Each thread keeps a list of classes it's initializing.
237 * The global classInitLock is used to synchronize changes to CLS_INITIALIZED
238 * and CLS_INITIALIZING: the transition to CLS_INITIALIZING must be
239 * an atomic test-and-set with respect to itself and the transition
240 * to CLS_INITIALIZED.
241 * The global classInitWaitCond is used to block threads waiting for an
242 * initialization to complete. The classInitLock synchronizes
243 * condition checking and the condition variable.
244 **********************************************************************/
245
246 /***********************************************************************
247 * +initialize deadlock case when a class is marked initializing while
248 * its superclass is initialized. Solved by completely initializing
249 * superclasses before beginning to initialize a class.
250 *
251 * OmniWeb class hierarchy:
252 * OBObject
253 * | ` OBPostLoader
254 * OFObject
255 * / \
256 * OWAddressEntry OWController
257 * |
258 * OWConsoleController
259 *
260 * Thread 1 (evil testing thread):
261 * initialize OWAddressEntry
262 * super init OFObject
263 * super init OBObject
264 * [OBObject initialize] runs OBPostLoader, which inits lots of classes...
265 * initialize OWConsoleController
266 * super init OWController - wait for Thread 2 to finish OWController init
267 *
268 * Thread 2 (normal OmniWeb thread):
269 * initialize OWController
270 * super init OFObject - wait for Thread 1 to finish OFObject init
271 *
272 * deadlock!
273 *
274 * Solution: fully initialize super classes before beginning to initialize
275 * a subclass. Then the initializing+initialized part of the class hierarchy
276 * will be a contiguous subtree starting at the root, so other threads
277 * can't jump into the middle between two initializing classes, and we won't
278 * get stuck while a superclass waits for its subclass which waits for the
279 * superclass.
280 **********************************************************************/
281
282
283
284 /***********************************************************************
285 * Imports.
286 **********************************************************************/
287
288 #import <mach/mach_interface.h>
289 #include <mach-o/ldsyms.h>
290 #include <mach-o/dyld.h>
291
292 #include <sys/types.h>
293 #include <unistd.h>
294 #include <stdlib.h>
295 #include <sys/uio.h>
296 #include <sys/fcntl.h>
297
298 #import "objc-class.h"
299
300 #import <objc/Object.h>
301 #import <objc/objc-runtime.h>
302 #import "objc-private.h"
303 #import "hashtable2.h"
304 #import "maptable.h"
305
306 #include <sys/types.h>
307
308 // Needed functions not in any header file
309 size_t malloc_size (const void * ptr);
310
311 // Needed kernel interface
312 #import <mach/mach.h>
313 #import <mach/thread_status.h>
314
315
316 /***********************************************************************
317 * Conditionals.
318 **********************************************************************/
319
320 // Define PRELOAD_SUPERCLASS_CACHES to cause method lookups to add the
321 // method to the appropriate superclass caches, in addition to the normal
322 // encaching in the subclass where the method was messaged. Doing so
323 // will speed up messaging the same method from instances of the
324 // superclasses, but also uses up valuable cache space for a speculative
325 // purpose
326 // See radar 2364264 about incorrectly propagating _objc_forward entries
327 // and double freeing them, first, before turning this on!
328 // (Radar 2364264 is now "inactive".)
329 // Double-freeing is also a potential problem when this is off. See
330 // note about _class_lookupMethodAndLoadCache in "Method cache locking".
331 //#define PRELOAD_SUPERCLASS_CACHES
332
333 /***********************************************************************
334 * Exports.
335 **********************************************************************/
336
337 #ifdef OBJC_INSTRUMENTED
338 enum {
339 CACHE_HISTOGRAM_SIZE = 512
340 };
341
342 unsigned int CacheHitHistogram [CACHE_HISTOGRAM_SIZE];
343 unsigned int CacheMissHistogram [CACHE_HISTOGRAM_SIZE];
344 #endif
345
346 /***********************************************************************
347 * Constants and macros internal to this module.
348 **********************************************************************/
349
350 // INIT_CACHE_SIZE and INIT_META_CACHE_SIZE must be a power of two
351 enum {
352 INIT_CACHE_SIZE_LOG2 = 2,
353 INIT_META_CACHE_SIZE_LOG2 = 2,
354 INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2),
355 INIT_META_CACHE_SIZE = (1 << INIT_META_CACHE_SIZE_LOG2)
356 };
357
358 // Amount of space required for count hash table buckets, knowing that
359 // one entry is embedded in the cache structure itself
360 #define TABLE_SIZE(count) ((count - 1) * sizeof(Method))
361
362 // A sentinel (magic value) to report bad thread_get_state status
363 #define PC_SENTINAL 0
364
365
366 /***********************************************************************
367 * Types internal to this module.
368 **********************************************************************/
369
370 #ifdef OBJC_INSTRUMENTED
371 struct CacheInstrumentation
372 {
373 unsigned int hitCount; // cache lookup success tally
374 unsigned int hitProbes; // sum entries checked to hit
375 unsigned int maxHitProbes; // max entries checked to hit
376 unsigned int missCount; // cache lookup no-find tally
377 unsigned int missProbes; // sum entries checked to miss
378 unsigned int maxMissProbes; // max entries checked to miss
379 unsigned int flushCount; // cache flush tally
380 unsigned int flushedEntries; // sum cache entries flushed
381 unsigned int maxFlushedEntries; // max cache entries flushed
382 };
383 typedef struct CacheInstrumentation CacheInstrumentation;
384
385 // Cache instrumentation data follows table, so it is most compatible
386 #define CACHE_INSTRUMENTATION(cache) (CacheInstrumentation *) &cache->buckets[cache->mask + 1];
387 #endif
388
389 /***********************************************************************
390 * Function prototypes internal to this module.
391 **********************************************************************/
392
393 static Ivar class_getVariable (Class cls, const char * name);
394 static void flush_caches (Class cls, BOOL flush_meta);
395 static struct objc_method_list *nextMethodList(struct objc_class *cls, void **it);
396 static void addClassToOriginalClass (Class posingClass, Class originalClass);
397 static void _objc_addOrigClass (Class origClass);
398 static void _freedHandler (id self, SEL sel);
399 static void _nonexistentHandler (id self, SEL sel);
400 static void class_initialize (Class cls);
401 static Cache _cache_expand (Class cls);
402 static int LogObjCMessageSend (BOOL isClassMethod, const char * objectsClass, const char * implementingClass, SEL selector);
403 static BOOL _cache_fill (Class cls, Method smt, SEL sel);
404 static void _cache_addForwardEntry(Class cls, SEL sel);
405 static void _cache_flush (Class cls);
406 static IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);
407 static int SubtypeUntil (const char * type, char end);
408 static const char * SkipFirstType (const char * type);
409
410 static unsigned long _get_pc_for_thread (mach_port_t thread);
411 static int _collecting_in_critical (void);
412 static void _garbage_make_room (void);
413 static void _cache_collect_free (void * data, size_t size, BOOL tryCollect);
414
415 static BOOL cache_allocator_is_block(void *block);
416 static void *cache_allocator_calloc(size_t size);
417 static void cache_allocator_free(void *block);
418
419 static void _cache_print (Cache cache);
420 static unsigned int log2 (unsigned int x);
421 static void PrintCacheHeader (void);
422 #ifdef OBJC_INSTRUMENTED
423 static void PrintCacheHistogram (char * title, unsigned int * firstEntry, unsigned int entryCount);
424 #endif
425
426 /***********************************************************************
427 * Static data internal to this module.
428 **********************************************************************/
429
430 // When _class_uncache is non-zero, cache growth copies the existing
431 // entries into the new (larger) cache. When this flag is zero, new
432 // (larger) caches start out empty.
433 static int _class_uncache = 1;
434
435 // When _class_slow_grow is non-zero, any given cache is actually grown
436 // only on the odd-numbered times it becomes full; on the even-numbered
437 // times, it is simply emptied and re-used. When this flag is zero,
438 // caches are grown every time.
439 static int _class_slow_grow = 1;
440
441 // Lock for cache access.
442 // Held when modifying a cache in place.
443 // Held when installing a new cache on a class.
444 // Held when adding to the cache garbage list.
445 // Held when disposing cache garbage.
446 // See "Method cache locking" above for notes about cache locking.
447 static OBJC_DECLARE_LOCK(cacheUpdateLock);
448
449 // classInitLock protects classInitWaitCond and examination and modification
450 // of CLS_INITIALIZED and CLS_INITIALIZING.
451 OBJC_DECLARE_LOCK(classInitLock);
452 // classInitWaitCond is signalled when any class is done initializing.
453 // Threads that are waiting for a class to finish initializing wait on this.
454 pthread_cond_t classInitWaitCond = PTHREAD_COND_INITIALIZER;
455
456 // Lock for method list access and modification.
457 // Protects methodLists fields, method arrays, and CLS_NO_METHOD_ARRAY bits.
458 // Classes not yet in use do not need to take this lock.
459 OBJC_DECLARE_LOCK(methodListLock);
460
461 // When traceDuplicates is non-zero, _cacheFill checks whether the method
462 // being encached is already there. The number of times it finds a match
463 // is tallied in cacheFillDuplicates. When traceDuplicatesVerbose is
464 // non-zero, each duplication is logged when found in this way.
465 static int traceDuplicates = 0;
466 static int traceDuplicatesVerbose = 0;
467 static int cacheFillDuplicates = 0;
468
469 // Custom cache allocator parameters
470 // CACHE_REGION_SIZE must be a multiple of CACHE_QUANTUM.
471 #define CACHE_QUANTUM 520
472 #define CACHE_REGION_SIZE 131040 // quantized just under 128KB (131072)
473 // #define CACHE_REGION_SIZE 262080 // quantized just under 256KB (262144)
474
475 #ifdef OBJC_INSTRUMENTED
476 // Instrumentation
477 static unsigned int LinearFlushCachesCount = 0;
478 static unsigned int LinearFlushCachesVisitedCount = 0;
479 static unsigned int MaxLinearFlushCachesVisitedCount = 0;
480 static unsigned int NonlinearFlushCachesCount = 0;
481 static unsigned int NonlinearFlushCachesClassCount = 0;
482 static unsigned int NonlinearFlushCachesVisitedCount = 0;
483 static unsigned int MaxNonlinearFlushCachesVisitedCount = 0;
484 static unsigned int IdealFlushCachesCount = 0;
485 static unsigned int MaxIdealFlushCachesCount = 0;
486 #endif
487
488 // Method call logging
489 typedef int (*ObjCLogProc)(BOOL, const char *, const char *, SEL);
490
491 static int totalCacheFills NOBSS = 0;
492 static int objcMsgLogFD = (-1);
493 static ObjCLogProc objcMsgLogProc = &LogObjCMessageSend;
494 static int objcMsgLogEnabled = 0;
495
496 // Error Messages
497 static const char
498 _errNoMem[] = "failed -- out of memory(%s, %u)",
499 _errAllocNil[] = "allocating nil object",
500 _errFreedObject[] = "message %s sent to freed object=0x%lx",
501 _errNonExistentObject[] = "message %s sent to non-existent object=0x%lx",
502 _errBadSel[] = "invalid selector %s",
503 _errNotSuper[] = "[%s poseAs:%s]: target not immediate superclass",
504 _errNewVars[] = "[%s poseAs:%s]: %s defines new instance variables";
505
506 /***********************************************************************
507 * Information about multi-thread support:
508 *
509 * Since we do not lock many operations which walk the superclass, method
510 * and ivar chains, these chains must remain intact once a class is published
511 * by inserting it into the class hashtable. All modifications must be
512 * atomic so that someone walking these chains will always get a valid
513 * result.
514 ***********************************************************************/
515 /***********************************************************************
516 * A static empty cache. All classes initially point at this cache.
517 * When the first message is sent it misses in the cache, and when
518 * the cache is grown it checks for this case and uses malloc rather
519 * than realloc. This avoids the need to check for NULL caches in the
520 * messenger.
521 ***********************************************************************/
522
523 #ifndef OBJC_INSTRUMENTED
524 const struct objc_cache emptyCache =
525 {
526 0, // mask
527 0, // occupied
528 { NULL } // buckets
529 };
530 #else
531 // OBJC_INSTRUMENTED requires writable data immediately following emptyCache.
532 struct objc_cache emptyCache =
533 {
534 0, // mask
535 0, // occupied
536 { NULL } // buckets
537 };
538 CacheInstrumentation emptyCacheInstrumentation = {0};
539 #endif
540
541
542 // Freed objects have their isa set to point to this dummy class.
543 // This avoids the need to check for Nil classes in the messenger.
544 static const struct objc_class freedObjectClass =
545 {
546 Nil, // isa
547 Nil, // super_class
548 "FREED(id)", // name
549 0, // version
550 0, // info
551 0, // instance_size
552 NULL, // ivars
553 NULL, // methodLists
554 (Cache) &emptyCache, // cache
555 NULL // protocols
556 };
557
558 static const struct objc_class nonexistentObjectClass =
559 {
560 Nil, // isa
561 Nil, // super_class
562 "NONEXISTENT(id)", // name
563 0, // version
564 0, // info
565 0, // instance_size
566 NULL, // ivars
567 NULL, // methodLists
568 (Cache) &emptyCache, // cache
569 NULL // protocols
570 };
571
572 /***********************************************************************
573 * object_getClassName.
574 **********************************************************************/
const char * object_getClassName (id obj)
{
    // nil has no isa to follow; report a placeholder name instead.
    if (obj == nil) return "nil";

    // The name lives in the class structure pointed to by isa.
    struct objc_class *cls = (struct objc_class *) obj->isa;
    return cls->name;
}
584
585 /***********************************************************************
586 * object_getIndexedIvars.
587 **********************************************************************/
void * object_getIndexedIvars (id obj)
{
    // Indexed ivars sit immediately after the declared instance
    // variables, i.e. instance_size bytes past the start of the object.
    struct objc_class *cls = (struct objc_class *) obj->isa;
    return (char *) obj + cls->instance_size;
}
593
594
595 /***********************************************************************
596 * object_cxxDestructFromClass.
597 * Call C++ destructors on obj, starting with cls's
598 * dtor method (if any) followed by superclasses' dtors (if any),
599 * stopping at cls's dtor (if any).
600 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
601 **********************************************************************/
// Call C++ destructors on obj: cls's dtor first (if any), then each
// superclass's dtor in turn.
// Uses methodListLock and cacheUpdateLock. The caller must hold neither.
static void object_cxxDestructFromClass(id obj, Class cls)
{
    // Walk up the superclass chain, running each .cxx_destruct found.
    while (cls != NULL) {
        if (cls->info & CLS_HAS_CXX_STRUCTORS) {
            void (*dtor)(id) = (void(*)(id))
                lookupMethodInClassAndLoadCache(cls, cxx_destruct_sel);
            // _objc_msgForward as the IMP means this class level has
            // no destructor of its own - skip it.
            if (dtor != (void(*)(id))&_objc_msgForward) {
                if (PrintCxxCtors) {
                    _objc_inform("CXX: calling C++ destructors for class %s",
                                 cls->name);
                }
                (*dtor)(obj);
            }
        }
        cls = cls->super_class;
    }
}
621
622
623 /***********************************************************************
624 * object_cxxDestruct.
625 * Call C++ destructors on obj, if any.
626 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
627 **********************************************************************/
// Call C++ destructors on obj, if any.
// Uses methodListLock and cacheUpdateLock. The caller must hold neither.
void object_cxxDestruct(id obj)
{
    // Nothing to destruct for nil.
    if (obj == nil) return;
    object_cxxDestructFromClass(obj, obj->isa);
}
633
634
635 /***********************************************************************
636 * object_cxxConstructFromClass.
637 * Recursively call C++ constructors on obj, starting with base class's
638 * ctor method (if any) followed by subclasses' ctors (if any), stopping
639 * at cls's ctor (if any).
640 * Returns YES if construction succeeded.
641 * Returns NO if some constructor threw an exception. The exception is
642 * caught and discarded. Any partial construction is destructed.
643 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
644 *
645 * .cxx_construct returns id. This really means:
646 * return self: construction succeeded
647 * return nil: construction failed because a C++ constructor threw an exception
648 **********************************************************************/
// Recursively call C++ constructors on obj, root class first, ending
// with cls's own ctor (if any).
// Returns YES if construction succeeded.
// Returns NO if some constructor threw an exception (the exception is
// caught and discarded; any partial construction is destructed).
// Uses methodListLock and cacheUpdateLock. The caller must hold neither.
//
// .cxx_construct returns id:
//   self - construction succeeded
//   nil  - construction failed because a C++ constructor threw
static BOOL object_cxxConstructFromClass(id obj, Class cls)
{
    // Recurse first so the root class's constructor runs earliest.
    if (cls->super_class) {
        if (!object_cxxConstructFromClass(obj, cls->super_class)) {
            // A superclass's ctor failed - give up.
            return NO;
        }
    }

    // Classes without C++ structors have nothing to run here.
    if (!(cls->info & CLS_HAS_CXX_STRUCTORS)) return YES;

    id (*ctor)(id) =
        (id(*)(id))lookupMethodInClassAndLoadCache(cls, cxx_construct_sel);
    if (ctor == (id(*)(id))&_objc_msgForward) return YES;  // no ctor - ok

    // Run this class's own ctor.
    if (PrintCxxCtors) {
        _objc_inform("CXX: calling C++ constructors for class %s", cls->name);
    }
    if ((*ctor)(obj)) return YES;  // ctor called and succeeded - ok

    // This level's ctor failed: tear down what the superclasses built.
    if (cls->super_class) object_cxxDestructFromClass(obj, cls->super_class);
    return NO;
}
675
676
677 /***********************************************************************
678 * object_cxxConstruct.
679 * Call C++ constructors on obj, if any.
680 * Returns YES if construction succeeded.
681 * Returns NO if some constructor threw an exception. The exception is
682 * caught and discarded. Any partial construction is destructed.
683 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
684 **********************************************************************/
// Call C++ constructors on obj, if any; YES on success, NO if a
// constructor threw (partial construction is destructed).
// Uses methodListLock and cacheUpdateLock. The caller must hold neither.
BOOL object_cxxConstruct(id obj)
{
    // nil needs no construction and counts as success.
    if (obj == nil) return YES;
    return object_cxxConstructFromClass(obj, obj->isa);
}
690
691
692 /***********************************************************************
693 * _internal_class_createInstanceFromZone. Allocate an instance of the
694 * specified class with the specified number of bytes for indexed
695 * variables, in the specified zone. The isa field is set to the
696 * class, C++ default constructors are called, and all other fields are zeroed.
697 **********************************************************************/
// Allocate an instance of aClass with nIvarBytes extra bytes for
// indexed ivars, in zone z. The isa field is set, C++ default
// constructors are run, and all other fields are zeroed.
static id _internal_class_createInstanceFromZone (Class aClass,
                                                  unsigned nIvarBytes,
                                                  void * z)
{
    // Refuse to instantiate Nil.
    if (aClass == Nil)
    {
        __objc_error ((id) aClass, _errAllocNil, 0);
        return nil;
    }

    // Zero-filled allocation: declared ivars plus the indexed extras.
    unsigned byteCount =
        ((struct objc_class *) aClass)->instance_size + nIvarBytes;
    id obj = (id) malloc_zone_calloc (z, 1, byteCount);
    if (obj == nil)
    {
        __objc_error ((id) aClass, _errNoMem, ((struct objc_class *) aClass)->name, nIvarBytes);
        return nil;
    }

    // Point the new instance at its class.
    obj->isa = aClass;

    // Run C++ constructors, if any; undo the allocation if one throws.
    if (!object_cxxConstruct(obj)) {
        malloc_zone_free(z, obj);
        return nil;
    }

    return obj;
}
733
734 /***********************************************************************
735 * _internal_class_createInstance. Allocate an instance of the specified
736 * class with the specified number of bytes for indexed variables, in
737 * the default zone, using _internal_class_createInstanceFromZone.
738 **********************************************************************/
// Allocate an instance of aClass with nIvarBytes extra indexed-ivar
// bytes in the default malloc zone, via
// _internal_class_createInstanceFromZone.
static id _internal_class_createInstance (Class aClass,
                                          unsigned nIvarBytes)
{
    void *zone = malloc_default_zone ();
    return _internal_class_createInstanceFromZone (aClass, nIvarBytes, zone);
}
746
// Overridable entry points for posing and instance allocation.
// Clients may repoint these function pointers; the defaults are the
// implementations defined above / elsewhere in this file.
id (*_poseAs)() = (id (*)())class_poseAs;
id (*_alloc)(Class, unsigned) = _internal_class_createInstance;
id (*_zoneAlloc)(Class, unsigned, void *) = _internal_class_createInstanceFromZone;
750
751 /***********************************************************************
752 * class_createInstanceFromZone. Allocate an instance of the specified
753 * class with the specified number of bytes for indexed variables, in
754 * the specified zone, using _zoneAlloc.
755 **********************************************************************/
756 id class_createInstanceFromZone (Class aClass,
757 unsigned nIvarBytes,
758 void * z)
759 {
760 // _zoneAlloc can be overridden, but is initially set to
761 // _internal_class_createInstanceFromZone
762 return (*_zoneAlloc) (aClass, nIvarBytes, z);
763 }
764
765 /***********************************************************************
766 * class_createInstance. Allocate an instance of the specified class with
767 * the specified number of bytes for indexed variables, using _alloc.
768 **********************************************************************/
769 id class_createInstance (Class aClass,
770 unsigned nIvarBytes)
771 {
772 // _alloc can be overridden, but is initially set to
773 // _internal_class_createInstance
774 return (*_alloc) (aClass, nIvarBytes);
775 }
776
777 /***********************************************************************
778 * class_setVersion. Record the specified version with the class.
779 **********************************************************************/
780 void class_setVersion (Class aClass,
781 int version)
782 {
783 ((struct objc_class *) aClass)->version = version;
784 }
785
786 /***********************************************************************
787 * class_getVersion. Return the version recorded with the class.
788 **********************************************************************/
789 int class_getVersion (Class aClass)
790 {
791 return ((struct objc_class *) aClass)->version;
792 }
793
794
795 static inline Method _findNamedMethodInList(struct objc_method_list * mlist, const char *meth_name) {
796 int i;
797 if (!mlist) return NULL;
798 for (i = 0; i < mlist->method_count; i++) {
799 Method m = &mlist->method_list[i];
800 if (*((const char *)m->method_name) == *meth_name && 0 == strcmp((const char *)(m->method_name), meth_name)) {
801 return m;
802 }
803 }
804 return NULL;
805 }
806
807
808 /***********************************************************************
809 * fixupSelectorsInMethodList
810 * Uniques selectors in the given method list.
811 * The given method list must be non-NULL and not already fixed-up.
812 * If the class was loaded from a bundle:
813 * fixes up the given list in place with heap-allocated selector strings
814 * If the class was not from a bundle:
815 * allocates a copy of the method list, fixes up the copy, and returns
816 * the copy. The given list is unmodified.
817 *
818 * If cls is already in use, methodListLock must be held by the caller.
819 **********************************************************************/
// Fixed-up method lists get mlist->obsolete = _OBJC_FIXED_UP.
#define _OBJC_FIXED_UP ((void *)1771)

static struct objc_method_list *fixupSelectorsInMethodList(Class cls, struct objc_method_list *mlist)
{
    unsigned i, size;
    Method method;
    struct objc_method_list *old_mlist;

    if ( ! mlist ) return (struct objc_method_list *)0;
    if ( mlist->obsolete != _OBJC_FIXED_UP ) {
        BOOL isBundle = CLS_GETINFO(cls, CLS_FROM_BUNDLE) ? YES : NO;
        if (!isBundle) {
            // Non-bundle classes: fix up a heap-allocated copy and
            // leave the original list untouched (see header comment).
            old_mlist = mlist;
            // List header size minus one inline method, plus the
            // actual number of methods.
            size = sizeof(struct objc_method_list) - sizeof(struct objc_method) + old_mlist->method_count * sizeof(struct objc_method);
            mlist = _malloc_internal(size);
            memmove(mlist, old_mlist, size);
        } else {
            // Mach-O bundles are fixed up in place.
            // This prevents leaks when a bundle is unloaded.
        }
        sel_lock();
        // Replace each raw selector string with its registered SEL.
        for ( i = 0; i < mlist->method_count; i += 1 ) {
            method = &mlist->method_list[i];
            method->method_name =
                sel_registerNameNoLock((const char *)method->method_name, isBundle);  // Always copy selector data from bundles.
        }
        sel_unlock();
        // Mark the list so it is never fixed up a second time.
        mlist->obsolete = _OBJC_FIXED_UP;
    }
    return mlist;
}
852
853
854 /***********************************************************************
855 * nextMethodList
856 * Returns successive method lists from the given class.
857 * Method lists are returned in method search order (i.e. highest-priority
858 * implementations first).
859 * All necessary method list fixups are performed, so the
860 * returned method list is fully-constructed.
861 *
862 * If cls is already in use, methodListLock must be held by the caller.
863 * For full thread-safety, methodListLock must be continuously held by the
864 * caller across all calls to nextMethodList(). If the lock is released,
865 * the bad results listed in class_nextMethodList() may occur.
866 *
867 * void *iterator = NULL;
868 * struct objc_method_list *mlist;
869 * OBJC_LOCK(&methodListLock);
870 * while ((mlist = nextMethodList(cls, &iterator))) {
871 * // do something with mlist
872 * }
873 * OBJC_UNLOCK(&methodListLock);
874 **********************************************************************/
static struct objc_method_list *nextMethodList(struct objc_class *cls,
                                               void **it)
{
    // *it holds the index of the next method list to return;
    // 0 means "start from the beginning".
    uintptr_t index = *(uintptr_t *)it;
    struct objc_method_list **resultp;

    if (index == 0) {
        // First call to nextMethodList.
        if (!cls->methodLists) {
            resultp = NULL;
        } else if (cls->info & CLS_NO_METHOD_ARRAY) {
            // methodLists is a single list, not an array of lists.
            resultp = (struct objc_method_list **)&cls->methodLists;
        } else {
            resultp = &cls->methodLists[0];
            if (!*resultp || *resultp == END_OF_METHODS_LIST) {
                resultp = NULL;
            }
        }
    } else {
        // Subsequent call to nextMethodList.
        if (!cls->methodLists) {
            resultp = NULL;
        } else if (cls->info & CLS_NO_METHOD_ARRAY) {
            // The single list was already returned by the first call.
            resultp = NULL;
        } else {
            resultp = &cls->methodLists[index];
            if (!*resultp || *resultp == END_OF_METHODS_LIST) {
                resultp = NULL;
            }
        }
    }

    // resultp now is NULL, meaning there are no more method lists,
    // OR the address of the method list pointer to fix up and return.

    if (resultp) {
        // Lazily fix up (unique the selectors of) the list before
        // handing it out; fixup may replace the stored pointer.
        if (*resultp && (*resultp)->obsolete != _OBJC_FIXED_UP) {
            *resultp = fixupSelectorsInMethodList(cls, *resultp);
        }
        *it = (void *)(index + 1);
        return *resultp;
    } else {
        // Exhausted: reset the iterator so it can be reused.
        *it = 0;
        return NULL;
    }
}
921
922
923 /* These next three functions are the heart of ObjC method lookup.
924 * If the class is currently in use, methodListLock must be held by the caller.
925 */
926 static inline Method _findMethodInList(struct objc_method_list * mlist, SEL sel) {
927 int i;
928 if (!mlist) return NULL;
929 for (i = 0; i < mlist->method_count; i++) {
930 Method m = &mlist->method_list[i];
931 if (m->method_name == sel) {
932 return m;
933 }
934 }
935 return NULL;
936 }
937
// Search cls's own method lists (no superclass walk) for sel,
// fixing up unfixed lists lazily along the way.
static inline Method _findMethodInClass(Class cls, SEL sel) __attribute__((always_inline));
static inline Method _findMethodInClass(Class cls, SEL sel) {
    // Flattened version of nextMethodList(). The optimizer doesn't
    // do a good job with hoisting the conditionals out of the loop.
    // Conceptually, this looks like:
    // while ((mlist = nextMethodList(cls, &iterator))) {
    //     Method m = _findMethodInList(mlist, sel);
    //     if (m) return m;
    // }

    if (!cls->methodLists) {
        // No method lists.
        return NULL;
    }
    else if (cls->info & CLS_NO_METHOD_ARRAY) {
        // One method list.
        struct objc_method_list **mlistp;
        mlistp = (struct objc_method_list **)&cls->methodLists;
        if ((*mlistp)->obsolete != _OBJC_FIXED_UP) {
            *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
        }
        return _findMethodInList(*mlistp, sel);
    }
    else {
        // Multiple method lists.
        struct objc_method_list **mlistp;
        for (mlistp = cls->methodLists;
             *mlistp != NULL && *mlistp != END_OF_METHODS_LIST;
             mlistp++)
        {
            Method m;
            if ((*mlistp)->obsolete != _OBJC_FIXED_UP) {
                *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
            }
            m = _findMethodInList(*mlistp, sel);
            if (m) return m;
        }
        return NULL;
    }
}
978
979 static inline Method _getMethod(Class cls, SEL sel) {
980 for (; cls; cls = cls->super_class) {
981 Method m;
982 m = _findMethodInClass(cls, sel);
983 if (m) return m;
984 }
985 return NULL;
986 }
987
988
989 // fixme for gc debugging temporary use
990 __private_extern__ IMP findIMPInClass(Class cls, SEL sel)
991 {
992 Method m = _findMethodInClass(cls, sel);
993 if (m) return m->method_imp;
994 else return NULL;
995 }
996
997 /***********************************************************************
998 * class_getInstanceMethod. Return the instance method for the
999 * specified class and selector.
1000 **********************************************************************/
1001 Method class_getInstanceMethod (Class aClass,
1002 SEL aSelector)
1003 {
1004 Method result;
1005
1006 // Need both a class and a selector
1007 if (!aClass || !aSelector)
1008 return NULL;
1009
1010 // Go to the class
1011 OBJC_LOCK(&methodListLock);
1012 result = _getMethod (aClass, aSelector);
1013 OBJC_UNLOCK(&methodListLock);
1014 return result;
1015 }
1016
1017 /***********************************************************************
1018 * class_getClassMethod. Return the class method for the specified
1019 * class and selector.
1020 **********************************************************************/
1021 Method class_getClassMethod (Class aClass,
1022 SEL aSelector)
1023 {
1024 Method result;
1025
1026 // Need both a class and a selector
1027 if (!aClass || !aSelector)
1028 return NULL;
1029
1030 // Go to the class or isa
1031 OBJC_LOCK(&methodListLock);
1032 result = _getMethod (GETMETA(aClass), aSelector);
1033 OBJC_UNLOCK(&methodListLock);
1034 return result;
1035 }
1036
1037 /***********************************************************************
1038 * class_getVariable. Return the named instance variable.
1039 **********************************************************************/
1040 static Ivar class_getVariable (Class cls,
1041 const char * name)
1042 {
1043 struct objc_class * thisCls;
1044
1045 // Outer loop - search the class and its superclasses
1046 for (thisCls = cls; thisCls != Nil; thisCls = ((struct objc_class *) thisCls)->super_class)
1047 {
1048 int index;
1049 Ivar thisIvar;
1050
1051 // Skip class having no ivars
1052 if (!thisCls->ivars)
1053 continue;
1054
1055 // Inner loop - search the given class
1056 thisIvar = &thisCls->ivars->ivar_list[0];
1057 for (index = 0; index < thisCls->ivars->ivar_count; index += 1)
1058 {
1059 // Check this ivar's name. Be careful because the
1060 // compiler generates ivar entries with NULL ivar_name
1061 // (e.g. for anonymous bit fields).
1062 if ((thisIvar->ivar_name) &&
1063 (strcmp (name, thisIvar->ivar_name) == 0))
1064 return thisIvar;
1065
1066 // Move to next ivar
1067 thisIvar += 1;
1068 }
1069 }
1070
1071 // Not found
1072 return NULL;
1073 }
1074
1075 /***********************************************************************
1076 * class_getInstanceVariable. Return the named instance variable.
1077 *
1078 * Someday add class_getClassVariable ().
1079 **********************************************************************/
1080 Ivar class_getInstanceVariable (Class aClass,
1081 const char * name)
1082 {
1083 // Must have a class and a name
1084 if (!aClass || !name)
1085 return NULL;
1086
1087 // Look it up
1088 return class_getVariable (aClass, name);
1089 }
1090
1091 /***********************************************************************
1092 * flush_caches. Flush the instance and optionally class method caches
1093 * of cls and all its subclasses.
1094 *
1095 * Specifying Nil for the class "all classes."
1096 **********************************************************************/
static void flush_caches(Class cls, BOOL flush_meta)
{
    int numClasses = 0, newNumClasses;
    struct objc_class * * classes = NULL;
    int i;
    struct objc_class * clsObject;
#ifdef OBJC_INSTRUMENTED
    unsigned int classesVisited;
    unsigned int subclassCount;
#endif

    // Do nothing if class has no cache
    // This check is safe to do without any cache locks.
    if (cls && !((struct objc_class *) cls)->cache)
        return;

    // Snapshot the registered class list. Loop because classes may
    // be registered between the sizing call and the copying call.
    newNumClasses = objc_getClassList((Class *)NULL, 0);
    while (numClasses < newNumClasses) {
        numClasses = newNumClasses;
        classes = _realloc_internal(classes, sizeof(Class) * numClasses);
        newNumClasses = objc_getClassList((Class *)classes, numClasses);
    }
    numClasses = newNumClasses;

    OBJC_LOCK(&cacheUpdateLock);

    // Handle nil and root instance class specially: flush all
    // instance and class method caches. Nice that this
    // loop is linear vs the N-squared loop just below.
    if (!cls || !((struct objc_class *) cls)->super_class)
    {
#ifdef OBJC_INSTRUMENTED
        LinearFlushCachesCount += 1;
        classesVisited = 0;
        subclassCount = 0;
#endif
        // Traverse all classes in the hash table
        for (i = 0; i < numClasses; i++)
        {
            struct objc_class * metaClsObject;
#ifdef OBJC_INSTRUMENTED
            classesVisited += 1;
#endif
            clsObject = classes[i];

            // Skip class that is known not to be a subclass of this root
            // (the isa pointer of any meta class points to the meta class
            // of the root).
            // NOTE: When is an isa pointer of a hash tabled class ever nil?
            metaClsObject = clsObject->isa;
            if (cls && metaClsObject && cls->isa != metaClsObject->isa)
            {
                continue;
            }

#ifdef OBJC_INSTRUMENTED
            subclassCount += 1;
#endif

            // Flush the instance cache, and the class (meta) cache
            // if requested and present.
            _cache_flush (clsObject);
            if (flush_meta && metaClsObject != NULL) {
                _cache_flush (metaClsObject);
            }
        }
#ifdef OBJC_INSTRUMENTED
        LinearFlushCachesVisitedCount += classesVisited;
        if (classesVisited > MaxLinearFlushCachesVisitedCount)
            MaxLinearFlushCachesVisitedCount = classesVisited;
        IdealFlushCachesCount += subclassCount;
        if (subclassCount > MaxIdealFlushCachesCount)
            MaxIdealFlushCachesCount = subclassCount;
#endif

        OBJC_UNLOCK(&cacheUpdateLock);
        _free_internal(classes);
        return;
    }

    // Outer loop - flush any cache that could now get a method from
    // cls (i.e. the cache associated with cls and any of its subclasses).
#ifdef OBJC_INSTRUMENTED
    NonlinearFlushCachesCount += 1;
    classesVisited = 0;
    subclassCount = 0;
#endif
    for (i = 0; i < numClasses; i++)
    {
        struct objc_class * clsIter;

#ifdef OBJC_INSTRUMENTED
        NonlinearFlushCachesClassCount += 1;
#endif
        clsObject = classes[i];

        // Inner loop - Process a given class
        clsIter = clsObject;
        while (clsIter)
        {

#ifdef OBJC_INSTRUMENTED
            classesVisited += 1;
#endif
            // Flush clsObject instance method cache if
            // clsObject is a subclass of cls, or is cls itself
            // Flush the class method cache if that was asked for
            if (clsIter == cls)
            {
#ifdef OBJC_INSTRUMENTED
                subclassCount += 1;
#endif
                _cache_flush (clsObject);
                if (flush_meta)
                    _cache_flush (clsObject->isa);

                break;

            }

            // Flush clsObject class method cache if cls is
            // the meta class of clsObject or of one
            // of clsObject's superclasses
            else if (clsIter->isa == cls)
            {
#ifdef OBJC_INSTRUMENTED
                subclassCount += 1;
#endif
                _cache_flush (clsObject->isa);
                break;
            }

            // Move up superclass chain
            else if (ISINITIALIZED(clsIter))
                clsIter = clsIter->super_class;

            // clsIter is not initialized, so its cache
            // must be empty. This happens only when
            // clsIter == clsObject, because
            // superclasses are initialized before
            // subclasses, and this loop traverses
            // from sub- to super- classes.
            else
                break;
        }
    }
#ifdef OBJC_INSTRUMENTED
    NonlinearFlushCachesVisitedCount += classesVisited;
    if (classesVisited > MaxNonlinearFlushCachesVisitedCount)
        MaxNonlinearFlushCachesVisitedCount = classesVisited;
    IdealFlushCachesCount += subclassCount;
    if (subclassCount > MaxIdealFlushCachesCount)
        MaxIdealFlushCachesCount = subclassCount;
#endif

    OBJC_UNLOCK(&cacheUpdateLock);
    _free_internal(classes);
}
1253
1254 /***********************************************************************
1255 * _objc_flush_caches. Flush the caches of the specified class and any
1256 * of its subclasses. If cls is a meta-class, only meta-class (i.e.
1257 * class method) caches are flushed. If cls is an instance-class, both
1258 * instance-class and meta-class caches are flushed.
1259 **********************************************************************/
1260 void _objc_flush_caches (Class cls)
1261 {
1262 flush_caches (cls, YES);
1263 }
1264
1265 /***********************************************************************
1266 * do_not_remove_this_dummy_function.
1267 **********************************************************************/
1268 void do_not_remove_this_dummy_function (void)
1269 {
1270 (void) class_nextMethodList (NULL, NULL);
1271 }
1272
1273
1274 /***********************************************************************
1275 * class_nextMethodList.
1276 * External version of nextMethodList().
1277 *
1278 * This function is not fully thread-safe. A series of calls to
1279 * class_nextMethodList() may fail if methods are added to or removed
1280 * from the class between calls.
1281 * If methods are added between calls to class_nextMethodList(), it may
1282 * return previously-returned method lists again, and may fail to return
1283 * newly-added lists.
1284 * If methods are removed between calls to class_nextMethodList(), it may
1285 * omit surviving method lists or simply crash.
1286 **********************************************************************/
1287 OBJC_EXPORT struct objc_method_list * class_nextMethodList (Class cls,
1288 void ** it)
1289 {
1290 struct objc_method_list *result;
1291 OBJC_LOCK(&methodListLock);
1292 result = nextMethodList(cls, it);
1293 OBJC_UNLOCK(&methodListLock);
1294 return result;
1295 }
1296
1297 /***********************************************************************
1298 * _dummy.
1299 **********************************************************************/
1300 void _dummy (void)
1301 {
1302 (void) class_nextMethodList (Nil, NULL);
1303 }
1304
1305 /***********************************************************************
1306 * class_addMethods.
1307 *
1308 * Formerly class_addInstanceMethods ()
1309 **********************************************************************/
1310 void class_addMethods (Class cls,
1311 struct objc_method_list * meths)
1312 {
1313 // Add the methods.
1314 OBJC_LOCK(&methodListLock);
1315 _objc_insertMethods(cls, meths);
1316 OBJC_UNLOCK(&methodListLock);
1317
1318 // Must flush when dynamically adding methods. No need to flush
1319 // all the class method caches. If cls is a meta class, though,
1320 // this will still flush it and any of its sub-meta classes.
1321 flush_caches (cls, NO);
1322 }
1323
1324 /***********************************************************************
1325 * class_addClassMethods.
1326 *
1327 * Obsolete (for binary compatibility only).
1328 **********************************************************************/
1329 void class_addClassMethods (Class cls,
1330 struct objc_method_list * meths)
1331 {
1332 class_addMethods (((struct objc_class *) cls)->isa, meths);
1333 }
1334
1335 /***********************************************************************
1336 * class_removeMethods.
1337 **********************************************************************/
1338 void class_removeMethods (Class cls,
1339 struct objc_method_list * meths)
1340 {
1341 // Remove the methods
1342 OBJC_LOCK(&methodListLock);
1343 _objc_removeMethods(cls, meths);
1344 OBJC_UNLOCK(&methodListLock);
1345
1346 // Must flush when dynamically removing methods. No need to flush
1347 // all the class method caches. If cls is a meta class, though,
1348 // this will still flush it and any of its sub-meta classes.
1349 flush_caches (cls, NO);
1350 }
1351
1352 /***********************************************************************
1353 * addClassToOriginalClass. Add to a hash table of classes involved in
1354 * a posing situation. We use this when we need to get to the "original"
1355 * class for some particular name through the function objc_getOrigClass.
1356 * For instance, the implementation of [super ...] will use this to be
1357 * sure that it gets hold of the correct super class, so that no infinite
1358 * loops will occur if the class it appears in is involved in posing.
1359 *
1360 * We use the classLock to guard the hash table.
1361 *
1362 * See tracker bug #51856.
1363 **********************************************************************/
1364
// Posing bookkeeping, both guarded by classLock and created lazily:
// class name -> original class, registered before posing obscures names.
static NXMapTable * posed_class_hash = NULL;
// posing class -> the class recorded by addClassToOriginalClass().
static NXMapTable * posed_class_to_original_class_hash = NULL;
1367
1368 static void addClassToOriginalClass (Class posingClass,
1369 Class originalClass)
1370 {
1371 // Install hash table when it is first needed
1372 if (!posed_class_to_original_class_hash)
1373 {
1374 posed_class_to_original_class_hash =
1375 NXCreateMapTableFromZone (NXPtrValueMapPrototype,
1376 8,
1377 _objc_internal_zone ());
1378 }
1379
1380 // Add pose to hash table
1381 NXMapInsert (posed_class_to_original_class_hash,
1382 posingClass,
1383 originalClass);
1384 }
1385
1386 /***********************************************************************
1387 * getOriginalClassForPosingClass.
1388 **********************************************************************/
1389 Class getOriginalClassForPosingClass (Class posingClass)
1390 {
1391 return NXMapGet (posed_class_to_original_class_hash, posingClass);
1392 }
1393
1394 /***********************************************************************
1395 * objc_getOrigClass.
1396 **********************************************************************/
1397 Class objc_getOrigClass (const char * name)
1398 {
1399 struct objc_class * ret;
1400
1401 // Look for class among the posers
1402 ret = Nil;
1403 OBJC_LOCK(&classLock);
1404 if (posed_class_hash)
1405 ret = (Class) NXMapGet (posed_class_hash, name);
1406 OBJC_UNLOCK(&classLock);
1407 if (ret)
1408 return ret;
1409
1410 // Not a poser. Do a normal lookup.
1411 ret = objc_getClass (name);
1412 if (!ret)
1413 _objc_inform ("class `%s' not linked into application", name);
1414
1415 return ret;
1416 }
1417
1418 /***********************************************************************
1419 * _objc_addOrigClass. This function is only used from class_poseAs.
1420 * Registers the original class names, before they get obscured by
1421 * posing, so that [super ..] will work correctly from categories
1422 * in posing classes and in categories in classes being posed for.
1423 **********************************************************************/
1424 static void _objc_addOrigClass (Class origClass)
1425 {
1426 OBJC_LOCK(&classLock);
1427
1428 // Create the poser's hash table on first use
1429 if (!posed_class_hash)
1430 {
1431 posed_class_hash = NXCreateMapTableFromZone (NXStrValueMapPrototype,
1432 8,
1433 _objc_internal_zone ());
1434 }
1435
1436 // Add the named class iff it is not already there (or collides?)
1437 if (NXMapGet (posed_class_hash, ((struct objc_class *)origClass)->name) == 0)
1438 NXMapInsert (posed_class_hash, ((struct objc_class *)origClass)->name, origClass);
1439
1440 OBJC_UNLOCK(&classLock);
1441 }
1442
1443 /***********************************************************************
1444 * class_poseAs.
1445 *
1446 * !!! class_poseAs () does not currently flush any caches.
1447 **********************************************************************/
Class class_poseAs (Class imposter,
                    Class original)
{
    struct objc_class * clsObject;
    char * imposterNamePtr;
    NXHashTable * class_hash;
    NXHashState state;
    struct objc_class * copy;
#ifdef OBJC_CLASS_REFS
    header_info * hInfo;
#endif

    // Trivial case is easy
    if (imposter == original)
        return imposter;

    // Imposter must be an immediate subclass of the original
    if (((struct objc_class *)imposter)->super_class != original) {
        __objc_error(imposter, _errNotSuper, ((struct objc_class *)imposter)->name, ((struct objc_class *)original)->name);
    }

    // Can't pose when you have instance variables (how could it work?)
    if (((struct objc_class *)imposter)->ivars) {
        __objc_error(imposter, _errNewVars, ((struct objc_class *)imposter)->name, ((struct objc_class *)original)->name, ((struct objc_class *)imposter)->name);
    }

    // Build a string to use to replace the name of the original class.
    // The buffer holds "_%<originalName>"; see below for how the
    // class and meta class names are carved out of it.
#define imposterNamePrefix "_%"
    imposterNamePtr = _malloc_internal(strlen(((struct objc_class *)original)->name) + strlen(imposterNamePrefix) + 1);
    strcpy(imposterNamePtr, imposterNamePrefix);
    strcat(imposterNamePtr, ((struct objc_class *)original)->name);
#undef imposterNamePrefix

    // We lock the class hashtable, so we are thread safe with respect to
    // calls to objc_getClass (). However, the class names are not
    // changed atomically, nor are all of the subclasses updated
    // atomically. I have ordered the operations so that you will
    // never crash, but you may get inconsistent results....

    // Register the original class so that [super ..] knows
    // exactly which classes are the "original" classes.
    _objc_addOrigClass (original);
    _objc_addOrigClass (imposter);

    // Copy the imposter, so that the imposter can continue
    // its normal life in addition to changing the behavior of
    // the original. As a hack we don't bother to copy the metaclass.
    // For some reason we modify the original rather than the copy.
    copy = (*_zoneAlloc)(imposter->isa, sizeof(struct objc_class), _objc_internal_zone());
    memmove(copy, imposter, sizeof(struct objc_class));

    OBJC_LOCK(&classLock);

    class_hash = objc_getClasses ();

    // Remove both the imposter and the original class.
    NXHashRemove (class_hash, imposter);
    NXHashRemove (class_hash, original);

    NXHashInsert (class_hash, copy);
    addClassToOriginalClass (imposter, copy);

    // Mark the imposter as such
    _class_setInfo(imposter, CLS_POSING);
    _class_setInfo(imposter->isa, CLS_POSING);

    // Change the name of the imposter to that of the original class.
    ((struct objc_class *)imposter)->name = ((struct objc_class *)original)->name;
    ((struct objc_class *)imposter)->isa->name = ((struct objc_class *)original)->isa->name;

    // Also copy the version field to avoid archiving problems.
    ((struct objc_class *)imposter)->version = ((struct objc_class *)original)->version;

    // Change all subclasses of the original to point to the imposter.
    state = NXInitHashState (class_hash);
    while (NXNextHashState (class_hash, &state, (void **) &clsObject))
    {
        // Walk up from each class looking for `original` as a direct
        // superclass; stop at the imposter or its copy.
        while ((clsObject) && (clsObject != imposter) &&
               (clsObject != copy))
        {
            if (clsObject->super_class == original)
            {
                clsObject->super_class = imposter;
                clsObject->isa->super_class = ((struct objc_class *)imposter)->isa;
                // We must flush caches here!
                break;
            }

            clsObject = clsObject->super_class;
        }
    }

#ifdef OBJC_CLASS_REFS
    // Replace the original with the imposter in all class refs
    // Major loop - process all headers
    for (hInfo = _objc_headerStart(); hInfo != NULL; hInfo = hInfo->next)
    {
        Class * cls_refs;
        unsigned int refCount;
        unsigned int index;

        // Get refs associated with this header
        cls_refs = (Class *) _getObjcClassRefs ((headerType *) hInfo->mhdr, &refCount);
        if (!cls_refs || !refCount)
            continue;

        // Minor loop - process this header's refs
        cls_refs = (Class *) ((unsigned long) cls_refs + hInfo->image_slide);
        for (index = 0; index < refCount; index += 1)
        {
            if (cls_refs[index] == original)
                cls_refs[index] = imposter;
        }
    }
#endif // OBJC_CLASS_REFS

    // Change the name of the original class.
    // The class becomes "%Name" (buffer + 1) and its meta class
    // "_%Name" (the whole buffer).
    ((struct objc_class *)original)->name = imposterNamePtr + 1;
    ((struct objc_class *)original)->isa->name = imposterNamePtr;

    // Restore the imposter and the original class with their new names.
    NXHashInsert (class_hash, imposter);
    NXHashInsert (class_hash, original);

    OBJC_UNLOCK(&classLock);

    return imposter;
}
1576
1577 /***********************************************************************
1578 * _freedHandler.
1579 **********************************************************************/
1580 static void _freedHandler (id self,
1581 SEL sel)
1582 {
1583 __objc_error (self, _errFreedObject, SELNAME(sel), self);
1584 }
1585
1586 /***********************************************************************
1587 * _nonexistentHandler.
1588 **********************************************************************/
1589 static void _nonexistentHandler (id self,
1590 SEL sel)
1591 {
1592 __objc_error (self, _errNonExistentObject, SELNAME(sel), self);
1593 }
1594
1595 /***********************************************************************
1596 * class_respondsToMethod.
1597 *
1598 * Called from -[Object respondsTo:] and +[Object instancesRespondTo:]
1599 **********************************************************************/
1600 BOOL class_respondsToMethod (Class cls,
1601 SEL sel)
1602 {
1603 Method meth;
1604 IMP imp;
1605
1606 // No one responds to zero!
1607 if (!sel)
1608 return NO;
1609
1610 imp = _cache_getImp(cls, sel);
1611 if (imp) {
1612 // Found method in cache.
1613 // If the cache entry is forward::, the class does not respond to sel.
1614 return (imp != &_objc_msgForward);
1615 }
1616
1617 // Handle cache miss
1618 OBJC_LOCK(&methodListLock);
1619 meth = _getMethod(cls, sel);
1620 OBJC_UNLOCK(&methodListLock);
1621 if (meth) {
1622 _cache_fill(cls, meth, sel);
1623 return YES;
1624 }
1625
1626 // Not implemented. Use _objc_msgForward.
1627 _cache_addForwardEntry(cls, sel);
1628
1629 return NO;
1630 }
1631
1632
1633 /***********************************************************************
1634 * class_lookupMethod.
1635 *
1636 * Called from -[Object methodFor:] and +[Object instanceMethodFor:]
1637 **********************************************************************/
1638 IMP class_lookupMethod (Class cls,
1639 SEL sel)
1640 {
1641 IMP imp;
1642
1643 // No one responds to zero!
1644 if (!sel) {
1645 __objc_error(cls, _errBadSel, sel);
1646 }
1647
1648 imp = _cache_getImp(cls, sel);
1649 if (imp) return imp;
1650
1651 // Handle cache miss
1652 return _class_lookupMethodAndLoadCache (cls, sel);
1653 }
1654
1655 /***********************************************************************
1656 * lookupNamedMethodInMethodList
1657 * Only called to find +load/-.cxx_construct/-.cxx_destruct methods,
1658 * without fixing up the entire method list.
1659 * The class is not yet in use, so methodListLock is not taken.
1660 **********************************************************************/
1661 __private_extern__ IMP lookupNamedMethodInMethodList(struct objc_method_list *mlist, const char *meth_name)
1662 {
1663 Method m = meth_name ? _findNamedMethodInList(mlist, meth_name) : NULL;
1664 return (m ? m->method_imp : NULL);
1665 }
1666
1667
/***********************************************************************
* _cache_malloc.
* Allocate a zero-filled method cache with slotCount buckets.
* slotCount is expected to be a power of two; mask is set to slotCount-1.
*
* Called from _cache_create() and cache_expand()
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static Cache _cache_malloc(int slotCount)
{
    Cache new_cache;
    size_t size;

    // Allocate table (why not check for failure?)
    size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount);
#ifdef OBJC_INSTRUMENTED
    // Custom cache allocator can't handle instrumentation.
    size += sizeof(CacheInstrumentation);
    new_cache = _calloc_internal(size, 1);
    new_cache->mask = slotCount - 1;
#else
    // Small caches (or forced internal zone) come from the internal
    // allocator; larger ones come from the custom cache allocator.
    if (size < CACHE_QUANTUM || UseInternalZone) {
        new_cache = _calloc_internal(size, 1);
        new_cache->mask = slotCount - 1;
        // occupied and buckets and instrumentation are all zero
    } else {
        new_cache = cache_allocator_calloc(size);
        // mask is already set by the cache allocator for this quantum size
        // occupied and buckets and instrumentation are all zero
    }
#endif

    return new_cache;
}
1700
1701
/***********************************************************************
* _cache_create.
* Allocate a fresh initial cache for cls and install it, replacing the
* shared emptyCache placeholder.
*
* Called from _cache_expand().
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
Cache _cache_create (Class cls)
{
    Cache new_cache;
    int slotCount;

    // Select appropriate size.
    // Metaclasses get a smaller initial cache than ordinary classes.
    slotCount = (ISMETA(cls)) ? INIT_META_CACHE_SIZE : INIT_CACHE_SIZE;

    new_cache = _cache_malloc(slotCount);

    // Install the cache
    ((struct objc_class *)cls)->cache = new_cache;

    // Clear the cache flush flag so that we will not flush this cache
    // before expanding it for the first time.
    _class_clearInfo(cls, CLS_FLUSH_CACHE);

    // Clear the grow flag so that we will re-use the current storage,
    // rather than actually grow the cache, when expanding the cache
    // for the first time
    if (_class_slow_grow)
        _class_clearInfo(cls, CLS_GROW_CACHE);

    // Return our creation
    return new_cache;
}
1734
/***********************************************************************
* _cache_expand.
* Replace cls's cache with a bigger one (or, in slow-grow mode, empty and
* reuse the current storage every other call).  Returns the cache now
* installed on cls.
*
* Called from _cache_fill ()
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static Cache _cache_expand (Class cls)
{
    Cache old_cache;
    Cache new_cache;
    unsigned int slotCount;
    unsigned int index;

    // First growth goes from emptyCache to a real one
    old_cache = ((struct objc_class *)cls)->cache;
    if (old_cache == &emptyCache)
        return _cache_create (cls);

    // iff _class_slow_grow, trade off actual cache growth with re-using
    // the current one, so that growth only happens every odd time
    if (_class_slow_grow)
    {
        // CLS_GROW_CACHE controls every-other-time behavior. If it
        // is non-zero, let the cache grow this time, but clear the
        // flag so the cache is reused next time
        if ((((struct objc_class * )cls)->info & CLS_GROW_CACHE) != 0)
            _class_clearInfo(cls, CLS_GROW_CACHE);

        // Reuse the current cache storage this time
        else
        {
            // Clear the valid-entry counter
            old_cache->occupied = 0;

            // Invalidate all the cache entries
            for (index = 0; index < old_cache->mask + 1; index += 1)
            {
                // Remember what this entry was, so we can possibly
                // deallocate it after the bucket has been invalidated
                Method oldEntry = old_cache->buckets[index];
                // Skip invalid entry
                if (!CACHE_BUCKET_VALID(old_cache->buckets[index]))
                    continue;

                // Invalidate this entry
                CACHE_BUCKET_VALID(old_cache->buckets[index]) = NULL;

                // Deallocate "forward::" entry
                // (forward:: Methods are runtime-allocated fakes owned by
                // the cache; real Methods belong to their method lists.)
                if (CACHE_BUCKET_IMP(oldEntry) == &_objc_msgForward)
                {
                    _cache_collect_free (oldEntry, sizeof(struct objc_method), NO);
                }
            }

            // Set the slow growth flag so the cache is next grown
            _class_setInfo(cls, CLS_GROW_CACHE);

            // Return the same old cache, freshly emptied
            return old_cache;
        }

    }

    // Double the cache size
    slotCount = (old_cache->mask + 1) << 1;

    new_cache = _cache_malloc(slotCount);

#ifdef OBJC_INSTRUMENTED
    // Propagate the instrumentation data
    {
        CacheInstrumentation * oldCacheData;
        CacheInstrumentation * newCacheData;

        oldCacheData = CACHE_INSTRUMENTATION(old_cache);
        newCacheData = CACHE_INSTRUMENTATION(new_cache);
        bcopy ((const char *)oldCacheData, (char *)newCacheData, sizeof(CacheInstrumentation));
    }
#endif

    // iff _class_uncache, copy old cache entries into the new cache
    if (_class_uncache == 0)
    {
        int newMask;

        newMask = new_cache->mask;

        // Look at all entries in the old cache
        for (index = 0; index < old_cache->mask + 1; index += 1)
        {
            int index2;

            // Skip invalid entry
            if (!CACHE_BUCKET_VALID(old_cache->buckets[index]))
                continue;

            // Hash the old entry into the new table
            index2 = CACHE_HASH(CACHE_BUCKET_NAME(old_cache->buckets[index]),
                                newMask);

            // Find an available spot, at or following the hashed spot;
            // Guaranteed to not infinite loop, because table has grown
            for (;;)
            {
                if (!CACHE_BUCKET_VALID(new_cache->buckets[index2]))
                {
                    new_cache->buckets[index2] = old_cache->buckets[index];
                    break;
                }

                index2 += 1;
                index2 &= newMask;
            }

            // Account for the addition
            new_cache->occupied += 1;
        }

        // Set the cache flush flag so that we will flush this cache
        // before expanding it again.
        _class_setInfo(cls, CLS_FLUSH_CACHE);
    }

    // Deallocate "forward::" entries from the old cache
    // (they were not copied, so they would otherwise leak)
    else
    {
        for (index = 0; index < old_cache->mask + 1; index += 1)
        {
            if (CACHE_BUCKET_VALID(old_cache->buckets[index]) &&
                CACHE_BUCKET_IMP(old_cache->buckets[index]) == &_objc_msgForward)
            {
                _cache_collect_free (old_cache->buckets[index], sizeof(struct objc_method), NO);
            }
        }
    }

    // Install new cache
    ((struct objc_class *)cls)->cache = new_cache;

    // Deallocate old cache, try freeing all the garbage
    // NOTE(review): mask * sizeof(Method) omits the cache header and one
    // bucket, so it looks undersized; presumably only used for garbage
    // bookkeeping - confirm against _cache_collect_free.
    _cache_collect_free (old_cache, old_cache->mask * sizeof(Method), YES);
    return new_cache;
}
1878
/***********************************************************************
* instrumentObjcMessageSends/logObjcMessageSends.
*
* LogObjCMessageSend: default message-send logger.  Appends one
* "+/- objectsClass implementingClass selector" line to
* /tmp/msgSends-<pid>, creating and opening the file on first use.
* Returns 0 so the caller does NOT cache the method (keeping future
* sends visible to the logger); returns 1 if the log file could not
* be opened (logging is then disabled).
**********************************************************************/
static int LogObjCMessageSend (BOOL isClassMethod,
                               const char * objectsClass,
                               const char * implementingClass,
                               SEL selector)
{
    char buf[ 1024 ];

    // Create/open the log file lazily, on the first logged send.
    if (objcMsgLogFD == (-1))
    {
        snprintf (buf, sizeof(buf), "/tmp/msgSends-%d", (int) getpid ());
        objcMsgLogFD = secure_open (buf, O_WRONLY | O_CREAT, geteuid());
        if (objcMsgLogFD < 0) {
            // no log file - disable logging
            objcMsgLogEnabled = 0;
            objcMsgLogFD = -1;
            return 1;
        }
    }

    // Make the log entry
    snprintf(buf, sizeof(buf), "%c %s %s %s\n",
             isClassMethod ? '+' : '-',
             objectsClass,
             implementingClass,
             (char *) selector);

    write (objcMsgLogFD, buf, strlen(buf));

    // Tell caller to not cache the method
    return 0;
}
1914
1915 void instrumentObjcMessageSends (BOOL flag)
1916 {
1917 int enabledValue = (flag) ? 1 : 0;
1918
1919 // Shortcut NOP
1920 if (objcMsgLogEnabled == enabledValue)
1921 return;
1922
1923 // If enabling, flush all method caches so we get some traces
1924 if (flag)
1925 flush_caches (Nil, YES);
1926
1927 // Sync our log file
1928 if (objcMsgLogFD != (-1))
1929 fsync (objcMsgLogFD);
1930
1931 objcMsgLogEnabled = enabledValue;
1932 }
1933
1934 void logObjcMessageSends (ObjCLogProc logProc)
1935 {
1936 if (logProc)
1937 {
1938 objcMsgLogProc = logProc;
1939 objcMsgLogEnabled = 1;
1940 }
1941 else
1942 {
1943 objcMsgLogProc = logProc;
1944 objcMsgLogEnabled = 0;
1945 }
1946
1947 if (objcMsgLogFD != (-1))
1948 fsync (objcMsgLogFD);
1949 }
1950
1951
/***********************************************************************
* _cache_fill. Add the specified method to the specified class' cache.
* Returns NO if the cache entry wasn't added: cache was busy,
* class is still being initialized, new entry is a duplicate.
*
* On YES, the cache holds a reference to smt (relevant for the
* runtime-allocated forward:: entries, which the cache then owns).
*
* Called only from _class_lookupMethodAndLoadCache and
* class_respondsToMethod and _cache_addForwardEntry.
*
* Cache locks: cacheUpdateLock must not be held.
**********************************************************************/
static BOOL _cache_fill(Class cls, Method smt, SEL sel)
{
    unsigned int newOccupied;
    arith_t index;
    Method *buckets;
    Cache cache;

    // Never cache before +initialize is done
    if (!ISINITIALIZED(cls)) {
        return NO;
    }

    // Keep tally of cache additions
    totalCacheFills += 1;

    OBJC_LOCK(&cacheUpdateLock);

    cache = ((struct objc_class *)cls)->cache;

    // Check for duplicate entries, if we're in the mode
    // (debug-only scan; counts entries whose name matches smt's).
    if (traceDuplicates)
    {
        int index2;
        arith_t mask = cache->mask;
        buckets = cache->buckets;

        // Scan the cache
        for (index2 = 0; index2 < mask + 1; index2 += 1)
        {
            // Skip invalid or non-duplicate entry
            if ((!CACHE_BUCKET_VALID(buckets[index2])) ||
                (strcmp ((char *) CACHE_BUCKET_NAME(buckets[index2]), (char *) smt->method_name) != 0))
                continue;

            // Tally duplication, but report iff wanted
            cacheFillDuplicates += 1;
            if (traceDuplicatesVerbose)
            {
                _objc_inform ("Cache fill duplicate #%d: found %x adding %x: %s\n",
                              cacheFillDuplicates,
                              (unsigned int) CACHE_BUCKET_NAME(buckets[index2]),
                              (unsigned int) smt->method_name,
                              (char *) smt->method_name);
            }
        }
    }

    // Make sure the entry wasn't added to the cache by some other thread
    // before we grabbed the cacheUpdateLock.
    // Don't use _cache_getMethod() because _cache_getMethod() doesn't
    // return forward:: entries.
    if (_cache_getImp(cls, sel)) {
        OBJC_UNLOCK(&cacheUpdateLock);
        return NO; // entry is already cached, didn't add new one
    }

    // Use the cache as-is if it is less than 3/4 full
    newOccupied = cache->occupied + 1;
    if ((newOccupied * 4) <= (cache->mask + 1) * 3) {
        // Cache is less than 3/4 full.
        cache->occupied = newOccupied;
    } else {
        // Cache is too full. Flush it or expand it.
        // (_cache_flush keeps the same storage; _cache_expand may not.)
        if ((((struct objc_class * )cls)->info & CLS_FLUSH_CACHE) != 0) {
            _cache_flush (cls);
        } else {
            cache = _cache_expand (cls);
        }

        // Account for the addition
        cache->occupied += 1;
    }

    // Insert the new entry. This can be done by either:
    // (a) Scanning for the first unused spot. Easy!
    // (b) Opening up an unused spot by sliding existing
    //     entries down by one. The benefit of this
    //     extra work is that it puts the most recently
    //     loaded entries closest to where the selector
    //     hash starts the search.
    //
    // The loop is a little more complicated because there
    // are two kinds of entries, so there have to be two ways
    // to slide them.
    buckets = cache->buckets;
    index = CACHE_HASH(sel, cache->mask);
    for (;;)
    {
        // Slide existing entries down by one
        Method saveMethod;

        // Copy current entry to a local
        saveMethod = buckets[index];

        // Copy previous entry (or new entry) to current slot
        buckets[index] = smt;

        // Done if current slot had been invalid
        if (saveMethod == NULL)
            break;

        // Prepare to copy saved value into next slot
        smt = saveMethod;

        // Move on to next slot
        index += 1;
        index &= cache->mask;
    }

    OBJC_UNLOCK(&cacheUpdateLock);

    return YES; // successfully added new cache entry
}
2075
2076
2077 /***********************************************************************
2078 * _cache_addForwardEntry
2079 * Add a forward:: entry for the given selector to cls's method cache.
2080 * Does nothing if the cache addition fails for any reason.
2081 * Called from class_respondsToMethod and _class_lookupMethodAndLoadCache.
2082 * Cache locks: cacheUpdateLock must not be held.
2083 **********************************************************************/
2084 static void _cache_addForwardEntry(Class cls, SEL sel)
2085 {
2086 Method smt;
2087
2088 smt = _malloc_internal(sizeof(struct objc_method));
2089 smt->method_name = sel;
2090 smt->method_types = "";
2091 smt->method_imp = &_objc_msgForward;
2092 if (! _cache_fill(cls, smt, sel)) {
2093 // Entry not added to cache. Don't leak the method struct.
2094 _free_internal(smt);
2095 }
2096 }
2097
2098
/***********************************************************************
* _cache_flush. Invalidate all valid entries in the given class' cache,
* and clear the CLS_FLUSH_CACHE in the cls->info.
* The cache storage itself is kept and reused.
*
* Called from flush_caches() and _cache_fill()
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static void _cache_flush (Class cls)
{
    Cache cache;
    unsigned int index;

    // Locate cache. Ignore unused cache.
    cache = ((struct objc_class *)cls)->cache;
    if (cache == NULL || cache == &emptyCache)
        return;

#ifdef OBJC_INSTRUMENTED
    {
        CacheInstrumentation * cacheData;

        // Tally this flush
        cacheData = CACHE_INSTRUMENTATION(cache);
        cacheData->flushCount += 1;
        cacheData->flushedEntries += cache->occupied;
        if (cache->occupied > cacheData->maxFlushedEntries)
            cacheData->maxFlushedEntries = cache->occupied;
    }
#endif

    // Traverse the cache
    for (index = 0; index <= cache->mask; index += 1)
    {
        // Remember what this entry was, so we can possibly
        // deallocate it after the bucket has been invalidated
        Method oldEntry = cache->buckets[index];

        // Invalidate this entry
        CACHE_BUCKET_VALID(cache->buckets[index]) = NULL;

        // Deallocate "forward::" entry
        // (these fake Methods are owned by the cache; real Methods
        // belong to their method lists and must not be freed here)
        if (oldEntry && oldEntry->method_imp == &_objc_msgForward)
            _cache_collect_free (oldEntry, sizeof(struct objc_method), NO);
    }

    // Clear the valid-entry counter
    cache->occupied = 0;

    // Clear the cache flush flag so that we will not flush this cache
    // before expanding it again.
    _class_clearInfo(cls, CLS_FLUSH_CACHE);
}
2151
/***********************************************************************
* _objc_getFreedObjectClass. Return a pointer to the dummy freed
* object class. Freed objects get their isa pointers replaced with
* a pointer to the freedObjectClass, so that we can catch usages of
* the freed object.
**********************************************************************/
Class _objc_getFreedObjectClass (void)
{
    // freedObjectClass is a file-scope sentinel class structure.
    return (Class) &freedObjectClass;
}
2162
/***********************************************************************
* _objc_getNonexistentClass. Return a pointer to the dummy nonexistent
* object class. This is used when, for example, mapping the class
* refs for an image, and the class can not be found, so that we can
* catch later uses of the non-existent class.
**********************************************************************/
Class _objc_getNonexistentClass (void)
{
    // nonexistentObjectClass is a file-scope sentinel class structure.
    return (Class) &nonexistentObjectClass;
}
2173
2174
/***********************************************************************
* struct _objc_initializing_classes
* Per-thread list of classes currently being initialized by that thread.
* During initialization, that thread is allowed to send messages to that
* class, but other threads have to wait.
* The list is a simple array of metaclasses (the metaclass stores
* the initialization state).
**********************************************************************/
typedef struct _objc_initializing_classes {
    int classesAllocated;               // capacity of the metaclasses array
    struct objc_class** metaclasses;    // entries; NULL marks an empty slot
} _objc_initializing_classes;
2187
2188
/***********************************************************************
* _fetchInitializingClassList
* Return the list of classes being initialized by this thread.
* If create == YES, create the list when no classes are being initialized by this thread.
* If create == NO, return NULL when no classes are being initialized by this thread.
* The list lives in thread-specific data keyed by _objc_pthread_key.
**********************************************************************/
static _objc_initializing_classes *_fetchInitializingClassList(BOOL create)
{
    _objc_pthread_data *data;
    _objc_initializing_classes *list;
    struct objc_class **classes;

    // Lazily create this thread's runtime-private data block.
    data = pthread_getspecific(_objc_pthread_key);
    if (data == NULL) {
        if (!create) {
            return NULL;
        } else {
            data = _calloc_internal(1, sizeof(_objc_pthread_data));
            pthread_setspecific(_objc_pthread_key, data);
        }
    }

    // Lazily create the initializing-classes list inside it.
    list = data->initializingClasses;
    if (list == NULL) {
        if (!create) {
            return NULL;
        } else {
            list = _calloc_internal(1, sizeof(_objc_initializing_classes));
            data->initializingClasses = list;
        }
    }

    classes = list->metaclasses;
    if (classes == NULL) {
        // If _objc_initializing_classes exists, allocate metaclass array,
        // even if create == NO.
        // Allow 4 simultaneous class inits on this thread before realloc.
        list->classesAllocated = 4;
        classes = _calloc_internal(list->classesAllocated, sizeof(struct objc_class *));
        list->metaclasses = classes;
    }
    return list;
}
2232
2233
2234 /***********************************************************************
2235 * _destroyInitializingClassList
2236 * Deallocate memory used by the given initialization list.
2237 * Any part of the list may be NULL.
2238 * Called from _objc_pthread_destroyspecific().
2239 **********************************************************************/
2240 void _destroyInitializingClassList(_objc_initializing_classes *list)
2241 {
2242 if (list != NULL) {
2243 if (list->metaclasses != NULL) {
2244 _free_internal(list->metaclasses);
2245 }
2246 _free_internal(list);
2247 }
2248 }
2249
2250
2251 /***********************************************************************
2252 * _thisThreadIsInitializingClass
2253 * Return TRUE if this thread is currently initializing the given class.
2254 **********************************************************************/
2255 static BOOL _thisThreadIsInitializingClass(struct objc_class *cls)
2256 {
2257 int i;
2258
2259 _objc_initializing_classes *list = _fetchInitializingClassList(NO);
2260 if (list) {
2261 cls = GETMETA(cls);
2262 for (i = 0; i < list->classesAllocated; i++) {
2263 if (cls == list->metaclasses[i]) return YES;
2264 }
2265 }
2266
2267 // no list or not found in list
2268 return NO;
2269 }
2270
2271
/***********************************************************************
* _setThisThreadIsInitializingClass
* Record that this thread is currently initializing the given class.
* This thread will be allowed to send messages to the class, but
* other threads will have to wait.
**********************************************************************/
static void _setThisThreadIsInitializingClass(struct objc_class *cls)
{
    int i;
    _objc_initializing_classes *list = _fetchInitializingClassList(YES);
    // The list stores metaclasses.
    cls = GETMETA(cls);

    // paranoia: explicitly disallow duplicates
    for (i = 0; i < list->classesAllocated; i++) {
        if (cls == list->metaclasses[i]) {
            _objc_fatal("thread is already initializing this class!");
            return; // already the initializer
        }
    }

    // Use the first empty (NULL) slot, if any.
    for (i = 0; i < list->classesAllocated; i++) {
        if (0 == list->metaclasses[i]) {
            list->metaclasses[i] = cls;
            return;
        }
    }

    // class list is full - reallocate
    // (i now equals the old capacity: the first of the new slots)
    list->classesAllocated = list->classesAllocated * 2 + 1;
    list->metaclasses = _realloc_internal(list->metaclasses, list->classesAllocated * sizeof(struct objc_class *));
    // zero out the new entries
    list->metaclasses[i++] = cls;
    for ( ; i < list->classesAllocated; i++) {
        list->metaclasses[i] = NULL;
    }
}
2308
2309
2310 /***********************************************************************
2311 * _setThisThreadIsNotInitializingClass
2312 * Record that this thread is no longer initializing the given class.
2313 **********************************************************************/
2314 static void _setThisThreadIsNotInitializingClass(struct objc_class *cls)
2315 {
2316 int i;
2317
2318 _objc_initializing_classes *list = _fetchInitializingClassList(NO);
2319 if (list) {
2320 cls = GETMETA(cls);
2321 for (i = 0; i < list->classesAllocated; i++) {
2322 if (cls == list->metaclasses[i]) {
2323 list->metaclasses[i] = NULL;
2324 return;
2325 }
2326 }
2327 }
2328
2329 // no list or not found in list
2330 _objc_fatal("thread is not initializing this class!");
2331 }
2332
2333
/***********************************************************************
* class_initialize. Send the '+initialize' message on demand to any
* uninitialized class. Force initialization of superclasses first.
*
* Thread-safe: exactly one thread wins the right to run +initialize
* (via CLS_INITIALIZING under classInitLock); other threads block on
* classInitWaitCond until CLS_INITIALIZED is set.
*
* Called only from _class_lookupMethodAndLoadCache (or itself).
**********************************************************************/
static void class_initialize(struct objc_class *cls)
{
    struct objc_class *infoCls = GETMETA(cls);
    BOOL reallyInitialize = NO;

    // Get the real class from the metaclass. The superclass chain
    // hangs off the real class only.
    // fixme ick
    if (ISMETA(cls)) {
        if (strncmp(cls->name, "_%", 2) == 0) {
            // Posee's meta's name is smashed and isn't in the class_hash,
            // so objc_getClass doesn't work.
            char *baseName = strchr(cls->name, '%'); // get posee's real name
            cls = objc_getClass(baseName);
        } else {
            cls = objc_getClass(cls->name);
        }
    }

    // Make sure super is done initializing BEFORE beginning to initialize cls.
    // See note about deadlock above.
    if (cls->super_class && !ISINITIALIZED(cls->super_class)) {
        class_initialize(cls->super_class);
    }

    // Try to atomically set CLS_INITIALIZING.
    // Only the thread that makes this transition runs +initialize.
    pthread_mutex_lock(&classInitLock);
    if (!ISINITIALIZED(cls) && !ISINITIALIZING(cls)) {
        _class_setInfo(infoCls, CLS_INITIALIZING);
        reallyInitialize = YES;
    }
    pthread_mutex_unlock(&classInitLock);

    if (reallyInitialize) {
        // We successfully set the CLS_INITIALIZING bit. Initialize the class.

        // Record that we're initializing this class so we can message it.
        _setThisThreadIsInitializingClass(cls);

        // Send the +initialize message.
        // Note that +initialize is sent to the superclass (again) if
        // this class doesn't implement +initialize. 2157218
        [(id)cls initialize];

        // Done initializing. Update the info bits and notify waiting threads.
        pthread_mutex_lock(&classInitLock);
        _class_changeInfo(infoCls, CLS_INITIALIZED, CLS_INITIALIZING);
        pthread_cond_broadcast(&classInitWaitCond);
        pthread_mutex_unlock(&classInitLock);
        _setThisThreadIsNotInitializingClass(cls);
        return;
    }

    else if (ISINITIALIZING(cls)) {
        // We couldn't set INITIALIZING because INITIALIZING was already set.
        // If this thread set it earlier, continue normally.
        // If some other thread set it, block until initialize is done.
        // It's ok if INITIALIZING changes to INITIALIZED while we're here,
        // because we safely check for INITIALIZED inside the lock
        // before blocking.
        if (_thisThreadIsInitializingClass(cls)) {
            return;
        } else {
            pthread_mutex_lock(&classInitLock);
            while (!ISINITIALIZED(cls)) {
                pthread_cond_wait(&classInitWaitCond, &classInitLock);
            }
            pthread_mutex_unlock(&classInitLock);
            return;
        }
    }

    else if (ISINITIALIZED(cls)) {
        // Set CLS_INITIALIZING failed because someone else already
        // initialized the class. Continue normally.
        // NOTE this check must come AFTER the ISINITIALIZING case.
        // Otherwise: Another thread is initializing this class. ISINITIALIZED
        // is false. Skip this clause. Then the other thread finishes
        // initialization and sets INITIALIZING=no and INITIALIZED=yes.
        // Skip the ISINITIALIZING clause. Die horribly.
        return;
    }

    else {
        // We shouldn't be here.
        _objc_fatal("thread-safe class init in objc runtime is buggy!");
    }
}
2428
2429
/***********************************************************************
* _class_lookupMethodAndLoadCache.
* Full (slow-path) method lookup: ensure +initialize has run, then walk
* the class and its superclasses, checking each cache and method list.
* The winning IMP is cached on cls; if nothing is found, a forward::
* entry is cached and _objc_msgForward is returned.
*
* Called only from objc_msgSend, objc_msgSendSuper and class_lookupMethod.
**********************************************************************/
IMP _class_lookupMethodAndLoadCache (Class cls,
                                     SEL sel)
{
    struct objc_class * curClass;
    Method meth;
    IMP methodPC = NULL;

    trace(0xb300, 0, 0, 0);

    // Check for freed class
    if (cls == &freedObjectClass)
        return (IMP) _freedHandler;

    // Check for nonexistent class
    if (cls == &nonexistentObjectClass)
        return (IMP) _nonexistentHandler;

    trace(0xb301, 0, 0, 0);

    if (!ISINITIALIZED(cls)) {
        class_initialize ((struct objc_class *)cls);
        // If sel == initialize, class_initialize will send +initialize and
        // then the messenger will send +initialize again after this
        // procedure finishes. Of course, if this is not being called
        // from the messenger then it won't happen. 2778172
    }

    trace(0xb302, 0, 0, 0);

    // Outer loop - search the caches and method lists of the
    // class and its super-classes
    for (curClass = cls; curClass; curClass = ((struct objc_class * )curClass)->super_class)
    {
#ifdef PRELOAD_SUPERCLASS_CACHES
        struct objc_class *curClass2;
#endif

        trace(0xb303, 0, 0, 0);

        // Beware of thread-unsafety and double-freeing of forward::
        // entries here! See note in "Method cache locking" above.
        // The upshot is that _cache_getMethod() will return NULL
        // instead of returning a forward:: entry.
        meth = _cache_getMethod(curClass, sel, &_objc_msgForward);
        if (meth) {
            // Found the method in this class or a superclass.
            // Cache the method in this class, unless we just found it in
            // this class's cache.
            if (curClass != cls) {
#ifdef PRELOAD_SUPERCLASS_CACHES
                for (curClass2 = cls; curClass2 != curClass; curClass2 = curClass2->super_class)
                    _cache_fill (curClass2, meth, sel);
                _cache_fill (curClass, meth, sel);
#else
                _cache_fill (cls, meth, sel);
#endif
            }

            methodPC = meth->method_imp;
            break;
        }

        trace(0xb304, (int)methodPC, 0, 0);

        // Cache scan failed. Search method list.

        OBJC_LOCK(&methodListLock);
        meth = _findMethodInClass(curClass, sel);
        OBJC_UNLOCK(&methodListLock);
        if (meth) {
            // If logging is enabled, log the message send and let
            // the logger decide whether to encache the method.
            if ((objcMsgLogEnabled == 0) ||
                (objcMsgLogProc (CLS_GETINFO(((struct objc_class * )curClass),
                                             CLS_META) ? YES : NO,
                                 ((struct objc_class *)cls)->name,
                                 curClass->name, sel)))
            {
                // Cache the method implementation
                #ifdef PRELOAD_SUPERCLASS_CACHES
                for (curClass2 = cls; curClass2 != curClass; curClass2 = curClass2->super_class)
                    _cache_fill (curClass2, meth, sel);
                _cache_fill (curClass, meth, sel);
                #else
                _cache_fill (cls, meth, sel);
                #endif
            }

            methodPC = meth->method_imp;
            break;
        }

        trace(0xb305, (int)methodPC, 0, 0);
    }

    trace(0xb306, (int)methodPC, 0, 0);

    if (methodPC == NULL)
    {
        // Class and superclasses do not respond -- use forwarding
        _cache_addForwardEntry(cls, sel);
        methodPC = &_objc_msgForward;
    }

    trace(0xb30f, (int)methodPC, 0, 0);

    return methodPC;
}
2543
2544
2545 /***********************************************************************
2546 * lookupMethodInClassAndLoadCache.
2547 * Like _class_lookupMethodAndLoadCache, but does not search superclasses.
2548 * Caches and returns objc_msgForward if the method is not found in the class.
2549 **********************************************************************/
2550 static IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel)
2551 {
2552 Method meth;
2553 IMP imp;
2554
2555 // Search cache first.
2556 imp = _cache_getImp(cls, sel);
2557 if (imp) return imp;
2558
2559 // Cache miss. Search method list.
2560
2561 OBJC_LOCK(&methodListLock);
2562 meth = _findMethodInClass(cls, sel);
2563 OBJC_UNLOCK(&methodListLock);
2564
2565 if (meth) {
2566 // Hit in method list. Cache it.
2567 _cache_fill(cls, meth, sel);
2568 return meth->method_imp;
2569 } else {
2570 // Miss in method list. Cache objc_msgForward.
2571 _cache_addForwardEntry(cls, sel);
2572 return &_objc_msgForward;
2573 }
2574 }
2575
2576
2577
/***********************************************************************
* _class_changeInfo
* Atomically sets and clears some bits in cls's info field.
* set and clear must not overlap.
**********************************************************************/
// All info-bit mutations funnel through this one process-wide lock,
// making the read-modify-write of cls->info atomic across callers.
static pthread_mutex_t infoLock = PTHREAD_MUTEX_INITIALIZER;
__private_extern__ void _class_changeInfo(struct objc_class *cls,
                                          long set, long clear)
{
    pthread_mutex_lock(&infoLock);
    cls->info = (cls->info | set) & ~clear;
    pthread_mutex_unlock(&infoLock);
}
2591
2592
/***********************************************************************
* _class_setInfo
* Atomically sets some bits in cls's info field.
* Convenience wrapper around _class_changeInfo with nothing cleared.
**********************************************************************/
__private_extern__ void _class_setInfo(struct objc_class *cls, long set)
{
    _class_changeInfo(cls, set, 0);
}
2601
2602
/***********************************************************************
* _class_clearInfo
* Atomically clears some bits in cls's info field.
* Convenience wrapper around _class_changeInfo with nothing set.
**********************************************************************/
__private_extern__ void _class_clearInfo(struct objc_class *cls, long clear)
{
    _class_changeInfo(cls, 0, clear);
}
2611
2612
/***********************************************************************
* SubtypeUntil.
* Scan an @encode type string for the matching 'end' character at
* bracket-nesting depth zero, and return its offset from 'type'.
* Fatal error if the string ends before 'end' is found.
*
* Delegation.
**********************************************************************/
static int SubtypeUntil (const char * type,
                         char end)
{
    int depth = 0;
    const char * start = type;

    // Walk until the terminator appears outside any nested [], {}, ().
    while (*type)
    {
        if (depth == 0 && *type == end)
            return (int)(type - start);

        switch (*type)
        {
            case ']': case '}': case ')': depth -= 1; break;
            case '[': case '{': case '(': depth += 1; break;
        }

        type++;
    }

    _objc_fatal ("Object: SubtypeUntil: end of type encountered prematurely\n");
    return 0;
}
2642
2643 /***********************************************************************
2644 * SkipFirstType.
2645 **********************************************************************/
// Returns a pointer just past the first type in a type-encoding
// string. Leading method qualifiers (bycopy/in/out/inout/const/oneway)
// and pointer markers are consumed first; arrays, structs, and unions
// are skipped as a whole via SubtypeUntil; anything else is treated as
// a single-character basic type.
static const char * SkipFirstType (const char * type)
{
    // Consume qualifier and pointer prefixes.
    while (*type == 'O' ||      /* bycopy */
           *type == 'n' ||      /* in */
           *type == 'o' ||      /* out */
           *type == 'N' ||      /* inout */
           *type == 'r' ||      /* const */
           *type == 'V' ||      /* oneway */
           *type == '^')        /* pointer */
    {
        type++;
    }

    if (*type == '[') {
        // Array: skip the element count digits, then the element type
        // up to and including the closing ']'.
        type++;
        while (*type >= '0' && *type <= '9')
            type++;
        return type + SubtypeUntil (type, ']') + 1;
    }

    if (*type == '{') {
        // Structure: skip everything up to and including '}'.
        type++;
        return type + SubtypeUntil (type, '}') + 1;
    }

    if (*type == '(') {
        // Union: skip everything up to and including ')'.
        type++;
        return type + SubtypeUntil (type, ')') + 1;
    }

    // Basic type: one character.
    return type + 1;
}
2681
2682 /***********************************************************************
2683 * method_getNumberOfArguments.
2684 **********************************************************************/
2685 unsigned method_getNumberOfArguments (Method method)
2686 {
2687 const char * typedesc;
2688 unsigned nargs;
2689
2690 // First, skip the return type
2691 typedesc = method->method_types;
2692 typedesc = SkipFirstType (typedesc);
2693
2694 // Next, skip stack size
2695 while ((*typedesc >= '0') && (*typedesc <= '9'))
2696 typedesc += 1;
2697
2698 // Now, we have the arguments - count how many
2699 nargs = 0;
2700 while (*typedesc)
2701 {
2702 // Traverse argument type
2703 typedesc = SkipFirstType (typedesc);
2704
2705 // Skip GNU runtime's register parameter hint
2706 if (*typedesc == '+') typedesc++;
2707
2708 // Traverse (possibly negative) argument offset
2709 if (*typedesc == '-')
2710 typedesc += 1;
2711 while ((*typedesc >= '0') && (*typedesc <= '9'))
2712 typedesc += 1;
2713
2714 // Made it past an argument
2715 nargs += 1;
2716 }
2717
2718 return nargs;
2719 }
2720
2721 /***********************************************************************
2722 * method_getSizeOfArguments.
2723 **********************************************************************/
#ifndef __alpha__
// Returns the total stack space occupied by the method's arguments,
// as recorded in the type-encoding string (the decimal number that
// immediately follows the return type). On ppc a workaround below may
// add 4 bytes to compensate for a known compiler encoding bug.
unsigned method_getSizeOfArguments (Method method)
{
    const char * typedesc;
    unsigned stack_size;
#if defined(__ppc__) || defined(ppc)
    unsigned trueBaseOffset;
    unsigned foundBaseOffset;
#endif

    // Get our starting points
    stack_size = 0;
    typedesc = method->method_types;

    // Skip the return type
#if defined (__ppc__) || defined(ppc)
    // Struct returns cause the parameters to be bumped
    // by a register, so the offset to the receiver is
    // 4 instead of the normal 0.
    trueBaseOffset = (*typedesc == '{') ? 4 : 0;
#endif
    typedesc = SkipFirstType (typedesc);

    // Convert ASCII number string to integer
    while ((*typedesc >= '0') && (*typedesc <= '9'))
        stack_size = (stack_size * 10) + (*typedesc++ - '0');
#if defined (__ppc__) || defined(ppc)
    // NOTE: This is a temporary measure pending a compiler fix.
    // Work around PowerPC compiler bug wherein the method argument
    // string contains an incorrect value for the "stack size."
    // Generally, the size is reported 4 bytes too small, so we apply
    // that fudge factor. Unfortunately, there is at least one case
    // where the error is something other than -4: when the last
    // parameter is a double, the reported stack is much too high
    // (about 32 bytes). We do not attempt to detect that case.
    // The result of returning a too-high value is that objc_msgSendv
    // can bus error if the destination of the marg_list copying
    // butts up against excluded memory.
    // This fix disables itself when it sees a correctly built
    // type string (i.e. the offset for the Id is correct). This
    // keeps us out of lockstep with the compiler.

    // skip the '@' marking the Id field
    typedesc = SkipFirstType (typedesc);

    // Skip GNU runtime's register parameter hint
    if (*typedesc == '+') typedesc++;

    // pick up the offset for the Id field
    foundBaseOffset = 0;
    while ((*typedesc >= '0') && (*typedesc <= '9'))
        foundBaseOffset = (foundBaseOffset * 10) + (*typedesc++ - '0');

    // add fudge factor iff the Id field offset was wrong
    if (foundBaseOffset != trueBaseOffset)
        stack_size += 4;
#endif

    return stack_size;
}

#else // __alpha__
// XXX Getting the size of a type is done all over the place
// (Here, Foundation, remote project)! - Should unify

unsigned int getSizeOfType (const char * type, unsigned int * alignPtr);

// Alpha variant: instead of trusting the encoded stack size, sum the
// sizes of all arguments, rounding each up to an 8-byte boundary.
unsigned method_getSizeOfArguments (Method method)
{
    const char * type;
    int size;
    int index;
    int align;
    int offset;
    unsigned stack_size;
    int nargs;

    nargs = method_getNumberOfArguments (method);
    // A struct return consumes one extra pointer-sized slot.
    stack_size = (*method->method_types == '{') ? sizeof(void *) : 0;

    for (index = 0; index < nargs; index += 1)
    {
        (void) method_getArgumentInfo (method, index, &type, &offset);
        size = getSizeOfType (type, &align);
        stack_size += ((size + 7) & ~7);    // round up to 8 bytes
    }

    return stack_size;
}
#endif // __alpha__
2814
2815 /***********************************************************************
2816 * method_getArgumentInfo.
2817 **********************************************************************/
// Looks up the type encoding and stack offset of argument `arg`
// (0-based; argument 0 is self) in the method's type string.
// On success, *type points at the argument's type encoding and
// *offset is the argument's stack offset relative to self's offset.
// If `arg` is out of range, *type and *offset are set to 0.
// Returns the number of arguments skipped to reach `arg`.
// NOTE(review): `nargs != arg` compares unsigned against int; a
// negative `arg` would walk the whole string - callers presumably
// never pass one, but confirm before relying on it.
unsigned method_getArgumentInfo (Method method,
                                 int arg,
                                 const char ** type,
                                 int * offset)
{
    const char * typedesc = method->method_types;
    unsigned nargs = 0;
    unsigned self_offset = 0;
    BOOL offset_is_negative = NO;

    // First, skip the return type
    typedesc = SkipFirstType (typedesc);

    // Next, skip stack size
    while ((*typedesc >= '0') && (*typedesc <= '9'))
        typedesc += 1;

    // Now, we have the arguments - position typedesc to the appropriate argument
    while (*typedesc && nargs != arg)
    {

        // Skip argument type
        typedesc = SkipFirstType (typedesc);

        if (nargs == 0)
        {
            // Argument 0 is self: remember its offset so later
            // arguments can be reported relative to it.

            // Skip GNU runtime's register parameter hint
            if (*typedesc == '+') typedesc++;

            // Skip negative sign in offset
            if (*typedesc == '-')
            {
                offset_is_negative = YES;
                typedesc += 1;
            }
            else
                offset_is_negative = NO;

            while ((*typedesc >= '0') && (*typedesc <= '9'))
                self_offset = self_offset * 10 + (*typedesc++ - '0');
            if (offset_is_negative)
                self_offset = -(self_offset);

        }

        else
        {
            // Skip GNU runtime's register parameter hint
            if (*typedesc == '+') typedesc++;

            // Skip (possibly negative) argument offset
            if (*typedesc == '-')
                typedesc += 1;
            while ((*typedesc >= '0') && (*typedesc <= '9'))
                typedesc += 1;
        }

        nargs += 1;
    }

    if (*typedesc)
    {
        // Found the requested argument; parse its offset.
        unsigned arg_offset = 0;

        *type = typedesc;
        typedesc = SkipFirstType (typedesc);

        if (arg == 0)
        {
            // self is by definition at relative offset 0
            // (or -sizeof(id) on hppa, whose stack grows upward).
#ifdef hppa
            *offset = -sizeof(id);
#else
            *offset = 0;
#endif // hppa
        }

        else
        {
            // Skip GNU register parameter hint
            if (*typedesc == '+') typedesc++;

            // Pick up (possibly negative) argument offset
            if (*typedesc == '-')
            {
                offset_is_negative = YES;
                typedesc += 1;
            }
            else
                offset_is_negative = NO;

            while ((*typedesc >= '0') && (*typedesc <= '9'))
                arg_offset = arg_offset * 10 + (*typedesc++ - '0');
            if (offset_is_negative)
                arg_offset = - arg_offset;

#ifdef hppa
            // For stacks which grow up, since margs points
            // to the top of the stack or the END of the args,
            // the first offset is at -sizeof(id) rather than 0.
            self_offset += sizeof(id);
#endif
            // Report the offset relative to self's position.
            *offset = arg_offset - self_offset;
        }

    }

    else
    {
        // arg was beyond the last encoded argument.
        *type = 0;
        *offset = 0;
    }

    return nargs;
}
2932
2933 /***********************************************************************
2934 * _objc_create_zone.
2935 **********************************************************************/
2936
// Historical hook for creating the runtime's malloc zone; it now
// simply hands back the process's default zone.
void * _objc_create_zone (void)
{
    return malloc_default_zone();
}
2941
2942
2943 /***********************************************************************
2944 * _objc_internal_zone.
2945 * Malloc zone for internal runtime data.
2946 * By default this is the default malloc zone, but a dedicated zone is
2947 * used if environment variable OBJC_USE_INTERNAL_ZONE is set.
2948 **********************************************************************/
2949 __private_extern__ malloc_zone_t *_objc_internal_zone(void)
2950 {
2951 static malloc_zone_t *z = (malloc_zone_t *)-1;
2952 if (z == (malloc_zone_t *)-1) {
2953 if (UseInternalZone) {
2954 z = malloc_create_zone(vm_page_size, 0);
2955 malloc_set_zone_name(z, "ObjC");
2956 } else {
2957 z = malloc_default_zone();
2958 }
2959 }
2960 return z;
2961 }
2962
2963
2964 /***********************************************************************
2965 * _malloc_internal
2966 * _calloc_internal
2967 * _realloc_internal
2968 * _strdup_internal
2969 * _free_internal
2970 * Convenience functions for the internal malloc zone.
2971 **********************************************************************/
2972 __private_extern__ void *_malloc_internal(size_t size)
2973 {
2974 return malloc_zone_malloc(_objc_internal_zone(), size);
2975 }
2976
2977 __private_extern__ void *_calloc_internal(size_t count, size_t size)
2978 {
2979 return malloc_zone_calloc(_objc_internal_zone(), count, size);
2980 }
2981
2982 __private_extern__ void *_realloc_internal(void *ptr, size_t size)
2983 {
2984 return malloc_zone_realloc(_objc_internal_zone(), ptr, size);
2985 }
2986
2987 __private_extern__ char *_strdup_internal(const char *str)
2988 {
2989 size_t len = strlen(str);
2990 char *dup = malloc_zone_malloc(_objc_internal_zone(), len + 1);
2991 memcpy(dup, str, len + 1);
2992 return dup;
2993 }
2994
2995 __private_extern__ void _free_internal(void *ptr)
2996 {
2997 malloc_zone_free(_objc_internal_zone(), ptr);
2998 }
2999
3000
3001
3002 /***********************************************************************
3003 * cache collection.
3004 **********************************************************************/
3005
// Returns the program counter of the given (suspended or running)
// thread via thread_get_state, or PC_SENTINAL if the Mach call fails.
// The entire function body is selected per-architecture by the
// preprocessor; each variant differs only in the thread-state flavor
// and the register that holds the PC.
static unsigned long _get_pc_for_thread (mach_port_t thread)
#ifdef hppa
{
    struct hp_pa_frame_thread_state state;
    unsigned int count = HPPA_FRAME_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, HPPA_FRAME_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.ts_pcoq_front : PC_SENTINAL;
}
#elif defined(sparc)
{
    struct sparc_thread_state_regs state;
    unsigned int count = SPARC_THREAD_STATE_REGS_COUNT;
    kern_return_t okay = thread_get_state (thread, SPARC_THREAD_STATE_REGS, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.regs.r_pc : PC_SENTINAL;
}
#elif defined(__i386__) || defined(i386)
{
    i386_thread_state_t state;
    unsigned int count = i386_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.eip : PC_SENTINAL;
}
#elif defined(m68k)
{
    struct m68k_thread_state_regs state;
    unsigned int count = M68K_THREAD_STATE_REGS_COUNT;
    kern_return_t okay = thread_get_state (thread, M68K_THREAD_STATE_REGS, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.pc : PC_SENTINAL;
}
#elif defined(__ppc__) || defined(ppc)
{
    struct ppc_thread_state state;
    unsigned int count = PPC_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, PPC_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.srr0 : PC_SENTINAL;
}
#else
{
#error _get_pc_for_thread () not implemented for this architecture
}
#endif
3047
3048 /***********************************************************************
3049 * _collecting_in_critical.
3050 * Returns TRUE if some thread is currently executing a cache-reading
3051 * function. Collection of cache garbage is not allowed when a cache-
3052 * reading function is in progress because it might still be using
3053 * the garbage memory.
3054 **********************************************************************/
3055 OBJC_EXPORT unsigned long objc_entryPoints[];
3056 OBJC_EXPORT unsigned long objc_exitPoints[];
3057
3058 static int _collecting_in_critical (void)
3059 {
3060 thread_act_port_array_t threads;
3061 unsigned number;
3062 unsigned count;
3063 kern_return_t ret;
3064 int result;
3065
3066 mach_port_t mythread = pthread_mach_thread_np(pthread_self());
3067
3068 // Get a list of all the threads in the current task
3069 ret = task_threads (mach_task_self (), &threads, &number);
3070 if (ret != KERN_SUCCESS)
3071 {
3072 _objc_fatal("task_thread failed (result %d)\n", ret);
3073 }
3074
3075 // Check whether any thread is in the cache lookup code
3076 result = FALSE;
3077 for (count = 0; count < number; count++)
3078 {
3079 int region;
3080 unsigned long pc;
3081
3082 // Don't bother checking ourselves
3083 if (threads[count] == mythread)
3084 continue;
3085
3086 // Find out where thread is executing
3087 pc = _get_pc_for_thread (threads[count]);
3088
3089 // Check for bad status, and if so, assume the worse (can't collect)
3090 if (pc == PC_SENTINAL)
3091 {
3092 result = TRUE;
3093 goto done;
3094 }
3095
3096 // Check whether it is in the cache lookup code
3097 for (region = 0; objc_entryPoints[region] != 0; region++)
3098 {
3099 if ((pc >= objc_entryPoints[region]) &&
3100 (pc <= objc_exitPoints[region]))
3101 {
3102 result = TRUE;
3103 goto done;
3104 }
3105 }
3106 }
3107
3108 done:
3109 // Deallocate the port rights for the threads
3110 for (count = 0; count < number; count++) {
3111 mach_port_deallocate(mach_task_self (), threads[count]);
3112 }
3113
3114 // Deallocate the thread list
3115 vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads) * number);
3116
3117 // Return our finding
3118 return result;
3119 }
3120
3121 /***********************************************************************
3122 * _garbage_make_room. Ensure that there is enough room for at least
3123 * one more ref in the garbage.
3124 **********************************************************************/
3125
// State for the deferred-free list of dead cache blocks. All of these
// are guarded by cacheUpdateLock (held by every caller in this file).

// amount of memory represented by all refs in the garbage
static int garbage_byte_size = 0;

// do not empty the garbage until garbage_byte_size gets at least this big
static int garbage_threshold = 1024;

// table of refs to free (allocated lazily by _garbage_make_room)
static void **garbage_refs = 0;

// current number of refs in garbage_refs
static int garbage_count = 0;

// capacity of current garbage_refs
static int garbage_max = 0;

// capacity of initial garbage_refs
enum {
    INIT_GARBAGE_COUNT = 128
};
3145
3146 static void _garbage_make_room (void)
3147 {
3148 static int first = 1;
3149 volatile void * tempGarbage;
3150
3151 // Create the collection table the first time it is needed
3152 if (first)
3153 {
3154 first = 0;
3155 garbage_refs = _malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
3156 garbage_max = INIT_GARBAGE_COUNT;
3157 }
3158
3159 // Double the table if it is full
3160 else if (garbage_count == garbage_max)
3161 {
3162 tempGarbage = _realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));
3163 garbage_refs = (void **) tempGarbage;
3164 garbage_max *= 2;
3165 }
3166 }
3167
3168 /***********************************************************************
3169 * _cache_collect_free. Add the specified malloc'd memory to the list
3170 * of them to free at some later point.
3171 * size is used for the collection threshold. It does not have to be
3172 * precisely the block's size.
3173 * Cache locks: cacheUpdateLock must be held by the caller.
3174 **********************************************************************/
// Queues a dead cache block for deferred disposal and, when tryCollect
// is set and the accumulated garbage exceeds the threshold, frees the
// whole list - but only if no other thread might currently be reading
// cache memory (_collecting_in_critical).
// data: the block to dispose of later (cache-allocator block or malloc'd).
// size: approximate size, used only for the collection threshold.
// Cache locks: cacheUpdateLock must be held by the caller.
static void _cache_collect_free(void *data, size_t size, BOOL tryCollect)
{
    // 0xffffffff means "environment not consulted yet"; after the
    // first call this is either NULL (no logging) or the env value.
    // NOTE(review): the sentinel assumes 32-bit pointers - confirm
    // before reusing this pattern on a 64-bit target.
    static char *report_garbage = (char *)0xffffffff;

    if ((char *)0xffffffff == report_garbage) {
        // Check whether to log our activity
        report_garbage = getenv ("OBJC_REPORT_GARBAGE");
    }

    // Insert new element in garbage list
    // Note that we do this even if we end up free'ing everything
    _garbage_make_room ();
    garbage_byte_size += size;
    garbage_refs[garbage_count++] = data;

    // Log our progress
    if (tryCollect && report_garbage)
        _objc_inform ("total of %d bytes of garbage ...", garbage_byte_size);

    // Done if caller says not to empty or the garbage is not full
    if (!tryCollect || (garbage_byte_size < garbage_threshold))
    {
        if (tryCollect && report_garbage)
            _objc_inform ("couldn't collect cache garbage: below threshold\n");

        return;
    }

    // tryCollect is guaranteed to be true after this point

    // Synchronize garbage collection with objc_msgSend and other cache readers
    if (!_collecting_in_critical ()) {
        // No cache readers in progress - garbage is now deletable

        // Log our progress
        if (report_garbage)
            _objc_inform ("collecting!\n");

        // Dispose all refs now in the garbage, returning each block to
        // whichever allocator it came from.
        while (garbage_count--) {
            if (cache_allocator_is_block(garbage_refs[garbage_count])) {
                cache_allocator_free(garbage_refs[garbage_count]);
            } else {
                free(garbage_refs[garbage_count]);
            }
        }

        // Clear the garbage count and total size indicator
        garbage_count = 0;
        garbage_byte_size = 0;
    }
    else {
        // objc_msgSend (or other cache reader) is currently looking in the
        // cache and might still be using some garbage.
        if (report_garbage) {
            _objc_inform ("couldn't collect cache garbage: objc_msgSend in progress\n");
        }
    }
}
3234
3235
3236
3237 /***********************************************************************
3238 * Custom method cache allocator.
3239 * Method cache block sizes are 2^slots+2 words, which is a pessimal
3240 * case for the system allocator. It wastes 504 bytes per cache block
3241 * with 128 or more slots, which adds up to tens of KB for an AppKit process.
3242 * To save memory, the custom cache allocator below is used.
3243 *
3244 * The cache allocator uses 128 KB allocation regions. Few processes will
3245 * require a second region. Within a region, allocation is address-ordered
3246 * first fit.
3247 *
3248 * The cache allocator uses a quantum of 520.
3249 * Cache block ideal sizes: 520, 1032, 2056, 4104
3250 * Cache allocator sizes: 520, 1040, 2080, 4160
3251 *
3252 * Because all blocks are known to be genuine method caches, the ordinary
3253 * cache->mask and cache->occupied fields are used as block headers.
3254 * No out-of-band headers are maintained. The number of blocks will
3255 * almost always be fewer than 200, so for simplicity there is no free
3256 * list or other optimization.
3257 *
3258 * Block in use: mask != 0, occupied != -1 (mask indicates block size)
3259 * Block free: mask != 0, occupied == -1 (mask is precisely block size)
3260 *
3261 * No cache allocator functions take any locks. Instead, the caller
3262 * must hold the cacheUpdateLock.
3263 **********************************************************************/
3264
// Header at the start of every allocator block. For a live block these
// two words alias the method cache's own header (Cache->mask and
// Cache->occupied); see cache_region_calloc and cache_allocator_free.
typedef struct cache_allocator_block {
    unsigned int size;      // live: the cache mask; free: exact block size in bytes
    unsigned int state;     // live: cache occupied count; free: (unsigned int)-1
    struct cache_allocator_block *nextFree;  // next free block (meaningful only when free)
} cache_allocator_block;

// One contiguous vm_allocate'd region carved into cache blocks.
typedef struct cache_allocator_region {
    cache_allocator_block *start;    // first block in the region
    cache_allocator_block *end; // first non-block address
    cache_allocator_block *freeList; // address-ordered free list, fully coalesced
    struct cache_allocator_region *next;  // next region; earlier regions are preferred
} cache_allocator_region;

// Head of the global region list. No lock of its own; all users hold
// cacheUpdateLock.
static cache_allocator_region *cacheRegion = NULL;
3279
3280
3281 static unsigned int cache_allocator_mask_for_size(size_t size)
3282 {
3283 return (size - sizeof(struct objc_cache)) / sizeof(Method);
3284 }
3285
3286 static size_t cache_allocator_size_for_mask(unsigned int mask)
3287 {
3288 size_t requested = sizeof(struct objc_cache) + TABLE_SIZE(mask+1);
3289 size_t actual = CACHE_QUANTUM;
3290 while (actual < requested) actual += CACHE_QUANTUM;
3291 return actual;
3292 }
3293
3294 /***********************************************************************
3295 * cache_allocator_add_region
3296 * Allocates and returns a new region that can hold at least size
3297 * bytes of large method caches.
3298 * The actual size will be rounded up to a CACHE_QUANTUM boundary,
3299 * with a minimum of CACHE_REGION_SIZE.
3300 * The new region is lowest-priority for new allocations. Callers that
3301 * know the other regions are already full should allocate directly
3302 * into the returned region.
3303 **********************************************************************/
3304 static cache_allocator_region *cache_allocator_add_region(size_t size)
3305 {
3306 vm_address_t addr;
3307 cache_allocator_block *b;
3308 cache_allocator_region **rgnP;
3309 cache_allocator_region *newRegion =
3310 _calloc_internal(1, sizeof(cache_allocator_region));
3311
3312 // Round size up to quantum boundary, and apply the minimum size.
3313 size += CACHE_QUANTUM - (size % CACHE_QUANTUM);
3314 if (size < CACHE_REGION_SIZE) size = CACHE_REGION_SIZE;
3315
3316 // Allocate the region
3317 addr = 0;
3318 vm_allocate(mach_task_self(), &addr, size, 1);
3319 newRegion->start = (cache_allocator_block *)addr;
3320 newRegion->end = (cache_allocator_block *)(addr + size);
3321
3322 // Mark the first block: free and covers the entire region
3323 b = newRegion->start;
3324 b->size = size;
3325 b->state = (unsigned int)-1;
3326 b->nextFree = NULL;
3327 newRegion->freeList = b;
3328
3329 // Add to end of the linked list of regions.
3330 // Other regions should be re-used before this one is touched.
3331 newRegion->next = NULL;
3332 rgnP = &cacheRegion;
3333 while (*rgnP) {
3334 rgnP = &(**rgnP).next;
3335 }
3336 *rgnP = newRegion;
3337
3338 return newRegion;
3339 }
3340
3341
3342 /***********************************************************************
3343 * cache_allocator_coalesce
3344 * Attempts to coalesce a free block with the single free block following
3345 * it in the free list, if any.
3346 **********************************************************************/
3347 static void cache_allocator_coalesce(cache_allocator_block *block)
3348 {
3349 if (block->size + (uintptr_t)block == (uintptr_t)block->nextFree) {
3350 block->size += block->nextFree->size;
3351 block->nextFree = block->nextFree->nextFree;
3352 }
3353 }
3354
3355
3356 /***********************************************************************
3357 * cache_region_calloc
3358 * Attempt to allocate a size-byte block in the given region.
3359 * Allocation is first-fit. The free list is already fully coalesced.
3360 * Returns NULL if there is not enough room in the region for the block.
3361 **********************************************************************/
// Attempts to allocate a size-byte block from the given region using
// address-ordered first fit over its (fully coalesced) free list.
// On success returns a zeroed block whose header already holds the
// cache mask (size field) and occupied count 0 (state field).
// Returns NULL if no free block in the region is large enough.
static void *cache_region_calloc(cache_allocator_region *rgn, size_t size)
{
    cache_allocator_block **blockP;
    unsigned int mask;

    // Save mask for allocated block, then round size
    // up to CACHE_QUANTUM boundary
    mask = cache_allocator_mask_for_size(size);
    size = cache_allocator_size_for_mask(mask);

    // Search the free list for a sufficiently large free block.

    for (blockP = &rgn->freeList;
         *blockP != NULL;
         blockP = &(**blockP).nextFree)
    {
        cache_allocator_block *block = *blockP;
        if (block->size < size) continue; // not big enough

        // block is now big enough. Allocate from it.

        // Slice off unneeded fragment of block, if any,
        // and reconnect the free list around block.
        if (block->size - size >= CACHE_QUANTUM) {
            // Remainder is at least one quantum: keep it as a smaller
            // free block in this block's place on the list.
            cache_allocator_block *leftover =
                (cache_allocator_block *)(size + (uintptr_t)block);
            leftover->size = block->size - size;
            leftover->state = (unsigned int)-1;
            leftover->nextFree = block->nextFree;
            *blockP = leftover;
        } else {
            // Remainder too small to track: hand out the whole block.
            *blockP = block->nextFree;
        }

        // block is now exactly the right size.

        bzero(block, size);
        block->size = mask; // Cache->mask
        block->state = 0; // Cache->occupied

        return block;
    }

    // No room in this region.
    return NULL;
}
3408
3409
3410 /***********************************************************************
3411 * cache_allocator_calloc
3412 * Custom allocator for large method caches (128+ slots)
3413 * The returned cache block already has cache->mask set.
3414 * cache->occupied and the cache contents are zero.
3415 * Cache locks: cacheUpdateLock must be held by the caller
3416 **********************************************************************/
3417 static void *cache_allocator_calloc(size_t size)
3418 {
3419 cache_allocator_region *rgn;
3420
3421 for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
3422 void *p = cache_region_calloc(rgn, size);
3423 if (p) {
3424 return p;
3425 }
3426 }
3427
3428 // No regions or all regions full - make a region and try one more time
3429 // In the unlikely case of a cache over 256KB, it will get its own region.
3430 return cache_region_calloc(cache_allocator_add_region(size), size);
3431 }
3432
3433
3434 /***********************************************************************
3435 * cache_allocator_region_for_block
3436 * Returns the cache allocator region that ptr points into, or NULL.
3437 **********************************************************************/
3438 static cache_allocator_region *cache_allocator_region_for_block(cache_allocator_block *block)
3439 {
3440 cache_allocator_region *rgn;
3441 for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
3442 if (block >= rgn->start && block < rgn->end) return rgn;
3443 }
3444 return NULL;
3445 }
3446
3447
3448 /***********************************************************************
3449 * cache_allocator_is_block
3450 * If ptr is a live block from the cache allocator, return YES
3451 * If ptr is a block from some other allocator, return NO.
3452 * If ptr is a dead block from the cache allocator, result is undefined.
3453 * Cache locks: cacheUpdateLock must be held by the caller
3454 **********************************************************************/
3455 static BOOL cache_allocator_is_block(void *ptr)
3456 {
3457 return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL);
3458 }
3459
3460 /***********************************************************************
3461 * cache_allocator_free
3462 * Frees a block allocated by the cache allocator.
3463 * Cache locks: cacheUpdateLock must be held by the caller.
3464 **********************************************************************/
// Returns a block to the cache allocator, inserting it into its
// region's address-ordered free list and coalescing with physical
// neighbors. Logs and ignores pointers that belong to no region.
// Cache locks: cacheUpdateLock must be held by the caller.
static void cache_allocator_free(void *ptr)
{
    cache_allocator_block *dead = (cache_allocator_block *)ptr;
    cache_allocator_block *cur;
    cache_allocator_region *rgn;

    if (! (rgn = cache_allocator_region_for_block(ptr))) {
        // free of non-pointer
        _objc_inform("cache_allocator_free of non-pointer %p", ptr);
        return;
    }

    // A live block stores the cache mask in its size field; convert it
    // back to the block's byte size and mark the block free.
    dead->size = cache_allocator_size_for_mask(dead->size);
    dead->state = (unsigned int)-1;

    if (!rgn->freeList || rgn->freeList > dead) {
        // dead block belongs at front of free list
        dead->nextFree = rgn->freeList;
        rgn->freeList = dead;
        cache_allocator_coalesce(dead);
        return;
    }

    // dead block belongs in the middle or end of free list
    for (cur = rgn->freeList; cur != NULL; cur = cur->nextFree) {
        cache_allocator_block *ahead = cur->nextFree;

        if (!ahead || ahead > dead) {
            // cur and ahead straddle dead, OR dead belongs at end of free list
            cur->nextFree = dead;
            dead->nextFree = ahead;

            // coalesce into dead first in case both succeed
            cache_allocator_coalesce(dead);
            cache_allocator_coalesce(cur);
            return;
        }
    }

    // uh-oh: walked the whole list without finding an insertion point
    // (should be impossible for a block inside this region).
    _objc_inform("cache_allocator_free of non-pointer %p", ptr);
}
3507
3508
3509 /***********************************************************************
3510 * _cache_print.
3511 **********************************************************************/
3512 static void _cache_print (Cache cache)
3513 {
3514 unsigned int index;
3515 unsigned int count;
3516
3517 count = cache->mask + 1;
3518 for (index = 0; index < count; index += 1)
3519 if (CACHE_BUCKET_VALID(cache->buckets[index]))
3520 {
3521 if (CACHE_BUCKET_IMP(cache->buckets[index]) == &_objc_msgForward)
3522 printf ("does not recognize: \n");
3523 printf ("%s\n", (const char *) CACHE_BUCKET_NAME(cache->buckets[index]));
3524 }
3525 }
3526
3527 /***********************************************************************
3528 * _class_printMethodCaches.
3529 **********************************************************************/
3530 void _class_printMethodCaches (Class cls)
3531 {
3532 if (((struct objc_class *)cls)->cache == &emptyCache)
3533 printf ("no instance-method cache for class %s\n", ((struct objc_class *)cls)->name);
3534
3535 else
3536 {
3537 printf ("instance-method cache for class %s:\n", ((struct objc_class *)cls)->name);
3538 _cache_print (((struct objc_class *)cls)->cache);
3539 }
3540
3541 if (((struct objc_class * )((struct objc_class * )cls)->isa)->cache == &emptyCache)
3542 printf ("no class-method cache for class %s\n", ((struct objc_class *)cls)->name);
3543
3544 else
3545 {
3546 printf ("class-method cache for class %s:\n", ((struct objc_class *)cls)->name);
3547 _cache_print (((struct objc_class * )((struct objc_class * )cls)->isa)->cache);
3548 }
3549 }
3550
3551 /***********************************************************************
3552 * log2.
3553 **********************************************************************/
// Returns floor(log2(x)) for x > 0; returns 0 for x == 0.
// (Counts how many times x can be halved before reaching zero.)
static unsigned int log2 (unsigned int x)
{
    unsigned int result = 0;

    for (x >>= 1; x != 0; x >>= 1)
        result += 1;

    return result;
}
3564
3565 /***********************************************************************
3566 * _class_printDuplicateCacheEntries.
3567 **********************************************************************/
// Debugging aid: scans every registered class's instance- and
// class-method caches for pairs of valid buckets whose selector names
// compare equal, and prints the duplicate count (per-entry detail when
// `detail` is set) plus the global cache-fill counter.
void _class_printDuplicateCacheEntries (BOOL detail)
{
    NXHashTable * class_hash;
    NXHashState state;
    struct objc_class * cls;
    unsigned int duplicates;
    unsigned int index1;
    unsigned int index2;
    unsigned int mask;
    unsigned int count;
    unsigned int isMeta;
    Cache cache;


    printf ("Checking for duplicate cache entries \n");

    // Outermost loop - iterate over all classes
    class_hash = objc_getClasses ();
    state = NXInitHashState (class_hash);
    duplicates = 0;
    while (NXNextHashState (class_hash, &state, (void **) &cls))
    {
        // Control loop - do given class' cache, then its isa's cache
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            // Select cache of interest and make sure it exists
            cache = isMeta ? cls->isa->cache : ((struct objc_class *)cls)->cache;
            if (cache == &emptyCache)
                continue;

            // Middle loop - check each entry in the given cache
            mask = cache->mask;
            count = mask + 1;
            for (index1 = 0; index1 < count; index1 += 1)
            {
                // Skip invalid entry
                if (!CACHE_BUCKET_VALID(cache->buckets[index1]))
                    continue;

                // Inner loop - check that given entry matches no later entry
                for (index2 = index1 + 1; index2 < count; index2 += 1)
                {
                    // Skip invalid entry
                    if (!CACHE_BUCKET_VALID(cache->buckets[index2]))
                        continue;

                    // Check for duplication by method name comparison
                    if (strcmp ((char *) CACHE_BUCKET_NAME(cache->buckets[index1]),
                                (char *) CACHE_BUCKET_NAME(cache->buckets[index2])) == 0)
                    {
                        if (detail)
                            printf ("%s %s\n", ((struct objc_class *)cls)->name, (char *) CACHE_BUCKET_NAME(cache->buckets[index1]));
                        duplicates += 1;
                        // Count each earlier entry at most once.
                        break;
                    }
                }
            }
        }
    }

    // Log the findings
    printf ("duplicates = %d\n", duplicates);
    printf ("total cache fills = %d\n", totalCacheFills);
}
3632
/***********************************************************************
* PrintCacheHeader.  Emit the three heading lines for the cache
* statistics table; the instrumented build adds extra columns that
* must stay in sync with PrintCacheInfo's instrumented format.
**********************************************************************/
static void PrintCacheHeader (void)
{
#ifdef OBJC_INSTRUMENTED
    // Adjacent string literals concatenate: one call, identical output.
    printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS TotalD AvgD MaxD TotalD AvgD MaxD TotD AvgD MaxD\n"
            "Size Count Used Used Used Hit Hit Miss Miss Hits Prbs Prbs Misses Prbs Prbs Flsh Flsh Flsh\n"
            "----- ----- ----- ----- ---- ---- ---- ---- ---- ------- ---- ---- ------- ---- ---- ---- ---- ----\n");
#else
    printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS\n"
            "Size Count Used Used Used Hit Hit Miss Miss\n"
            "----- ----- ----- ----- ---- ---- ---- ---- ----\n");
#endif
}
3648
/***********************************************************************
* PrintCacheInfo.  Print one row of the cache statistics table started
* by PrintCacheHeader, describing all caches of a given slot count:
* cache size and count, slots used (total/avg/max), and average/max
* probe chain lengths for hits and misses.  When OBJC_INSTRUMENTED is
* defined the row gains the dynamic hit/miss/flush columns, so both
* the format string and the argument list are conditionalized and must
* stay in sync with PrintCacheHeader.
**********************************************************************/
static void PrintCacheInfo (unsigned int cacheSize,
                            unsigned int cacheCount,
                            unsigned int slotsUsed,
                            float avgUsed,
                            unsigned int maxUsed,
                            float avgSHit,
                            unsigned int maxSHit,
                            float avgSMiss,
                            unsigned int maxSMiss
#ifdef OBJC_INSTRUMENTED
                            , unsigned int totDHits,
                            float avgDHit,
                            unsigned int maxDHit,
                            unsigned int totDMisses,
                            float avgDMiss,
                            unsigned int maxDMiss,
                            unsigned int totDFlsh,
                            float avgDFlsh,
                            unsigned int maxDFlsh
#endif
                            )
{
    // Only the format string is conditional here; the matching argument
    // list below repeats the same #ifdef so printf sees paired values.
#ifdef OBJC_INSTRUMENTED
    printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u %7u %4.1f %4u %7u %4.1f %4u %4u %4.1f %4u\n",
#else
    printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u\n",
#endif
            cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed, avgSHit, maxSHit, avgSMiss, maxSMiss
#ifdef OBJC_INSTRUMENTED
            , totDHits, avgDHit, maxDHit, totDMisses, avgDMiss, maxDMiss, totDFlsh, avgDFlsh, maxDFlsh
#endif
            );

}
3686
3687 #ifdef OBJC_INSTRUMENTED
/***********************************************************************
* PrintCacheHistogram. Show the non-zero entries from the specified
* cache histogram (entryCount counters starting at firstEntry, where
* the array index is the probe count and the value is its tally).
**********************************************************************/
static void PrintCacheHistogram (char * title,
                                 unsigned int * firstEntry,
                                 unsigned int entryCount)
{
    unsigned int index;
    unsigned int * thisEntry;

    printf ("%s\n", title);
    printf (" Probes Tally\n");
    printf (" ------ -----\n");
    for (index = 0, thisEntry = firstEntry;
         index < entryCount;
         index += 1, thisEntry += 1)
    {
        // Skip buckets with no observations
        if (*thisEntry == 0)
            continue;

        // %u, not %d: both index and the tally are unsigned int
        printf (" %6u %5u\n", index, *thisEntry);
    }
}
3712 #endif
3713
/***********************************************************************
* _class_printMethodCacheStatistics.  Debugging aid: walk every
* registered class and its metaclass, measure each method cache
* (occupancy, hit-chain and miss-chain probe lengths), then print
* aggregate tables broken down by cache size (power-of-two slot
* count) and by cache type (instance vs. class methods), plus
* cumulative totals.  With OBJC_INSTRUMENTED, also reports the
* dynamically collected hit/miss/flush counters.
**********************************************************************/

// Caches are power-of-two sized; 32 covers every possible log2(size)
#define MAX_LOG2_SIZE 32
// Chain-length histograms only track chains shorter than this
#define MAX_CHAIN_SIZE 100

void _class_printMethodCacheStatistics (void)
{
    unsigned int isMeta;
    unsigned int index;
    NXHashTable * class_hash;
    NXHashState state;
    struct objc_class * cls;
    unsigned int totalChain;
    unsigned int totalMissChain;
    unsigned int maxChain;
    unsigned int maxMissChain;
    unsigned int classCount;
    unsigned int negativeEntryCount;
    unsigned int cacheExpandCount;
    // All ...BySize tables are indexed [isMeta][log2(slot count)]:
    // row 0 = instance-method caches, row 1 = class-method caches.
    unsigned int cacheCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMaxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMaxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int chainCount[MAX_CHAIN_SIZE] = {0};
    unsigned int missChainCount[MAX_CHAIN_SIZE] = {0};
#ifdef OBJC_INSTRUMENTED
    unsigned int hitCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int hitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxHitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int missCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int missProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxMissProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int flushCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int flushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxFlushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
#endif

    printf ("Printing cache statistics\n");

    // Outermost loop - iterate over all classes
    class_hash = objc_getClasses ();
    state = NXInitHashState (class_hash);
    classCount = 0;
    negativeEntryCount = 0;
    cacheExpandCount = 0;
    while (NXNextHashState (class_hash, &state, (void **) &cls))
    {
        // Tally classes
        classCount += 1;

        // Control loop - do given class' cache, then its isa's cache
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            Cache cache;
            unsigned int mask;
            unsigned int log2Size;
            unsigned int entryCount;

            // Select cache of interest
            cache = isMeta ? cls->isa->cache : ((struct objc_class *)cls)->cache;

            // Ignore empty cache... should we?
            if (cache == &emptyCache)
                continue;

            // Middle loop - do each entry in the given cache
            mask = cache->mask;
            entryCount = 0;
            totalChain = 0;
            totalMissChain = 0;
            maxChain = 0;
            maxMissChain = 0;
            for (index = 0; index < mask + 1; index += 1)
            {
                Method * buckets;
                Method method;
                uarith_t hash;
                uarith_t methodChain;
                uarith_t methodMissChain;
                uarith_t index2;

                // If entry is invalid, the only item of
                // interest is that future insert hashes
                // to this entry can use it directly.
                buckets = cache->buckets;
                if (!CACHE_BUCKET_VALID(buckets[index]))
                {
                    missChainCount[0] += 1;
                    continue;
                }

                method = buckets[index];

                // Tally valid entries
                entryCount += 1;

                // Tally "forward::" entries (negative cache entries whose
                // IMP is the message forwarder)
                if (CACHE_BUCKET_IMP(method) == &_objc_msgForward)
                    negativeEntryCount += 1;

                // Calculate search distance (chain length) for this method
                // The chain may wrap around to the beginning of the table.
                hash = CACHE_HASH(CACHE_BUCKET_NAME(method), mask);
                if (index >= hash) methodChain = index - hash;
                else methodChain = (mask+1) + index - hash;

                // Tally chains of this length
                if (methodChain < MAX_CHAIN_SIZE)
                    chainCount[methodChain] += 1;

                // Keep sum of all chain lengths
                totalChain += methodChain;

                // Record greatest chain length
                if (methodChain > maxChain)
                    maxChain = methodChain;

                // Calculate search distance for miss that hashes here:
                // probe forward (with wraparound) until an empty slot.
                index2 = index;
                while (CACHE_BUCKET_VALID(buckets[index2]))
                {
                    index2 += 1;
                    index2 &= mask;
                }
                methodMissChain = ((index2 - index) & mask);

                // Tally miss chains of this length
                if (methodMissChain < MAX_CHAIN_SIZE)
                    missChainCount[methodMissChain] += 1;

                // Keep sum of all miss chain lengths in this class
                totalMissChain += methodMissChain;

                // Record greatest miss chain length
                if (methodMissChain > maxMissChain)
                    maxMissChain = methodMissChain;
            }

            // Factor this cache into statistics about caches of the same
            // type and size (all caches are a power of two in size)
            log2Size = log2 (mask + 1);
            cacheCountBySize[isMeta][log2Size] += 1;
            totalEntriesBySize[isMeta][log2Size] += entryCount;
            if (entryCount > maxEntriesBySize[isMeta][log2Size])
                maxEntriesBySize[isMeta][log2Size] = entryCount;
            totalChainBySize[isMeta][log2Size] += totalChain;
            totalMissChainBySize[isMeta][log2Size] += totalMissChain;
            totalMaxChainBySize[isMeta][log2Size] += maxChain;
            totalMaxMissChainBySize[isMeta][log2Size] += maxMissChain;
            if (maxChain > maxChainBySize[isMeta][log2Size])
                maxChainBySize[isMeta][log2Size] = maxChain;
            if (maxMissChain > maxMissChainBySize[isMeta][log2Size])
                maxMissChainBySize[isMeta][log2Size] = maxMissChain;
#ifdef OBJC_INSTRUMENTED
            {
                CacheInstrumentation * cacheData;

                cacheData = CACHE_INSTRUMENTATION(cache);
                hitCountBySize[isMeta][log2Size] += cacheData->hitCount;
                hitProbesBySize[isMeta][log2Size] += cacheData->hitProbes;
                if (cacheData->maxHitProbes > maxHitProbesBySize[isMeta][log2Size])
                    maxHitProbesBySize[isMeta][log2Size] = cacheData->maxHitProbes;
                missCountBySize[isMeta][log2Size] += cacheData->missCount;
                missProbesBySize[isMeta][log2Size] += cacheData->missProbes;
                if (cacheData->maxMissProbes > maxMissProbesBySize[isMeta][log2Size])
                    maxMissProbesBySize[isMeta][log2Size] = cacheData->maxMissProbes;
                flushCountBySize[isMeta][log2Size] += cacheData->flushCount;
                flushedEntriesBySize[isMeta][log2Size] += cacheData->flushedEntries;
                if (cacheData->maxFlushedEntries > maxFlushedEntriesBySize[isMeta][log2Size])
                    maxFlushedEntriesBySize[isMeta][log2Size] = cacheData->maxFlushedEntries;
            }
#endif
            // Caches start with a power of two number of entries, and grow by doubling, so
            // we can calculate the number of times this cache has expanded
            // NOTE(review): assumes log2Size >= INIT_*_CACHE_SIZE_LOG2;
            // a cache smaller than the initial size would make this
            // unsigned subtraction wrap — confirm caches never shrink.
            if (isMeta)
                cacheExpandCount += log2Size - INIT_META_CACHE_SIZE_LOG2;
            else
                cacheExpandCount += log2Size - INIT_CACHE_SIZE_LOG2;

        }
    }

    {
        unsigned int cacheCountByType[2] = {0};
        unsigned int totalCacheCount = 0;
        unsigned int totalEntries = 0;
        unsigned int maxEntries = 0;
        unsigned int totalSlots = 0;
#ifdef OBJC_INSTRUMENTED
        unsigned int totalHitCount = 0;
        unsigned int totalHitProbes = 0;
        unsigned int maxHitProbes = 0;
        unsigned int totalMissCount = 0;
        unsigned int totalMissProbes = 0;
        unsigned int maxMissProbes = 0;
        unsigned int totalFlushCount = 0;
        unsigned int totalFlushedEntries = 0;
        unsigned int maxFlushedEntries = 0;
#endif

        // Reuse the per-cache accumulators for the grand totals
        totalChain = 0;
        maxChain = 0;
        totalMissChain = 0;
        maxMissChain = 0;

        // Sum information over all caches
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            for (index = 0; index < MAX_LOG2_SIZE; index += 1)
            {
                cacheCountByType[isMeta] += cacheCountBySize[isMeta][index];
                totalEntries += totalEntriesBySize[isMeta][index];
                totalSlots += cacheCountBySize[isMeta][index] * (1 << index);
                totalChain += totalChainBySize[isMeta][index];
                if (maxEntriesBySize[isMeta][index] > maxEntries)
                    maxEntries = maxEntriesBySize[isMeta][index];
                if (maxChainBySize[isMeta][index] > maxChain)
                    maxChain = maxChainBySize[isMeta][index];
                totalMissChain += totalMissChainBySize[isMeta][index];
                if (maxMissChainBySize[isMeta][index] > maxMissChain)
                    maxMissChain = maxMissChainBySize[isMeta][index];
#ifdef OBJC_INSTRUMENTED
                totalHitCount += hitCountBySize[isMeta][index];
                totalHitProbes += hitProbesBySize[isMeta][index];
                if (maxHitProbesBySize[isMeta][index] > maxHitProbes)
                    maxHitProbes = maxHitProbesBySize[isMeta][index];
                totalMissCount += missCountBySize[isMeta][index];
                totalMissProbes += missProbesBySize[isMeta][index];
                if (maxMissProbesBySize[isMeta][index] > maxMissProbes)
                    maxMissProbes = maxMissProbesBySize[isMeta][index];
                totalFlushCount += flushCountBySize[isMeta][index];
                totalFlushedEntries += flushedEntriesBySize[isMeta][index];
                if (maxFlushedEntriesBySize[isMeta][index] > maxFlushedEntries)
                    maxFlushedEntries = maxFlushedEntriesBySize[isMeta][index];
#endif
            }

            totalCacheCount += cacheCountByType[isMeta];
        }

        // Log our findings
        printf ("There are %u classes\n", classCount);

        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            // Number of this type of class
            printf ("\nThere are %u %s-method caches, broken down by size (slot count):\n",
                    cacheCountByType[isMeta],
                    isMeta ? "class" : "instance");

            // Print header
            PrintCacheHeader ();

            // Keep format consistent even if there are caches of this kind
            if (cacheCountByType[isMeta] == 0)
            {
                printf ("(none)\n");
                continue;
            }

            // Usage information by cache size
            for (index = 0; index < MAX_LOG2_SIZE; index += 1)
            {
                unsigned int cacheCount;
                unsigned int cacheSlotCount;
                unsigned int cacheEntryCount;

                // Get number of caches of this type and size
                cacheCount = cacheCountBySize[isMeta][index];
                if (cacheCount == 0)
                    continue;

                // Get the cache slot count and the total number of valid entries
                cacheSlotCount = (1 << index);
                cacheEntryCount = totalEntriesBySize[isMeta][index];

                // Give the analysis
                PrintCacheInfo (cacheSlotCount,
                                cacheCount,
                                cacheEntryCount,
                                (float) cacheEntryCount / (float) cacheCount,
                                maxEntriesBySize[isMeta][index],
                                (float) totalChainBySize[isMeta][index] / (float) cacheEntryCount,
                                maxChainBySize[isMeta][index],
                                (float) totalMissChainBySize[isMeta][index] / (float) (cacheCount * cacheSlotCount),
                                maxMissChainBySize[isMeta][index]
#ifdef OBJC_INSTRUMENTED
                                , hitCountBySize[isMeta][index],
                                hitCountBySize[isMeta][index] ?
                                (float) hitProbesBySize[isMeta][index] / (float) hitCountBySize[isMeta][index] : 0.0,
                                maxHitProbesBySize[isMeta][index],
                                missCountBySize[isMeta][index],
                                missCountBySize[isMeta][index] ?
                                (float) missProbesBySize[isMeta][index] / (float) missCountBySize[isMeta][index] : 0.0,
                                maxMissProbesBySize[isMeta][index],
                                flushCountBySize[isMeta][index],
                                flushCountBySize[isMeta][index] ?
                                (float) flushedEntriesBySize[isMeta][index] / (float) flushCountBySize[isMeta][index] : 0.0,
                                maxFlushedEntriesBySize[isMeta][index]
#endif
                                );
            }
        }

        // Give overall numbers
        printf ("\nCumulative:\n");
        PrintCacheHeader ();
        PrintCacheInfo (totalSlots,
                        totalCacheCount,
                        totalEntries,
                        (float) totalEntries / (float) totalCacheCount,
                        maxEntries,
                        (float) totalChain / (float) totalEntries,
                        maxChain,
                        (float) totalMissChain / (float) totalSlots,
                        maxMissChain
#ifdef OBJC_INSTRUMENTED
                        , totalHitCount,
                        totalHitCount ?
                        (float) totalHitProbes / (float) totalHitCount : 0.0,
                        maxHitProbes,
                        totalMissCount,
                        totalMissCount ?
                        (float) totalMissProbes / (float) totalMissCount : 0.0,
                        maxMissProbes,
                        totalFlushCount,
                        totalFlushCount ?
                        (float) totalFlushedEntries / (float) totalFlushCount : 0.0,
                        maxFlushedEntries
#endif
                        );

        // NOTE(review): negativeEntryCount and cacheExpandCount are
        // unsigned; %u would be the strictly matching specifier here.
        printf ("\nNumber of \"forward::\" entries: %d\n", negativeEntryCount);
        printf ("Number of cache expansions: %d\n", cacheExpandCount);
#ifdef OBJC_INSTRUMENTED
        printf ("flush_caches: total calls total visits average visits max visits total classes visits/class\n");
        printf (" ----------- ------------ -------------- ---------- ------------- -------------\n");
        printf (" linear %11u %12u %14.1f %10u %13u %12.2f\n",
                LinearFlushCachesCount,
                LinearFlushCachesVisitedCount,
                LinearFlushCachesCount ?
                (float) LinearFlushCachesVisitedCount / (float) LinearFlushCachesCount : 0.0,
                MaxLinearFlushCachesVisitedCount,
                LinearFlushCachesVisitedCount,
                1.0);
        printf (" nonlinear %11u %12u %14.1f %10u %13u %12.2f\n",
                NonlinearFlushCachesCount,
                NonlinearFlushCachesVisitedCount,
                NonlinearFlushCachesCount ?
                (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesCount : 0.0,
                MaxNonlinearFlushCachesVisitedCount,
                NonlinearFlushCachesClassCount,
                NonlinearFlushCachesClassCount ?
                (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesClassCount : 0.0);
        printf (" ideal %11u %12u %14.1f %10u %13u %12.2f\n",
                LinearFlushCachesCount + NonlinearFlushCachesCount,
                IdealFlushCachesCount,
                LinearFlushCachesCount + NonlinearFlushCachesCount ?
                (float) IdealFlushCachesCount / (float) (LinearFlushCachesCount + NonlinearFlushCachesCount) : 0.0,
                MaxIdealFlushCachesCount,
                LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount,
                LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount ?
                (float) IdealFlushCachesCount / (float) (LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount) : 0.0);

        PrintCacheHistogram ("\nCache hit histogram:", &CacheHitHistogram[0], CACHE_HISTOGRAM_SIZE);
        PrintCacheHistogram ("\nCache miss histogram:", &CacheMissHistogram[0], CACHE_HISTOGRAM_SIZE);
#endif

#if 0
        printf ("\nLookup chains:");
        for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
        {
            if (chainCount[index] != 0)
                printf (" %u:%u", index, chainCount[index]);
        }

        printf ("\nMiss chains:");
        for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
        {
            if (missChainCount[index] != 0)
                printf (" %u:%u", index, missChainCount[index]);
        }

        printf ("\nTotal memory usage for cache data structures: %lu bytes\n",
                totalCacheCount * (sizeof(struct objc_cache) - sizeof(Method)) +
                totalSlots * sizeof(Method) +
                negativeEntryCount * sizeof(struct objc_method));
#endif
    }
}
4111
4112 /***********************************************************************
4113 * checkUniqueness.
4114 **********************************************************************/
4115 void checkUniqueness (SEL s1,
4116 SEL s2)
4117 {
4118 if (s1 == s2)
4119 return;
4120
4121 if (s1 && s2 && (strcmp ((const char *) s1, (const char *) s2) == 0))
4122 _objc_inform ("%p != %p but !strcmp (%s, %s)\n", s1, s2, (char *) s1, (char *) s2);
4123 }
4124