]> git.saurik.com Git - apple/objc4.git/blob - runtime/objc-class.m
objc4-274.tar.gz
[apple/objc4.git] / runtime / objc-class.m
1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /***********************************************************************
26 * objc-class.m
27 * Copyright 1988-1997, Apple Computer, Inc.
28 * Author: s. naroff
29 **********************************************************************/
30
31
32 /***********************************************************************
33 * Method cache locking (GrP 2001-1-14)
34 *
35 * For speed, objc_msgSend does not acquire any locks when it reads
36 * method caches. Instead, all cache changes are performed so that any
37 * objc_msgSend running concurrently with the cache mutator will not
38 * crash or hang or get an incorrect result from the cache.
39 *
40 * When cache memory becomes unused (e.g. the old cache after cache
41 * expansion), it is not immediately freed, because a concurrent
42 * objc_msgSend could still be using it. Instead, the memory is
43 * disconnected from the data structures and placed on a garbage list.
44 * The memory is now only accessible to instances of objc_msgSend that
45 * were running when the memory was disconnected; any further calls to
46 * objc_msgSend will not see the garbage memory because the other data
47 * structures don't point to it anymore. The collecting_in_critical
48 * function checks the PC of all threads and returns FALSE when all threads
49 * are found to be outside objc_msgSend. This means any call to objc_msgSend
50 * that could have had access to the garbage has finished or moved past the
51 * cache lookup stage, so it is safe to free the memory.
52 *
53 * All functions that modify cache data or structures must acquire the
54 * cacheUpdateLock to prevent interference from concurrent modifications.
55 * The function that frees cache garbage must acquire the cacheUpdateLock
56 * and use collecting_in_critical() to flush out cache readers.
57 * The cacheUpdateLock is also used to protect the custom allocator used
58 * for large method cache blocks.
59 *
60 * Cache readers (PC-checked by collecting_in_critical())
61 * objc_msgSend*
62 * _cache_getImp
63 * _cache_getMethod
64 *
65 * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
66 * _cache_fill (acquires lock)
67 * _cache_expand (only called from cache_fill)
68 * _cache_create (only called from cache_expand)
69 * bcopy (only called from instrumented cache_expand)
70 * flush_caches (acquires lock)
71 * _cache_flush (only called from cache_fill and flush_caches)
72 * _cache_collect_free (only called from cache_expand and cache_flush)
73 *
74 * UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
75 * _cache_print
76 * _class_printMethodCaches
77 * _class_printDuplicateCacheEntries
78 * _class_printMethodCacheStatistics
79 *
80 * _class_lookupMethodAndLoadCache is a special case. It may read a
81 * method triplet out of one cache and store it in another cache. This
82 * is unsafe if the method triplet is a forward:: entry, because the
83 * triplet itself could be freed unless _class_lookupMethodAndLoadCache
84 * were PC-checked or used a lock. Additionally, storing the method
85 * triplet in both caches would result in double-freeing if both caches
86 * were flushed or expanded. The solution is for _cache_getMethod to
87 * ignore all entries whose implementation is _objc_msgForward, so
88 * _class_lookupMethodAndLoadCache cannot look at a forward:: entry
89 * unsafely or place it in multiple caches.
90 ***********************************************************************/
91
92 /***********************************************************************
93 * Lazy method list arrays and method list locking (2004-10-19)
94 *
95 * cls->methodLists may be in one of three forms:
96 * 1. NULL: The class has no methods.
97 * 2. non-NULL, with CLS_NO_METHOD_ARRAY set: cls->methodLists points
98 * to a single method list, which is the class's only method list.
99 * 3. non-NULL, with CLS_NO_METHOD_ARRAY clear: cls->methodLists points to
100 * an array of method list pointers. The end of the array's block
101 * is set to -1. If the actual number of method lists is smaller
102 * than that, the rest of the array is NULL.
103 *
104 * Attaching categories and adding and removing classes may change
105 * the form of the class list. In addition, individual method lists
106 * may be reallocated when fixed up.
107 *
108 * Classes are initially read as #1 or #2. If a category is attached
109 * or other methods added, the class is changed to #3. Once in form #3,
110 * the class is never downgraded to #1 or #2, even if methods are removed.
111 * Classes added with objc_addClass are initially either #1 or #3.
112 *
113 * Accessing and manipulating a class's method lists are synchronized,
114 * to prevent races when one thread restructures the list. However,
115 * if the class is not yet in use (i.e. not in class_hash), then the
116 * thread loading the class may access its method lists without locking.
117 *
118 * The following functions acquire methodListLock:
119 * class_getInstanceMethod
120 * class_getClassMethod
121 * class_nextMethodList
122 * class_addMethods
123 * class_removeMethods
124 * class_respondsToMethod
125 * _class_lookupMethodAndLoadCache
126 * lookupMethodInClassAndLoadCache
127 * _objc_add_category_flush_caches
128 *
129 * The following functions don't acquire methodListLock because they
130 * only access method lists during class load and unload:
131 * _objc_register_category
132 * _resolve_categories_for_class (calls _objc_add_category)
133 * add_class_to_loadable_list
134 * _objc_addClass
135 * _objc_remove_classes_in_image
136 *
137 * The following functions use method lists without holding methodListLock.
138 * The caller must either hold methodListLock, or be loading the class.
139 * _getMethod (called by class_getInstanceMethod, class_getClassMethod,
140 * and class_respondsToMethod)
141 * _findMethodInClass (called by _class_lookupMethodAndLoadCache,
142 * lookupMethodInClassAndLoadCache, _getMethod)
143 * _findMethodInList (called by _findMethodInClass)
144 * nextMethodList (called by _findMethodInClass and class_nextMethodList
145 * fixupSelectorsInMethodList (called by nextMethodList)
146 * _objc_add_category (called by _objc_add_category_flush_caches,
147 * resolve_categories_for_class and _objc_register_category)
148 * _objc_insertMethods (called by class_addMethods and _objc_add_category)
149 * _objc_removeMethods (called by class_removeMethods)
150 * _objcTweakMethodListPointerForClass (called by _objc_insertMethods)
151 * get_base_method_list (called by add_class_to_loadable_list)
152 * lookupNamedMethodInMethodList (called by add_class_to_loadable_list)
153 ***********************************************************************/
154
155 /***********************************************************************
156 * Thread-safety of class info bits (2004-10-19)
157 *
158 * Some class info bits are used to store mutable runtime state.
159 * Modifications of the info bits at particular times need to be
160 * synchronized to prevent races.
161 *
162 * Three thread-safe modification functions are provided:
163 * _class_setInfo() // atomically sets some bits
164 * _class_clearInfo() // atomically clears some bits
165 * _class_changeInfo() // atomically sets some bits and clears others
166 * These replace CLS_SETINFO() for the multithreaded cases.
167 *
168 * Three modification windows are defined:
169 * - compile time
170 * - class construction or image load (before +load) in one thread
171 * - multi-threaded messaging and method caches
172 *
173 * Info bit modification at compile time and class construction do not
174 * need to be locked, because only one thread is manipulating the class.
175 * Info bit modification during messaging needs to be locked, because
176 * there may be other threads simultaneously messaging or otherwise
177 * manipulating the class.
178 *
179 * Modification windows for each flag:
180 *
181 * CLS_CLASS: compile-time and class load
182 * CLS_META: compile-time and class load
183 * CLS_INITIALIZED: +initialize
184 * CLS_POSING: messaging
185 * CLS_MAPPED: compile-time
186 * CLS_FLUSH_CACHE: messaging
187 * CLS_GROW_CACHE: messaging
188 * CLS_NEED_BIND: unused
189 * CLS_METHOD_ARRAY: unused
190 * CLS_JAVA_HYBRID: JavaBridge only
191 * CLS_JAVA_CLASS: JavaBridge only
192 * CLS_INITIALIZING: messaging
193 * CLS_FROM_BUNDLE: class load
194 * CLS_HAS_CXX_STRUCTORS: compile-time and class load
195 * CLS_NO_METHOD_ARRAY: class load and messaging
196 * CLS_HAS_LOAD_METHOD: class load
197 *
198 * CLS_INITIALIZED and CLS_INITIALIZING have additional thread-safety
199 * constraints to support thread-safe +initialize. See "Thread safety
200 * during class initialization" for details.
201 *
202 * CLS_JAVA_HYBRID and CLS_JAVA_CLASS are set immediately after JavaBridge
203 * calls objc_addClass(). The JavaBridge does not use an atomic update,
204 * but the modification counts as "class construction" unless some other
205 * thread quickly finds the class via the class list. This race is
206 * small and unlikely in well-behaved code.
207 *
208 * Most info bits that may be modified during messaging are also never
209 * read without a lock. There is no general read lock for the info bits.
210 * CLS_INITIALIZED: classInitLock
211 * CLS_FLUSH_CACHE: cacheUpdateLock
212 * CLS_GROW_CACHE: cacheUpdateLock
213 * CLS_NO_METHOD_ARRAY: methodListLock
214 * CLS_INITIALIZING: classInitLock
215 ***********************************************************************/
216
217 /***********************************************************************
218 * Thread-safety during class initialization (GrP 2001-9-24)
219 *
220 * Initial state: CLS_INITIALIZING and CLS_INITIALIZED both clear.
221 * During initialization: CLS_INITIALIZING is set
222 * After initialization: CLS_INITIALIZING clear and CLS_INITIALIZED set.
223 * CLS_INITIALIZING and CLS_INITIALIZED are never set at the same time.
224 * CLS_INITIALIZED is never cleared once set.
225 *
226 * Only one thread is allowed to actually initialize a class and send
227 * +initialize. Enforced by allowing only one thread to set CLS_INITIALIZING.
228 *
229 * Additionally, threads trying to send messages to a class must wait for
230 * +initialize to finish. During initialization of a class, that class's
231 * method cache is kept empty. objc_msgSend will revert to
232 * class_lookupMethodAndLoadCache, which checks CLS_INITIALIZED before
233 * messaging. If CLS_INITIALIZED is clear but CLS_INITIALIZING is set,
234 * the thread must block, unless it is the thread that started
235 * initializing the class in the first place.
236 *
237 * Each thread keeps a list of classes it's initializing.
238 * The global classInitLock is used to synchronize changes to CLS_INITIALIZED
239 * and CLS_INITIALIZING: the transition to CLS_INITIALIZING must be
240 * an atomic test-and-set with respect to itself and the transition
241 * to CLS_INITIALIZED.
242 * The global classInitWaitCond is used to block threads waiting for an
243 * initialization to complete. The classInitLock synchronizes
244 * condition checking and the condition variable.
245 **********************************************************************/
246
247 /***********************************************************************
248 * +initialize deadlock case when a class is marked initializing while
249 * its superclass is initialized. Solved by completely initializing
250 * superclasses before beginning to initialize a class.
251 *
252 * OmniWeb class hierarchy:
253 * OBObject
254 * | ` OBPostLoader
255 * OFObject
256 * / \
257 * OWAddressEntry OWController
258 * |
259 * OWConsoleController
260 *
261 * Thread 1 (evil testing thread):
262 * initialize OWAddressEntry
263 * super init OFObject
264 * super init OBObject
265 * [OBObject initialize] runs OBPostLoader, which inits lots of classes...
266 * initialize OWConsoleController
267 * super init OWController - wait for Thread 2 to finish OWController init
268 *
269 * Thread 2 (normal OmniWeb thread):
270 * initialize OWController
271 * super init OFObject - wait for Thread 1 to finish OFObject init
272 *
273 * deadlock!
274 *
275 * Solution: fully initialize super classes before beginning to initialize
276 * a subclass. Then the initializing+initialized part of the class hierarchy
277 * will be a contiguous subtree starting at the root, so other threads
278 * can't jump into the middle between two initializing classes, and we won't
279 * get stuck while a superclass waits for its subclass which waits for the
280 * superclass.
281 **********************************************************************/
282
283
284
285 /***********************************************************************
286 * Imports.
287 **********************************************************************/
288
289 #import <mach/mach_interface.h>
290 #include <mach-o/ldsyms.h>
291 #include <mach-o/dyld.h>
292
293 #include <sys/types.h>
294 #include <unistd.h>
295 #include <stdlib.h>
296 #include <sys/uio.h>
297 #include <sys/fcntl.h>
298
299 #import "objc-class.h"
300
301 #import <objc/Object.h>
302 #import <objc/objc-runtime.h>
303 #import "objc-private.h"
304 #import "hashtable2.h"
305 #import "maptable.h"
306
307 #include <sys/types.h>
308
309 // Needed functions not in any header file
310 size_t malloc_size (const void * ptr);
311
312 // Needed kernel interface
313 #import <mach/mach.h>
314 #import <mach/thread_status.h>
315
316
317 /***********************************************************************
318 * Conditionals.
319 **********************************************************************/
320
321 // Define PRELOAD_SUPERCLASS_CACHES to cause method lookups to add the
322 // method the appropriate superclass caches, in addition to the normal
323 // encaching in the subclass where the method was messaged. Doing so
324 // will speed up messaging the same method from instances of the
325 // superclasses, but also uses up valuable cache space for a speculative
326 // purpose
327 // See radar 2364264 about incorrectly propagating _objc_forward entries
328 // and double freeing them, first, before turning this on!
329 // (Radar 2364264 is now "inactive".)
330 // Double-freeing is also a potential problem when this is off. See
331 // note about _class_lookupMethodAndLoadCache in "Method cache locking".
332 //#define PRELOAD_SUPERCLASS_CACHES
333
334 /***********************************************************************
335 * Exports.
336 **********************************************************************/
337
338 #ifdef OBJC_INSTRUMENTED
339 enum {
340 CACHE_HISTOGRAM_SIZE = 512
341 };
342
343 unsigned int CacheHitHistogram [CACHE_HISTOGRAM_SIZE];
344 unsigned int CacheMissHistogram [CACHE_HISTOGRAM_SIZE];
345 #endif
346
347 /***********************************************************************
348 * Constants and macros internal to this module.
349 **********************************************************************/
350
351 // INIT_CACHE_SIZE and INIT_META_CACHE_SIZE must be a power of two
352 enum {
353 INIT_CACHE_SIZE_LOG2 = 2,
354 INIT_META_CACHE_SIZE_LOG2 = 2,
355 INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2),
356 INIT_META_CACHE_SIZE = (1 << INIT_META_CACHE_SIZE_LOG2)
357 };
358
359 // Amount of space required for count hash table buckets, knowing that
360 // one entry is embedded in the cache structure itself
361 #define TABLE_SIZE(count) ((count - 1) * sizeof(Method))
362
363 // A sentinel (magic value) to report bad thread_get_state status
364 #define PC_SENTINAL 0
365
366
367 /***********************************************************************
368 * Types internal to this module.
369 **********************************************************************/
370
371 #ifdef OBJC_INSTRUMENTED
372 struct CacheInstrumentation
373 {
374 unsigned int hitCount; // cache lookup success tally
375 unsigned int hitProbes; // sum entries checked to hit
376 unsigned int maxHitProbes; // max entries checked to hit
377 unsigned int missCount; // cache lookup no-find tally
378 unsigned int missProbes; // sum entries checked to miss
379 unsigned int maxMissProbes; // max entries checked to miss
380 unsigned int flushCount; // cache flush tally
381 unsigned int flushedEntries; // sum cache entries flushed
382 unsigned int maxFlushedEntries; // max cache entries flushed
383 };
384 typedef struct CacheInstrumentation CacheInstrumentation;
385
386 // Cache instrumentation data follows table, so it is most compatible
387 #define CACHE_INSTRUMENTATION(cache) (CacheInstrumentation *) &cache->buckets[cache->mask + 1];
388 #endif
389
390 /***********************************************************************
391 * Function prototypes internal to this module.
392 **********************************************************************/
393
394 static Ivar class_getVariable (Class cls, const char * name);
395 static void flush_caches (Class cls, BOOL flush_meta);
396 static struct objc_method_list *nextMethodList(struct objc_class *cls, void **it);
397 static void addClassToOriginalClass (Class posingClass, Class originalClass);
398 static void _objc_addOrigClass (Class origClass);
399 static void _freedHandler (id self, SEL sel);
400 static void _nonexistentHandler (id self, SEL sel);
401 static void class_initialize (Class cls);
402 static Cache _cache_expand (Class cls);
403 static int LogObjCMessageSend (BOOL isClassMethod, const char * objectsClass, const char * implementingClass, SEL selector);
404 static BOOL _cache_fill (Class cls, Method smt, SEL sel);
405 static void _cache_addForwardEntry(Class cls, SEL sel);
406 static void _cache_flush (Class cls);
407 static IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);
408 static int SubtypeUntil (const char * type, char end);
409 static const char * SkipFirstType (const char * type);
410
411 static unsigned long _get_pc_for_thread (mach_port_t thread);
412 static int _collecting_in_critical (void);
413 static void _garbage_make_room (void);
414 static void _cache_collect_free (void * data, size_t size, BOOL tryCollect);
415
416 static BOOL cache_allocator_is_block(void *block);
417 static void *cache_allocator_calloc(size_t size);
418 static void cache_allocator_free(void *block);
419
420 static void _cache_print (Cache cache);
421 static unsigned int log2 (unsigned int x);
422 static void PrintCacheHeader (void);
423 #ifdef OBJC_INSTRUMENTED
424 static void PrintCacheHistogram (char * title, unsigned int * firstEntry, unsigned int entryCount);
425 #endif
426
427 /***********************************************************************
428 * Static data internal to this module.
429 **********************************************************************/
430
431 // When _class_uncache is non-zero, cache growth copies the existing
432 // entries into the new (larger) cache. When this flag is zero, new
433 // (larger) caches start out empty.
434 static int _class_uncache = 1;
435
436 // When _class_slow_grow is non-zero, any given cache is actually grown
437 // only on the odd-numbered times it becomes full; on the even-numbered
438 // times, it is simply emptied and re-used. When this flag is zero,
439 // caches are grown every time.
440 static int _class_slow_grow = 1;
441
442 // Lock for cache access.
443 // Held when modifying a cache in place.
444 // Held when installing a new cache on a class.
445 // Held when adding to the cache garbage list.
446 // Held when disposing cache garbage.
447 // See "Method cache locking" above for notes about cache locking.
448 static OBJC_DECLARE_LOCK(cacheUpdateLock);
449
450 // classInitLock protects classInitWaitCond and examination and modification
451 // of CLS_INITIALIZED and CLS_INITIALIZING.
452 OBJC_DECLARE_LOCK(classInitLock);
453 // classInitWaitCond is signalled when any class is done initializing.
454 // Threads that are waiting for a class to finish initializing wait on this.
455 pthread_cond_t classInitWaitCond = PTHREAD_COND_INITIALIZER;
456
457 // Lock for method list access and modification.
458 // Protects methodLists fields, method arrays, and CLS_NO_METHOD_ARRAY bits.
459 // Classes not yet in use do not need to take this lock.
460 OBJC_DECLARE_LOCK(methodListLock);
461
462 // When traceDuplicates is non-zero, _cacheFill checks whether the method
463 // being encached is already there. The number of times it finds a match
464 // is tallied in cacheFillDuplicates. When traceDuplicatesVerbose is
465 // non-zero, each duplication is logged when found in this way.
466 static int traceDuplicates = 0;
467 static int traceDuplicatesVerbose = 0;
468 static int cacheFillDuplicates = 0;
469
470 // Custom cache allocator parameters
471 // CACHE_REGION_SIZE must be a multiple of CACHE_QUANTUM.
472 #define CACHE_QUANTUM 520
473 #define CACHE_REGION_SIZE 131040 // quantized just under 128KB (131072)
474 // #define CACHE_REGION_SIZE 262080 // quantized just under 256KB (262144)
475
476 #ifdef OBJC_INSTRUMENTED
477 // Instrumentation
478 static unsigned int LinearFlushCachesCount = 0;
479 static unsigned int LinearFlushCachesVisitedCount = 0;
480 static unsigned int MaxLinearFlushCachesVisitedCount = 0;
481 static unsigned int NonlinearFlushCachesCount = 0;
482 static unsigned int NonlinearFlushCachesClassCount = 0;
483 static unsigned int NonlinearFlushCachesVisitedCount = 0;
484 static unsigned int MaxNonlinearFlushCachesVisitedCount = 0;
485 static unsigned int IdealFlushCachesCount = 0;
486 static unsigned int MaxIdealFlushCachesCount = 0;
487 #endif
488
489 // Method call logging
490 typedef int (*ObjCLogProc)(BOOL, const char *, const char *, SEL);
491
492 static int totalCacheFills NOBSS = 0;
493 static int objcMsgLogFD = (-1);
494 static ObjCLogProc objcMsgLogProc = &LogObjCMessageSend;
495 static int objcMsgLogEnabled = 0;
496
497 // Error Messages
498 static const char
499 _errNoMem[] = "failed -- out of memory(%s, %u)",
500 _errAllocNil[] = "allocating nil object",
501 _errFreedObject[] = "message %s sent to freed object=0x%lx",
502 _errNonExistentObject[] = "message %s sent to non-existent object=0x%lx",
503 _errBadSel[] = "invalid selector %s",
504 _errNotSuper[] = "[%s poseAs:%s]: target not immediate superclass",
505 _errNewVars[] = "[%s poseAs:%s]: %s defines new instance variables";
506
507 /***********************************************************************
508 * Information about multi-thread support:
509 *
510 * Since we do not lock many operations which walk the superclass, method
511 * and ivar chains, these chains must remain intact once a class is published
512 * by inserting it into the class hashtable. All modifications must be
513 * atomic so that someone walking these chains will always get a valid
514 * result.
515 ***********************************************************************/
516 /***********************************************************************
517 * A static empty cache. All classes initially point at this cache.
518 * When the first message is sent it misses in the cache, and when
519 * the cache is grown it checks for this case and uses malloc rather
520 * than realloc. This avoids the need to check for NULL caches in the
521 * messenger.
522 ***********************************************************************/
523
#ifndef OBJC_INSTRUMENTED
// Shared, read-only empty cache. Every class's cache field starts out
// pointing here so the messenger never has to test for a NULL cache.
const struct objc_cache emptyCache =
{
    0, // mask
    0, // occupied
    { NULL } // buckets
};
#else
// OBJC_INSTRUMENTED requires writable data immediately following emptyCache.
struct objc_cache emptyCache =
{
    0, // mask
    0, // occupied
    { NULL } // buckets
};
// Instrumentation counters that must sit directly after the cache table
// (see CACHE_INSTRUMENTATION macro above).
CacheInstrumentation emptyCacheInstrumentation = {0};
#endif
541
542
// Freed objects have their isa set to point to this dummy class.
// This avoids the need to check for Nil classes in the messenger.
// Messages to freed objects thus miss the (empty) cache and are
// routed to _freedHandler via the method lookup path.
static const struct objc_class freedObjectClass =
{
    Nil, // isa
    Nil, // super_class
    "FREED(id)", // name
    0, // version
    0, // info
    0, // instance_size
    NULL, // ivars
    NULL, // methodLists
    (Cache) &emptyCache, // cache
    NULL // protocols
};
558
// Counterpart to freedObjectClass for "non-existent" objects
// (see _nonexistentHandler and _errNonExistentObject).
static const struct objc_class nonexistentObjectClass =
{
    Nil, // isa
    Nil, // super_class
    "NONEXISTENT(id)", // name
    0, // version
    0, // info
    0, // instance_size
    NULL, // ivars
    NULL, // methodLists
    (Cache) &emptyCache, // cache
    NULL // protocols
};
572
573 /***********************************************************************
574 * object_getClassName.
575 **********************************************************************/
576 const char * object_getClassName (id obj)
577 {
578 // Even nil objects have a class name, sort of
579 if (obj == nil)
580 return "nil";
581
582 // Retrieve name from object's class
583 return ((struct objc_class *) obj->isa)->name;
584 }
585
586 /***********************************************************************
587 * object_getIndexedIvars.
588 **********************************************************************/
589 void * object_getIndexedIvars (id obj)
590 {
591 // ivars are tacked onto the end of the object
592 return ((char *) obj) + ((struct objc_class *) obj->isa)->instance_size;
593 }
594
595
/***********************************************************************
* object_cxxDestructFromClass.
* Call C++ destructors on obj, starting with cls's
* dtor method (if any) followed by superclasses' dtors (if any),
* stopping at cls's dtor (if any).
* Uses methodListLock and cacheUpdateLock. The caller must hold neither.
**********************************************************************/
static void object_cxxDestructFromClass(id obj, Class cls)
{
    void (*dtor)(id);

    // Call cls's dtor first, then superclasses's dtors.

    for ( ; cls != NULL; cls = cls->super_class) {
        // Classes that never declared C++ structors have nothing to run.
        if (!(cls->info & CLS_HAS_CXX_STRUCTORS)) continue;
        // Look up this class's .cxx_destruct implementation.
        dtor = (void(*)(id))
            lookupMethodInClassAndLoadCache(cls, cxx_destruct_sel);
        // The lookup returns _objc_msgForward when no destructor is
        // actually implemented at this level; skip those.
        if (dtor != (void(*)(id))&_objc_msgForward) {
            if (PrintCxxCtors) {
                _objc_inform("CXX: calling C++ destructors for class %s",
                             cls->name);
            }
            (*dtor)(obj);
        }
    }
}
622
623
624 /***********************************************************************
625 * object_cxxDestruct.
626 * Call C++ destructors on obj, if any.
627 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
628 **********************************************************************/
629 void object_cxxDestruct(id obj)
630 {
631 if (!obj) return;
632 object_cxxDestructFromClass(obj, obj->isa);
633 }
634
635
636 /***********************************************************************
637 * object_cxxConstructFromClass.
638 * Recursively call C++ constructors on obj, starting with base class's
639 * ctor method (if any) followed by subclasses' ctors (if any), stopping
640 * at cls's ctor (if any).
641 * Returns YES if construction succeeded.
642 * Returns NO if some constructor threw an exception. The exception is
643 * caught and discarded. Any partial construction is destructed.
644 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
645 *
646 * .cxx_construct returns id. This really means:
647 * return self: construction succeeded
648 * return nil: construction failed because a C++ constructor threw an exception
649 **********************************************************************/
650 static BOOL object_cxxConstructFromClass(id obj, Class cls)
651 {
652 id (*ctor)(id);
653
654 // Call superclasses' ctors first, if any.
655 if (cls->super_class) {
656 BOOL ok = object_cxxConstructFromClass(obj, cls->super_class);
657 if (!ok) return NO; // some superclass's ctor failed - give up
658 }
659
660 // Find this class's ctor, if any.
661 if (!(cls->info & CLS_HAS_CXX_STRUCTORS)) return YES; // no ctor - ok
662 ctor = (id(*)(id))lookupMethodInClassAndLoadCache(cls, cxx_construct_sel);
663 if (ctor == (id(*)(id))&_objc_msgForward) return YES; // no ctor - ok
664
665 // Call this class's ctor.
666 if (PrintCxxCtors) {
667 _objc_inform("CXX: calling C++ constructors for class %s", cls->name);
668 }
669 if ((*ctor)(obj)) return YES; // ctor called and succeeded - ok
670
671 // This class's ctor was called and failed.
672 // Call superclasses's dtors to clean up.
673 if (cls->super_class) object_cxxDestructFromClass(obj, cls->super_class);
674 return NO;
675 }
676
677
678 /***********************************************************************
679 * object_cxxConstructFromClass.
680 * Call C++ constructors on obj, if any.
681 * Returns YES if construction succeeded.
682 * Returns NO if some constructor threw an exception. The exception is
683 * caught and discarded. Any partial construction is destructed.
684 * Uses methodListLock and cacheUpdateLock. The caller must hold neither.
685 **********************************************************************/
686 BOOL object_cxxConstruct(id obj)
687 {
688 if (!obj) return YES;
689 return object_cxxConstructFromClass(obj, obj->isa);
690 }
691
692
693 /***********************************************************************
694 * _internal_class_createInstanceFromZone. Allocate an instance of the
695 * specified class with the specified number of bytes for indexed
696 * variables, in the specified zone. The isa field is set to the
697 * class, C++ default constructors are called, and all other fields are zeroed.
698 **********************************************************************/
699 static id _internal_class_createInstanceFromZone (Class aClass,
700 unsigned nIvarBytes,
701 void * z)
702 {
703 id obj;
704 register unsigned byteCount;
705
706 // Can't create something for nothing
707 if (aClass == Nil)
708 {
709 __objc_error ((id) aClass, _errAllocNil, 0);
710 return nil;
711 }
712
713 // Allocate and initialize
714 byteCount = ((struct objc_class *) aClass)->instance_size + nIvarBytes;
715 obj = (id) malloc_zone_calloc (z, 1, byteCount);
716 if (!obj)
717 {
718 __objc_error ((id) aClass, _errNoMem, ((struct objc_class *) aClass)->name, nIvarBytes);
719 return nil;
720 }
721
722 // Set the isa pointer
723 obj->isa = aClass;
724
725 // Call C++ constructors, if any.
726 if (!object_cxxConstruct(obj)) {
727 // Some C++ constructor threw an exception.
728 malloc_zone_free(z, obj);
729 return nil;
730 }
731
732 return obj;
733 }
734
/***********************************************************************
* _internal_class_createInstance.  Allocate an instance of the specified
* class with the specified number of bytes for indexed variables, in
* the default malloc zone, via _internal_class_createInstanceFromZone.
**********************************************************************/
static id _internal_class_createInstance (Class aClass,
                                          unsigned nIvarBytes)
{
    // Same as the FromZone variant, but always in the default zone.
    void *zone = malloc_default_zone ();
    return _internal_class_createInstanceFromZone (aClass, nIvarBytes, zone);
}
747
// Overridable entry points for posing and allocation.  Callers go
// through these pointers (see class_createInstance below, which notes
// that _alloc "can be overridden").  They start out pointing at the
// internal implementations in this file.
id (*_poseAs)() = (id (*)())class_poseAs;
id (*_alloc)(Class, unsigned) = _internal_class_createInstance;
id (*_zoneAlloc)(Class, unsigned, void *) = _internal_class_createInstanceFromZone;
751
/***********************************************************************
* class_createInstanceFromZone.  Allocate an instance of the specified
* class with the specified number of bytes for indexed variables, in
* the specified zone, by dispatching through the _zoneAlloc hook.
**********************************************************************/
id class_createInstanceFromZone (Class aClass,
                                 unsigned nIvarBytes,
                                 void * z)
{
    // _zoneAlloc can be overridden; by default it points at
    // _internal_class_createInstanceFromZone.
    return _zoneAlloc (aClass, nIvarBytes, z);
}
765
/***********************************************************************
* class_createInstance.  Allocate an instance of the specified class
* with the specified number of bytes for indexed variables, by
* dispatching through the _alloc hook.
**********************************************************************/
id class_createInstance (Class aClass,
                         unsigned nIvarBytes)
{
    // _alloc can be overridden; by default it points at
    // _internal_class_createInstance.
    return _alloc (aClass, nIvarBytes);
}
777
/***********************************************************************
* class_setVersion.  Record the specified version with the class.
**********************************************************************/
void class_setVersion (Class aClass,
                       int version)
{
    struct objc_class *cls = (struct objc_class *) aClass;
    cls->version = version;
}
786
/***********************************************************************
* class_getVersion.  Return the version recorded with the class.
**********************************************************************/
int class_getVersion (Class aClass)
{
    struct objc_class *cls = (struct objc_class *) aClass;
    return cls->version;
}
794
795
// Linear search of mlist for a method whose selector *string* matches
// meth_name.  Used before selectors are uniqued, so names are compared
// by content (with a cheap first-character rejection before strcmp)
// rather than by pointer.  Returns NULL if mlist is NULL or no match.
static inline Method _findNamedMethodInList(struct objc_method_list * mlist, const char *meth_name) {
    int index;
    if (!mlist) return NULL;
    for (index = 0; index < mlist->method_count; index++) {
        Method candidate = &mlist->method_list[index];
        const char *name = (const char *)candidate->method_name;
        if (*name == *meth_name  &&  0 == strcmp(name, meth_name)) {
            return candidate;
        }
    }
    return NULL;
}
807
808
/***********************************************************************
* fixupSelectorsInMethodList
* Uniques selectors in the given method list.
* The given method list must be non-NULL and not already fixed-up.
* If the class was loaded from a bundle:
*   fixes up the given list in place with heap-allocated selector strings
* If the class was not from a bundle:
*   allocates a copy of the method list, fixes up the copy, and returns
*   the copy. The given list is unmodified.  (The caller is expected to
*   store the returned pointer back, as nextMethodList and
*   _findMethodInClass do.)
*
* If cls is already in use, methodListLock must be held by the caller.
**********************************************************************/
// Fixed-up method lists get mlist->obsolete = _OBJC_FIXED_UP.
#define _OBJC_FIXED_UP ((void *)1771)

static struct objc_method_list *fixupSelectorsInMethodList(Class cls, struct objc_method_list *mlist)
{
    unsigned i, size;
    Method method;
    struct objc_method_list *old_mlist;

    if ( ! mlist ) return (struct objc_method_list *)0;
    if ( mlist->obsolete != _OBJC_FIXED_UP ) {
        BOOL isBundle = CLS_GETINFO(cls, CLS_FROM_BUNDLE) ? YES : NO;
        if (!isBundle) {
            // Non-bundle class: fix up a heap copy, leaving the original
            // (possibly read-only, shared) list untouched.
            old_mlist = mlist;
            // Size of a list with method_count entries: the header's
            // inline first entry is subtracted before adding all entries.
            size = sizeof(struct objc_method_list) - sizeof(struct objc_method) + old_mlist->method_count * sizeof(struct objc_method);
            mlist = _malloc_internal(size);
            memmove(mlist, old_mlist, size);
        } else {
            // Mach-O bundles are fixed up in place.
            // This prevents leaks when a bundle is unloaded.
        }
        // Replace each raw name string with the uniqued selector,
        // under the selector-table lock.
        sel_lock();
        for ( i = 0; i < mlist->method_count; i += 1 ) {
            method = &mlist->method_list[i];
            method->method_name =
                sel_registerNameNoLock((const char *)method->method_name, isBundle);  // Always copy selector data from bundles.
        }
        sel_unlock();
        // Mark the list so it is never fixed up again.
        mlist->obsolete = _OBJC_FIXED_UP;
    }
    return mlist;
}
853
854
855 /***********************************************************************
856 * nextMethodList
857 * Returns successive method lists from the given class.
858 * Method lists are returned in method search order (i.e. highest-priority
859 * implementations first).
860 * All necessary method list fixups are performed, so the
861 * returned method list is fully-constructed.
862 *
863 * If cls is already in use, methodListLock must be held by the caller.
864 * For full thread-safety, methodListLock must be continuously held by the
865 * caller across all calls to nextMethodList(). If the lock is released,
866 * the bad results listed in class_nextMethodList() may occur.
867 *
868 * void *iterator = NULL;
869 * struct objc_method_list *mlist;
870 * OBJC_LOCK(&methodListLock);
871 * while ((mlist = nextMethodList(cls, &iterator))) {
872 * // do something with mlist
873 * }
874 * OBJC_UNLOCK(&methodListLock);
875 **********************************************************************/
static struct objc_method_list *nextMethodList(struct objc_class *cls,
                                               void **it)
{
    // *it encodes the iteration state: 0 means "start", any other value
    // is the index of the next methodLists entry to visit.
    uintptr_t index = *(uintptr_t *)it;
    struct objc_method_list **resultp;

    if (index == 0) {
        // First call to nextMethodList.
        if (!cls->methodLists) {
            // No method lists at all.
            resultp = NULL;
        } else if (cls->info & CLS_NO_METHOD_ARRAY) {
            // methodLists is a single method list stored inline in the
            // field itself, not an array of lists.
            resultp = (struct objc_method_list **)&cls->methodLists;
        } else {
            // methodLists is an array terminated by NULL or
            // END_OF_METHODS_LIST.
            resultp = &cls->methodLists[0];
            if (!*resultp  ||  *resultp == END_OF_METHODS_LIST) {
                resultp = NULL;
            }
        }
    } else {
        // Subsequent call to nextMethodList.
        if (!cls->methodLists) {
            resultp = NULL;
        } else if (cls->info & CLS_NO_METHOD_ARRAY) {
            // The single inline list was already returned by the first call.
            resultp = NULL;
        } else {
            resultp = &cls->methodLists[index];
            if (!*resultp  ||  *resultp == END_OF_METHODS_LIST) {
                resultp = NULL;
            }
        }
    }

    // resultp now is NULL, meaning there are no more method lists,
    // OR the address of the method list pointer to fix up and return.

    if (resultp) {
        // Unique the list's selectors (lazily) before handing it out,
        // storing the fixed-up list back into the class.
        if (*resultp  &&  (*resultp)->obsolete != _OBJC_FIXED_UP) {
            *resultp = fixupSelectorsInMethodList(cls, *resultp);
        }
        *it = (void *)(index + 1);
        return *resultp;
    } else {
        // End of iteration: reset the iterator so it can be reused.
        *it = 0;
        return NULL;
    }
}
922
923
924 /* These next three functions are the heart of ObjC method lookup.
925 * If the class is currently in use, methodListLock must be held by the caller.
926 */
// Search a single (already fixed-up) method list for the uniqued
// selector sel.  Comparison is by pointer identity, not string
// content.  Returns NULL if mlist is NULL or sel is absent.
static inline Method _findMethodInList(struct objc_method_list * mlist, SEL sel) {
    int index;
    if (mlist) {
        for (index = 0; index < mlist->method_count; index++) {
            Method candidate = &mlist->method_list[index];
            if (candidate->method_name == sel) {
                return candidate;
            }
        }
    }
    return NULL;
}
938
static inline Method _findMethodInClass(Class cls, SEL sel) __attribute__((always_inline));
static inline Method _findMethodInClass(Class cls, SEL sel) {
    // Flattened version of nextMethodList(). The optimizer doesn't
    // do a good job with hoisting the conditionals out of the loop.
    // Conceptually, this looks like:
    // while ((mlist = nextMethodList(cls, &iterator))) {
    //     Method m = _findMethodInList(mlist, sel);
    //     if (m) return m;
    // }
    // Searches cls only, NOT its superclasses (see _getMethod for that).

    if (!cls->methodLists) {
        // No method lists.
        return NULL;
    }
    else if (cls->info & CLS_NO_METHOD_ARRAY) {
        // One method list, stored inline in the methodLists field.
        struct objc_method_list **mlistp;
        mlistp = (struct objc_method_list **)&cls->methodLists;
        // Lazily unique the list's selectors before searching it.
        if ((*mlistp)->obsolete != _OBJC_FIXED_UP) {
            *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
        }
        return _findMethodInList(*mlistp, sel);
    }
    else {
        // Multiple method lists in an array terminated by NULL or
        // END_OF_METHODS_LIST, in method search order.
        struct objc_method_list **mlistp;
        for (mlistp = cls->methodLists;
             *mlistp != NULL  &&  *mlistp != END_OF_METHODS_LIST;
             mlistp++)
        {
            Method m;
            // Lazily unique each list's selectors before searching it.
            if ((*mlistp)->obsolete != _OBJC_FIXED_UP) {
                *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
            }
            m = _findMethodInList(*mlistp, sel);
            if (m) return m;
        }
        return NULL;
    }
}
979
// Walk cls and its superclasses in order, returning the first Method
// implementing sel.  NULL if no class in the chain implements it.
static inline Method _getMethod(Class cls, SEL sel) {
    while (cls) {
        Method found = _findMethodInClass(cls, sel);
        if (found) return found;
        cls = cls->super_class;
    }
    return NULL;
}
988
989
// fixme for gc debugging temporary use
// Look up sel in cls only (not superclasses) and return its IMP,
// or NULL if cls itself does not implement it.
__private_extern__ IMP findIMPInClass(Class cls, SEL sel)
{
    Method m = _findMethodInClass(cls, sel);
    return m ? m->method_imp : NULL;
}
997
/***********************************************************************
* class_getInstanceMethod.  Return the instance method for the
* specified class and selector, searching the class and its
* superclasses.  Returns NULL for a nil class or selector, or when
* the method is not found.
**********************************************************************/
Method class_getInstanceMethod (Class aClass,
                                SEL aSelector)
{
    Method result = NULL;

    // Need both a class and a selector.
    if (aClass  &&  aSelector) {
        // Search under the method-list lock.
        OBJC_LOCK(&methodListLock);
        result = _getMethod (aClass, aSelector);
        OBJC_UNLOCK(&methodListLock);
    }
    return result;
}
1017
/***********************************************************************
* class_getClassMethod.  Return the class method for the specified
* class and selector.  Returns NULL for a nil class or selector, or
* when the method is not found.
**********************************************************************/
Method class_getClassMethod (Class aClass,
                             SEL aSelector)
{
    Method result = NULL;

    // Need both a class and a selector.
    if (aClass  &&  aSelector) {
        // Class methods live on the metaclass; search it under the lock.
        OBJC_LOCK(&methodListLock);
        result = _getMethod (GETMETA(aClass), aSelector);
        OBJC_UNLOCK(&methodListLock);
    }
    return result;
}
1037
/***********************************************************************
* class_getVariable.  Return the named instance variable, searching
* cls and then each of its superclasses.  Returns NULL if not found.
**********************************************************************/
static Ivar class_getVariable (Class cls,
                               const char * name)
{
    struct objc_class * thisCls;

    // Outer loop - the class and each of its superclasses in turn.
    for (thisCls = cls; thisCls != Nil; thisCls = ((struct objc_class *) thisCls)->super_class)
    {
        int ix;

        // A class with no ivar list contributes nothing.
        if (!thisCls->ivars)
            continue;

        // Inner loop - scan this class's own ivar list.
        for (ix = 0; ix < thisCls->ivars->ivar_count; ix += 1)
        {
            Ivar candidate = &thisCls->ivars->ivar_list[ix];

            // Be careful: the compiler generates ivar entries with NULL
            // ivar_name (e.g. for anonymous bit fields), so guard
            // before comparing.
            if (candidate->ivar_name  &&
                strcmp (name, candidate->ivar_name) == 0)
                return candidate;
        }
    }

    // Not found anywhere in the hierarchy.
    return NULL;
}
1075
/***********************************************************************
* class_getInstanceVariable.  Return the named instance variable, or
* NULL for a nil class, NULL name, or no such ivar.
*
* Someday add class_getClassVariable ().
**********************************************************************/
Ivar class_getInstanceVariable (Class aClass,
                                const char * name)
{
    // Must have a class and a name.
    if (aClass == Nil  ||  name == NULL)
        return NULL;

    // Search the class and its superclasses.
    return class_getVariable (aClass, name);
}
1091
/***********************************************************************
* flush_caches.  Flush the instance and optionally class method caches
* of cls and all its subclasses.
*
* Specifying Nil for the class flushes the caches of all classes.
* Takes cacheUpdateLock internally.
**********************************************************************/
static void flush_caches(Class cls, BOOL flush_meta)
{
    int numClasses = 0, newNumClasses;
    struct objc_class * * classes = NULL;
    int i;
    struct objc_class * clsObject;
#ifdef OBJC_INSTRUMENTED
    unsigned int classesVisited;
    unsigned int subclassCount;
#endif

    // Do nothing if class has no cache
    // This check is safe to do without any cache locks.
    if (cls && !((struct objc_class *) cls)->cache)
        return;

    // Snapshot the registered class list.  Loop because new classes may
    // be registered between the count query and the copy.
    newNumClasses = objc_getClassList((Class *)NULL, 0);
    while (numClasses < newNumClasses) {
        numClasses = newNumClasses;
        classes = _realloc_internal(classes, sizeof(Class) * numClasses);
        newNumClasses = objc_getClassList((Class *)classes, numClasses);
    }
    numClasses = newNumClasses;

    OBJC_LOCK(&cacheUpdateLock);

    // Handle nil and root instance class specially: flush all
    // instance and class method caches.  Nice that this
    // loop is linear vs the N-squared loop just below.
    if (!cls || !((struct objc_class *) cls)->super_class)
    {
#ifdef OBJC_INSTRUMENTED
        LinearFlushCachesCount += 1;
        classesVisited = 0;
        subclassCount = 0;
#endif
        // Traverse all classes in the hash table
        for (i = 0; i < numClasses; i++)
        {
            struct objc_class * metaClsObject;
#ifdef OBJC_INSTRUMENTED
            classesVisited += 1;
#endif
            clsObject = classes[i];

            // Skip class that is known not to be a subclass of this root
            // (the isa pointer of any meta class points to the meta class
            // of the root).
            // NOTE: When is an isa pointer of a hash tabled class ever nil?
            metaClsObject = clsObject->isa;
            if (cls && metaClsObject && cls->isa != metaClsObject->isa)
            {
                continue;
            }

#ifdef OBJC_INSTRUMENTED
            subclassCount += 1;
#endif

            // Flush the instance cache, and the metaclass (class-method)
            // cache if requested.
            _cache_flush (clsObject);
            if (flush_meta && metaClsObject != NULL) {
                _cache_flush (metaClsObject);
            }
        }
#ifdef OBJC_INSTRUMENTED
        LinearFlushCachesVisitedCount += classesVisited;
        if (classesVisited > MaxLinearFlushCachesVisitedCount)
            MaxLinearFlushCachesVisitedCount = classesVisited;
        IdealFlushCachesCount += subclassCount;
        if (subclassCount > MaxIdealFlushCachesCount)
            MaxIdealFlushCachesCount = subclassCount;
#endif

        OBJC_UNLOCK(&cacheUpdateLock);
        _free_internal(classes);
        return;
    }

    // Outer loop - flush any cache that could now get a method from
    // cls (i.e. the cache associated with cls and any of its subclasses).
#ifdef OBJC_INSTRUMENTED
    NonlinearFlushCachesCount += 1;
    classesVisited = 0;
    subclassCount = 0;
#endif
    for (i = 0; i < numClasses; i++)
    {
        struct objc_class * clsIter;

#ifdef OBJC_INSTRUMENTED
        NonlinearFlushCachesClassCount += 1;
#endif
        clsObject = classes[i];

        // Inner loop - walk clsObject's superclass chain looking for cls.
        clsIter = clsObject;
        while (clsIter)
        {

#ifdef OBJC_INSTRUMENTED
            classesVisited += 1;
#endif
            // Flush clsObject instance method cache if
            // clsObject is a subclass of cls, or is cls itself
            // Flush the class method cache if that was asked for
            if (clsIter == cls)
            {
#ifdef OBJC_INSTRUMENTED
                subclassCount += 1;
#endif
                _cache_flush (clsObject);
                if (flush_meta)
                    _cache_flush (clsObject->isa);

                break;

            }

            // Flush clsObject class method cache if cls is
            // the meta class of clsObject or of one
            // of clsObject's superclasses
            else if (clsIter->isa == cls)
            {
#ifdef OBJC_INSTRUMENTED
                subclassCount += 1;
#endif
                _cache_flush (clsObject->isa);
                break;
            }

            // Move up superclass chain
            else if (ISINITIALIZED(clsIter))
                clsIter = clsIter->super_class;

            // clsIter is not initialized, so its cache
            // must be empty.  This happens only when
            // clsIter == clsObject, because
            // superclasses are initialized before
            // subclasses, and this loop traverses
            // from sub- to super- classes.
            else
                break;
        }
    }
#ifdef OBJC_INSTRUMENTED
    NonlinearFlushCachesVisitedCount += classesVisited;
    if (classesVisited > MaxNonlinearFlushCachesVisitedCount)
        MaxNonlinearFlushCachesVisitedCount = classesVisited;
    IdealFlushCachesCount += subclassCount;
    if (subclassCount > MaxIdealFlushCachesCount)
        MaxIdealFlushCachesCount = subclassCount;
#endif

    OBJC_UNLOCK(&cacheUpdateLock);
    _free_internal(classes);
}
1254
/***********************************************************************
* _objc_flush_caches.  Flush the caches of the specified class and any
* of its subclasses.  If cls is a meta-class, only meta-class (i.e.
* class method) caches are flushed.  If cls is an instance-class, both
* instance-class and meta-class caches are flushed.
**********************************************************************/
void _objc_flush_caches (Class cls)
{
    // flush_meta=YES: also flush the corresponding metaclass caches.
    flush_caches (cls, YES);
}
1265
/***********************************************************************
* do_not_remove_this_dummy_function.
* NOTE(review): the name and body suggest this exists only to keep
* class_nextMethodList referenced (e.g. so it is not dead-stripped) —
* confirm before touching.
**********************************************************************/
void do_not_remove_this_dummy_function (void)
{
    (void) class_nextMethodList (NULL, NULL);
}
1273
1274
1275 /***********************************************************************
1276 * class_nextMethodList.
1277 * External version of nextMethodList().
1278 *
1279 * This function is not fully thread-safe. A series of calls to
1280 * class_nextMethodList() may fail if methods are added to or removed
1281 * from the class between calls.
1282 * If methods are added between calls to class_nextMethodList(), it may
1283 * return previously-returned method lists again, and may fail to return
1284 * newly-added lists.
1285 * If methods are removed between calls to class_nextMethodList(), it may
1286 * omit surviving method lists or simply crash.
1287 **********************************************************************/
OBJC_EXPORT struct objc_method_list * class_nextMethodList (Class cls,
                                                            void ** it)
{
    struct objc_method_list *mlist;

    // Hold the lock only for this single iteration step; the
    // thread-safety caveats documented above still apply across calls.
    OBJC_LOCK(&methodListLock);
    mlist = nextMethodList(cls, it);
    OBJC_UNLOCK(&methodListLock);

    return mlist;
}
1297
/***********************************************************************
* _dummy.
* NOTE(review): like do_not_remove_this_dummy_function above, this
* appears to exist only to keep class_nextMethodList referenced —
* confirm before removing.
**********************************************************************/
void _dummy (void)
{
    (void) class_nextMethodList (Nil, NULL);
}
1305
/***********************************************************************
* class_addMethods.  Add the given method list to cls and flush the
* relevant method caches.
*
* Formerly class_addInstanceMethods ()
**********************************************************************/
void class_addMethods (Class cls,
                       struct objc_method_list * meths)
{
    // Add the methods under the method-list lock.
    OBJC_LOCK(&methodListLock);
    _objc_insertMethods(cls, meths);
    OBJC_UNLOCK(&methodListLock);

    // Must flush when dynamically adding methods.  No need to flush
    // all the class method caches.  If cls is a meta class, though,
    // this will still flush it and any of its sub-meta classes.
    flush_caches (cls, NO);
}
1324
/***********************************************************************
* class_addClassMethods.  Add class methods by adding instance methods
* to the metaclass.
*
* Obsolete (for binary compatibility only).
**********************************************************************/
void class_addClassMethods (Class cls,
                            struct objc_method_list * meths)
{
    // Class methods live on the metaclass.
    struct objc_class *meta = ((struct objc_class *) cls)->isa;
    class_addMethods (meta, meths);
}
1335
/***********************************************************************
* class_removeMethods.  Remove the given method list from cls and
* flush the relevant method caches.
**********************************************************************/
void class_removeMethods (Class cls,
                          struct objc_method_list * meths)
{
    // Remove the methods under the method-list lock.
    OBJC_LOCK(&methodListLock);
    _objc_removeMethods(cls, meths);
    OBJC_UNLOCK(&methodListLock);

    // Must flush when dynamically removing methods.  No need to flush
    // all the class method caches.  If cls is a meta class, though,
    // this will still flush it and any of its sub-meta classes.
    flush_caches (cls, NO);
}
1352
1353 /***********************************************************************
1354 * addClassToOriginalClass. Add to a hash table of classes involved in
1355 * a posing situation. We use this when we need to get to the "original"
1356 * class for some particular name through the function objc_getOrigClass.
1357 * For instance, the implementation of [super ...] will use this to be
1358 * sure that it gets hold of the correct super class, so that no infinite
1359 * loops will occur if the class it appears in is involved in posing.
1360 *
1361 * We use the classLock to guard the hash table.
1362 *
1363 * See tracker bug #51856.
1364 **********************************************************************/
1365
// posed_class_hash: class name string -> original Class, guarded by
// classLock (see _objc_addOrigClass / objc_getOrigClass below).
// posed_class_to_original_class_hash: posing Class -> copy of the
// original class (see addClassToOriginalClass).  Both are created
// lazily on first use.
static NXMapTable * posed_class_hash = NULL;
static NXMapTable * posed_class_to_original_class_hash = NULL;
1368
static void addClassToOriginalClass (Class posingClass,
                                     Class originalClass)
{
    // Lazily create the map the first time any class poses.
    if (posed_class_to_original_class_hash == NULL)
    {
        posed_class_to_original_class_hash =
            NXCreateMapTableFromZone (NXPtrValueMapPrototype,
                                      8,
                                      _objc_internal_zone ());
    }

    // Record the posingClass -> originalClass association.
    NXMapInsert (posed_class_to_original_class_hash,
                 posingClass, originalClass);
}
1386
/***********************************************************************
* getOriginalClassForPosingClass.
* Return the "original" class recorded for the given posing class, or
* Nil if the class was never involved in posing.
**********************************************************************/
Class getOriginalClassForPosingClass (Class posingClass)
{
    // The map is created lazily by addClassToOriginalClass; if no class
    // has ever posed it is still NULL, so guard before dereferencing.
    if (!posed_class_to_original_class_hash) return Nil;
    return NXMapGet (posed_class_to_original_class_hash, posingClass);
}
1394
/***********************************************************************
* objc_getOrigClass.  Look up a class by name, resolving posing: if the
* name belongs to a class being posed for, return the recorded original
* class rather than the poser.
**********************************************************************/
Class objc_getOrigClass (const char * name)
{
    struct objc_class * cls = Nil;

    // First check whether a poser has claimed this name.
    OBJC_LOCK(&classLock);
    if (posed_class_hash) {
        cls = (Class) NXMapGet (posed_class_hash, name);
    }
    OBJC_UNLOCK(&classLock);
    if (cls)
        return cls;

    // Not a poser.  Do a normal lookup.
    cls = objc_getClass (name);
    if (!cls)
        _objc_inform ("class `%s' not linked into application", name);

    return cls;
}
1418
/***********************************************************************
* _objc_addOrigClass.  This function is only used from class_poseAs.
* Registers the original class names, before they get obscured by
* posing, so that [super ..] will work correctly from categories
* in posing classes and in categories in classes being posed for.
**********************************************************************/
static void _objc_addOrigClass (Class origClass)
{
    struct objc_class *cls = (struct objc_class *)origClass;

    OBJC_LOCK(&classLock);

    // Create the poser's hash table on first use.
    if (posed_class_hash == NULL)
    {
        posed_class_hash = NXCreateMapTableFromZone (NXStrValueMapPrototype,
                                                     8,
                                                     _objc_internal_zone ());
    }

    // Record the name only if it is not already present: the first
    // registration for a name wins.
    if (NXMapGet (posed_class_hash, cls->name) == 0)
        NXMapInsert (posed_class_hash, cls->name, origClass);

    OBJC_UNLOCK(&classLock);
}
1443
/***********************************************************************
* class_poseAs.
* Make `imposter` (an immediate, ivar-less subclass of `original`)
* stand in for `original`: a copy of the imposter keeps the imposter's
* old identity, the imposter takes over the original's name, and the
* original is renamed with a "_%" prefix.  Subclasses and (optionally)
* class refs are rewired to point at the imposter.
*
* !!! class_poseAs () does not currently flush any caches.
**********************************************************************/
Class class_poseAs (Class imposter,
                    Class original)
{
    struct objc_class * clsObject;
    char * imposterNamePtr;
    NXHashTable * class_hash;
    NXHashState state;
    struct objc_class * copy;
#ifdef OBJC_CLASS_REFS
    header_info * hInfo;
#endif

    // Trivial case is easy
    if (imposter == original)
        return imposter;

    // Imposter must be an immediate subclass of the original
    if (((struct objc_class *)imposter)->super_class != original) {
        __objc_error(imposter, _errNotSuper, ((struct objc_class *)imposter)->name, ((struct objc_class *)original)->name);
    }

    // Can't pose when you have instance variables (how could it work?)
    if (((struct objc_class *)imposter)->ivars) {
        __objc_error(imposter, _errNewVars, ((struct objc_class *)imposter)->name, ((struct objc_class *)original)->name, ((struct objc_class *)imposter)->name);
    }

    // Build a string to use to replace the name of the original class.
    // The buffer holds "_%<name>"; later, imposterNamePtr+1 ("%<name>")
    // becomes the original class's name and the full "_%<name>" becomes
    // its metaclass's name.
#define imposterNamePrefix "_%"
    imposterNamePtr = _malloc_internal(strlen(((struct objc_class *)original)->name) + strlen(imposterNamePrefix) + 1);
    strcpy(imposterNamePtr, imposterNamePrefix);
    strcat(imposterNamePtr, ((struct objc_class *)original)->name);
#undef imposterNamePrefix

    // We lock the class hashtable, so we are thread safe with respect to
    // calls to objc_getClass ().  However, the class names are not
    // changed atomically, nor are all of the subclasses updated
    // atomically.  I have ordered the operations so that you will
    // never crash, but you may get inconsistent results....

    // Register the original class so that [super ..] knows
    // exactly which classes are the "original" classes.
    _objc_addOrigClass (original);
    _objc_addOrigClass (imposter);

    // Copy the imposter, so that the imposter can continue
    // its normal life in addition to changing the behavior of
    // the original.  As a hack we don't bother to copy the metaclass.
    // For some reason we modify the original rather than the copy.
    copy = (*_zoneAlloc)(imposter->isa, sizeof(struct objc_class), _objc_internal_zone());
    memmove(copy, imposter, sizeof(struct objc_class));

    OBJC_LOCK(&classLock);

    class_hash = objc_getClasses ();

    // Remove both the imposter and the original class.
    NXHashRemove (class_hash, imposter);
    NXHashRemove (class_hash, original);

    // The copy keeps the imposter's old identity in the class table.
    NXHashInsert (class_hash, copy);
    addClassToOriginalClass (imposter, copy);

    // Mark the imposter as such
    _class_setInfo(imposter, CLS_POSING);
    _class_setInfo(imposter->isa, CLS_POSING);

    // Change the name of the imposter to that of the original class.
    ((struct objc_class *)imposter)->name = ((struct objc_class *)original)->name;
    ((struct objc_class *)imposter)->isa->name = ((struct objc_class *)original)->isa->name;

    // Also copy the version field to avoid archiving problems.
    ((struct objc_class *)imposter)->version = ((struct objc_class *)original)->version;

    // Change all subclasses of the original to point to the imposter.
    state = NXInitHashState (class_hash);
    while (NXNextHashState (class_hash, &state, (void **) &clsObject))
    {
        // Walk up each class's superclass chain, stopping at the
        // imposter or its copy so we don't rewire the poser itself.
        while ((clsObject) && (clsObject != imposter) &&
               (clsObject != copy))
        {
            if (clsObject->super_class == original)
            {
                clsObject->super_class = imposter;
                clsObject->isa->super_class = ((struct objc_class *)imposter)->isa;
                // We must flush caches here!
                break;
            }

            clsObject = clsObject->super_class;
        }
    }

#ifdef OBJC_CLASS_REFS
    // Replace the original with the imposter in all class refs
    // Major loop - process all headers
    for (hInfo = _objc_headerStart(); hInfo != NULL; hInfo = hInfo->next)
    {
        Class * cls_refs;
        unsigned int refCount;
        unsigned int index;

        // Get refs associated with this header
        cls_refs = (Class *) _getObjcClassRefs ((headerType *) hInfo->mhdr, &refCount);
        if (!cls_refs || !refCount)
            continue;

        // Minor loop - process this header's refs
        cls_refs = (Class *) ((unsigned long) cls_refs + hInfo->image_slide);
        for (index = 0; index < refCount; index += 1)
        {
            if (cls_refs[index] == original)
                cls_refs[index] = imposter;
        }
    }
#endif // OBJC_CLASS_REFS

    // Change the name of the original class.
    ((struct objc_class *)original)->name = imposterNamePtr + 1;
    ((struct objc_class *)original)->isa->name = imposterNamePtr;

    // Restore the imposter and the original class with their new names.
    NXHashInsert (class_hash, imposter);
    NXHashInsert (class_hash, original);

    OBJC_UNLOCK(&classLock);

    return imposter;
}
1577
/***********************************************************************
* _freedHandler.
* Raises the "freed object" error (_errFreedObject) for a message sent
* to self with selector sel.
**********************************************************************/
static void _freedHandler (id self,
                           SEL sel)
{
    __objc_error (self, _errFreedObject, SELNAME(sel), self);
}
1586
/***********************************************************************
* _nonexistentHandler.
* Raises the "non-existent object" error (_errNonExistentObject) for a
* message sent to self with selector sel.
**********************************************************************/
static void _nonexistentHandler (id self,
                                 SEL sel)
{
    __objc_error (self, _errNonExistentObject, SELNAME(sel), self);
}
1595
/***********************************************************************
* class_respondsToMethod.  Return whether cls (or a superclass)
* implements sel.  Negative results are cached as forward:: entries.
*
* Called from -[Object respondsTo:] and +[Object instancesRespondTo:]
**********************************************************************/
BOOL class_respondsToMethod (Class cls,
                             SEL sel)
{
    Method m;
    IMP cached;

    // No one responds to zero!
    if (!sel)
        return NO;

    // Fast path: consult the method cache first.
    cached = _cache_getImp(cls, sel);
    if (cached) {
        // A cached forward:: entry means the class does NOT respond.
        return (cached != &_objc_msgForward) ? YES : NO;
    }

    // Cache miss: do the full method-list search under the lock.
    OBJC_LOCK(&methodListLock);
    m = _getMethod(cls, sel);
    OBJC_UNLOCK(&methodListLock);
    if (m) {
        // Prime the cache so the next query or message send is fast.
        _cache_fill(cls, m, sel);
        return YES;
    }

    // Not implemented.  Cache a forward:: entry to remember that, too.
    _cache_addForwardEntry(cls, sel);

    return NO;
}
1632
1633
/***********************************************************************
* class_lookupMethod.  Return the IMP cls uses for sel, consulting the
* method cache first and falling back to the full lookup.
*
* Called from -[Object methodFor:] and +[Object instanceMethodFor:]
**********************************************************************/
IMP class_lookupMethod (Class cls,
                        SEL sel)
{
    IMP imp;

    // A nil selector is a hard error here (unlike class_respondsToMethod).
    if (!sel) {
        __objc_error(cls, _errBadSel, sel);
    }

    // Fast path: the method cache.
    imp = _cache_getImp(cls, sel);
    if (!imp) {
        // Cache miss: full lookup (which also loads the cache).
        imp = _class_lookupMethodAndLoadCache (cls, sel);
    }
    return imp;
}
1655
/***********************************************************************
* lookupNamedMethodInMethodList
* Only called to find +load/-.cxx_construct/-.cxx_destruct methods,
* without fixing up the entire method list.
* The class is not yet in use, so methodListLock is not taken.
**********************************************************************/
__private_extern__ IMP lookupNamedMethodInMethodList(struct objc_method_list *mlist, const char *meth_name)
{
    Method m;
    // A NULL name never matches anything.
    if (!meth_name) return NULL;
    m = _findNamedMethodInList(mlist, meth_name);
    return m ? m->method_imp : NULL;
}
1667
1668
1669 /***********************************************************************
1670 * _cache_malloc.
1671 *
1672 * Called from _cache_create() and cache_expand()
1673 * Cache locks: cacheUpdateLock must be held by the caller.
1674 **********************************************************************/
static Cache _cache_malloc(int slotCount)
{
    Cache new_cache;
    size_t size;

    // Allocate table (why not check for failure?)
    // slotCount is assumed to be a power of two: mask is set to slotCount-1.
    size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount);
#ifdef OBJC_INSTRUMENTED
    // Custom cache allocator can't handle instrumentation.
    size += sizeof(CacheInstrumentation);
    new_cache = _calloc_internal(size, 1);
    new_cache->mask = slotCount - 1;
#else
    if (size < CACHE_QUANTUM || UseInternalZone) {
        // Small caches (or explicit opt-in) come from the internal zone.
        new_cache = _calloc_internal(size, 1);
        new_cache->mask = slotCount - 1;
        // occupied and buckets and instrumentation are all zero
    } else {
        // Larger caches come from the dedicated cache allocator,
        // which pre-sets the mask for its fixed allocation sizes.
        new_cache = cache_allocator_calloc(size);
        // mask is already set
        // occupied and buckets and instrumentation are all zero
    }
#endif

    return new_cache;
}
1701
1702
1703 /***********************************************************************
1704 * _cache_create.
1705 *
1706 * Called from _cache_expand().
1707 * Cache locks: cacheUpdateLock must be held by the caller.
1708 **********************************************************************/
1709 Cache _cache_create (Class cls)
1710 {
1711 Cache new_cache;
1712 int slotCount;
1713
1714 // Select appropriate size
1715 slotCount = (ISMETA(cls)) ? INIT_META_CACHE_SIZE : INIT_CACHE_SIZE;
1716
1717 new_cache = _cache_malloc(slotCount);
1718
1719 // Install the cache
1720 ((struct objc_class *)cls)->cache = new_cache;
1721
1722 // Clear the cache flush flag so that we will not flush this cache
1723 // before expanding it for the first time.
1724 _class_clearInfo(cls, CLS_FLUSH_CACHE);
1725
1726 // Clear the grow flag so that we will re-use the current storage,
1727 // rather than actually grow the cache, when expanding the cache
1728 // for the first time
1729 if (_class_slow_grow)
1730 _class_clearInfo(cls, CLS_GROW_CACHE);
1731
1732 // Return our creation
1733 return new_cache;
1734 }
1735
1736 /***********************************************************************
1737 * _cache_expand.
1738 *
1739 * Called from _cache_fill ()
1740 * Cache locks: cacheUpdateLock must be held by the caller.
1741 **********************************************************************/
static Cache _cache_expand (Class cls)
{
    Cache old_cache;
    Cache new_cache;
    unsigned int slotCount;
    unsigned int index;

    // First growth goes from emptyCache to a real one
    old_cache = ((struct objc_class *)cls)->cache;
    if (old_cache == &emptyCache)
        return _cache_create (cls);

    // iff _class_slow_grow, trade off actual cache growth with re-using
    // the current one, so that growth only happens every odd time
    if (_class_slow_grow)
    {
        // CLS_GROW_CACHE controls every-other-time behavior. If it
        // is non-zero, let the cache grow this time, but clear the
        // flag so the cache is reused next time
        if ((((struct objc_class * )cls)->info & CLS_GROW_CACHE) != 0)
            _class_clearInfo(cls, CLS_GROW_CACHE);

        // Reuse the current cache storage this time
        else
        {
            // Clear the valid-entry counter
            old_cache->occupied = 0;

            // Invalidate all the cache entries
            for (index = 0; index < old_cache->mask + 1; index += 1)
            {
                // Remember what this entry was, so we can possibly
                // deallocate it after the bucket has been invalidated
                Method oldEntry = old_cache->buckets[index];
                // Skip invalid entry
                if (!CACHE_BUCKET_VALID(old_cache->buckets[index]))
                    continue;

                // Invalidate this entry
                CACHE_BUCKET_VALID(old_cache->buckets[index]) = NULL;

                // Deallocate "forward::" entry
                if (CACHE_BUCKET_IMP(oldEntry) == &_objc_msgForward)
                {
                    // Deferred free: a concurrent objc_msgSend may still
                    // be reading this entry (see "Method cache locking"
                    // at the top of this file).
                    _cache_collect_free (oldEntry, sizeof(struct objc_method), NO);
                }
            }

            // Set the slow growth flag so the cache is next grown
            _class_setInfo(cls, CLS_GROW_CACHE);

            // Return the same old cache, freshly emptied
            return old_cache;
        }

    }

    // Double the cache size
    slotCount = (old_cache->mask + 1) << 1;

    new_cache = _cache_malloc(slotCount);

#ifdef OBJC_INSTRUMENTED
    // Propagate the instrumentation data
    {
        CacheInstrumentation * oldCacheData;
        CacheInstrumentation * newCacheData;

        oldCacheData = CACHE_INSTRUMENTATION(old_cache);
        newCacheData = CACHE_INSTRUMENTATION(new_cache);
        bcopy ((const char *)oldCacheData, (char *)newCacheData, sizeof(CacheInstrumentation));
    }
#endif

    // iff _class_uncache, copy old cache entries into the new cache
    if (_class_uncache == 0)
    {
        int newMask;

        newMask = new_cache->mask;

        // Look at all entries in the old cache
        for (index = 0; index < old_cache->mask + 1; index += 1)
        {
            int index2;

            // Skip invalid entry
            if (!CACHE_BUCKET_VALID(old_cache->buckets[index]))
                continue;

            // Hash the old entry into the new table
            index2 = CACHE_HASH(CACHE_BUCKET_NAME(old_cache->buckets[index]),
                                newMask);

            // Find an available spot, at or following the hashed spot;
            // Guaranteed to not infinite loop, because table has grown
            for (;;)
            {
                if (!CACHE_BUCKET_VALID(new_cache->buckets[index2]))
                {
                    new_cache->buckets[index2] = old_cache->buckets[index];
                    break;
                }

                index2 += 1;
                index2 &= newMask;
            }

            // Account for the addition
            new_cache->occupied += 1;
        }

        // Set the cache flush flag so that we will flush this cache
        // before expanding it again.
        _class_setInfo(cls, CLS_FLUSH_CACHE);
    }

    // Deallocate "forward::" entries from the old cache
    else
    {
        for (index = 0; index < old_cache->mask + 1; index += 1)
        {
            if (CACHE_BUCKET_VALID(old_cache->buckets[index]) &&
                CACHE_BUCKET_IMP(old_cache->buckets[index]) == &_objc_msgForward)
            {
                _cache_collect_free (old_cache->buckets[index], sizeof(struct objc_method), NO);
            }
        }
    }

    // Install new cache
    ((struct objc_class *)cls)->cache = new_cache;

    // Deallocate old cache, try freeing all the garbage
    // NOTE(review): the size passed here (mask * sizeof(Method)) is smaller
    // than what _cache_malloc allocated (sizeof(struct objc_cache) +
    // TABLE_SIZE(mask+1)); confirm _cache_collect_free uses this value only
    // for garbage accounting, not for the actual deallocation size.
    _cache_collect_free (old_cache, old_cache->mask * sizeof(Method), YES);
    return new_cache;
}
1879
1880 /***********************************************************************
1881 * instrumentObjcMessageSends/logObjcMessageSends.
1882 **********************************************************************/
// Append one message-send record to /tmp/msgSends-<pid>, lazily opening
// the file on first use. Returns 0 on success, which tells the caller in
// _class_lookupMethodAndLoadCache NOT to cache the method (so future
// sends keep getting logged). Returns 1 if the log file cannot be opened;
// in that case logging is disabled globally.
static int LogObjCMessageSend (BOOL isClassMethod,
                               const char * objectsClass,
                               const char * implementingClass,
                               SEL selector)
{
    char buf[ 1024 ];

    // Create/open the log file
    if (objcMsgLogFD == (-1))
    {
        snprintf (buf, sizeof(buf), "/tmp/msgSends-%d", (int) getpid ());
        objcMsgLogFD = secure_open (buf, O_WRONLY | O_CREAT, geteuid());
        if (objcMsgLogFD < 0) {
            // no log file - disable logging
            objcMsgLogEnabled = 0;
            objcMsgLogFD = -1;
            return 1;
        }
    }

    // Make the log entry: "+|- <receiver class> <implementing class> <sel>"
    snprintf(buf, sizeof(buf), "%c %s %s %s\n",
             isClassMethod ? '+' : '-',
             objectsClass,
             implementingClass,
             (char *) selector);

    // Best effort: the write result is intentionally not checked.
    write (objcMsgLogFD, buf, strlen(buf));

    // Tell caller to not cache the method
    return 0;
}
1915
1916 void instrumentObjcMessageSends (BOOL flag)
1917 {
1918 int enabledValue = (flag) ? 1 : 0;
1919
1920 // Shortcut NOP
1921 if (objcMsgLogEnabled == enabledValue)
1922 return;
1923
1924 // If enabling, flush all method caches so we get some traces
1925 if (flag)
1926 flush_caches (Nil, YES);
1927
1928 // Sync our log file
1929 if (objcMsgLogFD != (-1))
1930 fsync (objcMsgLogFD);
1931
1932 objcMsgLogEnabled = enabledValue;
1933 }
1934
1935 void logObjcMessageSends (ObjCLogProc logProc)
1936 {
1937 if (logProc)
1938 {
1939 objcMsgLogProc = logProc;
1940 objcMsgLogEnabled = 1;
1941 }
1942 else
1943 {
1944 objcMsgLogProc = logProc;
1945 objcMsgLogEnabled = 0;
1946 }
1947
1948 if (objcMsgLogFD != (-1))
1949 fsync (objcMsgLogFD);
1950 }
1951
1952
1953 /***********************************************************************
1954 * _cache_fill. Add the specified method to the specified class' cache.
1955 * Returns NO if the cache entry wasn't added: cache was busy,
1956 * class is still being initialized, new entry is a duplicate.
1957 *
1958 * Called only from _class_lookupMethodAndLoadCache and
1959 * class_respondsToMethod and _cache_addForwardEntry.
1960 *
1961 * Cache locks: cacheUpdateLock must not be held.
1962 **********************************************************************/
static BOOL _cache_fill(Class cls, Method smt, SEL sel)
{
    unsigned int newOccupied;
    arith_t index;
    Method *buckets;
    Cache cache;

    // Never cache before +initialize is done
    if (!ISINITIALIZED(cls)) {
        return NO;
    }

    // Keep tally of cache additions
    totalCacheFills += 1;

    OBJC_LOCK(&cacheUpdateLock);

    cache = ((struct objc_class *)cls)->cache;

    // Check for duplicate entries, if we're in the mode
    if (traceDuplicates)
    {
        int index2;
        arith_t mask = cache->mask;
        buckets = cache->buckets;

        // Scan the cache
        for (index2 = 0; index2 < mask + 1; index2 += 1)
        {
            // Skip invalid or non-duplicate entry
            if ((!CACHE_BUCKET_VALID(buckets[index2])) ||
                (strcmp ((char *) CACHE_BUCKET_NAME(buckets[index2]), (char *) smt->method_name) != 0))
                continue;

            // Tally duplication, but report iff wanted
            cacheFillDuplicates += 1;
            if (traceDuplicatesVerbose)
            {
                _objc_inform ("Cache fill duplicate #%d: found %x adding %x: %s\n",
                              cacheFillDuplicates,
                              (unsigned int) CACHE_BUCKET_NAME(buckets[index2]),
                              (unsigned int) smt->method_name,
                              (char *) smt->method_name);
            }
        }
    }

    // Make sure the entry wasn't added to the cache by some other thread
    // before we grabbed the cacheUpdateLock.
    // Don't use _cache_getMethod() because _cache_getMethod() doesn't
    // return forward:: entries.
    if (_cache_getImp(cls, sel)) {
        OBJC_UNLOCK(&cacheUpdateLock);
        return NO; // entry is already cached, didn't add new one
    }

    // Use the cache as-is if it is less than 3/4 full
    newOccupied = cache->occupied + 1;
    if ((newOccupied * 4) <= (cache->mask + 1) * 3) {
        // Cache is less than 3/4 full.
        cache->occupied = newOccupied;
    } else {
        // Cache is too full. Flush it or expand it.
        // CLS_FLUSH_CACHE is set by expansion and cleared by flushing,
        // so the two alternate.
        if ((((struct objc_class * )cls)->info & CLS_FLUSH_CACHE) != 0) {
            _cache_flush (cls);
        } else {
            cache = _cache_expand (cls);
        }

        // Account for the addition
        cache->occupied += 1;
    }

    // Insert the new entry. This can be done by either:
    // (a) Scanning for the first unused spot. Easy!
    // (b) Opening up an unused spot by sliding existing
    //     entries down by one. The benefit of this
    //     extra work is that it puts the most recently
    //     loaded entries closest to where the selector
    //     hash starts the search.
    //
    // The loop is a little more complicated because there
    // are two kinds of entries, so there have to be two ways
    // to slide them.
    //
    // objc_msgSend may be reading these buckets concurrently without
    // the lock (see "Method cache locking" at the top of this file).
    buckets = cache->buckets;
    index = CACHE_HASH(sel, cache->mask);
    for (;;)
    {
        // Slide existing entries down by one
        Method saveMethod;

        // Copy current entry to a local
        saveMethod = buckets[index];

        // Copy previous entry (or new entry) to current slot
        buckets[index] = smt;

        // Done if current slot had been invalid
        if (saveMethod == NULL)
            break;

        // Prepare to copy saved value into next slot
        smt = saveMethod;

        // Move on to next slot
        index += 1;
        index &= cache->mask;
    }

    OBJC_UNLOCK(&cacheUpdateLock);

    return YES; // successfully added new cache entry
}
2076
2077
2078 /***********************************************************************
2079 * _cache_addForwardEntry
2080 * Add a forward:: entry for the given selector to cls's method cache.
2081 * Does nothing if the cache addition fails for any reason.
2082 * Called from class_respondsToMethod and _class_lookupMethodAndLoadCache.
2083 * Cache locks: cacheUpdateLock must not be held.
2084 **********************************************************************/
2085 static void _cache_addForwardEntry(Class cls, SEL sel)
2086 {
2087 Method smt;
2088
2089 smt = _malloc_internal(sizeof(struct objc_method));
2090 smt->method_name = sel;
2091 smt->method_types = "";
2092 smt->method_imp = &_objc_msgForward;
2093 if (! _cache_fill(cls, smt, sel)) {
2094 // Entry not added to cache. Don't leak the method struct.
2095 _free_internal(smt);
2096 }
2097 }
2098
2099
2100 /***********************************************************************
2101 * _cache_flush. Invalidate all valid entries in the given class' cache,
2102 * and clear the CLS_FLUSH_CACHE in the cls->info.
2103 *
2104 * Called from flush_caches() and _cache_fill()
2105 * Cache locks: cacheUpdateLock must be held by the caller.
2106 **********************************************************************/
static void _cache_flush (Class cls)
{
    Cache cache;
    unsigned int index;

    // Locate cache. Ignore unused cache.
    cache = ((struct objc_class *)cls)->cache;
    if (cache == NULL || cache == &emptyCache)
        return;

#ifdef OBJC_INSTRUMENTED
    {
        CacheInstrumentation * cacheData;

        // Tally this flush
        cacheData = CACHE_INSTRUMENTATION(cache);
        cacheData->flushCount += 1;
        cacheData->flushedEntries += cache->occupied;
        if (cache->occupied > cacheData->maxFlushedEntries)
            cacheData->maxFlushedEntries = cache->occupied;
    }
#endif

    // Traverse the cache
    for (index = 0; index <= cache->mask; index += 1)
    {
        // Remember what this entry was, so we can possibly
        // deallocate it after the bucket has been invalidated
        Method oldEntry = cache->buckets[index];

        // Invalidate this entry
        CACHE_BUCKET_VALID(cache->buckets[index]) = NULL;

        // Deallocate "forward::" entry. The free is deferred because a
        // concurrent objc_msgSend may still be reading the entry (see
        // "Method cache locking" at the top of this file).
        if (oldEntry && oldEntry->method_imp == &_objc_msgForward)
            _cache_collect_free (oldEntry, sizeof(struct objc_method), NO);
    }

    // Clear the valid-entry counter
    cache->occupied = 0;

    // Clear the cache flush flag so that we will not flush this cache
    // before expanding it again.
    _class_clearInfo(cls, CLS_FLUSH_CACHE);
}
2152
2153 /***********************************************************************
2154 * _objc_getFreedObjectClass. Return a pointer to the dummy freed
2155 * object class. Freed objects get their isa pointers replaced with
2156 * a pointer to the freedObjectClass, so that we can catch usages of
2157 * the freed object.
2158 **********************************************************************/
Class _objc_getFreedObjectClass (void)
{
    // Freed objects have their isa repointed to this dummy class so
    // stray messages to freed memory can be trapped (see header above).
    return (Class) &freedObjectClass;
}
2163
2164 /***********************************************************************
2165 * _objc_getNonexistentClass. Return a pointer to the dummy nonexistent
2166 * object class. This is used when, for example, mapping the class
2167 * refs for an image, and the class can not be found, so that we can
2168 * catch later uses of the non-existent class.
2169 **********************************************************************/
Class _objc_getNonexistentClass (void)
{
    // Used when a class ref cannot be resolved while mapping an image,
    // so later uses of the missing class can be caught (see header above).
    return (Class) &nonexistentObjectClass;
}
2174
2175
2176 /***********************************************************************
2177 * struct _objc_initializing_classes
2178 * Per-thread list of classes currently being initialized by that thread.
2179 * During initialization, that thread is allowed to send messages to that
2180 * class, but other threads have to wait.
2181 * The list is a simple array of metaclasses (the metaclass stores
2182 * the initialization state).
2183 **********************************************************************/
typedef struct _objc_initializing_classes {
    int classesAllocated;               // capacity of the metaclasses array
    struct objc_class** metaclasses;    // metaclass entries; NULL slots are free
} _objc_initializing_classes;
2188
2189
2190 /***********************************************************************
2191 * _fetchInitializingClassList
2192 * Return the list of classes being initialized by this thread.
2193 * If create == YES, create the list when no classes are being initialized by this thread.
2194 * If create == NO, return NULL when no classes are being initialized by this thread.
2195 **********************************************************************/
2196 static _objc_initializing_classes *_fetchInitializingClassList(BOOL create)
2197 {
2198 _objc_pthread_data *data;
2199 _objc_initializing_classes *list;
2200 struct objc_class **classes;
2201
2202 data = pthread_getspecific(_objc_pthread_key);
2203 if (data == NULL) {
2204 if (!create) {
2205 return NULL;
2206 } else {
2207 data = _calloc_internal(1, sizeof(_objc_pthread_data));
2208 pthread_setspecific(_objc_pthread_key, data);
2209 }
2210 }
2211
2212 list = data->initializingClasses;
2213 if (list == NULL) {
2214 if (!create) {
2215 return NULL;
2216 } else {
2217 list = _calloc_internal(1, sizeof(_objc_initializing_classes));
2218 data->initializingClasses = list;
2219 }
2220 }
2221
2222 classes = list->metaclasses;
2223 if (classes == NULL) {
2224 // If _objc_initializing_classes exists, allocate metaclass array,
2225 // even if create == NO.
2226 // Allow 4 simultaneous class inits on this thread before realloc.
2227 list->classesAllocated = 4;
2228 classes = _calloc_internal(list->classesAllocated, sizeof(struct objc_class *));
2229 list->metaclasses = classes;
2230 }
2231 return list;
2232 }
2233
2234
2235 /***********************************************************************
2236 * _destroyInitializingClassList
2237 * Deallocate memory used by the given initialization list.
2238 * Any part of the list may be NULL.
2239 * Called from _objc_pthread_destroyspecific().
2240 **********************************************************************/
2241 void _destroyInitializingClassList(_objc_initializing_classes *list)
2242 {
2243 if (list != NULL) {
2244 if (list->metaclasses != NULL) {
2245 _free_internal(list->metaclasses);
2246 }
2247 _free_internal(list);
2248 }
2249 }
2250
2251
2252 /***********************************************************************
2253 * _thisThreadIsInitializingClass
2254 * Return TRUE if this thread is currently initializing the given class.
2255 **********************************************************************/
2256 static BOOL _thisThreadIsInitializingClass(struct objc_class *cls)
2257 {
2258 int i;
2259
2260 _objc_initializing_classes *list = _fetchInitializingClassList(NO);
2261 if (list) {
2262 cls = GETMETA(cls);
2263 for (i = 0; i < list->classesAllocated; i++) {
2264 if (cls == list->metaclasses[i]) return YES;
2265 }
2266 }
2267
2268 // no list or not found in list
2269 return NO;
2270 }
2271
2272
2273 /***********************************************************************
2274 * _setThisThreadIsInitializingClass
2275 * Record that this thread is currently initializing the given class.
2276 * This thread will be allowed to send messages to the class, but
2277 * other threads will have to wait.
2278 **********************************************************************/
static void _setThisThreadIsInitializingClass(struct objc_class *cls)
{
    int i;
    _objc_initializing_classes *list = _fetchInitializingClassList(YES);
    // The list stores metaclasses (see _objc_initializing_classes).
    cls = GETMETA(cls);

    // paranoia: explicitly disallow duplicates
    for (i = 0; i < list->classesAllocated; i++) {
        if (cls == list->metaclasses[i]) {
            _objc_fatal("thread is already initializing this class!");
            return; // already the initializer
        }
    }

    // Take the first free (NULL) slot, if any.
    for (i = 0; i < list->classesAllocated; i++) {
        if (0 == list->metaclasses[i]) {
            list->metaclasses[i] = cls;
            return;
        }
    }

    // class list is full - reallocate
    // i still equals the old classesAllocated here, i.e. the index of
    // the first newly-added slot.
    list->classesAllocated = list->classesAllocated * 2 + 1;
    list->metaclasses = _realloc_internal(list->metaclasses, list->classesAllocated * sizeof(struct objc_class *));
    // zero out the new entries (realloc does not clear the added space)
    list->metaclasses[i++] = cls;
    for ( ; i < list->classesAllocated; i++) {
        list->metaclasses[i] = NULL;
    }
}
2309
2310
2311 /***********************************************************************
2312 * _setThisThreadIsNotInitializingClass
2313 * Record that this thread is no longer initializing the given class.
2314 **********************************************************************/
2315 static void _setThisThreadIsNotInitializingClass(struct objc_class *cls)
2316 {
2317 int i;
2318
2319 _objc_initializing_classes *list = _fetchInitializingClassList(NO);
2320 if (list) {
2321 cls = GETMETA(cls);
2322 for (i = 0; i < list->classesAllocated; i++) {
2323 if (cls == list->metaclasses[i]) {
2324 list->metaclasses[i] = NULL;
2325 return;
2326 }
2327 }
2328 }
2329
2330 // no list or not found in list
2331 _objc_fatal("thread is not initializing this class!");
2332 }
2333
2334
2335 /***********************************************************************
2336 * class_initialize. Send the '+initialize' message on demand to any
2337 * uninitialized class. Force initialization of superclasses first.
2338 *
2339 * Called only from _class_lookupMethodAndLoadCache (or itself).
2340 **********************************************************************/
static void class_initialize(struct objc_class *cls)
{
    // The CLS_INITIALIZING/CLS_INITIALIZED bits live on the metaclass.
    struct objc_class *infoCls = GETMETA(cls);
    BOOL reallyInitialize = NO;

    // Get the real class from the metaclass. The superclass chain
    // hangs off the real class only.
    // fixme ick
    if (ISMETA(cls)) {
        if (strncmp(cls->name, "_%", 2) == 0) {
            // Posee's meta's name is smashed and isn't in the class_hash,
            // so objc_getClass doesn't work.
            char *baseName = strchr(cls->name, '%'); // get posee's real name
            cls = objc_getClass(baseName);
        } else {
            cls = objc_getClass(cls->name);
        }
    }

    // Make sure super is done initializing BEFORE beginning to initialize cls.
    // See note about deadlock above.
    if (cls->super_class && !ISINITIALIZED(cls->super_class)) {
        class_initialize(cls->super_class);
    }

    // Try to atomically set CLS_INITIALIZING. Exactly one thread wins
    // the right to actually run +initialize.
    pthread_mutex_lock(&classInitLock);
    if (!ISINITIALIZED(cls) && !ISINITIALIZING(cls)) {
        _class_setInfo(infoCls, CLS_INITIALIZING);
        reallyInitialize = YES;
    }
    pthread_mutex_unlock(&classInitLock);

    if (reallyInitialize) {
        // We successfully set the CLS_INITIALIZING bit. Initialize the class.

        // Record that we're initializing this class so we can message it.
        _setThisThreadIsInitializingClass(cls);

        // Send the +initialize message.
        // Note that +initialize is sent to the superclass (again) if
        // this class doesn't implement +initialize. 2157218
        [(id)cls initialize];

        // Done initializing. Update the info bits and notify waiting threads.
        pthread_mutex_lock(&classInitLock);
        _class_changeInfo(infoCls, CLS_INITIALIZED, CLS_INITIALIZING);
        pthread_cond_broadcast(&classInitWaitCond);
        pthread_mutex_unlock(&classInitLock);
        _setThisThreadIsNotInitializingClass(cls);
        return;
    }

    else if (ISINITIALIZING(cls)) {
        // We couldn't set INITIALIZING because INITIALIZING was already set.
        // If this thread set it earlier, continue normally.
        // If some other thread set it, block until initialize is done.
        // It's ok if INITIALIZING changes to INITIALIZED while we're here,
        // because we safely check for INITIALIZED inside the lock
        // before blocking.
        if (_thisThreadIsInitializingClass(cls)) {
            return;
        } else {
            pthread_mutex_lock(&classInitLock);
            while (!ISINITIALIZED(cls)) {
                pthread_cond_wait(&classInitWaitCond, &classInitLock);
            }
            pthread_mutex_unlock(&classInitLock);
            return;
        }
    }

    else if (ISINITIALIZED(cls)) {
        // Set CLS_INITIALIZING failed because someone else already
        // initialized the class. Continue normally.
        // NOTE this check must come AFTER the ISINITIALIZING case.
        // Otherwise: Another thread is initializing this class. ISINITIALIZED
        // is false. Skip this clause. Then the other thread finishes
        // initialization and sets INITIALIZING=no and INITIALIZED=yes.
        // Skip the ISINITIALIZING clause. Die horribly.
        return;
    }

    else {
        // We shouldn't be here.
        _objc_fatal("thread-safe class init in objc runtime is buggy!");
    }
}
2429
2430
2431 /***********************************************************************
2432 * _class_lookupMethodAndLoadCache.
2433 *
2434 * Called only from objc_msgSend, objc_msgSendSuper and class_lookupMethod.
2435 **********************************************************************/
// Full method lookup for sel on cls: runs +initialize if needed, walks the
// class and its superclasses checking each cache then each method list,
// caches the result on cls, and falls back to _objc_msgForward (also
// cached) when nothing implements sel. Always returns a callable IMP.
IMP _class_lookupMethodAndLoadCache (Class cls,
                                     SEL sel)
{
    struct objc_class * curClass;
    Method meth;
    IMP methodPC = NULL;

    trace(0xb300, 0, 0, 0);

    // Check for freed class
    if (cls == &freedObjectClass)
        return (IMP) _freedHandler;

    // Check for nonexistent class
    if (cls == &nonexistentObjectClass)
        return (IMP) _nonexistentHandler;

    trace(0xb301, 0, 0, 0);

    if (!ISINITIALIZED(cls)) {
        class_initialize ((struct objc_class *)cls);
        // If sel == initialize, class_initialize will send +initialize and
        // then the messenger will send +initialize again after this
        // procedure finishes. Of course, if this is not being called
        // from the messenger then it won't happen. 2778172
    }

    trace(0xb302, 0, 0, 0);

    // Outer loop - search the caches and method lists of the
    // class and its super-classes
    for (curClass = cls; curClass; curClass = ((struct objc_class * )curClass)->super_class)
    {
#ifdef PRELOAD_SUPERCLASS_CACHES
        struct objc_class *curClass2;
#endif

        trace(0xb303, 0, 0, 0);

        // Beware of thread-unsafety and double-freeing of forward::
        // entries here! See note in "Method cache locking" above.
        // The upshot is that _cache_getMethod() will return NULL
        // instead of returning a forward:: entry.
        meth = _cache_getMethod(curClass, sel, &_objc_msgForward);
        if (meth) {
            // Found the method in this class or a superclass.
            // Cache the method in this class, unless we just found it in
            // this class's cache.
            if (curClass != cls) {
#ifdef PRELOAD_SUPERCLASS_CACHES
                for (curClass2 = cls; curClass2 != curClass; curClass2 = curClass2->super_class)
                    _cache_fill (curClass2, meth, sel);
                _cache_fill (curClass, meth, sel);
#else
                _cache_fill (cls, meth, sel);
#endif
            }

            methodPC = meth->method_imp;
            break;
        }

        trace(0xb304, (int)methodPC, 0, 0);

        // Cache scan failed. Search method list.

        OBJC_LOCK(&methodListLock);
        meth = _findMethodInClass(curClass, sel);
        OBJC_UNLOCK(&methodListLock);
        if (meth) {
            // If logging is enabled, log the message send and let
            // the logger decide whether to encache the method.
            // (LogObjCMessageSend returns 0 to suppress caching so that
            // future sends keep getting logged.)
            if ((objcMsgLogEnabled == 0) ||
                (objcMsgLogProc (CLS_GETINFO(((struct objc_class * )curClass),
                                             CLS_META) ? YES : NO,
                                 ((struct objc_class *)cls)->name,
                                 curClass->name, sel)))
            {
                // Cache the method implementation
#ifdef PRELOAD_SUPERCLASS_CACHES
                for (curClass2 = cls; curClass2 != curClass; curClass2 = curClass2->super_class)
                    _cache_fill (curClass2, meth, sel);
                _cache_fill (curClass, meth, sel);
#else
                _cache_fill (cls, meth, sel);
#endif
            }

            methodPC = meth->method_imp;
            break;
        }

        trace(0xb305, (int)methodPC, 0, 0);
    }

    trace(0xb306, (int)methodPC, 0, 0);

    if (methodPC == NULL)
    {
        // Class and superclasses do not respond -- use forwarding
        _cache_addForwardEntry(cls, sel);
        methodPC = &_objc_msgForward;
    }

    trace(0xb30f, (int)methodPC, 0, 0);

    return methodPC;
}
2544
2545
2546 /***********************************************************************
2547 * lookupMethodInClassAndLoadCache.
2548 * Like _class_lookupMethodAndLoadCache, but does not search superclasses.
2549 * Caches and returns objc_msgForward if the method is not found in the class.
2550 **********************************************************************/
2551 static IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel)
2552 {
2553 Method meth;
2554 IMP imp;
2555
2556 // Search cache first.
2557 imp = _cache_getImp(cls, sel);
2558 if (imp) return imp;
2559
2560 // Cache miss. Search method list.
2561
2562 OBJC_LOCK(&methodListLock);
2563 meth = _findMethodInClass(cls, sel);
2564 OBJC_UNLOCK(&methodListLock);
2565
2566 if (meth) {
2567 // Hit in method list. Cache it.
2568 _cache_fill(cls, meth, sel);
2569 return meth->method_imp;
2570 } else {
2571 // Miss in method list. Cache objc_msgForward.
2572 _cache_addForwardEntry(cls, sel);
2573 return &_objc_msgForward;
2574 }
2575 }
2576
2577
2578
2579 /***********************************************************************
2580 * _class_changeInfo
2581 * Atomically sets and clears some bits in cls's info field.
2582 * set and clear must not overlap.
2583 **********************************************************************/
// Serializes all read-modify-write updates to cls->info.
static pthread_mutex_t infoLock = PTHREAD_MUTEX_INITIALIZER;
__private_extern__ void _class_changeInfo(struct objc_class *cls,
                                          long set, long clear)
{
    // Atomic only with respect to other _class_changeInfo callers;
    // set and clear must not overlap (see header comment above).
    pthread_mutex_lock(&infoLock);
    cls->info = (cls->info | set) & ~clear;
    pthread_mutex_unlock(&infoLock);
}
2592
2593
2594 /***********************************************************************
2595 * _class_setInfo
2596 * Atomically sets some bits in cls's info field.
2597 **********************************************************************/
__private_extern__ void _class_setInfo(struct objc_class *cls, long set)
{
    // Convenience wrapper: set bits, clear nothing.
    _class_changeInfo(cls, set, 0);
}
2602
2603
2604 /***********************************************************************
2605 * _class_clearInfo
2606 * Atomically clears some bits in cls's info field.
2607 **********************************************************************/
__private_extern__ void _class_clearInfo(struct objc_class *cls, long clear)
{
    // Convenience wrapper: clear bits, set nothing.
    _class_changeInfo(cls, 0, clear);
}
2612
2613
2614 /***********************************************************************
2615 * SubtypeUntil.
2616 *
2617 * Delegation.
2618 **********************************************************************/
// Scan a type-encoding string until the matching `end` delimiter is found
// at nesting level zero, and return the number of characters consumed
// (i.e. the length of the subtype, excluding the delimiter itself).
// Nesting is tracked across [], {} and () pairs. Aborts via _objc_fatal
// if the string ends before the delimiter is seen.
static int SubtypeUntil (const char * type,
                         char end)
{
    int level = 0;
    const char * head = type;

    while (*type)
    {
        // At nesting level zero the terminator ends the subtype.
        // (The original also re-tested !*type here, which is unreachable
        // inside the while guard and has been removed.)
        if (level == 0 && *type == end)
            return (int)(type - head);

        switch (*type)
        {
            case ']': case '}': case ')': level -= 1; break;
            case '[': case '{': case '(': level += 1; break;
            default: break;   // ordinary character: no nesting change
        }

        type += 1;
    }

    _objc_fatal ("Object: SubtypeUntil: end of type encountered prematurely\n");
    return 0;
}
2643
2644 /***********************************************************************
2645 * SkipFirstType.
2646 **********************************************************************/
// Returns a pointer just past the first complete type encoding in
// `type`, including any method qualifiers and pointer prefixes.
static const char * SkipFirstType (const char * type)
{
    for (;;)
    {
        char c = *type++;

        switch (c)
        {
            // Qualifier and pointer prefixes: keep consuming until the
            // underlying type itself is reached.
            case 'O':   /* bycopy */
            case 'n':   /* in */
            case 'o':   /* out */
            case 'N':   /* inout */
            case 'r':   /* const */
            case 'V':   /* oneway */
            case '^':   /* pointers */
                continue;

            // Arrays: skip the optional element count, then the
            // element type up to the closing bracket.
            case '[':
                while (*type >= '0'  &&  *type <= '9')
                    type++;
                return type + SubtypeUntil (type, ']') + 1;

            // Structures
            case '{':
                return type + SubtypeUntil (type, '}') + 1;

            // Unions
            case '(':
                return type + SubtypeUntil (type, ')') + 1;

            // Basic one-character types
            default:
                return type;
        }
    }
}
2682
2683 /***********************************************************************
2684 * method_getNumberOfArguments.
2685 **********************************************************************/
2686 unsigned method_getNumberOfArguments (Method method)
2687 {
2688 const char * typedesc;
2689 unsigned nargs;
2690
2691 // First, skip the return type
2692 typedesc = method->method_types;
2693 typedesc = SkipFirstType (typedesc);
2694
2695 // Next, skip stack size
2696 while ((*typedesc >= '0') && (*typedesc <= '9'))
2697 typedesc += 1;
2698
2699 // Now, we have the arguments - count how many
2700 nargs = 0;
2701 while (*typedesc)
2702 {
2703 // Traverse argument type
2704 typedesc = SkipFirstType (typedesc);
2705
2706 // Skip GNU runtime's register parameter hint
2707 if (*typedesc == '+') typedesc++;
2708
2709 // Traverse (possibly negative) argument offset
2710 if (*typedesc == '-')
2711 typedesc += 1;
2712 while ((*typedesc >= '0') && (*typedesc <= '9'))
2713 typedesc += 1;
2714
2715 // Made it past an argument
2716 nargs += 1;
2717 }
2718
2719 return nargs;
2720 }
2721
2722 /***********************************************************************
2723 * method_getSizeOfArguments.
2724 **********************************************************************/
#ifndef __alpha__
// Returns the total argument stack size for the method, taken from the
// decimal number that follows the return type in the type string.
unsigned method_getSizeOfArguments (Method method)
{
    const char * typedesc;
    unsigned stack_size;
#if defined(__ppc__) || defined(ppc)
    unsigned trueBaseOffset;
    unsigned foundBaseOffset;
#endif

    // Get our starting points
    stack_size = 0;
    typedesc = method->method_types;

    // Skip the return type
#if defined (__ppc__) || defined(ppc)
    // Struct returns cause the parameters to be bumped
    // by a register, so the offset to the receiver is
    // 4 instead of the normal 0.
    trueBaseOffset = (*typedesc == '{') ? 4 : 0;
#endif
    typedesc = SkipFirstType (typedesc);

    // Convert ASCII number string to integer
    while ((*typedesc >= '0') && (*typedesc <= '9'))
        stack_size = (stack_size * 10) + (*typedesc++ - '0');
#if defined (__ppc__) || defined(ppc)
    // NOTE: This is a temporary measure pending a compiler fix.
    // Work around PowerPC compiler bug wherein the method argument
    // string contains an incorrect value for the "stack size."
    // Generally, the size is reported 4 bytes too small, so we apply
    // that fudge factor. Unfortunately, there is at least one case
    // where the error is something other than -4: when the last
    // parameter is a double, the reported stack is much too high
    // (about 32 bytes). We do not attempt to detect that case.
    // The result of returning a too-high value is that objc_msgSendv
    // can bus error if the destination of the marg_list copying
    // butts up against excluded memory.
    // This fix disables itself when it sees a correctly built
    // type string (i.e. the offset for the Id is correct). This
    // keeps us out of lockstep with the compiler.

    // skip the '@' marking the Id field
    typedesc = SkipFirstType (typedesc);

    // Skip GNU runtime's register parameter hint
    if (*typedesc == '+') typedesc++;

    // pick up the offset for the Id field
    foundBaseOffset = 0;
    while ((*typedesc >= '0') && (*typedesc <= '9'))
        foundBaseOffset = (foundBaseOffset * 10) + (*typedesc++ - '0');

    // add fudge factor iff the Id field offset was wrong
    if (foundBaseOffset != trueBaseOffset)
        stack_size += 4;
#endif

    return stack_size;
}

#else // __alpha__
// XXX Getting the size of a type is done all over the place
// (Here, Foundation, remote project)! - Should unify

unsigned int getSizeOfType (const char * type, unsigned int * alignPtr);

// Alpha version: sums each argument's size (rounded up to 8 bytes)
// rather than trusting the stack size encoded in the type string.
unsigned method_getSizeOfArguments (Method method)
{
    const char * type;
    int size;
    int index;
    int align;
    int offset;
    unsigned stack_size;
    int nargs;

    nargs = method_getNumberOfArguments (method);
    // NOTE(review): appears to reserve one pointer slot for struct
    // returns, mirroring the ppc trueBaseOffset logic -- confirm.
    stack_size = (*method->method_types == '{') ? sizeof(void *) : 0;

    for (index = 0; index < nargs; index += 1)
    {
        (void) method_getArgumentInfo (method, index, &type, &offset);
        size = getSizeOfType (type, &align);
        stack_size += ((size + 7) & ~7);   // round each argument up to 8 bytes
    }

    return stack_size;
}
#endif // __alpha__
2815
2816 /***********************************************************************
2817 * method_getArgumentInfo.
2818 **********************************************************************/
// Returns, via *type and *offset, the encoding and stack offset of the
// arg'th argument of the method (argument 0 is self). Offsets for
// arguments after the first are reported relative to self's offset.
// If arg is out of range, *type and *offset are set to 0.
// Returns the number of arguments skipped to reach arg.
unsigned method_getArgumentInfo (Method method,
                                 int arg,
                                 const char ** type,
                                 int * offset)
{
    const char * typedesc = method->method_types;
    unsigned nargs = 0;
    unsigned self_offset = 0;       // stack offset of argument 0 (self)
    BOOL offset_is_negative = NO;

    // First, skip the return type
    typedesc = SkipFirstType (typedesc);

    // Next, skip stack size
    while ((*typedesc >= '0') && (*typedesc <= '9'))
        typedesc += 1;

    // Now, we have the arguments - position typedesc to the appropriate argument
    while (*typedesc && nargs != arg)
    {

        // Skip argument type
        typedesc = SkipFirstType (typedesc);

        if (nargs == 0)
        {
            // Argument 0 is self: remember its offset so later
            // arguments can be reported relative to it.

            // Skip GNU runtime's register parameter hint
            if (*typedesc == '+') typedesc++;

            // Skip negative sign in offset
            if (*typedesc == '-')
            {
                offset_is_negative = YES;
                typedesc += 1;
            }
            else
                offset_is_negative = NO;

            while ((*typedesc >= '0') && (*typedesc <= '9'))
                self_offset = self_offset * 10 + (*typedesc++ - '0');
            if (offset_is_negative)
                self_offset = -(self_offset);

        }

        else
        {
            // Skip GNU runtime's register parameter hint
            if (*typedesc == '+') typedesc++;

            // Skip (possibly negative) argument offset
            if (*typedesc == '-')
                typedesc += 1;
            while ((*typedesc >= '0') && (*typedesc <= '9'))
                typedesc += 1;
        }

        nargs += 1;
    }

    if (*typedesc)
    {
        unsigned arg_offset = 0;

        // Found the requested argument: report its type encoding...
        *type = typedesc;
        typedesc = SkipFirstType (typedesc);

        // ...and its offset.
        if (arg == 0)
        {
#ifdef hppa
            *offset = -sizeof(id);
#else
            *offset = 0;
#endif // hppa
        }

        else
        {
            // Skip GNU register parameter hint
            if (*typedesc == '+') typedesc++;

            // Pick up (possibly negative) argument offset
            if (*typedesc == '-')
            {
                offset_is_negative = YES;
                typedesc += 1;
            }
            else
                offset_is_negative = NO;

            while ((*typedesc >= '0') && (*typedesc <= '9'))
                arg_offset = arg_offset * 10 + (*typedesc++ - '0');
            if (offset_is_negative)
                arg_offset = - arg_offset;

#ifdef hppa
            // For stacks which grow up, since margs points
            // to the top of the stack or the END of the args,
            // the first offset is at -sizeof(id) rather than 0.
            self_offset += sizeof(id);
#endif
            *offset = arg_offset - self_offset;
        }

    }

    else
    {
        // arg was past the last argument
        *type = 0;
        *offset = 0;
    }

    return nargs;
}
2933
2934 /***********************************************************************
2935 * _objc_create_zone.
2936 **********************************************************************/
2937
// Historical SPI: returns the zone for objc allocations.
// Note this is always the default malloc zone, even when the runtime's
// dedicated internal zone is in use (see _objc_internal_zone below).
void * _objc_create_zone (void)
{
    return malloc_default_zone();
}
2942
2943
2944 /***********************************************************************
2945 * _objc_internal_zone.
2946 * Malloc zone for internal runtime data.
2947 * By default this is the default malloc zone, but a dedicated zone is
2948 * used if environment variable OBJC_USE_INTERNAL_ZONE is set.
2949 **********************************************************************/
2950 __private_extern__ malloc_zone_t *_objc_internal_zone(void)
2951 {
2952 static malloc_zone_t *z = (malloc_zone_t *)-1;
2953 if (z == (malloc_zone_t *)-1) {
2954 if (UseInternalZone) {
2955 z = malloc_create_zone(vm_page_size, 0);
2956 malloc_set_zone_name(z, "ObjC");
2957 } else {
2958 z = malloc_default_zone();
2959 }
2960 }
2961 return z;
2962 }
2963
2964
2965 /***********************************************************************
2966 * _malloc_internal
2967 * _calloc_internal
2968 * _realloc_internal
2969 * _strdup_internal
2970 * _free_internal
2971 * Convenience functions for the internal malloc zone.
2972 **********************************************************************/
// malloc() equivalent that allocates from the runtime's internal zone.
__private_extern__ void *_malloc_internal(size_t size)
{
    return malloc_zone_malloc(_objc_internal_zone(), size);
}
2977
// calloc() equivalent that allocates zeroed memory from the runtime's
// internal zone.
__private_extern__ void *_calloc_internal(size_t count, size_t size)
{
    return malloc_zone_calloc(_objc_internal_zone(), count, size);
}
2982
// realloc() equivalent for memory in the runtime's internal zone.
__private_extern__ void *_realloc_internal(void *ptr, size_t size)
{
    return malloc_zone_realloc(_objc_internal_zone(), ptr, size);
}
2987
2988 __private_extern__ char *_strdup_internal(const char *str)
2989 {
2990 size_t len = strlen(str);
2991 char *dup = malloc_zone_malloc(_objc_internal_zone(), len + 1);
2992 memcpy(dup, str, len + 1);
2993 return dup;
2994 }
2995
// free() equivalent for memory allocated from the runtime's internal zone.
__private_extern__ void _free_internal(void *ptr)
{
    malloc_zone_free(_objc_internal_zone(), ptr);
}
3000
3001
3002
3003 /***********************************************************************
3004 * cache collection.
3005 **********************************************************************/
3006
// Returns the program counter of the given thread via
// thread_get_state(), or PC_SENTINAL if the state could not be fetched.
// One per-architecture body; used by _collecting_in_critical to see
// whether other threads are executing inside the cache lookup code.
static unsigned long _get_pc_for_thread (mach_port_t thread)
#ifdef hppa
{
    struct hp_pa_frame_thread_state state;
    unsigned int count = HPPA_FRAME_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, HPPA_FRAME_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.ts_pcoq_front : PC_SENTINAL;
}
#elif defined(sparc)
{
    struct sparc_thread_state_regs state;
    unsigned int count = SPARC_THREAD_STATE_REGS_COUNT;
    kern_return_t okay = thread_get_state (thread, SPARC_THREAD_STATE_REGS, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.regs.r_pc : PC_SENTINAL;
}
#elif defined(__i386__) || defined(i386)
{
    i386_thread_state_t state;
    unsigned int count = i386_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.eip : PC_SENTINAL;
}
#elif defined(m68k)
{
    struct m68k_thread_state_regs state;
    unsigned int count = M68K_THREAD_STATE_REGS_COUNT;
    kern_return_t okay = thread_get_state (thread, M68K_THREAD_STATE_REGS, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.pc : PC_SENTINAL;
}
#elif defined(__ppc__) || defined(ppc)
{
    struct ppc_thread_state state;
    unsigned int count = PPC_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, PPC_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.srr0 : PC_SENTINAL;
}
#else
{
#error _get_pc_for_thread () not implemented for this architecture
}
#endif
3048
3049 /***********************************************************************
3050 * _collecting_in_critical.
3051 * Returns TRUE if some thread is currently executing a cache-reading
3052 * function. Collection of cache garbage is not allowed when a cache-
3053 * reading function is in progress because it might still be using
3054 * the garbage memory.
3055 **********************************************************************/
3056 OBJC_EXPORT unsigned long objc_entryPoints[];
3057 OBJC_EXPORT unsigned long objc_exitPoints[];
3058
3059 static int _collecting_in_critical (void)
3060 {
3061 thread_act_port_array_t threads;
3062 unsigned number;
3063 unsigned count;
3064 kern_return_t ret;
3065 int result;
3066
3067 mach_port_t mythread = pthread_mach_thread_np(pthread_self());
3068
3069 // Get a list of all the threads in the current task
3070 ret = task_threads (mach_task_self (), &threads, &number);
3071 if (ret != KERN_SUCCESS)
3072 {
3073 _objc_fatal("task_thread failed (result %d)\n", ret);
3074 }
3075
3076 // Check whether any thread is in the cache lookup code
3077 result = FALSE;
3078 for (count = 0; count < number; count++)
3079 {
3080 int region;
3081 unsigned long pc;
3082
3083 // Don't bother checking ourselves
3084 if (threads[count] == mythread)
3085 continue;
3086
3087 // Find out where thread is executing
3088 pc = _get_pc_for_thread (threads[count]);
3089
3090 // Check for bad status, and if so, assume the worse (can't collect)
3091 if (pc == PC_SENTINAL)
3092 {
3093 result = TRUE;
3094 goto done;
3095 }
3096
3097 // Check whether it is in the cache lookup code
3098 for (region = 0; objc_entryPoints[region] != 0; region++)
3099 {
3100 if ((pc >= objc_entryPoints[region]) &&
3101 (pc <= objc_exitPoints[region]))
3102 {
3103 result = TRUE;
3104 goto done;
3105 }
3106 }
3107 }
3108
3109 done:
3110 // Deallocate the port rights for the threads
3111 for (count = 0; count < number; count++) {
3112 mach_port_deallocate(mach_task_self (), threads[count]);
3113 }
3114
3115 // Deallocate the thread list
3116 vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads) * number);
3117
3118 // Return our finding
3119 return result;
3120 }
3121
3122 /***********************************************************************
3123 * _garbage_make_room. Ensure that there is enough room for at least
3124 * one more ref in the garbage.
3125 **********************************************************************/
3126
// Approximate number of bytes represented by all refs in the garbage
// (callers pass sizes; used only for the collection threshold test)
static int garbage_byte_size = 0;

// Do not empty the garbage until garbage_byte_size gets at least this big
static int garbage_threshold = 1024;

// Table of refs (retired cache blocks) waiting to be freed
static void **garbage_refs = 0;

// Current number of refs in garbage_refs
static int garbage_count = 0;

// Capacity (in refs) of the current garbage_refs table
static int garbage_max = 0;

// Capacity of the initial garbage_refs table
enum {
    INIT_GARBAGE_COUNT = 128
};
3146
3147 static void _garbage_make_room (void)
3148 {
3149 static int first = 1;
3150 volatile void * tempGarbage;
3151
3152 // Create the collection table the first time it is needed
3153 if (first)
3154 {
3155 first = 0;
3156 garbage_refs = _malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
3157 garbage_max = INIT_GARBAGE_COUNT;
3158 }
3159
3160 // Double the table if it is full
3161 else if (garbage_count == garbage_max)
3162 {
3163 tempGarbage = _realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));
3164 garbage_refs = (void **) tempGarbage;
3165 garbage_max *= 2;
3166 }
3167 }
3168
3169 /***********************************************************************
3170 * _cache_collect_free. Add the specified malloc'd memory to the list
3171 * of them to free at some later point.
3172 * size is used for the collection threshold. It does not have to be
3173 * precisely the block's size.
3174 * Cache locks: cacheUpdateLock must be held by the caller.
3175 **********************************************************************/
// Adds `data` to the list of retired cache memory, then (if tryCollect
// and enough garbage has accumulated and no thread is inside the cache
// lookup code) frees everything in the list.
// `size` feeds the collection threshold only; it need not be exact.
// Cache locks: cacheUpdateLock must be held by the caller.
static void _cache_collect_free(void *data, size_t size, BOOL tryCollect)
{
    // (char *)0xffffffff is a sentinel for "getenv not consulted yet";
    // afterwards report_garbage is the env value or NULL.
    static char *report_garbage = (char *)0xffffffff;

    if ((char *)0xffffffff == report_garbage) {
        // Check whether to log our activity
        report_garbage = getenv ("OBJC_REPORT_GARBAGE");
    }

    // Insert new element in garbage list
    // Note that we do this even if we end up free'ing everything
    _garbage_make_room ();
    garbage_byte_size += size;
    garbage_refs[garbage_count++] = data;

    // Log our progress
    if (tryCollect && report_garbage)
        _objc_inform ("total of %d bytes of garbage ...", garbage_byte_size);

    // Done if caller says not to empty or the garbage is not full
    if (!tryCollect || (garbage_byte_size < garbage_threshold))
    {
        if (tryCollect && report_garbage)
            _objc_inform ("couldn't collect cache garbage: below threshold\n");

        return;
    }

    // tryCollect is guaranteed to be true after this point

    // Synchronize garbage collection with objc_msgSend and other cache readers
    if (!_collecting_in_critical ()) {
        // No cache readers in progress - garbage is now deletable

        // Log our progress
        if (report_garbage)
            _objc_inform ("collecting!\n");

        // Dispose all refs now in the garbage.
        // Blocks from the custom cache allocator go back to it;
        // everything else came from ordinary malloc.
        while (garbage_count--) {
            if (cache_allocator_is_block(garbage_refs[garbage_count])) {
                cache_allocator_free(garbage_refs[garbage_count]);
            } else {
                free(garbage_refs[garbage_count]);
            }
        }

        // Clear the garbage count and total size indicator
        garbage_count = 0;
        garbage_byte_size = 0;
    }
    else {
        // objc_msgSend (or other cache reader) is currently looking in the
        // cache and might still be using some garbage.
        if (report_garbage) {
            _objc_inform ("couldn't collect cache garbage: objc_msgSend in progress\n");
        }
    }
}
3235
3236
3237
3238 /***********************************************************************
3239 * Custom method cache allocator.
3240 * Method cache block sizes are 2^slots+2 words, which is a pessimal
3241 * case for the system allocator. It wastes 504 bytes per cache block
3242 * with 128 or more slots, which adds up to tens of KB for an AppKit process.
3243 * To save memory, the custom cache allocator below is used.
3244 *
3245 * The cache allocator uses 128 KB allocation regions. Few processes will
3246 * require a second region. Within a region, allocation is address-ordered
3247 * first fit.
3248 *
3249 * The cache allocator uses a quantum of 520.
3250 * Cache block ideal sizes: 520, 1032, 2056, 4104
3251 * Cache allocator sizes: 520, 1040, 2080, 4160
3252 *
3253 * Because all blocks are known to be genuine method caches, the ordinary
3254 * cache->mask and cache->occupied fields are used as block headers.
3255 * No out-of-band headers are maintained. The number of blocks will
3256 * almost always be fewer than 200, so for simplicity there is no free
3257 * list or other optimization.
3258 *
3259 * Block in use: mask != 0, occupied != -1 (mask indicates block size)
3260 * Block free: mask != 0, occupied == -1 (mask is precisely block size)
3261 *
3262 * No cache allocator functions take any locks. Instead, the caller
3263 * must hold the cacheUpdateLock.
3264 **********************************************************************/
3265
// Header of a block inside a cache allocator region.
// For a block in use, size/state overlay cache->mask and cache->occupied
// (see the allocator comment above: in use means state != -1).
// For a free block: size is the exact byte size, state is (unsigned)-1,
// and nextFree links the region's address-ordered free list.
typedef struct cache_allocator_block {
    unsigned int size;
    unsigned int state;
    struct cache_allocator_block *nextFree;
} cache_allocator_block;

// One allocation arena (CACHE_REGION_SIZE minimum); regions form a list.
typedef struct cache_allocator_region {
    cache_allocator_block *start;
    cache_allocator_block *end;    // first non-block address
    cache_allocator_block *freeList;
    struct cache_allocator_region *next;
} cache_allocator_region;

// Head of the list of all cache allocator regions.
// Callers must hold cacheUpdateLock (no locks are taken here).
static cache_allocator_region *cacheRegion = NULL;
3280
3281
// Converts an allocation size in bytes to the cache->mask value for a
// cache occupying that many bytes (bucket count minus header).
static unsigned int cache_allocator_mask_for_size(size_t size)
{
    return (size - sizeof(struct objc_cache)) / sizeof(Method);
}
3286
3287 static size_t cache_allocator_size_for_mask(unsigned int mask)
3288 {
3289 size_t requested = sizeof(struct objc_cache) + TABLE_SIZE(mask+1);
3290 size_t actual = CACHE_QUANTUM;
3291 while (actual < requested) actual += CACHE_QUANTUM;
3292 return actual;
3293 }
3294
3295 /***********************************************************************
3296 * cache_allocator_add_region
3297 * Allocates and returns a new region that can hold at least size
3298 * bytes of large method caches.
3299 * The actual size will be rounded up to a CACHE_QUANTUM boundary,
3300 * with a minimum of CACHE_REGION_SIZE.
3301 * The new region is lowest-priority for new allocations. Callers that
3302 * know the other regions are already full should allocate directly
3303 * into the returned region.
3304 **********************************************************************/
3305 static cache_allocator_region *cache_allocator_add_region(size_t size)
3306 {
3307 vm_address_t addr;
3308 cache_allocator_block *b;
3309 cache_allocator_region **rgnP;
3310 cache_allocator_region *newRegion =
3311 _calloc_internal(1, sizeof(cache_allocator_region));
3312
3313 // Round size up to quantum boundary, and apply the minimum size.
3314 size += CACHE_QUANTUM - (size % CACHE_QUANTUM);
3315 if (size < CACHE_REGION_SIZE) size = CACHE_REGION_SIZE;
3316
3317 // Allocate the region
3318 addr = 0;
3319 vm_allocate(mach_task_self(), &addr, size, 1);
3320 newRegion->start = (cache_allocator_block *)addr;
3321 newRegion->end = (cache_allocator_block *)(addr + size);
3322
3323 // Mark the first block: free and covers the entire region
3324 b = newRegion->start;
3325 b->size = size;
3326 b->state = (unsigned int)-1;
3327 b->nextFree = NULL;
3328 newRegion->freeList = b;
3329
3330 // Add to end of the linked list of regions.
3331 // Other regions should be re-used before this one is touched.
3332 newRegion->next = NULL;
3333 rgnP = &cacheRegion;
3334 while (*rgnP) {
3335 rgnP = &(**rgnP).next;
3336 }
3337 *rgnP = newRegion;
3338
3339 return newRegion;
3340 }
3341
3342
3343 /***********************************************************************
3344 * cache_allocator_coalesce
3345 * Attempts to coalesce a free block with the single free block following
3346 * it in the free list, if any.
3347 **********************************************************************/
3348 static void cache_allocator_coalesce(cache_allocator_block *block)
3349 {
3350 if (block->size + (uintptr_t)block == (uintptr_t)block->nextFree) {
3351 block->size += block->nextFree->size;
3352 block->nextFree = block->nextFree->nextFree;
3353 }
3354 }
3355
3356
3357 /***********************************************************************
3358 * cache_region_calloc
3359 * Attempt to allocate a size-byte block in the given region.
3360 * Allocation is first-fit. The free list is already fully coalesced.
3361 * Returns NULL if there is not enough room in the region for the block.
3362 **********************************************************************/
// Attempts to allocate a size-byte block in the given region.
// Allocation is first-fit over the address-ordered, fully coalesced
// free list. Returns a zeroed block with size/state pre-set to the
// cache's mask/occupied values, or NULL if the region is full.
static void *cache_region_calloc(cache_allocator_region *rgn, size_t size)
{
    cache_allocator_block **blockP;
    unsigned int mask;

    // Save mask for allocated block, then round size
    // up to CACHE_QUANTUM boundary
    mask = cache_allocator_mask_for_size(size);
    size = cache_allocator_size_for_mask(mask);

    // Search the free list for a sufficiently large free block.
    // blockP walks the *links* so the list can be spliced in place.

    for (blockP = &rgn->freeList;
         *blockP != NULL;
         blockP = &(**blockP).nextFree)
    {
        cache_allocator_block *block = *blockP;
        if (block->size < size) continue;  // not big enough

        // block is now big enough. Allocate from it.

        // Slice off unneeded fragment of block, if any,
        // and reconnect the free list around block.
        if (block->size - size >= CACHE_QUANTUM) {
            cache_allocator_block *leftover =
                (cache_allocator_block *)(size + (uintptr_t)block);
            leftover->size = block->size - size;
            leftover->state = (unsigned int)-1;
            leftover->nextFree = block->nextFree;
            *blockP = leftover;
        } else {
            *blockP = block->nextFree;
        }

        // block is now exactly the right size.

        bzero(block, size);
        block->size = mask;  // Cache->mask
        block->state = 0;    // Cache->occupied

        return block;
    }

    // No room in this region.
    return NULL;
}
3409
3410
3411 /***********************************************************************
3412 * cache_allocator_calloc
3413 * Custom allocator for large method caches (128+ slots)
3414 * The returned cache block already has cache->mask set.
3415 * cache->occupied and the cache contents are zero.
3416 * Cache locks: cacheUpdateLock must be held by the caller
3417 **********************************************************************/
3418 static void *cache_allocator_calloc(size_t size)
3419 {
3420 cache_allocator_region *rgn;
3421
3422 for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
3423 void *p = cache_region_calloc(rgn, size);
3424 if (p) {
3425 return p;
3426 }
3427 }
3428
3429 // No regions or all regions full - make a region and try one more time
3430 // In the unlikely case of a cache over 256KB, it will get its own region.
3431 return cache_region_calloc(cache_allocator_add_region(size), size);
3432 }
3433
3434
3435 /***********************************************************************
3436 * cache_allocator_region_for_block
3437 * Returns the cache allocator region that ptr points into, or NULL.
3438 **********************************************************************/
3439 static cache_allocator_region *cache_allocator_region_for_block(cache_allocator_block *block)
3440 {
3441 cache_allocator_region *rgn;
3442 for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
3443 if (block >= rgn->start && block < rgn->end) return rgn;
3444 }
3445 return NULL;
3446 }
3447
3448
3449 /***********************************************************************
3450 * cache_allocator_is_block
3451 * If ptr is a live block from the cache allocator, return YES
3452 * If ptr is a block from some other allocator, return NO.
3453 * If ptr is a dead block from the cache allocator, result is undefined.
3454 * Cache locks: cacheUpdateLock must be held by the caller
3455 **********************************************************************/
3456 static BOOL cache_allocator_is_block(void *ptr)
3457 {
3458 return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL);
3459 }
3460
3461 /***********************************************************************
3462 * cache_allocator_free
3463 * Frees a block allocated by the cache allocator.
3464 * Cache locks: cacheUpdateLock must be held by the caller.
3465 **********************************************************************/
// Frees a block allocated by cache_allocator_calloc, inserting it into
// its region's address-ordered free list and coalescing with adjacent
// free blocks. Logs (but otherwise ignores) pointers that do not
// belong to any region.
// Cache locks: cacheUpdateLock must be held by the caller.
static void cache_allocator_free(void *ptr)
{
    cache_allocator_block *dead = (cache_allocator_block *)ptr;
    cache_allocator_block *cur;
    cache_allocator_region *rgn;

    if (! (rgn = cache_allocator_region_for_block(ptr))) {
        // free of non-pointer
        _objc_inform("cache_allocator_free of non-pointer %p", ptr);
        return;
    }

    // Convert the in-use header (mask/occupied) back to a free-block
    // header: exact byte size plus the -1 "free" state.
    dead->size = cache_allocator_size_for_mask(dead->size);
    dead->state = (unsigned int)-1;

    if (!rgn->freeList || rgn->freeList > dead) {
        // dead block belongs at front of free list
        dead->nextFree = rgn->freeList;
        rgn->freeList = dead;
        cache_allocator_coalesce(dead);
        return;
    }

    // dead block belongs in the middle or end of free list
    for (cur = rgn->freeList; cur != NULL; cur = cur->nextFree) {
        cache_allocator_block *ahead = cur->nextFree;

        if (!ahead || ahead > dead) {
            // cur and ahead straddle dead, OR dead belongs at end of free list
            cur->nextFree = dead;
            dead->nextFree = ahead;

            // coalesce into dead first in case both succeed
            cache_allocator_coalesce(dead);
            cache_allocator_coalesce(cur);
            return;
        }
    }

    // uh-oh: walked the whole free list without finding an insertion
    // point, which shouldn't happen for a pointer inside the region.
    _objc_inform("cache_allocator_free of non-pointer %p", ptr);
}
3508
3509
3510 /***********************************************************************
3511 * _cache_print.
3512 **********************************************************************/
3513 static void _cache_print (Cache cache)
3514 {
3515 unsigned int index;
3516 unsigned int count;
3517
3518 count = cache->mask + 1;
3519 for (index = 0; index < count; index += 1)
3520 if (CACHE_BUCKET_VALID(cache->buckets[index]))
3521 {
3522 if (CACHE_BUCKET_IMP(cache->buckets[index]) == &_objc_msgForward)
3523 printf ("does not recognize: \n");
3524 printf ("%s\n", (const char *) CACHE_BUCKET_NAME(cache->buckets[index]));
3525 }
3526 }
3527
3528 /***********************************************************************
3529 * _class_printMethodCaches.
3530 **********************************************************************/
3531 void _class_printMethodCaches (Class cls)
3532 {
3533 if (((struct objc_class *)cls)->cache == &emptyCache)
3534 printf ("no instance-method cache for class %s\n", ((struct objc_class *)cls)->name);
3535
3536 else
3537 {
3538 printf ("instance-method cache for class %s:\n", ((struct objc_class *)cls)->name);
3539 _cache_print (((struct objc_class *)cls)->cache);
3540 }
3541
3542 if (((struct objc_class * )((struct objc_class * )cls)->isa)->cache == &emptyCache)
3543 printf ("no class-method cache for class %s\n", ((struct objc_class *)cls)->name);
3544
3545 else
3546 {
3547 printf ("class-method cache for class %s:\n", ((struct objc_class *)cls)->name);
3548 _cache_print (((struct objc_class * )((struct objc_class * )cls)->isa)->cache);
3549 }
3550 }
3551
3552 /***********************************************************************
3553 * log2.
3554 **********************************************************************/
// Floor of the base-2 logarithm of x; returns 0 for x == 0 or 1.
static unsigned int log2 (unsigned int x)
{
    unsigned int result = 0;

    for (x >>= 1; x; x >>= 1) {
        result++;
    }
    return result;
}
3565
3566 /***********************************************************************
3567 * _class_printDuplicateCacheEntries.
3568 **********************************************************************/
// Debug aid: scans every registered class's instance and class caches
// for pairs of buckets whose selector names compare equal, printing
// each duplicate (with class name when `detail` is set) and a final
// count alongside totalCacheFills.
void _class_printDuplicateCacheEntries (BOOL detail)
{
    NXHashTable * class_hash;
    NXHashState state;
    struct objc_class * cls;
    unsigned int duplicates;
    unsigned int index1;
    unsigned int index2;
    unsigned int mask;
    unsigned int count;
    unsigned int isMeta;
    Cache cache;


    printf ("Checking for duplicate cache entries \n");

    // Outermost loop - iterate over all classes
    class_hash = objc_getClasses ();
    state = NXInitHashState (class_hash);
    duplicates = 0;
    while (NXNextHashState (class_hash, &state, (void **) &cls))
    {
        // Control loop - do given class' cache, then its isa's cache
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            // Select cache of interest and make sure it exists
            cache = isMeta ? cls->isa->cache : ((struct objc_class *)cls)->cache;
            if (cache == &emptyCache)
                continue;

            // Middle loop - check each entry in the given cache
            mask = cache->mask;
            count = mask + 1;
            for (index1 = 0; index1 < count; index1 += 1)
            {
                // Skip invalid entry
                if (!CACHE_BUCKET_VALID(cache->buckets[index1]))
                    continue;

                // Inner loop - check that given entry matches no later entry
                for (index2 = index1 + 1; index2 < count; index2 += 1)
                {
                    // Skip invalid entry
                    if (!CACHE_BUCKET_VALID(cache->buckets[index2]))
                        continue;

                    // Check for duplication by method name comparison
                    if (strcmp ((char *) CACHE_BUCKET_NAME(cache->buckets[index1]),
                                (char *) CACHE_BUCKET_NAME(cache->buckets[index2])) == 0)
                    {
                        if (detail)
                            printf ("%s %s\n", ((struct objc_class *)cls)->name, (char *) CACHE_BUCKET_NAME(cache->buckets[index1]));
                        duplicates += 1;
                        break;  // count index1's entry once, then move on
                    }
                }
            }
        }
    }

    // Log the findings
    printf ("duplicates = %d\n", duplicates);
    printf ("total cache fills = %d\n", totalCacheFills);
}
3633
/***********************************************************************
* PrintCacheHeader.  Print the column headings for the per-cache-size
* statistics rows emitted by PrintCacheInfo.  The instrumented build
* adds columns for dynamically-gathered hit/miss/flush probe counts.
* NOTE(review): column spacing in these literals may have been altered
* by extraction; verify alignment against PrintCacheInfo's formats.
**********************************************************************/
static void PrintCacheHeader (void)
{
#ifdef OBJC_INSTRUMENTED
printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS TotalD AvgD MaxD TotalD AvgD MaxD TotD AvgD MaxD\n");
printf ("Size Count Used Used Used Hit Hit Miss Miss Hits Prbs Prbs Misses Prbs Prbs Flsh Flsh Flsh\n");
printf ("----- ----- ----- ----- ---- ---- ---- ---- ---- ------- ---- ---- ------- ---- ---- ---- ---- ----\n");
#else
printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS\n");
printf ("Size Count Used Used Used Hit Hit Miss Miss\n");
printf ("----- ----- ----- ----- ---- ---- ---- ---- ----\n");
#endif
}
3649
/***********************************************************************
* PrintCacheInfo.  Emit one row of cache statistics in the column
* layout established by PrintCacheHeader.  The instrumented build
* takes and prints nine additional dynamic hit/miss/flush columns.
**********************************************************************/
static void PrintCacheInfo (unsigned int cacheSize,
                            unsigned int cacheCount,
                            unsigned int slotsUsed,
                            float avgUsed,
                            unsigned int maxUsed,
                            float avgSHit,
                            unsigned int maxSHit,
                            float avgSMiss,
                            unsigned int maxSMiss
#ifdef OBJC_INSTRUMENTED
                          , unsigned int totDHits,
                            float avgDHit,
                            unsigned int maxDHit,
                            unsigned int totDMisses,
                            float avgDMiss,
                            unsigned int maxDMiss,
                            unsigned int totDFlsh,
                            float avgDFlsh,
                            unsigned int maxDFlsh
#endif
                            )
{
    // One complete printf call per configuration, rather than a format
    // string and argument list spliced together across #ifdef branches.
#ifdef OBJC_INSTRUMENTED
    printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u %7u %4.1f %4u %7u %4.1f %4u %4u %4.1f %4u\n",
            cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed,
            avgSHit, maxSHit, avgSMiss, maxSMiss,
            totDHits, avgDHit, maxDHit,
            totDMisses, avgDMiss, maxDMiss,
            totDFlsh, avgDFlsh, maxDFlsh);
#else
    printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u\n",
            cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed,
            avgSHit, maxSHit, avgSMiss, maxSMiss);
#endif
}
3687
#ifdef OBJC_INSTRUMENTED
/***********************************************************************
* PrintCacheHistogram.  Show the non-zero entries from the specified
* cache histogram: for each probe count with a non-zero tally, print
* the probe count and how many lookups needed that many probes.
**********************************************************************/
static void PrintCacheHistogram (char * title,
                                 unsigned int * firstEntry,
                                 unsigned int entryCount)
{
    unsigned int index;
    unsigned int * thisEntry;

    printf ("%s\n", title);
    printf (" Probes Tally\n");
    printf (" ------ -----\n");
    for (index = 0, thisEntry = firstEntry;
         index < entryCount;
         index += 1, thisEntry += 1)
    {
        // Suppress zero tallies to keep the report compact
        if (*thisEntry == 0)
            continue;

        // Fix: both arguments are unsigned int, so the conversion
        // specifiers must be %u, not %d (mismatch is undefined
        // behavior for values above INT_MAX).
        printf (" %6u %5u\n", index, *thisEntry);
    }
}
#endif
3714
3715 /***********************************************************************
3716 * _class_printMethodCacheStatistics.
3717 **********************************************************************/
3718
3719 #define MAX_LOG2_SIZE 32
3720 #define MAX_CHAIN_SIZE 100
3721
// Walks every registered class and metaclass method cache, accumulating
// occupancy and probe-chain statistics bucketed by cache size (all cache
// sizes are powers of two), then prints a per-size and cumulative report.
void _class_printMethodCacheStatistics (void)
{
    unsigned int isMeta;                // 0 = instance cache, 1 = metaclass cache
    unsigned int index;
    NXHashTable * class_hash;
    NXHashState state;
    struct objc_class * cls;
    unsigned int totalChain;            // sum of hit-chain lengths, current cache
    unsigned int totalMissChain;        // sum of miss-chain lengths, current cache
    unsigned int maxChain;
    unsigned int maxMissChain;
    unsigned int classCount;
    unsigned int negativeEntryCount;    // entries whose IMP is _objc_msgForward
    unsigned int cacheExpandCount;
    // All *BySize arrays: first index 0/1 = instance/meta, second = log2(slot count)
    unsigned int cacheCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMaxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMaxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int chainCount[MAX_CHAIN_SIZE] = {0};      // histogram of hit-chain lengths
    unsigned int missChainCount[MAX_CHAIN_SIZE] = {0};  // histogram of miss-chain lengths
#ifdef OBJC_INSTRUMENTED
    unsigned int hitCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int hitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxHitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int missCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int missProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxMissProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int flushCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int flushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxFlushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
#endif

    printf ("Printing cache statistics\n");

    // Outermost loop - iterate over all classes
    class_hash = objc_getClasses ();
    state = NXInitHashState (class_hash);
    classCount = 0;
    negativeEntryCount = 0;
    cacheExpandCount = 0;
    while (NXNextHashState (class_hash, &state, (void **) &cls))
    {
        // Tally classes
        classCount += 1;

        // Control loop - do given class' cache, then its isa's cache
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            Cache cache;
            unsigned int mask;
            unsigned int log2Size;
            unsigned int entryCount;

            // Select cache of interest
            cache = isMeta ? cls->isa->cache : ((struct objc_class *)cls)->cache;

            // Ignore empty cache... should we?
            if (cache == &emptyCache)
                continue;

            // Middle loop - do each entry in the given cache
            mask = cache->mask;
            entryCount = 0;
            totalChain = 0;
            totalMissChain = 0;
            maxChain = 0;
            maxMissChain = 0;
            for (index = 0; index < mask + 1; index += 1)
            {
                Method * buckets;
                Method method;
                uarith_t hash;
                uarith_t methodChain;
                uarith_t methodMissChain;
                uarith_t index2;

                // If entry is invalid, the only item of
                // interest is that future insert hashes
                // to this entry can use it directly.
                buckets = cache->buckets;
                if (!CACHE_BUCKET_VALID(buckets[index]))
                {
                    missChainCount[0] += 1;
                    continue;
                }

                method = buckets[index];

                // Tally valid entries
                entryCount += 1;

                // Tally "forward::" entries
                if (CACHE_BUCKET_IMP(method) == &_objc_msgForward)
                    negativeEntryCount += 1;

                // Calculate search distance (chain length) for this method
                // The chain may wrap around to the beginning of the table.
                hash = CACHE_HASH(CACHE_BUCKET_NAME(method), mask);
                if (index >= hash) methodChain = index - hash;
                else methodChain = (mask+1) + index - hash;

                // Tally chains of this length
                if (methodChain < MAX_CHAIN_SIZE)
                    chainCount[methodChain] += 1;

                // Keep sum of all chain lengths
                totalChain += methodChain;

                // Record greatest chain length
                if (methodChain > maxChain)
                    maxChain = methodChain;

                // Calculate search distance for miss that hashes here:
                // linear probe forward (with wraparound) to the first
                // invalid slot.
                // NOTE(review): this scan would not terminate if every
                // slot were valid; presumably caches are expanded before
                // becoming completely full - verify against the filler.
                index2 = index;
                while (CACHE_BUCKET_VALID(buckets[index2]))
                {
                    index2 += 1;
                    index2 &= mask;
                }
                methodMissChain = ((index2 - index) & mask);

                // Tally miss chains of this length
                if (methodMissChain < MAX_CHAIN_SIZE)
                    missChainCount[methodMissChain] += 1;

                // Keep sum of all miss chain lengths in this class
                totalMissChain += methodMissChain;

                // Record greatest miss chain length
                if (methodMissChain > maxMissChain)
                    maxMissChain = methodMissChain;
            }

            // Factor this cache into statistics about caches of the same
            // type and size (all caches are a power of two in size)
            log2Size = log2 (mask + 1);
            cacheCountBySize[isMeta][log2Size] += 1;
            totalEntriesBySize[isMeta][log2Size] += entryCount;
            if (entryCount > maxEntriesBySize[isMeta][log2Size])
                maxEntriesBySize[isMeta][log2Size] = entryCount;
            totalChainBySize[isMeta][log2Size] += totalChain;
            totalMissChainBySize[isMeta][log2Size] += totalMissChain;
            totalMaxChainBySize[isMeta][log2Size] += maxChain;
            totalMaxMissChainBySize[isMeta][log2Size] += maxMissChain;
            if (maxChain > maxChainBySize[isMeta][log2Size])
                maxChainBySize[isMeta][log2Size] = maxChain;
            if (maxMissChain > maxMissChainBySize[isMeta][log2Size])
                maxMissChainBySize[isMeta][log2Size] = maxMissChain;
#ifdef OBJC_INSTRUMENTED
            {
                CacheInstrumentation * cacheData;

                // Fold in the runtime-gathered probe counters stored
                // alongside the cache in instrumented builds.
                cacheData = CACHE_INSTRUMENTATION(cache);
                hitCountBySize[isMeta][log2Size] += cacheData->hitCount;
                hitProbesBySize[isMeta][log2Size] += cacheData->hitProbes;
                if (cacheData->maxHitProbes > maxHitProbesBySize[isMeta][log2Size])
                    maxHitProbesBySize[isMeta][log2Size] = cacheData->maxHitProbes;
                missCountBySize[isMeta][log2Size] += cacheData->missCount;
                missProbesBySize[isMeta][log2Size] += cacheData->missProbes;
                if (cacheData->maxMissProbes > maxMissProbesBySize[isMeta][log2Size])
                    maxMissProbesBySize[isMeta][log2Size] = cacheData->maxMissProbes;
                flushCountBySize[isMeta][log2Size] += cacheData->flushCount;
                flushedEntriesBySize[isMeta][log2Size] += cacheData->flushedEntries;
                if (cacheData->maxFlushedEntries > maxFlushedEntriesBySize[isMeta][log2Size])
                    maxFlushedEntriesBySize[isMeta][log2Size] = cacheData->maxFlushedEntries;
            }
#endif
            // Caches start with a power of two number of entries, and grow by doubling, so
            // we can calculate the number of times this cache has expanded
            if (isMeta)
                cacheExpandCount += log2Size - INIT_META_CACHE_SIZE_LOG2;
            else
                cacheExpandCount += log2Size - INIT_CACHE_SIZE_LOG2;

        }
    }

    {
        unsigned int cacheCountByType[2] = {0};
        unsigned int totalCacheCount = 0;
        unsigned int totalEntries = 0;
        unsigned int maxEntries = 0;
        unsigned int totalSlots = 0;
#ifdef OBJC_INSTRUMENTED
        unsigned int totalHitCount = 0;
        unsigned int totalHitProbes = 0;
        unsigned int maxHitProbes = 0;
        unsigned int totalMissCount = 0;
        unsigned int totalMissProbes = 0;
        unsigned int maxMissProbes = 0;
        unsigned int totalFlushCount = 0;
        unsigned int totalFlushedEntries = 0;
        unsigned int maxFlushedEntries = 0;
#endif

        totalChain = 0;
        maxChain = 0;
        totalMissChain = 0;
        maxMissChain = 0;

        // Sum information over all caches
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            for (index = 0; index < MAX_LOG2_SIZE; index += 1)
            {
                cacheCountByType[isMeta] += cacheCountBySize[isMeta][index];
                totalEntries += totalEntriesBySize[isMeta][index];
                totalSlots += cacheCountBySize[isMeta][index] * (1 << index);
                totalChain += totalChainBySize[isMeta][index];
                if (maxEntriesBySize[isMeta][index] > maxEntries)
                    maxEntries = maxEntriesBySize[isMeta][index];
                if (maxChainBySize[isMeta][index] > maxChain)
                    maxChain = maxChainBySize[isMeta][index];
                totalMissChain += totalMissChainBySize[isMeta][index];
                if (maxMissChainBySize[isMeta][index] > maxMissChain)
                    maxMissChain = maxMissChainBySize[isMeta][index];
#ifdef OBJC_INSTRUMENTED
                totalHitCount += hitCountBySize[isMeta][index];
                totalHitProbes += hitProbesBySize[isMeta][index];
                if (maxHitProbesBySize[isMeta][index] > maxHitProbes)
                    maxHitProbes = maxHitProbesBySize[isMeta][index];
                totalMissCount += missCountBySize[isMeta][index];
                totalMissProbes += missProbesBySize[isMeta][index];
                if (maxMissProbesBySize[isMeta][index] > maxMissProbes)
                    maxMissProbes = maxMissProbesBySize[isMeta][index];
                totalFlushCount += flushCountBySize[isMeta][index];
                totalFlushedEntries += flushedEntriesBySize[isMeta][index];
                if (maxFlushedEntriesBySize[isMeta][index] > maxFlushedEntries)
                    maxFlushedEntries = maxFlushedEntriesBySize[isMeta][index];
#endif
            }

            totalCacheCount += cacheCountByType[isMeta];
        }

        // Log our findings
        printf ("There are %u classes\n", classCount);

        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            // Number of this type of class
            printf ("\nThere are %u %s-method caches, broken down by size (slot count):\n",
                    cacheCountByType[isMeta],
                    isMeta ? "class" : "instance");

            // Print header
            PrintCacheHeader ();

            // Keep format consistent even if there are caches of this kind
            if (cacheCountByType[isMeta] == 0)
            {
                printf ("(none)\n");
                continue;
            }

            // Usage information by cache size
            for (index = 0; index < MAX_LOG2_SIZE; index += 1)
            {
                unsigned int cacheCount;
                unsigned int cacheSlotCount;
                unsigned int cacheEntryCount;

                // Get number of caches of this type and size
                cacheCount = cacheCountBySize[isMeta][index];
                if (cacheCount == 0)
                    continue;

                // Get the cache slot count and the total number of valid entries
                cacheSlotCount = (1 << index);
                cacheEntryCount = totalEntriesBySize[isMeta][index];

                // Give the analysis
                PrintCacheInfo (cacheSlotCount,
                                cacheCount,
                                cacheEntryCount,
                                (float) cacheEntryCount / (float) cacheCount,
                                maxEntriesBySize[isMeta][index],
                                (float) totalChainBySize[isMeta][index] / (float) cacheEntryCount,
                                maxChainBySize[isMeta][index],
                                (float) totalMissChainBySize[isMeta][index] / (float) (cacheCount * cacheSlotCount),
                                maxMissChainBySize[isMeta][index]
#ifdef OBJC_INSTRUMENTED
                              , hitCountBySize[isMeta][index],
                                hitCountBySize[isMeta][index] ?
                                (float) hitProbesBySize[isMeta][index] / (float) hitCountBySize[isMeta][index] : 0.0,
                                maxHitProbesBySize[isMeta][index],
                                missCountBySize[isMeta][index],
                                missCountBySize[isMeta][index] ?
                                (float) missProbesBySize[isMeta][index] / (float) missCountBySize[isMeta][index] : 0.0,
                                maxMissProbesBySize[isMeta][index],
                                flushCountBySize[isMeta][index],
                                flushCountBySize[isMeta][index] ?
                                (float) flushedEntriesBySize[isMeta][index] / (float) flushCountBySize[isMeta][index] : 0.0,
                                maxFlushedEntriesBySize[isMeta][index]
#endif
                                );
            }
        }

        // Give overall numbers
        printf ("\nCumulative:\n");
        PrintCacheHeader ();
        PrintCacheInfo (totalSlots,
                        totalCacheCount,
                        totalEntries,
                        (float) totalEntries / (float) totalCacheCount,
                        maxEntries,
                        (float) totalChain / (float) totalEntries,
                        maxChain,
                        (float) totalMissChain / (float) totalSlots,
                        maxMissChain
#ifdef OBJC_INSTRUMENTED
                      , totalHitCount,
                        totalHitCount ?
                        (float) totalHitProbes / (float) totalHitCount : 0.0,
                        maxHitProbes,
                        totalMissCount,
                        totalMissCount ?
                        (float) totalMissProbes / (float) totalMissCount : 0.0,
                        maxMissProbes,
                        totalFlushCount,
                        totalFlushCount ?
                        (float) totalFlushedEntries / (float) totalFlushCount : 0.0,
                        maxFlushedEntries
#endif
                        );

        printf ("\nNumber of \"forward::\" entries: %d\n", negativeEntryCount);
        printf ("Number of cache expansions: %d\n", cacheExpandCount);
#ifdef OBJC_INSTRUMENTED
        printf ("flush_caches: total calls total visits average visits max visits total classes visits/class\n");
        printf (" ----------- ------------ -------------- ---------- ------------- -------------\n");
        printf (" linear %11u %12u %14.1f %10u %13u %12.2f\n",
                LinearFlushCachesCount,
                LinearFlushCachesVisitedCount,
                LinearFlushCachesCount ?
                (float) LinearFlushCachesVisitedCount / (float) LinearFlushCachesCount : 0.0,
                MaxLinearFlushCachesVisitedCount,
                LinearFlushCachesVisitedCount,
                1.0);
        printf (" nonlinear %11u %12u %14.1f %10u %13u %12.2f\n",
                NonlinearFlushCachesCount,
                NonlinearFlushCachesVisitedCount,
                NonlinearFlushCachesCount ?
                (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesCount : 0.0,
                MaxNonlinearFlushCachesVisitedCount,
                NonlinearFlushCachesClassCount,
                NonlinearFlushCachesClassCount ?
                (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesClassCount : 0.0);
        printf (" ideal %11u %12u %14.1f %10u %13u %12.2f\n",
                LinearFlushCachesCount + NonlinearFlushCachesCount,
                IdealFlushCachesCount,
                LinearFlushCachesCount + NonlinearFlushCachesCount ?
                (float) IdealFlushCachesCount / (float) (LinearFlushCachesCount + NonlinearFlushCachesCount) : 0.0,
                MaxIdealFlushCachesCount,
                LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount,
                LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount ?
                (float) IdealFlushCachesCount / (float) (LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount) : 0.0);

        PrintCacheHistogram ("\nCache hit histogram:", &CacheHitHistogram[0], CACHE_HISTOGRAM_SIZE);
        PrintCacheHistogram ("\nCache miss histogram:", &CacheMissHistogram[0], CACHE_HISTOGRAM_SIZE);
#endif

#if 0
        printf ("\nLookup chains:");
        for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
        {
            if (chainCount[index] != 0)
                printf (" %u:%u", index, chainCount[index]);
        }

        printf ("\nMiss chains:");
        for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
        {
            if (missChainCount[index] != 0)
                printf (" %u:%u", index, missChainCount[index]);
        }

        printf ("\nTotal memory usage for cache data structures: %lu bytes\n",
                totalCacheCount * (sizeof(struct objc_cache) - sizeof(Method)) +
                totalSlots * sizeof(Method) +
                negativeEntryCount * sizeof(struct objc_method));
#endif
    }
}
4112
4113 /***********************************************************************
4114 * checkUniqueness.
4115 **********************************************************************/
4116 void checkUniqueness (SEL s1,
4117 SEL s2)
4118 {
4119 if (s1 == s2)
4120 return;
4121
4122 if (s1 && s2 && (strcmp ((const char *) s1, (const char *) s2) == 0))
4123 _objc_inform ("%p != %p but !strcmp (%s, %s)\n", s1, s2, (char *) s1, (char *) s2);
4124 }
4125