// Source: apple/objc4 (objc4-371.1), runtime/objc-auto.m
// (retrieved via the git.saurik.com mirror of apple/objc4.git)
1 /*
2 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #import <stdint.h>
25 #import <stdbool.h>
26 #import <fcntl.h>
27 #import <mach/mach.h>
28 #import <mach-o/dyld.h>
29 #import <sys/types.h>
30 #import <sys/mman.h>
31 #import <libkern/OSAtomic.h>
32
33 #define OLD 1
34 #import "objc-private.h"
35 #import "auto_zone.h"
36 #import "objc-auto.h"
37 #import "objc-rtp.h"
38 #import "maptable.h"
39
40
41 static auto_zone_t *gc_zone_init(void);
42
43
44 __private_extern__ BOOL UseGC NOBSS = NO;
45 static BOOL RecordAllocations = NO;
46 static BOOL MultiThreadedGC = NO;
47 static BOOL WantsMainThreadFinalization = NO;
48 static BOOL NeedsMainThreadFinalization = NO;
49
50 static struct {
51 auto_zone_foreach_object_t foreach;
52 auto_zone_cursor_t cursor;
53 size_t cursor_size;
54 volatile BOOL finished;
55 volatile BOOL started;
56 pthread_mutex_t mutex;
57 pthread_cond_t condition;
58 } BatchFinalizeBlock;
59
60
61 __private_extern__ auto_zone_t *gc_zone = NULL;
62
63 // Pointer magic to make dyld happy. See notes in objc-private.h
64 __private_extern__ id (*objc_assign_ivar_internal)(id, id, ptrdiff_t) = objc_assign_ivar;
65
66
67 /***********************************************************************
68 * Utility exports
69 * Called by various libraries.
70 **********************************************************************/
71
// Sets the allocation threshold (in bytes) that triggers a collection.
// Old naming; kept for binary compatibility with early GC adopters.
OBJC_EXPORT void objc_set_collection_threshold(size_t threshold) { // Old naming
    if (UseGC) {
        auto_collection_parameters(gc_zone)->collection_threshold = threshold;
    }
}

// Sets the allocation threshold (in bytes) that triggers a collection.
OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold) {
    if (UseGC) {
        auto_collection_parameters(gc_zone)->collection_threshold = threshold;
    }
}

// Sets how many generational collections run per full collection.
void objc_setCollectionRatio(size_t ratio) {
    if (UseGC) {
        auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
    }
}

// Same as objc_setCollectionRatio; old naming kept for compatibility.
void objc_set_collection_ratio(size_t ratio) { // old naming
    if (UseGC) {
        auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
    }
}
95
// Marks cls so that its instances are finalized only on the main thread,
// and flips the process-wide flag that enables the two-thread finalize path
// (see batchFinalizeOnTwoThreads below).  No-op when GC is off.
void objc_finalizeOnMainThread(Class cls) {
    if (UseGC) {
        WantsMainThreadFinalization = YES;
        _class_setFinalizeOnMainThread(cls);
    }
}
102
103
// Starts the dedicated background collector thread (idempotent).
// The previous check-then-set on a plain `static int didOnce` was racy:
// two threads calling this simultaneously could both pass the check and
// initialize the mutex/condition twice.  An atomic compare-and-swap
// guarantees exactly one caller performs the setup.
void objc_startCollectorThread(void) {
    static volatile int32_t didOnce = 0;
    if (OSAtomicCompareAndSwap32(0, 1, &didOnce)) {
        // Pretend a previous batch-finalize request already completed so
        // the first real request doesn't wait on a phantom one.
        BatchFinalizeBlock.started = YES;
        BatchFinalizeBlock.finished = YES;
        pthread_mutex_init(&BatchFinalizeBlock.mutex, NULL);
        pthread_cond_init(&BatchFinalizeBlock.condition, NULL);
        auto_collect_multithreaded(gc_zone);
        MultiThreadedGC = YES;
    }
}
118
// Old naming; forwards to objc_startCollectorThread.
void objc_start_collector_thread(void) {
    objc_startCollectorThread();
}
122
123 static void batchFinalizeOnMainThread(void);
124
// Requests a garbage collection.  The low two bits of `options` select the
// collection mode; OBJC_COLLECT_IF_NEEDED and OBJC_WAIT_UNTIL_DONE are
// additional flags.  Collections may only be initiated from the main thread
// or (once MultiThreadedGC) the dedicated collector thread; any other
// thread forwards the request to the main thread via NSGarbageCollector's
// _callOnMainThread:withArgs: SPI.
void objc_collect(unsigned long options) {
    if (!UseGC) return;
    BOOL onMainThread = pthread_main_np() ? YES : NO;

    if (MultiThreadedGC || onMainThread) {
        // Drain any main-thread-only finalization the collector thread queued.
        if (MultiThreadedGC && onMainThread) batchFinalizeOnMainThread();
        auto_collection_mode_t amode = AUTO_COLLECT_RATIO_COLLECTION;
        switch (options & 0x3) {
            case OBJC_RATIO_COLLECTION: amode = AUTO_COLLECT_RATIO_COLLECTION; break;
            case OBJC_GENERATIONAL_COLLECTION: amode = AUTO_COLLECT_GENERATIONAL_COLLECTION; break;
            case OBJC_FULL_COLLECTION: amode = AUTO_COLLECT_FULL_COLLECTION; break;
            case OBJC_EXHAUSTIVE_COLLECTION: amode = AUTO_COLLECT_EXHAUSTIVE_COLLECTION; break;
        }
        if (options & OBJC_COLLECT_IF_NEEDED) amode |= AUTO_COLLECT_IF_NEEDED;
        if (options & OBJC_WAIT_UNTIL_DONE) amode |= AUTO_COLLECT_SYNCHRONOUS; // uses different bits
        auto_collect(gc_zone, amode, NULL);
    }
    else {
        // Re-invoke this function on the main thread with the same options.
        objc_msgSend(objc_getClass("NSGarbageCollector"), @selector(_callOnMainThread:withArgs:), objc_collect, (void *)options);
    }
}
146
// SPI
// 0 - exhaustively NSGarbageCollector.m
// - from AppKit /Developer/Applications/Xcode.app/Contents/MacOS/Xcode via idleTimer
// GENERATIONAL
// - from autoreleasepool
// - several other places
// Opportunistic collection entry point.  With OBJC_GENERATIONAL set, runs a
// threshold-gated ratio collection; otherwise an exhaustive one.  Like
// objc_collect, non-main threads bounce the request to the main thread.
void objc_collect_if_needed(unsigned long options) {
    if (!UseGC) return;
    BOOL onMainThread = pthread_main_np() ? YES : NO;

    if (MultiThreadedGC || onMainThread) {
        auto_collection_mode_t mode;
        if (options & OBJC_GENERATIONAL) {
            mode = AUTO_COLLECT_IF_NEEDED | AUTO_COLLECT_RATIO_COLLECTION;
        }
        else {
            mode = AUTO_COLLECT_EXHAUSTIVE_COLLECTION;
        }
        // Drain any pending main-thread-only finalization first.
        if (MultiThreadedGC && onMainThread) batchFinalizeOnMainThread();
        auto_collect(gc_zone, mode, NULL);
    }
    else { // XXX could be optimized (e.g. ask auto for threshold check, if so, set ASKING if not already ASKING,...
        objc_msgSend(objc_getClass("NSGarbageCollector"), @selector(_callOnMainThread:withArgs:), objc_collect_if_needed, (void *)options);
    }
}
172
// NEVER USED.
// Returns the number of blocks currently allocated in the GC zone.
// NOTE(review): unlike most entry points this does not check UseGC —
// presumably callers only invoke it when collecting is enabled; verify
// before reusing.
size_t objc_numberAllocated(void)
{
    auto_statistics_t stats;
    stats.version = 0;  // request version-0 statistics layout
    auto_zone_statistics(gc_zone, &stats);
    return stats.malloc_statistics.blocks_in_use;
}
181
// USED BY CF & ONE OTHER
// Reports whether `object` is a GC-managed pointer: GC must be enabled and
// the pointer must belong to the GC zone.
BOOL objc_isAuto(id object)
{
    if (!UseGC) return NO;
    return auto_zone_is_valid_pointer(gc_zone, object) != 0;
}
187
188
// Returns YES when the process is running with garbage collection enabled.
BOOL objc_collectingEnabled(void)
{
    return UseGC;
}
// Same query under the old naming; kept for compatibility.
BOOL objc_collecting_enabled(void) // Old naming
{
    return UseGC;
}
197
198
199 /***********************************************************************
200 * Memory management.
201 * Called by CF and Foundation.
202 **********************************************************************/
203
// Allocate an object in the GC zone, with the given number of extra bytes.
// Thin wrapper over class_createInstance (which allocates from the GC zone
// when GC is on).
id objc_allocate_object(Class cls, int extra)
{
    return class_createInstance(cls, extra);
}
209
210
211 /***********************************************************************
212 * Write barrier implementations, optimized for when GC is known to be on
213 * Called by the write barrier exports only.
214 * These implementations assume GC is on. The exported function must
215 * either perform the check itself or be conditionally stomped at
216 * startup time.
217 **********************************************************************/
218
// GC-on implementation of the strongCast write barrier: store `value` into
// `*slot` with the appropriate barrier, whether or not the slot lives in
// GC memory.  Returns `value` for caller convenience.
__private_extern__ id objc_assign_strongCast_gc(id value, id *slot)
{
    if (!auto_zone_set_write_barrier(gc_zone, (void*)slot, value)) { // stores & returns true if slot points into GC allocated memory
        auto_zone_root_write_barrier(gc_zone, slot, value); // always stores
    }
    return value;
}
226
// GC-on implementation of the global-variable write barrier.  GC-managed
// values are registered as explicit roots; non-GC values are stored
// directly.  Storing an already-finalized object is a caller bug and is
// reported via objc_assign_global_error.
__private_extern__ id objc_assign_global_gc(id value, id *slot) {
    // use explicit root registration.
    if (value && auto_zone_is_valid_pointer(gc_zone, value)) {
        if (auto_zone_is_finalized(gc_zone, value)) {
            // local declaration of the debug hook; defined elsewhere
            __private_extern__ void objc_assign_global_error(id value, id *slot);

            _objc_inform("GC: storing an already collected object %p into global memory at %p, break on objc_assign_global_error to debug\n", value, slot);
            objc_assign_global_error(value, slot);
        }
        auto_zone_add_root(gc_zone, slot, value);
    }
    else
        *slot = value;

    return value;
}
243
244
// GC-on implementation of the instance-variable write barrier: store
// `value` into the ivar at `base + offset` through the zone's write
// barrier.  A nil value is stored directly (no barrier needed).  If the
// barrier store fails, `base` isn't a GC block — a caller bug reported via
// objc_assign_ivar_error.
__private_extern__ id objc_assign_ivar_gc(id value, id base, ptrdiff_t offset)
{
    id *slot = (id*) ((char *)base + offset);

    if (value) {
        if (!auto_zone_set_write_barrier(gc_zone, (char *)base + offset, value)) {
            // local declaration of the debug hook; defined elsewhere
            __private_extern__ void objc_assign_ivar_error(id base, ptrdiff_t offset);

            // %td is the correct conversion for a ptrdiff_t argument; the
            // previous %d read only 32 bits and misaligned the varargs on LP64.
            _objc_inform("GC: %p + %td isn't in the auto_zone, break on objc_assign_ivar_error to debug.\n", base, offset);
            objc_assign_ivar_error(base, offset);
        }
    }
    else
        *slot = value;

    return value;
}
262
263
264 /***********************************************************************
265 * Write barrier exports
266 * Called by pretty much all GC-supporting code.
267 *
268 * These "generic" implementations, available in PPC, are thought to be
269 * called by Rosetta when it translates the bla instruction.
270 **********************************************************************/
271
272 // Platform-independent write barriers
273 // These contain the UseGC check that the platform-specific
274 // runtime-rewritten implementations do not.
275
// Generic strongCast barrier: plain store when GC is off, otherwise route
// through the GC implementation.  Returns the stored value.
id objc_assign_strongCast_generic(id value, id *dest)
{
    if (!UseGC) return (*dest = value);
    return objc_assign_strongCast_gc(value, dest);
}


// Generic global-variable barrier: plain store when GC is off, otherwise
// register the root via the GC implementation.  Returns the stored value.
id objc_assign_global_generic(id value, id *dest)
{
    if (!UseGC) return (*dest = value);
    return objc_assign_global_gc(value, dest);
}


// Generic ivar barrier: plain store into base+offset when GC is off,
// otherwise route through the GC implementation.  Returns the stored value.
id objc_assign_ivar_generic(id value, id dest, ptrdiff_t offset)
{
    if (!UseGC) {
        id *ivarSlot = (id *)((char *)dest + offset);
        return (*ivarSlot = value);
    }
    return objc_assign_ivar_gc(value, dest, offset);
}
305
306 #if defined(__ppc__) || defined(__i386__) || defined(__x86_64__)
307
308 // PPC write barriers are in objc-auto-ppc.s
309 // write_barrier_init conditionally stomps those to jump to the _impl versions.
310
311 // These 3 functions are defined in objc-auto-i386.s and objc-auto-x86_64.s as
312 // the non-GC variants. Under GC, rtp_init stomps them with jumps to
313 // objc_assign_*_gc.
314
315 #else
316
317 // use generic implementation until time can be spent on optimizations
// Exported write barriers for architectures with no assembly fast path:
// each simply forwards to its generic (UseGC-checking) implementation.
id objc_assign_strongCast(id value, id *dest) { return objc_assign_strongCast_generic(value, dest); }
id objc_assign_global(id value, id *dest) { return objc_assign_global_generic(value, dest); }
id objc_assign_ivar(id value, id dest, ptrdiff_t offset) { return objc_assign_ivar_generic(value, dest, offset); }
321
322 // not (defined(__ppc__)) && not defined(__i386__) && not defined(__x86_64__)
323 #endif
324
325
// memmove that preserves GC invariants: with GC on, the zone's
// barrier-aware memmove performs the copy so the collector sees the
// stored pointers; otherwise a plain memmove.  Returns `dst`.
void *objc_memmove_collectable(void *dst, const void *src, size_t size)
{
    return UseGC ? auto_zone_write_barrier_memmove(gc_zone, dst, src, size)
                 : memmove(dst, src, size);
}
334
// Atomic compare-and-swap for a global variable (registered as a GC root
// when GC is on).  No memory barrier.  Returns YES if the swap happened.
BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation) {
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, YES, NO);
    else
        return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

// As above, but with a full memory barrier.
BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation) {
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, YES, YES);
    else
        return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

// Atomic compare-and-swap for an instance variable (write-barriered, not a
// root, when GC is on).  No memory barrier.
BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation) {
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, NO, NO);
    else
        return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

// As above, but with a full memory barrier.
BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation) {
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, NO, YES);
    else
        return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}
362
363
364 /***********************************************************************
365 * CF-only write barrier exports
366 * Called by CF only.
367 **********************************************************************/
368
// Exported as very private SPI to CF
// Store `value` into a CF object's ivar slot through the write barrier.
// No UseGC check: CF only calls this when GC is on.
void* objc_assign_ivar_address_CF(void *value, void *base, void **slot)
{
    // CF has already checked that *slot is a gc block so this should never fail
    if (!auto_zone_set_write_barrier(gc_zone, slot, value))
        *slot = value;   // fallback plain store (should be unreachable)
    return value;
}


// exported as very private SPI to CF
// strongCast-style barriered store for CF.  No UseGC check, same as above.
void* objc_assign_strongCast_CF(void* value, void **slot)
{
    // CF has already checked that *slot is a gc block so this should never fail
    if (!auto_zone_set_write_barrier(gc_zone, slot, value))
        *slot = value;   // fallback plain store (should be unreachable)
    return value;
}
387
388
389 /***********************************************************************
390 * Weak ivar support
391 **********************************************************************/
392
// Reads a __weak ivar.  Under GC a non-nil value must be re-read through
// the zone so the collector can return nil if the referent is condemned.
id objc_read_weak(id *location) {
    id result = *location;
    if (UseGC && result) {
        result = auto_read_weak_reference(gc_zone, (void **)location);
    }
    return result;
}

// Stores into a __weak ivar.  Under GC the zone registers the weak
// reference; otherwise a plain store.  Returns the stored value.
id objc_assign_weak(id value, id *location) {
    if (UseGC) {
        auto_assign_weak_reference(gc_zone, value, (void **)location, NULL);
    }
    else {
        *location = value;
    }
    return value;
}
410
411
412 /***********************************************************************
413 * Testing tools
414 * Used to isolate resurrection of garbage objects during finalization.
415 **********************************************************************/
// Testing hook: YES when `ptr` refers to a GC block that has already been
// finalized.  Only meaningful under GC and for non-NULL pointers.
BOOL objc_is_finalized(void *ptr) {
    if (!UseGC || ptr == NULL) return NO;
    return auto_zone_is_finalized(gc_zone, ptr);
}
422
423
424 /***********************************************************************
425 * Stack management
426 * Used to tell clean up dirty stack frames before a thread blocks. To
427 * make this more efficient, we really need better support from pthreads.
428 * See <rdar://problem/4548631> for more details.
429 **********************************************************************/
430
// Returns the lowest resident page of the current thread's stack — i.e. the
// deepest address the stack has actually touched — to use as a high-water
// mark when scrubbing dead stack.  Returns 0 if mincore fails or reports no
// resident pages.
// NOTE(review): a 0 return makes objc_clear_stack bzero from address 0 —
// presumably mincore never fails for a live stack; verify before relying on it.
static vm_address_t _stack_resident_base() {
    pthread_t self = pthread_self();
    size_t stack_size = pthread_get_stacksize_np(self);
    // Stack grows down: base is the lowest address.
    vm_address_t stack_base = (vm_address_t)pthread_get_stackaddr_np(self) - stack_size;
    size_t stack_page_count = stack_size / vm_page_size;
    char stack_residency[stack_page_count];
    vm_address_t stack_resident_base = 0;
    if (mincore((void*)stack_base, stack_size, stack_residency) == 0) {
        // we can now tell the degree to which the stack is resident, and use it as our ultimate high water mark.
        size_t i;
        for (i = 0; i < stack_page_count; ++i) {
            if (stack_residency[i]) {
                // First resident page scanning upward == lowest touched address.
                stack_resident_base = stack_base + i * vm_page_size;
                // malloc_printf("last touched page = %lu\n", stack_page_count - i - 1);
                break;
            }
        }
    }
    return stack_resident_base;
}
451
// Returns (approximately) the caller's stack pointer by taking this
// function's own frame address; noinline so a real frame exists.
// Returns NULL on architectures where this trick isn't supported.
static __attribute__((noinline)) void* _get_stack_pointer() {
#if defined(__i386__) || defined(__ppc__) || defined(__ppc64__) || defined(__x86_64__)
    return __builtin_frame_address(0);
#else
    return NULL;
#endif
}
459
// Zeroes the unused portion of the current thread's stack so stale pointers
// there can't keep garbage alive.  With OBJC_CLEAR_RESIDENT_STACK, only the
// pages that are actually resident are cleared; otherwise the whole region
// from the stack base up to (just below) the current stack pointer.
void objc_clear_stack(unsigned long options) {
    if (!UseGC) return;
    if (options & OBJC_CLEAR_RESIDENT_STACK) {
        // clear just the pages of stack that are currently resident.
        vm_address_t stack_resident_base = _stack_resident_base();
        // Leave a 2-pointer pad below the live frame.
        vm_address_t stack_top = (vm_address_t)_get_stack_pointer() - 2 * sizeof(void*);
        bzero((void*)stack_resident_base, (stack_top - stack_resident_base));
    } else {
        // clear the entire unused stack, regardless of whether it's pages are resident or not.
        pthread_t self = pthread_self();
        size_t stack_size = pthread_get_stacksize_np(self);
        vm_address_t stack_base = (vm_address_t)pthread_get_stackaddr_np(self) - stack_size;
        vm_address_t stack_top = (vm_address_t)_get_stack_pointer() - 2 * sizeof(void*);
        bzero((void*)stack_base, stack_top - stack_base);
    }
}
476
477 /***********************************************************************
478 * Finalization support
479 **********************************************************************/
480
// IMP of -[NSObject finalize]; installed by objc_collect_init once
// Foundation is loaded (until then it is _objc_msgForward).
static IMP _NSObject_finalize = NULL;

// Finalizer crash debugging
// Object currently being finalized, and its class name surfaced to crash
// reports, so a crash inside -finalize identifies the culprit.
static void *finalizing_object;
static const char *__crashreporter_info__;
486
// Finalizes a single object: records it for crash reporting, sends the
// -finalize selector, then runs C++ destructors for any ivars.
static void finalizeOneObject(void *obj, void *sel) {
    id object = (id)obj;
    SEL selector = (SEL)sel;
    // Publish identity so a crash during finalization names the object.
    finalizing_object = obj;
    __crashreporter_info__ = object_getClassName(obj);

    /// call -finalize method.
    objc_msgSend(object, selector);
    // Call C++ destructors, if any.
    object_cxxDestruct(object);

    finalizing_object = NULL;
    __crashreporter_info__ = NULL;
}
501
// Main-thread pass of two-thread finalization: finalizes only objects whose
// class is marked finalize-on-main-thread; everything else is skipped (it
// was handled on the collector thread).
static void finalizeOneMainThreadOnlyObject(void *obj, void *sel) {
    id object = (id)obj;
    Class cls = object->isa;
    if (cls == NULL) {
        _objc_fatal("object with NULL ISA passed to finalizeOneMainThreadOnlyObject: %p\n", obj);
    }
    if (_class_shouldFinalizeOnMainThread(cls)) {
        finalizeOneObject(obj, sel);
    }
}

// Collector-thread pass: finalizes objects that do NOT require the main
// thread, and flags that a main-thread pass is needed for the rest.
static void finalizeOneAnywhereObject(void *obj, void *sel) {
    id object = (id)obj;
    Class cls = object->isa;
    if (cls == NULL) {
        _objc_fatal("object with NULL ISA passed to finalizeOneAnywhereObject: %p\n", obj);
    }
    if (!_class_shouldFinalizeOnMainThread(cls)) {
        finalizeOneObject(obj, sel);
    }
    else {
        NeedsMainThreadFinalization = YES;
    }
}
526
527
528
// Drives `finalize` over every garbage object behind `cursor`.  If a
// -finalize implementation throws, the exception is reported and the loop
// restarts from the cursor's current position (already-finalized objects
// are not revisited), so one bad finalizer cannot abort the batch.
static void batchFinalize(auto_zone_t *zone,
                          auto_zone_foreach_object_t foreach,
                          auto_zone_cursor_t cursor,
                          size_t cursor_size,
                          void (*finalize)(void *, void*))
{
    for (;;) {
        @try {
            foreach(cursor, finalize, @selector(finalize));
            // non-exceptional return means finalization is complete.
            break;
        } @catch (id exception) {
            // whoops, note exception, then restart at cursor's position
            __private_extern__ void objc_exception_during_finalize_error(void);
            _objc_inform("GC: -finalize resulted in an exception (%p) being thrown, break on objc_exception_during_finalize_error to debug\n\t%s", exception, (const char*)[[exception description] UTF8String]);
            objc_exception_during_finalize_error();
        }
    }
}
548
549
// Runs (on the main thread) the pending main-thread-only finalization batch
// described by BatchFinalizeBlock, then signals the waiting collector
// thread.  The started flag makes this safe to call from multiple paths:
// only the first caller processes the batch.
static void batchFinalizeOnMainThread(void) {
    pthread_mutex_lock(&BatchFinalizeBlock.mutex);
    if (BatchFinalizeBlock.started) {
        // main thread got here already
        pthread_mutex_unlock(&BatchFinalizeBlock.mutex);
        return;
    }
    BatchFinalizeBlock.started = YES;
    pthread_mutex_unlock(&BatchFinalizeBlock.mutex);

    // Run outside the lock; only main-thread-only objects are finalized here.
    batchFinalize(gc_zone, BatchFinalizeBlock.foreach, BatchFinalizeBlock.cursor, BatchFinalizeBlock.cursor_size, finalizeOneMainThreadOnlyObject);
    // signal the collector thread that finalization has finished.
    pthread_mutex_lock(&BatchFinalizeBlock.mutex);
    BatchFinalizeBlock.finished = YES;
    pthread_cond_signal(&BatchFinalizeBlock.condition);
    pthread_mutex_unlock(&BatchFinalizeBlock.mutex);
}
567
// Collector-thread entry for split finalization: finalize what we can on
// this thread (using a private copy of the cursor), then, if any
// main-thread-only objects were seen, hand the original cursor to the main
// thread and block until it reports completion.
static void batchFinalizeOnTwoThreads(auto_zone_t *zone,
                                      auto_zone_foreach_object_t foreach,
                                      auto_zone_cursor_t cursor,
                                      size_t cursor_size)
{
    // First, lets get rid of everything we can on this thread, then ask main thread to help if needed
    NeedsMainThreadFinalization = NO;
    // Work on a copy so the original cursor still covers the full batch
    // for the main-thread pass.
    char cursor_copy[cursor_size];
    memcpy(cursor_copy, cursor, cursor_size);
    batchFinalize(zone, foreach, cursor_copy, cursor_size, finalizeOneAnywhereObject);

    if (! NeedsMainThreadFinalization)
        return;     // no help needed

    // set up the control block.  Either our ping of main thread with _callOnMainThread will get to it, or
    // an objc_collect_if_needed() will get to it.  Either way, this block will be processed on the main thread.
    pthread_mutex_lock(&BatchFinalizeBlock.mutex);
    BatchFinalizeBlock.foreach = foreach;
    BatchFinalizeBlock.cursor = cursor;
    BatchFinalizeBlock.cursor_size = cursor_size;
    BatchFinalizeBlock.started = NO;
    BatchFinalizeBlock.finished = NO;
    pthread_mutex_unlock(&BatchFinalizeBlock.mutex);

    //printf("----->asking main thread to finalize\n");
    objc_msgSend(objc_getClass("NSGarbageCollector"), @selector(_callOnMainThread:withArgs:), batchFinalizeOnMainThread, &BatchFinalizeBlock);

    // wait for the main thread to finish finalizing instances of classes marked CLS_FINALIZE_ON_MAIN_THREAD.
    pthread_mutex_lock(&BatchFinalizeBlock.mutex);
    while (!BatchFinalizeBlock.finished) pthread_cond_wait(&BatchFinalizeBlock.condition, &BatchFinalizeBlock.mutex);
    pthread_mutex_unlock(&BatchFinalizeBlock.mutex);
    //printf("<------ main thread finalize done\n");

}
602
603
// Zone callback invoked when the heap is about to grow.  With the dedicated
// collector thread running, opportunistically start a ratio collection —
// but never while a collection is already in progress.
static void objc_will_grow(auto_zone_t *zone, auto_heap_growth_info_t info) {
    if (!MultiThreadedGC) return;
    //printf("objc_will_grow %d\n", info);

    if (!auto_zone_is_collecting(gc_zone)) {
        auto_collect(gc_zone, AUTO_COLLECT_RATIO_COLLECTION, NULL);
    }
}
616
617
// collector calls this with garbage ready
// Zone batch_invalidate callback: chooses the single-thread or two-thread
// finalization strategy depending on which thread we're on and whether any
// class has requested main-thread-only finalization.
static void BatchInvalidate(auto_zone_t *zone,
                            auto_zone_foreach_object_t foreach,
                            auto_zone_cursor_t cursor,
                            size_t cursor_size)
{
    if (pthread_main_np() || !WantsMainThreadFinalization) {
        // Collect all objects.  We're either pre-multithreaded on main thread or we're on the collector thread
        // but no main-thread-only objects have been allocated.
        batchFinalize(zone, foreach, cursor, cursor_size, finalizeOneObject);
    }
    else {
        // We're on the dedicated thread.  Collect some on main thread, the rest here.
        batchFinalizeOnTwoThreads(zone, foreach, cursor, cursor_size);
    }

}
635
636 // idea: keep a side table mapping resurrected object pointers to their original Class, so we don't
637 // need to smash anything. alternatively, could use associative references to track against a secondary
638 // object with information about the resurrection, such as a stack crawl, etc.
639
// Zombie class installed on resurrected garbage, plus a map from each
// resurrected instance back to its original class, and the lock guarding
// that map.
static Class _NSResurrectedObjectClass;
static NXMapTable *_NSResurrectedObjectMap = NULL;
static OBJC_DECLARE_LOCK(_NSResurrectedObjectLock);

// Looks up the original (pre-resurrection) class of a zombie instance.
static Class resurrectedObjectOriginalClass(id object) {
    Class originalClass;
    OBJC_LOCK(&_NSResurrectedObjectLock);
    originalClass = (Class) NXMapGet(_NSResurrectedObjectMap, object);
    OBJC_UNLOCK(&_NSResurrectedObjectLock);
    return originalClass;
}
651
// Catch-all class-method IMP for the zombie class: swallow the message.
static id _NSResurrectedObject_classMethod(id self, SEL selector) { return self; }

// Catch-all instance-method IMP: log that a resurrected object was
// messaged (with its original class) and swallow the message.
static id _NSResurrectedObject_instanceMethod(id self, SEL name) {
    _objc_inform("**resurrected** object %p of class %s being sent message '%s'\n", self, class_getName(resurrectedObjectOriginalClass(self)), sel_getName(name));
    return self;
}

// Zombie -finalize: drop the map entry, log, and run NSObject's finalize.
static void _NSResurrectedObject_finalize(id self, SEL _cmd) {
    Class originalClass;
    OBJC_LOCK(&_NSResurrectedObjectLock);
    originalClass = (Class) NXMapRemove(_NSResurrectedObjectMap, self);
    OBJC_UNLOCK(&_NSResurrectedObjectLock);
    if (originalClass) _objc_inform("**resurrected** object %p of class %s being finalized\n", self, class_getName(originalClass));
    _NSObject_finalize(self, _cmd);
}

// +resolveInstanceMethod: installs the catch-all IMP for any selector.
static BOOL _NSResurrectedObject_resolveInstanceMethod(id self, SEL _cmd, SEL name) {
    class_addMethod((Class)self, name, (IMP)_NSResurrectedObject_instanceMethod, "@@:");
    return YES;
}

// +resolveClassMethod: installs the catch-all class IMP for any selector.
static BOOL _NSResurrectedObject_resolveClassMethod(id self, SEL _cmd, SEL name) {
    class_addMethod(object_getClass(self), name, (IMP)_NSResurrectedObject_classMethod, "@@:");
    return YES;
}
677
// Builds the _NSResurrectedObject zombie class at runtime: an NSObject
// subclass whose resolve hooks install catch-all IMPs, plus the map that
// remembers each zombie's original class.
static void _NSResurrectedObject_initialize() {
    _NSResurrectedObjectMap = NXCreateMapTable(NXPtrValueMapPrototype, 128);
    _NSResurrectedObjectClass = objc_allocateClassPair(objc_getClass("NSObject"), "_NSResurrectedObject", 0);
    class_addMethod(_NSResurrectedObjectClass, @selector(finalize), (IMP)_NSResurrectedObject_finalize, "v@:");
    Class metaClass = object_getClass(_NSResurrectedObjectClass);
    class_addMethod(metaClass, @selector(resolveInstanceMethod:), (IMP)_NSResurrectedObject_resolveInstanceMethod, "c@::");
    class_addMethod(metaClass, @selector(resolveClassMethod:), (IMP)_NSResurrectedObject_resolveClassMethod, "c@::");
    objc_registerClassPair(_NSResurrectedObjectClass);
}
687
// Zone resurrect callback: when garbage is illegally resurrected, remember
// its original class and smash its isa to the zombie class so subsequent
// messages are logged and swallowed instead of running on a dead object.
static void resurrectZombie(auto_zone_t *zone, void *ptr) {
    id object = (id) ptr;
    Class cls = object->isa;
    if (cls != _NSResurrectedObjectClass) {
        // remember the original class for this instance.
        OBJC_LOCK(&_NSResurrectedObjectLock);
        NXMapInsert(_NSResurrectedObjectMap, ptr, cls);
        OBJC_UNLOCK(&_NSResurrectedObjectLock);
        object->isa = _NSResurrectedObjectClass;
    }
}
699
700 /***********************************************************************
701 * Pretty printing support
702 * For development purposes.
703 **********************************************************************/
704
705
static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount);

// Zone name_for_address callback: pretty-prints a block address without
// retain-count decoration (see name_for_address below).
static char* objc_name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset)
{
    return name_for_address(zone, base, offset, false);
}
712
713 /***********************************************************************
714 * Collection support
715 **********************************************************************/
716
// Zone callback: returns the strong-ivar layout for the object at
// `address` by reading its class (the first word of the block).
static const unsigned char *objc_layout_for_address(auto_zone_t *zone, void *address)
{
    Class cls = *(Class *)address;
    return (const unsigned char *)class_getIvarLayout(cls);
}

// Zone callback: returns the weak-ivar layout for the object at `address`.
static const unsigned char *objc_weak_layout_for_address(auto_zone_t *zone, void *address)
{
    Class cls = *(Class *)address;
    return (const unsigned char *)class_getWeakIvarLayout(cls);
}
728
729 /***********************************************************************
730 * Initialization
731 **********************************************************************/
732
// Always called by _objcInit, even if GC is off.
// Records the GC decision, and when GC is on creates the zone and stubs
// NSObject's finalize until Foundation calls objc_collect_init.
__private_extern__ void gc_init(BOOL on)
{
    UseGC = on;

    if (PrintGC) {
        _objc_inform("GC: is %s", on ? "ON" : "OFF");
    }

    if (UseGC) {
        // Add GC state to crash log reports
        _objc_inform_on_crash("garbage collection is ON");

        // Set up the GC zone
        gc_zone = gc_zone_init();

        // no NSObject until Foundation calls objc_collect_init()
        _NSObject_finalize = &_objc_msgForward;

    } else {
        // GC off: make sure libauto's monitoring is disabled, but still give
        // it the class list for its debugging tools.
        auto_zone_start_monitor(false);
        auto_zone_set_class_list((int (*)(void **, int))objc_getClassList);
    }
}
757
758
// Creates the collected zone and wires up the runtime's callbacks for
// finalization, growth, resurrection, layout queries, and debug naming.
static auto_zone_t *gc_zone_init(void)
{
    auto_zone_t *result;

    // result = auto_zone_create("objc auto collected zone");
    // Name kept as "auto_zone" for tool compatibility.
    result = auto_zone_create("auto_zone");

    auto_collection_control_t *control = auto_collection_parameters(result);

    // set up the magic control parameters
    control->batch_invalidate = BatchInvalidate;
    control->will_grow = objc_will_grow;
    control->resurrect = resurrectZombie;
    control->layout_for_address = objc_layout_for_address;
    control->weak_layout_for_address = objc_weak_layout_for_address;
    control->name_for_address = objc_name_for_address;

    return result;
}
778
779
// Called by Foundation to install auto's interruption callback.
// Completes GC setup once Foundation exists: caches -[NSObject finalize]
// and builds the resurrection-tracking zombie class.  Returns the GC zone
// as a malloc zone for Foundation's use.
malloc_zone_t *objc_collect_init(int (*callback)(void))
{
    // Find NSObject's finalize method now that Foundation is loaded.
    // fixme only look for the base implementation, not a category's
    _NSObject_finalize = class_getMethodImplementation(objc_getClass("NSObject"), @selector(finalize));
    if (_NSObject_finalize == &_objc_msgForward) {
        _objc_fatal("GC: -[NSObject finalize] unimplemented!");
    }

    // create the _NSResurrectedObject class used to track resurrections.
    _NSResurrectedObject_initialize();

    return (malloc_zone_t *)gc_zone;
}
795
796
797
798
799
800
801 /***********************************************************************
802 * Debugging
803 **********************************************************************/
804
805 /* This is non-deadlocking with respect to malloc's locks EXCEPT:
806 * %ls, %a, %A formats
807 * more than 8 args
808 */
// Debug printf that writes straight to stderr (see the malloc-lock caveats
// in the comment above).
static void objc_debug_printf(const char *format, ...)
{
    va_list args;
    va_start(args, format);
    vfprintf(stderr, format, args);
    va_end(args);
}
816
// Lazily creates (once) and returns a private malloc zone for debug
// strings, so debug allocation never touches the GC zone under inspection.
static malloc_zone_t *objc_debug_zone(void)
{
    static malloc_zone_t *z = NULL;
    if (!z) {
        z = malloc_create_zone(4096, 0);
        malloc_set_zone_name(z, "objc-auto debug");
    }
    return z;
}
826
// Writes the digits of `value` in the given base at `head` (most
// significant digit first, via recursion) and returns the position just
// past the last digit written.  Does NOT NUL-terminate; callers terminate
// the buffer themselves.  Lowercase letters are used for digits >= 10.
static char *_malloc_append_unsigned(uintptr_t value, unsigned base, char *head) {
    uintptr_t digit;
    if (value == 0) {
        *head = '0';
        return head + 1;
    }
    // Emit higher-order digits first.
    if (value >= base) {
        head = _malloc_append_unsigned(value / base, base, head);
    }
    digit = value % base;
    *head = (digit < 10) ? (char)('0' + digit) : (char)('a' + digit - 10);
    return head + 1;
}
837
// Appends `value` in decimal to the NUL-terminated string `str`
// (buffer must have room).
static void strcati(char *str, uintptr_t value)
{
    str = _malloc_append_unsigned(value, 10, str + strlen(str));
    str[0] = '\0';
}

// Appends `value` in lowercase hexadecimal to `str` (buffer must have room).
static void strcatx(char *str, uintptr_t value)
{
    str = _malloc_append_unsigned(value, 16, str + strlen(str));
    str[0] = '\0';
}
849
850
// Best-effort search for the ivar covering byte `offset` in an instance of
// `cls`, recursing through superclasses first (their ivars occupy lower
// offsets).  Used only for debug pretty-printing; may return an
// approximate answer.
// NOTE(review): `ivar_offset > offset` compares a signed int against an
// unsigned vm_address_t — fine for the offsets seen in practice, but worth
// confirming if reused.
static Ivar ivar_for_offset(Class cls, vm_address_t offset)
{
    int i;
    int ivar_offset;
    Ivar super_ivar, result;
    Ivar *ivars;
    unsigned int ivar_count;

    if (!cls) return NULL;

    // scan base classes FIRST
    super_ivar = ivar_for_offset(class_getSuperclass(cls), offset);
    // result is best-effort; our ivars may be closer

    ivars = class_copyIvarList(cls, &ivar_count);
    if (ivars && ivar_count) {
        // Try our first ivar. If it's too big, use super's best ivar.
        ivar_offset = ivar_getOffset(ivars[0]);
        if (ivar_offset > offset) result = super_ivar;
        else if (ivar_offset == offset) result = ivars[0];
        else result = NULL;

        // Try our other ivars. If any is too big, use the previous.
        for (i = 1; result == NULL && i < ivar_count; i++) {
            ivar_offset = ivar_getOffset(ivars[i]);
            if (ivar_offset == offset) {
                result = ivars[i];
            } else if (ivar_offset > offset) {
                result = ivars[i - 1];
            }
        }

        // Found nothing. Return our last ivar.
        if (result == NULL)
            result = ivars[ivar_count - 1];

        free(ivars);
    } else {
        result = super_ivar;
    }

    return result;
}
894
// Appends a human-readable ".ivarName[+extra]" description of the ivar at
// `offset` within an instance of `cls` to `buf` (debug printing only;
// caller guarantees buffer space).
static void append_ivar_at_offset(char *buf, Class cls, vm_address_t offset)
{
    Ivar ivar = NULL;

    if (offset == 0) return;  // don't bother with isa
    if (offset >= class_getInstanceSize(cls)) {
        // Past the declared ivars: extra bytes requested at allocation time.
        strcat(buf, ".<extra>+");
        strcati(buf, offset);
        return;
    }

    ivar = ivar_for_offset(cls, offset);
    if (!ivar) {
        strcat(buf, ".<?>");
        return;
    }

    // fixme doesn't handle structs etc.

    strcat(buf, ".");
    const char *ivar_name = ivar_getName(ivar);
    if (ivar_name) strcat(buf, ivar_name);
    else strcat(buf, "<anonymous ivar>");

    // Note any residual offset into the ivar (e.g. a struct field).
    offset -= ivar_getOffset(ivar);
    if (offset > 0) {
        strcat(buf, "+");
        strcati(buf, offset);
    }
}
925
926
// Map a CF object to its CF class name without linking against
// CoreFoundation: look the needed functions up with dlsym instead.
// Returns a static fallback string when CF is not loaded, lookup fails,
// or the type ID cannot be resolved. The returned pointer is either the
// static fallback or owned by CF; the caller must not free it.
static const char *cf_class_for_object(void *cfobj)
{
    // ick - we don't link against CF anymore

    const char *result;
    void *dlh;
    size_t (*CFGetTypeID)(void *);
    void * (*_CFRuntimeGetClassWithTypeID)(size_t);

    result = "anonymous_NSCFType";

    // RTLD_NOLOAD: only succeed if CF is already loaded in this process.
    dlh = dlopen("/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation", RTLD_LAZY | RTLD_NOLOAD | RTLD_FIRST);
    if (!dlh) return result;

    CFGetTypeID = (size_t(*)(void*)) dlsym(dlh, "CFGetTypeID");
    _CFRuntimeGetClassWithTypeID = (void*(*)(size_t)) dlsym(dlh, "_CFRuntimeGetClassWithTypeID");

    if (CFGetTypeID && _CFRuntimeGetClassWithTypeID) {
        struct {
            size_t version;
            const char *className;
            // don't care about the rest
        } *cfcls;
        size_t cfid;
        cfid = (*CFGetTypeID)(cfobj);
        cfcls = (*_CFRuntimeGetClassWithTypeID)(cfid);
        // _CFRuntimeGetClassWithTypeID can return NULL for an unknown
        // type ID; previously this dereferenced it unconditionally and
        // could crash the debugging session.
        if (cfcls && cfcls->className) result = cfcls->className;
    }

    dlclose(dlh);
    return result;
}
959
960
// Build a human-readable description of the GC block at `base` (with an
// optional interior `offset`): class name plus ivar path for objects,
// a kind tag for raw blocks, "[size]", and optionally "[[refcount=N]]".
// The result is allocated from objc_debug_zone(); the caller must
// malloc_zone_free() it.
static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount)
{
#define APPEND_SIZE(s) \
    strcat(buf, "["); \
    strcati(buf, s); \
    strcat(buf, "]");

    char buf[500];
    char *result;

    buf[0] = '\0';

    // size == 0 means base is not a live block in this zone; skip the
    // other zone queries in that case.
    size_t size =
        auto_zone_size_no_lock(zone, (void *)base);
    auto_memory_type_t type = size ?
        auto_zone_get_layout_type_no_lock(zone, (void *)base) : AUTO_TYPE_UNKNOWN;
    unsigned int refcount = size ?
        auto_zone_retain_count_no_lock(zone, (void *)base) : 0;

    switch (type) {
    case AUTO_OBJECT_SCANNED:
    case AUTO_OBJECT_UNSCANNED: {
        // Objective-C object: print its class name, resolving the real
        // CF class name for toll-free bridged NSCFType instances.
        const char *class_name = object_getClassName((id)base);
        if (0 == strcmp(class_name, "NSCFType")) {
            strcat(buf, cf_class_for_object((void *)base));
        } else {
            strcat(buf, class_name);
        }
        // Interior pointer: name the ivar it points into.
        if (offset) {
            append_ivar_at_offset(buf, object_getClass((id)base), offset);
        }
        APPEND_SIZE(size);
        break;
    }
    case AUTO_MEMORY_SCANNED:
        strcat(buf, "{conservative-block}");
        APPEND_SIZE(size);
        break;
    case AUTO_MEMORY_UNSCANNED:
        strcat(buf, "{no-pointers-block}");
        APPEND_SIZE(size);
        break;
    default:
        strcat(buf, "{unallocated-or-stack}");
    }

    if (withRetainCount && refcount > 0) {
        strcat(buf, " [[refcount=");
        strcati(buf, refcount);
        strcat(buf, "]]");
    }

    // Copy out of the stack buffer into a caller-owned allocation.
    result = malloc_zone_malloc(objc_debug_zone(), 1 + strlen(buf));
    strcpy(result, buf);
    return result;

#undef APPEND_SIZE
}
1019
1020
// Shared state threaded through the zone enumerator by
// objc_enumerate_class() into objc_class_recorder().
struct objc_class_recorder_context {
    malloc_zone_t *zone;    // the auto (GC) zone being enumerated
    void *cls;              // the Class whose instances we're counting
    char *clsname;          // its name, for messages
    unsigned int count;     // matching instances found so far
};
1027
// Zone-enumerator callback for objc_enumerate_class(): for each live
// block that is an Objective-C object, print and count it if its class
// is ctx->cls or any subclass thereof.
static void objc_class_recorder(task_t task, void *context, unsigned type_mask,
                                vm_range_t *ranges, unsigned range_count)
{
    struct objc_class_recorder_context *ctx =
        (struct objc_class_recorder_context *)context;

    vm_range_t *r;
    vm_range_t *end;
    for (r = ranges, end = ranges + range_count; r < end; r++) {
        auto_memory_type_t type =
            auto_zone_get_layout_type_no_lock(ctx->zone, (void *)r->address);
        if (type == AUTO_OBJECT_SCANNED || type == AUTO_OBJECT_UNSCANNED) {
            // Check if this is an instance of class ctx->cls or some subclass
            Class cls;
            Class isa = *(Class *)r->address;
            for (cls = isa; cls; cls = _class_getSuperclass(cls)) {
                if (cls == ctx->cls) {
                    unsigned int rc;
                    objc_debug_printf("[%p] : %s", r->address, _class_getName(isa));
                    // Only mention the refcount when it is nonzero.
                    if ((rc = auto_zone_retain_count_no_lock(ctx->zone, (void *)r->address))) {
                        objc_debug_printf(" [[refcount %u]]", rc);
                    }
                    objc_debug_printf("\n");
                    ctx->count++;
                    break;
                }
            }
        }
    }
}
1058
// Print every live instance of class `clsname` (including instances of
// its subclasses) currently allocated in the GC zone, then a total.
__private_extern__ void objc_enumerate_class(char *clsname)
{
    struct objc_class_recorder_context ctx;
    ctx.zone = auto_zone();
    ctx.clsname = clsname;
    ctx.cls = objc_getClass(clsname); // GrP fixme may deadlock if classHash lock is already owned
    ctx.count = 0;
    if (!ctx.cls) {
        objc_debug_printf("No class '%s'\n", clsname);
        return;
    }
    objc_debug_printf("\n\nINSTANCES OF CLASS '%s':\n\n", clsname);
    // Walk all in-use blocks; objc_class_recorder does the filtering.
    (*ctx.zone->introspect->enumerator)(mach_task_self(), &ctx, MALLOC_PTR_IN_USE_RANGE_TYPE, (vm_address_t)ctx.zone, NULL, objc_class_recorder);
    objc_debug_printf("\n%d instances\n\n", ctx.count);
}
1074
1075
// auto_enumerate_references() callback: print one referrer -> referent
// edge with symbolic names, then free the temporary name strings.
static void objc_reference_printer(auto_zone_t *zone, void *ctx,
                                   auto_reference_t ref)
{
    char *referrer_name = name_for_address(zone, ref.referrer_base, ref.referrer_offset, true);
    char *referent_name = name_for_address(zone, ref.referent, 0, true);

    objc_debug_printf("[%p%+d -> %p] : %s -> %s\n",
                      ref.referrer_base, ref.referrer_offset, ref.referent,
                      referrer_name, referent_name);

    // name_for_address() allocates from objc_debug_zone().
    malloc_zone_free(objc_debug_zone(), referrer_name);
    malloc_zone_free(objc_debug_zone(), referent_name);
}
1089
1090
// Print every reference to `referent`, using the locking flavor of the
// collector's enumerator only when the caller says that is safe.
__private_extern__ void objc_print_references(void *referent, void *stack_bottom, int lock)
{
    auto_zone_t *zone = auto_zone();
    if (lock) {
        auto_enumerate_references(zone, referent,
                                  objc_reference_printer, stack_bottom, NULL);
        return;
    }
    auto_enumerate_references_no_lock(zone, referent,
                                      objc_reference_printer, stack_bottom, NULL);
}
1101
1102
1103
// One node in the reference graph built by objc_print_recursive_refs().
typedef struct {
    vm_address_t address;           // of this object
    int refcount;                   // of this object - nonzero means ROOT
    int depth;                      // number of links away from referent, or -1
    auto_reference_t *referrers;    // of this object
    int referrers_used;
    int referrers_allocated;
    auto_reference_t back;          // reference from this object back toward the target
    uint32_t ID;                    // Graphic ID for grafflization
} blob;
1114
1115
// Growable FIFO of blob pointers.
typedef struct {
    blob **list;
    unsigned int used;
    unsigned int allocated;
} blob_queue;

// Every blob created during the current trace (the graph itself).
static blob_queue blobs = {NULL, 0, 0};
// Work queue for the two graph walks in objc_print_recursive_refs().
static blob_queue untraced_blobs = {NULL, 0, 0};
// Roots found during the breadth-first walk.
static blob_queue root_blobs = {NULL, 0, 0};
1125
1126
// Progress spinner: prints one '.' per second of elapsed wall-clock
// time, however often it is called.
static void spin(void) {
    static time_t last = 0;
    time_t now = time(NULL);
    if (now != last) {
        objc_debug_printf(".");
        last = now;
    }
}
1135
1136
// Append b to the tail of queue q, growing the backing array
// geometrically (2n+1 also handles the initial zero capacity).
static void enqueue_blob(blob_queue *q, blob *b)
{
    if (q->allocated == q->used) {
        q->allocated = 2 * q->allocated + 1;
        q->list = malloc_zone_realloc(objc_debug_zone(), q->list,
                                      sizeof(blob *) * q->allocated);
    }
    q->list[q->used] = b;
    q->used++;
}
1145
1146
// Remove and return the first blob in q, shifting the rest down.
// Caller must ensure q->used > 0 first; an empty queue is not checked.
static blob *dequeue_blob(blob_queue *q)
{
    blob *result = q->list[0];
    q->used--;
    memmove(&q->list[0], &q->list[1], q->used * sizeof(blob *));
    return result;
}
1154
1155
// Return the blob describing addr, creating one and registering it in
// the global `blobs` table if none exists yet. Returns NULL for addr 0.
static blob *blob_for_address(vm_address_t addr)
{
    blob *b, **bp, **end;

    if (addr == 0) return NULL;

    // Linear search of everything seen so far.
    for (bp = blobs.list, end = blobs.list+blobs.used; bp < end; bp++) {
        b = *bp;
        if (b->address == addr) return b;
    }

    b = malloc_zone_calloc(objc_debug_zone(), sizeof(blob), 1);
    b->address = addr;
    b->depth = -1;   // not yet reached by the breadth-first walk
    // Addresses outside the zone (size 0) get refcount 1 so the root
    // search treats them as roots.
    b->refcount = auto_zone_size_no_lock(auto_zone(), (void *)addr) ? auto_zone_retain_count_no_lock(auto_zone(), (void *)addr) : 1;
    enqueue_blob(&blobs, b);
    return b;
}
1174
// Report whether a blob for addr is already registered in the global
// blobs table, without creating one.
static int blob_exists(vm_address_t addr)
{
    unsigned int i;
    for (i = 0; i < blobs.used; i++) {
        if (blobs.list[i]->address == addr) return 1;
    }
    return 0;
}
1184
1185
1186 // Destroy the blobs table and all blob data in it
1187 static void free_blobs(void)
1188 {
1189 blob *b, **bp, **end;
1190 for (bp = blobs.list, end = blobs.list+blobs.used; bp < end; bp++) {
1191 b = *bp;
1192 malloc_zone_free(objc_debug_zone(), b);
1193 }
1194 if (blobs.list) malloc_zone_free(objc_debug_zone(), blobs.list);
1195 }
1196
// Print the chain of references from `root` back to the search target,
// one block per line, following each blob's saved back-reference.
static void print_chain(auto_zone_t *zone, blob *root)
{
    blob *b;
    for (b = root; b != NULL; b = blob_for_address(b->back.referent)) {
        char *name;
        if (b->back.referent) {
            name = name_for_address(zone, b->address, b->back.referrer_offset, true);
            objc_debug_printf("[%p%+d] : %s ->\n", b->address, b->back.referrer_offset, name);
        } else {
            // No back reference: this is the target itself, end of chain.
            name = name_for_address(zone, b->address, 0, true);
            objc_debug_printf("[%p] : %s\n", b->address, name);
        }
        malloc_zone_free(objc_debug_zone(), name);
    }
}
1212
1213
// auto_enumerate_references() callback: record `ref` in the referrer
// list of the blob being traced (passed as ctx), and queue the
// referring block for tracing if it has not been seen before.
static void objc_blob_recorder(auto_zone_t *zone, void *ctx,
                               auto_reference_t ref)
{
    blob *b = (blob *)ctx;

    spin();

    // Grow the referrer array geometrically (2n+1 handles capacity 0).
    if (b->referrers_used == b->referrers_allocated) {
        b->referrers_allocated = b->referrers_allocated * 2 + 1;
        b->referrers = malloc_zone_realloc(objc_debug_zone(), b->referrers,
                                           b->referrers_allocated *
                                           sizeof(auto_reference_t));
    }

    b->referrers[b->referrers_used++] = ref;
    if (!blob_exists(ref.referrer_base)) {
        enqueue_blob(&untraced_blobs, blob_for_address(ref.referrer_base));
    }
}
1233
1234
// Search modes for objc_print_recursive_refs():
#define INSTANCE_ROOTS 1   // stop only at retained Objective-C instances
#define HEAP_ROOTS 2       // stop at any retained block
#define ALL_REFS 3         // never stop; collect every transitive referrer
static void objc_print_recursive_refs(vm_address_t target, int which, void *stack_bottom, int lock);
static void grafflize(blob_queue *blobs, int everything);
1240
// Print the retained Objective-C instances that transitively reference
// `target`, with the chain from each root back to the target.
__private_extern__ void objc_print_instance_roots(vm_address_t target, void *stack_bottom, int lock)
{
    objc_print_recursive_refs(target, INSTANCE_ROOTS, stack_bottom, lock);
}
1245
// Print all retained blocks (of any kind) that transitively reference
// `target`, with the chain from each root back to the target.
__private_extern__ void objc_print_heap_roots(vm_address_t target, void *stack_bottom, int lock)
{
    objc_print_recursive_refs(target, HEAP_ROOTS, stack_bottom, lock);
}
1250
// Print every block that transitively references `target`, without
// stopping at roots.
__private_extern__ void objc_print_all_refs(vm_address_t target, void *stack_bottom, int lock)
{
    objc_print_recursive_refs(target, ALL_REFS, stack_bottom, lock);
}
1255
// qsort comparator: order blob pointers by descending retain count.
static int blob_refcount_desc(const void *a, const void *b)
{
    const blob *ba = *(blob *const *)a;
    const blob *bb = *(blob *const *)b;
    if (ba->refcount > bb->refcount) return -1;
    if (ba->refcount < bb->refcount) return 1;
    return 0;
}

// Sort the queue so the most-retained blobs come first.
// Replaces a hand-rolled O(n^2) exchange sort with stdlib qsort().
static void sort_blobs_by_refcount(blob_queue *blobs)
{
    qsort(blobs->list, blobs->used, sizeof(blob *), blob_refcount_desc);
}
1271
1272
// qsort comparator: order blob pointers by ascending depth.
static int blob_depth_asc(const void *a, const void *b)
{
    const blob *ba = *(blob *const *)a;
    const blob *bb = *(blob *const *)b;
    if (ba->depth < bb->depth) return -1;
    if (ba->depth > bb->depth) return 1;
    return 0;
}

// Sort the queue shallowest-first (target at depth 0 comes first).
// Replaces a hand-rolled O(n^2) exchange sort with stdlib qsort().
static void sort_blobs_by_depth(blob_queue *blobs)
{
    qsort(blobs->list, blobs->used, sizeof(blob *), blob_depth_asc);
}
1288
1289
// Core of the root/reference printers. Phase 1 builds the graph of all
// blocks that transitively point at `target`; phase 2 walks that graph
// breadth-first from the target to find shortest paths and (unless
// which == ALL_REFS) the roots; then the result is printed, drawn as a
// Graffle file, and the graph torn down. `lock` selects the locking
// flavor of the collector's reference enumerator.
static void objc_print_recursive_refs(vm_address_t target, int which, void *stack_bottom, int lock)
{
    objc_debug_printf("\n "); // make spinner draw in a pretty place

    // Construct pointed-to graph (of things eventually pointing to target)

    enqueue_blob(&untraced_blobs, blob_for_address(target));

    while (untraced_blobs.used > 0) {
        blob *b = dequeue_blob(&untraced_blobs);
        spin();
        // objc_blob_recorder records each referrer on b and queues
        // newly-seen referrers here for their own tracing pass.
        if (lock) {
            auto_enumerate_references(auto_zone(), (void *)b->address,
                                      objc_blob_recorder, stack_bottom, b);
        } else {
            auto_enumerate_references_no_lock(auto_zone(), (void *)b->address,
                                              objc_blob_recorder, stack_bottom, b);
        }
    }

    // Walk pointed-to graph to find shortest paths from roots to target.
    // This is BREADTH-FIRST order.

    blob_for_address(target)->depth = 0;
    enqueue_blob(&untraced_blobs, blob_for_address(target));

    while (untraced_blobs.used > 0) {
        blob *b = dequeue_blob(&untraced_blobs);
        blob *other;
        auto_reference_t *r, *end;
        int stop = NO;

        spin();

        if (which == ALL_REFS) {
            // Never stop at roots.
            stop = NO;
        } else if (which == HEAP_ROOTS) {
            // Stop at any root (a block with positive retain count)
            stop = (b->refcount > 0);
        } else if (which == INSTANCE_ROOTS) {
            // Only stop at roots that are instances
            auto_memory_type_t type = auto_zone_get_layout_type_no_lock(auto_zone(), (void *)b->address);
            stop = (b->refcount > 0 && (type == AUTO_OBJECT_SCANNED || type == AUTO_OBJECT_UNSCANNED)); // GREG XXX ???
        }

        // If this object is a root, save it and don't walk its referrers.
        if (stop) {
            enqueue_blob(&root_blobs, b);
            continue;
        }

        // For any "other object" that points to "this object"
        // and does not yet have a depth:
        // (1) other object is one level deeper than this object
        // (2) (one of) the shortest path(s) from other object to the
        //     target goes through this object

        for (r = b->referrers, end = b->referrers + b->referrers_used;
             r < end;
             r++)
        {
            other = blob_for_address(r->referrer_base);
            if (other->depth == -1) {
                other->depth = b->depth + 1;
                other->back = *r;
                enqueue_blob(&untraced_blobs, other);
            }
        }
    }

    {
        // Headline: how many roots (or total referrers) were found.
        char *name = name_for_address(auto_zone(), target, 0, true);
        objc_debug_printf("\n\n%d %s %p (%s)\n\n",
                          (which==ALL_REFS) ? blobs.used : root_blobs.used,
                          (which==ALL_REFS) ? "INDIRECT REFS TO" : "ROOTS OF",
                          target, name);
        malloc_zone_free(objc_debug_zone(), name);
    }

    if (which == ALL_REFS) {
        // Print all reference objects, biggest refcount first
        int i;
        sort_blobs_by_refcount(&blobs);
        for (i = 0; i < blobs.used; i++) {
            char *name = name_for_address(auto_zone(), blobs.list[i]->address, 0, true);
            objc_debug_printf("[%p] : %s\n", blobs.list[i]->address, name);
            malloc_zone_free(objc_debug_zone(), name);
        }
    }
    else {
        // Walk back chain from every root to the target, printing every step.

        while (root_blobs.used > 0) {
            blob *root = dequeue_blob(&root_blobs);
            print_chain(auto_zone(), root);
            objc_debug_printf("\n");
        }
    }

    grafflize(&blobs, which == ALL_REFS);

    objc_debug_printf("\ndone\n\n");

    // Clean up

    free_blobs();
    if (untraced_blobs.list) malloc_zone_free(objc_debug_zone(), untraced_blobs.list);
    if (root_blobs.list) malloc_zone_free(objc_debug_zone(), root_blobs.list);

    // Reset the global queues for the next trace.
    memset(&blobs, 0, sizeof(blobs));
    memset(&root_blobs, 0, sizeof(root_blobs));
    memset(&untraced_blobs, 0, sizeof(untraced_blobs));
}
1404
1405
1406
// Shared state for objc_block_recorder(): output fd and running count.
struct objc_block_recorder_context {
    malloc_zone_t *zone;    // the auto (GC) zone being enumerated
    int fd;                 // file descriptor the block list is written to
    unsigned int count;     // blocks written so far
};
1412
1413
// Zone-enumerator callback for objc_dump_block_list(): write one
// "0x<hexaddr> <name>\n" line per in-use block to ctx->fd.
static void objc_block_recorder(task_t task, void *context, unsigned type_mask,
                                vm_range_t *ranges, unsigned range_count)
{
    char buf[20];
    struct objc_block_recorder_context *ctx =
        (struct objc_block_recorder_context *)context;

    vm_range_t *r;
    vm_range_t *end;
    for (r = ranges, end = ranges + range_count; r < end; r++) {
        char *name = name_for_address(ctx->zone, r->address, 0, true);
        buf[0] = '\0';
        strcatx(buf, r->address);

        write(ctx->fd, "0x", 2);
        write(ctx->fd, buf, strlen(buf));
        write(ctx->fd, " ", 1);
        write(ctx->fd, name, strlen(name));
        write(ctx->fd, "\n", 1);

        malloc_zone_free(objc_debug_zone(), name);
        ctx->count++;
    }
}
1438
1439
// Write one "0x<addr> <name>" line per auto-allocated block to `path`,
// or to a freshly created /tmp/blocks-XXXXX.txt when path is NULL.
__private_extern__ void objc_dump_block_list(const char* path)
{
    struct objc_block_recorder_context ctx;
    char filename[] = "/tmp/blocks-XXXXX.txt";

    ctx.zone = auto_zone();
    ctx.count = 0;
    ctx.fd = (path ? open(path, O_WRONLY | O_CREAT | O_TRUNC, 0666) : mkstemps(filename, (int)strlen(strrchr(filename, '.'))));
    if (ctx.fd < 0) {
        // Previously an open()/mkstemps() failure went undetected and
        // every write() below silently failed; report it instead.
        objc_debug_printf("couldn't open block list file '%s' (errno %d)\n",
                          (path ? path : filename), errno);
        return;
    }

    objc_debug_printf("\n\nALL AUTO-ALLOCATED BLOCKS\n\n");
    (*ctx.zone->introspect->enumerator)(mach_task_self(), &ctx, MALLOC_PTR_IN_USE_RANGE_TYPE, (vm_address_t)ctx.zone, NULL, objc_block_recorder);
    objc_debug_printf("%d blocks written to file\n", ctx.count);
    objc_debug_printf("open %s\n", (path ? path : filename));

    close(ctx.fd);
}
1456
1457
1458
1459
// Emit "<key>ID</key><integer>N</integer>" for a Graffle graphic.
static void grafflize_id(int gfile, int ID)
{
    char buf[20] = "";
    const char *open_tag = "<key>ID</key><integer>";
    const char *close_tag = "</integer>";

    strcati(buf, ID);
    write(gfile, open_tag, strlen(open_tag));
    write(gfile, buf, strlen(buf));
    write(gfile, close_tag, strlen(close_tag));
}
1472
1473
1474 // head = REFERENT end = arrow
1475 // tail = REFERRER end = no arrow
1476 static void grafflize_reference(int gfile, auto_reference_t reference,
1477 int ID, int important)
1478 {
1479 blob *referrer = blob_for_address(reference.referrer_base);
1480 blob *referent = blob_for_address(reference.referent);
1481 char *c;
1482
1483 // line
1484 c = "<dict><key>Class</key><string>LineGraphic</string>";
1485 write(gfile, c, strlen(c));
1486
1487 // id
1488 grafflize_id(gfile, ID);
1489
1490 // head = REFERENT
1491 c = "<key>Head</key><dict>";
1492 write(gfile, c, strlen(c));
1493 grafflize_id(gfile, referent->ID);
1494 c = "</dict>";
1495 write(gfile, c, strlen(c));
1496
1497 // tail = REFERRER
1498 c = "<key>Tail</key><dict>";
1499 write(gfile, c, strlen(c));
1500 grafflize_id(gfile, referrer->ID);
1501 c = "</dict>";
1502 write(gfile, c, strlen(c));
1503
1504 // style - head arrow, thick line if important
1505 c = "<key>Style</key><dict><key>stroke</key><dict>"
1506 "<key>HeadArrow</key><string>FilledArrow</string>"
1507 "<key>LineType</key><integer>1</integer>";
1508 write(gfile, c, strlen(c));
1509 if (important) {
1510 c = "<key>Width</key><real>3</real>";
1511 write(gfile, c, strlen(c));
1512 }
1513 c = "</dict></dict>";
1514 write(gfile, c, strlen(c));
1515
1516 // end line
1517 c = "</dict>";
1518 write(gfile, c, strlen(c));
1519 }
1520
1521
// Emit one Graffle rectangle for blob b: sized to fit its name,
// positioned vertically by graph depth, labelled with name and address,
// drawn with a fat border when the block is retained (a root).
static void grafflize_blob(int gfile, blob *b)
{
    // fixme include ivar names too
    char *name = name_for_address(auto_zone(), b->address, 0, false);
    int width = 30 + (int)strlen(name)*6;   // rough per-character sizing
    int height = 40;
    char buf[40] = "";
    char *c;

    // rectangle
    c = "<dict>"
        "<key>Class</key><string>ShapedGraphic</string>"
        "<key>Shape</key><string>Rectangle</string>";
    write(gfile, c, strlen(c));

    // id
    grafflize_id(gfile, b->ID);

    // bounds
    // order vertically by depth
    c = "<key>Bounds</key><string>{{0,";
    write(gfile, c, strlen(c));
    buf[0] = '\0';
    strcati(buf, b->depth*60);
    write(gfile, buf, strlen(buf));
    c = "},{";
    write(gfile, c, strlen(c));
    buf[0] = '\0';
    strcati(buf, width);
    strcat(buf, ",");
    strcati(buf, height);
    write(gfile, buf, strlen(buf));
    c = "}}</string>";
    write(gfile, c, strlen(c));

    // label: RTF text with the block's name and hex address
    c = "<key>Text</key><dict><key>Text</key>"
        "<string>{\\rtf1\\mac\\ansicpg10000\\cocoartf102\n"
        "{\\fonttbl\\f0\\fswiss\\fcharset77 Helvetica;\\fonttbl\\f1\\fswiss\\fcharset77 Helvetica-Bold;}\n"
        "{\\colortbl;\\red255\\green255\\blue255;}\n"
        "\\pard\\tx560\\tx1120\\tx1680\\tx2240\\tx3360\\tx3920\\tx4480\\tx5040\\tx5600\\tx6160\\tx6720\\qc\n"
        "\\f0\\fs20 \\cf0 ";
    write(gfile, c, strlen(c));
    write(gfile, name, strlen(name));
    strcpy(buf, "\\\n0x");
    strcatx(buf, b->address);
    write(gfile, buf, strlen(buf));
    c = "}</string></dict>";
    write(gfile, c, strlen(c));

    // styles
    c = "<key>Style</key><dict>";
    write(gfile, c, strlen(c));

    // no shadow
    c = "<key>shadow</key><dict><key>Draws</key><string>NO</string></dict>";
    write(gfile, c, strlen(c));

    // fat border if refcount > 0
    if (b->refcount > 0) {
        c = "<key>stroke</key><dict><key>Width</key><real>4</real></dict>";
        write(gfile, c, strlen(c));
    }

    // end styles
    c = "</dict>";
    write(gfile, c, strlen(c));

    // done
    c = "</dict>\n";
    write(gfile, c, strlen(c));

    malloc_zone_free(objc_debug_zone(), name);
}
1596
1597
// Minimal OmniGraffle (XML plist) document skeleton; the graphics-array
// contents are written between these two chunks by grafflize().
#define gheader "<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\"><plist version=\"1.0\"><dict><key>GraphDocumentVersion</key><integer>3</integer><key>ReadOnly</key><string>NO</string><key>GraphicsList</key><array>\n"

#define gfooter "</array></dict></plist>\n"
1601
1602
// Write the object graph in `blobs` to a new Graffle file in /tmp.
// With `everything` set, draw all blobs and every recorded reference
// (backreferences drawn thick); otherwise draw only blobs reached by
// the breadth-first walk (depth >= 0) and just their back-edges.
static void grafflize(blob_queue *blobs, int everything)
{
    // Don't require linking to Foundation!
    int i;
    int gfile;
    int nextid = 1;
    char filename[] = "/tmp/gc-XXXXX.graffle";

    // Open file
    gfile = mkstemps(filename, (int)strlen(strrchr(filename, '.')));
    if (gfile < 0) {
        objc_debug_printf("couldn't create a graffle file in /tmp/ (errno %d)\n", errno);
        return;
    }

    // Write header
    write(gfile, gheader, strlen(gheader));

    // Write a rectangle for each blob; assign graphic IDs first so the
    // arrows below can refer to them.
    sort_blobs_by_depth(blobs);
    for (i = 0; i < blobs->used; i++) {
        blob *b = blobs->list[i];
        b->ID = nextid++;
        if (everything || b->depth >= 0) {
            grafflize_blob(gfile, b);
        }
    }

    for (i = 0; i < blobs->used; i++) {
        int j;
        blob *b = blobs->list[i];

        if (everything) {
            // Write an arrow for each reference
            // Use big arrows for backreferences
            for (j = 0; j < b->referrers_used; j++) {
                // Fixed: previously compared b->referrers[i] (the outer
                // blob index) instead of b->referrers[j], which both
                // mis-identified backreferences and could read past the
                // end of the referrers array.
                int is_back_ref = (b->referrers[j].referent == b->back.referent && b->referrers[j].referrer_offset == b->back.referrer_offset && b->referrers[j].referrer_base == b->back.referrer_base);

                grafflize_reference(gfile, b->referrers[j], nextid++,
                                    is_back_ref);
            }
        }
        else {
            // Write an arrow for each backreference
            if (b->depth > 0) {
                grafflize_reference(gfile, b->back, nextid++, false);
            }
        }
    }

    // Write footer and close
    write(gfile, gfooter, strlen(gfooter));
    close(gfile);
    objc_debug_printf("wrote object graph (%d objects)\nopen %s\n",
                      blobs->used, filename);
}