apple/objc4.git — runtime/objc-auto.m (objc4-371)
1 /*
2 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #import <stdint.h>
25 #import <stdbool.h>
26 #import <fcntl.h>
27 #import <mach/mach.h>
28 #import <mach-o/dyld.h>
#import <dlfcn.h>   // dlopen/dlsym/dlclose, used by cf_class_for_object() below
29 #import <sys/types.h>
30 #import <sys/mman.h>
31 #import <libkern/OSAtomic.h>
32
33 #define OLD 1
34 #import "objc-private.h"
35 #import "auto_zone.h"
36 #import "objc-auto.h"
37 #import "objc-rtp.h"
38 #import "maptable.h"
39
40
41 static auto_zone_t *gc_zone_init(void);
42
43
44 __private_extern__ BOOL UseGC NOBSS = NO;
45 static BOOL RecordAllocations = NO;
46 static BOOL MultiThreadedGC = NO;
47 static BOOL WantsMainThreadFinalization = NO;
48 static BOOL NeedsMainThreadFinalization = NO;
49
50 static struct {
51 auto_zone_foreach_object_t foreach;
52 auto_zone_cursor_t cursor;
53 size_t cursor_size;
54 volatile BOOL finished;
55 volatile BOOL started;
56 pthread_mutex_t mutex;
57 pthread_cond_t condition;
58 } BatchFinalizeBlock;
59
60
61 __private_extern__ auto_zone_t *gc_zone = NULL;
62
63 // Pointer magic to make dyld happy. See notes in objc-private.h
64 __private_extern__ id (*objc_assign_ivar_internal)(id, id, ptrdiff_t) = objc_assign_ivar;
65
66
67 /***********************************************************************
68 * Utility exports
69 * Called by various libraries.
70 **********************************************************************/
71
72 OBJC_EXPORT void objc_set_collection_threshold(size_t threshold) { // Old naming
73 if (UseGC) {
74 auto_collection_parameters(gc_zone)->collection_threshold = threshold;
75 }
76 }
77
78 OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold) {
79 if (UseGC) {
80 auto_collection_parameters(gc_zone)->collection_threshold = threshold;
81 }
82 }
83
84 void objc_setCollectionRatio(size_t ratio) {
85 if (UseGC) {
86 auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
87 }
88 }
89
90 void objc_set_collection_ratio(size_t ratio) { // old naming
91 if (UseGC) {
92 auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
93 }
94 }
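
// Illustrative usage (a sketch; the threshold units and both values are
// assumptions, not recommendations): a GC-aware app could tune the collector
// at startup like this. Both calls are harmless no-ops when GC is off, since
// UseGC is checked first.
//
//     objc_setCollectionThreshold(16 * 1024 * 1024);  // hypothetical byte threshold
//     objc_setCollectionRatio(10);                    // hypothetical full:generational ratio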
95
96 void objc_finalizeOnMainThread(Class cls) {
97 if (UseGC) {
98 WantsMainThreadFinalization = YES;
99 _class_setFinalizeOnMainThread(cls);
100 }
101 }
102
103
104 void objc_startCollectorThread(void) {
105 static int didOnce = 0;
106 if (!didOnce) {
107 didOnce = 1;
108
109 // pretend we're done to start out with.
110 BatchFinalizeBlock.started = YES;
111 BatchFinalizeBlock.finished = YES;
112 pthread_mutex_init(&BatchFinalizeBlock.mutex, NULL);
113 pthread_cond_init(&BatchFinalizeBlock.condition, NULL);
114 auto_collect_multithreaded(gc_zone);
115 MultiThreadedGC = YES;
116 }
117 }
118
119 void objc_start_collector_thread(void) {
120 objc_startCollectorThread();
121 }
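
// Illustrative usage (sketch): a multithreaded client opts into the dedicated
// collector thread once, typically during launch; the didOnce guard above
// makes repeated calls harmless:
//
//     objc_startCollectorThread();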
122
123 static void batchFinalizeOnMainThread(void);
124
125 void objc_collect(unsigned long options) {
126 if (!UseGC) return;
127 BOOL onMainThread = pthread_main_np() ? YES : NO;
128
129 if (MultiThreadedGC || onMainThread) {
130 if (MultiThreadedGC && onMainThread) batchFinalizeOnMainThread();
131 auto_collection_mode_t amode = AUTO_COLLECT_RATIO_COLLECTION;
132 switch (options & 0x3) {
133 case OBJC_RATIO_COLLECTION: amode = AUTO_COLLECT_RATIO_COLLECTION; break;
134 case OBJC_GENERATIONAL_COLLECTION: amode = AUTO_COLLECT_GENERATIONAL_COLLECTION; break;
135 case OBJC_FULL_COLLECTION: amode = AUTO_COLLECT_FULL_COLLECTION; break;
136 case OBJC_EXHAUSTIVE_COLLECTION: amode = AUTO_COLLECT_EXHAUSTIVE_COLLECTION; break;
137 }
138 if (options & OBJC_COLLECT_IF_NEEDED) amode |= AUTO_COLLECT_IF_NEEDED;
139 if (options & OBJC_WAIT_UNTIL_DONE) amode |= AUTO_COLLECT_SYNCHRONOUS; // uses different bits
140 auto_collect(gc_zone, amode, NULL);
141 }
142 else {
143 objc_msgSend(objc_getClass("NSGarbageCollector"), @selector(_callOnMainThread:withArgs:), objc_collect, (void *)options);
144 }
145 }
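
// Illustrative usage (sketch): the option bits above compose, so a caller that
// must block until all garbage is gone could write
//
//     objc_collect(OBJC_EXHAUSTIVE_COLLECTION | OBJC_WAIT_UNTIL_DONE);
//
// while a cheap opportunistic request would be
//
//     objc_collect(OBJC_GENERATIONAL_COLLECTION | OBJC_COLLECT_IF_NEEDED);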
146
147 // SPI. Known callers:
148 //   options == 0 (exhaustive collection):
149 //     - NSGarbageCollector.m; AppKit (Xcode, via its idle timer)
150 //   OBJC_GENERATIONAL (generational collection, if needed):
151 //     - from the autorelease pool
152 //     - several other places
153 void objc_collect_if_needed(unsigned long options) {
154 if (!UseGC) return;
155 BOOL onMainThread = pthread_main_np() ? YES : NO;
156
157 if (MultiThreadedGC || onMainThread) {
158 auto_collection_mode_t mode;
159 if (options & OBJC_GENERATIONAL) {
160 mode = AUTO_COLLECT_IF_NEEDED | AUTO_COLLECT_RATIO_COLLECTION;
161 }
162 else {
163 mode = AUTO_COLLECT_EXHAUSTIVE_COLLECTION;
164 }
165 if (MultiThreadedGC && onMainThread) batchFinalizeOnMainThread();
166 auto_collect(gc_zone, mode, NULL);
167 }
168 else { // XXX could be optimized (e.g. ask auto for threshold check, if so, set ASKING if not already ASKING,...
169 objc_msgSend(objc_getClass("NSGarbageCollector"), @selector(_callOnMainThread:withArgs:), objc_collect_if_needed, (void *)options);
170 }
171 }
172
173 // NEVER USED.
174 size_t objc_numberAllocated(void)
175 {
176 auto_statistics_t stats;
177 stats.version = 0;
178 auto_zone_statistics(gc_zone, &stats);
179 return stats.malloc_statistics.blocks_in_use;
180 }
181
182 // USED BY CF & ONE OTHER
183 BOOL objc_isAuto(id object)
184 {
185 return UseGC && auto_zone_is_valid_pointer(gc_zone, object) != 0;
186 }
187
188
189 BOOL objc_collectingEnabled(void)
190 {
191 return UseGC;
192 }
193 BOOL objc_collecting_enabled(void) // Old naming
194 {
195 return UseGC;
196 }
197
198
199 /***********************************************************************
200 * Memory management.
201 * Called by CF and Foundation.
202 **********************************************************************/
203
204 // Allocate an object in the GC zone, with the given number of extra bytes.
205 id objc_allocate_object(Class cls, int extra)
206 {
207 return class_createInstance(cls, extra);
208 }
209
210
211 /***********************************************************************
212 * Write barrier implementations, optimized for when GC is known to be on
213 * Called by the write barrier exports only.
214 * These implementations assume GC is on. The exported function must
215 * either perform the check itself or be conditionally stomped at
216 * startup time.
217 **********************************************************************/
218
219 static void objc_strongCast_write_barrier(id value, id *slot) {
220 if (!auto_zone_set_write_barrier(gc_zone, (void*)slot, value)) {
221 auto_zone_root_write_barrier(gc_zone, slot, value);
222 }
223 }
224
225 __private_extern__ id objc_assign_strongCast_gc(id value, id *slot)
226 {
227 objc_strongCast_write_barrier(value, slot);
228 return (*slot = value);
229 }
230
231 static void objc_register_global(id value, id *slot)
232 {
233 // use explicit root registration.
234 if (value && auto_zone_is_valid_pointer(gc_zone, value)) {
235 if (auto_zone_is_finalized(gc_zone, value)) {
236 __private_extern__ void objc_assign_global_error(id value, id *slot);
237
238 _objc_inform("GC: storing an already collected object %p into global memory at %p, break on objc_assign_global_error to debug\n", value, slot);
239 objc_assign_global_error(value, slot);
240 }
241 auto_zone_add_root(gc_zone, slot, value);
242 }
243 }
244
245 __private_extern__ id objc_assign_global_gc(id value, id *slot) {
246 objc_register_global(value, slot);
247 return (*slot = value);
248 }
249
250
251 __private_extern__ id objc_assign_ivar_gc(id value, id base, ptrdiff_t offset)
252 {
253 id *slot = (id*) ((char *)base + offset);
254
255 if (value) {
256 if (!auto_zone_set_write_barrier(gc_zone, (char *)base + offset, value)) {
257 __private_extern__ void objc_assign_ivar_error(id base, ptrdiff_t offset);
258
259         _objc_inform("GC: %p + %td isn't in the auto_zone, break on objc_assign_ivar_error to debug.\n", base, offset);
260 objc_assign_ivar_error(base, offset);
261 }
262 }
263
264 return (*slot = value);
265 }
266
267
268 /***********************************************************************
269 * Write barrier exports
270 * Called by pretty much all GC-supporting code.
271 *
272 * These "generic" implementations, available in PPC, are thought to be
273 * called by Rosetta when it translates the bla instruction.
274 **********************************************************************/
275
276 // Platform-independent write barriers
277 // These contain the UseGC check that the platform-specific
278 // runtime-rewritten implementations do not.
279
280 id objc_assign_strongCast_generic(id value, id *dest)
281 {
282 if (UseGC) {
283 return objc_assign_strongCast_gc(value, dest);
284 } else {
285 return (*dest = value);
286 }
287 }
288
289
290 id objc_assign_global_generic(id value, id *dest)
291 {
292 if (UseGC) {
293 return objc_assign_global_gc(value, dest);
294 } else {
295 return (*dest = value);
296 }
297 }
298
299
300 id objc_assign_ivar_generic(id value, id dest, ptrdiff_t offset)
301 {
302 if (UseGC) {
303 return objc_assign_ivar_gc(value, dest, offset);
304 } else {
305 id *slot = (id*) ((char *)dest + offset);
306 return (*slot = value);
307 }
308 }
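
// How these get called (hedged sketch): under -fobjc-gc the compiler routes
// qualifying assignments through these entry points instead of plain stores.
// Conceptually, with hypothetical names:
//
//     obj->ivar = value;   // becomes objc_assign_ivar(value, obj, offset_of_ivar)
//     SomeGlobal = value;  // becomes objc_assign_global(value, &SomeGlobal)
//     *(id *)p = value;    // becomes objc_assign_strongCast(value, (id *)p)
//
// The exact classification is the compiler's business; this is only the shape.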
309
310 #if defined(__ppc__) || defined(__i386__) || defined(__x86_64__)
311
312 // PPC write barriers are in objc-auto-ppc.s
313 // write_barrier_init conditionally stomps those to jump to the _impl versions.
314
315 // These 3 functions are defined in objc-auto-i386.s and objc-auto-x86_64.s as
316 // the non-GC variants. Under GC, rtp_init stomps them with jumps to
317 // objc_assign_*_gc.
318
319 #else
320
321 // use generic implementation until time can be spent on optimizations
322 id objc_assign_strongCast(id value, id *dest) { return objc_assign_strongCast_generic(value, dest); }
323 id objc_assign_global(id value, id *dest) { return objc_assign_global_generic(value, dest); }
324 id objc_assign_ivar(id value, id dest, ptrdiff_t offset) { return objc_assign_ivar_generic(value, dest, offset); }
325
326 // not (defined(__ppc__)) && not defined(__i386__) && not defined(__x86_64__)
327 #endif
328
329
330 void *objc_memmove_collectable(void *dst, const void *src, size_t size)
331 {
332 if (UseGC) {
333 return auto_zone_write_barrier_memmove(gc_zone, dst, src, size);
334 } else {
335 return memmove(dst, src, size);
336 }
337 }
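
// Illustrative usage (sketch, hypothetical buffer): moving a range of object
// pointers inside GC-visible memory must use this barriered memmove so the
// collector notices the relocated references:
//
//     objc_memmove_collectable(list + 1, list, count * sizeof(id));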
338
339 BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation) {
340 if (UseGC) objc_register_global(replacement, (id *)objectLocation);
341 return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
342 }
343
344 BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation) {
345 if (UseGC) objc_register_global(replacement, (id *)objectLocation);
346 return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
347 }
348
349 BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation) {
350 if (UseGC) objc_strongCast_write_barrier(replacement, (id *)objectLocation);
351 return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
352 }
353
354 BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation) {
355 if (UseGC) objc_strongCast_write_barrier(replacement, (id *)objectLocation);
356 return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
357 }
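
// Illustrative usage (a sketch with hypothetical names): lock-free one-time
// publication of a global. Note that the root registration / write barrier
// above is applied whether or not the swap wins.
//
//     static id gShared = nil;
//     if (objc_atomicCompareAndSwapGlobalBarrier(nil, newValue, &gShared)) {
//         // this thread published newValue; gShared is now a registered root
//     }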
358
359
360 /***********************************************************************
361 * Weak ivar support
362 **********************************************************************/
363
364 id objc_read_weak(id *location) {
365 id result = *location;
366 if (UseGC && result) {
367 result = auto_read_weak_reference(gc_zone, (void **)location);
368 }
369 return result;
370 }
371
372 id objc_assign_weak(id value, id *location) {
373 if (UseGC) {
374 auto_assign_weak_reference(gc_zone, value, (void **)location, NULL);
375 }
376 else {
377 *location = value;
378 }
379 return value;
380 }
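
// Illustrative usage (hedged sketch): __weak accesses compile down to these
// entry points under GC, so a weakly cached object is handed back only while
// it is still live:
//
//     static __weak id sCache;   // hypothetical weak global
//     sCache = obj;              // => objc_assign_weak(obj, &sCache)
//     id hit = sCache;           // => objc_read_weak(&sCache); nil once collected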
381
382
383 /***********************************************************************
384 * Testing tools
385 * Used to isolate resurrection of garbage objects during finalization.
386 **********************************************************************/
387 BOOL objc_is_finalized(void *ptr) {
388 if (ptr != NULL && UseGC) {
389 return auto_zone_is_finalized(gc_zone, ptr);
390 }
391 return NO;
392 }
393
394
395 /***********************************************************************
396 * Stack management
397  * Used to clean up dirty stack frames before a thread blocks. To make
398  * this more efficient, we really need better support from pthreads.
399 * See <rdar://problem/4548631> for more details.
400 **********************************************************************/
401
402 static vm_address_t _stack_resident_base() {
403 pthread_t self = pthread_self();
404 size_t stack_size = pthread_get_stacksize_np(self);
405 vm_address_t stack_base = (vm_address_t)pthread_get_stackaddr_np(self) - stack_size;
406 size_t stack_page_count = stack_size / vm_page_size;
407 char stack_residency[stack_page_count];
408 vm_address_t stack_resident_base = 0;
409 if (mincore((void*)stack_base, stack_size, stack_residency) == 0) {
410 // we can now tell the degree to which the stack is resident, and use it as our ultimate high water mark.
411 size_t i;
412 for (i = 0; i < stack_page_count; ++i) {
413 if (stack_residency[i]) {
414 stack_resident_base = stack_base + i * vm_page_size;
415 // malloc_printf("last touched page = %lu\n", stack_page_count - i - 1);
416 break;
417 }
418 }
419 }
420 return stack_resident_base;
421 }
422
423 static __attribute__((noinline)) void* _get_stack_pointer() {
424 #if defined(__i386__) || defined(__ppc__) || defined(__ppc64__) || defined(__x86_64__)
425 return __builtin_frame_address(0);
426 #else
427 return NULL;
428 #endif
429 }
430
431 void objc_clear_stack(unsigned long options) {
432 if (!UseGC) return;
433 if (options & OBJC_CLEAR_RESIDENT_STACK) {
434 // clear just the pages of stack that are currently resident.
435 vm_address_t stack_resident_base = _stack_resident_base();
436 vm_address_t stack_top = (vm_address_t)_get_stack_pointer() - 2 * sizeof(void*);
437         if (stack_resident_base) bzero((void*)stack_resident_base, (stack_top - stack_resident_base)); // skip if mincore() failed above
438 } else {
439         // clear the entire unused stack, regardless of whether its pages are resident.
440 pthread_t self = pthread_self();
441 size_t stack_size = pthread_get_stacksize_np(self);
442 vm_address_t stack_base = (vm_address_t)pthread_get_stackaddr_np(self) - stack_size;
443 vm_address_t stack_top = (vm_address_t)_get_stack_pointer() - 2 * sizeof(void*);
444 bzero((void*)stack_base, stack_top - stack_base);
445 }
446 }
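
// Illustrative usage (sketch): a thread that is about to block for a long
// time can scrub its dead stack so stale frames aren't conservatively treated
// as roots:
//
//     objc_clear_stack(OBJC_CLEAR_RESIDENT_STACK);
//     // ... then block in select(), mach_msg(), etc.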
447
448 /***********************************************************************
449 * CF-only write barrier exports
450 * Called by CF only.
451  * The gc_zone guards are not thought to be necessary.
452 **********************************************************************/
453
454 // Exported as very private SPI to Foundation, which passes it on to CF.
455 void* objc_assign_ivar_address_CF(void *value, void *base, void **slot)
456 {
457 if (value && gc_zone) {
458 if (auto_zone_is_valid_pointer(gc_zone, base)) {
459 ptrdiff_t offset = (((char *)slot)-(char *)base);
460 auto_zone_write_barrier(gc_zone, base, offset, value);
461 }
462 }
463
464 return (*slot = value);
465 }
466
467
468 // Same as objc_assign_strongCast_gc; Foundation should be told to use the _gc version instead.
469 // Exported as very private SPI to Foundation, which passes it on to CF.
470 void* objc_assign_strongCast_CF(void* value, void **slot)
471 {
472 if (value && gc_zone) {
473 void *base = (void *)auto_zone_base_pointer(gc_zone, (void*)slot);
474 if (base) {
475 ptrdiff_t offset = (((char *)slot)-(char *)base);
476 auto_zone_write_barrier(gc_zone, base, offset, value);
477 }
478 }
479 return (*slot = value);
480 }
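
// Illustrative usage (sketch, hypothetical slot): CF stores a value into an
// untyped slot of an auto-allocated block through this barrier:
//
//     objc_assign_strongCast_CF(value, &((void **)buffer)[i]);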
481
482
483 /***********************************************************************
484 * Finalization support
485 **********************************************************************/
486
487 static IMP _NSObject_finalize = NULL;
488
489 // Finalizer crash debugging
490 static void *finalizing_object;
491 static const char *__crashreporter_info__;
492
493 static void finalizeOneObject(void *obj, void *sel) {
494 id object = (id)obj;
495 SEL selector = (SEL)sel;
496 finalizing_object = obj;
497 __crashreporter_info__ = object_getClassName(obj);
498
499     // Call the -finalize method.
500 objc_msgSend(object, selector);
501 // Call C++ destructors, if any.
502 object_cxxDestruct(object);
503
504 finalizing_object = NULL;
505 __crashreporter_info__ = NULL;
506 }
507
508 static void finalizeOneMainThreadOnlyObject(void *obj, void *sel) {
509 id object = (id)obj;
510 Class cls = object->isa;
511 if (cls == NULL) {
512 _objc_fatal("object with NULL ISA passed to finalizeOneMainThreadOnlyObject: %p\n", obj);
513 }
514 if (_class_shouldFinalizeOnMainThread(cls)) {
515 finalizeOneObject(obj, sel);
516 }
517 }
518
519 static void finalizeOneAnywhereObject(void *obj, void *sel) {
520 id object = (id)obj;
521 Class cls = object->isa;
522 if (cls == NULL) {
523 _objc_fatal("object with NULL ISA passed to finalizeOneAnywhereObject: %p\n", obj);
524 }
525 if (!_class_shouldFinalizeOnMainThread(cls)) {
526 finalizeOneObject(obj, sel);
527 }
528 else {
529 NeedsMainThreadFinalization = YES;
530 }
531 }
532
533
534
535 static void batchFinalize(auto_zone_t *zone,
536 auto_zone_foreach_object_t foreach,
537 auto_zone_cursor_t cursor,
538 size_t cursor_size,
539 void (*finalize)(void *, void*))
540 {
541 for (;;) {
542 @try {
543 foreach(cursor, finalize, @selector(finalize));
544 // non-exceptional return means finalization is complete.
545 break;
546 } @catch (id exception) {
547 // whoops, note exception, then restart at cursor's position
548 __private_extern__ void objc_exception_during_finalize_error(void);
549 _objc_inform("GC: -finalize resulted in an exception (%p) being thrown, break on objc_exception_during_finalize_error to debug\n\t%s", exception, (const char*)[[exception description] UTF8String]);
550 objc_exception_during_finalize_error();
551 }
552 }
553 }
554
555
556 static void batchFinalizeOnMainThread(void) {
557 pthread_mutex_lock(&BatchFinalizeBlock.mutex);
558 if (BatchFinalizeBlock.started) {
559 // main thread got here already
560 pthread_mutex_unlock(&BatchFinalizeBlock.mutex);
561 return;
562 }
563 BatchFinalizeBlock.started = YES;
564 pthread_mutex_unlock(&BatchFinalizeBlock.mutex);
565
566 batchFinalize(gc_zone, BatchFinalizeBlock.foreach, BatchFinalizeBlock.cursor, BatchFinalizeBlock.cursor_size, finalizeOneMainThreadOnlyObject);
567 // signal the collector thread that finalization has finished.
568 pthread_mutex_lock(&BatchFinalizeBlock.mutex);
569 BatchFinalizeBlock.finished = YES;
570 pthread_cond_signal(&BatchFinalizeBlock.condition);
571 pthread_mutex_unlock(&BatchFinalizeBlock.mutex);
572 }
573
574 static void batchFinalizeOnTwoThreads(auto_zone_t *zone,
575 auto_zone_foreach_object_t foreach,
576 auto_zone_cursor_t cursor,
577 size_t cursor_size)
578 {
579     // First, let's get rid of everything we can on this thread, then ask the main thread to help if needed.
580 NeedsMainThreadFinalization = NO;
581 char cursor_copy[cursor_size];
582 memcpy(cursor_copy, cursor, cursor_size);
583 batchFinalize(zone, foreach, cursor_copy, cursor_size, finalizeOneAnywhereObject);
584
585 if (! NeedsMainThreadFinalization)
586 return; // no help needed
587
588 // set up the control block. Either our ping of main thread with _callOnMainThread will get to it, or
589 // an objc_collect_if_needed() will get to it. Either way, this block will be processed on the main thread.
590 pthread_mutex_lock(&BatchFinalizeBlock.mutex);
591 BatchFinalizeBlock.foreach = foreach;
592 BatchFinalizeBlock.cursor = cursor;
593 BatchFinalizeBlock.cursor_size = cursor_size;
594 BatchFinalizeBlock.started = NO;
595 BatchFinalizeBlock.finished = NO;
596 pthread_mutex_unlock(&BatchFinalizeBlock.mutex);
597
598 //printf("----->asking main thread to finalize\n");
599 objc_msgSend(objc_getClass("NSGarbageCollector"), @selector(_callOnMainThread:withArgs:), batchFinalizeOnMainThread, &BatchFinalizeBlock);
600
601 // wait for the main thread to finish finalizing instances of classes marked CLS_FINALIZE_ON_MAIN_THREAD.
602 pthread_mutex_lock(&BatchFinalizeBlock.mutex);
603 while (!BatchFinalizeBlock.finished) pthread_cond_wait(&BatchFinalizeBlock.condition, &BatchFinalizeBlock.mutex);
604 pthread_mutex_unlock(&BatchFinalizeBlock.mutex);
605 //printf("<------ main thread finalize done\n");
606
607 }
608
609
610 static void objc_will_grow(auto_zone_t *zone, auto_heap_growth_info_t info) {
611 if (MultiThreadedGC) {
612 //printf("objc_will_grow %d\n", info);
613
614 if (auto_zone_is_collecting(gc_zone)) {
615 ;
616 }
617 else {
618 auto_collect(gc_zone, AUTO_COLLECT_RATIO_COLLECTION, NULL);
619 }
620 }
621 }
622
623
624 // collector calls this with garbage ready
625 static void BatchInvalidate(auto_zone_t *zone,
626 auto_zone_foreach_object_t foreach,
627 auto_zone_cursor_t cursor,
628 size_t cursor_size)
629 {
630 if (pthread_main_np() || !WantsMainThreadFinalization) {
631         // Finalize all objects here. Either we're on the main thread (so main-thread-only
632         // finalization is safe), or no main-thread-only objects have been allocated.
633 batchFinalize(zone, foreach, cursor, cursor_size, finalizeOneObject);
634 }
635 else {
636         // We're on the dedicated collector thread. Finalize main-thread-only objects on the main thread, the rest here.
637 batchFinalizeOnTwoThreads(zone, foreach, cursor, cursor_size);
638 }
639
640 }
641
642 // idea: keep a side table mapping resurrected object pointers to their original Class, so we don't
643 // need to smash anything. alternatively, could use associative references to track against a secondary
644 // object with information about the resurrection, such as a stack crawl, etc.
645
646 static Class _NSResurrectedObjectClass;
647 static NXMapTable *_NSResurrectedObjectMap = NULL;
648 static OBJC_DECLARE_LOCK(_NSResurrectedObjectLock);
649
650 static Class resurrectedObjectOriginalClass(id object) {
651 Class originalClass;
652 OBJC_LOCK(&_NSResurrectedObjectLock);
653 originalClass = (Class) NXMapGet(_NSResurrectedObjectMap, object);
654 OBJC_UNLOCK(&_NSResurrectedObjectLock);
655 return originalClass;
656 }
657
658 static id _NSResurrectedObject_classMethod(id self, SEL selector) { return self; }
659
660 static id _NSResurrectedObject_instanceMethod(id self, SEL name) {
661 _objc_inform("**resurrected** object %p of class %s being sent message '%s'\n", self, class_getName(resurrectedObjectOriginalClass(self)), sel_getName(name));
662 return self;
663 }
664
665 static void _NSResurrectedObject_finalize(id self, SEL _cmd) {
666 Class originalClass;
667 OBJC_LOCK(&_NSResurrectedObjectLock);
668 originalClass = (Class) NXMapRemove(_NSResurrectedObjectMap, self);
669 OBJC_UNLOCK(&_NSResurrectedObjectLock);
670 if (originalClass) _objc_inform("**resurrected** object %p of class %s being finalized\n", self, class_getName(originalClass));
671 _NSObject_finalize(self, _cmd);
672 }
673
674 static BOOL _NSResurrectedObject_resolveInstanceMethod(id self, SEL _cmd, SEL name) {
675 class_addMethod((Class)self, name, (IMP)_NSResurrectedObject_instanceMethod, "@@:");
676 return YES;
677 }
678
679 static BOOL _NSResurrectedObject_resolveClassMethod(id self, SEL _cmd, SEL name) {
680 class_addMethod(object_getClass(self), name, (IMP)_NSResurrectedObject_classMethod, "@@:");
681 return YES;
682 }
683
684 static void _NSResurrectedObject_initialize() {
685 _NSResurrectedObjectMap = NXCreateMapTable(NXPtrValueMapPrototype, 128);
686 _NSResurrectedObjectClass = objc_allocateClassPair(objc_getClass("NSObject"), "_NSResurrectedObject", 0);
687 class_addMethod(_NSResurrectedObjectClass, @selector(finalize), (IMP)_NSResurrectedObject_finalize, "v@:");
688 Class metaClass = object_getClass(_NSResurrectedObjectClass);
689 class_addMethod(metaClass, @selector(resolveInstanceMethod:), (IMP)_NSResurrectedObject_resolveInstanceMethod, "c@::");
690 class_addMethod(metaClass, @selector(resolveClassMethod:), (IMP)_NSResurrectedObject_resolveClassMethod, "c@::");
691 objc_registerClassPair(_NSResurrectedObjectClass);
692 }
693
694 static void resurrectZombie(auto_zone_t *zone, void *ptr) {
695 id object = (id) ptr;
696 Class cls = object->isa;
697 if (cls != _NSResurrectedObjectClass) {
698 // remember the original class for this instance.
699 OBJC_LOCK(&_NSResurrectedObjectLock);
700 NXMapInsert(_NSResurrectedObjectMap, ptr, cls);
701 OBJC_UNLOCK(&_NSResurrectedObjectLock);
702 object->isa = _NSResurrectedObjectClass;
703 }
704 }
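
// Net effect of the machinery above (summary): a garbage object that something
// tried to resurrect is demoted to _NSResurrectedObject. Its original class is
// remembered in _NSResurrectedObjectMap, instance messages resolve to a stub
// that logs that original class, and finalization logs and removes the mapping.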
705
706 /***********************************************************************
707 * Pretty printing support
708 * For development purposes.
709 **********************************************************************/
710
711
712 static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount);
713
714 static char* objc_name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset)
715 {
716 return name_for_address(zone, base, offset, false);
717 }
718
719 /***********************************************************************
720 * Collection support
721 **********************************************************************/
722
723 static const unsigned char *objc_layout_for_address(auto_zone_t *zone, void *address)
724 {
725 Class cls = *(Class *)address;
726 return (const unsigned char *)class_getIvarLayout(cls);
727 }
728
729 static const unsigned char *objc_weak_layout_for_address(auto_zone_t *zone, void *address)
730 {
731 Class cls = *(Class *)address;
732 return (const unsigned char *)class_getWeakIvarLayout(cls);
733 }
734
735 /***********************************************************************
736 * Initialization
737 **********************************************************************/
738
739 // Always called by _objcInit, even if GC is off.
740 __private_extern__ void gc_init(BOOL on)
741 {
742 UseGC = on;
743
744 if (PrintGC) {
745 _objc_inform("GC: is %s", on ? "ON" : "OFF");
746 }
747
748 if (UseGC) {
749 // Add GC state to crash log reports
750 _objc_inform_on_crash("garbage collection is ON");
751
752 // Set up the GC zone
753 gc_zone = gc_zone_init();
754
755 // no NSObject until Foundation calls objc_collect_init()
756 _NSObject_finalize = &_objc_msgForward;
757
758 } else {
759 auto_zone_start_monitor(false);
760 auto_zone_set_class_list((int (*)(void **, int))objc_getClassList);
761 }
762 }
763
764
765 static auto_zone_t *gc_zone_init(void)
766 {
767 auto_zone_t *result;
768
769 // result = auto_zone_create("objc auto collected zone");
770 result = auto_zone_create("auto_zone");
771
772 auto_collection_control_t *control = auto_collection_parameters(result);
773
774 // set up the magic control parameters
775 control->batch_invalidate = BatchInvalidate;
776 control->will_grow = objc_will_grow;
777 control->resurrect = resurrectZombie;
778 control->layout_for_address = objc_layout_for_address;
779 control->weak_layout_for_address = objc_weak_layout_for_address;
780 control->name_for_address = objc_name_for_address;
781
782 return result;
783 }
784
785
786 // Called by Foundation to install auto's interruption callback.
787 malloc_zone_t *objc_collect_init(int (*callback)(void))
788 {
789 // Find NSObject's finalize method now that Foundation is loaded.
790 // fixme only look for the base implementation, not a category's
791 _NSObject_finalize = class_getMethodImplementation(objc_getClass("NSObject"), @selector(finalize));
792 if (_NSObject_finalize == &_objc_msgForward) {
793 _objc_fatal("GC: -[NSObject finalize] unimplemented!");
794 }
795
796 // create the _NSResurrectedObject class used to track resurrections.
797 _NSResurrectedObject_initialize();
798
799 return (malloc_zone_t *)gc_zone;
800 }
801
802
803
804
805
806
807 /***********************************************************************
808 * Debugging
809 **********************************************************************/
810
811 /* This is non-deadlocking with respect to malloc's locks EXCEPT:
812 * %ls, %a, %A formats
813 * more than 8 args
814 */
815 static void objc_debug_printf(const char *format, ...)
816 {
817 va_list ap;
818 va_start(ap, format);
819 vfprintf(stderr, format, ap);
820 va_end(ap);
821 }
822
823 static malloc_zone_t *objc_debug_zone(void)
824 {
825 static malloc_zone_t *z = NULL;
826 if (!z) {
827 z = malloc_create_zone(4096, 0);
828 malloc_set_zone_name(z, "objc-auto debug");
829 }
830 return z;
831 }
832
833 static char *_malloc_append_unsigned(uintptr_t value, unsigned base, char *head) {
834 if (!value) {
835 head[0] = '0';
836 } else {
837 if (value >= base) head = _malloc_append_unsigned(value / base, base, head);
838 value = value % base;
839 head[0] = (value < 10) ? '0' + value : 'a' + value - 10;
840 }
841 return head+1;
842 }
843
844 static void strcati(char *str, uintptr_t value)
845 {
846 str = _malloc_append_unsigned(value, 10, str + strlen(str));
847 str[0] = '\0';
848 }
849
850 static void strcatx(char *str, uintptr_t value)
851 {
852 str = _malloc_append_unsigned(value, 16, str + strlen(str));
853 str[0] = '\0';
854 }
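
// Example behavior (sketch): these tiny formatters append without touching
// printf or malloc, e.g.
//
//     char buf[32] = "refcount=";
//     strcati(buf, 12);     // buf is now "refcount=12"
//     strcat(buf, " 0x");
//     strcatx(buf, 0x2a);   // buf is now "refcount=12 0x2a"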
855
856
857 static Ivar ivar_for_offset(Class cls, vm_address_t offset)
858 {
859 int i;
860 int ivar_offset;
861 Ivar super_ivar, result;
862 Ivar *ivars;
863 unsigned int ivar_count;
864
865 if (!cls) return NULL;
866
867 // scan base classes FIRST
868 super_ivar = ivar_for_offset(class_getSuperclass(cls), offset);
869 // result is best-effort; our ivars may be closer
870
871 ivars = class_copyIvarList(cls, &ivar_count);
872 if (ivars && ivar_count) {
873 // Try our first ivar. If it's too big, use super's best ivar.
874 ivar_offset = ivar_getOffset(ivars[0]);
875 if (ivar_offset > offset) result = super_ivar;
876 else if (ivar_offset == offset) result = ivars[0];
877 else result = NULL;
878
879 // Try our other ivars. If any is too big, use the previous.
880 for (i = 1; result == NULL && i < ivar_count; i++) {
881 ivar_offset = ivar_getOffset(ivars[i]);
882 if (ivar_offset == offset) {
883 result = ivars[i];
884 } else if (ivar_offset > offset) {
885 result = ivars[i - 1];
886 }
887 }
888
889 // Found nothing. Return our last ivar.
890 if (result == NULL)
891 result = ivars[ivar_count - 1];
892
893 free(ivars);
894 } else {
895 result = super_ivar;
896 }
897
898 return result;
899 }
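
// Example (hypothetical layout): for a class whose own ivars sit at offsets
// 4 (_name) and 8 (_age), ivar_for_offset(cls, 6) returns _name — the closest
// ivar at or below the requested offset; offsets that fall inside a superclass
// resolve through the recursive call above.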
900
901 static void append_ivar_at_offset(char *buf, Class cls, vm_address_t offset)
902 {
903 Ivar ivar = NULL;
904
905 if (offset == 0) return; // don't bother with isa
906 if (offset >= class_getInstanceSize(cls)) {
907 strcat(buf, ".<extra>+");
908 strcati(buf, offset);
909 return;
910 }
911
912 ivar = ivar_for_offset(cls, offset);
913 if (!ivar) {
914 strcat(buf, ".<?>");
915 return;
916 }
917
918 // fixme doesn't handle structs etc.
919
920 strcat(buf, ".");
921 const char *ivar_name = ivar_getName(ivar);
922 if (ivar_name) strcat(buf, ivar_name);
923 else strcat(buf, "<anonymous ivar>");
924
925 offset -= ivar_getOffset(ivar);
926 if (offset > 0) {
927 strcat(buf, "+");
928 strcati(buf, offset);
929 }
930 }
931
932
933 static const char *cf_class_for_object(void *cfobj)
934 {
935 // ick - we don't link against CF anymore
936
937 const char *result;
938 void *dlh;
939 size_t (*CFGetTypeID)(void *);
940 void * (*_CFRuntimeGetClassWithTypeID)(size_t);
941
942 result = "anonymous_NSCFType";
943
944 dlh = dlopen("/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation", RTLD_LAZY | RTLD_NOLOAD | RTLD_FIRST);
945 if (!dlh) return result;
946
947 CFGetTypeID = (size_t(*)(void*)) dlsym(dlh, "CFGetTypeID");
948 _CFRuntimeGetClassWithTypeID = (void*(*)(size_t)) dlsym(dlh, "_CFRuntimeGetClassWithTypeID");
949
950 if (CFGetTypeID && _CFRuntimeGetClassWithTypeID) {
951 struct {
952 size_t version;
953 const char *className;
954 // don't care about the rest
955 } *cfcls;
956 size_t cfid;
957 cfid = (*CFGetTypeID)(cfobj);
958 cfcls = (*_CFRuntimeGetClassWithTypeID)(cfid);
959 result = cfcls->className;
960 }
961
962 dlclose(dlh);
963 return result;
964 }
965
966
967 static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount)
968 {
969 #define APPEND_SIZE(s) \
970 strcat(buf, "["); \
971 strcati(buf, s); \
972 strcat(buf, "]");
973
974 char buf[500];
975 char *result;
976
977 buf[0] = '\0';
978
979 size_t size =
980 auto_zone_size_no_lock(zone, (void *)base);
981 auto_memory_type_t type = size ?
982 auto_zone_get_layout_type_no_lock(zone, (void *)base) : AUTO_TYPE_UNKNOWN;
983 unsigned int refcount = size ?
984 auto_zone_retain_count_no_lock(zone, (void *)base) : 0;
985
986 switch (type) {
987 case AUTO_OBJECT_SCANNED:
988 case AUTO_OBJECT_UNSCANNED: {
989 const char *class_name = object_getClassName((id)base);
990 if (0 == strcmp(class_name, "NSCFType")) {
991 strcat(buf, cf_class_for_object((void *)base));
992 } else {
993 strcat(buf, class_name);
994 }
995 if (offset) {
996 append_ivar_at_offset(buf, object_getClass((id)base), offset);
997 }
998 APPEND_SIZE(size);
999 break;
1000 }
1001 case AUTO_MEMORY_SCANNED:
1002 strcat(buf, "{conservative-block}");
1003 APPEND_SIZE(size);
1004 break;
1005 case AUTO_MEMORY_UNSCANNED:
1006 strcat(buf, "{no-pointers-block}");
1007 APPEND_SIZE(size);
1008 break;
1009 default:
1010 strcat(buf, "{unallocated-or-stack}");
1011 }
1012
1013 if (withRetainCount && refcount > 0) {
1014 strcat(buf, " [[refcount=");
1015 strcati(buf, refcount);
1016 strcat(buf, "]]");
1017 }
1018
1019 result = malloc_zone_malloc(objc_debug_zone(), 1 + strlen(buf));
1020 strcpy(result, buf);
1021 return result;
1022
1023 #undef APPEND_SIZE
1024 }
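
// Example output (illustrative, hypothetical names): an object produces
// something like "NSCFString[32]", an interior pointer "MyClass._ivar+4[64]",
// and with withRetainCount set a nonzero count appends " [[refcount=1]]".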
1025
1026
1027 struct objc_class_recorder_context {
1028 malloc_zone_t *zone;
1029 void *cls;
1030 char *clsname;
1031 unsigned int count;
1032 };
1033
1034 static void objc_class_recorder(task_t task, void *context, unsigned type_mask,
1035 vm_range_t *ranges, unsigned range_count)
1036 {
1037 struct objc_class_recorder_context *ctx =
1038 (struct objc_class_recorder_context *)context;
1039
1040 vm_range_t *r;
1041 vm_range_t *end;
1042 for (r = ranges, end = ranges + range_count; r < end; r++) {
1043 auto_memory_type_t type =
1044 auto_zone_get_layout_type_no_lock(ctx->zone, (void *)r->address);
1045 if (type == AUTO_OBJECT_SCANNED || type == AUTO_OBJECT_UNSCANNED) {
1046 // Check if this is an instance of class ctx->cls or some subclass
1047 Class cls;
1048 Class isa = *(Class *)r->address;
1049 for (cls = isa; cls; cls = _class_getSuperclass(cls)) {
1050 if (cls == ctx->cls) {
1051 unsigned int rc;
1052 objc_debug_printf("[%p] : %s", r->address, _class_getName(isa));
1053 if ((rc = auto_zone_retain_count_no_lock(ctx->zone, (void *)r->address))) {
1054 objc_debug_printf(" [[refcount %u]]", rc);
1055 }
1056 objc_debug_printf("\n");
1057 ctx->count++;
1058 break;
1059 }
1060 }
1061 }
1062 }
1063 }
1064
1065 __private_extern__ void objc_enumerate_class(char *clsname)
1066 {
1067 struct objc_class_recorder_context ctx;
1068 ctx.zone = auto_zone();
1069 ctx.clsname = clsname;
1070 ctx.cls = objc_getClass(clsname); // GrP fixme may deadlock if classHash lock is already owned
1071 ctx.count = 0;
1072 if (!ctx.cls) {
1073 objc_debug_printf("No class '%s'\n", clsname);
1074 return;
1075 }
1076 objc_debug_printf("\n\nINSTANCES OF CLASS '%s':\n\n", clsname);
1077 (*ctx.zone->introspect->enumerator)(mach_task_self(), &ctx, MALLOC_PTR_IN_USE_RANGE_TYPE, (vm_address_t)ctx.zone, NULL, objc_class_recorder);
1078 objc_debug_printf("\n%d instances\n\n", ctx.count);
1079 }
1080
1081
1082 static void objc_reference_printer(auto_zone_t *zone, void *ctx,
1083 auto_reference_t ref)
1084 {
1085 char *referrer_name = name_for_address(zone, ref.referrer_base, ref.referrer_offset, true);
1086 char *referent_name = name_for_address(zone, ref.referent, 0, true);
1087
1088 objc_debug_printf("[%p%+d -> %p] : %s -> %s\n",
1089 ref.referrer_base, ref.referrer_offset, ref.referent,
1090 referrer_name, referent_name);
1091
1092 malloc_zone_free(objc_debug_zone(), referrer_name);
1093 malloc_zone_free(objc_debug_zone(), referent_name);
1094 }
1095
1096
1097 __private_extern__ void objc_print_references(void *referent, void *stack_bottom, int lock)
1098 {
1099 if (lock) {
1100 auto_enumerate_references(auto_zone(), referent,
1101 objc_reference_printer, stack_bottom, NULL);
1102 } else {
1103 auto_enumerate_references_no_lock(auto_zone(), referent,
1104 objc_reference_printer, stack_bottom, NULL);
1105 }
1106 }
1107
1108
1109
1110 typedef struct {
1111 vm_address_t address; // of this object
1112 int refcount; // of this object - nonzero means ROOT
1113 int depth; // number of links away from referent, or -1
1114 auto_reference_t *referrers; // of this object
1115 int referrers_used;
1116 int referrers_allocated;
1117 auto_reference_t back; // reference from this object back toward the target
1118 uint32_t ID; // Graphic ID for grafflization
1119 } blob;
1120
1121
1122 typedef struct {
1123 blob **list;
1124 unsigned int used;
1125 unsigned int allocated;
1126 } blob_queue;
1127
1128 static blob_queue blobs = {NULL, 0, 0};
1129 static blob_queue untraced_blobs = {NULL, 0, 0};
1130 static blob_queue root_blobs = {NULL, 0, 0};
1131
1132
1133 static void spin(void) {
1134 static time_t t = 0;
1135 time_t now = time(NULL);
1136 if (t != now) {
1137 objc_debug_printf(".");
1138 t = now;
1139 }
1140 }
1141
1142
1143 static void enqueue_blob(blob_queue *q, blob *b)
1144 {
1145 if (q->used == q->allocated) {
1146 q->allocated = q->allocated * 2 + 1;
1147 q->list = malloc_zone_realloc(objc_debug_zone(), q->list, q->allocated * sizeof(blob *));
1148 }
1149 q->list[q->used++] = b;
1150 }
1151
1152
1153 static blob *dequeue_blob(blob_queue *q)
1154 {
1155 blob *result = q->list[0];
1156 q->used--;
1157 memmove(&q->list[0], &q->list[1], q->used * sizeof(blob *));
1158 return result;
1159 }
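
// Note: enqueue_blob/dequeue_blob form a simple FIFO (dequeue shifts the array
// down with memmove), which is what makes the reference walk below proceed in
// breadth-first order and therefore find shortest root paths.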
1160
1161
1162 static blob *blob_for_address(vm_address_t addr)
1163 {
1164 blob *b, **bp, **end;
1165
1166 if (addr == 0) return NULL;
1167
1168 for (bp = blobs.list, end = blobs.list+blobs.used; bp < end; bp++) {
1169 b = *bp;
1170 if (b->address == addr) return b;
1171 }
1172
1173 b = malloc_zone_calloc(objc_debug_zone(), sizeof(blob), 1);
1174 b->address = addr;
1175 b->depth = -1;
1176 b->refcount = auto_zone_size_no_lock(auto_zone(), (void *)addr) ? auto_zone_retain_count_no_lock(auto_zone(), (void *)addr) : 1;
1177 enqueue_blob(&blobs, b);
1178 return b;
1179 }
1180
1181 static int blob_exists(vm_address_t addr)
1182 {
1183 blob *b, **bp, **end;
1184 for (bp = blobs.list, end = blobs.list+blobs.used; bp < end; bp++) {
1185 b = *bp;
1186 if (b->address == addr) return 1;
1187 }
1188 return 0;
1189 }
1190
1191
1192 // Destroy the blobs table and all blob data in it
1193 static void free_blobs(void)
1194 {
1195 blob *b, **bp, **end;
1196 for (bp = blobs.list, end = blobs.list+blobs.used; bp < end; bp++) {
1197 b = *bp;
1198 malloc_zone_free(objc_debug_zone(), b);
1199 }
1200 if (blobs.list) malloc_zone_free(objc_debug_zone(), blobs.list);
1201 }
1202
1203 static void print_chain(auto_zone_t *zone, blob *root)
1204 {
1205 blob *b;
1206 for (b = root; b != NULL; b = blob_for_address(b->back.referent)) {
1207 char *name;
1208 if (b->back.referent) {
1209 name = name_for_address(zone, b->address, b->back.referrer_offset, true);
1210 objc_debug_printf("[%p%+d] : %s ->\n", b->address, b->back.referrer_offset, name);
1211 } else {
1212 name = name_for_address(zone, b->address, 0, true);
1213 objc_debug_printf("[%p] : %s\n", b->address, name);
1214 }
1215 malloc_zone_free(objc_debug_zone(), name);
1216 }
1217 }
1218
1219
1220 static void objc_blob_recorder(auto_zone_t *zone, void *ctx,
1221 auto_reference_t ref)
1222 {
1223 blob *b = (blob *)ctx;
1224
1225 spin();
1226
1227 if (b->referrers_used == b->referrers_allocated) {
1228 b->referrers_allocated = b->referrers_allocated * 2 + 1;
1229 b->referrers = malloc_zone_realloc(objc_debug_zone(), b->referrers,
1230 b->referrers_allocated *
1231 sizeof(auto_reference_t));
1232 }
1233
1234 b->referrers[b->referrers_used++] = ref;
1235 if (!blob_exists(ref.referrer_base)) {
1236 enqueue_blob(&untraced_blobs, blob_for_address(ref.referrer_base));
1237 }
1238 }
1239
1240
1241 #define INSTANCE_ROOTS 1
1242 #define HEAP_ROOTS 2
1243 #define ALL_REFS 3
1244 static void objc_print_recursive_refs(vm_address_t target, int which, void *stack_bottom, int lock);
1245 static void grafflize(blob_queue *blobs, int everything);
1246
1247 __private_extern__ void objc_print_instance_roots(vm_address_t target, void *stack_bottom, int lock)
1248 {
1249 objc_print_recursive_refs(target, INSTANCE_ROOTS, stack_bottom, lock);
1250 }
1251
1252 __private_extern__ void objc_print_heap_roots(vm_address_t target, void *stack_bottom, int lock)
1253 {
1254 objc_print_recursive_refs(target, HEAP_ROOTS, stack_bottom, lock);
1255 }
1256
1257 __private_extern__ void objc_print_all_refs(vm_address_t target, void *stack_bottom, int lock)
1258 {
1259 objc_print_recursive_refs(target, ALL_REFS, stack_bottom, lock);
1260 }
1261
1262 static void sort_blobs_by_refcount(blob_queue *blobs)
1263 {
1264 int i, j;
1265
1266 // simple bubble sort
1267 for (i = 0; i < blobs->used; i++) {
1268 for (j = i+1; j < blobs->used; j++) {
1269 if (blobs->list[i]->refcount < blobs->list[j]->refcount) {
1270 blob *temp = blobs->list[i];
1271 blobs->list[i] = blobs->list[j];
1272 blobs->list[j] = temp;
1273 }
1274 }
1275 }
1276 }
1277
1278
1279 static void sort_blobs_by_depth(blob_queue *blobs)
1280 {
1281 int i, j;
1282
1283 // simple bubble sort
1284 for (i = 0; i < blobs->used; i++) {
1285 for (j = i+1; j < blobs->used; j++) {
1286 if (blobs->list[i]->depth > blobs->list[j]->depth) {
1287 blob *temp = blobs->list[i];
1288 blobs->list[i] = blobs->list[j];
1289 blobs->list[j] = temp;
1290 }
1291 }
1292 }
1293 }
1294
1295
1296 static void objc_print_recursive_refs(vm_address_t target, int which, void *stack_bottom, int lock)
1297 {
1298 objc_debug_printf("\n "); // make spinner draw in a pretty place
1299
1300 // Construct pointed-to graph (of things eventually pointing to target)
1301
1302 enqueue_blob(&untraced_blobs, blob_for_address(target));
1303
1304 while (untraced_blobs.used > 0) {
1305 blob *b = dequeue_blob(&untraced_blobs);
1306 spin();
1307 if (lock) {
1308 auto_enumerate_references(auto_zone(), (void *)b->address,
1309 objc_blob_recorder, stack_bottom, b);
1310 } else {
1311 auto_enumerate_references_no_lock(auto_zone(), (void *)b->address,
1312 objc_blob_recorder, stack_bottom, b);
1313 }
1314 }
1315
1316 // Walk pointed-to graph to find shortest paths from roots to target.
1317 // This is BREADTH-FIRST order.
1318
1319 blob_for_address(target)->depth = 0;
1320 enqueue_blob(&untraced_blobs, blob_for_address(target));
1321
1322 while (untraced_blobs.used > 0) {
1323 blob *b = dequeue_blob(&untraced_blobs);
1324 blob *other;
1325 auto_reference_t *r, *end;
1326 int stop = NO;
1327
1328 spin();
1329
1330 if (which == ALL_REFS) {
1331 // Never stop at roots.
1332 stop = NO;
1333 } else if (which == HEAP_ROOTS) {
1334 // Stop at any root (a block with positive retain count)
1335 stop = (b->refcount > 0);
1336 } else if (which == INSTANCE_ROOTS) {
1337 // Only stop at roots that are instances
1338 auto_memory_type_t type = auto_zone_get_layout_type_no_lock(auto_zone(), (void *)b->address);
1339 stop = (b->refcount > 0 && (type == AUTO_OBJECT_SCANNED || type == AUTO_OBJECT_UNSCANNED)); // GREG XXX ???
1340 }
1341
1342 // If this object is a root, save it and don't walk its referrers.
1343 if (stop) {
1344 enqueue_blob(&root_blobs, b);
1345 continue;
1346 }
1347
1348 // For any "other object" that points to "this object"
1349 // and does not yet have a depth:
1350 // (1) other object is one level deeper than this object
1351 // (2) (one of) the shortest path(s) from other object to the
1352 // target goes through this object
1353
1354 for (r = b->referrers, end = b->referrers + b->referrers_used;
1355 r < end;
1356 r++)
1357 {
1358 other = blob_for_address(r->referrer_base);
1359 if (other->depth == -1) {
1360 other->depth = b->depth + 1;
1361 other->back = *r;
1362 enqueue_blob(&untraced_blobs, other);
1363 }
1364 }
1365 }
1366
1367 {
1368 char *name = name_for_address(auto_zone(), target, 0, true);
1369 objc_debug_printf("\n\n%d %s %p (%s)\n\n",
1370 (which==ALL_REFS) ? blobs.used : root_blobs.used,
1371 (which==ALL_REFS) ? "INDIRECT REFS TO" : "ROOTS OF",
1372 target, name);
1373 malloc_zone_free(objc_debug_zone(), name);
1374 }
1375
1376 if (which == ALL_REFS) {
1377 // Print all reference objects, biggest refcount first
1378 int i;
1379 sort_blobs_by_refcount(&blobs);
1380 for (i = 0; i < blobs.used; i++) {
1381 char *name = name_for_address(auto_zone(), blobs.list[i]->address, 0, true);
1382 objc_debug_printf("[%p] : %s\n", blobs.list[i]->address, name);
1383 malloc_zone_free(objc_debug_zone(), name);
1384 }
1385 }
1386 else {
1387 // Walk back chain from every root to the target, printing every step.
1388
1389 while (root_blobs.used > 0) {
1390 blob *root = dequeue_blob(&root_blobs);
1391 print_chain(auto_zone(), root);
1392 objc_debug_printf("\n");
1393 }
1394 }
1395
1396 grafflize(&blobs, which == ALL_REFS);
1397
1398 objc_debug_printf("\ndone\n\n");
1399
1400 // Clean up
1401
1402 free_blobs();
1403 if (untraced_blobs.list) malloc_zone_free(objc_debug_zone(), untraced_blobs.list);
1404 if (root_blobs.list) malloc_zone_free(objc_debug_zone(), root_blobs.list);
1405
1406 memset(&blobs, 0, sizeof(blobs));
1407 memset(&root_blobs, 0, sizeof(root_blobs));
1408 memset(&untraced_blobs, 0, sizeof(untraced_blobs));
1409 }
1410
1411
1412
1413 struct objc_block_recorder_context {
1414 malloc_zone_t *zone;
1415 int fd;
1416 unsigned int count;
1417 };
1418
1419
1420 static void objc_block_recorder(task_t task, void *context, unsigned type_mask,
1421 vm_range_t *ranges, unsigned range_count)
1422 {
1423 char buf[20];
1424 struct objc_block_recorder_context *ctx =
1425 (struct objc_block_recorder_context *)context;
1426
1427 vm_range_t *r;
1428 vm_range_t *end;
1429 for (r = ranges, end = ranges + range_count; r < end; r++) {
1430 char *name = name_for_address(ctx->zone, r->address, 0, true);
1431 buf[0] = '\0';
1432 strcatx(buf, r->address);
1433
1434 write(ctx->fd, "0x", 2);
1435 write(ctx->fd, buf, strlen(buf));
1436 write(ctx->fd, " ", 1);
1437 write(ctx->fd, name, strlen(name));
1438 write(ctx->fd, "\n", 1);
1439
1440 malloc_zone_free(objc_debug_zone(), name);
1441 ctx->count++;
1442 }
1443 }
1444
1445
1446 __private_extern__ void objc_dump_block_list(const char* path)
1447 {
1448 struct objc_block_recorder_context ctx;
1449 char filename[] = "/tmp/blocks-XXXXX.txt";
1450
1451 ctx.zone = auto_zone();
1452 ctx.count = 0;
1453 ctx.fd = (path ? open(path, O_WRONLY | O_CREAT | O_TRUNC, 0666) : mkstemps(filename, (int)strlen(strrchr(filename, '.'))));
1454
1455 objc_debug_printf("\n\nALL AUTO-ALLOCATED BLOCKS\n\n");
1456 (*ctx.zone->introspect->enumerator)(mach_task_self(), &ctx, MALLOC_PTR_IN_USE_RANGE_TYPE, (vm_address_t)ctx.zone, NULL, objc_block_recorder);
1457 objc_debug_printf("%d blocks written to file\n", ctx.count);
1458 objc_debug_printf("open %s\n", (path ? path : filename));
1459
1460 close(ctx.fd);
1461 }
1462
1463
1464
1465
1466 static void grafflize_id(int gfile, int ID)
1467 {
1468 char buf[20] = "";
1469 char *c;
1470
1471 strcati(buf, ID);
1472 c = "<key>ID</key><integer>";
1473 write(gfile, c, strlen(c));
1474 write(gfile, buf, strlen(buf));
1475 c = "</integer>";
1476 write(gfile, c, strlen(c));
1477 }
1478
1479
1480 // head = REFERENT end = arrow
1481 // tail = REFERRER end = no arrow
1482 static void grafflize_reference(int gfile, auto_reference_t reference,
1483 int ID, int important)
1484 {
1485 blob *referrer = blob_for_address(reference.referrer_base);
1486 blob *referent = blob_for_address(reference.referent);
1487 char *c;
1488
1489 // line
1490 c = "<dict><key>Class</key><string>LineGraphic</string>";
1491 write(gfile, c, strlen(c));
1492
1493 // id
1494 grafflize_id(gfile, ID);
1495
1496 // head = REFERENT
1497 c = "<key>Head</key><dict>";
1498 write(gfile, c, strlen(c));
1499 grafflize_id(gfile, referent->ID);
1500 c = "</dict>";
1501 write(gfile, c, strlen(c));
1502
1503 // tail = REFERRER
1504 c = "<key>Tail</key><dict>";
1505 write(gfile, c, strlen(c));
1506 grafflize_id(gfile, referrer->ID);
1507 c = "</dict>";
1508 write(gfile, c, strlen(c));
1509
1510 // style - head arrow, thick line if important
1511 c = "<key>Style</key><dict><key>stroke</key><dict>"
1512 "<key>HeadArrow</key><string>FilledArrow</string>"
1513 "<key>LineType</key><integer>1</integer>";
1514 write(gfile, c, strlen(c));
1515 if (important) {
1516 c = "<key>Width</key><real>3</real>";
1517 write(gfile, c, strlen(c));
1518 }
1519 c = "</dict></dict>";
1520 write(gfile, c, strlen(c));
1521
1522 // end line
1523 c = "</dict>";
1524 write(gfile, c, strlen(c));
1525 }
1526
1527
1528 static void grafflize_blob(int gfile, blob *b)
1529 {
1530 // fixme include ivar names too
1531 char *name = name_for_address(auto_zone(), b->address, 0, false);
1532 int width = 30 + (int)strlen(name)*6;
1533 int height = 40;
1534 char buf[40] = "";
1535 char *c;
1536
1537 // rectangle
1538 c = "<dict>"
1539 "<key>Class</key><string>ShapedGraphic</string>"
1540 "<key>Shape</key><string>Rectangle</string>";
1541 write(gfile, c, strlen(c));
1542
1543 // id
1544 grafflize_id(gfile, b->ID);
1545
1546 // bounds
1547 // order vertically by depth
1548 c = "<key>Bounds</key><string>{{0,";
1549 write(gfile, c, strlen(c));
1550 buf[0] = '\0';
1551 strcati(buf, b->depth*60);
1552 write(gfile, buf, strlen(buf));
1553 c = "},{";
1554 write(gfile, c, strlen(c));
1555 buf[0] = '\0';
1556 strcati(buf, width);
1557 strcat(buf, ",");
1558 strcati(buf, height);
1559 write(gfile, buf, strlen(buf));
1560 c = "}}</string>";
1561 write(gfile, c, strlen(c));
1562
1563 // label
1564 c = "<key>Text</key><dict><key>Text</key>"
1565 "<string>{\\rtf1\\mac\\ansicpg10000\\cocoartf102\n"
1566 "{\\fonttbl\\f0\\fswiss\\fcharset77 Helvetica;\\fonttbl\\f1\\fswiss\\fcharset77 Helvetica-Bold;}\n"
1567 "{\\colortbl;\\red255\\green255\\blue255;}\n"
1568 "\\pard\\tx560\\tx1120\\tx1680\\tx2240\\tx3360\\tx3920\\tx4480\\tx5040\\tx5600\\tx6160\\tx6720\\qc\n"
1569 "\\f0\\fs20 \\cf0 ";
1570 write(gfile, c, strlen(c));
1571 write(gfile, name, strlen(name));
1572 strcpy(buf, "\\\n0x");
1573 strcatx(buf, b->address);
1574 write(gfile, buf, strlen(buf));
1575 c = "}</string></dict>";
1576 write(gfile, c, strlen(c));
1577
1578 // styles
1579 c = "<key>Style</key><dict>";
1580 write(gfile, c, strlen(c));
1581
1582 // no shadow
1583 c = "<key>shadow</key><dict><key>Draws</key><string>NO</string></dict>";
1584 write(gfile, c, strlen(c));
1585
1586 // fat border if refcount > 0
1587 if (b->refcount > 0) {
1588 c = "<key>stroke</key><dict><key>Width</key><real>4</real></dict>";
1589 write(gfile, c, strlen(c));
1590 }
1591
1592 // end styles
1593 c = "</dict>";
1594 write(gfile, c, strlen(c));
1595
1596 // done
1597 c = "</dict>\n";
1598 write(gfile, c, strlen(c));
1599
1600 malloc_zone_free(objc_debug_zone(), name);
1601 }
1602
1603
1604 #define gheader "<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\"><plist version=\"1.0\"><dict><key>GraphDocumentVersion</key><integer>3</integer><key>ReadOnly</key><string>NO</string><key>GraphicsList</key><array>\n"
1605
1606 #define gfooter "</array></dict></plist>\n"
1607
1608
1609 static void grafflize(blob_queue *blobs, int everything)
1610 {
1611 // Don't require linking to Foundation!
1612 int i;
1613 int gfile;
1614 int nextid = 1;
1615 char filename[] = "/tmp/gc-XXXXX.graffle";
1616
1617 // Open file
1618 gfile = mkstemps(filename, (int)strlen(strrchr(filename, '.')));
1619 if (gfile < 0) {
1620 objc_debug_printf("couldn't create a graffle file in /tmp/ (errno %d)\n", errno);
1621 return;
1622 }
1623
1624 // Write header
1625 write(gfile, gheader, strlen(gheader));
1626
1627 // Write a rectangle for each blob
1628 sort_blobs_by_depth(blobs);
1629 for (i = 0; i < blobs->used; i++) {
1630 blob *b = blobs->list[i];
1631 b->ID = nextid++;
1632 if (everything || b->depth >= 0) {
1633 grafflize_blob(gfile, b);
1634 }
1635 }
1636
1637 for (i = 0; i < blobs->used; i++) {
1638 int j;
1639 blob *b = blobs->list[i];
1640
1641 if (everything) {
1642 // Write an arrow for each reference
1643 // Use big arrows for backreferences
1644 for (j = 0; j < b->referrers_used; j++) {
1645                 int is_back_ref = (b->referrers[j].referent == b->back.referent  &&  b->referrers[j].referrer_offset == b->back.referrer_offset  &&  b->referrers[j].referrer_base == b->back.referrer_base);
1646
1647 grafflize_reference(gfile, b->referrers[j], nextid++,
1648 is_back_ref);
1649 }
1650 }
1651 else {
1652 // Write an arrow for each backreference
1653 if (b->depth > 0) {
1654 grafflize_reference(gfile, b->back, nextid++, false);
1655 }
1656 }
1657 }
1658
1659 // Write footer and close
1660 write(gfile, gfooter, strlen(gfooter));
1661 close(gfile);
1662 objc_debug_printf("wrote object graph (%d objects)\nopen %s\n",
1663 blobs->used, filename);
1664 }