/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#import "objc-auto.h"

#ifndef OBJC_NO_GC

#import <stdint.h>
#import <stdbool.h>
#import <fcntl.h>
#import <dlfcn.h>
#import <mach/mach.h>
#import <mach-o/dyld.h>
#import <sys/types.h>
#import <sys/mman.h>
#import <libkern/OSAtomic.h>
#import <auto_zone.h>

#import <Block_private.h>
#include <dispatch/dispatch.h>

#define OLD 1
#import "objc-private.h"
#import "objc-references.h"
#import "objc-rtp.h"
#import "maptable.h"
#import "message.h"
#import "objc-gdb.h"


static auto_zone_t *gc_zone_init(void);
static void gc_block_init(void);
static void registeredClassTableInit(void);


__private_extern__ BOOL UseGC NOBSS = NO;
static BOOL MultiThreadedGC = NO;
static BOOL WantsMainThreadFinalization = NO;

__private_extern__ auto_zone_t *gc_zone = NULL;

// Pointer magic to make dyld happy. See notes in objc-private.h
__private_extern__ id (*objc_assign_ivar_internal)(id, id, ptrdiff_t) = objc_assign_ivar;


/* Method prototypes */
@interface DoesNotExist
- (const char *)UTF8String;
- (id)description;
@end


/***********************************************************************
* Utility exports
* Called by various libraries.
**********************************************************************/

OBJC_EXPORT void objc_set_collection_threshold(size_t threshold) { // old naming
    if (UseGC) {
        auto_collection_parameters(gc_zone)->collection_threshold = threshold;
    }
}

OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold) {
    if (UseGC) {
        auto_collection_parameters(gc_zone)->collection_threshold = threshold;
    }
}

void objc_setCollectionRatio(size_t ratio) {
    if (UseGC) {
        auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
    }
}

void objc_set_collection_ratio(size_t ratio) { // old naming
    if (UseGC) {
        auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
    }
}
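// Usage sketch (illustrative only; the numbers below are arbitrary, not
// recommended defaults): a client that wants less frequent, larger
// collections might tune the collector like this.
//
//     objc_setCollectionThreshold(8 * 1024 * 1024);  // bytes allocated between collections
//     objc_setCollectionRatio(100);                  // generational collections per full collection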

void objc_finalizeOnMainThread(Class cls) {
    if (UseGC) {
        WantsMainThreadFinalization = YES;
        _class_setFinalizeOnMainThread(cls);
    }
}

// Stack-based data structure, queued when there is main-thread-only finalization work to be done.
typedef struct BatchFinalizeBlock {
    auto_zone_foreach_object_t foreach;
    auto_zone_cursor_t cursor;
    size_t cursor_size;
    volatile BOOL finished;
    volatile BOOL started;
    struct BatchFinalizeBlock *next;
} BatchFinalizeBlock_t;

// The main thread finalization work queue head.
static struct {
    pthread_mutex_t mutex;
    pthread_cond_t condition;
    BatchFinalizeBlock_t *head;
    BatchFinalizeBlock_t *tail;
} MainThreadWorkQ;


void objc_startCollectorThread(void) {
    static int didOnce = 0;
    if (!UseGC) return;
    if (!didOnce) {
        didOnce = 1;

        // initialize the batch finalization queue
        MainThreadWorkQ.head = NULL;
        MainThreadWorkQ.tail = NULL;
        pthread_mutex_init(&MainThreadWorkQ.mutex, NULL);
        pthread_cond_init(&MainThreadWorkQ.condition, NULL);
        auto_collect_multithreaded(gc_zone);
        MultiThreadedGC = YES;
    }
}

void objc_start_collector_thread(void) {
    objc_startCollectorThread();
}

static void batchFinalizeOnMainThread(void);

void objc_collect(unsigned long options) {
    if (!UseGC) return;
    BOOL onMainThread = pthread_main_np() ? YES : NO;

    if (MultiThreadedGC || onMainThread) {
        // while we're here, sneak off and do some finalization work (if any)
        if (MultiThreadedGC && onMainThread) batchFinalizeOnMainThread();
        // now on with our regularly scheduled programming
        auto_collection_mode_t amode = AUTO_COLLECT_RATIO_COLLECTION;
        switch (options & 0x3) {
            case OBJC_RATIO_COLLECTION:        amode = AUTO_COLLECT_RATIO_COLLECTION;        break;
            case OBJC_GENERATIONAL_COLLECTION: amode = AUTO_COLLECT_GENERATIONAL_COLLECTION; break;
            case OBJC_FULL_COLLECTION:         amode = AUTO_COLLECT_FULL_COLLECTION;         break;
            case OBJC_EXHAUSTIVE_COLLECTION:   amode = AUTO_COLLECT_EXHAUSTIVE_COLLECTION;   break;
        }
        if (options & OBJC_COLLECT_IF_NEEDED) amode |= AUTO_COLLECT_IF_NEEDED;
        if (options & OBJC_WAIT_UNTIL_DONE)   amode |= AUTO_COLLECT_SYNCHRONOUS;  // uses different bits
        auto_collect(gc_zone, amode, NULL);
    }
    else {
        dispatch_async(dispatch_get_main_queue(), ^{ objc_collect(options); });
    }
}
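// Usage sketch (illustrative only): request an exhaustive collection and
// block until it completes, or hint a generational collection only if the
// allocation threshold has been crossed.
//
//     objc_collect(OBJC_EXHAUSTIVE_COLLECTION | OBJC_WAIT_UNTIL_DONE);
//     objc_collect(OBJC_GENERATIONAL_COLLECTION | OBJC_COLLECT_IF_NEEDED);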

// USED BY CF & ONE OTHER
BOOL objc_isAuto(id object)
{
    return UseGC && auto_zone_is_valid_pointer(gc_zone, object) != 0;
}


BOOL objc_collectingEnabled(void)
{
    return UseGC;
}

BOOL objc_collecting_enabled(void) // old naming
{
    return UseGC;
}

BOOL objc_dumpHeap(char *filenamebuffer, unsigned long length) {
    static int counter = 0;
    ++counter;
    char buffer[1024];
    snprintf(buffer, sizeof(buffer), OBJC_HEAP_DUMP_FILENAME_FORMAT, getpid(), counter);
    if (!_objc_dumpHeap(gc_zone, buffer)) return NO;
    if (filenamebuffer) {
        unsigned long blen = strlen(buffer);
        if (blen < length)
            strncpy(filenamebuffer, buffer, blen+1);
        else if (length > 0)
            filenamebuffer[0] = 0;  // give some answer
    }
    return YES;
}
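// Usage sketch (illustrative only): dump the GC heap and learn which file
// name was chosen.
//
//     char path[1024];
//     if (objc_dumpHeap(path, sizeof(path))) {
//         // path now names the dump file (the OBJC_HEAP_DUMP_FILENAME_FORMAT
//         // expansion for this pid and dump counter).
//     }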

/***********************************************************************
* Memory management.
* Called by CF and Foundation.
**********************************************************************/

// Allocate an object in the GC zone, with the given number of extra bytes.
id objc_allocate_object(Class cls, int extra)
{
    return class_createInstance(cls, extra);
}


/***********************************************************************
* Write barrier implementations, optimized for when GC is known to be on
* Called by the write barrier exports only.
* These implementations assume GC is on. The exported function must
* either perform the check itself or be conditionally stomped at
* startup time.
**********************************************************************/

__private_extern__ id objc_assign_strongCast_gc(id value, id *slot)
{
    if (!auto_zone_set_write_barrier(gc_zone, (void*)slot, value)) {  // stores & returns true if slot points into GC allocated memory
        auto_zone_root_write_barrier(gc_zone, slot, value);           // always stores
    }
    return value;
}

__private_extern__ id objc_assign_global_gc(id value, id *slot) {
    // use explicit root registration.
    if (value && auto_zone_is_valid_pointer(gc_zone, value)) {
        if (auto_zone_is_finalized(gc_zone, value)) {
            __private_extern__ void objc_assign_global_error(id value, id *slot);

            _objc_inform("GC: storing an already collected object %p into global memory at %p, break on objc_assign_global_error to debug\n", value, slot);
            objc_assign_global_error(value, slot);
        }
        auto_zone_add_root(gc_zone, slot, value);
    }
    else
        *slot = value;

    return value;
}


__private_extern__ id objc_assign_ivar_gc(id value, id base, ptrdiff_t offset)
{
    id *slot = (id*) ((char *)base + offset);

    if (value) {
        if (!auto_zone_set_write_barrier(gc_zone, (char *)base + offset, value)) {
            __private_extern__ void objc_assign_ivar_error(id base, ptrdiff_t offset);

            _objc_inform("GC: %p + %tu isn't in the auto_zone, break on objc_assign_ivar_error to debug.\n", base, offset);
            objc_assign_ivar_error(base, offset);
        }
    }
    else
        *slot = value;

    return value;
}


/***********************************************************************
* Write barrier exports
* Called by pretty much all GC-supporting code.
*
* These "generic" implementations, available on PPC, are thought to be
* called by Rosetta when it translates the bla instruction.
**********************************************************************/

// Platform-independent write barriers
// These contain the UseGC check that the platform-specific
// runtime-rewritten implementations do not.

id objc_assign_strongCast_generic(id value, id *dest)
{
    if (UseGC) {
        return objc_assign_strongCast_gc(value, dest);
    } else {
        return (*dest = value);
    }
}


id objc_assign_global_generic(id value, id *dest)
{
    if (UseGC) {
        return objc_assign_global_gc(value, dest);
    } else {
        return (*dest = value);
    }
}


id objc_assign_ivar_generic(id value, id dest, ptrdiff_t offset)
{
    if (UseGC) {
        return objc_assign_ivar_gc(value, dest, offset);
    } else {
        id *slot = (id*) ((char *)dest + offset);
        return (*slot = value);
    }
}
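// Illustrative mapping (an assumption about GC codegen, not something emitted
// by this file): under garbage collection the compiler rewrites qualified
// stores into calls to these barrier entry points, roughly:
//
//     someGlobal = obj;         // => objc_assign_global(obj, &someGlobal)
//     self->ivar = obj;         // => objc_assign_ivar(obj, self, offset-of-ivar)
//     *(__strong id *)p = obj;  // => objc_assign_strongCast(obj, p)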

#if defined(__ppc__) || defined(__i386__)

// PPC write barriers are in objc-auto-ppc.s
// write_barrier_init conditionally stomps those to jump to the _impl versions.

// These 3 functions are defined in objc-auto-i386.s as
// the non-GC variants. Under GC, rtp_init stomps them with jumps to
// objc_assign_*_gc.

#else

// use generic implementation until time can be spent on optimizations
id objc_assign_strongCast(id value, id *dest) { return objc_assign_strongCast_generic(value, dest); }
id objc_assign_global(id value, id *dest) { return objc_assign_global_generic(value, dest); }
id objc_assign_ivar(id value, id dest, ptrdiff_t offset) { return objc_assign_ivar_generic(value, dest, offset); }

// !defined(__ppc__) && !defined(__i386__)
#endif


void *objc_memmove_collectable(void *dst, const void *src, size_t size)
{
    if (UseGC) {
        return auto_zone_write_barrier_memmove(gc_zone, dst, src, size);
    } else {
        return memmove(dst, src, size);
    }
}

BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL issueMemoryBarrier = NO;
    if (UseGC)
        return auto_zone_atomicCompareAndSwapPtr(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL issueMemoryBarrier = YES;
    if (UseGC)
        return auto_zone_atomicCompareAndSwapPtr(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL isGlobal = YES;
    const BOOL issueMemoryBarrier = NO;
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL isGlobal = YES;
    const BOOL issueMemoryBarrier = YES;
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL isGlobal = NO;
    const BOOL issueMemoryBarrier = NO;
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL isGlobal = NO;
    const BOOL issueMemoryBarrier = YES;
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}
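// Usage sketch (illustrative only; SomeClass is hypothetical): lazily publish
// a shared object exactly once, with a barrier because other threads will
// dereference it immediately.
//
//     static id gShared = nil;
//     id candidate = [[SomeClass alloc] init];
//     if (!objc_atomicCompareAndSwapGlobalBarrier(nil, candidate, &gShared)) {
//         // another thread won the race; use gShared instead of candidate
//     }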

/***********************************************************************
* CF-only write barrier exports
* Called by CF only.
**********************************************************************/

// Exported as very private SPI to CF
void* objc_assign_ivar_address_CF(void *value, void *base, void **slot)
{
    // CF has already checked that *slot is a gc block so this should never fail
    if (!auto_zone_set_write_barrier(gc_zone, slot, value))
        *slot = value;
    return value;
}


// Exported as very private SPI to CF
void* objc_assign_strongCast_CF(void* value, void **slot)
{
    // CF has already checked that *slot is a gc block so this should never fail
    if (!auto_zone_set_write_barrier(gc_zone, slot, value))
        *slot = value;
    return value;
}

__private_extern__ void gc_fixup_weakreferences(id newObject, id oldObject) {
    // fix up weak references, if any.
    const unsigned char *weakLayout = (const unsigned char *)class_getWeakIvarLayout(newObject->isa);
    if (weakLayout) {
        void **newPtr = (void **)newObject, **oldPtr = (void **)oldObject;
        unsigned char byte;
        while ((byte = *weakLayout++)) {
            unsigned skips = (byte >> 4);
            unsigned weaks = (byte & 0x0F);
            newPtr += skips, oldPtr += skips;
            while (weaks--) {
                *newPtr = NULL;
                auto_assign_weak_reference(gc_zone, auto_read_weak_reference(gc_zone, oldPtr), newPtr, NULL);
                ++newPtr, ++oldPtr;
            }
        }
    }
}
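// Layout-byte example (illustrative only): each nonzero byte of the weak ivar
// layout packs a skip count in its high nibble and a weak-slot count in its
// low nibble. A layout of { 0x21, 0x00 } therefore means "skip 2 pointer-sized
// words, then re-register 1 weak reference", exactly as the loop above decodes:
//
//     unsigned char byte = 0x21;
//     unsigned skips = byte >> 4;    // 2 words holding no weak references
//     unsigned weaks = byte & 0x0F;  // 1 weak slot to fix up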

/***********************************************************************
* Weak ivar support
**********************************************************************/

id objc_read_weak(id *location) {
    id result = *location;
    if (UseGC && result) {
        result = auto_read_weak_reference(gc_zone, (void **)location);
    }
    return result;
}

id objc_assign_weak(id value, id *location) {
    if (UseGC) {
        auto_assign_weak_reference(gc_zone, value, (void **)location, NULL);
    }
    else {
        *location = value;
    }
    return value;
}
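// Usage sketch (illustrative only): a __weak slot must be written and read
// through these entry points so the collector can track and zero it.
//
//     static id weakSlot;                       // hypothetical weak-qualified storage
//     objc_assign_weak(obj, &weakSlot);         // register the weak reference
//     id current = objc_read_weak(&weakSlot);   // nil once obj has been collected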

/* Associative Reference Support. */

id objc_getAssociatedObject(id object, void *key) {
    if (UseGC) {
        return auto_zone_get_associative_ref(gc_zone, object, key);
    } else {
        return _object_get_associative_reference(object, key);
    }
}

void objc_setAssociatedObject(id object, void *key, id value, objc_AssociationPolicy policy) {
    if (UseGC) {
        if ((policy & OBJC_ASSOCIATION_COPY_NONATOMIC) == OBJC_ASSOCIATION_COPY_NONATOMIC) {
            value = objc_msgSend(value, @selector(copy));
        }
        auto_zone_set_associative_ref(gc_zone, object, key, value);
    } else {
        // Note: creates a retained reference in non-GC.
        _object_set_associative_reference(object, key, value, policy);
    }
}

void objc_removeAssociatedObjects(id object) {
    if (UseGC) {
        auto_zone_erase_associative_refs(gc_zone, object);
    } else {
        if (_class_instancesHaveAssociatedObjects(object->isa)) _object_remove_assocations(object);
    }
}

BOOL class_instancesHaveAssociatedObjects(Class cls) {
    return _class_instancesHaveAssociatedObjects(cls);
}

id objc_getAssociatedProperties(id object, Class dataClass) {
    id data = objc_getAssociatedObject(object, dataClass);
    if (data == nil) {
        // FIXME: Need to make this atomic.
        data = objc_msgSend(dataClass, @selector(new));
        objc_setAssociatedObject(object, dataClass, data, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
        objc_msgSend(data, @selector(release));
    }
    return data;
}
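// Usage sketch (illustrative only): attach a value to an arbitrary object,
// keyed by the address of a static so the key is unique to the call site.
//
//     static char kAssocKey;   // hypothetical key
//     objc_setAssociatedObject(host, &kAssocKey, value, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
//     id stored = objc_getAssociatedObject(host, &kAssocKey);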

/***********************************************************************
* Testing tools
* Used to isolate resurrection of garbage objects during finalization.
**********************************************************************/
BOOL objc_is_finalized(void *ptr) {
    if (ptr != NULL && UseGC) {
        return auto_zone_is_finalized(gc_zone, ptr);
    }
    return NO;
}


/***********************************************************************
* Stack management
* Used to clean up dirty stack frames before a thread blocks. To
* make this more efficient, we really need better support from pthreads.
* See <rdar://problem/4548631> for more details.
**********************************************************************/

static vm_address_t _stack_resident_base() {
    pthread_t self = pthread_self();
    size_t stack_size = pthread_get_stacksize_np(self);
    vm_address_t stack_base = (vm_address_t)pthread_get_stackaddr_np(self) - stack_size;
    size_t stack_page_count = stack_size / vm_page_size;
    char stack_residency[stack_page_count];
    vm_address_t stack_resident_base = 0;
    if (mincore((void*)stack_base, stack_size, stack_residency) == 0) {
        // we can now tell the degree to which the stack is resident, and use it as our ultimate high water mark.
        size_t i;
        for (i = 0; i < stack_page_count; ++i) {
            if (stack_residency[i]) {
                stack_resident_base = stack_base + i * vm_page_size;
                // malloc_printf("last touched page = %lu\n", stack_page_count - i - 1);
                break;
            }
        }
    }
    return stack_resident_base;
}

void objc_clear_stack(unsigned long options) {
    if (!UseGC) return;
    auto_zone_clear_stack(gc_zone, 0);
}

/***********************************************************************
* Finalization support
**********************************************************************/

static IMP _NSObject_finalize = NULL;

// Finalizer crash debugging
static void *finalizing_object;
static const char *__crashreporter_info__;


// finalize a single object without fuss
// When there are no main-thread-only classes this is used directly.
// Otherwise, it is used indirectly by smarter code that knows main-thread-affinity requirements.
static void finalizeOneObject(void *obj, void *ignored) {
    id object = (id)obj;
    finalizing_object = obj;
    __crashreporter_info__ = object_getClassName(obj);

    // Call the -finalize method.
    objc_msgSend(object, @selector(finalize));
    // Call C++ destructors, if any.
    object_cxxDestruct(object);

    finalizing_object = NULL;
    __crashreporter_info__ = NULL;
}

// finalize an object only if it is a main-thread-only object.
// Called only from the main thread.
static void finalizeOneMainThreadOnlyObject(void *obj, void *arg) {
    id object = (id)obj;
    Class cls = object->isa;
    if (cls == NULL) {
        _objc_fatal("object with NULL ISA passed to finalizeOneMainThreadOnlyObject: %p\n", obj);
    }
    if (_class_shouldFinalizeOnMainThread(cls)) {
        finalizeOneObject(obj, NULL);
    }
}

// finalize one object only if it is not a main-thread-only object.
// Called from any thread other than the main thread.
// Important: if a main-thread-only object is passed, return that fact in the needsMain argument.
static void finalizeOneAnywhereObject(void *obj, void *needsMain) {
    id object = (id)obj;
    Class cls = object->isa;
    bool *needsMainThreadWork = needsMain;
    if (cls == NULL) {
        _objc_fatal("object with NULL ISA passed to finalizeOneAnywhereObject: %p\n", obj);
    }
    if (!_class_shouldFinalizeOnMainThread(cls)) {
        finalizeOneObject(obj, NULL);
    }
    else {
        *needsMainThreadWork = true;
    }
}


// Utility workhorse.
// Set up the expensive @try block and ask the collector to hand the next object to
// our finalizeAnObject function.
// Track and return a boolean that records whether or not any main thread work is necessary.
// (When we know that there are no main-thread-only objects, the boolean isn't even computed.)
static bool batchFinalize(auto_zone_t *zone,
                          auto_zone_foreach_object_t foreach,
                          auto_zone_cursor_t cursor,
                          size_t cursor_size,
                          void (*finalizeAnObject)(void *, void*))
{
    bool needsMainThreadWork = false;
    for (;;) {
        @try {
            foreach(cursor, finalizeAnObject, &needsMainThreadWork);
            // non-exceptional return means finalization is complete.
            break;
        } @catch (id exception) {
            // whoops, note the exception, then restart at the cursor's position
            __private_extern__ void objc_exception_during_finalize_error(void);
            _objc_inform("GC: -finalize resulted in an exception (%p) being thrown, break on objc_exception_during_finalize_error to debug\n\t%s", exception, (const char*)[[exception description] UTF8String]);
            objc_exception_during_finalize_error();
        }
    }
    return needsMainThreadWork;
}

// Called on the main thread only.
// Picks up work from the global queue.
// Called parasitically by anyone requesting a collection, and
// called explicitly when there is known to be main-thread-only finalization work.
// In both cases we are on the main thread.
// Guards against recursion by something called from a finalizer.
static void batchFinalizeOnMainThread() {
    pthread_mutex_lock(&MainThreadWorkQ.mutex);
    if (!MainThreadWorkQ.head || MainThreadWorkQ.head->started) {
        // No work, or we're already here.
        pthread_mutex_unlock(&MainThreadWorkQ.mutex);
        return;
    }
    while (MainThreadWorkQ.head) {
        BatchFinalizeBlock_t *bfb = MainThreadWorkQ.head;
        bfb->started = YES;
        pthread_mutex_unlock(&MainThreadWorkQ.mutex);

        batchFinalize(gc_zone, bfb->foreach, bfb->cursor, bfb->cursor_size, finalizeOneMainThreadOnlyObject);
        // signal the collector thread(s) that finalization has finished.
        pthread_mutex_lock(&MainThreadWorkQ.mutex);
        bfb->finished = YES;
        pthread_cond_broadcast(&MainThreadWorkQ.condition);
        MainThreadWorkQ.head = bfb->next;
    }
    MainThreadWorkQ.tail = NULL;
    pthread_mutex_unlock(&MainThreadWorkQ.mutex);
}


// Knowing that we possibly have main-thread-only work to do, first process everything
// that is not main-thread-only. If we discover main-thread-only work, queue a work block
// to the main thread that will do just the main-thread-only work, and wait for it.
// Called from a thread other than the main thread.
static void batchFinalizeOnTwoThreads(auto_zone_t *zone,
                                      auto_zone_foreach_object_t foreach,
                                      auto_zone_cursor_t cursor,
                                      size_t cursor_size)
{
    // First, let's get rid of everything we can on this thread, then ask the main thread to help if needed.
    char cursor_copy[cursor_size];
    memcpy(cursor_copy, cursor, cursor_size);
    bool needsMainThreadFinalization = batchFinalize(zone, foreach, (auto_zone_cursor_t)cursor_copy, cursor_size, finalizeOneAnywhereObject);

    if (!needsMainThreadFinalization)
        return;  // no help needed

    // Set up the control block. Either our dispatch_async ping of the main thread will get to it, or
    // an objc_collect(if_needed) will get to it. Either way, this block will be processed on the main thread.
    BatchFinalizeBlock_t bfb;
    bfb.foreach = foreach;
    bfb.cursor = cursor;
    bfb.cursor_size = cursor_size;
    bfb.started = NO;
    bfb.finished = NO;
    bfb.next = NULL;
    pthread_mutex_lock(&MainThreadWorkQ.mutex);
    if (MainThreadWorkQ.tail) {
        // link to the end so that ordering of finalization is preserved.
        MainThreadWorkQ.tail->next = &bfb;
        MainThreadWorkQ.tail = &bfb;
    }
    else {
        MainThreadWorkQ.head = &bfb;
        MainThreadWorkQ.tail = &bfb;
    }
    pthread_mutex_unlock(&MainThreadWorkQ.mutex);

    //printf("----->asking main thread to finalize\n");
    dispatch_async(dispatch_get_main_queue(), ^{ batchFinalizeOnMainThread(); });

    // wait for the main thread to finish finalizing instances of classes marked CLS_FINALIZE_ON_MAIN_THREAD.
    pthread_mutex_lock(&MainThreadWorkQ.mutex);
    while (!bfb.finished) pthread_cond_wait(&MainThreadWorkQ.condition, &MainThreadWorkQ.mutex);
    pthread_mutex_unlock(&MainThreadWorkQ.mutex);
    //printf("<------ main thread finalize done\n");
}



// The collector calls this with garbage ready to be finalized.
// Threaded collectors call it too, so it needs to be thread-safe.
static void BatchInvalidate(auto_zone_t *zone,
                            auto_zone_foreach_object_t foreach,
                            auto_zone_cursor_t cursor,
                            size_t cursor_size)
{
    if (pthread_main_np() || !WantsMainThreadFinalization) {
        // Collect all objects. We're either pre-multithreaded on the main thread, or we're on the
        // collector thread but no main-thread-only objects have been allocated.
        batchFinalize(zone, foreach, cursor, cursor_size, finalizeOneObject);
    }
    else {
        // We're on the dedicated thread. Collect some on the main thread, the rest here.
        batchFinalizeOnTwoThreads(zone, foreach, cursor, cursor_size);
    }
}


/*
 * Zombie support
 * The collector calls into this system when it finds resurrected objects.
 * This keeps them pitifully alive and leaked, even if they reference garbage.
 */

// idea: keep a side table mapping resurrected object pointers to their original Class, so we don't
// need to smash anything. alternatively, could use associative references to track against a secondary
// object with information about the resurrection, such as a stack crawl, etc.

static Class _NSResurrectedObjectClass;
static NXMapTable *_NSResurrectedObjectMap = NULL;
static pthread_mutex_t _NSResurrectedObjectLock = PTHREAD_MUTEX_INITIALIZER;

static Class resurrectedObjectOriginalClass(id object) {
    Class originalClass;
    pthread_mutex_lock(&_NSResurrectedObjectLock);
    originalClass = (Class) NXMapGet(_NSResurrectedObjectMap, object);
    pthread_mutex_unlock(&_NSResurrectedObjectLock);
    return originalClass;
}

static id _NSResurrectedObject_classMethod(id self, SEL selector) { return self; }

static id _NSResurrectedObject_instanceMethod(id self, SEL name) {
    _objc_inform("**resurrected** object %p of class %s being sent message '%s'\n", self, class_getName(resurrectedObjectOriginalClass(self)), sel_getName(name));
    return self;
}

static void _NSResurrectedObject_finalize(id self, SEL _cmd) {
    Class originalClass;
    pthread_mutex_lock(&_NSResurrectedObjectLock);
    originalClass = (Class) NXMapRemove(_NSResurrectedObjectMap, self);
    pthread_mutex_unlock(&_NSResurrectedObjectLock);
    if (originalClass) _objc_inform("**resurrected** object %p of class %s being finalized\n", self, class_getName(originalClass));
    _NSObject_finalize(self, _cmd);
}

static BOOL _NSResurrectedObject_resolveInstanceMethod(id self, SEL _cmd, SEL name) {
    class_addMethod((Class)self, name, (IMP)_NSResurrectedObject_instanceMethod, "@@:");
    return YES;
}

static BOOL _NSResurrectedObject_resolveClassMethod(id self, SEL _cmd, SEL name) {
    class_addMethod(object_getClass(self), name, (IMP)_NSResurrectedObject_classMethod, "@@:");
    return YES;
}

static void _NSResurrectedObject_initialize() {
    _NSResurrectedObjectMap = NXCreateMapTable(NXPtrValueMapPrototype, 128);
    _NSResurrectedObjectClass = objc_allocateClassPair(objc_getClass("NSObject"), "_NSResurrectedObject", 0);
    class_addMethod(_NSResurrectedObjectClass, @selector(finalize), (IMP)_NSResurrectedObject_finalize, "v@:");
    Class metaClass = object_getClass(_NSResurrectedObjectClass);
    class_addMethod(metaClass, @selector(resolveInstanceMethod:), (IMP)_NSResurrectedObject_resolveInstanceMethod, "c@::");
    class_addMethod(metaClass, @selector(resolveClassMethod:), (IMP)_NSResurrectedObject_resolveClassMethod, "c@::");
    objc_registerClassPair(_NSResurrectedObjectClass);
}

static void resurrectZombie(auto_zone_t *zone, void *ptr) {
    id object = (id) ptr;
    Class cls = object->isa;
    if (cls != _NSResurrectedObjectClass) {
        // remember the original class for this instance.
        pthread_mutex_lock(&_NSResurrectedObjectLock);
        NXMapInsert(_NSResurrectedObjectMap, ptr, cls);
        pthread_mutex_unlock(&_NSResurrectedObjectLock);
        object->isa = _NSResurrectedObjectClass;
    }
}

/***********************************************************************
* Pretty printing support
* For development purposes.
**********************************************************************/


static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount);

static char* objc_name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset)
{
    return name_for_address(zone, base, offset, false);
}

/***********************************************************************
* Collection support
**********************************************************************/

static BOOL objc_isRegisteredClass(Class candidate);

static const unsigned char *objc_layout_for_address(auto_zone_t *zone, void *address)
{
    Class cls = *(Class *)address;
    if (!objc_isRegisteredClass(cls)) return NULL;
    return (const unsigned char *)class_getIvarLayout(cls);
}

static const unsigned char *objc_weak_layout_for_address(auto_zone_t *zone, void *address)
{
    Class cls = *(Class *)address;
    if (!objc_isRegisteredClass(cls)) return NULL;
    return (const unsigned char *)class_getWeakIvarLayout(cls);
}

__private_extern__ void gc_register_datasegment(uintptr_t base, size_t size) {
    auto_zone_register_datasegment(gc_zone, (void*)base, size);
}

__private_extern__ void gc_unregister_datasegment(uintptr_t base, size_t size) {
    auto_zone_unregister_datasegment(gc_zone, (void*)base, size);
}


/***********************************************************************
* Initialization
**********************************************************************/

static void objc_will_grow(auto_zone_t *zone, auto_heap_growth_info_t info) {
    if (MultiThreadedGC) {
        //printf("objc_will_grow %d\n", info);

        if (auto_zone_is_collecting(gc_zone)) {
            ;
        }
        else {
            auto_collect(gc_zone, AUTO_COLLECT_RATIO_COLLECTION, NULL);
        }
    }
}


static auto_zone_t *gc_zone_init(void)
{
    auto_zone_t *result;

    // result = auto_zone_create("objc auto collected zone");
    result = auto_zone_create("auto_zone");

    auto_collection_control_t *control = auto_collection_parameters(result);

    // set up the magic control parameters
    control->batch_invalidate = BatchInvalidate;
    control->will_grow = objc_will_grow;
    control->resurrect = resurrectZombie;
    control->layout_for_address = objc_layout_for_address;
    control->weak_layout_for_address = objc_weak_layout_for_address;
    control->name_for_address = objc_name_for_address;

    return result;
}


/* should be defined in /usr/local/include/libdispatch_private.h. */
extern void (*dispatch_begin_thread_4GC)(void);
extern void (*dispatch_end_thread_4GC)(void);

void objc_registerThreadWithCollector()
{
    if (UseGC) auto_zone_register_thread(gc_zone);
}

void objc_unregisterThreadWithCollector()
{
    if (UseGC) auto_zone_unregister_thread(gc_zone);
}

void objc_assertRegisteredThreadWithCollector()
{
    if (UseGC) auto_zone_assert_thread_registered(gc_zone);
}
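// Usage sketch (illustrative only; `worker` is hypothetical): a thread created
// with raw pthreads must register itself before touching GC memory, and
// unregister before exiting.
//
//     static void *worker(void *arg) {
//         objc_registerThreadWithCollector();
//         // ... allocate and use collected objects ...
//         objc_unregisterThreadWithCollector();
//         return NULL;
//     }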

// Always called by _objcInit, even if GC is off.
__private_extern__ void gc_init(BOOL on)
{
    UseGC = on;

    if (PrintGC) {
        _objc_inform("GC: is %s", on ? "ON" : "OFF");
    }

    if (UseGC) {
        // Add GC state to crash log reports
        _objc_inform_on_crash("garbage collection is ON");

        // Set up the GC zone
        gc_zone = gc_zone_init();

        // no NSObject until Foundation calls objc_collect_init()
        _NSObject_finalize = &_objc_msgForward_internal;

        // set up the registered classes list
        registeredClassTableInit();

        // tell Blocks to use collectable memory. CF will cook up the classes separately.
        gc_block_init();

        // tell libdispatch to register its threads with the GC.
        dispatch_begin_thread_4GC = objc_registerThreadWithCollector;
        dispatch_end_thread_4GC = objc_unregisterThreadWithCollector;
    } else {
        auto_zone_start_monitor(false);
        auto_zone_set_class_list((int (*)(void **, int))objc_getClassList);
    }
}



// Called by Foundation to install auto's interruption callback.
malloc_zone_t *objc_collect_init(int (*callback)(void))
{
    // Find NSObject's finalize method now that Foundation is loaded.
    // fixme only look for the base implementation, not a category's
    _NSObject_finalize = class_getMethodImplementation(objc_getClass("NSObject"), @selector(finalize));
    if (_NSObject_finalize == &_objc_msgForward /* not _internal! */) {
        _objc_fatal("GC: -[NSObject finalize] unimplemented!");
    }

    // create the _NSResurrectedObject class used to track resurrections.
    _NSResurrectedObject_initialize();

    return (malloc_zone_t *)gc_zone;
}

/*
 * Support routines for the Block implementation
 */


// The Block runtime now needs to sometimes allocate a Block that is an Object - namely
// when it needs to have a finalizer which, for now, is only if there are C++ destructors
// in the helper function. Hence the isObject parameter.
// Under GC a -copy message should allocate a refcount 0 block, ergo the isOne parameter.
static void *block_gc_alloc5(const unsigned long size, const bool isOne, const bool isObject) {
    auto_memory_type_t type = isObject ? (AUTO_OBJECT|AUTO_MEMORY_SCANNED) : AUTO_MEMORY_SCANNED;
    return auto_zone_allocate_object(gc_zone, size, type, isOne, false);
}

// The Blocks runtime keeps track of everything above 1, and so it only calls
// up to the collector to tell it about the 0->1 transition and then the 1->0 transition.
static void block_gc_setHasRefcount(const void *block, const bool hasRefcount) {
    if (hasRefcount)
        auto_zone_retain(gc_zone, (void *)block);
    else
        auto_zone_release(gc_zone, (void *)block);
}
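// Transition example (illustrative only): as the comment above describes, the
// Blocks runtime coalesces its own reference counting and reports only the
// edge transitions to the collector.
//
//     block_gc_setHasRefcount(blk, true);   // 0 -> 1: pin the block (auto_zone_retain)
//     // ... the Blocks runtime tracks counts 2, 3, ... internally ...
//     block_gc_setHasRefcount(blk, false);  // 1 -> 0: unpin it (auto_zone_release)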

static void block_gc_memmove(void *dst, void *src, unsigned long size) {
    auto_zone_write_barrier_memmove(gc_zone, dst, src, (size_t)size);
}


// Initialize the Block subsystem iff running under GC.
static void gc_block_init(void) {
    // set up the callout functions that enable _Block_copy to do the right thing under GC
    _Block_use_GC(
        block_gc_alloc5,
        block_gc_setHasRefcount,
        (void (*)(void *, void **))objc_assign_strongCast_gc,
        (void (*)(const void *, void *))objc_assign_weak,
        block_gc_memmove
    );
}


/***********************************************************************
* Track classes.
* In addition to the global class hashtable (set) indexed by name, we
* also keep one based purely on pointer when running under Garbage Collection.
* This allows the background collector to race against objects recycled from TLC.
* Specifically, the background collector can read the admin byte and see that
* a thread-local object is an object, get scheduled out, and the TLC recovers it,
* linking it into the cache; then the background collector reads the isa field and
* finds linkage info. By qualifying all isa field reads we avoid this.
**********************************************************************/

// This is a self-contained hash table of all classes. The first two elements contain the (size-1) and count.
static volatile Class *AllClasses = nil;

#define SHIFT 3
#define INITIALSIZE 512
#define REMOVED -1

// Allocate the side table.
static void registeredClassTableInit() {
    assert(UseGC);
    // allocate a collectable (refcount 0) zeroed hunk of unscanned memory
    uintptr_t *table = (uintptr_t *)auto_zone_allocate_object(gc_zone, INITIALSIZE*sizeof(void *), AUTO_MEMORY_UNSCANNED, false, true);
    // set initial capacity (as mask)
    table[0] = INITIALSIZE - 1;
    // set initial count
    table[1] = 0;
    // register it so that the collector will keep it around. We could instead allocate it refcount 1 and then decr when done.
    auto_zone_add_root(gc_zone, &AllClasses, table);
}

// Verify that a particular pointer is to a class.
// Safe from any thread, anytime.
static BOOL objc_isRegisteredClass(Class candidate) {
    assert(UseGC);
    // We don't care about a race with another thread adding a class to which we randomly might have a pointer.
    // Get a local copy of the table so that we're immune from updates.
    // We keep the size of the list as the first element so there is no race as the list & size get updated.
    uintptr_t *allClasses = (uintptr_t *)AllClasses;
    // Slot 0 is always the size of the list in mask terms (i.e. size - 1), where size is always a power of 2.
    // Slot 1 is the count.
    uintptr_t slot = (((uintptr_t)candidate) >> SHIFT) & allClasses[0];
    // avoid slots 0 and 1
    if (slot < 2) slot = 2;
    for (;;) {
        long int slotValue = allClasses[slot];
        if (slotValue == (long int)candidate) {
            return YES;
        }
        if (slotValue == 0) {
            return NO;
        }
        ++slot;
        if (slot > allClasses[0])
            slot = 2;  // skip size, count
    }
}
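// Probe example (illustrative only, using the default INITIALSIZE of 512):
// a class pointer is hashed by discarding its low SHIFT (3) alignment bits
// and masking by table[0] (= 511), then probed linearly, wrapping back past
// the two header slots.
//
//     uintptr_t cls = 0x10014c8;           // hypothetical class pointer
//     uintptr_t slot = (cls >> 3) & 511;   // == 153, the initial probe index
//     // collisions walk forward: 154, 155, ..., wrapping to 2 after 511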

// Utility used when growing.
// Assumes the lock is held.
static void addClassHelper(uintptr_t *table, uintptr_t candidate) {
    uintptr_t slot = (((long int)candidate) >> SHIFT) & table[0];
    if (slot < 2) slot = 2;
    for (;;) {
        uintptr_t slotValue = table[slot];
        if (slotValue == 0) {
            table[slot] = candidate;
            ++table[1];
            return;
        }
        ++slot;
        if (slot > table[0])
            slot = 2;  // skip size, count
    }
}

// lock held by callers
__private_extern__
void objc_addRegisteredClass(Class candidate) {
    if (!UseGC) return;
    uintptr_t *table = (uintptr_t *)AllClasses;
    // Slot 0 is always the size of the list in mask terms (i.e. size - 1), where size is always a power of 2.
    // Slot 1 is the count - always non-zero.
    uintptr_t slot = (((long int)candidate) >> SHIFT) & table[0];
    if (slot < 2) slot = 2;
    for (;;) {
        uintptr_t slotValue = table[slot];
        assert(slotValue != (uintptr_t)candidate);
        if (slotValue == REMOVED) {
            table[slot] = (long)candidate;
            return;
        }
        else if (slotValue == 0) {
            table[slot] = (long)candidate;
            if (2*++table[1] > table[0]) {  // add to count; check if we cross 50% utilization
                // grow
                uintptr_t oldSize = table[0]+1;
                uintptr_t *newTable = (uintptr_t *)auto_zone_allocate_object(gc_zone, oldSize*2*sizeof(void *), AUTO_MEMORY_UNSCANNED, false, true);
                uintptr_t i;
                newTable[0] = 2*oldSize - 1;
                newTable[1] = 0;
                for (i = 2; i < oldSize; ++i) {
                    if (table[i] && table[i] != REMOVED)
                        addClassHelper(newTable, table[i]);
                }
                // this does the write-barrier. Don't use objc_assign_global because it trips a linker error on 64-bit.
                auto_zone_add_root(gc_zone, &AllClasses, newTable);
            }
            return;
        }
        ++slot;
        if (slot > table[0])
            slot = 2;  // skip size, count
    }
}

// lock held by callers
__private_extern__
void objc_removeRegisteredClass(Class candidate) {
    if (!UseGC) return;
    uintptr_t *table = (uintptr_t *)AllClasses;
    // Slot 0 is always the size of the list in mask terms (i.e. size - 1), where size is always a power of 2.
    // Slot 1 is the count - always non-zero.
    uintptr_t slot = (((uintptr_t)candidate) >> SHIFT) & table[0];
    if (slot < 2) slot = 2;
    for (;;) {
        uintptr_t slotValue = table[slot];
        if (slotValue == (uintptr_t)candidate) {
            table[slot] = REMOVED;  // if the next slot == 0 we could set this to 0 and decrement the count
            return;
        }
        assert(slotValue != 0);
        ++slot;
        if (slot > table[0])
            slot = 2;  // skip size, count
    }
}


/***********************************************************************
* Debugging - support for smart printouts when errors occur
**********************************************************************/


static malloc_zone_t *objc_debug_zone(void)
{
    static malloc_zone_t *z = NULL;
    if (!z) {
        z = malloc_create_zone(4096, 0);
        malloc_set_zone_name(z, "objc-auto debug");
    }
    return z;
}

static char *_malloc_append_unsigned(uintptr_t value, unsigned base, char *head) {
    if (!value) {
        head[0] = '0';
    } else {
        if (value >= base) head = _malloc_append_unsigned(value / base, base, head);
        value = value % base;
        head[0] = (value < 10) ? '0' + value : 'a' + value - 10;
    }
    return head+1;
}

static void strlcati(char *str, uintptr_t value, size_t bufSize)
{
    if ((bufSize - strlen(str)) < 30)
        return;
    str = _malloc_append_unsigned(value, 10, str + strlen(str));
    str[0] = '\0';
}

static void strlcatx(char *str, uintptr_t value, size_t bufSize)
{
    if ((bufSize - strlen(str)) < 30)
        return;
    str = _malloc_append_unsigned(value, 16, str + strlen(str));
    str[0] = '\0';
}
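// Usage sketch (illustrative only): these helpers append decimal or hex
// digits to a buffer without pulling in printf.
//
//     char buf[64] = "size=";
//     strlcati(buf, 42, sizeof(buf));          // buf is now "size=42"
//     strlcat(buf, " at 0x", sizeof(buf));
//     strlcatx(buf, 0x10014c8, sizeof(buf));   // appends "10014c8"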

static Ivar ivar_for_offset(Class cls, vm_address_t offset)
{
    int i;
    ptrdiff_t ivar_offset;
    Ivar super_ivar, result;
    Ivar *ivars;
    unsigned int ivar_count;

    if (!cls) return NULL;

    // scan base classes FIRST
    super_ivar = ivar_for_offset(class_getSuperclass(cls), offset);
    // result is best-effort; our ivars may be closer

    ivars = class_copyIvarList(cls, &ivar_count);
    if (ivars && ivar_count) {
        // Try our first ivar. If it's too big, use super's best ivar.
        // (lose 64-bit precision)
        ivar_offset = ivar_getOffset(ivars[0]);
        if (ivar_offset > offset) result = super_ivar;
        else if (ivar_offset == offset) result = ivars[0];
        else result = NULL;

        // Try our other ivars. If any is too big, use the previous.
        for (i = 1; result == NULL && i < ivar_count; i++) {
            ivar_offset = ivar_getOffset(ivars[i]);
            if (ivar_offset == offset) {
                result = ivars[i];
            } else if (ivar_offset > offset) {
                result = ivars[i - 1];
            }
        }

        // Found nothing. Return our last ivar.
        if (result == NULL)
            result = ivars[ivar_count - 1];

        free(ivars);
    } else {
        result = super_ivar;
    }

    return result;
}

static void append_ivar_at_offset(char *buf, Class cls, vm_address_t offset, size_t bufSize)
{
    Ivar ivar = NULL;

    if (offset == 0) return;  // don't bother with isa
    if (offset >= class_getInstanceSize(cls)) {
        strlcat(buf, ".<extra>+", bufSize);
        strlcati(buf, offset, bufSize);
        return;
    }

    ivar = ivar_for_offset(cls, offset);
    if (!ivar) {
        strlcat(buf, ".<?>", bufSize);
        return;
    }

    // fixme doesn't handle structs etc.

    strlcat(buf, ".", bufSize);
    const char *ivar_name = ivar_getName(ivar);
    if (ivar_name) strlcat(buf, ivar_name, bufSize);
    else strlcat(buf, "<anonymous ivar>", bufSize);

    offset -= ivar_getOffset(ivar);
    if (offset > 0) {
        strlcat(buf, "+", bufSize);
        strlcati(buf, offset, bufSize);
    }
}


static const char *cf_class_for_object(void *cfobj)
{
    // ick - we don't link against CF anymore

    const char *result;
    void *dlh;
    size_t (*CFGetTypeID)(void *);
    void * (*_CFRuntimeGetClassWithTypeID)(size_t);

    result = "anonymous_NSCFType";

    dlh = dlopen("/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation", RTLD_LAZY | RTLD_NOLOAD | RTLD_FIRST);
    if (!dlh) return result;

    CFGetTypeID = (size_t(*)(void*)) dlsym(dlh, "CFGetTypeID");
    _CFRuntimeGetClassWithTypeID = (void*(*)(size_t)) dlsym(dlh, "_CFRuntimeGetClassWithTypeID");

    if (CFGetTypeID && _CFRuntimeGetClassWithTypeID) {
        struct {
            size_t version;
            const char *className;
            // don't care about the rest
        } *cfcls;
        size_t cfid;
        cfid = (*CFGetTypeID)(cfobj);
        cfcls = (*_CFRuntimeGetClassWithTypeID)(cfid);
        result = cfcls->className;
    }

    dlclose(dlh);
    return result;
}


static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount)
{
#define APPEND_SIZE(s) \
    strlcat(buf, "[", sizeof(buf)); \
    strlcati(buf, s, sizeof(buf)); \
    strlcat(buf, "]", sizeof(buf));

    char buf[1500];
    char *result;

    buf[0] = '\0';

    size_t size =
        auto_zone_size(zone, (void *)base);
    auto_memory_type_t type = size ?
        auto_zone_get_layout_type(zone, (void *)base) : AUTO_TYPE_UNKNOWN;
    unsigned int refcount = size ?
        auto_zone_retain_count(zone, (void *)base) : 0;

    switch (type) {
    case AUTO_OBJECT_SCANNED:
    case AUTO_OBJECT_UNSCANNED: {
        const char *class_name = object_getClassName((id)base);
        if ((0 == strcmp(class_name, "__NSCFType")) || (0 == strcmp(class_name, "NSCFType"))) {
            strlcat(buf, cf_class_for_object((void *)base), sizeof(buf));
        } else {
            strlcat(buf, class_name, sizeof(buf));
        }
        if (offset) {
            append_ivar_at_offset(buf, object_getClass((id)base), offset, sizeof(buf));
        }
        APPEND_SIZE(size);
        break;
    }
    case AUTO_MEMORY_SCANNED:
        strlcat(buf, "{conservative-block}", sizeof(buf));
        APPEND_SIZE(size);
        break;
    case AUTO_MEMORY_UNSCANNED:
        strlcat(buf, "{no-pointers-block}", sizeof(buf));
        APPEND_SIZE(size);
        break;
    default:
        strlcat(buf, "{unallocated-or-stack}", sizeof(buf));
    }

    if (withRetainCount && refcount > 0) {
        strlcat(buf, " [[refcount=", sizeof(buf));
        strlcati(buf, refcount, sizeof(buf));
        strlcat(buf, "]]", sizeof(buf));
    }

    result = malloc_zone_malloc(objc_debug_zone(), 1 + strlen(buf));
    strlcpy(result, buf, 1 + strlen(buf));
    return result;

#undef APPEND_SIZE
}
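// Output examples (illustrative only; the class and ivar names are made up):
// name_for_address builds strings such as "NSView._frame+8[96]" for an
// interior pointer into an object, or "{conservative-block}[64] [[refcount=1]]"
// for a retained non-object block when withRetainCount is set.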



#endif