/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
24 #import "objc-config.h"
26 #import "objc-accessors.h"
35 #import <mach-o/dyld.h>
36 #import <mach-o/nlist.h>
39 #import <libkern/OSAtomic.h>
42 #import <Block_private.h>
43 #include <dispatch/dispatch.h>
45 #import "objc-private.h"
46 #import "objc-references.h"
51 #if !defined(NDEBUG) && !__OBJC2__
52 #import "objc-exception.h"
// Forward declarations for GC startup helpers defined later in this file.
static auto_zone_t *gc_zone_init(BOOL wantsCompaction);
static void gc_block_init(void);
static void registeredClassTableInit(void);
static BOOL objc_isRegisteredClass(Class candidate);

// Whether the garbage collector is active for this process.
PRIVATE_EXTERN BOOL UseGC = NO;
// Whether heap compaction is enabled.
PRIVATE_EXTERN BOOL UseCompaction = NO;
// Set once any class registers for main-thread-only finalization.
static BOOL WantsMainThreadFinalization = NO;

// The collector's zone, created at GC startup.
PRIVATE_EXTERN auto_zone_t *gc_zone = NULL;

// Pointer magic to make dyld happy. See notes in objc-private.h
PRIVATE_EXTERN id (*objc_assign_ivar_internal)(id, id, ptrdiff_t) = objc_assign_ivar;
/* Method prototypes */
// Phantom interface: declares selectors (e.g. -UTF8String) used via
// objc_msgSend below so the compiler knows their signatures. Never implemented.
@interface DoesNotExist
- (const char *)UTF8String;
- (id)description;
@end
/***********************************************************************
* Break-on-error functions
* Empty hook functions: set a debugger breakpoint on these symbols to
* catch the corresponding GC error at the point it is detected.
* NOTE(review): bodies reconstructed as empty hooks from surviving text.
**********************************************************************/

// Hook: a store through objc_assign_ivar targeted memory outside the GC zone.
void objc_assign_ivar_error(id base, ptrdiff_t offset) 
{
}

// Hook: an already-collected object was stored into global memory.
void objc_assign_global_error(id value, id *slot) 
{
}

// Hook: a -finalize implementation threw an exception.
void objc_exception_during_finalize_error(void) 
{
}
/***********************************************************************
* Collection tuning entry points.
* Called by various libraries.
**********************************************************************/

// Set the number of bytes allocated between automatic collections.
// No-op when GC is off.
OBJC_EXPORT void objc_set_collection_threshold(size_t threshold) { // Old naming
    if (UseGC) {
        auto_collection_parameters(gc_zone)->collection_threshold = threshold;
    }
}

// Current naming for the same knob.
OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold) {
    if (UseGC) {
        auto_collection_parameters(gc_zone)->collection_threshold = threshold;
    }
}

// Set the ratio of generational collections per full collection.
void objc_setCollectionRatio(size_t ratio) {
    if (UseGC) {
        auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
    }
}

void objc_set_collection_ratio(size_t ratio) {  // old naming
    if (UseGC) {
        auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
    }
}
// Mark cls as requiring finalization on the main thread, and record that
// at least one such class exists so collection routes work accordingly.
void objc_finalizeOnMainThread(Class cls) {
    if (UseGC) {
        WantsMainThreadFinalization = YES;
        _class_setFinalizeOnMainThread(cls);
    }
}
// stack based data structure queued if/when there is main-thread-only finalization work TBD
typedef struct BatchFinalizeBlock {
    auto_zone_foreach_object_t foreach;   // object enumerator supplied by the collector
    auto_zone_cursor_t cursor;            // collector cursor for this batch
    size_t cursor_size;                   // byte size of the cursor data
                                          // NOTE(review): field restored from its use in
                                          // batchFinalizeOnMainThread/TwoThreads — confirm.
    volatile BOOL finished;               // set by the main thread when the batch completes
    volatile BOOL started;                // set when the main thread begins the batch
    struct BatchFinalizeBlock *next;      // FIFO link in MainThreadWorkQ
} BatchFinalizeBlock_t;
// The Main Thread Finalization Work Queue Head.
// Guarded by mutex; condition is signaled as batches start and finish.
static struct {
    pthread_mutex_t mutex;
    pthread_cond_t condition;
    BatchFinalizeBlock_t *head;
    BatchFinalizeBlock_t *tail;
} MainThreadWorkQ;
// Compatibility no-ops: the dispatch-based collector no longer needs a
// dedicated collector thread to be started explicitly.
// NOTE(review): bodies reconstructed as empty — confirm against original.
void objc_startCollectorThread(void) {
}

void objc_start_collector_thread(void) {
}
static void batchFinalizeOnMainThread(void);

// Request a garbage collection. `options` selects the collection mode
// (ratio/generational/full/exhaustive), whether to collect only if
// needed, and whether to block until the collection completes.
void objc_collect(unsigned long options) {
    if (!UseGC) return;
    BOOL onMainThread = pthread_main_np() ? YES : NO;

    // while we're here, sneak off and do some finalization work (if any)
    if (onMainThread) batchFinalizeOnMainThread();
    // now on with our normally scheduled programming
    auto_zone_options_t amode = AUTO_ZONE_COLLECT_NO_OPTIONS;
    if (!(options & OBJC_COLLECT_IF_NEEDED)) {
        switch (options & 0x3) {
            case OBJC_RATIO_COLLECTION:        amode = AUTO_ZONE_COLLECT_RATIO_COLLECTION; break;
            case OBJC_GENERATIONAL_COLLECTION: amode = AUTO_ZONE_COLLECT_GENERATIONAL_COLLECTION; break;
            case OBJC_FULL_COLLECTION:         amode = AUTO_ZONE_COLLECT_FULL_COLLECTION; break;
            case OBJC_EXHAUSTIVE_COLLECTION:   amode = AUTO_ZONE_COLLECT_EXHAUSTIVE_COLLECTION; break;
        }
        amode |= AUTO_ZONE_COLLECT_COALESCE;
        amode |= AUTO_ZONE_COLLECT_LOCAL_COLLECTION;
    }
    if (options & OBJC_WAIT_UNTIL_DONE) {
        __block BOOL done = NO;
        // If executing on the main thread, use the main thread work queue condition to block,
        // so main thread finalization can complete. Otherwise, use a thread-local condition.
        pthread_mutex_t localMutex = PTHREAD_MUTEX_INITIALIZER, *mutex = &localMutex;
        pthread_cond_t localCondition = PTHREAD_COND_INITIALIZER, *condition = &localCondition;
        if (onMainThread) {
            mutex = &MainThreadWorkQ.mutex;
            condition = &MainThreadWorkQ.condition;
        }
        pthread_mutex_lock(mutex);
        auto_zone_collect_and_notify(gc_zone, amode, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
            // completion callback: mark done and wake the waiter.
            pthread_mutex_lock(mutex);
            done = YES;
            pthread_cond_signal(condition);
            pthread_mutex_unlock(mutex);
        });
        while (!done) {
            pthread_cond_wait(condition, mutex);
            // Drain any main-thread-only finalization work queued while waiting,
            // otherwise the collection can never finish.
            if (onMainThread && MainThreadWorkQ.head) {
                pthread_mutex_unlock(mutex);
                batchFinalizeOnMainThread();
                pthread_mutex_lock(mutex);
            }
        }
        pthread_mutex_unlock(mutex);
    } else {
        auto_zone_collect(gc_zone, amode);
    }
}
// USED BY CF & ONE OTHER
// Returns YES iff GC is on and `object` lies in the collector's zone.
BOOL objc_isAuto(id object) 
{
    return UseGC && auto_zone_is_valid_pointer(gc_zone, object) != 0;
}
// Is garbage collection active in this process?
BOOL objc_collectingEnabled(void) 
{
    return UseGC;
}

BOOL objc_collecting_enabled(void) // Old naming
{
    return UseGC;
}

// Expose the collector's zone as a malloc zone for introspection tools.
malloc_zone_t *objc_collectableZone(void) {
    return (malloc_zone_t *)gc_zone;
}
// Write a heap dump to a counter-suffixed file. If filenamebuffer is
// non-NULL, copy the dump's file name into it (capacity `length` bytes);
// an empty string is stored if the name does not fit.
BOOL objc_dumpHeap(char *filenamebuffer, unsigned long length) {
    static int counter = 0;
    counter++;
    char buffer[1024];
    // snprintf (was sprintf): bound the write to the local buffer.
    snprintf(buffer, sizeof(buffer), OBJC_HEAP_DUMP_FILENAME_FORMAT, getpid(), counter);
    if (!_objc_dumpHeap(gc_zone, buffer)) return NO;
    if (filenamebuffer) {
        unsigned long blen = strlen(buffer);
        if (blen < length)
            strncpy(filenamebuffer, buffer, blen+1);   // blen+1 includes the NUL
        else 
            filenamebuffer[0] = 0;  // give some answer
    }
    return YES;
}
/***********************************************************************
* Object allocation.
* Called by CF and Foundation.
**********************************************************************/

// Allocate an object in the GC zone, with the given number of extra bytes.
id objc_allocate_object(Class cls, int extra) 
{
    return class_createInstance(cls, extra);
}
257 /***********************************************************************
258 * Write barrier implementations, optimized for when GC is known to be on
259 * Called by the write barrier exports only.
260 * These implementations assume GC is on. The exported function must
261 * either perform the check itself or be conditionally stomped at
263 **********************************************************************/
// GC store of `value` through an arbitrary slot (strong cast). The
// generational write barrier handles slots inside the GC heap; anything
// else is treated as a root store.
PRIVATE_EXTERN id objc_assign_strongCast_gc(id value, id *slot) {
    if (!auto_zone_set_write_barrier(gc_zone, (void*)slot, value)) {  // stores & returns true if slot points into GC allocated memory
        auto_zone_root_write_barrier(gc_zone, slot, value);           // always stores
    }
    return value;
}
// GC store of `value` into global memory: registers the slot as an
// explicit root so the collector keeps `value` alive.
PRIVATE_EXTERN id objc_assign_global_gc(id value, id *slot) {
    // use explicit root registration.
    if (value && auto_zone_is_valid_pointer(gc_zone, value)) {
        if (auto_zone_is_finalized(gc_zone, value)) {
            // resurrection bug: storing dead object into a root.
            _objc_inform("GC: storing an already collected object %p into global memory at %p, break on objc_assign_global_error to debug\n", value, slot);
            objc_assign_global_error(value, slot);
        }
        auto_zone_add_root(gc_zone, slot, value);
    }
    else {
        // NOTE(review): plain-store fallback reconstructed for nil /
        // non-GC values — confirm against original source.
        *slot = value;
    }
    return value;
}
// GC store of `value` into thread-local storage: registered as a root
// so the collector scans it.
PRIVATE_EXTERN id objc_assign_threadlocal_gc(id value, id *slot)
{
    if (value && auto_zone_is_valid_pointer(gc_zone, value)) {
        auto_zone_add_root(gc_zone, slot, value);
    }
    else {
        // NOTE(review): plain-store fallback reconstructed — confirm.
        *slot = value;
    }
    return value;
}
// GC store of `value` into an instance variable at base+offset, going
// through the generational write barrier.
PRIVATE_EXTERN id objc_assign_ivar_gc(id value, id base, ptrdiff_t offset)
{
    id *slot = (id*) ((char *)base + offset);

    if (value) {
        if (!auto_zone_set_write_barrier(gc_zone, (char *)base + offset, value)) {
            // base does not point into the GC heap — misdirected store.
            _objc_inform("GC: %p + %tu isn't in the auto_zone, break on objc_assign_ivar_error to debug.\n", base, offset);
            objc_assign_ivar_error(base, offset);
        }
    }
    else {
        *slot = value;
    }
    return value;
}
// Non-GC flavors of the write barriers: plain stores, no barrier work.
PRIVATE_EXTERN id objc_assign_strongCast_non_gc(id value, id *slot) {
    return (*slot = value);
}

PRIVATE_EXTERN id objc_assign_global_non_gc(id value, id *slot) {
    return (*slot = value);
}

PRIVATE_EXTERN id objc_assign_threadlocal_non_gc(id value, id *slot) {
    return (*slot = value);
}

PRIVATE_EXTERN id objc_assign_ivar_non_gc(id value, id base, ptrdiff_t offset) {
    id *slot = (id*) ((char *)base + offset);
    return (*slot = value);
}
/***********************************************************************
* Write barrier exports
* Called by pretty much all GC-supporting code.
**********************************************************************/

#if defined(__i386__)

// These 3 functions are defined in objc-auto-i386.s as
// the non-GC variants. Under GC, rtp_init stomps them with jumps to
// the _gc variants.

#else

// use generic implementation until time can be spent on optimizations
id objc_assign_strongCast(id value, id *dest) 
{
    if (UseGC) {
        return objc_assign_strongCast_gc(value, dest);
    } else {
        return (*dest = value);
    }
}

id objc_assign_global(id value, id *dest) 
{
    if (UseGC) {
        return objc_assign_global_gc(value, dest);
    } else {
        return (*dest = value);
    }
}

id objc_assign_threadlocal(id value, id *dest)
{
    if (UseGC) {
        return objc_assign_threadlocal_gc(value, dest);
    } else {
        return (*dest = value);
    }
}

id objc_assign_ivar(id value, id dest, ptrdiff_t offset) 
{
    if (UseGC) {
        return objc_assign_ivar_gc(value, dest, offset);
    } else {
        id *slot = (id*) ((char *)dest + offset);
        return (*slot = value);
    }
}

#endif // not defined(__i386__)
// Width-neutral mach-o aliases. Without the restored #if/#else the two
// sets of typedefs would conflict.
#if __LP64__
#define LC_SEGMENT_COMMAND LC_SEGMENT_64
#define LC_ROUTINES_COMMAND LC_ROUTINES_64
typedef struct mach_header_64 macho_header;
typedef struct section_64 macho_section;
typedef struct nlist_64 macho_nlist;
typedef struct segment_command_64 macho_segment_command;
#else
#define LC_SEGMENT_COMMAND LC_SEGMENT
#define LC_ROUTINES_COMMAND LC_ROUTINES
typedef struct mach_header macho_header;
typedef struct section macho_section;
typedef struct nlist macho_nlist;
typedef struct segment_command macho_segment_command;
#endif
// Rewrite lazy symbol pointers in the given mach-o image: for every lazy
// pointer whose symbol name matches an entry in symbols[], store the
// corresponding functions[] address. Used to point write-barrier stubs
// at the GC or non-GC implementations.
// NOTE(review): slide declaration, switch labels, and loop closure were
// reconstructed; the original garbled token '§ionsStart' is restored to
// '&sectionsStart' (mis-encoded HTML entity).
PRIVATE_EXTERN void _objc_update_stubs_in_mach_header(const struct mach_header* mh, uint32_t symbol_count, const char *symbols[], void *functions[]) {
    uint32_t cmd_index, cmd_count = mh->ncmds;
    intptr_t slide = 0;
    const struct load_command* const cmds = (struct load_command*)((char*)mh + sizeof(macho_header));
    const struct load_command* cmd;
    const uint8_t *linkEditBase = NULL;
    const macho_nlist *symbolTable = NULL;
    uint32_t symbolTableCount = 0;
    const char *stringTable = NULL;
    uint32_t stringTableSize = 0;
    const uint32_t *indirectSymbolTable = NULL;
    uint32_t indirectSymbolTableCount = 0;

    // first pass at load commands gets linkEditBase
    for (cmd = cmds, cmd_index = 0; cmd_index < cmd_count; ++cmd_index) {
        if ( cmd->cmd == LC_SEGMENT_COMMAND ) {
            const macho_segment_command* seg = (macho_segment_command*)cmd;
            if ( strcmp(seg->segname,"__TEXT") == 0 )
                slide = (uintptr_t)mh - seg->vmaddr;
            else if ( strcmp(seg->segname,"__LINKEDIT") == 0 )
                linkEditBase = (uint8_t*)(seg->vmaddr + slide - seg->fileoff);
        }
        cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
    }

    // second pass: locate the symbol, string, and indirect symbol tables
    for (cmd = cmds, cmd_index = 0; cmd_index < cmd_count; ++cmd_index) {
        switch ( cmd->cmd ) {
            case LC_SYMTAB:
            {
                const struct symtab_command* symtab = (struct symtab_command*)cmd;
                symbolTableCount = symtab->nsyms;
                symbolTable = (macho_nlist*)(&linkEditBase[symtab->symoff]);
                stringTableSize = symtab->strsize;
                stringTable = (const char*)&linkEditBase[symtab->stroff];
                break;
            }
            case LC_DYSYMTAB:
            {
                const struct dysymtab_command* dsymtab = (struct dysymtab_command*)cmd;
                indirectSymbolTableCount = dsymtab->nindirectsyms;
                indirectSymbolTable = (uint32_t*)(&linkEditBase[dsymtab->indirectsymoff]);
                break;
            }
        }
        cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
    }

    // walk sections to find one with this lazy pointer
    for (cmd = cmds, cmd_index = 0; cmd_index < cmd_count; ++cmd_index) {
        if (cmd->cmd == LC_SEGMENT_COMMAND) {
            const macho_segment_command* seg = (macho_segment_command*)cmd;
            const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
            const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
            const macho_section* sect;
            for (sect = sectionsStart; sect < sectionsEnd; ++sect) {
                const uint8_t type = sect->flags & SECTION_TYPE;
                if (type == S_LAZY_DYLIB_SYMBOL_POINTERS || type == S_LAZY_SYMBOL_POINTERS) {
                    uint32_t pointer_index, pointer_count = (uint32_t)(sect->size / sizeof(uintptr_t));
                    uintptr_t* const symbolPointers = (uintptr_t*)(sect->addr + slide);
                    for (pointer_index = 0; pointer_index < pointer_count; ++pointer_index) {
                        const uint32_t indirectTableOffset = sect->reserved1;
                        if ((indirectTableOffset + pointer_index) < indirectSymbolTableCount) {
                            uint32_t symbolIndex = indirectSymbolTable[indirectTableOffset + pointer_index];
                            // if symbolIndex is INDIRECT_SYMBOL_LOCAL or INDIRECT_SYMBOL_LOCAL|INDIRECT_SYMBOL_ABS, then it will
                            // by definition be >= symbolTableCount.
                            if (symbolIndex < symbolTableCount) {
                                // found symbol for this lazy pointer, now lookup address
                                uint32_t stringTableOffset = symbolTable[symbolIndex].n_un.n_strx;
                                if (stringTableOffset < stringTableSize) {
                                    const char* symbolName = &stringTable[stringTableOffset];
                                    uint32_t i;
                                    for (i = 0; i < symbol_count; ++i) {
                                        if (strcmp(symbols[i], symbolName) == 0) {
                                            symbolPointers[pointer_index] = (uintptr_t)functions[i];
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
    }
}
// memmove that preserves write-barrier bookkeeping when GC is on;
// plain memmove otherwise. Regions may overlap.
void *objc_memmove_collectable(void *dst, const void *src, size_t size)
{
    if (UseGC) {
        return auto_zone_write_barrier_memmove(gc_zone, dst, src, size);
    }
    return memmove(dst, src, size);
}
// Atomic compare-and-swap wrappers. Under GC these route through the
// collector so write-barrier state stays correct; otherwise they fall
// back to OSAtomic. The *Barrier variants issue a full memory barrier.

BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL issueMemoryBarrier = NO;
    if (UseGC)
        return auto_zone_atomicCompareAndSwapPtr(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL issueMemoryBarrier = YES;
    if (UseGC)
        return auto_zone_atomicCompareAndSwapPtr(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

// Global variants: the location is treated as a GC root.
BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL isGlobal = YES;
    const BOOL issueMemoryBarrier = NO;
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL isGlobal = YES;
    const BOOL issueMemoryBarrier = YES;
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

// Instance-variable variants: the location lies inside a GC-heap object.
BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL isGlobal = NO;
    const BOOL issueMemoryBarrier = NO;
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}

BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation) {
    const BOOL isGlobal = NO;
    const BOOL issueMemoryBarrier = YES;
    if (UseGC)
        return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
    else
        return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}
/***********************************************************************
* Weak ivar support
**********************************************************************/

// GC flavor: reload *location through the collector so a value that is
// being collected reads as nil.
PRIVATE_EXTERN id objc_read_weak_gc(id *location) {
    id result = *location;
    if (result) {
        result = auto_read_weak_reference(gc_zone, (void **)location);
    }
    return result;
}

// Non-GC flavor: plain load.
PRIVATE_EXTERN id objc_read_weak_non_gc(id *location) {
    return *location;
}

// Generic entry point used until the barrier stubs are rewired.
id objc_read_weak(id *location) {
    id result = *location;
    if (UseGC && result) {
        result = auto_read_weak_reference(gc_zone, (void **)location);
    }
    return result;
}

// GC flavor: register the weak reference with the collector.
PRIVATE_EXTERN id objc_assign_weak_gc(id value, id *location) {
    auto_assign_weak_reference(gc_zone, value, (const void **)location, NULL);
    return value;
}

// Non-GC flavor: plain store.
PRIVATE_EXTERN id objc_assign_weak_non_gc(id value, id *location) {
    return (*location = value);
}

// Generic entry point used until the barrier stubs are rewired.
id objc_assign_weak(id value, id *location) {
    if (UseGC) {
        auto_assign_weak_reference(gc_zone, value, (const void **)location, NULL);
    }
    else {
        *location = value;
    }
    return value;
}
// After an object is copied (newObject from oldObject), re-register its
// weak ivars with the collector at their new addresses. The weak layout
// encodes skip/weak counts as high/low nibbles per byte, NUL-terminated.
// NOTE(review): null-layout guard and inner weak loop reconstructed from
// the nibble bookkeeping — confirm against original.
PRIVATE_EXTERN void gc_fixup_weakreferences(id newObject, id oldObject) {
    // fix up weak references if any.
    const unsigned char *weakLayout = (const unsigned char *)class_getWeakIvarLayout(_object_getClass(newObject));
    if (weakLayout) {
        void **newPtr = (void **)newObject, **oldPtr = (void **)oldObject;
        unsigned char byte;
        while ((byte = *weakLayout++)) {
            unsigned skips = (byte >> 4);
            unsigned weaks = (byte & 0x0F);
            newPtr += skips, oldPtr += skips;
            while (weaks--) {
                *newPtr = NULL;
                auto_assign_weak_reference(gc_zone, auto_read_weak_reference(gc_zone, oldPtr), (const void **)newPtr, NULL);
                ++newPtr, ++oldPtr;
            }
        }
    }
}
/***********************************************************************
* Testing tools
* Used to isolate resurrection of garbage objects during finalization.
**********************************************************************/
BOOL objc_is_finalized(void *ptr) {
    if (ptr != NULL && UseGC) {
        return auto_zone_is_finalized(gc_zone, ptr);
    }
    return NO;
}
/***********************************************************************
* Stack clearing.
* Used by top-level thread loops to reduce false pointers from the stack.
**********************************************************************/
void objc_clear_stack(unsigned long options) {
    if (!UseGC) return;
    auto_zone_clear_stack(gc_zone, 0);
}
/***********************************************************************
* Finalization support
**********************************************************************/

// IMP of -[NSObject finalize], cached at GC startup for direct dispatch.
static IMP _NSObject_finalize = NULL;

// Finalizer crash debugging: the object currently being finalized, if any.
static void *finalizing_object;
// finalize a single object without fuss
// When there are no main-thread-only classes this is used directly
// Otherwise, it is used indirectly by smarter code that knows main-thread-affinity requirements
static void finalizeOneObject(void *obj, void *ignored) {
    id object = (id)obj;
    finalizing_object = obj;    // record for crash diagnosis

    Class cls = object_getClass(obj);
    CRSetCrashLogMessage2(class_getName(cls));

    /// call -finalize method.
    objc_msgSend(object, @selector(finalize));

    // Call C++ destructors.
    // This would be objc_destructInstance() but for performance.
    if (_class_hasCxxStructors(cls)) {
        object_cxxDestruct(object);
    }

    finalizing_object = NULL;
    CRSetCrashLogMessage2(NULL);
}
// finalize object only if it is a main-thread-only object.
// Called only from the main thread.
static void finalizeOneMainThreadOnlyObject(void *obj, void *arg) {
    id object = (id)obj;
    Class cls = _object_getClass(object);
    if (cls == NULL) {
        _objc_fatal("object with NULL ISA passed to finalizeOneMainThreadOnlyObject: %p\n", obj);
    }
    if (_class_shouldFinalizeOnMainThread(cls)) {
        finalizeOneObject(obj, NULL);
    }
}
// finalize one object only if it is not a main-thread-only object
// called from any other thread than the main thread
// Important: if a main-thread-only object is passed, return that fact in the needsMain argument
static void finalizeOneAnywhereObject(void *obj, void *needsMain) {
    id object = (id)obj;
    Class cls = _object_getClass(object);
    bool *needsMainThreadWork = needsMain;
    if (cls == NULL) {
        _objc_fatal("object with NULL ISA passed to finalizeOneAnywhereObject: %p\n", obj);
    }
    if (!_class_shouldFinalizeOnMainThread(cls)) {
        finalizeOneObject(obj, NULL);
    }
    else {
        *needsMainThreadWork = true;    // defer to the main thread's pass
    }
}
// Utility workhorse.
// Set up the expensive @try block and ask the collector to hand the next object to
// our finalizeAnObject function.
// Track and return a boolean that records whether or not any main thread work is necessary.
// (When we know that there are no main thread only objects then the boolean isn't even computed)
// NOTE(review): the retry loop around @try/@catch was reconstructed from
// the "restart at cursor's position" comments — confirm against original.
static bool batchFinalize(auto_zone_t *zone,
                          auto_zone_foreach_object_t foreach,
                          auto_zone_cursor_t cursor,
                          size_t cursor_size,
                          void (*finalizeAnObject)(void *, void*))
{
#if !defined(NDEBUG) && !__OBJC2__
    // debug: don't call try/catch before exception handlers are installed
    objc_exception_functions_t table = {0};
    objc_exception_get_functions(&table);
    assert(table.throw_exc);
#endif

    bool needsMainThreadWork = false;
    for (;;) {
        @try {
            foreach(cursor, finalizeAnObject, &needsMainThreadWork);
            // non-exceptional return means finalization is complete.
            break;
        }
        @catch (id exception) {
            // whoops, note exception, then restart at cursor's position
            _objc_inform("GC: -finalize resulted in an exception (%p) being thrown, break on objc_exception_during_finalize_error to debug\n\t%s", exception, (const char*)[[exception description] UTF8String]);
            objc_exception_during_finalize_error();
        }
        @catch (...) {
            // whoops, note exception, then restart at cursor's position
            _objc_inform("GC: -finalize resulted in an exception being thrown, break on objc_exception_during_finalize_error to debug");
            objc_exception_during_finalize_error();
        }
    }
    return needsMainThreadWork;
}
// Called on main thread-only.
// Pick up work from global queue.
// called parasitically by anyone requesting a collection
// called explicitly when there is known to be main thread only finalization work
// In both cases we are on the main thread
// Guard against recursion by something called from a finalizer
static void batchFinalizeOnMainThread() {
    pthread_mutex_lock(&MainThreadWorkQ.mutex);
    if (!MainThreadWorkQ.head || MainThreadWorkQ.head->started) {
        // No work or we're already here
        pthread_mutex_unlock(&MainThreadWorkQ.mutex);
        return;
    }
    while (MainThreadWorkQ.head) {
        BatchFinalizeBlock_t *bfb = MainThreadWorkQ.head;
        bfb->started = YES;
        // drop the lock while running finalizers; they may take a while
        pthread_mutex_unlock(&MainThreadWorkQ.mutex);

        batchFinalize(gc_zone, bfb->foreach, bfb->cursor, bfb->cursor_size, finalizeOneMainThreadOnlyObject);
        // signal the collector thread(s) that finalization has finished.
        pthread_mutex_lock(&MainThreadWorkQ.mutex);
        bfb->finished = YES;
        pthread_cond_broadcast(&MainThreadWorkQ.condition);
        MainThreadWorkQ.head = bfb->next;
    }
    MainThreadWorkQ.tail = NULL;
    pthread_mutex_unlock(&MainThreadWorkQ.mutex);
}
// Knowing that we possibly have main thread only work to do, first process everything
// that is not main-thread-only. If we discover main thread only work, queue a work block
// to the main thread that will do just the main thread only work. Wait for it.
// Called from a non main thread.
static void batchFinalizeOnTwoThreads(auto_zone_t *zone,
                                      auto_zone_foreach_object_t foreach,
                                      auto_zone_cursor_t cursor,
                                      size_t cursor_size)
{
    // First, lets get rid of everything we can on this thread, then ask main thread to help if needed
    char cursor_copy[cursor_size];
    memcpy(cursor_copy, cursor, cursor_size);
    bool needsMainThreadFinalization = batchFinalize(zone, foreach, (auto_zone_cursor_t)cursor_copy, cursor_size, finalizeOneAnywhereObject);

    if (! needsMainThreadFinalization)
        return;     // no help needed

    // set up the control block. Either our ping of main thread with _callOnMainThread will get to it, or
    // an objc_collect(if_needed) will get to it. Either way, this block will be processed on the main thread.
    BatchFinalizeBlock_t bfb;
    bfb.foreach = foreach;
    bfb.cursor = cursor;        // the original cursor resumes where this thread stopped
    bfb.cursor_size = cursor_size;
    bfb.started = NO;
    bfb.finished = NO;
    bfb.next = NULL;
    pthread_mutex_lock(&MainThreadWorkQ.mutex);
    if (MainThreadWorkQ.tail) {
        // link to end so that ordering of finalization is preserved.
        MainThreadWorkQ.tail->next = &bfb;
        MainThreadWorkQ.tail = &bfb;
    }
    else {
        MainThreadWorkQ.head = &bfb;
        MainThreadWorkQ.tail = &bfb;
    }
    pthread_mutex_unlock(&MainThreadWorkQ.mutex);

    //printf("----->asking main thread to finalize\n");
    dispatch_async(dispatch_get_main_queue(), ^{ batchFinalizeOnMainThread(); });

    // wait for the main thread to finish finalizing instances of classes marked CLS_FINALIZE_ON_MAIN_THREAD.
    pthread_mutex_lock(&MainThreadWorkQ.mutex);
    while (!bfb.finished) {
        // the main thread might be blocked waiting for a synchronous collection to complete, so wake it here
        pthread_cond_signal(&MainThreadWorkQ.condition);
        pthread_cond_wait(&MainThreadWorkQ.condition, &MainThreadWorkQ.mutex);
    }
    pthread_mutex_unlock(&MainThreadWorkQ.mutex);
    //printf("<------ main thread finalize done\n");
}
// collector calls this with garbage ready
// thread collectors, too, so this needs to be thread-safe
static void BatchInvalidate(auto_zone_t *zone,
                            auto_zone_foreach_object_t foreach,
                            auto_zone_cursor_t cursor,
                            size_t cursor_size)
{
    if (pthread_main_np() || !WantsMainThreadFinalization) {
        // Collect all objects. We're either pre-multithreaded on main thread or we're on the collector thread
        // but no main-thread-only objects have been allocated.
        batchFinalize(zone, foreach, cursor, cursor_size, finalizeOneObject);
    }
    else {
        // We're on the dedicated thread. Collect some on main thread, the rest here.
        batchFinalizeOnTwoThreads(zone, foreach, cursor, cursor_size);
    }
}
/***********************************************************************
* Resurrection handling.
* Collector calls into this system when it finds resurrected objects.
* This keeps them pitifully alive and leaked, even if they reference garbage.
**********************************************************************/

// idea: keep a side table mapping resurrected object pointers to their original Class, so we don't
// need to smash anything. alternatively, could use associative references to track against a secondary
// object with information about the resurrection, such as a stack crawl, etc.

static Class _NSResurrectedObjectClass;
static NXMapTable *_NSResurrectedObjectMap = NULL;
static pthread_mutex_t _NSResurrectedObjectLock = PTHREAD_MUTEX_INITIALIZER;
// Return the original class of a resurrected object, recorded in the side table.
static Class resurrectedObjectOriginalClass(id object) {
    Class originalClass;
    pthread_mutex_lock(&_NSResurrectedObjectLock);
    originalClass = (Class) NXMapGet(_NSResurrectedObjectMap, object);
    pthread_mutex_unlock(&_NSResurrectedObjectLock);
    return originalClass;
}

// Any class method sent to the resurrected class simply returns self.
static id _NSResurrectedObject_classMethod(id self, SEL selector) { return self; }

// Any instance method logs the message; returns nil.
static id _NSResurrectedObject_instanceMethod(id self, SEL name) {
    _objc_inform("**resurrected** object %p of class %s being sent message '%s'\n", self, class_getName(resurrectedObjectOriginalClass(self)), sel_getName(name));
    return nil;
}

// Log finalization of a resurrected object, remove its side-table entry,
// then run the standard -[NSObject finalize].
static void _NSResurrectedObject_finalize(id self, SEL _cmd) {
    Class originalClass;
    pthread_mutex_lock(&_NSResurrectedObjectLock);
    originalClass = (Class) NXMapRemove(_NSResurrectedObjectMap, self);
    pthread_mutex_unlock(&_NSResurrectedObjectLock);
    if (originalClass) _objc_inform("**resurrected** object %p of class %s being finalized\n", self, class_getName(originalClass));
    _NSObject_finalize(self, _cmd);
}

// Dynamically add a logging stub for any instance method name that arrives.
static BOOL _NSResurrectedObject_resolveInstanceMethod(id self, SEL _cmd, SEL name) {
    class_addMethod((Class)self, name, (IMP)_NSResurrectedObject_instanceMethod, "@@:");
    return YES;
}

// Dynamically add a stub for any class method name that arrives.
static BOOL _NSResurrectedObject_resolveClassMethod(id self, SEL _cmd, SEL name) {
    class_addMethod(_object_getClass(self), name, (IMP)_NSResurrectedObject_classMethod, "@@:");
    return YES;
}
// Create the _NSResurrectedObject class and its side table at GC startup.
static void _NSResurrectedObject_initialize() {
    _NSResurrectedObjectMap = NXCreateMapTable(NXPtrValueMapPrototype, 128);
    _NSResurrectedObjectClass = objc_allocateClassPair(objc_getClass("NSObject"), "_NSResurrectedObject", 0);
    class_addMethod(_NSResurrectedObjectClass, @selector(finalize), (IMP)_NSResurrectedObject_finalize, "v@:");
    Class metaClass = _object_getClass(_NSResurrectedObjectClass);
    class_addMethod(metaClass, @selector(resolveInstanceMethod:), (IMP)_NSResurrectedObject_resolveInstanceMethod, "c@::");
    class_addMethod(metaClass, @selector(resolveClassMethod:), (IMP)_NSResurrectedObject_resolveClassMethod, "c@::");
    objc_registerClassPair(_NSResurrectedObjectClass);
}
// auto_zone callback: a garbage object was resurrected. Record its
// original class in the side table and reclass it as _NSResurrectedObject
// so it stays harmlessly (if leakily) alive.
static void resurrectZombie(auto_zone_t *zone, void *ptr) {
    id object = (id) ptr;
    Class cls = _object_getClass(object);
    if (cls != _NSResurrectedObjectClass) {
        // remember the original class for this instance.
        pthread_mutex_lock(&_NSResurrectedObjectLock);
        NXMapInsert(_NSResurrectedObjectMap, ptr, cls);
        pthread_mutex_unlock(&_NSResurrectedObjectLock);
        object_setClass(object, _NSResurrectedObjectClass);
    }
}
/***********************************************************************
* Pretty printing support
* For development purposes.
**********************************************************************/

static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount);

// auto_zone callback: human-readable name for an address (no retain count).
static char* objc_name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset)
{
    return name_for_address(zone, base, offset, false);
}

// auto_zone callback: class name for an object, or "" when its isa is
// not a registered class (e.g. raw memory misidentified as an object).
static const char* objc_name_for_object(auto_zone_t *zone, void *object) {
    Class cls = *(Class *)object;
    if (!objc_isRegisteredClass(cls)) return "";
    return class_getName(cls);
}
/* Compaction support */

// Permanently disable heap compaction for this process.
// NOTE(review): guard and flag reset reconstructed — confirm exact body
// against the original source.
PRIVATE_EXTERN void objc_disableCompaction() {
    if (UseCompaction) {
        UseCompaction = NO;
        auto_zone_disable_compaction(gc_zone);
    }
}
/***********************************************************************
* Collector layout callbacks
**********************************************************************/

static BOOL objc_isRegisteredClass(Class candidate);

// auto_zone callback: strong ivar layout for an object, or NULL when its
// isa is not a registered class.
static const unsigned char *objc_layout_for_address(auto_zone_t *zone, void *address) {
    id object = (id)address;
    Class cls = (volatile Class)_object_getClass(object);
    return objc_isRegisteredClass(cls) ? _object_getIvarLayout(cls, object) : NULL;
}

// auto_zone callback: weak ivar layout, same registered-class guard.
static const unsigned char *objc_weak_layout_for_address(auto_zone_t *zone, void *address) {
    id object = (id)address;
    Class cls = (volatile Class)_object_getClass(object);
    return objc_isRegisteredClass(cls) ? class_getWeakIvarLayout(cls) : NULL;
}

// Register/unregister a data segment so the collector scans its globals as roots.
PRIVATE_EXTERN void gc_register_datasegment(uintptr_t base, size_t size) {
    auto_zone_register_datasegment(gc_zone, (void*)base, size);
}

PRIVATE_EXTERN void gc_unregister_datasegment(uintptr_t base, size_t size) {
    auto_zone_unregister_datasegment(gc_zone, (void*)base, size);
}
970 #define countof(array) (sizeof(array) / sizeof(array[0]))
972 // defined in objc-externalref.m.
973 extern objc_xref_t _object_addExternalReference_gc(id obj, objc_xref_t type);
974 extern objc_xref_t _object_addExternalReference_rr(id obj, objc_xref_t type);
975 extern id _object_readExternalReference_gc(objc_xref_t ref);
976 extern id _object_readExternalReference_rr(objc_xref_t ref);
977 extern void _object_removeExternalReference_gc(objc_xref_t ref);
978 extern void _object_removeExternalReference_rr(objc_xref_t ref);
// Rewire the lazy-bound write-barrier stubs in an image to either the GC
// or the non-GC implementations, depending on whether GC is active.
PRIVATE_EXTERN void gc_fixup_barrier_stubs(const struct dyld_image_info *info) {
    static const char *symbols[] = {
        "_objc_assign_strongCast", "_objc_assign_ivar",
        "_objc_assign_global", "_objc_assign_threadlocal",
        "_objc_read_weak", "_objc_assign_weak",
        "_objc_getProperty", "_objc_setProperty",
        "_objc_getAssociatedObject", "_objc_setAssociatedObject",
        "__object_addExternalReference", "__object_readExternalReference", "__object_removeExternalReference"
    };
    if (UseGC) {
        // resolve barrier symbols using GC functions.
        static void *gc_functions[] = {
            &objc_assign_strongCast_gc, &objc_assign_ivar_gc,
            &objc_assign_global_gc, &objc_assign_threadlocal_gc,
            &objc_read_weak_gc, &objc_assign_weak_gc,
            &objc_getProperty_gc, &objc_setProperty_gc,
            &objc_getAssociatedObject_gc, &objc_setAssociatedObject_gc,
            &_object_addExternalReference_gc, &_object_readExternalReference_gc, &_object_removeExternalReference_gc
        };
        assert(countof(symbols) == countof(gc_functions));
        _objc_update_stubs_in_mach_header(info->imageLoadAddress, countof(symbols), symbols, gc_functions);
    } else {
        // resolve barrier symbols using non-GC functions.
        static void *nongc_functions[] = {
            &objc_assign_strongCast_non_gc, &objc_assign_ivar_non_gc,
            &objc_assign_global_non_gc, &objc_assign_threadlocal_non_gc,
            &objc_read_weak_non_gc, &objc_assign_weak_non_gc,
            &objc_getProperty_non_gc, &objc_setProperty_non_gc,
            &objc_getAssociatedObject_non_gc, &objc_setAssociatedObject_non_gc,
            &_object_addExternalReference_rr, &_object_readExternalReference_rr, &_object_removeExternalReference_rr
        };
        assert(countof(symbols) == countof(nongc_functions));
        _objc_update_stubs_in_mach_header(info->imageLoadAddress, countof(symbols), symbols, nongc_functions);
    }
}
1016 /***********************************************************************
1018 **********************************************************************/
// auto_zone "will grow" callback: when the heap is about to expand and no
// collection is already in progress, kicks off a coalesced ratio collection
// to try to reclaim space first.
// NOTE(review): original lines 1022-1024 and 1026-1029 (the if-body, the
// else, and closing braces) are missing from this extraction.
1020 static void objc_will_grow(auto_zone_t *zone, auto_heap_growth_info_t info) {
1021 if (auto_zone_is_collecting(gc_zone)) {
1025 auto_zone_collect(gc_zone, AUTO_ZONE_COLLECT_COALESCE|AUTO_ZONE_COLLECT_RATIO_COLLECTION);
// Creates and configures the auto_zone used for garbage collection:
// initializes the main-thread batch-finalization queue, creates the zone,
// optionally disables compaction, and installs the runtime's callbacks
// (finalization, growth, resurrection, layout, and naming).
// NOTE(review): original lines 1031, 1034-1036, 1042-1043, 1045, 1047, 1049,
// 1057 and 1060+ (including the didOnce guard usage and the return) are
// missing from this extraction.
1030 static auto_zone_t *gc_zone_init(BOOL wantsCompaction)
1032 auto_zone_t *result;
// didOnce presumably guards one-time queue initialization -- TODO confirm.
1033 static int didOnce = 0;
1037 // initialize the batch finalization queue
1038 MainThreadWorkQ.head = NULL;
1039 MainThreadWorkQ.tail = NULL;
1040 pthread_mutex_init(&MainThreadWorkQ.mutex, NULL);
1041 pthread_cond_init(&MainThreadWorkQ.condition, NULL);
1044 result = auto_zone_create("auto_zone");
1046 if (!wantsCompaction) auto_zone_disable_compaction(result);
1048 auto_collection_control_t *control = auto_collection_parameters(result);
1050 // set up the magic control parameters
1051 control->batch_invalidate = BatchInvalidate;
1052 control->will_grow = objc_will_grow;
1053 control->resurrect = resurrectZombie;
1054 control->layout_for_address = objc_layout_for_address;
1055 control->weak_layout_for_address = objc_weak_layout_for_address;
1056 control->name_for_address = objc_name_for_address;
// name_for_object only exists in newer auto_collection_control_t versions;
// the version check keeps us compatible with an older libauto.
1058 if (control->version >= sizeof(auto_collection_control_t)) {
1059 control->name_for_object = objc_name_for_object;
1066 /* should be defined in /usr/local/include/libdispatch_private.h. */
1067 extern void (*dispatch_begin_thread_4GC)(void);
1068 extern void (*dispatch_end_thread_4GC)(void);
// Immediately reaps the calling thread's thread-local (TLC) blocks.
// No-op when garbage collection is off.
static void objc_reapThreadLocalBlocks()
{
    if (!UseGC) return;
    auto_zone_reap_all_local_blocks(gc_zone);
}
// Registers the calling thread with the collector so its stack is scanned.
// No-op when garbage collection is off.
void objc_registerThreadWithCollector()
{
    if (!UseGC) return;
    auto_zone_register_thread(gc_zone);
}
// Unregisters the calling thread from the collector.
// No-op when garbage collection is off.
void objc_unregisterThreadWithCollector()
{
    if (!UseGC) return;
    auto_zone_unregister_thread(gc_zone);
}
// Debug aid: asserts that the calling thread has been registered with the
// collector. No-op when garbage collection is off.
void objc_assertRegisteredThreadWithCollector()
{
    if (!UseGC) return;
    auto_zone_assert_thread_registered(gc_zone);
}
1090 // Always called by _objcInit, even if GC is off.
// Records the GC/compaction choice, and when GC is wanted: creates the zone,
// wires libdispatch thread registration, installs a placeholder finalize IMP
// until Foundation loads, builds the registered-class side table, and tags
// crash reports with the GC state.
// NOTE(review): original lines 1092-1093, 1095-1096, 1099-1101, 1104, 1108,
// 1111, 1114, 1116-1118 and 1122+ are missing from this extraction --
// they presumably include `UseGC = wantsGC`, a PrintGC guard around the
// informs, an early return when !wantsGC, and the gc_block_init() call.
1091 PRIVATE_EXTERN void gc_init(BOOL wantsGC, BOOL wantsCompaction)
1094 UseCompaction = wantsCompaction;
1097 _objc_inform("GC: is %s", wantsGC ? "ON" : "OFF");
1098 _objc_inform("Compaction: is %s", wantsCompaction ? "ON" : "OFF");
1102 // Set up the GC zone
1103 gc_zone = gc_zone_init(wantsCompaction);
1105 // tell libdispatch to register its threads with the GC.
1106 dispatch_begin_thread_4GC = objc_registerThreadWithCollector;
1107 dispatch_end_thread_4GC = objc_reapThreadLocalBlocks;
1109 // no NSObject until Foundation calls objc_collect_init()
1110 _NSObject_finalize = &_objc_msgForward_internal;
1112 // set up the registered classes list
1113 registeredClassTableInit();
1115 // tell Blocks to use collectable memory. CF will cook up the classes separately.
1119 // Add GC state to crash log reports
1120 _objc_inform_on_crash("garbage collection is %s",
1121 wantsGC ? "ON" : "OFF");
1126 // Called by Foundation to install auto's interruption callback.
// Resolves the real -[NSObject finalize] implementation now that Foundation
// is present (fataling if it is still unimplemented), sets up the
// _NSResurrectedObject tracking class, and hands the GC zone back to
// Foundation as a malloc_zone_t.
// NOTE(review): original lines 1128, 1134-1135, 1138 and 1140-1141 (braces
// and blank lines) are missing from this extraction.
1127 malloc_zone_t *objc_collect_init(int (*callback)(void))
1129 // Find NSObject's finalize method now that Foundation is loaded.
1130 // fixme only look for the base implementation, not a category's
1131 _NSObject_finalize = class_getMethodImplementation(objc_getClass("NSObject"), @selector(finalize));
1132 if (_NSObject_finalize == &_objc_msgForward /* not _internal! */) {
1133 _objc_fatal("GC: -[NSObject finalize] unimplemented!");
1136 // create the _NSResurrectedObject class used to track resurrections.
1137 _NSResurrectedObject_initialize();
1139 return (malloc_zone_t *)gc_zone;
1143 * Support routines for the Block implementation
1147 // The Block runtime now needs to sometimes allocate a Block that is an Object - namely
1148 // when it neesd to have a finalizer which, for now, is only if there are C++ destructors
1149 // in the helper function. Hence the isObject parameter.
1150 // Under GC a -copy message should allocate a refcount 0 block, ergo the isOne parameter.
// Allocation hook handed to the Blocks runtime. Allocates a scanned block in
// the GC zone; `isObject` marks blocks that need finalization (C++ dtors in
// the helper) as AUTO_OBJECT, and `isOne` requests an initial refcount of 1
// (a -copy under GC wants refcount 0).
static void *block_gc_alloc5(const unsigned long size, const bool isOne, const bool isObject) {
    auto_memory_type_t memtype = AUTO_MEMORY_SCANNED;
    if (isObject) memtype = (AUTO_OBJECT | AUTO_MEMORY_SCANNED);
    return auto_zone_allocate_object(gc_zone, size, memtype, isOne, false);
}
1156 // The Blocks runtime keeps track of everything above 1 and so it only calls
1157 // up to the collector to tell it about the 0->1 transition and then the 1->0 transition
// Refcount transition hook for the Blocks runtime. Blocks tracks counts
// above 1 itself and only reports the 0->1 (gain a refcount) and 1->0
// (lose it) transitions, which map to a zone retain/release respectively.
static void block_gc_setHasRefcount(const void *block, const bool hasRefcount) {
    if (hasRefcount) {
        auto_zone_retain(gc_zone, (void *)block);
    } else {
        auto_zone_release(gc_zone, (void *)block);
    }
}
// memmove hook for the Blocks runtime: copies block contents through the
// collector's write barrier so stored object pointers stay visible to GC.
static void block_gc_memmove(void *dst, void *src, unsigned long size) {
    size_t nbytes = (size_t)size;
    auto_zone_write_barrier_memmove(gc_zone, dst, src, nbytes);
}
// Installs the GC hooks into the Blocks runtime (allocation, refcount
// transitions, strong/weak assignment barriers, and barrier-aware memmove).
// NOTE(review): original lines 1170-1171 and 1175-1179 are missing from this
// extraction -- presumably the `_Block_use_GC(` call head with
// block_gc_alloc5, and the trailing block_gc_memmove argument plus closing
// parens/braces; confirm against objc4 and Block_private.h.
1169 static void gc_block_init(void) {
1172 block_gc_setHasRefcount,
1173 (void (*)(void *, void **))objc_assign_strongCast_gc,
1174 (void (*)(const void *, void *))objc_assign_weak,
1180 /***********************************************************************
1182 * In addition to the global class hashtable (set) indexed by name, we
1183 * also keep one based purely by pointer when running under Garbage Collection.
1184 * This allows the background collector to race against objects recycled from TLC.
1185 * Specifically, the background collector can read the admin byte and see that
1186 * a thread local object is an object, get scheduled out, and the TLC recovers it,
1187 * linking it into the cache, then the background collector reads the isa field and
1188 * finds linkage info. By qualifying all isa fields read we avoid this.
1189 **********************************************************************/
1191 // This is a self-contained hash table of all classes. The first two elements contain the (size-1) and count.
1192 static volatile Class *AllClasses = nil;
1195 #define INITIALSIZE 512
1198 // Allocate the side table.
// Creates the pointer-keyed registered-class hash table (AllClasses).
// Layout: slot 0 = capacity-1 mask, slot 1 = count, slots 2.. = entries.
// NOTE(review): original lines 1200, 1206 and 1209-1210 are missing from
// this extraction -- 1206 presumably sets table[1] = 0 and 1209+ holds the
// auto_zone_release balancing the refcount-1 allocation mentioned below.
1199 static void registeredClassTableInit() {
1201 // allocate a collectable (refcount 0) zeroed hunk of unscanned memory
1202 uintptr_t *table = (uintptr_t *)auto_zone_allocate_object(gc_zone, INITIALSIZE*sizeof(void *), AUTO_MEMORY_UNSCANNED, true, true);
1203 // set initial capacity (as mask)
1204 table[0] = INITIALSIZE - 1;
1205 // set initial count
1207 // Compaction: we allocate it refcount 1 and then decr when done.
1208 AllClasses = (Class *)table;
1211 // Verify that a particular pointer is to a class.
1212 // Safe from any thread anytime
// Open-addressed linear probe over the AllClasses side table: hash the
// candidate pointer to a slot, then walk forward until the candidate is
// found (YES), an empty slot is hit (NO), or the probe wraps.
// NOTE(review): original lines 1214, 1222, 1226, 1229-1234 and 1237-1239
// are missing from this extraction -- they presumably contain the probe
// loop header, the `return YES;` / `return NO;` bodies for the two
// comparisons below, and the slot-increment/wrap logic.
1213 static BOOL objc_isRegisteredClass(Class candidate) {
1215 // nil is never a valid ISA.
1216 if (candidate == nil) return NO;
1217 // We don't care about a race with another thread adding a class to which we randomly might have a pointer
1218 // Get local copy of classes so that we're immune from updates.
1219 // We keep the size of the list as the first element so there is no race as the list & size get updated.
1220 uintptr_t *allClasses = (uintptr_t *)AllClasses;
1221 // Slot 0 is always the size of the list in log 2 masked terms (e.g. size - 1) where size is always power of 2
1223 uintptr_t slot = (((uintptr_t)candidate) >> SHIFT) & allClasses[0];
1224 // avoid slot 0 and 1
1225 if (slot < 2) slot = 2;
1227 long int slotValue = allClasses[slot];
1228 if (slotValue == (long int)candidate) {
1231 if (slotValue == 0) {
// Wrap the probe past the end of the table, skipping the header slots.
1235 if (slot > allClasses[0])
1236 slot = 2; // skip size, count
1240 // Utility used when growing
1241 // Assumes lock held
// Inserts `candidate` into a fresh table during a rehash: linear-probe from
// the hashed slot to the first empty slot. The new table has no REMOVED
// tombstones, so only empty slots terminate the probe.
// NOTE(review): original lines 1245, 1249-1252 and 1255-1257 are missing
// from this extraction -- presumably the probe loop header, the
// count-update/return after a successful store, and the closing braces.
1242 static void addClassHelper(uintptr_t *table, uintptr_t candidate) {
1243 uintptr_t slot = (((long int)candidate) >> SHIFT) & table[0];
1244 if (slot < 2) slot = 2;
1246 uintptr_t slotValue = table[slot];
1247 if (slotValue == 0) {
1248 table[slot] = candidate;
// Wrap the probe past the end of the table, skipping the header slots.
1253 if (slot > table[0])
1254 slot = 2; // skip size, count
1258 // lock held by callers
// Adds a class pointer to the AllClasses side table. Reuses REMOVED
// tombstone slots, or fills an empty slot; when the table crosses 50%
// utilization it is doubled, live entries are rehashed via addClassHelper,
// and the old table is released so the collector can reclaim it once no
// reader still holds it.
// NOTE(review): original lines 1259, 1261, 1267, 1272-1273, 1277, 1280,
// 1282, 1286 and 1290-1298 are missing from this extraction -- presumably
// the UseGC guard, the probe loop header, the returns after each store, the
// loop index declaration, the newTable count initialization, and closing
// braces.
1260 void objc_addRegisteredClass(Class candidate) {
1262 uintptr_t *table = (uintptr_t *)AllClasses;
1263 // Slot 0 is always the size of the list in log 2 masked terms (e.g. size - 1) where size is always power of 2
1264 // Slot 1 is count - always non-zero
1265 uintptr_t slot = (((long int)candidate) >> SHIFT) & table[0];
1266 if (slot < 2) slot = 2;
1268 uintptr_t slotValue = table[slot];
// A class must never be registered twice.
1269 assert(slotValue != (uintptr_t)candidate);
1270 if (slotValue == REMOVED) {
1271 table[slot] = (long)candidate;
1274 else if (slotValue == 0) {
1275 table[slot] = (long)candidate;
1276 if (2*++table[1] > table[0]) { // add to count; check if we cross 50% utilization
// Grow: capacity doubles; new mask is 2*oldSize - 1.
1278 uintptr_t oldSize = table[0]+1;
1279 uintptr_t *newTable = (uintptr_t *)auto_zone_allocate_object(gc_zone, oldSize*2*sizeof(void *), AUTO_MEMORY_UNSCANNED, true, true);
1281 newTable[0] = 2*oldSize - 1;
1283 for (i = 2; i < oldSize; ++i) {
1284 if (table[i] && table[i] != REMOVED)
1285 addClassHelper(newTable, table[i]);
1287 AllClasses = (Class *)newTable;
1288 // let the old table be collected when other threads are no longer reading it.
1289 auto_zone_release(gc_zone, (void *)table);
// Wrap the probe past the end of the table, skipping the header slots.
1294 if (slot > table[0])
1295 slot = 2; // skip size, count
1299 // lock held by callers
// Removes a class pointer from the AllClasses side table by replacing its
// slot with the REMOVED tombstone (the count is intentionally not
// decremented -- see the comment on the store). Asserts the class is
// actually present: the probe must find it before hitting an empty slot.
// NOTE(review): original lines 1300, 1302, 1308, 1312-1313, 1315 and
// 1318-1321 are missing from this extraction -- presumably the UseGC guard,
// the probe loop header, the return after the store, and closing braces.
1301 void objc_removeRegisteredClass(Class candidate) {
1303 uintptr_t *table = (uintptr_t *)AllClasses;
1304 // Slot 0 is always the size of the list in log 2 masked terms (e.g. size - 1) where size is always power of 2
1305 // Slot 1 is count - always non-zero
1306 uintptr_t slot = (((uintptr_t)candidate) >> SHIFT) & table[0];
1307 if (slot < 2) slot = 2;
1309 uintptr_t slotValue = table[slot];
1310 if (slotValue == (uintptr_t)candidate) {
1311 table[slot] = REMOVED; // if next slot == 0 we could set to 0 here and decr count
// An empty slot before finding the candidate means it was never registered.
1314 assert(slotValue != 0);
// Wrap the probe past the end of the table, skipping the header slots.
1316 if (slot > table[0])
1317 slot = 2; // skip size, count
1322 /***********************************************************************
1323 * Debugging - support for smart printouts when errors occur
1324 **********************************************************************/
// Lazily creates (once) a private malloc zone used solely for the strings
// produced by the debugging/error-report helpers below, keeping them out of
// the collected heap.
static malloc_zone_t *objc_debug_zone(void)
{
    static malloc_zone_t *z = NULL;
    if (z == NULL) {
        z = malloc_create_zone(4096, 0);
        malloc_set_zone_name(z, "objc-auto debug");
    }
    return z;
}
// Recursively formats `value` in the given base into the buffer at `head`,
// most-significant digit first (digits >= 10 use lowercase letters).
// NOTE(review): original lines 1338-1340 and 1344-1347 are missing from this
// extraction -- presumably a zero base-case and the `return head + 1;` that
// makes the recursion advance the write cursor; confirm against objc4.
1337 static char *_malloc_append_unsigned(uintptr_t value, unsigned base, char *head) {
1341 if (value >= base) head = _malloc_append_unsigned(value / base, base, head);
1342 value = value % base;
1343 head[0] = (value < 10) ? '0' + value : 'a' + value - 10;
// Appends the decimal representation of `value` to the C string `str`,
// but only if at least 30 bytes of headroom remain in the buffer
// (enough for any 64-bit value plus terminator).
// NOTE(review): original lines 1349, 1351 and 1353-1356 are missing from
// this extraction -- presumably the early `return;`, a NUL-termination of
// the appended digits, and the closing brace.
1348 static void strlcati(char *str, uintptr_t value, size_t bufSize)
1350 if ( (bufSize - strlen(str)) < 30)
1352 str = _malloc_append_unsigned(value, 10, str + strlen(str));
// Best-effort reverse lookup: finds the ivar of `cls` (or a superclass)
// whose storage covers byte `offset` within an instance. Recurses into the
// superclass first, then scans this class's own ivar list; when an ivar
// starts past `offset`, the previous ivar (or the superclass's best match)
// is the answer. Returns NULL if nothing plausible is found.
// NOTE(review): original lines 1358-1359, 1362, 1364, 1366, 1370, 1378-1379,
// 1384, 1387-1389, 1391, 1393-1395 and 1397+ are missing from this
// extraction -- declarations (ivars, i), result initialization, loop/branch
// closings, the free() of the copied ivar list, and the final return.
1357 static Ivar ivar_for_offset(Class cls, vm_address_t offset)
1360 ptrdiff_t ivar_offset;
1361 Ivar super_ivar, result;
1363 unsigned int ivar_count;
1365 if (!cls) return NULL;
1367 // scan base classes FIRST
1368 super_ivar = ivar_for_offset(class_getSuperclass(cls), offset);
1369 // result is best-effort; our ivars may be closer
// Caller owns the list returned by class_copyIvarList (freed below in the
// missing lines, presumably).
1371 ivars = class_copyIvarList(cls, &ivar_count);
1372 if (ivars && ivar_count) {
1373 // Try our first ivar. If it's too big, use super's best ivar.
1374 // (lose 64-bit precision)
1375 ivar_offset = ivar_getOffset(ivars[0]);
1376 if (ivar_offset > offset) result = super_ivar;
1377 else if (ivar_offset == offset) result = ivars[0];
1380 // Try our other ivars. If any is too big, use the previous.
1381 for (i = 1; result == NULL && i < ivar_count; i++) {
1382 ivar_offset = ivar_getOffset(ivars[i]);
1383 if (ivar_offset == offset) {
1385 } else if (ivar_offset > offset) {
1386 result = ivars[i - 1];
1390 // Found nothing. Return our last ivar.
1392 result = ivars[ivar_count - 1];
1396 result = super_ivar;
// Appends a human-readable description of the ivar at `offset` inside an
// instance of `cls` to `buf` (bounded by `bufSize`): ".name" when the ivar
// is known, ".<?>" when unresolvable, ".<extra>+N" when the offset lies
// beyond the instance size, with a trailing "+N" for any residual offset
// inside the ivar.
// NOTE(review): original lines 1403-1405, 1410-1412, 1414, 1416-1418, 1420,
// 1425, 1427 and 1430+ are missing from this extraction -- the Ivar
// declaration, early returns after each terminal case, and closing braces.
1402 static void append_ivar_at_offset(char *buf, Class cls, vm_address_t offset, size_t bufSize)
1406 if (offset == 0) return; // don't bother with isa
1407 if (offset >= class_getInstanceSize(cls)) {
1408 strlcat(buf, ".<extra>+", bufSize);
1409 strlcati(buf, offset, bufSize);
1413 ivar = ivar_for_offset(cls, offset);
1415 strlcat(buf, ".<?>", bufSize);
1419 // fixme doesn't handle structs etc.
1421 strlcat(buf, ".", bufSize);
1422 const char *ivar_name = ivar_getName(ivar);
1423 if (ivar_name) strlcat(buf, ivar_name, bufSize);
1424 else strlcat(buf, "<anonymous ivar>", bufSize);
// Report any leftover bytes into the ivar as "+N".
1426 offset -= ivar_getOffset(ivar);
1428 strlcat(buf, "+", bufSize);
1429 strlcati(buf, offset, bufSize);
// Returns the CoreFoundation class name for a CF object without linking
// against CF: dlopen()s the already-loaded framework (RTLD_NOLOAD), resolves
// CFGetTypeID and the private _CFRuntimeGetClassWithTypeID, and reads the
// className field out of the runtime class record. Falls back to
// "anonymous_NSCFType" when CF or its symbols are unavailable.
// NOTE(review): original lines 1435, 1437-1439, 1442, 1444, 1447, 1450,
// 1452-1453, 1456-1457 and 1461+ are missing from this extraction --
// declarations (result, dlh, cfid, cfcls), the partial CFRuntimeClass
// struct definition whose first fields include className, and the return.
1434 static const char *cf_class_for_object(void *cfobj)
1436 // ick - we don't link against CF anymore
1440 size_t (*CFGetTypeID)(void *);
1441 void * (*_CFRuntimeGetClassWithTypeID)(size_t);
1443 result = "anonymous_NSCFType";
// RTLD_NOLOAD: only succeed if CF is already loaded in the process.
1445 dlh = dlopen("/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation", RTLD_LAZY | RTLD_NOLOAD | RTLD_FIRST);
1446 if (!dlh) return result;
1448 CFGetTypeID = (size_t(*)(void*)) dlsym(dlh, "CFGetTypeID");
1449 _CFRuntimeGetClassWithTypeID = (void*(*)(size_t)) dlsym(dlh, "_CFRuntimeGetClassWithTypeID");
1451 if (CFGetTypeID && _CFRuntimeGetClassWithTypeID) {
1454 const char *className;
1455 // don't care about the rest
1458 cfid = (*CFGetTypeID)(cfobj);
1459 cfcls = (*_CFRuntimeGetClassWithTypeID)(cfid);
1460 result = cfcls->className;
1468 static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount)
1470 #define APPEND_SIZE(s) \
1471 strlcat(buf, "[", sizeof(buf)); \
1472 strlcati(buf, s, sizeof(buf)); \
1473 strlcat(buf, "]", sizeof(buf));
1481 auto_zone_size(zone, (void *)base);
1482 auto_memory_type_t type = size ?
1483 auto_zone_get_layout_type(zone, (void *)base) : AUTO_TYPE_UNKNOWN;
1484 unsigned int refcount = size ?
1485 auto_zone_retain_count(zone, (void *)base) : 0;
1488 case AUTO_OBJECT_SCANNED:
1489 case AUTO_OBJECT_UNSCANNED:
1490 case AUTO_OBJECT_ALL_POINTERS: {
1491 const char *class_name = object_getClassName((id)base);
1492 if ((0 == strcmp(class_name, "__NSCFType")) || (0 == strcmp(class_name, "NSCFType"))) {
1493 strlcat(buf, cf_class_for_object((void *)base), sizeof(buf));
1495 strlcat(buf, class_name, sizeof(buf));
1498 append_ivar_at_offset(buf, _object_getClass((id)base), offset, sizeof(buf));
1503 case AUTO_MEMORY_SCANNED:
1504 strlcat(buf, "{conservative-block}", sizeof(buf));
1507 case AUTO_MEMORY_UNSCANNED:
1508 strlcat(buf, "{no-pointers-block}", sizeof(buf));
1511 case AUTO_MEMORY_ALL_POINTERS:
1512 strlcat(buf, "{all-pointers-block}", sizeof(buf));
1515 case AUTO_MEMORY_ALL_WEAK_POINTERS:
1516 strlcat(buf, "{all-weak-pointers-block}", sizeof(buf));
1519 case AUTO_TYPE_UNKNOWN:
1520 strlcat(buf, "{uncollectable-memory}", sizeof(buf));
1523 strlcat(buf, "{unknown-memory-type}", sizeof(buf));
1526 if (withRetainCount && refcount > 0) {
1527 strlcat(buf, " [[refcount=", sizeof(buf));
1528 strlcati(buf, refcount, sizeof(buf));
1529 strlcat(buf, "]]", sizeof(buf));
1532 result = malloc_zone_malloc(objc_debug_zone(), 1 + strlen(buf));
1533 strlcpy(result, buf, sizeof(buf));