/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include "objc-private.h"
#include "objc-weak.h"
#include "llvm-DenseMap.h"

#include <malloc/malloc.h>
#include <mach/mach.h>
#include <mach-o/dyld.h>
#include <mach-o/nlist.h>
#include <sys/types.h>
#include <libkern/OSAtomic.h>
@interface NSInvocation
- (SEL)selector;
@end


// NSObject used to be in Foundation/CoreFoundation.
#define SYMBOL_ELSEWHERE_IN_3(sym, vers, n)                             \
    OBJC_EXPORT const char elsewhere_ ##n __asm__("$ld$hide$os" #vers "$" #sym); const char elsewhere_ ##n = 0
#define SYMBOL_ELSEWHERE_IN_2(sym, vers, n)     \
    SYMBOL_ELSEWHERE_IN_3(sym, vers, n)
#define SYMBOL_ELSEWHERE_IN(sym, vers)                  \
    SYMBOL_ELSEWHERE_IN_2(sym, vers, __COUNTER__)

#if __OBJC2__
# define NSOBJECT_ELSEWHERE_IN(vers)                       \
    SYMBOL_ELSEWHERE_IN(_OBJC_CLASS_$_NSObject, vers);     \
    SYMBOL_ELSEWHERE_IN(_OBJC_METACLASS_$_NSObject, vers); \
    SYMBOL_ELSEWHERE_IN(_OBJC_IVAR_$_NSObject.isa, vers)
#else
# define NSOBJECT_ELSEWHERE_IN(vers)                       \
    SYMBOL_ELSEWHERE_IN(.objc_class_name_NSObject, vers)
#endif
#if TARGET_OS_IPHONE
    NSOBJECT_ELSEWHERE_IN(5.1);
    NSOBJECT_ELSEWHERE_IN(5.0);
    NSOBJECT_ELSEWHERE_IN(4.3);
    NSOBJECT_ELSEWHERE_IN(4.2);
    NSOBJECT_ELSEWHERE_IN(4.1);
    NSOBJECT_ELSEWHERE_IN(4.0);
    NSOBJECT_ELSEWHERE_IN(3.2);
    NSOBJECT_ELSEWHERE_IN(3.1);
    NSOBJECT_ELSEWHERE_IN(3.0);
    NSOBJECT_ELSEWHERE_IN(2.2);
    NSOBJECT_ELSEWHERE_IN(2.1);
    NSOBJECT_ELSEWHERE_IN(2.0);
#else
    NSOBJECT_ELSEWHERE_IN(10.7);
    NSOBJECT_ELSEWHERE_IN(10.6);
    NSOBJECT_ELSEWHERE_IN(10.5);
    NSOBJECT_ELSEWHERE_IN(10.4);
    NSOBJECT_ELSEWHERE_IN(10.3);
    NSOBJECT_ELSEWHERE_IN(10.2);
    NSOBJECT_ELSEWHERE_IN(10.1);
    NSOBJECT_ELSEWHERE_IN(10.0);
#endif
/***********************************************************************
* Allocation failure handler
**********************************************************************/

static id defaultBadAllocHandler(Class cls)
{
    _objc_fatal("attempt to allocate object of class '%s' failed", 
                cls->nameForLogging());
}

static id(*badAllocHandler)(Class) = &defaultBadAllocHandler;

static id callBadAllocHandler(Class cls)
{
    // fixme add re-entrancy protection in case allocation fails inside handler
    return (*badAllocHandler)(cls);
}

void _objc_setBadAllocHandler(id(*newHandler)(Class))
{
    badAllocHandler = newHandler;
}
namespace {

#if TARGET_OS_EMBEDDED
#   define SIDE_TABLE_STRIPE 8
#else
#   define SIDE_TABLE_STRIPE 64
#endif

// should be a multiple of cache line size (64)
#define SIDE_TABLE_SIZE 128
// The order of these bits is important.
#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
#define SIDE_TABLE_DEALLOCATING      (1UL<<1)  // MSB-ward of weak bit
#define SIDE_TABLE_RC_ONE            (1UL<<2)  // MSB-ward of deallocating bit
#define SIDE_TABLE_RC_PINNED         (1UL<<(WORD_BITS-1))

#define SIDE_TABLE_RC_SHIFT 2
#define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
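// Editor's illustration (not in the original source): a stored refcount
// word of 0x0D (binary 1101) decodes as WEAKLY_REFERENCED = 1 (bit 0),
// DEALLOCATING = 0 (bit 1), and an extra retain count of 0b11 == 3 in the
// bits at and above SIDE_TABLE_RC_SHIFT, so sidetable_retainCount() below
// would report 1 + 3 == 4.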
// RefcountMap disguises its pointers because we 
// don't want the table to act as a root for `leaks`.
typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;

class SideTable {
private:
    static uint8_t table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];

public:
    spinlock_t slock;
    RefcountMap refcnts;
    weak_table_t weak_table;

    SideTable() : slock(SPINLOCK_INITIALIZER)
    {
        memset(&weak_table, 0, sizeof(weak_table));
    }

    ~SideTable() 
    {
        // never delete side_table in case other threads retain during exit
        assert(0);
    }

    static SideTable *tableForPointer(const void *p) 
    {
#     if SIDE_TABLE_STRIPE == 1
        return (SideTable *)table_buf;
#     else
        uintptr_t a = (uintptr_t)p;
        int index = ((a >> 4) ^ (a >> 9)) & (SIDE_TABLE_STRIPE - 1);
        return (SideTable *)&table_buf[index * SIDE_TABLE_SIZE];
#     endif
    }

    static void init() {
        // use placement new instead of static ctor to avoid dtor at exit
        for (int i = 0; i < SIDE_TABLE_STRIPE; i++) {
            new (&table_buf[i * SIDE_TABLE_SIZE]) SideTable;
        }
    }
};

STATIC_ASSERT(sizeof(SideTable) <= SIDE_TABLE_SIZE);
__attribute__((aligned(SIDE_TABLE_SIZE))) uint8_t 
SideTable::table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];

// anonymous namespace
};
// The -fobjc-arc flag causes the compiler to issue calls to
// objc_{retain,release,autorelease,retain_block}.

id objc_retainBlock(id x) {
    return (id)_Block_copy(x);
}
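// Editor's illustration (not in the original source): under ARC, copying a
// stack block into __strong storage is lowered to a call like
//     id heapBlock = objc_retainBlock(stackBlock);
// which lands here and forwards to _Block_copy().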
//
// The following SHOULD be called by the compiler directly, but the request
// hasn't been made yet :-)
//

BOOL objc_should_deallocate(id object) {
    return YES;
}

id
objc_retain_autorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}


void
objc_storeStrong(id *location, id obj)
{
    id prev = *location;
    if (obj == prev) {
        return;
    }
    objc_retain(obj);
    *location = obj;
    objc_release(prev);
}
/** 
 * This function stores a new value into a __weak variable. It would
 * be used anywhere a __weak variable is the target of an assignment.
 * 
 * @param location The address of the weak pointer itself
 * @param newObj The new object this weak ptr should now point to
 * 
 * @return \e newObj
 */
id
objc_storeWeak(id *location, id newObj)
{
    id oldObj;
    SideTable *oldTable;
    SideTable *newTable;
    spinlock_t *lock1;
#if SIDE_TABLE_STRIPE > 1
    spinlock_t *lock2;
#endif

    // Acquire locks for old and new values.
    // Order by lock address to prevent lock ordering problems. 
    // Retry if the old value changes underneath us.
 retry:
    oldObj = *location;

    oldTable = SideTable::tableForPointer(oldObj);
    newTable = SideTable::tableForPointer(newObj);

    lock1 = &newTable->slock;
#if SIDE_TABLE_STRIPE > 1
    lock2 = &oldTable->slock;
    if (lock1 > lock2) {
        spinlock_t *temp = lock1;
        lock1 = lock2;
        lock2 = temp;
    }
    if (lock1 != lock2) spinlock_lock(lock2);
#endif
    spinlock_lock(lock1);

    if (*location != oldObj) {
        spinlock_unlock(lock1);
#if SIDE_TABLE_STRIPE > 1
        if (lock1 != lock2) spinlock_unlock(lock2);
#endif
        goto retry;
    }

    weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
    newObj = weak_register_no_lock(&newTable->weak_table, newObj, location);
    // weak_register_no_lock returns nil if weak store should be rejected

    // Set is-weakly-referenced bit in refcount table.
    if (newObj && !newObj->isTaggedPointer()) {
        newObj->setWeaklyReferenced_nolock();
    }

    // Do not set *location anywhere else. That would introduce a race.
    *location = newObj;

    spinlock_unlock(lock1);
#if SIDE_TABLE_STRIPE > 1
    if (lock1 != lock2) spinlock_unlock(lock2);
#endif

    return newObj;
}
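// Editor's illustration (not in the original source): for a __weak variable
// that is already initialized, an assignment such as
//     weakPtr = obj;
// compiles to
//     objc_storeWeak(&weakPtr, obj);
// and the store yields nil instead of obj if it was rejected (for example,
// because the object is already deallocating).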
id
objc_loadWeakRetained(id *location)
{
    id result;

    SideTable *table;
    spinlock_t *lock;

 retry:
    result = *location;
    if (!result) return nil;

    table = SideTable::tableForPointer(result);
    lock = &table->slock;
    spinlock_lock(lock);

    if (*location != result) {
        spinlock_unlock(lock);
        goto retry;
    }

    result = weak_read_no_lock(&table->weak_table, location);

    spinlock_unlock(lock);
    return result;
}

/** 
 * This loads the object referenced by a weak pointer and returns it, after
 * retaining and autoreleasing the object to ensure that it stays alive
 * long enough for the caller to use it. This function would be used
 * anywhere a __weak variable is used in an expression.
 * 
 * @param location The weak pointer address
 * 
 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
 */
id
objc_loadWeak(id *location)
{
    if (!*location) return nil;
    return objc_autorelease(objc_loadWeakRetained(location));
}
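// Editor's illustration (not in the original source): reading a __weak
// variable in an expression, e.g.
//     id strong = weakPtr;
// is typically lowered to
//     id strong = objc_loadWeakRetained(&weakPtr);
// with a balancing objc_release() at end of scope; objc_loadWeak() is the
// retain+autorelease flavor of the same read.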
/** 
 * Initialize a fresh weak pointer to some object location. 
 * It would be used for code like:
 *
 * (The nil case)
 * __weak id weakPtr;
 * (The non-nil case)
 * __weak id weakPtr = o;
 * 
 * @param addr Address of __weak ptr. 
 * @param val Object ptr. 
 */
id
objc_initWeak(id *addr, id val)
{
    *addr = 0;
    if (!val) return nil;
    return objc_storeWeak(addr, val);
}
__attribute__((noinline, used)) void
objc_destroyWeak_slow(id *addr)
{
    SideTable *oldTable;
    spinlock_t *lock;
    id oldObj;

    // No need to see weak refs, we are destroying

    // Acquire lock for old value only
    // retry if the old value changes underneath us
 retry:
    oldObj = *addr;
    oldTable = SideTable::tableForPointer(oldObj);

    lock = &oldTable->slock;
    spinlock_lock(lock);

    if (*addr != oldObj) {
        spinlock_unlock(lock);
        goto retry;
    }

    weak_unregister_no_lock(&oldTable->weak_table, oldObj, addr);

    spinlock_unlock(lock);
}

/** 
 * Destroys the relationship between a weak pointer
 * and the object it is referencing in the internal weak
 * table. If the weak pointer is not referencing anything, 
 * there is no need to edit the weak table. 
 *
 * @param addr The weak pointer address. 
 */
void
objc_destroyWeak(id *addr)
{
    if (!*addr) return;
    return objc_destroyWeak_slow(addr);
}
/** 
 * This function copies a weak pointer from one location to another,
 * when the destination doesn't already contain a weak pointer. It
 * would be used for code like:
 *
 *  __weak id weakPtr1 = ...;
 *  __weak id weakPtr2 = weakPtr1;
 * 
 * @param to weakPtr2 in this example
 * @param from weakPtr1
 */
void
objc_copyWeak(id *to, id *from)
{
    id val = objc_loadWeakRetained(from);
    objc_initWeak(to, val);
    objc_release(val);
}

/** 
 * Move a weak pointer from one location to another.
 * Before the move, the destination must be uninitialized.
 * After the move, the source is nil.
 */
void
objc_moveWeak(id *to, id *from)
{
    objc_copyWeak(to, from);
    objc_storeWeak(from, 0);
}
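// Editor's illustration (not in the original source): the compiler uses
// objc_moveWeak() when the source __weak variable will never be read again,
// for example in generated move helpers:
//     objc_moveWeak(&dst, &src);   // dst takes over the registration; src is nil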
/***********************************************************************
   Autorelease pool implementation

   A thread's autorelease pool is a stack of pointers. 
   Each pointer is either an object to release, or POOL_SENTINEL which is 
     an autorelease pool boundary.
   A pool token is a pointer to the POOL_SENTINEL for that pool. When 
     the pool is popped, every object hotter than the sentinel is released.
   The stack is divided into a doubly-linked list of pages. Pages are added 
     and deleted as necessary. 
   Thread-local storage points to the hot page, where newly autoreleased 
     objects are stored. 
**********************************************************************/
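// Editor's illustration (not in the original source): an @autoreleasepool
// block compiles down to the entrypoints defined later in this file:
//     void *token = objc_autoreleasePoolPush();   // pushes a POOL_SENTINEL
//     /* autoreleased objects accumulate on the thread's page stack */
//     objc_autoreleasePoolPop(token);             // releases back to the sentinel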
BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));

namespace {

struct magic_t {
    static const uint32_t M0 = 0xA1A1A1A1;
#   define M1 "AUTORELEASE!"
    static const size_t M1_len = 12;
    uint32_t m[4];

    magic_t() {
        assert(M1_len == strlen(M1));
        assert(M1_len == 3 * sizeof(m[1]));

        m[0] = M0;
        strncpy((char *)&m[1], M1, M1_len);
    }

    ~magic_t() {
        m[0] = m[1] = m[2] = m[3] = 0;
    }

    bool check() const {
        return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
    }

    bool fastcheck() const {
#ifdef NDEBUG
        return (m[0] == M0);
#else
        return check();
#endif
    }

#   undef M1
};


// Set this to 1 to mprotect() autorelease pool contents
#define PROTECT_AUTORELEASEPOOL 0

class AutoreleasePoolPage 
{

#define POOL_SENTINEL nil
    static pthread_key_t const key = AUTORELEASE_POOL_KEY;
    static uint8_t const SCRIBBLE = 0xA3;  // 0xA3A3A3A3 after releasing
    static size_t const SIZE = 
#if PROTECT_AUTORELEASEPOOL
        PAGE_MAX_SIZE;  // must be multiple of vm page size
#else
        PAGE_MAX_SIZE;  // size and alignment, power of 2
#endif
    static size_t const COUNT = SIZE / sizeof(id);

    magic_t const magic;
    id *next;
    pthread_t const thread;
    AutoreleasePoolPage * const parent;
    AutoreleasePoolPage *child;
    uint32_t const depth;
    uint32_t hiwat;
    // SIZE-sizeof(*this) bytes of contents follow

    static void * operator new(size_t size) {
        return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
    }
    static void operator delete(void * p) {
        return free(p);
    }

    inline void protect() {
#if PROTECT_AUTORELEASEPOOL
        mprotect(this, SIZE, PROT_READ);
        check();
#endif
    }

    inline void unprotect() {
#if PROTECT_AUTORELEASEPOOL
        check();
        mprotect(this, SIZE, PROT_READ | PROT_WRITE);
#endif
    }

    AutoreleasePoolPage(AutoreleasePoolPage *newParent) 
        : magic(), next(begin()), thread(pthread_self()),
          parent(newParent), child(nil), 
          depth(parent ? 1+parent->depth : 0), 
          hiwat(parent ? parent->hiwat : 0)
    { 
        if (parent) {
            parent->check();
            assert(!parent->child);
            parent->unprotect();
            parent->child = this;
            parent->protect();
        }
        protect();
    }

    ~AutoreleasePoolPage() 
    {
        check();
        unprotect();
        assert(empty());

        // Not recursive: we don't want to blow out the stack 
        // if a thread accumulates a stupendous amount of garbage
        assert(!child);
    }

    void busted(bool die = true) 
    {
        magic_t right;
        (die ? _objc_fatal : _objc_inform)
            ("autorelease pool page %p corrupted\n"
             "  magic     0x%08x 0x%08x 0x%08x 0x%08x\n"
             "  should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
             "  pthread   %p\n"
             "  should be %p\n", 
             this, 
             magic.m[0], magic.m[1], magic.m[2], magic.m[3], 
             right.m[0], right.m[1], right.m[2], right.m[3], 
             this->thread, pthread_self());
    }
    void check(bool die = true) 
    {
        if (!magic.check() || !pthread_equal(thread, pthread_self())) {
            busted(die);
        }
    }

    void fastcheck(bool die = true) 
    {
        if (! magic.fastcheck()) {
            busted(die);
        }
    }

    id * begin() {
        return (id *) ((uint8_t *)this+sizeof(*this));
    }

    id * end() {
        return (id *) ((uint8_t *)this+SIZE);
    }

    bool empty() {
        return next == begin();
    }

    bool full() { 
        return next == end();
    }

    bool lessThanHalfFull() {
        return (next - begin() < (end() - begin()) / 2);
    }

    id *add(id obj)
    {
        assert(!full());
        unprotect();
        id *ret = next;  // faster than `return next-1` because of aliasing
        *next++ = obj;
        protect();
        return ret;
    }

    void releaseAll() 
    {
        releaseUntil(begin());
    }
    void releaseUntil(id *stop) 
    {
        // Not recursive: we don't want to blow out the stack 
        // if a thread accumulates a stupendous amount of garbage

        while (this->next != stop) {
            // Restart from hotPage() every time, in case -release 
            // autoreleased more objects
            AutoreleasePoolPage *page = hotPage();

            // fixme I think this `while` can be `if`, but I can't prove it
            while (page->empty()) {
                page = page->parent;
                setHotPage(page);
            }

            page->unprotect();
            id obj = *--page->next;
            memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
            page->protect();

            if (obj != POOL_SENTINEL) {
                objc_release(obj);
            }
        }

        setHotPage(this);

#ifndef NDEBUG
        // we expect any children to be completely empty
        for (AutoreleasePoolPage *page = child; page; page = page->child) {
            assert(page->empty());
        }
#endif
    }

    void kill() 
    {
        // Not recursive: we don't want to blow out the stack 
        // if a thread accumulates a stupendous amount of garbage
        AutoreleasePoolPage *page = this;
        while (page->child) page = page->child;

        AutoreleasePoolPage *deathptr;
        do {
            deathptr = page;
            page = page->parent;
            if (page) {
                page->unprotect();
                page->child = nil;
                page->protect();
            }
            delete deathptr;
        } while (deathptr != this);
    }

    static void tls_dealloc(void *p) 
    {
        // reinstate TLS value while we work
        setHotPage((AutoreleasePoolPage *)p);
        pop(0);
        setHotPage(nil);
    }
    static AutoreleasePoolPage *pageForPointer(const void *p) 
    {
        return pageForPointer((uintptr_t)p);
    }

    static AutoreleasePoolPage *pageForPointer(uintptr_t p) 
    {
        AutoreleasePoolPage *result;
        uintptr_t offset = p % SIZE;

        assert(offset >= sizeof(AutoreleasePoolPage));

        result = (AutoreleasePoolPage *)(p - offset);
        result->fastcheck();

        return result;
    }
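    // Editor's illustration (not in the original source): with SIZE == 4096,
    // a token p == 0x100002040 has offset == 0x40, so the owning page header
    // is at p - 0x40 == 0x100002000. The assert above guarantees the token
    // points into the page contents, never into the header itself.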
    static inline AutoreleasePoolPage *hotPage() 
    {
        AutoreleasePoolPage *result = (AutoreleasePoolPage *)
            tls_get_direct(key);
        if (result) result->fastcheck();
        return result;
    }

    static inline void setHotPage(AutoreleasePoolPage *page) 
    {
        if (page) page->fastcheck();
        tls_set_direct(key, (void *)page);
    }

    static inline AutoreleasePoolPage *coldPage() 
    {
        AutoreleasePoolPage *result = hotPage();
        if (result) {
            while (result->parent) {
                result = result->parent;
                result->fastcheck();
            }
        }
        return result;
    }


    static inline id *autoreleaseFast(id obj)
    {
        AutoreleasePoolPage *page = hotPage();
        if (page && !page->full()) {
            return page->add(obj);
        } else if (page) {
            return autoreleaseFullPage(obj, page);
        } else {
            return autoreleaseNoPage(obj);
        }
    }

    static __attribute__((noinline))
    id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
    {
        // The hot page is full. 
        // Step to the next non-full page, adding a new page if necessary.
        // Then add the object to that page.
        assert(page == hotPage() && page->full());

        do {
            if (page->child) page = page->child;
            else page = new AutoreleasePoolPage(page);
        } while (page->full());

        setHotPage(page);
        return page->add(obj);
    }
    static __attribute__((noinline))
    id *autoreleaseNoPage(id obj)
    {
        // No pool in place.
        assert(!hotPage());

        if (obj != POOL_SENTINEL && DebugMissingPools) {
            // We are pushing an object with no pool in place, 
            // and no-pool debugging was requested by environment.
            _objc_inform("MISSING POOLS: Object %p of class %s "
                         "autoreleased with no pool in place - "
                         "just leaking - break on "
                         "objc_autoreleaseNoPool() to debug", 
                         (void*)obj, object_getClassName(obj));
            objc_autoreleaseNoPool(obj);
            return nil;
        }

        // Install the first page.
        AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
        setHotPage(page);

        // Push an autorelease pool boundary if it wasn't already requested.
        if (obj != POOL_SENTINEL) {
            page->add(POOL_SENTINEL);
        }

        // Push the requested object.
        return page->add(obj);
    }

public:
    static inline id autorelease(id obj)
    {
        assert(obj);
        assert(!obj->isTaggedPointer());
        id *dest __unused = autoreleaseFast(obj);
        assert(!dest || *dest == obj);
        return obj;
    }


    static inline void *push() 
    {
        id *dest = autoreleaseFast(POOL_SENTINEL);
        assert(*dest == POOL_SENTINEL);
        return dest;
    }
    static inline void pop(void *token) 
    {
        AutoreleasePoolPage *page;
        id *stop;

        if (token) {
            page = pageForPointer(token);
            stop = (id *)token;
            assert(*stop == POOL_SENTINEL);
        } else {
            // Token 0 is top-level pool
            page = coldPage();
            assert(page);
            stop = page->begin();
        }

        if (PrintPoolHiwat) printHiwat();

        page->releaseUntil(stop);

        // memory: delete empty children
        // hysteresis: keep one empty child if this page is more than half full
        // special case: delete everything for pop(0)
        // special case: delete everything for pop(top) with DebugMissingPools
        if (!token || 
            (DebugMissingPools && page->empty() && !page->parent)) 
        {
            // Truly popped the top-level pool. Destroy everything.
            page->kill();
            setHotPage(nil);
        } else if (page->child) {
            if (page->lessThanHalfFull()) {
                page->child->kill();
            }
            else if (page->child->child) {
                page->child->child->kill();
            }
        }
    }

    static void init()
    {
        int r __unused = pthread_key_init_np(AutoreleasePoolPage::key, 
                                             AutoreleasePoolPage::tls_dealloc);
        assert(r == 0);
    }
    void print() 
    {
        _objc_inform("[%p]  ................  PAGE %s %s %s", this, 
                     full() ? "(full)" : "", 
                     this == hotPage() ? "(hot)" : "", 
                     this == coldPage() ? "(cold)" : "");
        check(false);
        for (id *p = begin(); p < next; p++) {
            if (*p == POOL_SENTINEL) {
                _objc_inform("[%p]  ################  POOL %p", p, p);
            } else {
                _objc_inform("[%p]  %#16lx  %s", 
                             p, (unsigned long)*p, object_getClassName(*p));
            }
        }
    }

    static void printAll()
    {
        _objc_inform("##############");
        _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());

        AutoreleasePoolPage *page;
        ptrdiff_t objects = 0;
        for (page = coldPage(); page; page = page->child) {
            objects += page->next - page->begin();
        }
        _objc_inform("%llu releases pending.", (unsigned long long)objects);

        for (page = coldPage(); page; page = page->child) {
            page->print();
        }

        _objc_inform("##############");
    }

    static void printHiwat()
    {
        // Check and propagate high water mark
        // Ignore high water marks under 256 to suppress noise.
        AutoreleasePoolPage *p = hotPage();
        uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
        if (mark > p->hiwat && mark > 256) {
            for( ; p; p = p->parent) {
                p->unprotect();
                p->hiwat = mark;
                p->protect();
            }

            _objc_inform("POOL HIGHWATER: new high water mark of %u "
                         "pending autoreleases for thread %p:", 
                         mark, pthread_self());

            void *stack[128];
            int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
            char **sym = backtrace_symbols(stack, count);
            for (int i = 0; i < count; i++) {
                _objc_inform("POOL HIGHWATER:     %s", sym[i]);
            }
            free(sym);
        }
    }

#undef POOL_SENTINEL
};

// anonymous namespace
};
/***********************************************************************
* Slow paths for inline control
**********************************************************************/

#if SUPPORT_NONPOINTER_ISA

__attribute__((noinline,used)) id 
objc_object::rootRetain_overflow(bool tryRetain)
{
    return rootRetain(tryRetain, true);
}


__attribute__((noinline,used)) bool 
objc_object::rootRelease_underflow(bool performDealloc)
{
    return rootRelease(performDealloc, true);
}


// Slow path of clearDeallocating() 
// for weakly-referenced objects with indexed isa
void 
objc_object::clearDeallocating_weak()
{
    assert(isa.indexed && isa.weakly_referenced);

    SideTable *table = SideTable::tableForPointer(this);
    spinlock_lock(&table->slock);
    weak_clear_no_lock(&table->weak_table, (id)this);
    spinlock_unlock(&table->slock);
}

#endif

__attribute__((noinline,used))
id 
objc_object::rootAutorelease2()
{
    assert(!isTaggedPointer());
    return AutoreleasePoolPage::autorelease((id)this);
}


BREAKPOINT_FUNCTION(
    void objc_overrelease_during_dealloc_error(void)
);


__attribute__((noinline,used))
bool 
objc_object::overrelease_error()
{
    _objc_inform_now_and_on_crash("%s object %p overreleased while already "
                                  "deallocating; break on "
                                  "objc_overrelease_during_dealloc_error to debug",
                                  object_getClassName((id)this), this);
    objc_overrelease_during_dealloc_error();
    return false;  // allow rootRelease() to tail-call this
}
/***********************************************************************
* Retain count operations for side table.
**********************************************************************/


#ifndef NDEBUG
// Used to assert that an object is not present in the side table.
bool
objc_object::sidetable_present()
{
    bool result = false;
    SideTable *table = SideTable::tableForPointer(this);

    spinlock_lock(&table->slock);

    RefcountMap::iterator it = table->refcnts.find(this);
    if (it != table->refcnts.end()) result = true;

    if (weak_is_registered_no_lock(&table->weak_table, (id)this)) result = true;

    spinlock_unlock(&table->slock);

    return result;
}
#endif

#if SUPPORT_NONPOINTER_ISA

void 
objc_object::sidetable_lock()
{
    SideTable *table = SideTable::tableForPointer(this);
    spinlock_lock(&table->slock);
}

void 
objc_object::sidetable_unlock()
{
    SideTable *table = SideTable::tableForPointer(this);
    spinlock_unlock(&table->slock);
}
// Move the entire retain count to the side table, 
// as well as isDeallocating and weaklyReferenced.
void 
objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc, 
                                          bool isDeallocating, 
                                          bool weaklyReferenced)
{
    assert(!isa.indexed);        // should already be changed to not-indexed
    SideTable *table = SideTable::tableForPointer(this);

    size_t& refcntStorage = table->refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // not deallocating - that was in the isa
    assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    uintptr_t carry;
    size_t refcnt = addc(oldRefcnt, extra_rc<<SIDE_TABLE_RC_SHIFT, 0, &carry);
    if (carry) refcnt = SIDE_TABLE_RC_PINNED;
    if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
    if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;

    refcntStorage = refcnt;
}
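// Editor's note (not in the original source): if the shifted addition above
// overflows the word, addc() reports it via `carry` and the stored count is
// clamped to SIDE_TABLE_RC_PINNED (the sign bit). A pinned count is never
// incremented or decremented again, so the object becomes effectively
// immortal rather than wrapping around.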
// Move some retain counts to the side table from the isa field.
// Returns true if the object is now pinned.
bool 
objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
{
    assert(isa.indexed);
    SideTable *table = SideTable::tableForPointer(this);

    size_t& refcntStorage = table->refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // not deallocating - that is in the isa
    assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;

    uintptr_t carry;
    size_t newRefcnt = 
        addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
    if (carry) {
        refcntStorage = 
            SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
        return true;
    }
    else {
        refcntStorage = newRefcnt;
        return false;
    }
}


// Move some retain counts from the side table to the isa field.
// Returns true if the sidetable retain count is now 0.
bool 
objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
{
    assert(isa.indexed);
    SideTable *table = SideTable::tableForPointer(this);

    size_t& refcntStorage = table->refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // not deallocating - that is in the isa
    assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    if (oldRefcnt < delta_rc) {
        _objc_inform_now_and_on_crash("refcount underflow error for object %p", 
                                      this);
        _objc_fatal("refcount underflow error for %s %p", 
                    object_getClassName((id)this), this);
    }

    size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
    if (newRefcnt == 0) {
        table->refcnts.erase(this);
        return true;
    }
    else {
        refcntStorage = newRefcnt;
        return false;
    }
}


size_t 
objc_object::sidetable_getExtraRC_nolock()
{
    assert(isa.indexed);
    SideTable *table = SideTable::tableForPointer(this);
    RefcountMap::iterator it = table->refcnts.find(this);
    assert(it != table->refcnts.end());
    return it->second >> SIDE_TABLE_RC_SHIFT;
}

#endif  // SUPPORT_NONPOINTER_ISA
__attribute__((used,noinline,nothrow))
id
objc_object::sidetable_retain_slow(SideTable *table)
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif

    spinlock_lock(&table->slock);
    size_t& refcntStorage = table->refcnts[this];
    if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
        refcntStorage += SIDE_TABLE_RC_ONE;
    }
    spinlock_unlock(&table->slock);

    return (id)this;
}


id
objc_object::sidetable_retain()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif
    SideTable *table = SideTable::tableForPointer(this);

    if (spinlock_trylock(&table->slock)) {
        size_t& refcntStorage = table->refcnts[this];
        if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
            refcntStorage += SIDE_TABLE_RC_ONE;
        }
        spinlock_unlock(&table->slock);
        return (id)this;
    }
    return sidetable_retain_slow(table);
}


bool
objc_object::sidetable_tryRetain()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif
    SideTable *table = SideTable::tableForPointer(this);

    // NO SPINLOCK HERE
    // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(), 
    // which already acquired the lock on our behalf.

    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table->slock == 0) {
    //     _objc_fatal("Do not call -_tryRetain.");
    // }

    bool result = true;
    RefcountMap::iterator it = table->refcnts.find(this);
    if (it == table->refcnts.end()) {
        table->refcnts[this] = SIDE_TABLE_RC_ONE;
    } else if (it->second & SIDE_TABLE_DEALLOCATING) {
        result = false;
    } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
        it->second += SIDE_TABLE_RC_ONE;
    }

    return result;
}
uintptr_t
objc_object::sidetable_retainCount()
{
    SideTable *table = SideTable::tableForPointer(this);

    size_t refcnt_result = 1;

    spinlock_lock(&table->slock);
    RefcountMap::iterator it = table->refcnts.find(this);
    if (it != table->refcnts.end()) {
        // this is valid for SIDE_TABLE_RC_PINNED too
        refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
    }
    spinlock_unlock(&table->slock);
    return refcnt_result;
}


bool 
objc_object::sidetable_isDeallocating()
{
    SideTable *table = SideTable::tableForPointer(this);

    // NO SPINLOCK HERE
    // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(), 
    // which already acquired the lock on our behalf.

    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table->slock == 0) {
    //     _objc_fatal("Do not call -_isDeallocating.");
    // }

    RefcountMap::iterator it = table->refcnts.find(this);
    return (it != table->refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
}


bool 
objc_object::sidetable_isWeaklyReferenced()
{
    bool result = false;

    SideTable *table = SideTable::tableForPointer(this);
    spinlock_lock(&table->slock);

    RefcountMap::iterator it = table->refcnts.find(this);
    if (it != table->refcnts.end()) {
        result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
    }

    spinlock_unlock(&table->slock);

    return result;
}


void 
objc_object::sidetable_setWeaklyReferenced_nolock()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif

    SideTable *table = SideTable::tableForPointer(this);

    table->refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
}
__attribute__((used,noinline,nothrow))
bool
objc_object::sidetable_release_slow(SideTable *table, bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif
    bool do_dealloc = false;

    spinlock_lock(&table->slock);
    RefcountMap::iterator it = table->refcnts.find(this);
    if (it == table->refcnts.end()) {
        do_dealloc = true;
        table->refcnts[this] = SIDE_TABLE_DEALLOCATING;
    } else if (it->second < SIDE_TABLE_DEALLOCATING) {
        // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
        do_dealloc = true;
        it->second |= SIDE_TABLE_DEALLOCATING;
    } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
        it->second -= SIDE_TABLE_RC_ONE;
    }
    spinlock_unlock(&table->slock);
    if (do_dealloc && performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
    }
    return do_dealloc;
}


bool
objc_object::sidetable_release(bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif
    SideTable *table = SideTable::tableForPointer(this);

    bool do_dealloc = false;

    if (spinlock_trylock(&table->slock)) {
        RefcountMap::iterator it = table->refcnts.find(this);
        if (it == table->refcnts.end()) {
            do_dealloc = true;
            table->refcnts[this] = SIDE_TABLE_DEALLOCATING;
        } else if (it->second < SIDE_TABLE_DEALLOCATING) {
            // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
            do_dealloc = true;
            it->second |= SIDE_TABLE_DEALLOCATING;
        } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
            it->second -= SIDE_TABLE_RC_ONE;
        }
        spinlock_unlock(&table->slock);
        if (do_dealloc && performDealloc) {
            ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
        }
        return do_dealloc;
    }

    return sidetable_release_slow(table, performDealloc);
}


void 
objc_object::sidetable_clearDeallocating()
{
    SideTable *table = SideTable::tableForPointer(this);

    // clear any weak table items
    // clear extra retain count and deallocating bit
    // (fixme warn or abort if extra retain count == 0 ?)
    spinlock_lock(&table->slock);
    RefcountMap::iterator it = table->refcnts.find(this);
    if (it != table->refcnts.end()) {
        if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
            weak_clear_no_lock(&table->weak_table, (id)this);
        }
        table->refcnts.erase(it);
    }
    spinlock_unlock(&table->slock);
}
/***********************************************************************
* Optimized retain/release/autorelease entrypoints
**********************************************************************/


#if __OBJC2__

__attribute__((aligned(16)))
id 
objc_retain(id obj)
{
    if (!obj) return obj;
    if (obj->isTaggedPointer()) return obj;
    return obj->retain();
}


__attribute__((aligned(16)))
void 
objc_release(id obj)
{
    if (!obj) return;
    if (obj->isTaggedPointer()) return;
    return obj->release();
}


__attribute__((aligned(16)))
id 
objc_autorelease(id obj)
{
    if (!obj) return obj;
    if (obj->isTaggedPointer()) return obj;
    return obj->autorelease();
}


#else


id objc_retain(id obj) { return [obj retain]; }
void objc_release(id obj) { [obj release]; }
id objc_autorelease(id obj) { return [obj autorelease]; }


#endif
/***********************************************************************
* Basic operations for root class implementations a.k.a. _objc_root*()
**********************************************************************/

bool
_objc_rootTryRetain(id obj) 
{
    assert(obj);

    return obj->rootTryRetain();
}

bool
_objc_rootIsDeallocating(id obj) 
{
    assert(obj);

    return obj->rootIsDeallocating();
}


void 
objc_clear_deallocating(id obj) 
{
    assert(obj);
    assert(!UseGC);

    if (obj->isTaggedPointer()) return;
    obj->clearDeallocating();
}


bool
_objc_rootReleaseWasZero(id obj)
{
    assert(obj);

    return obj->rootReleaseShouldDealloc();
}


id
_objc_rootAutorelease(id obj)
{
    assert(obj);
    if (UseGC) return obj;  // fixme CF calls this when GC is on

    return obj->rootAutorelease();
}

uintptr_t
_objc_rootRetainCount(id obj)
{
    assert(obj);

    return obj->rootRetainCount();
}


id
_objc_rootRetain(id obj)
{
    assert(obj);

    return obj->rootRetain();
}

void
_objc_rootRelease(id obj)
{
    assert(obj);

    obj->rootRelease();
}


id
_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
{
    id obj;

#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    obj = class_createInstance(cls, 0);
#else
    if (!zone || UseGC) {
        obj = class_createInstance(cls, 0);
    }
    else {
        obj = class_createInstanceFromZone(cls, 0, zone);
    }
#endif

    if (!obj) obj = callBadAllocHandler(cls);
    return obj;
}
// Call [cls alloc] or [cls allocWithZone:nil], with appropriate 
// shortcutting optimizations.
static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
{
    if (checkNil && !cls) return nil;

#if __OBJC2__
    if (! cls->ISA()->hasCustomAWZ()) {
        // No alloc/allocWithZone implementation. Go straight to the allocator.
        // fixme store hasCustomAWZ in the non-meta class and 
        // add it to canAllocFast's summary
        if (cls->canAllocFast()) {
            // No ctors, raw isa, etc. Go straight to the metal.
            bool dtor = cls->hasCxxDtor();
            id obj = (id)calloc(1, cls->bits.fastInstanceSize());
            if (!obj) return callBadAllocHandler(cls);
            obj->initInstanceIsa(cls, dtor);
            return obj;
        }
        else {
            // Has ctor or raw isa or something. Use the slower path.
            id obj = class_createInstance(cls, 0);
            if (!obj) return callBadAllocHandler(cls);
            return obj;
        }
    }
#endif

    // No shortcuts available.
    if (allocWithZone) return [cls allocWithZone:nil];
    return [cls alloc];
}


// Base class implementation of +alloc. cls is not nil.
// Calls [cls allocWithZone:nil].
id
_objc_rootAlloc(Class cls)
{
    return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
}

// Calls [cls alloc].
id 
objc_alloc(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
}

// Calls [cls allocWithZone:nil].
id 
objc_allocWithZone(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
}
void
_objc_rootDealloc(id obj)
{
    assert(obj);

    obj->rootDealloc();
}

void
_objc_rootFinalize(id obj __unused)
{
    assert(obj);

    if (UseGC) {
        return;
    }
    _objc_fatal("_objc_rootFinalize called with garbage collection off");
}


id
_objc_rootInit(id obj)
{
    // In practice, it will be hard to rely on this function.
    // Many classes do not properly chain -init calls.
    return obj;
}


malloc_zone_t *
_objc_rootZone(id obj)
{
    (void)obj;
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    return malloc_default_zone();
#else
    malloc_zone_t *rval = malloc_zone_from_ptr(obj);
    return rval ? rval : malloc_default_zone();
#endif
}

uintptr_t
_objc_rootHash(id obj)
{
    if (UseGC) {
        return _object_getExternalHash(obj);
    }
    return (uintptr_t)obj;
}
void *
objc_autoreleasePoolPush(void)
{
    if (UseGC) return nil;
    return AutoreleasePoolPage::push();
}

void
objc_autoreleasePoolPop(void *ctxt)
{
    if (UseGC) return;

    // fixme rdar://9167170
    if (!ctxt) return;

    AutoreleasePoolPage::pop(ctxt);
}


void *
_objc_autoreleasePoolPush(void)
{
    return objc_autoreleasePoolPush();
}

void
_objc_autoreleasePoolPop(void *ctxt)
{
    objc_autoreleasePoolPop(ctxt);
}

void 
_objc_autoreleasePoolPrint(void)
{
    if (UseGC) return;
    AutoreleasePoolPage::printAll();
}

id 
objc_autoreleaseReturnValue(id obj)
{
    if (fastAutoreleaseForReturn(obj)) return obj;

    return objc_autorelease(obj);
}

id 
objc_retainAutoreleaseReturnValue(id obj)
{
    return objc_autoreleaseReturnValue(objc_retain(obj));
}

id
objc_retainAutoreleasedReturnValue(id obj)
{
    if (fastRetainFromReturn(obj)) return obj;

    return objc_retain(obj);
}

id
objc_retainAutorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}
void
_objc_deallocOnMainThreadHelper(void *context)
{
    id obj = (id)context;
    [obj dealloc];
}

#undef objc_retainedObject
#undef objc_unretainedObject
#undef objc_unretainedPointer

// convert objc_objectptr_t to id, callee must take ownership.
id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert objc_objectptr_t to id, without ownership transfer.
id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert id to objc_objectptr_t, no ownership transfer.
objc_objectptr_t objc_unretainedPointer(id object) { return object; }


void arr_init(void) 
{
    AutoreleasePoolPage::init();
    SideTable::init();
}
@implementation NSObject

+ (void)load {
    if (UseGC) gc_init2();
}

+ (void)initialize {
}

+ (id)self {
    return (id)self;
}

- (id)self {
    return self;
}

+ (Class)class {
    return self;
}

- (Class)class {
    return object_getClass(self);
}
+ (Class)superclass {
    return self->superclass;
}

- (Class)superclass {
    return [self class]->superclass;
}

+ (BOOL)isMemberOfClass:(Class)cls {
    return object_getClass((id)self) == cls;
}

- (BOOL)isMemberOfClass:(Class)cls {
    return [self class] == cls;
}

+ (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

- (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isSubclassOfClass:(Class)cls {
    for (Class tcls = self; tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isAncestorOfObject:(NSObject *)obj {
    for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
        if (tcls == self) return YES;
    }
    return NO;
}

+ (BOOL)instancesRespondToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector(self, sel);
}

+ (BOOL)respondsToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector_inst(object_getClass(self), sel, self);
}

- (BOOL)respondsToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector_inst([self class], sel, self);
}

+ (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = self; tcls; tcls = tcls->superclass) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

- (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

+ (NSUInteger)hash {
    return _objc_rootHash(self);
}

- (NSUInteger)hash {
    return _objc_rootHash(self);
}

+ (BOOL)isEqual:(id)obj {
    return obj == (id)self;
}

- (BOOL)isEqual:(id)obj {
    return obj == self;
}
+ (IMP)instanceMethodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return class_getMethodImplementation(self, sel);
}

+ (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation((id)self, sel);
}

- (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation(self, sel);
}

+ (BOOL)resolveClassMethod:(SEL)sel {
    return NO;
}

+ (BOOL)resolveInstanceMethod:(SEL)sel {
    return NO;
}

// Replaced by CF (throws an NSException)
+ (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p", 
                class_getName(self), sel_getName(sel), self);
}

// Replaced by CF (throws an NSException)
- (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p", 
                object_getClassName(self), sel_getName(sel), self);
}
+ (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
}

- (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)(self, sel);
}

- (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
}

- (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
}
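// Editor's note (not in the original source): each cast above coerces
// objc_msgSend to the exact function type of the method being invoked, so
// the compiler passes arguments and reads the return value with the correct
// ABI. The same pattern works for any send, e.g.:
//     id result = ((id(*)(id, SEL, id))objc_msgSend)(target, sel, arg);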
// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
- (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("-[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

+ (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

- (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

+ (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

- (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

// Replaced by CF (returns an NSString)
+ (NSString *)description {
    return nil;
}

// Replaced by CF (returns an NSString)
- (NSString *)description {
    return nil;
}

+ (NSString *)debugDescription {
    return [self description];
}

- (NSString *)debugDescription {
    return [self description];
}
+ (id)new {
    return [callAlloc(self, false/*checkNil*/) init];
}

+ (id)retain {
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)retain {
    return ((id)self)->rootRetain();
}

+ (BOOL)_tryRetain {
    return YES;
}

// Replaced by ObjectAlloc
- (BOOL)_tryRetain {
    return ((id)self)->rootTryRetain();
}

+ (BOOL)_isDeallocating {
    return NO;
}

- (BOOL)_isDeallocating {
    return ((id)self)->rootIsDeallocating();
}

+ (BOOL)allowsWeakReference { 
    return YES; 
}

+ (BOOL)retainWeakReference { 
    return YES; 
}

- (BOOL)allowsWeakReference { 
    return ! [self _isDeallocating]; 
}

- (BOOL)retainWeakReference { 
    return [self _tryRetain]; 
}

+ (oneway void)release {
}

// Replaced by ObjectAlloc
- (oneway void)release {
    ((id)self)->rootRelease();
}

+ (id)autorelease {
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)autorelease {
    return ((id)self)->rootAutorelease();
}

+ (NSUInteger)retainCount {
    return ULONG_MAX;
}

- (NSUInteger)retainCount {
    return ((id)self)->rootRetainCount();
}
+ (id)alloc {
    return _objc_rootAlloc(self);
}

// Replaced by ObjectAlloc
+ (id)allocWithZone:(struct _NSZone *)zone {
    return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
}

// Replaced by CF (throws an NSException)
+ (id)init {
    return (id)self;
}

- (id)init {
    return _objc_rootInit(self);
}

// Replaced by CF (throws an NSException)
+ (void)dealloc {
}

// Replaced by NSZombies
- (void)dealloc {
    _objc_rootDealloc(self);
}

// Replaced by CF (throws an NSException)
+ (void)finalize {
}

- (void)finalize {
    _objc_rootFinalize(self);
}

+ (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

- (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

+ (id)copyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)copy {
    return [(id)self copyWithZone:nil];
}

+ (id)mutableCopyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)mutableCopy {
    return [(id)self mutableCopyWithZone:nil];
}

@end