/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include "objc-private.h"
#include "objc-weak.h"
#include "llvm-DenseMap.h"

#include <malloc/malloc.h>
#include <mach/mach.h>
#include <mach-o/dyld.h>
#include <mach-o/nlist.h>
#include <sys/types.h>
#include <libkern/OSAtomic.h>
@interface NSInvocation
- (SEL)selector;
@end


/***********************************************************************
* Weak ivar support
**********************************************************************/

static id defaultBadAllocHandler(Class cls)
{
    _objc_fatal("attempt to allocate object of class '%s' failed",
                cls->nameForLogging());
}

static id (*badAllocHandler)(Class) = &defaultBadAllocHandler;

static id callBadAllocHandler(Class cls)
{
    // fixme add re-entrancy protection in case allocation fails inside handler
    return (*badAllocHandler)(cls);
}

void _objc_setBadAllocHandler(id (*newHandler)(Class))
{
    badAllocHandler = newHandler;
}


namespace {
// The order of these bits is important.
#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
#define SIDE_TABLE_DEALLOCATING      (1UL<<1)  // MSB-ward of weak bit
#define SIDE_TABLE_RC_ONE            (1UL<<2)  // MSB-ward of deallocating bit
#define SIDE_TABLE_RC_PINNED         (1UL<<(WORD_BITS-1))

#define SIDE_TABLE_RC_SHIFT 2
#define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
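
// Illustrative sketch only (not used by the runtime): how a side-table
// refcount word decodes under the layout above. Only WORD_BITS and the
// SIDE_TABLE_* definitions above are assumed.
//
//   bit 0                 weakly referenced
//   bit 1                 deallocating
//   bits 2..WORD_BITS-2   extra retain count, read via SIDE_TABLE_RC_SHIFT
//   bit WORD_BITS-1       pinned: the count overflowed and is untracked
//
// static size_t decodeExtraRC_example(size_t bits) {
//     if (bits & SIDE_TABLE_RC_PINNED) return SIZE_MAX;  // pinned: untracked
//     return bits >> SIDE_TABLE_RC_SHIFT;                // shift off flag bits
// }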
// RefcountMap disguises its pointers because we
// don't want the table to act as a root for `leaks`.
typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;

// Template parameters.
enum HaveOld { DontHaveOld = false, DoHaveOld = true };
enum HaveNew { DontHaveNew = false, DoHaveNew = true };

struct SideTable {
    spinlock_t slock;
    RefcountMap refcnts;
    weak_table_t weak_table;

    SideTable() {
        memset(&weak_table, 0, sizeof(weak_table));
    }

    ~SideTable() {
        _objc_fatal("Do not delete SideTable.");
    }

    void lock() { slock.lock(); }
    void unlock() { slock.unlock(); }
    void forceReset() { slock.forceReset(); }

    // Address-ordered lock discipline for a pair of side tables.

    template<HaveOld, HaveNew>
    static void lockTwo(SideTable *lock1, SideTable *lock2);
    template<HaveOld, HaveNew>
    static void unlockTwo(SideTable *lock1, SideTable *lock2);
};
template<>
void SideTable::lockTwo<DoHaveOld, DoHaveNew>
    (SideTable *lock1, SideTable *lock2)
{
    spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
}

template<>
void SideTable::lockTwo<DoHaveOld, DontHaveNew>
    (SideTable *lock1, SideTable *)
{
    lock1->lock();
}

template<>
void SideTable::lockTwo<DontHaveOld, DoHaveNew>
    (SideTable *, SideTable *lock2)
{
    lock2->lock();
}

template<>
void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
    (SideTable *lock1, SideTable *lock2)
{
    spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
}

template<>
void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
    (SideTable *lock1, SideTable *)
{
    lock1->unlock();
}

template<>
void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
    (SideTable *, SideTable *lock2)
{
    lock2->unlock();
}
// We cannot use a C++ static initializer to initialize SideTables because
// libc calls us before our C++ initializers run. We also don't want a global
// pointer to this struct because of the extra indirection.
// Do it the hard way.
alignas(StripedMap<SideTable>) static uint8_t
    SideTableBuf[sizeof(StripedMap<SideTable>)];

static void SideTableInit() {
    new (SideTableBuf) StripedMap<SideTable>();
}

static StripedMap<SideTable>& SideTables() {
    return *reinterpret_cast<StripedMap<SideTable>*>(SideTableBuf);
}

// anonymous namespace
};
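
// Illustrative sketch only: how callers pick a side table. StripedMap hashes
// an object's address into a small fixed array of SideTables, so unrelated
// objects may share a table while lock contention stays bounded.
//
//   SideTable& table = SideTables()[obj];   // address-hashed stripe
//   table.lock();
//   // ... consult table.refcnts or table.weak_table ...
//   table.unlock();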
void SideTableLockAll() {
    SideTables().lockAll();
}

void SideTableUnlockAll() {
    SideTables().unlockAll();
}

void SideTableForceResetAll() {
    SideTables().forceResetAll();
}

void SideTableDefineLockOrder() {
    SideTables().defineLockOrder();
}

void SideTableLocksPrecedeLock(const void *newlock) {
    SideTables().precedeLock(newlock);
}

void SideTableLocksSucceedLock(const void *oldlock) {
    SideTables().succeedLock(oldlock);
}

void SideTableLocksPrecedeLocks(StripedMap<spinlock_t>& newlocks) {
    int i = 0;
    const void *newlock;
    while ((newlock = newlocks.getLock(i++))) {
        SideTables().precedeLock(newlock);
    }
}

void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
    int i = 0;
    const void *oldlock;
    while ((oldlock = oldlocks.getLock(i++))) {
        SideTables().succeedLock(oldlock);
    }
}
//
// The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
//

id objc_retainBlock(id x) {
    return (id)_Block_copy(x);
}

//
// The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
//

BOOL objc_should_deallocate(id object) {
    return YES;
}

id
objc_retain_autorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}


void
objc_storeStrong(id *location, id obj)
{
    id prev = *location;
    if (obj == prev) {
        return;
    }
    objc_retain(obj);
    *location = obj;
    objc_release(prev);
}
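
// Illustrative sketch only: under ARC, an assignment through a __strong
// variable is equivalent to a call to objc_storeStrong above. The expansion
// shown is a paraphrase, not literal compiler output.
//
//   // id x = a;   then   x = b;   behaves roughly like:
//   id x = objc_retain(a);
//   objc_storeStrong(&x, b);   // retains b, stores it, then releases a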
// Update a weak variable.
// If HaveOld is true, the variable has an existing value
// that needs to be cleaned up. This value might be nil.
// If HaveNew is true, there is a new value that needs to be
// assigned into the variable. This value might be nil.
// If CrashIfDeallocating is true, the process is halted if newObj is
// deallocating or newObj's class does not support weak references.
// If CrashIfDeallocating is false, nil is stored instead.
enum CrashIfDeallocating {
    DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
};
template <HaveOld haveOld, HaveNew haveNew,
          CrashIfDeallocating crashIfDeallocating>
static id
storeWeak(id *location, objc_object *newObj)
{
    assert(haveOld || haveNew);
    if (!haveNew) assert(newObj == nil);

    Class previouslyInitializedClass = nil;
    id oldObj;
    SideTable *oldTable;
    SideTable *newTable;

    // Acquire locks for old and new values.
    // Order by lock address to prevent lock ordering problems.
    // Retry if the old value changes underneath us.
 retry:
    if (haveOld) {
        oldObj = *location;
        oldTable = &SideTables()[oldObj];
    } else {
        oldTable = nil;
    }
    if (haveNew) {
        newTable = &SideTables()[newObj];
    } else {
        newTable = nil;
    }

    SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);

    if (haveOld && *location != oldObj) {
        SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
        goto retry;
    }

    // Prevent a deadlock between the weak reference machinery
    // and the +initialize machinery by ensuring that no
    // weakly-referenced object has an un-+initialized isa.
    if (haveNew && newObj) {
        Class cls = newObj->getIsa();
        if (cls != previouslyInitializedClass &&
            !((objc_class *)cls)->isInitialized())
        {
            SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
            _class_initialize(_class_getNonMetaClass(cls, (id)newObj));

            // If this class is finished with +initialize then we're good.
            // If this class is still running +initialize on this thread
            // (i.e. +initialize called storeWeak on an instance of itself)
            // then we may proceed but it will appear initializing and
            // not yet initialized to the check above.
            // Instead set previouslyInitializedClass to recognize it on retry.
            previouslyInitializedClass = cls;

            goto retry;
        }
    }

    // Clean up old value, if any.
    if (haveOld) {
        weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
    }

    // Assign new value, if any.
    if (haveNew) {
        newObj = (objc_object *)
            weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
                                  crashIfDeallocating);
        // weak_register_no_lock returns nil if weak store should be rejected

        // Set is-weakly-referenced bit in refcount table.
        if (newObj && !newObj->isTaggedPointer()) {
            newObj->setWeaklyReferenced_nolock();
        }

        // Do not set *location anywhere else. That would introduce a race.
        *location = (id)newObj;
    }
    else {
        // No new value. The storage is not changed.
    }

    SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);

    return (id)newObj;
}
/**
 * This function stores a new value into a __weak variable. It would
 * be used anywhere a __weak variable is the target of an assignment.
 *
 * @param location The address of the weak pointer itself
 * @param newObj The new object this weak ptr should now point to
 *
 * @return \e newObj
 */
id
objc_storeWeak(id *location, id newObj)
{
    return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
        (location, (objc_object *)newObj);
}


/**
 * This function stores a new value into a __weak variable.
 * If the new object is deallocating or the new object's class
 * does not support weak references, stores nil instead.
 *
 * @param location The address of the weak pointer itself
 * @param newObj The new object this weak ptr should now point to
 *
 * @return The value stored (either the new object or nil)
 */
id
objc_storeWeakOrNil(id *location, id newObj)
{
    return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
        (location, (objc_object *)newObj);
}
/**
 * Initialize a fresh weak pointer to some object location.
 * It would be used for code like:
 *
 * (The nil case)
 * __weak id weakPtr;
 * (The non-nil case)
 * __weak id weakPtr = o;
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to the weak variable. (Concurrent weak clear is safe.)
 *
 * @param location Address of __weak ptr.
 * @param newObj Object ptr.
 */
id
objc_initWeak(id *location, id newObj)
{
    if (!newObj) {
        *location = nil;
        return nil;
    }

    return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
        (location, (objc_object*)newObj);
}

id
objc_initWeakOrNil(id *location, id newObj)
{
    if (!newObj) {
        *location = nil;
        return nil;
    }

    return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
        (location, (objc_object*)newObj);
}
/**
 * Destroys the relationship between a weak pointer
 * and the object it is referencing in the internal weak
 * table. If the weak pointer is not referencing anything,
 * there is no need to edit the weak table.
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to the weak variable. (Concurrent weak clear is safe.)
 *
 * @param location The weak pointer address.
 */
void
objc_destroyWeak(id *location)
{
    (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
        (location, nil);
}


/*
  Once upon a time we eagerly cleared *location if we saw the object
  was deallocating. This confuses code like NSPointerFunctions which
  tries to pre-flight the raw storage and assumes if the storage is
  zero then the weak system is done interfering. That is false: the
  weak system is still going to check and clear the storage later.
  This can cause objc_weak_error complaints and crashes.
  So we now don't touch the storage until deallocation completes.
*/
id
objc_loadWeakRetained(id *location)
{
    id obj;
    id result;
    Class cls;

    SideTable *table;

 retry:
    // fixme std::atomic this load
    obj = *location;
    if (!obj) return nil;
    if (obj->isTaggedPointer()) return obj;

    table = &SideTables()[obj];

    table->lock();
    if (*location != obj) {
        table->unlock();
        goto retry;
    }

    result = obj;

    cls = obj->ISA();
    if (! cls->hasCustomRR()) {
        // Fast case. We know +initialize is complete because
        // default-RR can never be set before then.
        assert(cls->isInitialized());
        if (! obj->rootTryRetain()) {
            result = nil;
        }
    }
    else {
        // Slow case. We must check for +initialize and call it outside
        // the lock if necessary in order to avoid deadlocks.
        if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
            BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
                class_getMethodImplementation(cls, SEL_retainWeakReference);
            if ((IMP)tryRetain == _objc_msgForward) {
                result = nil;
            }
            else if (! (*tryRetain)(obj, SEL_retainWeakReference)) {
                result = nil;
            }
        }
        else {
            table->unlock();
            _class_initialize(cls);
            goto retry;
        }
    }

    table->unlock();
    return result;
}
/**
 * This loads the object referenced by a weak pointer and returns it, after
 * retaining and autoreleasing the object to ensure that it stays alive
 * long enough for the caller to use it. This function would be used
 * anywhere a __weak variable is used in an expression.
 *
 * @param location The weak pointer address
 *
 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
 */
id
objc_loadWeak(id *location)
{
    if (!*location) return nil;
    return objc_autorelease(objc_loadWeakRetained(location));
}
/**
 * This function copies a weak pointer from one location to another,
 * when the destination doesn't already contain a weak pointer. It
 * would be used for code like:
 *
 *  __weak id src = ...;
 *  __weak id dst = src;
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to the destination variable. (Concurrent weak clear is safe.)
 *
 * @param dst The destination variable.
 * @param src The source variable.
 */
void
objc_copyWeak(id *dst, id *src)
{
    id obj = objc_loadWeakRetained(src);
    objc_initWeak(dst, obj);
    objc_release(obj);
}

/**
 * Move a weak pointer from one location to another.
 * Before the move, the destination must be uninitialized.
 * After the move, the source is nil.
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to either weak variable. (Concurrent weak clear is safe.)
 */
void
objc_moveWeak(id *dst, id *src)
{
    objc_copyWeak(dst, src);
    objc_destroyWeak(src);
    *src = nil;
}
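
// Illustrative sketch only: the compiler-emitted lifecycle of a __weak local,
// mapped to the entrypoints defined above. Variable names are hypothetical.
//
//   __weak id w = obj;     // objc_initWeak(&w, obj)
//   id strong = w;         // objc_loadWeakRetained(&w), released after use
//   w = other;             // objc_storeWeak(&w, other)
//   // scope exit:         // objc_destroyWeak(&w)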
/***********************************************************************
   Autorelease pool implementation

   A thread's autorelease pool is a stack of pointers.
   Each pointer is either an object to release, or POOL_BOUNDARY which is
   an autorelease pool boundary.
   A pool token is a pointer to the POOL_BOUNDARY for that pool. When
   the pool is popped, every object hotter than the sentinel is released.
   The stack is divided into a doubly-linked list of pages. Pages are added
   and deleted as necessary.
   Thread-local storage points to the hot page, where newly autoreleased
   objects are stored.
**********************************************************************/
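
// Illustrative sketch only: what an @autoreleasepool block lowers to.
// objc_autoreleasePoolPush/Pop are the entrypoints defined later in this
// file; the expansion shown is a paraphrase, not literal compiler output.
//
//   void *token = objc_autoreleasePoolPush();   // pushes a POOL_BOUNDARY
//   // ... autoreleased objects accumulate on the hot page ...
//   objc_autoreleasePoolPop(token);             // releases everything hotter
//                                               // than that boundary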
// Set this to 1 to mprotect() autorelease pool contents
#define PROTECT_AUTORELEASEPOOL 0

// Set this to 1 to validate the entire autorelease pool header all the time
// (i.e. use check() instead of fastcheck() everywhere)
#define CHECK_AUTORELEASEPOOL (DEBUG)

BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
namespace {

struct magic_t {
    static const uint32_t M0 = 0xA1A1A1A1;
#   define M1 "AUTORELEASE!"
    static const size_t M1_len = 12;
    uint32_t m[4];

    magic_t() {
        assert(M1_len == strlen(M1));
        assert(M1_len == 3 * sizeof(m[1]));

        m[0] = M0;
        strncpy((char *)&m[1], M1, M1_len);
    }

    ~magic_t() {
        m[0] = m[1] = m[2] = m[3] = 0;
    }

    bool check() const {
        return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
    }

    bool fastcheck() const {
#if CHECK_AUTORELEASEPOOL
        return check();
#else
        return (m[0] == M0);
#endif
    }

#   undef M1
};
class AutoreleasePoolPage
{
    // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
    // pushed and it has never contained any objects. This saves memory
    // when the top level (i.e. libdispatch) pushes and pops pools but
    // never uses them.
#   define EMPTY_POOL_PLACEHOLDER ((id*)1)

#   define POOL_BOUNDARY nil
    static pthread_key_t const key = AUTORELEASE_POOL_KEY;
    static uint8_t const SCRIBBLE = 0xA3;  // 0xA3A3A3A3 after releasing
    static size_t const SIZE =
#if PROTECT_AUTORELEASEPOOL
        PAGE_MAX_SIZE;  // must be multiple of vm page size
#else
        PAGE_MAX_SIZE;  // size and alignment, power of 2
#endif
    static size_t const COUNT = SIZE / sizeof(id);

    magic_t const magic;
    id *next;
    pthread_t const thread;
    AutoreleasePoolPage * const parent;
    AutoreleasePoolPage *child;
    uint32_t const depth;
    uint32_t hiwat;

    // SIZE-sizeof(*this) bytes of contents follow
    static void * operator new(size_t size) {
        return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
    }
    static void operator delete(void * p) {
        return free(p);
    }

    inline void protect() {
#if PROTECT_AUTORELEASEPOOL
        mprotect(this, SIZE, PROT_READ);
        check();
#endif
    }

    inline void unprotect() {
#if PROTECT_AUTORELEASEPOOL
        check();
        mprotect(this, SIZE, PROT_READ | PROT_WRITE);
#endif
    }
    AutoreleasePoolPage(AutoreleasePoolPage *newParent)
        : magic(), next(begin()), thread(pthread_self()),
          parent(newParent), child(nil),
          depth(parent ? 1+parent->depth : 0),
          hiwat(parent ? parent->hiwat : 0)
    {
        if (parent) {
            parent->check();
            assert(!parent->child);
            parent->unprotect();
            parent->child = this;
            parent->protect();
        }
        protect();
    }

    ~AutoreleasePoolPage()
    {
        check();
        unprotect();
        assert(empty());

        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        assert(!child);
    }
    void busted(bool die = true)
    {
        magic_t right;
        (die ? _objc_fatal : _objc_inform)
            ("autorelease pool page %p corrupted\n"
             "  magic     0x%08x 0x%08x 0x%08x 0x%08x\n"
             "  should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
             "  pthread   %p\n"
             "  should be %p\n",
             this,
             magic.m[0], magic.m[1], magic.m[2], magic.m[3],
             right.m[0], right.m[1], right.m[2], right.m[3],
             this->thread, pthread_self());
    }

    void check(bool die = true)
    {
        if (!magic.check() || !pthread_equal(thread, pthread_self())) {
            busted(die);
        }
    }

    void fastcheck(bool die = true)
    {
#if CHECK_AUTORELEASEPOOL
        check(die);
#else
        if (! magic.fastcheck()) {
            busted(die);
        }
#endif
    }
    id * begin() {
        return (id *) ((uint8_t *)this+sizeof(*this));
    }

    id * end() {
        return (id *) ((uint8_t *)this+SIZE);
    }

    bool empty() {
        return next == begin();
    }

    bool full() {
        return next == end();
    }

    bool lessThanHalfFull() {
        return (next - begin() < (end() - begin()) / 2);
    }
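
    // Worked example (illustrative only, assuming PAGE_MAX_SIZE == 4096 and
    // 8-byte pointers): COUNT is 4096/8 = 512 slots per page, and begin()
    // skips the sizeof(*this) header, so slightly fewer than 512 autoreleased
    // pointers fit before full() turns true and a child page is chained on.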
    id *add(id obj)
    {
        assert(!full());
        unprotect();
        id *ret = next;  // faster than `return next-1` because of aliasing
        *next++ = obj;
        protect();
        return ret;
    }

    void releaseAll()
    {
        releaseUntil(begin());
    }

    void releaseUntil(id *stop)
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage

        while (this->next != stop) {
            // Restart from hotPage() every time, in case -release
            // autoreleased more objects
            AutoreleasePoolPage *page = hotPage();

            // fixme I think this `while` can be `if`, but I can't prove it
            while (page->empty()) {
                page = page->parent;
                setHotPage(page);
            }

            page->unprotect();
            id obj = *--page->next;
            memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
            page->protect();

            if (obj != POOL_BOUNDARY) {
                objc_release(obj);
            }
        }

        setHotPage(this);

#if DEBUG
        // we expect any children to be completely empty
        for (AutoreleasePoolPage *page = child; page; page = page->child) {
            assert(page->empty());
        }
#endif
    }
    void kill()
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        AutoreleasePoolPage *page = this;
        while (page->child) page = page->child;

        AutoreleasePoolPage *deathptr;
        do {
            deathptr = page;
            page = page->parent;
            if (page) {
                page->unprotect();
                page->child = nil;
                page->protect();
            }
            delete deathptr;
        } while (deathptr != this);
    }

    static void tls_dealloc(void *p)
    {
        if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
            // No objects or pool pages to clean up here.
            return;
        }

        // reinstate TLS value while we work
        setHotPage((AutoreleasePoolPage *)p);

        if (AutoreleasePoolPage *page = coldPage()) {
            if (!page->empty()) pop(page->begin());  // pop all of the pools
            if (DebugMissingPools || DebugPoolAllocation) {
                // pop() killed the pages already
            } else {
                page->kill();  // free all of the pages
            }
        }

        // clear TLS value so TLS destruction doesn't loop
        setHotPage(nil);
    }
    static AutoreleasePoolPage *pageForPointer(const void *p)
    {
        return pageForPointer((uintptr_t)p);
    }

    static AutoreleasePoolPage *pageForPointer(uintptr_t p)
    {
        AutoreleasePoolPage *result;
        uintptr_t offset = p % SIZE;

        assert(offset >= sizeof(AutoreleasePoolPage));

        result = (AutoreleasePoolPage *)(p - offset);
        result->fastcheck();

        return result;
    }
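
    // Worked example (illustrative addresses): because operator new above
    // allocates every page aligned to SIZE, a pool token such as
    // 0x7f8a4c802138 with SIZE == 4096 has offset 0x138 into its page, so
    // p - offset == 0x7f8a4c802000 is the page header. The offset assert
    // holds because no token can point into the header itself.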
    static inline bool haveEmptyPoolPlaceholder()
    {
        id *tls = (id *)tls_get_direct(key);
        return (tls == EMPTY_POOL_PLACEHOLDER);
    }

    static inline id* setEmptyPoolPlaceholder()
    {
        assert(tls_get_direct(key) == nil);
        tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
        return EMPTY_POOL_PLACEHOLDER;
    }

    static inline AutoreleasePoolPage *hotPage()
    {
        AutoreleasePoolPage *result = (AutoreleasePoolPage *)
            tls_get_direct(key);
        if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
        if (result) result->fastcheck();
        return result;
    }

    static inline void setHotPage(AutoreleasePoolPage *page)
    {
        if (page) page->fastcheck();
        tls_set_direct(key, (void *)page);
    }

    static inline AutoreleasePoolPage *coldPage()
    {
        AutoreleasePoolPage *result = hotPage();
        if (result) {
            while (result->parent) {
                result = result->parent;
                result->fastcheck();
            }
        }
        return result;
    }
    static inline id *autoreleaseFast(id obj)
    {
        AutoreleasePoolPage *page = hotPage();
        if (page && !page->full()) {
            return page->add(obj);
        } else if (page) {
            return autoreleaseFullPage(obj, page);
        } else {
            return autoreleaseNoPage(obj);
        }
    }

    static __attribute__((noinline))
    id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
    {
        // The hot page is full.
        // Step to the next non-full page, adding a new page if necessary.
        // Then add the object to that page.
        assert(page == hotPage());
        assert(page->full() || DebugPoolAllocation);

        do {
            if (page->child) page = page->child;
            else page = new AutoreleasePoolPage(page);
        } while (page->full());

        setHotPage(page);
        return page->add(obj);
    }
    static __attribute__((noinline))
    id *autoreleaseNoPage(id obj)
    {
        // "No page" could mean no pool has been pushed
        // or an empty placeholder pool has been pushed and has no contents yet
        assert(!hotPage());

        bool pushExtraBoundary = false;
        if (haveEmptyPoolPlaceholder()) {
            // We are pushing a second pool over the empty placeholder pool
            // or pushing the first object into the empty placeholder pool.
            // Before doing that, push a pool boundary on behalf of the pool
            // that is currently represented by the empty placeholder.
            pushExtraBoundary = true;
        }
        else if (obj != POOL_BOUNDARY && DebugMissingPools) {
            // We are pushing an object with no pool in place,
            // and no-pool debugging was requested by environment.
            _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
                         "autoreleased with no pool in place - "
                         "just leaking - break on "
                         "objc_autoreleaseNoPool() to debug",
                         pthread_self(), (void*)obj, object_getClassName(obj));
            objc_autoreleaseNoPool(obj);
            return nil;
        }
        else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
            // We are pushing a pool with no pool in place,
            // and alloc-per-pool debugging was not requested.
            // Install and return the empty pool placeholder.
            return setEmptyPoolPlaceholder();
        }

        // We are pushing an object or a non-placeholder'd pool.

        // Install the first page.
        AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
        setHotPage(page);

        // Push a boundary on behalf of the previously-placeholder'd pool.
        if (pushExtraBoundary) {
            page->add(POOL_BOUNDARY);
        }

        // Push the requested object or pool.
        return page->add(obj);
    }


    static __attribute__((noinline))
    id *autoreleaseNewPage(id obj)
    {
        AutoreleasePoolPage *page = hotPage();
        if (page) return autoreleaseFullPage(obj, page);
        else return autoreleaseNoPage(obj);
    }
public:
    static inline id autorelease(id obj)
    {
        assert(obj);
        assert(!obj->isTaggedPointer());
        id *dest __unused = autoreleaseFast(obj);
        assert(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
        return obj;
    }

    static inline void *push()
    {
        id *dest;
        if (DebugPoolAllocation) {
            // Each autorelease pool starts on a new pool page.
            dest = autoreleaseNewPage(POOL_BOUNDARY);
        } else {
            dest = autoreleaseFast(POOL_BOUNDARY);
        }
        assert(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
        return dest;
    }
    static void badPop(void *token)
    {
        // Error. For bincompat purposes this is not
        // fatal in executables built with old SDKs.

        if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0, 2_0)) {
            // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
            _objc_fatal
                ("Invalid or prematurely-freed autorelease pool %p.", token);
        }

        // Old SDK. Bad pop is warned once.
        static bool complained = false;
        if (!complained) {
            complained = true;
            _objc_inform_now_and_on_crash
                ("Invalid or prematurely-freed autorelease pool %p. "
                 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
                 "Proceeding anyway because the app is old "
                 "(SDK version " SDK_FORMAT "). Memory errors are likely.",
                 token, FORMAT_SDK(sdkVersion()));
        }
        objc_autoreleasePoolInvalid(token);
    }
    static inline void pop(void *token)
    {
        AutoreleasePoolPage *page;
        id *stop;

        if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
            // Popping the top-level placeholder pool.
            if (hotPage()) {
                // Pool was used. Pop its contents normally.
                // Pool pages remain allocated for re-use as usual.
                pop(coldPage()->begin());
            } else {
                // Pool was never used. Clear the placeholder.
                setHotPage(nil);
            }
            return;
        }

        page = pageForPointer(token);
        stop = (id *)token;
        if (*stop != POOL_BOUNDARY) {
            if (stop == page->begin() && !page->parent) {
                // Start of coldest page may correctly not be POOL_BOUNDARY:
                // 1. top-level pool is popped, leaving the cold page in place
                // 2. an object is autoreleased with no pool
            } else {
                // Error. For bincompat purposes this is not
                // fatal in executables built with old SDKs.
                return badPop(token);
            }
        }

        if (PrintPoolHiwat) printHiwat();

        page->releaseUntil(stop);

        // memory: delete empty children
        if (DebugPoolAllocation && page->empty()) {
            // special case: delete everything during page-per-pool debugging
            AutoreleasePoolPage *parent = page->parent;
            page->kill();
            setHotPage(parent);
        } else if (DebugMissingPools && page->empty() && !page->parent) {
            // special case: delete everything for pop(top)
            // when debugging missing autorelease pools
            page->kill();
            setHotPage(nil);
        }
        else if (page->child) {
            // hysteresis: keep one empty child if page is more than half full
            if (page->lessThanHalfFull()) {
                page->child->kill();
            }
            else if (page->child->child) {
                page->child->child->kill();
            }
        }
    }

    static void init()
    {
        int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
                                             AutoreleasePoolPage::tls_dealloc);
        assert(r == 0);
    }
    void print()
    {
        _objc_inform("[%p]  ................  PAGE %s %s %s", this,
                     full() ? "(full)" : "",
                     this == hotPage() ? "(hot)" : "",
                     this == coldPage() ? "(cold)" : "");
        check(false);
        for (id *p = begin(); p < next; p++) {
            if (*p == POOL_BOUNDARY) {
                _objc_inform("[%p]  ################  POOL %p", p, p);
            } else {
                _objc_inform("[%p]  %#16lx  %s",
                             p, (unsigned long)*p, object_getClassName(*p));
            }
        }
    }

    static void printAll()
    {
        _objc_inform("##############");
        _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());

        AutoreleasePoolPage *page;
        ptrdiff_t objects = 0;
        for (page = coldPage(); page; page = page->child) {
            objects += page->next - page->begin();
        }
        _objc_inform("%llu releases pending.", (unsigned long long)objects);

        if (haveEmptyPoolPlaceholder()) {
            _objc_inform("[%p]  ................  PAGE (placeholder)",
                         EMPTY_POOL_PLACEHOLDER);
            _objc_inform("[%p]  ################  POOL (placeholder)",
                         EMPTY_POOL_PLACEHOLDER);
        }
        else {
            for (page = coldPage(); page; page = page->child) {
                page->print();
            }
        }

        _objc_inform("##############");
    }
    static void printHiwat()
    {
        // Check and propagate high water mark
        // Ignore high water marks under 256 to suppress noise.
        AutoreleasePoolPage *p = hotPage();
        uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
        if (mark > p->hiwat && mark > 256) {
            for( ; p; p = p->parent) {
                p->unprotect();
                p->hiwat = mark;
                p->protect();
            }

            _objc_inform("POOL HIGHWATER: new high water mark of %u "
                         "pending releases for thread %p:",
                         mark, pthread_self());

            void *stack[128];
            int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
            char **sym = backtrace_symbols(stack, count);
            for (int i = 0; i < count; i++) {
                _objc_inform("POOL HIGHWATER: %s", sym[i]);
            }
            free(sym);
        }
    }

#undef POOL_BOUNDARY
};

// anonymous namespace
};
/***********************************************************************
* Slow paths for inline control
**********************************************************************/

#if SUPPORT_NONPOINTER_ISA

NEVER_INLINE id
objc_object::rootRetain_overflow(bool tryRetain)
{
    return rootRetain(tryRetain, true);
}


NEVER_INLINE bool
objc_object::rootRelease_underflow(bool performDealloc)
{
    return rootRelease(performDealloc, true);
}
// Slow path of clearDeallocating()
// for objects with nonpointer isa
// that were ever weakly referenced
// or whose retain count ever overflowed to the side table.
NEVER_INLINE void
objc_object::clearDeallocating_slow()
{
    assert(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));

    SideTable& table = SideTables()[this];
    table.lock();
    if (isa.weakly_referenced) {
        weak_clear_no_lock(&table.weak_table, (id)this);
    }
    if (isa.has_sidetable_rc) {
        table.refcnts.erase(this);
    }
    table.unlock();
}

#endif

__attribute__((noinline,used))
id
objc_object::rootAutorelease2()
{
    assert(!isTaggedPointer());
    return AutoreleasePoolPage::autorelease((id)this);
}
BREAKPOINT_FUNCTION(
    void objc_overrelease_during_dealloc_error(void)
);


NEVER_INLINE
bool
objc_object::overrelease_error()
{
    _objc_inform_now_and_on_crash
        ("%s object %p overreleased while already deallocating; "
         "break on objc_overrelease_during_dealloc_error to debug",
         object_getClassName((id)this), this);
    objc_overrelease_during_dealloc_error();
    return false;  // allow rootRelease() to tail-call this
}
/***********************************************************************
* Retain count operations for side table.
**********************************************************************/


#if DEBUG
// Used to assert that an object is not present in the side table.
bool
objc_object::sidetable_present()
{
    bool result = false;
    SideTable& table = SideTables()[this];

    table.lock();

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) result = true;

    if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;

    table.unlock();

    return result;
}
#endif

#if SUPPORT_NONPOINTER_ISA

void
objc_object::sidetable_lock()
{
    SideTable& table = SideTables()[this];
    table.lock();
}

void
objc_object::sidetable_unlock()
{
    SideTable& table = SideTables()[this];
    table.unlock();
}
// Move the entire retain count to the side table,
// as well as isDeallocating and weaklyReferenced.
void
objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
                                          bool isDeallocating,
                                          bool weaklyReferenced)
{
    assert(!isa.nonpointer);        // should already be changed to raw pointer
    SideTable& table = SideTables()[this];

    size_t& refcntStorage = table.refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // not deallocating - that was in the isa
    assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    uintptr_t carry;
    size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
    if (carry) refcnt = SIDE_TABLE_RC_PINNED;
    if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
    if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;

    refcntStorage = refcnt;
}
// Move some retain counts to the side table from the isa field.
// Returns true if the object is now pinned.
bool
objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
{
    assert(isa.nonpointer);
    SideTable& table = SideTables()[this];

    size_t& refcntStorage = table.refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // isa-side bits should not be set here
    assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;

    uintptr_t carry;
    size_t newRefcnt =
        addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
    if (carry) {
        refcntStorage =
            SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
        return true;
    }
    else {
        refcntStorage = newRefcnt;
        return false;
    }
}
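
// Worked example (illustrative only, 64-bit): the stored count occupies
// bits 2..62, so the shifted addition above can absorb roughly 2^61 extra
// retains before it carries into bit 63. When that happens the count is
// "pinned": the pin bit is set, the flag bits are preserved, and the object
// is treated as immortal rather than risking a wrapped retain count.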
// Move some retain counts from the side table to the isa field.
// Returns the actual count subtracted, which may be less than the request.
size_t
objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
{
    assert(isa.nonpointer);
    SideTable& table = SideTables()[this];

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end() || it->second == 0) {
        // Side table retain count is zero. Can't borrow.
        return 0;
    }
    size_t oldRefcnt = it->second;

    // isa-side bits should not be set here
    assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
    assert(oldRefcnt > newRefcnt);  // shouldn't underflow
    it->second = newRefcnt;
    return delta_rc;
}


size_t
objc_object::sidetable_getExtraRC_nolock()
{
    assert(isa.nonpointer);
    SideTable& table = SideTables()[this];
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end()) return 0;
    else return it->second >> SIDE_TABLE_RC_SHIFT;
}


#endif
// SUPPORT_NONPOINTER_ISA
id
objc_object::sidetable_retain()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    table.lock();
    size_t& refcntStorage = table.refcnts[this];
    if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
        refcntStorage += SIDE_TABLE_RC_ONE;
    }
    table.unlock();

    return (id)this;
}


bool
objc_object::sidetable_tryRetain()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    // NO SPINLOCK HERE
    // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
    // which already acquired the lock on our behalf.

    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table.slock == 0) {
    //     _objc_fatal("Do not call -_tryRetain.");
    // }

    bool result = true;
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end()) {
        table.refcnts[this] = SIDE_TABLE_RC_ONE;
    } else if (it->second & SIDE_TABLE_DEALLOCATING) {
        result = false;
    } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
        it->second += SIDE_TABLE_RC_ONE;
    }

    return result;
}
uintptr_t
objc_object::sidetable_retainCount()
{
    SideTable& table = SideTables()[this];

    size_t refcnt_result = 1;

    table.lock();
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        // this is valid for SIDE_TABLE_RC_PINNED too
        refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
    }
    table.unlock();
    return refcnt_result;
}


bool
objc_object::sidetable_isDeallocating()
{
    SideTable& table = SideTables()[this];

    // NO SPINLOCK HERE
    // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
    // which already acquired the lock on our behalf.

    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table.slock == 0) {
    //     _objc_fatal("Do not call -_isDeallocating.");
    // }

    RefcountMap::iterator it = table.refcnts.find(this);
    return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
}
bool
objc_object::sidetable_isWeaklyReferenced()
{
    bool result = false;

    SideTable& table = SideTables()[this];
    table.lock();

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
    }

    table.unlock();

    return result;
}


void
objc_object::sidetable_setWeaklyReferenced_nolock()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.nonpointer);
#endif

    SideTable& table = SideTables()[this];

    table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
}
// return uintptr_t instead of bool so that the various raw-isa
// -release paths all return zero in eax
uintptr_t
objc_object::sidetable_release(bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    bool do_dealloc = false;

    table.lock();
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end()) {
        do_dealloc = true;
        table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
    } else if (it->second < SIDE_TABLE_DEALLOCATING) {
        // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
        do_dealloc = true;
        it->second |= SIDE_TABLE_DEALLOCATING;
    } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
        it->second -= SIDE_TABLE_RC_ONE;
    }
    table.unlock();
    if (do_dealloc && performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
    }
    return do_dealloc;
}


void
objc_object::sidetable_clearDeallocating()
{
    SideTable& table = SideTables()[this];

    // clear any weak table items
    // clear extra retain count and deallocating bit
    // (fixme warn or abort if extra retain count == 0 ?)
    table.lock();
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
            weak_clear_no_lock(&table.weak_table, (id)this);
        }
        table.refcnts.erase(it);
    }
    table.unlock();
}
/***********************************************************************
* Optimized retain/release/autorelease entrypoints
**********************************************************************/


#if __OBJC2__

__attribute__((aligned(16)))
id
objc_retain(id obj)
{
    if (!obj) return obj;
    if (obj->isTaggedPointer()) return obj;
    return obj->retain();
}


__attribute__((aligned(16)))
void
objc_release(id obj)
{
    if (!obj) return;
    if (obj->isTaggedPointer()) return;
    return obj->release();
}


__attribute__((aligned(16)))
id
objc_autorelease(id obj)
{
    if (!obj) return obj;
    if (obj->isTaggedPointer()) return obj;
    return obj->autorelease();
}


// OBJC2
#else
// not OBJC2


id objc_retain(id obj) { return [obj retain]; }
void objc_release(id obj) { [obj release]; }
id objc_autorelease(id obj) { return [obj autorelease]; }


#endif
/***********************************************************************
* Basic operations for root class implementations a.k.a. _objc_root*()
**********************************************************************/

bool
_objc_rootTryRetain(id obj)
{
    assert(obj);

    return obj->rootTryRetain();
}

bool
_objc_rootIsDeallocating(id obj)
{
    assert(obj);

    return obj->rootIsDeallocating();
}


void
objc_clear_deallocating(id obj)
{
    assert(obj);

    if (obj->isTaggedPointer()) return;
    obj->clearDeallocating();
}


bool
_objc_rootReleaseWasZero(id obj)
{
    assert(obj);

    return obj->rootReleaseShouldDealloc();
}


id
_objc_rootAutorelease(id obj)
{
    assert(obj);
    return obj->rootAutorelease();
}

uintptr_t
_objc_rootRetainCount(id obj)
{
    assert(obj);

    return obj->rootRetainCount();
}


id
_objc_rootRetain(id obj)
{
    assert(obj);

    return obj->rootRetain();
}

void
_objc_rootRelease(id obj)
{
    assert(obj);

    obj->rootRelease();
}
id
_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
{
    id obj;

#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    (void)zone;
    obj = class_createInstance(cls, 0);
#else
    if (!zone) {
        obj = class_createInstance(cls, 0);
    }
    else {
        obj = class_createInstanceFromZone(cls, 0, zone);
    }
#endif

    if (slowpath(!obj)) obj = callBadAllocHandler(cls);
    return obj;
}


// Call [cls alloc] or [cls allocWithZone:nil], with appropriate
// shortcutting optimizations.
static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
{
    if (slowpath(checkNil && !cls)) return nil;

#if __OBJC2__
    if (fastpath(!cls->ISA()->hasCustomAWZ())) {
        // No alloc/allocWithZone implementation. Go straight to the allocator.
        // fixme store hasCustomAWZ in the non-meta class and
        // add it to canAllocFast's summary
        if (fastpath(cls->canAllocFast())) {
            // No ctors, raw isa, etc. Go straight to the metal.
            bool dtor = cls->hasCxxDtor();
            id obj = (id)calloc(1, cls->bits.fastInstanceSize());
            if (slowpath(!obj)) return callBadAllocHandler(cls);
            obj->initInstanceIsa(cls, dtor);
            return obj;
        }
        else {
            // Has ctor or raw isa or something. Use the slower path.
            id obj = class_createInstance(cls, 0);
            if (slowpath(!obj)) return callBadAllocHandler(cls);
            return obj;
        }
    }
#endif

    // No shortcuts available.
    if (allocWithZone) return [cls allocWithZone:nil];
    return [cls alloc];
}
// Base class implementation of +alloc. cls is not nil.
// Calls [cls allocWithZone:nil].
id
_objc_rootAlloc(Class cls)
{
    return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
}

// Calls [cls alloc].
id
objc_alloc(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
}

// Calls [cls allocWithZone:nil].
id
objc_allocWithZone(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
}
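
// Illustrative sketch only: how these entrypoints are reached. When a class
// is known to have no custom +alloc/+allocWithZone:, the compiler may rewrite
// the message send directly to the runtime entrypoint, letting callAlloc
// above skip objc_msgSend entirely. Widget is a hypothetical class.
//
//   Widget *w = [Widget alloc];   // may compile to objc_alloc(Widget class)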
void
_objc_rootDealloc(id obj)
{
    assert(obj);

    obj->rootDealloc();
}

void
_objc_rootFinalize(id obj __unused)
{
    assert(obj);
    _objc_fatal("_objc_rootFinalize called with garbage collection off");
}


id
_objc_rootInit(id obj)
{
    // In practice, it will be hard to rely on this function.
    // Many classes do not properly chain -init calls.
    return obj;
}


malloc_zone_t *
_objc_rootZone(id obj)
{
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    return malloc_default_zone();
#else
    malloc_zone_t *rval = malloc_zone_from_ptr(obj);
    return rval ? rval : malloc_default_zone();
#endif
}

uintptr_t
_objc_rootHash(id obj)
{
    return (uintptr_t)obj;
}

void *
objc_autoreleasePoolPush(void)
{
    return AutoreleasePoolPage::push();
}

void
objc_autoreleasePoolPop(void *ctxt)
{
    AutoreleasePoolPage::pop(ctxt);
}


void *
_objc_autoreleasePoolPush(void)
{
    return objc_autoreleasePoolPush();
}

void
_objc_autoreleasePoolPop(void *ctxt)
{
    objc_autoreleasePoolPop(ctxt);
}

void
_objc_autoreleasePoolPrint(void)
{
    AutoreleasePoolPage::printAll();
}
// Same as objc_release but suitable for tail-calling
// if you need the value back and don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_releaseAndReturn(id obj)
{
    objc_release(obj);
    return obj;
}

// Same as objc_retainAutorelease but suitable for tail-calling
// if you don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_retainAutoreleaseAndReturn(id obj)
{
    return objc_retainAutorelease(obj);
}


// Prepare a value at +1 for return through a +0 autoreleasing convention.
id
objc_autoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;

    return objc_autorelease(obj);
}

// Prepare a value at +0 for return through a +0 autoreleasing convention.
id
objc_retainAutoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;

    // not objc_autoreleaseReturnValue(objc_retain(obj))
    // because we don't need another optimization attempt
    return objc_retainAutoreleaseAndReturn(obj);
}

// Accept a value returned through a +0 autoreleasing convention for use at +1.
id
objc_retainAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;

    return objc_retain(obj);
}

// Accept a value returned through a +0 autoreleasing convention for use at +0.
id
objc_unsafeClaimAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;

    return objc_releaseAndReturn(obj);
}
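
// Illustrative sketch only: the caller/callee handshake the four entrypoints
// above implement. When objc_autoreleaseReturnValue in the callee detects
// that the caller will immediately call objc_retainAutoreleasedReturnValue,
// the autorelease/retain pair is elided and ownership transfers directly;
// otherwise both sides fall back to a real autorelease and retain.
// makeResult is a hypothetical callee.
//
//   // callee (returns a +1 value through the +0 convention):
//   return objc_autoreleaseReturnValue(result);
//   // caller (wants the value back at +1):
//   id obj = objc_retainAutoreleasedReturnValue(makeResult());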
id
objc_retainAutorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}


void
_objc_deallocOnMainThreadHelper(void *context)
{
    id obj = (id)context;
    [obj dealloc];
}


// convert objc_objectptr_t to id, callee must take ownership.
id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert objc_objectptr_t to id, without ownership transfer.
id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert id to objc_objectptr_t, no ownership transfer.
objc_objectptr_t objc_unretainedPointer(id object) { return object; }


void arr_init(void)
{
    AutoreleasePoolPage::init();
    SideTableInit();
}
#if SUPPORT_TAGGED_POINTERS

// Placeholder for old debuggers. When they inspect an
// extended tagged pointer object they will see this isa.

@interface __NSUnrecognizedTaggedPointer : NSObject
@end

@implementation __NSUnrecognizedTaggedPointer

-(id) retain { return self; }
-(oneway void) release { }
-(id) autorelease { return self; }

@end

#endif


@implementation NSObject

+ (void)initialize {
}

+ (id)self {
    return (id)self;
}

- (id)self {
    return self;
}

+ (Class)class {
    return self;
}

- (Class)class {
    return object_getClass(self);
}
+ (Class)superclass {
    return self->superclass;
}

- (Class)superclass {
    return [self class]->superclass;
}

+ (BOOL)isMemberOfClass:(Class)cls {
    return object_getClass((id)self) == cls;
}

- (BOOL)isMemberOfClass:(Class)cls {
    return [self class] == cls;
}

+ (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

- (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isSubclassOfClass:(Class)cls {
    for (Class tcls = self; tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isAncestorOfObject:(NSObject *)obj {
    for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
        if (tcls == self) return YES;
    }
    return NO;
}

+ (BOOL)instancesRespondToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector(self, sel);
}

+ (BOOL)respondsToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector_inst(object_getClass(self), sel, self);
}

- (BOOL)respondsToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector_inst([self class], sel, self);
}

+ (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = self; tcls; tcls = tcls->superclass) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

- (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

+ (NSUInteger)hash {
    return _objc_rootHash(self);
}

- (NSUInteger)hash {
    return _objc_rootHash(self);
}

+ (BOOL)isEqual:(id)obj {
    return obj == (id)self;
}

- (BOOL)isEqual:(id)obj {
    return obj == self;
}
+ (IMP)instanceMethodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return class_getMethodImplementation(self, sel);
}

+ (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation((id)self, sel);
}

- (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation(self, sel);
}

+ (BOOL)resolveClassMethod:(SEL)sel {
    return NO;
}

+ (BOOL)resolveInstanceMethod:(SEL)sel {
    return NO;
}

// Replaced by CF (throws an NSException)
+ (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
                class_getName(self), sel_getName(sel), self);
}

// Replaced by CF (throws an NSException)
- (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
                object_getClassName(self), sel_getName(sel), self);
}
+ (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
}

- (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)(self, sel);
}

- (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
}

- (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
}


// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
- (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("-[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}
+ (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

- (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

+ (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

- (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}


// Replaced by CF (returns an NSString)
+ (NSString *)description {
    return nil;
}

// Replaced by CF (returns an NSString)
- (NSString *)description {
    return nil;
}

+ (NSString *)debugDescription {
    return [self description];
}

- (NSString *)debugDescription {
    return [self description];
}
+ (id)new {
    return [callAlloc(self, false/*checkNil*/) init];
}

// Replaced by ObjectAlloc
- (id)retain {
    return ((id)self)->rootRetain();
}

+ (BOOL)_tryRetain {
    return YES;
}

// Replaced by ObjectAlloc
- (BOOL)_tryRetain {
    return ((id)self)->rootTryRetain();
}

+ (BOOL)_isDeallocating {
    return NO;
}

- (BOOL)_isDeallocating {
    return ((id)self)->rootIsDeallocating();
}

+ (BOOL)allowsWeakReference {
    return YES;
}

+ (BOOL)retainWeakReference {
    return YES;
}

- (BOOL)allowsWeakReference {
    return ! [self _isDeallocating];
}

- (BOOL)retainWeakReference {
    return [self _tryRetain];
}

+ (oneway void)release {
}

// Replaced by ObjectAlloc
- (oneway void)release {
    ((id)self)->rootRelease();
}

// Replaced by ObjectAlloc
- (id)autorelease {
    return ((id)self)->rootAutorelease();
}

+ (NSUInteger)retainCount {
    return ULONG_MAX;
}

- (NSUInteger)retainCount {
    return ((id)self)->rootRetainCount();
}

+ (id)alloc {
    return _objc_rootAlloc(self);
}
// Replaced by ObjectAlloc
+ (id)allocWithZone:(struct _NSZone *)zone {
    return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
}

// Replaced by CF (throws an NSException)
+ (id)init {
    return (id)self;
}

- (id)init {
    return _objc_rootInit(self);
}

// Replaced by CF (throws an NSException)
+ (void)dealloc {
}

// Replaced by NSZombies
- (void)dealloc {
    _objc_rootDealloc(self);
}

// Previously used by GC. Now a placeholder for binary compatibility.
- (void)finalize {
}

+ (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

- (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

+ (id)copy {
    return (id)self;
}

+ (id)copyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)copy {
    return [(id)self copyWithZone:nil];
}

+ (id)mutableCopy {
    return (id)self;
}

+ (id)mutableCopyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)mutableCopy {
    return [(id)self mutableCopyWithZone:nil];
}

@end