/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include "objc-private.h"
#include "objc-weak.h"
#include "DenseMapExtras.h"

#include <malloc/malloc.h>
#include <mach/mach.h>
#include <mach-o/dyld.h>
#include <mach-o/nlist.h>
#include <sys/types.h>

#include "NSObject-internal.h"
@interface NSInvocation
- (SEL)selector;
@end
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_magic_offset  = __builtin_offsetof(AutoreleasePoolPageData, magic);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_next_offset   = __builtin_offsetof(AutoreleasePoolPageData, next);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_thread_offset = __builtin_offsetof(AutoreleasePoolPageData, thread);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset = __builtin_offsetof(AutoreleasePoolPageData, parent);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset  = __builtin_offsetof(AutoreleasePoolPageData, child);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset  = __builtin_offsetof(AutoreleasePoolPageData, depth);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset  = __builtin_offsetof(AutoreleasePoolPageData, hiwat);

OBJC_EXTERN const uint32_t objc_class_abi_version = OBJC_CLASS_ABI_VERSION_MAX;
/***********************************************************************
* Weak ivar support
**********************************************************************/

static id defaultBadAllocHandler(Class cls)
{
    _objc_fatal("attempt to allocate object of class '%s' failed",
                cls->nameForLogging());
}

id(*badAllocHandler)(Class) = &defaultBadAllocHandler;

id _objc_callBadAllocHandler(Class cls)
{
    // fixme add re-entrancy protection in case allocation fails inside handler
    return (*badAllocHandler)(cls);
}

void _objc_setBadAllocHandler(id(*newHandler)(Class))
{
    badAllocHandler = newHandler;
}
// The order of these bits is important.
#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
#define SIDE_TABLE_DEALLOCATING      (1UL<<1)  // MSB-ward of weak bit
#define SIDE_TABLE_RC_ONE            (1UL<<2)  // MSB-ward of deallocating bit
#define SIDE_TABLE_RC_PINNED         (1UL<<(WORD_BITS-1))

#define SIDE_TABLE_RC_SHIFT 2
#define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
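
// Illustrative sketch (not from the original source): how a refcount word
// in the side table decodes under the layout above. An object with an
// extra retain count of 3 that is weakly referenced but not deallocating
// is stored as:
//
//   size_t word = (3 << SIDE_TABLE_RC_SHIFT)      // 0b1100
//               | SIDE_TABLE_WEAKLY_REFERENCED;   // 0b0001  -> word == 0xD
//
// word >> SIDE_TABLE_RC_SHIFT recovers the count (3), and
// word & SIDE_TABLE_FLAG_MASK recovers the flag bits.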
struct RefcountMapValuePurgeable {
    static inline bool isPurgeable(size_t x) {
        return x == 0;
    }
};

// RefcountMap disguises its pointers because we
// don't want the table to act as a root for `leaks`.
typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,RefcountMapValuePurgeable> RefcountMap;

// Template parameters.
enum HaveOld { DontHaveOld = false, DoHaveOld = true };
enum HaveNew { DontHaveNew = false, DoHaveNew = true };
struct SideTable {
    spinlock_t slock;
    RefcountMap refcnts;
    weak_table_t weak_table;

    SideTable() {
        memset(&weak_table, 0, sizeof(weak_table));
    }

    ~SideTable() {
        _objc_fatal("Do not delete SideTable.");
    }

    void lock() { slock.lock(); }
    void unlock() { slock.unlock(); }
    void forceReset() { slock.forceReset(); }

    // Address-ordered lock discipline for a pair of side tables.

    template<HaveOld, HaveNew>
    static void lockTwo(SideTable *lock1, SideTable *lock2);
    template<HaveOld, HaveNew>
    static void unlockTwo(SideTable *lock1, SideTable *lock2);
};


template<>
void SideTable::lockTwo<DoHaveOld, DoHaveNew>
    (SideTable *lock1, SideTable *lock2)
{
    spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
}

template<>
void SideTable::lockTwo<DoHaveOld, DontHaveNew>
    (SideTable *lock1, SideTable *)
{
    lock1->lock();
}

template<>
void SideTable::lockTwo<DontHaveOld, DoHaveNew>
    (SideTable *, SideTable *lock2)
{
    lock2->lock();
}

template<>
void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
    (SideTable *lock1, SideTable *lock2)
{
    spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
}

template<>
void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
    (SideTable *lock1, SideTable *)
{
    lock1->unlock();
}

template<>
void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
    (SideTable *, SideTable *lock2)
{
    lock2->unlock();
}

static objc::ExplicitInit<StripedMap<SideTable>> SideTablesMap;

static StripedMap<SideTable>& SideTables() {
    return SideTablesMap.get();
}
// anonymous namespace
void SideTableLockAll() {
    SideTables().lockAll();
}

void SideTableUnlockAll() {
    SideTables().unlockAll();
}

void SideTableForceResetAll() {
    SideTables().forceResetAll();
}

void SideTableDefineLockOrder() {
    SideTables().defineLockOrder();
}

void SideTableLocksPrecedeLock(const void *newlock) {
    SideTables().precedeLock(newlock);
}

void SideTableLocksSucceedLock(const void *oldlock) {
    SideTables().succeedLock(oldlock);
}

void SideTableLocksPrecedeLocks(StripedMap<spinlock_t>& newlocks) {
    int i = 0;
    const void *newlock;
    while ((newlock = newlocks.getLock(i++))) {
        SideTables().precedeLock(newlock);
    }
}

void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
    int i = 0;
    const void *oldlock;
    while ((oldlock = oldlocks.getLock(i++))) {
        SideTables().succeedLock(oldlock);
    }
}
//
// The -fobjc-arc flag causes the compiler to issue calls to
// objc_{retain/release/autorelease/retain_block}
//

id objc_retainBlock(id x) {
    return (id)_Block_copy(x);
}

//
// The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
//

BOOL objc_should_deallocate(id object) {
    return YES;
}

id
objc_retain_autorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}
void
objc_storeStrong(id *location, id obj)
{
    id prev = *location;
    if (obj == prev) {
        return;
    }
    objc_retain(obj);
    *location = obj;
    objc_release(prev);
}
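
// Illustrative sketch (not part of the runtime): under ARC the compiler
// lowers an assignment to a __strong variable, such as `strongVar = obj;`,
// into a call equivalent to:
//
//   objc_storeStrong(&strongVar, obj);
//
// i.e. retain the new value, store it, then release the old value, in
// that order, so the sequence is safe even when obj == strongVar.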
// Update a weak variable.
// If HaveOld is true, the variable has an existing value
// that needs to be cleaned up. This value might be nil.
// If HaveNew is true, there is a new value that needs to be
// assigned into the variable. This value might be nil.
// If CrashIfDeallocating is true, the process is halted if newObj is
// deallocating or newObj's class does not support weak references.
// If CrashIfDeallocating is false, nil is stored instead.
enum CrashIfDeallocating {
    DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
};
template <HaveOld haveOld, HaveNew haveNew,
          CrashIfDeallocating crashIfDeallocating>
static id
storeWeak(id *location, objc_object *newObj)
{
    ASSERT(haveOld || haveNew);
    if (!haveNew) ASSERT(newObj == nil);

    Class previouslyInitializedClass = nil;
    id oldObj;
    SideTable *oldTable;
    SideTable *newTable;

    // Acquire locks for old and new values.
    // Order by lock address to prevent lock ordering problems.
    // Retry if the old value changes underneath us.
 retry:
    if (haveOld) {
        oldObj = *location;
        oldTable = &SideTables()[oldObj];
    } else {
        oldTable = nil;
    }
    if (haveNew) {
        newTable = &SideTables()[newObj];
    } else {
        newTable = nil;
    }

    SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);

    if (haveOld && *location != oldObj) {
        SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
        goto retry;
    }

    // Prevent a deadlock between the weak reference machinery
    // and the +initialize machinery by ensuring that no
    // weakly-referenced object has an un-+initialized isa.
    if (haveNew && newObj) {
        Class cls = newObj->getIsa();
        if (cls != previouslyInitializedClass &&
            !((objc_class *)cls)->isInitialized())
        {
            SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
            class_initialize(cls, (id)newObj);

            // If this class is finished with +initialize then we're good.
            // If this class is still running +initialize on this thread
            // (i.e. +initialize called storeWeak on an instance of itself)
            // then we may proceed but it will appear initializing and
            // not yet initialized to the check above.
            // Instead set previouslyInitializedClass to recognize it on retry.
            previouslyInitializedClass = cls;

            goto retry;
        }
    }

    // Clean up old value, if any.
    if (haveOld) {
        weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
    }

    // Assign new value, if any.
    if (haveNew) {
        newObj = (objc_object *)
            weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
                                  crashIfDeallocating);
        // weak_register_no_lock returns nil if weak store should be rejected

        // Set is-weakly-referenced bit in refcount table.
        if (newObj && !newObj->isTaggedPointer()) {
            newObj->setWeaklyReferenced_nolock();
        }

        // Do not set *location anywhere else. That would introduce a race.
        *location = (id)newObj;
    }
    else {
        // No new value. The storage is not changed.
    }

    SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);

    return (id)newObj;
}
/**
 * This function stores a new value into a __weak variable. It would
 * be used anywhere a __weak variable is the target of an assignment.
 *
 * @param location The address of the weak pointer itself
 * @param newObj The new object this weak ptr should now point to
 */
id
objc_storeWeak(id *location, id newObj)
{
    return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
        (location, (objc_object *)newObj);
}


/**
 * This function stores a new value into a __weak variable.
 * If the new object is deallocating or the new object's class
 * does not support weak references, stores nil instead.
 *
 * @param location The address of the weak pointer itself
 * @param newObj The new object this weak ptr should now point to
 *
 * @return The value stored (either the new object or nil)
 */
id
objc_storeWeakOrNil(id *location, id newObj)
{
    return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
        (location, (objc_object *)newObj);
}


/**
 * Initialize a fresh weak pointer to some object location.
 * It would be used for code like:
 *
 * __weak id weakPtr = o;
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to the weak variable. (Concurrent weak clear is safe.)
 *
 * @param location Address of __weak ptr.
 * @param newObj Object ptr.
 */
id
objc_initWeak(id *location, id newObj)
{
    if (!newObj) {
        *location = nil;
        return nil;
    }

    return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
        (location, (objc_object*)newObj);
}

id
objc_initWeakOrNil(id *location, id newObj)
{
    if (!newObj) {
        *location = nil;
        return nil;
    }

    return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
        (location, (objc_object*)newObj);
}
/**
 * Destroys the relationship between a weak pointer
 * and the object it is referencing in the internal weak
 * table. If the weak pointer is not referencing anything,
 * there is no need to edit the weak table.
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to the weak variable. (Concurrent weak clear is safe.)
 *
 * @param location The weak pointer address.
 */
void
objc_destroyWeak(id *location)
{
    (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
        (location, nil);
}


/*
  Once upon a time we eagerly cleared *location if we saw the object
  was deallocating. This confuses code like NSPointerFunctions which
  tries to pre-flight the raw storage and assumes if the storage is
  zero then the weak system is done interfering. That is false: the
  weak system is still going to check and clear the storage later.
  This can cause objc_weak_error complaints and crashes.
  So we now don't touch the storage until deallocation completes.
*/
id
objc_loadWeakRetained(id *location)
{
    id obj;
    id result;
    Class cls;

    SideTable *table;

 retry:
    // fixme std::atomic this load
    obj = *location;
    if (!obj) return nil;
    if (obj->isTaggedPointer()) return obj;

    table = &SideTables()[obj];

    table->lock();
    if (*location != obj) {
        table->unlock();
        goto retry;
    }

    result = obj;

    cls = obj->ISA();
    if (! cls->hasCustomRR()) {
        // Fast case. We know +initialize is complete because
        // default-RR can never be set before then.
        ASSERT(cls->isInitialized());
        if (! obj->rootTryRetain()) {
            result = nil;
        }
    }
    else {
        // Slow case. We must check for +initialize and call it outside
        // the lock if necessary in order to avoid deadlocks.
        if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
            BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
                class_getMethodImplementation(cls, @selector(retainWeakReference));
            if ((IMP)tryRetain == _objc_msgForward) {
                result = nil;
            }
            else if (! (*tryRetain)(obj, @selector(retainWeakReference))) {
                result = nil;
            }
        }
        else {
            table->unlock();
            class_initialize(cls, obj);
            goto retry;
        }
    }

    table->unlock();
    return result;
}
/**
 * This loads the object referenced by a weak pointer and returns it, after
 * retaining and autoreleasing the object to ensure that it stays alive
 * long enough for the caller to use it. This function would be used
 * anywhere a __weak variable is used in an expression.
 *
 * @param location The weak pointer address
 *
 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
 */
id
objc_loadWeak(id *location)
{
    if (!*location) return nil;
    return objc_autorelease(objc_loadWeakRetained(location));
}
/**
 * This function copies a weak pointer from one location to another,
 * when the destination doesn't already contain a weak pointer. It
 * would be used for code like:
 *
 *  __weak id src = ...;
 *  __weak id dst = src;
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to the destination variable. (Concurrent weak clear is safe.)
 *
 * @param dst The destination variable.
 * @param src The source variable.
 */
void
objc_copyWeak(id *dst, id *src)
{
    id obj = objc_loadWeakRetained(src);
    objc_initWeak(dst, obj);
    objc_release(obj);
}
/**
 * Move a weak pointer from one location to another.
 * Before the move, the destination must be uninitialized.
 * After the move, the source is nil.
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to either weak variable. (Concurrent weak clear is safe.)
 */
void
objc_moveWeak(id *dst, id *src)
{
    objc_copyWeak(dst, src);
    objc_destroyWeak(src);
    *src = nil;
}
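
// Illustrative sketch (not part of the runtime): how the compiler pairs the
// entry points above over a __weak variable's lifetime. A block of code like
//
//   { __weak id w = obj;  [w someMessage]; }   // `someMessage` is hypothetical
//
// lowers to calls equivalent to:
//
//   id w;
//   objc_initWeak(&w, obj);              // fresh variable, no old value to clean up
//   id tmp = objc_loadWeakRetained(&w);  // read: returns the referent retained
//   [tmp someMessage];
//   objc_release(tmp);
//   objc_destroyWeak(&w);                // end of scope: unregister from weak table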
/***********************************************************************
   Autorelease pool implementation

   A thread's autorelease pool is a stack of pointers.
   Each pointer is either an object to release, or POOL_BOUNDARY which is
     an autorelease pool boundary.
   A pool token is a pointer to the POOL_BOUNDARY for that pool. When
     the pool is popped, every object hotter than the sentinel is released.
   The stack is divided into a doubly-linked list of pages. Pages are added
     and deleted as necessary.
   Thread-local storage points to the hot page, where newly autoreleased
     objects are stored.
**********************************************************************/
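
/***********************************************************************
   Illustrative sketch (not part of the original source): an
   @autoreleasepool statement corresponds to the token-based API
   implemented below.

     void *token = objc_autoreleasePoolPush();  // records a POOL_BOUNDARY
     // ... autoreleased objects accumulate on the thread's pool stack ...
     objc_autoreleasePoolPop(token);            // releases every object
                                                // hotter than the boundary
**********************************************************************/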
BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
class AutoreleasePoolPage : private AutoreleasePoolPageData
{
    friend struct thread_data_t;

public:
    static size_t const SIZE =
#if PROTECT_AUTORELEASEPOOL
        PAGE_MAX_SIZE;  // must be multiple of vm page size
#else
        PAGE_MIN_SIZE;  // size and alignment, power of 2
#endif

private:
    static pthread_key_t const key = AUTORELEASE_POOL_KEY;
    static uint8_t const SCRIBBLE = 0xA3;  // 0xA3A3A3A3 after releasing
    static size_t const COUNT = SIZE / sizeof(id);
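
    // Illustrative arithmetic (not from the original source): with a
    // 4096-byte page and 8-byte object pointers, COUNT = 4096 / 8 = 512
    // slots, the first few of which are occupied by the inherited
    // AutoreleasePoolPageData header fields.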
    // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
    // pushed and it has never contained any objects. This saves memory
    // when the top level (i.e. libdispatch) pushes and pops pools but
    // never uses them.
#   define EMPTY_POOL_PLACEHOLDER ((id*)1)

#   define POOL_BOUNDARY nil

    // SIZE-sizeof(*this) bytes of contents follow

    static void * operator new(size_t size) {
        return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
    }
    static void operator delete(void * p) {
        return free(p);
    }

    inline void protect() {
#if PROTECT_AUTORELEASEPOOL
        mprotect(this, SIZE, PROT_READ);
        check();
#endif
    }

    inline void unprotect() {
#if PROTECT_AUTORELEASEPOOL
        check();
        mprotect(this, SIZE, PROT_READ | PROT_WRITE);
#endif
    }
    AutoreleasePoolPage(AutoreleasePoolPage *newParent) :
        AutoreleasePoolPageData(begin(),
                                objc_thread_self(),
                                newParent,
                                newParent ? 1+newParent->depth : 0,
                                newParent ? newParent->hiwat : 0)
    {
        if (parent) {
            parent->check();
            ASSERT(!parent->child);
            parent->unprotect();
            parent->child = this;
            parent->protect();
        }
        protect();
    }

    ~AutoreleasePoolPage()
    {
        check();
        unprotect();
        ASSERT(empty());

        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        ASSERT(!child);
    }
    template<typename Fn>
    void
    busted(Fn log) const
    {
        magic_t right;
        log("autorelease pool page %p corrupted\n"
             "  magic     0x%08x 0x%08x 0x%08x 0x%08x\n"
             "  should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
             "  pthread   %p\n"
             "  should be %p\n",
             this,
             magic.m[0], magic.m[1], magic.m[2], magic.m[3],
             right.m[0], right.m[1], right.m[2], right.m[3],
             this->thread, objc_thread_self());
    }

    __attribute__((noinline, cold, noreturn))
    void
    busted_die() const
    {
        busted(_objc_fatal);
        __builtin_unreachable();
    }

    inline void
    check(bool die = true) const
    {
        if (!magic.check() || thread != objc_thread_self()) {
            if (die) {
                busted_die();
            } else {
                busted(_objc_inform);
            }
        }
    }

    inline void
    fastcheck() const
    {
#if CHECK_AUTORELEASEPOOL
        check();
#else
        if (! magic.fastcheck()) {
            busted_die();
        }
#endif
    }

    id * begin() { return (id *) ((uint8_t *)this+sizeof(*this)); }
    id * end() { return (id *) ((uint8_t *)this+SIZE); }
    bool empty() { return next == begin(); }
    bool full() { return next == end(); }

    bool lessThanHalfFull() {
        return (next - begin() < (end() - begin()) / 2);
    }
    id *add(id obj)
    {
        ASSERT(!full());
        unprotect();
        id *ret = next;  // faster than `return next-1` because of aliasing
        *next++ = obj;
        protect();
        return ret;
    }

    void releaseAll()
    {
        releaseUntil(begin());
    }
    void releaseUntil(id *stop)
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage

        while (this->next != stop) {
            // Restart from hotPage() every time, in case -release
            // autoreleased more objects
            AutoreleasePoolPage *page = hotPage();

            // fixme I think this `while` can be `if`, but I can't prove it
            while (page->empty()) {
                page = page->parent;
                setHotPage(page);
            }

            page->unprotect();
            id obj = *--page->next;
            memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
            page->protect();

            if (obj != POOL_BOUNDARY) {
                objc_release(obj);
            }
        }

        setHotPage(this);

#if DEBUG
        // we expect any children to be completely empty
        for (AutoreleasePoolPage *page = child; page; page = page->child) {
            ASSERT(page->empty());
        }
#endif
    }

    void kill()
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        AutoreleasePoolPage *page = this;
        while (page->child) page = page->child;

        AutoreleasePoolPage *deathptr;
        do {
            deathptr = page;
            page = page->parent;
            if (page) {
                page->unprotect();
                page->child = nil;
                page->protect();
            }
            delete deathptr;
        } while (deathptr != this);
    }
    static void tls_dealloc(void *p)
    {
        if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
            // No objects or pool pages to clean up here.
            return;
        }

        // reinstate TLS value while we work
        setHotPage((AutoreleasePoolPage *)p);

        if (AutoreleasePoolPage *page = coldPage()) {
            if (!page->empty()) objc_autoreleasePoolPop(page->begin());  // pop all of the pools
            if (slowpath(DebugMissingPools || DebugPoolAllocation)) {
                // pop() killed the pages already
            } else {
                page->kill();  // free all of the pages
            }
        }

        // clear TLS value so TLS destruction doesn't loop
        setHotPage(nil);
    }
    static AutoreleasePoolPage *pageForPointer(const void *p)
    {
        return pageForPointer((uintptr_t)p);
    }

    static AutoreleasePoolPage *pageForPointer(uintptr_t p)
    {
        AutoreleasePoolPage *result;
        uintptr_t offset = p % SIZE;

        ASSERT(offset >= sizeof(AutoreleasePoolPage));

        result = (AutoreleasePoolPage *)(p - offset);
        result->fastcheck();

        return result;
    }

    static inline bool haveEmptyPoolPlaceholder()
    {
        id *tls = (id *)tls_get_direct(key);
        return (tls == EMPTY_POOL_PLACEHOLDER);
    }

    static inline id* setEmptyPoolPlaceholder()
    {
        ASSERT(tls_get_direct(key) == nil);
        tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
        return EMPTY_POOL_PLACEHOLDER;
    }
    static inline AutoreleasePoolPage *hotPage()
    {
        AutoreleasePoolPage *result = (AutoreleasePoolPage *)
            tls_get_direct(key);
        if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
        if (result) result->fastcheck();
        return result;
    }

    static inline void setHotPage(AutoreleasePoolPage *page)
    {
        if (page) page->fastcheck();
        tls_set_direct(key, (void *)page);
    }

    static inline AutoreleasePoolPage *coldPage()
    {
        AutoreleasePoolPage *result = hotPage();
        if (result) {
            while (result->parent) {
                result = result->parent;
                result->fastcheck();
            }
        }
        return result;
    }
    static inline id *autoreleaseFast(id obj)
    {
        AutoreleasePoolPage *page = hotPage();
        if (page && !page->full()) {
            return page->add(obj);
        } else if (page) {
            return autoreleaseFullPage(obj, page);
        } else {
            return autoreleaseNoPage(obj);
        }
    }
    static __attribute__((noinline))
    id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
    {
        // The hot page is full.
        // Step to the next non-full page, adding a new page if necessary.
        // Then add the object to that page.
        ASSERT(page == hotPage());
        ASSERT(page->full() || DebugPoolAllocation);

        do {
            if (page->child) page = page->child;
            else page = new AutoreleasePoolPage(page);
        } while (page->full());

        setHotPage(page);
        return page->add(obj);
    }
    static __attribute__((noinline))
    id *autoreleaseNoPage(id obj)
    {
        // "No page" could mean no pool has been pushed
        // or an empty placeholder pool has been pushed and has no contents yet
        ASSERT(!hotPage());

        bool pushExtraBoundary = false;
        if (haveEmptyPoolPlaceholder()) {
            // We are pushing a second pool over the empty placeholder pool
            // or pushing the first object into the empty placeholder pool.
            // Before doing that, push a pool boundary on behalf of the pool
            // that is currently represented by the empty placeholder.
            pushExtraBoundary = true;
        }
        else if (obj != POOL_BOUNDARY && DebugMissingPools) {
            // We are pushing an object with no pool in place,
            // and no-pool debugging was requested by environment.
            _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
                         "autoreleased with no pool in place - "
                         "just leaking - break on "
                         "objc_autoreleaseNoPool() to debug",
                         objc_thread_self(), (void*)obj, object_getClassName(obj));
            objc_autoreleaseNoPool(obj);
            return nil;
        }
        else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
            // We are pushing a pool with no pool in place,
            // and alloc-per-pool debugging was not requested.
            // Install and return the empty pool placeholder.
            return setEmptyPoolPlaceholder();
        }

        // We are pushing an object or a non-placeholder'd pool.

        // Install the first page.
        AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
        setHotPage(page);

        // Push a boundary on behalf of the previously-placeholder'd pool.
        if (pushExtraBoundary) {
            page->add(POOL_BOUNDARY);
        }

        // Push the requested object or pool.
        return page->add(obj);
    }
    static __attribute__((noinline))
    id *autoreleaseNewPage(id obj)
    {
        AutoreleasePoolPage *page = hotPage();
        if (page) return autoreleaseFullPage(obj, page);
        else return autoreleaseNoPage(obj);
    }

public:
    static inline id autorelease(id obj)
    {
        ASSERT(obj);
        ASSERT(!obj->isTaggedPointer());
        id *dest __unused = autoreleaseFast(obj);
        ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
        return obj;
    }
    static inline void *push()
    {
        id *dest;
        if (slowpath(DebugPoolAllocation)) {
            // Each autorelease pool starts on a new pool page.
            dest = autoreleaseNewPage(POOL_BOUNDARY);
        } else {
            dest = autoreleaseFast(POOL_BOUNDARY);
        }
        ASSERT(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
        return dest;
    }
    __attribute__((noinline, cold))
    static void badPop(void *token)
    {
        // Error. For bincompat purposes this is not
        // fatal in executables built with old SDKs.

        if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0, 2_0)) {
            // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
            _objc_fatal
                ("Invalid or prematurely-freed autorelease pool %p.", token);
        }

        // Old SDK. Bad pop is warned once.
        static bool complained = false;
        if (!complained) {
            complained = true;
            _objc_inform_now_and_on_crash
                ("Invalid or prematurely-freed autorelease pool %p. "
                 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
                 "Proceeding anyway because the app is old "
                 "(SDK version " SDK_FORMAT "). Memory errors are likely.",
                 token, FORMAT_SDK(sdkVersion()));
        }
        objc_autoreleasePoolInvalid(token);
    }
    template<bool allowDebug>
    static void
    popPage(void *token, AutoreleasePoolPage *page, id *stop)
    {
        if (allowDebug && PrintPoolHiwat) printHiwat();

        page->releaseUntil(stop);

        // memory: delete empty children
        if (allowDebug && DebugPoolAllocation && page->empty()) {
            // special case: delete everything during page-per-pool debugging
            AutoreleasePoolPage *parent = page->parent;
            page->kill();
            setHotPage(parent);
        } else if (allowDebug && DebugMissingPools && page->empty() && !page->parent) {
            // special case: delete everything for pop(top)
            // when debugging missing autorelease pools
            page->kill();
            setHotPage(nil);
        } else if (page->child) {
            // hysteresis: keep one empty child if page is more than half full
            if (page->lessThanHalfFull()) {
                page->child->kill();
            }
            else if (page->child->child) {
                page->child->child->kill();
            }
        }
    }

    __attribute__((noinline, cold))
    static void
    popPageDebug(void *token, AutoreleasePoolPage *page, id *stop)
    {
        popPage<true>(token, page, stop);
    }
    static inline void
    pop(void *token)
    {
        AutoreleasePoolPage *page;
        id *stop;
        if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
            // Popping the top-level placeholder pool.
            page = hotPage();
            if (!page) {
                // Pool was never used. Clear the placeholder.
                return setHotPage(nil);
            }
            // Pool was used. Pop its contents normally.
            // Pool pages remain allocated for re-use as usual.
            page = coldPage();
            token = page->begin();
        } else {
            page = pageForPointer(token);
        }

        stop = (id *)token;
        if (*stop != POOL_BOUNDARY) {
            if (stop == page->begin() && !page->parent) {
                // Start of coldest page may correctly not be POOL_BOUNDARY:
                // 1. top-level pool is popped, leaving the cold page in place
                // 2. an object is autoreleased with no pool
            } else {
                // Error. For bincompat purposes this is not
                // fatal in executables built with old SDKs.
                return badPop(token);
            }
        }

        if (slowpath(PrintPoolHiwat || DebugPoolAllocation || DebugMissingPools)) {
            return popPageDebug(token, page, stop);
        }

        return popPage<false>(token, page, stop);
    }
    static void init()
    {
        int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
                                             AutoreleasePoolPage::tls_dealloc);
        ASSERT(r == 0);
    }
    __attribute__((noinline, cold))
    void print()
    {
        _objc_inform("[%p]  ................  PAGE %s %s %s", this,
                     full() ? "(full)" : "",
                     this == hotPage() ? "(hot)" : "",
                     this == coldPage() ? "(cold)" : "");
        check(false);
        for (id *p = begin(); p < next; p++) {
            if (*p == POOL_BOUNDARY) {
                _objc_inform("[%p]  ################  POOL %p", p, p);
            } else {
                _objc_inform("[%p]  %#16lx  %s",
                             p, (unsigned long)*p, object_getClassName(*p));
            }
        }
    }
    __attribute__((noinline, cold))
    static void printAll()
    {
        _objc_inform("##############");
        _objc_inform("AUTORELEASE POOLS for thread %p", objc_thread_self());

        AutoreleasePoolPage *page;
        ptrdiff_t objects = 0;
        for (page = coldPage(); page; page = page->child) {
            objects += page->next - page->begin();
        }
        _objc_inform("%llu releases pending.", (unsigned long long)objects);

        if (haveEmptyPoolPlaceholder()) {
            _objc_inform("[%p]  ................  PAGE (placeholder)",
                         EMPTY_POOL_PLACEHOLDER);
            _objc_inform("[%p]  ################  POOL (placeholder)",
                         EMPTY_POOL_PLACEHOLDER);
        }
        else {
            for (page = coldPage(); page; page = page->child) {
                page->print();
            }
        }

        _objc_inform("##############");
    }
    __attribute__((noinline, cold))
    static void printHiwat()
    {
        // Check and propagate high water mark
        // Ignore high water marks under 256 to suppress noise.
        AutoreleasePoolPage *p = hotPage();
        uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
        if (mark > p->hiwat && mark > 256) {
            for ( ; p; p = p->parent) {
                p->unprotect();
                p->hiwat = mark;
                p->protect();
            }

            _objc_inform("POOL HIGHWATER: new high water mark of %u "
                         "pending releases for thread %p:",
                         mark, objc_thread_self());

            void *stack[128];
            int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
            char **sym = backtrace_symbols(stack, count);
            for (int i = 0; i < count; i++) {
                _objc_inform("POOL HIGHWATER: %s", sym[i]);
            }
            free(sym);
        }
    }

#undef POOL_BOUNDARY
};
/***********************************************************************
* Slow paths for inline control
**********************************************************************/

#if SUPPORT_NONPOINTER_ISA

NEVER_INLINE id
objc_object::rootRetain_overflow(bool tryRetain)
{
    return rootRetain(tryRetain, true);
}


NEVER_INLINE uintptr_t
objc_object::rootRelease_underflow(bool performDealloc)
{
    return rootRelease(performDealloc, true);
}
// Slow path of clearDeallocating()
// for objects with nonpointer isa
// that were ever weakly referenced
// or whose retain count ever overflowed to the side table.
NEVER_INLINE void
objc_object::clearDeallocating_slow()
{
    ASSERT(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));

    SideTable& table = SideTables()[this];
    table.lock();
    if (isa.weakly_referenced) {
        weak_clear_no_lock(&table.weak_table, (id)this);
    }
    if (isa.has_sidetable_rc) {
        table.refcnts.erase(this);
    }
    table.unlock();
}
__attribute__((noinline,used))
id
objc_object::rootAutorelease2()
{
    ASSERT(!isTaggedPointer());
    return AutoreleasePoolPage::autorelease((id)this);
}


BREAKPOINT_FUNCTION(
    void objc_overrelease_during_dealloc_error(void)
);


NEVER_INLINE uintptr_t
objc_object::overrelease_error()
{
    _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
    objc_overrelease_during_dealloc_error();
    return 0; // allow rootRelease() to tail-call this
}
/***********************************************************************
* Retain count operations for side table.
**********************************************************************/


// Used to assert that an object is not present in the side table.
bool
objc_object::sidetable_present()
{
    bool result = false;
    SideTable& table = SideTables()[this];

    table.lock();

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) result = true;

    if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;

    table.unlock();

    return result;
}
#if SUPPORT_NONPOINTER_ISA

void
objc_object::sidetable_lock()
{
    SideTable& table = SideTables()[this];
    table.lock();
}

void
objc_object::sidetable_unlock()
{
    SideTable& table = SideTables()[this];
    table.unlock();
}
// Move the entire retain count to the side table,
// as well as isDeallocating and weaklyReferenced.
void
objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
                                          bool isDeallocating,
                                          bool weaklyReferenced)
{
    ASSERT(!isa.nonpointer);        // should already be changed to raw pointer
    SideTable& table = SideTables()[this];

    size_t& refcntStorage = table.refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // not deallocating - that was in the isa
    ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    uintptr_t carry;
    size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
    if (carry) refcnt = SIDE_TABLE_RC_PINNED;
    if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
    if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;

    refcntStorage = refcnt;
}
// Move some retain counts to the side table from the isa field.
// Returns true if the object is now pinned.
bool
objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
{
    ASSERT(isa.nonpointer);
    SideTable& table = SideTables()[this];

    size_t& refcntStorage = table.refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // isa-side bits should not be set here
    ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;

    uintptr_t carry;
    size_t newRefcnt =
        addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
    if (carry) {
        refcntStorage =
            SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
        return true;
    }
    else {
        refcntStorage = newRefcnt;
        return false;
    }
}
// Move some retain counts from the side table to the isa field.
// Returns the actual count subtracted, which may be less than the request.
size_t
objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
{
    ASSERT(isa.nonpointer);
    SideTable& table = SideTables()[this];

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end() || it->second == 0) {
        // Side table retain count is zero. Can't borrow.
        return 0;
    }
    size_t oldRefcnt = it->second;

    // isa-side bits should not be set here
    ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
    ASSERT(oldRefcnt > newRefcnt);  // shouldn't underflow
    it->second = newRefcnt;
    return delta_rc;
}


size_t
objc_object::sidetable_getExtraRC_nolock()
{
    ASSERT(isa.nonpointer);
    SideTable& table = SideTables()[this];
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end()) return 0;
    else return it->second >> SIDE_TABLE_RC_SHIFT;
}

#endif // SUPPORT_NONPOINTER_ISA
id
objc_object::sidetable_retain()
{
#if SUPPORT_NONPOINTER_ISA
    ASSERT(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    table.lock();
    size_t& refcntStorage = table.refcnts[this];
    if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
        refcntStorage += SIDE_TABLE_RC_ONE;
    }
    table.unlock();

    return (id)this;
}


bool
objc_object::sidetable_tryRetain()
{
#if SUPPORT_NONPOINTER_ISA
    ASSERT(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    // NO SPINLOCK HERE
    // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
    // which already acquired the lock on our behalf.

    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table.slock == 0) {
    //     _objc_fatal("Do not call -_tryRetain.");
    // }

    bool result = true;
    auto it = table.refcnts.try_emplace(this, SIDE_TABLE_RC_ONE);
    auto &refcnt = it.first->second;
    if (it.second) {
        // there was no entry
    } else if (refcnt & SIDE_TABLE_DEALLOCATING) {
        result = false;
    } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
        refcnt += SIDE_TABLE_RC_ONE;
    }

    return result;
}
uintptr_t
objc_object::sidetable_retainCount()
{
    SideTable& table = SideTables()[this];

    size_t refcnt_result = 1;

    table.lock();
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        // this is valid for SIDE_TABLE_RC_PINNED too
        refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
    }
    table.unlock();
    return refcnt_result;
}


bool
objc_object::sidetable_isDeallocating()
{
    SideTable& table = SideTables()[this];

    // NO SPINLOCK HERE
    // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
    // which already acquired the lock on our behalf.

    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table.slock == 0) {
    //     _objc_fatal("Do not call -_isDeallocating.");
    // }

    RefcountMap::iterator it = table.refcnts.find(this);
    return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
}
bool
objc_object::sidetable_isWeaklyReferenced()
{
    bool result = false;

    SideTable& table = SideTables()[this];
    table.lock();

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
    }

    table.unlock();

    return result;
}


void
objc_object::sidetable_setWeaklyReferenced_nolock()
{
#if SUPPORT_NONPOINTER_ISA
    ASSERT(!isa.nonpointer);
#endif

    SideTable& table = SideTables()[this];

    table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
}
// return uintptr_t instead of bool so that the various raw-isa
// -release paths all return zero in eax
uintptr_t
objc_object::sidetable_release(bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
    ASSERT(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    bool do_dealloc = false;

    table.lock();
    auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
    auto &refcnt = it.first->second;
    if (it.second) {
        do_dealloc = true;
    } else if (refcnt < SIDE_TABLE_DEALLOCATING) {
        // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
        do_dealloc = true;
        refcnt |= SIDE_TABLE_DEALLOCATING;
    } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
        refcnt -= SIDE_TABLE_RC_ONE;
    }
    table.unlock();
    if (do_dealloc && performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
    }
    return do_dealloc;
}


void
objc_object::sidetable_clearDeallocating()
{
    SideTable& table = SideTables()[this];

    // clear any weak table items
    // clear extra retain count and deallocating bit
    // (fixme warn or abort if extra retain count == 0 ?)
    table.lock();
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
            weak_clear_no_lock(&table.weak_table, (id)this);
        }
        table.refcnts.erase(it);
    }
    table.unlock();
}
/***********************************************************************
* Optimized retain/release/autorelease entrypoints
**********************************************************************/


#if __OBJC2__

__attribute__((aligned(16), flatten, noinline))
id
objc_retain(id obj)
{
    if (!obj) return obj;
    if (obj->isTaggedPointer()) return obj;
    return obj->retain();
}


__attribute__((aligned(16), flatten, noinline))
void
objc_release(id obj)
{
    if (!obj) return;
    if (obj->isTaggedPointer()) return;
    return obj->release();
}


__attribute__((aligned(16), flatten, noinline))
id
objc_autorelease(id obj)
{
    if (!obj) return obj;
    if (obj->isTaggedPointer()) return obj;
    return obj->autorelease();
}

#else

id objc_retain(id obj) { return [obj retain]; }
void objc_release(id obj) { [obj release]; }
id objc_autorelease(id obj) { return [obj autorelease]; }

#endif
/***********************************************************************
* Basic operations for root class implementations a.k.a. _objc_root*()
**********************************************************************/

bool
_objc_rootTryRetain(id obj)
{
    ASSERT(obj);

    return obj->rootTryRetain();
}

bool
_objc_rootIsDeallocating(id obj)
{
    ASSERT(obj);

    return obj->rootIsDeallocating();
}


void
objc_clear_deallocating(id obj)
{
    ASSERT(obj);

    if (obj->isTaggedPointer()) return;
    obj->clearDeallocating();
}


bool
_objc_rootReleaseWasZero(id obj)
{
    ASSERT(obj);

    return obj->rootReleaseShouldDealloc();
}


NEVER_INLINE id
_objc_rootAutorelease(id obj)
{
    ASSERT(obj);
    return obj->rootAutorelease();
}

uintptr_t
_objc_rootRetainCount(id obj)
{
    ASSERT(obj);

    return obj->rootRetainCount();
}


NEVER_INLINE id
_objc_rootRetain(id obj)
{
    ASSERT(obj);

    return obj->rootRetain();
}

NEVER_INLINE void
_objc_rootRelease(id obj)
{
    ASSERT(obj);

    obj->rootRelease();
}
// Call [cls alloc] or [cls allocWithZone:nil], with appropriate
// shortcutting optimizations.
static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
{
#if __OBJC2__
    if (slowpath(checkNil && !cls)) return nil;
    if (fastpath(!cls->ISA()->hasCustomAWZ())) {
        return _objc_rootAllocWithZone(cls, nil);
    }
#endif

    // No shortcuts available.
    if (allocWithZone) {
        return ((id(*)(id, SEL, struct _NSZone *))objc_msgSend)(cls, @selector(allocWithZone:), nil);
    }
    return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(alloc));
}


// Base class implementation of +alloc. cls is not nil.
// Calls [cls allocWithZone:nil].
id
_objc_rootAlloc(Class cls)
{
    return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
}

// Calls [cls alloc].
id
objc_alloc(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
}

// Calls [cls allocWithZone:nil].
id
objc_allocWithZone(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
}
// Calls [[cls alloc] init].
id
objc_alloc_init(Class cls)
{
    return [callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/) init];
}
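
// Illustrative sketch (not part of the runtime): the compiler may rewrite a
// source-level `[[Foo alloc] init]` into a single runtime call, where `Foo`
// is a hypothetical class:
//
//   id obj = objc_alloc_init(objc_getClass("Foo"));
//   // behaves like [[Foo alloc] init], taking the callAlloc() fast path
//   // when Foo does not override +alloc/+allocWithZone:.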
// Calls [cls new]
id
objc_opt_new(Class cls)
{
#if __OBJC2__
    if (fastpath(cls && !cls->ISA()->hasCustomCore())) {
        return [callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/) init];
    }
#endif
    return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new));
}

// Calls [obj self]
id
objc_opt_self(id obj)
{
#if __OBJC2__
    if (fastpath(!obj || obj->isTaggedPointer() || !obj->ISA()->hasCustomCore())) {
        return obj;
    }
#endif
    return ((id(*)(id, SEL))objc_msgSend)(obj, @selector(self));
}

// Calls [obj class]
Class
objc_opt_class(id obj)
{
#if __OBJC2__
    if (slowpath(!obj)) return nil;
    Class cls = obj->getIsa();
    if (fastpath(!cls->hasCustomCore())) {
        return cls->isMetaClass() ? (Class)obj : cls;
    }
#endif
    return ((Class(*)(id, SEL))objc_msgSend)(obj, @selector(class));
}

// Calls [obj isKindOfClass]
BOOL
objc_opt_isKindOfClass(id obj, Class otherClass)
{
#if __OBJC2__
    if (slowpath(!obj)) return NO;
    Class cls = obj->getIsa();
    if (fastpath(!cls->hasCustomCore())) {
        for (Class tcls = cls; tcls; tcls = tcls->superclass) {
            if (tcls == otherClass) return YES;
        }
        return NO;
    }
#endif
    return ((BOOL(*)(id, SEL, Class))objc_msgSend)(obj, @selector(isKindOfClass:), otherClass);
}

// Calls [obj respondsToSelector]
BOOL
objc_opt_respondsToSelector(id obj, SEL sel)
{
#if __OBJC2__
    if (slowpath(!obj)) return NO;
    Class cls = obj->getIsa();
    if (fastpath(!cls->hasCustomCore())) {
        return class_respondsToSelector_inst(obj, sel, cls);
    }
#endif
    return ((BOOL(*)(id, SEL, SEL))objc_msgSend)(obj, @selector(respondsToSelector:), sel);
}
void
_objc_rootDealloc(id obj)
{
    ASSERT(obj);

    obj->rootDealloc();
}

void
_objc_rootFinalize(id obj __unused)
{
    ASSERT(obj);
    _objc_fatal("_objc_rootFinalize called with garbage collection off");
}


id
_objc_rootInit(id obj)
{
    // In practice, it will be hard to rely on this function.
    // Many classes do not properly chain -init calls.
    return obj;
}


malloc_zone_t *
_objc_rootZone(id obj)
{
    (void)obj;
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    return malloc_default_zone();
#else
    malloc_zone_t *rval = malloc_zone_from_ptr(obj);
    return rval ? rval : malloc_default_zone();
#endif
}

uintptr_t
_objc_rootHash(id obj)
{
    return (uintptr_t)obj;
}
void *
objc_autoreleasePoolPush(void)
{
    return AutoreleasePoolPage::push();
}

void
objc_autoreleasePoolPop(void *ctxt)
{
    AutoreleasePoolPage::pop(ctxt);
}


void *
_objc_autoreleasePoolPush(void)
{
    return objc_autoreleasePoolPush();
}

void
_objc_autoreleasePoolPop(void *ctxt)
{
    objc_autoreleasePoolPop(ctxt);
}

void
_objc_autoreleasePoolPrint(void)
{
    AutoreleasePoolPage::printAll();
}
// Same as objc_release but suitable for tail-calling
// if you need the value back and don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_releaseAndReturn(id obj)
{
    objc_release(obj);
    return obj;
}

// Same as objc_retainAutorelease but suitable for tail-calling
// if you don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_retainAutoreleaseAndReturn(id obj)
{
    return objc_retainAutorelease(obj);
}
// Prepare a value at +1 for return through a +0 autoreleasing convention.
id
objc_autoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;

    return objc_autorelease(obj);
}

// Prepare a value at +0 for return through a +0 autoreleasing convention.
id
objc_retainAutoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;

    // not objc_autoreleaseReturnValue(objc_retain(obj))
    // because we don't need another optimization attempt
    return objc_retainAutoreleaseAndReturn(obj);
}

// Accept a value returned through a +0 autoreleasing convention for use at +1.
id
objc_retainAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;

    return objc_retain(obj);
}

// Accept a value returned through a +0 autoreleasing convention for use at +0.
id
objc_unsafeClaimAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;

    return objc_releaseAndReturn(obj);
}
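
// Illustrative sketch (not part of the runtime): how the compiler pairs the
// entry points above across a call boundary. A callee returning at +1:
//
//   id makeObject(void) {                        // hypothetical function
//       id x = objc_retain(someGlobalObject);    // hypothetical +1 value
//       return objc_autoreleaseReturnValue(x);   // offer the +1 to the caller
//   }
//
// and its ARC caller:
//
//   id y = objc_retainAutoreleasedReturnValue(makeObject());
//
// When the handoff succeeds, the autorelease/retain pair cancels out and the
// object never visits the autorelease pool.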
id
objc_retainAutorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}

void
_objc_deallocOnMainThreadHelper(void *context)
{
    id obj = (id)context;
    [obj dealloc];
}
// convert objc_objectptr_t to id, callee must take ownership.
id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert objc_objectptr_t to id, without ownership transfer.
id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert id to objc_objectptr_t, no ownership transfer.
objc_objectptr_t objc_unretainedPointer(id object) { return object; }
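
// Illustrative sketch (not part of the runtime): these helpers express the
// same conversions as ARC bridge casts, e.g.:
//
//   objc_objectptr_t raw = objc_unretainedPointer(obj);  // like __bridge
//   id borrowed = objc_unretainedObject(raw);            // like __bridge
//   id owned    = objc_retainedObject(raw);              // like __bridge_transfer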
void arr_init(void)
{
    AutoreleasePoolPage::init();
    SideTablesMap.init();
    _objc_associations_init();
}
#if SUPPORT_TAGGED_POINTERS

// Placeholder for old debuggers. When they inspect an
// extended tagged pointer object they will see this isa.

@interface __NSUnrecognizedTaggedPointer : NSObject
@end

__attribute__((objc_nonlazy_class))
@implementation __NSUnrecognizedTaggedPointer
-(id) retain { return self; }
-(oneway void) release { }
-(id) autorelease { return self; }
@end

#endif
__attribute__((objc_nonlazy_class))
@implementation NSObject

+ (void)initialize {
}

- (Class)class {
    return object_getClass(self);
}

+ (Class)superclass {
    return self->superclass;
}

- (Class)superclass {
    return [self class]->superclass;
}
+ (BOOL)isMemberOfClass:(Class)cls {
    return self->ISA() == cls;
}

- (BOOL)isMemberOfClass:(Class)cls {
    return [self class] == cls;
}

+ (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = self->ISA(); tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

- (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isSubclassOfClass:(Class)cls {
    for (Class tcls = self; tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isAncestorOfObject:(NSObject *)obj {
    for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
        if (tcls == self) return YES;
    }
    return NO;
}
+ (BOOL)instancesRespondToSelector:(SEL)sel {
    return class_respondsToSelector_inst(nil, sel, self);
}

+ (BOOL)respondsToSelector:(SEL)sel {
    return class_respondsToSelector_inst(self, sel, self->ISA());
}

- (BOOL)respondsToSelector:(SEL)sel {
    return class_respondsToSelector_inst(self, sel, [self class]);
}

+ (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = self; tcls; tcls = tcls->superclass) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

- (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}
+ (NSUInteger)hash {
    return _objc_rootHash(self);
}

- (NSUInteger)hash {
    return _objc_rootHash(self);
}

+ (BOOL)isEqual:(id)obj {
    return obj == (id)self;
}

- (BOOL)isEqual:(id)obj {
    return obj == self;
}
+ (IMP)instanceMethodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return class_getMethodImplementation(self, sel);
}

+ (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation((id)self, sel);
}

- (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation(self, sel);
}

+ (BOOL)resolveClassMethod:(SEL)sel {
    return NO;
}

+ (BOOL)resolveInstanceMethod:(SEL)sel {
    return NO;
}
// Replaced by CF (throws an NSException)
+ (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
                class_getName(self), sel_getName(sel), self);
}

// Replaced by CF (throws an NSException)
- (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
                object_getClassName(self), sel_getName(sel), self);
}
+ (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
}

- (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)(self, sel);
}

- (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
}

- (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
}
// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
- (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("-[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}
+ (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

- (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

+ (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

- (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

// Replaced by CF (returns an NSString)
+ (NSString *)description {
    return nil;
}

// Replaced by CF (returns an NSString)
- (NSString *)description {
    return nil;
}

+ (NSString *)debugDescription {
    return [self description];
}

- (NSString *)debugDescription {
    return [self description];
}
+ (id)new {
    return [callAlloc(self, false/*checkNil*/) init];
}

// Replaced by ObjectAlloc
- (id)retain {
    return _objc_rootRetain(self);
}

+ (BOOL)_tryRetain {
    return YES;
}

// Replaced by ObjectAlloc
- (BOOL)_tryRetain {
    return _objc_rootTryRetain(self);
}

+ (BOOL)_isDeallocating {
    return NO;
}

- (BOOL)_isDeallocating {
    return _objc_rootIsDeallocating(self);
}
+ (BOOL)allowsWeakReference {
    return YES;
}

+ (BOOL)retainWeakReference {
    return YES;
}

- (BOOL)allowsWeakReference {
    return ! [self _isDeallocating];
}

- (BOOL)retainWeakReference {
    return [self _tryRetain];
}

+ (oneway void)release {
}

// Replaced by ObjectAlloc
- (oneway void)release {
    _objc_rootRelease(self);
}
// Replaced by ObjectAlloc
- (id)autorelease {
    return _objc_rootAutorelease(self);
}

+ (NSUInteger)retainCount {
    return ULONG_MAX;
}

- (NSUInteger)retainCount {
    return _objc_rootRetainCount(self);
}

+ (id)alloc {
    return _objc_rootAlloc(self);
}

// Replaced by ObjectAlloc
+ (id)allocWithZone:(struct _NSZone *)zone {
    return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
}
// Replaced by CF (throws an NSException)
+ (id)init {
    return (id)self;
}

- (id)init {
    return _objc_rootInit(self);
}

// Replaced by CF (throws an NSException)
+ (void)dealloc {
}

// Replaced by NSZombies
- (void)dealloc {
    _objc_rootDealloc(self);
}

// Previously used by GC. Now a placeholder for binary compatibility.
- (void)finalize {
}
+ (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

- (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

+ (id)copyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)copy {
    return [(id)self copyWithZone:nil];
}

+ (id)mutableCopyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)mutableCopy {
    return [(id)self mutableCopyWithZone:nil];
}

@end