2 * Copyright (c) 2010-2011 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 #include "llvm-DenseMap.h"
27 #import "objc-private.h"
28 #import "objc-internal.h"
35 #include <mach/mach.h>
36 #include <mach-o/dyld.h>
37 #include <mach-o/nlist.h>
38 #include <sys/types.h>
40 #include <libkern/OSAtomic.h>
// Forward declaration for the return-autorelease fast path: inspects the
// caller's instruction stream to decide whether the TLS handshake is safe.
// Must be always_inline so __builtin_return_address(0) sees the real caller.
45 #if SUPPORT_RETURN_AUTORELEASE
46 // We cannot peek at where we are returning to unless we always inline this:
47 __attribute__((always_inline))
48 static bool callerAcceptsFastAutorelease(const void * const ra0);
52 /***********************************************************************
54 **********************************************************************/
// Set to true the first time any __weak reference is stored; lets
// objc_clear_deallocating skip weak-table scanning until then.
56 static bool seen_weak_refs;
// Informal protocol listing the root reference-counting selectors.
// NOTE(review): several member lines are missing from this excerpt.
58 @protocol ReferenceCounted
60 + (id)allocWithZone:(malloc_zone_t *)zone;
61 - (oneway void)dealloc;
63 - (oneway void)release;
65 - (uintptr_t)retainCount;
// Tail of an ARR statistics struct (head not visible in this excerpt):
// one instance counts compiler-emitted ops, the other explicit calls.
76 } CompilerGenerated, ExplicitlyCoded;
// Debug logging hook used by the ARR entry points; prints op name + object.
78 PRIVATE_EXTERN void (^objc_arr_log)(const char *, id param) =
79 ^(const char *str, id param) { printf("%s %p\n", str, param); };
// Side-table striping: a single table on embedded targets (less memory),
// eight stripes elsewhere to spread spinlock contention across threads.
85 #if TARGET_OS_EMBEDDED
86 # define SIDE_TABLE_STRIPE 1
88 # define SIDE_TABLE_STRIPE 8
91 // should be a multiple of cache line size (64)
92 #define SIDE_TABLE_SIZE 64
// Map from DISGUISE()d object pointer to its shifted retain count.
// NOTE(review): the meaning of the third template argument ('true') is
// defined in llvm-DenseMap.h — confirm before relying on it.
94 typedef objc::DenseMap<id,size_t,true> RefcountMap;
// SideTable: per-stripe spinlock + refcount map + weak table, backed by a
// statically allocated, cache-line-aligned buffer and constructed with
// placement new so no destructor ever runs at process exit.
// NOTE(review): the class declaration head and several member lines are
// missing from this excerpt.
98 static uint8_t table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];
103 weak_table_t weak_table;
105 SideTable() : slock(OS_SPINLOCK_INIT)
107 memset(&weak_table, 0, sizeof(weak_table));
112 // never delete side_table in case other threads retain during exit
// Hash an arbitrary pointer to its stripe. The shift/xor mixes address
// bits so nearby objects land on different stripes.
116 static SideTable *tableForPointer(const void *p)
118 # if SIDE_TABLE_STRIPE == 1
119 return (SideTable *)table_buf;
121 uintptr_t a = (uintptr_t)p;
122 int index = ((a >> 4) ^ (a >> 9)) & (SIDE_TABLE_STRIPE - 1);
123 return (SideTable *)&table_buf[index * SIDE_TABLE_SIZE];
// One-time initialization of every stripe.
128 // use placement new instead of static ctor to avoid dtor at exit
129 for (int i = 0; i < SIDE_TABLE_STRIPE; i++) {
130 new (&table_buf[i * SIDE_TABLE_SIZE]) SideTable;
// Each SideTable must fit in its fixed-size, aligned slot.
135 STATIC_ASSERT(sizeof(SideTable) <= SIDE_TABLE_SIZE);
136 __attribute__((aligned(SIDE_TABLE_SIZE))) uint8_t
137 SideTable::table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];
139 // Avoid false-negative reports from tools like "leaks"
// Bit-flip the pointer before using it as a map key so the raw object
// address never appears in side-table memory (reversible: ~~x == x).
140 #define DISGUISE(x) ((id)~(uintptr_t)(x))
142 // anonymous namespace
147 // The -fobjc-arr flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
// Retain a block object: blocks are copied (stack -> heap) rather than
// retained, so this forwards to _Block_copy after logging/statistics.
// NOTE(review): some interior lines are missing from this excerpt.
150 id objc_retainBlock(id x) {
152 objc_arr_log("objc_retain_block", x);
153 ++CompilerGenerated.blockCopies;
155 return (id)_Block_copy(x);
159 // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
// Body not visible in this excerpt.
162 BOOL objc_should_deallocate(id object) {
167 // <rdar://problem/9038601> clang remembers variadic bit across function cast
168 // <rdar://problem/9048030> Clang thinks that all ObjC vtable dispatches are variadic
169 // <rdar://problem/8873428> vararg function defeats tail-call optimization
// Non-variadic alias of objc_msgSend so calls below can be tail-called.
170 id objc_msgSend_hack(id, SEL) asm("_objc_msgSend");
172 // public API entry points that might be optimized later
// objc_retain / objc_release / objc_autorelease simply message the object;
// aligned(16) so the entry points can later be patched/vectored cheaply.
// NOTE(review): the signature lines for objc_retain/objc_release are
// missing from this excerpt — only the bodies remain.
174 __attribute__((aligned(16)))
178 return objc_msgSend_hack(obj, @selector(retain));
181 __attribute__((aligned(16)))
185 objc_msgSend_hack(obj, @selector(release));
188 __attribute__((aligned(16)))
190 objc_autorelease(id obj)
192 return objc_msgSend_hack(obj, @selector(autorelease));
// Convenience composition: retain then autorelease.
196 objc_retain_autorelease(id obj)
198 return objc_autorelease(objc_retain(obj));
// Store newObj into the __weak variable at *location, unregistering the
// old referent and registering the new one in the appropriate stripe's
// weak table. Locks are taken in address order (when striped) to avoid
// deadlock, and the whole sequence retries if *location changes underneath.
// NOTE(review): the retry-loop head, unlock-ordering swap body, and
// closing lines are missing from this excerpt.
202 objc_storeWeak(id *location, id newObj)
208 #if SIDE_TABLE_STRIPE > 1
// First weak store flips the process-wide flag read by dealloc paths.
212 if (!seen_weak_refs) {
213 seen_weak_refs = true;
216 // Acquire locks for old and new values.
217 // Order by lock address to prevent lock ordering problems.
218 // Retry if the old value changes underneath us.
222 oldTable = SideTable::tableForPointer(oldObj);
223 newTable = SideTable::tableForPointer(newObj);
225 lock1 = &newTable->slock;
226 #if SIDE_TABLE_STRIPE > 1
227 lock2 = &oldTable->slock;
229 OSSpinLock *temp = lock1;
233 if (lock1 != lock2) OSSpinLockLock(lock2);
235 OSSpinLockLock(lock1);
// Raced with another store: drop locks and retry from the top.
237 if (*location != oldObj) {
238 OSSpinLockUnlock(lock1);
239 #if SIDE_TABLE_STRIPE > 1
240 if (lock1 != lock2) OSSpinLockUnlock(lock2);
246 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
249 newObj = weak_register_no_lock(&newTable->weak_table, newObj,location);
250 // weak_register_no_lock returns NULL if weak store should be rejected
252 // Do not set *location anywhere else. That would introduce a race.
255 OSSpinLockUnlock(lock1);
256 #if SIDE_TABLE_STRIPE > 1
257 if (lock1 != lock2) OSSpinLockUnlock(lock2);
// Read a __weak variable and return its referent retained (+1), or NULL.
// Re-checks *location under the stripe lock and re-reads through the weak
// table so a concurrently deallocating referent is never returned live.
// NOTE(review): the retry label and final return are missing from this
// excerpt.
264 objc_loadWeakRetained(id *location)
273 if (!result) return NULL;
275 table = SideTable::tableForPointer(result);
276 lock = &table->slock;
278 OSSpinLockLock(lock);
// Raced with a weak store; drop the lock and retry with the new value.
279 if (*location != result) {
280 OSSpinLockUnlock(lock);
284 result = arr_read_weak_reference(&table->weak_table, location);
286 OSSpinLockUnlock(lock);
// objc_loadWeak: autoreleased (+0) variant of objc_loadWeakRetained.
291 objc_loadWeak(id *location)
293 return objc_autorelease(objc_loadWeakRetained(location));
// objc_initWeak: first store into a fresh (uninitialized) __weak variable.
// NOTE(review): looks like it assumes *addr holds no prior registration,
// since it delegates straight to objc_storeWeak — confirm against callers.
297 objc_initWeak(id *addr, id val)
300 return objc_storeWeak(addr, val);
// objc_destroyWeak: unregister the variable by storing nil through the
// normal path.
304 objc_destroyWeak(id *addr)
306 objc_storeWeak(addr, 0);
// objc_copyWeak: initialize *to with *from's current referent.
310 objc_copyWeak(id *to, id *from)
312 id val = objc_loadWeakRetained(from);
313 objc_initWeak(to, val);
// objc_moveWeak: copy then clear the source variable.
318 objc_moveWeak(id *to, id *from)
320 objc_copyWeak(to, from);
321 objc_destroyWeak(from);
325 /* Autorelease pool implementation
326 A thread's autorelease pool is a stack of pointers.
327 Each pointer is either an object to release, or POOL_SENTINEL which is
328 an autorelease pool boundary.
329 A pool token is a pointer to the POOL_SENTINEL for that pool. When
330 the pool is popped, every object hotter than the sentinel is released.
331 The stack is divided into a doubly-linked list of pages. Pages are added
332 and deleted as necessary.
333 Thread-local storage points to the hot page, where newly autoreleased
// Debugger breakpoint hook fired when autoreleasing with no pool in place.
337 extern "C" BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
// magic_t: 16-byte canary at the head of every pool page. m[0] holds M0
// and m[1..3] hold the "AUTORELEASE!" string, so corruption of a page
// header is detectable.
// NOTE(review): the struct declaration and method heads are partially
// missing from this excerpt.
342 static const uint32_t M0 = 0xA1A1A1A1;
343 # define M1 "AUTORELEASE!"
344 static const size_t M1_len = 12;
// Constructor: stamp the canary. 12 chars exactly fill m[1..3].
348 assert(M1_len == strlen(M1));
349 assert(M1_len == 3 * sizeof(m[1]));
352 strncpy((char *)&m[1], M1, M1_len);
// Destructor: zero the canary so stale pages fail the check.
356 m[0] = m[1] = m[2] = m[3] = 0;
// Full check compares all four words; fastcheck is the cheap variant.
360 return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
363 bool fastcheck() const {
375 // Set this to 1 to mprotect() autorelease pool contents
376 #define PROTECT_AUTORELEASEPOOL 0
// AutoreleasePoolPage: one 4KB node of the per-thread doubly-linked list
// of pool pages. Header fields below, then COUNT id slots of contents.
// NOTE(review): the magic/next member declarations are missing from this
// excerpt.
378 class AutoreleasePoolPage
// A NULL entry in the stack marks a pool boundary.
381 #define POOL_SENTINEL 0
382 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
383 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
384 static size_t const SIZE =
385 #if PROTECT_AUTORELEASEPOOL
386 4096; // must be multiple of vm page size
388 4096; // size and alignment, power of 2
390 static size_t const COUNT = SIZE / sizeof(id);
// Owning thread, list links, nesting depth, high-water statistic.
394 pthread_t const thread;
395 AutoreleasePoolPage * const parent;
396 AutoreleasePoolPage *child;
397 uint32_t const depth;
400 // SIZE-sizeof(*this) bytes of contents follow
// Pages are allocated SIZE-aligned so pageForPointer can recover a page
// from any interior pointer with simple modulo arithmetic.
402 static void * operator new(size_t size) {
403 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
405 static void operator delete(void * p) {
// Optional hardening: make page memory read-only between mutations.
// Both are no-ops unless PROTECT_AUTORELEASEPOOL is set to 1 above.
409 inline void protect() {
410 #if PROTECT_AUTORELEASEPOOL
411 mprotect(this, SIZE, PROT_READ);
416 inline void unprotect() {
417 #if PROTECT_AUTORELEASEPOOL
419 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
// Construct an empty page linked under newParent (which must not already
// have a child); depth and high-water mark are inherited from the parent.
423 AutoreleasePoolPage(AutoreleasePoolPage *newParent)
424 : magic(), next(begin()), thread(pthread_self()),
425 parent(newParent), child(NULL),
426 depth(parent ? 1+parent->depth : 0),
427 hiwat(parent ? parent->hiwat : 0)
431 assert(!parent->child);
433 parent->child = this;
// Destructor: page must already be empty and unlinked.
// NOTE(review): the destructor body is missing from this excerpt.
439 ~AutoreleasePoolPage()
445 // Not recursive: we don't want to blow out the stack
446 // if a thread accumulates a stupendous amount of garbage
// Report a corrupted page header; fatal by default, warn-only if die=false.
451 void busted(bool die = true)
453 (die ? _objc_fatal : _objc_inform)
454 ("autorelease pool page %p corrupted\n"
455 " magic %x %x %x %x\n pthread %p\n",
456 this, magic.m[0], magic.m[1], magic.m[2], magic.m[3],
// Full integrity check: canary plus owning-thread identity.
460 void check(bool die = true)
462 if (!magic.check() || !pthread_equal(thread, pthread_self())) {
// Cheap check used on hot paths: canary word only.
467 void fastcheck(bool die = true)
469 if (! magic.fastcheck()) {
// begin/end bound the id slots that follow the header in this page.
476 return (id *) ((uint8_t *)this+sizeof(*this));
480 return (id *) ((uint8_t *)this+SIZE);
484 return next == begin();
488 return next == end();
491 bool lessThanHalfFull() {
492 return (next - begin() < (end() - begin()) / 2);
// releaseAll delegates to releaseUntil(begin()).
506 releaseUntil(begin());
// Pop and -release every object hotter than `stop`, scribbling freed
// slots. Iterative (not recursive) and restarted from hotPage() each
// step because a -release can itself autorelease more objects.
// NOTE(review): several interior lines (empty-page walk body, the actual
// release call, and the trailing setHotPage) are missing from this excerpt.
509 void releaseUntil(id *stop)
511 // Not recursive: we don't want to blow out the stack
512 // if a thread accumulates a stupendous amount of garbage
514 while (this->next != stop) {
515 // Restart from hotPage() every time, in case -release
516 // autoreleased more objects
517 AutoreleasePoolPage *page = hotPage();
519 // fixme I think this `while` can be `if`, but I can't prove it
520 while (page->empty()) {
526 id obj = *--page->next;
527 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
530 if (obj != POOL_SENTINEL) {
// Postcondition (debug builds): child pages drained completely.
538 // we expect any children to be completely empty
539 for (AutoreleasePoolPage *page = child; page; page = page->child) {
540 assert(page->empty());
// kill(): delete this page and everything below it, walking to the
// deepest child first and deleting upward iteratively.
547 // Not recursive: we don't want to blow out the stack
548 // if a thread accumulates a stupendous amount of garbage
549 AutoreleasePoolPage *page = this;
550 while (page->child) page = page->child;
552 AutoreleasePoolPage *deathptr;
562 } while (deathptr != this);
// pthread TLS destructor: drain and destroy the thread's entire pool
// stack at thread exit.
// NOTE(review): body after setHotPage is missing from this excerpt.
565 static void tls_dealloc(void *p)
567 // reinstate TLS value while we work
568 setHotPage((AutoreleasePoolPage *)p);
// Recover the owning page from any interior pointer. Valid because
// pages are SIZE-aligned (see operator new).
573 static AutoreleasePoolPage *pageForPointer(const void *p)
575 return pageForPointer((uintptr_t)p);
578 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
580 AutoreleasePoolPage *result;
581 uintptr_t offset = p % SIZE;
// Pointers into the header are never valid tokens.
583 assert(offset >= sizeof(AutoreleasePoolPage));
585 result = (AutoreleasePoolPage *)(p - offset);
// hotPage/setHotPage: the thread's current (newest) page lives in
// direct-access TLS; integrity-checked on every access.
592 static inline AutoreleasePoolPage *hotPage()
594 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
595 _pthread_getspecific_direct(key);
596 if (result) result->fastcheck();
600 static inline void setHotPage(AutoreleasePoolPage *page)
602 if (page) page->fastcheck();
603 _pthread_setspecific_direct(key, (void *)page);
// coldPage: walk parent links to the oldest page in the list.
606 static inline AutoreleasePoolPage *coldPage()
608 AutoreleasePoolPage *result = hotPage();
610 while (result->parent) {
611 result = result->parent;
// Fast path: append to the hot page if it exists and has room;
// otherwise fall into the out-of-line slow path.
619 static inline id *autoreleaseFast(id obj)
621 AutoreleasePoolPage *page = hotPage();
622 if (page && !page->full()) {
623 return page->add(obj);
625 return autoreleaseSlow(obj);
// Slow path: warn-and-leak when no pool exists, otherwise walk/grow the
// page list until a non-full page is found and becomes the hot page.
// NOTE(review): the no-page branch head and the setHotPage call are
// missing from this excerpt.
629 static __attribute__((noinline))
630 id *autoreleaseSlow(id obj)
632 AutoreleasePoolPage *page;
635 // The code below assumes some cases are handled by autoreleaseFast()
636 assert(!page || page->full());
639 assert(obj != POOL_SENTINEL);
640 _objc_inform("Object %p of class %s autoreleased "
641 "with no pool in place - just leaking - "
642 "break on objc_autoreleaseNoPool() to debug",
643 obj, object_getClassName(obj));
644 objc_autoreleaseNoPool(obj);
// Reuse an existing empty child page before allocating a new one.
649 if (page->child) page = page->child;
650 else page = new AutoreleasePoolPage(page);
651 } while (page->full());
654 return page->add(obj);
// Public-facing autorelease: tagged pointers are rejected upstream.
658 static inline id autorelease(id obj)
661 assert(!OBJC_IS_TAGGED_PTR(obj));
662 id *dest __unused = autoreleaseFast(obj);
663 assert(!dest || *dest == obj);
// push(): start a new pool by appending a POOL_SENTINEL; the slot's
// address is the pool token handed back to the caller.
668 static inline void *push()
671 setHotPage(new AutoreleasePoolPage(NULL));
673 id *dest = autoreleaseFast(POOL_SENTINEL);
674 assert(*dest == POOL_SENTINEL);
// pop(token): release every object hotter than the sentinel the token
// points at, then trim empty child pages (keeping one as hysteresis when
// this page is more than half full).
// NOTE(review): the stop-pointer setup, the kill() branches, and the
// function tail are missing from this excerpt.
678 static inline void pop(void *token)
680 AutoreleasePoolPage *page;
684 page = pageForPointer(token);
686 assert(*stop == POOL_SENTINEL);
688 // Token 0 is top-level pool
691 stop = page->begin();
694 if (PrintPoolHiwat) printHiwat();
696 page->releaseUntil(stop);
698 // memory: delete empty children
699 // hysteresis: keep one empty child if this page is more than half full
700 // special case: delete everything for pop(0)
704 } else if (page->child) {
705 if (page->lessThanHalfFull()) {
708 else if (page->child->child) {
709 page->child->child->kill();
// One-time init: register tls_dealloc for the pool TLS key so pools are
// drained at thread exit.
716 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
717 AutoreleasePoolPage::tls_dealloc);
// Debug dump of one page: header status line, then one line per slot
// (sentinels marked as POOL).
723 _objc_inform("[%p] ................ PAGE %s %s %s", this,
724 full() ? "(full)" : "",
725 this == hotPage() ? "(hot)" : "",
726 this == coldPage() ? "(cold)" : "");
728 for (id *p = begin(); p < next; p++) {
729 if (*p == POOL_SENTINEL) {
730 _objc_inform("[%p] ################ POOL %p", p, p);
732 _objc_inform("[%p] %#16lx %s",
733 p, (unsigned long)*p, object_getClassName(*p));
// Debug dump of the whole pool stack: pending count, then every page
// coldest-to-hottest.
738 static void printAll()
740 _objc_inform("##############");
741 _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());
743 AutoreleasePoolPage *page;
744 ptrdiff_t objects = 0;
745 for (page = coldPage(); page; page = page->child) {
746 objects += page->next - page->begin();
748 _objc_inform("%llu releases pending.", (unsigned long long)objects);
750 for (page = coldPage(); page; page = page->child) {
754 _objc_inform("##############");
// Track and report a new per-thread high-water mark of pending
// autoreleases (ignoring marks under 256 to suppress noise), with a
// backtrace of where the mark was set.
757 static void printHiwat()
759 // Check and propagate high water mark
760 // Ignore high water marks under 256 to suppress noise.
761 AutoreleasePoolPage *p = hotPage();
762 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
763 if (mark > p->hiwat && mark > 256) {
764 for( ; p; p = p->parent) {
770 _objc_inform("POOL HIGHWATER: new high water mark of %u "
771 "pending autoreleases for thread %p:",
772 mark, pthread_self());
775 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
776 char **sym = backtrace_symbols(stack, count);
777 for (int i = 0; i < count; i++) {
778 _objc_inform("POOL HIGHWATER: %s", sym[i]);
787 // anonymous namespace
790 // API to only be called by root classes like NSObject or NSProxy
// Out-of-line slow paths, kept noinline so the fast paths stay small.
793 __attribute__((used,noinline,nothrow))
794 static id _objc_rootRetain_slow(id obj);
795 __attribute__((used,noinline,nothrow))
796 static bool _objc_rootReleaseWasZero_slow(id obj);
// Slow retain: taken when the stripe lock was contended. Counts are kept
// shifted left by one (bit 0 is the deallocating flag), so +1 retain
// adds 2.
800 _objc_rootRetain_slow(id obj)
802 SideTable *table = SideTable::tableForPointer(obj);
803 OSSpinLockLock(&table->slock);
804 table->refcnts[DISGUISE(obj)] += 2;
805 OSSpinLockUnlock(&table->slock);
// Fast retain: tagged pointers are immortal (no-op); otherwise try the
// stripe lock without spinning and fall back to the slow path on
// contention. += 2 because bit 0 of the count is the deallocating flag.
811 _objc_rootRetain(id obj)
816 if (OBJC_IS_TAGGED_PTR(obj)) return obj;
818 SideTable *table = SideTable::tableForPointer(obj);
820 if (OSSpinLockTry(&table->slock)) {
821 table->refcnts[DISGUISE(obj)] += 2;
822 OSSpinLockUnlock(&table->slock);
825 return _objc_rootRetain_slow(obj);
// Try-retain used by the weak-load path: fails (returns false) if the
// object is already deallocating (count's low bit set). The caller must
// already hold the stripe lock; a held OSSpinLock is nonzero, so
// slock == 0 means the precondition was violated.
// NOTE(review): the success/failure return lines are missing from this
// excerpt.
829 _objc_rootTryRetain(id obj)
834 if (OBJC_IS_TAGGED_PTR(obj)) return true;
836 SideTable *table = SideTable::tableForPointer(obj);
839 // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
840 // which already acquired the lock on our behalf.
841 if (table->slock == 0) {
842 _objc_fatal("Do not call -_tryRetain.");
846 RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
847 if (it == table->refcnts.end()) {
848 table->refcnts[DISGUISE(obj)] = 2;
849 } else if (it->second & 1) {
// Deallocating check used by the weak-store path; same lock-held
// precondition as above. Low bit of the stored count is the flag.
859 _objc_rootIsDeallocating(id obj)
864 if (OBJC_IS_TAGGED_PTR(obj)) return false;
866 SideTable *table = SideTable::tableForPointer(obj);
869 // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
870 // which already acquired the lock on our behalf.
871 if (table->slock == 0) {
872 _objc_fatal("Do not call -_isDeallocating.");
875 RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
876 return (it != table->refcnts.end()) && ((it->second & 1) == 1);
// Called during dealloc: zero out any weak references to obj (only if
// any weak reference was ever created process-wide) and erase its
// side-table refcount entry, all under the stripe lock.
881 objc_clear_deallocating(id obj)
886 SideTable *table = SideTable::tableForPointer(obj);
888 // clear any weak table items
889 // clear extra retain count and deallocating bit
890 // (fixme warn or abort if extra retain count == 0 ?)
891 OSSpinLockLock(&table->slock);
892 if (seen_weak_refs) {
893 arr_clear_deallocating(&table->weak_table, obj);
895 table->refcnts.erase(DISGUISE(obj));
896 OSSpinLockUnlock(&table->slock);
// Slow release path (lock was contended): decrement the shifted count;
// when no entry exists or the count hits zero, mark the object
// deallocating (store 1 = low flag bit) and report true so the caller
// sends -dealloc.
// NOTE(review): the decrement branch and return lines are missing from
// this excerpt.
901 _objc_rootReleaseWasZero_slow(id obj)
903 SideTable *table = SideTable::tableForPointer(obj);
905 bool do_dealloc = false;
907 OSSpinLockLock(&table->slock);
908 RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
909 if (it == table->refcnts.end()) {
911 table->refcnts[DISGUISE(obj)] = 1;
912 } else if (it->second == 0) {
918 OSSpinLockUnlock(&table->slock);
// Fast release: same logic under a try-lock; falls back to the slow
// path on lock contention. Mirrors _objc_rootReleaseWasZero_slow above.
923 _objc_rootReleaseWasZero(id obj)
928 if (OBJC_IS_TAGGED_PTR(obj)) return false;
930 SideTable *table = SideTable::tableForPointer(obj);
932 bool do_dealloc = false;
934 if (OSSpinLockTry(&table->slock)) {
935 RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
936 if (it == table->refcnts.end()) {
938 table->refcnts[DISGUISE(obj)] = 1;
939 } else if (it->second == 0) {
945 OSSpinLockUnlock(&table->slock);
948 return _objc_rootReleaseWasZero_slow(obj);
// Root -release: decrement, and when the count reached zero, send
// -dealloc (via the non-variadic msgSend alias).
952 _objc_rootRelease(id obj)
957 if (_objc_rootReleaseWasZero(obj) == false) {
960 objc_msgSend_hack(obj, @selector(dealloc));
// Out-of-line autorelease tail: tagged pointers are never placed in the
// pool; everything else goes to the page machinery.
963 __attribute__((noinline,used))
964 static id _objc_rootAutorelease2(id obj)
966 if (OBJC_IS_TAGGED_PTR(obj)) return obj;
967 return AutoreleasePoolPage::autorelease(obj);
// Root -autorelease: when the caller's code cooperates with the
// return-value handshake, stash obj in the reclaim TLS slot instead of
// the pool (even for tagged pointers — it skips a message send).
// NOTE(review): the return after the TLS store is missing from this
// excerpt.
970 __attribute__((aligned(16)))
972 _objc_rootAutorelease(id obj)
974 assert(obj); // root classes shouldn't get here, since objc_msgSend ignores nil
981 // no tag check here: tagged pointers DO use fast autoreleasing
983 #if SUPPORT_RETURN_AUTORELEASE
984 assert(_pthread_getspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY) == NULL);
986 if (callerAcceptsFastAutorelease(__builtin_return_address(0))) {
987 _pthread_setspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY, obj);
991 return _objc_rootAutorelease2(obj);
// Root -retainCount: inherently racy (stale the instant it returns).
// Stored counts are extra retains shifted left by one, so the external
// count is (stored >> 1) + 1; objects with no entry report 1.
995 _objc_rootRetainCount(id obj)
1000 // XXX -- There is no way that anybody can use this API race free in a
1001 // threaded environment because the result is immediately stale by the
1002 // time the caller receives it.
1004 if (OBJC_IS_TAGGED_PTR(obj)) return (uintptr_t)obj;
1006 SideTable *table = SideTable::tableForPointer(obj);
1008 size_t refcnt_result = 1;
1010 OSSpinLockLock(&table->slock);
1011 RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
1012 if (it != table->refcnts.end()) {
1013 refcnt_result = (it->second >> 1) + 1;
1015 OSSpinLockUnlock(&table->slock);
1016 return refcnt_result;
// Root -init: no-op beyond returning obj (body not fully visible here).
1020 _objc_rootInit(id obj)
1022 // In practice, it will be hard to rely on this function.
1023 // Many classes do not properly chain -init calls.
// Root +allocWithZone:. Under __OBJC2__ the zone is ignored entirely;
// legacy runtime honors it unless GC is on or zone is NULL.
1028 _objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
1031 // allocWithZone under __OBJC2__ ignores the zone parameter
1033 return class_createInstance(cls, 0);
1035 if (!zone || UseGC) {
1036 return class_createInstance(cls, 0);
1038 return class_createInstanceFromZone(cls, 0, zone);
// Root +alloc: modern runtime allocates directly; legacy path messages
// +allocWithZone: so overrides are honored.
1043 _objc_rootAlloc(Class cls)
1046 // once we get a bit in the class, data structure, we can call this directly
1047 // because allocWithZone under __OBJC2__ ignores the zone parameter
1048 return class_createInstance(cls, 0);
1050 return [cls allocWithZone: nil];
// Root -dealloc: tagged pointers are never freed; everything else is
// disposed through the runtime.
1055 _objc_rootDealloc(id obj)
1060 if (OBJC_IS_TAGGED_PTR(obj)) return;
1062 object_dispose(obj);
// Root -finalize: only meaningful under GC; fatal otherwise.
1066 _objc_rootFinalize(id obj __unused)
1074 _objc_fatal("_objc_rootFinalize called with garbage collection off");
// Root -zone: modern runtime always reports the default zone; legacy
// path asks malloc which zone actually owns the pointer.
1078 _objc_rootZone(id obj)
1085 // allocWithZone under __OBJC2__ ignores the zone parameter
1086 return malloc_default_zone();
1088 malloc_zone_t *rval = malloc_zone_from_ptr(obj);
1089 return rval ? rval : malloc_default_zone();
// Root -hash: pointer identity (external hash under GC).
1094 _objc_rootHash(id obj)
1097 return _object_getExternalHash(obj);
1099 return (uintptr_t)obj;
// Legacy underscore-prefixed entry points retained so CoreFoundation can
// link against them; each one simply forwards to the public pool API.
void *
_objc_autoreleasePoolPush(void)
{
    return objc_autoreleasePoolPush();
}

void
_objc_autoreleasePoolPop(void *ctxt)
{
    objc_autoreleasePoolPop(ctxt);
}
// Public pool push: returns an opaque token (the sentinel slot address),
// or NULL under GC where pools are no-ops.
1107 objc_autoreleasePoolPush(void)
1109 if (UseGC) return NULL;
1110 return AutoreleasePoolPage::push();
// Public pool pop: releases everything hotter than the token.
// NOTE(review): the GC early-return is missing from this excerpt.
1114 objc_autoreleasePoolPop(void *ctxt)
1118 // fixme rdar://9167170
1121 AutoreleasePoolPage::pop(ctxt);
// Debug: dump the calling thread's entire pool stack.
1125 _objc_autoreleasePoolPrint(void)
1128 AutoreleasePoolPage::printAll();
1131 #if SUPPORT_RETURN_AUTORELEASE
1134 Fast handling of returned autoreleased values.
1135 The caller and callee cooperate to keep the returned object
1136 out of the autorelease pool.
1140 objc_retainAutoreleasedReturnValue(ret);
1146 return objc_autoreleaseReturnValue(ret);
1148 objc_autoreleaseReturnValue() examines the caller's instructions following
1149 the return. If the caller's instructions immediately call
1150 objc_autoreleaseReturnValue, then the callee omits the -autorelease and saves
1151 the result in thread-local storage. If the caller does not look like it
1152 cooperates, then the callee calls -autorelease as usual.
1154 objc_autoreleaseReturnValue checks if the returned value is the same as the
1155 one in thread-local storage. If it is, the value is used directly. If not,
1156 the value is assumed to be truly autoreleased and is retained again. In
1157 either case, the caller now has a retained reference to the value.
1159 Tagged pointer objects do participate in the fast autorelease scheme,
1160 because it saves message sends. They are not entered in the autorelease
1161 pool in the slow case.
// x86_64: decide whether the caller participates in the return-value
// handshake by decoding the instructions at the return address: a
// mov %rax,%rdi followed by a call whose eventual target (possibly via a
// GOT/stub jmp) is objc_retainAutoreleasedReturnValue.
// NOTE(review): the early-return lines after each failed match and the
// final returns are missing from this excerpt.
1166 static bool callerAcceptsFastAutorelease(const void * const ra0)
1168 const uint8_t *ra1 = (const uint8_t *)ra0;
1169 const uint16_t *ra2;
1170 const uint32_t *ra4 = (const uint32_t *)ra1;
1173 #define PREFER_GOTPCREL 0
1175 // 48 89 c7 movq %rax,%rdi
1176 // ff 15 callq *symbol@GOTPCREL(%rip)
1177 if (*ra4 != 0xffc78948) {
1180 if (ra1[4] != 0x15) {
1185 // 48 89 c7 movq %rax,%rdi
// Direct call form: e8 rel32. Follow the displacement to the stub.
1187 if (*ra4 != 0xe8c78948) {
1190 ra1 += (long)*(const int32_t *)(ra1 + 4) + 8l;
1191 ra2 = (const uint16_t *)ra1;
1192 // ff 25 jmpq *symbol@DYLDMAGIC(%rip)
1193 if (*ra2 != 0x25ff) {
1197 ra1 += 6l + (long)*(const int32_t *)(ra1 + 2);
1198 sym = (const void **)ra1;
1199 if (*sym != objc_retainAutoreleasedReturnValue)
// Fallback for unsupported architectures: never take the fast path.
1210 #warning unknown architecture
1212 static bool callerAcceptsFastAutorelease(const void *ra)
1219 // SUPPORT_RETURN_AUTORELEASE
// Callee half of the handshake: if the caller's code cooperates, stash
// the object in the reclaim TLS slot and skip the autorelease entirely;
// otherwise autorelease as usual.
// NOTE(review): the return after the TLS store is missing from this
// excerpt.
1224 objc_autoreleaseReturnValue(id obj)
1226 #if SUPPORT_RETURN_AUTORELEASE
1227 assert(_pthread_getspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY) == NULL);
1229 if (callerAcceptsFastAutorelease(__builtin_return_address(0))) {
1230 _pthread_setspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY, obj);
1235 return objc_autorelease(obj);
// Retain-then-handshake composition.
1239 objc_retainAutoreleaseReturnValue(id obj)
1241 return objc_autoreleaseReturnValue(objc_retain(obj));
// Caller half of the handshake: if the TLS slot holds this object, the
// callee skipped the autorelease — take the value and clear the slot;
// otherwise the value was truly autoreleased, so retain it.
// NOTE(review): the matched-case return is missing from this excerpt.
1245 objc_retainAutoreleasedReturnValue(id obj)
1247 #if SUPPORT_RETURN_AUTORELEASE
1248 if (obj == _pthread_getspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY)) {
1249 _pthread_setspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY, 0);
1253 return objc_retain(obj);
// Store into a __strong variable: retain new, assign, release previous.
// NOTE(review): the retain/assign/release lines are missing from this
// excerpt; only the prologue is visible.
1257 objc_storeStrong(id *location, id obj)
1259 // XXX FIXME -- GC support?
1260 id prev = *location;
// Public retain+autorelease composition.
1270 objc_retainAutorelease(id obj)
1272 return objc_autorelease(objc_retain(obj));
// Trampoline used to forward a -dealloc onto the main thread; context is
// the object, ownership transferred by the dispatcher.
1276 _objc_deallocOnMainThreadHelper(void *context)
1278 id obj = (id)context;
1279 objc_msgSend_hack(obj, @selector(dealloc));
// Bridging helpers between raw objc_objectptr_t values and id.

// The caller hands over a +1 reference; the returned id owns it.
NS_RETURNS_RETAINED id
objc_retainedObject(objc_objectptr_t CF_CONSUMED pointer)
{
    return (id)pointer;
}

// Pure type conversion to id; no ownership transfer in either direction.
NS_RETURNS_NOT_RETAINED id
objc_unretainedObject(objc_objectptr_t pointer)
{
    return (id)pointer;
}

// Pure type conversion from id back to objc_objectptr_t; no transfer.
CF_RETURNS_NOT_RETAINED objc_objectptr_t
objc_unretainedPointer(id object)
{
    return object;
}
// Runtime bootstrap hook: initializes the autorelease pool TLS machinery.
// NOTE(review): additional initialization lines may be missing from this
// excerpt.
1292 PRIVATE_EXTERN void arr_init(void)
1294 AutoreleasePoolPage::init();