/*
 * Copyright (c) 2010-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include "llvm-DenseMap.h"

#import "objc-weak.h"
#import "objc-private.h"
#import "objc-internal.h"
#import "objc-os.h"
#import "runtime.h"

#include <stdint.h>
#include <stdbool.h>
//#include <fcntl.h>
#include <mach/mach.h>
#include <mach-o/dyld.h>
#include <mach-o/nlist.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <libkern/OSAtomic.h>
#include <Block.h>
#include <map>
#include <execinfo.h>

#if SUPPORT_RETURN_AUTORELEASE
// We cannot peek at where we are returning to unless we always inline this:
__attribute__((always_inline))
static bool callerAcceptsFastAutorelease(const void * const ra0);
#endif


/***********************************************************************
* Weak ivar support
**********************************************************************/

static bool seen_weak_refs;

@protocol ReferenceCounted
+ (id)alloc;
+ (id)allocWithZone:(malloc_zone_t *)zone;
- (oneway void)dealloc;
- (id)retain;
- (oneway void)release;
- (id)autorelease;
- (uintptr_t)retainCount;
@end

#define ARR_LOGGING 0

#if ARR_LOGGING
struct {
    int retains;
    int releases;
    int autoreleases;
    int blockCopies;
} CompilerGenerated, ExplicitlyCoded;

PRIVATE_EXTERN void (^objc_arr_log)(const char *, id param) =
    ^(const char *str, id param) { printf("%s %p\n", str, param); };
#endif


namespace {

#if TARGET_OS_EMBEDDED
# define SIDE_TABLE_STRIPE 1
#else
# define SIDE_TABLE_STRIPE 8
#endif
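
// Objects are striped over SIDE_TABLE_STRIPE independent SideTables, each
// guarded by its own spinlock (see tableForPointer() below), so unrelated
// objects usually contend on different locks; embedded targets use a
// single table.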

// should be a multiple of cache line size (64)
#define SIDE_TABLE_SIZE 64

typedef objc::DenseMap<id,size_t,true> RefcountMap;
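
// Refcount entries, as used by the retain/release code below: bit 0 is the
// object's "deallocating" flag, and the remaining bits hold twice the extra
// retain count, so += 2 and -= 2 adjust the count by one retain. An object
// with no entry has an extra retain count of zero.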

class SideTable {
private:
    static uint8_t table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];

public:
    OSSpinLock slock;
    RefcountMap refcnts;
    weak_table_t weak_table;

    SideTable() : slock(OS_SPINLOCK_INIT)
    {
        memset(&weak_table, 0, sizeof(weak_table));
    }

    ~SideTable()
    {
        // never delete side_table in case other threads retain during exit
        assert(0);
    }

    static SideTable *tableForPointer(const void *p)
    {
# if SIDE_TABLE_STRIPE == 1
        return (SideTable *)table_buf;
# else
        uintptr_t a = (uintptr_t)p;
        int index = ((a >> 4) ^ (a >> 9)) & (SIDE_TABLE_STRIPE - 1);
        return (SideTable *)&table_buf[index * SIDE_TABLE_SIZE];
# endif
    }

    static void init() {
        // use placement new instead of static ctor to avoid dtor at exit
        for (int i = 0; i < SIDE_TABLE_STRIPE; i++) {
            new (&table_buf[i * SIDE_TABLE_SIZE]) SideTable;
        }
    }
};

STATIC_ASSERT(sizeof(SideTable) <= SIDE_TABLE_SIZE);
__attribute__((aligned(SIDE_TABLE_SIZE))) uint8_t
SideTable::table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];

// Avoid false-negative reports from tools like "leaks"
#define DISGUISE(x) ((id)~(uintptr_t)(x))
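// (Complemented keys never look like live object pointers during a memory
// scan, so an object whose only remaining reference is its side-table entry
// is still reported as leaked.)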

// anonymous namespace
};


//
// The -fobjc-arr flag causes the compiler to issue calls to
// objc_{retain/release/autorelease/retain_block}
//

id objc_retainBlock(id x) {
#if ARR_LOGGING
    objc_arr_log("objc_retain_block", x);
    ++CompilerGenerated.blockCopies;
#endif
    return (id)_Block_copy(x);
}

//
// The following SHOULD be called by the compiler directly, but the request
// hasn't been made yet :-)
//

BOOL objc_should_deallocate(id object) {
    return YES;
}

// WORKAROUND:
// <rdar://problem/9038601> clang remembers variadic bit across function cast
// <rdar://problem/9048030> Clang thinks that all ObjC vtable dispatches are variadic
// <rdar://problem/8873428> vararg function defeats tail-call optimization
id objc_msgSend_hack(id, SEL) asm("_objc_msgSend");

// public API entry points that might be optimized later

__attribute__((aligned(16)))
id
objc_retain(id obj)
{
    return objc_msgSend_hack(obj, @selector(retain));
}

__attribute__((aligned(16)))
void
objc_release(id obj)
{
    objc_msgSend_hack(obj, @selector(release));
}

__attribute__((aligned(16)))
id
objc_autorelease(id obj)
{
    return objc_msgSend_hack(obj, @selector(autorelease));
}

id
objc_retain_autorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}

id
objc_storeWeak(id *location, id newObj)
{
    id oldObj;
    SideTable *oldTable;
    SideTable *newTable;
    OSSpinLock *lock1;
#if SIDE_TABLE_STRIPE > 1
    OSSpinLock *lock2;
#endif

    if (!seen_weak_refs) {
        seen_weak_refs = true;
    }

    // Acquire locks for old and new values.
    // Order by lock address to prevent lock ordering problems.
    // Retry if the old value changes underneath us.
 retry:
    oldObj = *location;

    oldTable = SideTable::tableForPointer(oldObj);
    newTable = SideTable::tableForPointer(newObj);

    lock1 = &newTable->slock;
#if SIDE_TABLE_STRIPE > 1
    lock2 = &oldTable->slock;
    if (lock1 > lock2) {
        OSSpinLock *temp = lock1;
        lock1 = lock2;
        lock2 = temp;
    }
    if (lock1 != lock2) OSSpinLockLock(lock2);
#endif
    OSSpinLockLock(lock1);

    if (*location != oldObj) {
        OSSpinLockUnlock(lock1);
#if SIDE_TABLE_STRIPE > 1
        if (lock1 != lock2) OSSpinLockUnlock(lock2);
#endif
        goto retry;
    }

    if (oldObj) {
        weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
    }
    if (newObj) {
        newObj = weak_register_no_lock(&newTable->weak_table, newObj, location);
        // weak_register_no_lock returns NULL if weak store should be rejected
    }
    // Do not set *location anywhere else. That would introduce a race.
    *location = newObj;

    OSSpinLockUnlock(lock1);
#if SIDE_TABLE_STRIPE > 1
    if (lock1 != lock2) OSSpinLockUnlock(lock2);
#endif

    return newObj;
}

id
objc_loadWeakRetained(id *location)
{
    id result;

    SideTable *table;
    OSSpinLock *lock;

 retry:
    result = *location;
    if (!result) return NULL;

    table = SideTable::tableForPointer(result);
    lock = &table->slock;

    OSSpinLockLock(lock);
    if (*location != result) {
        OSSpinLockUnlock(lock);
        goto retry;
    }

    result = arr_read_weak_reference(&table->weak_table, location);

    OSSpinLockUnlock(lock);
    return result;
}

id
objc_loadWeak(id *location)
{
    return objc_autorelease(objc_loadWeakRetained(location));
}

id
objc_initWeak(id *addr, id val)
{
    *addr = 0;
    return objc_storeWeak(addr, val);
}

void
objc_destroyWeak(id *addr)
{
    objc_storeWeak(addr, 0);
}

void
objc_copyWeak(id *to, id *from)
{
    id val = objc_loadWeakRetained(from);
    objc_initWeak(to, val);
    objc_release(val);
}

void
objc_moveWeak(id *to, id *from)
{
    objc_copyWeak(to, from);
    objc_destroyWeak(from);
}
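
// For illustration: the compiler is expected to lower a __weak variable to
// calls on the entry points above, roughly:
//     __weak id w = obj;   // objc_initWeak(&w, obj);
//     id tmp = w;          // tmp = objc_loadWeak(&w);  (retain + autorelease)
//     w = other;           // objc_storeWeak(&w, other);
//     /* scope exit */     // objc_destroyWeak(&w);
// (a sketch; the exact calls emitted depend on the compiler and context)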


/* Autorelease pool implementation
   A thread's autorelease pool is a stack of pointers.
   Each pointer is either an object to release, or POOL_SENTINEL which is
     an autorelease pool boundary.
   A pool token is a pointer to the POOL_SENTINEL for that pool. When
     the pool is popped, every object hotter than the sentinel is released.
   The stack is divided into a doubly-linked list of pages. Pages are added
     and deleted as necessary.
   Thread-local storage points to the hot page, where newly autoreleased
     objects are stored.
*/
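
// For illustration, an @autoreleasepool block corresponds to a push/pop pair
// around its body, roughly:
//     void *token = objc_autoreleasePoolPush();  // pushes a POOL_SENTINEL
//     ...   // objc_autorelease() records objects on the hot page
//     objc_autoreleasePoolPop(token);            // releases back to the sentinel
// (a sketch; see push() and pop() below for the actual behavior)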

extern "C" BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));

namespace {

struct magic_t {
    static const uint32_t M0 = 0xA1A1A1A1;
#   define M1 "AUTORELEASE!"
    static const size_t M1_len = 12;
    uint32_t m[4];

    magic_t() {
        assert(M1_len == strlen(M1));
        assert(M1_len == 3 * sizeof(m[1]));

        m[0] = M0;
        strncpy((char *)&m[1], M1, M1_len);
    }

    ~magic_t() {
        m[0] = m[1] = m[2] = m[3] = 0;
    }

    bool check() const {
        return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
    }

    bool fastcheck() const {
#ifdef NDEBUG
        return (m[0] == M0);
#else
        return check();
#endif
    }

#   undef M1
};


// Set this to 1 to mprotect() autorelease pool contents
#define PROTECT_AUTORELEASEPOOL 0

class AutoreleasePoolPage
{

#define POOL_SENTINEL 0
    static pthread_key_t const key = AUTORELEASE_POOL_KEY;
    static uint8_t const SCRIBBLE = 0xA3;  // 0xA3A3A3A3 after releasing
    static size_t const SIZE =
#if PROTECT_AUTORELEASEPOOL
        4096;  // must be multiple of vm page size
#else
        4096;  // size and alignment, power of 2
#endif
    static size_t const COUNT = SIZE / sizeof(id);

    magic_t const magic;
    id *next;
    pthread_t const thread;
    AutoreleasePoolPage * const parent;
    AutoreleasePoolPage *child;
    uint32_t const depth;
    uint32_t hiwat;

    // SIZE-sizeof(*this) bytes of contents follow

    static void * operator new(size_t size) {
        return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
    }
    static void operator delete(void * p) {
        return free(p);
    }

    inline void protect() {
#if PROTECT_AUTORELEASEPOOL
        mprotect(this, SIZE, PROT_READ);
        check();
#endif
    }

    inline void unprotect() {
#if PROTECT_AUTORELEASEPOOL
        check();
        mprotect(this, SIZE, PROT_READ | PROT_WRITE);
#endif
    }

    AutoreleasePoolPage(AutoreleasePoolPage *newParent)
        : magic(), next(begin()), thread(pthread_self()),
          parent(newParent), child(NULL),
          depth(parent ? 1+parent->depth : 0),
          hiwat(parent ? parent->hiwat : 0)
    {
        if (parent) {
            parent->check();
            assert(!parent->child);
            parent->unprotect();
            parent->child = this;
            parent->protect();
        }
        protect();
    }

    ~AutoreleasePoolPage()
    {
        check();
        unprotect();
        assert(empty());

        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        assert(!child);
    }


    void busted(bool die = true)
    {
        (die ? _objc_fatal : _objc_inform)
            ("autorelease pool page %p corrupted\n"
             " magic %x %x %x %x\n pthread %p\n",
             this, magic.m[0], magic.m[1], magic.m[2], magic.m[3],
             this->thread);
    }

    void check(bool die = true)
    {
        if (!magic.check() || !pthread_equal(thread, pthread_self())) {
            busted(die);
        }
    }

    void fastcheck(bool die = true)
    {
        if (! magic.fastcheck()) {
            busted(die);
        }
    }


    id * begin() {
        return (id *) ((uint8_t *)this+sizeof(*this));
    }

    id * end() {
        return (id *) ((uint8_t *)this+SIZE);
    }

    bool empty() {
        return next == begin();
    }

    bool full() {
        return next == end();
    }

    bool lessThanHalfFull() {
        return (next - begin() < (end() - begin()) / 2);
    }

    id *add(id obj)
    {
        assert(!full());
        unprotect();
        *next++ = obj;
        protect();
        return next-1;
    }

    void releaseAll()
    {
        releaseUntil(begin());
    }

    void releaseUntil(id *stop)
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage

        while (this->next != stop) {
            // Restart from hotPage() every time, in case -release
            // autoreleased more objects
            AutoreleasePoolPage *page = hotPage();

            // fixme I think this `while` can be `if`, but I can't prove it
            while (page->empty()) {
                page = page->parent;
                setHotPage(page);
            }

            page->unprotect();
            id obj = *--page->next;
            memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
            page->protect();

            if (obj != POOL_SENTINEL) {
                objc_release(obj);
            }
        }

        setHotPage(this);

#ifndef NDEBUG
        // we expect any children to be completely empty
        for (AutoreleasePoolPage *page = child; page; page = page->child) {
            assert(page->empty());
        }
#endif
    }

    void kill()
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        AutoreleasePoolPage *page = this;
        while (page->child) page = page->child;

        AutoreleasePoolPage *deathptr;
        do {
            deathptr = page;
            page = page->parent;
            if (page) {
                page->unprotect();
                page->child = NULL;
                page->protect();
            }
            delete deathptr;
        } while (deathptr != this);
    }

    static void tls_dealloc(void *p)
    {
        // reinstate TLS value while we work
        setHotPage((AutoreleasePoolPage *)p);
        pop(0);
        setHotPage(NULL);
    }

    static AutoreleasePoolPage *pageForPointer(const void *p)
    {
        return pageForPointer((uintptr_t)p);
    }

    static AutoreleasePoolPage *pageForPointer(uintptr_t p)
    {
        AutoreleasePoolPage *result;
        uintptr_t offset = p % SIZE;

        assert(offset >= sizeof(AutoreleasePoolPage));

        result = (AutoreleasePoolPage *)(p - offset);
        result->fastcheck();

        return result;
    }


    static inline AutoreleasePoolPage *hotPage()
    {
        AutoreleasePoolPage *result = (AutoreleasePoolPage *)
            _pthread_getspecific_direct(key);
        if (result) result->fastcheck();
        return result;
    }

    static inline void setHotPage(AutoreleasePoolPage *page)
    {
        if (page) page->fastcheck();
        _pthread_setspecific_direct(key, (void *)page);
    }

    static inline AutoreleasePoolPage *coldPage()
    {
        AutoreleasePoolPage *result = hotPage();
        if (result) {
            while (result->parent) {
                result = result->parent;
                result->fastcheck();
            }
        }
        return result;
    }


    static inline id *autoreleaseFast(id obj)
    {
        AutoreleasePoolPage *page = hotPage();
        if (page && !page->full()) {
            return page->add(obj);
        } else {
            return autoreleaseSlow(obj);
        }
    }

    static __attribute__((noinline))
    id *autoreleaseSlow(id obj)
    {
        AutoreleasePoolPage *page;
        page = hotPage();

        // The code below assumes some cases are handled by autoreleaseFast()
        assert(!page || page->full());

        if (!page) {
            assert(obj != POOL_SENTINEL);
            _objc_inform("Object %p of class %s autoreleased "
                         "with no pool in place - just leaking - "
                         "break on objc_autoreleaseNoPool() to debug",
                         obj, object_getClassName(obj));
            objc_autoreleaseNoPool(obj);
            return NULL;
        }

        do {
            if (page->child) page = page->child;
            else page = new AutoreleasePoolPage(page);
        } while (page->full());

        setHotPage(page);
        return page->add(obj);
    }

public:
    static inline id autorelease(id obj)
    {
        assert(obj);
        assert(!OBJC_IS_TAGGED_PTR(obj));
        id *dest __unused = autoreleaseFast(obj);
        assert(!dest || *dest == obj);
        return obj;
    }


    static inline void *push()
    {
        if (!hotPage()) {
            setHotPage(new AutoreleasePoolPage(NULL));
        }
        id *dest = autoreleaseFast(POOL_SENTINEL);
        assert(*dest == POOL_SENTINEL);
        return dest;
    }

    static inline void pop(void *token)
    {
        AutoreleasePoolPage *page;
        id *stop;

        if (token) {
            page = pageForPointer(token);
            stop = (id *)token;
            assert(*stop == POOL_SENTINEL);
        } else {
            // Token 0 is top-level pool
            page = coldPage();
            assert(page);
            stop = page->begin();
        }

        if (PrintPoolHiwat) printHiwat();

        page->releaseUntil(stop);

        // memory: delete empty children
        // hysteresis: keep one empty child if this page is more than half full
        // special case: delete everything for pop(0)
        if (!token) {
            page->kill();
            setHotPage(NULL);
        } else if (page->child) {
            if (page->lessThanHalfFull()) {
                page->child->kill();
            }
            else if (page->child->child) {
                page->child->child->kill();
            }
        }
    }

    static void init()
    {
        int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
                                             AutoreleasePoolPage::tls_dealloc);
        assert(r == 0);
    }

    void print()
    {
        _objc_inform("[%p] ................ PAGE %s %s %s", this,
                     full() ? "(full)" : "",
                     this == hotPage() ? "(hot)" : "",
                     this == coldPage() ? "(cold)" : "");
        check(false);
        for (id *p = begin(); p < next; p++) {
            if (*p == POOL_SENTINEL) {
                _objc_inform("[%p] ################ POOL %p", p, p);
            } else {
                _objc_inform("[%p] %#16lx %s",
                             p, (unsigned long)*p, object_getClassName(*p));
            }
        }
    }

    static void printAll()
    {
        _objc_inform("##############");
        _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());

        AutoreleasePoolPage *page;
        ptrdiff_t objects = 0;
        for (page = coldPage(); page; page = page->child) {
            objects += page->next - page->begin();
        }
        _objc_inform("%llu releases pending.", (unsigned long long)objects);

        for (page = coldPage(); page; page = page->child) {
            page->print();
        }

        _objc_inform("##############");
    }

    static void printHiwat()
    {
        // Check and propagate high water mark
        // Ignore high water marks under 256 to suppress noise.
        AutoreleasePoolPage *p = hotPage();
        uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
        if (mark > p->hiwat && mark > 256) {
            for ( ; p; p = p->parent) {
                p->unprotect();
                p->hiwat = mark;
                p->protect();
            }

            _objc_inform("POOL HIGHWATER: new high water mark of %u "
                         "pending autoreleases for thread %p:",
                         mark, pthread_self());

            void *stack[128];
            int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
            char **sym = backtrace_symbols(stack, count);
            for (int i = 0; i < count; i++) {
                _objc_inform("POOL HIGHWATER: %s", sym[i]);
            }
            free(sym);
        }
    }

#undef POOL_SENTINEL
};

// anonymous namespace
};

// API to only be called by root classes like NSObject or NSProxy

extern "C" {
__attribute__((used,noinline,nothrow))
static id _objc_rootRetain_slow(id obj);
__attribute__((used,noinline,nothrow))
static bool _objc_rootReleaseWasZero_slow(id obj);
};

id
_objc_rootRetain_slow(id obj)
{
    SideTable *table = SideTable::tableForPointer(obj);
    OSSpinLockLock(&table->slock);
    table->refcnts[DISGUISE(obj)] += 2;
    OSSpinLockUnlock(&table->slock);

    return obj;
}

id
_objc_rootRetain(id obj)
{
    assert(obj);
    assert(!UseGC);

    if (OBJC_IS_TAGGED_PTR(obj)) return obj;

    SideTable *table = SideTable::tableForPointer(obj);

    if (OSSpinLockTry(&table->slock)) {
        table->refcnts[DISGUISE(obj)] += 2;
        OSSpinLockUnlock(&table->slock);
        return obj;
    }
    return _objc_rootRetain_slow(obj);
}

bool
_objc_rootTryRetain(id obj)
{
    assert(obj);
    assert(!UseGC);

    if (OBJC_IS_TAGGED_PTR(obj)) return true;

    SideTable *table = SideTable::tableForPointer(obj);

    // NO SPINLOCK HERE
    // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
    // which already acquired the lock on our behalf.
    if (table->slock == 0) {
        _objc_fatal("Do not call -_tryRetain.");
    }

    bool result = true;
    RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
    if (it == table->refcnts.end()) {
        table->refcnts[DISGUISE(obj)] = 2;
    } else if (it->second & 1) {
        result = false;
    } else {
        it->second += 2;
    }

    return result;
}

bool
_objc_rootIsDeallocating(id obj)
{
    assert(obj);
    assert(!UseGC);

    if (OBJC_IS_TAGGED_PTR(obj)) return false;

    SideTable *table = SideTable::tableForPointer(obj);

    // NO SPINLOCK HERE
    // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
    // which already acquired the lock on our behalf.
    if (table->slock == 0) {
        _objc_fatal("Do not call -_isDeallocating.");
    }

    RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
    return (it != table->refcnts.end()) && ((it->second & 1) == 1);
}


void
objc_clear_deallocating(id obj)
{
    assert(obj);
    assert(!UseGC);

    SideTable *table = SideTable::tableForPointer(obj);

    // clear any weak table items
    // clear extra retain count and deallocating bit
    // (fixme warn or abort if extra retain count == 0 ?)
    OSSpinLockLock(&table->slock);
    if (seen_weak_refs) {
        arr_clear_deallocating(&table->weak_table, obj);
    }
    table->refcnts.erase(DISGUISE(obj));
    OSSpinLockUnlock(&table->slock);
}


bool
_objc_rootReleaseWasZero_slow(id obj)
{
    SideTable *table = SideTable::tableForPointer(obj);

    bool do_dealloc = false;

    OSSpinLockLock(&table->slock);
    RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
    if (it == table->refcnts.end()) {
        do_dealloc = true;
        table->refcnts[DISGUISE(obj)] = 1;
    } else if (it->second == 0) {
        do_dealloc = true;
        it->second = 1;
    } else {
        it->second -= 2;
    }
    OSSpinLockUnlock(&table->slock);
    return do_dealloc;
}

bool
_objc_rootReleaseWasZero(id obj)
{
    assert(obj);
    assert(!UseGC);

    if (OBJC_IS_TAGGED_PTR(obj)) return false;

    SideTable *table = SideTable::tableForPointer(obj);

    bool do_dealloc = false;

    if (OSSpinLockTry(&table->slock)) {
        RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
        if (it == table->refcnts.end()) {
            do_dealloc = true;
            table->refcnts[DISGUISE(obj)] = 1;
        } else if (it->second == 0) {
            do_dealloc = true;
            it->second = 1;
        } else {
            it->second -= 2;
        }
        OSSpinLockUnlock(&table->slock);
        return do_dealloc;
    }
    return _objc_rootReleaseWasZero_slow(obj);
}

void
_objc_rootRelease(id obj)
{
    assert(obj);
    assert(!UseGC);

    if (_objc_rootReleaseWasZero(obj) == false) {
        return;
    }
    objc_msgSend_hack(obj, @selector(dealloc));
}

__attribute__((noinline,used))
static id _objc_rootAutorelease2(id obj)
{
    if (OBJC_IS_TAGGED_PTR(obj)) return obj;
    return AutoreleasePoolPage::autorelease(obj);
}

__attribute__((aligned(16)))
id
_objc_rootAutorelease(id obj)
{
    assert(obj); // root classes shouldn't get here, since objc_msgSend ignores nil
    assert(!UseGC);

    if (UseGC) {
        return obj;
    }

    // no tag check here: tagged pointers DO use fast autoreleasing

#if SUPPORT_RETURN_AUTORELEASE
    assert(_pthread_getspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY) == NULL);

    if (callerAcceptsFastAutorelease(__builtin_return_address(0))) {
        _pthread_setspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY, obj);
        return obj;
    }
#endif
    return _objc_rootAutorelease2(obj);
}

uintptr_t
_objc_rootRetainCount(id obj)
{
    assert(obj);
    assert(!UseGC);

    // XXX -- There is no way that anybody can use this API race free in a
    // threaded environment because the result is immediately stale by the
    // time the caller receives it.

    if (OBJC_IS_TAGGED_PTR(obj)) return (uintptr_t)obj;

    SideTable *table = SideTable::tableForPointer(obj);

    size_t refcnt_result = 1;

    OSSpinLockLock(&table->slock);
    RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
    if (it != table->refcnts.end()) {
        refcnt_result = (it->second >> 1) + 1;
    }
    OSSpinLockUnlock(&table->slock);
    return refcnt_result;
}

id
_objc_rootInit(id obj)
{
    // In practice, it will be hard to rely on this function.
    // Many classes do not properly chain -init calls.
    return obj;
}

id
_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
{
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    (void)zone;
    return class_createInstance(cls, 0);
#else
    if (!zone || UseGC) {
        return class_createInstance(cls, 0);
    }
    return class_createInstanceFromZone(cls, 0, zone);
#endif
}

id
_objc_rootAlloc(Class cls)
{
#if 0
    // once we get a bit in the class data structure, we can call this directly
    // because allocWithZone under __OBJC2__ ignores the zone parameter
    return class_createInstance(cls, 0);
#else
    return [cls allocWithZone: nil];
#endif
}

void
_objc_rootDealloc(id obj)
{
    assert(obj);
    assert(!UseGC);

    if (OBJC_IS_TAGGED_PTR(obj)) return;

    object_dispose(obj);
}

void
_objc_rootFinalize(id obj __unused)
{
    assert(obj);
    assert(UseGC);

    if (UseGC) {
        return;
    }
    _objc_fatal("_objc_rootFinalize called with garbage collection off");
}

malloc_zone_t *
_objc_rootZone(id obj)
{
    (void)obj;
    if (gc_zone) {
        return gc_zone;
    }
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    return malloc_default_zone();
#else
    malloc_zone_t *rval = malloc_zone_from_ptr(obj);
    return rval ? rval : malloc_default_zone();
#endif
}

uintptr_t
_objc_rootHash(id obj)
{
    if (UseGC) {
        return _object_getExternalHash(obj);
    }
    return (uintptr_t)obj;
}

// make CF link for now
void *_objc_autoreleasePoolPush(void) { return objc_autoreleasePoolPush(); }
void _objc_autoreleasePoolPop(void *ctxt) { objc_autoreleasePoolPop(ctxt); }

void *
objc_autoreleasePoolPush(void)
{
    if (UseGC) return NULL;
    return AutoreleasePoolPage::push();
}

void
objc_autoreleasePoolPop(void *ctxt)
{
    if (UseGC) return;

    // fixme rdar://9167170
    if (!ctxt) return;

    AutoreleasePoolPage::pop(ctxt);
}

void
_objc_autoreleasePoolPrint(void)
{
    if (UseGC) return;
    AutoreleasePoolPage::printAll();
}

#if SUPPORT_RETURN_AUTORELEASE

/*
  Fast handling of returned autoreleased values.
  The caller and callee cooperate to keep the returned object
  out of the autorelease pool.

  Caller:
    ret = callee();
    objc_retainAutoreleasedReturnValue(ret);
    // use ret here

  Callee:
    // compute ret
    [ret retain];
    return objc_autoreleaseReturnValue(ret);

  objc_autoreleaseReturnValue() examines the caller's instructions following
  the return. If the caller's instructions immediately call
  objc_retainAutoreleasedReturnValue, then the callee omits the -autorelease
  and saves the result in thread-local storage. If the caller does not look
  like it cooperates, then the callee calls -autorelease as usual.

  objc_retainAutoreleasedReturnValue() checks if the returned value is the
  same as the one in thread-local storage. If it is, the value is used
  directly. If not, the value is assumed to be truly autoreleased and is
  retained again. In either case, the caller now has a retained reference
  to the value.

  Tagged pointer objects do participate in the fast autorelease scheme,
  because it saves message sends. They are not entered in the autorelease
  pool in the slow case.
*/

# if __x86_64__

static bool callerAcceptsFastAutorelease(const void * const ra0)
{
    const uint8_t *ra1 = (const uint8_t *)ra0;
    const uint16_t *ra2;
    const uint32_t *ra4 = (const uint32_t *)ra1;
    const void **sym;

#define PREFER_GOTPCREL 0
#if PREFER_GOTPCREL
    // 48 89 c7    movq  %rax,%rdi
    // ff 15       callq *symbol@GOTPCREL(%rip)
    if (*ra4 != 0xffc78948) {
        return false;
    }
    if (ra1[4] != 0x15) {
        return false;
    }
    ra1 += 3;
#else
    // 48 89 c7    movq  %rax,%rdi
    // e8          callq symbol
    if (*ra4 != 0xe8c78948) {
        return false;
    }
    ra1 += (long)*(const int32_t *)(ra1 + 4) + 8l;
    ra2 = (const uint16_t *)ra1;
    // ff 25       jmpq  *symbol@DYLDMAGIC(%rip)
    if (*ra2 != 0x25ff) {
        return false;
    }
#endif
    ra1 += 6l + (long)*(const int32_t *)(ra1 + 2);
    sym = (const void **)ra1;
    if (*sym != objc_retainAutoreleasedReturnValue)
    {
        return false;
    }

    return true;
}

// __x86_64__
# else

#warning unknown architecture

static bool callerAcceptsFastAutorelease(const void *ra)
{
    return false;
}

# endif

// SUPPORT_RETURN_AUTORELEASE
#endif


id
objc_autoreleaseReturnValue(id obj)
{
#if SUPPORT_RETURN_AUTORELEASE
    assert(_pthread_getspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY) == NULL);

    if (callerAcceptsFastAutorelease(__builtin_return_address(0))) {
        _pthread_setspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY, obj);
        return obj;
    }
#endif

    return objc_autorelease(obj);
}

id
objc_retainAutoreleaseReturnValue(id obj)
{
    return objc_autoreleaseReturnValue(objc_retain(obj));
}

id
objc_retainAutoreleasedReturnValue(id obj)
{
#if SUPPORT_RETURN_AUTORELEASE
    if (obj == _pthread_getspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY)) {
        _pthread_setspecific_direct(AUTORELEASE_POOL_RECLAIM_KEY, 0);
        return obj;
    }
#endif
    return objc_retain(obj);
}

void
objc_storeStrong(id *location, id obj)
{
    // XXX FIXME -- GC support?
    id prev = *location;
    if (obj == prev) {
        return;
    }
    objc_retain(obj);
    *location = obj;
    objc_release(prev);
}
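
// For illustration: under ARC an assignment to a __strong lvalue, such as
//     self->_ivar = obj;
// is expected to be lowered to
//     objc_storeStrong(&self->_ivar, obj);
// which retains the new value before releasing the old one, so assigning a
// value to the variable that currently holds its only reference is safe.
// (a sketch of the expected compiler lowering)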

id
objc_retainAutorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}

void
_objc_deallocOnMainThreadHelper(void *context)
{
    id obj = (id)context;
    objc_msgSend_hack(obj, @selector(dealloc));
}

// convert objc_objectptr_t to id, callee must take ownership.
NS_RETURNS_RETAINED id objc_retainedObject(objc_objectptr_t CF_CONSUMED pointer) { return (id)pointer; }

// convert objc_objectptr_t to id, without ownership transfer.
NS_RETURNS_NOT_RETAINED id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert id to objc_objectptr_t, no ownership transfer.
CF_RETURNS_NOT_RETAINED objc_objectptr_t objc_unretainedPointer(id object) { return object; }


PRIVATE_EXTERN void arr_init(void)
{
    AutoreleasePoolPage::init();
    SideTable::init();
}