// apple/objc4 (objc4-532) — runtime/NSObject.mm
1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include "objc-weak.h"
25 #include "objc-private.h"
26 #include "objc-internal.h"
27 #include "objc-os.h"
28 #if __OBJC2__
29 #include "objc-runtime-new.h"
30 #endif
31 #include "runtime.h"
32 #include "llvm-DenseMap.h"
33
34 #include <malloc/malloc.h>
35 #include <stdint.h>
36 #include <stdbool.h>
37 #include <mach/mach.h>
38 #include <mach-o/dyld.h>
39 #include <mach-o/nlist.h>
40 #include <sys/types.h>
41 #include <sys/mman.h>
42 #include <libkern/OSAtomic.h>
43 #include <Block.h>
44 #include <map>
45 #include <execinfo.h>
46
// Minimal NSInvocation declaration so this file can ask an invocation
// for its selector without importing Foundation.
@interface NSInvocation
- (SEL)selector;
@end
50
// better to not rely on Foundation to build
// Minimal redeclarations of the Foundation types this file needs.
@class NSString;
@class NSMethodSignature;
#ifdef __LP64__
typedef unsigned long NSUInteger;
#else
typedef unsigned int NSUInteger;
#endif
typedef struct _NSZone NSZone;
60
// The root protocol, redeclared here so libobjc need not import
// Foundation headers.
@protocol NSObject

- (BOOL)isEqual:(id)object;
- (NSUInteger)hash;

- (Class)superclass;
- (Class)class;
- (id)self;
- (NSZone *)zone;

- (id)performSelector:(SEL)aSelector;
- (id)performSelector:(SEL)aSelector withObject:(id)object;
- (id)performSelector:(SEL)aSelector withObject:(id)object1 withObject:(id)object2;

- (BOOL)isProxy;

- (BOOL)isKindOfClass:(Class)aClass;
- (BOOL)isMemberOfClass:(Class)aClass;
- (BOOL)conformsToProtocol:(Protocol *)aProtocol;

- (BOOL)respondsToSelector:(SEL)aSelector;

- (id)retain;
- (oneway void)release;
- (id)autorelease;
- (NSUInteger)retainCount;

- (NSString *)description;
- (NSString *)debugDescription;

@end
92
// The root class. `isa` is its only instance variable.
OBJC_EXPORT
@interface NSObject <NSObject>
{
    Class isa;
}
@end
99
// HACK -- the use of these functions must be after the @implementation
// C-callable aliases bound directly to the NSObject method
// implementations by asm name, so calls bypass objc_msgSend dispatch.
id bypass_msgSend_retain(NSObject *obj) asm("-[NSObject retain]");
void bypass_msgSend_release(NSObject *obj) asm("-[NSObject release]");
id bypass_msgSend_autorelease(NSObject *obj) asm("-[NSObject autorelease]");
104
105
#if TARGET_OS_MAC

// NSObject used to be in Foundation/CoreFoundation.
// The "$ld$hide$os<vers>$<symbol>" special symbols below tell the
// static linker to hide libobjc's NSObject symbols when a client
// targets an OS version where the class still lived elsewhere,
// preserving binary compatibility.

#define SYMBOL_ELSEWHERE_IN_3(sym, vers, n) \
    OBJC_EXPORT const char elsewhere_ ##n __asm__("$ld$hide$os" #vers "$" #sym); const char elsewhere_ ##n = 0
#define SYMBOL_ELSEWHERE_IN_2(sym, vers, n) \
    SYMBOL_ELSEWHERE_IN_3(sym, vers, n)
#define SYMBOL_ELSEWHERE_IN(sym, vers) \
    SYMBOL_ELSEWHERE_IN_2(sym, vers, __COUNTER__)

#if __OBJC2__
// Modern runtime: hide the class, metaclass, and isa ivar symbols.
# define NSOBJECT_ELSEWHERE_IN(vers) \
    SYMBOL_ELSEWHERE_IN(_OBJC_CLASS_$_NSObject, vers); \
    SYMBOL_ELSEWHERE_IN(_OBJC_METACLASS_$_NSObject, vers); \
    SYMBOL_ELSEWHERE_IN(_OBJC_IVAR_$_NSObject.isa, vers)
#else
// Legacy runtime: a single class-name symbol.
# define NSOBJECT_ELSEWHERE_IN(vers) \
    SYMBOL_ELSEWHERE_IN(.objc_class_name_NSObject, vers)
#endif

// Hide on every OS release older than the one where NSObject moved
// into libobjc.
#if TARGET_OS_IPHONE
    NSOBJECT_ELSEWHERE_IN(5.1);
    NSOBJECT_ELSEWHERE_IN(5.0);
    NSOBJECT_ELSEWHERE_IN(4.3);
    NSOBJECT_ELSEWHERE_IN(4.2);
    NSOBJECT_ELSEWHERE_IN(4.1);
    NSOBJECT_ELSEWHERE_IN(4.0);
    NSOBJECT_ELSEWHERE_IN(3.2);
    NSOBJECT_ELSEWHERE_IN(3.1);
    NSOBJECT_ELSEWHERE_IN(3.0);
    NSOBJECT_ELSEWHERE_IN(2.2);
    NSOBJECT_ELSEWHERE_IN(2.1);
    NSOBJECT_ELSEWHERE_IN(2.0);
#else
    NSOBJECT_ELSEWHERE_IN(10.7);
    NSOBJECT_ELSEWHERE_IN(10.6);
    NSOBJECT_ELSEWHERE_IN(10.5);
    NSOBJECT_ELSEWHERE_IN(10.4);
    NSOBJECT_ELSEWHERE_IN(10.3);
    NSOBJECT_ELSEWHERE_IN(10.2);
    NSOBJECT_ELSEWHERE_IN(10.1);
    NSOBJECT_ELSEWHERE_IN(10.0);
#endif

// TARGET_OS_MAC
#endif
153
#if SUPPORT_RETURN_AUTORELEASE
// We cannot peek at where we are returning to unless we always inline this:
// Forward declaration; the per-architecture definitions appear below.
__attribute__((always_inline))
static bool callerAcceptsFastAutorelease(const void * const ra0);
#endif
159
160
/***********************************************************************
* Weak ivar support
**********************************************************************/

// Set once the process creates its first weak reference; lets
// deallocation skip the weak-table scan when no weak refs exist
// (see objc_clear_deallocating()).
static bool seen_weak_refs;
166
// Default allocation-failure handler: abort the process with a
// diagnostic. (Declared to return id; _objc_fatal presumably never
// returns -- TODO confirm its noreturn attribute in objc-private.h.)
static id defaultBadAllocHandler(Class cls)
{
    _objc_fatal("attempt to allocate object of class '%s' failed",
                class_getName(cls));
}

// Currently-installed handler; replaceable via _objc_setBadAllocHandler().
static id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
174
// Invoke the installed allocation-failure handler for `cls`.
static id callBadAllocHandler(Class cls)
{
    // fixme add re-entrancy protection in case allocation fails inside handler
    id (*handler)(Class) = badAllocHandler;
    return handler(cls);
}
180
// Install a replacement allocation-failure handler
// (see callBadAllocHandler()).
void _objc_setBadAllocHandler(id(*newHandler)(Class))
{
    badAllocHandler = newHandler;
}
185
186
// Optional debug counters/logging for ARC retain/release traffic.
#define ARR_LOGGING 0

#if ARR_LOGGING
// Separate tallies for compiler-emitted vs. hand-written calls.
struct {
    int retains;
    int releases;
    int autoreleases;
    int blockCopies;
} CompilerGenerated, ExplicitlyCoded;

void (^objc_arr_log)(const char *, id param) =
    ^(const char *str, id param) { printf("%s %p\n", str, param); };
#endif
200
201
namespace {

// Number of independent side tables; objects hash across the stripes to
// reduce spinlock contention. Embedded builds use a single stripe.
#if TARGET_OS_EMBEDDED
# define SIDE_TABLE_STRIPE 1
#else
# define SIDE_TABLE_STRIPE 8
#endif

// should be a multiple of cache line size (64)
#define SIDE_TABLE_SIZE 64

// Map from disguised object pointer to its stored retain-count word.
// NOTE(review): the meaning of the `true` template argument comes from
// llvm-DenseMap.h -- confirm there.
typedef objc::DenseMap<id,size_t,true> RefcountMap;

// One stripe of per-object runtime state: a spinlock guarding a
// refcount map (extra retain counts + deallocating bit) and a weak
// reference table. Instances live only inside the static table_buf.
class SideTable {
private:
    static uint8_t table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];

public:
    OSSpinLock slock;        // guards refcnts and weak_table
    RefcountMap refcnts;
    weak_table_t weak_table;

    SideTable() : slock(OS_SPINLOCK_INIT)
    {
        memset(&weak_table, 0, sizeof(weak_table));
    }

    ~SideTable()
    {
        // never delete side_table in case other threads retain during exit
        assert(0);
    }

    // Map an object pointer to its stripe by hashing address bits.
    static SideTable *tableForPointer(const void *p)
    {
# if SIDE_TABLE_STRIPE == 1
        return (SideTable *)table_buf;
# else
        uintptr_t a = (uintptr_t)p;
        int index = ((a >> 4) ^ (a >> 9)) & (SIDE_TABLE_STRIPE - 1);
        return (SideTable *)&table_buf[index * SIDE_TABLE_SIZE];
# endif
    }

    // Construct every stripe in place inside table_buf.
    static void init() {
        // use placement new instead of static ctor to avoid dtor at exit
        for (int i = 0; i < SIDE_TABLE_STRIPE; i++) {
            new (&table_buf[i * SIDE_TABLE_SIZE]) SideTable;
        }
    }

    // True if no stripe's spinlock is currently held by any thread
    // (probes each lock with try-lock / unlock).
    static bool noLocksHeld(void) {
        bool gotAll = true;
        for (int i = 0; i < SIDE_TABLE_STRIPE && gotAll; i++) {
            SideTable *s = (SideTable *)(&table_buf[i * SIDE_TABLE_SIZE]);
            if (OSSpinLockTry(&s->slock)) {
                OSSpinLockUnlock(&s->slock);
            } else {
                gotAll = false;
            }
        }
        return gotAll;
    }
};

// Each SideTable instance must fit inside its SIDE_TABLE_SIZE slot.
STATIC_ASSERT(sizeof(SideTable) <= SIDE_TABLE_SIZE);
__attribute__((aligned(SIDE_TABLE_SIZE))) uint8_t
SideTable::table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];

// Avoid false-negative reports from tools like "leaks":
// side table keys are bit-inverted object pointers.
#define DISGUISE(x) ((id)~(uintptr_t)(x))

// anonymous namespace
};
276
277 bool noSideTableLocksHeld(void)
278 {
279 return SideTable::noLocksHeld();
280 }
281
282 //
283 // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
284 //
285
// ARC entry point for retaining a block: blocks are "retained" by
// copying them (stack blocks are moved to the heap).
id objc_retainBlock(id x) {
#if ARR_LOGGING
    objc_arr_log("objc_retain_block", x);
    ++CompilerGenerated.blockCopies;
#endif
    id copied = (id)_Block_copy(x);
    return copied;
}
293
294 //
295 // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
296 //
297
// Hook queried before deallocation; this runtime always permits it.
BOOL objc_should_deallocate(id object) {
    (void)object;  // unused
    return YES;
}
301
// Equivalent to [[obj retain] autorelease].
id
objc_retain_autorelease(id obj)
{
    id retained = objc_retain(obj);
    return objc_autorelease(retained);
}
307
// Store `newObj` into the weak variable at `location`, unregistering
// the old value and registering the new one with the appropriate weak
// tables. Returns the value actually stored (NULL if the store was
// rejected by weak_register_no_lock).
id
objc_storeWeak(id *location, id newObj)
{
    id oldObj;
    SideTable *oldTable;
    SideTable *newTable;
    OSSpinLock *lock1;
#if SIDE_TABLE_STRIPE > 1
    OSSpinLock *lock2;
#endif

    // Record that weak references exist so deallocation takes the
    // weak-clearing path (see objc_clear_deallocating()).
    if (!seen_weak_refs) {
        seen_weak_refs = true;
    }

    // Acquire locks for old and new values.
    // Order by lock address to prevent lock ordering problems.
    // Retry if the old value changes underneath us.
 retry:
    oldObj = *location;

    oldTable = SideTable::tableForPointer(oldObj);
    newTable = SideTable::tableForPointer(newObj);

    lock1 = &newTable->slock;
#if SIDE_TABLE_STRIPE > 1
    lock2 = &oldTable->slock;
    // Sort so lock1 is the lower-addressed lock; take it last.
    if (lock1 > lock2) {
        OSSpinLock *temp = lock1;
        lock1 = lock2;
        lock2 = temp;
    }
    if (lock1 != lock2) OSSpinLockLock(lock2);
#endif
    OSSpinLockLock(lock1);

    // The slot changed while we were acquiring locks, so the tables we
    // locked may be the wrong stripes. Unlock and start over.
    if (*location != oldObj) {
        OSSpinLockUnlock(lock1);
#if SIDE_TABLE_STRIPE > 1
        if (lock1 != lock2) OSSpinLockUnlock(lock2);
#endif
        goto retry;
    }

    if (oldObj) {
        weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
    }
    if (newObj) {
        newObj = weak_register_no_lock(&newTable->weak_table, newObj,location);
        // weak_register_no_lock returns NULL if weak store should be rejected
    }
    // Do not set *location anywhere else. That would introduce a race.
    *location = newObj;

    OSSpinLockUnlock(lock1);
#if SIDE_TABLE_STRIPE > 1
    if (lock1 != lock2) OSSpinLockUnlock(lock2);
#endif

    return newObj;
}
369
// Read the weak variable at `location` under its side table lock and
// return the value produced by arr_read_weak_reference(), or NULL if
// the slot is nil. (Per the function's name and objc_loadWeak() below,
// the returned object carries an extra retain.)
id
objc_loadWeakRetained(id *location)
{
    id result;

    SideTable *table;
    OSSpinLock *lock;

 retry:
    // Unlocked peek: a nil slot needs no table work at all.
    result = *location;
    if (!result) return NULL;

    table = SideTable::tableForPointer(result);
    lock = &table->slock;

    OSSpinLockLock(lock);
    // The slot changed while acquiring the lock; we may hold the wrong
    // stripe's lock. Retry from the top.
    if (*location != result) {
        OSSpinLockUnlock(lock);
        goto retry;
    }

    result = arr_read_weak_reference(&table->weak_table, location);

    OSSpinLockUnlock(lock);
    return result;
}
396
// Load a weak variable: read it with an extra retain under the lock,
// then hand that +1 reference to the autorelease pool.
id
objc_loadWeak(id *location)
{
    id obj = objc_loadWeakRetained(location);
    return objc_autorelease(obj);
}
402
// Initialize a fresh weak variable to `val`.
id
objc_initWeak(id *addr, id val)
{
    // The slot must hold a defined value before objc_storeWeak() reads it.
    *addr = 0;
    return objc_storeWeak(addr, val);
}
409
// Tear down a weak variable: storing nil unregisters the slot.
void
objc_destroyWeak(id *addr)
{
    (void)objc_storeWeak(addr, 0);
}
415
// Copy a weak variable: read the source with a +1 reference so the
// object cannot die mid-copy, register the destination, then drop the
// temporary reference.
void
objc_copyWeak(id *to, id *from)
{
    id obj = objc_loadWeakRetained(from);
    objc_initWeak(to, obj);
    objc_release(obj);
}
423
// Move a weak variable: copy the reference, then tear down the source.
void
objc_moveWeak(id *to, id *from)
{
    objc_copyWeak(to, from);
    objc_destroyWeak(from);
}
430
431
432 /* Autorelease pool implementation
433 A thread's autorelease pool is a stack of pointers.
434 Each pointer is either an object to release, or POOL_SENTINEL which is
435 an autorelease pool boundary.
436 A pool token is a pointer to the POOL_SENTINEL for that pool. When
437 the pool is popped, every object hotter than the sentinel is released.
438 The stack is divided into a doubly-linked list of pages. Pages are added
439 and deleted as necessary.
440 Thread-local storage points to the hot page, where newly autoreleased
441 objects are stored.
442 */
443
// Breakpoint target: called when an object is autoreleased with no
// pool in place (see AutoreleasePoolPage::autoreleaseSlow()).
extern "C" BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
445
446 namespace {
447
// Guard pattern placed at the start of every AutoreleasePoolPage to
// detect memory smashers: one sentinel word followed by the 12-byte
// string "AUTORELEASE!".
struct magic_t {
    static const uint32_t M0 = 0xA1A1A1A1;
# define M1 "AUTORELEASE!"
    static const size_t M1_len = 12;
    uint32_t m[4];

    magic_t() {
        assert(M1_len == strlen(M1));
        assert(M1_len == 3 * sizeof(m[1]));

        m[0] = M0;
        strncpy((char *)&m[1], M1, M1_len);
    }

    ~magic_t() {
        m[0] = m[1] = m[2] = m[3] = 0;
    }

    // Full check: sentinel word plus the string.
    bool check() const {
        return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
    }

    // Cheap check for release builds (first word only).
    bool fastcheck() const {
#ifdef NDEBUG
        return (m[0] == M0);
#else
        return check();
#endif
    }

# undef M1
};
480
481
// Set this to 1 to mprotect() autorelease pool contents
#define PROTECT_AUTORELEASEPOOL 0

// One page of a thread's autorelease pool stack. Pages form a
// doubly-linked parent/child list; the "hot" page (where new entries
// go) is cached in thread-local storage. Entries are object pointers
// or POOL_SENTINEL pool boundaries.
class AutoreleasePoolPage
{

#define POOL_SENTINEL 0
    static pthread_key_t const key = AUTORELEASE_POOL_KEY;
    static uint8_t const SCRIBBLE = 0xA3;  // 0xA3A3A3A3 after releasing
    static size_t const SIZE =
#if PROTECT_AUTORELEASEPOOL
        4096;  // must be multiple of vm page size
#else
        4096;  // size and alignment, power of 2
#endif
    static size_t const COUNT = SIZE / sizeof(id);

    magic_t const magic;                 // corruption canary; must be first
    id *next;                            // next unused entry slot
    pthread_t const thread;              // owning thread; pages are per-thread
    AutoreleasePoolPage * const parent;  // colder page (NULL for the first)
    AutoreleasePoolPage *child;          // hotter page (NULL at the top)
    uint32_t const depth;                // number of parent pages below
    uint32_t hiwat;                      // high-water mark, in entries

    // SIZE-sizeof(*this) bytes of contents follow

    // Pages are SIZE-aligned so pageForPointer() can recover a page
    // from any interior pointer by rounding down.
    static void * operator new(size_t size) {
        return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
    }
    static void operator delete(void * p) {
        return free(p);
    }

    inline void protect() {
#if PROTECT_AUTORELEASEPOOL
        mprotect(this, SIZE, PROT_READ);
        check();
#endif
    }

    inline void unprotect() {
#if PROTECT_AUTORELEASEPOOL
        check();
        mprotect(this, SIZE, PROT_READ | PROT_WRITE);
#endif
    }

    // Construct a new hot page linked under newParent (NULL for the
    // thread's first page).
    AutoreleasePoolPage(AutoreleasePoolPage *newParent)
        : magic(), next(begin()), thread(pthread_self()),
          parent(newParent), child(NULL),
          depth(parent ? 1+parent->depth : 0),
          hiwat(parent ? parent->hiwat : 0)
    {
        if (parent) {
            parent->check();
            assert(!parent->child);
            parent->unprotect();
            parent->child = this;
            parent->protect();
        }
        protect();
    }

    ~AutoreleasePoolPage()
    {
        check();
        unprotect();
        assert(empty());

        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        assert(!child);
    }


    // Report a corrupted page; fatal by default.
    void busted(bool die = true)
    {
        (die ? _objc_fatal : _objc_inform)
            ("autorelease pool page %p corrupted\n"
             " magic 0x%08x 0x%08x 0x%08x 0x%08x\n pthread %p\n",
             this, magic.m[0], magic.m[1], magic.m[2], magic.m[3],
             this->thread);
    }

    // Verify the magic canary and that the page belongs to this thread.
    void check(bool die = true)
    {
        if (!magic.check() || !pthread_equal(thread, pthread_self())) {
            busted(die);
        }
    }

    // Cheaper canary-only check (full check in debug builds).
    void fastcheck(bool die = true)
    {
        if (! magic.fastcheck()) {
            busted(die);
        }
    }


    // First entry slot, just past the page header.
    id * begin() {
        return (id *) ((uint8_t *)this+sizeof(*this));
    }

    // One past the last possible entry slot.
    id * end() {
        return (id *) ((uint8_t *)this+SIZE);
    }

    bool empty() {
        return next == begin();
    }

    bool full() {
        return next == end();
    }

    bool lessThanHalfFull() {
        return (next - begin() < (end() - begin()) / 2);
    }

    // Append one entry (object or POOL_SENTINEL); caller guarantees
    // !full(). Returns the slot the entry was stored in.
    id *add(id obj)
    {
        assert(!full());
        unprotect();
        *next++ = obj;
        protect();
        return next-1;
    }

    void releaseAll()
    {
        releaseUntil(begin());
    }

    // Pop and release every entry hotter than `stop`, scribbling over
    // freed slots with SCRIBBLE.
    void releaseUntil(id *stop)
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage

        while (this->next != stop) {
            // Restart from hotPage() every time, in case -release
            // autoreleased more objects
            AutoreleasePoolPage *page = hotPage();

            // fixme I think this `while` can be `if`, but I can't prove it
            while (page->empty()) {
                page = page->parent;
                setHotPage(page);
            }

            page->unprotect();
            id obj = *--page->next;
            memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
            page->protect();

            if (obj != POOL_SENTINEL) {
                objc_release(obj);
            }
        }

        setHotPage(this);

#ifndef NDEBUG
        // we expect any children to be completely empty
        for (AutoreleasePoolPage *page = child; page; page = page->child) {
            assert(page->empty());
        }
#endif
    }

    // Delete this page and every hotter (child) page, unlinking from
    // the parent as each dies.
    void kill()
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        AutoreleasePoolPage *page = this;
        while (page->child) page = page->child;

        AutoreleasePoolPage *deathptr;
        do {
            deathptr = page;
            page = page->parent;
            if (page) {
                page->unprotect();
                page->child = NULL;
                page->protect();
            }
            delete deathptr;
        } while (deathptr != this);
    }

    // pthread TLS destructor: drain and destroy the dying thread's
    // entire pool stack.
    static void tls_dealloc(void *p)
    {
        // reinstate TLS value while we work
        setHotPage((AutoreleasePoolPage *)p);
        pop(0);
        setHotPage(NULL);
    }

    static AutoreleasePoolPage *pageForPointer(const void *p)
    {
        return pageForPointer((uintptr_t)p);
    }

    // Recover the page containing an interior pointer by rounding down
    // to the page's SIZE alignment; sanity-checks the result.
    static AutoreleasePoolPage *pageForPointer(uintptr_t p)
    {
        AutoreleasePoolPage *result;
        uintptr_t offset = p % SIZE;

        assert(offset >= sizeof(AutoreleasePoolPage));

        result = (AutoreleasePoolPage *)(p - offset);
        result->fastcheck();

        return result;
    }


    // The page new autoreleases go into, cached in fast TLS.
    static inline AutoreleasePoolPage *hotPage()
    {
        AutoreleasePoolPage *result = (AutoreleasePoolPage *)
            tls_get_direct(key);
        if (result) result->fastcheck();
        return result;
    }

    static inline void setHotPage(AutoreleasePoolPage *page)
    {
        if (page) page->fastcheck();
        tls_set_direct(key, (void *)page);
    }

    // The oldest page in this thread's stack (walks parent links).
    static inline AutoreleasePoolPage *coldPage()
    {
        AutoreleasePoolPage *result = hotPage();
        if (result) {
            while (result->parent) {
                result = result->parent;
                result->fastcheck();
            }
        }
        return result;
    }


    // Fast path: append to the hot page when it exists and has room.
    static inline id *autoreleaseFast(id obj)
    {
        AutoreleasePoolPage *page = hotPage();
        if (page && !page->full()) {
            return page->add(obj);
        } else {
            return autoreleaseSlow(obj);
        }
    }

    // Slow path: advance to (or allocate) a page with room, or complain
    // and deliberately leak if the thread has no pool at all.
    static __attribute__((noinline))
    id *autoreleaseSlow(id obj)
    {
        AutoreleasePoolPage *page;
        page = hotPage();

        // The code below assumes some cases are handled by autoreleaseFast()
        assert(!page || page->full());

        if (!page) {
            assert(obj != POOL_SENTINEL);
            _objc_inform("Object %p of class %s autoreleased "
                         "with no pool in place - just leaking - "
                         "break on objc_autoreleaseNoPool() to debug",
                         obj, object_getClassName(obj));
            objc_autoreleaseNoPool(obj);
            return NULL;
        }

        do {
            if (page->child) page = page->child;
            else page = new AutoreleasePoolPage(page);
        } while (page->full());

        setHotPage(page);
        return page->add(obj);
    }

public:
    // Record one autoreleased object; returns the object unchanged.
    static inline id autorelease(id obj)
    {
        assert(obj);
        assert(!OBJC_IS_TAGGED_PTR(obj));
        id *dest __unused = autoreleaseFast(obj);
        assert(!dest || *dest == obj);
        return obj;
    }


    // Push a new pool: store a POOL_SENTINEL and return its slot as the
    // pool token.
    static inline void *push()
    {
        if (!hotPage()) {
            setHotPage(new AutoreleasePoolPage(NULL));
        }
        id *dest = autoreleaseFast(POOL_SENTINEL);
        assert(*dest == POOL_SENTINEL);
        return dest;
    }

    // Pop the pool identified by `token` (0 pops the top-level pool),
    // releasing every entry hotter than the sentinel and trimming empty
    // child pages.
    static inline void pop(void *token)
    {
        AutoreleasePoolPage *page;
        id *stop;

        if (token) {
            page = pageForPointer(token);
            stop = (id *)token;
            assert(*stop == POOL_SENTINEL);
        } else {
            // Token 0 is top-level pool
            page = coldPage();
            assert(page);
            stop = page->begin();
        }

        if (PrintPoolHiwat) printHiwat();

        page->releaseUntil(stop);

        // memory: delete empty children
        // hysteresis: keep one empty child if this page is more than half full
        // special case: delete everything for pop(0)
        if (!token) {
            page->kill();
            setHotPage(NULL);
        } else if (page->child) {
            if (page->lessThanHalfFull()) {
                page->child->kill();
            }
            else if (page->child->child) {
                page->child->child->kill();
            }
        }
    }

    // One-time setup: register the TLS destructor that drains pools on
    // thread exit.
    static void init()
    {
        int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
                                             AutoreleasePoolPage::tls_dealloc);
        assert(r == 0);
    }

    // Debug dump of this page's entries.
    void print()
    {
        _objc_inform("[%p] ................ PAGE %s %s %s", this,
                     full() ? "(full)" : "",
                     this == hotPage() ? "(hot)" : "",
                     this == coldPage() ? "(cold)" : "");
        check(false);
        for (id *p = begin(); p < next; p++) {
            if (*p == POOL_SENTINEL) {
                _objc_inform("[%p] ################ POOL %p", p, p);
            } else {
                _objc_inform("[%p] %#16lx %s",
                             p, (unsigned long)*p, object_getClassName(*p));
            }
        }
    }

    // Debug dump of the current thread's entire pool stack.
    static void printAll()
    {
        _objc_inform("##############");
        _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());

        AutoreleasePoolPage *page;
        ptrdiff_t objects = 0;
        for (page = coldPage(); page; page = page->child) {
            objects += page->next - page->begin();
        }
        _objc_inform("%llu releases pending.", (unsigned long long)objects);

        for (page = coldPage(); page; page = page->child) {
            page->print();
        }

        _objc_inform("##############");
    }

    // Propagate and (above 256 entries) log a new high-water mark with
    // a backtrace of the autoreleasing code.
    static void printHiwat()
    {
        // Check and propagate high water mark
        // Ignore high water marks under 256 to suppress noise.
        AutoreleasePoolPage *p = hotPage();
        uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
        if (mark > p->hiwat && mark > 256) {
            for( ; p; p = p->parent) {
                p->unprotect();
                p->hiwat = mark;
                p->protect();
            }

            _objc_inform("POOL HIGHWATER: new high water mark of %u "
                         "pending autoreleases for thread %p:",
                         mark, pthread_self());

            void *stack[128];
            int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
            char **sym = backtrace_symbols(stack, count);
            for (int i = 0; i < count; i++) {
                _objc_inform("POOL HIGHWATER: %s", sym[i]);
            }
            free(sym);
        }
    }

#undef POOL_SENTINEL
};
893
894 // anonymous namespace
895 };
896
// API to only be called by root classes like NSObject or NSProxy

extern "C" {
// Out-of-line slow paths, kept noinline so the fast paths stay small;
// "used" prevents them from being dead-stripped.
__attribute__((used,noinline,nothrow))
static id _objc_rootRetain_slow(id obj);
__attribute__((used,noinline,nothrow))
static bool _objc_rootReleaseWasZero_slow(id obj);
};
905
// Out-of-line retain: bump the side table refcount under the lock.
// Counts are stored shifted left by 1 (bit 0 is the deallocating
// flag), so one retain adds 2.
id
_objc_rootRetain_slow(id obj)
{
    SideTable *table = SideTable::tableForPointer(obj);
    OSSpinLockLock(&table->slock);
    table->refcnts[DISGUISE(obj)] += 2;
    OSSpinLockUnlock(&table->slock);

    return obj;
}
916
// Retain obj unless it has begun deallocating; returns false (and does
// not retain) in that case. The object's side table lock must already
// be held by the caller (enforced below).
bool
_objc_rootTryRetain(id obj)
{
    assert(obj);
    assert(!UseGC);

    // Tagged pointers are never deallocated; always succeeds.
    if (OBJC_IS_TAGGED_PTR(obj)) return true;

    SideTable *table = SideTable::tableForPointer(obj);

    // NO SPINLOCK HERE
    // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
    // which already acquired the lock on our behalf.
    if (table->slock == 0) {
        // Lock is unlocked: the required locking contract was violated.
        _objc_fatal("Do not call -_tryRetain.");
    }

    bool result = true;
    RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
    if (it == table->refcnts.end()) {
        // No entry yet: record one extra retain (counts are shifted by 1).
        table->refcnts[DISGUISE(obj)] = 2;
    } else if (it->second & 1) {
        // Bit 0 set: object is deallocating; refuse to retain.
        result = false;
    } else {
        it->second += 2;
    }

    return result;
}
946
// True if obj has begun deallocation (bit 0 of its side table entry).
// The object's side table lock must already be held by the caller.
bool
_objc_rootIsDeallocating(id obj)
{
    assert(obj);
    assert(!UseGC);

    // Tagged pointers are never deallocated.
    if (OBJC_IS_TAGGED_PTR(obj)) return false;

    SideTable *table = SideTable::tableForPointer(obj);

    // NO SPINLOCK HERE
    // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
    // which already acquired the lock on our behalf.
    if (table->slock == 0) {
        // Lock is unlocked: the required locking contract was violated.
        _objc_fatal("Do not call -_isDeallocating.");
    }

    RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
    return (it != table->refcnts.end()) && ((it->second & 1) == 1);
}
967
968
// Called during deallocation: zero out any weak references to obj and
// remove its side table refcount entry.
void
objc_clear_deallocating(id obj)
{
    assert(obj);
    assert(!UseGC);

    SideTable *table = SideTable::tableForPointer(obj);

    // clear any weak table items
    // clear extra retain count and deallocating bit
    // (fixme warn or abort if extra retain count == 0 ?)
    OSSpinLockLock(&table->slock);
    if (seen_weak_refs) {
        // Skip the weak table entirely if this process never created a
        // weak reference.
        arr_clear_deallocating(&table->weak_table, obj);
    }
    table->refcnts.erase(DISGUISE(obj));
    OSSpinLockUnlock(&table->slock);
}
987
988
// Out-of-line release path, taken when the fast path's try-lock failed.
// Decrements the side table count; returns true if the object should
// now be deallocated. (Must stay in sync with the fast path in
// _objc_rootReleaseWasZero().)
bool
_objc_rootReleaseWasZero_slow(id obj)
{
    SideTable *table = SideTable::tableForPointer(obj);

    bool do_dealloc = false;

    OSSpinLockLock(&table->slock);
    RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
    if (it == table->refcnts.end()) {
        // No entry means no extra retains: mark deallocating (bit 0).
        do_dealloc = true;
        table->refcnts[DISGUISE(obj)] = 1;
    } else if (it->second == 0) {
        // Count already zero: this release starts deallocation.
        do_dealloc = true;
        it->second = 1;
    } else {
        // Counts are stored shifted by 1, so one release subtracts 2.
        it->second -= 2;
    }
    OSSpinLockUnlock(&table->slock);
    return do_dealloc;
}
1010
// Release obj; returns true if the caller should now deallocate it.
// Fast path uses a try-lock; on contention it defers to
// _objc_rootReleaseWasZero_slow(), which performs the identical update
// under a blocking lock.
bool
_objc_rootReleaseWasZero(id obj)
{
    assert(obj);
    assert(!UseGC);

    // Tagged pointers are never deallocated.
    if (OBJC_IS_TAGGED_PTR(obj)) return false;

    SideTable *table = SideTable::tableForPointer(obj);

    bool do_dealloc = false;

    if (OSSpinLockTry(&table->slock)) {
        RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
        if (it == table->refcnts.end()) {
            // No entry means no extra retains: mark deallocating (bit 0).
            do_dealloc = true;
            table->refcnts[DISGUISE(obj)] = 1;
        } else if (it->second == 0) {
            // Count already zero: this release starts deallocation.
            do_dealloc = true;
            it->second = 1;
        } else {
            // Counts are stored shifted by 1, so one release subtracts 2.
            it->second -= 2;
        }
        OSSpinLockUnlock(&table->slock);
        return do_dealloc;
    }
    return _objc_rootReleaseWasZero_slow(obj);
}
1039
// Out-of-line autorelease. Tagged pointers are never placed in the
// pool; everything else goes through the pool page machinery.
__attribute__((noinline,used))
static id _objc_rootAutorelease2(id obj)
{
    if (OBJC_IS_TAGGED_PTR(obj)) return obj;
    return AutoreleasePoolPage::autorelease(obj);
}
1046
// Return obj's retain count: 1 (implicit) plus any extra retains
// recorded in the side table.
uintptr_t
_objc_rootRetainCount(id obj)
{
    assert(obj);
    assert(!UseGC);

    // XXX -- There is no way that anybody can use this API race free in a
    // threaded environment because the result is immediately stale by the
    // time the caller receives it.

    // Tagged pointers report their own value.
    if (OBJC_IS_TAGGED_PTR(obj)) return (uintptr_t)obj;

    SideTable *table = SideTable::tableForPointer(obj);

    size_t refcnt_result = 1;

    OSSpinLockLock(&table->slock);
    RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
    if (it != table->refcnts.end()) {
        // Stored count is shifted left by 1 (bit 0 = deallocating flag).
        refcnt_result = (it->second >> 1) + 1;
    }
    OSSpinLockUnlock(&table->slock);
    return refcnt_result;
}
1071
// Root -init: a deliberate no-op that returns the receiver.
id
_objc_rootInit(id obj)
{
    // In practice, it will be hard to rely on this function.
    // Many classes do not properly chain -init calls.
    return obj;
}
1079
// Allocate an instance of cls. The modern runtime ignores the zone;
// the legacy runtime honors a non-NULL zone except under GC. On
// failure the bad-alloc handler is invoked.
id
_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
{
    id obj;

#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    (void)zone;
    obj = class_createInstance(cls, 0);
#else
    if (!zone || UseGC) {
        obj = class_createInstance(cls, 0);
    }
    else {
        obj = class_createInstanceFromZone(cls, 0, zone);
    }
#endif

    if (!obj) obj = callBadAllocHandler(cls);
    return obj;
}
1101
// Root +alloc: funnels through +allocWithZone: so subclass overrides
// of allocWithZone: are honored.
id
_objc_rootAlloc(Class cls)
{
#if 0 && __OBJC2__
    // Skip over the +allocWithZone: call if the class doesn't override it.
    // fixme not - this breaks ObjectAlloc
    if (! ((class_t *)cls)->isa->hasCustomAWZ()) {
        return class_createInstance(cls, 0);
    }
#endif
    return [cls allocWithZone: nil];
}
1114
// Root -dealloc: tagged pointers are never freed; everything else is
// destroyed via object_dispose().
void
_objc_rootDealloc(id obj)
{
    assert(obj);
    assert(!UseGC);

    if (OBJC_IS_TAGGED_PTR(obj)) return;

    object_dispose(obj);
}
1125
// Root -finalize: a no-op under GC; fatal if GC is off (finalize must
// never be reached without garbage collection).
void
_objc_rootFinalize(id obj __unused)
{
    assert(obj);
    assert(UseGC);

    if (!UseGC) {
        _objc_fatal("_objc_rootFinalize called with garbage collection off");
    }
}
1137
// Return the malloc zone obj nominally belongs to.
malloc_zone_t *
_objc_rootZone(id obj)
{
    (void)obj;
    if (gc_zone) {
        // GC is on: report the collector's zone.
        return gc_zone;
    }
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    return malloc_default_zone();
#else
    malloc_zone_t *rval = malloc_zone_from_ptr(obj);
    return rval ? rval : malloc_default_zone();
#endif
}
1153
// Root -hash: the object's address, except under GC where the
// collector-maintained external hash is used.
uintptr_t
_objc_rootHash(id obj)
{
    return UseGC ? _object_getExternalHash(obj) : (uintptr_t)obj;
}
1162
// make CF link for now: underscore-prefixed shims forwarding to the
// public pool push/pop entry points.
void *_objc_autoreleasePoolPush(void)
{
    return objc_autoreleasePoolPush();
}

void _objc_autoreleasePoolPop(void *ctxt)
{
    objc_autoreleasePoolPop(ctxt);
}
1166
// Begin a new autorelease pool; returns the pool token.
// GC has no autorelease pools, so the token is NULL.
void *
objc_autoreleasePoolPush(void)
{
    return UseGC ? NULL : AutoreleasePoolPage::push();
}
1173
// End the pool identified by `ctxt`, releasing its contents.
void
objc_autoreleasePoolPop(void *ctxt)
{
    if (UseGC) return;

    // fixme rdar://9167170: tolerate a NULL token instead of popping
    // the top-level pool.
    if (!ctxt) return;

    AutoreleasePoolPage::pop(ctxt);
}
1184
// Debugging aid: dump the current thread's entire pool stack.
void
_objc_autoreleasePoolPrint(void)
{
    if (!UseGC) AutoreleasePoolPage::printAll();
}
1191
1192 #if SUPPORT_RETURN_AUTORELEASE
1193
1194 /*
1195 Fast handling of returned autoreleased values.
1196 The caller and callee cooperate to keep the returned object
1197 out of the autorelease pool.
1198
1199 Caller:
1200 ret = callee();
1201 objc_retainAutoreleasedReturnValue(ret);
1202 // use ret here
1203
1204 Callee:
1205 // compute ret
1206 [ret retain];
1207 return objc_autoreleaseReturnValue(ret);
1208
1209 objc_autoreleaseReturnValue() examines the caller's instructions following
1210 the return. If the caller's instructions immediately call
1211 objc_autoreleaseReturnValue, then the callee omits the -autorelease and saves
1212 the result in thread-local storage. If the caller does not look like it
1213 cooperates, then the callee calls -autorelease as usual.
1214
1215 objc_autoreleaseReturnValue checks if the returned value is the same as the
1216 one in thread-local storage. If it is, the value is used directly. If not,
1217 the value is assumed to be truly autoreleased and is retained again. In
1218 either case, the caller now has a retained reference to the value.
1219
1220 Tagged pointer objects do participate in the fast autorelease scheme,
1221 because it saves message sends. They are not entered in the autorelease
1222 pool in the slow case.
1223 */
1224
1225 # if __x86_64__
1226
// x86_64: examine the instruction bytes at return address ra0 to decide
// whether the caller immediately calls
// objc_retainAutoreleasedReturnValue (possibly through a stub). Only
// the exact code sequences the compiler emits are matched.
static bool callerAcceptsFastAutorelease(const void * const ra0)
{
    const uint8_t *ra1 = (const uint8_t *)ra0;
    const uint16_t *ra2;
    const uint32_t *ra4 = (const uint32_t *)ra1;
    const void **sym;

#define PREFER_GOTPCREL 0
#if PREFER_GOTPCREL
    // 48 89 c7    movq  %rax,%rdi
    // ff 15       callq *symbol@GOTPCREL(%rip)
    if (*ra4 != 0xffc78948) {
        return false;
    }
    if (ra1[4] != 0x15) {
        return false;
    }
    ra1 += 3;
#else
    // 48 89 c7    movq  %rax,%rdi
    // e8          callq symbol
    if (*ra4 != 0xe8c78948) {
        return false;
    }
    // Follow the call's rel32 displacement to the stub it targets.
    ra1 += (long)*(const int32_t *)(ra1 + 4) + 8l;
    ra2 = (const uint16_t *)ra1;
    // ff 25       jmpq *symbol@DYLDMAGIC(%rip)
    if (*ra2 != 0x25ff) {
        return false;
    }
#endif
    // Resolve the RIP-relative memory operand to the symbol pointer and
    // require that it is objc_retainAutoreleasedReturnValue.
    ra1 += 6l + (long)*(const int32_t *)(ra1 + 2);
    sym = (const void **)ra1;
    if (*sym != objc_retainAutoreleasedReturnValue)
    {
        return false;
    }

    return true;
}
1267
1268 // __x86_64__
1269 # elif __arm__
1270
// ARM: the compiler marks cooperation by emitting a "mov r7, r7" no-op
// at the return address (in Thumb or ARM encoding).
static bool callerAcceptsFastAutorelease(const void *ra)
{
    // if the low bit is set, we're returning to thumb mode
    if ((uintptr_t)ra & 1) {
        // 3f 46          mov r7, r7
        // we mask off the low bit via subtraction
        if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
            return true;
        }
    } else {
        // 07 70 a0 e1    mov r7, r7
        if (*(uint32_t *)ra == 0xe1a07007) {
            return true;
        }
    }
    return false;
}
1288
1289 // __arm__
1290 # elif __i386__ && TARGET_IPHONE_SIMULATOR
1291
// i386 simulator: no caller pattern is recognized, so the fast
// autorelease handoff is always declined and the slow path is used.
static bool callerAcceptsFastAutorelease(const void *ra)
{
    return false;
}
1297 // __i386__ && TARGET_IPHONE_SIMULATOR
1298 # else
1299
1300 #warning unknown architecture
1301
// Unknown architecture: decline the optimization; behavior stays
// correct, just without the autorelease-elision fast path.
static bool callerAcceptsFastAutorelease(const void *ra)
{
    return false;
}
1306
1307 # endif
1308
1309 // SUPPORT_RETURN_AUTORELEASE
1310 #endif
1311
1312
// Prepare obj for return through autorelease. If the caller's code
// looks like it immediately calls objc_retainAutoreleasedReturnValue,
// skip the autorelease entirely: park obj in thread-local storage and
// let the caller reclaim it. Otherwise do a real autorelease.
id 
objc_autoreleaseReturnValue(id obj)
{
#if SUPPORT_RETURN_AUTORELEASE
    // The reclaim slot must be empty; a leftover value would mean an
    // earlier handoff was never claimed.
    assert(tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY) == NULL);

    if (callerAcceptsFastAutorelease(__builtin_return_address(0))) {
        tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, obj);
        return obj;
    }
#endif

    return objc_autorelease(obj);
}
1327
// Retain obj, then return it through the autorelease-return path
// (equivalent to [[obj retain] autorelease] with the fast handoff).
id
objc_retainAutoreleaseReturnValue(id obj)
{
    id retained = objc_retain(obj);
    return objc_autoreleaseReturnValue(retained);
}
1333
// Accept a value produced by objc_autoreleaseReturnValue. If the callee
// parked obj in the thread-local reclaim slot, the handoff succeeded:
// clear the slot and keep the reference without an extra retain.
// Otherwise the value was genuinely autoreleased, so retain it. Either
// way the caller ends up with a retained reference.
id
objc_retainAutoreleasedReturnValue(id obj)
{
#if SUPPORT_RETURN_AUTORELEASE
    if (obj == tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY)) {
        tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, 0);
        return obj;
    }
#endif
    return objc_retain(obj);
}
1345
// Store obj into the strong variable at *location: retain the incoming
// value, then release whatever was there. No-op when the value is
// unchanged.
void
objc_storeStrong(id *location, id obj)
{
    // XXX FIXME -- GC support?
    id oldValue = *location;
    if (oldValue == obj) {
        return;
    }
    // Retain the new value before releasing the old one so *location
    // never transiently refers to a deallocated object.
    objc_retain(obj);
    *location = obj;
    objc_release(oldValue);
}
1358
// Equivalent to [[obj retain] autorelease], without the return-value
// handoff optimization.
id
objc_retainAutorelease(id obj)
{
    id retained = objc_retain(obj);
    return objc_autorelease(retained);
}
1364
// Trampoline used to run -dealloc on the main thread; context is the
// object whose deallocation was deferred.
void
_objc_deallocOnMainThreadHelper(void *context)
{
    [(id)context dealloc];
}
1371
1372 #undef objc_retainedObject
1373 #undef objc_unretainedObject
1374 #undef objc_unretainedPointer
1375
// convert objc_objectptr_t to id, callee must take ownership.
id objc_retainedObject(objc_objectptr_t pointer)
{
    return (id)pointer;
}
1378
// convert objc_objectptr_t to id, without ownership transfer.
id objc_unretainedObject(objc_objectptr_t pointer)
{
    return (id)pointer;
}
1381
// convert id to objc_objectptr_t, no ownership transfer.
objc_objectptr_t objc_unretainedPointer(id object)
{
    return object;
}
1384
1385
// One-time setup for automatic reference counting support: initialize
// the autorelease pool page machinery and the side tables (used for
// external reference counts and weak references).
void arr_init(void) 
{
    AutoreleasePoolPage::init();
    SideTable::init();
}
1391
// Root class implementation. This provides only the runtime-level
// behavior; several methods below are replaced at runtime by
// CoreFoundation, ObjectAlloc, or NSZombies (noted individually).
@implementation NSObject

+ (void)load {
    // Finish garbage-collection setup if this process runs under GC.
    if (UseGC) gc_init2();
}

+ (void)initialize {
}

+ (id)self {
    return (id)self;
}

- (id)self {
    return self;
}

+ (Class)class {
    return self;
}

- (Class)class {
    return object_getClass(self);
}

+ (Class)superclass {
    return class_getSuperclass(self);
}

- (Class)superclass {
    return class_getSuperclass([self class]);
}

+ (BOOL)isMemberOfClass:(Class)cls {
    // For a class object, object_getClass returns the metaclass.
    return object_getClass((id)self) == cls;
}

- (BOOL)isMemberOfClass:(Class)cls {
    return [self class] == cls;
}

+ (BOOL)isKindOfClass:(Class)cls {
    // Walks the metaclass hierarchy, since the receiver is a class.
    for (Class tcls = object_getClass((id)self); tcls; tcls = class_getSuperclass(tcls)) {
        if (tcls == cls) return YES;
    }
    return NO;
}

- (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = [self class]; tcls; tcls = class_getSuperclass(tcls)) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isSubclassOfClass:(Class)cls {
    // A class counts as a subclass of itself.
    for (Class tcls = self; tcls; tcls = class_getSuperclass(tcls)) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isAncestorOfObject:(NSObject *)obj {
    for (Class tcls = [obj class]; tcls; tcls = class_getSuperclass(tcls)) {
        if (tcls == self) return YES;
    }
    return NO;
}

+ (BOOL)instancesRespondToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector(self, sel);
}

+ (BOOL)respondsToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector(object_getClass((id)self), sel);
}

- (BOOL)respondsToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector([self class], sel);
}

+ (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    // Conformance is inherited: check every superclass too.
    for (Class tcls = self; tcls; tcls = class_getSuperclass(tcls)) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

- (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = [self class]; tcls; tcls = class_getSuperclass(tcls)) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

+ (NSUInteger)hash {
    return _objc_rootHash(self);
}

- (NSUInteger)hash {
    return _objc_rootHash(self);
}

+ (BOOL)isEqual:(id)obj {
    // Default equality is pointer identity.
    return obj == (id)self;
}

- (BOOL)isEqual:(id)obj {
    return obj == self;
}


+ (BOOL)isFault {
    return NO;
}

- (BOOL)isFault {
    return NO;
}

+ (BOOL)isProxy {
    return NO;
}

- (BOOL)isProxy {
    return NO;
}

+ (BOOL)isBlock {
    return NO;
}

- (BOOL)isBlock {
    return NO;
}


+ (IMP)instanceMethodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return class_getMethodImplementation(self, sel);
}

+ (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return class_getMethodImplementation(object_getClass((id)self), sel);
}

- (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return class_getMethodImplementation([self class], sel);
}

+ (BOOL)resolveClassMethod:(SEL)sel {
    return NO;
}

+ (BOOL)resolveInstanceMethod:(SEL)sel {
    return NO;
}

// Replaced by CF (throws an NSException)
// NOTE(review): the message says "instance" even though this is the
// class variant; CF's replacement is what users normally see.
+ (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p", 
                class_getName(self), sel_getName(sel), self);
}

// Replaced by CF (throws an NSException)
- (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p", 
                object_getClassName(self), sel_getName(sel), self);
}


+ (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
}

- (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)(self, sel);
}

- (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
}

- (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
}


// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
- (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("-[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

+ (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

- (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

+ (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

- (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}


// Replaced by CF (returns an NSString)
+ (NSString *)description {
    return nil;
}

// Replaced by CF (returns an NSString)
- (NSString *)description {
    return nil;
}

+ (NSString *)debugDescription {
    return [self description];
}

- (NSString *)debugDescription {
    return [self description];
}


+ (id)new {
    return [[self alloc] init];
}

+ (id)retain {
    // Class objects are never refcounted.
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)retain
    __attribute__((aligned(16)))
{
    // Tagged pointers are never refcounted.
    if (OBJC_IS_TAGGED_PTR(self)) return self;

    SideTable *table = SideTable::tableForPointer(self);

    // Fast path: uncontended side-table lock. The stored refcount is
    // kept shifted, so one reference is += 2; the low bit appears to be
    // reserved as a flag (see _objc_rootReleaseWasZero) — TODO confirm.
    if (OSSpinLockTry(&table->slock)) {
        table->refcnts[DISGUISE(self)] += 2;
        OSSpinLockUnlock(&table->slock);
        return self;
    }
    // Contended: take the slow path.
    return _objc_rootRetain_slow(self);
}


+ (BOOL)_tryRetain {
    return YES;
}

// Replaced by ObjectAlloc
- (BOOL)_tryRetain {
    return _objc_rootTryRetain(self);
}

+ (BOOL)_isDeallocating {
    return NO;
}

- (BOOL)_isDeallocating {
    return _objc_rootIsDeallocating(self);
}

+ (BOOL)allowsWeakReference {
    return YES;
}

+ (BOOL)retainWeakReference {
    return YES;
}

- (BOOL)allowsWeakReference {
    // Objects already deallocating must not be weakly referenced.
    return ! [self _isDeallocating];
}

- (BOOL)retainWeakReference {
    return [self _tryRetain];
}

+ (oneway void)release {
}

// Replaced by ObjectAlloc
- (oneway void)release
    __attribute__((aligned(16)))
{
    // tagged pointer check is inside _objc_rootReleaseWasZero().

    if (_objc_rootReleaseWasZero(self) == false) {
        return;
    }
    // Refcount hit zero: deallocate now.
    [self dealloc];
}

+ (id)autorelease {
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)autorelease
    __attribute__((aligned(16)))
{
    // no tag check here: tagged pointers DO use fast autoreleasing

#if SUPPORT_RETURN_AUTORELEASE
    assert(tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY) == NULL);

    // If the caller cooperates with the return-value handoff, park self
    // in thread-local storage instead of entering the autorelease pool.
    if (callerAcceptsFastAutorelease(__builtin_return_address(0))) {
        tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, self);
        return self;
    }
#endif
    return _objc_rootAutorelease2(self);
}

+ (NSUInteger)retainCount {
    // Class objects report an effectively infinite retain count.
    return ULONG_MAX;
}

- (NSUInteger)retainCount {
    return _objc_rootRetainCount(self);
}

+ (id)alloc {
    return _objc_rootAlloc(self);
}

// Replaced by ObjectAlloc
+ (id)allocWithZone:(NSZone *)zone {
    return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
}

// Replaced by CF (throws an NSException)
+ (id)init {
    return (id)self;
}

- (id)init {
    return _objc_rootInit(self);
}

// Replaced by CF (throws an NSException)
+ (void)dealloc {
}

// Replaced by NSZombies
- (void)dealloc {
    _objc_rootDealloc(self);
}

// Replaced by CF (throws an NSException)
+ (void)finalize {
}

- (void)finalize {
    _objc_rootFinalize(self);
}

+ (NSZone *)zone {
    return (NSZone *)_objc_rootZone(self);
}

- (NSZone *)zone {
    return (NSZone *)_objc_rootZone(self);
}

+ (id)copy {
    // Copying a class object returns the class itself.
    return (id)self;
}

+ (id)copyWithZone:(NSZone *)zone {
    return (id)self;
}

- (id)copy {
    return [(id)self copyWithZone:NULL];
}

+ (id)mutableCopy {
    return (id)self;
}

+ (id)mutableCopyWithZone:(NSZone *)zone {
    return (id)self;
}

- (id)mutableCopy {
    return [(id)self mutableCopyWithZone:NULL];
}

@end
1825
// ARC entry point for retain. nil and tagged pointers are returned
// unchanged (they are never refcounted). Classes that override
// retain/release get a real message send; all others bypass
// objc_msgSend and go straight to the root implementation.
__attribute__((aligned(16)))
id 
objc_retain(id obj)
{
    if (!obj || OBJC_IS_TAGGED_PTR(obj)) {
        goto out_slow;
    }
#if __OBJC2__
    if (((class_t *)obj->isa)->hasCustomRR()) {
        return [obj retain];
    }
    return bypass_msgSend_retain(obj);
#else
    return [obj retain];
#endif
 out_slow:
    // clang really wants to reorder the "mov %rdi, %rax" early
    // force better code gen with a data barrier
    asm volatile("");
    return obj;
}
1847
// ARC entry point for release. nil and tagged pointers are ignored
// outright. Classes with custom retain/release get a real message send;
// all others bypass objc_msgSend.
__attribute__((aligned(16)))
void 
objc_release(id obj)
{
    if (!obj || OBJC_IS_TAGGED_PTR(obj)) {
        return;
    }
#if __OBJC2__
    if (((class_t *)obj->isa)->hasCustomRR()) {
        return (void)[obj release];
    }
    return bypass_msgSend_release(obj);
#else
    [obj release];
#endif
}
1864
// ARC entry point for autorelease. nil and tagged pointers are returned
// unchanged. Classes with custom retain/release get a real message
// send; all others bypass objc_msgSend.
__attribute__((aligned(16)))
id 
objc_autorelease(id obj)
{
    if (!obj || OBJC_IS_TAGGED_PTR(obj)) {
        goto out_slow;
    }
#if __OBJC2__
    if (((class_t *)obj->isa)->hasCustomRR()) {
        return [obj autorelease];
    }
    return bypass_msgSend_autorelease(obj);
#else
    return [obj autorelease];
#endif
 out_slow:
    // clang really wants to reorder the "mov %rdi, %rax" early
    // force better code gen with a data barrier
    asm volatile("");
    return obj;
}
1886
// Root-class retain for non-nil, non-GC objects. Tagged pointers are
// returned unchanged; everything else skips objc_msgSend.
id
_objc_rootRetain(id obj)
{
    assert(obj);
    assert(!UseGC);

    if (OBJC_IS_TAGGED_PTR(obj)) return obj;

    return bypass_msgSend_retain(obj);
}
1897
// Root-class release for non-nil, non-GC objects. Tagged pointers are
// ignored; everything else skips objc_msgSend.
void
_objc_rootRelease(id obj)
{
    assert(obj);
    assert(!UseGC);

    if (OBJC_IS_TAGGED_PTR(obj)) return;

    bypass_msgSend_release(obj);
}
1908
// Root-class autorelease. Under GC, autorelease is a no-op and obj is
// returned as-is. Unlike retain/release, tagged pointers are NOT
// short-circuited here: they participate in fast autoreleasing.
id
_objc_rootAutorelease(id obj)
{
    assert(obj); // root classes shouldn't get here, since objc_msgSend ignores nil
    // assert(!UseGC);

    if (UseGC) {
        return obj;
    }

    // no tag check here: tagged pointers DO use fast autoreleasing

    return bypass_msgSend_autorelease(obj);
}