]> git.saurik.com Git - apple/objc4.git/blame - runtime/NSObject.mm
objc4-647.tar.gz
[apple/objc4.git] / runtime / NSObject.mm
CommitLineData
8972963c 1/*
cd5f04f5 2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
8972963c
A
3 *
4 * @APPLE_LICENSE_HEADER_START@
cd5f04f5 5 *
8972963c
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
cd5f04f5 12 *
8972963c
A
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
cd5f04f5 20 *
8972963c
A
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
cd5f04f5 24#include "objc-private.h"
7257e56c
A
25#include "NSObject.h"
26
27#include "objc-weak.h"
8972963c 28#include "llvm-DenseMap.h"
7257e56c 29#include "NSObject.h"
8972963c 30
cd5f04f5 31#include <malloc/malloc.h>
8972963c
A
32#include <stdint.h>
33#include <stdbool.h>
8972963c
A
34#include <mach/mach.h>
35#include <mach-o/dyld.h>
36#include <mach-o/nlist.h>
37#include <sys/types.h>
38#include <sys/mman.h>
39#include <libkern/OSAtomic.h>
40#include <Block.h>
41#include <map>
42#include <execinfo.h>
43
cd5f04f5
A
44@interface NSInvocation
45- (SEL)selector;
46@end
47
cd5f04f5
A
48
49#if TARGET_OS_MAC
50
51// NSObject used to be in Foundation/CoreFoundation.
52
53#define SYMBOL_ELSEWHERE_IN_3(sym, vers, n) \
54 OBJC_EXPORT const char elsewhere_ ##n __asm__("$ld$hide$os" #vers "$" #sym); const char elsewhere_ ##n = 0
55#define SYMBOL_ELSEWHERE_IN_2(sym, vers, n) \
56 SYMBOL_ELSEWHERE_IN_3(sym, vers, n)
57#define SYMBOL_ELSEWHERE_IN(sym, vers) \
58 SYMBOL_ELSEWHERE_IN_2(sym, vers, __COUNTER__)
59
60#if __OBJC2__
61# define NSOBJECT_ELSEWHERE_IN(vers) \
62 SYMBOL_ELSEWHERE_IN(_OBJC_CLASS_$_NSObject, vers); \
63 SYMBOL_ELSEWHERE_IN(_OBJC_METACLASS_$_NSObject, vers); \
64 SYMBOL_ELSEWHERE_IN(_OBJC_IVAR_$_NSObject.isa, vers)
65#else
66# define NSOBJECT_ELSEWHERE_IN(vers) \
67 SYMBOL_ELSEWHERE_IN(.objc_class_name_NSObject, vers)
68#endif
69
70#if TARGET_OS_IPHONE
71 NSOBJECT_ELSEWHERE_IN(5.1);
72 NSOBJECT_ELSEWHERE_IN(5.0);
73 NSOBJECT_ELSEWHERE_IN(4.3);
74 NSOBJECT_ELSEWHERE_IN(4.2);
75 NSOBJECT_ELSEWHERE_IN(4.1);
76 NSOBJECT_ELSEWHERE_IN(4.0);
77 NSOBJECT_ELSEWHERE_IN(3.2);
78 NSOBJECT_ELSEWHERE_IN(3.1);
79 NSOBJECT_ELSEWHERE_IN(3.0);
80 NSOBJECT_ELSEWHERE_IN(2.2);
81 NSOBJECT_ELSEWHERE_IN(2.1);
82 NSOBJECT_ELSEWHERE_IN(2.0);
83#else
84 NSOBJECT_ELSEWHERE_IN(10.7);
85 NSOBJECT_ELSEWHERE_IN(10.6);
86 NSOBJECT_ELSEWHERE_IN(10.5);
87 NSOBJECT_ELSEWHERE_IN(10.4);
88 NSOBJECT_ELSEWHERE_IN(10.3);
89 NSOBJECT_ELSEWHERE_IN(10.2);
90 NSOBJECT_ELSEWHERE_IN(10.1);
91 NSOBJECT_ELSEWHERE_IN(10.0);
92#endif
93
94// TARGET_OS_MAC
95#endif
96
8972963c
A
97
98/***********************************************************************
99* Weak ivar support
100**********************************************************************/
101
cd5f04f5
A
102static id defaultBadAllocHandler(Class cls)
103{
104 _objc_fatal("attempt to allocate object of class '%s' failed",
8070259c 105 cls->nameForLogging());
cd5f04f5
A
106}
107
108static id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
109
110static id callBadAllocHandler(Class cls)
111{
112 // fixme add re-entrancy protection in case allocation fails inside handler
113 return (*badAllocHandler)(cls);
114}
115
116void _objc_setBadAllocHandler(id(*newHandler)(Class))
117{
118 badAllocHandler = newHandler;
119}
120
8972963c 121
8972963c
A
122namespace {
123
124#if TARGET_OS_EMBEDDED
8972963c 125# define SIDE_TABLE_STRIPE 8
8070259c
A
126#else
127# define SIDE_TABLE_STRIPE 64
8972963c
A
128#endif
129
130// should be a multiple of cache line size (64)
7257e56c
A
131#define SIDE_TABLE_SIZE 128
132
133// The order of these bits is important.
8070259c
A
134#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
135#define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
136#define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit
137#define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1))
7257e56c
A
138
139#define SIDE_TABLE_RC_SHIFT 2
8070259c 140#define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
7257e56c 141
8070259c
A
142// RefcountMap disguises its pointers because we
143// don't want the table to act as a root for `leaks`.
144typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;
8972963c
A
145
146class SideTable {
147private:
148 static uint8_t table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];
149
150public:
7257e56c 151 spinlock_t slock;
8972963c
A
152 RefcountMap refcnts;
153 weak_table_t weak_table;
154
7257e56c 155 SideTable() : slock(SPINLOCK_INITIALIZER)
8972963c
A
156 {
157 memset(&weak_table, 0, sizeof(weak_table));
158 }
159
160 ~SideTable()
161 {
162 // never delete side_table in case other threads retain during exit
163 assert(0);
164 }
165
166 static SideTable *tableForPointer(const void *p)
167 {
168# if SIDE_TABLE_STRIPE == 1
169 return (SideTable *)table_buf;
170# else
171 uintptr_t a = (uintptr_t)p;
172 int index = ((a >> 4) ^ (a >> 9)) & (SIDE_TABLE_STRIPE - 1);
173 return (SideTable *)&table_buf[index * SIDE_TABLE_SIZE];
174# endif
175 }
176
177 static void init() {
178 // use placement new instead of static ctor to avoid dtor at exit
179 for (int i = 0; i < SIDE_TABLE_STRIPE; i++) {
180 new (&table_buf[i * SIDE_TABLE_SIZE]) SideTable;
181 }
182 }
183};
184
185STATIC_ASSERT(sizeof(SideTable) <= SIDE_TABLE_SIZE);
186__attribute__((aligned(SIDE_TABLE_SIZE))) uint8_t
187SideTable::table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];
188
8972963c
A
189// anonymous namespace
190};
191
192
193//
cd5f04f5 194// The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
8972963c
A
195//
196
197id objc_retainBlock(id x) {
8972963c
A
198 return (id)_Block_copy(x);
199}
200
201//
202// The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
203//
204
205BOOL objc_should_deallocate(id object) {
206 return YES;
207}
208
8972963c
A
209id
210objc_retain_autorelease(id obj)
211{
cd5f04f5 212 return objc_autorelease(objc_retain(obj));
8972963c
A
213}
214
8070259c
A
215
216void
217objc_storeStrong(id *location, id obj)
218{
219 id prev = *location;
220 if (obj == prev) {
221 return;
222 }
223 objc_retain(obj);
224 *location = obj;
225 objc_release(prev);
226}
227
228
7257e56c
A
229/**
230 * This function stores a new value into a __weak variable. It would
231 * be used anywhere a __weak variable is the target of an assignment.
232 *
233 * @param location The address of the weak pointer itself
234 * @param newObj The new object this weak ptr should now point to
235 *
236 * @return \e newObj
237 */
8972963c
A
238id
239objc_storeWeak(id *location, id newObj)
240{
241 id oldObj;
242 SideTable *oldTable;
243 SideTable *newTable;
7257e56c 244 spinlock_t *lock1;
8972963c 245#if SIDE_TABLE_STRIPE > 1
7257e56c 246 spinlock_t *lock2;
8972963c
A
247#endif
248
8972963c
A
249 // Acquire locks for old and new values.
250 // Order by lock address to prevent lock ordering problems.
251 // Retry if the old value changes underneath us.
252 retry:
253 oldObj = *location;
254
255 oldTable = SideTable::tableForPointer(oldObj);
256 newTable = SideTable::tableForPointer(newObj);
257
258 lock1 = &newTable->slock;
259#if SIDE_TABLE_STRIPE > 1
260 lock2 = &oldTable->slock;
261 if (lock1 > lock2) {
7257e56c 262 spinlock_t *temp = lock1;
8972963c
A
263 lock1 = lock2;
264 lock2 = temp;
265 }
7257e56c 266 if (lock1 != lock2) spinlock_lock(lock2);
8972963c 267#endif
7257e56c 268 spinlock_lock(lock1);
8972963c
A
269
270 if (*location != oldObj) {
7257e56c 271 spinlock_unlock(lock1);
8972963c 272#if SIDE_TABLE_STRIPE > 1
7257e56c 273 if (lock1 != lock2) spinlock_unlock(lock2);
8972963c
A
274#endif
275 goto retry;
276 }
277
7257e56c
A
278 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
279 newObj = weak_register_no_lock(&newTable->weak_table, newObj, location);
280 // weak_register_no_lock returns nil if weak store should be rejected
281
282 // Set is-weakly-referenced bit in refcount table.
283 if (newObj && !newObj->isTaggedPointer()) {
8070259c 284 newObj->setWeaklyReferenced_nolock();
8972963c 285 }
7257e56c 286
8972963c
A
287 // Do not set *location anywhere else. That would introduce a race.
288 *location = newObj;
289
7257e56c 290 spinlock_unlock(lock1);
8972963c 291#if SIDE_TABLE_STRIPE > 1
7257e56c 292 if (lock1 != lock2) spinlock_unlock(lock2);
8972963c
A
293#endif
294
295 return newObj;
296}
297
298id
299objc_loadWeakRetained(id *location)
300{
301 id result;
302
303 SideTable *table;
7257e56c 304 spinlock_t *lock;
8972963c
A
305
306 retry:
307 result = *location;
7257e56c 308 if (!result) return nil;
8972963c
A
309
310 table = SideTable::tableForPointer(result);
311 lock = &table->slock;
312
7257e56c 313 spinlock_lock(lock);
8972963c 314 if (*location != result) {
7257e56c 315 spinlock_unlock(lock);
8972963c
A
316 goto retry;
317 }
318
7257e56c 319 result = weak_read_no_lock(&table->weak_table, location);
8972963c 320
7257e56c 321 spinlock_unlock(lock);
8972963c
A
322 return result;
323}
324
7257e56c
A
325/**
326 * This loads the object referenced by a weak pointer and returns it, after
327 * retaining and autoreleasing the object to ensure that it stays alive
328 * long enough for the caller to use it. This function would be used
329 * anywhere a __weak variable is used in an expression.
330 *
331 * @param location The weak pointer address
332 *
333 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
334 */
8972963c
A
335id
336objc_loadWeak(id *location)
337{
7257e56c 338 if (!*location) return nil;
8972963c
A
339 return objc_autorelease(objc_loadWeakRetained(location));
340}
341
7257e56c
A
342/**
343 * Initialize a fresh weak pointer to some object location.
344 * It would be used for code like:
345 *
346 * (The nil case)
347 * __weak id weakPtr;
348 * (The non-nil case)
349 * NSObject *o = ...;
350 * __weak id weakPtr = o;
351 *
352 * @param addr Address of __weak ptr.
353 * @param val Object ptr.
354 */
8972963c
A
355id
356objc_initWeak(id *addr, id val)
357{
cd5f04f5 358 *addr = 0;
7257e56c 359 if (!val) return nil;
cd5f04f5 360 return objc_storeWeak(addr, val);
8972963c
A
361}
362
7257e56c
A
363__attribute__((noinline, used)) void
364objc_destroyWeak_slow(id *addr)
365{
366 SideTable *oldTable;
367 spinlock_t *lock;
368 id oldObj;
369
370 // No need to see weak refs, we are destroying
371
372 // Acquire lock for old value only
373 // retry if the old value changes underneath us
374 retry:
375 oldObj = *addr;
376 oldTable = SideTable::tableForPointer(oldObj);
377
378 lock = &oldTable->slock;
379 spinlock_lock(lock);
380
381 if (*addr != oldObj) {
382 spinlock_unlock(lock);
383 goto retry;
384 }
385
386 weak_unregister_no_lock(&oldTable->weak_table, oldObj, addr);
387
388 spinlock_unlock(lock);
389}
390
391/**
392 * Destroys the relationship between a weak pointer
393 * and the object it is referencing in the internal weak
394 * table. If the weak pointer is not referencing anything,
395 * there is no need to edit the weak table.
396 *
397 * @param addr The weak pointer address.
398 */
8972963c
A
399void
400objc_destroyWeak(id *addr)
401{
7257e56c
A
402 if (!*addr) return;
403 return objc_destroyWeak_slow(addr);
8972963c
A
404}
405
7257e56c
A
406/**
407 * This function copies a weak pointer from one location to another,
408 * when the destination doesn't already contain a weak pointer. It
409 * would be used for code like:
410 *
411 * __weak id weakPtr1 = ...;
412 * __weak id weakPtr2 = weakPtr1;
413 *
414 * @param to weakPtr2 in this ex
415 * @param from weakPtr1
416 */
8972963c
A
417void
418objc_copyWeak(id *to, id *from)
419{
cd5f04f5
A
420 id val = objc_loadWeakRetained(from);
421 objc_initWeak(to, val);
422 objc_release(val);
8972963c
A
423}
424
7257e56c
A
425/**
426 * Move a weak pointer from one location to another.
427 * Before the move, the destination must be uninitialized.
428 * After the move, the source is nil.
429 */
8972963c
A
430void
431objc_moveWeak(id *to, id *from)
432{
cd5f04f5 433 objc_copyWeak(to, from);
7257e56c 434 objc_storeWeak(from, 0);
8972963c
A
435}
436
437
8070259c
A
438/***********************************************************************
439 Autorelease pool implementation
440
8972963c
A
441 A thread's autorelease pool is a stack of pointers.
442 Each pointer is either an object to release, or POOL_SENTINEL which is
443 an autorelease pool boundary.
444 A pool token is a pointer to the POOL_SENTINEL for that pool. When
445 the pool is popped, every object hotter than the sentinel is released.
446 The stack is divided into a doubly-linked list of pages. Pages are added
447 and deleted as necessary.
448 Thread-local storage points to the hot page, where newly autoreleased
449 objects are stored.
8070259c 450**********************************************************************/
8972963c 451
7257e56c 452BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
8972963c
A
453
454namespace {
455
456struct magic_t {
457 static const uint32_t M0 = 0xA1A1A1A1;
458# define M1 "AUTORELEASE!"
459 static const size_t M1_len = 12;
460 uint32_t m[4];
461
462 magic_t() {
463 assert(M1_len == strlen(M1));
464 assert(M1_len == 3 * sizeof(m[1]));
465
466 m[0] = M0;
467 strncpy((char *)&m[1], M1, M1_len);
468 }
469
470 ~magic_t() {
471 m[0] = m[1] = m[2] = m[3] = 0;
472 }
473
474 bool check() const {
475 return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
476 }
477
478 bool fastcheck() const {
479#ifdef NDEBUG
480 return (m[0] == M0);
481#else
482 return check();
483#endif
484 }
485
486# undef M1
487};
488
489
490// Set this to 1 to mprotect() autorelease pool contents
491#define PROTECT_AUTORELEASEPOOL 0
492
493class AutoreleasePoolPage
494{
495
7257e56c 496#define POOL_SENTINEL nil
8972963c
A
497 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
498 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
499 static size_t const SIZE =
500#if PROTECT_AUTORELEASEPOOL
8070259c 501 PAGE_MAX_SIZE; // must be multiple of vm page size
8972963c 502#else
8070259c 503 PAGE_MAX_SIZE; // size and alignment, power of 2
8972963c
A
504#endif
505 static size_t const COUNT = SIZE / sizeof(id);
506
507 magic_t const magic;
508 id *next;
509 pthread_t const thread;
510 AutoreleasePoolPage * const parent;
511 AutoreleasePoolPage *child;
512 uint32_t const depth;
513 uint32_t hiwat;
514
515 // SIZE-sizeof(*this) bytes of contents follow
516
517 static void * operator new(size_t size) {
518 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
519 }
520 static void operator delete(void * p) {
521 return free(p);
522 }
523
524 inline void protect() {
525#if PROTECT_AUTORELEASEPOOL
526 mprotect(this, SIZE, PROT_READ);
527 check();
528#endif
529 }
530
531 inline void unprotect() {
532#if PROTECT_AUTORELEASEPOOL
533 check();
534 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
535#endif
536 }
537
538 AutoreleasePoolPage(AutoreleasePoolPage *newParent)
539 : magic(), next(begin()), thread(pthread_self()),
7257e56c 540 parent(newParent), child(nil),
8972963c
A
541 depth(parent ? 1+parent->depth : 0),
542 hiwat(parent ? parent->hiwat : 0)
543 {
544 if (parent) {
545 parent->check();
546 assert(!parent->child);
547 parent->unprotect();
548 parent->child = this;
549 parent->protect();
550 }
551 protect();
552 }
553
554 ~AutoreleasePoolPage()
555 {
556 check();
557 unprotect();
558 assert(empty());
559
560 // Not recursive: we don't want to blow out the stack
561 // if a thread accumulates a stupendous amount of garbage
562 assert(!child);
563 }
564
565
566 void busted(bool die = true)
567 {
8070259c 568 magic_t right;
8972963c
A
569 (die ? _objc_fatal : _objc_inform)
570 ("autorelease pool page %p corrupted\n"
8070259c
A
571 " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
572 " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
573 " pthread %p\n"
574 " should be %p\n",
575 this,
576 magic.m[0], magic.m[1], magic.m[2], magic.m[3],
577 right.m[0], right.m[1], right.m[2], right.m[3],
578 this->thread, pthread_self());
8972963c
A
579 }
580
581 void check(bool die = true)
582 {
583 if (!magic.check() || !pthread_equal(thread, pthread_self())) {
584 busted(die);
585 }
586 }
587
588 void fastcheck(bool die = true)
589 {
590 if (! magic.fastcheck()) {
591 busted(die);
592 }
593 }
594
595
596 id * begin() {
597 return (id *) ((uint8_t *)this+sizeof(*this));
598 }
599
600 id * end() {
601 return (id *) ((uint8_t *)this+SIZE);
602 }
603
604 bool empty() {
605 return next == begin();
606 }
607
608 bool full() {
609 return next == end();
610 }
611
612 bool lessThanHalfFull() {
613 return (next - begin() < (end() - begin()) / 2);
614 }
615
616 id *add(id obj)
617 {
618 assert(!full());
619 unprotect();
8070259c 620 id *ret = next; // faster than `return next-1` because of aliasing
8972963c
A
621 *next++ = obj;
622 protect();
8070259c 623 return ret;
8972963c
A
624 }
625
626 void releaseAll()
627 {
628 releaseUntil(begin());
629 }
630
631 void releaseUntil(id *stop)
632 {
633 // Not recursive: we don't want to blow out the stack
634 // if a thread accumulates a stupendous amount of garbage
635
636 while (this->next != stop) {
637 // Restart from hotPage() every time, in case -release
638 // autoreleased more objects
639 AutoreleasePoolPage *page = hotPage();
640
641 // fixme I think this `while` can be `if`, but I can't prove it
642 while (page->empty()) {
643 page = page->parent;
644 setHotPage(page);
645 }
646
647 page->unprotect();
648 id obj = *--page->next;
649 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
650 page->protect();
651
652 if (obj != POOL_SENTINEL) {
653 objc_release(obj);
654 }
655 }
656
657 setHotPage(this);
658
659#ifndef NDEBUG
660 // we expect any children to be completely empty
661 for (AutoreleasePoolPage *page = child; page; page = page->child) {
662 assert(page->empty());
663 }
664#endif
665 }
666
667 void kill()
668 {
669 // Not recursive: we don't want to blow out the stack
670 // if a thread accumulates a stupendous amount of garbage
671 AutoreleasePoolPage *page = this;
672 while (page->child) page = page->child;
673
674 AutoreleasePoolPage *deathptr;
675 do {
676 deathptr = page;
677 page = page->parent;
678 if (page) {
679 page->unprotect();
7257e56c 680 page->child = nil;
8972963c
A
681 page->protect();
682 }
683 delete deathptr;
684 } while (deathptr != this);
685 }
686
687 static void tls_dealloc(void *p)
688 {
689 // reinstate TLS value while we work
690 setHotPage((AutoreleasePoolPage *)p);
691 pop(0);
7257e56c 692 setHotPage(nil);
8972963c
A
693 }
694
695 static AutoreleasePoolPage *pageForPointer(const void *p)
696 {
697 return pageForPointer((uintptr_t)p);
698 }
699
700 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
701 {
702 AutoreleasePoolPage *result;
703 uintptr_t offset = p % SIZE;
704
705 assert(offset >= sizeof(AutoreleasePoolPage));
706
707 result = (AutoreleasePoolPage *)(p - offset);
708 result->fastcheck();
709
710 return result;
711 }
712
713
714 static inline AutoreleasePoolPage *hotPage()
715 {
716 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
cd5f04f5 717 tls_get_direct(key);
8972963c
A
718 if (result) result->fastcheck();
719 return result;
720 }
721
722 static inline void setHotPage(AutoreleasePoolPage *page)
723 {
724 if (page) page->fastcheck();
cd5f04f5 725 tls_set_direct(key, (void *)page);
8972963c
A
726 }
727
728 static inline AutoreleasePoolPage *coldPage()
729 {
730 AutoreleasePoolPage *result = hotPage();
731 if (result) {
732 while (result->parent) {
733 result = result->parent;
734 result->fastcheck();
735 }
736 }
737 return result;
738 }
739
740
741 static inline id *autoreleaseFast(id obj)
742 {
743 AutoreleasePoolPage *page = hotPage();
744 if (page && !page->full()) {
745 return page->add(obj);
8070259c
A
746 } else if (page) {
747 return autoreleaseFullPage(obj, page);
8972963c 748 } else {
8070259c 749 return autoreleaseNoPage(obj);
8972963c
A
750 }
751 }
752
753 static __attribute__((noinline))
8070259c 754 id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
8972963c 755 {
8070259c
A
756 // The hot page is full.
757 // Step to the next non-full page, adding a new page if necessary.
758 // Then add the object to that page.
759 assert(page == hotPage() && page->full());
8972963c
A
760
761 do {
762 if (page->child) page = page->child;
763 else page = new AutoreleasePoolPage(page);
764 } while (page->full());
765
766 setHotPage(page);
767 return page->add(obj);
768 }
769
8070259c
A
770 static __attribute__((noinline))
771 id *autoreleaseNoPage(id obj)
772 {
773 // No pool in place.
774 assert(!hotPage());
775
776 if (obj != POOL_SENTINEL && DebugMissingPools) {
777 // We are pushing an object with no pool in place,
778 // and no-pool debugging was requested by environment.
779 _objc_inform("MISSING POOLS: Object %p of class %s "
780 "autoreleased with no pool in place - "
781 "just leaking - break on "
782 "objc_autoreleaseNoPool() to debug",
783 (void*)obj, object_getClassName(obj));
784 objc_autoreleaseNoPool(obj);
785 return nil;
786 }
787
788 // Install the first page.
789 AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
790 setHotPage(page);
791
792 // Push an autorelease pool boundary if it wasn't already requested.
793 if (obj != POOL_SENTINEL) {
794 page->add(POOL_SENTINEL);
795 }
796
797 // Push the requested object.
798 return page->add(obj);
799 }
800
8972963c
A
801public:
802 static inline id autorelease(id obj)
803 {
804 assert(obj);
7257e56c 805 assert(!obj->isTaggedPointer());
8972963c
A
806 id *dest __unused = autoreleaseFast(obj);
807 assert(!dest || *dest == obj);
808 return obj;
809 }
810
811
812 static inline void *push()
813 {
8972963c
A
814 id *dest = autoreleaseFast(POOL_SENTINEL);
815 assert(*dest == POOL_SENTINEL);
816 return dest;
817 }
818
819 static inline void pop(void *token)
820 {
821 AutoreleasePoolPage *page;
822 id *stop;
823
824 if (token) {
825 page = pageForPointer(token);
826 stop = (id *)token;
827 assert(*stop == POOL_SENTINEL);
828 } else {
829 // Token 0 is top-level pool
830 page = coldPage();
831 assert(page);
832 stop = page->begin();
833 }
834
835 if (PrintPoolHiwat) printHiwat();
836
837 page->releaseUntil(stop);
838
839 // memory: delete empty children
840 // hysteresis: keep one empty child if this page is more than half full
841 // special case: delete everything for pop(0)
7257e56c
A
842 // special case: delete everything for pop(top) with DebugMissingPools
843 if (!token ||
844 (DebugMissingPools && page->empty() && !page->parent))
845 {
8972963c 846 page->kill();
7257e56c 847 setHotPage(nil);
8972963c
A
848 } else if (page->child) {
849 if (page->lessThanHalfFull()) {
850 page->child->kill();
851 }
852 else if (page->child->child) {
853 page->child->child->kill();
854 }
855 }
856 }
857
858 static void init()
859 {
860 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
861 AutoreleasePoolPage::tls_dealloc);
862 assert(r == 0);
863 }
864
865 void print()
866 {
867 _objc_inform("[%p] ................ PAGE %s %s %s", this,
868 full() ? "(full)" : "",
869 this == hotPage() ? "(hot)" : "",
870 this == coldPage() ? "(cold)" : "");
871 check(false);
872 for (id *p = begin(); p < next; p++) {
873 if (*p == POOL_SENTINEL) {
874 _objc_inform("[%p] ################ POOL %p", p, p);
875 } else {
876 _objc_inform("[%p] %#16lx %s",
877 p, (unsigned long)*p, object_getClassName(*p));
878 }
879 }
880 }
881
882 static void printAll()
883 {
884 _objc_inform("##############");
885 _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());
886
887 AutoreleasePoolPage *page;
888 ptrdiff_t objects = 0;
889 for (page = coldPage(); page; page = page->child) {
890 objects += page->next - page->begin();
891 }
892 _objc_inform("%llu releases pending.", (unsigned long long)objects);
893
894 for (page = coldPage(); page; page = page->child) {
895 page->print();
896 }
897
898 _objc_inform("##############");
899 }
900
901 static void printHiwat()
902 {
903 // Check and propagate high water mark
904 // Ignore high water marks under 256 to suppress noise.
905 AutoreleasePoolPage *p = hotPage();
906 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
907 if (mark > p->hiwat && mark > 256) {
908 for( ; p; p = p->parent) {
909 p->unprotect();
910 p->hiwat = mark;
911 p->protect();
912 }
913
914 _objc_inform("POOL HIGHWATER: new high water mark of %u "
915 "pending autoreleases for thread %p:",
916 mark, pthread_self());
917
918 void *stack[128];
919 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
920 char **sym = backtrace_symbols(stack, count);
921 for (int i = 0; i < count; i++) {
922 _objc_inform("POOL HIGHWATER: %s", sym[i]);
923 }
cd5f04f5 924 free(sym);
8972963c
A
925 }
926 }
927
928#undef POOL_SENTINEL
929};
930
931// anonymous namespace
932};
933
8972963c 934
8070259c
A
935/***********************************************************************
936* Slow paths for inline control
937**********************************************************************/
8972963c 938
8070259c
A
939#if SUPPORT_NONPOINTER_ISA
940
941NEVER_INLINE id
942objc_object::rootRetain_overflow(bool tryRetain)
943{
944 return rootRetain(tryRetain, true);
945}
946
947
948NEVER_INLINE bool
949objc_object::rootRelease_underflow(bool performDealloc)
950{
951 return rootRelease(performDealloc, true);
952}
953
954
955// Slow path of clearDeallocating()
956// for weakly-referenced objects with indexed isa
957NEVER_INLINE void
958objc_object::clearDeallocating_weak()
8972963c 959{
8070259c
A
960 assert(isa.indexed && isa.weakly_referenced);
961
962 SideTable *table = SideTable::tableForPointer(this);
7257e56c 963 spinlock_lock(&table->slock);
8070259c 964 weak_clear_no_lock(&table->weak_table, (id)this);
7257e56c 965 spinlock_unlock(&table->slock);
8070259c 966}
8972963c 967
8070259c
A
968#endif
969
970__attribute__((noinline,used))
971id
972objc_object::rootAutorelease2()
973{
974 assert(!isTaggedPointer());
975 return AutoreleasePoolPage::autorelease((id)this);
976}
977
978
979BREAKPOINT_FUNCTION(
980 void objc_overrelease_during_dealloc_error(void)
981);
982
983
984NEVER_INLINE
985bool
986objc_object::overrelease_error()
987{
988 _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
989 objc_overrelease_during_dealloc_error();
990 return false; // allow rootRelease() to tail-call this
8972963c
A
991}
992
8070259c
A
993
994/***********************************************************************
995* Retain count operations for side table.
996**********************************************************************/
997
998
999#if !NDEBUG
1000// Used to assert that an object is not present in the side table.
8972963c 1001bool
8070259c 1002objc_object::sidetable_present()
8972963c 1003{
8070259c
A
1004 bool result = false;
1005 SideTable *table = SideTable::tableForPointer(this);
1006
1007 spinlock_lock(&table->slock);
1008
1009 RefcountMap::iterator it = table->refcnts.find(this);
1010 if (it != table->refcnts.end()) result = true;
1011
1012 if (weak_is_registered_no_lock(&table->weak_table, (id)this)) result = true;
1013
1014 spinlock_unlock(&table->slock);
1015
1016 return result;
1017}
1018#endif
1019
1020#if SUPPORT_NONPOINTER_ISA
1021
1022void
1023objc_object::sidetable_lock()
1024{
1025 SideTable *table = SideTable::tableForPointer(this);
1026 spinlock_lock(&table->slock);
1027}
1028
1029void
1030objc_object::sidetable_unlock()
1031{
1032 SideTable *table = SideTable::tableForPointer(this);
1033 spinlock_unlock(&table->slock);
1034}
1035
1036
1037// Move the entire retain count to the side table,
1038// as well as isDeallocating and weaklyReferenced.
1039void
1040objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
1041 bool isDeallocating,
1042 bool weaklyReferenced)
1043{
1044 assert(!isa.indexed); // should already be changed to not-indexed
1045 SideTable *table = SideTable::tableForPointer(this);
1046
1047 size_t& refcntStorage = table->refcnts[this];
1048 size_t oldRefcnt = refcntStorage;
1049 // not deallocating - that was in the isa
1050 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1051 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1052
1053 uintptr_t carry;
1054 size_t refcnt = addc(oldRefcnt, extra_rc<<SIDE_TABLE_RC_SHIFT, 0, &carry);
1055 if (carry) refcnt = SIDE_TABLE_RC_PINNED;
1056 if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
1057 if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
1058
1059 refcntStorage = refcnt;
1060}
1061
1062
1063// Move some retain counts to the side table from the isa field.
1064// Returns true if the object is now pinned.
1065bool
1066objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
1067{
1068 assert(isa.indexed);
1069 SideTable *table = SideTable::tableForPointer(this);
1070
1071 size_t& refcntStorage = table->refcnts[this];
1072 size_t oldRefcnt = refcntStorage;
1073 // not deallocating - that is in the isa
1074 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1075 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1076
1077 if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
1078
1079 uintptr_t carry;
1080 size_t newRefcnt =
1081 addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1082 if (carry) {
1083 refcntStorage =
1084 SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
1085 return true;
1086 }
1087 else {
1088 refcntStorage = newRefcnt;
1089 return false;
1090 }
1091}
1092
1093
1094// Move some retain counts from the side table to the isa field.
1095// Returns true if the sidetable retain count is now 0.
1096bool
1097objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
1098{
1099 assert(isa.indexed);
1100 SideTable *table = SideTable::tableForPointer(this);
1101
1102 size_t& refcntStorage = table->refcnts[this];
1103 size_t oldRefcnt = refcntStorage;
1104 // not deallocating - that is in the isa
1105 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1106 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1107
1108 if (oldRefcnt < delta_rc) {
1109 _objc_inform_now_and_on_crash("refcount underflow error for object %p",
1110 this);
1111 _objc_fatal("refcount underflow error for %s %p",
1112 object_getClassName((id)this), this);
1113 }
1114
1115 size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
1116 if (newRefcnt == 0) {
1117 table->refcnts.erase(this);
1118 return true;
1119 }
1120 else {
1121 refcntStorage = newRefcnt;
1122 return false;
1123 }
1124}
1125
1126
1127size_t
1128objc_object::sidetable_getExtraRC_nolock()
1129{
1130 assert(isa.indexed);
1131 SideTable *table = SideTable::tableForPointer(this);
1132 RefcountMap::iterator it = table->refcnts.find(this);
1133 assert(it != table->refcnts.end());
1134 return it->second >> SIDE_TABLE_RC_SHIFT;
1135}
8972963c 1136
8972963c 1137
8070259c
A
1138// SUPPORT_NONPOINTER_ISA
1139#endif
1140
1141
// Out-of-line slow path of sidetable_retain(), taken when the side
// table spinlock is contended. Spins for the lock, then performs the
// same increment as the fast path.
__attribute__((used,noinline,nothrow))
id 
objc_object::sidetable_retain_slow(SideTable *table)
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif

    spinlock_lock(&table->slock);
    size_t& refcntStorage = table->refcnts[this];
    if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
        // Increment unless the count overflowed and is pinned at maximum.
        refcntStorage += SIDE_TABLE_RC_ONE;
    }
    spinlock_unlock(&table->slock);

    return (id)this;
}
1159
1160
// Retain an object whose retain count is kept entirely in the side
// table (no nonpointer isa). Tries the spinlock without spinning;
// on contention, falls back to the out-of-line slow path.
id 
objc_object::sidetable_retain()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif
    SideTable *table = SideTable::tableForPointer(this);

    if (spinlock_trylock(&table->slock)) {
        size_t& refcntStorage = table->refcnts[this];
        if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
            // Increment unless the count overflowed and is pinned.
            refcntStorage += SIDE_TABLE_RC_ONE;
        }
        spinlock_unlock(&table->slock);
        return (id)this;
    }
    return sidetable_retain_slow(table);
}
1179
1180
// Attempt to retain via the side table. Returns false if the object
// is already deallocating and must not be retained.
// NOTE: deliberately does not take the side table lock — see below.
bool
objc_object::sidetable_tryRetain()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif
    SideTable *table = SideTable::tableForPointer(this);

    // NO SPINLOCK HERE
    // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(), 
    // which already acquired the lock on our behalf.

    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table->slock == 0) {
    //     _objc_fatal("Do not call -_tryRetain.");
    // }

    bool result = true;
    RefcountMap::iterator it = table->refcnts.find(this);
    if (it == table->refcnts.end()) {
        // No entry yet: create one holding a single retain.
        table->refcnts[this] = SIDE_TABLE_RC_ONE;
    } else if (it->second & SIDE_TABLE_DEALLOCATING) {
        // Object is deallocating: refuse the retain.
        result = false;
    } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
        // Increment unless the count overflowed and is pinned.
        it->second += SIDE_TABLE_RC_ONE;
    }

    return result;
}
1210
8070259c
A
1211
// Return this object's retain count as stored in the side table,
// plus one for the implicit retain the object holds on itself.
uintptr_t
objc_object::sidetable_retainCount()
{
    SideTable *sidetable = SideTable::tableForPointer(this);

    size_t count = 1;  // implicit retain

    spinlock_lock(&sidetable->slock);
    RefcountMap::iterator entry = sidetable->refcnts.find(this);
    if (entry != sidetable->refcnts.end()) {
        // this is valid for SIDE_TABLE_RC_PINNED too
        count += entry->second >> SIDE_TABLE_RC_SHIFT;
    }
    spinlock_unlock(&sidetable->slock);
    return count;
}
8972963c 1228
8972963c 1229
8070259c
A
// Report whether this object has been marked deallocating in the
// side table.
// NOTE: deliberately does not take the side table lock — see below.
bool 
objc_object::sidetable_isDeallocating()
{
    SideTable *table = SideTable::tableForPointer(this);

    // NO SPINLOCK HERE
    // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(), 
    // which already acquired the lock on our behalf.


    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table->slock == 0) {
    //     _objc_fatal("Do not call -_isDeallocating.");
    // }

    RefcountMap::iterator it = table->refcnts.find(this);
    return (it != table->refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
}
1248
1249
8070259c
A
// Report whether any weak references to this object are registered
// in the side table.
bool 
objc_object::sidetable_isWeaklyReferenced()
{
    SideTable *sidetable = SideTable::tableForPointer(this);

    spinlock_lock(&sidetable->slock);
    RefcountMap::iterator entry = sidetable->refcnts.find(this);
    bool weaklyReferenced = 
        (entry != sidetable->refcnts.end())  &&  
        (entry->second & SIDE_TABLE_WEAKLY_REFERENCED);
    spinlock_unlock(&sidetable->slock);

    return weaklyReferenced;
}
1267
1268
// Mark this object as weakly referenced in the side table.
// The caller must hold the side table lock.
void 
objc_object::sidetable_setWeaklyReferenced_nolock()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif

    SideTable *table = SideTable::tableForPointer(this);

    // Creates the entry (count 0) if none exists yet.
    table->refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
}
1280
1281
// Out-of-line slow path of sidetable_release(), taken when the side
// table spinlock is contended. Returns true if the object should be
// (or was) deallocated by this release.
__attribute__((used,noinline,nothrow))
bool
objc_object::sidetable_release_slow(SideTable *table, bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif
    bool do_dealloc = false;

    spinlock_lock(&table->slock);
    RefcountMap::iterator it = table->refcnts.find(this);
    if (it == table->refcnts.end()) {
        // No entry means retain count zero: this release deallocates.
        do_dealloc = true;
        table->refcnts[this] = SIDE_TABLE_DEALLOCATING;
    } else if (it->second < SIDE_TABLE_DEALLOCATING) {
        // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
        do_dealloc = true;
        it->second |= SIDE_TABLE_DEALLOCATING;
    } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
        // Positive count: decrement (unless pinned at maximum).
        it->second -= SIDE_TABLE_RC_ONE;
    }
    spinlock_unlock(&table->slock);
    if (do_dealloc  &&  performDealloc) {
        // Send -dealloc outside the lock.
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
    }
    return do_dealloc;
}
1309
8972963c 1310
8070259c
A
// Release an object whose retain count is kept entirely in the side
// table. Returns true if the object should be (or was) deallocated.
// Tries the spinlock without spinning; on contention, falls back to
// the out-of-line slow path (which performs the identical transition).
bool
objc_object::sidetable_release(bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.indexed);
#endif
    SideTable *table = SideTable::tableForPointer(this);

    bool do_dealloc = false;

    if (spinlock_trylock(&table->slock)) {
        RefcountMap::iterator it = table->refcnts.find(this);
        if (it == table->refcnts.end()) {
            // No entry means retain count zero: this release deallocates.
            do_dealloc = true;
            table->refcnts[this] = SIDE_TABLE_DEALLOCATING;
        } else if (it->second < SIDE_TABLE_DEALLOCATING) {
            // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
            do_dealloc = true;
            it->second |= SIDE_TABLE_DEALLOCATING;
        } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
            // Positive count: decrement (unless pinned at maximum).
            it->second -= SIDE_TABLE_RC_ONE;
        }
        spinlock_unlock(&table->slock);
        if (do_dealloc  &&  performDealloc) {
            // Send -dealloc outside the lock.
            ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
        }
        return do_dealloc;
    }

    return sidetable_release_slow(table, performDealloc);
}
1342
8972963c 1343
8070259c
A
// Called during deallocation: zero out any weak references to this
// object and remove its side table entry entirely.
void 
objc_object::sidetable_clearDeallocating()
{
    SideTable *table = SideTable::tableForPointer(this);

    // clear any weak table items
    // clear extra retain count and deallocating bit
    // (fixme warn or abort if extra retain count == 0 ?)
    spinlock_lock(&table->slock);
    RefcountMap::iterator it = table->refcnts.find(this);
    if (it != table->refcnts.end()) {
        if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
            // Nil out every registered __weak reference to this object.
            weak_clear_no_lock(&table->weak_table, (id)this);
        }
        table->refcnts.erase(it);
    }
    spinlock_unlock(&table->slock);
}
1362
8070259c
A
1363
1364/***********************************************************************
1365* Optimized retain/release/autorelease entrypoints
1366**********************************************************************/
1367
1368
1369#if __OBJC2__
1370
// Equivalent to [obj retain]. nil and tagged pointers are returned
// unchanged — neither carries a retain count.
__attribute__((aligned(16)))
id 
objc_retain(id obj)
{
    if (obj  &&  !obj->isTaggedPointer()) return obj->retain();
    return obj;
}
1379
cd5f04f5 1380
8070259c
A
// Equivalent to [obj release]. nil and tagged pointers are ignored —
// neither carries a retain count.
__attribute__((aligned(16)))
void 
objc_release(id obj)
{
    if (obj  &&  !obj->isTaggedPointer()) obj->release();
}
1389
1390
// Equivalent to [obj autorelease]. nil and tagged pointers are
// returned unchanged — neither carries a retain count.
__attribute__((aligned(16)))
id 
objc_autorelease(id obj)
{
    if (obj  &&  !obj->isTaggedPointer()) return obj->autorelease();
    return obj;
}
1399
1400
// OBJC2
#else
// not OBJC2


// Legacy-runtime fallbacks: plain message sends. No tagged-pointer or
// nonpointer-isa shortcuts exist on the old runtime.
id objc_retain(id obj) { return [obj retain]; }
void objc_release(id obj) { [obj release]; }
id objc_autorelease(id obj) { return [obj autorelease]; }


#endif
1412
1413
/***********************************************************************
* Basic operations for root class implementations a.k.a. _objc_root*()
**********************************************************************/

// Forwards to the object's tryRetain; fails if it is deallocating.
bool
_objc_rootTryRetain(id obj) 
{
    assert(obj);

    return obj->rootTryRetain();
}

bool
_objc_rootIsDeallocating(id obj) 
{
    assert(obj);

    return obj->rootIsDeallocating();
}


// Called by the runtime during object destruction to clear weak
// references and side table refcount state. Not legal under GC;
// tagged pointers have no such state to clear.
void 
objc_clear_deallocating(id obj) 
{
    assert(obj);
    assert(!UseGC);

    if (obj->isTaggedPointer()) return;
    obj->clearDeallocating();
}


// Release; returns true if this release drops the count to zero
// (i.e. the caller should proceed to deallocate).
bool
_objc_rootReleaseWasZero(id obj)
{
    assert(obj);

    return obj->rootReleaseShouldDealloc();
}


id
_objc_rootAutorelease(id obj)
{
    assert(obj);
    // assert(!UseGC);
    if (UseGC) return obj;  // fixme CF calls this when GC is on

    return obj->rootAutorelease();
}

uintptr_t
_objc_rootRetainCount(id obj)
{
    assert(obj);

    return obj->rootRetainCount();
}


id
_objc_rootRetain(id obj)
{
    assert(obj);

    return obj->rootRetain();
}

void
_objc_rootRelease(id obj)
{
    assert(obj);

    obj->rootRelease();
}
1489
1490
// Base class implementation of +allocWithZone:. cls is not nil.
// Creates a new instance of cls; falls back to the bad-alloc handler
// if allocation fails.
id
_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
{
    id obj;

#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    (void)zone;
    obj = class_createInstance(cls, 0);
#else
    if (!zone || UseGC) {
        obj = class_createInstance(cls, 0);
    }
    else {
        obj = class_createInstanceFromZone(cls, 0, zone);
    }
#endif

    if (!obj) obj = callBadAllocHandler(cls);
    return obj;
}
1512
8070259c
A
1513
// Call [cls alloc] or [cls allocWithZone:nil], with appropriate 
// shortcutting optimizations.
// checkNil: return nil for a nil class instead of crashing.
// allocWithZone: send +allocWithZone: rather than +alloc when no
// shortcut applies.
static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
{
    if (checkNil && !cls) return nil;

#if __OBJC2__
    if (! cls->ISA()->hasCustomAWZ()) {
        // No alloc/allocWithZone implementation. Go straight to the allocator.
        // fixme store hasCustomAWZ in the non-meta class and 
        // add it to canAllocFast's summary
        if (cls->canAllocFast()) {
            // No ctors, raw isa, etc. Go straight to the metal.
            bool dtor = cls->hasCxxDtor();
            id obj = (id)calloc(1, cls->bits.fastInstanceSize());
            if (!obj) return callBadAllocHandler(cls);
            obj->initInstanceIsa(cls, dtor);
            return obj;
        }
        else {
            // Has ctor or raw isa or something. Use the slower path.
            id obj = class_createInstance(cls, 0);
            if (!obj) return callBadAllocHandler(cls);
            return obj;
        }
    }
#endif

    // No shortcuts available.
    if (allocWithZone) return [cls allocWithZone:nil];
    return [cls alloc];
}
1547
8070259c
A
1548
// Base class implementation of +alloc. cls is not nil.
// Calls [cls allocWithZone:nil].
id
_objc_rootAlloc(Class cls)
{
    return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
}

// Calls [cls alloc]. Compiler-generated entry point; cls may be nil.
id
objc_alloc(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
}

// Calls [cls allocWithZone:nil]. Compiler-generated entry point;
// cls may be nil.
id 
objc_allocWithZone(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
}
1570
8070259c 1571
8972963c
A
// Base class implementation of -dealloc. obj is not nil.
void
_objc_rootDealloc(id obj)
{
    assert(obj);

    obj->rootDealloc();
}

// Base class implementation of -finalize. Only legal with GC on;
// fatal otherwise.
void
_objc_rootFinalize(id obj __unused)
{
    assert(obj);
    assert(UseGC);

    if (UseGC) {
        return;
    }
    _objc_fatal("_objc_rootFinalize called with garbage collection off");
}


// Base class implementation of -init. Returns obj unchanged.
id
_objc_rootInit(id obj)
{
    // In practice, it will be hard to rely on this function.
    // Many classes do not properly chain -init calls.
    return obj;
}
1600
1601
8972963c
A
// Base class implementation of -zone. Returns the malloc zone the
// object (notionally) belongs to.
malloc_zone_t *
_objc_rootZone(id obj)
{
    (void)obj;
    if (gc_zone) {
        // GC is on: all objects live in the GC zone.
        return gc_zone;
    }
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    return malloc_default_zone();
#else
    malloc_zone_t *rval = malloc_zone_from_ptr(obj);
    return rval ? rval : malloc_default_zone();
#endif
}
1617
// Base class implementation of -hash: identity hash.
uintptr_t
_objc_rootHash(id obj)
{
    if (UseGC) {
        // Object addresses are not stable under GC; use the
        // externally-maintained hash instead.
        return _object_getExternalHash(obj);
    }
    // The pointer itself is the hash.
    return (uintptr_t)obj;
}
1626
8972963c
A
// Push a new autorelease pool. Returns an opaque token for the
// matching objc_autoreleasePoolPop(). Returns nil (no-op) under GC.
void *
objc_autoreleasePoolPush(void)
{
    if (UseGC) return nil;
    return AutoreleasePoolPage::push();
}

// Pop the pool identified by ctxt, releasing everything autoreleased
// since the matching push. No-op under GC or for a nil token.
void
objc_autoreleasePoolPop(void *ctxt)
{
    if (UseGC) return;

    // fixme rdar://9167170
    if (!ctxt) return;

    AutoreleasePoolPage::pop(ctxt);
}


// Old SPI name, kept for binary compatibility.
void *
_objc_autoreleasePoolPush(void)
{
    return objc_autoreleasePoolPush();
}

// Old SPI name, kept for binary compatibility.
void
_objc_autoreleasePoolPop(void *ctxt)
{
    objc_autoreleasePoolPop(ctxt);
}

// Debugging aid: print the contents of all autorelease pool pages.
void 
_objc_autoreleasePoolPrint(void)
{
    if (UseGC) return;
    AutoreleasePoolPage::printAll();
}
1664
8972963c
A
// Callee side of the ARC return-value handoff: skip the autorelease
// when the caller is known to immediately retain the returned value.
id 
objc_autoreleaseReturnValue(id obj)
{
    if (fastAutoreleaseForReturn(obj)) return obj;

    return objc_autorelease(obj);
}

// retain + autoreleaseReturnValue, fused for the compiler.
id 
objc_retainAutoreleaseReturnValue(id obj)
{
    return objc_autoreleaseReturnValue(objc_retain(obj));
}

// Caller side of the handoff: accept a value passed through
// objc_autoreleaseReturnValue() without an extra retain/autorelease
// round trip when the fast path was taken.
id
objc_retainAutoreleasedReturnValue(id obj)
{
    if (fastRetainFromReturn(obj)) return obj;

    return objc_retain(obj);
}

// retain + autorelease, fused for the compiler.
id
objc_retainAutorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}

// dispatch callback used to send -dealloc on the main thread;
// context is the object to deallocate.
void
_objc_deallocOnMainThreadHelper(void *context)
{
    id obj = (id)context;
    [obj dealloc];
}
1699
cd5f04f5
A
// These names are also defined as macros; remove the macro versions so
// the out-of-line functions below can be defined for binary clients.
#undef objc_retainedObject
#undef objc_unretainedObject
#undef objc_unretainedPointer

// convert objc_objectptr_t to id, callee must take ownership.
id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert objc_objectptr_t to id, without ownership transfer.
id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert id to objc_objectptr_t, no ownership transfer.
objc_objectptr_t objc_unretainedPointer(id object) { return object; }
8972963c
A
1712
1713
// One-time initialization of the automatic retain/release machinery:
// autorelease pool pages and the refcount/weak-reference side tables.
void arr_init(void) 
{
    AutoreleasePoolPage::init();
    SideTable::init();
}
cd5f04f5
A
1719
// Root class implementation. Many methods here are placeholders that
// CoreFoundation, ObjectAlloc, or NSZombies replace at runtime (noted
// inline). Class-side retain/release/copy methods are deliberate
// no-ops: class objects are immortal.
@implementation NSObject

+ (void)load {
    if (UseGC) gc_init2();
}

+ (void)initialize {
}

+ (id)self {
    return (id)self;
}

- (id)self {
    return self;
}

+ (Class)class {
    return self;
}

- (Class)class {
    return object_getClass(self);
}

+ (Class)superclass {
    return self->superclass;
}

- (Class)superclass {
    return [self class]->superclass;
}

+ (BOOL)isMemberOfClass:(Class)cls {
    // For a class object, "class" is the metaclass.
    return object_getClass((id)self) == cls;
}

- (BOOL)isMemberOfClass:(Class)cls {
    return [self class] == cls;
}

+ (BOOL)isKindOfClass:(Class)cls {
    // Walks the metaclass hierarchy.
    for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

- (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isSubclassOfClass:(Class)cls {
    for (Class tcls = self; tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isAncestorOfObject:(NSObject *)obj {
    for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
        if (tcls == self) return YES;
    }
    return NO;
}

+ (BOOL)instancesRespondToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector(self, sel);
}

+ (BOOL)respondsToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector_inst(object_getClass(self), sel, self);
}

- (BOOL)respondsToSelector:(SEL)sel {
    if (!sel) return NO;
    return class_respondsToSelector_inst([self class], sel, self);
}

+ (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = self; tcls; tcls = tcls->superclass) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

- (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

+ (NSUInteger)hash {
    return _objc_rootHash(self);
}

- (NSUInteger)hash {
    return _objc_rootHash(self);
}

+ (BOOL)isEqual:(id)obj {
    return obj == (id)self;
}

- (BOOL)isEqual:(id)obj {
    return obj == self;
}


+ (BOOL)isFault {
    return NO;
}

- (BOOL)isFault {
    return NO;
}

+ (BOOL)isProxy {
    return NO;
}

- (BOOL)isProxy {
    return NO;
}


+ (IMP)instanceMethodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return class_getMethodImplementation(self, sel);
}

+ (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation((id)self, sel);
}

- (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation(self, sel);
}

+ (BOOL)resolveClassMethod:(SEL)sel {
    return NO;
}

+ (BOOL)resolveInstanceMethod:(SEL)sel {
    return NO;
}

// Replaced by CF (throws an NSException)
+ (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p", 
                class_getName(self), sel_getName(sel), self);
}

// Replaced by CF (throws an NSException)
- (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p", 
                object_getClassName(self), sel_getName(sel), self);
}


+ (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
}

- (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)(self, sel);
}

- (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
}

- (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
}


// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
- (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("-[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

+ (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

- (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

+ (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

- (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}


// Replaced by CF (returns an NSString)
+ (NSString *)description {
    return nil;
}

// Replaced by CF (returns an NSString)
- (NSString *)description {
    return nil;
}

+ (NSString *)debugDescription {
    return [self description];
}

- (NSString *)debugDescription {
    return [self description];
}


+ (id)new {
    return [callAlloc(self, false/*checkNil*/) init];
}

+ (id)retain {
    // Class objects are immortal.
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)retain {
    return ((id)self)->rootRetain();
}


+ (BOOL)_tryRetain {
    return YES;
}

// Replaced by ObjectAlloc
- (BOOL)_tryRetain {
    return ((id)self)->rootTryRetain();
}

+ (BOOL)_isDeallocating {
    return NO;
}

- (BOOL)_isDeallocating {
    return ((id)self)->rootIsDeallocating();
}

+ (BOOL)allowsWeakReference { 
    return YES; 
}

+ (BOOL)retainWeakReference { 
    return YES; 
}

- (BOOL)allowsWeakReference { 
    return ! [self _isDeallocating]; 
}

- (BOOL)retainWeakReference { 
    return [self _tryRetain]; 
}

+ (oneway void)release {
    // Class objects are immortal.
}

// Replaced by ObjectAlloc
- (oneway void)release {
    ((id)self)->rootRelease();
}

+ (id)autorelease {
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)autorelease {
    return ((id)self)->rootAutorelease();
}

+ (NSUInteger)retainCount {
    return ULONG_MAX;
}

- (NSUInteger)retainCount {
    return ((id)self)->rootRetainCount();
}

+ (id)alloc {
    return _objc_rootAlloc(self);
}

// Replaced by ObjectAlloc
+ (id)allocWithZone:(struct _NSZone *)zone {
    return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
}

// Replaced by CF (throws an NSException)
+ (id)init {
    return (id)self;
}

- (id)init {
    return _objc_rootInit(self);
}

// Replaced by CF (throws an NSException)
+ (void)dealloc {
}


// Replaced by NSZombies
- (void)dealloc {
    _objc_rootDealloc(self);
}

// Replaced by CF (throws an NSException)
+ (void)finalize {
}

- (void)finalize {
    _objc_rootFinalize(self);
}

+ (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

- (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

+ (id)copy {
    // Class objects are not copied.
    return (id)self;
}

+ (id)copyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)copy {
    return [(id)self copyWithZone:nil];
}

+ (id)mutableCopy {
    return (id)self;
}

+ (id)mutableCopyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)mutableCopy {
    return [(id)self mutableCopyWithZone:nil];
}

@end
2116
cd5f04f5 2117