[apple/objc4.git] / runtime / NSObject.mm (objc4-723)
1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include "objc-private.h"
25 #include "NSObject.h"
26
27 #include "objc-weak.h"
28 #include "llvm-DenseMap.h"
29 #include "NSObject.h"
30
31 #include <malloc/malloc.h>
32 #include <stdint.h>
33 #include <stdbool.h>
34 #include <mach/mach.h>
35 #include <mach-o/dyld.h>
36 #include <mach-o/nlist.h>
37 #include <sys/types.h>
38 #include <sys/mman.h>
39 #include <libkern/OSAtomic.h>
40 #include <Block.h>
41 #include <map>
42 #include <execinfo.h>
43
44 @interface NSInvocation
45 - (SEL)selector;
46 @end
47
48
49 #if TARGET_OS_MAC
50
51 // NSObject used to be in Foundation/CoreFoundation.
52
53 #define SYMBOL_ELSEWHERE_IN_3(sym, vers, n) \
54 OBJC_EXPORT const char elsewhere_ ##n __asm__("$ld$hide$os" #vers "$" #sym); const char elsewhere_ ##n = 0
55 #define SYMBOL_ELSEWHERE_IN_2(sym, vers, n) \
56 SYMBOL_ELSEWHERE_IN_3(sym, vers, n)
57 #define SYMBOL_ELSEWHERE_IN(sym, vers) \
58 SYMBOL_ELSEWHERE_IN_2(sym, vers, __COUNTER__)
59
60 #if __OBJC2__
61 # define NSOBJECT_ELSEWHERE_IN(vers) \
62 SYMBOL_ELSEWHERE_IN(_OBJC_CLASS_$_NSObject, vers); \
63 SYMBOL_ELSEWHERE_IN(_OBJC_METACLASS_$_NSObject, vers); \
64 SYMBOL_ELSEWHERE_IN(_OBJC_IVAR_$_NSObject.isa, vers)
65 #else
66 # define NSOBJECT_ELSEWHERE_IN(vers) \
67 SYMBOL_ELSEWHERE_IN(.objc_class_name_NSObject, vers)
68 #endif
69
70 #if TARGET_OS_IOS
71 NSOBJECT_ELSEWHERE_IN(5.1);
72 NSOBJECT_ELSEWHERE_IN(5.0);
73 NSOBJECT_ELSEWHERE_IN(4.3);
74 NSOBJECT_ELSEWHERE_IN(4.2);
75 NSOBJECT_ELSEWHERE_IN(4.1);
76 NSOBJECT_ELSEWHERE_IN(4.0);
77 NSOBJECT_ELSEWHERE_IN(3.2);
78 NSOBJECT_ELSEWHERE_IN(3.1);
79 NSOBJECT_ELSEWHERE_IN(3.0);
80 NSOBJECT_ELSEWHERE_IN(2.2);
81 NSOBJECT_ELSEWHERE_IN(2.1);
82 NSOBJECT_ELSEWHERE_IN(2.0);
83 #elif TARGET_OS_OSX
84 NSOBJECT_ELSEWHERE_IN(10.7);
85 NSOBJECT_ELSEWHERE_IN(10.6);
86 NSOBJECT_ELSEWHERE_IN(10.5);
87 NSOBJECT_ELSEWHERE_IN(10.4);
88 NSOBJECT_ELSEWHERE_IN(10.3);
89 NSOBJECT_ELSEWHERE_IN(10.2);
90 NSOBJECT_ELSEWHERE_IN(10.1);
91 NSOBJECT_ELSEWHERE_IN(10.0);
92 #else
93 // NSObject has always been in libobjc on these platforms.
94 #endif
95
96 // TARGET_OS_MAC
97 #endif
98
99
100 /***********************************************************************
101 * Weak ivar support
102 **********************************************************************/
103
104 static id defaultBadAllocHandler(Class cls)
105 {
106 _objc_fatal("attempt to allocate object of class '%s' failed",
107 cls->nameForLogging());
108 }
109
110 static id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
111
112 static id callBadAllocHandler(Class cls)
113 {
114 // fixme add re-entrancy protection in case allocation fails inside handler
115 return (*badAllocHandler)(cls);
116 }
117
118 void _objc_setBadAllocHandler(id(*newHandler)(Class))
119 {
120 badAllocHandler = newHandler;
121 }
122
123
124 namespace {
125
126 // The order of these bits is important.
127 #define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
128 #define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
129 #define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit
130 #define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1))
131
132 #define SIDE_TABLE_RC_SHIFT 2
133 #define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
134
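// Editorial sketch (not part of the original source): a side-table refcount
// word packs the flag bits low and the extra retain count above them. For
// example, an object retained three times beyond its initial reference and
// weakly referenced would be stored roughly as:
//
//     size_t bits = (3 << SIDE_TABLE_RC_SHIFT) | SIDE_TABLE_WEAKLY_REFERENCED;
//     size_t extraRC = bits >> SIDE_TABLE_RC_SHIFT;            // == 3
//     bool weak = bits & SIDE_TABLE_WEAKLY_REFERENCED;         // == true
//
// sidetable_retainCount() below reports 1 + extraRC.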
135 // RefcountMap disguises its pointers because we
136 // don't want the table to act as a root for `leaks`.
137 typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;
138
139 // Template parameters.
140 enum HaveOld { DontHaveOld = false, DoHaveOld = true };
141 enum HaveNew { DontHaveNew = false, DoHaveNew = true };
142
143 struct SideTable {
144 spinlock_t slock;
145 RefcountMap refcnts;
146 weak_table_t weak_table;
147
148 SideTable() {
149 memset(&weak_table, 0, sizeof(weak_table));
150 }
151
152 ~SideTable() {
153 _objc_fatal("Do not delete SideTable.");
154 }
155
156 void lock() { slock.lock(); }
157 void unlock() { slock.unlock(); }
158 void forceReset() { slock.forceReset(); }
159
160 // Address-ordered lock discipline for a pair of side tables.
161
162 template<HaveOld, HaveNew>
163 static void lockTwo(SideTable *lock1, SideTable *lock2);
164 template<HaveOld, HaveNew>
165 static void unlockTwo(SideTable *lock1, SideTable *lock2);
166 };
167
168
169 template<>
170 void SideTable::lockTwo<DoHaveOld, DoHaveNew>
171 (SideTable *lock1, SideTable *lock2)
172 {
173 spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
174 }
175
176 template<>
177 void SideTable::lockTwo<DoHaveOld, DontHaveNew>
178 (SideTable *lock1, SideTable *)
179 {
180 lock1->lock();
181 }
182
183 template<>
184 void SideTable::lockTwo<DontHaveOld, DoHaveNew>
185 (SideTable *, SideTable *lock2)
186 {
187 lock2->lock();
188 }
189
190 template<>
191 void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
192 (SideTable *lock1, SideTable *lock2)
193 {
194 spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
195 }
196
197 template<>
198 void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
199 (SideTable *lock1, SideTable *)
200 {
201 lock1->unlock();
202 }
203
204 template<>
205 void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
206 (SideTable *, SideTable *lock2)
207 {
208 lock2->unlock();
209 }
210
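// Editorial sketch (not from the original source): why an address-ordered
// discipline matters. If thread 1 takes (A, B) in call order while thread 2
// takes (B, A), each can hold one lock and wait forever on the other.
// Acquiring the pair in a fixed order breaks the cycle:
//
//     static void lockPairOrdered(spinlock_t *a, spinlock_t *b) {
//         if ((uintptr_t)a > (uintptr_t)b) { spinlock_t *t = a; a = b; b = t; }
//         a->lock();                 // lower address first
//         if (b != a) b->lock();
//     }
//
// spinlock_t::lockTwo() used by the specialization above is assumed to
// implement this discipline.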
211
212 // We cannot use a C++ static initializer to initialize SideTables because
213 // libc calls us before our C++ initializers run. We also don't want a global
214 // pointer to this struct because of the extra indirection.
215 // Do it the hard way.
216 alignas(StripedMap<SideTable>) static uint8_t
217 SideTableBuf[sizeof(StripedMap<SideTable>)];
218
219 static void SideTableInit() {
220 new (SideTableBuf) StripedMap<SideTable>();
221 }
222
223 static StripedMap<SideTable>& SideTables() {
224 return *reinterpret_cast<StripedMap<SideTable>*>(SideTableBuf);
225 }
226
227 // anonymous namespace
228 };
229
230 void SideTableLockAll() {
231 SideTables().lockAll();
232 }
233
234 void SideTableUnlockAll() {
235 SideTables().unlockAll();
236 }
237
238 void SideTableForceResetAll() {
239 SideTables().forceResetAll();
240 }
241
242 void SideTableDefineLockOrder() {
243 SideTables().defineLockOrder();
244 }
245
246 void SideTableLocksPrecedeLock(const void *newlock) {
247 SideTables().precedeLock(newlock);
248 }
249
250 void SideTableLocksSucceedLock(const void *oldlock) {
251 SideTables().succeedLock(oldlock);
252 }
253
254 void SideTableLocksPrecedeLocks(StripedMap<spinlock_t>& newlocks) {
255 int i = 0;
256 const void *newlock;
257 while ((newlock = newlocks.getLock(i++))) {
258 SideTables().precedeLock(newlock);
259 }
260 }
261
262 void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
263 int i = 0;
264 const void *oldlock;
265 while ((oldlock = oldlocks.getLock(i++))) {
266 SideTables().succeedLock(oldlock);
267 }
268 }
269
270 //
271 // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
272 //
273
274 id objc_retainBlock(id x) {
275 return (id)_Block_copy(x);
276 }
277
278 //
279 // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
280 //
281
282 BOOL objc_should_deallocate(id object) {
283 return YES;
284 }
285
286 id
287 objc_retain_autorelease(id obj)
288 {
289 return objc_autorelease(objc_retain(obj));
290 }
291
292
293 void
294 objc_storeStrong(id *location, id obj)
295 {
296 id prev = *location;
297 if (obj == prev) {
298 return;
299 }
300 objc_retain(obj);
301 *location = obj;
302 objc_release(prev);
303 }
304
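// Editorial sketch (not part of the original source): under ARC the compiler
// lowers an assignment to a __strong variable to a call like the one below;
// the names are hypothetical.
//
//     // self->_delegate = newDelegate;        (with a __strong ivar)
//     objc_storeStrong(&self->_delegate, newDelegate);
//
// The retain-new-then-release-old order above keeps the new value alive even
// when its only owner so far is the value being replaced.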
305
306 // Update a weak variable.
307 // If HaveOld is true, the variable has an existing value
308 // that needs to be cleaned up. This value might be nil.
309 // If HaveNew is true, there is a new value that needs to be
310 // assigned into the variable. This value might be nil.
311 // If CrashIfDeallocating is true, the process is halted if newObj is
312 // deallocating or newObj's class does not support weak references.
313 // If CrashIfDeallocating is false, nil is stored instead.
314 enum CrashIfDeallocating {
315 DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
316 };
317 template <HaveOld haveOld, HaveNew haveNew,
318 CrashIfDeallocating crashIfDeallocating>
319 static id
320 storeWeak(id *location, objc_object *newObj)
321 {
322 assert(haveOld || haveNew);
323 if (!haveNew) assert(newObj == nil);
324
325 Class previouslyInitializedClass = nil;
326 id oldObj;
327 SideTable *oldTable;
328 SideTable *newTable;
329
330 // Acquire locks for old and new values.
331 // Order by lock address to prevent lock ordering problems.
332 // Retry if the old value changes underneath us.
333 retry:
334 if (haveOld) {
335 oldObj = *location;
336 oldTable = &SideTables()[oldObj];
337 } else {
338 oldTable = nil;
339 }
340 if (haveNew) {
341 newTable = &SideTables()[newObj];
342 } else {
343 newTable = nil;
344 }
345
346 SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);
347
348 if (haveOld && *location != oldObj) {
349 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
350 goto retry;
351 }
352
353 // Prevent a deadlock between the weak reference machinery
354 // and the +initialize machinery by ensuring that no
355 // weakly-referenced object has an un-+initialized isa.
356 if (haveNew && newObj) {
357 Class cls = newObj->getIsa();
358 if (cls != previouslyInitializedClass &&
359 !((objc_class *)cls)->isInitialized())
360 {
361 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
362 _class_initialize(_class_getNonMetaClass(cls, (id)newObj));
363
364 // If this class is finished with +initialize then we're good.
365 // If this class is still running +initialize on this thread
366 // (i.e. +initialize called storeWeak on an instance of itself)
367 // then we may proceed but it will appear initializing and
368 // not yet initialized to the check above.
369 // Instead set previouslyInitializedClass to recognize it on retry.
370 previouslyInitializedClass = cls;
371
372 goto retry;
373 }
374 }
375
376 // Clean up old value, if any.
377 if (haveOld) {
378 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
379 }
380
381 // Assign new value, if any.
382 if (haveNew) {
383 newObj = (objc_object *)
384 weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
385 crashIfDeallocating);
386 // weak_register_no_lock returns nil if weak store should be rejected
387
388 // Set is-weakly-referenced bit in refcount table.
389 if (newObj && !newObj->isTaggedPointer()) {
390 newObj->setWeaklyReferenced_nolock();
391 }
392
393 // Do not set *location anywhere else. That would introduce a race.
394 *location = (id)newObj;
395 }
396 else {
397 // No new value. The storage is not changed.
398 }
399
400 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
401
402 return (id)newObj;
403 }
404
405
406 /**
407 * This function stores a new value into a __weak variable. It would
408 * be used anywhere a __weak variable is the target of an assignment.
409 *
410 * @param location The address of the weak pointer itself
411 * @param newObj The new object this weak ptr should now point to
412 *
413 * @return \e newObj
414 */
415 id
416 objc_storeWeak(id *location, id newObj)
417 {
418 return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
419 (location, (objc_object *)newObj);
420 }
421
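// Editorial sketch (not from the original source): assignment to an
// already-initialized __weak variable is lowered by ARC to this entry point.
// Names are hypothetical.
//
//     __weak id weakRef = obj;      // earlier: objc_initWeak(&weakRef, obj)
//     weakRef = other;              // emits: objc_storeWeak(&weakRef, other)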
422
423 /**
424 * This function stores a new value into a __weak variable.
425 * If the new object is deallocating or the new object's class
426 * does not support weak references, stores nil instead.
427 *
428 * @param location The address of the weak pointer itself
429 * @param newObj The new object this weak ptr should now point to
430 *
431 * @return The value stored (either the new object or nil)
432 */
433 id
434 objc_storeWeakOrNil(id *location, id newObj)
435 {
436 return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
437 (location, (objc_object *)newObj);
438 }
439
440
441 /**
442 * Initialize a fresh weak pointer to some object location.
443 * It would be used for code like:
444 *
445 * (The nil case)
446 * __weak id weakPtr;
447 * (The non-nil case)
448 * NSObject *o = ...;
449 * __weak id weakPtr = o;
450 *
451 * This function IS NOT thread-safe with respect to concurrent
452 * modifications to the weak variable. (Concurrent weak clear is safe.)
453 *
454 * @param location Address of __weak ptr.
455 * @param newObj Object ptr.
456 */
457 id
458 objc_initWeak(id *location, id newObj)
459 {
460 if (!newObj) {
461 *location = nil;
462 return nil;
463 }
464
465 return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
466 (location, (objc_object*)newObj);
467 }
468
469 id
470 objc_initWeakOrNil(id *location, id newObj)
471 {
472 if (!newObj) {
473 *location = nil;
474 return nil;
475 }
476
477 return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
478 (location, (objc_object*)newObj);
479 }
480
481
482 /**
483 * Destroys the relationship between a weak pointer
484 * and the object it is referencing in the internal weak
485 * table. If the weak pointer is not referencing anything,
486 * there is no need to edit the weak table.
487 *
488 * This function IS NOT thread-safe with respect to concurrent
489 * modifications to the weak variable. (Concurrent weak clear is safe.)
490 *
491 * @param location The weak pointer address.
492 */
493 void
494 objc_destroyWeak(id *location)
495 {
496 (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
497 (location, nil);
498 }
499
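// Editorial sketch (not from the original source): taken together,
// objc_initWeak and objc_destroyWeak bracket the lifetime of a __weak local
// under ARC. Names are hypothetical.
//
//     {
//         __weak id weakRef = obj;   // objc_initWeak(&weakRef, obj)
//         ...
//     }                              // objc_destroyWeak(&weakRef)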
500
501 /*
502 Once upon a time we eagerly cleared *location if we saw the object
503 was deallocating. This confuses code like NSPointerFunctions which
504 tries to pre-flight the raw storage and assumes if the storage is
505 zero then the weak system is done interfering. That is false: the
506 weak system is still going to check and clear the storage later.
507 This can cause objc_weak_error complaints and crashes.
508 So we now don't touch the storage until deallocation completes.
509 */
510
511 id
512 objc_loadWeakRetained(id *location)
513 {
514 id obj;
515 id result;
516 Class cls;
517
518 SideTable *table;
519
520 retry:
521 // fixme std::atomic this load
522 obj = *location;
523 if (!obj) return nil;
524 if (obj->isTaggedPointer()) return obj;
525
526 table = &SideTables()[obj];
527
528 table->lock();
529 if (*location != obj) {
530 table->unlock();
531 goto retry;
532 }
533
534 result = obj;
535
536 cls = obj->ISA();
537 if (! cls->hasCustomRR()) {
538 // Fast case. We know +initialize is complete because
539 // default-RR can never be set before then.
540 assert(cls->isInitialized());
541 if (! obj->rootTryRetain()) {
542 result = nil;
543 }
544 }
545 else {
546 // Slow case. We must check for +initialize and call it outside
547 // the lock if necessary in order to avoid deadlocks.
548 if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
549 BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
550 class_getMethodImplementation(cls, SEL_retainWeakReference);
551 if ((IMP)tryRetain == _objc_msgForward) {
552 result = nil;
553 }
554 else if (! (*tryRetain)(obj, SEL_retainWeakReference)) {
555 result = nil;
556 }
557 }
558 else {
559 table->unlock();
560 _class_initialize(cls);
561 goto retry;
562 }
563 }
564
565 table->unlock();
566 return result;
567 }
568
569 /**
570 * This loads the object referenced by a weak pointer and returns it, after
571 * retaining and autoreleasing the object to ensure that it stays alive
572 * long enough for the caller to use it. This function would be used
573 * anywhere a __weak variable is used in an expression.
574 *
575 * @param location The weak pointer address
576 *
577 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
578 */
579 id
580 objc_loadWeak(id *location)
581 {
582 if (!*location) return nil;
583 return objc_autorelease(objc_loadWeakRetained(location));
584 }
585
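// Editorial sketch (not from the original source): reading a __weak variable
// in an expression must retain the referent so it cannot deallocate mid-use.
// ARC typically emits the Retained variant above and releases the result
// itself; objc_loadWeak is the autoreleasing form. Names are hypothetical.
//
//     __weak id weakRef = obj;
//     [weakRef doSomething];
//     // roughly: id tmp = objc_loadWeakRetained(&weakRef);
//     //          [tmp doSomething];
//     //          objc_release(tmp);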
586
587 /**
588 * This function copies a weak pointer from one location to another,
589 * when the destination doesn't already contain a weak pointer. It
590 * would be used for code like:
591 *
592 * __weak id src = ...;
593 * __weak id dst = src;
594 *
595 * This function IS NOT thread-safe with respect to concurrent
596 * modifications to the destination variable. (Concurrent weak clear is safe.)
597 *
598 * @param dst The destination variable.
599 * @param src The source variable.
600 */
601 void
602 objc_copyWeak(id *dst, id *src)
603 {
604 id obj = objc_loadWeakRetained(src);
605 objc_initWeak(dst, obj);
606 objc_release(obj);
607 }
608
609 /**
610 * Move a weak pointer from one location to another.
611 * Before the move, the destination must be uninitialized.
612 * After the move, the source is nil.
613 *
614 * This function IS NOT thread-safe with respect to concurrent
615 * modifications to either weak variable. (Concurrent weak clear is safe.)
616 *
617 */
618 void
619 objc_moveWeak(id *dst, id *src)
620 {
621 objc_copyWeak(dst, src);
622 objc_destroyWeak(src);
623 *src = nil;
624 }
625
626
627 /***********************************************************************
628 Autorelease pool implementation
629
630 A thread's autorelease pool is a stack of pointers.
631 Each pointer is either an object to release, or POOL_BOUNDARY which is
632 an autorelease pool boundary.
633 A pool token is a pointer to the POOL_BOUNDARY for that pool. When
634 the pool is popped, every object hotter than the sentinel is released.
635 The stack is divided into a doubly-linked list of pages. Pages are added
636 and deleted as necessary.
637 Thread-local storage points to the hot page, where newly autoreleased
638 objects are stored.
639 **********************************************************************/
640
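// Editorial sketch (not part of the original source): an @autoreleasepool
// statement compiles to a push/pop pair around its body, and each
// autoreleased object becomes one pointer pushed onto this per-thread stack.
//
//     void *token = objc_autoreleasePoolPush();   // boundary or placeholder
//     objc_autorelease(obj);                      // obj lands on the hot page
//     objc_autoreleasePoolPop(token);             // releases everything pushed
//                                                 // after the boundary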
641 // Set this to 1 to mprotect() autorelease pool contents
642 #define PROTECT_AUTORELEASEPOOL 0
643
644 // Set this to 1 to validate the entire autorelease pool header all the time
645 // (i.e. use check() instead of fastcheck() everywhere)
646 #define CHECK_AUTORELEASEPOOL (DEBUG)
647
648 BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
649 BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
650
651 namespace {
652
653 struct magic_t {
654 static const uint32_t M0 = 0xA1A1A1A1;
655 # define M1 "AUTORELEASE!"
656 static const size_t M1_len = 12;
657 uint32_t m[4];
658
659 magic_t() {
660 assert(M1_len == strlen(M1));
661 assert(M1_len == 3 * sizeof(m[1]));
662
663 m[0] = M0;
664 strncpy((char *)&m[1], M1, M1_len);
665 }
666
667 ~magic_t() {
668 m[0] = m[1] = m[2] = m[3] = 0;
669 }
670
671 bool check() const {
672 return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
673 }
674
675 bool fastcheck() const {
676 #if CHECK_AUTORELEASEPOOL
677 return check();
678 #else
679 return (m[0] == M0);
680 #endif
681 }
682
683 # undef M1
684 };
685
686
687 class AutoreleasePoolPage
688 {
689 // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
690 // pushed and it has never contained any objects. This saves memory
691 // when the top level (i.e. libdispatch) pushes and pops pools but
692 // never uses them.
693 # define EMPTY_POOL_PLACEHOLDER ((id*)1)
694
695 # define POOL_BOUNDARY nil
696 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
697 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
698 static size_t const SIZE =
699 #if PROTECT_AUTORELEASEPOOL
700 PAGE_MAX_SIZE; // must be multiple of vm page size
701 #else
702 PAGE_MAX_SIZE; // size and alignment, power of 2
703 #endif
704 static size_t const COUNT = SIZE / sizeof(id);
705
706 magic_t const magic;
707 id *next;
708 pthread_t const thread;
709 AutoreleasePoolPage * const parent;
710 AutoreleasePoolPage *child;
711 uint32_t const depth;
712 uint32_t hiwat;
713
714 // SIZE-sizeof(*this) bytes of contents follow
715
716 static void * operator new(size_t size) {
717 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
718 }
719 static void operator delete(void * p) {
720 return free(p);
721 }
722
723 inline void protect() {
724 #if PROTECT_AUTORELEASEPOOL
725 mprotect(this, SIZE, PROT_READ);
726 check();
727 #endif
728 }
729
730 inline void unprotect() {
731 #if PROTECT_AUTORELEASEPOOL
732 check();
733 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
734 #endif
735 }
736
737 AutoreleasePoolPage(AutoreleasePoolPage *newParent)
738 : magic(), next(begin()), thread(pthread_self()),
739 parent(newParent), child(nil),
740 depth(parent ? 1+parent->depth : 0),
741 hiwat(parent ? parent->hiwat : 0)
742 {
743 if (parent) {
744 parent->check();
745 assert(!parent->child);
746 parent->unprotect();
747 parent->child = this;
748 parent->protect();
749 }
750 protect();
751 }
752
753 ~AutoreleasePoolPage()
754 {
755 check();
756 unprotect();
757 assert(empty());
758
759 // Not recursive: we don't want to blow out the stack
760 // if a thread accumulates a stupendous amount of garbage
761 assert(!child);
762 }
763
764
765 void busted(bool die = true)
766 {
767 magic_t right;
768 (die ? _objc_fatal : _objc_inform)
769 ("autorelease pool page %p corrupted\n"
770 " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
771 " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
772 " pthread %p\n"
773 " should be %p\n",
774 this,
775 magic.m[0], magic.m[1], magic.m[2], magic.m[3],
776 right.m[0], right.m[1], right.m[2], right.m[3],
777 this->thread, pthread_self());
778 }
779
780 void check(bool die = true)
781 {
782 if (!magic.check() || !pthread_equal(thread, pthread_self())) {
783 busted(die);
784 }
785 }
786
787 void fastcheck(bool die = true)
788 {
789 #if CHECK_AUTORELEASEPOOL
790 check(die);
791 #else
792 if (! magic.fastcheck()) {
793 busted(die);
794 }
795 #endif
796 }
797
798
799 id * begin() {
800 return (id *) ((uint8_t *)this+sizeof(*this));
801 }
802
803 id * end() {
804 return (id *) ((uint8_t *)this+SIZE);
805 }
806
807 bool empty() {
808 return next == begin();
809 }
810
811 bool full() {
812 return next == end();
813 }
814
815 bool lessThanHalfFull() {
816 return (next - begin() < (end() - begin()) / 2);
817 }
818
819 id *add(id obj)
820 {
821 assert(!full());
822 unprotect();
823 id *ret = next; // faster than `return next-1` because of aliasing
824 *next++ = obj;
825 protect();
826 return ret;
827 }
828
829 void releaseAll()
830 {
831 releaseUntil(begin());
832 }
833
834 void releaseUntil(id *stop)
835 {
836 // Not recursive: we don't want to blow out the stack
837 // if a thread accumulates a stupendous amount of garbage
838
839 while (this->next != stop) {
840 // Restart from hotPage() every time, in case -release
841 // autoreleased more objects
842 AutoreleasePoolPage *page = hotPage();
843
844 // fixme I think this `while` can be `if`, but I can't prove it
845 while (page->empty()) {
846 page = page->parent;
847 setHotPage(page);
848 }
849
850 page->unprotect();
851 id obj = *--page->next;
852 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
853 page->protect();
854
855 if (obj != POOL_BOUNDARY) {
856 objc_release(obj);
857 }
858 }
859
860 setHotPage(this);
861
862 #if DEBUG
863 // we expect any children to be completely empty
864 for (AutoreleasePoolPage *page = child; page; page = page->child) {
865 assert(page->empty());
866 }
867 #endif
868 }
869
870 void kill()
871 {
872 // Not recursive: we don't want to blow out the stack
873 // if a thread accumulates a stupendous amount of garbage
874 AutoreleasePoolPage *page = this;
875 while (page->child) page = page->child;
876
877 AutoreleasePoolPage *deathptr;
878 do {
879 deathptr = page;
880 page = page->parent;
881 if (page) {
882 page->unprotect();
883 page->child = nil;
884 page->protect();
885 }
886 delete deathptr;
887 } while (deathptr != this);
888 }
889
890 static void tls_dealloc(void *p)
891 {
892 if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
893 // No objects or pool pages to clean up here.
894 return;
895 }
896
897 // reinstate TLS value while we work
898 setHotPage((AutoreleasePoolPage *)p);
899
900 if (AutoreleasePoolPage *page = coldPage()) {
901 if (!page->empty()) pop(page->begin()); // pop all of the pools
902 if (DebugMissingPools || DebugPoolAllocation) {
903 // pop() killed the pages already
904 } else {
905 page->kill(); // free all of the pages
906 }
907 }
908
909 // clear TLS value so TLS destruction doesn't loop
910 setHotPage(nil);
911 }
912
913 static AutoreleasePoolPage *pageForPointer(const void *p)
914 {
915 return pageForPointer((uintptr_t)p);
916 }
917
918 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
919 {
920 AutoreleasePoolPage *result;
921 uintptr_t offset = p % SIZE;
922
923 assert(offset >= sizeof(AutoreleasePoolPage));
924
925 result = (AutoreleasePoolPage *)(p - offset);
926 result->fastcheck();
927
928 return result;
929 }
930
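// Editorial sketch (not from the original source): because pages are
// allocated SIZE-aligned (see operator new above), the page owning any
// interior pointer can be recovered with modular arithmetic. With
// SIZE == 4096 for illustration (SIZE is PAGE_MAX_SIZE here):
//
//     uintptr_t p      = 0x100804238;      // hypothetical pool token
//     uintptr_t offset = p % 4096;         // 0x238, past the page header
//     AutoreleasePoolPage *page =
//         (AutoreleasePoolPage *)(p - offset);   // 0x100804000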
931
932 static inline bool haveEmptyPoolPlaceholder()
933 {
934 id *tls = (id *)tls_get_direct(key);
935 return (tls == EMPTY_POOL_PLACEHOLDER);
936 }
937
938 static inline id* setEmptyPoolPlaceholder()
939 {
940 assert(tls_get_direct(key) == nil);
941 tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
942 return EMPTY_POOL_PLACEHOLDER;
943 }
944
945 static inline AutoreleasePoolPage *hotPage()
946 {
947 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
948 tls_get_direct(key);
949 if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
950 if (result) result->fastcheck();
951 return result;
952 }
953
954 static inline void setHotPage(AutoreleasePoolPage *page)
955 {
956 if (page) page->fastcheck();
957 tls_set_direct(key, (void *)page);
958 }
959
960 static inline AutoreleasePoolPage *coldPage()
961 {
962 AutoreleasePoolPage *result = hotPage();
963 if (result) {
964 while (result->parent) {
965 result = result->parent;
966 result->fastcheck();
967 }
968 }
969 return result;
970 }
971
972
973 static inline id *autoreleaseFast(id obj)
974 {
975 AutoreleasePoolPage *page = hotPage();
976 if (page && !page->full()) {
977 return page->add(obj);
978 } else if (page) {
979 return autoreleaseFullPage(obj, page);
980 } else {
981 return autoreleaseNoPage(obj);
982 }
983 }
984
985 static __attribute__((noinline))
986 id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
987 {
988 // The hot page is full.
989 // Step to the next non-full page, adding a new page if necessary.
990 // Then add the object to that page.
991 assert(page == hotPage());
992 assert(page->full() || DebugPoolAllocation);
993
994 do {
995 if (page->child) page = page->child;
996 else page = new AutoreleasePoolPage(page);
997 } while (page->full());
998
999 setHotPage(page);
1000 return page->add(obj);
1001 }
1002
1003 static __attribute__((noinline))
1004 id *autoreleaseNoPage(id obj)
1005 {
1006 // "No page" could mean no pool has been pushed
1007 // or an empty placeholder pool has been pushed and has no contents yet
1008 assert(!hotPage());
1009
1010 bool pushExtraBoundary = false;
1011 if (haveEmptyPoolPlaceholder()) {
1012 // We are pushing a second pool over the empty placeholder pool
1013 // or pushing the first object into the empty placeholder pool.
1014 // Before doing that, push a pool boundary on behalf of the pool
1015 // that is currently represented by the empty placeholder.
1016 pushExtraBoundary = true;
1017 }
1018 else if (obj != POOL_BOUNDARY && DebugMissingPools) {
1019 // We are pushing an object with no pool in place,
1020 // and no-pool debugging was requested by environment.
1021 _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
1022 "autoreleased with no pool in place - "
1023 "just leaking - break on "
1024 "objc_autoreleaseNoPool() to debug",
1025 pthread_self(), (void*)obj, object_getClassName(obj));
1026 objc_autoreleaseNoPool(obj);
1027 return nil;
1028 }
1029 else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
1030 // We are pushing a pool with no pool in place,
1031 // and alloc-per-pool debugging was not requested.
1032 // Install and return the empty pool placeholder.
1033 return setEmptyPoolPlaceholder();
1034 }
1035
1036 // We are pushing an object or a non-placeholder'd pool.
1037
1038 // Install the first page.
1039 AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
1040 setHotPage(page);
1041
1042 // Push a boundary on behalf of the previously-placeholder'd pool.
1043 if (pushExtraBoundary) {
1044 page->add(POOL_BOUNDARY);
1045 }
1046
1047 // Push the requested object or pool.
1048 return page->add(obj);
1049 }
1050
1051
1052 static __attribute__((noinline))
1053 id *autoreleaseNewPage(id obj)
1054 {
1055 AutoreleasePoolPage *page = hotPage();
1056 if (page) return autoreleaseFullPage(obj, page);
1057 else return autoreleaseNoPage(obj);
1058 }
1059
1060 public:
1061 static inline id autorelease(id obj)
1062 {
1063 assert(obj);
1064 assert(!obj->isTaggedPointer());
1065 id *dest __unused = autoreleaseFast(obj);
1066 assert(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
1067 return obj;
1068 }
1069
1070
1071 static inline void *push()
1072 {
1073 id *dest;
1074 if (DebugPoolAllocation) {
1075 // Each autorelease pool starts on a new pool page.
1076 dest = autoreleaseNewPage(POOL_BOUNDARY);
1077 } else {
1078 dest = autoreleaseFast(POOL_BOUNDARY);
1079 }
1080 assert(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
1081 return dest;
1082 }
1083
1084 static void badPop(void *token)
1085 {
1086 // Error. For bincompat purposes this is not
1087 // fatal in executables built with old SDKs.
1088
1089 if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0, 2_0)) {
1090 // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
1091 _objc_fatal
1092 ("Invalid or prematurely-freed autorelease pool %p.", token);
1093 }
1094
1095 // Old SDK. Bad pop is warned once.
1096 static bool complained = false;
1097 if (!complained) {
1098 complained = true;
1099 _objc_inform_now_and_on_crash
1100 ("Invalid or prematurely-freed autorelease pool %p. "
1101 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
1102 "Proceeding anyway because the app is old "
1103 "(SDK version " SDK_FORMAT "). Memory errors are likely.",
1104 token, FORMAT_SDK(sdkVersion()));
1105 }
1106 objc_autoreleasePoolInvalid(token);
1107 }
1108
1109 static inline void pop(void *token)
1110 {
1111 AutoreleasePoolPage *page;
1112 id *stop;
1113
1114 if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
1115 // Popping the top-level placeholder pool.
1116 if (hotPage()) {
1117 // Pool was used. Pop its contents normally.
1118 // Pool pages remain allocated for re-use as usual.
1119 pop(coldPage()->begin());
1120 } else {
1121 // Pool was never used. Clear the placeholder.
1122 setHotPage(nil);
1123 }
1124 return;
1125 }
1126
1127 page = pageForPointer(token);
1128 stop = (id *)token;
1129 if (*stop != POOL_BOUNDARY) {
1130 if (stop == page->begin() && !page->parent) {
1131 // Start of coldest page may correctly not be POOL_BOUNDARY:
1132 // 1. top-level pool is popped, leaving the cold page in place
1133 // 2. an object is autoreleased with no pool
1134 } else {
1135 // Error. For bincompat purposes this is not
1136 // fatal in executables built with old SDKs.
1137 return badPop(token);
1138 }
1139 }
1140
1141 if (PrintPoolHiwat) printHiwat();
1142
1143 page->releaseUntil(stop);
1144
1145 // memory: delete empty children
1146 if (DebugPoolAllocation && page->empty()) {
1147 // special case: delete everything during page-per-pool debugging
1148 AutoreleasePoolPage *parent = page->parent;
1149 page->kill();
1150 setHotPage(parent);
1151 } else if (DebugMissingPools && page->empty() && !page->parent) {
1152 // special case: delete everything for pop(top)
1153 // when debugging missing autorelease pools
1154 page->kill();
1155 setHotPage(nil);
1156 }
1157 else if (page->child) {
1158 // hysteresis: keep one empty child if page is more than half full
1159 if (page->lessThanHalfFull()) {
1160 page->child->kill();
1161 }
1162 else if (page->child->child) {
1163 page->child->child->kill();
1164 }
1165 }
1166 }
1167
1168 static void init()
1169 {
1170 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
1171 AutoreleasePoolPage::tls_dealloc);
1172 assert(r == 0);
1173 }
1174
1175 void print()
1176 {
1177 _objc_inform("[%p] ................ PAGE %s %s %s", this,
1178 full() ? "(full)" : "",
1179 this == hotPage() ? "(hot)" : "",
1180 this == coldPage() ? "(cold)" : "");
1181 check(false);
1182 for (id *p = begin(); p < next; p++) {
1183 if (*p == POOL_BOUNDARY) {
1184 _objc_inform("[%p] ################ POOL %p", p, p);
1185 } else {
1186 _objc_inform("[%p] %#16lx %s",
1187 p, (unsigned long)*p, object_getClassName(*p));
1188 }
1189 }
1190 }
1191
1192 static void printAll()
1193 {
1194 _objc_inform("##############");
1195 _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());
1196
1197 AutoreleasePoolPage *page;
1198 ptrdiff_t objects = 0;
1199 for (page = coldPage(); page; page = page->child) {
1200 objects += page->next - page->begin();
1201 }
1202 _objc_inform("%llu releases pending.", (unsigned long long)objects);
1203
1204 if (haveEmptyPoolPlaceholder()) {
1205 _objc_inform("[%p] ................ PAGE (placeholder)",
1206 EMPTY_POOL_PLACEHOLDER);
1207 _objc_inform("[%p] ################ POOL (placeholder)",
1208 EMPTY_POOL_PLACEHOLDER);
1209 }
1210 else {
1211 for (page = coldPage(); page; page = page->child) {
1212 page->print();
1213 }
1214 }
1215
1216 _objc_inform("##############");
1217 }
1218
1219 static void printHiwat()
1220 {
1221 // Check and propagate high water mark
1222 // Ignore high water marks under 256 to suppress noise.
1223 AutoreleasePoolPage *p = hotPage();
1224 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
1225 if (mark > p->hiwat && mark > 256) {
1226 for( ; p; p = p->parent) {
1227 p->unprotect();
1228 p->hiwat = mark;
1229 p->protect();
1230 }
1231
1232 _objc_inform("POOL HIGHWATER: new high water mark of %u "
1233 "pending releases for thread %p:",
1234 mark, pthread_self());
1235
1236 void *stack[128];
1237 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
1238 char **sym = backtrace_symbols(stack, count);
1239 for (int i = 0; i < count; i++) {
1240 _objc_inform("POOL HIGHWATER: %s", sym[i]);
1241 }
1242 free(sym);
1243 }
1244 }
1245
1246 #undef POOL_BOUNDARY
1247 };
1248
1249 // anonymous namespace
1250 };
1251
1252
1253 /***********************************************************************
1254 * Slow paths for inline control
1255 **********************************************************************/
1256
1257 #if SUPPORT_NONPOINTER_ISA
1258
1259 NEVER_INLINE id
1260 objc_object::rootRetain_overflow(bool tryRetain)
1261 {
1262 return rootRetain(tryRetain, true);
1263 }
1264
1265
1266 NEVER_INLINE bool
1267 objc_object::rootRelease_underflow(bool performDealloc)
1268 {
1269 return rootRelease(performDealloc, true);
1270 }
1271
1272
1273 // Slow path of clearDeallocating()
1274 // for objects with nonpointer isa
1275 // that were ever weakly referenced
1276 // or whose retain count ever overflowed to the side table.
1277 NEVER_INLINE void
1278 objc_object::clearDeallocating_slow()
1279 {
1280 assert(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));
1281
1282 SideTable& table = SideTables()[this];
1283 table.lock();
1284 if (isa.weakly_referenced) {
1285 weak_clear_no_lock(&table.weak_table, (id)this);
1286 }
1287 if (isa.has_sidetable_rc) {
1288 table.refcnts.erase(this);
1289 }
1290 table.unlock();
1291 }
1292
1293 #endif
1294
1295 __attribute__((noinline,used))
1296 id
1297 objc_object::rootAutorelease2()
1298 {
1299 assert(!isTaggedPointer());
1300 return AutoreleasePoolPage::autorelease((id)this);
1301 }
1302
1303
1304 BREAKPOINT_FUNCTION(
1305 void objc_overrelease_during_dealloc_error(void)
1306 );
1307
1308
1309 NEVER_INLINE
1310 bool
1311 objc_object::overrelease_error()
1312 {
1313 _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
1314 objc_overrelease_during_dealloc_error();
1315 return false; // allow rootRelease() to tail-call this
1316 }
1317
1318
1319 /***********************************************************************
1320 * Retain count operations for side table.
1321 **********************************************************************/
1322
1323
1324 #if DEBUG
1325 // Used to assert that an object is not present in the side table.
1326 bool
1327 objc_object::sidetable_present()
1328 {
1329 bool result = false;
1330 SideTable& table = SideTables()[this];
1331
1332 table.lock();
1333
1334 RefcountMap::iterator it = table.refcnts.find(this);
1335 if (it != table.refcnts.end()) result = true;
1336
1337 if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;
1338
1339 table.unlock();
1340
1341 return result;
1342 }
1343 #endif
1344
1345 #if SUPPORT_NONPOINTER_ISA
1346
1347 void
1348 objc_object::sidetable_lock()
1349 {
1350 SideTable& table = SideTables()[this];
1351 table.lock();
1352 }
1353
1354 void
1355 objc_object::sidetable_unlock()
1356 {
1357 SideTable& table = SideTables()[this];
1358 table.unlock();
1359 }
1360
1361
1362 // Move the entire retain count to the side table,
1363 // as well as isDeallocating and weaklyReferenced.
1364 void
1365 objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
1366 bool isDeallocating,
1367 bool weaklyReferenced)
1368 {
1369 assert(!isa.nonpointer); // should already be changed to raw pointer
1370 SideTable& table = SideTables()[this];
1371
1372 size_t& refcntStorage = table.refcnts[this];
1373 size_t oldRefcnt = refcntStorage;
1374 // not deallocating - that was in the isa
1375 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1376 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1377
1378 uintptr_t carry;
1379 size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1380 if (carry) refcnt = SIDE_TABLE_RC_PINNED;
1381 if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
1382 if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
1383
1384 refcntStorage = refcnt;
1385 }
1386
1387
1388 // Move some retain counts to the side table from the isa field.
1389 // Returns true if the object is now pinned.
1390 bool
1391 objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
1392 {
1393 assert(isa.nonpointer);
1394 SideTable& table = SideTables()[this];
1395
1396 size_t& refcntStorage = table.refcnts[this];
1397 size_t oldRefcnt = refcntStorage;
1398 // isa-side bits should not be set here
1399 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1400 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1401
1402 if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
1403
1404 uintptr_t carry;
1405 size_t newRefcnt =
1406 addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1407 if (carry) {
1408 refcntStorage =
1409 SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
1410 return true;
1411 }
1412 else {
1413 refcntStorage = newRefcnt;
1414 return false;
1415 }
1416 }
1417
1418
1419 // Move some retain counts from the side table to the isa field.
1420 // Returns the actual count subtracted, which may be less than the request.
1421 size_t
1422 objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
1423 {
1424 assert(isa.nonpointer);
1425 SideTable& table = SideTables()[this];
1426
1427 RefcountMap::iterator it = table.refcnts.find(this);
1428 if (it == table.refcnts.end() || it->second == 0) {
1429 // Side table retain count is zero. Can't borrow.
1430 return 0;
1431 }
1432 size_t oldRefcnt = it->second;
1433
1434 // isa-side bits should not be set here
1435 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1436 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1437
1438 size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
1439 assert(oldRefcnt > newRefcnt); // shouldn't underflow
1440 it->second = newRefcnt;
1441 return delta_rc;
1442 }
1443
1444
1445 size_t
1446 objc_object::sidetable_getExtraRC_nolock()
1447 {
1448 assert(isa.nonpointer);
1449 SideTable& table = SideTables()[this];
1450 RefcountMap::iterator it = table.refcnts.find(this);
1451 if (it == table.refcnts.end()) return 0;
1452 else return it->second >> SIDE_TABLE_RC_SHIFT;
1453 }
1454
1455
1456 // SUPPORT_NONPOINTER_ISA
1457 #endif
1458
1459
1460 id
1461 objc_object::sidetable_retain()
1462 {
1463 #if SUPPORT_NONPOINTER_ISA
1464 assert(!isa.nonpointer);
1465 #endif
1466 SideTable& table = SideTables()[this];
1467
1468 table.lock();
1469 size_t& refcntStorage = table.refcnts[this];
1470 if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
1471 refcntStorage += SIDE_TABLE_RC_ONE;
1472 }
1473 table.unlock();
1474
1475 return (id)this;
1476 }
1477
1478
1479 bool
1480 objc_object::sidetable_tryRetain()
1481 {
1482 #if SUPPORT_NONPOINTER_ISA
1483 assert(!isa.nonpointer);
1484 #endif
1485 SideTable& table = SideTables()[this];
1486
1487 // NO SPINLOCK HERE
1488 // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
1489 // which already acquired the lock on our behalf.
1490
1491 // fixme can't do this efficiently with os_lock_handoff_s
1492 // if (table.slock == 0) {
1493 // _objc_fatal("Do not call -_tryRetain.");
1494 // }
1495
1496 bool result = true;
1497 RefcountMap::iterator it = table.refcnts.find(this);
1498 if (it == table.refcnts.end()) {
1499 table.refcnts[this] = SIDE_TABLE_RC_ONE;
1500 } else if (it->second & SIDE_TABLE_DEALLOCATING) {
1501 result = false;
1502 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1503 it->second += SIDE_TABLE_RC_ONE;
1504 }
1505
1506 return result;
1507 }
1508
1509
1510 uintptr_t
1511 objc_object::sidetable_retainCount()
1512 {
1513 SideTable& table = SideTables()[this];
1514
1515 size_t refcnt_result = 1;
1516
1517 table.lock();
1518 RefcountMap::iterator it = table.refcnts.find(this);
1519 if (it != table.refcnts.end()) {
1520 // this is valid for SIDE_TABLE_RC_PINNED too
1521 refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
1522 }
1523 table.unlock();
1524 return refcnt_result;
1525 }
1526
1527
1528 bool
1529 objc_object::sidetable_isDeallocating()
1530 {
1531 SideTable& table = SideTables()[this];
1532
1533 // NO SPINLOCK HERE
1534 // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
1535 // which already acquired the lock on our behalf.
1536
1537
1538 // fixme can't do this efficiently with os_lock_handoff_s
1539 // if (table.slock == 0) {
1540 // _objc_fatal("Do not call -_isDeallocating.");
1541 // }
1542
1543 RefcountMap::iterator it = table.refcnts.find(this);
1544 return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
1545 }
1546
1547
1548 bool
1549 objc_object::sidetable_isWeaklyReferenced()
1550 {
1551 bool result = false;
1552
1553 SideTable& table = SideTables()[this];
1554 table.lock();
1555
1556 RefcountMap::iterator it = table.refcnts.find(this);
1557 if (it != table.refcnts.end()) {
1558 result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
1559 }
1560
1561 table.unlock();
1562
1563 return result;
1564 }
1565
1566
1567 void
1568 objc_object::sidetable_setWeaklyReferenced_nolock()
1569 {
1570 #if SUPPORT_NONPOINTER_ISA
1571 assert(!isa.nonpointer);
1572 #endif
1573
1574 SideTable& table = SideTables()[this];
1575
1576 table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
1577 }
1578
1579
1580 // rdar://20206767
1581 // return uintptr_t instead of bool so that the various raw-isa
1582 // -release paths all return zero in eax
1583 uintptr_t
1584 objc_object::sidetable_release(bool performDealloc)
1585 {
1586 #if SUPPORT_NONPOINTER_ISA
1587 assert(!isa.nonpointer);
1588 #endif
1589 SideTable& table = SideTables()[this];
1590
1591 bool do_dealloc = false;
1592
1593 table.lock();
1594 RefcountMap::iterator it = table.refcnts.find(this);
1595 if (it == table.refcnts.end()) {
1596 do_dealloc = true;
1597 table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
1598 } else if (it->second < SIDE_TABLE_DEALLOCATING) {
1599 // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
1600 do_dealloc = true;
1601 it->second |= SIDE_TABLE_DEALLOCATING;
1602 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1603 it->second -= SIDE_TABLE_RC_ONE;
1604 }
1605 table.unlock();
1606 if (do_dealloc && performDealloc) {
1607 ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
1608 }
1609 return do_dealloc;
1610 }
1611
1612
1613 void
1614 objc_object::sidetable_clearDeallocating()
1615 {
1616 SideTable& table = SideTables()[this];
1617
1618 // clear any weak table items
1619 // clear extra retain count and deallocating bit
1620 // (fixme warn or abort if extra retain count == 0 ?)
1621 table.lock();
1622 RefcountMap::iterator it = table.refcnts.find(this);
1623 if (it != table.refcnts.end()) {
1624 if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
1625 weak_clear_no_lock(&table.weak_table, (id)this);
1626 }
1627 table.refcnts.erase(it);
1628 }
1629 table.unlock();
1630 }
1631
1632
1633 /***********************************************************************
1634 * Optimized retain/release/autorelease entrypoints
1635 **********************************************************************/
1636
1637
1638 #if __OBJC2__
1639
1640 __attribute__((aligned(16)))
1641 id
1642 objc_retain(id obj)
1643 {
1644 if (!obj) return obj;
1645 if (obj->isTaggedPointer()) return obj;
1646 return obj->retain();
1647 }
1648
1649
1650 __attribute__((aligned(16)))
1651 void
1652 objc_release(id obj)
1653 {
1654 if (!obj) return;
1655 if (obj->isTaggedPointer()) return;
1656 return obj->release();
1657 }
1658
1659
1660 __attribute__((aligned(16)))
1661 id
1662 objc_autorelease(id obj)
1663 {
1664 if (!obj) return obj;
1665 if (obj->isTaggedPointer()) return obj;
1666 return obj->autorelease();
1667 }
1668
1669
1670 // OBJC2
1671 #else
1672 // not OBJC2
1673
1674
1675 id objc_retain(id obj) { return [obj retain]; }
1676 void objc_release(id obj) { [obj release]; }
1677 id objc_autorelease(id obj) { return [obj autorelease]; }
1678
1679
1680 #endif
1681
1682
1683 /***********************************************************************
1684 * Basic operations for root class implementations a.k.a. _objc_root*()
1685 **********************************************************************/
1686
1687 bool
1688 _objc_rootTryRetain(id obj)
1689 {
1690 assert(obj);
1691
1692 return obj->rootTryRetain();
1693 }
1694
1695 bool
1696 _objc_rootIsDeallocating(id obj)
1697 {
1698 assert(obj);
1699
1700 return obj->rootIsDeallocating();
1701 }
1702
1703
1704 void
1705 objc_clear_deallocating(id obj)
1706 {
1707 assert(obj);
1708
1709 if (obj->isTaggedPointer()) return;
1710 obj->clearDeallocating();
1711 }
1712
1713
1714 bool
1715 _objc_rootReleaseWasZero(id obj)
1716 {
1717 assert(obj);
1718
1719 return obj->rootReleaseShouldDealloc();
1720 }
1721
1722
1723 id
1724 _objc_rootAutorelease(id obj)
1725 {
1726 assert(obj);
1727 return obj->rootAutorelease();
1728 }
1729
1730 uintptr_t
1731 _objc_rootRetainCount(id obj)
1732 {
1733 assert(obj);
1734
1735 return obj->rootRetainCount();
1736 }
1737
1738
1739 id
1740 _objc_rootRetain(id obj)
1741 {
1742 assert(obj);
1743
1744 return obj->rootRetain();
1745 }
1746
1747 void
1748 _objc_rootRelease(id obj)
1749 {
1750 assert(obj);
1751
1752 obj->rootRelease();
1753 }
1754
1755
1756 id
1757 _objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
1758 {
1759 id obj;
1760
1761 #if __OBJC2__
1762 // allocWithZone under __OBJC2__ ignores the zone parameter
1763 (void)zone;
1764 obj = class_createInstance(cls, 0);
1765 #else
1766 if (!zone) {
1767 obj = class_createInstance(cls, 0);
1768 }
1769 else {
1770 obj = class_createInstanceFromZone(cls, 0, zone);
1771 }
1772 #endif
1773
1774 if (slowpath(!obj)) obj = callBadAllocHandler(cls);
1775 return obj;
1776 }
1777
1778
1779 // Call [cls alloc] or [cls allocWithZone:nil], with appropriate
1780 // shortcutting optimizations.
1781 static ALWAYS_INLINE id
1782 callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
1783 {
1784 if (slowpath(checkNil && !cls)) return nil;
1785
1786 #if __OBJC2__
1787 if (fastpath(!cls->ISA()->hasCustomAWZ())) {
1788 // No alloc/allocWithZone implementation. Go straight to the allocator.
1789 // fixme store hasCustomAWZ in the non-meta class and
1790 // add it to canAllocFast's summary
1791 if (fastpath(cls->canAllocFast())) {
1792 // No ctors, raw isa, etc. Go straight to the metal.
1793 bool dtor = cls->hasCxxDtor();
1794 id obj = (id)calloc(1, cls->bits.fastInstanceSize());
1795 if (slowpath(!obj)) return callBadAllocHandler(cls);
1796 obj->initInstanceIsa(cls, dtor);
1797 return obj;
1798 }
1799 else {
1800 // Has ctor or raw isa or something. Use the slower path.
1801 id obj = class_createInstance(cls, 0);
1802 if (slowpath(!obj)) return callBadAllocHandler(cls);
1803 return obj;
1804 }
1805 }
1806 #endif
1807
1808 // No shortcuts available.
1809 if (allocWithZone) return [cls allocWithZone:nil];
1810 return [cls alloc];
1811 }
1812
1813
1814 // Base class implementation of +alloc. cls is not nil.
1815 // Calls [cls allocWithZone:nil].
1816 id
1817 _objc_rootAlloc(Class cls)
1818 {
1819 return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
1820 }
1821
1822 // Calls [cls alloc].
1823 id
1824 objc_alloc(Class cls)
1825 {
1826 return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
1827 }
1828
1829 // Calls [cls allocWithZone:nil].
1830 id
1831 objc_allocWithZone(Class cls)
1832 {
1833 return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
1834 }
1835
1836
1837 void
1838 _objc_rootDealloc(id obj)
1839 {
1840 assert(obj);
1841
1842 obj->rootDealloc();
1843 }
1844
1845 void
1846 _objc_rootFinalize(id obj __unused)
1847 {
1848 assert(obj);
1849 _objc_fatal("_objc_rootFinalize called with garbage collection off");
1850 }
1851
1852
1853 id
1854 _objc_rootInit(id obj)
1855 {
1856 // In practice, it will be hard to rely on this function.
1857 // Many classes do not properly chain -init calls.
1858 return obj;
1859 }
1860
1861
1862 malloc_zone_t *
1863 _objc_rootZone(id obj)
1864 {
1865 (void)obj;
1866 #if __OBJC2__
1867 // allocWithZone under __OBJC2__ ignores the zone parameter
1868 return malloc_default_zone();
1869 #else
1870 malloc_zone_t *rval = malloc_zone_from_ptr(obj);
1871 return rval ? rval : malloc_default_zone();
1872 #endif
1873 }
1874
1875 uintptr_t
1876 _objc_rootHash(id obj)
1877 {
1878 return (uintptr_t)obj;
1879 }
1880
1881 void *
1882 objc_autoreleasePoolPush(void)
1883 {
1884 return AutoreleasePoolPage::push();
1885 }
1886
1887 void
1888 objc_autoreleasePoolPop(void *ctxt)
1889 {
1890 AutoreleasePoolPage::pop(ctxt);
1891 }
1892
1893
1894 void *
1895 _objc_autoreleasePoolPush(void)
1896 {
1897 return objc_autoreleasePoolPush();
1898 }
1899
1900 void
1901 _objc_autoreleasePoolPop(void *ctxt)
1902 {
1903 objc_autoreleasePoolPop(ctxt);
1904 }
1905
1906 void
1907 _objc_autoreleasePoolPrint(void)
1908 {
1909 AutoreleasePoolPage::printAll();
1910 }
1911
1912
1913 // Same as objc_release but suitable for tail-calling
1914 // if you need the value back and don't want to push a frame before this point.
1915 __attribute__((noinline))
1916 static id
1917 objc_releaseAndReturn(id obj)
1918 {
1919 objc_release(obj);
1920 return obj;
1921 }
1922
1923 // Same as objc_retainAutorelease but suitable for tail-calling
1924 // if you don't want to push a frame before this point.
1925 __attribute__((noinline))
1926 static id
1927 objc_retainAutoreleaseAndReturn(id obj)
1928 {
1929 return objc_retainAutorelease(obj);
1930 }
1931
1932
1933 // Prepare a value at +1 for return through a +0 autoreleasing convention.
1934 id
1935 objc_autoreleaseReturnValue(id obj)
1936 {
1937 if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
1938
1939 return objc_autorelease(obj);
1940 }
1941
1942 // Prepare a value at +0 for return through a +0 autoreleasing convention.
1943 id
1944 objc_retainAutoreleaseReturnValue(id obj)
1945 {
1946 if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;
1947
1948 // not objc_autoreleaseReturnValue(objc_retain(obj))
1949 // because we don't need another optimization attempt
1950 return objc_retainAutoreleaseAndReturn(obj);
1951 }
1952
1953 // Accept a value returned through a +0 autoreleasing convention for use at +1.
1954 id
1955 objc_retainAutoreleasedReturnValue(id obj)
1956 {
1957 if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
1958
1959 return objc_retain(obj);
1960 }
1961
1962 // Accept a value returned through a +0 autoreleasing convention for use at +0.
1963 id
1964 objc_unsafeClaimAutoreleasedReturnValue(id obj)
1965 {
1966 if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;
1967
1968 return objc_releaseAndReturn(obj);
1969 }
1970
1971 id
1972 objc_retainAutorelease(id obj)
1973 {
1974 return objc_autorelease(objc_retain(obj));
1975 }
1976
1977 void
1978 _objc_deallocOnMainThreadHelper(void *context)
1979 {
1980 id obj = (id)context;
1981 [obj dealloc];
1982 }
1983
1984 // convert objc_objectptr_t to id, callee must take ownership.
1985 id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1986
1987 // convert objc_objectptr_t to id, without ownership transfer.
1988 id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1989
1990 // convert id to objc_objectptr_t, no ownership transfer.
1991 objc_objectptr_t objc_unretainedPointer(id object) { return object; }
1992
1993
1994 void arr_init(void)
1995 {
1996 AutoreleasePoolPage::init();
1997 SideTableInit();
1998 }
1999
2000
2001 #if SUPPORT_TAGGED_POINTERS
2002
2003 // Placeholder for old debuggers. When they inspect an
2004 // extended tagged pointer object they will see this isa.
2005
2006 @interface __NSUnrecognizedTaggedPointer : NSObject
2007 @end
2008
2009 @implementation __NSUnrecognizedTaggedPointer
2010 +(void) load { }
2011 -(id) retain { return self; }
2012 -(oneway void) release { }
2013 -(id) autorelease { return self; }
2014 @end
2015
2016 #endif
2017
2018
2019 @implementation NSObject
2020
2021 + (void)load {
2022 }
2023
2024 + (void)initialize {
2025 }
2026
2027 + (id)self {
2028 return (id)self;
2029 }
2030
2031 - (id)self {
2032 return self;
2033 }
2034
2035 + (Class)class {
2036 return self;
2037 }
2038
2039 - (Class)class {
2040 return object_getClass(self);
2041 }
2042
2043 + (Class)superclass {
2044 return self->superclass;
2045 }
2046
2047 - (Class)superclass {
2048 return [self class]->superclass;
2049 }
2050
2051 + (BOOL)isMemberOfClass:(Class)cls {
2052 return object_getClass((id)self) == cls;
2053 }
2054
2055 - (BOOL)isMemberOfClass:(Class)cls {
2056 return [self class] == cls;
2057 }
2058
2059 + (BOOL)isKindOfClass:(Class)cls {
2060 for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
2061 if (tcls == cls) return YES;
2062 }
2063 return NO;
2064 }
2065
2066 - (BOOL)isKindOfClass:(Class)cls {
2067 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2068 if (tcls == cls) return YES;
2069 }
2070 return NO;
2071 }
2072
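// Editorial note (illustrative, not part of the original source):
// -isMemberOfClass: tests the exact class, while -isKindOfClass: walks the
// superclass chain as implemented just above.
//
//     NSMutableString *s = [NSMutableString string];
//     [s isMemberOfClass:[NSString class]];   // NO  (exact class differs)
//     [s isKindOfClass:[NSString class]];     // YES (NSString is an ancestor)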
2073 + (BOOL)isSubclassOfClass:(Class)cls {
2074 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2075 if (tcls == cls) return YES;
2076 }
2077 return NO;
2078 }
2079
2080 + (BOOL)isAncestorOfObject:(NSObject *)obj {
2081 for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
2082 if (tcls == self) return YES;
2083 }
2084 return NO;
2085 }
2086
2087 + (BOOL)instancesRespondToSelector:(SEL)sel {
2088 if (!sel) return NO;
2089 return class_respondsToSelector(self, sel);
2090 }
2091
2092 + (BOOL)respondsToSelector:(SEL)sel {
2093 if (!sel) return NO;
2094 return class_respondsToSelector_inst(object_getClass(self), sel, self);
2095 }
2096
2097 - (BOOL)respondsToSelector:(SEL)sel {
2098 if (!sel) return NO;
2099 return class_respondsToSelector_inst([self class], sel, self);
2100 }
2101
2102 + (BOOL)conformsToProtocol:(Protocol *)protocol {
2103 if (!protocol) return NO;
2104 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2105 if (class_conformsToProtocol(tcls, protocol)) return YES;
2106 }
2107 return NO;
2108 }
2109
2110 - (BOOL)conformsToProtocol:(Protocol *)protocol {
2111 if (!protocol) return NO;
2112 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2113 if (class_conformsToProtocol(tcls, protocol)) return YES;
2114 }
2115 return NO;
2116 }
2117
2118 + (NSUInteger)hash {
2119 return _objc_rootHash(self);
2120 }
2121
2122 - (NSUInteger)hash {
2123 return _objc_rootHash(self);
2124 }
2125
2126 + (BOOL)isEqual:(id)obj {
2127 return obj == (id)self;
2128 }
2129
2130 - (BOOL)isEqual:(id)obj {
2131 return obj == self;
2132 }
2133
2134
2135 + (BOOL)isFault {
2136 return NO;
2137 }
2138
2139 - (BOOL)isFault {
2140 return NO;
2141 }
2142
2143 + (BOOL)isProxy {
2144 return NO;
2145 }
2146
2147 - (BOOL)isProxy {
2148 return NO;
2149 }
2150
2151
2152 + (IMP)instanceMethodForSelector:(SEL)sel {
2153 if (!sel) [self doesNotRecognizeSelector:sel];
2154 return class_getMethodImplementation(self, sel);
2155 }
2156
2157 + (IMP)methodForSelector:(SEL)sel {
2158 if (!sel) [self doesNotRecognizeSelector:sel];
2159 return object_getMethodImplementation((id)self, sel);
2160 }
2161
2162 - (IMP)methodForSelector:(SEL)sel {
2163 if (!sel) [self doesNotRecognizeSelector:sel];
2164 return object_getMethodImplementation(self, sel);
2165 }
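// A returned IMP must be cast to the method's exact function type before it
// is called. A minimal sketch, assuming a hypothetical object `obj`:
//
//     SEL sel = @selector(description);
//     IMP imp = [obj methodForSelector:sel];
//     NSString *desc = ((NSString *(*)(id, SEL))imp)(obj, sel);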
2166
2167 + (BOOL)resolveClassMethod:(SEL)sel {
2168 return NO;
2169 }
2170
2171 + (BOOL)resolveInstanceMethod:(SEL)sel {
2172 return NO;
2173 }
2174
2175 // Replaced by CF (throws an NSException)
2176 + (void)doesNotRecognizeSelector:(SEL)sel {
2177 _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
2178 class_getName(self), sel_getName(sel), self);
2179 }
2180
2181 // Replaced by CF (throws an NSException)
2182 - (void)doesNotRecognizeSelector:(SEL)sel {
2183 _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
2184 object_getClassName(self), sel_getName(sel), self);
2185 }
2186
2187
2188 + (id)performSelector:(SEL)sel {
2189 if (!sel) [self doesNotRecognizeSelector:sel];
2190 return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
2191 }
2192
2193 + (id)performSelector:(SEL)sel withObject:(id)obj {
2194 if (!sel) [self doesNotRecognizeSelector:sel];
2195 return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
2196 }
2197
2198 + (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2199 if (!sel) [self doesNotRecognizeSelector:sel];
2200 return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
2201 }
2202
2203 - (id)performSelector:(SEL)sel {
2204 if (!sel) [self doesNotRecognizeSelector:sel];
2205 return ((id(*)(id, SEL))objc_msgSend)(self, sel);
2206 }
2207
2208 - (id)performSelector:(SEL)sel withObject:(id)obj {
2209 if (!sel) [self doesNotRecognizeSelector:sel];
2210 return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
2211 }
2212
2213 - (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2214 if (!sel) [self doesNotRecognizeSelector:sel];
2215 return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
2216 }
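// Each variant above casts objc_msgSend to the exact function type before
// calling it; invoking objc_msgSend through a variadic prototype is not
// ABI-safe on every architecture. Client code that bypasses -performSelector:
// uses the same pattern, e.g. with a hypothetical selector and arguments:
//
//     ((void (*)(id, SEL, id))objc_msgSend)(obj, @selector(setName:), name);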
2217
2218
2219 // Replaced by CF (returns an NSMethodSignature)
2220 + (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
2221 _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
2222 "not available without CoreFoundation");
2223 }
2224
2225 // Replaced by CF (returns an NSMethodSignature)
2226 + (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2227 _objc_fatal("+[NSObject methodSignatureForSelector:] "
2228 "not available without CoreFoundation");
2229 }
2230
2231 // Replaced by CF (returns an NSMethodSignature)
2232 - (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2233 _objc_fatal("-[NSObject methodSignatureForSelector:] "
2234 "not available without CoreFoundation");
2235 }
2236
2237 + (void)forwardInvocation:(NSInvocation *)invocation {
2238 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2239 }
2240
2241 - (void)forwardInvocation:(NSInvocation *)invocation {
2242 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2243 }
2244
2245 + (id)forwardingTargetForSelector:(SEL)sel {
2246 return nil;
2247 }
2248
2249 - (id)forwardingTargetForSelector:(SEL)sel {
2250 return nil;
2251 }
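// Returning a non-nil object from -forwardingTargetForSelector: redirects the
// message to that object before the slower -forwardInvocation: machinery
// runs. A minimal sketch with a hypothetical delegating class:
//
//     @implementation Forwarder   // forwards unknown messages to _target
//     - (id)forwardingTargetForSelector:(SEL)sel {
//         if ([_target respondsToSelector:sel]) return _target;
//         return [super forwardingTargetForSelector:sel];
//     }
//     @end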
2252
2253
2254 // Replaced by CF (returns an NSString)
2255 + (NSString *)description {
2256 return nil;
2257 }
2258
2259 // Replaced by CF (returns an NSString)
2260 - (NSString *)description {
2261 return nil;
2262 }
2263
2264 + (NSString *)debugDescription {
2265 return [self description];
2266 }
2267
2268 - (NSString *)debugDescription {
2269 return [self description];
2270 }
2271
2272
2273 + (id)new {
2274 return [callAlloc(self, false/*checkNil*/) init];
2275 }
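// +new is simply alloc followed by -init; with a hypothetical class Widget:
//
//     Widget *w1 = [Widget new];
//     Widget *w2 = [[Widget alloc] init];   // equivalent sequence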
2276
2277 + (id)retain {
2278 return (id)self;
2279 }
2280
2281 // Replaced by ObjectAlloc
2282 - (id)retain {
2283 return ((id)self)->rootRetain();
2284 }
2285
2286
2287 + (BOOL)_tryRetain {
2288 return YES;
2289 }
2290
2291 // Replaced by ObjectAlloc
2292 - (BOOL)_tryRetain {
2293 return ((id)self)->rootTryRetain();
2294 }
2295
2296 + (BOOL)_isDeallocating {
2297 return NO;
2298 }
2299
2300 - (BOOL)_isDeallocating {
2301 return ((id)self)->rootIsDeallocating();
2302 }
2303
2304 + (BOOL)allowsWeakReference {
2305 return YES;
2306 }
2307
2308 + (BOOL)retainWeakReference {
2309 return YES;
2310 }
2311
2312 - (BOOL)allowsWeakReference {
2313 return ! [self _isDeallocating];
2314 }
2315
2316 - (BOOL)retainWeakReference {
2317 return [self _tryRetain];
2318 }
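// These methods are the hooks the weak-reference machinery consults for
// objects with custom retain/release: -allowsWeakReference is checked when a
// __weak variable is stored to the object, and -retainWeakReference when one
// is loaded. Returning NO from -retainWeakReference makes a __weak load read
// as nil, e.g. with a hypothetical override:
//
//     @implementation Unloadable
//     - (BOOL)retainWeakReference { return NO; }   // __weak loads of this object yield nil
//     @end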
2319
2320 + (oneway void)release {
2321 }
2322
2323 // Replaced by ObjectAlloc
2324 - (oneway void)release {
2325 ((id)self)->rootRelease();
2326 }
2327
2328 + (id)autorelease {
2329 return (id)self;
2330 }
2331
2332 // Replaced by ObjectAlloc
2333 - (id)autorelease {
2334 return ((id)self)->rootAutorelease();
2335 }
2336
2337 + (NSUInteger)retainCount {
2338 return ULONG_MAX;
2339 }
2340
2341 - (NSUInteger)retainCount {
2342 return ((id)self)->rootRetainCount();
2343 }
2344
2345 + (id)alloc {
2346 return _objc_rootAlloc(self);
2347 }
2348
2349 // Replaced by ObjectAlloc
2350 + (id)allocWithZone:(struct _NSZone *)zone {
2351 return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
2352 }
2353
2354 // Replaced by CF (throws an NSException)
2355 + (id)init {
2356 return (id)self;
2357 }
2358
2359 - (id)init {
2360 return _objc_rootInit(self);
2361 }
2362
2363 // Replaced by CF (throws an NSException)
2364 + (void)dealloc {
2365 }
2366
2367
2368 // Replaced by NSZombies
2369 - (void)dealloc {
2370 _objc_rootDealloc(self);
2371 }
2372
2373 // Previously used by GC. Now a placeholder for binary compatibility.
2374 - (void) finalize {
2375 }
2376
2377 + (struct _NSZone *)zone {
2378 return (struct _NSZone *)_objc_rootZone(self);
2379 }
2380
2381 - (struct _NSZone *)zone {
2382 return (struct _NSZone *)_objc_rootZone(self);
2383 }
2384
2385 + (id)copy {
2386 return (id)self;
2387 }
2388
2389 + (id)copyWithZone:(struct _NSZone *)zone {
2390 return (id)self;
2391 }
2392
2393 - (id)copy {
2394 return [(id)self copyWithZone:nil];
2395 }
2396
2397 + (id)mutableCopy {
2398 return (id)self;
2399 }
2400
2401 + (id)mutableCopyWithZone:(struct _NSZone *)zone {
2402 return (id)self;
2403 }
2404
2405 - (id)mutableCopy {
2406 return [(id)self mutableCopyWithZone:nil];
2407 }
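// NSObject only wires -copy to -copyWithZone: (and -mutableCopy to
// -mutableCopyWithZone:); providing the actual copying is the subclass's job
// via NSCopying. A minimal sketch with a hypothetical class:
//
//     @implementation Widget   // adopts NSCopying
//     - (id)copyWithZone:(struct _NSZone *)zone {
//         Widget *copy = [[[self class] alloc] init];
//         copy->_name = [_name copy];
//         return copy;
//     }
//     @end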
2408
2409 @end
2410
2411