1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include "objc-private.h"
25 #include "NSObject.h"
26
27 #include "objc-weak.h"
28 #include "llvm-DenseMap.h"
30
31 #include <malloc/malloc.h>
32 #include <stdint.h>
33 #include <stdbool.h>
34 #include <mach/mach.h>
35 #include <mach-o/dyld.h>
36 #include <mach-o/nlist.h>
37 #include <sys/types.h>
38 #include <sys/mman.h>
39 #include <libkern/OSAtomic.h>
40 #include <Block.h>
41 #include <map>
42 #include <execinfo.h>
43
44 @interface NSInvocation
45 - (SEL)selector;
46 @end
47
48
49 #if TARGET_OS_MAC
50
51 // NSObject used to be in Foundation/CoreFoundation.
52
53 #define SYMBOL_ELSEWHERE_IN_3(sym, vers, n) \
54 OBJC_EXPORT const char elsewhere_ ##n __asm__("$ld$hide$os" #vers "$" #sym); const char elsewhere_ ##n = 0
55 #define SYMBOL_ELSEWHERE_IN_2(sym, vers, n) \
56 SYMBOL_ELSEWHERE_IN_3(sym, vers, n)
57 #define SYMBOL_ELSEWHERE_IN(sym, vers) \
58 SYMBOL_ELSEWHERE_IN_2(sym, vers, __COUNTER__)
59
60 #if __OBJC2__
61 # define NSOBJECT_ELSEWHERE_IN(vers) \
62 SYMBOL_ELSEWHERE_IN(_OBJC_CLASS_$_NSObject, vers); \
63 SYMBOL_ELSEWHERE_IN(_OBJC_METACLASS_$_NSObject, vers); \
64 SYMBOL_ELSEWHERE_IN(_OBJC_IVAR_$_NSObject.isa, vers)
65 #else
66 # define NSOBJECT_ELSEWHERE_IN(vers) \
67 SYMBOL_ELSEWHERE_IN(.objc_class_name_NSObject, vers)
68 #endif
69
70 #if TARGET_OS_IOS
71 NSOBJECT_ELSEWHERE_IN(5.1);
72 NSOBJECT_ELSEWHERE_IN(5.0);
73 NSOBJECT_ELSEWHERE_IN(4.3);
74 NSOBJECT_ELSEWHERE_IN(4.2);
75 NSOBJECT_ELSEWHERE_IN(4.1);
76 NSOBJECT_ELSEWHERE_IN(4.0);
77 NSOBJECT_ELSEWHERE_IN(3.2);
78 NSOBJECT_ELSEWHERE_IN(3.1);
79 NSOBJECT_ELSEWHERE_IN(3.0);
80 NSOBJECT_ELSEWHERE_IN(2.2);
81 NSOBJECT_ELSEWHERE_IN(2.1);
82 NSOBJECT_ELSEWHERE_IN(2.0);
83 #elif TARGET_OS_OSX
84 NSOBJECT_ELSEWHERE_IN(10.7);
85 NSOBJECT_ELSEWHERE_IN(10.6);
86 NSOBJECT_ELSEWHERE_IN(10.5);
87 NSOBJECT_ELSEWHERE_IN(10.4);
88 NSOBJECT_ELSEWHERE_IN(10.3);
89 NSOBJECT_ELSEWHERE_IN(10.2);
90 NSOBJECT_ELSEWHERE_IN(10.1);
91 NSOBJECT_ELSEWHERE_IN(10.0);
92 #else
93 // NSObject has always been in libobjc on these platforms.
94 #endif
95
96 // TARGET_OS_MAC
97 #endif
98
99
100 /***********************************************************************
101 * Weak ivar support
102 **********************************************************************/
103
104 static id defaultBadAllocHandler(Class cls)
105 {
106 _objc_fatal("attempt to allocate object of class '%s' failed",
107 cls->nameForLogging());
108 }
109
110 static id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
111
112 static id callBadAllocHandler(Class cls)
113 {
114 // fixme add re-entrancy protection in case allocation fails inside handler
115 return (*badAllocHandler)(cls);
116 }
117
118 void _objc_setBadAllocHandler(id(*newHandler)(Class))
119 {
120 badAllocHandler = newHandler;
121 }
122
123
124 namespace {
125
126 // The order of these bits is important.
127 #define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
128 #define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
129 #define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit
130 #define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1))
131
132 #define SIDE_TABLE_RC_SHIFT 2
133 #define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
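
// Worked example (illustrative): an object with two extra retains that
// is also weakly referenced has the side-table value
//     (2 << SIDE_TABLE_RC_SHIFT) | SIDE_TABLE_WEAKLY_REFERENCED  // == 0b1001
// and the pieces are recovered with
//     size_t extra_rc = bits >> SIDE_TABLE_RC_SHIFT;
//     size_t flags    = bits & SIDE_TABLE_FLAG_MASK;
// where `bits` stands for the stored value.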
134
135 // RefcountMap disguises its pointers because we
136 // don't want the table to act as a root for `leaks`.
137 typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;
138
139 // Template parameters.
140 enum HaveOld { DontHaveOld = false, DoHaveOld = true };
141 enum HaveNew { DontHaveNew = false, DoHaveNew = true };
142
143 struct SideTable {
144 spinlock_t slock;
145 RefcountMap refcnts;
146 weak_table_t weak_table;
147
148 SideTable() {
149 memset(&weak_table, 0, sizeof(weak_table));
150 }
151
152 ~SideTable() {
153 _objc_fatal("Do not delete SideTable.");
154 }
155
156 void lock() { slock.lock(); }
157 void unlock() { slock.unlock(); }
158 void forceReset() { slock.forceReset(); }
159
160 // Address-ordered lock discipline for a pair of side tables.
161
162 template<HaveOld, HaveNew>
163 static void lockTwo(SideTable *lock1, SideTable *lock2);
164 template<HaveOld, HaveNew>
165 static void unlockTwo(SideTable *lock1, SideTable *lock2);
166 };
167
168
169 template<>
170 void SideTable::lockTwo<DoHaveOld, DoHaveNew>
171 (SideTable *lock1, SideTable *lock2)
172 {
173 spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
174 }
175
176 template<>
177 void SideTable::lockTwo<DoHaveOld, DontHaveNew>
178 (SideTable *lock1, SideTable *)
179 {
180 lock1->lock();
181 }
182
183 template<>
184 void SideTable::lockTwo<DontHaveOld, DoHaveNew>
185 (SideTable *, SideTable *lock2)
186 {
187 lock2->lock();
188 }
189
190 template<>
191 void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
192 (SideTable *lock1, SideTable *lock2)
193 {
194 spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
195 }
196
197 template<>
198 void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
199 (SideTable *lock1, SideTable *)
200 {
201 lock1->unlock();
202 }
203
204 template<>
205 void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
206 (SideTable *, SideTable *lock2)
207 {
208 lock2->unlock();
209 }
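
// Sketch of the address-ordered discipline this relies on (an
// assumption about spinlock_t::lockTwo, which is defined elsewhere in
// the runtime): take the lower-addressed lock first, so two threads
// locking the same pair in opposite roles cannot deadlock.
//
//     static void lockTwoOrdered(spinlock_t *a, spinlock_t *b) {
//         if (a < b)      { a->lock(); b->lock(); }
//         else if (b < a) { b->lock(); a->lock(); }
//         else            { a->lock(); }  // same stripe for old and new
//     }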
210
211
212 // We cannot use a C++ static initializer to initialize SideTables because
213 // libc calls us before our C++ initializers run. We also don't want a global
214 // pointer to this struct because of the extra indirection.
215 // Do it the hard way.
216 alignas(StripedMap<SideTable>) static uint8_t
217 SideTableBuf[sizeof(StripedMap<SideTable>)];
218
219 static void SideTableInit() {
220 new (SideTableBuf) StripedMap<SideTable>();
221 }
222
223 static StripedMap<SideTable>& SideTables() {
224 return *reinterpret_cast<StripedMap<SideTable>*>(SideTableBuf);
225 }
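
// Canonical usage pattern, seen throughout this file: hash the object
// to its stripe, then hold that stripe's lock around any refcount or
// weak-table work.
//
//     SideTable& table = SideTables()[obj];
//     table.lock();
//     // ... read/write table.refcnts and table.weak_table ...
//     table.unlock();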
226
227 // anonymous namespace
228 };
229
230 void SideTableLockAll() {
231 SideTables().lockAll();
232 }
233
234 void SideTableUnlockAll() {
235 SideTables().unlockAll();
236 }
237
238 void SideTableForceResetAll() {
239 SideTables().forceResetAll();
240 }
241
242 void SideTableDefineLockOrder() {
243 SideTables().defineLockOrder();
244 }
245
246 void SideTableLocksPrecedeLock(const void *newlock) {
247 SideTables().precedeLock(newlock);
248 }
249
250 void SideTableLocksSucceedLock(const void *oldlock) {
251 SideTables().succeedLock(oldlock);
252 }
253
254 //
255 // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
256 //
257
258 id objc_retainBlock(id x) {
259 return (id)_Block_copy(x);
260 }
261
262 //
263 // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
264 //
265
266 BOOL objc_should_deallocate(id object) {
267 return YES;
268 }
269
270 id
271 objc_retain_autorelease(id obj)
272 {
273 return objc_autorelease(objc_retain(obj));
274 }
275
276
277 void
278 objc_storeStrong(id *location, id obj)
279 {
280 id prev = *location;
281 if (obj == prev) {
282 return;
283 }
284 objc_retain(obj);
285 *location = obj;
286 objc_release(prev);
287 }
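
// Example (compiler lowering, roughly): under ARC an assignment to a
// __strong variable
//     strongVar = newValue;
// becomes objc_storeStrong(&strongVar, newValue). Retaining the new
// value before releasing the old one keeps self-assignment safe and
// keeps newValue alive even when prev held the only reference to it.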
288
289
290 // Update a weak variable.
291 // If HaveOld is true, the variable has an existing value
292 // that needs to be cleaned up. This value might be nil.
293 // If HaveNew is true, there is a new value that needs to be
294 // assigned into the variable. This value might be nil.
295 // If CrashIfDeallocating is true, the process is halted if newObj is
296 // deallocating or newObj's class does not support weak references.
297 // If CrashIfDeallocating is false, nil is stored instead.
298 enum CrashIfDeallocating {
299 DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
300 };
301 template <HaveOld haveOld, HaveNew haveNew,
302 CrashIfDeallocating crashIfDeallocating>
303 static id
304 storeWeak(id *location, objc_object *newObj)
305 {
306 assert(haveOld || haveNew);
307 if (!haveNew) assert(newObj == nil);
308
309 Class previouslyInitializedClass = nil;
310 id oldObj;
311 SideTable *oldTable;
312 SideTable *newTable;
313
314 // Acquire locks for old and new values.
315 // Order by lock address to prevent lock ordering problems.
316 // Retry if the old value changes underneath us.
317 retry:
318 if (haveOld) {
319 oldObj = *location;
320 oldTable = &SideTables()[oldObj];
321 } else {
322 oldTable = nil;
323 }
324 if (haveNew) {
325 newTable = &SideTables()[newObj];
326 } else {
327 newTable = nil;
328 }
329
330 SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);
331
332 if (haveOld && *location != oldObj) {
333 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
334 goto retry;
335 }
336
337 // Prevent a deadlock between the weak reference machinery
338 // and the +initialize machinery by ensuring that no
339 // weakly-referenced object has an un-+initialized isa.
340 if (haveNew && newObj) {
341 Class cls = newObj->getIsa();
342 if (cls != previouslyInitializedClass &&
343 !((objc_class *)cls)->isInitialized())
344 {
345 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
346 _class_initialize(_class_getNonMetaClass(cls, (id)newObj));
347
348 // If this class is finished with +initialize then we're good.
349 // If this class is still running +initialize on this thread
350 // (i.e. +initialize called storeWeak on an instance of itself)
351 // then we may proceed, but the check above will still see the
352 // class as initializing rather than initialized.
353 // Instead set previouslyInitializedClass to recognize it on retry.
354 previouslyInitializedClass = cls;
355
356 goto retry;
357 }
358 }
359
360 // Clean up old value, if any.
361 if (haveOld) {
362 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
363 }
364
365 // Assign new value, if any.
366 if (haveNew) {
367 newObj = (objc_object *)
368 weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
369 crashIfDeallocating);
370 // weak_register_no_lock returns nil if weak store should be rejected
371
372 // Set is-weakly-referenced bit in refcount table.
373 if (newObj && !newObj->isTaggedPointer()) {
374 newObj->setWeaklyReferenced_nolock();
375 }
376
377 // Do not set *location anywhere else. That would introduce a race.
378 *location = (id)newObj;
379 }
380 else {
381 // No new value. The storage is not changed.
382 }
383
384 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
385
386 return (id)newObj;
387 }
388
389
390 /**
391 * This function stores a new value into a __weak variable. It would
392 * be used anywhere a __weak variable is the target of an assignment.
393 *
394 * @param location The address of the weak pointer itself
395 * @param newObj The new object this weak ptr should now point to
396 *
397 * @return \e newObj
398 */
399 id
400 objc_storeWeak(id *location, id newObj)
401 {
402 return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
403 (location, (objc_object *)newObj);
404 }
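
// Example (illustrative): for a __weak variable the compiler emits
//     __weak id w = o;    // objc_initWeak(&w, o)
//     w = other;          // objc_storeWeak(&w, other)
//     w = nil;            // objc_storeWeak(&w, nil)
//     // end of scope:       objc_destroyWeak(&w)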
405
406
407 /**
408 * This function stores a new value into a __weak variable.
409 * If the new object is deallocating or the new object's class
410 * does not support weak references, stores nil instead.
411 *
412 * @param location The address of the weak pointer itself
413 * @param newObj The new object this weak ptr should now point to
414 *
415 * @return The value stored (either the new object or nil)
416 */
417 id
418 objc_storeWeakOrNil(id *location, id newObj)
419 {
420 return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
421 (location, (objc_object *)newObj);
422 }
423
424
425 /**
426 * Initialize a fresh weak pointer to point at some object.
427 * It would be used for code like:
428 *
429 * (The nil case)
430 * __weak id weakPtr;
431 * (The non-nil case)
432 * NSObject *o = ...;
433 * __weak id weakPtr = o;
434 *
435 * This function IS NOT thread-safe with respect to concurrent
436 * modifications to the weak variable. (Concurrent weak clear is safe.)
437 *
438 * @param location Address of __weak ptr.
439 * @param newObj Object ptr.
440 */
441 id
442 objc_initWeak(id *location, id newObj)
443 {
444 if (!newObj) {
445 *location = nil;
446 return nil;
447 }
448
449 return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
450 (location, (objc_object*)newObj);
451 }
452
453 id
454 objc_initWeakOrNil(id *location, id newObj)
455 {
456 if (!newObj) {
457 *location = nil;
458 return nil;
459 }
460
461 return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
462 (location, (objc_object*)newObj);
463 }
464
465
466 /**
467 * Destroys the relationship between a weak pointer
468 * and the object it is referencing in the internal weak
469 * table. If the weak pointer is not referencing anything,
470 * there is no need to edit the weak table.
471 *
472 * This function IS NOT thread-safe with respect to concurrent
473 * modifications to the weak variable. (Concurrent weak clear is safe.)
474 *
475 * @param location The weak pointer address.
476 */
477 void
478 objc_destroyWeak(id *location)
479 {
480 (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
481 (location, nil);
482 }
483
484
485 /*
486 Once upon a time we eagerly cleared *location if we saw the object
487 was deallocating. This confuses code like NSPointerFunctions, which
488 tries to pre-flight the raw storage and assumes that if the storage is
489 zero, the weak system is done interfering. That is false: the
490 weak system is still going to check and clear the storage later.
491 This can cause objc_weak_error complaints and crashes.
492 So we now don't touch the storage until deallocation completes.
493 */
494
495 id
496 objc_loadWeakRetained(id *location)
497 {
498 id obj;
499 id result;
500 Class cls;
501
502 SideTable *table;
503
504 retry:
505 // fixme std::atomic this load
506 obj = *location;
507 if (!obj) return nil;
508 if (obj->isTaggedPointer()) return obj;
509
510 table = &SideTables()[obj];
511
512 table->lock();
513 if (*location != obj) {
514 table->unlock();
515 goto retry;
516 }
517
518 result = obj;
519
520 cls = obj->ISA();
521 if (! cls->hasCustomRR()) {
522 // Fast case. We know +initialize is complete because
523 // default-RR can never be set before then.
524 assert(cls->isInitialized());
525 if (! obj->rootTryRetain()) {
526 result = nil;
527 }
528 }
529 else {
530 // Slow case. We must check for +initialize and call it outside
531 // the lock if necessary in order to avoid deadlocks.
532 if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
533 BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
534 class_getMethodImplementation(cls, SEL_retainWeakReference);
535 if ((IMP)tryRetain == _objc_msgForward) {
536 result = nil;
537 }
538 else if (! (*tryRetain)(obj, SEL_retainWeakReference)) {
539 result = nil;
540 }
541 }
542 else {
543 table->unlock();
544 _class_initialize(cls);
545 goto retry;
546 }
547 }
548
549 table->unlock();
550 return result;
551 }
552
553 /**
554 * This loads the object referenced by a weak pointer and returns it, after
555 * retaining and autoreleasing the object to ensure that it stays alive
556 * long enough for the caller to use it. This function would be used
557 * anywhere a __weak variable is used in an expression.
558 *
559 * @param location The weak pointer address
560 *
561 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
562 */
563 id
564 objc_loadWeak(id *location)
565 {
566 if (!*location) return nil;
567 return objc_autorelease(objc_loadWeakRetained(location));
568 }
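
// Example (illustrative): under ARC a read of a __weak variable
//     id strong = w;
// is typically compiled to objc_loadWeakRetained(&w) plus a balancing
// release at the end of the full expression. objc_loadWeak is the +0
// variant that autoreleases the result instead.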
569
570
571 /**
572 * This function copies a weak pointer from one location to another,
573 * when the destination doesn't already contain a weak pointer. It
574 * would be used for code like:
575 *
576 * __weak id src = ...;
577 * __weak id dst = src;
578 *
579 * This function IS NOT thread-safe with respect to concurrent
580 * modifications to the destination variable. (Concurrent weak clear is safe.)
581 *
582 * @param dst The destination variable.
583 * @param src The source variable.
584 */
585 void
586 objc_copyWeak(id *dst, id *src)
587 {
588 id obj = objc_loadWeakRetained(src);
589 objc_initWeak(dst, obj);
590 objc_release(obj);
591 }
592
593 /**
594 * Move a weak pointer from one location to another.
595 * Before the move, the destination must be uninitialized.
596 * After the move, the source is nil.
597 *
598 * This function IS NOT thread-safe with respect to concurrent
599 * modifications to either weak variable. (Concurrent weak clear is safe.)
600 *
601 */
602 void
603 objc_moveWeak(id *dst, id *src)
604 {
605 objc_copyWeak(dst, src);
606 objc_destroyWeak(src);
607 *src = nil;
608 }
609
610
611 /***********************************************************************
612 Autorelease pool implementation
613
614 A thread's autorelease pool is a stack of pointers.
615 Each pointer is either an object to release, or POOL_BOUNDARY which is
616 an autorelease pool boundary.
617 A pool token is a pointer to the POOL_BOUNDARY for that pool. When
618 the pool is popped, every object hotter than that boundary is released.
619 The stack is divided into a doubly-linked list of pages. Pages are added
620 and deleted as necessary.
621 Thread-local storage points to the hot page, where newly autoreleased
622 objects are stored.
623 **********************************************************************/
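
// Example (illustrative): an @autoreleasepool statement compiles to
// roughly the following, using the entry points defined later in this
// file:
//
//     void *ctxt = objc_autoreleasePoolPush();   // pushes a POOL_BOUNDARY
//     // ... objc_autorelease(obj) appends obj to the hot page ...
//     objc_autoreleasePoolPop(ctxt);             // releases back to the boundary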
624
625 // Set this to 1 to mprotect() autorelease pool contents
626 #define PROTECT_AUTORELEASEPOOL 0
627
628 // Set this to 1 to validate the entire autorelease pool header all the time
629 // (i.e. use check() instead of fastcheck() everywhere)
630 #define CHECK_AUTORELEASEPOOL (DEBUG)
631
632 BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
633 BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
634
635 namespace {
636
637 struct magic_t {
638 static const uint32_t M0 = 0xA1A1A1A1;
639 # define M1 "AUTORELEASE!"
640 static const size_t M1_len = 12;
641 uint32_t m[4];
642
643 magic_t() {
644 assert(M1_len == strlen(M1));
645 assert(M1_len == 3 * sizeof(m[1]));
646
647 m[0] = M0;
648 strncpy((char *)&m[1], M1, M1_len);
649 }
650
651 ~magic_t() {
652 m[0] = m[1] = m[2] = m[3] = 0;
653 }
654
655 bool check() const {
656 return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
657 }
658
659 bool fastcheck() const {
660 #if CHECK_AUTORELEASEPOOL
661 return check();
662 #else
663 return (m[0] == M0);
664 #endif
665 }
666
667 # undef M1
668 };
669
670
671 class AutoreleasePoolPage
672 {
673 // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
674 // pushed and it has never contained any objects. This saves memory
675 // when the top level (i.e. libdispatch) pushes and pops pools but
676 // never uses them.
677 # define EMPTY_POOL_PLACEHOLDER ((id*)1)
678
679 # define POOL_BOUNDARY nil
680 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
681 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
682 static size_t const SIZE =
683 #if PROTECT_AUTORELEASEPOOL
684 PAGE_MAX_SIZE; // must be multiple of vm page size
685 #else
686 PAGE_MAX_SIZE; // size and alignment, power of 2
687 #endif
688 static size_t const COUNT = SIZE / sizeof(id);
689
690 magic_t const magic;
691 id *next;
692 pthread_t const thread;
693 AutoreleasePoolPage * const parent;
694 AutoreleasePoolPage *child;
695 uint32_t const depth;
696 uint32_t hiwat;
697
698 // SIZE-sizeof(*this) bytes of contents follow
699
700 static void * operator new(size_t size) {
701 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
702 }
703 static void operator delete(void * p) {
704 return free(p);
705 }
706
707 inline void protect() {
708 #if PROTECT_AUTORELEASEPOOL
709 mprotect(this, SIZE, PROT_READ);
710 check();
711 #endif
712 }
713
714 inline void unprotect() {
715 #if PROTECT_AUTORELEASEPOOL
716 check();
717 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
718 #endif
719 }
720
721 AutoreleasePoolPage(AutoreleasePoolPage *newParent)
722 : magic(), next(begin()), thread(pthread_self()),
723 parent(newParent), child(nil),
724 depth(parent ? 1+parent->depth : 0),
725 hiwat(parent ? parent->hiwat : 0)
726 {
727 if (parent) {
728 parent->check();
729 assert(!parent->child);
730 parent->unprotect();
731 parent->child = this;
732 parent->protect();
733 }
734 protect();
735 }
736
737 ~AutoreleasePoolPage()
738 {
739 check();
740 unprotect();
741 assert(empty());
742
743 // Not recursive: we don't want to blow out the stack
744 // if a thread accumulates a stupendous amount of garbage
745 assert(!child);
746 }
747
748
749 void busted(bool die = true)
750 {
751 magic_t right;
752 (die ? _objc_fatal : _objc_inform)
753 ("autorelease pool page %p corrupted\n"
754 " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
755 " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
756 " pthread %p\n"
757 " should be %p\n",
758 this,
759 magic.m[0], magic.m[1], magic.m[2], magic.m[3],
760 right.m[0], right.m[1], right.m[2], right.m[3],
761 this->thread, pthread_self());
762 }
763
764 void check(bool die = true)
765 {
766 if (!magic.check() || !pthread_equal(thread, pthread_self())) {
767 busted(die);
768 }
769 }
770
771 void fastcheck(bool die = true)
772 {
773 #if CHECK_AUTORELEASEPOOL
774 check(die);
775 #else
776 if (! magic.fastcheck()) {
777 busted(die);
778 }
779 #endif
780 }
781
782
783 id * begin() {
784 return (id *) ((uint8_t *)this+sizeof(*this));
785 }
786
787 id * end() {
788 return (id *) ((uint8_t *)this+SIZE);
789 }
790
791 bool empty() {
792 return next == begin();
793 }
794
795 bool full() {
796 return next == end();
797 }
798
799 bool lessThanHalfFull() {
800 return (next - begin() < (end() - begin()) / 2);
801 }
802
803 id *add(id obj)
804 {
805 assert(!full());
806 unprotect();
807 id *ret = next; // faster than `return next-1` because of aliasing
808 *next++ = obj;
809 protect();
810 return ret;
811 }
812
813 void releaseAll()
814 {
815 releaseUntil(begin());
816 }
817
818 void releaseUntil(id *stop)
819 {
820 // Not recursive: we don't want to blow out the stack
821 // if a thread accumulates a stupendous amount of garbage
822
823 while (this->next != stop) {
824 // Restart from hotPage() every time, in case -release
825 // autoreleased more objects
826 AutoreleasePoolPage *page = hotPage();
827
828 // fixme I think this `while` can be `if`, but I can't prove it
829 while (page->empty()) {
830 page = page->parent;
831 setHotPage(page);
832 }
833
834 page->unprotect();
835 id obj = *--page->next;
836 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
837 page->protect();
838
839 if (obj != POOL_BOUNDARY) {
840 objc_release(obj);
841 }
842 }
843
844 setHotPage(this);
845
846 #if DEBUG
847 // we expect any children to be completely empty
848 for (AutoreleasePoolPage *page = child; page; page = page->child) {
849 assert(page->empty());
850 }
851 #endif
852 }
853
854 void kill()
855 {
856 // Not recursive: we don't want to blow out the stack
857 // if a thread accumulates a stupendous amount of garbage
858 AutoreleasePoolPage *page = this;
859 while (page->child) page = page->child;
860
861 AutoreleasePoolPage *deathptr;
862 do {
863 deathptr = page;
864 page = page->parent;
865 if (page) {
866 page->unprotect();
867 page->child = nil;
868 page->protect();
869 }
870 delete deathptr;
871 } while (deathptr != this);
872 }
873
874 static void tls_dealloc(void *p)
875 {
876 if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
877 // No objects or pool pages to clean up here.
878 return;
879 }
880
881 // reinstate TLS value while we work
882 setHotPage((AutoreleasePoolPage *)p);
883
884 if (AutoreleasePoolPage *page = coldPage()) {
885 if (!page->empty()) pop(page->begin()); // pop all of the pools
886 if (DebugMissingPools || DebugPoolAllocation) {
887 // pop() killed the pages already
888 } else {
889 page->kill(); // free all of the pages
890 }
891 }
892
893 // clear TLS value so TLS destruction doesn't loop
894 setHotPage(nil);
895 }
896
897 static AutoreleasePoolPage *pageForPointer(const void *p)
898 {
899 return pageForPointer((uintptr_t)p);
900 }
901
902 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
903 {
904 AutoreleasePoolPage *result;
905 uintptr_t offset = p % SIZE;
906
907 assert(offset >= sizeof(AutoreleasePoolPage));
908
909 result = (AutoreleasePoolPage *)(p - offset);
910 result->fastcheck();
911
912 return result;
913 }
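
// Worked example (illustrative, SIZE == 4096): pages are allocated
// SIZE-aligned, so an interior pointer such as 0x100001038 has
// offset 0x38 and maps back to its page header at 0x100001000
// via p - (p % SIZE).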
914
915
916 static inline bool haveEmptyPoolPlaceholder()
917 {
918 id *tls = (id *)tls_get_direct(key);
919 return (tls == EMPTY_POOL_PLACEHOLDER);
920 }
921
922 static inline id* setEmptyPoolPlaceholder()
923 {
924 assert(tls_get_direct(key) == nil);
925 tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
926 return EMPTY_POOL_PLACEHOLDER;
927 }
928
929 static inline AutoreleasePoolPage *hotPage()
930 {
931 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
932 tls_get_direct(key);
933 if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
934 if (result) result->fastcheck();
935 return result;
936 }
937
938 static inline void setHotPage(AutoreleasePoolPage *page)
939 {
940 if (page) page->fastcheck();
941 tls_set_direct(key, (void *)page);
942 }
943
944 static inline AutoreleasePoolPage *coldPage()
945 {
946 AutoreleasePoolPage *result = hotPage();
947 if (result) {
948 while (result->parent) {
949 result = result->parent;
950 result->fastcheck();
951 }
952 }
953 return result;
954 }
955
956
957 static inline id *autoreleaseFast(id obj)
958 {
959 AutoreleasePoolPage *page = hotPage();
960 if (page && !page->full()) {
961 return page->add(obj);
962 } else if (page) {
963 return autoreleaseFullPage(obj, page);
964 } else {
965 return autoreleaseNoPage(obj);
966 }
967 }
968
969 static __attribute__((noinline))
970 id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
971 {
972 // The hot page is full.
973 // Step to the next non-full page, adding a new page if necessary.
974 // Then add the object to that page.
975 assert(page == hotPage());
976 assert(page->full() || DebugPoolAllocation);
977
978 do {
979 if (page->child) page = page->child;
980 else page = new AutoreleasePoolPage(page);
981 } while (page->full());
982
983 setHotPage(page);
984 return page->add(obj);
985 }
986
987 static __attribute__((noinline))
988 id *autoreleaseNoPage(id obj)
989 {
990 // "No page" could mean no pool has been pushed
991 // or an empty placeholder pool has been pushed and has no contents yet
992 assert(!hotPage());
993
994 bool pushExtraBoundary = false;
995 if (haveEmptyPoolPlaceholder()) {
996 // We are pushing a second pool over the empty placeholder pool
997 // or pushing the first object into the empty placeholder pool.
998 // Before doing that, push a pool boundary on behalf of the pool
999 // that is currently represented by the empty placeholder.
1000 pushExtraBoundary = true;
1001 }
1002 else if (obj != POOL_BOUNDARY && DebugMissingPools) {
1003 // We are pushing an object with no pool in place,
1004 // and no-pool debugging was requested by environment.
1005 _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
1006 "autoreleased with no pool in place - "
1007 "just leaking - break on "
1008 "objc_autoreleaseNoPool() to debug",
1009 pthread_self(), (void*)obj, object_getClassName(obj));
1010 objc_autoreleaseNoPool(obj);
1011 return nil;
1012 }
1013 else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
1014 // We are pushing a pool with no pool in place,
1015 // and alloc-per-pool debugging was not requested.
1016 // Install and return the empty pool placeholder.
1017 return setEmptyPoolPlaceholder();
1018 }
1019
1020 // We are pushing an object or a non-placeholder'd pool.
1021
1022 // Install the first page.
1023 AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
1024 setHotPage(page);
1025
1026 // Push a boundary on behalf of the previously-placeholder'd pool.
1027 if (pushExtraBoundary) {
1028 page->add(POOL_BOUNDARY);
1029 }
1030
1031 // Push the requested object or pool.
1032 return page->add(obj);
1033 }
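
// Worked scenario (illustrative) for the placeholder handling above:
//     push()           -> no hot page, obj == POOL_BOUNDARY:
//                         TLS := EMPTY_POOL_PLACEHOLDER, no page allocated
//     autorelease(obj) -> no hot page, placeholder present: allocate the
//                         first page, push one POOL_BOUNDARY on behalf of
//                         the placeholder pool, then push obj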
1034
1035
1036 static __attribute__((noinline))
1037 id *autoreleaseNewPage(id obj)
1038 {
1039 AutoreleasePoolPage *page = hotPage();
1040 if (page) return autoreleaseFullPage(obj, page);
1041 else return autoreleaseNoPage(obj);
1042 }
1043
1044 public:
1045 static inline id autorelease(id obj)
1046 {
1047 assert(obj);
1048 assert(!obj->isTaggedPointer());
1049 id *dest __unused = autoreleaseFast(obj);
1050 assert(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
1051 return obj;
1052 }
1053
1054
1055 static inline void *push()
1056 {
1057 id *dest;
1058 if (DebugPoolAllocation) {
1059 // Each autorelease pool starts on a new pool page.
1060 dest = autoreleaseNewPage(POOL_BOUNDARY);
1061 } else {
1062 dest = autoreleaseFast(POOL_BOUNDARY);
1063 }
1064 assert(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
1065 return dest;
1066 }
1067
1068 static void badPop(void *token)
1069 {
1070 // Error. For bincompat purposes this is not
1071 // fatal in executables built with old SDKs.
1072
1073 if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0)) {
1074 // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
1075 _objc_fatal
1076 ("Invalid or prematurely-freed autorelease pool %p.", token);
1077 }
1078
1079 // Old SDK. Bad pop is warned once.
1080 static bool complained = false;
1081 if (!complained) {
1082 complained = true;
1083 _objc_inform_now_and_on_crash
1084 ("Invalid or prematurely-freed autorelease pool %p. "
1085 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
1086 "Proceeding anyway because the app is old "
1087 "(SDK version " SDK_FORMAT "). Memory errors are likely.",
1088 token, FORMAT_SDK(sdkVersion()));
1089 }
1090 objc_autoreleasePoolInvalid(token);
1091 }
1092
1093 static inline void pop(void *token)
1094 {
1095 AutoreleasePoolPage *page;
1096 id *stop;
1097
1098 if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
1099 // Popping the top-level placeholder pool.
1100 if (hotPage()) {
1101 // Pool was used. Pop its contents normally.
1102 // Pool pages remain allocated for re-use as usual.
1103 pop(coldPage()->begin());
1104 } else {
1105 // Pool was never used. Clear the placeholder.
1106 setHotPage(nil);
1107 }
1108 return;
1109 }
1110
1111 page = pageForPointer(token);
1112 stop = (id *)token;
1113 if (*stop != POOL_BOUNDARY) {
1114 if (stop == page->begin() && !page->parent) {
1115 // Start of coldest page may correctly not be POOL_BOUNDARY:
1116 // 1. top-level pool is popped, leaving the cold page in place
1117 // 2. an object is autoreleased with no pool
1118 } else {
1119 // Error. For bincompat purposes this is not
1120 // fatal in executables built with old SDKs.
1121 return badPop(token);
1122 }
1123 }
1124
1125 if (PrintPoolHiwat) printHiwat();
1126
1127 page->releaseUntil(stop);
1128
1129 // memory: delete empty children
1130 if (DebugPoolAllocation && page->empty()) {
1131 // special case: delete everything during page-per-pool debugging
1132 AutoreleasePoolPage *parent = page->parent;
1133 page->kill();
1134 setHotPage(parent);
1135 } else if (DebugMissingPools && page->empty() && !page->parent) {
1136 // special case: delete everything for pop(top)
1137 // when debugging missing autorelease pools
1138 page->kill();
1139 setHotPage(nil);
1140 }
1141 else if (page->child) {
1142 // hysteresis: keep one empty child if page is more than half full
1143 if (page->lessThanHalfFull()) {
1144 page->child->kill();
1145 }
1146 else if (page->child->child) {
1147 page->child->child->kill();
1148 }
1149 }
1150 }
1151
1152 static void init()
1153 {
1154 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
1155 AutoreleasePoolPage::tls_dealloc);
1156 assert(r == 0);
1157 }
1158
1159 void print()
1160 {
1161 _objc_inform("[%p] ................ PAGE %s %s %s", this,
1162 full() ? "(full)" : "",
1163 this == hotPage() ? "(hot)" : "",
1164 this == coldPage() ? "(cold)" : "");
1165 check(false);
1166 for (id *p = begin(); p < next; p++) {
1167 if (*p == POOL_BOUNDARY) {
1168 _objc_inform("[%p] ################ POOL %p", p, p);
1169 } else {
1170 _objc_inform("[%p] %#16lx %s",
1171 p, (unsigned long)*p, object_getClassName(*p));
1172 }
1173 }
1174 }
1175
1176 static void printAll()
1177 {
1178 _objc_inform("##############");
1179 _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());
1180
1181 AutoreleasePoolPage *page;
1182 ptrdiff_t objects = 0;
1183 for (page = coldPage(); page; page = page->child) {
1184 objects += page->next - page->begin();
1185 }
1186 _objc_inform("%llu releases pending.", (unsigned long long)objects);
1187
1188 if (haveEmptyPoolPlaceholder()) {
1189 _objc_inform("[%p] ................ PAGE (placeholder)",
1190 EMPTY_POOL_PLACEHOLDER);
1191 _objc_inform("[%p] ################ POOL (placeholder)",
1192 EMPTY_POOL_PLACEHOLDER);
1193 }
1194 else {
1195 for (page = coldPage(); page; page = page->child) {
1196 page->print();
1197 }
1198 }
1199
1200 _objc_inform("##############");
1201 }
1202
1203 static void printHiwat()
1204 {
1205 // Check and propagate high water mark
1206 // Ignore high water marks under 256 to suppress noise.
1207 AutoreleasePoolPage *p = hotPage();
1208 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
1209 if (mark > p->hiwat && mark > 256) {
1210 for( ; p; p = p->parent) {
1211 p->unprotect();
1212 p->hiwat = mark;
1213 p->protect();
1214 }
1215
1216 _objc_inform("POOL HIGHWATER: new high water mark of %u "
1217 "pending releases for thread %p:",
1218 mark, pthread_self());
1219
1220 void *stack[128];
1221 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
1222 char **sym = backtrace_symbols(stack, count);
1223 for (int i = 0; i < count; i++) {
1224 _objc_inform("POOL HIGHWATER: %s", sym[i]);
1225 }
1226 free(sym);
1227 }
1228 }
1229
1230 #undef POOL_BOUNDARY
1231 };
1232
1233 // anonymous namespace
1234 };
1235
1236
1237 /***********************************************************************
1238 * Slow paths for inline control
1239 **********************************************************************/
1240
1241 #if SUPPORT_NONPOINTER_ISA
1242
1243 NEVER_INLINE id
1244 objc_object::rootRetain_overflow(bool tryRetain)
1245 {
1246 return rootRetain(tryRetain, true);
1247 }
1248
1249
1250 NEVER_INLINE bool
1251 objc_object::rootRelease_underflow(bool performDealloc)
1252 {
1253 return rootRelease(performDealloc, true);
1254 }
1255
1256
1257 // Slow path of clearDeallocating()
1258 // for objects with nonpointer isa
1259 // that were ever weakly referenced
1260 // or whose retain count ever overflowed to the side table.
1261 NEVER_INLINE void
1262 objc_object::clearDeallocating_slow()
1263 {
1264 assert(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));
1265
1266 SideTable& table = SideTables()[this];
1267 table.lock();
1268 if (isa.weakly_referenced) {
1269 weak_clear_no_lock(&table.weak_table, (id)this);
1270 }
1271 if (isa.has_sidetable_rc) {
1272 table.refcnts.erase(this);
1273 }
1274 table.unlock();
1275 }
1276
1277 #endif
1278
1279 __attribute__((noinline,used))
1280 id
1281 objc_object::rootAutorelease2()
1282 {
1283 assert(!isTaggedPointer());
1284 return AutoreleasePoolPage::autorelease((id)this);
1285 }
1286
1287
1288 BREAKPOINT_FUNCTION(
1289 void objc_overrelease_during_dealloc_error(void)
1290 );
1291
1292
1293 NEVER_INLINE
1294 bool
1295 objc_object::overrelease_error()
1296 {
1297 _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
1298 objc_overrelease_during_dealloc_error();
1299 return false; // allow rootRelease() to tail-call this
1300 }
1301
1302
1303 /***********************************************************************
1304 * Retain count operations for side table.
1305 **********************************************************************/
1306
1307
1308 #if DEBUG
1309 // Used to assert that an object is not present in the side table.
1310 bool
1311 objc_object::sidetable_present()
1312 {
1313 bool result = false;
1314 SideTable& table = SideTables()[this];
1315
1316 table.lock();
1317
1318 RefcountMap::iterator it = table.refcnts.find(this);
1319 if (it != table.refcnts.end()) result = true;
1320
1321 if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;
1322
1323 table.unlock();
1324
1325 return result;
1326 }
1327 #endif
1328
1329 #if SUPPORT_NONPOINTER_ISA
1330
1331 void
1332 objc_object::sidetable_lock()
1333 {
1334 SideTable& table = SideTables()[this];
1335 table.lock();
1336 }
1337
1338 void
1339 objc_object::sidetable_unlock()
1340 {
1341 SideTable& table = SideTables()[this];
1342 table.unlock();
1343 }
1344
1345
1346 // Move the entire retain count to the side table,
1347 // as well as isDeallocating and weaklyReferenced.
1348 void
1349 objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
1350 bool isDeallocating,
1351 bool weaklyReferenced)
1352 {
1353 assert(!isa.nonpointer); // should already be changed to raw pointer
1354 SideTable& table = SideTables()[this];
1355
1356 size_t& refcntStorage = table.refcnts[this];
1357 size_t oldRefcnt = refcntStorage;
1358 // not deallocating - that was in the isa
1359 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1360 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1361
1362 uintptr_t carry;
1363 size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1364 if (carry) refcnt = SIDE_TABLE_RC_PINNED;
1365 if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
1366 if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
1367
1368 refcntStorage = refcnt;
1369 }
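
// Worked example (illustrative): moving extra_rc == 5 for a deallocating,
// weakly referenced object stores
//     (5 << SIDE_TABLE_RC_SHIFT) | SIDE_TABLE_DEALLOCATING
//                                | SIDE_TABLE_WEAKLY_REFERENCED  // == 0b10111
// unless the shifted add carries, in which case the count pins at
// SIDE_TABLE_RC_PINNED.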
1370
1371
1372 // Move some retain counts to the side table from the isa field.
1373 // Returns true if the object is now pinned.
1374 bool
1375 objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
1376 {
1377 assert(isa.nonpointer);
1378 SideTable& table = SideTables()[this];
1379
1380 size_t& refcntStorage = table.refcnts[this];
1381 size_t oldRefcnt = refcntStorage;
1382 // isa-side bits should not be set here
1383 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1384 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1385
1386 if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
1387
1388 uintptr_t carry;
1389 size_t newRefcnt =
1390 addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1391 if (carry) {
1392 refcntStorage =
1393 SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
1394 return true;
1395 }
1396 else {
1397 refcntStorage = newRefcnt;
1398 return false;
1399 }
1400 }
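
// Note: once pinned, the count never moves again. sidetable_retain()
// and sidetable_release() both skip their arithmetic for pinned
// objects, so a pinned object can never reach a retain count of zero:
// it is effectively immortal (leaked) rather than at risk of
// over-release.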
1401
1402
1403 // Move some retain counts from the side table to the isa field.
1404 // Returns the actual count subtracted, which may be less than the request.
1405 size_t
1406 objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
1407 {
1408 assert(isa.nonpointer);
1409 SideTable& table = SideTables()[this];
1410
1411 RefcountMap::iterator it = table.refcnts.find(this);
1412 if (it == table.refcnts.end() || it->second == 0) {
1413 // Side table retain count is zero. Can't borrow.
1414 return 0;
1415 }
1416 size_t oldRefcnt = it->second;
1417
1418 // isa-side bits should not be set here
1419 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1420 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1421
1422 size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
1423 assert(oldRefcnt > newRefcnt); // shouldn't underflow
1424 it->second = newRefcnt;
1425 return delta_rc;
1426 }
1427
1428
1429 size_t
1430 objc_object::sidetable_getExtraRC_nolock()
1431 {
1432 assert(isa.nonpointer);
1433 SideTable& table = SideTables()[this];
1434 RefcountMap::iterator it = table.refcnts.find(this);
1435 if (it == table.refcnts.end()) return 0;
1436 else return it->second >> SIDE_TABLE_RC_SHIFT;
1437 }
1438
1439
1440 // SUPPORT_NONPOINTER_ISA
1441 #endif
1442
1443
1444 id
1445 objc_object::sidetable_retain()
1446 {
1447 #if SUPPORT_NONPOINTER_ISA
1448 assert(!isa.nonpointer);
1449 #endif
1450 SideTable& table = SideTables()[this];
1451
1452 table.lock();
1453 size_t& refcntStorage = table.refcnts[this];
1454 if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
1455 refcntStorage += SIDE_TABLE_RC_ONE;
1456 }
1457 table.unlock();
1458
1459 return (id)this;
1460 }
1461
1462
1463 bool
1464 objc_object::sidetable_tryRetain()
1465 {
1466 #if SUPPORT_NONPOINTER_ISA
1467 assert(!isa.nonpointer);
1468 #endif
1469 SideTable& table = SideTables()[this];
1470
1471 // NO SPINLOCK HERE
1472 // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
1473 // which already acquired the lock on our behalf.
1474
1475 // fixme can't do this efficiently with os_lock_handoff_s
1476 // if (table.slock == 0) {
1477 // _objc_fatal("Do not call -_tryRetain.");
1478 // }
1479
1480 bool result = true;
1481 RefcountMap::iterator it = table.refcnts.find(this);
1482 if (it == table.refcnts.end()) {
1483 table.refcnts[this] = SIDE_TABLE_RC_ONE;
1484 } else if (it->second & SIDE_TABLE_DEALLOCATING) {
1485 result = false;
1486 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1487 it->second += SIDE_TABLE_RC_ONE;
1488 }
1489
1490 return result;
1491 }
1492
1493
1494 uintptr_t
1495 objc_object::sidetable_retainCount()
1496 {
1497 SideTable& table = SideTables()[this];
1498
1499 size_t refcnt_result = 1;
1500
1501 table.lock();
1502 RefcountMap::iterator it = table.refcnts.find(this);
1503 if (it != table.refcnts.end()) {
1504 // this is valid for SIDE_TABLE_RC_PINNED too
1505 refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
1506 }
1507 table.unlock();
1508 return refcnt_result;
1509 }
1510
1511
1512 bool
1513 objc_object::sidetable_isDeallocating()
1514 {
1515 SideTable& table = SideTables()[this];
1516
1517 // NO SPINLOCK HERE
1518 // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
1519 // which already acquired the lock on our behalf.
1520
1521
1522 // fixme can't do this efficiently with os_lock_handoff_s
1523 // if (table.slock == 0) {
1524 // _objc_fatal("Do not call -_isDeallocating.");
1525 // }
1526
1527 RefcountMap::iterator it = table.refcnts.find(this);
1528 return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
1529 }
1530
1531
1532 bool
1533 objc_object::sidetable_isWeaklyReferenced()
1534 {
1535 bool result = false;
1536
1537 SideTable& table = SideTables()[this];
1538 table.lock();
1539
1540 RefcountMap::iterator it = table.refcnts.find(this);
1541 if (it != table.refcnts.end()) {
1542 result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
1543 }
1544
1545 table.unlock();
1546
1547 return result;
1548 }
1549
1550
1551 void
1552 objc_object::sidetable_setWeaklyReferenced_nolock()
1553 {
1554 #if SUPPORT_NONPOINTER_ISA
1555 assert(!isa.nonpointer);
1556 #endif
1557
1558 SideTable& table = SideTables()[this];
1559
1560 table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
1561 }
1562
1563
1564 // rdar://20206767
1565 // return uintptr_t instead of bool so that the various raw-isa
1566 // -release paths all return zero in eax
1567 uintptr_t
1568 objc_object::sidetable_release(bool performDealloc)
1569 {
1570 #if SUPPORT_NONPOINTER_ISA
1571 assert(!isa.nonpointer);
1572 #endif
1573 SideTable& table = SideTables()[this];
1574
1575 bool do_dealloc = false;
1576
1577 table.lock();
1578 RefcountMap::iterator it = table.refcnts.find(this);
1579 if (it == table.refcnts.end()) {
1580 do_dealloc = true;
1581 table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
1582 } else if (it->second < SIDE_TABLE_DEALLOCATING) {
1583 // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
1584 do_dealloc = true;
1585 it->second |= SIDE_TABLE_DEALLOCATING;
1586 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1587 it->second -= SIDE_TABLE_RC_ONE;
1588 }
1589 table.unlock();
1590 if (do_dealloc && performDealloc) {
1591 ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
1592 }
1593 return do_dealloc;
1594 }
1595
1596
1597 void
1598 objc_object::sidetable_clearDeallocating()
1599 {
1600 SideTable& table = SideTables()[this];
1601
1602 // clear any weak table items
1603 // clear extra retain count and deallocating bit
1604 // (fixme warn or abort if extra retain count == 0 ?)
1605 table.lock();
1606 RefcountMap::iterator it = table.refcnts.find(this);
1607 if (it != table.refcnts.end()) {
1608 if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
1609 weak_clear_no_lock(&table.weak_table, (id)this);
1610 }
1611 table.refcnts.erase(it);
1612 }
1613 table.unlock();
1614 }
1615
1616
1617 /***********************************************************************
1618 * Optimized retain/release/autorelease entrypoints
1619 **********************************************************************/
1620
1621
1622 #if __OBJC2__
1623
1624 __attribute__((aligned(16)))
1625 id
1626 objc_retain(id obj)
1627 {
1628 if (!obj) return obj;
1629 if (obj->isTaggedPointer()) return obj;
1630 return obj->retain();
1631 }
1632
1633
1634 __attribute__((aligned(16)))
1635 void
1636 objc_release(id obj)
1637 {
1638 if (!obj) return;
1639 if (obj->isTaggedPointer()) return;
1640 return obj->release();
1641 }
1642
1643
1644 __attribute__((aligned(16)))
1645 id
1646 objc_autorelease(id obj)
1647 {
1648 if (!obj) return obj;
1649 if (obj->isTaggedPointer()) return obj;
1650 return obj->autorelease();
1651 }
1652
1653
1654 // OBJC2
1655 #else
1656 // not OBJC2
1657
1658
1659 id objc_retain(id obj) { return [obj retain]; }
1660 void objc_release(id obj) { [obj release]; }
1661 id objc_autorelease(id obj) { return [obj autorelease]; }
1662
1663
1664 #endif
1665
1666
1667 /***********************************************************************
1668 * Basic operations for root class implementations a.k.a. _objc_root*()
1669 **********************************************************************/
1670
1671 bool
1672 _objc_rootTryRetain(id obj)
1673 {
1674 assert(obj);
1675
1676 return obj->rootTryRetain();
1677 }
1678
1679 bool
1680 _objc_rootIsDeallocating(id obj)
1681 {
1682 assert(obj);
1683
1684 return obj->rootIsDeallocating();
1685 }
1686
1687
1688 void
1689 objc_clear_deallocating(id obj)
1690 {
1691 assert(obj);
1692
1693 if (obj->isTaggedPointer()) return;
1694 obj->clearDeallocating();
1695 }
1696
1697
1698 bool
1699 _objc_rootReleaseWasZero(id obj)
1700 {
1701 assert(obj);
1702
1703 return obj->rootReleaseShouldDealloc();
1704 }
1705
1706
1707 id
1708 _objc_rootAutorelease(id obj)
1709 {
1710 assert(obj);
1711 return obj->rootAutorelease();
1712 }
1713
1714 uintptr_t
1715 _objc_rootRetainCount(id obj)
1716 {
1717 assert(obj);
1718
1719 return obj->rootRetainCount();
1720 }
1721
1722
1723 id
1724 _objc_rootRetain(id obj)
1725 {
1726 assert(obj);
1727
1728 return obj->rootRetain();
1729 }
1730
1731 void
1732 _objc_rootRelease(id obj)
1733 {
1734 assert(obj);
1735
1736 obj->rootRelease();
1737 }
1738
1739
1740 id
1741 _objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
1742 {
1743 id obj;
1744
1745 #if __OBJC2__
1746 // allocWithZone under __OBJC2__ ignores the zone parameter
1747 (void)zone;
1748 obj = class_createInstance(cls, 0);
1749 #else
1750 if (!zone) {
1751 obj = class_createInstance(cls, 0);
1752 }
1753 else {
1754 obj = class_createInstanceFromZone(cls, 0, zone);
1755 }
1756 #endif
1757
1758 if (slowpath(!obj)) obj = callBadAllocHandler(cls);
1759 return obj;
1760 }
1761
1762
1763 // Call [cls alloc] or [cls allocWithZone:nil], with appropriate
1764 // shortcutting optimizations.
1765 static ALWAYS_INLINE id
1766 callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
1767 {
1768 if (slowpath(checkNil && !cls)) return nil;
1769
1770 #if __OBJC2__
1771 if (fastpath(!cls->ISA()->hasCustomAWZ())) {
1772 // No alloc/allocWithZone implementation. Go straight to the allocator.
1773 // fixme store hasCustomAWZ in the non-meta class and
1774 // add it to canAllocFast's summary
1775 if (fastpath(cls->canAllocFast())) {
1776 // No ctors, raw isa, etc. Go straight to the metal.
1777 bool dtor = cls->hasCxxDtor();
1778 id obj = (id)calloc(1, cls->bits.fastInstanceSize());
1779 if (slowpath(!obj)) return callBadAllocHandler(cls);
1780 obj->initInstanceIsa(cls, dtor);
1781 return obj;
1782 }
1783 else {
1784 // Has ctor or raw isa or something. Use the slower path.
1785 id obj = class_createInstance(cls, 0);
1786 if (slowpath(!obj)) return callBadAllocHandler(cls);
1787 return obj;
1788 }
1789 }
1790 #endif
1791
1792 // No shortcuts available.
1793 if (allocWithZone) return [cls allocWithZone:nil];
1794 return [cls alloc];
1795 }
1796
1797
1798 // Base class implementation of +alloc. cls is not nil.
1799 // Calls [cls allocWithZone:nil].
1800 id
1801 _objc_rootAlloc(Class cls)
1802 {
1803 return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
1804 }
1805
1806 // Calls [cls alloc].
1807 id
1808 objc_alloc(Class cls)
1809 {
1810 return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
1811 }
1812
1813 // Calls [cls allocWithZone:nil].
1814 id
1815 objc_allocWithZone(Class cls)
1816 {
1817 return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
1818 }
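
// objc_alloc and objc_allocWithZone exist so that the compiler can
// allocate without going through objc_msgSend (an assumption about
// codegen; the message sends remain the fallback). All three entry
// points funnel into callAlloc above:
//
//     id obj = objc_alloc(cls);   // ~= [cls alloc], with a nil check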
1819
1820
1821 void
1822 _objc_rootDealloc(id obj)
1823 {
1824 assert(obj);
1825
1826 obj->rootDealloc();
1827 }
1828
1829 void
1830 _objc_rootFinalize(id obj __unused)
1831 {
1832 assert(obj);
1833 _objc_fatal("_objc_rootFinalize called with garbage collection off");
1834 }
1835
1836
1837 id
1838 _objc_rootInit(id obj)
1839 {
1840 // In practice, it will be hard to rely on this function.
1841 // Many classes do not properly chain -init calls.
1842 return obj;
1843 }
1844
1845
1846 malloc_zone_t *
1847 _objc_rootZone(id obj)
1848 {
1849 (void)obj;
1850 #if __OBJC2__
1851 // allocWithZone under __OBJC2__ ignores the zone parameter
1852 return malloc_default_zone();
1853 #else
1854 malloc_zone_t *rval = malloc_zone_from_ptr(obj);
1855 return rval ? rval : malloc_default_zone();
1856 #endif
1857 }
1858
1859 uintptr_t
1860 _objc_rootHash(id obj)
1861 {
1862 return (uintptr_t)obj;
1863 }
1864
1865 void *
1866 objc_autoreleasePoolPush(void)
1867 {
1868 return AutoreleasePoolPage::push();
1869 }
1870
1871 void
1872 objc_autoreleasePoolPop(void *ctxt)
1873 {
1874 AutoreleasePoolPage::pop(ctxt);
1875 }
1876
1877
1878 void *
1879 _objc_autoreleasePoolPush(void)
1880 {
1881 return objc_autoreleasePoolPush();
1882 }
1883
1884 void
1885 _objc_autoreleasePoolPop(void *ctxt)
1886 {
1887 objc_autoreleasePoolPop(ctxt);
1888 }
1889
1890 void
1891 _objc_autoreleasePoolPrint(void)
1892 {
1893 AutoreleasePoolPage::printAll();
1894 }
1895
1896
1897 // Same as objc_release but suitable for tail-calling
1898 // if you need the value back and don't want to push a frame before this point.
1899 __attribute__((noinline))
1900 static id
1901 objc_releaseAndReturn(id obj)
1902 {
1903 objc_release(obj);
1904 return obj;
1905 }
1906
1907 // Same as objc_retainAutorelease but suitable for tail-calling
1908 // if you don't want to push a frame before this point.
1909 __attribute__((noinline))
1910 static id
1911 objc_retainAutoreleaseAndReturn(id obj)
1912 {
1913 return objc_retainAutorelease(obj);
1914 }
1915
1916
1917 // Prepare a value at +1 for return through a +0 autoreleasing convention.
1918 id
1919 objc_autoreleaseReturnValue(id obj)
1920 {
1921 if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
1922
1923 return objc_autorelease(obj);
1924 }
1925
1926 // Prepare a value at +0 for return through a +0 autoreleasing convention.
1927 id
1928 objc_retainAutoreleaseReturnValue(id obj)
1929 {
1930 if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;
1931
1932 // not objc_autoreleaseReturnValue(objc_retain(obj))
1933 // because we don't need another optimization attempt
1934 return objc_retainAutoreleaseAndReturn(obj);
1935 }
1936
1937 // Accept a value returned through a +0 autoreleasing convention for use at +1.
1938 id
1939 objc_retainAutoreleasedReturnValue(id obj)
1940 {
1941 if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
1942
1943 return objc_retain(obj);
1944 }
1945
1946 // Accept a value returned through a +0 autoreleasing convention for use at +0.
1947 id
1948 objc_unsafeClaimAutoreleasedReturnValue(id obj)
1949 {
1950 if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;
1951
1952 return objc_releaseAndReturn(obj);
1953 }
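
// Example (illustrative): the callee/caller handshake above, where
// makeObject() is a hypothetical function returning a +1 value.
//
//     // callee, returning +1 through the +0 convention:
//     return objc_autoreleaseReturnValue(obj);
//
//     // caller, wanting the result at +1:
//     id x = objc_retainAutoreleasedReturnValue(makeObject());
//
// When prepareOptimizedReturn and acceptOptimizedReturn recognize each
// other, the autorelease and retain cancel and the object never enters
// the autorelease pool; otherwise both calls fall back to a plain
// autorelease/retain pair.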
1954
1955 id
1956 objc_retainAutorelease(id obj)
1957 {
1958 return objc_autorelease(objc_retain(obj));
1959 }
1960
1961 void
1962 _objc_deallocOnMainThreadHelper(void *context)
1963 {
1964 id obj = (id)context;
1965 [obj dealloc];
1966 }
1967
1968 // convert objc_objectptr_t to id, callee must take ownership.
1969 id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1970
1971 // convert objc_objectptr_t to id, without ownership transfer.
1972 id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1973
1974 // convert id to objc_objectptr_t, no ownership transfer.
1975 objc_objectptr_t objc_unretainedPointer(id object) { return object; }
1976
1977
1978 void arr_init(void)
1979 {
1980 AutoreleasePoolPage::init();
1981 SideTableInit();
1982 }
1983
1984
1985 #if SUPPORT_TAGGED_POINTERS
1986
1987 // Placeholder for old debuggers. When they inspect an
1988 // extended tagged pointer object they will see this isa.
1989
1990 @interface __NSUnrecognizedTaggedPointer : NSObject
1991 @end
1992
1993 @implementation __NSUnrecognizedTaggedPointer
1994 +(void) load { }
1995 -(id) retain { return self; }
1996 -(oneway void) release { }
1997 -(id) autorelease { return self; }
1998 @end
1999
2000 #endif
2001
2002
2003 @implementation NSObject
2004
2005 + (void)load {
2006 }
2007
2008 + (void)initialize {
2009 }
2010
2011 + (id)self {
2012 return (id)self;
2013 }
2014
2015 - (id)self {
2016 return self;
2017 }
2018
2019 + (Class)class {
2020 return self;
2021 }
2022
2023 - (Class)class {
2024 return object_getClass(self);
2025 }
2026
2027 + (Class)superclass {
2028 return self->superclass;
2029 }
2030
2031 - (Class)superclass {
2032 return [self class]->superclass;
2033 }
2034
2035 + (BOOL)isMemberOfClass:(Class)cls {
2036 return object_getClass((id)self) == cls;
2037 }
2038
2039 - (BOOL)isMemberOfClass:(Class)cls {
2040 return [self class] == cls;
2041 }
2042
2043 + (BOOL)isKindOfClass:(Class)cls {
2044 for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
2045 if (tcls == cls) return YES;
2046 }
2047 return NO;
2048 }
2049
2050 - (BOOL)isKindOfClass:(Class)cls {
2051 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2052 if (tcls == cls) return YES;
2053 }
2054 return NO;
2055 }
2056
2057 + (BOOL)isSubclassOfClass:(Class)cls {
2058 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2059 if (tcls == cls) return YES;
2060 }
2061 return NO;
2062 }
2063
2064 + (BOOL)isAncestorOfObject:(NSObject *)obj {
2065 for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
2066 if (tcls == self) return YES;
2067 }
2068 return NO;
2069 }
2070
2071 + (BOOL)instancesRespondToSelector:(SEL)sel {
2072 if (!sel) return NO;
2073 return class_respondsToSelector(self, sel);
2074 }
2075
2076 + (BOOL)respondsToSelector:(SEL)sel {
2077 if (!sel) return NO;
2078 return class_respondsToSelector_inst(object_getClass(self), sel, self);
2079 }
2080
2081 - (BOOL)respondsToSelector:(SEL)sel {
2082 if (!sel) return NO;
2083 return class_respondsToSelector_inst([self class], sel, self);
2084 }
2085
2086 + (BOOL)conformsToProtocol:(Protocol *)protocol {
2087 if (!protocol) return NO;
2088 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2089 if (class_conformsToProtocol(tcls, protocol)) return YES;
2090 }
2091 return NO;
2092 }
2093
2094 - (BOOL)conformsToProtocol:(Protocol *)protocol {
2095 if (!protocol) return NO;
2096 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2097 if (class_conformsToProtocol(tcls, protocol)) return YES;
2098 }
2099 return NO;
2100 }
2101
2102 + (NSUInteger)hash {
2103 return _objc_rootHash(self);
2104 }
2105
2106 - (NSUInteger)hash {
2107 return _objc_rootHash(self);
2108 }
2109
2110 + (BOOL)isEqual:(id)obj {
2111 return obj == (id)self;
2112 }
2113
2114 - (BOOL)isEqual:(id)obj {
2115 return obj == self;
2116 }
2117
2118
2119 + (BOOL)isFault {
2120 return NO;
2121 }
2122
2123 - (BOOL)isFault {
2124 return NO;
2125 }
2126
2127 + (BOOL)isProxy {
2128 return NO;
2129 }
2130
2131 - (BOOL)isProxy {
2132 return NO;
2133 }
2134
2135
2136 + (IMP)instanceMethodForSelector:(SEL)sel {
2137 if (!sel) [self doesNotRecognizeSelector:sel];
2138 return class_getMethodImplementation(self, sel);
2139 }
2140
2141 + (IMP)methodForSelector:(SEL)sel {
2142 if (!sel) [self doesNotRecognizeSelector:sel];
2143 return object_getMethodImplementation((id)self, sel);
2144 }
2145
2146 - (IMP)methodForSelector:(SEL)sel {
2147 if (!sel) [self doesNotRecognizeSelector:sel];
2148 return object_getMethodImplementation(self, sel);
2149 }
2150
2151 + (BOOL)resolveClassMethod:(SEL)sel {
2152 return NO;
2153 }
2154
2155 + (BOOL)resolveInstanceMethod:(SEL)sel {
2156 return NO;
2157 }
2158
2159 // Replaced by CF (throws an NSException)
2160 + (void)doesNotRecognizeSelector:(SEL)sel {
2161 _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
2162 class_getName(self), sel_getName(sel), self);
2163 }
2164
2165 // Replaced by CF (throws an NSException)
2166 - (void)doesNotRecognizeSelector:(SEL)sel {
2167 _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
2168 object_getClassName(self), sel_getName(sel), self);
2169 }
2170
2171
2172 + (id)performSelector:(SEL)sel {
2173 if (!sel) [self doesNotRecognizeSelector:sel];
2174 return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
2175 }
2176
2177 + (id)performSelector:(SEL)sel withObject:(id)obj {
2178 if (!sel) [self doesNotRecognizeSelector:sel];
2179 return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
2180 }
2181
2182 + (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2183 if (!sel) [self doesNotRecognizeSelector:sel];
2184 return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
2185 }
2186
2187 - (id)performSelector:(SEL)sel {
2188 if (!sel) [self doesNotRecognizeSelector:sel];
2189 return ((id(*)(id, SEL))objc_msgSend)(self, sel);
2190 }
2191
2192 - (id)performSelector:(SEL)sel withObject:(id)obj {
2193 if (!sel) [self doesNotRecognizeSelector:sel];
2194 return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
2195 }
2196
2197 - (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2198 if (!sel) [self doesNotRecognizeSelector:sel];
2199 return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
2200 }
2201
2202
2203 // Replaced by CF (returns an NSMethodSignature)
2204 + (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
2205 _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
2206 "not available without CoreFoundation");
2207 }
2208
2209 // Replaced by CF (returns an NSMethodSignature)
2210 + (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2211 _objc_fatal("+[NSObject methodSignatureForSelector:] "
2212 "not available without CoreFoundation");
2213 }
2214
2215 // Replaced by CF (returns an NSMethodSignature)
2216 - (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2217 _objc_fatal("-[NSObject methodSignatureForSelector:] "
2218 "not available without CoreFoundation");
2219 }
2220
2221 + (void)forwardInvocation:(NSInvocation *)invocation {
2222 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2223 }
2224
2225 - (void)forwardInvocation:(NSInvocation *)invocation {
2226 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2227 }
2228
2229 + (id)forwardingTargetForSelector:(SEL)sel {
2230 return nil;
2231 }
2232
2233 - (id)forwardingTargetForSelector:(SEL)sel {
2234 return nil;
2235 }
2236
2237
2238 // Replaced by CF (returns an NSString)
2239 + (NSString *)description {
2240 return nil;
2241 }
2242
2243 // Replaced by CF (returns an NSString)
2244 - (NSString *)description {
2245 return nil;
2246 }
2247
2248 + (NSString *)debugDescription {
2249 return [self description];
2250 }
2251
2252 - (NSString *)debugDescription {
2253 return [self description];
2254 }
2255
2256
2257 + (id)new {
2258 return [callAlloc(self, false/*checkNil*/) init];
2259 }
2260
2261 + (id)retain {
2262 return (id)self;
2263 }
2264
2265 // Replaced by ObjectAlloc
2266 - (id)retain {
2267 return ((id)self)->rootRetain();
2268 }
2269
2270
2271 + (BOOL)_tryRetain {
2272 return YES;
2273 }
2274
2275 // Replaced by ObjectAlloc
2276 - (BOOL)_tryRetain {
2277 return ((id)self)->rootTryRetain();
2278 }
2279
2280 + (BOOL)_isDeallocating {
2281 return NO;
2282 }
2283
2284 - (BOOL)_isDeallocating {
2285 return ((id)self)->rootIsDeallocating();
2286 }
2287
2288 + (BOOL)allowsWeakReference {
2289 return YES;
2290 }
2291
2292 + (BOOL)retainWeakReference {
2293 return YES;
2294 }
2295
2296 - (BOOL)allowsWeakReference {
2297 return ! [self _isDeallocating];
2298 }
2299
2300 - (BOOL)retainWeakReference {
2301 return [self _tryRetain];
2302 }
2303
2304 + (oneway void)release {
2305 }
2306
2307 // Replaced by ObjectAlloc
2308 - (oneway void)release {
2309 ((id)self)->rootRelease();
2310 }
2311
2312 + (id)autorelease {
2313 return (id)self;
2314 }
2315
2316 // Replaced by ObjectAlloc
2317 - (id)autorelease {
2318 return ((id)self)->rootAutorelease();
2319 }
2320
2321 + (NSUInteger)retainCount {
2322 return ULONG_MAX;
2323 }
2324
2325 - (NSUInteger)retainCount {
2326 return ((id)self)->rootRetainCount();
2327 }
2328
2329 + (id)alloc {
2330 return _objc_rootAlloc(self);
2331 }
2332
2333 // Replaced by ObjectAlloc
2334 + (id)allocWithZone:(struct _NSZone *)zone {
2335 return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
2336 }
2337
2338 // Replaced by CF (throws an NSException)
2339 + (id)init {
2340 return (id)self;
2341 }
2342
2343 - (id)init {
2344 return _objc_rootInit(self);
2345 }
2346
2347 // Replaced by CF (throws an NSException)
2348 + (void)dealloc {
2349 }
2350
2351
2352 // Replaced by NSZombies
2353 - (void)dealloc {
2354 _objc_rootDealloc(self);
2355 }
2356
2357 // Previously used by GC. Now a placeholder for binary compatibility.
2358 - (void) finalize {
2359 }
2360
2361 + (struct _NSZone *)zone {
2362 return (struct _NSZone *)_objc_rootZone(self);
2363 }
2364
2365 - (struct _NSZone *)zone {
2366 return (struct _NSZone *)_objc_rootZone(self);
2367 }
2368
2369 + (id)copy {
2370 return (id)self;
2371 }
2372
2373 + (id)copyWithZone:(struct _NSZone *)zone {
2374 return (id)self;
2375 }
2376
2377 - (id)copy {
2378 return [(id)self copyWithZone:nil];
2379 }
2380
2381 + (id)mutableCopy {
2382 return (id)self;
2383 }
2384
2385 + (id)mutableCopyWithZone:(struct _NSZone *)zone {
2386 return (id)self;
2387 }
2388
2389 - (id)mutableCopy {
2390 return [(id)self mutableCopyWithZone:nil];
2391 }
2392
2393 @end
2394
2395