[apple/objc4.git] / runtime / NSObject.mm (objc4-779.1)
1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include "objc-private.h"
25 #include "NSObject.h"
26
27 #include "objc-weak.h"
28 #include "DenseMapExtras.h"
29
30 #include <malloc/malloc.h>
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <mach/mach.h>
34 #include <mach-o/dyld.h>
35 #include <mach-o/nlist.h>
36 #include <sys/types.h>
37 #include <sys/mman.h>
38 #include <Block.h>
39 #include <map>
40 #include <execinfo.h>
41 #include "NSObject-internal.h"
42
43 @interface NSInvocation
44 - (SEL)selector;
45 @end
46
47 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_magic_offset = __builtin_offsetof(AutoreleasePoolPageData, magic);
48 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_next_offset = __builtin_offsetof(AutoreleasePoolPageData, next);
49 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_thread_offset = __builtin_offsetof(AutoreleasePoolPageData, thread);
50 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset = __builtin_offsetof(AutoreleasePoolPageData, parent);
51 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset = __builtin_offsetof(AutoreleasePoolPageData, child);
52 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset = __builtin_offsetof(AutoreleasePoolPageData, depth);
53 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset = __builtin_offsetof(AutoreleasePoolPageData, hiwat);
54
55 /***********************************************************************
56 * Weak ivar support
57 **********************************************************************/
58
59 static id defaultBadAllocHandler(Class cls)
60 {
61 _objc_fatal("attempt to allocate object of class '%s' failed",
62 cls->nameForLogging());
63 }
64
65 id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
66
67 id _objc_callBadAllocHandler(Class cls)
68 {
69 // fixme add re-entrancy protection in case allocation fails inside handler
70 return (*badAllocHandler)(cls);
71 }
72
73 void _objc_setBadAllocHandler(id(*newHandler)(Class))
74 {
75 badAllocHandler = newHandler;
76 }
77
78
79 namespace {
80
81 // The order of these bits is important.
82 #define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
83 #define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
84 #define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit
85 #define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1))
86
87 #define SIDE_TABLE_RC_SHIFT 2
88 #define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
89
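// Sketch of one refcnts value, with a worked example:
//   bit 0                 : SIDE_TABLE_WEAKLY_REFERENCED
//   bit 1                 : SIDE_TABLE_DEALLOCATING
//   bits 2..WORD_BITS-2   : extra retain count, in SIDE_TABLE_RC_ONE units
//   bit  WORD_BITS-1      : SIDE_TABLE_RC_PINNED (count overflowed; stop changing it)
// For example, an object with 3 extra retains that is also weakly referenced
// stores (3 << SIDE_TABLE_RC_SHIFT) | SIDE_TABLE_WEAKLY_REFERENCED == 0xD,
// and sidetable_retainCount() below reports 1 + (0xD >> SIDE_TABLE_RC_SHIFT) == 4.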
90 struct RefcountMapValuePurgeable {
91 static inline bool isPurgeable(size_t x) {
92 return x == 0;
93 }
94 };
95
96 // RefcountMap disguises its pointers because we
97 // don't want the table to act as a root for `leaks`.
98 typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,RefcountMapValuePurgeable> RefcountMap;
99
100 // Template parameters.
101 enum HaveOld { DontHaveOld = false, DoHaveOld = true };
102 enum HaveNew { DontHaveNew = false, DoHaveNew = true };
103
104 struct SideTable {
105 spinlock_t slock;
106 RefcountMap refcnts;
107 weak_table_t weak_table;
108
109 SideTable() {
110 memset(&weak_table, 0, sizeof(weak_table));
111 }
112
113 ~SideTable() {
114 _objc_fatal("Do not delete SideTable.");
115 }
116
117 void lock() { slock.lock(); }
118 void unlock() { slock.unlock(); }
119 void forceReset() { slock.forceReset(); }
120
121 // Address-ordered lock discipline for a pair of side tables.
122
123 template<HaveOld, HaveNew>
124 static void lockTwo(SideTable *lock1, SideTable *lock2);
125 template<HaveOld, HaveNew>
126 static void unlockTwo(SideTable *lock1, SideTable *lock2);
127 };
128
129
130 template<>
131 void SideTable::lockTwo<DoHaveOld, DoHaveNew>
132 (SideTable *lock1, SideTable *lock2)
133 {
134 spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
135 }
136
137 template<>
138 void SideTable::lockTwo<DoHaveOld, DontHaveNew>
139 (SideTable *lock1, SideTable *)
140 {
141 lock1->lock();
142 }
143
144 template<>
145 void SideTable::lockTwo<DontHaveOld, DoHaveNew>
146 (SideTable *, SideTable *lock2)
147 {
148 lock2->lock();
149 }
150
151 template<>
152 void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
153 (SideTable *lock1, SideTable *lock2)
154 {
155 spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
156 }
157
158 template<>
159 void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
160 (SideTable *lock1, SideTable *)
161 {
162 lock1->unlock();
163 }
164
165 template<>
166 void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
167 (SideTable *, SideTable *lock2)
168 {
169 lock2->unlock();
170 }
171
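// SideTables() is keyed by object address: SideTables()[obj] hashes the
// pointer into a small fixed array of SideTable stripes, so many objects
// share each table (and therefore each lock). See the StripedMap template.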
172 static objc::ExplicitInit<StripedMap<SideTable>> SideTablesMap;
173
174 static StripedMap<SideTable>& SideTables() {
175 return SideTablesMap.get();
176 }
177
178 // anonymous namespace
179 };
180
181 void SideTableLockAll() {
182 SideTables().lockAll();
183 }
184
185 void SideTableUnlockAll() {
186 SideTables().unlockAll();
187 }
188
189 void SideTableForceResetAll() {
190 SideTables().forceResetAll();
191 }
192
193 void SideTableDefineLockOrder() {
194 SideTables().defineLockOrder();
195 }
196
197 void SideTableLocksPrecedeLock(const void *newlock) {
198 SideTables().precedeLock(newlock);
199 }
200
201 void SideTableLocksSucceedLock(const void *oldlock) {
202 SideTables().succeedLock(oldlock);
203 }
204
205 void SideTableLocksPrecedeLocks(StripedMap<spinlock_t>& newlocks) {
206 int i = 0;
207 const void *newlock;
208 while ((newlock = newlocks.getLock(i++))) {
209 SideTables().precedeLock(newlock);
210 }
211 }
212
213 void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
214 int i = 0;
215 const void *oldlock;
216 while ((oldlock = oldlocks.getLock(i++))) {
217 SideTables().succeedLock(oldlock);
218 }
219 }
220
221 //
222 // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
223 //
224
225 id objc_retainBlock(id x) {
226 return (id)_Block_copy(x);
227 }
228
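// A rough sketch of what ARC emits for common operations (exact calls vary
// with optimization level and compiler version; names are illustrative):
//
//   id y = x;                          // objc_retain(x), matched by a later objc_release(y)
//   void (^blk)(void) = ^{ use(x); };  // objc_retainBlock(...), which is _Block_copy()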
229 //
230 // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
231 //
232
233 BOOL objc_should_deallocate(id object) {
234 return YES;
235 }
236
237 id
238 objc_retain_autorelease(id obj)
239 {
240 return objc_autorelease(objc_retain(obj));
241 }
242
243
244 void
245 objc_storeStrong(id *location, id obj)
246 {
247 id prev = *location;
248 if (obj == prev) {
249 return;
250 }
251 objc_retain(obj);
252 *location = obj;
253 objc_release(prev);
254 }
255
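// Illustrative sketch: ARC may lower an assignment to a __strong lvalue,
//
//   strongVar = newValue;    // objc_storeStrong(&strongVar, newValue)
//
// The new value is retained before the old one is released, so the store is
// safe even when the old value held the only reference keeping the new value
// alive.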
256
257 // Update a weak variable.
258 // If HaveOld is true, the variable has an existing value
259 // that needs to be cleaned up. This value might be nil.
260 // If HaveNew is true, there is a new value that needs to be
261 // assigned into the variable. This value might be nil.
262 // If CrashIfDeallocating is true, the process is halted if newObj is
263 // deallocating or newObj's class does not support weak references.
264 // If CrashIfDeallocating is false, nil is stored instead.
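// The public entry points below instantiate this template as follows:
//   objc_storeWeak       = storeWeak<DoHaveOld,   DoHaveNew,   DoCrashIfDeallocating>
//   objc_storeWeakOrNil  = storeWeak<DoHaveOld,   DoHaveNew,   DontCrashIfDeallocating>
//   objc_initWeak        = storeWeak<DontHaveOld, DoHaveNew,   DoCrashIfDeallocating>
//   objc_initWeakOrNil   = storeWeak<DontHaveOld, DoHaveNew,   DontCrashIfDeallocating>
//   objc_destroyWeak     = storeWeak<DoHaveOld,   DontHaveNew, DontCrashIfDeallocating>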
265 enum CrashIfDeallocating {
266 DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
267 };
268 template <HaveOld haveOld, HaveNew haveNew,
269 CrashIfDeallocating crashIfDeallocating>
270 static id
271 storeWeak(id *location, objc_object *newObj)
272 {
273 ASSERT(haveOld || haveNew);
274 if (!haveNew) ASSERT(newObj == nil);
275
276 Class previouslyInitializedClass = nil;
277 id oldObj;
278 SideTable *oldTable;
279 SideTable *newTable;
280
281 // Acquire locks for old and new values.
282 // Order by lock address to prevent lock ordering problems.
283 // Retry if the old value changes underneath us.
284 retry:
285 if (haveOld) {
286 oldObj = *location;
287 oldTable = &SideTables()[oldObj];
288 } else {
289 oldTable = nil;
290 }
291 if (haveNew) {
292 newTable = &SideTables()[newObj];
293 } else {
294 newTable = nil;
295 }
296
297 SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);
298
299 if (haveOld && *location != oldObj) {
300 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
301 goto retry;
302 }
303
304 // Prevent a deadlock between the weak reference machinery
305 // and the +initialize machinery by ensuring that no
306 // weakly-referenced object has an un-+initialized isa.
307 if (haveNew && newObj) {
308 Class cls = newObj->getIsa();
309 if (cls != previouslyInitializedClass &&
310 !((objc_class *)cls)->isInitialized())
311 {
312 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
313 class_initialize(cls, (id)newObj);
314
315 // If this class is finished with +initialize then we're good.
316 // If this class is still running +initialize on this thread
317 // (i.e. +initialize called storeWeak on an instance of itself)
318 // then we may proceed but it will appear initializing and
319 // not yet initialized to the check above.
320 // Instead set previouslyInitializedClass to recognize it on retry.
321 previouslyInitializedClass = cls;
322
323 goto retry;
324 }
325 }
326
327 // Clean up old value, if any.
328 if (haveOld) {
329 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
330 }
331
332 // Assign new value, if any.
333 if (haveNew) {
334 newObj = (objc_object *)
335 weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
336 crashIfDeallocating);
337 // weak_register_no_lock returns nil if weak store should be rejected
338
339 // Set is-weakly-referenced bit in refcount table.
340 if (newObj && !newObj->isTaggedPointer()) {
341 newObj->setWeaklyReferenced_nolock();
342 }
343
344 // Do not set *location anywhere else. That would introduce a race.
345 *location = (id)newObj;
346 }
347 else {
348 // No new value. The storage is not changed.
349 }
350
351 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
352
353 return (id)newObj;
354 }
355
356
357 /**
358 * This function stores a new value into a __weak variable. It would
359 * be used anywhere a __weak variable is the target of an assignment.
360 *
361 * @param location The address of the weak pointer itself
362 * @param newObj The new object this weak ptr should now point to
363 *
364 * @return \e newObj
365 */
366 id
367 objc_storeWeak(id *location, id newObj)
368 {
369 return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
370 (location, (objc_object *)newObj);
371 }
372
373
374 /**
375 * This function stores a new value into a __weak variable.
376 * If the new object is deallocating or the new object's class
377 * does not support weak references, stores nil instead.
378 *
379 * @param location The address of the weak pointer itself
380 * @param newObj The new object this weak ptr should now point to
381 *
382 * @return The value stored (either the new object or nil)
383 */
384 id
385 objc_storeWeakOrNil(id *location, id newObj)
386 {
387 return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
388 (location, (objc_object *)newObj);
389 }
390
391
392 /**
393 * Initialize a fresh weak pointer to some object location.
394 * It would be used for code like:
395 *
396 * (The nil case)
397 * __weak id weakPtr;
398 * (The non-nil case)
399 * NSObject *o = ...;
400 * __weak id weakPtr = o;
401 *
402 * This function IS NOT thread-safe with respect to concurrent
403 * modifications to the weak variable. (Concurrent weak clear is safe.)
404 *
405 * @param location Address of __weak ptr.
406 * @param newObj Object ptr.
407 */
408 id
409 objc_initWeak(id *location, id newObj)
410 {
411 if (!newObj) {
412 *location = nil;
413 return nil;
414 }
415
416 return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
417 (location, (objc_object*)newObj);
418 }
419
420 id
421 objc_initWeakOrNil(id *location, id newObj)
422 {
423 if (!newObj) {
424 *location = nil;
425 return nil;
426 }
427
428 return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
429 (location, (objc_object*)newObj);
430 }
431
432
433 /**
434 * Destroys the relationship between a weak pointer
435 * and the object it is referencing in the internal weak
436 * table. If the weak pointer is not referencing anything,
437 * there is no need to edit the weak table.
438 *
439 * This function IS NOT thread-safe with respect to concurrent
440 * modifications to the weak variable. (Concurrent weak clear is safe.)
441 *
442 * @param location The weak pointer address.
443 */
444 void
445 objc_destroyWeak(id *location)
446 {
447 (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
448 (location, nil);
449 }
450
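/*
  Illustrative sketch of how ARC typically drives these entry points for a
  __weak local variable (exact calls depend on the compiler and optimizer):

    {
        __weak id weakPtr = obj;    // objc_initWeak(&weakPtr, obj)
        weakPtr = other;            // objc_storeWeak(&weakPtr, other)
        id strong = weakPtr;        // objc_loadWeakRetained(&weakPtr),
                                    // released again after last use
    }                               // objc_destroyWeak(&weakPtr)
*/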
451
452 /*
453 Once upon a time we eagerly cleared *location if we saw the object
454 was deallocating. This confuses code like NSPointerFunctions which
455 tries to pre-flight the raw storage and assumes if the storage is
456 zero then the weak system is done interfering. That is false: the
457 weak system is still going to check and clear the storage later.
458 This can cause objc_weak_error complaints and crashes.
459 So we now don't touch the storage until deallocation completes.
460 */
461
462 id
463 objc_loadWeakRetained(id *location)
464 {
465 id obj;
466 id result;
467 Class cls;
468
469 SideTable *table;
470
471 retry:
472 // fixme std::atomic this load
473 obj = *location;
474 if (!obj) return nil;
475 if (obj->isTaggedPointer()) return obj;
476
477 table = &SideTables()[obj];
478
479 table->lock();
480 if (*location != obj) {
481 table->unlock();
482 goto retry;
483 }
484
485 result = obj;
486
487 cls = obj->ISA();
488 if (! cls->hasCustomRR()) {
489 // Fast case. We know +initialize is complete because
490 // default-RR can never be set before then.
491 ASSERT(cls->isInitialized());
492 if (! obj->rootTryRetain()) {
493 result = nil;
494 }
495 }
496 else {
497 // Slow case. We must check for +initialize and call it outside
498 // the lock if necessary in order to avoid deadlocks.
499 if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
500 BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
501 class_getMethodImplementation(cls, @selector(retainWeakReference));
502 if ((IMP)tryRetain == _objc_msgForward) {
503 result = nil;
504 }
505 else if (! (*tryRetain)(obj, @selector(retainWeakReference))) {
506 result = nil;
507 }
508 }
509 else {
510 table->unlock();
511 class_initialize(cls, obj);
512 goto retry;
513 }
514 }
515
516 table->unlock();
517 return result;
518 }
519
520 /**
521 * This loads the object referenced by a weak pointer and returns it, after
522 * retaining and autoreleasing the object to ensure that it stays alive
523 * long enough for the caller to use it. This function would be used
524 * anywhere a __weak variable is used in an expression.
525 *
526 * @param location The weak pointer address
527 *
528 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
529 */
530 id
531 objc_loadWeak(id *location)
532 {
533 if (!*location) return nil;
534 return objc_autorelease(objc_loadWeakRetained(location));
535 }
536
537
538 /**
539 * This function copies a weak pointer from one location to another,
540 * when the destination doesn't already contain a weak pointer. It
541 * would be used for code like:
542 *
543 * __weak id src = ...;
544 * __weak id dst = src;
545 *
546 * This function IS NOT thread-safe with respect to concurrent
547 * modifications to the destination variable. (Concurrent weak clear is safe.)
548 *
549 * @param dst The destination variable.
550 * @param src The source variable.
551 */
552 void
553 objc_copyWeak(id *dst, id *src)
554 {
555 id obj = objc_loadWeakRetained(src);
556 objc_initWeak(dst, obj);
557 objc_release(obj);
558 }
559
560 /**
561 * Move a weak pointer from one location to another.
562 * Before the move, the destination must be uninitialized.
563 * After the move, the source is nil.
564 *
565 * This function IS NOT thread-safe with respect to concurrent
566 * modifications to either weak variable. (Concurrent weak clear is safe.)
567 *
568 */
569 void
570 objc_moveWeak(id *dst, id *src)
571 {
572 objc_copyWeak(dst, src);
573 objc_destroyWeak(src);
574 *src = nil;
575 }
576
577
578 /***********************************************************************
579 Autorelease pool implementation
580
581 A thread's autorelease pool is a stack of pointers.
582 Each pointer is either an object to release, or POOL_BOUNDARY which is
583 an autorelease pool boundary.
584 A pool token is a pointer to the POOL_BOUNDARY for that pool. When
585 the pool is popped, every object hotter than the sentinel is released.
586 The stack is divided into a doubly-linked list of pages. Pages are added
587 and deleted as necessary.
588 Thread-local storage points to the hot page, where newly autoreleased
589 objects are stored.
590 **********************************************************************/
591
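/*
  Sketch of the public API in terms of this stack. An @autoreleasepool block
  compiles to roughly:

    void *token = objc_autoreleasePoolPush();  // push POOL_BOUNDARY, return a token for it
    ...
    objc_autorelease(obj);                     // append obj to the hot page
    ...
    objc_autoreleasePoolPop(token);            // release everything hotter than the boundary
*/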
592 BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
593 BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
594
595 class AutoreleasePoolPage : private AutoreleasePoolPageData
596 {
597 friend struct thread_data_t;
598
599 public:
600 static size_t const SIZE =
601 #if PROTECT_AUTORELEASEPOOL
602 PAGE_MAX_SIZE; // must be multiple of vm page size
603 #else
604 PAGE_MIN_SIZE; // size and alignment, power of 2
605 #endif
606
607 private:
608 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
609 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
610 static size_t const COUNT = SIZE / sizeof(id);
611
612 // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
613 // pushed and it has never contained any objects. This saves memory
614 // when the top level (e.g. libdispatch) pushes and pops pools but
615 // never uses them.
616 # define EMPTY_POOL_PLACEHOLDER ((id*)1)
617
618 # define POOL_BOUNDARY nil
619
620 // SIZE-sizeof(*this) bytes of contents follow
621
622 static void * operator new(size_t size) {
623 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
624 }
625 static void operator delete(void * p) {
626 return free(p);
627 }
628
629 inline void protect() {
630 #if PROTECT_AUTORELEASEPOOL
631 mprotect(this, SIZE, PROT_READ);
632 check();
633 #endif
634 }
635
636 inline void unprotect() {
637 #if PROTECT_AUTORELEASEPOOL
638 check();
639 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
640 #endif
641 }
642
643 AutoreleasePoolPage(AutoreleasePoolPage *newParent) :
644 AutoreleasePoolPageData(begin(),
645 objc_thread_self(),
646 newParent,
647 newParent ? 1+newParent->depth : 0,
648 newParent ? newParent->hiwat : 0)
649 {
650 if (parent) {
651 parent->check();
652 ASSERT(!parent->child);
653 parent->unprotect();
654 parent->child = this;
655 parent->protect();
656 }
657 protect();
658 }
659
660 ~AutoreleasePoolPage()
661 {
662 check();
663 unprotect();
664 ASSERT(empty());
665
666 // Not recursive: we don't want to blow out the stack
667 // if a thread accumulates a stupendous amount of garbage
668 ASSERT(!child);
669 }
670
671 template<typename Fn>
672 void
673 busted(Fn log) const
674 {
675 magic_t right;
676 log("autorelease pool page %p corrupted\n"
677 " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
678 " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
679 " pthread %p\n"
680 " should be %p\n",
681 this,
682 magic.m[0], magic.m[1], magic.m[2], magic.m[3],
683 right.m[0], right.m[1], right.m[2], right.m[3],
684 this->thread, objc_thread_self());
685 }
686
687 __attribute__((noinline, cold, noreturn))
688 void
689 busted_die() const
690 {
691 busted(_objc_fatal);
692 __builtin_unreachable();
693 }
694
695 inline void
696 check(bool die = true) const
697 {
698 if (!magic.check() || thread != objc_thread_self()) {
699 if (die) {
700 busted_die();
701 } else {
702 busted(_objc_inform);
703 }
704 }
705 }
706
707 inline void
708 fastcheck() const
709 {
710 #if CHECK_AUTORELEASEPOOL
711 check();
712 #else
713 if (! magic.fastcheck()) {
714 busted_die();
715 }
716 #endif
717 }
718
719
720 id * begin() {
721 return (id *) ((uint8_t *)this+sizeof(*this));
722 }
723
724 id * end() {
725 return (id *) ((uint8_t *)this+SIZE);
726 }
727
728 bool empty() {
729 return next == begin();
730 }
731
732 bool full() {
733 return next == end();
734 }
735
736 bool lessThanHalfFull() {
737 return (next - begin() < (end() - begin()) / 2);
738 }
739
740 id *add(id obj)
741 {
742 ASSERT(!full());
743 unprotect();
744 id *ret = next; // faster than `return next-1` because of aliasing
745 *next++ = obj;
746 protect();
747 return ret;
748 }
749
750 void releaseAll()
751 {
752 releaseUntil(begin());
753 }
754
755 void releaseUntil(id *stop)
756 {
757 // Not recursive: we don't want to blow out the stack
758 // if a thread accumulates a stupendous amount of garbage
759
760 while (this->next != stop) {
761 // Restart from hotPage() every time, in case -release
762 // autoreleased more objects
763 AutoreleasePoolPage *page = hotPage();
764
765 // fixme I think this `while` can be `if`, but I can't prove it
766 while (page->empty()) {
767 page = page->parent;
768 setHotPage(page);
769 }
770
771 page->unprotect();
772 id obj = *--page->next;
773 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
774 page->protect();
775
776 if (obj != POOL_BOUNDARY) {
777 objc_release(obj);
778 }
779 }
780
781 setHotPage(this);
782
783 #if DEBUG
784 // we expect any children to be completely empty
785 for (AutoreleasePoolPage *page = child; page; page = page->child) {
786 ASSERT(page->empty());
787 }
788 #endif
789 }
790
791 void kill()
792 {
793 // Not recursive: we don't want to blow out the stack
794 // if a thread accumulates a stupendous amount of garbage
795 AutoreleasePoolPage *page = this;
796 while (page->child) page = page->child;
797
798 AutoreleasePoolPage *deathptr;
799 do {
800 deathptr = page;
801 page = page->parent;
802 if (page) {
803 page->unprotect();
804 page->child = nil;
805 page->protect();
806 }
807 delete deathptr;
808 } while (deathptr != this);
809 }
810
811 static void tls_dealloc(void *p)
812 {
813 if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
814 // No objects or pool pages to clean up here.
815 return;
816 }
817
818 // reinstate TLS value while we work
819 setHotPage((AutoreleasePoolPage *)p);
820
821 if (AutoreleasePoolPage *page = coldPage()) {
822 if (!page->empty()) objc_autoreleasePoolPop(page->begin()); // pop all of the pools
823 if (slowpath(DebugMissingPools || DebugPoolAllocation)) {
824 // pop() killed the pages already
825 } else {
826 page->kill(); // free all of the pages
827 }
828 }
829
830 // clear TLS value so TLS destruction doesn't loop
831 setHotPage(nil);
832 }
833
834 static AutoreleasePoolPage *pageForPointer(const void *p)
835 {
836 return pageForPointer((uintptr_t)p);
837 }
838
839 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
840 {
841 AutoreleasePoolPage *result;
842 uintptr_t offset = p % SIZE;
843
844 ASSERT(offset >= sizeof(AutoreleasePoolPage));
845
846 result = (AutoreleasePoolPage *)(p - offset);
847 result->fastcheck();
848
849 return result;
850 }
851
852
853 static inline bool haveEmptyPoolPlaceholder()
854 {
855 id *tls = (id *)tls_get_direct(key);
856 return (tls == EMPTY_POOL_PLACEHOLDER);
857 }
858
859 static inline id* setEmptyPoolPlaceholder()
860 {
861 ASSERT(tls_get_direct(key) == nil);
862 tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
863 return EMPTY_POOL_PLACEHOLDER;
864 }
865
866 static inline AutoreleasePoolPage *hotPage()
867 {
868 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
869 tls_get_direct(key);
870 if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
871 if (result) result->fastcheck();
872 return result;
873 }
874
875 static inline void setHotPage(AutoreleasePoolPage *page)
876 {
877 if (page) page->fastcheck();
878 tls_set_direct(key, (void *)page);
879 }
880
881 static inline AutoreleasePoolPage *coldPage()
882 {
883 AutoreleasePoolPage *result = hotPage();
884 if (result) {
885 while (result->parent) {
886 result = result->parent;
887 result->fastcheck();
888 }
889 }
890 return result;
891 }
892
893
894 static inline id *autoreleaseFast(id obj)
895 {
896 AutoreleasePoolPage *page = hotPage();
897 if (page && !page->full()) {
898 return page->add(obj);
899 } else if (page) {
900 return autoreleaseFullPage(obj, page);
901 } else {
902 return autoreleaseNoPage(obj);
903 }
904 }
905
906 static __attribute__((noinline))
907 id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
908 {
909 // The hot page is full.
910 // Step to the next non-full page, adding a new page if necessary.
911 // Then add the object to that page.
912 ASSERT(page == hotPage());
913 ASSERT(page->full() || DebugPoolAllocation);
914
915 do {
916 if (page->child) page = page->child;
917 else page = new AutoreleasePoolPage(page);
918 } while (page->full());
919
920 setHotPage(page);
921 return page->add(obj);
922 }
923
924 static __attribute__((noinline))
925 id *autoreleaseNoPage(id obj)
926 {
927 // "No page" could mean no pool has been pushed
928 // or an empty placeholder pool has been pushed and has no contents yet
929 ASSERT(!hotPage());
930
931 bool pushExtraBoundary = false;
932 if (haveEmptyPoolPlaceholder()) {
933 // We are pushing a second pool over the empty placeholder pool
934 // or pushing the first object into the empty placeholder pool.
935 // Before doing that, push a pool boundary on behalf of the pool
936 // that is currently represented by the empty placeholder.
937 pushExtraBoundary = true;
938 }
939 else if (obj != POOL_BOUNDARY && DebugMissingPools) {
940 // We are pushing an object with no pool in place,
941 // and no-pool debugging was requested by environment.
942 _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
943 "autoreleased with no pool in place - "
944 "just leaking - break on "
945 "objc_autoreleaseNoPool() to debug",
946 objc_thread_self(), (void*)obj, object_getClassName(obj));
947 objc_autoreleaseNoPool(obj);
948 return nil;
949 }
950 else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
951 // We are pushing a pool with no pool in place,
952 // and alloc-per-pool debugging was not requested.
953 // Install and return the empty pool placeholder.
954 return setEmptyPoolPlaceholder();
955 }
956
957 // We are pushing an object or a non-placeholder'd pool.
958
959 // Install the first page.
960 AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
961 setHotPage(page);
962
963 // Push a boundary on behalf of the previously-placeholder'd pool.
964 if (pushExtraBoundary) {
965 page->add(POOL_BOUNDARY);
966 }
967
968 // Push the requested object or pool.
969 return page->add(obj);
970 }
971
972
973 static __attribute__((noinline))
974 id *autoreleaseNewPage(id obj)
975 {
976 AutoreleasePoolPage *page = hotPage();
977 if (page) return autoreleaseFullPage(obj, page);
978 else return autoreleaseNoPage(obj);
979 }
980
981 public:
982 static inline id autorelease(id obj)
983 {
984 ASSERT(obj);
985 ASSERT(!obj->isTaggedPointer());
986 id *dest __unused = autoreleaseFast(obj);
987 ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
988 return obj;
989 }
990
991
992 static inline void *push()
993 {
994 id *dest;
995 if (slowpath(DebugPoolAllocation)) {
996 // Each autorelease pool starts on a new pool page.
997 dest = autoreleaseNewPage(POOL_BOUNDARY);
998 } else {
999 dest = autoreleaseFast(POOL_BOUNDARY);
1000 }
1001 ASSERT(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
1002 return dest;
1003 }
1004
1005 __attribute__((noinline, cold))
1006 static void badPop(void *token)
1007 {
1008 // Error. For bincompat purposes this is not
1009 // fatal in executables built with old SDKs.
1010
1011 if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0, 2_0)) {
1012 // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
1013 _objc_fatal
1014 ("Invalid or prematurely-freed autorelease pool %p.", token);
1015 }
1016
1017 // Old SDK. Bad pop is warned once.
1018 static bool complained = false;
1019 if (!complained) {
1020 complained = true;
1021 _objc_inform_now_and_on_crash
1022 ("Invalid or prematurely-freed autorelease pool %p. "
1023 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
1024 "Proceeding anyway because the app is old "
1025 "(SDK version " SDK_FORMAT "). Memory errors are likely.",
1026 token, FORMAT_SDK(sdkVersion()));
1027 }
1028 objc_autoreleasePoolInvalid(token);
1029 }
1030
1031 template<bool allowDebug>
1032 static void
1033 popPage(void *token, AutoreleasePoolPage *page, id *stop)
1034 {
1035 if (allowDebug && PrintPoolHiwat) printHiwat();
1036
1037 page->releaseUntil(stop);
1038
1039 // memory: delete empty children
1040 if (allowDebug && DebugPoolAllocation && page->empty()) {
1041 // special case: delete everything during page-per-pool debugging
1042 AutoreleasePoolPage *parent = page->parent;
1043 page->kill();
1044 setHotPage(parent);
1045 } else if (allowDebug && DebugMissingPools && page->empty() && !page->parent) {
1046 // special case: delete everything for pop(top)
1047 // when debugging missing autorelease pools
1048 page->kill();
1049 setHotPage(nil);
1050 } else if (page->child) {
1051 // hysteresis: keep one empty child if page is more than half full
1052 if (page->lessThanHalfFull()) {
1053 page->child->kill();
1054 }
1055 else if (page->child->child) {
1056 page->child->child->kill();
1057 }
1058 }
1059 }
1060
1061 __attribute__((noinline, cold))
1062 static void
1063 popPageDebug(void *token, AutoreleasePoolPage *page, id *stop)
1064 {
1065 popPage<true>(token, page, stop);
1066 }
1067
1068 static inline void
1069 pop(void *token)
1070 {
1071 AutoreleasePoolPage *page;
1072 id *stop;
1073 if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
1074 // Popping the top-level placeholder pool.
1075 page = hotPage();
1076 if (!page) {
1077 // Pool was never used. Clear the placeholder.
1078 return setHotPage(nil);
1079 }
1080 // Pool was used. Pop its contents normally.
1081 // Pool pages remain allocated for re-use as usual.
1082 page = coldPage();
1083 token = page->begin();
1084 } else {
1085 page = pageForPointer(token);
1086 }
1087
1088 stop = (id *)token;
1089 if (*stop != POOL_BOUNDARY) {
1090 if (stop == page->begin() && !page->parent) {
1091 // Start of coldest page may correctly not be POOL_BOUNDARY:
1092 // 1. top-level pool is popped, leaving the cold page in place
1093 // 2. an object is autoreleased with no pool
1094 } else {
1095 // Error. For bincompat purposes this is not
1096 // fatal in executables built with old SDKs.
1097 return badPop(token);
1098 }
1099 }
1100
1101 if (slowpath(PrintPoolHiwat || DebugPoolAllocation || DebugMissingPools)) {
1102 return popPageDebug(token, page, stop);
1103 }
1104
1105 return popPage<false>(token, page, stop);
1106 }
1107
1108 static void init()
1109 {
1110 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
1111 AutoreleasePoolPage::tls_dealloc);
1112 ASSERT(r == 0);
1113 }
1114
1115 __attribute__((noinline, cold))
1116 void print()
1117 {
1118 _objc_inform("[%p] ................ PAGE %s %s %s", this,
1119 full() ? "(full)" : "",
1120 this == hotPage() ? "(hot)" : "",
1121 this == coldPage() ? "(cold)" : "");
1122 check(false);
1123 for (id *p = begin(); p < next; p++) {
1124 if (*p == POOL_BOUNDARY) {
1125 _objc_inform("[%p] ################ POOL %p", p, p);
1126 } else {
1127 _objc_inform("[%p] %#16lx %s",
1128 p, (unsigned long)*p, object_getClassName(*p));
1129 }
1130 }
1131 }
1132
1133 __attribute__((noinline, cold))
1134 static void printAll()
1135 {
1136 _objc_inform("##############");
1137 _objc_inform("AUTORELEASE POOLS for thread %p", objc_thread_self());
1138
1139 AutoreleasePoolPage *page;
1140 ptrdiff_t objects = 0;
1141 for (page = coldPage(); page; page = page->child) {
1142 objects += page->next - page->begin();
1143 }
1144 _objc_inform("%llu releases pending.", (unsigned long long)objects);
1145
1146 if (haveEmptyPoolPlaceholder()) {
1147 _objc_inform("[%p] ................ PAGE (placeholder)",
1148 EMPTY_POOL_PLACEHOLDER);
1149 _objc_inform("[%p] ################ POOL (placeholder)",
1150 EMPTY_POOL_PLACEHOLDER);
1151 }
1152 else {
1153 for (page = coldPage(); page; page = page->child) {
1154 page->print();
1155 }
1156 }
1157
1158 _objc_inform("##############");
1159 }
1160
1161 __attribute__((noinline, cold))
1162 static void printHiwat()
1163 {
1164 // Check and propagate high water mark
1165 // Ignore high water marks under 256 to suppress noise.
1166 AutoreleasePoolPage *p = hotPage();
1167 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
1168 if (mark > p->hiwat && mark > 256) {
1169 for( ; p; p = p->parent) {
1170 p->unprotect();
1171 p->hiwat = mark;
1172 p->protect();
1173 }
1174
1175 _objc_inform("POOL HIGHWATER: new high water mark of %u "
1176 "pending releases for thread %p:",
1177 mark, objc_thread_self());
1178
1179 void *stack[128];
1180 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
1181 char **sym = backtrace_symbols(stack, count);
1182 for (int i = 0; i < count; i++) {
1183 _objc_inform("POOL HIGHWATER: %s", sym[i]);
1184 }
1185 free(sym);
1186 }
1187 }
1188
1189 #undef POOL_BOUNDARY
1190 };
1191
1192 /***********************************************************************
1193 * Slow paths for inline control
1194 **********************************************************************/
1195
1196 #if SUPPORT_NONPOINTER_ISA
1197
1198 NEVER_INLINE id
1199 objc_object::rootRetain_overflow(bool tryRetain)
1200 {
1201 return rootRetain(tryRetain, true);
1202 }
1203
1204
1205 NEVER_INLINE uintptr_t
1206 objc_object::rootRelease_underflow(bool performDealloc)
1207 {
1208 return rootRelease(performDealloc, true);
1209 }
1210
1211
1212 // Slow path of clearDeallocating()
1213 // for objects with nonpointer isa
1214 // that were ever weakly referenced
1215 // or whose retain count ever overflowed to the side table.
1216 NEVER_INLINE void
1217 objc_object::clearDeallocating_slow()
1218 {
1219 ASSERT(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));
1220
1221 SideTable& table = SideTables()[this];
1222 table.lock();
1223 if (isa.weakly_referenced) {
1224 weak_clear_no_lock(&table.weak_table, (id)this);
1225 }
1226 if (isa.has_sidetable_rc) {
1227 table.refcnts.erase(this);
1228 }
1229 table.unlock();
1230 }
1231
1232 #endif
1233
1234 __attribute__((noinline,used))
1235 id
1236 objc_object::rootAutorelease2()
1237 {
1238 ASSERT(!isTaggedPointer());
1239 return AutoreleasePoolPage::autorelease((id)this);
1240 }
1241
1242
1243 BREAKPOINT_FUNCTION(
1244 void objc_overrelease_during_dealloc_error(void)
1245 );
1246
1247
1248 NEVER_INLINE uintptr_t
1249 objc_object::overrelease_error()
1250 {
1251 _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
1252 objc_overrelease_during_dealloc_error();
1253 return 0; // allow rootRelease() to tail-call this
1254 }
1255
1256
1257 /***********************************************************************
1258 * Retain count operations for side table.
1259 **********************************************************************/
1260
1261
1262 #if DEBUG
1263 // Used to assert that an object is not present in the side table.
1264 bool
1265 objc_object::sidetable_present()
1266 {
1267 bool result = false;
1268 SideTable& table = SideTables()[this];
1269
1270 table.lock();
1271
1272 RefcountMap::iterator it = table.refcnts.find(this);
1273 if (it != table.refcnts.end()) result = true;
1274
1275 if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;
1276
1277 table.unlock();
1278
1279 return result;
1280 }
1281 #endif
1282
1283 #if SUPPORT_NONPOINTER_ISA
1284
1285 void
1286 objc_object::sidetable_lock()
1287 {
1288 SideTable& table = SideTables()[this];
1289 table.lock();
1290 }
1291
1292 void
1293 objc_object::sidetable_unlock()
1294 {
1295 SideTable& table = SideTables()[this];
1296 table.unlock();
1297 }
1298
1299
1300 // Move the entire retain count to the side table,
1301 // as well as isDeallocating and weaklyReferenced.
1302 void
1303 objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
1304 bool isDeallocating,
1305 bool weaklyReferenced)
1306 {
1307 ASSERT(!isa.nonpointer); // should already be changed to raw pointer
1308 SideTable& table = SideTables()[this];
1309
1310 size_t& refcntStorage = table.refcnts[this];
1311 size_t oldRefcnt = refcntStorage;
1312 // not deallocating - that was in the isa
1313 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1314 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1315
1316 uintptr_t carry;
1317 size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1318 if (carry) refcnt = SIDE_TABLE_RC_PINNED;
1319 if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
1320 if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
1321
1322 refcntStorage = refcnt;
1323 }
1324
1325
1326 // Move some retain counts to the side table from the isa field.
1327 // Returns true if the object is now pinned.
1328 bool
1329 objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
1330 {
1331 ASSERT(isa.nonpointer);
1332 SideTable& table = SideTables()[this];
1333
1334 size_t& refcntStorage = table.refcnts[this];
1335 size_t oldRefcnt = refcntStorage;
1336 // isa-side bits should not be set here
1337 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1338 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1339
1340 if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
1341
1342 uintptr_t carry;
1343 size_t newRefcnt =
1344 addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1345 if (carry) {
1346 refcntStorage =
1347 SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
1348 return true;
1349 }
1350 else {
1351 refcntStorage = newRefcnt;
1352 return false;
1353 }
1354 }
1355
1356
1357 // Move some retain counts from the side table to the isa field.
1358 // Returns the actual count subtracted, which may be less than the request.
1359 size_t
1360 objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
1361 {
1362 ASSERT(isa.nonpointer);
1363 SideTable& table = SideTables()[this];
1364
1365 RefcountMap::iterator it = table.refcnts.find(this);
1366 if (it == table.refcnts.end() || it->second == 0) {
1367 // Side table retain count is zero. Can't borrow.
1368 return 0;
1369 }
1370 size_t oldRefcnt = it->second;
1371
1372 // isa-side bits should not be set here
1373 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1374 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1375
1376 size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
1377 ASSERT(oldRefcnt > newRefcnt); // shouldn't underflow
1378 it->second = newRefcnt;
1379 return delta_rc;
1380 }
1381
1382
1383 size_t
1384 objc_object::sidetable_getExtraRC_nolock()
1385 {
1386 ASSERT(isa.nonpointer);
1387 SideTable& table = SideTables()[this];
1388 RefcountMap::iterator it = table.refcnts.find(this);
1389 if (it == table.refcnts.end()) return 0;
1390 else return it->second >> SIDE_TABLE_RC_SHIFT;
1391 }
1392
1393
1394 // SUPPORT_NONPOINTER_ISA
1395 #endif
1396
1397
1398 id
1399 objc_object::sidetable_retain()
1400 {
1401 #if SUPPORT_NONPOINTER_ISA
1402 ASSERT(!isa.nonpointer);
1403 #endif
1404 SideTable& table = SideTables()[this];
1405
1406 table.lock();
1407 size_t& refcntStorage = table.refcnts[this];
1408 if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
1409 refcntStorage += SIDE_TABLE_RC_ONE;
1410 }
1411 table.unlock();
1412
1413 return (id)this;
1414 }
1415
1416
1417 bool
1418 objc_object::sidetable_tryRetain()
1419 {
1420 #if SUPPORT_NONPOINTER_ISA
1421 ASSERT(!isa.nonpointer);
1422 #endif
1423 SideTable& table = SideTables()[this];
1424
1425 // NO SPINLOCK HERE
1426 // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
1427 // which already acquired the lock on our behalf.
1428
1429 // fixme can't do this efficiently with os_lock_handoff_s
1430 // if (table.slock == 0) {
1431 // _objc_fatal("Do not call -_tryRetain.");
1432 // }
1433
1434 bool result = true;
1435 auto it = table.refcnts.try_emplace(this, SIDE_TABLE_RC_ONE);
1436 auto &refcnt = it.first->second;
1437 if (it.second) {
1438 // there was no entry
1439 } else if (refcnt & SIDE_TABLE_DEALLOCATING) {
1440 result = false;
1441 } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
1442 refcnt += SIDE_TABLE_RC_ONE;
1443 }
1444
1445 return result;
1446 }
1447
1448
1449 uintptr_t
1450 objc_object::sidetable_retainCount()
1451 {
1452 SideTable& table = SideTables()[this];
1453
1454 size_t refcnt_result = 1;
1455
1456 table.lock();
1457 RefcountMap::iterator it = table.refcnts.find(this);
1458 if (it != table.refcnts.end()) {
1459 // this is valid for SIDE_TABLE_RC_PINNED too
1460 refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
1461 }
1462 table.unlock();
1463 return refcnt_result;
1464 }
1465
1466
1467 bool
1468 objc_object::sidetable_isDeallocating()
1469 {
1470 SideTable& table = SideTables()[this];
1471
1472 // NO SPINLOCK HERE
1473 // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
1474 // which already acquired the lock on our behalf.
1475
1476
1477 // fixme can't do this efficiently with os_lock_handoff_s
1478 // if (table.slock == 0) {
1479 // _objc_fatal("Do not call -_isDeallocating.");
1480 // }
1481
1482 RefcountMap::iterator it = table.refcnts.find(this);
1483 return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
1484 }
1485
1486
1487 bool
1488 objc_object::sidetable_isWeaklyReferenced()
1489 {
1490 bool result = false;
1491
1492 SideTable& table = SideTables()[this];
1493 table.lock();
1494
1495 RefcountMap::iterator it = table.refcnts.find(this);
1496 if (it != table.refcnts.end()) {
1497 result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
1498 }
1499
1500 table.unlock();
1501
1502 return result;
1503 }
1504
1505
1506 void
1507 objc_object::sidetable_setWeaklyReferenced_nolock()
1508 {
1509 #if SUPPORT_NONPOINTER_ISA
1510 ASSERT(!isa.nonpointer);
1511 #endif
1512
1513 SideTable& table = SideTables()[this];
1514
1515 table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
1516 }
1517
1518
1519 // rdar://20206767
1520 // return uintptr_t instead of bool so that the various raw-isa
1521 // -release paths all return zero in eax
1522 uintptr_t
1523 objc_object::sidetable_release(bool performDealloc)
1524 {
1525 #if SUPPORT_NONPOINTER_ISA
1526 ASSERT(!isa.nonpointer);
1527 #endif
1528 SideTable& table = SideTables()[this];
1529
1530 bool do_dealloc = false;
1531
1532 table.lock();
1533 auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
1534 auto &refcnt = it.first->second;
1535 if (it.second) {
1536 do_dealloc = true;
1537 } else if (refcnt < SIDE_TABLE_DEALLOCATING) {
1538 // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
1539 do_dealloc = true;
1540 refcnt |= SIDE_TABLE_DEALLOCATING;
1541 } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
1542 refcnt -= SIDE_TABLE_RC_ONE;
1543 }
1544 table.unlock();
1545 if (do_dealloc && performDealloc) {
1546 ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
1547 }
1548 return do_dealloc;
1549 }
1550
1551
1552 void
1553 objc_object::sidetable_clearDeallocating()
1554 {
1555 SideTable& table = SideTables()[this];
1556
1557 // clear any weak table items
1558 // clear extra retain count and deallocating bit
1559 // (fixme warn or abort if extra retain count == 0 ?)
1560 table.lock();
1561 RefcountMap::iterator it = table.refcnts.find(this);
1562 if (it != table.refcnts.end()) {
1563 if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
1564 weak_clear_no_lock(&table.weak_table, (id)this);
1565 }
1566 table.refcnts.erase(it);
1567 }
1568 table.unlock();
1569 }
1570
1571
1572 /***********************************************************************
1573 * Optimized retain/release/autorelease entrypoints
1574 **********************************************************************/
1575
1576
1577 #if __OBJC2__
1578
1579 __attribute__((aligned(16), flatten, noinline))
1580 id
1581 objc_retain(id obj)
1582 {
1583 if (!obj) return obj;
1584 if (obj->isTaggedPointer()) return obj;
1585 return obj->retain();
1586 }
1587
1588
1589 __attribute__((aligned(16), flatten, noinline))
1590 void
1591 objc_release(id obj)
1592 {
1593 if (!obj) return;
1594 if (obj->isTaggedPointer()) return;
1595 return obj->release();
1596 }
1597
1598
1599 __attribute__((aligned(16), flatten, noinline))
1600 id
1601 objc_autorelease(id obj)
1602 {
1603 if (!obj) return obj;
1604 if (obj->isTaggedPointer()) return obj;
1605 return obj->autorelease();
1606 }
1607
1608
1609 // OBJC2
1610 #else
1611 // not OBJC2
1612
1613
1614 id objc_retain(id obj) { return [obj retain]; }
1615 void objc_release(id obj) { [obj release]; }
1616 id objc_autorelease(id obj) { return [obj autorelease]; }
1617
1618
1619 #endif
1620
1621
1622 /***********************************************************************
1623 * Basic operations for root class implementations a.k.a. _objc_root*()
1624 **********************************************************************/
1625
1626 bool
1627 _objc_rootTryRetain(id obj)
1628 {
1629 ASSERT(obj);
1630
1631 return obj->rootTryRetain();
1632 }
1633
1634 bool
1635 _objc_rootIsDeallocating(id obj)
1636 {
1637 ASSERT(obj);
1638
1639 return obj->rootIsDeallocating();
1640 }
1641
1642
1643 void
1644 objc_clear_deallocating(id obj)
1645 {
1646 ASSERT(obj);
1647
1648 if (obj->isTaggedPointer()) return;
1649 obj->clearDeallocating();
1650 }
1651
1652
1653 bool
1654 _objc_rootReleaseWasZero(id obj)
1655 {
1656 ASSERT(obj);
1657
1658 return obj->rootReleaseShouldDealloc();
1659 }
1660
1661
1662 NEVER_INLINE id
1663 _objc_rootAutorelease(id obj)
1664 {
1665 ASSERT(obj);
1666 return obj->rootAutorelease();
1667 }
1668
1669 uintptr_t
1670 _objc_rootRetainCount(id obj)
1671 {
1672 ASSERT(obj);
1673
1674 return obj->rootRetainCount();
1675 }
1676
1677
1678 NEVER_INLINE id
1679 _objc_rootRetain(id obj)
1680 {
1681 ASSERT(obj);
1682
1683 return obj->rootRetain();
1684 }
1685
1686 NEVER_INLINE void
1687 _objc_rootRelease(id obj)
1688 {
1689 ASSERT(obj);
1690
1691 obj->rootRelease();
1692 }
1693
1694
1695 // Call [cls alloc] or [cls allocWithZone:nil], with appropriate
1696 // shortcutting optimizations.
1697 static ALWAYS_INLINE id
1698 callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
1699 {
1700 #if __OBJC2__
1701 if (slowpath(checkNil && !cls)) return nil;
1702 if (fastpath(!cls->ISA()->hasCustomAWZ())) {
1703 return _objc_rootAllocWithZone(cls, nil);
1704 }
1705 #endif
1706
1707 // No shortcuts available.
1708 if (allocWithZone) {
1709 return ((id(*)(id, SEL, struct _NSZone *))objc_msgSend)(cls, @selector(allocWithZone:), nil);
1710 }
1711 return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(alloc));
1712 }
1713
1714
1715 // Base class implementation of +alloc. cls is not nil.
1716 // Calls [cls allocWithZone:nil].
1717 id
1718 _objc_rootAlloc(Class cls)
1719 {
1720 return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
1721 }
1722
1723 // Calls [cls alloc].
1724 id
1725 objc_alloc(Class cls)
1726 {
1727 return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
1728 }
1729
1730 // Calls [cls allocWithZone:nil].
1731 id
1732 objc_allocWithZone(Class cls)
1733 {
1734 return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
1735 }
1736
1737 // Calls [[cls alloc] init].
1738 id
1739 objc_alloc_init(Class cls)
1740 {
1741 return [callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/) init];
1742 }
1743
1744 // Calls [cls new]
1745 id
1746 objc_opt_new(Class cls)
1747 {
1748 #if __OBJC2__
1749 if (fastpath(cls && !cls->ISA()->hasCustomCore())) {
1750 return [callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/) init];
1751 }
1752 #endif
1753 return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new));
1754 }
1755
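// Illustrative sketch: the entry points above correspond to ordinary message
// sends, and the compiler may call them directly (depending on deployment
// target) so that callAlloc() can shortcut the default implementations:
//
//   [Foo alloc]         -> objc_alloc(Foo)
//   [[Foo alloc] init]  -> objc_alloc_init(Foo)
//   [Foo new]           -> objc_opt_new(Foo)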
1756 // Calls [obj self]
1757 id
1758 objc_opt_self(id obj)
1759 {
1760 #if __OBJC2__
1761 if (fastpath(!obj || obj->isTaggedPointer() || !obj->ISA()->hasCustomCore())) {
1762 return obj;
1763 }
1764 #endif
1765 return ((id(*)(id, SEL))objc_msgSend)(obj, @selector(self));
1766 }
1767
1768 // Calls [obj class]
1769 Class
1770 objc_opt_class(id obj)
1771 {
1772 #if __OBJC2__
1773 if (slowpath(!obj)) return nil;
1774 Class cls = obj->getIsa();
1775 if (fastpath(!cls->hasCustomCore())) {
1776 return cls->isMetaClass() ? obj : cls;
1777 }
1778 #endif
1779 return ((Class(*)(id, SEL))objc_msgSend)(obj, @selector(class));
1780 }
1781
1782 // Calls [obj isKindOfClass:otherClass]
1783 BOOL
1784 objc_opt_isKindOfClass(id obj, Class otherClass)
1785 {
1786 #if __OBJC2__
1787 if (slowpath(!obj)) return NO;
1788 Class cls = obj->getIsa();
1789 if (fastpath(!cls->hasCustomCore())) {
1790 for (Class tcls = cls; tcls; tcls = tcls->superclass) {
1791 if (tcls == otherClass) return YES;
1792 }
1793 return NO;
1794 }
1795 #endif
1796 return ((BOOL(*)(id, SEL, Class))objc_msgSend)(obj, @selector(isKindOfClass:), otherClass);
1797 }
1798
1799 // Calls [obj respondsToSelector:sel]
1800 BOOL
1801 objc_opt_respondsToSelector(id obj, SEL sel)
1802 {
1803 #if __OBJC2__
1804 if (slowpath(!obj)) return NO;
1805 Class cls = obj->getIsa();
1806 if (fastpath(!cls->hasCustomCore())) {
1807 return class_respondsToSelector_inst(obj, sel, cls);
1808 }
1809 #endif
1810 return ((BOOL(*)(id, SEL, SEL))objc_msgSend)(obj, @selector(respondsToSelector:), sel);
1811 }
1812
1813 void
1814 _objc_rootDealloc(id obj)
1815 {
1816 ASSERT(obj);
1817
1818 obj->rootDealloc();
1819 }
1820
1821 void
1822 _objc_rootFinalize(id obj __unused)
1823 {
1824 ASSERT(obj);
1825 _objc_fatal("_objc_rootFinalize called with garbage collection off");
1826 }
1827
1828
1829 id
1830 _objc_rootInit(id obj)
1831 {
1832 // In practice, it will be hard to rely on this function.
1833 // Many classes do not properly chain -init calls.
1834 return obj;
1835 }
1836
1837
1838 malloc_zone_t *
1839 _objc_rootZone(id obj)
1840 {
1841 (void)obj;
1842 #if __OBJC2__
1843 // allocWithZone under __OBJC2__ ignores the zone parameter
1844 return malloc_default_zone();
1845 #else
1846 malloc_zone_t *rval = malloc_zone_from_ptr(obj);
1847 return rval ? rval : malloc_default_zone();
1848 #endif
1849 }
1850
1851 uintptr_t
1852 _objc_rootHash(id obj)
1853 {
1854 return (uintptr_t)obj;
1855 }
1856
1857 void *
1858 objc_autoreleasePoolPush(void)
1859 {
1860 return AutoreleasePoolPage::push();
1861 }
1862
1863 NEVER_INLINE
1864 void
1865 objc_autoreleasePoolPop(void *ctxt)
1866 {
1867 AutoreleasePoolPage::pop(ctxt);
1868 }
1869
1870
1871 void *
1872 _objc_autoreleasePoolPush(void)
1873 {
1874 return objc_autoreleasePoolPush();
1875 }
1876
1877 void
1878 _objc_autoreleasePoolPop(void *ctxt)
1879 {
1880 objc_autoreleasePoolPop(ctxt);
1881 }
1882
1883 void
1884 _objc_autoreleasePoolPrint(void)
1885 {
1886 AutoreleasePoolPage::printAll();
1887 }
1888
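// Handy from a debugger, e.g.:
//   (lldb) expr (void)_objc_autoreleasePoolPrint()
// dumps every page and every pending release for the current thread via
// AutoreleasePoolPage::printAll().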
1889
1890 // Same as objc_release but suitable for tail-calling
1891 // if you need the value back and don't want to push a frame before this point.
1892 __attribute__((noinline))
1893 static id
1894 objc_releaseAndReturn(id obj)
1895 {
1896 objc_release(obj);
1897 return obj;
1898 }
1899
1900 // Same as objc_retainAutorelease but suitable for tail-calling
1901 // if you don't want to push a frame before this point.
1902 __attribute__((noinline))
1903 static id
1904 objc_retainAutoreleaseAndReturn(id obj)
1905 {
1906 return objc_retainAutorelease(obj);
1907 }
1908
1909
1910 // Prepare a value at +1 for return through a +0 autoreleasing convention.
1911 id
1912 objc_autoreleaseReturnValue(id obj)
1913 {
1914 if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
1915
1916 return objc_autorelease(obj);
1917 }
1918
1919 // Prepare a value at +0 for return through a +0 autoreleasing convention.
1920 id
1921 objc_retainAutoreleaseReturnValue(id obj)
1922 {
1923 if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;
1924
1925 // not objc_autoreleaseReturnValue(objc_retain(obj))
1926 // because we don't need another optimization attempt
1927 return objc_retainAutoreleaseAndReturn(obj);
1928 }
1929
1930 // Accept a value returned through a +0 autoreleasing convention for use at +1.
1931 id
1932 objc_retainAutoreleasedReturnValue(id obj)
1933 {
1934 if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
1935
1936 return objc_retain(obj);
1937 }
1938
1939 // Accept a value returned through a +0 autoreleasing convention for use at +0.
1940 id
1941 objc_unsafeClaimAutoreleasedReturnValue(id obj)
1942 {
1943 if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;
1944
1945 return objc_releaseAndReturn(obj);
1946 }
1947
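/*
  Sketch of the handshake these four entry points implement. ARC emits the
  matching pair on each side of a +0 ("autoreleasing") return, e.g.:

    // callee: hand back a +1 result through the +0 convention
    return objc_autoreleaseReturnValue(result);

    // caller: take ownership of that result at +1
    id obj = objc_retainAutoreleasedReturnValue(makeObject());

  When prepareOptimizedReturn() / acceptOptimizedReturn() detect cooperating
  code on both sides, the autorelease and the retain cancel out and the object
  never enters the autorelease pool.
*/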
1948 id
1949 objc_retainAutorelease(id obj)
1950 {
1951 return objc_autorelease(objc_retain(obj));
1952 }
1953
1954 void
1955 _objc_deallocOnMainThreadHelper(void *context)
1956 {
1957 id obj = (id)context;
1958 [obj dealloc];
1959 }
1960
1961 // convert objc_objectptr_t to id, callee must take ownership.
1962 id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1963
1964 // convert objc_objectptr_t to id, without ownership transfer.
1965 id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1966
1967 // convert id to objc_objectptr_t, no ownership transfer.
1968 objc_objectptr_t objc_unretainedPointer(id object) { return object; }
1969
1970
1971 void arr_init(void)
1972 {
1973 AutoreleasePoolPage::init();
1974 SideTablesMap.init();
1975 _objc_associations_init();
1976 }
1977
1978
1979 #if SUPPORT_TAGGED_POINTERS
1980
1981 // Placeholder for old debuggers. When they inspect an
1982 // extended tagged pointer object they will see this isa.
1983
1984 @interface __NSUnrecognizedTaggedPointer : NSObject
1985 @end
1986
1987 __attribute__((objc_nonlazy_class))
1988 @implementation __NSUnrecognizedTaggedPointer
1989 -(id) retain { return self; }
1990 -(oneway void) release { }
1991 -(id) autorelease { return self; }
1992 @end
1993
1994 #endif
1995
1996 __attribute__((objc_nonlazy_class))
1997 @implementation NSObject
1998
1999 + (void)initialize {
2000 }
2001
2002 + (id)self {
2003 return (id)self;
2004 }
2005
2006 - (id)self {
2007 return self;
2008 }
2009
2010 + (Class)class {
2011 return self;
2012 }
2013
2014 - (Class)class {
2015 return object_getClass(self);
2016 }
2017
2018 + (Class)superclass {
2019 return self->superclass;
2020 }
2021
2022 - (Class)superclass {
2023 return [self class]->superclass;
2024 }
2025
2026 + (BOOL)isMemberOfClass:(Class)cls {
2027 return self->ISA() == cls;
2028 }
2029
2030 - (BOOL)isMemberOfClass:(Class)cls {
2031 return [self class] == cls;
2032 }
2033
2034 + (BOOL)isKindOfClass:(Class)cls {
2035 for (Class tcls = self->ISA(); tcls; tcls = tcls->superclass) {
2036 if (tcls == cls) return YES;
2037 }
2038 return NO;
2039 }
2040
2041 - (BOOL)isKindOfClass:(Class)cls {
2042 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2043 if (tcls == cls) return YES;
2044 }
2045 return NO;
2046 }
2047
2048 + (BOOL)isSubclassOfClass:(Class)cls {
2049 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2050 if (tcls == cls) return YES;
2051 }
2052 return NO;
2053 }
2054
2055 + (BOOL)isAncestorOfObject:(NSObject *)obj {
2056 for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
2057 if (tcls == self) return YES;
2058 }
2059 return NO;
2060 }
2061
2062 + (BOOL)instancesRespondToSelector:(SEL)sel {
2063 return class_respondsToSelector_inst(nil, sel, self);
2064 }
2065
2066 + (BOOL)respondsToSelector:(SEL)sel {
2067 return class_respondsToSelector_inst(self, sel, self->ISA());
2068 }
2069
2070 - (BOOL)respondsToSelector:(SEL)sel {
2071 return class_respondsToSelector_inst(self, sel, [self class]);
2072 }
2073
2074 + (BOOL)conformsToProtocol:(Protocol *)protocol {
2075 if (!protocol) return NO;
2076 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2077 if (class_conformsToProtocol(tcls, protocol)) return YES;
2078 }
2079 return NO;
2080 }
2081
2082 - (BOOL)conformsToProtocol:(Protocol *)protocol {
2083 if (!protocol) return NO;
2084 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2085 if (class_conformsToProtocol(tcls, protocol)) return YES;
2086 }
2087 return NO;
2088 }
2089
2090 + (NSUInteger)hash {
2091 return _objc_rootHash(self);
2092 }
2093
2094 - (NSUInteger)hash {
2095 return _objc_rootHash(self);
2096 }
2097
2098 + (BOOL)isEqual:(id)obj {
2099 return obj == (id)self;
2100 }
2101
2102 - (BOOL)isEqual:(id)obj {
2103 return obj == self;
2104 }
2105
2106
2107 + (BOOL)isFault {
2108 return NO;
2109 }
2110
2111 - (BOOL)isFault {
2112 return NO;
2113 }
2114
2115 + (BOOL)isProxy {
2116 return NO;
2117 }
2118
2119 - (BOOL)isProxy {
2120 return NO;
2121 }
2122
2123
2124 + (IMP)instanceMethodForSelector:(SEL)sel {
2125 if (!sel) [self doesNotRecognizeSelector:sel];
2126 return class_getMethodImplementation(self, sel);
2127 }
2128
2129 + (IMP)methodForSelector:(SEL)sel {
2130 if (!sel) [self doesNotRecognizeSelector:sel];
2131 return object_getMethodImplementation((id)self, sel);
2132 }
2133
2134 - (IMP)methodForSelector:(SEL)sel {
2135 if (!sel) [self doesNotRecognizeSelector:sel];
2136 return object_getMethodImplementation(self, sel);
2137 }
2138
2139 + (BOOL)resolveClassMethod:(SEL)sel {
2140 return NO;
2141 }
2142
2143 + (BOOL)resolveInstanceMethod:(SEL)sel {
2144 return NO;
2145 }
2146
2147 // Replaced by CF (throws an NSException)
2148 + (void)doesNotRecognizeSelector:(SEL)sel {
2149 _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
2150 class_getName(self), sel_getName(sel), self);
2151 }
2152
2153 // Replaced by CF (throws an NSException)
2154 - (void)doesNotRecognizeSelector:(SEL)sel {
2155 _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
2156 object_getClassName(self), sel_getName(sel), self);
2157 }
2158
2159
2160 + (id)performSelector:(SEL)sel {
2161 if (!sel) [self doesNotRecognizeSelector:sel];
2162 return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
2163 }
2164
2165 + (id)performSelector:(SEL)sel withObject:(id)obj {
2166 if (!sel) [self doesNotRecognizeSelector:sel];
2167 return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
2168 }
2169
2170 + (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2171 if (!sel) [self doesNotRecognizeSelector:sel];
2172 return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
2173 }
2174
2175 - (id)performSelector:(SEL)sel {
2176 if (!sel) [self doesNotRecognizeSelector:sel];
2177 return ((id(*)(id, SEL))objc_msgSend)(self, sel);
2178 }
2179
2180 - (id)performSelector:(SEL)sel withObject:(id)obj {
2181 if (!sel) [self doesNotRecognizeSelector:sel];
2182 return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
2183 }
2184
2185 - (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2186 if (!sel) [self doesNotRecognizeSelector:sel];
2187 return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
2188 }
2189
2190
2191 // Replaced by CF (returns an NSMethodSignature)
2192 + (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
2193 _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
2194 "not available without CoreFoundation");
2195 }
2196
2197 // Replaced by CF (returns an NSMethodSignature)
2198 + (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2199 _objc_fatal("+[NSObject methodSignatureForSelector:] "
2200 "not available without CoreFoundation");
2201 }
2202
2203 // Replaced by CF (returns an NSMethodSignature)
2204 - (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2205 _objc_fatal("-[NSObject methodSignatureForSelector:] "
2206 "not available without CoreFoundation");
2207 }
2208
2209 + (void)forwardInvocation:(NSInvocation *)invocation {
2210 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2211 }
2212
2213 - (void)forwardInvocation:(NSInvocation *)invocation {
2214 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2215 }
2216
2217 + (id)forwardingTargetForSelector:(SEL)sel {
2218 return nil;
2219 }
2220
2221 - (id)forwardingTargetForSelector:(SEL)sel {
2222 return nil;
2223 }
2224
2225
2226 // Replaced by CF (returns an NSString)
2227 + (NSString *)description {
2228 return nil;
2229 }
2230
2231 // Replaced by CF (returns an NSString)
2232 - (NSString *)description {
2233 return nil;
2234 }
2235
2236 + (NSString *)debugDescription {
2237 return [self description];
2238 }
2239
2240 - (NSString *)debugDescription {
2241 return [self description];
2242 }
2243
2244
2245 + (id)new {
2246 return [callAlloc(self, false/*checkNil*/) init];
2247 }
2248
2249 + (id)retain {
2250 return (id)self;
2251 }
2252
2253 // Replaced by ObjectAlloc
2254 - (id)retain {
2255 return _objc_rootRetain(self);
2256 }
2257
2258
2259 + (BOOL)_tryRetain {
2260 return YES;
2261 }
2262
2263 // Replaced by ObjectAlloc
2264 - (BOOL)_tryRetain {
2265 return _objc_rootTryRetain(self);
2266 }
2267
2268 + (BOOL)_isDeallocating {
2269 return NO;
2270 }
2271
2272 - (BOOL)_isDeallocating {
2273 return _objc_rootIsDeallocating(self);
2274 }
2275
2276 + (BOOL)allowsWeakReference {
2277 return YES;
2278 }
2279
2280 + (BOOL)retainWeakReference {
2281 return YES;
2282 }
2283
2284 - (BOOL)allowsWeakReference {
2285 return ! [self _isDeallocating];
2286 }
2287
2288 - (BOOL)retainWeakReference {
2289 return [self _tryRetain];
2290 }
2291
2292 + (oneway void)release {
2293 }
2294
2295 // Replaced by ObjectAlloc
2296 - (oneway void)release {
2297 _objc_rootRelease(self);
2298 }
2299
2300 + (id)autorelease {
2301 return (id)self;
2302 }
2303
2304 // Replaced by ObjectAlloc
2305 - (id)autorelease {
2306 return _objc_rootAutorelease(self);
2307 }
2308
2309 + (NSUInteger)retainCount {
2310 return ULONG_MAX;
2311 }
2312
2313 - (NSUInteger)retainCount {
2314 return _objc_rootRetainCount(self);
2315 }
2316
2317 + (id)alloc {
2318 return _objc_rootAlloc(self);
2319 }
2320
2321 // Replaced by ObjectAlloc
2322 + (id)allocWithZone:(struct _NSZone *)zone {
2323 return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
2324 }
2325
2326 // Replaced by CF (throws an NSException)
2327 + (id)init {
2328 return (id)self;
2329 }
2330
2331 - (id)init {
2332 return _objc_rootInit(self);
2333 }
2334
2335 // Replaced by CF (throws an NSException)
2336 + (void)dealloc {
2337 }
2338
2339
2340 // Replaced by NSZombies
2341 - (void)dealloc {
2342 _objc_rootDealloc(self);
2343 }
2344
2345 // Previously used by GC. Now a placeholder for binary compatibility.
2346 - (void) finalize {
2347 }
2348
2349 + (struct _NSZone *)zone {
2350 return (struct _NSZone *)_objc_rootZone(self);
2351 }
2352
2353 - (struct _NSZone *)zone {
2354 return (struct _NSZone *)_objc_rootZone(self);
2355 }
2356
2357 + (id)copy {
2358 return (id)self;
2359 }
2360
2361 + (id)copyWithZone:(struct _NSZone *)zone {
2362 return (id)self;
2363 }
2364
2365 - (id)copy {
2366 return [(id)self copyWithZone:nil];
2367 }
2368
2369 + (id)mutableCopy {
2370 return (id)self;
2371 }
2372
2373 + (id)mutableCopyWithZone:(struct _NSZone *)zone {
2374 return (id)self;
2375 }
2376
2377 - (id)mutableCopy {
2378 return [(id)self mutableCopyWithZone:nil];
2379 }
2380
2381 @end
2382
2383