1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include "objc-private.h"
25 #include "NSObject.h"
26
27 #include "objc-weak.h"
28 #include "DenseMapExtras.h"
29
30 #include <malloc/malloc.h>
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <mach/mach.h>
34 #include <mach-o/dyld.h>
35 #include <mach-o/nlist.h>
36 #include <sys/types.h>
37 #include <sys/mman.h>
38 #include <Block.h>
39 #include <map>
40 #include <execinfo.h>
41 #include "NSObject-internal.h"
42
43 @interface NSInvocation
44 - (SEL)selector;
45 @end
46
47 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_magic_offset = __builtin_offsetof(AutoreleasePoolPageData, magic);
48 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_next_offset = __builtin_offsetof(AutoreleasePoolPageData, next);
49 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_thread_offset = __builtin_offsetof(AutoreleasePoolPageData, thread);
50 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset = __builtin_offsetof(AutoreleasePoolPageData, parent);
51 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset = __builtin_offsetof(AutoreleasePoolPageData, child);
52 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset = __builtin_offsetof(AutoreleasePoolPageData, depth);
53 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset = __builtin_offsetof(AutoreleasePoolPageData, hiwat);
54 #if __OBJC2__
55 OBJC_EXTERN const uint32_t objc_class_abi_version = OBJC_CLASS_ABI_VERSION_MAX;
56 #endif
57
58 /***********************************************************************
59 * Weak ivar support
60 **********************************************************************/
61
62 static id defaultBadAllocHandler(Class cls)
63 {
64 _objc_fatal("attempt to allocate object of class '%s' failed",
65 cls->nameForLogging());
66 }
67
68 id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
69
70 id _objc_callBadAllocHandler(Class cls)
71 {
72 // fixme add re-entrancy protection in case allocation fails inside handler
73 return (*badAllocHandler)(cls);
74 }
75
76 void _objc_setBadAllocHandler(id(*newHandler)(Class))
77 {
78 badAllocHandler = newHandler;
79 }
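
// Illustrative sketch (not part of the runtime): a hypothetical handler
// installed through _objc_setBadAllocHandler could attempt recovery
// instead of crashing. "myBadAllocHandler" is an assumed name, shown only
// to clarify the shape of the hook.
//
//     static id myBadAllocHandler(Class cls) {
//         // e.g. purge caches here, then retry the allocation once
//         return class_createInstance(cls, 0);
//     }
//     ...
//     _objc_setBadAllocHandler(myBadAllocHandler);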
80
81
82 namespace {
83
84 // The order of these bits is important.
85 #define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
86 #define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
87 #define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit
88 #define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1))
89
90 #define SIDE_TABLE_RC_SHIFT 2
91 #define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
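
// Worked example of the layout above (illustrative only): an object whose
// side-table entry carries an extra retain count of 3 and which is weakly
// referenced but not deallocating stores
//     (3 << SIDE_TABLE_RC_SHIFT) | SIDE_TABLE_WEAKLY_REFERENCED  ==  0b1101
// Once the stored count would overflow, SIDE_TABLE_RC_PINNED is set and
// later retains/releases leave the entry unchanged, so the count can never
// wrap back into the flag bits.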
92
93 struct RefcountMapValuePurgeable {
94 static inline bool isPurgeable(size_t x) {
95 return x == 0;
96 }
97 };
98
99 // RefcountMap disguises its pointers because we
100 // don't want the table to act as a root for `leaks`.
101 typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,RefcountMapValuePurgeable> RefcountMap;
102
103 // Template parameters.
104 enum HaveOld { DontHaveOld = false, DoHaveOld = true };
105 enum HaveNew { DontHaveNew = false, DoHaveNew = true };
106
107 struct SideTable {
108 spinlock_t slock;
109 RefcountMap refcnts;
110 weak_table_t weak_table;
111
112 SideTable() {
113 memset(&weak_table, 0, sizeof(weak_table));
114 }
115
116 ~SideTable() {
117 _objc_fatal("Do not delete SideTable.");
118 }
119
120 void lock() { slock.lock(); }
121 void unlock() { slock.unlock(); }
122 void forceReset() { slock.forceReset(); }
123
124 // Address-ordered lock discipline for a pair of side tables.
125
126 template<HaveOld, HaveNew>
127 static void lockTwo(SideTable *lock1, SideTable *lock2);
128 template<HaveOld, HaveNew>
129 static void unlockTwo(SideTable *lock1, SideTable *lock2);
130 };
131
132
133 template<>
134 void SideTable::lockTwo<DoHaveOld, DoHaveNew>
135 (SideTable *lock1, SideTable *lock2)
136 {
137 spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
138 }
139
140 template<>
141 void SideTable::lockTwo<DoHaveOld, DontHaveNew>
142 (SideTable *lock1, SideTable *)
143 {
144 lock1->lock();
145 }
146
147 template<>
148 void SideTable::lockTwo<DontHaveOld, DoHaveNew>
149 (SideTable *, SideTable *lock2)
150 {
151 lock2->lock();
152 }
153
154 template<>
155 void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
156 (SideTable *lock1, SideTable *lock2)
157 {
158 spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
159 }
160
161 template<>
162 void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
163 (SideTable *lock1, SideTable *)
164 {
165 lock1->unlock();
166 }
167
168 template<>
169 void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
170 (SideTable *, SideTable *lock2)
171 {
172 lock2->unlock();
173 }
174
175 static objc::ExplicitInit<StripedMap<SideTable>> SideTablesMap;
176
177 static StripedMap<SideTable>& SideTables() {
178 return SideTablesMap.get();
179 }
180
181 // anonymous namespace
182 };
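
// Typical access pattern (sketch): each per-object operation hashes the
// object's address into one stripe of the global map, takes that stripe's
// lock, and works on refcnts / weak_table under it.
//
//     SideTable& table = SideTables()[obj];
//     table.lock();
//     // ... read or write table.refcnts / table.weak_table ...
//     table.unlock();
//
// When two tables must be held at once (see storeWeak below), lockTwo<>()
// acquires them in address order to avoid deadlock.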
183
184 void SideTableLockAll() {
185 SideTables().lockAll();
186 }
187
188 void SideTableUnlockAll() {
189 SideTables().unlockAll();
190 }
191
192 void SideTableForceResetAll() {
193 SideTables().forceResetAll();
194 }
195
196 void SideTableDefineLockOrder() {
197 SideTables().defineLockOrder();
198 }
199
200 void SideTableLocksPrecedeLock(const void *newlock) {
201 SideTables().precedeLock(newlock);
202 }
203
204 void SideTableLocksSucceedLock(const void *oldlock) {
205 SideTables().succeedLock(oldlock);
206 }
207
208 void SideTableLocksPrecedeLocks(StripedMap<spinlock_t>& newlocks) {
209 int i = 0;
210 const void *newlock;
211 while ((newlock = newlocks.getLock(i++))) {
212 SideTables().precedeLock(newlock);
213 }
214 }
215
216 void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
217 int i = 0;
218 const void *oldlock;
219 while ((oldlock = oldlocks.getLock(i++))) {
220 SideTables().succeedLock(oldlock);
221 }
222 }
223
224 //
225 // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
226 //
227
228 id objc_retainBlock(id x) {
229 return (id)_Block_copy(x);
230 }
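
// Call-site sketch (illustrative, not emitted by this file): under ARC,
// assigning a block literal to a __strong variable of block type is
// typically lowered to objc_retainBlock so the block is copied off the
// stack, e.g.
//
//     void (^work)(void);
//     work = ^{ /* ... */ };    // roughly: work = objc_retainBlock(^{ ... });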
231
232 //
233 // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
234 //
235
236 BOOL objc_should_deallocate(id object) {
237 return YES;
238 }
239
240 id
241 objc_retain_autorelease(id obj)
242 {
243 return objc_autorelease(objc_retain(obj));
244 }
245
246
247 void
248 objc_storeStrong(id *location, id obj)
249 {
250 id prev = *location;
251 if (obj == prev) {
252 return;
253 }
254 objc_retain(obj);
255 *location = obj;
256 objc_release(prev);
257 }
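
// Call-site sketch (illustrative): under ARC an assignment to a __strong
// lvalue such as
//
//     strongIvar = newValue;
//
// may be emitted as objc_storeStrong(&strongIvar, newValue), i.e. the
// retain-new / store / release-old sequence implemented above.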
258
259
260 // Update a weak variable.
261 // If HaveOld is true, the variable has an existing value
262 // that needs to be cleaned up. This value might be nil.
263 // If HaveNew is true, there is a new value that needs to be
264 // assigned into the variable. This value might be nil.
265 // If CrashIfDeallocating is true, the process is halted if newObj is
266 // deallocating or newObj's class does not support weak references.
267 // If CrashIfDeallocating is false, nil is stored instead.
268 enum CrashIfDeallocating {
269 DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
270 };
271 template <HaveOld haveOld, HaveNew haveNew,
272 CrashIfDeallocating crashIfDeallocating>
273 static id
274 storeWeak(id *location, objc_object *newObj)
275 {
276 ASSERT(haveOld || haveNew);
277 if (!haveNew) ASSERT(newObj == nil);
278
279 Class previouslyInitializedClass = nil;
280 id oldObj;
281 SideTable *oldTable;
282 SideTable *newTable;
283
284 // Acquire locks for old and new values.
285 // Order by lock address to prevent lock ordering problems.
286 // Retry if the old value changes underneath us.
287 retry:
288 if (haveOld) {
289 oldObj = *location;
290 oldTable = &SideTables()[oldObj];
291 } else {
292 oldTable = nil;
293 }
294 if (haveNew) {
295 newTable = &SideTables()[newObj];
296 } else {
297 newTable = nil;
298 }
299
300 SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);
301
302 if (haveOld && *location != oldObj) {
303 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
304 goto retry;
305 }
306
307 // Prevent a deadlock between the weak reference machinery
308 // and the +initialize machinery by ensuring that no
309 // weakly-referenced object has an un-+initialized isa.
310 if (haveNew && newObj) {
311 Class cls = newObj->getIsa();
312 if (cls != previouslyInitializedClass &&
313 !((objc_class *)cls)->isInitialized())
314 {
315 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
316 class_initialize(cls, (id)newObj);
317
318 // If this class is finished with +initialize then we're good.
319 // If this class is still running +initialize on this thread
320 // (i.e. +initialize called storeWeak on an instance of itself)
321 // then we may proceed but it will appear initializing and
322 // not yet initialized to the check above.
323 // Instead set previouslyInitializedClass to recognize it on retry.
324 previouslyInitializedClass = cls;
325
326 goto retry;
327 }
328 }
329
330 // Clean up old value, if any.
331 if (haveOld) {
332 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
333 }
334
335 // Assign new value, if any.
336 if (haveNew) {
337 newObj = (objc_object *)
338 weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
339 crashIfDeallocating);
340 // weak_register_no_lock returns nil if weak store should be rejected
341
342 // Set is-weakly-referenced bit in refcount table.
343 if (newObj && !newObj->isTaggedPointer()) {
344 newObj->setWeaklyReferenced_nolock();
345 }
346
347 // Do not set *location anywhere else. That would introduce a race.
348 *location = (id)newObj;
349 }
350 else {
351 // No new value. The storage is not changed.
352 }
353
354 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
355
356 return (id)newObj;
357 }
358
359
360 /**
361 * This function stores a new value into a __weak variable. It would
362 * be used anywhere a __weak variable is the target of an assignment.
363 *
364 * @param location The address of the weak pointer itself
365 * @param newObj The new object this weak ptr should now point to
366 *
367 * @return \e newObj
368 */
369 id
370 objc_storeWeak(id *location, id newObj)
371 {
372 return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
373 (location, (objc_object *)newObj);
374 }
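
// Call-site sketch (illustrative): assigning to an already-initialized
// __weak variable
//
//     __weak id weakPtr = ...;
//     weakPtr = obj;            // compiles to objc_storeWeak(&weakPtr, obj)
//
// whereas the very first initialization goes through objc_initWeak (below).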
375
376
377 /**
378 * This function stores a new value into a __weak variable.
379 * If the new object is deallocating or the new object's class
380 * does not support weak references, stores nil instead.
381 *
382 * @param location The address of the weak pointer itself
383 * @param newObj The new object this weak ptr should now point to
384 *
385 * @return The value stored (either the new object or nil)
386 */
387 id
388 objc_storeWeakOrNil(id *location, id newObj)
389 {
390 return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
391 (location, (objc_object *)newObj);
392 }
393
394
395 /**
396 * Initialize a fresh weak pointer to some object location.
397 * It would be used for code like:
398 *
399 * (The nil case)
400 * __weak id weakPtr;
401 * (The non-nil case)
402 * NSObject *o = ...;
403 * __weak id weakPtr = o;
404 *
405 * This function IS NOT thread-safe with respect to concurrent
406 * modifications to the weak variable. (Concurrent weak clear is safe.)
407 *
408 * @param location Address of __weak ptr.
409 * @param newObj Object ptr.
410 */
411 id
412 objc_initWeak(id *location, id newObj)
413 {
414 if (!newObj) {
415 *location = nil;
416 return nil;
417 }
418
419 return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
420 (location, (objc_object*)newObj);
421 }
422
423 id
424 objc_initWeakOrNil(id *location, id newObj)
425 {
426 if (!newObj) {
427 *location = nil;
428 return nil;
429 }
430
431 return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
432 (location, (objc_object*)newObj);
433 }
434
435
436 /**
437 * Destroys the relationship between a weak pointer
438 * and the object it is referencing in the internal weak
439 * table. If the weak pointer is not referencing anything,
440 * there is no need to edit the weak table.
441 *
442 * This function IS NOT thread-safe with respect to concurrent
443 * modifications to the weak variable. (Concurrent weak clear is safe.)
444 *
445 * @param location The weak pointer address.
446 */
447 void
448 objc_destroyWeak(id *location)
449 {
450 (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
451 (location, nil);
452 }
453
454
455 /*
456 Once upon a time we eagerly cleared *location if we saw the object
457 was deallocating. This confuses code like NSPointerFunctions which
458 tries to pre-flight the raw storage and assumes if the storage is
459 zero then the weak system is done interfering. That is false: the
460 weak system is still going to check and clear the storage later.
461 This can cause objc_weak_error complaints and crashes.
462 So we now don't touch the storage until deallocation completes.
463 */
464
465 id
466 objc_loadWeakRetained(id *location)
467 {
468 id obj;
469 id result;
470 Class cls;
471
472 SideTable *table;
473
474 retry:
475 // fixme std::atomic this load
476 obj = *location;
477 if (!obj) return nil;
478 if (obj->isTaggedPointer()) return obj;
479
480 table = &SideTables()[obj];
481
482 table->lock();
483 if (*location != obj) {
484 table->unlock();
485 goto retry;
486 }
487
488 result = obj;
489
490 cls = obj->ISA();
491 if (! cls->hasCustomRR()) {
492 // Fast case. We know +initialize is complete because
493 // default-RR can never be set before then.
494 ASSERT(cls->isInitialized());
495 if (! obj->rootTryRetain()) {
496 result = nil;
497 }
498 }
499 else {
500 // Slow case. We must check for +initialize and call it outside
501 // the lock if necessary in order to avoid deadlocks.
502 if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
503 BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
504 class_getMethodImplementation(cls, @selector(retainWeakReference));
505 if ((IMP)tryRetain == _objc_msgForward) {
506 result = nil;
507 }
508 else if (! (*tryRetain)(obj, @selector(retainWeakReference))) {
509 result = nil;
510 }
511 }
512 else {
513 table->unlock();
514 class_initialize(cls, obj);
515 goto retry;
516 }
517 }
518
519 table->unlock();
520 return result;
521 }
522
523 /**
524 * This loads the object referenced by a weak pointer and returns it, after
525 * retaining and autoreleasing the object to ensure that it stays alive
526 * long enough for the caller to use it. This function would be used
527 * anywhere a __weak variable is used in an expression.
528 *
529 * @param location The weak pointer address
530 *
531 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
532 */
533 id
534 objc_loadWeak(id *location)
535 {
536 if (!*location) return nil;
537 return objc_autorelease(objc_loadWeakRetained(location));
538 }
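
// Call-site sketch (illustrative): reading a __weak variable in an
// expression goes through the functions above so the object cannot be
// deallocated while it is in use, e.g.
//
//     __weak id weakPtr = ...;
//     [weakPtr doSomething];
//     // roughly: id tmp = objc_loadWeakRetained(&weakPtr);
//     //          [tmp doSomething];
//     //          objc_release(tmp);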
539
540
541 /**
542 * This function copies a weak pointer from one location to another,
543 * when the destination doesn't already contain a weak pointer. It
544 * would be used for code like:
545 *
546 * __weak id src = ...;
547 * __weak id dst = src;
548 *
549 * This function IS NOT thread-safe with respect to concurrent
550 * modifications to the destination variable. (Concurrent weak clear is safe.)
551 *
552 * @param dst The destination variable.
553 * @param src The source variable.
554 */
555 void
556 objc_copyWeak(id *dst, id *src)
557 {
558 id obj = objc_loadWeakRetained(src);
559 objc_initWeak(dst, obj);
560 objc_release(obj);
561 }
562
563 /**
564 * Move a weak pointer from one location to another.
565 * Before the move, the destination must be uninitialized.
566 * After the move, the source is nil.
567 *
568 * This function IS NOT thread-safe with respect to concurrent
569 * modifications to either weak variable. (Concurrent weak clear is safe.)
570 *
571 */
572 void
573 objc_moveWeak(id *dst, id *src)
574 {
575 objc_copyWeak(dst, src);
576 objc_destroyWeak(src);
577 *src = nil;
578 }
579
580
581 /***********************************************************************
582 Autorelease pool implementation
583
584 A thread's autorelease pool is a stack of pointers.
585 Each pointer is either an object to release, or POOL_BOUNDARY which is
586 an autorelease pool boundary.
587 A pool token is a pointer to the POOL_BOUNDARY for that pool. When
588 the pool is popped, every object hotter than that boundary is released.
589 The stack is divided into a doubly-linked list of pages. Pages are added
590 and deleted as necessary.
591 Thread-local storage points to the hot page, where newly autoreleased
592 objects are stored.
593 **********************************************************************/
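
// Sketch of how the pieces below fit together (illustrative): an
// @autoreleasepool block compiles to a push/pop pair, and -autorelease
// records the object in the current hot page.
//
//     void *token = objc_autoreleasePoolPush();  // pushes POOL_BOUNDARY,
//                                                // returns its slot
//     id obj = [[NSObject new] autorelease];     // obj added to the hot page
//     objc_autoreleasePoolPop(token);            // releases everything hotter
//                                                // than the boundary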
594
595 BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
596 BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
597
598 class AutoreleasePoolPage : private AutoreleasePoolPageData
599 {
600 friend struct thread_data_t;
601
602 public:
603 static size_t const SIZE =
604 #if PROTECT_AUTORELEASEPOOL
605 PAGE_MAX_SIZE; // must be multiple of vm page size
606 #else
607 PAGE_MIN_SIZE; // size and alignment, power of 2
608 #endif
609
610 private:
611 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
612 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
613 static size_t const COUNT = SIZE / sizeof(id);
614
615 // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
616 // pushed and it has never contained any objects. This saves memory
617 // when the top level (i.e. libdispatch) pushes and pops pools but
618 // never uses them.
619 # define EMPTY_POOL_PLACEHOLDER ((id*)1)
620
621 # define POOL_BOUNDARY nil
622
623 // SIZE-sizeof(*this) bytes of contents follow
624
625 static void * operator new(size_t size) {
626 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
627 }
628 static void operator delete(void * p) {
629 return free(p);
630 }
631
632 inline void protect() {
633 #if PROTECT_AUTORELEASEPOOL
634 mprotect(this, SIZE, PROT_READ);
635 check();
636 #endif
637 }
638
639 inline void unprotect() {
640 #if PROTECT_AUTORELEASEPOOL
641 check();
642 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
643 #endif
644 }
645
646 AutoreleasePoolPage(AutoreleasePoolPage *newParent) :
647 AutoreleasePoolPageData(begin(),
648 objc_thread_self(),
649 newParent,
650 newParent ? 1+newParent->depth : 0,
651 newParent ? newParent->hiwat : 0)
652 {
653 if (parent) {
654 parent->check();
655 ASSERT(!parent->child);
656 parent->unprotect();
657 parent->child = this;
658 parent->protect();
659 }
660 protect();
661 }
662
663 ~AutoreleasePoolPage()
664 {
665 check();
666 unprotect();
667 ASSERT(empty());
668
669 // Not recursive: we don't want to blow out the stack
670 // if a thread accumulates a stupendous amount of garbage
671 ASSERT(!child);
672 }
673
674 template<typename Fn>
675 void
676 busted(Fn log) const
677 {
678 magic_t right;
679 log("autorelease pool page %p corrupted\n"
680 " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
681 " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
682 " pthread %p\n"
683 " should be %p\n",
684 this,
685 magic.m[0], magic.m[1], magic.m[2], magic.m[3],
686 right.m[0], right.m[1], right.m[2], right.m[3],
687 this->thread, objc_thread_self());
688 }
689
690 __attribute__((noinline, cold, noreturn))
691 void
692 busted_die() const
693 {
694 busted(_objc_fatal);
695 __builtin_unreachable();
696 }
697
698 inline void
699 check(bool die = true) const
700 {
701 if (!magic.check() || thread != objc_thread_self()) {
702 if (die) {
703 busted_die();
704 } else {
705 busted(_objc_inform);
706 }
707 }
708 }
709
710 inline void
711 fastcheck() const
712 {
713 #if CHECK_AUTORELEASEPOOL
714 check();
715 #else
716 if (! magic.fastcheck()) {
717 busted_die();
718 }
719 #endif
720 }
721
722
723 id * begin() {
724 return (id *) ((uint8_t *)this+sizeof(*this));
725 }
726
727 id * end() {
728 return (id *) ((uint8_t *)this+SIZE);
729 }
730
731 bool empty() {
732 return next == begin();
733 }
734
735 bool full() {
736 return next == end();
737 }
738
739 bool lessThanHalfFull() {
740 return (next - begin() < (end() - begin()) / 2);
741 }
742
743 id *add(id obj)
744 {
745 ASSERT(!full());
746 unprotect();
747 id *ret = next; // faster than `return next-1` because of aliasing
748 *next++ = obj;
749 protect();
750 return ret;
751 }
752
753 void releaseAll()
754 {
755 releaseUntil(begin());
756 }
757
758 void releaseUntil(id *stop)
759 {
760 // Not recursive: we don't want to blow out the stack
761 // if a thread accumulates a stupendous amount of garbage
762
763 while (this->next != stop) {
764 // Restart from hotPage() every time, in case -release
765 // autoreleased more objects
766 AutoreleasePoolPage *page = hotPage();
767
768 // fixme I think this `while` can be `if`, but I can't prove it
769 while (page->empty()) {
770 page = page->parent;
771 setHotPage(page);
772 }
773
774 page->unprotect();
775 id obj = *--page->next;
776 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
777 page->protect();
778
779 if (obj != POOL_BOUNDARY) {
780 objc_release(obj);
781 }
782 }
783
784 setHotPage(this);
785
786 #if DEBUG
787 // we expect any children to be completely empty
788 for (AutoreleasePoolPage *page = child; page; page = page->child) {
789 ASSERT(page->empty());
790 }
791 #endif
792 }
793
794 void kill()
795 {
796 // Not recursive: we don't want to blow out the stack
797 // if a thread accumulates a stupendous amount of garbage
798 AutoreleasePoolPage *page = this;
799 while (page->child) page = page->child;
800
801 AutoreleasePoolPage *deathptr;
802 do {
803 deathptr = page;
804 page = page->parent;
805 if (page) {
806 page->unprotect();
807 page->child = nil;
808 page->protect();
809 }
810 delete deathptr;
811 } while (deathptr != this);
812 }
813
814 static void tls_dealloc(void *p)
815 {
816 if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
817 // No objects or pool pages to clean up here.
818 return;
819 }
820
821 // reinstate TLS value while we work
822 setHotPage((AutoreleasePoolPage *)p);
823
824 if (AutoreleasePoolPage *page = coldPage()) {
825 if (!page->empty()) objc_autoreleasePoolPop(page->begin()); // pop all of the pools
826 if (slowpath(DebugMissingPools || DebugPoolAllocation)) {
827 // pop() killed the pages already
828 } else {
829 page->kill(); // free all of the pages
830 }
831 }
832
833 // clear TLS value so TLS destruction doesn't loop
834 setHotPage(nil);
835 }
836
837 static AutoreleasePoolPage *pageForPointer(const void *p)
838 {
839 return pageForPointer((uintptr_t)p);
840 }
841
842 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
843 {
844 AutoreleasePoolPage *result;
845 uintptr_t offset = p % SIZE;
846
847 ASSERT(offset >= sizeof(AutoreleasePoolPage));
848
849 result = (AutoreleasePoolPage *)(p - offset);
850 result->fastcheck();
851
852 return result;
853 }
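
// Worked example (illustrative): operator new above allocates each page
// SIZE-aligned, so for any pointer p into a page the page base is simply
// p - (p % SIZE). With SIZE == 4096, a token at address 0x100006038 maps
// to the page object at 0x100006000, offset 0x38 from its start.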
854
855
856 static inline bool haveEmptyPoolPlaceholder()
857 {
858 id *tls = (id *)tls_get_direct(key);
859 return (tls == EMPTY_POOL_PLACEHOLDER);
860 }
861
862 static inline id* setEmptyPoolPlaceholder()
863 {
864 ASSERT(tls_get_direct(key) == nil);
865 tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
866 return EMPTY_POOL_PLACEHOLDER;
867 }
868
869 static inline AutoreleasePoolPage *hotPage()
870 {
871 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
872 tls_get_direct(key);
873 if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
874 if (result) result->fastcheck();
875 return result;
876 }
877
878 static inline void setHotPage(AutoreleasePoolPage *page)
879 {
880 if (page) page->fastcheck();
881 tls_set_direct(key, (void *)page);
882 }
883
884 static inline AutoreleasePoolPage *coldPage()
885 {
886 AutoreleasePoolPage *result = hotPage();
887 if (result) {
888 while (result->parent) {
889 result = result->parent;
890 result->fastcheck();
891 }
892 }
893 return result;
894 }
895
896
897 static inline id *autoreleaseFast(id obj)
898 {
899 AutoreleasePoolPage *page = hotPage();
900 if (page && !page->full()) {
901 return page->add(obj);
902 } else if (page) {
903 return autoreleaseFullPage(obj, page);
904 } else {
905 return autoreleaseNoPage(obj);
906 }
907 }
908
909 static __attribute__((noinline))
910 id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
911 {
912 // The hot page is full.
913 // Step to the next non-full page, adding a new page if necessary.
914 // Then add the object to that page.
915 ASSERT(page == hotPage());
916 ASSERT(page->full() || DebugPoolAllocation);
917
918 do {
919 if (page->child) page = page->child;
920 else page = new AutoreleasePoolPage(page);
921 } while (page->full());
922
923 setHotPage(page);
924 return page->add(obj);
925 }
926
927 static __attribute__((noinline))
928 id *autoreleaseNoPage(id obj)
929 {
930 // "No page" could mean no pool has been pushed
931 // or an empty placeholder pool has been pushed and has no contents yet
932 ASSERT(!hotPage());
933
934 bool pushExtraBoundary = false;
935 if (haveEmptyPoolPlaceholder()) {
936 // We are pushing a second pool over the empty placeholder pool
937 // or pushing the first object into the empty placeholder pool.
938 // Before doing that, push a pool boundary on behalf of the pool
939 // that is currently represented by the empty placeholder.
940 pushExtraBoundary = true;
941 }
942 else if (obj != POOL_BOUNDARY && DebugMissingPools) {
943 // We are pushing an object with no pool in place,
944 // and no-pool debugging was requested by environment.
945 _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
946 "autoreleased with no pool in place - "
947 "just leaking - break on "
948 "objc_autoreleaseNoPool() to debug",
949 objc_thread_self(), (void*)obj, object_getClassName(obj));
950 objc_autoreleaseNoPool(obj);
951 return nil;
952 }
953 else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
954 // We are pushing a pool with no pool in place,
955 // and alloc-per-pool debugging was not requested.
956 // Install and return the empty pool placeholder.
957 return setEmptyPoolPlaceholder();
958 }
959
960 // We are pushing an object or a non-placeholder'd pool.
961
962 // Install the first page.
963 AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
964 setHotPage(page);
965
966 // Push a boundary on behalf of the previously-placeholder'd pool.
967 if (pushExtraBoundary) {
968 page->add(POOL_BOUNDARY);
969 }
970
971 // Push the requested object or pool.
972 return page->add(obj);
973 }
974
975
976 static __attribute__((noinline))
977 id *autoreleaseNewPage(id obj)
978 {
979 AutoreleasePoolPage *page = hotPage();
980 if (page) return autoreleaseFullPage(obj, page);
981 else return autoreleaseNoPage(obj);
982 }
983
984 public:
985 static inline id autorelease(id obj)
986 {
987 ASSERT(obj);
988 ASSERT(!obj->isTaggedPointer());
989 id *dest __unused = autoreleaseFast(obj);
990 ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
991 return obj;
992 }
993
994
995 static inline void *push()
996 {
997 id *dest;
998 if (slowpath(DebugPoolAllocation)) {
999 // Each autorelease pool starts on a new pool page.
1000 dest = autoreleaseNewPage(POOL_BOUNDARY);
1001 } else {
1002 dest = autoreleaseFast(POOL_BOUNDARY);
1003 }
1004 ASSERT(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
1005 return dest;
1006 }
1007
1008 __attribute__((noinline, cold))
1009 static void badPop(void *token)
1010 {
1011 // Error. For bincompat purposes this is not
1012 // fatal in executables built with old SDKs.
1013
1014 if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0, 2_0)) {
1015 // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
1016 _objc_fatal
1017 ("Invalid or prematurely-freed autorelease pool %p.", token);
1018 }
1019
1020 // Old SDK. Bad pop is warned once.
1021 static bool complained = false;
1022 if (!complained) {
1023 complained = true;
1024 _objc_inform_now_and_on_crash
1025 ("Invalid or prematurely-freed autorelease pool %p. "
1026 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
1027 "Proceeding anyway because the app is old "
1028 "(SDK version " SDK_FORMAT "). Memory errors are likely.",
1029 token, FORMAT_SDK(sdkVersion()));
1030 }
1031 objc_autoreleasePoolInvalid(token);
1032 }
1033
1034 template<bool allowDebug>
1035 static void
1036 popPage(void *token, AutoreleasePoolPage *page, id *stop)
1037 {
1038 if (allowDebug && PrintPoolHiwat) printHiwat();
1039
1040 page->releaseUntil(stop);
1041
1042 // memory: delete empty children
1043 if (allowDebug && DebugPoolAllocation && page->empty()) {
1044 // special case: delete everything during page-per-pool debugging
1045 AutoreleasePoolPage *parent = page->parent;
1046 page->kill();
1047 setHotPage(parent);
1048 } else if (allowDebug && DebugMissingPools && page->empty() && !page->parent) {
1049 // special case: delete everything for pop(top)
1050 // when debugging missing autorelease pools
1051 page->kill();
1052 setHotPage(nil);
1053 } else if (page->child) {
1054 // hysteresis: keep one empty child if page is more than half full
1055 if (page->lessThanHalfFull()) {
1056 page->child->kill();
1057 }
1058 else if (page->child->child) {
1059 page->child->child->kill();
1060 }
1061 }
1062 }
1063
1064 __attribute__((noinline, cold))
1065 static void
1066 popPageDebug(void *token, AutoreleasePoolPage *page, id *stop)
1067 {
1068 popPage<true>(token, page, stop);
1069 }
1070
1071 static inline void
1072 pop(void *token)
1073 {
1074 AutoreleasePoolPage *page;
1075 id *stop;
1076 if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
1077 // Popping the top-level placeholder pool.
1078 page = hotPage();
1079 if (!page) {
1080 // Pool was never used. Clear the placeholder.
1081 return setHotPage(nil);
1082 }
1083 // Pool was used. Pop its contents normally.
1084 // Pool pages remain allocated for re-use as usual.
1085 page = coldPage();
1086 token = page->begin();
1087 } else {
1088 page = pageForPointer(token);
1089 }
1090
1091 stop = (id *)token;
1092 if (*stop != POOL_BOUNDARY) {
1093 if (stop == page->begin() && !page->parent) {
1094 // Start of coldest page may correctly not be POOL_BOUNDARY:
1095 // 1. top-level pool is popped, leaving the cold page in place
1096 // 2. an object is autoreleased with no pool
1097 } else {
1098 // Error. For bincompat purposes this is not
1099 // fatal in executables built with old SDKs.
1100 return badPop(token);
1101 }
1102 }
1103
1104 if (slowpath(PrintPoolHiwat || DebugPoolAllocation || DebugMissingPools)) {
1105 return popPageDebug(token, page, stop);
1106 }
1107
1108 return popPage<false>(token, page, stop);
1109 }
1110
1111 static void init()
1112 {
1113 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
1114 AutoreleasePoolPage::tls_dealloc);
1115 ASSERT(r == 0);
1116 }
1117
1118 __attribute__((noinline, cold))
1119 void print()
1120 {
1121 _objc_inform("[%p] ................ PAGE %s %s %s", this,
1122 full() ? "(full)" : "",
1123 this == hotPage() ? "(hot)" : "",
1124 this == coldPage() ? "(cold)" : "");
1125 check(false);
1126 for (id *p = begin(); p < next; p++) {
1127 if (*p == POOL_BOUNDARY) {
1128 _objc_inform("[%p] ################ POOL %p", p, p);
1129 } else {
1130 _objc_inform("[%p] %#16lx %s",
1131 p, (unsigned long)*p, object_getClassName(*p));
1132 }
1133 }
1134 }
1135
1136 __attribute__((noinline, cold))
1137 static void printAll()
1138 {
1139 _objc_inform("##############");
1140 _objc_inform("AUTORELEASE POOLS for thread %p", objc_thread_self());
1141
1142 AutoreleasePoolPage *page;
1143 ptrdiff_t objects = 0;
1144 for (page = coldPage(); page; page = page->child) {
1145 objects += page->next - page->begin();
1146 }
1147 _objc_inform("%llu releases pending.", (unsigned long long)objects);
1148
1149 if (haveEmptyPoolPlaceholder()) {
1150 _objc_inform("[%p] ................ PAGE (placeholder)",
1151 EMPTY_POOL_PLACEHOLDER);
1152 _objc_inform("[%p] ################ POOL (placeholder)",
1153 EMPTY_POOL_PLACEHOLDER);
1154 }
1155 else {
1156 for (page = coldPage(); page; page = page->child) {
1157 page->print();
1158 }
1159 }
1160
1161 _objc_inform("##############");
1162 }
1163
1164 __attribute__((noinline, cold))
1165 static void printHiwat()
1166 {
1167 // Check and propagate high water mark
1168 // Ignore high water marks under 256 to suppress noise.
1169 AutoreleasePoolPage *p = hotPage();
1170 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
1171 if (mark > p->hiwat && mark > 256) {
1172 for( ; p; p = p->parent) {
1173 p->unprotect();
1174 p->hiwat = mark;
1175 p->protect();
1176 }
1177
1178 _objc_inform("POOL HIGHWATER: new high water mark of %u "
1179 "pending releases for thread %p:",
1180 mark, objc_thread_self());
1181
1182 void *stack[128];
1183 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
1184 char **sym = backtrace_symbols(stack, count);
1185 for (int i = 0; i < count; i++) {
1186 _objc_inform("POOL HIGHWATER: %s", sym[i]);
1187 }
1188 free(sym);
1189 }
1190 }
1191
1192 #undef POOL_BOUNDARY
1193 };
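
// Debugging sketch (not part of the runtime's control flow): the current
// thread's pool contents can be dumped from a debugger through the
// printAll() wrapper exported below, e.g.
//
//     (lldb) expr -- (void)_objc_autoreleasePoolPrint()
//
// and OBJC_DEBUG_POOL_ALLOCATION (DebugPoolAllocation above) starts every
// pool on its own page, which makes bad pops easier to pin down.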
1194
1195 /***********************************************************************
1196 * Slow paths for inline control
1197 **********************************************************************/
1198
1199 #if SUPPORT_NONPOINTER_ISA
1200
1201 NEVER_INLINE id
1202 objc_object::rootRetain_overflow(bool tryRetain)
1203 {
1204 return rootRetain(tryRetain, true);
1205 }
1206
1207
1208 NEVER_INLINE uintptr_t
1209 objc_object::rootRelease_underflow(bool performDealloc)
1210 {
1211 return rootRelease(performDealloc, true);
1212 }
1213
1214
1215 // Slow path of clearDeallocating()
1216 // for objects with nonpointer isa
1217 // that were ever weakly referenced
1218 // or whose retain count ever overflowed to the side table.
1219 NEVER_INLINE void
1220 objc_object::clearDeallocating_slow()
1221 {
1222 ASSERT(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));
1223
1224 SideTable& table = SideTables()[this];
1225 table.lock();
1226 if (isa.weakly_referenced) {
1227 weak_clear_no_lock(&table.weak_table, (id)this);
1228 }
1229 if (isa.has_sidetable_rc) {
1230 table.refcnts.erase(this);
1231 }
1232 table.unlock();
1233 }
1234
1235 #endif
1236
1237 __attribute__((noinline,used))
1238 id
1239 objc_object::rootAutorelease2()
1240 {
1241 ASSERT(!isTaggedPointer());
1242 return AutoreleasePoolPage::autorelease((id)this);
1243 }
1244
1245
1246 BREAKPOINT_FUNCTION(
1247 void objc_overrelease_during_dealloc_error(void)
1248 );
1249
1250
1251 NEVER_INLINE uintptr_t
1252 objc_object::overrelease_error()
1253 {
1254 _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
1255 objc_overrelease_during_dealloc_error();
1256 return 0; // allow rootRelease() to tail-call this
1257 }
1258
1259
1260 /***********************************************************************
1261 * Retain count operations for side table.
1262 **********************************************************************/
1263
1264
1265 #if DEBUG
1266 // Used to assert that an object is not present in the side table.
1267 bool
1268 objc_object::sidetable_present()
1269 {
1270 bool result = false;
1271 SideTable& table = SideTables()[this];
1272
1273 table.lock();
1274
1275 RefcountMap::iterator it = table.refcnts.find(this);
1276 if (it != table.refcnts.end()) result = true;
1277
1278 if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;
1279
1280 table.unlock();
1281
1282 return result;
1283 }
1284 #endif
1285
1286 #if SUPPORT_NONPOINTER_ISA
1287
1288 void
1289 objc_object::sidetable_lock()
1290 {
1291 SideTable& table = SideTables()[this];
1292 table.lock();
1293 }
1294
1295 void
1296 objc_object::sidetable_unlock()
1297 {
1298 SideTable& table = SideTables()[this];
1299 table.unlock();
1300 }
1301
1302
1303 // Move the entire retain count to the side table,
1304 // as well as isDeallocating and weaklyReferenced.
1305 void
1306 objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
1307 bool isDeallocating,
1308 bool weaklyReferenced)
1309 {
1310 ASSERT(!isa.nonpointer); // should already be changed to raw pointer
1311 SideTable& table = SideTables()[this];
1312
1313 size_t& refcntStorage = table.refcnts[this];
1314 size_t oldRefcnt = refcntStorage;
1315 // not deallocating - that was in the isa
1316 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1317 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1318
1319 uintptr_t carry;
1320 size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1321 if (carry) refcnt = SIDE_TABLE_RC_PINNED;
1322 if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
1323 if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
1324
1325 refcntStorage = refcnt;
1326 }
1327
1328
1329 // Move some retain counts to the side table from the isa field.
1330 // Returns true if the object is now pinned.
1331 bool
1332 objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
1333 {
1334 ASSERT(isa.nonpointer);
1335 SideTable& table = SideTables()[this];
1336
1337 size_t& refcntStorage = table.refcnts[this];
1338 size_t oldRefcnt = refcntStorage;
1339 // isa-side bits should not be set here
1340 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1341 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1342
1343 if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
1344
1345 uintptr_t carry;
1346 size_t newRefcnt =
1347 addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1348 if (carry) {
1349 refcntStorage =
1350 SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
1351 return true;
1352 }
1353 else {
1354 refcntStorage = newRefcnt;
1355 return false;
1356 }
1357 }
1358
1359
1360 // Move some retain counts from the side table to the isa field.
1361 // Returns the actual count subtracted, which may be less than the request.
1362 size_t
1363 objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
1364 {
1365 ASSERT(isa.nonpointer);
1366 SideTable& table = SideTables()[this];
1367
1368 RefcountMap::iterator it = table.refcnts.find(this);
1369 if (it == table.refcnts.end() || it->second == 0) {
1370 // Side table retain count is zero. Can't borrow.
1371 return 0;
1372 }
1373 size_t oldRefcnt = it->second;
1374
1375 // isa-side bits should not be set here
1376 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1377 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1378
1379 size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
1380 ASSERT(oldRefcnt > newRefcnt); // shouldn't underflow
1381 it->second = newRefcnt;
1382 return delta_rc;
1383 }
1384
1385
1386 size_t
1387 objc_object::sidetable_getExtraRC_nolock()
1388 {
1389 ASSERT(isa.nonpointer);
1390 SideTable& table = SideTables()[this];
1391 RefcountMap::iterator it = table.refcnts.find(this);
1392 if (it == table.refcnts.end()) return 0;
1393 else return it->second >> SIDE_TABLE_RC_SHIFT;
1394 }
1395
1396
1397 // SUPPORT_NONPOINTER_ISA
1398 #endif
1399
1400
1401 id
1402 objc_object::sidetable_retain()
1403 {
1404 #if SUPPORT_NONPOINTER_ISA
1405 ASSERT(!isa.nonpointer);
1406 #endif
1407 SideTable& table = SideTables()[this];
1408
1409 table.lock();
1410 size_t& refcntStorage = table.refcnts[this];
1411 if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
1412 refcntStorage += SIDE_TABLE_RC_ONE;
1413 }
1414 table.unlock();
1415
1416 return (id)this;
1417 }
1418
1419
1420 bool
1421 objc_object::sidetable_tryRetain()
1422 {
1423 #if SUPPORT_NONPOINTER_ISA
1424 ASSERT(!isa.nonpointer);
1425 #endif
1426 SideTable& table = SideTables()[this];
1427
1428 // NO SPINLOCK HERE
1429 // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
1430 // which already acquired the lock on our behalf.
1431
1432 // fixme can't do this efficiently with os_lock_handoff_s
1433 // if (table.slock == 0) {
1434 // _objc_fatal("Do not call -_tryRetain.");
1435 // }
1436
1437 bool result = true;
1438 auto it = table.refcnts.try_emplace(this, SIDE_TABLE_RC_ONE);
1439 auto &refcnt = it.first->second;
1440 if (it.second) {
1441 // there was no entry
1442 } else if (refcnt & SIDE_TABLE_DEALLOCATING) {
1443 result = false;
1444 } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
1445 refcnt += SIDE_TABLE_RC_ONE;
1446 }
1447
1448 return result;
1449 }
1450
1451
1452 uintptr_t
1453 objc_object::sidetable_retainCount()
1454 {
1455 SideTable& table = SideTables()[this];
1456
1457 size_t refcnt_result = 1;
1458
1459 table.lock();
1460 RefcountMap::iterator it = table.refcnts.find(this);
1461 if (it != table.refcnts.end()) {
1462 // this is valid for SIDE_TABLE_RC_PINNED too
1463 refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
1464 }
1465 table.unlock();
1466 return refcnt_result;
1467 }
1468
1469
1470 bool
1471 objc_object::sidetable_isDeallocating()
1472 {
1473 SideTable& table = SideTables()[this];
1474
1475 // NO SPINLOCK HERE
1476 // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
1477 // which already acquired the lock on our behalf.
1478
1479
1480 // fixme can't do this efficiently with os_lock_handoff_s
1481 // if (table.slock == 0) {
1482 // _objc_fatal("Do not call -_isDeallocating.");
1483 // }
1484
1485 RefcountMap::iterator it = table.refcnts.find(this);
1486 return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
1487 }
1488
1489
1490 bool
1491 objc_object::sidetable_isWeaklyReferenced()
1492 {
1493 bool result = false;
1494
1495 SideTable& table = SideTables()[this];
1496 table.lock();
1497
1498 RefcountMap::iterator it = table.refcnts.find(this);
1499 if (it != table.refcnts.end()) {
1500 result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
1501 }
1502
1503 table.unlock();
1504
1505 return result;
1506 }
1507
1508
1509 void
1510 objc_object::sidetable_setWeaklyReferenced_nolock()
1511 {
1512 #if SUPPORT_NONPOINTER_ISA
1513 ASSERT(!isa.nonpointer);
1514 #endif
1515
1516 SideTable& table = SideTables()[this];
1517
1518 table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
1519 }
1520
1521
1522 // rdar://20206767
1523 // return uintptr_t instead of bool so that the various raw-isa
1524 // -release paths all return zero in eax
1525 uintptr_t
1526 objc_object::sidetable_release(bool performDealloc)
1527 {
1528 #if SUPPORT_NONPOINTER_ISA
1529 ASSERT(!isa.nonpointer);
1530 #endif
1531 SideTable& table = SideTables()[this];
1532
1533 bool do_dealloc = false;
1534
1535 table.lock();
1536 auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
1537 auto &refcnt = it.first->second;
1538 if (it.second) {
1539 do_dealloc = true;
1540 } else if (refcnt < SIDE_TABLE_DEALLOCATING) {
1541 // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
1542 do_dealloc = true;
1543 refcnt |= SIDE_TABLE_DEALLOCATING;
1544 } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
1545 refcnt -= SIDE_TABLE_RC_ONE;
1546 }
1547 table.unlock();
1548 if (do_dealloc && performDealloc) {
1549 ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
1550 }
1551 return do_dealloc;
1552 }
1553
1554
1555 void
1556 objc_object::sidetable_clearDeallocating()
1557 {
1558 SideTable& table = SideTables()[this];
1559
1560 // clear any weak table items
1561 // clear extra retain count and deallocating bit
1562 // (fixme warn or abort if extra retain count == 0 ?)
1563 table.lock();
1564 RefcountMap::iterator it = table.refcnts.find(this);
1565 if (it != table.refcnts.end()) {
1566 if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
1567 weak_clear_no_lock(&table.weak_table, (id)this);
1568 }
1569 table.refcnts.erase(it);
1570 }
1571 table.unlock();
1572 }
1573
1574
1575 /***********************************************************************
1576 * Optimized retain/release/autorelease entrypoints
1577 **********************************************************************/
1578
1579
1580 #if __OBJC2__
1581
1582 __attribute__((aligned(16), flatten, noinline))
1583 id
1584 objc_retain(id obj)
1585 {
1586 if (!obj) return obj;
1587 if (obj->isTaggedPointer()) return obj;
1588 return obj->retain();
1589 }
1590
1591
1592 __attribute__((aligned(16), flatten, noinline))
1593 void
1594 objc_release(id obj)
1595 {
1596 if (!obj) return;
1597 if (obj->isTaggedPointer()) return;
1598 return obj->release();
1599 }
1600
1601
1602 __attribute__((aligned(16), flatten, noinline))
1603 id
1604 objc_autorelease(id obj)
1605 {
1606 if (!obj) return obj;
1607 if (obj->isTaggedPointer()) return obj;
1608 return obj->autorelease();
1609 }
1610
1611
1612 // OBJC2
1613 #else
1614 // not OBJC2
1615
1616
1617 id objc_retain(id obj) { return [obj retain]; }
1618 void objc_release(id obj) { [obj release]; }
1619 id objc_autorelease(id obj) { return [obj autorelease]; }
1620
1621
1622 #endif
1623
1624
1625 /***********************************************************************
1626 * Basic operations for root class implementations a.k.a. _objc_root*()
1627 **********************************************************************/
1628
1629 bool
1630 _objc_rootTryRetain(id obj)
1631 {
1632 ASSERT(obj);
1633
1634 return obj->rootTryRetain();
1635 }
1636
1637 bool
1638 _objc_rootIsDeallocating(id obj)
1639 {
1640 ASSERT(obj);
1641
1642 return obj->rootIsDeallocating();
1643 }
1644
1645
1646 void
1647 objc_clear_deallocating(id obj)
1648 {
1649 ASSERT(obj);
1650
1651 if (obj->isTaggedPointer()) return;
1652 obj->clearDeallocating();
1653 }
1654
1655
1656 bool
1657 _objc_rootReleaseWasZero(id obj)
1658 {
1659 ASSERT(obj);
1660
1661 return obj->rootReleaseShouldDealloc();
1662 }
1663
1664
1665 NEVER_INLINE id
1666 _objc_rootAutorelease(id obj)
1667 {
1668 ASSERT(obj);
1669 return obj->rootAutorelease();
1670 }
1671
1672 uintptr_t
1673 _objc_rootRetainCount(id obj)
1674 {
1675 ASSERT(obj);
1676
1677 return obj->rootRetainCount();
1678 }
1679
1680
1681 NEVER_INLINE id
1682 _objc_rootRetain(id obj)
1683 {
1684 ASSERT(obj);
1685
1686 return obj->rootRetain();
1687 }
1688
1689 NEVER_INLINE void
1690 _objc_rootRelease(id obj)
1691 {
1692 ASSERT(obj);
1693
1694 obj->rootRelease();
1695 }
1696
1697
1698 // Call [cls alloc] or [cls allocWithZone:nil], with appropriate
1699 // shortcutting optimizations.
1700 static ALWAYS_INLINE id
1701 callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
1702 {
1703 #if __OBJC2__
1704 if (slowpath(checkNil && !cls)) return nil;
1705 if (fastpath(!cls->ISA()->hasCustomAWZ())) {
1706 return _objc_rootAllocWithZone(cls, nil);
1707 }
1708 #endif
1709
1710 // No shortcuts available.
1711 if (allocWithZone) {
1712 return ((id(*)(id, SEL, struct _NSZone *))objc_msgSend)(cls, @selector(allocWithZone:), nil);
1713 }
1714 return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(alloc));
1715 }
1716
1717
1718 // Base class implementation of +alloc. cls is not nil.
1719 // Calls [cls allocWithZone:nil].
1720 id
1721 _objc_rootAlloc(Class cls)
1722 {
1723 return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
1724 }
1725
1726 // Calls [cls alloc].
1727 id
1728 objc_alloc(Class cls)
1729 {
1730 return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
1731 }
1732
1733 // Calls [cls allocWithZone:nil].
1734 id
1735 objc_allocWithZone(Class cls)
1736 {
1737 return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
1738 }
1739
1740 // Calls [[cls alloc] init].
1741 id
1742 objc_alloc_init(Class cls)
1743 {
1744 return [callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/) init];
1745 }
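
// Call-site sketch (illustrative; "Widget" is a hypothetical class): newer
// compilers may route common message sends through these entry points
// instead of objc_msgSend, e.g.
//
//     Widget *w = [[Widget alloc] init];
//     // may be emitted as: Widget *w = objc_alloc_init([Widget class]);
//
// which lets callAlloc() skip the +alloc / +allocWithZone: dispatch when
// the class has no custom implementations (hasCustomAWZ() is false).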
1746
1747 // Calls [cls new]
1748 id
1749 objc_opt_new(Class cls)
1750 {
1751 #if __OBJC2__
1752 if (fastpath(cls && !cls->ISA()->hasCustomCore())) {
1753 return [callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/) init];
1754 }
1755 #endif
1756 return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new));
1757 }
1758
1759 // Calls [obj self]
1760 id
1761 objc_opt_self(id obj)
1762 {
1763 #if __OBJC2__
1764 if (fastpath(!obj || obj->isTaggedPointer() || !obj->ISA()->hasCustomCore())) {
1765 return obj;
1766 }
1767 #endif
1768 return ((id(*)(id, SEL))objc_msgSend)(obj, @selector(self));
1769 }
1770
1771 // Calls [obj class]
1772 Class
1773 objc_opt_class(id obj)
1774 {
1775 #if __OBJC2__
1776 if (slowpath(!obj)) return nil;
1777 Class cls = obj->getIsa();
1778 if (fastpath(!cls->hasCustomCore())) {
1779 return cls->isMetaClass() ? obj : cls;
1780 }
1781 #endif
1782 return ((Class(*)(id, SEL))objc_msgSend)(obj, @selector(class));
1783 }
1784
1785 // Calls [obj isKindOfClass:]
1786 BOOL
1787 objc_opt_isKindOfClass(id obj, Class otherClass)
1788 {
1789 #if __OBJC2__
1790 if (slowpath(!obj)) return NO;
1791 Class cls = obj->getIsa();
1792 if (fastpath(!cls->hasCustomCore())) {
1793 for (Class tcls = cls; tcls; tcls = tcls->superclass) {
1794 if (tcls == otherClass) return YES;
1795 }
1796 return NO;
1797 }
1798 #endif
1799 return ((BOOL(*)(id, SEL, Class))objc_msgSend)(obj, @selector(isKindOfClass:), otherClass);
1800 }
1801
1802 // Calls [obj respondsToSelector:]
1803 BOOL
1804 objc_opt_respondsToSelector(id obj, SEL sel)
1805 {
1806 #if __OBJC2__
1807 if (slowpath(!obj)) return NO;
1808 Class cls = obj->getIsa();
1809 if (fastpath(!cls->hasCustomCore())) {
1810 return class_respondsToSelector_inst(obj, sel, cls);
1811 }
1812 #endif
1813 return ((BOOL(*)(id, SEL, SEL))objc_msgSend)(obj, @selector(respondsToSelector:), sel);
1814 }
1815
1816 void
1817 _objc_rootDealloc(id obj)
1818 {
1819 ASSERT(obj);
1820
1821 obj->rootDealloc();
1822 }
1823
1824 void
1825 _objc_rootFinalize(id obj __unused)
1826 {
1827 ASSERT(obj);
1828 _objc_fatal("_objc_rootFinalize called with garbage collection off");
1829 }
1830
1831
1832 id
1833 _objc_rootInit(id obj)
1834 {
1835 // In practice, it will be hard to rely on this function.
1836 // Many classes do not properly chain -init calls.
1837 return obj;
1838 }
1839
1840
1841 malloc_zone_t *
1842 _objc_rootZone(id obj)
1843 {
1844 (void)obj;
1845 #if __OBJC2__
1846 // allocWithZone under __OBJC2__ ignores the zone parameter
1847 return malloc_default_zone();
1848 #else
1849 malloc_zone_t *rval = malloc_zone_from_ptr(obj);
1850 return rval ? rval : malloc_default_zone();
1851 #endif
1852 }
1853
1854 uintptr_t
1855 _objc_rootHash(id obj)
1856 {
1857 return (uintptr_t)obj;
1858 }
1859
1860 void *
1861 objc_autoreleasePoolPush(void)
1862 {
1863 return AutoreleasePoolPage::push();
1864 }
1865
1866 NEVER_INLINE
1867 void
1868 objc_autoreleasePoolPop(void *ctxt)
1869 {
1870 AutoreleasePoolPage::pop(ctxt);
1871 }
1872
1873
1874 void *
1875 _objc_autoreleasePoolPush(void)
1876 {
1877 return objc_autoreleasePoolPush();
1878 }
1879
1880 void
1881 _objc_autoreleasePoolPop(void *ctxt)
1882 {
1883 objc_autoreleasePoolPop(ctxt);
1884 }
1885
1886 void
1887 _objc_autoreleasePoolPrint(void)
1888 {
1889 AutoreleasePoolPage::printAll();
1890 }
1891
1892
1893 // Same as objc_release but suitable for tail-calling
1894 // if you need the value back and don't want to push a frame before this point.
1895 __attribute__((noinline))
1896 static id
1897 objc_releaseAndReturn(id obj)
1898 {
1899 objc_release(obj);
1900 return obj;
1901 }
1902
1903 // Same as objc_retainAutorelease but suitable for tail-calling
1904 // if you don't want to push a frame before this point.
1905 __attribute__((noinline))
1906 static id
1907 objc_retainAutoreleaseAndReturn(id obj)
1908 {
1909 return objc_retainAutorelease(obj);
1910 }
1911
1912
1913 // Prepare a value at +1 for return through a +0 autoreleasing convention.
1914 id
1915 objc_autoreleaseReturnValue(id obj)
1916 {
1917 if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
1918
1919 return objc_autorelease(obj);
1920 }
1921
1922 // Prepare a value at +0 for return through a +0 autoreleasing convention.
1923 id
1924 objc_retainAutoreleaseReturnValue(id obj)
1925 {
1926 if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;
1927
1928 // not objc_autoreleaseReturnValue(objc_retain(obj))
1929 // because we don't need another optimization attempt
1930 return objc_retainAutoreleaseAndReturn(obj);
1931 }
1932
1933 // Accept a value returned through a +0 autoreleasing convention for use at +1.
1934 id
1935 objc_retainAutoreleasedReturnValue(id obj)
1936 {
1937 if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
1938
1939 return objc_retain(obj);
1940 }
1941
1942 // Accept a value returned through a +0 autoreleasing convention for use at +0.
1943 id
1944 objc_unsafeClaimAutoreleasedReturnValue(id obj)
1945 {
1946 if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;
1947
1948 return objc_releaseAndReturn(obj);
1949 }
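
// Sketch of the return-value handshake above (illustrative): the callee
// and caller sides pair up so that, when the runtime detects the optimized
// pattern, the returned object skips the autorelease pool entirely.
//
//     // callee (compiler-emitted):
//     return objc_autoreleaseReturnValue(result);
//     // caller (compiler-emitted, immediately after the call):
//     obj = objc_retainAutoreleasedReturnValue(obj);
//
// If either side is not optimized, the object simply takes the normal
// autorelease-then-retain path.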
1950
1951 id
1952 objc_retainAutorelease(id obj)
1953 {
1954 return objc_autorelease(objc_retain(obj));
1955 }
1956
1957 void
1958 _objc_deallocOnMainThreadHelper(void *context)
1959 {
1960 id obj = (id)context;
1961 [obj dealloc];
1962 }
1963
1964 // convert objc_objectptr_t to id, callee must take ownership.
1965 id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1966
1967 // convert objc_objectptr_t to id, without ownership transfer.
1968 id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1969
1970 // convert id to objc_objectptr_t, no ownership transfer.
1971 objc_objectptr_t objc_unretainedPointer(id object) { return object; }
1972
1973
1974 void arr_init(void)
1975 {
1976 AutoreleasePoolPage::init();
1977 SideTablesMap.init();
1978 _objc_associations_init();
1979 }
1980
1981
1982 #if SUPPORT_TAGGED_POINTERS
1983
1984 // Placeholder for old debuggers. When they inspect an
1985 // extended tagged pointer object they will see this isa.
1986
1987 @interface __NSUnrecognizedTaggedPointer : NSObject
1988 @end
1989
1990 __attribute__((objc_nonlazy_class))
1991 @implementation __NSUnrecognizedTaggedPointer
1992 -(id) retain { return self; }
1993 -(oneway void) release { }
1994 -(id) autorelease { return self; }
1995 @end
1996
1997 #endif
1998
1999 __attribute__((objc_nonlazy_class))
2000 @implementation NSObject
2001
2002 + (void)initialize {
2003 }
2004
2005 + (id)self {
2006 return (id)self;
2007 }
2008
2009 - (id)self {
2010 return self;
2011 }
2012
2013 + (Class)class {
2014 return self;
2015 }
2016
2017 - (Class)class {
2018 return object_getClass(self);
2019 }
2020
2021 + (Class)superclass {
2022 return self->superclass;
2023 }
2024
2025 - (Class)superclass {
2026 return [self class]->superclass;
2027 }
2028
2029 + (BOOL)isMemberOfClass:(Class)cls {
2030 return self->ISA() == cls;
2031 }
2032
2033 - (BOOL)isMemberOfClass:(Class)cls {
2034 return [self class] == cls;
2035 }
2036
2037 + (BOOL)isKindOfClass:(Class)cls {
2038 for (Class tcls = self->ISA(); tcls; tcls = tcls->superclass) {
2039 if (tcls == cls) return YES;
2040 }
2041 return NO;
2042 }
2043
2044 - (BOOL)isKindOfClass:(Class)cls {
2045 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2046 if (tcls == cls) return YES;
2047 }
2048 return NO;
2049 }
2050
2051 + (BOOL)isSubclassOfClass:(Class)cls {
2052 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2053 if (tcls == cls) return YES;
2054 }
2055 return NO;
2056 }
2057
2058 + (BOOL)isAncestorOfObject:(NSObject *)obj {
2059 for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
2060 if (tcls == self) return YES;
2061 }
2062 return NO;
2063 }
2064
2065 + (BOOL)instancesRespondToSelector:(SEL)sel {
2066 return class_respondsToSelector_inst(nil, sel, self);
2067 }
2068
2069 + (BOOL)respondsToSelector:(SEL)sel {
2070 return class_respondsToSelector_inst(self, sel, self->ISA());
2071 }
2072
2073 - (BOOL)respondsToSelector:(SEL)sel {
2074 return class_respondsToSelector_inst(self, sel, [self class]);
2075 }
2076
2077 + (BOOL)conformsToProtocol:(Protocol *)protocol {
2078 if (!protocol) return NO;
2079 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2080 if (class_conformsToProtocol(tcls, protocol)) return YES;
2081 }
2082 return NO;
2083 }
2084
2085 - (BOOL)conformsToProtocol:(Protocol *)protocol {
2086 if (!protocol) return NO;
2087 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2088 if (class_conformsToProtocol(tcls, protocol)) return YES;
2089 }
2090 return NO;
2091 }
2092
2093 + (NSUInteger)hash {
2094 return _objc_rootHash(self);
2095 }
2096
2097 - (NSUInteger)hash {
2098 return _objc_rootHash(self);
2099 }
2100
2101 + (BOOL)isEqual:(id)obj {
2102 return obj == (id)self;
2103 }
2104
2105 - (BOOL)isEqual:(id)obj {
2106 return obj == self;
2107 }
2108
2109
2110 + (BOOL)isFault {
2111 return NO;
2112 }
2113
2114 - (BOOL)isFault {
2115 return NO;
2116 }
2117
2118 + (BOOL)isProxy {
2119 return NO;
2120 }
2121
2122 - (BOOL)isProxy {
2123 return NO;
2124 }
2125
2126
2127 + (IMP)instanceMethodForSelector:(SEL)sel {
2128 if (!sel) [self doesNotRecognizeSelector:sel];
2129 return class_getMethodImplementation(self, sel);
2130 }
2131
2132 + (IMP)methodForSelector:(SEL)sel {
2133 if (!sel) [self doesNotRecognizeSelector:sel];
2134 return object_getMethodImplementation((id)self, sel);
2135 }
2136
2137 - (IMP)methodForSelector:(SEL)sel {
2138 if (!sel) [self doesNotRecognizeSelector:sel];
2139 return object_getMethodImplementation(self, sel);
2140 }
2141
2142 + (BOOL)resolveClassMethod:(SEL)sel {
2143 return NO;
2144 }
2145
2146 + (BOOL)resolveInstanceMethod:(SEL)sel {
2147 return NO;
2148 }
2149
2150 // Replaced by CF (throws an NSException)
2151 + (void)doesNotRecognizeSelector:(SEL)sel {
2152 _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
2153 class_getName(self), sel_getName(sel), self);
2154 }
2155
2156 // Replaced by CF (throws an NSException)
2157 - (void)doesNotRecognizeSelector:(SEL)sel {
2158 _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
2159 object_getClassName(self), sel_getName(sel), self);
2160 }
2161
2162
2163 + (id)performSelector:(SEL)sel {
2164 if (!sel) [self doesNotRecognizeSelector:sel];
2165 return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
2166 }
2167
2168 + (id)performSelector:(SEL)sel withObject:(id)obj {
2169 if (!sel) [self doesNotRecognizeSelector:sel];
2170 return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
2171 }
2172
2173 + (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2174 if (!sel) [self doesNotRecognizeSelector:sel];
2175 return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
2176 }
2177
2178 - (id)performSelector:(SEL)sel {
2179 if (!sel) [self doesNotRecognizeSelector:sel];
2180 return ((id(*)(id, SEL))objc_msgSend)(self, sel);
2181 }
2182
2183 - (id)performSelector:(SEL)sel withObject:(id)obj {
2184 if (!sel) [self doesNotRecognizeSelector:sel];
2185 return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
2186 }
2187
2188 - (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2189 if (!sel) [self doesNotRecognizeSelector:sel];
2190 return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
2191 }
2192
2193
2194 // Replaced by CF (returns an NSMethodSignature)
2195 + (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
2196 _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
2197 "not available without CoreFoundation");
2198 }
2199
2200 // Replaced by CF (returns an NSMethodSignature)
2201 + (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2202 _objc_fatal("+[NSObject methodSignatureForSelector:] "
2203 "not available without CoreFoundation");
2204 }
2205
2206 // Replaced by CF (returns an NSMethodSignature)
2207 - (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2208 _objc_fatal("-[NSObject methodSignatureForSelector:] "
2209 "not available without CoreFoundation");
2210 }
2211
2212 + (void)forwardInvocation:(NSInvocation *)invocation {
2213 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2214 }
2215
2216 - (void)forwardInvocation:(NSInvocation *)invocation {
2217 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2218 }
2219
2220 + (id)forwardingTargetForSelector:(SEL)sel {
2221 return nil;
2222 }
2223
2224 - (id)forwardingTargetForSelector:(SEL)sel {
2225 return nil;
2226 }
2227
2228
2229 // Replaced by CF (returns an NSString)
2230 + (NSString *)description {
2231 return nil;
2232 }
2233
2234 // Replaced by CF (returns an NSString)
2235 - (NSString *)description {
2236 return nil;
2237 }
2238
2239 + (NSString *)debugDescription {
2240 return [self description];
2241 }
2242
2243 - (NSString *)debugDescription {
2244 return [self description];
2245 }
2246
2247
2248 + (id)new {
2249 return [callAlloc(self, false/*checkNil*/) init];
2250 }
2251
2252 + (id)retain {
2253 return (id)self;
2254 }
2255
2256 // Replaced by ObjectAlloc
2257 - (id)retain {
2258 return _objc_rootRetain(self);
2259 }
2260
2261
2262 + (BOOL)_tryRetain {
2263 return YES;
2264 }
2265
2266 // Replaced by ObjectAlloc
2267 - (BOOL)_tryRetain {
2268 return _objc_rootTryRetain(self);
2269 }
2270
2271 + (BOOL)_isDeallocating {
2272 return NO;
2273 }
2274
2275 - (BOOL)_isDeallocating {
2276 return _objc_rootIsDeallocating(self);
2277 }
2278
2279 + (BOOL)allowsWeakReference {
2280 return YES;
2281 }
2282
2283 + (BOOL)retainWeakReference {
2284 return YES;
2285 }
2286
2287 - (BOOL)allowsWeakReference {
2288 return ! [self _isDeallocating];
2289 }
2290
2291 - (BOOL)retainWeakReference {
2292 return [self _tryRetain];
2293 }
2294
2295 + (oneway void)release {
2296 }
2297
2298 // Replaced by ObjectAlloc
2299 - (oneway void)release {
2300 _objc_rootRelease(self);
2301 }
2302
2303 + (id)autorelease {
2304 return (id)self;
2305 }
2306
2307 // Replaced by ObjectAlloc
2308 - (id)autorelease {
2309 return _objc_rootAutorelease(self);
2310 }
2311
2312 + (NSUInteger)retainCount {
2313 return ULONG_MAX;
2314 }
2315
2316 - (NSUInteger)retainCount {
2317 return _objc_rootRetainCount(self);
2318 }
2319
2320 + (id)alloc {
2321 return _objc_rootAlloc(self);
2322 }
2323
2324 // Replaced by ObjectAlloc
2325 + (id)allocWithZone:(struct _NSZone *)zone {
2326 return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
2327 }
2328
2329 // Replaced by CF (throws an NSException)
2330 + (id)init {
2331 return (id)self;
2332 }
2333
2334 - (id)init {
2335 return _objc_rootInit(self);
2336 }
2337
2338 // Replaced by CF (throws an NSException)
2339 + (void)dealloc {
2340 }
2341
2342
2343 // Replaced by NSZombies
2344 - (void)dealloc {
2345 _objc_rootDealloc(self);
2346 }
2347
2348 // Previously used by GC. Now a placeholder for binary compatibility.
2349 - (void) finalize {
2350 }
2351
2352 + (struct _NSZone *)zone {
2353 return (struct _NSZone *)_objc_rootZone(self);
2354 }
2355
2356 - (struct _NSZone *)zone {
2357 return (struct _NSZone *)_objc_rootZone(self);
2358 }
2359
2360 + (id)copy {
2361 return (id)self;
2362 }
2363
2364 + (id)copyWithZone:(struct _NSZone *)zone {
2365 return (id)self;
2366 }
2367
2368 - (id)copy {
2369 return [(id)self copyWithZone:nil];
2370 }
2371
2372 + (id)mutableCopy {
2373 return (id)self;
2374 }
2375
2376 + (id)mutableCopyWithZone:(struct _NSZone *)zone {
2377 return (id)self;
2378 }
2379
2380 - (id)mutableCopy {
2381 return [(id)self mutableCopyWithZone:nil];
2382 }
2383
2384 @end
2385
2386