[apple/objc4.git] / runtime / NSObject.mm (objc4-756.2)
1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include "objc-private.h"
25 #include "NSObject.h"
26
27 #include "objc-weak.h"
28 #include "llvm-DenseMap.h"
29 #include "NSObject.h"
30
31 #include <malloc/malloc.h>
32 #include <stdint.h>
33 #include <stdbool.h>
34 #include <mach/mach.h>
35 #include <mach-o/dyld.h>
36 #include <mach-o/nlist.h>
37 #include <sys/types.h>
38 #include <sys/mman.h>
39 #include <libkern/OSAtomic.h>
40 #include <Block.h>
41 #include <map>
42 #include <execinfo.h>
43
44 @interface NSInvocation
45 - (SEL)selector;
46 @end
47
48
49 /***********************************************************************
50 * Weak ivar support
51 **********************************************************************/
52
53 static id defaultBadAllocHandler(Class cls)
54 {
55 _objc_fatal("attempt to allocate object of class '%s' failed",
56 cls->nameForLogging());
57 }
58
59 static id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
60
61 static id callBadAllocHandler(Class cls)
62 {
63 // fixme add re-entrancy protection in case allocation fails inside handler
64 return (*badAllocHandler)(cls);
65 }
66
67 void _objc_setBadAllocHandler(id(*newHandler)(Class))
68 {
69 badAllocHandler = newHandler;
70 }
71
72
73 namespace {
74
75 // The order of these bits is important.
76 #define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
77 #define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
78 #define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit
79 #define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1))
80
81 #define SIDE_TABLE_RC_SHIFT 2
82 #define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
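// Illustrative only: how a side table refcount word decodes under the
// layout above (a sketch; the value is hypothetical).
//
//   size_t word = (3 << SIDE_TABLE_RC_SHIFT) | SIDE_TABLE_WEAKLY_REFERENCED;
//   // word == 0b1101:
//   //   bit 0        -> object is weakly referenced
//   //   bit 1        -> object is NOT currently deallocating
//   //   bits 2..N-2  -> 3 extra retains stored in the side table
//   //   top bit      -> not pinned, so the count may still grow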
83
84 // RefcountMap disguises its pointers because we
85 // don't want the table to act as a root for `leaks`.
86 typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;
87
88 // Template parameters.
89 enum HaveOld { DontHaveOld = false, DoHaveOld = true };
90 enum HaveNew { DontHaveNew = false, DoHaveNew = true };
91
92 struct SideTable {
93 spinlock_t slock;
94 RefcountMap refcnts;
95 weak_table_t weak_table;
96
97 SideTable() {
98 memset(&weak_table, 0, sizeof(weak_table));
99 }
100
101 ~SideTable() {
102 _objc_fatal("Do not delete SideTable.");
103 }
104
105 void lock() { slock.lock(); }
106 void unlock() { slock.unlock(); }
107 void forceReset() { slock.forceReset(); }
108
109 // Address-ordered lock discipline for a pair of side tables.
110
111 template<HaveOld, HaveNew>
112 static void lockTwo(SideTable *lock1, SideTable *lock2);
113 template<HaveOld, HaveNew>
114 static void unlockTwo(SideTable *lock1, SideTable *lock2);
115 };
116
117
118 template<>
119 void SideTable::lockTwo<DoHaveOld, DoHaveNew>
120 (SideTable *lock1, SideTable *lock2)
121 {
122 spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
123 }
124
125 template<>
126 void SideTable::lockTwo<DoHaveOld, DontHaveNew>
127 (SideTable *lock1, SideTable *)
128 {
129 lock1->lock();
130 }
131
132 template<>
133 void SideTable::lockTwo<DontHaveOld, DoHaveNew>
134 (SideTable *, SideTable *lock2)
135 {
136 lock2->lock();
137 }
138
139 template<>
140 void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
141 (SideTable *lock1, SideTable *lock2)
142 {
143 spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
144 }
145
146 template<>
147 void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
148 (SideTable *lock1, SideTable *)
149 {
150 lock1->unlock();
151 }
152
153 template<>
154 void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
155 (SideTable *, SideTable *lock2)
156 {
157 lock2->unlock();
158 }
159
160
161 // We cannot use a C++ static initializer to initialize SideTables because
162 // libc calls us before our C++ initializers run. We also don't want a global
163 // pointer to this struct because of the extra indirection.
164 // Do it the hard way.
165 alignas(StripedMap<SideTable>) static uint8_t
166 SideTableBuf[sizeof(StripedMap<SideTable>)];
167
168 static void SideTableInit() {
169 new (SideTableBuf) StripedMap<SideTable>();
170 }
171
172 static StripedMap<SideTable>& SideTables() {
173 return *reinterpret_cast<StripedMap<SideTable>*>(SideTableBuf);
174 }
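// Illustrative only: the locking pattern used throughout this file to
// reach an object's side table (a sketch; `obj` is hypothetical).
//
//   SideTable& table = SideTables()[obj];  // StripedMap hashes the address
//   table.lock();
//   // ... read or write table.refcnts / table.weak_table ...
//   table.unlock();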
175
176 // anonymous namespace
177 };
178
179 void SideTableLockAll() {
180 SideTables().lockAll();
181 }
182
183 void SideTableUnlockAll() {
184 SideTables().unlockAll();
185 }
186
187 void SideTableForceResetAll() {
188 SideTables().forceResetAll();
189 }
190
191 void SideTableDefineLockOrder() {
192 SideTables().defineLockOrder();
193 }
194
195 void SideTableLocksPrecedeLock(const void *newlock) {
196 SideTables().precedeLock(newlock);
197 }
198
199 void SideTableLocksSucceedLock(const void *oldlock) {
200 SideTables().succeedLock(oldlock);
201 }
202
203 void SideTableLocksPrecedeLocks(StripedMap<spinlock_t>& newlocks) {
204 int i = 0;
205 const void *newlock;
206 while ((newlock = newlocks.getLock(i++))) {
207 SideTables().precedeLock(newlock);
208 }
209 }
210
211 void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
212 int i = 0;
213 const void *oldlock;
214 while ((oldlock = oldlocks.getLock(i++))) {
215 SideTables().succeedLock(oldlock);
216 }
217 }
218
219 //
220 // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
221 //
222
223 id objc_retainBlock(id x) {
224 return (id)_Block_copy(x);
225 }
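// Illustrative only: a sketch of the calls ARC codegen emits (the
// compiler, not this file, generates them).
//
//   id tmp = objc_retain(obj);            // take ownership of a value
//   objc_release(tmp);                    // end of its lifetime
//   return objc_autorelease(obj);         // hand a value back at +0
//   block = objc_retainBlock(stackBlock); // copy a block to the heap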
226
227 //
228 // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
229 //
230
231 BOOL objc_should_deallocate(id object) {
232 return YES;
233 }
234
235 id
236 objc_retain_autorelease(id obj)
237 {
238 return objc_autorelease(objc_retain(obj));
239 }
240
241
242 void
243 objc_storeStrong(id *location, id obj)
244 {
245 id prev = *location;
246 if (obj == prev) {
247 return;
248 }
249 objc_retain(obj);
250 *location = obj;
251 objc_release(prev);
252 }
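// Illustrative only: a strong assignment such as
//
//   self->_name = newName;
//
// is lowered by ARC to roughly objc_storeStrong(&self->_name, newName),
// i.e. retain the new value, store it, then release the old value, in
// exactly the order implemented above.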
253
254
255 // Update a weak variable.
256 // If HaveOld is true, the variable has an existing value
257 // that needs to be cleaned up. This value might be nil.
258 // If HaveNew is true, there is a new value that needs to be
259 // assigned into the variable. This value might be nil.
260 // If CrashIfDeallocating is true, the process is halted if newObj is
261 // deallocating or newObj's class does not support weak references.
262 // If CrashIfDeallocating is false, nil is stored instead.
263 enum CrashIfDeallocating {
264 DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
265 };
266 template <HaveOld haveOld, HaveNew haveNew,
267 CrashIfDeallocating crashIfDeallocating>
268 static id
269 storeWeak(id *location, objc_object *newObj)
270 {
271 assert(haveOld || haveNew);
272 if (!haveNew) assert(newObj == nil);
273
274 Class previouslyInitializedClass = nil;
275 id oldObj;
276 SideTable *oldTable;
277 SideTable *newTable;
278
279 // Acquire locks for old and new values.
280 // Order by lock address to prevent lock ordering problems.
281 // Retry if the old value changes underneath us.
282 retry:
283 if (haveOld) {
284 oldObj = *location;
285 oldTable = &SideTables()[oldObj];
286 } else {
287 oldTable = nil;
288 }
289 if (haveNew) {
290 newTable = &SideTables()[newObj];
291 } else {
292 newTable = nil;
293 }
294
295 SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);
296
297 if (haveOld && *location != oldObj) {
298 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
299 goto retry;
300 }
301
302 // Prevent a deadlock between the weak reference machinery
303 // and the +initialize machinery by ensuring that no
304 // weakly-referenced object has an un-+initialized isa.
305 if (haveNew && newObj) {
306 Class cls = newObj->getIsa();
307 if (cls != previouslyInitializedClass &&
308 !((objc_class *)cls)->isInitialized())
309 {
310 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
311 class_initialize(cls, (id)newObj);
312
313 // If this class is finished with +initialize then we're good.
314 // If this class is still running +initialize on this thread
315 // (i.e. +initialize called storeWeak on an instance of itself)
316 // then we may proceed but it will appear initializing and
317 // not yet initialized to the check above.
318 // Instead set previouslyInitializedClass to recognize it on retry.
319 previouslyInitializedClass = cls;
320
321 goto retry;
322 }
323 }
324
325 // Clean up old value, if any.
326 if (haveOld) {
327 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
328 }
329
330 // Assign new value, if any.
331 if (haveNew) {
332 newObj = (objc_object *)
333 weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
334 crashIfDeallocating);
335 // weak_register_no_lock returns nil if weak store should be rejected
336
337 // Set is-weakly-referenced bit in refcount table.
338 if (newObj && !newObj->isTaggedPointer()) {
339 newObj->setWeaklyReferenced_nolock();
340 }
341
342 // Do not set *location anywhere else. That would introduce a race.
343 *location = (id)newObj;
344 }
345 else {
346 // No new value. The storage is not changed.
347 }
348
349 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
350
351 return (id)newObj;
352 }
353
354
355 /**
356 * This function stores a new value into a __weak variable. It would
357 * be used anywhere a __weak variable is the target of an assignment.
358 *
359 * @param location The address of the weak pointer itself
360 * @param newObj The new object this weak ptr should now point to
361 *
362 * @return \e newObj
363 */
364 id
365 objc_storeWeak(id *location, id newObj)
366 {
367 return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
368 (location, (objc_object *)newObj);
369 }
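// Illustrative only: re-assignment of an already-initialized __weak
// variable, e.g.
//
//   __weak id weakPtr = ...;   // initialized elsewhere
//   weakPtr = obj;             // roughly objc_storeWeak(&weakPtr, obj)
//
// The old registration (if any) is removed and the new one added while
// both side tables are held, as implemented in storeWeak() above.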
370
371
372 /**
373 * This function stores a new value into a __weak variable.
374 * If the new object is deallocating or the new object's class
375 * does not support weak references, stores nil instead.
376 *
377 * @param location The address of the weak pointer itself
378 * @param newObj The new object this weak ptr should now point to
379 *
380 * @return The value stored (either the new object or nil)
381 */
382 id
383 objc_storeWeakOrNil(id *location, id newObj)
384 {
385 return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
386 (location, (objc_object *)newObj);
387 }
388
389
390 /**
391 * Initialize a fresh weak pointer to some object location.
392 * It would be used for code like:
393 *
394 * (The nil case)
395 * __weak id weakPtr;
396 * (The non-nil case)
397 * NSObject *o = ...;
398 * __weak id weakPtr = o;
399 *
400 * This function IS NOT thread-safe with respect to concurrent
401 * modifications to the weak variable. (Concurrent weak clear is safe.)
402 *
403 * @param location Address of __weak ptr.
404 * @param newObj Object ptr.
405 */
406 id
407 objc_initWeak(id *location, id newObj)
408 {
409 if (!newObj) {
410 *location = nil;
411 return nil;
412 }
413
414 return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
415 (location, (objc_object*)newObj);
416 }
417
418 id
419 objc_initWeakOrNil(id *location, id newObj)
420 {
421 if (!newObj) {
422 *location = nil;
423 return nil;
424 }
425
426 return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
427 (location, (objc_object*)newObj);
428 }
429
430
431 /**
432 * Destroys the relationship between a weak pointer
433 * and the object it is referencing in the internal weak
434 * table. If the weak pointer is not referencing anything,
435 * there is no need to edit the weak table.
436 *
437 * This function IS NOT thread-safe with respect to concurrent
438 * modifications to the weak variable. (Concurrent weak clear is safe.)
439 *
440 * @param location The weak pointer address.
441 */
442 void
443 objc_destroyWeak(id *location)
444 {
445 (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
446 (location, nil);
447 }
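// Illustrative only: the full lifecycle of a local __weak variable
// (a sketch of the ARC lowering, not exact codegen).
//
//   {
//       __weak id weakPtr = obj;   // objc_initWeak(&weakPtr, obj)
//       id strong = weakPtr;       // objc_loadWeakRetained(&weakPtr), released later
//       // ...
//   }                              // objc_destroyWeak(&weakPtr)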
448
449
450 /*
451 Once upon a time we eagerly cleared *location if we saw the object
452 was deallocating. This confuses code like NSPointerFunctions which
453 tries to pre-flight the raw storage and assumes if the storage is
454 zero then the weak system is done interfering. That is false: the
455 weak system is still going to check and clear the storage later.
456 This can cause objc_weak_error complaints and crashes.
457 So we now don't touch the storage until deallocation completes.
458 */
459
460 id
461 objc_loadWeakRetained(id *location)
462 {
463 id obj;
464 id result;
465 Class cls;
466
467 SideTable *table;
468
469 retry:
470 // fixme std::atomic this load
471 obj = *location;
472 if (!obj) return nil;
473 if (obj->isTaggedPointer()) return obj;
474
475 table = &SideTables()[obj];
476
477 table->lock();
478 if (*location != obj) {
479 table->unlock();
480 goto retry;
481 }
482
483 result = obj;
484
485 cls = obj->ISA();
486 if (! cls->hasCustomRR()) {
487 // Fast case. We know +initialize is complete because
488 // default-RR can never be set before then.
489 assert(cls->isInitialized());
490 if (! obj->rootTryRetain()) {
491 result = nil;
492 }
493 }
494 else {
495 // Slow case. We must check for +initialize and call it outside
496 // the lock if necessary in order to avoid deadlocks.
497 if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
498 BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
499 class_getMethodImplementation(cls, SEL_retainWeakReference);
500 if ((IMP)tryRetain == _objc_msgForward) {
501 result = nil;
502 }
503 else if (! (*tryRetain)(obj, SEL_retainWeakReference)) {
504 result = nil;
505 }
506 }
507 else {
508 table->unlock();
509 class_initialize(cls, obj);
510 goto retry;
511 }
512 }
513
514 table->unlock();
515 return result;
516 }
517
518 /**
519 * This loads the object referenced by a weak pointer and returns it, after
520 * retaining and autoreleasing the object to ensure that it stays alive
521 * long enough for the caller to use it. This function would be used
522 * anywhere a __weak variable is used in an expression.
523 *
524 * @param location The weak pointer address
525 *
526 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
527 */
528 id
529 objc_loadWeak(id *location)
530 {
531 if (!*location) return nil;
532 return objc_autorelease(objc_loadWeakRetained(location));
533 }
534
535
536 /**
537 * This function copies a weak pointer from one location to another,
538 * when the destination doesn't already contain a weak pointer. It
539 * would be used for code like:
540 *
541 * __weak id src = ...;
542 * __weak id dst = src;
543 *
544 * This function IS NOT thread-safe with respect to concurrent
545 * modifications to the destination variable. (Concurrent weak clear is safe.)
546 *
547 * @param dst The destination variable.
548 * @param src The source variable.
549 */
550 void
551 objc_copyWeak(id *dst, id *src)
552 {
553 id obj = objc_loadWeakRetained(src);
554 objc_initWeak(dst, obj);
555 objc_release(obj);
556 }
557
558 /**
559 * Move a weak pointer from one location to another.
560 * Before the move, the destination must be uninitialized.
561 * After the move, the source is nil.
562 *
563 * This function IS NOT thread-safe with respect to concurrent
564 * modifications to either weak variable. (Concurrent weak clear is safe.)
565 *
566 */
567 void
568 objc_moveWeak(id *dst, id *src)
569 {
570 objc_copyWeak(dst, src);
571 objc_destroyWeak(src);
572 *src = nil;
573 }
574
575
576 /***********************************************************************
577 Autorelease pool implementation
578
579 A thread's autorelease pool is a stack of pointers.
580 Each pointer is either an object to release, or POOL_BOUNDARY which is
581 an autorelease pool boundary.
582 A pool token is a pointer to the POOL_BOUNDARY for that pool. When
583 the pool is popped, every object hotter than the sentinel is released.
584 The stack is divided into a doubly-linked list of pages. Pages are added
585 and deleted as necessary.
586 Thread-local storage points to the hot page, where newly autoreleased
587 objects are stored.
588 **********************************************************************/
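// Illustrative only: how the public interface maps onto this machinery
// (a sketch; the page layout is described above).
//
//   void *token = objc_autoreleasePoolPush();   // pushes a POOL_BOUNDARY
//   // -autorelease appends each object pointer to the hot page
//   objc_autoreleasePoolPop(token);             // releases back to the boundary
//
// An @autoreleasepool { } block compiles down to this push/pop pair.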
589
590 // Set this to 1 to mprotect() autorelease pool contents
591 #define PROTECT_AUTORELEASEPOOL 0
592
593 // Set this to 1 to validate the entire autorelease pool header all the time
594 // (i.e. use check() instead of fastcheck() everywhere)
595 #define CHECK_AUTORELEASEPOOL (DEBUG)
596
597 BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
598 BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
599
600 namespace {
601
602 struct magic_t {
603 static const uint32_t M0 = 0xA1A1A1A1;
604 # define M1 "AUTORELEASE!"
605 static const size_t M1_len = 12;
606 uint32_t m[4];
607
608 magic_t() {
609 assert(M1_len == strlen(M1));
610 assert(M1_len == 3 * sizeof(m[1]));
611
612 m[0] = M0;
613 strncpy((char *)&m[1], M1, M1_len);
614 }
615
616 ~magic_t() {
617 // Clear magic before deallocation.
618 // This prevents some false positives in memory debugging tools.
619 // fixme semantically this should be memset_s(), but the
620 // compiler doesn't optimize that at all (rdar://44856676).
621 volatile uint64_t *p = (volatile uint64_t *)m;
622 p[0] = 0; p[1] = 0;
623 }
624
625 bool check() const {
626 return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
627 }
628
629 bool fastcheck() const {
630 #if CHECK_AUTORELEASEPOOL
631 return check();
632 #else
633 return (m[0] == M0);
634 #endif
635 }
636
637 # undef M1
638 };
639
640
641 class AutoreleasePoolPage
642 {
643 // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
644 // pushed and it has never contained any objects. This saves memory
645 // when the top level (i.e. libdispatch) pushes and pops pools but
646 // never uses them.
647 # define EMPTY_POOL_PLACEHOLDER ((id*)1)
648
649 # define POOL_BOUNDARY nil
650 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
651 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
652 static size_t const SIZE =
653 #if PROTECT_AUTORELEASEPOOL
654 PAGE_MAX_SIZE; // must be multiple of vm page size
655 #else
656 PAGE_MAX_SIZE; // size and alignment, power of 2
657 #endif
658 static size_t const COUNT = SIZE / sizeof(id);
659
660 magic_t const magic;
661 id *next;
662 pthread_t const thread;
663 AutoreleasePoolPage * const parent;
664 AutoreleasePoolPage *child;
665 uint32_t const depth;
666 uint32_t hiwat;
667
668 // SIZE-sizeof(*this) bytes of contents follow
669
670 static void * operator new(size_t size) {
671 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
672 }
673 static void operator delete(void * p) {
674 return free(p);
675 }
676
677 inline void protect() {
678 #if PROTECT_AUTORELEASEPOOL
679 mprotect(this, SIZE, PROT_READ);
680 check();
681 #endif
682 }
683
684 inline void unprotect() {
685 #if PROTECT_AUTORELEASEPOOL
686 check();
687 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
688 #endif
689 }
690
691 AutoreleasePoolPage(AutoreleasePoolPage *newParent)
692 : magic(), next(begin()), thread(pthread_self()),
693 parent(newParent), child(nil),
694 depth(parent ? 1+parent->depth : 0),
695 hiwat(parent ? parent->hiwat : 0)
696 {
697 if (parent) {
698 parent->check();
699 assert(!parent->child);
700 parent->unprotect();
701 parent->child = this;
702 parent->protect();
703 }
704 protect();
705 }
706
707 ~AutoreleasePoolPage()
708 {
709 check();
710 unprotect();
711 assert(empty());
712
713 // Not recursive: we don't want to blow out the stack
714 // if a thread accumulates a stupendous amount of garbage
715 assert(!child);
716 }
717
718
719 void busted(bool die = true)
720 {
721 magic_t right;
722 (die ? _objc_fatal : _objc_inform)
723 ("autorelease pool page %p corrupted\n"
724 " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
725 " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
726 " pthread %p\n"
727 " should be %p\n",
728 this,
729 magic.m[0], magic.m[1], magic.m[2], magic.m[3],
730 right.m[0], right.m[1], right.m[2], right.m[3],
731 this->thread, pthread_self());
732 }
733
734 void check(bool die = true)
735 {
736 if (!magic.check() || !pthread_equal(thread, pthread_self())) {
737 busted(die);
738 }
739 }
740
741 void fastcheck(bool die = true)
742 {
743 #if CHECK_AUTORELEASEPOOL
744 check(die);
745 #else
746 if (! magic.fastcheck()) {
747 busted(die);
748 }
749 #endif
750 }
751
752
753 id * begin() {
754 return (id *) ((uint8_t *)this+sizeof(*this));
755 }
756
757 id * end() {
758 return (id *) ((uint8_t *)this+SIZE);
759 }
760
761 bool empty() {
762 return next == begin();
763 }
764
765 bool full() {
766 return next == end();
767 }
768
769 bool lessThanHalfFull() {
770 return (next - begin() < (end() - begin()) / 2);
771 }
772
773 id *add(id obj)
774 {
775 assert(!full());
776 unprotect();
777 id *ret = next; // faster than `return next-1` because of aliasing
778 *next++ = obj;
779 protect();
780 return ret;
781 }
782
783 void releaseAll()
784 {
785 releaseUntil(begin());
786 }
787
788 void releaseUntil(id *stop)
789 {
790 // Not recursive: we don't want to blow out the stack
791 // if a thread accumulates a stupendous amount of garbage
792
793 while (this->next != stop) {
794 // Restart from hotPage() every time, in case -release
795 // autoreleased more objects
796 AutoreleasePoolPage *page = hotPage();
797
798 // fixme I think this `while` can be `if`, but I can't prove it
799 while (page->empty()) {
800 page = page->parent;
801 setHotPage(page);
802 }
803
804 page->unprotect();
805 id obj = *--page->next;
806 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
807 page->protect();
808
809 if (obj != POOL_BOUNDARY) {
810 objc_release(obj);
811 }
812 }
813
814 setHotPage(this);
815
816 #if DEBUG
817 // we expect any children to be completely empty
818 for (AutoreleasePoolPage *page = child; page; page = page->child) {
819 assert(page->empty());
820 }
821 #endif
822 }
823
824 void kill()
825 {
826 // Not recursive: we don't want to blow out the stack
827 // if a thread accumulates a stupendous amount of garbage
828 AutoreleasePoolPage *page = this;
829 while (page->child) page = page->child;
830
831 AutoreleasePoolPage *deathptr;
832 do {
833 deathptr = page;
834 page = page->parent;
835 if (page) {
836 page->unprotect();
837 page->child = nil;
838 page->protect();
839 }
840 delete deathptr;
841 } while (deathptr != this);
842 }
843
844 static void tls_dealloc(void *p)
845 {
846 if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
847 // No objects or pool pages to clean up here.
848 return;
849 }
850
851 // reinstate TLS value while we work
852 setHotPage((AutoreleasePoolPage *)p);
853
854 if (AutoreleasePoolPage *page = coldPage()) {
855 if (!page->empty()) pop(page->begin()); // pop all of the pools
856 if (DebugMissingPools || DebugPoolAllocation) {
857 // pop() killed the pages already
858 } else {
859 page->kill(); // free all of the pages
860 }
861 }
862
863 // clear TLS value so TLS destruction doesn't loop
864 setHotPage(nil);
865 }
866
867 static AutoreleasePoolPage *pageForPointer(const void *p)
868 {
869 return pageForPointer((uintptr_t)p);
870 }
871
872 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
873 {
874 AutoreleasePoolPage *result;
875 uintptr_t offset = p % SIZE;
876
877 assert(offset >= sizeof(AutoreleasePoolPage));
878
879 result = (AutoreleasePoolPage *)(p - offset);
880 result->fastcheck();
881
882 return result;
883 }
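// Illustrative only: why the arithmetic above recovers the page header.
// Pages are allocated SIZE-aligned (see operator new), so with a
// hypothetical SIZE of 0x1000:
//
//   p      = 0x7f8a4c803f40              // some slot inside a page
//   offset = p % SIZE   = 0x0f40
//   page   = p - offset = 0x7f8a4c803000 // the AutoreleasePoolPage header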
884
885
886 static inline bool haveEmptyPoolPlaceholder()
887 {
888 id *tls = (id *)tls_get_direct(key);
889 return (tls == EMPTY_POOL_PLACEHOLDER);
890 }
891
892 static inline id* setEmptyPoolPlaceholder()
893 {
894 assert(tls_get_direct(key) == nil);
895 tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
896 return EMPTY_POOL_PLACEHOLDER;
897 }
898
899 static inline AutoreleasePoolPage *hotPage()
900 {
901 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
902 tls_get_direct(key);
903 if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
904 if (result) result->fastcheck();
905 return result;
906 }
907
908 static inline void setHotPage(AutoreleasePoolPage *page)
909 {
910 if (page) page->fastcheck();
911 tls_set_direct(key, (void *)page);
912 }
913
914 static inline AutoreleasePoolPage *coldPage()
915 {
916 AutoreleasePoolPage *result = hotPage();
917 if (result) {
918 while (result->parent) {
919 result = result->parent;
920 result->fastcheck();
921 }
922 }
923 return result;
924 }
925
926
927 static inline id *autoreleaseFast(id obj)
928 {
929 AutoreleasePoolPage *page = hotPage();
930 if (page && !page->full()) {
931 return page->add(obj);
932 } else if (page) {
933 return autoreleaseFullPage(obj, page);
934 } else {
935 return autoreleaseNoPage(obj);
936 }
937 }
938
939 static __attribute__((noinline))
940 id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
941 {
942 // The hot page is full.
943 // Step to the next non-full page, adding a new page if necessary.
944 // Then add the object to that page.
945 assert(page == hotPage());
946 assert(page->full() || DebugPoolAllocation);
947
948 do {
949 if (page->child) page = page->child;
950 else page = new AutoreleasePoolPage(page);
951 } while (page->full());
952
953 setHotPage(page);
954 return page->add(obj);
955 }
956
957 static __attribute__((noinline))
958 id *autoreleaseNoPage(id obj)
959 {
960 // "No page" could mean no pool has been pushed
961 // or an empty placeholder pool has been pushed and has no contents yet
962 assert(!hotPage());
963
964 bool pushExtraBoundary = false;
965 if (haveEmptyPoolPlaceholder()) {
966 // We are pushing a second pool over the empty placeholder pool
967 // or pushing the first object into the empty placeholder pool.
968 // Before doing that, push a pool boundary on behalf of the pool
969 // that is currently represented by the empty placeholder.
970 pushExtraBoundary = true;
971 }
972 else if (obj != POOL_BOUNDARY && DebugMissingPools) {
973 // We are pushing an object with no pool in place,
974 // and no-pool debugging was requested by environment.
975 _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
976 "autoreleased with no pool in place - "
977 "just leaking - break on "
978 "objc_autoreleaseNoPool() to debug",
979 pthread_self(), (void*)obj, object_getClassName(obj));
980 objc_autoreleaseNoPool(obj);
981 return nil;
982 }
983 else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
984 // We are pushing a pool with no pool in place,
985 // and alloc-per-pool debugging was not requested.
986 // Install and return the empty pool placeholder.
987 return setEmptyPoolPlaceholder();
988 }
989
990 // We are pushing an object or a non-placeholder'd pool.
991
992 // Install the first page.
993 AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
994 setHotPage(page);
995
996 // Push a boundary on behalf of the previously-placeholder'd pool.
997 if (pushExtraBoundary) {
998 page->add(POOL_BOUNDARY);
999 }
1000
1001 // Push the requested object or pool.
1002 return page->add(obj);
1003 }
1004
1005
1006 static __attribute__((noinline))
1007 id *autoreleaseNewPage(id obj)
1008 {
1009 AutoreleasePoolPage *page = hotPage();
1010 if (page) return autoreleaseFullPage(obj, page);
1011 else return autoreleaseNoPage(obj);
1012 }
1013
1014 public:
1015 static inline id autorelease(id obj)
1016 {
1017 assert(obj);
1018 assert(!obj->isTaggedPointer());
1019 id *dest __unused = autoreleaseFast(obj);
1020 assert(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
1021 return obj;
1022 }
1023
1024
1025 static inline void *push()
1026 {
1027 id *dest;
1028 if (DebugPoolAllocation) {
1029 // Each autorelease pool starts on a new pool page.
1030 dest = autoreleaseNewPage(POOL_BOUNDARY);
1031 } else {
1032 dest = autoreleaseFast(POOL_BOUNDARY);
1033 }
1034 assert(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
1035 return dest;
1036 }
1037
1038 static void badPop(void *token)
1039 {
1040 // Error. For bincompat purposes this is not
1041 // fatal in executables built with old SDKs.
1042
1043 if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0, 2_0)) {
1044 // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
1045 _objc_fatal
1046 ("Invalid or prematurely-freed autorelease pool %p.", token);
1047 }
1048
1049 // Old SDK. Bad pop is warned once.
1050 static bool complained = false;
1051 if (!complained) {
1052 complained = true;
1053 _objc_inform_now_and_on_crash
1054 ("Invalid or prematurely-freed autorelease pool %p. "
1055 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
1056 "Proceeding anyway because the app is old "
1057 "(SDK version " SDK_FORMAT "). Memory errors are likely.",
1058 token, FORMAT_SDK(sdkVersion()));
1059 }
1060 objc_autoreleasePoolInvalid(token);
1061 }
1062
1063 static inline void pop(void *token)
1064 {
1065 AutoreleasePoolPage *page;
1066 id *stop;
1067
1068 if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
1069 // Popping the top-level placeholder pool.
1070 if (hotPage()) {
1071 // Pool was used. Pop its contents normally.
1072 // Pool pages remain allocated for re-use as usual.
1073 pop(coldPage()->begin());
1074 } else {
1075 // Pool was never used. Clear the placeholder.
1076 setHotPage(nil);
1077 }
1078 return;
1079 }
1080
1081 page = pageForPointer(token);
1082 stop = (id *)token;
1083 if (*stop != POOL_BOUNDARY) {
1084 if (stop == page->begin() && !page->parent) {
1085 // Start of coldest page may correctly not be POOL_BOUNDARY:
1086 // 1. top-level pool is popped, leaving the cold page in place
1087 // 2. an object is autoreleased with no pool
1088 } else {
1089 // Error. For bincompat purposes this is not
1090 // fatal in executables built with old SDKs.
1091 return badPop(token);
1092 }
1093 }
1094
1095 if (PrintPoolHiwat) printHiwat();
1096
1097 page->releaseUntil(stop);
1098
1099 // memory: delete empty children
1100 if (DebugPoolAllocation && page->empty()) {
1101 // special case: delete everything during page-per-pool debugging
1102 AutoreleasePoolPage *parent = page->parent;
1103 page->kill();
1104 setHotPage(parent);
1105 } else if (DebugMissingPools && page->empty() && !page->parent) {
1106 // special case: delete everything for pop(top)
1107 // when debugging missing autorelease pools
1108 page->kill();
1109 setHotPage(nil);
1110 }
1111 else if (page->child) {
1112 // hysteresis: keep one empty child if page is more than half full
1113 if (page->lessThanHalfFull()) {
1114 page->child->kill();
1115 }
1116 else if (page->child->child) {
1117 page->child->child->kill();
1118 }
1119 }
1120 }
1121
1122 static void init()
1123 {
1124 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
1125 AutoreleasePoolPage::tls_dealloc);
1126 assert(r == 0);
1127 }
1128
1129 void print()
1130 {
1131 _objc_inform("[%p] ................ PAGE %s %s %s", this,
1132 full() ? "(full)" : "",
1133 this == hotPage() ? "(hot)" : "",
1134 this == coldPage() ? "(cold)" : "");
1135 check(false);
1136 for (id *p = begin(); p < next; p++) {
1137 if (*p == POOL_BOUNDARY) {
1138 _objc_inform("[%p] ################ POOL %p", p, p);
1139 } else {
1140 _objc_inform("[%p] %#16lx %s",
1141 p, (unsigned long)*p, object_getClassName(*p));
1142 }
1143 }
1144 }
1145
1146 static void printAll()
1147 {
1148 _objc_inform("##############");
1149 _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());
1150
1151 AutoreleasePoolPage *page;
1152 ptrdiff_t objects = 0;
1153 for (page = coldPage(); page; page = page->child) {
1154 objects += page->next - page->begin();
1155 }
1156 _objc_inform("%llu releases pending.", (unsigned long long)objects);
1157
1158 if (haveEmptyPoolPlaceholder()) {
1159 _objc_inform("[%p] ................ PAGE (placeholder)",
1160 EMPTY_POOL_PLACEHOLDER);
1161 _objc_inform("[%p] ################ POOL (placeholder)",
1162 EMPTY_POOL_PLACEHOLDER);
1163 }
1164 else {
1165 for (page = coldPage(); page; page = page->child) {
1166 page->print();
1167 }
1168 }
1169
1170 _objc_inform("##############");
1171 }
1172
1173 static void printHiwat()
1174 {
1175 // Check and propagate high water mark
1176 // Ignore high water marks under 256 to suppress noise.
1177 AutoreleasePoolPage *p = hotPage();
1178 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
1179 if (mark > p->hiwat && mark > 256) {
1180 for( ; p; p = p->parent) {
1181 p->unprotect();
1182 p->hiwat = mark;
1183 p->protect();
1184 }
1185
1186 _objc_inform("POOL HIGHWATER: new high water mark of %u "
1187 "pending releases for thread %p:",
1188 mark, pthread_self());
1189
1190 void *stack[128];
1191 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
1192 char **sym = backtrace_symbols(stack, count);
1193 for (int i = 0; i < count; i++) {
1194 _objc_inform("POOL HIGHWATER: %s", sym[i]);
1195 }
1196 free(sym);
1197 }
1198 }
1199
1200 #undef POOL_BOUNDARY
1201 };
1202
1203 // anonymous namespace
1204 };
1205
1206
1207 /***********************************************************************
1208 * Slow paths for inline control
1209 **********************************************************************/
1210
1211 #if SUPPORT_NONPOINTER_ISA
1212
1213 NEVER_INLINE id
1214 objc_object::rootRetain_overflow(bool tryRetain)
1215 {
1216 return rootRetain(tryRetain, true);
1217 }
1218
1219
1220 NEVER_INLINE bool
1221 objc_object::rootRelease_underflow(bool performDealloc)
1222 {
1223 return rootRelease(performDealloc, true);
1224 }
1225
1226
1227 // Slow path of clearDeallocating()
1228 // for objects with nonpointer isa
1229 // that were ever weakly referenced
1230 // or whose retain count ever overflowed to the side table.
1231 NEVER_INLINE void
1232 objc_object::clearDeallocating_slow()
1233 {
1234 assert(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));
1235
1236 SideTable& table = SideTables()[this];
1237 table.lock();
1238 if (isa.weakly_referenced) {
1239 weak_clear_no_lock(&table.weak_table, (id)this);
1240 }
1241 if (isa.has_sidetable_rc) {
1242 table.refcnts.erase(this);
1243 }
1244 table.unlock();
1245 }
1246
1247 #endif
1248
1249 __attribute__((noinline,used))
1250 id
1251 objc_object::rootAutorelease2()
1252 {
1253 assert(!isTaggedPointer());
1254 return AutoreleasePoolPage::autorelease((id)this);
1255 }
1256
1257
1258 BREAKPOINT_FUNCTION(
1259 void objc_overrelease_during_dealloc_error(void)
1260 );
1261
1262
1263 NEVER_INLINE
1264 bool
1265 objc_object::overrelease_error()
1266 {
1267 _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
1268 objc_overrelease_during_dealloc_error();
1269 return false; // allow rootRelease() to tail-call this
1270 }
1271
1272
1273 /***********************************************************************
1274 * Retain count operations for side table.
1275 **********************************************************************/
1276
1277
1278 #if DEBUG
1279 // Used to assert that an object is not present in the side table.
1280 bool
1281 objc_object::sidetable_present()
1282 {
1283 bool result = false;
1284 SideTable& table = SideTables()[this];
1285
1286 table.lock();
1287
1288 RefcountMap::iterator it = table.refcnts.find(this);
1289 if (it != table.refcnts.end()) result = true;
1290
1291 if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;
1292
1293 table.unlock();
1294
1295 return result;
1296 }
1297 #endif
1298
1299 #if SUPPORT_NONPOINTER_ISA
1300
1301 void
1302 objc_object::sidetable_lock()
1303 {
1304 SideTable& table = SideTables()[this];
1305 table.lock();
1306 }
1307
1308 void
1309 objc_object::sidetable_unlock()
1310 {
1311 SideTable& table = SideTables()[this];
1312 table.unlock();
1313 }
1314
1315
1316 // Move the entire retain count to the side table,
1317 // as well as isDeallocating and weaklyReferenced.
1318 void
1319 objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
1320 bool isDeallocating,
1321 bool weaklyReferenced)
1322 {
1323 assert(!isa.nonpointer); // should already be changed to raw pointer
1324 SideTable& table = SideTables()[this];
1325
1326 size_t& refcntStorage = table.refcnts[this];
1327 size_t oldRefcnt = refcntStorage;
1328 // not deallocating - that was in the isa
1329 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1330 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1331
1332 uintptr_t carry;
1333 size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1334 if (carry) refcnt = SIDE_TABLE_RC_PINNED;
1335 if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
1336 if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
1337
1338 refcntStorage = refcnt;
1339 }
1340
1341
1342 // Move some retain counts to the side table from the isa field.
1343 // Returns true if the object is now pinned.
1344 bool
1345 objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
1346 {
1347 assert(isa.nonpointer);
1348 SideTable& table = SideTables()[this];
1349
1350 size_t& refcntStorage = table.refcnts[this];
1351 size_t oldRefcnt = refcntStorage;
1352 // isa-side bits should not be set here
1353 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1354 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1355
1356 if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
1357
1358 uintptr_t carry;
1359 size_t newRefcnt =
1360 addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1361 if (carry) {
1362 refcntStorage =
1363 SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
1364 return true;
1365 }
1366 else {
1367 refcntStorage = newRefcnt;
1368 return false;
1369 }
1370 }
1371
1372
1373 // Move some retain counts from the side table to the isa field.
1374 // Returns the actual count subtracted, which may be less than the request.
1375 size_t
1376 objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
1377 {
1378 assert(isa.nonpointer);
1379 SideTable& table = SideTables()[this];
1380
1381 RefcountMap::iterator it = table.refcnts.find(this);
1382 if (it == table.refcnts.end() || it->second == 0) {
1383 // Side table retain count is zero. Can't borrow.
1384 return 0;
1385 }
1386 size_t oldRefcnt = it->second;
1387
1388 // isa-side bits should not be set here
1389 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1390 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1391
1392 size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
1393 assert(oldRefcnt > newRefcnt); // shouldn't underflow
1394 it->second = newRefcnt;
1395 return delta_rc;
1396 }
1397
1398
1399 size_t
1400 objc_object::sidetable_getExtraRC_nolock()
1401 {
1402 assert(isa.nonpointer);
1403 SideTable& table = SideTables()[this];
1404 RefcountMap::iterator it = table.refcnts.find(this);
1405 if (it == table.refcnts.end()) return 0;
1406 else return it->second >> SIDE_TABLE_RC_SHIFT;
1407 }
1408
1409
1410 // SUPPORT_NONPOINTER_ISA
1411 #endif
1412
1413
1414 id
1415 objc_object::sidetable_retain()
1416 {
1417 #if SUPPORT_NONPOINTER_ISA
1418 assert(!isa.nonpointer);
1419 #endif
1420 SideTable& table = SideTables()[this];
1421
1422 table.lock();
1423 size_t& refcntStorage = table.refcnts[this];
1424 if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
1425 refcntStorage += SIDE_TABLE_RC_ONE;
1426 }
1427 table.unlock();
1428
1429 return (id)this;
1430 }
1431
1432
1433 bool
1434 objc_object::sidetable_tryRetain()
1435 {
1436 #if SUPPORT_NONPOINTER_ISA
1437 assert(!isa.nonpointer);
1438 #endif
1439 SideTable& table = SideTables()[this];
1440
1441 // NO SPINLOCK HERE
1442 // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
1443 // which already acquired the lock on our behalf.
1444
1445 // fixme can't do this efficiently with os_lock_handoff_s
1446 // if (table.slock == 0) {
1447 // _objc_fatal("Do not call -_tryRetain.");
1448 // }
1449
1450 bool result = true;
1451 RefcountMap::iterator it = table.refcnts.find(this);
1452 if (it == table.refcnts.end()) {
1453 table.refcnts[this] = SIDE_TABLE_RC_ONE;
1454 } else if (it->second & SIDE_TABLE_DEALLOCATING) {
1455 result = false;
1456 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1457 it->second += SIDE_TABLE_RC_ONE;
1458 }
1459
1460 return result;
1461 }
1462
1463
1464 uintptr_t
1465 objc_object::sidetable_retainCount()
1466 {
1467 SideTable& table = SideTables()[this];
1468
1469 size_t refcnt_result = 1;
1470
1471 table.lock();
1472 RefcountMap::iterator it = table.refcnts.find(this);
1473 if (it != table.refcnts.end()) {
1474 // this is valid for SIDE_TABLE_RC_PINNED too
1475 refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
1476 }
1477 table.unlock();
1478 return refcnt_result;
1479 }
1480
1481
1482 bool
1483 objc_object::sidetable_isDeallocating()
1484 {
1485 SideTable& table = SideTables()[this];
1486
1487 // NO SPINLOCK HERE
1488 // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
1489 // which already acquired the lock on our behalf.
1490
1491
1492 // fixme can't do this efficiently with os_lock_handoff_s
1493 // if (table.slock == 0) {
1494 // _objc_fatal("Do not call -_isDeallocating.");
1495 // }
1496
1497 RefcountMap::iterator it = table.refcnts.find(this);
1498 return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
1499 }
1500
1501
1502 bool
1503 objc_object::sidetable_isWeaklyReferenced()
1504 {
1505 bool result = false;
1506
1507 SideTable& table = SideTables()[this];
1508 table.lock();
1509
1510 RefcountMap::iterator it = table.refcnts.find(this);
1511 if (it != table.refcnts.end()) {
1512 result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
1513 }
1514
1515 table.unlock();
1516
1517 return result;
1518 }
1519
1520
1521 void
1522 objc_object::sidetable_setWeaklyReferenced_nolock()
1523 {
1524 #if SUPPORT_NONPOINTER_ISA
1525 assert(!isa.nonpointer);
1526 #endif
1527
1528 SideTable& table = SideTables()[this];
1529
1530 table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
1531 }
1532
1533
1534 // rdar://20206767
1535 // return uintptr_t instead of bool so that the various raw-isa
1536 // -release paths all return zero in eax
1537 uintptr_t
1538 objc_object::sidetable_release(bool performDealloc)
1539 {
1540 #if SUPPORT_NONPOINTER_ISA
1541 assert(!isa.nonpointer);
1542 #endif
1543 SideTable& table = SideTables()[this];
1544
1545 bool do_dealloc = false;
1546
1547 table.lock();
1548 RefcountMap::iterator it = table.refcnts.find(this);
1549 if (it == table.refcnts.end()) {
1550 do_dealloc = true;
1551 table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
1552 } else if (it->second < SIDE_TABLE_DEALLOCATING) {
1553 // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
1554 do_dealloc = true;
1555 it->second |= SIDE_TABLE_DEALLOCATING;
1556 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1557 it->second -= SIDE_TABLE_RC_ONE;
1558 }
1559 table.unlock();
1560 if (do_dealloc && performDealloc) {
1561 ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
1562 }
1563 return do_dealloc;
1564 }
1565
1566
1567 void
1568 objc_object::sidetable_clearDeallocating()
1569 {
1570 SideTable& table = SideTables()[this];
1571
1572 // clear any weak table items
1573 // clear extra retain count and deallocating bit
1574 // (fixme warn or abort if extra retain count == 0 ?)
1575 table.lock();
1576 RefcountMap::iterator it = table.refcnts.find(this);
1577 if (it != table.refcnts.end()) {
1578 if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
1579 weak_clear_no_lock(&table.weak_table, (id)this);
1580 }
1581 table.refcnts.erase(it);
1582 }
1583 table.unlock();
1584 }
1585
1586
1587 /***********************************************************************
1588 * Optimized retain/release/autorelease entrypoints
1589 **********************************************************************/
1590
1591
1592 #if __OBJC2__
1593
1594 __attribute__((aligned(16)))
1595 id
1596 objc_retain(id obj)
1597 {
1598 if (!obj) return obj;
1599 if (obj->isTaggedPointer()) return obj;
1600 return obj->retain();
1601 }
1602
1603
1604 __attribute__((aligned(16)))
1605 void
1606 objc_release(id obj)
1607 {
1608 if (!obj) return;
1609 if (obj->isTaggedPointer()) return;
1610 return obj->release();
1611 }
1612
1613
1614 __attribute__((aligned(16)))
1615 id
1616 objc_autorelease(id obj)
1617 {
1618 if (!obj) return obj;
1619 if (obj->isTaggedPointer()) return obj;
1620 return obj->autorelease();
1621 }
1622
1623
1624 // OBJC2
1625 #else
1626 // not OBJC2
1627
1628
1629 id objc_retain(id obj) { return [obj retain]; }
1630 void objc_release(id obj) { [obj release]; }
1631 id objc_autorelease(id obj) { return [obj autorelease]; }
1632
1633
1634 #endif
1635
1636
1637 /***********************************************************************
1638 * Basic operations for root class implementations a.k.a. _objc_root*()
1639 **********************************************************************/
1640
1641 bool
1642 _objc_rootTryRetain(id obj)
1643 {
1644 assert(obj);
1645
1646 return obj->rootTryRetain();
1647 }
1648
1649 bool
1650 _objc_rootIsDeallocating(id obj)
1651 {
1652 assert(obj);
1653
1654 return obj->rootIsDeallocating();
1655 }
1656
1657
1658 void
1659 objc_clear_deallocating(id obj)
1660 {
1661 assert(obj);
1662
1663 if (obj->isTaggedPointer()) return;
1664 obj->clearDeallocating();
1665 }
1666
1667
1668 bool
1669 _objc_rootReleaseWasZero(id obj)
1670 {
1671 assert(obj);
1672
1673 return obj->rootReleaseShouldDealloc();
1674 }
1675
1676
1677 id
1678 _objc_rootAutorelease(id obj)
1679 {
1680 assert(obj);
1681 return obj->rootAutorelease();
1682 }
1683
1684 uintptr_t
1685 _objc_rootRetainCount(id obj)
1686 {
1687 assert(obj);
1688
1689 return obj->rootRetainCount();
1690 }
1691
1692
1693 id
1694 _objc_rootRetain(id obj)
1695 {
1696 assert(obj);
1697
1698 return obj->rootRetain();
1699 }
1700
1701 void
1702 _objc_rootRelease(id obj)
1703 {
1704 assert(obj);
1705
1706 obj->rootRelease();
1707 }
1708
1709
1710 id
1711 _objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
1712 {
1713 id obj;
1714
1715 #if __OBJC2__
1716 // allocWithZone under __OBJC2__ ignores the zone parameter
1717 (void)zone;
1718 obj = class_createInstance(cls, 0);
1719 #else
1720 if (!zone) {
1721 obj = class_createInstance(cls, 0);
1722 }
1723 else {
1724 obj = class_createInstanceFromZone(cls, 0, zone);
1725 }
1726 #endif
1727
1728 if (slowpath(!obj)) obj = callBadAllocHandler(cls);
1729 return obj;
1730 }
1731
1732
1733 // Call [cls alloc] or [cls allocWithZone:nil], with appropriate
1734 // shortcutting optimizations.
1735 static ALWAYS_INLINE id
1736 callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
1737 {
1738 if (slowpath(checkNil && !cls)) return nil;
1739
1740 #if __OBJC2__
1741 if (fastpath(!cls->ISA()->hasCustomAWZ())) {
1742 // No alloc/allocWithZone implementation. Go straight to the allocator.
1743 // fixme store hasCustomAWZ in the non-meta class and
1744 // add it to canAllocFast's summary
1745 if (fastpath(cls->canAllocFast())) {
1746 // No ctors, raw isa, etc. Go straight to the metal.
1747 bool dtor = cls->hasCxxDtor();
1748 id obj = (id)calloc(1, cls->bits.fastInstanceSize());
1749 if (slowpath(!obj)) return callBadAllocHandler(cls);
1750 obj->initInstanceIsa(cls, dtor);
1751 return obj;
1752 }
1753 else {
1754 // Has ctor or raw isa or something. Use the slower path.
1755 id obj = class_createInstance(cls, 0);
1756 if (slowpath(!obj)) return callBadAllocHandler(cls);
1757 return obj;
1758 }
1759 }
1760 #endif
1761
1762 // No shortcuts available.
1763 if (allocWithZone) return [cls allocWithZone:nil];
1764 return [cls alloc];
1765 }
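// Illustrative only: which path a plain class takes through callAlloc()
// (a sketch based on the checks above; `MyClass` is hypothetical).
//
//   [MyClass alloc]
//     -> no custom +alloc/+allocWithZone: (hasCustomAWZ() is false)
//        -> canAllocFast(): calloc + initInstanceIsa, no message send
//        -> otherwise:      class_createInstance(cls, 0)
//     -> custom +alloc somewhere in the hierarchy: fall back to an
//        ordinary objc_msgSend of +alloc or +allocWithZone:.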
1766
1767
1768 // Base class implementation of +alloc. cls is not nil.
1769 // Calls [cls allocWithZone:nil].
1770 id
1771 _objc_rootAlloc(Class cls)
1772 {
1773 return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
1774 }
1775
1776 // Calls [cls alloc].
1777 id
1778 objc_alloc(Class cls)
1779 {
1780 return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
1781 }
1782
1783 // Calls [cls allocWithZone:nil].
1784 id
1785 objc_allocWithZone(Class cls)
1786 {
1787 return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
1788 }
1789
1790 // Calls [[cls alloc] init].
1791 id
1792 objc_alloc_init(Class cls)
1793 {
1794 return [callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/) init];
1795 }
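// Illustrative only: modern compilers may lower common alloc/init
// message sends directly to these entry points, e.g.
//
//   Foo *f = [[Foo alloc] init];   // may become objc_alloc_init(Foo)
//   Foo *g = [Foo alloc];          // may become objc_alloc(Foo)
//
// which skips the dispatch overhead when the class uses the default
// +alloc and +allocWithZone:.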
1796
1797
1798 void
1799 _objc_rootDealloc(id obj)
1800 {
1801 assert(obj);
1802
1803 obj->rootDealloc();
1804 }
1805
1806 void
1807 _objc_rootFinalize(id obj __unused)
1808 {
1809 assert(obj);
1810 _objc_fatal("_objc_rootFinalize called with garbage collection off");
1811 }
1812
1813
1814 id
1815 _objc_rootInit(id obj)
1816 {
1817 // In practice, it will be hard to rely on this function.
1818 // Many classes do not properly chain -init calls.
1819 return obj;
1820 }
1821
1822
1823 malloc_zone_t *
1824 _objc_rootZone(id obj)
1825 {
1826 (void)obj;
1827 #if __OBJC2__
1828 // allocWithZone under __OBJC2__ ignores the zone parameter
1829 return malloc_default_zone();
1830 #else
1831 malloc_zone_t *rval = malloc_zone_from_ptr(obj);
1832 return rval ? rval : malloc_default_zone();
1833 #endif
1834 }
1835
1836 uintptr_t
1837 _objc_rootHash(id obj)
1838 {
1839 return (uintptr_t)obj;
1840 }
1841
1842 void *
1843 objc_autoreleasePoolPush(void)
1844 {
1845 return AutoreleasePoolPage::push();
1846 }
1847
1848 void
1849 objc_autoreleasePoolPop(void *ctxt)
1850 {
1851 AutoreleasePoolPage::pop(ctxt);
1852 }
1853
1854
1855 void *
1856 _objc_autoreleasePoolPush(void)
1857 {
1858 return objc_autoreleasePoolPush();
1859 }
1860
1861 void
1862 _objc_autoreleasePoolPop(void *ctxt)
1863 {
1864 objc_autoreleasePoolPop(ctxt);
1865 }
1866
1867 void
1868 _objc_autoreleasePoolPrint(void)
1869 {
1870 AutoreleasePoolPage::printAll();
1871 }
1872
1873
1874 // Same as objc_release but suitable for tail-calling
1875 // if you need the value back and don't want to push a frame before this point.
1876 __attribute__((noinline))
1877 static id
1878 objc_releaseAndReturn(id obj)
1879 {
1880 objc_release(obj);
1881 return obj;
1882 }
1883
1884 // Same as objc_retainAutorelease but suitable for tail-calling
1885 // if you don't want to push a frame before this point.
1886 __attribute__((noinline))
1887 static id
1888 objc_retainAutoreleaseAndReturn(id obj)
1889 {
1890 return objc_retainAutorelease(obj);
1891 }
1892
1893
1894 // Prepare a value at +1 for return through a +0 autoreleasing convention.
1895 id
1896 objc_autoreleaseReturnValue(id obj)
1897 {
1898 if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
1899
1900 return objc_autorelease(obj);
1901 }
1902
1903 // Prepare a value at +0 for return through a +0 autoreleasing convention.
1904 id
1905 objc_retainAutoreleaseReturnValue(id obj)
1906 {
1907 if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;
1908
1909 // not objc_autoreleaseReturnValue(objc_retain(obj))
1910 // because we don't need another optimization attempt
1911 return objc_retainAutoreleaseAndReturn(obj);
1912 }
1913
1914 // Accept a value returned through a +0 autoreleasing convention for use at +1.
1915 id
1916 objc_retainAutoreleasedReturnValue(id obj)
1917 {
1918 if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
1919
1920 return objc_retain(obj);
1921 }
1922
1923 // Accept a value returned through a +0 autoreleasing convention for use at +0.
1924 id
1925 objc_unsafeClaimAutoreleasedReturnValue(id obj)
1926 {
1927 if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;
1928
1929 return objc_releaseAndReturn(obj);
1930 }
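// Illustrative only: the caller/callee handshake implemented by the four
// entry points above (a sketch; the detection mechanism lives in
// prepareOptimizedReturn / acceptOptimizedReturn).
//
//   // callee, compiled with ARC:
//   - (id)name { return _name; }   // emits objc_retainAutoreleaseReturnValue
//   // caller, compiled with ARC:
//   id n = [obj name];             // emits objc_retainAutoreleasedReturnValue
//
// When both sides cooperate the object is handed over at +1 without ever
// touching the autorelease pool; otherwise the calls fall back to a real
// autorelease in the callee and a real retain in the caller.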
1931
1932 id
1933 objc_retainAutorelease(id obj)
1934 {
1935 return objc_autorelease(objc_retain(obj));
1936 }
1937
1938 void
1939 _objc_deallocOnMainThreadHelper(void *context)
1940 {
1941 id obj = (id)context;
1942 [obj dealloc];
1943 }
1944
1945 // convert objc_objectptr_t to id, callee must take ownership.
1946 id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1947
1948 // convert objc_objectptr_t to id, without ownership transfer.
1949 id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1950
1951 // convert id to objc_objectptr_t, no ownership transfer.
1952 objc_objectptr_t objc_unretainedPointer(id object) { return object; }
1953
1954
1955 void arr_init(void)
1956 {
1957 AutoreleasePoolPage::init();
1958 SideTableInit();
1959 }
1960
1961
1962 #if SUPPORT_TAGGED_POINTERS
1963
1964 // Placeholder for old debuggers. When they inspect an
1965 // extended tagged pointer object they will see this isa.
1966
1967 @interface __NSUnrecognizedTaggedPointer : NSObject
1968 @end
1969
1970 @implementation __NSUnrecognizedTaggedPointer
1971 +(void) load { }
1972 -(id) retain { return self; }
1973 -(oneway void) release { }
1974 -(id) autorelease { return self; }
1975 @end
1976
1977 #endif
1978
1979
1980 @implementation NSObject
1981
1982 + (void)load {
1983 }
1984
1985 + (void)initialize {
1986 }
1987
1988 + (id)self {
1989 return (id)self;
1990 }
1991
1992 - (id)self {
1993 return self;
1994 }
1995
1996 + (Class)class {
1997 return self;
1998 }
1999
2000 - (Class)class {
2001 return object_getClass(self);
2002 }
2003
2004 + (Class)superclass {
2005 return self->superclass;
2006 }
2007
2008 - (Class)superclass {
2009 return [self class]->superclass;
2010 }
2011
2012 + (BOOL)isMemberOfClass:(Class)cls {
2013 return object_getClass((id)self) == cls;
2014 }
2015
2016 - (BOOL)isMemberOfClass:(Class)cls {
2017 return [self class] == cls;
2018 }
2019
2020 + (BOOL)isKindOfClass:(Class)cls {
2021 for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
2022 if (tcls == cls) return YES;
2023 }
2024 return NO;
2025 }
2026
2027 - (BOOL)isKindOfClass:(Class)cls {
2028 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2029 if (tcls == cls) return YES;
2030 }
2031 return NO;
2032 }
2033
2034 + (BOOL)isSubclassOfClass:(Class)cls {
2035 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2036 if (tcls == cls) return YES;
2037 }
2038 return NO;
2039 }
2040
2041 + (BOOL)isAncestorOfObject:(NSObject *)obj {
2042 for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
2043 if (tcls == self) return YES;
2044 }
2045 return NO;
2046 }
2047
2048 + (BOOL)instancesRespondToSelector:(SEL)sel {
2049 if (!sel) return NO;
2050 return class_respondsToSelector(self, sel);
2051 }
2052
2053 + (BOOL)respondsToSelector:(SEL)sel {
2054 if (!sel) return NO;
2055 return class_respondsToSelector_inst(object_getClass(self), sel, self);
2056 }
2057
2058 - (BOOL)respondsToSelector:(SEL)sel {
2059 if (!sel) return NO;
2060 return class_respondsToSelector_inst([self class], sel, self);
2061 }
2062
2063 + (BOOL)conformsToProtocol:(Protocol *)protocol {
2064 if (!protocol) return NO;
2065 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2066 if (class_conformsToProtocol(tcls, protocol)) return YES;
2067 }
2068 return NO;
2069 }
2070
2071 - (BOOL)conformsToProtocol:(Protocol *)protocol {
2072 if (!protocol) return NO;
2073 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2074 if (class_conformsToProtocol(tcls, protocol)) return YES;
2075 }
2076 return NO;
2077 }
2078
2079 + (NSUInteger)hash {
2080 return _objc_rootHash(self);
2081 }
2082
2083 - (NSUInteger)hash {
2084 return _objc_rootHash(self);
2085 }
2086
2087 + (BOOL)isEqual:(id)obj {
2088 return obj == (id)self;
2089 }
2090
2091 - (BOOL)isEqual:(id)obj {
2092 return obj == self;
2093 }
2094
2095
2096 + (BOOL)isFault {
2097 return NO;
2098 }
2099
2100 - (BOOL)isFault {
2101 return NO;
2102 }
2103
2104 + (BOOL)isProxy {
2105 return NO;
2106 }
2107
2108 - (BOOL)isProxy {
2109 return NO;
2110 }
2111
2112
2113 + (IMP)instanceMethodForSelector:(SEL)sel {
2114 if (!sel) [self doesNotRecognizeSelector:sel];
2115 return class_getMethodImplementation(self, sel);
2116 }
2117
2118 + (IMP)methodForSelector:(SEL)sel {
2119 if (!sel) [self doesNotRecognizeSelector:sel];
2120 return object_getMethodImplementation((id)self, sel);
2121 }
2122
2123 - (IMP)methodForSelector:(SEL)sel {
2124 if (!sel) [self doesNotRecognizeSelector:sel];
2125 return object_getMethodImplementation(self, sel);
2126 }
2127
2128 + (BOOL)resolveClassMethod:(SEL)sel {
2129 return NO;
2130 }
2131
2132 + (BOOL)resolveInstanceMethod:(SEL)sel {
2133 return NO;
2134 }
2135
2136 // Replaced by CF (throws an NSException)
2137 + (void)doesNotRecognizeSelector:(SEL)sel {
2138 _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
2139 class_getName(self), sel_getName(sel), self);
2140 }
2141
2142 // Replaced by CF (throws an NSException)
2143 - (void)doesNotRecognizeSelector:(SEL)sel {
2144 _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
2145 object_getClassName(self), sel_getName(sel), self);
2146 }
2147
2148
2149 + (id)performSelector:(SEL)sel {
2150 if (!sel) [self doesNotRecognizeSelector:sel];
2151 return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
2152 }
2153
2154 + (id)performSelector:(SEL)sel withObject:(id)obj {
2155 if (!sel) [self doesNotRecognizeSelector:sel];
2156 return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
2157 }
2158
2159 + (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2160 if (!sel) [self doesNotRecognizeSelector:sel];
2161 return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
2162 }
2163
2164 - (id)performSelector:(SEL)sel {
2165 if (!sel) [self doesNotRecognizeSelector:sel];
2166 return ((id(*)(id, SEL))objc_msgSend)(self, sel);
2167 }
2168
2169 - (id)performSelector:(SEL)sel withObject:(id)obj {
2170 if (!sel) [self doesNotRecognizeSelector:sel];
2171 return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
2172 }
2173
2174 - (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2175 if (!sel) [self doesNotRecognizeSelector:sel];
2176 return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
2177 }
2178
2179
2180 // Replaced by CF (returns an NSMethodSignature)
2181 + (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
2182 _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
2183 "not available without CoreFoundation");
2184 }
2185
2186 // Replaced by CF (returns an NSMethodSignature)
2187 + (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2188 _objc_fatal("+[NSObject methodSignatureForSelector:] "
2189 "not available without CoreFoundation");
2190 }
2191
2192 // Replaced by CF (returns an NSMethodSignature)
2193 - (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2194 _objc_fatal("-[NSObject methodSignatureForSelector:] "
2195 "not available without CoreFoundation");
2196 }
2197
2198 + (void)forwardInvocation:(NSInvocation *)invocation {
2199 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2200 }
2201
2202 - (void)forwardInvocation:(NSInvocation *)invocation {
2203 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2204 }
2205
2206 + (id)forwardingTargetForSelector:(SEL)sel {
2207 return nil;
2208 }
2209
2210 - (id)forwardingTargetForSelector:(SEL)sel {
2211 return nil;
2212 }
2213
2214
2215 // Replaced by CF (returns an NSString)
2216 + (NSString *)description {
2217 return nil;
2218 }
2219
2220 // Replaced by CF (returns an NSString)
2221 - (NSString *)description {
2222 return nil;
2223 }
2224
2225 + (NSString *)debugDescription {
2226 return [self description];
2227 }
2228
2229 - (NSString *)debugDescription {
2230 return [self description];
2231 }
2232
2233
2234 + (id)new {
2235 return [callAlloc(self, false/*checkNil*/) init];
2236 }
2237
2238 + (id)retain {
2239 return (id)self;
2240 }
2241
2242 // Replaced by ObjectAlloc
2243 - (id)retain {
2244 return ((id)self)->rootRetain();
2245 }
2246
2247
2248 + (BOOL)_tryRetain {
2249 return YES;
2250 }
2251
2252 // Replaced by ObjectAlloc
2253 - (BOOL)_tryRetain {
2254 return ((id)self)->rootTryRetain();
2255 }
2256
2257 + (BOOL)_isDeallocating {
2258 return NO;
2259 }
2260
2261 - (BOOL)_isDeallocating {
2262 return ((id)self)->rootIsDeallocating();
2263 }
2264
2265 + (BOOL)allowsWeakReference {
2266 return YES;
2267 }
2268
2269 + (BOOL)retainWeakReference {
2270 return YES;
2271 }
2272
2273 - (BOOL)allowsWeakReference {
2274 return ! [self _isDeallocating];
2275 }
2276
2277 - (BOOL)retainWeakReference {
2278 return [self _tryRetain];
2279 }
2280
2281 + (oneway void)release {
2282 }
2283
2284 // Replaced by ObjectAlloc
2285 - (oneway void)release {
2286 ((id)self)->rootRelease();
2287 }
2288
2289 + (id)autorelease {
2290 return (id)self;
2291 }
2292
2293 // Replaced by ObjectAlloc
2294 - (id)autorelease {
2295 return ((id)self)->rootAutorelease();
2296 }
2297
2298 + (NSUInteger)retainCount {
2299 return ULONG_MAX;
2300 }
2301
2302 - (NSUInteger)retainCount {
2303 return ((id)self)->rootRetainCount();
2304 }
2305
2306 + (id)alloc {
2307 return _objc_rootAlloc(self);
2308 }
2309
2310 // Replaced by ObjectAlloc
2311 + (id)allocWithZone:(struct _NSZone *)zone {
2312 return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
2313 }
2314
2315 // Replaced by CF (throws an NSException)
2316 + (id)init {
2317 return (id)self;
2318 }
2319
2320 - (id)init {
2321 return _objc_rootInit(self);
2322 }
2323
2324 // Replaced by CF (throws an NSException)
2325 + (void)dealloc {
2326 }
2327
2328
2329 // Replaced by NSZombies
2330 - (void)dealloc {
2331 _objc_rootDealloc(self);
2332 }
2333
2334 // Previously used by GC. Now a placeholder for binary compatibility.
2335 - (void) finalize {
2336 }
2337
2338 + (struct _NSZone *)zone {
2339 return (struct _NSZone *)_objc_rootZone(self);
2340 }
2341
2342 - (struct _NSZone *)zone {
2343 return (struct _NSZone *)_objc_rootZone(self);
2344 }
2345
2346 + (id)copy {
2347 return (id)self;
2348 }
2349
2350 + (id)copyWithZone:(struct _NSZone *)zone {
2351 return (id)self;
2352 }
2353
2354 - (id)copy {
2355 return [(id)self copyWithZone:nil];
2356 }
2357
2358 + (id)mutableCopy {
2359 return (id)self;
2360 }
2361
2362 + (id)mutableCopyWithZone:(struct _NSZone *)zone {
2363 return (id)self;
2364 }
2365
2366 - (id)mutableCopy {
2367 return [(id)self mutableCopyWithZone:nil];
2368 }
2369
2370 @end
2371
2372