apple/objc4 (objc4-680): runtime/NSObject.mm
1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include "objc-private.h"
25 #include "NSObject.h"
26
27 #include "objc-weak.h"
28 #include "llvm-DenseMap.h"
29 #include "NSObject.h"
30
31 #include <malloc/malloc.h>
32 #include <stdint.h>
33 #include <stdbool.h>
34 #include <mach/mach.h>
35 #include <mach-o/dyld.h>
36 #include <mach-o/nlist.h>
37 #include <sys/types.h>
38 #include <sys/mman.h>
39 #include <libkern/OSAtomic.h>
40 #include <Block.h>
41 #include <map>
42 #include <execinfo.h>
43
44 @interface NSInvocation
45 - (SEL)selector;
46 @end
47
48
49 #if TARGET_OS_MAC
50
51 // NSObject used to be in Foundation/CoreFoundation.
52
53 #define SYMBOL_ELSEWHERE_IN_3(sym, vers, n) \
54 OBJC_EXPORT const char elsewhere_ ##n __asm__("$ld$hide$os" #vers "$" #sym); const char elsewhere_ ##n = 0
55 #define SYMBOL_ELSEWHERE_IN_2(sym, vers, n) \
56 SYMBOL_ELSEWHERE_IN_3(sym, vers, n)
57 #define SYMBOL_ELSEWHERE_IN(sym, vers) \
58 SYMBOL_ELSEWHERE_IN_2(sym, vers, __COUNTER__)
59
60 #if __OBJC2__
61 # define NSOBJECT_ELSEWHERE_IN(vers) \
62 SYMBOL_ELSEWHERE_IN(_OBJC_CLASS_$_NSObject, vers); \
63 SYMBOL_ELSEWHERE_IN(_OBJC_METACLASS_$_NSObject, vers); \
64 SYMBOL_ELSEWHERE_IN(_OBJC_IVAR_$_NSObject.isa, vers)
65 #else
66 # define NSOBJECT_ELSEWHERE_IN(vers) \
67 SYMBOL_ELSEWHERE_IN(.objc_class_name_NSObject, vers)
68 #endif
69
70 #if TARGET_OS_IOS
71 NSOBJECT_ELSEWHERE_IN(5.1);
72 NSOBJECT_ELSEWHERE_IN(5.0);
73 NSOBJECT_ELSEWHERE_IN(4.3);
74 NSOBJECT_ELSEWHERE_IN(4.2);
75 NSOBJECT_ELSEWHERE_IN(4.1);
76 NSOBJECT_ELSEWHERE_IN(4.0);
77 NSOBJECT_ELSEWHERE_IN(3.2);
78 NSOBJECT_ELSEWHERE_IN(3.1);
79 NSOBJECT_ELSEWHERE_IN(3.0);
80 NSOBJECT_ELSEWHERE_IN(2.2);
81 NSOBJECT_ELSEWHERE_IN(2.1);
82 NSOBJECT_ELSEWHERE_IN(2.0);
83 #elif TARGET_OS_MAC && !TARGET_OS_IPHONE
84 NSOBJECT_ELSEWHERE_IN(10.7);
85 NSOBJECT_ELSEWHERE_IN(10.6);
86 NSOBJECT_ELSEWHERE_IN(10.5);
87 NSOBJECT_ELSEWHERE_IN(10.4);
88 NSOBJECT_ELSEWHERE_IN(10.3);
89 NSOBJECT_ELSEWHERE_IN(10.2);
90 NSOBJECT_ELSEWHERE_IN(10.1);
91 NSOBJECT_ELSEWHERE_IN(10.0);
92 #else
93 // NSObject has always been in libobjc on these platforms.
94 #endif
95
96 // TARGET_OS_MAC
97 #endif
98
99
100 /***********************************************************************
101 * Weak ivar support
102 **********************************************************************/
103
104 static id defaultBadAllocHandler(Class cls)
105 {
106 _objc_fatal("attempt to allocate object of class '%s' failed",
107 cls->nameForLogging());
108 }
109
110 static id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
111
112 static id callBadAllocHandler(Class cls)
113 {
114 // fixme add re-entrancy protection in case allocation fails inside handler
115 return (*badAllocHandler)(cls);
116 }
117
118 void _objc_setBadAllocHandler(id(*newHandler)(Class))
119 {
120 badAllocHandler = newHandler;
121 }
122
123
124 namespace {
125
126 // The order of these bits is important.
127 #define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
128 #define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
129 #define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit
130 #define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1))
131
132 #define SIDE_TABLE_RC_SHIFT 2
133 #define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
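// Editorial sketch (not part of the runtime): how one refcount word in the
// side table is laid out, given the bit definitions above, assuming a
// 64-bit target (WORD_BITS == 64). From the low end:
//
//   bit 0       SIDE_TABLE_WEAKLY_REFERENCED
//   bit 1       SIDE_TABLE_DEALLOCATING
//   bits 2..62  extra retain count, counted in units of SIDE_TABLE_RC_ONE
//   bit 63      SIDE_TABLE_RC_PINNED (the count overflowed and is stuck)
//
// So the stored extra retain count is recovered as
//   size_t extra = bits >> SIDE_TABLE_RC_SHIFT;
// and the flag bits as
//   size_t flags = bits & SIDE_TABLE_FLAG_MASK;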
134
135 // RefcountMap disguises its pointers because we
136 // don't want the table to act as a root for `leaks`.
137 typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;
138
139 struct SideTable {
140 spinlock_t slock;
141 RefcountMap refcnts;
142 weak_table_t weak_table;
143
144 SideTable() {
145 memset(&weak_table, 0, sizeof(weak_table));
146 }
147
148 ~SideTable() {
149 _objc_fatal("Do not delete SideTable.");
150 }
151
152 void lock() { slock.lock(); }
153 void unlock() { slock.unlock(); }
154 bool trylock() { return slock.trylock(); }
155
156 // Address-ordered lock discipline for a pair of side tables.
157
158 template<bool HaveOld, bool HaveNew>
159 static void lockTwo(SideTable *lock1, SideTable *lock2);
160 template<bool HaveOld, bool HaveNew>
161 static void unlockTwo(SideTable *lock1, SideTable *lock2);
162 };
163
164
165 template<>
166 void SideTable::lockTwo<true, true>(SideTable *lock1, SideTable *lock2) {
167 spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
168 }
169
170 template<>
171 void SideTable::lockTwo<true, false>(SideTable *lock1, SideTable *) {
172 lock1->lock();
173 }
174
175 template<>
176 void SideTable::lockTwo<false, true>(SideTable *, SideTable *lock2) {
177 lock2->lock();
178 }
179
180 template<>
181 void SideTable::unlockTwo<true, true>(SideTable *lock1, SideTable *lock2) {
182 spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
183 }
184
185 template<>
186 void SideTable::unlockTwo<true, false>(SideTable *lock1, SideTable *) {
187 lock1->unlock();
188 }
189
190 template<>
191 void SideTable::unlockTwo<false, true>(SideTable *, SideTable *lock2) {
192 lock2->unlock();
193 }
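// Editorial sketch (not part of the runtime): why the pair-locking above
// needs to be address-ordered. If thread A stores a weak reference from an
// object in table T1 to one in T2 while thread B does the reverse, naive
// locking could deadlock:
//
//   thread A: lock(T1); lock(T2);        thread B: lock(T2); lock(T1);
//
// Taking the two slocks in a fixed order (by address, per the comment above,
// inside spinlock_t::lockTwo()) means both threads acquire the tables in the
// same order, so one of them simply waits instead of deadlocking.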
194
195
196
197 // We cannot use a C++ static initializer to initialize SideTables because
198 // libc calls us before our C++ initializers run. We also don't want a global
199 // pointer to this struct because of the extra indirection.
200 // Do it the hard way.
201 alignas(StripedMap<SideTable>) static uint8_t
202 SideTableBuf[sizeof(StripedMap<SideTable>)];
203
204 static void SideTableInit() {
205 new (SideTableBuf) StripedMap<SideTable>();
206 }
207
208 static StripedMap<SideTable>& SideTables() {
209 return *reinterpret_cast<StripedMap<SideTable>*>(SideTableBuf);
210 }
211
212 // anonymous namespace
213 };
214
215
216 //
217 // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
218 //
219
220 id objc_retainBlock(id x) {
221 return (id)_Block_copy(x);
222 }
223
224 //
225 // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
226 //
227
228 BOOL objc_should_deallocate(id object) {
229 return YES;
230 }
231
232 id
233 objc_retain_autorelease(id obj)
234 {
235 return objc_autorelease(objc_retain(obj));
236 }
237
238
239 void
240 objc_storeStrong(id *location, id obj)
241 {
242 id prev = *location;
243 if (obj == prev) {
244 return;
245 }
246 objc_retain(obj);
247 *location = obj;
248 objc_release(prev);
249 }
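// Editorial sketch (not part of the runtime): under ARC, an ordinary strong
// assignment such as
//
//     self->_title = newTitle;
//
// is compiled roughly as
//
//     objc_storeStrong(&self->_title, newTitle);
//
// i.e. retain the new value, store it, then release the previous value,
// exactly as the function above does. (_title and newTitle are hypothetical
// names used only for illustration.)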
250
251
252 // Update a weak variable.
253 // If HaveOld is true, the variable has an existing value
254 // that needs to be cleaned up. This value might be nil.
255 // If HaveNew is true, there is a new value that needs to be
256 // assigned into the variable. This value might be nil.
257 // If CrashIfDeallocating is true, the process is halted if newObj is
258 // deallocating or newObj's class does not support weak references.
259 // If CrashIfDeallocating is false, nil is stored instead.
260 template <bool HaveOld, bool HaveNew, bool CrashIfDeallocating>
261 static id
262 storeWeak(id *location, objc_object *newObj)
263 {
264 assert(HaveOld || HaveNew);
265 if (!HaveNew) assert(newObj == nil);
266
267 Class previouslyInitializedClass = nil;
268 id oldObj;
269 SideTable *oldTable;
270 SideTable *newTable;
271
272 // Acquire locks for old and new values.
273 // Order by lock address to prevent lock ordering problems.
274 // Retry if the old value changes underneath us.
275 retry:
276 if (HaveOld) {
277 oldObj = *location;
278 oldTable = &SideTables()[oldObj];
279 } else {
280 oldTable = nil;
281 }
282 if (HaveNew) {
283 newTable = &SideTables()[newObj];
284 } else {
285 newTable = nil;
286 }
287
288 SideTable::lockTwo<HaveOld, HaveNew>(oldTable, newTable);
289
290 if (HaveOld && *location != oldObj) {
291 SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
292 goto retry;
293 }
294
295 // Prevent a deadlock between the weak reference machinery
296 // and the +initialize machinery by ensuring that no
297 // weakly-referenced object has an un-+initialized isa.
298 if (HaveNew && newObj) {
299 Class cls = newObj->getIsa();
300 if (cls != previouslyInitializedClass &&
301 !((objc_class *)cls)->isInitialized())
302 {
303 SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
304 _class_initialize(_class_getNonMetaClass(cls, (id)newObj));
305
306             // If this class is finished with +initialize then we're good.
307             // If this class is still running +initialize on this thread
308             // (i.e. +initialize called storeWeak on an instance of itself)
309             // then we may proceed, but the check above will still see it
310             // as initializing rather than initialized.
311             // Set previouslyInitializedClass so the retry recognizes this case.
312 previouslyInitializedClass = cls;
313
314 goto retry;
315 }
316 }
317
318 // Clean up old value, if any.
319 if (HaveOld) {
320 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
321 }
322
323 // Assign new value, if any.
324 if (HaveNew) {
325 newObj = (objc_object *)weak_register_no_lock(&newTable->weak_table,
326 (id)newObj, location,
327 CrashIfDeallocating);
328 // weak_register_no_lock returns nil if weak store should be rejected
329
330 // Set is-weakly-referenced bit in refcount table.
331 if (newObj && !newObj->isTaggedPointer()) {
332 newObj->setWeaklyReferenced_nolock();
333 }
334
335 // Do not set *location anywhere else. That would introduce a race.
336 *location = (id)newObj;
337 }
338 else {
339 // No new value. The storage is not changed.
340 }
341
342 SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
343
344 return (id)newObj;
345 }
346
347
348 /**
349 * This function stores a new value into a __weak variable. It would
350 * be used anywhere a __weak variable is the target of an assignment.
351 *
352 * @param location The address of the weak pointer itself
353 * @param newObj The new object this weak ptr should now point to
354 *
355 * @return \e newObj
356 */
357 id
358 objc_storeWeak(id *location, id newObj)
359 {
360 return storeWeak<true/*old*/, true/*new*/, true/*crash*/>
361 (location, (objc_object *)newObj);
362 }
363
364
365 /**
366 * This function stores a new value into a __weak variable.
367 * If the new object is deallocating or the new object's class
368 * does not support weak references, stores nil instead.
369 *
370 * @param location The address of the weak pointer itself
371 * @param newObj The new object this weak ptr should now point to
372 *
373 * @return The value stored (either the new object or nil)
374 */
375 id
376 objc_storeWeakOrNil(id *location, id newObj)
377 {
378 return storeWeak<true/*old*/, true/*new*/, false/*crash*/>
379 (location, (objc_object *)newObj);
380 }
381
382
383 /**
384 * Initialize a fresh weak pointer to some object location.
385 * It would be used for code like:
386 *
387 * (The nil case)
388 * __weak id weakPtr;
389 * (The non-nil case)
390 * NSObject *o = ...;
391 * __weak id weakPtr = o;
392 *
393 * This function IS NOT thread-safe with respect to concurrent
394 * modifications to the weak variable. (Concurrent weak clear is safe.)
395 *
396 * @param location Address of __weak ptr.
397 * @param newObj Object ptr.
398 */
399 id
400 objc_initWeak(id *location, id newObj)
401 {
402 if (!newObj) {
403 *location = nil;
404 return nil;
405 }
406
407 return storeWeak<false/*old*/, true/*new*/, true/*crash*/>
408 (location, (objc_object*)newObj);
409 }
410
411 id
412 objc_initWeakOrNil(id *location, id newObj)
413 {
414 if (!newObj) {
415 *location = nil;
416 return nil;
417 }
418
419 return storeWeak<false/*old*/, true/*new*/, false/*crash*/>
420 (location, (objc_object*)newObj);
421 }
422
423
424 /**
425 * Destroys the relationship between a weak pointer
426 * and the object it is referencing in the internal weak
427 * table. If the weak pointer is not referencing anything,
428 * there is no need to edit the weak table.
429 *
430 * This function IS NOT thread-safe with respect to concurrent
431 * modifications to the weak variable. (Concurrent weak clear is safe.)
432 *
433 * @param location The weak pointer address.
434 */
435 void
436 objc_destroyWeak(id *location)
437 {
438 (void)storeWeak<true/*old*/, false/*new*/, false/*crash*/>
439 (location, nil);
440 }
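// Editorial sketch (not part of the runtime): how a local __weak variable
// maps onto the entry points above under ARC (compiler output varies; this
// is only an approximation):
//
//     {
//         __weak id w = obj;      // objc_initWeak(&w, obj)
//         w = other;              // objc_storeWeak(&w, other)
//         ...
//     }                           // objc_destroyWeak(&w) at end of scope
//
// obj and other are hypothetical strong references used only for illustration.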
441
442
443 id
444 objc_loadWeakRetained(id *location)
445 {
446 id result;
447
448 SideTable *table;
449
450 retry:
451 result = *location;
452 if (!result) return nil;
453
454 table = &SideTables()[result];
455
456 table->lock();
457 if (*location != result) {
458 table->unlock();
459 goto retry;
460 }
461
462 result = weak_read_no_lock(&table->weak_table, location);
463
464 table->unlock();
465 return result;
466 }
467
468 /**
469 * This loads the object referenced by a weak pointer and returns it, after
470 * retaining and autoreleasing the object to ensure that it stays alive
471 * long enough for the caller to use it. This function would be used
472 * anywhere a __weak variable is used in an expression.
473 *
474 * @param location The weak pointer address
475 *
476 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
477 */
478 id
479 objc_loadWeak(id *location)
480 {
481 if (!*location) return nil;
482 return objc_autorelease(objc_loadWeakRetained(location));
483 }
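// Editorial sketch (not part of the runtime): reading a __weak variable in an
// expression is compiled as a retained load so the object cannot be
// deallocated mid-expression; roughly:
//
//     [w doSomething];
// becomes
//     id tmp = objc_loadWeakRetained(&w);
//     [tmp doSomething];
//     objc_release(tmp);
//
// objc_loadWeak itself, defined above, is the retain+autorelease variant.
// doSomething is a hypothetical selector used only for illustration.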
484
485
486 /**
487 * This function copies a weak pointer from one location to another,
488 * when the destination doesn't already contain a weak pointer. It
489 * would be used for code like:
490 *
491 * __weak id src = ...;
492 * __weak id dst = src;
493 *
494 * This function IS NOT thread-safe with respect to concurrent
495 * modifications to the destination variable. (Concurrent weak clear is safe.)
496 *
497 * @param dst The destination variable.
498 * @param src The source variable.
499 */
500 void
501 objc_copyWeak(id *dst, id *src)
502 {
503 id obj = objc_loadWeakRetained(src);
504 objc_initWeak(dst, obj);
505 objc_release(obj);
506 }
507
508 /**
509 * Move a weak pointer from one location to another.
510 * Before the move, the destination must be uninitialized.
511 * After the move, the source is nil.
512 *
513 * This function IS NOT thread-safe with respect to concurrent
514 * modifications to either weak variable. (Concurrent weak clear is safe.)
515 *
516 */
517 void
518 objc_moveWeak(id *dst, id *src)
519 {
520 objc_copyWeak(dst, src);
521 objc_destroyWeak(src);
522 *src = nil;
523 }
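// Editorial sketch (not part of the runtime): the observable difference
// between the two helpers above (the compiler normally emits these calls
// for you):
//
//     __weak id a = obj;
//     __weak id b;                // uninitialized destination
//     objc_copyWeak(&b, &a);      // now a and b both track obj
//
//     __weak id c;                // uninitialized destination
//     objc_moveWeak(&c, &a);      // c tracks obj; a is cleared to nil
//                                 // and unregistered from the weak table
//
// obj is a hypothetical strong reference used only for illustration.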
524
525
526 /***********************************************************************
527 Autorelease pool implementation
528
529 A thread's autorelease pool is a stack of pointers.
530 Each pointer is either an object to release, or POOL_SENTINEL which is
531 an autorelease pool boundary.
532 A pool token is a pointer to the POOL_SENTINEL for that pool. When
533 the pool is popped, every object hotter than the sentinel is released.
534 The stack is divided into a doubly-linked list of pages. Pages are added
535 and deleted as necessary.
536 Thread-local storage points to the hot page, where newly autoreleased
537 objects are stored.
538 **********************************************************************/
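// Editorial sketch (not part of the runtime): how the page stack described
// above is driven from user code. An @autoreleasepool block compiles roughly
// to:
//
//     void *token = objc_autoreleasePoolPush();   // pushes POOL_SENTINEL and
//                                                 // returns its address
//     ...                                         // objc_autorelease(obj)
//                                                 // appends obj to the hot page
//     objc_autoreleasePoolPop(token);             // releases every object
//                                                 // pushed after the sentinel
//
// Pages are SIZE-byte aligned, so pop() can recover the owning page from the
// token with pageForPointer() (p - p % SIZE) and then walk back, releasing
// entries, until it reaches the sentinel.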
539
540 BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
541
542 namespace {
543
544 struct magic_t {
545 static const uint32_t M0 = 0xA1A1A1A1;
546 # define M1 "AUTORELEASE!"
547 static const size_t M1_len = 12;
548 uint32_t m[4];
549
550 magic_t() {
551 assert(M1_len == strlen(M1));
552 assert(M1_len == 3 * sizeof(m[1]));
553
554 m[0] = M0;
555 strncpy((char *)&m[1], M1, M1_len);
556 }
557
558 ~magic_t() {
559 m[0] = m[1] = m[2] = m[3] = 0;
560 }
561
562 bool check() const {
563 return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
564 }
565
566 bool fastcheck() const {
567 #if DEBUG
568 return check();
569 #else
570 return (m[0] == M0);
571 #endif
572 }
573
574 # undef M1
575 };
576
577
578 // Set this to 1 to mprotect() autorelease pool contents
579 #define PROTECT_AUTORELEASEPOOL 0
580
581 class AutoreleasePoolPage
582 {
583
584 #define POOL_SENTINEL nil
585 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
586 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
587 static size_t const SIZE =
588 #if PROTECT_AUTORELEASEPOOL
589 PAGE_MAX_SIZE; // must be multiple of vm page size
590 #else
591 PAGE_MAX_SIZE; // size and alignment, power of 2
592 #endif
593 static size_t const COUNT = SIZE / sizeof(id);
594
595 magic_t const magic;
596 id *next;
597 pthread_t const thread;
598 AutoreleasePoolPage * const parent;
599 AutoreleasePoolPage *child;
600 uint32_t const depth;
601 uint32_t hiwat;
602
603 // SIZE-sizeof(*this) bytes of contents follow
604
605 static void * operator new(size_t size) {
606 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
607 }
608 static void operator delete(void * p) {
609 return free(p);
610 }
611
612 inline void protect() {
613 #if PROTECT_AUTORELEASEPOOL
614 mprotect(this, SIZE, PROT_READ);
615 check();
616 #endif
617 }
618
619 inline void unprotect() {
620 #if PROTECT_AUTORELEASEPOOL
621 check();
622 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
623 #endif
624 }
625
626 AutoreleasePoolPage(AutoreleasePoolPage *newParent)
627 : magic(), next(begin()), thread(pthread_self()),
628 parent(newParent), child(nil),
629 depth(parent ? 1+parent->depth : 0),
630 hiwat(parent ? parent->hiwat : 0)
631 {
632 if (parent) {
633 parent->check();
634 assert(!parent->child);
635 parent->unprotect();
636 parent->child = this;
637 parent->protect();
638 }
639 protect();
640 }
641
642 ~AutoreleasePoolPage()
643 {
644 check();
645 unprotect();
646 assert(empty());
647
648 // Not recursive: we don't want to blow out the stack
649 // if a thread accumulates a stupendous amount of garbage
650 assert(!child);
651 }
652
653
654 void busted(bool die = true)
655 {
656 magic_t right;
657 (die ? _objc_fatal : _objc_inform)
658 ("autorelease pool page %p corrupted\n"
659 " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
660 " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
661 " pthread %p\n"
662 " should be %p\n",
663 this,
664 magic.m[0], magic.m[1], magic.m[2], magic.m[3],
665 right.m[0], right.m[1], right.m[2], right.m[3],
666 this->thread, pthread_self());
667 }
668
669 void check(bool die = true)
670 {
671 if (!magic.check() || !pthread_equal(thread, pthread_self())) {
672 busted(die);
673 }
674 }
675
676 void fastcheck(bool die = true)
677 {
678 if (! magic.fastcheck()) {
679 busted(die);
680 }
681 }
682
683
684 id * begin() {
685 return (id *) ((uint8_t *)this+sizeof(*this));
686 }
687
688 id * end() {
689 return (id *) ((uint8_t *)this+SIZE);
690 }
691
692 bool empty() {
693 return next == begin();
694 }
695
696 bool full() {
697 return next == end();
698 }
699
700 bool lessThanHalfFull() {
701 return (next - begin() < (end() - begin()) / 2);
702 }
703
704 id *add(id obj)
705 {
706 assert(!full());
707 unprotect();
708 id *ret = next; // faster than `return next-1` because of aliasing
709 *next++ = obj;
710 protect();
711 return ret;
712 }
713
714 void releaseAll()
715 {
716 releaseUntil(begin());
717 }
718
719 void releaseUntil(id *stop)
720 {
721 // Not recursive: we don't want to blow out the stack
722 // if a thread accumulates a stupendous amount of garbage
723
724 while (this->next != stop) {
725 // Restart from hotPage() every time, in case -release
726 // autoreleased more objects
727 AutoreleasePoolPage *page = hotPage();
728
729 // fixme I think this `while` can be `if`, but I can't prove it
730 while (page->empty()) {
731 page = page->parent;
732 setHotPage(page);
733 }
734
735 page->unprotect();
736 id obj = *--page->next;
737 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
738 page->protect();
739
740 if (obj != POOL_SENTINEL) {
741 objc_release(obj);
742 }
743 }
744
745 setHotPage(this);
746
747 #if DEBUG
748 // we expect any children to be completely empty
749 for (AutoreleasePoolPage *page = child; page; page = page->child) {
750 assert(page->empty());
751 }
752 #endif
753 }
754
755 void kill()
756 {
757 // Not recursive: we don't want to blow out the stack
758 // if a thread accumulates a stupendous amount of garbage
759 AutoreleasePoolPage *page = this;
760 while (page->child) page = page->child;
761
762 AutoreleasePoolPage *deathptr;
763 do {
764 deathptr = page;
765 page = page->parent;
766 if (page) {
767 page->unprotect();
768 page->child = nil;
769 page->protect();
770 }
771 delete deathptr;
772 } while (deathptr != this);
773 }
774
775 static void tls_dealloc(void *p)
776 {
777 // reinstate TLS value while we work
778 setHotPage((AutoreleasePoolPage *)p);
779
780 if (AutoreleasePoolPage *page = coldPage()) {
781 if (!page->empty()) pop(page->begin()); // pop all of the pools
782 if (DebugMissingPools || DebugPoolAllocation) {
783 // pop() killed the pages already
784 } else {
785 page->kill(); // free all of the pages
786 }
787 }
788
789 // clear TLS value so TLS destruction doesn't loop
790 setHotPage(nil);
791 }
792
793 static AutoreleasePoolPage *pageForPointer(const void *p)
794 {
795 return pageForPointer((uintptr_t)p);
796 }
797
798 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
799 {
800 AutoreleasePoolPage *result;
801 uintptr_t offset = p % SIZE;
802
803 assert(offset >= sizeof(AutoreleasePoolPage));
804
805 result = (AutoreleasePoolPage *)(p - offset);
806 result->fastcheck();
807
808 return result;
809 }
810
811
812 static inline AutoreleasePoolPage *hotPage()
813 {
814 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
815 tls_get_direct(key);
816 if (result) result->fastcheck();
817 return result;
818 }
819
820 static inline void setHotPage(AutoreleasePoolPage *page)
821 {
822 if (page) page->fastcheck();
823 tls_set_direct(key, (void *)page);
824 }
825
826 static inline AutoreleasePoolPage *coldPage()
827 {
828 AutoreleasePoolPage *result = hotPage();
829 if (result) {
830 while (result->parent) {
831 result = result->parent;
832 result->fastcheck();
833 }
834 }
835 return result;
836 }
837
838
839 static inline id *autoreleaseFast(id obj)
840 {
841 AutoreleasePoolPage *page = hotPage();
842 if (page && !page->full()) {
843 return page->add(obj);
844 } else if (page) {
845 return autoreleaseFullPage(obj, page);
846 } else {
847 return autoreleaseNoPage(obj);
848 }
849 }
850
851 static __attribute__((noinline))
852 id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
853 {
854 // The hot page is full.
855 // Step to the next non-full page, adding a new page if necessary.
856 // Then add the object to that page.
857 assert(page == hotPage());
858 assert(page->full() || DebugPoolAllocation);
859
860 do {
861 if (page->child) page = page->child;
862 else page = new AutoreleasePoolPage(page);
863 } while (page->full());
864
865 setHotPage(page);
866 return page->add(obj);
867 }
868
869 static __attribute__((noinline))
870 id *autoreleaseNoPage(id obj)
871 {
872 // No pool in place.
873 assert(!hotPage());
874
875 if (obj != POOL_SENTINEL && DebugMissingPools) {
876 // We are pushing an object with no pool in place,
877 // and no-pool debugging was requested by environment.
878 _objc_inform("MISSING POOLS: Object %p of class %s "
879 "autoreleased with no pool in place - "
880 "just leaking - break on "
881 "objc_autoreleaseNoPool() to debug",
882 (void*)obj, object_getClassName(obj));
883 objc_autoreleaseNoPool(obj);
884 return nil;
885 }
886
887 // Install the first page.
888 AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
889 setHotPage(page);
890
891 // Push an autorelease pool boundary if it wasn't already requested.
892 if (obj != POOL_SENTINEL) {
893 page->add(POOL_SENTINEL);
894 }
895
896 // Push the requested object.
897 return page->add(obj);
898 }
899
900
901 static __attribute__((noinline))
902 id *autoreleaseNewPage(id obj)
903 {
904 AutoreleasePoolPage *page = hotPage();
905 if (page) return autoreleaseFullPage(obj, page);
906 else return autoreleaseNoPage(obj);
907 }
908
909 public:
910 static inline id autorelease(id obj)
911 {
912 assert(obj);
913 assert(!obj->isTaggedPointer());
914 id *dest __unused = autoreleaseFast(obj);
915 assert(!dest || *dest == obj);
916 return obj;
917 }
918
919
920 static inline void *push()
921 {
922 id *dest;
923 if (DebugPoolAllocation) {
924 // Each autorelease pool starts on a new pool page.
925 dest = autoreleaseNewPage(POOL_SENTINEL);
926 } else {
927 dest = autoreleaseFast(POOL_SENTINEL);
928 }
929 assert(*dest == POOL_SENTINEL);
930 return dest;
931 }
932
933 static inline void pop(void *token)
934 {
935 AutoreleasePoolPage *page;
936 id *stop;
937
938 page = pageForPointer(token);
939 stop = (id *)token;
940 if (DebugPoolAllocation && *stop != POOL_SENTINEL) {
941 // This check is not valid with DebugPoolAllocation off
942 // after an autorelease with a pool page but no pool in place.
943 _objc_fatal("invalid or prematurely-freed autorelease pool %p; ",
944 token);
945 }
946
947 if (PrintPoolHiwat) printHiwat();
948
949 page->releaseUntil(stop);
950
951 // memory: delete empty children
952 if (DebugPoolAllocation && page->empty()) {
953 // special case: delete everything during page-per-pool debugging
954 AutoreleasePoolPage *parent = page->parent;
955 page->kill();
956 setHotPage(parent);
957 } else if (DebugMissingPools && page->empty() && !page->parent) {
958 // special case: delete everything for pop(top)
959 // when debugging missing autorelease pools
960 page->kill();
961 setHotPage(nil);
962 }
963 else if (page->child) {
964 // hysteresis: keep one empty child if page is more than half full
965 if (page->lessThanHalfFull()) {
966 page->child->kill();
967 }
968 else if (page->child->child) {
969 page->child->child->kill();
970 }
971 }
972 }
973
974 static void init()
975 {
976 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
977 AutoreleasePoolPage::tls_dealloc);
978 assert(r == 0);
979 }
980
981 void print()
982 {
983 _objc_inform("[%p] ................ PAGE %s %s %s", this,
984 full() ? "(full)" : "",
985 this == hotPage() ? "(hot)" : "",
986 this == coldPage() ? "(cold)" : "");
987 check(false);
988 for (id *p = begin(); p < next; p++) {
989 if (*p == POOL_SENTINEL) {
990 _objc_inform("[%p] ################ POOL %p", p, p);
991 } else {
992 _objc_inform("[%p] %#16lx %s",
993 p, (unsigned long)*p, object_getClassName(*p));
994 }
995 }
996 }
997
998 static void printAll()
999 {
1000 _objc_inform("##############");
1001 _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());
1002
1003 AutoreleasePoolPage *page;
1004 ptrdiff_t objects = 0;
1005 for (page = coldPage(); page; page = page->child) {
1006 objects += page->next - page->begin();
1007 }
1008 _objc_inform("%llu releases pending.", (unsigned long long)objects);
1009
1010 for (page = coldPage(); page; page = page->child) {
1011 page->print();
1012 }
1013
1014 _objc_inform("##############");
1015 }
1016
1017 static void printHiwat()
1018 {
1019 // Check and propagate high water mark
1020 // Ignore high water marks under 256 to suppress noise.
1021 AutoreleasePoolPage *p = hotPage();
1022 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
1023 if (mark > p->hiwat && mark > 256) {
1024 for( ; p; p = p->parent) {
1025 p->unprotect();
1026 p->hiwat = mark;
1027 p->protect();
1028 }
1029
1030 _objc_inform("POOL HIGHWATER: new high water mark of %u "
1031 "pending autoreleases for thread %p:",
1032 mark, pthread_self());
1033
1034 void *stack[128];
1035 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
1036 char **sym = backtrace_symbols(stack, count);
1037 for (int i = 0; i < count; i++) {
1038 _objc_inform("POOL HIGHWATER: %s", sym[i]);
1039 }
1040 free(sym);
1041 }
1042 }
1043
1044 #undef POOL_SENTINEL
1045 };
1046
1047 // anonymous namespace
1048 };
1049
1050
1051 /***********************************************************************
1052 * Slow paths for inline control
1053 **********************************************************************/
1054
1055 #if SUPPORT_NONPOINTER_ISA
1056
1057 NEVER_INLINE id
1058 objc_object::rootRetain_overflow(bool tryRetain)
1059 {
1060 return rootRetain(tryRetain, true);
1061 }
1062
1063
1064 NEVER_INLINE bool
1065 objc_object::rootRelease_underflow(bool performDealloc)
1066 {
1067 return rootRelease(performDealloc, true);
1068 }
1069
1070
1071 // Slow path of clearDeallocating()
1072 // for objects with indexed isa
1073 // that were ever weakly referenced
1074 // or whose retain count ever overflowed to the side table.
1075 NEVER_INLINE void
1076 objc_object::clearDeallocating_slow()
1077 {
1078 assert(isa.indexed && (isa.weakly_referenced || isa.has_sidetable_rc));
1079
1080 SideTable& table = SideTables()[this];
1081 table.lock();
1082 if (isa.weakly_referenced) {
1083 weak_clear_no_lock(&table.weak_table, (id)this);
1084 }
1085 if (isa.has_sidetable_rc) {
1086 table.refcnts.erase(this);
1087 }
1088 table.unlock();
1089 }
1090
1091 #endif
1092
1093 __attribute__((noinline,used))
1094 id
1095 objc_object::rootAutorelease2()
1096 {
1097 assert(!isTaggedPointer());
1098 return AutoreleasePoolPage::autorelease((id)this);
1099 }
1100
1101
1102 BREAKPOINT_FUNCTION(
1103 void objc_overrelease_during_dealloc_error(void)
1104 );
1105
1106
1107 NEVER_INLINE
1108 bool
1109 objc_object::overrelease_error()
1110 {
1111 _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
1112 objc_overrelease_during_dealloc_error();
1113 return false; // allow rootRelease() to tail-call this
1114 }
1115
1116
1117 /***********************************************************************
1118 * Retain count operations for side table.
1119 **********************************************************************/
1120
1121
1122 #if DEBUG
1123 // Used to assert that an object is not present in the side table.
1124 bool
1125 objc_object::sidetable_present()
1126 {
1127 bool result = false;
1128 SideTable& table = SideTables()[this];
1129
1130 table.lock();
1131
1132 RefcountMap::iterator it = table.refcnts.find(this);
1133 if (it != table.refcnts.end()) result = true;
1134
1135 if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;
1136
1137 table.unlock();
1138
1139 return result;
1140 }
1141 #endif
1142
1143 #if SUPPORT_NONPOINTER_ISA
1144
1145 void
1146 objc_object::sidetable_lock()
1147 {
1148 SideTable& table = SideTables()[this];
1149 table.lock();
1150 }
1151
1152 void
1153 objc_object::sidetable_unlock()
1154 {
1155 SideTable& table = SideTables()[this];
1156 table.unlock();
1157 }
1158
1159
1160 // Move the entire retain count to the side table,
1161 // as well as isDeallocating and weaklyReferenced.
1162 void
1163 objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
1164 bool isDeallocating,
1165 bool weaklyReferenced)
1166 {
1167 assert(!isa.indexed); // should already be changed to not-indexed
1168 SideTable& table = SideTables()[this];
1169
1170 size_t& refcntStorage = table.refcnts[this];
1171 size_t oldRefcnt = refcntStorage;
1172 // not deallocating - that was in the isa
1173 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1174 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1175
1176 uintptr_t carry;
1177 size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1178 if (carry) refcnt = SIDE_TABLE_RC_PINNED;
1179 if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
1180 if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
1181
1182 refcntStorage = refcnt;
1183 }
1184
1185
1186 // Move some retain counts to the side table from the isa field.
1187 // Returns true if the object is now pinned.
1188 bool
1189 objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
1190 {
1191 assert(isa.indexed);
1192 SideTable& table = SideTables()[this];
1193
1194 size_t& refcntStorage = table.refcnts[this];
1195 size_t oldRefcnt = refcntStorage;
1196 // isa-side bits should not be set here
1197 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1198 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1199
1200 if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
1201
1202 uintptr_t carry;
1203 size_t newRefcnt =
1204 addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1205 if (carry) {
1206 refcntStorage =
1207 SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
1208 return true;
1209 }
1210 else {
1211 refcntStorage = newRefcnt;
1212 return false;
1213 }
1214 }
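// Editorial sketch (not part of the runtime): the carry handling above in
// concrete numbers, assuming a 64-bit word and SIDE_TABLE_RC_SHIFT == 2.
// A delta_rc of 1 adds SIDE_TABLE_RC_ONE (4) to the stored word, leaving the
// two flag bits untouched. If that addition carries out of the word, the
// count is clamped: the word becomes SIDE_TABLE_RC_PINNED plus the original
// flag bits, and every later call returns early on the pinned check above,
// so the side-table count never moves again.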
1215
1216
1217 // Move some retain counts from the side table to the isa field.
1218 // Returns the actual count subtracted, which may be less than the request.
1219 size_t
1220 objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
1221 {
1222 assert(isa.indexed);
1223 SideTable& table = SideTables()[this];
1224
1225 RefcountMap::iterator it = table.refcnts.find(this);
1226 if (it == table.refcnts.end() || it->second == 0) {
1227 // Side table retain count is zero. Can't borrow.
1228 return 0;
1229 }
1230 size_t oldRefcnt = it->second;
1231
1232 // isa-side bits should not be set here
1233 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1234 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1235
1236 size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
1237 assert(oldRefcnt > newRefcnt); // shouldn't underflow
1238 it->second = newRefcnt;
1239 return delta_rc;
1240 }
1241
1242
1243 size_t
1244 objc_object::sidetable_getExtraRC_nolock()
1245 {
1246 assert(isa.indexed);
1247 SideTable& table = SideTables()[this];
1248 RefcountMap::iterator it = table.refcnts.find(this);
1249 if (it == table.refcnts.end()) return 0;
1250 else return it->second >> SIDE_TABLE_RC_SHIFT;
1251 }
1252
1253
1254 // SUPPORT_NONPOINTER_ISA
1255 #endif
1256
1257
1258 __attribute__((used,noinline,nothrow))
1259 id
1260 objc_object::sidetable_retain_slow(SideTable& table)
1261 {
1262 #if SUPPORT_NONPOINTER_ISA
1263 assert(!isa.indexed);
1264 #endif
1265
1266 table.lock();
1267 size_t& refcntStorage = table.refcnts[this];
1268 if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
1269 refcntStorage += SIDE_TABLE_RC_ONE;
1270 }
1271 table.unlock();
1272
1273 return (id)this;
1274 }
1275
1276
1277 id
1278 objc_object::sidetable_retain()
1279 {
1280 #if SUPPORT_NONPOINTER_ISA
1281 assert(!isa.indexed);
1282 #endif
1283 SideTable& table = SideTables()[this];
1284
1285 if (table.trylock()) {
1286 size_t& refcntStorage = table.refcnts[this];
1287 if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
1288 refcntStorage += SIDE_TABLE_RC_ONE;
1289 }
1290 table.unlock();
1291 return (id)this;
1292 }
1293 return sidetable_retain_slow(table);
1294 }
1295
1296
1297 bool
1298 objc_object::sidetable_tryRetain()
1299 {
1300 #if SUPPORT_NONPOINTER_ISA
1301 assert(!isa.indexed);
1302 #endif
1303 SideTable& table = SideTables()[this];
1304
1305 // NO SPINLOCK HERE
1306 // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
1307 // which already acquired the lock on our behalf.
1308
1309 // fixme can't do this efficiently with os_lock_handoff_s
1310 // if (table.slock == 0) {
1311 // _objc_fatal("Do not call -_tryRetain.");
1312 // }
1313
1314 bool result = true;
1315 RefcountMap::iterator it = table.refcnts.find(this);
1316 if (it == table.refcnts.end()) {
1317 table.refcnts[this] = SIDE_TABLE_RC_ONE;
1318 } else if (it->second & SIDE_TABLE_DEALLOCATING) {
1319 result = false;
1320 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1321 it->second += SIDE_TABLE_RC_ONE;
1322 }
1323
1324 return result;
1325 }
1326
1327
1328 uintptr_t
1329 objc_object::sidetable_retainCount()
1330 {
1331 SideTable& table = SideTables()[this];
1332
1333 size_t refcnt_result = 1;
1334
1335 table.lock();
1336 RefcountMap::iterator it = table.refcnts.find(this);
1337 if (it != table.refcnts.end()) {
1338 // this is valid for SIDE_TABLE_RC_PINNED too
1339 refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
1340 }
1341 table.unlock();
1342 return refcnt_result;
1343 }
1344
1345
1346 bool
1347 objc_object::sidetable_isDeallocating()
1348 {
1349 SideTable& table = SideTables()[this];
1350
1351 // NO SPINLOCK HERE
1352 // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
1353 // which already acquired the lock on our behalf.
1354
1355
1356 // fixme can't do this efficiently with os_lock_handoff_s
1357 // if (table.slock == 0) {
1358 // _objc_fatal("Do not call -_isDeallocating.");
1359 // }
1360
1361 RefcountMap::iterator it = table.refcnts.find(this);
1362 return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
1363 }
1364
1365
1366 bool
1367 objc_object::sidetable_isWeaklyReferenced()
1368 {
1369 bool result = false;
1370
1371 SideTable& table = SideTables()[this];
1372 table.lock();
1373
1374 RefcountMap::iterator it = table.refcnts.find(this);
1375 if (it != table.refcnts.end()) {
1376 result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
1377 }
1378
1379 table.unlock();
1380
1381 return result;
1382 }
1383
1384
1385 void
1386 objc_object::sidetable_setWeaklyReferenced_nolock()
1387 {
1388 #if SUPPORT_NONPOINTER_ISA
1389 assert(!isa.indexed);
1390 #endif
1391
1392 SideTable& table = SideTables()[this];
1393
1394 table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
1395 }
1396
1397
1398 // rdar://20206767
1399 // return uintptr_t instead of bool so that the various raw-isa
1400 // -release paths all return zero in eax
1401 __attribute__((used,noinline,nothrow))
1402 uintptr_t
1403 objc_object::sidetable_release_slow(SideTable& table, bool performDealloc)
1404 {
1405 #if SUPPORT_NONPOINTER_ISA
1406 assert(!isa.indexed);
1407 #endif
1408 bool do_dealloc = false;
1409
1410 table.lock();
1411 RefcountMap::iterator it = table.refcnts.find(this);
1412 if (it == table.refcnts.end()) {
1413 do_dealloc = true;
1414 table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
1415 } else if (it->second < SIDE_TABLE_DEALLOCATING) {
1416 // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
1417 do_dealloc = true;
1418 it->second |= SIDE_TABLE_DEALLOCATING;
1419 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1420 it->second -= SIDE_TABLE_RC_ONE;
1421 }
1422 table.unlock();
1423 if (do_dealloc && performDealloc) {
1424 ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
1425 }
1426 return do_dealloc;
1427 }
1428
1429
1430 // rdar://20206767
1431 // return uintptr_t instead of bool so that the various raw-isa
1432 // -release paths all return zero in eax
1433 uintptr_t
1434 objc_object::sidetable_release(bool performDealloc)
1435 {
1436 #if SUPPORT_NONPOINTER_ISA
1437 assert(!isa.indexed);
1438 #endif
1439 SideTable& table = SideTables()[this];
1440
1441 bool do_dealloc = false;
1442
1443 if (table.trylock()) {
1444 RefcountMap::iterator it = table.refcnts.find(this);
1445 if (it == table.refcnts.end()) {
1446 do_dealloc = true;
1447 table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
1448 } else if (it->second < SIDE_TABLE_DEALLOCATING) {
1449 // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
1450 do_dealloc = true;
1451 it->second |= SIDE_TABLE_DEALLOCATING;
1452 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1453 it->second -= SIDE_TABLE_RC_ONE;
1454 }
1455 table.unlock();
1456 if (do_dealloc && performDealloc) {
1457 ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
1458 }
1459 return do_dealloc;
1460 }
1461
1462 return sidetable_release_slow(table, performDealloc);
1463 }
1464
1465
1466 void
1467 objc_object::sidetable_clearDeallocating()
1468 {
1469 SideTable& table = SideTables()[this];
1470
1471 // clear any weak table items
1472 // clear extra retain count and deallocating bit
1473 // (fixme warn or abort if extra retain count == 0 ?)
1474 table.lock();
1475 RefcountMap::iterator it = table.refcnts.find(this);
1476 if (it != table.refcnts.end()) {
1477 if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
1478 weak_clear_no_lock(&table.weak_table, (id)this);
1479 }
1480 table.refcnts.erase(it);
1481 }
1482 table.unlock();
1483 }
1484
1485
1486 /***********************************************************************
1487 * Optimized retain/release/autorelease entrypoints
1488 **********************************************************************/
1489
1490
1491 #if __OBJC2__
1492
1493 __attribute__((aligned(16)))
1494 id
1495 objc_retain(id obj)
1496 {
1497 if (!obj) return obj;
1498 if (obj->isTaggedPointer()) return obj;
1499 return obj->retain();
1500 }
1501
1502
1503 __attribute__((aligned(16)))
1504 void
1505 objc_release(id obj)
1506 {
1507 if (!obj) return;
1508 if (obj->isTaggedPointer()) return;
1509 return obj->release();
1510 }
1511
1512
1513 __attribute__((aligned(16)))
1514 id
1515 objc_autorelease(id obj)
1516 {
1517 if (!obj) return obj;
1518 if (obj->isTaggedPointer()) return obj;
1519 return obj->autorelease();
1520 }
1521
1522
1523 // OBJC2
1524 #else
1525 // not OBJC2
1526
1527
1528 id objc_retain(id obj) { return [obj retain]; }
1529 void objc_release(id obj) { [obj release]; }
1530 id objc_autorelease(id obj) { return [obj autorelease]; }
1531
1532
1533 #endif
1534
1535
1536 /***********************************************************************
1537 * Basic operations for root class implementations a.k.a. _objc_root*()
1538 **********************************************************************/
1539
1540 bool
1541 _objc_rootTryRetain(id obj)
1542 {
1543 assert(obj);
1544
1545 return obj->rootTryRetain();
1546 }
1547
1548 bool
1549 _objc_rootIsDeallocating(id obj)
1550 {
1551 assert(obj);
1552
1553 return obj->rootIsDeallocating();
1554 }
1555
1556
1557 void
1558 objc_clear_deallocating(id obj)
1559 {
1560 assert(obj);
1561 assert(!UseGC);
1562
1563 if (obj->isTaggedPointer()) return;
1564 obj->clearDeallocating();
1565 }
1566
1567
1568 bool
1569 _objc_rootReleaseWasZero(id obj)
1570 {
1571 assert(obj);
1572
1573 return obj->rootReleaseShouldDealloc();
1574 }
1575
1576
1577 id
1578 _objc_rootAutorelease(id obj)
1579 {
1580 assert(obj);
1581 // assert(!UseGC);
1582 if (UseGC) return obj; // fixme CF calls this when GC is on
1583
1584 return obj->rootAutorelease();
1585 }
1586
1587 uintptr_t
1588 _objc_rootRetainCount(id obj)
1589 {
1590 assert(obj);
1591
1592 return obj->rootRetainCount();
1593 }
1594
1595
1596 id
1597 _objc_rootRetain(id obj)
1598 {
1599 assert(obj);
1600
1601 return obj->rootRetain();
1602 }
1603
1604 void
1605 _objc_rootRelease(id obj)
1606 {
1607 assert(obj);
1608
1609 obj->rootRelease();
1610 }
1611
1612
1613 id
1614 _objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
1615 {
1616 id obj;
1617
1618 #if __OBJC2__
1619 // allocWithZone under __OBJC2__ ignores the zone parameter
1620 (void)zone;
1621 obj = class_createInstance(cls, 0);
1622 #else
1623 if (!zone || UseGC) {
1624 obj = class_createInstance(cls, 0);
1625 }
1626 else {
1627 obj = class_createInstanceFromZone(cls, 0, zone);
1628 }
1629 #endif
1630
1631 if (!obj) obj = callBadAllocHandler(cls);
1632 return obj;
1633 }
1634
1635
1636 // Call [cls alloc] or [cls allocWithZone:nil], with appropriate
1637 // shortcutting optimizations.
1638 static ALWAYS_INLINE id
1639 callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
1640 {
1641 if (checkNil && !cls) return nil;
1642
1643 #if __OBJC2__
1644 if (! cls->ISA()->hasCustomAWZ()) {
1645 // No alloc/allocWithZone implementation. Go straight to the allocator.
1646 // fixme store hasCustomAWZ in the non-meta class and
1647 // add it to canAllocFast's summary
1648 if (cls->canAllocFast()) {
1649 // No ctors, raw isa, etc. Go straight to the metal.
1650 bool dtor = cls->hasCxxDtor();
1651 id obj = (id)calloc(1, cls->bits.fastInstanceSize());
1652 if (!obj) return callBadAllocHandler(cls);
1653 obj->initInstanceIsa(cls, dtor);
1654 return obj;
1655 }
1656 else {
1657 // Has ctor or raw isa or something. Use the slower path.
1658 id obj = class_createInstance(cls, 0);
1659 if (!obj) return callBadAllocHandler(cls);
1660 return obj;
1661 }
1662 }
1663 #endif
1664
1665 // No shortcuts available.
1666 if (allocWithZone) return [cls allocWithZone:nil];
1667 return [cls alloc];
1668 }
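// Editorial sketch (not part of the runtime): what the fast path above means
// for a plain +alloc. For a class with no overridden +alloc/+allocWithZone:
// (hasCustomAWZ() is false) that also satisfies canAllocFast(), [Foo alloc]
// reduces to roughly:
//
//     id obj = (id)calloc(1, cls->bits.fastInstanceSize());
//     obj->initInstanceIsa(cls, cls->hasCxxDtor());
//
// i.e. one zeroed allocation plus an isa store, with no further dispatch to
// +allocWithZone: or class_createInstance(). Classes that do override the
// allocators fall through to the ordinary [cls alloc] /
// [cls allocWithZone:nil] sends at the bottom. Foo is a hypothetical class
// name used only for illustration.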
1669
1670
1671 // Base class implementation of +alloc. cls is not nil.
1672 // Calls [cls allocWithZone:nil].
1673 id
1674 _objc_rootAlloc(Class cls)
1675 {
1676 return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
1677 }
1678
1679 // Calls [cls alloc].
1680 id
1681 objc_alloc(Class cls)
1682 {
1683 return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
1684 }
1685
1686 // Calls [cls allocWithZone:nil].
1687 id
1688 objc_allocWithZone(Class cls)
1689 {
1690 return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
1691 }
1692
1693
1694 void
1695 _objc_rootDealloc(id obj)
1696 {
1697 assert(obj);
1698
1699 obj->rootDealloc();
1700 }
1701
1702 void
1703 _objc_rootFinalize(id obj __unused)
1704 {
1705 assert(obj);
1706 assert(UseGC);
1707
1708 if (UseGC) {
1709 return;
1710 }
1711 _objc_fatal("_objc_rootFinalize called with garbage collection off");
1712 }
1713
1714
1715 id
1716 _objc_rootInit(id obj)
1717 {
1718 // In practice, it will be hard to rely on this function.
1719 // Many classes do not properly chain -init calls.
1720 return obj;
1721 }
1722
1723
1724 malloc_zone_t *
1725 _objc_rootZone(id obj)
1726 {
1727 (void)obj;
1728 if (gc_zone) {
1729 return gc_zone;
1730 }
1731 #if __OBJC2__
1732 // allocWithZone under __OBJC2__ ignores the zone parameter
1733 return malloc_default_zone();
1734 #else
1735 malloc_zone_t *rval = malloc_zone_from_ptr(obj);
1736 return rval ? rval : malloc_default_zone();
1737 #endif
1738 }
1739
1740 uintptr_t
1741 _objc_rootHash(id obj)
1742 {
1743 if (UseGC) {
1744 return _object_getExternalHash(obj);
1745 }
1746 return (uintptr_t)obj;
1747 }
1748
1749 void *
1750 objc_autoreleasePoolPush(void)
1751 {
1752 if (UseGC) return nil;
1753 return AutoreleasePoolPage::push();
1754 }
1755
1756 void
1757 objc_autoreleasePoolPop(void *ctxt)
1758 {
1759 if (UseGC) return;
1760 AutoreleasePoolPage::pop(ctxt);
1761 }
1762
1763
1764 void *
1765 _objc_autoreleasePoolPush(void)
1766 {
1767 return objc_autoreleasePoolPush();
1768 }
1769
1770 void
1771 _objc_autoreleasePoolPop(void *ctxt)
1772 {
1773 objc_autoreleasePoolPop(ctxt);
1774 }
1775
1776 void
1777 _objc_autoreleasePoolPrint(void)
1778 {
1779 if (UseGC) return;
1780 AutoreleasePoolPage::printAll();
1781 }
1782
1783
1784 // Same as objc_release but suitable for tail-calling
1785 // if you need the value back and don't want to push a frame before this point.
1786 __attribute__((noinline))
1787 static id
1788 objc_releaseAndReturn(id obj)
1789 {
1790 objc_release(obj);
1791 return obj;
1792 }
1793
1794 // Same as objc_retainAutorelease but suitable for tail-calling
1795 // if you don't want to push a frame before this point.
1796 __attribute__((noinline))
1797 static id
1798 objc_retainAutoreleaseAndReturn(id obj)
1799 {
1800 return objc_retainAutorelease(obj);
1801 }
1802
1803
1804 // Prepare a value at +1 for return through a +0 autoreleasing convention.
1805 id
1806 objc_autoreleaseReturnValue(id obj)
1807 {
1808 if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
1809
1810 return objc_autorelease(obj);
1811 }
1812
1813 // Prepare a value at +0 for return through a +0 autoreleasing convention.
1814 id
1815 objc_retainAutoreleaseReturnValue(id obj)
1816 {
1817 if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;
1818
1819 // not objc_autoreleaseReturnValue(objc_retain(obj))
1820 // because we don't need another optimization attempt
1821 return objc_retainAutoreleaseAndReturn(obj);
1822 }
1823
1824 // Accept a value returned through a +0 autoreleasing convention for use at +1.
1825 id
1826 objc_retainAutoreleasedReturnValue(id obj)
1827 {
1828 if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
1829
1830 return objc_retain(obj);
1831 }
1832
1833 // Accept a value returned through a +0 autoreleasing convention for use at +0.
1834 id
1835 objc_unsafeClaimAutoreleasedReturnValue(id obj)
1836 {
1837 if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;
1838
1839 return objc_releaseAndReturn(obj);
1840 }
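// Editorial sketch (not part of the runtime): how the entry points above pair
// up across a call boundary under ARC. For a method returning a nominally
// autoreleased (+0) value:
//
//     // callee side:
//     return objc_autoreleaseReturnValue(x);
//
//     // caller side (result used at +1):
//     id y = objc_retainAutoreleasedReturnValue([obj someMethod]);
//
// When prepareOptimizedReturn()/acceptOptimizedReturn() detect the matching
// caller-side sequence, both the autorelease and the retain are skipped and
// ownership is handed over directly; otherwise each side falls back to a real
// autorelease/retain. A caller that only needs the value at +0 uses
// objc_unsafeClaimAutoreleasedReturnValue() instead. someMethod, x and y are
// hypothetical names used only for illustration.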
1841
1842 id
1843 objc_retainAutorelease(id obj)
1844 {
1845 return objc_autorelease(objc_retain(obj));
1846 }
1847
1848 void
1849 _objc_deallocOnMainThreadHelper(void *context)
1850 {
1851 id obj = (id)context;
1852 [obj dealloc];
1853 }
1854
1855 #undef objc_retainedObject
1856 #undef objc_unretainedObject
1857 #undef objc_unretainedPointer
1858
1859 // convert objc_objectptr_t to id, callee must take ownership.
1860 id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1861
1862 // convert objc_objectptr_t to id, without ownership transfer.
1863 id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1864
1865 // convert id to objc_objectptr_t, no ownership transfer.
1866 objc_objectptr_t objc_unretainedPointer(id object) { return object; }
1867
1868
1869 void arr_init(void)
1870 {
1871 AutoreleasePoolPage::init();
1872 SideTableInit();
1873 }
1874
1875 @implementation NSObject
1876
1877 + (void)load {
1878 if (UseGC) gc_init2();
1879 }
1880
1881 + (void)initialize {
1882 }
1883
1884 + (id)self {
1885 return (id)self;
1886 }
1887
1888 - (id)self {
1889 return self;
1890 }
1891
1892 + (Class)class {
1893 return self;
1894 }
1895
1896 - (Class)class {
1897 return object_getClass(self);
1898 }
1899
1900 + (Class)superclass {
1901 return self->superclass;
1902 }
1903
1904 - (Class)superclass {
1905 return [self class]->superclass;
1906 }
1907
1908 + (BOOL)isMemberOfClass:(Class)cls {
1909 return object_getClass((id)self) == cls;
1910 }
1911
1912 - (BOOL)isMemberOfClass:(Class)cls {
1913 return [self class] == cls;
1914 }
1915
1916 + (BOOL)isKindOfClass:(Class)cls {
1917 for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
1918 if (tcls == cls) return YES;
1919 }
1920 return NO;
1921 }
1922
1923 - (BOOL)isKindOfClass:(Class)cls {
1924 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
1925 if (tcls == cls) return YES;
1926 }
1927 return NO;
1928 }
1929
1930 + (BOOL)isSubclassOfClass:(Class)cls {
1931 for (Class tcls = self; tcls; tcls = tcls->superclass) {
1932 if (tcls == cls) return YES;
1933 }
1934 return NO;
1935 }
1936
1937 + (BOOL)isAncestorOfObject:(NSObject *)obj {
1938 for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
1939 if (tcls == self) return YES;
1940 }
1941 return NO;
1942 }
1943
1944 + (BOOL)instancesRespondToSelector:(SEL)sel {
1945 if (!sel) return NO;
1946 return class_respondsToSelector(self, sel);
1947 }
1948
1949 + (BOOL)respondsToSelector:(SEL)sel {
1950 if (!sel) return NO;
1951 return class_respondsToSelector_inst(object_getClass(self), sel, self);
1952 }
1953
1954 - (BOOL)respondsToSelector:(SEL)sel {
1955 if (!sel) return NO;
1956 return class_respondsToSelector_inst([self class], sel, self);
1957 }
1958
1959 + (BOOL)conformsToProtocol:(Protocol *)protocol {
1960 if (!protocol) return NO;
1961 for (Class tcls = self; tcls; tcls = tcls->superclass) {
1962 if (class_conformsToProtocol(tcls, protocol)) return YES;
1963 }
1964 return NO;
1965 }
1966
1967 - (BOOL)conformsToProtocol:(Protocol *)protocol {
1968 if (!protocol) return NO;
1969 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
1970 if (class_conformsToProtocol(tcls, protocol)) return YES;
1971 }
1972 return NO;
1973 }
1974
1975 + (NSUInteger)hash {
1976 return _objc_rootHash(self);
1977 }
1978
1979 - (NSUInteger)hash {
1980 return _objc_rootHash(self);
1981 }
1982
1983 + (BOOL)isEqual:(id)obj {
1984 return obj == (id)self;
1985 }
1986
1987 - (BOOL)isEqual:(id)obj {
1988 return obj == self;
1989 }
1990
1991
1992 + (BOOL)isFault {
1993 return NO;
1994 }
1995
1996 - (BOOL)isFault {
1997 return NO;
1998 }
1999
2000 + (BOOL)isProxy {
2001 return NO;
2002 }
2003
2004 - (BOOL)isProxy {
2005 return NO;
2006 }
2007
2008
2009 + (IMP)instanceMethodForSelector:(SEL)sel {
2010 if (!sel) [self doesNotRecognizeSelector:sel];
2011 return class_getMethodImplementation(self, sel);
2012 }
2013
2014 + (IMP)methodForSelector:(SEL)sel {
2015 if (!sel) [self doesNotRecognizeSelector:sel];
2016 return object_getMethodImplementation((id)self, sel);
2017 }
2018
2019 - (IMP)methodForSelector:(SEL)sel {
2020 if (!sel) [self doesNotRecognizeSelector:sel];
2021 return object_getMethodImplementation(self, sel);
2022 }
2023
2024 + (BOOL)resolveClassMethod:(SEL)sel {
2025 return NO;
2026 }
2027
2028 + (BOOL)resolveInstanceMethod:(SEL)sel {
2029 return NO;
2030 }
2031
2032 // Replaced by CF (throws an NSException)
2033 + (void)doesNotRecognizeSelector:(SEL)sel {
2034 _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
2035 class_getName(self), sel_getName(sel), self);
2036 }
2037
2038 // Replaced by CF (throws an NSException)
2039 - (void)doesNotRecognizeSelector:(SEL)sel {
2040 _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
2041 object_getClassName(self), sel_getName(sel), self);
2042 }
2043
2044
2045 + (id)performSelector:(SEL)sel {
2046 if (!sel) [self doesNotRecognizeSelector:sel];
2047 return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
2048 }
2049
2050 + (id)performSelector:(SEL)sel withObject:(id)obj {
2051 if (!sel) [self doesNotRecognizeSelector:sel];
2052 return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
2053 }
2054
2055 + (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2056 if (!sel) [self doesNotRecognizeSelector:sel];
2057 return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
2058 }
2059
2060 - (id)performSelector:(SEL)sel {
2061 if (!sel) [self doesNotRecognizeSelector:sel];
2062 return ((id(*)(id, SEL))objc_msgSend)(self, sel);
2063 }
2064
2065 - (id)performSelector:(SEL)sel withObject:(id)obj {
2066 if (!sel) [self doesNotRecognizeSelector:sel];
2067 return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
2068 }
2069
2070 - (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2071 if (!sel) [self doesNotRecognizeSelector:sel];
2072 return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
2073 }
2074
2075
2076 // Replaced by CF (returns an NSMethodSignature)
2077 + (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
2078 _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
2079 "not available without CoreFoundation");
2080 }
2081
2082 // Replaced by CF (returns an NSMethodSignature)
2083 + (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2084 _objc_fatal("+[NSObject methodSignatureForSelector:] "
2085 "not available without CoreFoundation");
2086 }
2087
2088 // Replaced by CF (returns an NSMethodSignature)
2089 - (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2090 _objc_fatal("-[NSObject methodSignatureForSelector:] "
2091 "not available without CoreFoundation");
2092 }
2093
2094 + (void)forwardInvocation:(NSInvocation *)invocation {
2095 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2096 }
2097
2098 - (void)forwardInvocation:(NSInvocation *)invocation {
2099 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2100 }
2101
2102 + (id)forwardingTargetForSelector:(SEL)sel {
2103 return nil;
2104 }
2105
2106 - (id)forwardingTargetForSelector:(SEL)sel {
2107 return nil;
2108 }
2109
2110
2111 // Replaced by CF (returns an NSString)
2112 + (NSString *)description {
2113 return nil;
2114 }
2115
2116 // Replaced by CF (returns an NSString)
2117 - (NSString *)description {
2118 return nil;
2119 }
2120
2121 + (NSString *)debugDescription {
2122 return [self description];
2123 }
2124
2125 - (NSString *)debugDescription {
2126 return [self description];
2127 }
2128
2129
2130 + (id)new {
2131 return [callAlloc(self, false/*checkNil*/) init];
2132 }
2133
2134 + (id)retain {
2135 return (id)self;
2136 }
2137
2138 // Replaced by ObjectAlloc
2139 - (id)retain {
2140 return ((id)self)->rootRetain();
2141 }
2142
2143
2144 + (BOOL)_tryRetain {
2145 return YES;
2146 }
2147
2148 // Replaced by ObjectAlloc
2149 - (BOOL)_tryRetain {
2150 return ((id)self)->rootTryRetain();
2151 }
2152
2153 + (BOOL)_isDeallocating {
2154 return NO;
2155 }
2156
2157 - (BOOL)_isDeallocating {
2158 return ((id)self)->rootIsDeallocating();
2159 }
2160
2161 + (BOOL)allowsWeakReference {
2162 return YES;
2163 }
2164
2165 + (BOOL)retainWeakReference {
2166 return YES;
2167 }
2168
2169 - (BOOL)allowsWeakReference {
2170 return ! [self _isDeallocating];
2171 }
2172
2173 - (BOOL)retainWeakReference {
2174 return [self _tryRetain];
2175 }
2176
2177 + (oneway void)release {
2178 }
2179
2180 // Replaced by ObjectAlloc
2181 - (oneway void)release {
2182 ((id)self)->rootRelease();
2183 }
2184
2185 + (id)autorelease {
2186 return (id)self;
2187 }
2188
2189 // Replaced by ObjectAlloc
2190 - (id)autorelease {
2191 return ((id)self)->rootAutorelease();
2192 }
2193
2194 + (NSUInteger)retainCount {
2195 return ULONG_MAX;
2196 }
2197
2198 - (NSUInteger)retainCount {
2199 return ((id)self)->rootRetainCount();
2200 }
2201
2202 + (id)alloc {
2203 return _objc_rootAlloc(self);
2204 }
2205
2206 // Replaced by ObjectAlloc
2207 + (id)allocWithZone:(struct _NSZone *)zone {
2208 return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
2209 }
2210
2211 // Replaced by CF (throws an NSException)
2212 + (id)init {
2213 return (id)self;
2214 }
2215
2216 - (id)init {
2217 return _objc_rootInit(self);
2218 }
2219
2220 // Replaced by CF (throws an NSException)
2221 + (void)dealloc {
2222 }
2223
2224
2225 // Replaced by NSZombies
2226 - (void)dealloc {
2227 _objc_rootDealloc(self);
2228 }
2229
2230 // Replaced by CF (throws an NSException)
2231 + (void)finalize {
2232 }
2233
2234 - (void)finalize {
2235 _objc_rootFinalize(self);
2236 }
2237
2238 + (struct _NSZone *)zone {
2239 return (struct _NSZone *)_objc_rootZone(self);
2240 }
2241
2242 - (struct _NSZone *)zone {
2243 return (struct _NSZone *)_objc_rootZone(self);
2244 }
2245
2246 + (id)copy {
2247 return (id)self;
2248 }
2249
2250 + (id)copyWithZone:(struct _NSZone *)zone {
2251 return (id)self;
2252 }
2253
2254 - (id)copy {
2255 return [(id)self copyWithZone:nil];
2256 }
2257
2258 + (id)mutableCopy {
2259 return (id)self;
2260 }
2261
2262 + (id)mutableCopyWithZone:(struct _NSZone *)zone {
2263 return (id)self;
2264 }
2265
2266 - (id)mutableCopy {
2267 return [(id)self mutableCopyWithZone:nil];
2268 }
2269
2270 @end
2271
2272