/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
* Inlineable parts of NSObject / objc_object implementation
**********************************************************************/

#ifndef _OBJC_OBJCOBJECT_H_
#define _OBJC_OBJCOBJECT_H_

#include "objc-private.h"

enum ReturnDisposition : bool {
    ReturnAtPlus0 = false, ReturnAtPlus1 = true
};

static ALWAYS_INLINE bool prepareOptimizedReturn(ReturnDisposition disposition);

#if SUPPORT_TAGGED_POINTERS

extern Class objc_debug_taggedpointer_classes[_OBJC_TAG_SLOT_COUNT];
extern Class objc_debug_taggedpointer_ext_classes[_OBJC_TAG_EXT_SLOT_COUNT];

#define objc_tag_classes objc_debug_taggedpointer_classes
#define objc_tag_ext_classes objc_debug_taggedpointer_ext_classes

#endif

#if SUPPORT_INDEXED_ISA

ALWAYS_INLINE Class &
classForIndex(uintptr_t index) {
    ASSERT(index < (uintptr_t)objc_indexed_classes_count);
    return objc_indexed_classes[index];
}

#endif

inline bool
objc_object::isClass()
{
    if (isTaggedPointer()) return false;
    return ISA()->isMetaClass();
}

#if SUPPORT_TAGGED_POINTERS

inline Class 
objc_object::getIsa() 
{
    if (fastpath(!isTaggedPointer())) return ISA();

    extern objc_class OBJC_CLASS_$___NSUnrecognizedTaggedPointer;
    uintptr_t slot, ptr = (uintptr_t)this;
    Class cls;

    slot = (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
    cls = objc_tag_classes[slot];
    if (slowpath(cls == (Class)&OBJC_CLASS_$___NSUnrecognizedTaggedPointer)) {
        slot = (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK;
        cls = objc_tag_ext_classes[slot];
    }
    return cls;
}

inline uintptr_t
objc_object::isaBits() const
{
    return isa.bits;
}

inline bool 
objc_object::isTaggedPointer() 
{
    return _objc_isTaggedPointer(this);
}

inline bool 
objc_object::isBasicTaggedPointer() 
{
    return isTaggedPointer() && !isExtTaggedPointer();
}

inline bool 
objc_object::isExtTaggedPointer() 
{
    uintptr_t ptr = _objc_decodeTaggedPointer(this);
    return (ptr & _OBJC_TAG_EXT_MASK) == _OBJC_TAG_EXT_MASK;
}


// SUPPORT_TAGGED_POINTERS
#else
// not SUPPORT_TAGGED_POINTERS

inline Class objc_object::getIsa() { return ISA(); }
inline uintptr_t objc_object::isaBits() const { return isa.bits; }
inline bool objc_object::isTaggedPointer() { return false; }
inline bool objc_object::isBasicTaggedPointer() { return false; }
inline bool objc_object::isExtTaggedPointer() { return false; }


// not SUPPORT_TAGGED_POINTERS
#endif

#if SUPPORT_NONPOINTER_ISA

inline Class 
objc_object::ISA() 
{
    ASSERT(!isTaggedPointer());
#if SUPPORT_INDEXED_ISA
    if (isa.nonpointer) {
        uintptr_t slot = isa.indexcls;
        return classForIndex((unsigned)slot);
    }
    return (Class)isa.bits;
#else
    return (Class)(isa.bits & ISA_MASK);
#endif
}

inline Class
objc_object::rawISA()
{
    ASSERT(!isTaggedPointer() && !isa.nonpointer);
    return (Class)isa.bits;
}

inline bool
objc_object::hasNonpointerIsa()
{
    return isa.nonpointer;
}


inline void 
objc_object::initIsa(Class cls)
{
    initIsa(cls, false, false);
}

inline void 
objc_object::initClassIsa(Class cls)
{
    if (DisableNonpointerIsa || cls->instancesRequireRawIsa()) {
        initIsa(cls, false/*not nonpointer*/, false);
    } else {
        initIsa(cls, true/*nonpointer*/, false);
    }
}

inline void
objc_object::initProtocolIsa(Class cls)
{
    return initClassIsa(cls);
}

inline void 
objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
{
    ASSERT(!cls->instancesRequireRawIsa());
    ASSERT(hasCxxDtor == cls->hasCxxDtor());

    initIsa(cls, true, hasCxxDtor);
}

inline void 
objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor) 
{ 
    ASSERT(!isTaggedPointer());

    if (!nonpointer) {
        isa = isa_t((uintptr_t)cls);
    } else {
        ASSERT(!DisableNonpointerIsa);
        ASSERT(!cls->instancesRequireRawIsa());

        isa_t newisa(0);

#if SUPPORT_INDEXED_ISA
        ASSERT(cls->classArrayIndex() > 0);
        newisa.bits = ISA_INDEX_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.indexcls = (uintptr_t)cls->classArrayIndex();
#else
        newisa.bits = ISA_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.shiftcls = (uintptr_t)cls >> 3;
#endif

        // This write must be performed in a single store in some cases
        // (for example when realizing a class because other threads
        // may simultaneously try to use the class).
        // fixme use atomics here to guarantee single-store and to
        // guarantee memory order w.r.t. the class index table
        // ...but not too atomic because we don't want to hurt instantiation
        isa = newisa;
    }
}

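// Illustrative sketch only, not part of the runtime: one way the single
// store above could be made explicit with a C11 atomic, assuming isa.bits
// may be treated as an atomic uintptr_t. A release store is one way to get
// the ordering w.r.t. the class index table that the fixme asks for; the
// extra cost on every instantiation is the reason it remains a fixme.
//
//     __c11_atomic_store((_Atomic uintptr_t *)&isa.bits,
//                        newisa.bits, __ATOMIC_RELEASE);
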
inline Class 
objc_object::changeIsa(Class newCls)
{
    // This is almost always true but there are 
    // enough edge cases that we can't assert it.
    // assert(newCls->isFuture()  ||  
    //        newCls->isInitializing()  ||  newCls->isInitialized());

    ASSERT(!isTaggedPointer());

    isa_t oldisa;
    isa_t newisa;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        if ((oldisa.bits == 0 || oldisa.nonpointer) &&
            !newCls->isFuture() && newCls->canAllocNonpointer())
        {
            // nonpointer -> nonpointer
#if SUPPORT_INDEXED_ISA
            if (oldisa.bits == 0) newisa.bits = ISA_INDEX_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            ASSERT(newCls->classArrayIndex() > 0);
            newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
#else
            if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            newisa.shiftcls = (uintptr_t)newCls >> 3;
#endif
        }
        else if (oldisa.nonpointer) {
            // nonpointer -> raw pointer
            // Need to copy retain count et al to side table.
            // Acquire side table lock before setting isa to
            // prevent races such as concurrent -release.
            if (!sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.cls = newCls;
        }
        else {
            // raw pointer -> raw pointer
            newisa.cls = newCls;
        }
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (transcribeToSideTable) {
        // Copy oldisa's retain count et al to side table.
        // oldisa.has_assoc: nothing to do
        // oldisa.has_cxx_dtor: nothing to do
        sidetable_moveExtraRC_nolock(oldisa.extra_rc,
                                     oldisa.deallocating,
                                     oldisa.weakly_referenced);
    }

    if (sideTableLocked) sidetable_unlock();

    if (oldisa.nonpointer) {
#if SUPPORT_INDEXED_ISA
        return classForIndex(oldisa.indexcls);
#else
        return (Class)((uintptr_t)oldisa.shiftcls << 3);
#endif
    }
    else {
        return oldisa.cls;
    }
}

inline bool
objc_object::hasAssociatedObjects()
{
    if (isTaggedPointer()) return true;
    if (isa.nonpointer) return isa.has_assoc;
    return true;
}


inline void
objc_object::setHasAssociatedObjects()
{
    if (isTaggedPointer()) return;

 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (!newisa.nonpointer || newisa.has_assoc) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.has_assoc = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}

inline bool
objc_object::isWeaklyReferenced()
{
    ASSERT(!isTaggedPointer());
    if (isa.nonpointer) return isa.weakly_referenced;
    else return sidetable_isWeaklyReferenced();
}


inline void
objc_object::setWeaklyReferenced_nolock()
{
 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (slowpath(!newisa.nonpointer)) {
        ClearExclusive(&isa.bits);
        sidetable_setWeaklyReferenced_nolock();
        return;
    }
    if (newisa.weakly_referenced) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.weakly_referenced = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}


inline bool
objc_object::hasCxxDtor()
{
    ASSERT(!isTaggedPointer());
    if (isa.nonpointer) return isa.has_cxx_dtor;
    else return isa.cls->hasCxxDtor();
}


inline bool 
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    if (isa.nonpointer) return isa.deallocating;
    return sidetable_isDeallocating();
}

inline void 
objc_object::clearDeallocating()
{
    if (slowpath(!isa.nonpointer)) {
        // Slow path for raw pointer isa.
        sidetable_clearDeallocating();
    }
    else if (slowpath(isa.weakly_referenced || isa.has_sidetable_rc)) {
        // Slow path for non-pointer isa with weak refs and/or side table data.
        clearDeallocating_slow();
    }

    assert(!sidetable_present());
}


inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;  // fixme necessary?

    if (fastpath(isa.nonpointer &&
                 !isa.weakly_referenced &&
                 !isa.has_assoc &&
                 !isa.has_cxx_dtor &&
                 !isa.has_sidetable_rc))
    {
        assert(!sidetable_present());
        free(this);
    }
    else {
        object_dispose((id)this);
    }
}

// Equivalent to calling [this retain], with shortcuts if there is no override
inline id 
objc_object::retain()
{
    ASSERT(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return rootRetain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
}

// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super retain].
//
// tryRetain=true is the -_tryRetain path.
// handleOverflow=false is the frameless fast path.
// handleOverflow=true is the framed slow path including overflow to side table
// The code is structured this way to prevent duplication.
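
// Illustrative sketch (assumption; the real out-of-line handler lives in
// NSObject.mm, not here): the overflow slow path simply re-enters rootRetain
// with handleOverflow=true, so the inlined fast path never needs a frame.
//
//     NEVER_INLINE id
//     objc_object::rootRetain_overflow(bool tryRetain)
//     {
//         return rootRetain(tryRetain, true);
//     }
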
ALWAYS_INLINE id 
objc_object::rootRetain()
{
    return rootRetain(false, false);
}

ALWAYS_INLINE bool 
objc_object::rootTryRetain()
{
    return rootRetain(true, false) ? true : false;
}

ALWAYS_INLINE id 
objc_object::rootRetain(bool tryRetain, bool handleOverflow)
{
    if (isTaggedPointer()) return (id)this;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    isa_t oldisa;
    isa_t newisa;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (rawISA()->isMetaClass()) return (id)this;
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
            else return sidetable_retain();
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        if (slowpath(tryRetain && newisa.deallocating)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            return nil;
        }
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc++

        if (slowpath(carry)) {
            // newisa.extra_rc++ overflowed
            if (!handleOverflow) {
                ClearExclusive(&isa.bits);
                return rootRetain_overflow(tryRetain);
            }
            // Leave half of the retain counts inline and
            // prepare to copy the other half to the side table.
            if (!tryRetain && !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));

    if (slowpath(transcribeToSideTable)) {
        // Copy the other half of the retain counts to the side table.
        sidetable_addExtraRC_nolock(RC_HALF);
    }

    if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
    return (id)this;
}

// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    ASSERT(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        rootRelease();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
}

// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super release].
//
// handleUnderflow=false is the frameless fast path.
// handleUnderflow=true is the framed slow path including side table borrow
// The code is structured this way to prevent duplication.
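
// Illustrative sketch (assumption; the real out-of-line handler lives in
// NSObject.mm, not here): the underflow slow path re-enters rootRelease with
// handleUnderflow=true, taking a frame only when the side table actually
// has to be consulted.
//
//     NEVER_INLINE bool
//     objc_object::rootRelease_underflow(bool performDealloc)
//     {
//         return rootRelease(performDealloc, true);
//     }
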
ALWAYS_INLINE bool 
objc_object::rootRelease()
{
    return rootRelease(true, false);
}

ALWAYS_INLINE bool 
objc_object::rootReleaseShouldDealloc()
{
    return rootRelease(false, false);
}

ALWAYS_INLINE bool 
objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
{
    if (isTaggedPointer()) return false;

    bool sideTableLocked = false;

    isa_t oldisa;
    isa_t newisa;

 retry:
    do {
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (rawISA()->isMetaClass()) return false;
            if (sideTableLocked) sidetable_unlock();
            return sidetable_release(performDealloc);
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        uintptr_t carry;
        newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc--
        if (slowpath(carry)) {
            // don't ClearExclusive()
            goto underflow;
        }
    } while (slowpath(!StoreReleaseExclusive(&isa.bits,
                                             oldisa.bits, newisa.bits)));

    if (slowpath(sideTableLocked)) sidetable_unlock();
    return false;

 underflow:
    // newisa.extra_rc-- underflowed: borrow from side table or deallocate

    // abandon newisa to undo the decrement
    newisa = oldisa;

    if (slowpath(newisa.has_sidetable_rc)) {
        if (!handleUnderflow) {
            ClearExclusive(&isa.bits);
            return rootRelease_underflow(performDealloc);
        }

        // Transfer retain count from side table to inline storage.

        if (!sideTableLocked) {
            ClearExclusive(&isa.bits);
            sidetable_lock();
            sideTableLocked = true;
            // Need to start over to avoid a race against
            // the nonpointer -> raw pointer transition.
            goto retry;
        }

        // Try to remove some retain counts from the side table.
        size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);

        // To avoid races, has_sidetable_rc must remain set
        // even if the side table count is now zero.

        if (borrowed > 0) {
            // Side table retain count decreased.
            // Try to add them to the inline count.
            newisa.extra_rc = borrowed - 1;  // redo the original decrement too
            bool stored = StoreReleaseExclusive(&isa.bits,
                                                oldisa.bits, newisa.bits);
            if (!stored) {
                // Inline update failed.
                // Try it again right now. This prevents livelock on LL/SC
                // architectures where the side table access itself may have
                // dropped the reservation.
                isa_t oldisa2 = LoadExclusive(&isa.bits);
                isa_t newisa2 = oldisa2;
                if (newisa2.nonpointer) {
                    uintptr_t overflow;
                    newisa2.bits =
                        addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
                    if (!overflow) {
                        stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits,
                                                       newisa2.bits);
                    }
                }
            }

            if (!stored) {
                // Inline update failed.
                // Put the retains back in the side table.
                sidetable_addExtraRC_nolock(borrowed);
                goto retry;
            }

            // Decrement successful after borrowing from side table.
            // This decrement cannot be the deallocating decrement - the side
            // table lock and has_sidetable_rc bit ensure that if everyone
            // else tried to -release while we worked, the last one would block.
            sidetable_unlock();
            return false;
        }
        else {
            // Side table is empty after all. Fall-through to the dealloc path.
        }
    }

    // Really deallocate.

    if (slowpath(newisa.deallocating)) {
        ClearExclusive(&isa.bits);
        if (sideTableLocked) sidetable_unlock();
        return overrelease_error();
        // does not actually return
    }
    newisa.deallocating = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;

    if (slowpath(sideTableLocked)) sidetable_unlock();

    __c11_atomic_thread_fence(__ATOMIC_ACQUIRE);

    if (performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
    }
    return true;
}

// Equivalent to [this autorelease], with shortcuts if there is no override
inline id 
objc_object::autorelease()
{
    ASSERT(!isTaggedPointer());
    if (fastpath(!ISA()->hasCustomRR())) {
        return rootAutorelease();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(autorelease));
}


// Base autorelease implementation, ignoring overrides.
inline id 
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}

inline uintptr_t 
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;

    sidetable_lock();
    isa_t bits = LoadExclusive(&isa.bits);
    ClearExclusive(&isa.bits);
    if (bits.nonpointer) {
        uintptr_t rc = 1 + bits.extra_rc;
        if (bits.has_sidetable_rc) {
            rc += sidetable_getExtraRC_nolock();
        }
        sidetable_unlock();
        return rc;
    }

    sidetable_unlock();
    return sidetable_retainCount();
}

// SUPPORT_NONPOINTER_ISA
#else
// not SUPPORT_NONPOINTER_ISA


inline Class 
objc_object::ISA() 
{
    ASSERT(!isTaggedPointer());
    return isa.cls;
}

inline Class
objc_object::rawISA()
{
    return ISA();
}

inline bool
objc_object::hasNonpointerIsa()
{
    return false;
}


inline void 
objc_object::initIsa(Class cls)
{
    ASSERT(!isTaggedPointer());
    isa = (uintptr_t)cls;
}

inline void objc_object::initClassIsa(Class cls)          { initIsa(cls); }
inline void objc_object::initProtocolIsa(Class cls)       { initIsa(cls); }
inline void objc_object::initInstanceIsa(Class cls, bool) { initIsa(cls); }
inline void objc_object::initIsa(Class cls, bool, bool)   { initIsa(cls); }

inline Class 
objc_object::changeIsa(Class cls)
{
    // This is almost always true but there are
    // enough edge cases that we can't assert it.
    // assert(cls->isFuture()  ||  
    //        cls->isInitializing()  ||  cls->isInitialized());

    ASSERT(!isTaggedPointer());

    isa_t oldisa, newisa;
    newisa.cls = cls;
    do {
        oldisa = LoadExclusive(&isa.bits);
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (oldisa.cls && oldisa.cls->instancesHaveAssociatedObjects()) {
        cls->setInstancesHaveAssociatedObjects();
    }

    return oldisa.cls;
}

inline bool
objc_object::hasAssociatedObjects()
{
    return getIsa()->instancesHaveAssociatedObjects();
}

inline void
objc_object::setHasAssociatedObjects()
{
    getIsa()->setInstancesHaveAssociatedObjects();
}

inline bool
objc_object::isWeaklyReferenced()
{
    ASSERT(!isTaggedPointer());
    return sidetable_isWeaklyReferenced();
}

inline void
objc_object::setWeaklyReferenced_nolock()
{
    ASSERT(!isTaggedPointer());
    sidetable_setWeaklyReferenced_nolock();
}

inline bool
objc_object::hasCxxDtor()
{
    ASSERT(!isTaggedPointer());
    return isa.cls->hasCxxDtor();
}

inline bool 
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    return sidetable_isDeallocating();
}

inline void 
objc_object::clearDeallocating()
{
    sidetable_clearDeallocating();
}

inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;
    object_dispose((id)this);
}

// Equivalent to calling [this retain], with shortcuts if there is no override
inline id 
objc_object::retain()
{
    ASSERT(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return sidetable_retain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
}


// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super retain].
inline id 
objc_object::rootRetain()
{
    if (isTaggedPointer()) return (id)this;
    return sidetable_retain();
}

// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    ASSERT(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        sidetable_release();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
}


// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super release].
inline bool 
objc_object::rootRelease()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(true);
}

inline bool 
objc_object::rootReleaseShouldDealloc()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(false);
}

// Equivalent to [this autorelease], with shortcuts if there is no override
inline id 
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(autorelease));
}


// Base autorelease implementation, ignoring overrides.
inline id 
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


// Base tryRetain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super _tryRetain].
inline bool 
objc_object::rootTryRetain()
{
    if (isTaggedPointer()) return true;
    return sidetable_tryRetain();
}


inline uintptr_t 
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;
    return sidetable_retainCount();
}

// not SUPPORT_NONPOINTER_ISA
#endif


#if SUPPORT_RETURN_AUTORELEASE

/***********************************************************************
  Fast handling of return through Cocoa's +0 autoreleasing convention.
  The caller and callee cooperate to keep the returned object
  out of the autorelease pool and eliminate redundant retain/release pairs.

  An optimized callee looks at the caller's instructions following the
  return. If the caller's instructions are also optimized then the callee
  skips all retain count operations: no autorelease, no retain/autorelease.
  Instead it saves the result's current retain count (+0 or +1) in
  thread-local storage. If the caller does not look optimized then
  the callee performs autorelease or retain/autorelease as usual.

  An optimized caller looks at the thread-local storage. If the result
  is set then it performs any retain or release needed to change the
  result from the retain count left by the callee to the retain count
  desired by the caller. Otherwise the caller assumes the result is
  currently at +0 from an unoptimized callee and performs any retain
  needed for that case.

  There are two optimized callees:
    objc_autoreleaseReturnValue
      result is currently +1. The unoptimized path autoreleases it.
    objc_retainAutoreleaseReturnValue
      result is currently +0. The unoptimized path retains and autoreleases it.

  There are two optimized callers:
    objc_retainAutoreleasedReturnValue
      caller wants the value at +1. The unoptimized path retains it.
    objc_unsafeClaimAutoreleasedReturnValue
      caller wants the value at +0 unsafely. The unoptimized path does nothing.

  Example:

    Callee:
      // compute ret at +1
      return objc_autoreleaseReturnValue(ret);

    Caller:
      ret = callee();
      ret = objc_retainAutoreleasedReturnValue(ret);
      // use ret at +1 here

    Callee sees the optimized caller, sets TLS, and leaves the result at +1.
    Caller sees the TLS, clears it, and accepts the result at +1 as-is.

  The callee's recognition of the optimized caller is architecture-dependent.
  x86_64: Callee looks for `mov rax, rdi` followed by a call or
    jump instruction to objc_retainAutoreleasedReturnValue or
    objc_unsafeClaimAutoreleasedReturnValue.
  i386:  Callee looks for a magic nop `movl %ebp, %ebp` (frame pointer register).
  armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register).
  arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register).

  Tagged pointer objects do participate in the optimized return scheme,
  because it saves message sends. They are not entered in the autorelease
  pool in the unoptimized case.
**********************************************************************/
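
// Illustrative sketch (assumption; the real entry points live in NSObject.mm,
// not in this header): how the two halves of the handshake use the helpers
// defined below. Any details beyond the comment above are illustrative only.
//
//     // Optimized callee entry point: the result arrives at +1.
//     id objc_autoreleaseReturnValue(id obj)
//     {
//         if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;  // hand off at +1 via TLS
//         return objc_autorelease(obj);      // unoptimized caller: autorelease as usual
//     }
//
//     // Optimized caller entry point: the caller wants the result at +1.
//     id objc_retainAutoreleasedReturnValue(id obj)
//     {
//         if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;  // already +1
//         return objc_retain(obj);           // unoptimized callee left it at +0
//     }
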
#if __x86_64__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void * const ra0)
{
    const uint8_t *ra1 = (const uint8_t *)ra0;
    const unaligned_uint16_t *ra2;
    const unaligned_uint32_t *ra4 = (const unaligned_uint32_t *)ra1;
    const void **sym;

#define PREFER_GOTPCREL 0
#if PREFER_GOTPCREL
    // 48 89 c7    movq  %rax,%rdi
    // ff 15       callq *symbol@GOTPCREL(%rip)
    if (*ra4 != 0xffc78948) {
        return false;
    }
    if (ra1[4] != 0x15) {
        return false;
    }
    ra1 += 3;
#else
    // 48 89 c7    movq  %rax,%rdi
    // e8          callq symbol
    if (*ra4 != 0xe8c78948) {
        return false;
    }
    ra1 += (long)*(const unaligned_int32_t *)(ra1 + 4) + 8l;
    ra2 = (const unaligned_uint16_t *)ra1;
    // ff 25       jmpq *symbol@DYLDMAGIC(%rip)
    if (*ra2 != 0x25ff) {
        return false;
    }
#endif
    ra1 += 6l + (long)*(const unaligned_int32_t *)(ra1 + 2);
    sym = (const void **)ra1;
    if (*sym != objc_retainAutoreleasedReturnValue &&
        *sym != objc_unsafeClaimAutoreleasedReturnValue)
    {
        return false;
    }

    return true;
}

// __x86_64__
#elif __arm__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // if the low bit is set, we're returning to thumb mode
    if ((uintptr_t)ra & 1) {
        // 3f 46          mov r7, r7
        // we mask off the low bit via subtraction
        // 16-bit instructions are well-aligned
        if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
            return true;
        }
    } else {
        // 07 70 a0 e1    mov r7, r7
        // 32-bit instructions may be only 16-bit aligned
        if (*(unaligned_uint32_t *)ra == 0xe1a07007) {
            return true;
        }
    }
    return false;
}

// __arm__
#elif __arm64__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // fd 03 1d aa    mov fp, fp
    // arm64 instructions are well-aligned
    if (*(uint32_t *)ra == 0xaa1d03fd) {
        return true;
    }
    return false;
}

// __arm64__
#elif __i386__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // 89 ed    movl %ebp, %ebp
    if (*(unaligned_uint16_t *)ra == 0xed89) {
        return true;
    }
    return false;
}

// __i386__
#else

#warning unknown architecture

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    return false;
}

// unknown architecture
#endif

static ALWAYS_INLINE ReturnDisposition 
getReturnDisposition()
{
    return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
}


static ALWAYS_INLINE void 
setReturnDisposition(ReturnDisposition disposition)
{
    tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
}

// Try to prepare for optimized return with the given disposition (+0 or +1).
// Returns true if the optimized path is successful.
// Otherwise the return value must be retained and/or autoreleased as usual.
static ALWAYS_INLINE bool 
prepareOptimizedReturn(ReturnDisposition disposition)
{
    ASSERT(getReturnDisposition() == ReturnAtPlus0);

    if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
        if (disposition) setReturnDisposition(disposition);
        return true;
    }

    return false;
}

// Try to accept an optimized return.
// Returns the disposition of the returned object (+0 or +1).
// An un-optimized return is +0.
static ALWAYS_INLINE ReturnDisposition 
acceptOptimizedReturn()
{
    ReturnDisposition disposition = getReturnDisposition();
    setReturnDisposition(ReturnAtPlus0);  // reset to the unoptimized state
    return disposition;
}

// SUPPORT_RETURN_AUTORELEASE
#else
// not SUPPORT_RETURN_AUTORELEASE

static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition __unused)
{
    return false;
}


static ALWAYS_INLINE ReturnDisposition 
acceptOptimizedReturn()
{
    return ReturnAtPlus0;
}

// not SUPPORT_RETURN_AUTORELEASE
#endif

#endif