/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* Inlineable parts of NSObject / objc_object implementation
**********************************************************************/

#ifndef _OBJC_OBJCOBJECT_H_
#define _OBJC_OBJCOBJECT_H_

#include "objc-private.h"


enum ReturnDisposition : bool {
    ReturnAtPlus0 = false, ReturnAtPlus1 = true
};

static ALWAYS_INLINE 
bool prepareOptimizedReturn(ReturnDisposition disposition);


#if SUPPORT_TAGGED_POINTERS

extern "C" {
    extern Class objc_debug_taggedpointer_classes[_OBJC_TAG_SLOT_COUNT];
    extern Class objc_debug_taggedpointer_ext_classes[_OBJC_TAG_EXT_SLOT_COUNT];
}
#define objc_tag_classes objc_debug_taggedpointer_classes
#define objc_tag_ext_classes objc_debug_taggedpointer_ext_classes

#endif


#if SUPPORT_INDEXED_ISA

ALWAYS_INLINE Class &
classForIndex(uintptr_t index) {
    ASSERT(index > 0);
    ASSERT(index < (uintptr_t)objc_indexed_classes_count);
    return objc_indexed_classes[index];
}

#endif


inline bool
objc_object::isClass()
{
    if (isTaggedPointer()) return false;
    return ISA()->isMetaClass();
}


#if SUPPORT_TAGGED_POINTERS

inline Class 
objc_object::getIsa() 
{
    if (fastpath(!isTaggedPointer())) return ISA();

    extern objc_class OBJC_CLASS_$___NSUnrecognizedTaggedPointer;
    uintptr_t slot, ptr = (uintptr_t)this;
    Class cls;

    slot = (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
    cls = objc_tag_classes[slot];
    if (slowpath(cls == (Class)&OBJC_CLASS_$___NSUnrecognizedTaggedPointer)) {
        slot = (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK;
        cls = objc_tag_ext_classes[slot];
    }
    return cls;
}

inline uintptr_t
objc_object::isaBits() const
{
    return isa.bits;
}

inline bool 
objc_object::isTaggedPointer() 
{
    return _objc_isTaggedPointer(this);
}

inline bool 
objc_object::isTaggedPointerOrNil() 
{
    return _objc_isTaggedPointerOrNil(this);
}

inline bool 
objc_object::isBasicTaggedPointer() 
{
    return isTaggedPointer() && !isExtTaggedPointer();
}

inline bool 
objc_object::isExtTaggedPointer() 
{
    uintptr_t ptr = _objc_decodeTaggedPointer(this);
    return (ptr & _OBJC_TAG_EXT_MASK) == _OBJC_TAG_EXT_MASK;
}

// SUPPORT_TAGGED_POINTERS
#else
// not SUPPORT_TAGGED_POINTERS

inline Class objc_object::getIsa() { return ISA(); }
inline uintptr_t objc_object::isaBits() const { return isa.bits; }
inline bool objc_object::isTaggedPointer() { return false; }
inline bool objc_object::isTaggedPointerOrNil() { return !this; }
inline bool objc_object::isBasicTaggedPointer() { return false; }
inline bool objc_object::isExtTaggedPointer() { return false; }

// not SUPPORT_TAGGED_POINTERS
#endif


#if SUPPORT_NONPOINTER_ISA

// Set the class field in an isa. Takes both the class to set and
// a pointer to the object where the isa will ultimately be used.
// This is necessary to get the pointer signing right.
//
// Note: this method does not support setting an indexed isa. When
// indexed isas are in use, it can only be used to set the class of a
// raw isa.
inline void
isa_t::setClass(Class newCls, UNUSED_WITHOUT_PTRAUTH objc_object *obj)
{
    // Match the conditional in isa.h.
#if __has_feature(ptrauth_calls) || TARGET_OS_SIMULATOR
#   if ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_NONE
    // No signing, just use the raw pointer.
    uintptr_t signedCls = (uintptr_t)newCls;

#   elif ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ONLY_SWIFT
    // We're only signing Swift classes. Non-Swift classes just use
    // the raw pointer.
    uintptr_t signedCls = (uintptr_t)newCls;
    if (newCls->isSwiftStable())
        signedCls = (uintptr_t)ptrauth_sign_unauthenticated((void *)newCls, ISA_SIGNING_KEY, ptrauth_blend_discriminator(obj, ISA_SIGNING_DISCRIMINATOR));

#   elif ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ALL
    // We're signing everything
    uintptr_t signedCls = (uintptr_t)ptrauth_sign_unauthenticated((void *)newCls, ISA_SIGNING_KEY, ptrauth_blend_discriminator(obj, ISA_SIGNING_DISCRIMINATOR));

#   else
#       error Unknown isa signing mode.
#   endif

    shiftcls_and_sig = signedCls >> 3;

#elif SUPPORT_INDEXED_ISA
    // Indexed isa only uses this method to set a raw pointer class.
    // Setting an indexed class is handled separately.
    cls = newCls;

#else // Nonpointer isa, no ptrauth
    shiftcls = (uintptr_t)newCls >> 3;
#endif
}

// Get the class pointer out of an isa. When ptrauth is supported,
// this operation is optionally authenticated. Many code paths don't
// need the authentication, so it can be skipped in those cases for
// better performance.
//
// Note: this method does not support retrieving indexed isas. When
// indexed isas are in use, it can only be used to retrieve the class
// of a raw isa.
#if SUPPORT_INDEXED_ISA || (ISA_SIGNING_AUTH_MODE != ISA_SIGNING_AUTH)
#define MAYBE_UNUSED_AUTHENTICATED_PARAM __attribute__((unused))
#else
#define MAYBE_UNUSED_AUTHENTICATED_PARAM UNUSED_WITHOUT_PTRAUTH
#endif

inline Class
isa_t::getClass(MAYBE_UNUSED_AUTHENTICATED_PARAM bool authenticated) {
#if SUPPORT_INDEXED_ISA
    return cls;
#else

    uintptr_t clsbits = bits;

#   if __has_feature(ptrauth_calls)
#       if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
    // Most callers aren't security critical, so skip the
    // authentication unless they ask for it. Message sending and
    // cache filling are protected by the auth code in msgSend.
    if (authenticated) {
        // Mask off all bits besides the class pointer and signature.
        clsbits &= ISA_MASK;
        if (clsbits == 0)
            return Nil;
        clsbits = (uintptr_t)ptrauth_auth_data((void *)clsbits, ISA_SIGNING_KEY, ptrauth_blend_discriminator(this, ISA_SIGNING_DISCRIMINATOR));
    } else {
        // If not authenticating, strip using the precomputed class mask.
        clsbits &= objc_debug_isa_class_mask;
    }
#       else
    // If not authenticating, strip using the precomputed class mask.
    clsbits &= objc_debug_isa_class_mask;
#       endif
#   else
    clsbits &= ISA_MASK;
#   endif

    return (Class)clsbits;
#endif
}

inline Class
isa_t::getDecodedClass(bool authenticated) {
#if SUPPORT_INDEXED_ISA
    if (nonpointer) {
        return classForIndex(indexcls);
    }
    return (Class)cls;
#else
    return getClass(authenticated);
#endif
}

inline Class
objc_object::ISA(bool authenticated)
{
    ASSERT(!isTaggedPointer());
    return isa.getDecodedClass(authenticated);
}

inline Class
objc_object::rawISA()
{
    ASSERT(!isTaggedPointer() && !isa.nonpointer);
    return (Class)isa.bits;
}

inline bool
objc_object::hasNonpointerIsa()
{
    return isa.nonpointer;
}


inline void 
objc_object::initIsa(Class cls)
{
    initIsa(cls, false, false);
}

inline void 
objc_object::initClassIsa(Class cls)
{
    if (DisableNonpointerIsa || cls->instancesRequireRawIsa()) {
        initIsa(cls, false/*not nonpointer*/, false);
    } else {
        initIsa(cls, true/*nonpointer*/, false);
    }
}

inline void
objc_object::initProtocolIsa(Class cls)
{
    return initClassIsa(cls);
}

inline void 
objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
{
    ASSERT(!cls->instancesRequireRawIsa());
    ASSERT(hasCxxDtor == cls->hasCxxDtor());

    initIsa(cls, true, hasCxxDtor);
}

#if !SUPPORT_INDEXED_ISA && !ISA_HAS_CXX_DTOR_BIT
#define UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT __attribute__((unused))
#else
#define UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT
#endif

inline void 
objc_object::initIsa(Class cls, bool nonpointer, UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT bool hasCxxDtor)
{ 
    ASSERT(!isTaggedPointer()); 

    isa_t newisa(0);

    if (!nonpointer) {
        newisa.setClass(cls, this);
    } else {
        ASSERT(!DisableNonpointerIsa);
        ASSERT(!cls->instancesRequireRawIsa());

#if SUPPORT_INDEXED_ISA
        ASSERT(cls->classArrayIndex() > 0);
        newisa.bits = ISA_INDEX_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.indexcls = (uintptr_t)cls->classArrayIndex();
#else
        newisa.bits = ISA_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
#   if ISA_HAS_CXX_DTOR_BIT
        newisa.has_cxx_dtor = hasCxxDtor;
#   endif
        newisa.setClass(cls, this);
#endif
        newisa.extra_rc = 1;
    }

    // This write must be performed in a single store in some cases
    // (for example when realizing a class because other threads
    // may simultaneously try to use the class).
    // fixme use atomics here to guarantee single-store and to
    // guarantee memory order w.r.t. the class index table
    // ...but not too atomic because we don't want to hurt instantiation
    isa = newisa;
}


inline Class 
objc_object::changeIsa(Class newCls)
{
    // This is almost always true but there are 
    // enough edge cases that we can't assert it.
    // assert(newCls->isFuture()  ||  
    //        newCls->isInitializing()  ||  newCls->isInitialized());

    ASSERT(!isTaggedPointer()); 

    isa_t oldisa;
    isa_t newisa;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    oldisa = LoadExclusive(&isa.bits);

    do {
        transcribeToSideTable = false;
        if ((oldisa.bits == 0  ||  oldisa.nonpointer)  &&
            !newCls->isFuture()  &&  newCls->canAllocNonpointer())
        {
            // 0 -> nonpointer
            // nonpointer -> nonpointer
#if SUPPORT_INDEXED_ISA
            if (oldisa.bits == 0) {
                newisa.bits = ISA_INDEX_MAGIC_VALUE;
                newisa.extra_rc = 1;
            } else {
                newisa = oldisa;
            }
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            ASSERT(newCls->classArrayIndex() > 0);
            newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
#else
            if (oldisa.bits == 0) {
                newisa.bits = ISA_MAGIC_VALUE;
                newisa.extra_rc = 1;
            } else {
                newisa = oldisa;
            }
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
#   if ISA_HAS_CXX_DTOR_BIT
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
#   endif
            newisa.setClass(newCls, this);
#endif
        }
        else if (oldisa.nonpointer) {
            // nonpointer -> raw pointer
            // Need to copy retain count et al to side table.
            // Acquire side table lock before setting isa to 
            // prevent races such as concurrent -release.
            if (!sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.setClass(newCls, this);
        }
        else {
            // raw pointer -> raw pointer
            newisa.setClass(newCls, this);
        }
    } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));

    if (transcribeToSideTable) {
        // Copy oldisa's retain count et al to side table.
        // oldisa.has_assoc: nothing to do
        // oldisa.has_cxx_dtor: nothing to do
        sidetable_moveExtraRC_nolock(oldisa.extra_rc,
                                     oldisa.isDeallocating(),
                                     oldisa.weakly_referenced);
    }

    if (sideTableLocked) sidetable_unlock();

    return oldisa.getDecodedClass(false);
}


inline bool
objc_object::hasAssociatedObjects()
{
    if (isTaggedPointer()) return true;
    if (isa.nonpointer) return isa.has_assoc;
    return true;
}


inline void
objc_object::setHasAssociatedObjects()
{
    if (isTaggedPointer()) return;

    if (slowpath(!hasNonpointerIsa() && ISA()->hasCustomRR()) && !ISA()->isFuture() && !ISA()->isMetaClass()) {
        void(*setAssoc)(id, SEL) = (void(*)(id, SEL)) object_getMethodImplementation((id)this, @selector(_noteAssociatedObjects));
        if ((IMP)setAssoc != _objc_msgForward) {
            (*setAssoc)((id)this, @selector(_noteAssociatedObjects));
        }
    }

    isa_t newisa, oldisa = LoadExclusive(&isa.bits);
    do {
        newisa = oldisa;
        if (!newisa.nonpointer  ||  newisa.has_assoc) {
            ClearExclusive(&isa.bits);
            return;
        }
        newisa.has_assoc = true;
    } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
}


inline bool
objc_object::isWeaklyReferenced()
{
    ASSERT(!isTaggedPointer());
    if (isa.nonpointer) return isa.weakly_referenced;
    else return sidetable_isWeaklyReferenced();
}


inline void
objc_object::setWeaklyReferenced_nolock()
{
    isa_t newisa, oldisa = LoadExclusive(&isa.bits);
    do {
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            sidetable_setWeaklyReferenced_nolock();
            return;
        }
        if (newisa.weakly_referenced) {
            ClearExclusive(&isa.bits);
            return;
        }
        newisa.weakly_referenced = true;
    } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
}


inline bool
objc_object::hasCxxDtor()
{
    ASSERT(!isTaggedPointer());
#if ISA_HAS_CXX_DTOR_BIT
    if (isa.nonpointer)
        return isa.has_cxx_dtor;
    else
#endif
        return ISA()->hasCxxDtor();
}


inline bool 
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    if (isa.nonpointer) return isa.isDeallocating();
    return sidetable_isDeallocating();
}


inline void 
objc_object::clearDeallocating()
{
    if (slowpath(!isa.nonpointer)) {
        // Slow path for raw pointer isa.
        sidetable_clearDeallocating();
    }
    else if (slowpath(isa.weakly_referenced  ||  isa.has_sidetable_rc)) {
        // Slow path for non-pointer isa with weak refs and/or side table data.
        clearDeallocating_slow();
    }

    assert(!sidetable_present());
}


inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;  // fixme necessary?

    if (fastpath(isa.nonpointer                     &&
                 !isa.weakly_referenced             &&
                 !isa.has_assoc                     &&
#if ISA_HAS_CXX_DTOR_BIT
                 !isa.has_cxx_dtor                  &&
#else
                 !isa.getClass(false)->hasCxxDtor() &&
#endif
                 !isa.has_sidetable_rc))
    {
        assert(!sidetable_present());
        free(this);
    } 
    else {
        object_dispose((id)this);
    }
}


extern explicit_atomic<id(*)(id)> swiftRetain;
extern explicit_atomic<void(*)(id)> swiftRelease;

// Equivalent to calling [this retain], with shortcuts if there is no override
inline id 
objc_object::retain()
{
    ASSERT(!isTaggedPointer());

    return rootRetain(false, RRVariant::FastOrMsgSend);
}

// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super retain].
//
// tryRetain=true is the -_tryRetain path.
// handleOverflow=false is the frameless fast path.
// handleOverflow=true is the framed slow path including overflow to side table
// The code is structured this way to prevent duplication.

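// Illustrative sketch (not part of the runtime, not compiled): the core of
// the inline retain. The whole isa word is incremented by RC_ONE, i.e. one
// unit of the extra_rc bitfield; the carry out of that field signals
// overflow, at which point half of the count stays inline (RC_HALF) and
// half is pushed to the side table. The helper name is hypothetical.
#if 0
static bool sketch_inlineRetain(isa_t &newisa)
{
    uintptr_t carry;
    newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc++
    if (!carry) return true;                             // common case

    // Overflow: keep RC_HALF inline and mark the side table; the caller
    // then adds the other RC_HALF via sidetable_addExtraRC_nolock().
    newisa.extra_rc = RC_HALF;
    newisa.has_sidetable_rc = true;
    return false;
}
#endif
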
ALWAYS_INLINE id 
objc_object::rootRetain()
{
    return rootRetain(false, RRVariant::Fast);
}

ALWAYS_INLINE bool 
objc_object::rootTryRetain()
{
    return rootRetain(true, RRVariant::Fast) ? true : false;
}

ALWAYS_INLINE id
objc_object::rootRetain(bool tryRetain, objc_object::RRVariant variant)
{
    if (slowpath(isTaggedPointer())) return (id)this;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    isa_t oldisa;
    isa_t newisa;

    oldisa = LoadExclusive(&isa.bits);

    if (variant == RRVariant::FastOrMsgSend) {
        // These checks are only meaningful for objc_retain()
        // They are here so that we avoid a re-load of the isa.
        if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) {
            ClearExclusive(&isa.bits);
            if (oldisa.getDecodedClass(false)->canCallSwiftRR()) {
                return swiftRetain.load(memory_order_relaxed)((id)this);
            }
            return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
        }
    }

    if (slowpath(!oldisa.nonpointer)) {
        // a Class is a Class forever, so we can perform this check once
        // outside of the CAS loop
        if (oldisa.getDecodedClass(false)->isMetaClass()) {
            ClearExclusive(&isa.bits);
            return (id)this;
        }
    }

    do {
        transcribeToSideTable = false;
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
            else return sidetable_retain(sideTableLocked);
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        if (slowpath(newisa.isDeallocating())) {
            ClearExclusive(&isa.bits);
            if (sideTableLocked) {
                ASSERT(variant == RRVariant::Full);
                sidetable_unlock();
            }
            if (slowpath(tryRetain)) {
                return nil;
            } else {
                return (id)this;
            }
        }
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc++

        if (slowpath(carry)) {
            // newisa.extra_rc++ overflowed
            if (variant != RRVariant::Full) {
                ClearExclusive(&isa.bits);
                return rootRetain_overflow(tryRetain);
            }
            // Leave half of the retain counts inline and 
            // prepare to copy the other half to the side table.
            if (!tryRetain && !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));

    if (variant == RRVariant::Full) {
        if (slowpath(transcribeToSideTable)) {
            // Copy the other half of the retain counts to the side table.
            sidetable_addExtraRC_nolock(RC_HALF);
        }

        if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
    } else {
        ASSERT(!transcribeToSideTable);
        ASSERT(!sideTableLocked);
    }

    return (id)this;
}


// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    ASSERT(!isTaggedPointer());

    rootRelease(true, RRVariant::FastOrMsgSend);
}

// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super release].
//
// handleUnderflow=false is the frameless fast path.
// handleUnderflow=true is the framed slow path including side table borrow
// The code is structured this way to prevent duplication.

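// Illustrative sketch (not part of the runtime, not compiled): the inline
// release mirrors the retain sketch above. RC_ONE is subtracted from the
// isa word; a borrow out of the extra_rc field means the inline count hit
// zero, and rootRelease() then either borrows RC_HALF back from the side
// table or lets the object transition to deallocating. The helper name is
// hypothetical.
#if 0
static bool sketch_inlineRelease(isa_t &newisa)
{
    uintptr_t carry;
    newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc--
    return !carry;  // carry set: borrow from the side table or deallocate
}
#endif
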
ALWAYS_INLINE bool 
objc_object::rootRelease()
{
    return rootRelease(true, RRVariant::Fast);
}

ALWAYS_INLINE bool 
objc_object::rootReleaseShouldDealloc()
{
    return rootRelease(false, RRVariant::Fast);
}

ALWAYS_INLINE bool 
objc_object::rootRelease(bool performDealloc, objc_object::RRVariant variant)
{
    if (slowpath(isTaggedPointer())) return false;

    bool sideTableLocked = false;

    isa_t newisa, oldisa;

    oldisa = LoadExclusive(&isa.bits);

    if (variant == RRVariant::FastOrMsgSend) {
        // These checks are only meaningful for objc_release()
        // They are here so that we avoid a re-load of the isa.
        if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) {
            ClearExclusive(&isa.bits);
            if (oldisa.getDecodedClass(false)->canCallSwiftRR()) {
                swiftRelease.load(memory_order_relaxed)((id)this);
                return true;
            }
            ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
            return true;
        }
    }

    if (slowpath(!oldisa.nonpointer)) {
        // a Class is a Class forever, so we can perform this check once
        // outside of the CAS loop
        if (oldisa.getDecodedClass(false)->isMetaClass()) {
            ClearExclusive(&isa.bits);
            return false;
        }
    }

retry:
    do {
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            return sidetable_release(sideTableLocked, performDealloc);
        }
        if (slowpath(newisa.isDeallocating())) {
            ClearExclusive(&isa.bits);
            if (sideTableLocked) {
                ASSERT(variant == RRVariant::Full);
                sidetable_unlock();
            }
            return false;
        }

        // don't check newisa.fast_rr; we already called any RR overrides
        uintptr_t carry;
        newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc--
        if (slowpath(carry)) {
            // don't ClearExclusive()
            goto underflow;
        }
    } while (slowpath(!StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits)));

    if (slowpath(newisa.isDeallocating()))
        goto deallocate;

    if (variant == RRVariant::Full) {
        if (slowpath(sideTableLocked)) sidetable_unlock();
    } else {
        ASSERT(!sideTableLocked);
    }
    return false;

 underflow:
    // newisa.extra_rc-- underflowed: borrow from side table or deallocate

    // abandon newisa to undo the decrement
    newisa = oldisa;

    if (slowpath(newisa.has_sidetable_rc)) {
        if (variant != RRVariant::Full) {
            ClearExclusive(&isa.bits);
            return rootRelease_underflow(performDealloc);
        }

        // Transfer retain count from side table to inline storage.

        if (!sideTableLocked) {
            ClearExclusive(&isa.bits);
            sidetable_lock();
            sideTableLocked = true;
            // Need to start over to avoid a race against 
            // the nonpointer -> raw pointer transition.
            oldisa = LoadExclusive(&isa.bits);
            goto retry;
        }

        // Try to remove some retain counts from the side table.
        auto borrow = sidetable_subExtraRC_nolock(RC_HALF);

        bool emptySideTable = borrow.remaining == 0; // we'll clear the side table if no refcounts remain there

        if (borrow.borrowed > 0) {
            // Side table retain count decreased.
            // Try to add them to the inline count.
            bool didTransitionToDeallocating = false;
            newisa.extra_rc = borrow.borrowed - 1;  // redo the original decrement too
            newisa.has_sidetable_rc = !emptySideTable;

            bool stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits);

            if (!stored && oldisa.nonpointer) {
                // Inline update failed. 
                // Try it again right now. This prevents livelock on LL/SC 
                // architectures where the side table access itself may have 
                // dropped the reservation.
                uintptr_t overflow;
                newisa.bits =
                    addc(oldisa.bits, RC_ONE * (borrow.borrowed-1), 0, &overflow);
                newisa.has_sidetable_rc = !emptySideTable;
                if (!overflow) {
                    stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits);
                    if (stored) {
                        didTransitionToDeallocating = newisa.isDeallocating();
                    }
                }
            }

            if (!stored) {
                // Inline update failed.
                // Put the retains back in the side table.
                ClearExclusive(&isa.bits);
                sidetable_addExtraRC_nolock(borrow.borrowed);
                oldisa = LoadExclusive(&isa.bits);
                goto retry;
            }

            // Decrement successful after borrowing from side table.
            if (emptySideTable)
                sidetable_clearExtraRC_nolock();

            if (!didTransitionToDeallocating) {
                if (slowpath(sideTableLocked)) sidetable_unlock();
                return false;
            }
        }
        else {
            // Side table is empty after all. Fall-through to the dealloc path.
        }
    }

deallocate:
    // Really deallocate.

    ASSERT(newisa.isDeallocating());
    ASSERT(isa.isDeallocating());

    if (slowpath(sideTableLocked)) sidetable_unlock();

    __c11_atomic_thread_fence(__ATOMIC_ACQUIRE);

    if (performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
    }
    return true;
}


// Equivalent to [this autorelease], with shortcuts if there is no override
inline id 
objc_object::autorelease()
{
    ASSERT(!isTaggedPointer());
    if (fastpath(!ISA()->hasCustomRR())) {
        return rootAutorelease();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(autorelease));
}

// Base autorelease implementation, ignoring overrides.
inline id 
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


inline uintptr_t 
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;

    sidetable_lock();
    isa_t bits = __c11_atomic_load((_Atomic uintptr_t *)&isa.bits, __ATOMIC_RELAXED);
    if (bits.nonpointer) {
        uintptr_t rc = bits.extra_rc;
        if (bits.has_sidetable_rc) {
            rc += sidetable_getExtraRC_nolock();
        }
        sidetable_unlock();
        return rc;
    }

    sidetable_unlock();
    return sidetable_retainCount();
}


// SUPPORT_NONPOINTER_ISA
#else
// not SUPPORT_NONPOINTER_ISA


inline void
isa_t::setClass(Class cls, objc_object *obj)
{
    this->cls = cls;
}

inline Class
isa_t::getClass(bool authenticated __unused)
{
    return cls;
}

inline Class
isa_t::getDecodedClass(bool authenticated)
{
    return getClass(authenticated);
}

inline Class 
objc_object::ISA(bool authenticated __unused)
{
    ASSERT(!isTaggedPointer()); 
    return isa.getClass(/*authenticated*/false);
}

inline Class
objc_object::rawISA()
{
    return ISA();
}

inline bool
objc_object::hasNonpointerIsa()
{
    return false;
}

inline void 
objc_object::initIsa(Class cls)
{
    ASSERT(!isTaggedPointer()); 
    isa.setClass(cls, this);
}

inline void 
objc_object::initClassIsa(Class cls)
{
    initIsa(cls);
}

inline void 
objc_object::initProtocolIsa(Class cls)
{
    initIsa(cls);
}

inline void 
objc_object::initInstanceIsa(Class cls, bool)
{
    initIsa(cls);
}

inline void 
objc_object::initIsa(Class cls, bool, bool)
{ 
    initIsa(cls);
}

inline Class 
objc_object::changeIsa(Class cls)
{
    // This is almost always true but there are 
    // enough edge cases that we can't assert it.
    // assert(cls->isFuture()  ||  
    //        cls->isInitializing()  ||  cls->isInitialized());

    ASSERT(!isTaggedPointer()); 

    isa_t newisa, oldisa;
    newisa.setClass(cls, this);
    oldisa.bits = __c11_atomic_exchange((_Atomic uintptr_t *)&isa.bits, newisa.bits, __ATOMIC_RELAXED);

    Class oldcls = oldisa.getDecodedClass(/*authenticated*/false);
    if (oldcls  &&  oldcls->instancesHaveAssociatedObjects()) {
        cls->setInstancesHaveAssociatedObjects();
    }

    return oldcls;
}

inline bool
objc_object::hasAssociatedObjects()
{
    return getIsa()->instancesHaveAssociatedObjects();
}

inline void
objc_object::setHasAssociatedObjects()
{
    getIsa()->setInstancesHaveAssociatedObjects();
}

inline bool
objc_object::isWeaklyReferenced()
{
    ASSERT(!isTaggedPointer());
    return sidetable_isWeaklyReferenced();
}

inline void
objc_object::setWeaklyReferenced_nolock()
{
    ASSERT(!isTaggedPointer());
    sidetable_setWeaklyReferenced_nolock();
}

inline bool
objc_object::hasCxxDtor()
{
    ASSERT(!isTaggedPointer());
    return isa.getClass(/*authenticated*/false)->hasCxxDtor();
}

inline bool 
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    return sidetable_isDeallocating();
}

inline void 
objc_object::clearDeallocating()
{
    sidetable_clearDeallocating();
}

inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;
    object_dispose((id)this);
}


// Equivalent to calling [this retain], with shortcuts if there is no override
inline id 
objc_object::retain()
{
    ASSERT(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return sidetable_retain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
}

// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super retain].
inline id 
objc_object::rootRetain()
{
    if (isTaggedPointer()) return (id)this;
    return sidetable_retain();
}


// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    ASSERT(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        sidetable_release();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
}

// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super release].
inline bool 
objc_object::rootRelease()
{
    if (isTaggedPointer()) return false;
    return sidetable_release();
}

inline bool 
objc_object::rootReleaseShouldDealloc()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(/*locked*/false, /*performDealloc*/false);
}


// Equivalent to [this autorelease], with shortcuts if there is no override
inline id 
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(autorelease));
}

// Base autorelease implementation, ignoring overrides.
inline id 
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


// Base tryRetain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super _tryRetain].
inline bool 
objc_object::rootTryRetain()
{
    if (isTaggedPointer()) return true;
    return sidetable_tryRetain();
}

inline uintptr_t 
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;
    return sidetable_retainCount();
}


// not SUPPORT_NONPOINTER_ISA
#endif


#if SUPPORT_RETURN_AUTORELEASE

/***********************************************************************
  Fast handling of return through Cocoa's +0 autoreleasing convention.
  The caller and callee cooperate to keep the returned object 
  out of the autorelease pool and eliminate redundant retain/release pairs.

  An optimized callee looks at the caller's instructions following the 
  return. If the caller's instructions are also optimized then the callee 
  skips all retain count operations: no autorelease, no retain/autorelease.
  Instead it saves the result's current retain count (+0 or +1) in 
  thread-local storage. If the caller does not look optimized then 
  the callee performs autorelease or retain/autorelease as usual.

  An optimized caller looks at the thread-local storage. If the result 
  is set then it performs any retain or release needed to change the 
  result from the retain count left by the callee to the retain count 
  desired by the caller. Otherwise the caller assumes the result is 
  currently at +0 from an unoptimized callee and performs any retain 
  needed for that case.

  There are two optimized callees:
    objc_autoreleaseReturnValue
      result is currently +1. The unoptimized path autoreleases it.
    objc_retainAutoreleaseReturnValue
      result is currently +0. The unoptimized path retains and autoreleases it.

  There are two optimized callers:
    objc_retainAutoreleasedReturnValue
      caller wants the value at +1. The unoptimized path retains it.
    objc_unsafeClaimAutoreleasedReturnValue
      caller wants the value at +0 unsafely. The unoptimized path does nothing.

  Example:

    Callee:
      // compute ret at +1
      return objc_autoreleaseReturnValue(ret);

    Caller:
      ret = callee();
      ret = objc_retainAutoreleasedReturnValue(ret);
      // use ret at +1 here

    Callee sees the optimized caller, sets TLS, and leaves the result at +1.
    Caller sees the TLS, clears it, and accepts the result at +1 as-is.

  The callee's recognition of the optimized caller is architecture-dependent.
  x86_64: Callee looks for `mov rax, rdi` followed by a call or 
    jump instruction to objc_retainAutoreleasedReturnValue or 
    objc_unsafeClaimAutoreleasedReturnValue. 
  i386:  Callee looks for a magic nop `movl %ebp, %ebp` (frame pointer register).
  armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register). 
  arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register). 

  Tagged pointer objects do participate in the optimized return scheme, 
  because it saves message sends. They are not entered in the autorelease 
  pool in the unoptimized case.
**********************************************************************/
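
// For reference (illustrative, not part of this file): under ARC the
// compiler emits this handshake automatically. A caller written as
//
//     Widget *w = [factory makeWidget];
//
// is lowered to roughly
//
//     Widget *w = ((Widget *(*)(id, SEL))objc_msgSend)(factory, @selector(makeWidget));
//     w = objc_retainAutoreleasedReturnValue(w);   // preceded by the magic marker
//
// so application code rarely calls these entry points by hand. Widget,
// factory, and makeWidget are hypothetical names used only for this example.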

# if __x86_64__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void * const ra0)
{
    const uint8_t *ra1 = (const uint8_t *)ra0;
    const unaligned_uint16_t *ra2;
    const unaligned_uint32_t *ra4 = (const unaligned_uint32_t *)ra1;
    const void **sym;

#define PREFER_GOTPCREL 0
#if PREFER_GOTPCREL
    // 48 89 c7    movq  %rax,%rdi
    // ff 15       callq *symbol@GOTPCREL(%rip)
    if (*ra4 != 0xffc78948) {
        return false;
    }
    if (ra1[4] != 0x15) {
        return false;
    }
    ra1 += 3;
#else
    // 48 89 c7    movq  %rax,%rdi
    // e8          callq symbol
    if (*ra4 != 0xe8c78948) {
        return false;
    }
    ra1 += (long)*(const unaligned_int32_t *)(ra1 + 4) + 8l;
    ra2 = (const unaligned_uint16_t *)ra1;
    // ff 25       jmpq *symbol@DYLDMAGIC(%rip)
    if (*ra2 != 0x25ff) {
        return false;
    }
#endif
    ra1 += 6l + (long)*(const unaligned_int32_t *)(ra1 + 2);
    sym = (const void **)ra1;
    if (*sym != objc_retainAutoreleasedReturnValue  &&
        *sym != objc_unsafeClaimAutoreleasedReturnValue)
    {
        return false;
    }

    return true;
}

// __x86_64__
# elif __arm__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // if the low bit is set, we're returning to thumb mode
    if ((uintptr_t)ra & 1) {
        // 3f 46          mov r7, r7
        // we mask off the low bit via subtraction
        // 16-bit instructions are well-aligned
        if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
            return true;
        }
    } else {
        // 07 70 a0 e1    mov r7, r7
        // 32-bit instructions may be only 16-bit aligned
        if (*(unaligned_uint32_t *)ra == 0xe1a07007) {
            return true;
        }
    }
    return false;
}

// __arm__
# elif __arm64__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // fd 03 1d aa    mov fp, fp
    // arm64 instructions are well-aligned
    if (*(uint32_t *)ra == 0xaa1d03fd) {
        return true;
    }
    return false;
}

// __arm64__
# elif __i386__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // 89 ed    movl %ebp, %ebp
    if (*(unaligned_uint16_t *)ra == 0xed89) {
        return true;
    }
    return false;
}

// __i386__
# else

#warning unknown architecture

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    return false;
}

// unknown architecture
# endif


static ALWAYS_INLINE ReturnDisposition 
getReturnDisposition()
{
    return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
}


static ALWAYS_INLINE void 
setReturnDisposition(ReturnDisposition disposition)
{
    tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
}


// Try to prepare for optimized return with the given disposition (+0 or +1).
// Returns true if the optimized path is successful.
// Otherwise the return value must be retained and/or autoreleased as usual.
static ALWAYS_INLINE bool 
prepareOptimizedReturn(ReturnDisposition disposition)
{
    ASSERT(getReturnDisposition() == ReturnAtPlus0);

    if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
        if (disposition) setReturnDisposition(disposition);
        return true;
    }

    return false;
}


// Try to accept an optimized return.
// Returns the disposition of the returned object (+0 or +1).
// An un-optimized return is +0.
static ALWAYS_INLINE ReturnDisposition 
acceptOptimizedReturn()
{
    ReturnDisposition disposition = getReturnDisposition();
    setReturnDisposition(ReturnAtPlus0);  // reset to the unoptimized state
    return disposition;
}

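
// Illustrative sketch (not part of this file, not compiled): how the two
// halves pair up. objc_autoreleaseReturnValue() calls
// prepareOptimizedReturn(ReturnAtPlus1); when the caller looks optimized it
// records the disposition in TLS and skips the autorelease.
// objc_retainAutoreleasedReturnValue() then calls acceptOptimizedReturn();
// if it reads ReturnAtPlus1 it skips the retain. These are condensed,
// hypothetical versions of those entry points, not their real bodies.
#if 0
id sketch_autoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;  // handshake taken
    return objc_autorelease(obj);                           // unoptimized path
}

id sketch_retainAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;  // already at +1
    return objc_retain(obj);                                   // came in at +0
}
#endif
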

// SUPPORT_RETURN_AUTORELEASE
#else
// not SUPPORT_RETURN_AUTORELEASE


static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition __unused)
{
    return false;
}


static ALWAYS_INLINE ReturnDisposition 
acceptOptimizedReturn()
{
    return ReturnAtPlus0;
}


// not SUPPORT_RETURN_AUTORELEASE
#endif

#endif