/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* Inlineable parts of NSObject / objc_object implementation
**********************************************************************/

#ifndef _OBJC_OBJCOBJECT_H_
#define _OBJC_OBJCOBJECT_H_

#include "objc-private.h"


enum ReturnDisposition : bool {
    ReturnAtPlus0 = false, ReturnAtPlus1 = true
};

static ALWAYS_INLINE 
bool prepareOptimizedReturn(ReturnDisposition disposition);


#if SUPPORT_TAGGED_POINTERS

extern "C" { 
    extern Class objc_debug_taggedpointer_classes[_OBJC_TAG_SLOT_COUNT];
    extern Class objc_debug_taggedpointer_ext_classes[_OBJC_TAG_EXT_SLOT_COUNT];
}
#define objc_tag_classes objc_debug_taggedpointer_classes
#define objc_tag_ext_classes objc_debug_taggedpointer_ext_classes

#endif

#if SUPPORT_INDEXED_ISA

ALWAYS_INLINE Class &
classForIndex(uintptr_t index) {
    assert(index > 0);
    assert(index < (uintptr_t)objc_indexed_classes_count);
    return objc_indexed_classes[index];
}

#endif
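
// Illustrative sketch: on SUPPORT_INDEXED_ISA targets the isa field stores a
// small index into objc_indexed_classes instead of a shifted class pointer,
// so class lookup reduces to something like
//
//   Class cls = objc_indexed_classes[isa.indexcls];   // sketch; see ISA() below
//
// (SUPPORT_INDEXED_ISA itself is configured in objc-config.h.)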


inline bool
objc_object::isClass()
{
    if (isTaggedPointer()) return false;
    return ISA()->isMetaClass();
}


#if SUPPORT_TAGGED_POINTERS

inline Class 
objc_object::getIsa() 
{
    if (!isTaggedPointer()) return ISA();

    uintptr_t ptr = (uintptr_t)this;
    if (isExtTaggedPointer()) {
        uintptr_t slot = 
            (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK;
        return objc_tag_ext_classes[slot];
    } else {
        uintptr_t slot = 
            (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
        return objc_tag_classes[slot];
    }
}
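
// Illustrative sketch of the lookup above: a tagged pointer never points at
// allocated memory, so its class cannot come from an isa field. The payload
// bits carry the value and the tag bits select a table slot, e.g. for a
// basic tag:
//
//   uintptr_t slot = ((uintptr_t)obj >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
//   Class cls = objc_tag_classes[slot];
//
// Extended tags use the _OBJC_TAG_EXT_* constants and objc_tag_ext_classes;
// the tables are populated by _objc_registerTaggedPointerClass().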


inline bool 
objc_object::isTaggedPointer() 
{
    return _objc_isTaggedPointer(this);
}

inline bool 
objc_object::isBasicTaggedPointer() 
{
    return isTaggedPointer()  &&  !isExtTaggedPointer();
}

inline bool 
objc_object::isExtTaggedPointer() 
{
    uintptr_t ptr = _objc_decodeTaggedPointer(this);
    return (ptr & _OBJC_TAG_EXT_MASK) == _OBJC_TAG_EXT_MASK;
}


// SUPPORT_TAGGED_POINTERS
#else
// not SUPPORT_TAGGED_POINTERS

inline Class 
objc_object::getIsa() 
{
    return ISA();
}

inline bool objc_object::isTaggedPointer()      { return false; }
inline bool objc_object::isBasicTaggedPointer() { return false; }
inline bool objc_object::isExtTaggedPointer()   { return false; }

// not SUPPORT_TAGGED_POINTERS
#endif


#if SUPPORT_NONPOINTER_ISA

inline Class 
objc_object::ISA() 
{
    assert(!isTaggedPointer()); 
#if SUPPORT_INDEXED_ISA
    if (isa.nonpointer) {
        uintptr_t slot = isa.indexcls;
        return classForIndex((unsigned)slot);
    }
    return (Class)isa.bits;
#else
    return (Class)(isa.bits & ISA_MASK);
#endif
}


inline bool
objc_object::hasNonpointerIsa()
{
    return isa.nonpointer;
}


inline void 
objc_object::initIsa(Class cls)
{
    initIsa(cls, false, false);
}


inline void 
objc_object::initClassIsa(Class cls)
{
    if (DisableNonpointerIsa  ||  cls->instancesRequireRawIsa()) {
        initIsa(cls, false/*not nonpointer*/, false);
    } else {
        initIsa(cls, true/*nonpointer*/, false);
    }
}


inline void
objc_object::initProtocolIsa(Class cls)
{
    return initClassIsa(cls);
}


inline void 
objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
{
    assert(!cls->instancesRequireRawIsa());
    assert(hasCxxDtor == cls->hasCxxDtor());

    initIsa(cls, true, hasCxxDtor);
}


inline void 
objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor) 
{ 
    assert(!isTaggedPointer()); 

    if (!nonpointer) {
        isa.cls = cls;
    } else {
        assert(!DisableNonpointerIsa);
        assert(!cls->instancesRequireRawIsa());

        isa_t newisa(0);

#if SUPPORT_INDEXED_ISA
        assert(cls->classArrayIndex() > 0);
        newisa.bits = ISA_INDEX_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.indexcls = (uintptr_t)cls->classArrayIndex();
#else
        newisa.bits = ISA_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.shiftcls = (uintptr_t)cls >> 3;
#endif

        // This write must be performed in a single store in some cases
        // (for example when realizing a class because other threads
        // may simultaneously try to use the class).
        // fixme use atomics here to guarantee single-store and to
        // guarantee memory order w.r.t. the class index table
        // ...but not too atomic because we don't want to hurt instantiation
        isa = newisa;
    }
}
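
// Illustrative sketch of the packed-isa encoding set up above: on the
// non-indexed path the class pointer is stored shifted right by 3, so it can
// be recovered by shifting back (equivalent to masking with ISA_MASK in ISA()):
//
//   Class cls = (Class)((uintptr_t)newisa.shiftcls << 3);   // sketch
//
// The remaining bit-fields (magic, nonpointer, has_cxx_dtor, extra_rc, ...)
// are defined per-architecture by isa_t in objc-private.h.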


inline Class 
objc_object::changeIsa(Class newCls)
{
    // This is almost always true but there are 
    // enough edge cases that we can't assert it.
    // assert(newCls->isFuture()  || 
    //        newCls->isInitializing()  ||  newCls->isInitialized());

    assert(!isTaggedPointer()); 

    isa_t oldisa;
    isa_t newisa;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        if ((oldisa.bits == 0  ||  oldisa.nonpointer)  &&
            !newCls->isFuture()  &&  newCls->canAllocNonpointer())
        {
            // 0 -> nonpointer
            // nonpointer -> nonpointer
#if SUPPORT_INDEXED_ISA
            if (oldisa.bits == 0) newisa.bits = ISA_INDEX_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            assert(newCls->classArrayIndex() > 0);
            newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
#else
            if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            newisa.shiftcls = (uintptr_t)newCls >> 3;
#endif
        }
        else if (oldisa.nonpointer) {
            // nonpointer -> raw pointer
            // Need to copy retain count et al to side table.
            // Acquire side table lock before setting isa to 
            // prevent races such as concurrent -release.
            if (!sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.cls = newCls;
        }
        else {
            // raw pointer -> raw pointer
            newisa.cls = newCls;
        }
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (transcribeToSideTable) {
        // Copy oldisa's retain count et al to side table.
        // oldisa.has_assoc: nothing to do
        // oldisa.has_cxx_dtor: nothing to do
        sidetable_moveExtraRC_nolock(oldisa.extra_rc, 
                                     oldisa.deallocating, 
                                     oldisa.weakly_referenced);
    }

    if (sideTableLocked) sidetable_unlock();

    if (oldisa.nonpointer) {
#if SUPPORT_INDEXED_ISA
        return classForIndex(oldisa.indexcls);
#else
        return (Class)((uintptr_t)oldisa.shiftcls << 3);
#endif
    }
    else {
        return oldisa.cls;
    }
}
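
// Usage note (sketch): object_setClass() routes through changeIsa(), which is
// why the retain count and flags are transcribed to the side table above when
// an isa transitions from nonpointer to raw pointer form.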


inline bool
objc_object::hasAssociatedObjects()
{
    if (isTaggedPointer()) return true;
    if (isa.nonpointer) return isa.has_assoc;
    return true;
}


inline void
objc_object::setHasAssociatedObjects()
{
    if (isTaggedPointer()) return;

 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (!newisa.nonpointer  ||  newisa.has_assoc) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.has_assoc = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}


inline bool
objc_object::isWeaklyReferenced()
{
    assert(!isTaggedPointer());
    if (isa.nonpointer) return isa.weakly_referenced;
    else return sidetable_isWeaklyReferenced();
}


inline void
objc_object::setWeaklyReferenced_nolock()
{
 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (slowpath(!newisa.nonpointer)) {
        ClearExclusive(&isa.bits);
        sidetable_setWeaklyReferenced_nolock();
        return;
    }
    if (newisa.weakly_referenced) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.weakly_referenced = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}


inline bool
objc_object::hasCxxDtor()
{
    assert(!isTaggedPointer());
    if (isa.nonpointer) return isa.has_cxx_dtor;
    else return isa.cls->hasCxxDtor();
}


inline bool 
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    if (isa.nonpointer) return isa.deallocating;
    return sidetable_isDeallocating();
}


inline void 
objc_object::clearDeallocating()
{
    if (slowpath(!isa.nonpointer)) {
        // Slow path for raw pointer isa.
        sidetable_clearDeallocating();
    }
    else if (slowpath(isa.weakly_referenced  ||  isa.has_sidetable_rc)) {
        // Slow path for non-pointer isa with weak refs and/or side table data.
        clearDeallocating_slow();
    }

    assert(!sidetable_present());
}


inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;  // fixme necessary?

    if (fastpath(isa.nonpointer  &&  
                 !isa.weakly_referenced  &&  
                 !isa.has_assoc  &&  
                 !isa.has_cxx_dtor  &&  
                 !isa.has_sidetable_rc))
    {
        assert(!sidetable_present());
        free(this);
    } 
    else {
        object_dispose((id)this);
    }
}


// Equivalent to calling [this retain], with shortcuts if there is no override
inline id 
objc_object::retain()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return rootRetain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
}


// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super retain].
//
// tryRetain=true is the -_tryRetain path.
// handleOverflow=false is the frameless fast path.
// handleOverflow=true is the framed slow path including overflow to side table.
// The code is structured this way to prevent duplication.
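//
// Illustrative call graph (sketch):
//   retain()           -> rootRetain()          -> rootRetain(false, false)
//   -_tryRetain        -> rootTryRetain()       -> rootRetain(true,  false)
//   extra_rc overflow  -> rootRetain_overflow() -> rootRetain(tryRetain, true)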

ALWAYS_INLINE id 
objc_object::rootRetain()
{
    return rootRetain(false, false);
}

ALWAYS_INLINE bool 
objc_object::rootTryRetain()
{
    return rootRetain(true, false) ? true : false;
}

ALWAYS_INLINE id 
objc_object::rootRetain(bool tryRetain, bool handleOverflow)
{
    if (isTaggedPointer()) return (id)this;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    isa_t oldisa;
    isa_t newisa;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain  &&  sideTableLocked) sidetable_unlock();
            if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
            else return sidetable_retain();
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        if (slowpath(tryRetain  &&  newisa.deallocating)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain  &&  sideTableLocked) sidetable_unlock();
            return nil;
        }
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc++

        if (slowpath(carry)) {
            // newisa.extra_rc++ overflowed
            if (!handleOverflow) {
                ClearExclusive(&isa.bits);
                return rootRetain_overflow(tryRetain);
            }
            // Leave half of the retain counts inline and 
            // prepare to copy the other half to the side table.
            if (!tryRetain  &&  !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));

    if (slowpath(transcribeToSideTable)) {
        // Copy the other half of the retain counts to the side table.
        sidetable_addExtraRC_nolock(RC_HALF);
    }

    if (slowpath(!tryRetain  &&  sideTableLocked)) sidetable_unlock();
    return (id)this;
}
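
// Worked example of the overflow split above (sketch): when the inline
// extra_rc field is full, the next retain leaves extra_rc == RC_HALF inline
// and adds another RC_HALF to the side table via sidetable_addExtraRC_nolock(),
// so the total count is preserved and subsequent retains stay on the inline
// fast path until the field fills up again.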


// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        rootRelease();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
}


// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super release].
//
// handleUnderflow=false is the frameless fast path.
// handleUnderflow=true is the framed slow path including side table borrow.
// The code is structured this way to prevent duplication.
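//
// Illustrative call graph (sketch):
//   release()                  -> rootRelease()           -> rootRelease(true,  false)
//   rootReleaseShouldDealloc()                            -> rootRelease(false, false)
//   extra_rc underflow         -> rootRelease_underflow() -> rootRelease(performDealloc, true)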

ALWAYS_INLINE bool 
objc_object::rootRelease()
{
    return rootRelease(true, false);
}

ALWAYS_INLINE bool 
objc_object::rootReleaseShouldDealloc()
{
    return rootRelease(false, false);
}

ALWAYS_INLINE bool 
objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
{
    if (isTaggedPointer()) return false;

    bool sideTableLocked = false;

    isa_t oldisa;
    isa_t newisa;

 retry:
    do {
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (sideTableLocked) sidetable_unlock();
            return sidetable_release(performDealloc);
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        uintptr_t carry;
        newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc--
        if (slowpath(carry)) {
            // don't ClearExclusive()
            goto underflow;
        }
    } while (slowpath(!StoreReleaseExclusive(&isa.bits, 
                                             oldisa.bits, newisa.bits)));

    if (slowpath(sideTableLocked)) sidetable_unlock();
    return false;

 underflow:
    // newisa.extra_rc-- underflowed: borrow from side table or deallocate

    // abandon newisa to undo the decrement
    newisa = oldisa;

    if (slowpath(newisa.has_sidetable_rc)) {
        if (!handleUnderflow) {
            ClearExclusive(&isa.bits);
            return rootRelease_underflow(performDealloc);
        }

        // Transfer retain count from side table to inline storage.

        if (!sideTableLocked) {
            ClearExclusive(&isa.bits);
            sidetable_lock();
            sideTableLocked = true;
            // Need to start over to avoid a race against 
            // the nonpointer -> raw pointer transition.
            goto retry;
        }

        // Try to remove some retain counts from the side table.
        size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);

        // To avoid races, has_sidetable_rc must remain set 
        // even if the side table count is now zero.

        if (borrowed > 0) {
            // Side table retain count decreased.
            // Try to add them to the inline count.
            newisa.extra_rc = borrowed - 1;  // redo the original decrement too
            bool stored = StoreReleaseExclusive(&isa.bits, 
                                                oldisa.bits, newisa.bits);
            if (!stored) {
                // Inline update failed. 
                // Try it again right now. This prevents livelock on LL/SC 
                // architectures where the side table access itself may have 
                // dropped the reservation.
                isa_t oldisa2 = LoadExclusive(&isa.bits);
                isa_t newisa2 = oldisa2;
                if (newisa2.nonpointer) {
                    uintptr_t overflow;
                    newisa2.bits = 
                        addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
                    if (!overflow) {
                        stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits, 
                                                       newisa2.bits);
                    }
                }
            }

            if (!stored) {
                // Inline update failed.
                // Put the retains back in the side table.
                sidetable_addExtraRC_nolock(borrowed);
                goto retry;
            }

            // Decrement successful after borrowing from side table.
            // This decrement cannot be the deallocating decrement - the side 
            // table lock and has_sidetable_rc bit ensure that if everyone 
            // else tried to -release while we worked, the last one would block.
            sidetable_unlock();
            return false;
        }
        else {
            // Side table is empty after all. Fall-through to the dealloc path.
        }
    }

    // Really deallocate.

    if (slowpath(newisa.deallocating)) {
        ClearExclusive(&isa.bits);
        if (sideTableLocked) sidetable_unlock();
        return overrelease_error();
        // does not actually return
    }
    newisa.deallocating = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;

    if (slowpath(sideTableLocked)) sidetable_unlock();

    __sync_synchronize();
    if (performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
    }
    return true;
}
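
// Worked example of the borrow above (sketch): if the inline count underflows
// and the side table holds at least RC_HALF, sidetable_subExtraRC_nolock(RC_HALF)
// returns borrowed == RC_HALF and the inline field is refilled with
// borrowed - 1 (the -1 re-applies the release that triggered the borrow).
// Only when the side table turns out to be empty does the object fall through
// to the dealloc path.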


// Equivalent to [this autorelease], with shortcuts if there is no override
inline id 
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
}


// Base autorelease implementation, ignoring overrides.
inline id 
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


inline uintptr_t 
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;

    sidetable_lock();
    isa_t bits = LoadExclusive(&isa.bits);
    ClearExclusive(&isa.bits);
    if (bits.nonpointer) {
        uintptr_t rc = 1 + bits.extra_rc;
        if (bits.has_sidetable_rc) {
            rc += sidetable_getExtraRC_nolock();
        }
        sidetable_unlock();
        return rc;
    }

    sidetable_unlock();
    return sidetable_retainCount();
}
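
// Note on the value above (sketch): extra_rc counts retains beyond the
// implicit +1, so a freshly allocated, never-retained object reports
// rootRetainCount() == 1 (1 + extra_rc with extra_rc == 0), plus anything
// that has spilled to the side table.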


// SUPPORT_NONPOINTER_ISA
#else
// not SUPPORT_NONPOINTER_ISA


inline Class 
objc_object::ISA() 
{
    assert(!isTaggedPointer()); 
    return isa.cls;
}

inline bool
objc_object::hasNonpointerIsa()
{
    return false;
}

inline void 
objc_object::initIsa(Class cls)
{
    assert(!isTaggedPointer()); 
    isa = (uintptr_t)cls; 
}


inline void 
objc_object::initClassIsa(Class cls)
{
    initIsa(cls);
}

inline void 
objc_object::initProtocolIsa(Class cls)
{
    initIsa(cls);
}

inline void 
objc_object::initInstanceIsa(Class cls, bool)
{
    initIsa(cls);
}

inline void 
objc_object::initIsa(Class cls, bool, bool)
{ 
    initIsa(cls);
}


inline Class 
objc_object::changeIsa(Class cls)
{
    // This is almost always true but there are 
    // enough edge cases that we can't assert it.
    // assert(cls->isFuture()  ||  
    //        cls->isInitializing()  ||  cls->isInitialized());

    assert(!isTaggedPointer()); 

    isa_t oldisa, newisa;
    newisa.cls = cls;
    do {
        oldisa = LoadExclusive(&isa.bits);
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (oldisa.cls  &&  oldisa.cls->instancesHaveAssociatedObjects()) {
        cls->setInstancesHaveAssociatedObjects();
    }

    return oldisa.cls;
}


inline bool
objc_object::hasAssociatedObjects()
{
    return getIsa()->instancesHaveAssociatedObjects();
}

inline void
objc_object::setHasAssociatedObjects()
{
    getIsa()->setInstancesHaveAssociatedObjects();
}

inline bool
objc_object::isWeaklyReferenced()
{
    assert(!isTaggedPointer());

    return sidetable_isWeaklyReferenced();
}

inline void
objc_object::setWeaklyReferenced_nolock()
{
    assert(!isTaggedPointer());

    sidetable_setWeaklyReferenced_nolock();
}

inline bool
objc_object::hasCxxDtor()
{
    assert(!isTaggedPointer());
    return isa.cls->hasCxxDtor();
}

inline bool 
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    return sidetable_isDeallocating();
}

inline void 
objc_object::clearDeallocating()
{
    sidetable_clearDeallocating();
}


inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;
    object_dispose((id)this);
}


// Equivalent to calling [this retain], with shortcuts if there is no override
inline id 
objc_object::retain()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return sidetable_retain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
}


// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super retain].
inline id 
objc_object::rootRetain()
{
    if (isTaggedPointer()) return (id)this;
    return sidetable_retain();
}


// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        sidetable_release();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
}


// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super release].
inline bool
objc_object::rootRelease()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(true);
}

inline bool
objc_object::rootReleaseShouldDealloc()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(false);
}


// Equivalent to [this autorelease], with shortcuts if there is no override
inline id 
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
}


// Base autorelease implementation, ignoring overrides.
inline id 
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


// Base tryRetain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super _tryRetain].
inline bool 
objc_object::rootTryRetain()
{
    if (isTaggedPointer()) return true;
    return sidetable_tryRetain();
}

inline uintptr_t 
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;
    return sidetable_retainCount();
}


// not SUPPORT_NONPOINTER_ISA
#endif


#if SUPPORT_RETURN_AUTORELEASE


/***********************************************************************
  Fast handling of return through Cocoa's +0 autoreleasing convention.
  The caller and callee cooperate to keep the returned object 
  out of the autorelease pool and eliminate redundant retain/release pairs.

  An optimized callee looks at the caller's instructions following the 
  return. If the caller's instructions are also optimized then the callee 
  skips all retain count operations: no autorelease, no retain/autorelease.
  Instead it saves the result's current retain count (+0 or +1) in 
  thread-local storage. If the caller does not look optimized then 
  the callee performs autorelease or retain/autorelease as usual.

  An optimized caller looks at the thread-local storage. If the result 
  is set then it performs any retain or release needed to change the 
  result from the retain count left by the callee to the retain count 
  desired by the caller. Otherwise the caller assumes the result is 
  currently at +0 from an unoptimized callee and performs any retain 
  needed for that case.

  There are two optimized callees:
    objc_autoreleaseReturnValue
      result is currently +1. The unoptimized path autoreleases it.
    objc_retainAutoreleaseReturnValue
      result is currently +0. The unoptimized path retains and autoreleases it.

  There are two optimized callers:
    objc_retainAutoreleasedReturnValue
      caller wants the value at +1. The unoptimized path retains it.
    objc_unsafeClaimAutoreleasedReturnValue
      caller wants the value at +0 unsafely. The unoptimized path does nothing.

  Example:

    Callee:
      // compute ret at +1
      return objc_autoreleaseReturnValue(ret);

    Caller:
      ret = callee();
      ret = objc_retainAutoreleasedReturnValue(ret);
      // use ret at +1 here

    Callee sees the optimized caller, sets TLS, and leaves the result at +1.
    Caller sees the TLS, clears it, and accepts the result at +1 as-is.

  The callee's recognition of the optimized caller is architecture-dependent.
  x86_64: Callee looks for `mov rax, rdi` followed by a call or 
    jump instruction to objc_retainAutoreleasedReturnValue or 
    objc_unsafeClaimAutoreleasedReturnValue. 
  i386:  Callee looks for a magic nop `movl %ebp, %ebp` (frame pointer register).
  armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register). 
  arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register).

  Tagged pointer objects do participate in the optimized return scheme, 
  because it saves message sends. They are not entered in the autorelease 
  pool in the unoptimized case.
**********************************************************************/
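
// How the pieces below fit together (simplified sketch of the entry points
// implemented elsewhere in the runtime; not additional API):
//
//   // callee side, e.g. objc_autoreleaseReturnValue(obj):
//   if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;   // caller optimized: park +1 in TLS
//   return objc_autorelease(obj);                            // otherwise autorelease as usual
//
//   // caller side, e.g. objc_retainAutoreleasedReturnValue(obj):
//   if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;  // callee already left it at +1
//   return objc_retain(obj);                                    // unoptimized callee: retain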

# if __x86_64__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void * const ra0)
{
    const uint8_t *ra1 = (const uint8_t *)ra0;
    const unaligned_uint16_t *ra2;
    const unaligned_uint32_t *ra4 = (const unaligned_uint32_t *)ra1;
    const void **sym;

#define PREFER_GOTPCREL 0
#if PREFER_GOTPCREL
    // 48 89 c7    movq  %rax,%rdi
    // ff 15       callq *symbol@GOTPCREL(%rip)
    if (*ra4 != 0xffc78948) {
        return false;
    }
    if (ra1[4] != 0x15) {
        return false;
    }
    ra1 += 3;
#else
    // 48 89 c7    movq  %rax,%rdi
    // e8          callq symbol
    if (*ra4 != 0xe8c78948) {
        return false;
    }
    ra1 += (long)*(const unaligned_int32_t *)(ra1 + 4) + 8l;
    ra2 = (const unaligned_uint16_t *)ra1;
    // ff 25       jmpq *symbol@DYLDMAGIC(%rip)
    if (*ra2 != 0x25ff) {
        return false;
    }
#endif
    ra1 += 6l + (long)*(const unaligned_int32_t *)(ra1 + 2);
    sym = (const void **)ra1;
    if (*sym != objc_retainAutoreleasedReturnValue  &&  
        *sym != objc_unsafeClaimAutoreleasedReturnValue) 
    {
        return false;
    }

    return true;
}

// __x86_64__
# elif __arm__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // if the low bit is set, we're returning to thumb mode
    if ((uintptr_t)ra & 1) {
        // 3f 46          mov r7, r7
        // we mask off the low bit via subtraction
        // 16-bit instructions are well-aligned
        if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
            return true;
        }
    } else {
        // 07 70 a0 e1    mov r7, r7
        // 32-bit instructions may be only 16-bit aligned
        if (*(unaligned_uint32_t *)ra == 0xe1a07007) {
            return true;
        }
    }
    return false;
}

// __arm__
# elif __arm64__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // fd 03 1d aa    mov fp, fp
    // arm64 instructions are well-aligned
    if (*(uint32_t *)ra == 0xaa1d03fd) {
        return true;
    }
    return false;
}

// __arm64__
# elif __i386__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // 89 ed    movl %ebp, %ebp
    if (*(unaligned_uint16_t *)ra == 0xed89) {
        return true;
    }
    return false;
}

// __i386__
# else

#warning unknown architecture

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    return false;
}

// unknown architecture
# endif


static ALWAYS_INLINE ReturnDisposition 
getReturnDisposition()
{
    return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
}


static ALWAYS_INLINE void 
setReturnDisposition(ReturnDisposition disposition)
{
    tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
}


// Try to prepare for optimized return with the given disposition (+0 or +1).
// Returns true if the optimized path is successful.
// Otherwise the return value must be retained and/or autoreleased as usual.
static ALWAYS_INLINE bool 
prepareOptimizedReturn(ReturnDisposition disposition)
{
    assert(getReturnDisposition() == ReturnAtPlus0);

    if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
        if (disposition) setReturnDisposition(disposition);
        return true;
    }

    return false;
}


// Try to accept an optimized return.
// Returns the disposition of the returned object (+0 or +1).
// An un-optimized return is +0.
static ALWAYS_INLINE ReturnDisposition 
acceptOptimizedReturn()
{
    ReturnDisposition disposition = getReturnDisposition();
    setReturnDisposition(ReturnAtPlus0);  // reset to the unoptimized state
    return disposition;
}


// SUPPORT_RETURN_AUTORELEASE
#else
// not SUPPORT_RETURN_AUTORELEASE


static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition __unused)
{
    return false;
}

static ALWAYS_INLINE ReturnDisposition 
acceptOptimizedReturn()
{
    return ReturnAtPlus0;
}

// not SUPPORT_RETURN_AUTORELEASE
#endif


// _OBJC_OBJCOBJECT_H_
#endif