/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
* Inlineable parts of NSObject / objc_object implementation
**********************************************************************/

#ifndef _OBJC_OBJCOBJECT_H_
#define _OBJC_OBJCOBJECT_H_

#include "objc-private.h"
enum ReturnDisposition : bool {
    ReturnAtPlus0 = false, ReturnAtPlus1 = true
};

static ALWAYS_INLINE 
bool prepareOptimizedReturn(ReturnDisposition disposition);
#if SUPPORT_TAGGED_POINTERS

extern Class objc_debug_taggedpointer_classes[_OBJC_TAG_SLOT_COUNT*2];
extern Class objc_debug_taggedpointer_ext_classes[_OBJC_TAG_EXT_SLOT_COUNT];

#define objc_tag_classes objc_debug_taggedpointer_classes
#define objc_tag_ext_classes objc_debug_taggedpointer_ext_classes

#endif
#if SUPPORT_INDEXED_ISA

ALWAYS_INLINE Class &
classForIndex(uintptr_t index) {
    assert(index < (uintptr_t)objc_indexed_classes_count);
    return objc_indexed_classes[index];
}

#endif
inline bool
objc_object::isClass()
{
    if (isTaggedPointer()) return false;
    return ISA()->isMetaClass();
}
#if SUPPORT_TAGGED_POINTERS

inline Class 
objc_object::getIsa() 
{
    if (!isTaggedPointer()) return ISA();

    uintptr_t ptr = (uintptr_t)this;
    if (isExtTaggedPointer()) {
        uintptr_t slot = 
            (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK;
        return objc_tag_ext_classes[slot];
    } else {
        uintptr_t slot = 
            (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
        return objc_tag_classes[slot];
    }
}


inline bool 
objc_object::isTaggedPointer() 
{
    return _objc_isTaggedPointer(this);
}

inline bool 
objc_object::isBasicTaggedPointer() 
{
    return isTaggedPointer() && !isExtTaggedPointer();
}

inline bool 
objc_object::isExtTaggedPointer() 
{
    return ((uintptr_t)this & _OBJC_TAG_EXT_MASK) == _OBJC_TAG_EXT_MASK;
}


// SUPPORT_TAGGED_POINTERS
#else
// not SUPPORT_TAGGED_POINTERS

inline Class objc_object::getIsa() { return ISA(); }
inline bool objc_object::isTaggedPointer() { return false; }
inline bool objc_object::isBasicTaggedPointer() { return false; }
inline bool objc_object::isExtTaggedPointer() { return false; }

#endif
// not SUPPORT_TAGGED_POINTERS
#if SUPPORT_NONPOINTER_ISA

inline Class 
objc_object::ISA() 
{
    assert(!isTaggedPointer()); 
#if SUPPORT_INDEXED_ISA
    if (isa.nonpointer) {
        uintptr_t slot = isa.indexcls;
        return classForIndex((unsigned)slot);
    }
    return (Class)isa.bits;
#else
    return (Class)(isa.bits & ISA_MASK);
#endif
}


inline bool 
objc_object::hasNonpointerIsa()
{
    return isa.nonpointer;
}
inline void 
objc_object::initIsa(Class cls)
{
    initIsa(cls, false, false);
}

inline void 
objc_object::initClassIsa(Class cls)
{
    if (DisableNonpointerIsa  ||  cls->instancesRequireRawIsa()) {
        initIsa(cls, false/*not nonpointer*/, false);
    } else {
        initIsa(cls, true/*nonpointer*/, false);
    }
}

inline void
objc_object::initProtocolIsa(Class cls)
{
    return initClassIsa(cls);
}

inline void 
objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
{
    assert(!cls->instancesRequireRawIsa());
    assert(hasCxxDtor == cls->hasCxxDtor());

    initIsa(cls, true, hasCxxDtor);
}
inline void 
objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor) 
{ 
    assert(!isTaggedPointer()); 

    if (!nonpointer) {
        isa.cls = cls;
    } else {
        assert(!DisableNonpointerIsa);
        assert(!cls->instancesRequireRawIsa());

        isa_t newisa(0);

#if SUPPORT_INDEXED_ISA
        assert(cls->classArrayIndex() > 0);
        newisa.bits = ISA_INDEX_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.indexcls = (uintptr_t)cls->classArrayIndex();
#else
        newisa.bits = ISA_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.shiftcls = (uintptr_t)cls >> 3;
#endif

        // This write must be performed in a single store in some cases
        // (for example when realizing a class because other threads
        // may simultaneously try to use the class).
        // fixme use atomics here to guarantee single-store and to
        // guarantee memory order w.r.t. the class index table
        // ...but not too atomic because we don't want to hurt instantiation
        isa = newisa;
    }
}
inline Class 
objc_object::changeIsa(Class newCls)
{
    // This is almost always true but there are 
    // enough edge cases that we can't assert it.
    // assert(newCls->isFuture()  || 
    //        newCls->isInitializing()  ||  newCls->isInitialized());

    assert(!isTaggedPointer()); 

    isa_t oldisa;
    isa_t newisa;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        if ((oldisa.bits == 0  ||  oldisa.nonpointer)  &&
            !newCls->isFuture()  &&  newCls->canAllocNonpointer())
        {
            // nonpointer -> nonpointer
#if SUPPORT_INDEXED_ISA
            if (oldisa.bits == 0) newisa.bits = ISA_INDEX_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            assert(newCls->classArrayIndex() > 0);
            newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
#else
            if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            newisa.shiftcls = (uintptr_t)newCls >> 3;
#endif
        }
        else if (oldisa.nonpointer) {
            // nonpointer -> raw pointer
            // Need to copy retain count et al to side table.
            // Acquire side table lock before setting isa to 
            // prevent races such as concurrent -release.
            if (!sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.cls = newCls;
        }
        else {
            // raw pointer -> raw pointer
            newisa.cls = newCls;
        }
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (transcribeToSideTable) {
        // Copy oldisa's retain count et al to side table.
        // oldisa.has_assoc: nothing to do
        // oldisa.has_cxx_dtor: nothing to do
        sidetable_moveExtraRC_nolock(oldisa.extra_rc, 
                                     oldisa.deallocating, 
                                     oldisa.weakly_referenced);
    }

    if (sideTableLocked) sidetable_unlock();

    if (oldisa.nonpointer) {
#if SUPPORT_INDEXED_ISA
        return classForIndex(oldisa.indexcls);
#else
        return (Class)((uintptr_t)oldisa.shiftcls << 3);
#endif
    }
    else {
        return oldisa.cls;
    }
}
inline bool
objc_object::hasAssociatedObjects()
{
    if (isTaggedPointer()) return true;
    if (isa.nonpointer) return isa.has_assoc;
    return true;
}
inline void
objc_object::setHasAssociatedObjects()
{
    if (isTaggedPointer()) return;

 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (!newisa.nonpointer  ||  newisa.has_assoc) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.has_assoc = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}
inline bool
objc_object::isWeaklyReferenced()
{
    assert(!isTaggedPointer());
    if (isa.nonpointer) return isa.weakly_referenced;
    else return sidetable_isWeaklyReferenced();
}
inline void
objc_object::setWeaklyReferenced_nolock()
{
 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (slowpath(!newisa.nonpointer)) {
        ClearExclusive(&isa.bits);
        sidetable_setWeaklyReferenced_nolock();
        return;
    }
    if (newisa.weakly_referenced) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.weakly_referenced = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}
inline bool
objc_object::hasCxxDtor()
{
    assert(!isTaggedPointer());
    if (isa.nonpointer) return isa.has_cxx_dtor;
    else return isa.cls->hasCxxDtor();
}
inline bool 
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    if (isa.nonpointer) return isa.deallocating;
    return sidetable_isDeallocating();
}
inline void 
objc_object::clearDeallocating()
{
    if (slowpath(!isa.nonpointer)) {
        // Slow path for raw pointer isa.
        sidetable_clearDeallocating();
    }
    else if (slowpath(isa.weakly_referenced  ||  isa.has_sidetable_rc)) {
        // Slow path for non-pointer isa with weak refs and/or side table data.
        clearDeallocating_slow();
    }

    assert(!sidetable_present());
}
inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;  // fixme necessary?

    if (fastpath(isa.nonpointer  &&  
                 !isa.weakly_referenced  &&  
                 !isa.has_assoc  &&  
                 !isa.has_cxx_dtor  &&  
                 !isa.has_sidetable_rc))
    {
        assert(!sidetable_present());
        free(this);
    } 
    else {
        object_dispose((id)this);
    }
}
// Equivalent to calling [this retain], with shortcuts if there is no override
inline id 
objc_object::retain()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return rootRetain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
}
// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super retain].
//
// tryRetain=true is the -_tryRetain path.
// handleOverflow=false is the frameless fast path.
// handleOverflow=true is the framed slow path including overflow to side table
// The code is structured this way to prevent duplication.

ALWAYS_INLINE id 
objc_object::rootRetain()
{
    return rootRetain(false, false);
}

ALWAYS_INLINE bool 
objc_object::rootTryRetain()
{
    return rootRetain(true, false) ? true : false;
}
ALWAYS_INLINE id 
objc_object::rootRetain(bool tryRetain, bool handleOverflow)
{
    if (isTaggedPointer()) return (id)this;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    isa_t oldisa;
    isa_t newisa;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain  &&  sideTableLocked) sidetable_unlock();
            if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
            else return sidetable_retain();
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        if (slowpath(tryRetain  &&  newisa.deallocating)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain  &&  sideTableLocked) sidetable_unlock();
            return nil;
        }
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc++

        if (slowpath(carry)) {
            // newisa.extra_rc++ overflowed
            if (!handleOverflow) {
                ClearExclusive(&isa.bits);
                return rootRetain_overflow(tryRetain);
            }
            // Leave half of the retain counts inline and 
            // prepare to copy the other half to the side table.
            if (!tryRetain  &&  !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));

    if (slowpath(transcribeToSideTable)) {
        // Copy the other half of the retain counts to the side table.
        sidetable_addExtraRC_nolock(RC_HALF);
    }

    if (slowpath(!tryRetain  &&  sideTableLocked)) sidetable_unlock();
    return (id)this;
}
// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        rootRelease();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
}
// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super release].
//
// handleUnderflow=false is the frameless fast path.
// handleUnderflow=true is the framed slow path including side table borrow
// The code is structured this way to prevent duplication.

ALWAYS_INLINE bool 
objc_object::rootRelease()
{
    return rootRelease(true, false);
}

ALWAYS_INLINE bool 
objc_object::rootReleaseShouldDealloc()
{
    return rootRelease(false, false);
}
ALWAYS_INLINE bool 
objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
{
    if (isTaggedPointer()) return false;

    bool sideTableLocked = false;

    isa_t oldisa;
    isa_t newisa;

 retry:
    do {
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (sideTableLocked) sidetable_unlock();
            return sidetable_release(performDealloc);
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        uintptr_t carry;
        newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc--
        if (slowpath(carry)) {
            // don't ClearExclusive()
            goto underflow;
        }
    } while (slowpath(!StoreReleaseExclusive(&isa.bits, 
                                             oldisa.bits, newisa.bits)));

    if (slowpath(sideTableLocked)) sidetable_unlock();
    return false;

 underflow:
    // newisa.extra_rc-- underflowed: borrow from side table or deallocate

    // abandon newisa to undo the decrement
    newisa = oldisa;

    if (slowpath(newisa.has_sidetable_rc)) {
        if (!handleUnderflow) {
            ClearExclusive(&isa.bits);
            return rootRelease_underflow(performDealloc);
        }

        // Transfer retain count from side table to inline storage.

        if (!sideTableLocked) {
            ClearExclusive(&isa.bits);
            sidetable_lock();
            sideTableLocked = true;
            // Need to start over to avoid a race against 
            // the nonpointer -> raw pointer transition.
            goto retry;
        }

        // Try to remove some retain counts from the side table.
        size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);

        // To avoid races, has_sidetable_rc must remain set 
        // even if the side table count is now zero.

        if (borrowed > 0) {
            // Side table retain count decreased.
            // Try to add them to the inline count.
            newisa.extra_rc = borrowed - 1;  // redo the original decrement too
            bool stored = StoreReleaseExclusive(&isa.bits, 
                                                oldisa.bits, newisa.bits);
            if (!stored) {
                // Inline update failed. 
                // Try it again right now. This prevents livelock on LL/SC 
                // architectures where the side table access itself may have 
                // dropped the reservation.
                isa_t oldisa2 = LoadExclusive(&isa.bits);
                isa_t newisa2 = oldisa2;
                if (newisa2.nonpointer) {
                    uintptr_t overflow;
                    newisa2.bits = 
                        addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
                    if (!overflow) {
                        stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits, 
                                                       newisa2.bits);
                    }
                }
            }

            if (!stored) {
                // Inline update failed.
                // Put the retains back in the side table.
                sidetable_addExtraRC_nolock(borrowed);
                goto retry;
            }

            // Decrement successful after borrowing from side table.
            // This decrement cannot be the deallocating decrement - the side 
            // table lock and has_sidetable_rc bit ensure that if everyone 
            // else tried to -release while we worked, the last one would block.
            sidetable_unlock();
            return false;
        }
        else {
            // Side table is empty after all. Fall-through to the dealloc path.
        }
    }

    // Really deallocate.

    if (slowpath(newisa.deallocating)) {
        ClearExclusive(&isa.bits);
        if (sideTableLocked) sidetable_unlock();
        return overrelease_error();
        // does not actually return
    }
    newisa.deallocating = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;

    if (slowpath(sideTableLocked)) sidetable_unlock();

    __sync_synchronize();
    if (performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
    }
    return true;
}
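
// Illustrative sketch (assumption, not part of this header): the matching
// out-of-line underflow handler in NSObject.mm is expected to re-enter
// rootRelease with handleUnderflow=true so the framed side-table borrow runs:
//
//     NEVER_INLINE bool
//     objc_object::rootRelease_underflow(bool performDealloc)
//     {
//         return rootRelease(performDealloc, true);
//     }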
// Equivalent to [this autorelease], with shortcuts if there is no override
inline id 
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
}
// Base autorelease implementation, ignoring overrides.
inline id 
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}
inline uintptr_t 
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;

    sidetable_lock();
    isa_t bits = LoadExclusive(&isa.bits);
    ClearExclusive(&isa.bits);
    if (bits.nonpointer) {
        uintptr_t rc = 1 + bits.extra_rc;
        if (bits.has_sidetable_rc) {
            rc += sidetable_getExtraRC_nolock();
        }
        sidetable_unlock();
        return rc;
    }

    sidetable_unlock();
    return sidetable_retainCount();
}


// SUPPORT_NONPOINTER_ISA
#else
// not SUPPORT_NONPOINTER_ISA
inline Class objc_object::ISA() 
{
    assert(!isTaggedPointer()); 
    return isa.cls;
}

inline bool objc_object::hasNonpointerIsa() { return false; }

inline void objc_object::initIsa(Class cls)
{
    assert(!isTaggedPointer()); 
    isa = (uintptr_t)cls; 
}
inline void objc_object::initClassIsa(Class cls) { initIsa(cls); }

inline void objc_object::initProtocolIsa(Class cls) { initIsa(cls); }

inline void objc_object::initInstanceIsa(Class cls, bool) { initIsa(cls); }

inline void objc_object::initIsa(Class cls, bool, bool) { initIsa(cls); }
inline Class 
objc_object::changeIsa(Class cls)
{
    // This is almost always true but there are 
    // enough edge cases that we can't assert it.
    // assert(cls->isFuture()  ||  
    //        cls->isInitializing()  ||  cls->isInitialized());

    assert(!isTaggedPointer()); 

    isa_t oldisa, newisa;
    newisa.cls = cls;
    do {
        oldisa = LoadExclusive(&isa.bits);
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (oldisa.cls  &&  oldisa.cls->instancesHaveAssociatedObjects()) {
        cls->setInstancesHaveAssociatedObjects();
    }

    return oldisa.cls;
}

inline bool
objc_object::hasAssociatedObjects()
{
    return getIsa()->instancesHaveAssociatedObjects();
}

inline void
objc_object::setHasAssociatedObjects()
{
    getIsa()->setInstancesHaveAssociatedObjects();
}
inline bool objc_object::isWeaklyReferenced()
{
    assert(!isTaggedPointer());
    return sidetable_isWeaklyReferenced();
}

inline void objc_object::setWeaklyReferenced_nolock()
{
    assert(!isTaggedPointer());
    sidetable_setWeaklyReferenced_nolock();
}

inline bool objc_object::hasCxxDtor()
{
    assert(!isTaggedPointer());
    return isa.cls->hasCxxDtor();
}

inline bool objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    return sidetable_isDeallocating();
}

inline void objc_object::clearDeallocating()
{
    sidetable_clearDeallocating();
}

inline void objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;
    object_dispose((id)this);
}
// Equivalent to calling [this retain], with shortcuts if there is no override
inline id 
objc_object::retain()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return sidetable_retain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
}

// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super retain].
inline id 
objc_object::rootRetain()
{
    if (isTaggedPointer()) return (id)this;
    return sidetable_retain();
}
// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        sidetable_release();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
}

// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super release].
inline bool 
objc_object::rootRelease()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(true);
}

inline bool 
objc_object::rootReleaseShouldDealloc()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(false);
}
// Equivalent to [this autorelease], with shortcuts if there is no override
inline id 
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
}

// Base autorelease implementation, ignoring overrides.
inline id 
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}

// Base tryRetain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super _tryRetain].
inline bool 
objc_object::rootTryRetain()
{
    if (isTaggedPointer()) return true;
    return sidetable_tryRetain();
}

inline uintptr_t 
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;
    return sidetable_retainCount();
}

#endif
// not SUPPORT_NONPOINTER_ISA
#if SUPPORT_RETURN_AUTORELEASE

/***********************************************************************
  Fast handling of return through Cocoa's +0 autoreleasing convention.
  The caller and callee cooperate to keep the returned object 
  out of the autorelease pool and eliminate redundant retain/release pairs.

  An optimized callee looks at the caller's instructions following the 
  return. If the caller's instructions are also optimized then the callee 
  skips all retain count operations: no autorelease, no retain/autorelease.
  Instead it saves the result's current retain count (+0 or +1) in 
  thread-local storage. If the caller does not look optimized then 
  the callee performs autorelease or retain/autorelease as usual.

  An optimized caller looks at the thread-local storage. If the result 
  is set then it performs any retain or release needed to change the 
  result from the retain count left by the callee to the retain count 
  desired by the caller. Otherwise the caller assumes the result is 
  currently at +0 from an unoptimized callee and performs any retain 
  needed for that case.

  There are two optimized callees:
    objc_autoreleaseReturnValue
      result is currently +1. The unoptimized path autoreleases it.
    objc_retainAutoreleaseReturnValue
      result is currently +0. The unoptimized path retains and autoreleases it.

  There are two optimized callers:
    objc_retainAutoreleasedReturnValue
      caller wants the value at +1. The unoptimized path retains it.
    objc_unsafeClaimAutoreleasedReturnValue
      caller wants the value at +0 unsafely. The unoptimized path does nothing.

  Example:

    Callee:
      // compute ret at +1
      return objc_autoreleaseReturnValue(ret);

    Caller:
      ret = objc_retainAutoreleasedReturnValue(ret);
      // use ret at +1 here

    Callee sees the optimized caller, sets TLS, and leaves the result at +1.
    Caller sees the TLS, clears it, and accepts the result at +1 as-is.

  The callee's recognition of the optimized caller is architecture-dependent.
  x86_64: Callee looks for `mov rax, rdi` followed by a call or 
    jump instruction to objc_retainAutoreleasedReturnValue or 
    objc_unsafeClaimAutoreleasedReturnValue.
  i386:  Callee looks for a magic nop `movl %ebp, %ebp` (frame pointer register).
  armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register).
  arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register).

  Tagged pointer objects do participate in the optimized return scheme, 
  because it saves message sends. They are not entered in the autorelease 
  pool in the unoptimized case.
**********************************************************************/
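
// Illustrative sketch (assumption, not part of this header): how the
// optimized callee/caller entry points described above are expected to sit
// on top of prepareOptimizedReturn() and acceptOptimizedReturn() defined
// below. The real definitions live in NSObject.mm; this only connects the
// scheme to the helpers declared in this file.
//
//     // Callee side: result is currently +1.
//     id objc_autoreleaseReturnValue(id obj)
//     {
//         if (prepareOptimizedReturn(ReturnAtPlus1)) return obj; // hand off via TLS
//         return objc_autorelease(obj);                          // unoptimized path
//     }
//
//     // Caller side: caller wants the result at +1.
//     id objc_retainAutoreleasedReturnValue(id obj)
//     {
//         if (acceptOptimizedReturn() == ReturnAtPlus1) return obj; // already +1
//         return objc_retain(obj);                                  // unoptimized callee
//     }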
# if __x86_64__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void * const ra0)
{
    const uint8_t *ra1 = (const uint8_t *)ra0;
    const unaligned_uint16_t *ra2;
    const unaligned_uint32_t *ra4 = (const unaligned_uint32_t *)ra1;
    const void **sym;

#define PREFER_GOTPCREL 0
#if PREFER_GOTPCREL
    // 48 89 c7    movq  %rax,%rdi
    // ff 15       callq *symbol@GOTPCREL(%rip)
    if (*ra4 != 0xffc78948) {
        return false;
    }
    if (ra1[4] != 0x15) {
        return false;
    }
    ra1 += 3;
#else
    // 48 89 c7    movq  %rax,%rdi
    // e8          callq symbol
    if (*ra4 != 0xe8c78948) {
        return false;
    }
    ra1 += (long)*(const unaligned_int32_t *)(ra1 + 4) + 8l;
    ra2 = (const unaligned_uint16_t *)ra1;
    // ff 25       jmpq *symbol@DYLDMAGIC(%rip)
    if (*ra2 != 0x25ff) {
        return false;
    }
#endif
    ra1 += 6l + (long)*(const unaligned_int32_t *)(ra1 + 2);
    sym = (const void **)ra1;
    if (*sym != objc_retainAutoreleasedReturnValue  &&  
        *sym != objc_unsafeClaimAutoreleasedReturnValue) 
    {
        return false;
    }

    return true;
}
# elif __arm__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // if the low bit is set, we're returning to thumb mode
    if ((uintptr_t)ra & 1) {
        // 3f 46          mov r7, r7
        // we mask off the low bit via subtraction
        // 16-bit instructions are well-aligned
        if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
            return true;
        }
    } else {
        // 07 70 a0 e1    mov r7, r7
        // 32-bit instructions may be only 16-bit aligned
        if (*(unaligned_uint32_t *)ra == 0xe1a07007) {
            return true;
        }
    }
    return false;
}
# elif __arm64__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // fd 03 1d aa    mov fp, fp
    // arm64 instructions are well-aligned
    if (*(uint32_t *)ra == 0xaa1d03fd) {
        return true;
    }
    return false;
}
# elif __i386__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // 89 ed    movl %ebp, %ebp
    if (*(unaligned_uint16_t *)ra == 0xed89) {
        return true;
    }
    return false;
}
# else

#warning unknown architecture

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    return false;
}
// unknown architecture

# endif
static ALWAYS_INLINE ReturnDisposition 
getReturnDisposition()
{
    return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
}

static ALWAYS_INLINE void 
setReturnDisposition(ReturnDisposition disposition)
{
    tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
}
// Try to prepare for optimized return with the given disposition (+0 or +1).
// Returns true if the optimized path is successful.
// Otherwise the return value must be retained and/or autoreleased as usual.
static ALWAYS_INLINE bool 
prepareOptimizedReturn(ReturnDisposition disposition)
{
    assert(getReturnDisposition() == ReturnAtPlus0);

    if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
        if (disposition) setReturnDisposition(disposition);
        return true;
    }

    return false;
}
// Try to accept an optimized return.
// Returns the disposition of the returned object (+0 or +1).
// An un-optimized return is +0.
static ALWAYS_INLINE ReturnDisposition 
acceptOptimizedReturn()
{
    ReturnDisposition disposition = getReturnDisposition();
    setReturnDisposition(ReturnAtPlus0);  // reset to the unoptimized state
    return disposition;
}


// SUPPORT_RETURN_AUTORELEASE
#else
// not SUPPORT_RETURN_AUTORELEASE

static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition __unused)
{
    return false;
}

static ALWAYS_INLINE ReturnDisposition 
acceptOptimizedReturn()
{
    return ReturnAtPlus0;
}

#endif
// not SUPPORT_RETURN_AUTORELEASE


#endif