/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
* Inlineable parts of NSObject / objc_object implementation
**********************************************************************/

#ifndef _OBJC_OBJCOBJECT_H_
#define _OBJC_OBJCOBJECT_H_

#include "objc-private.h"

enum ReturnDisposition : bool {
    ReturnAtPlus0 = false, ReturnAtPlus1 = true
};

static ALWAYS_INLINE 
bool prepareOptimizedReturn(ReturnDisposition disposition);

#if SUPPORT_TAGGED_POINTERS

#define TAG_COUNT 8
#define TAG_SLOT_MASK 0xf

#if SUPPORT_MSB_TAGGED_POINTERS
#   define TAG_MASK (1ULL<<63)
#   define TAG_SLOT_SHIFT 60
#   define TAG_PAYLOAD_LSHIFT 4
#   define TAG_PAYLOAD_RSHIFT 4
#else
#   define TAG_MASK 1
#   define TAG_SLOT_SHIFT 0
#   define TAG_PAYLOAD_LSHIFT 0
#   define TAG_PAYLOAD_RSHIFT 4
#endif

extern "C" { extern Class objc_debug_taggedpointer_classes[TAG_COUNT*2]; }
#define objc_tag_classes objc_debug_taggedpointer_classes

#endif

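// (Added) Illustrative sketch, not part of the original header: how the
// macros above take a tagged pointer apart. The slot selects the tagged
// class (as getIsa() does below); the payload is the value with the tag
// bits shifted away. The helper name is hypothetical.
//
//   static inline uintptr_t taggedPointerPayload_sketch(const void *obj) {
//       uintptr_t slot = ((uintptr_t)obj >> TAG_SLOT_SHIFT) & TAG_SLOT_MASK;
//       (void)slot;  // indexes objc_tag_classes
//       return ((uintptr_t)obj << TAG_PAYLOAD_LSHIFT) >> TAG_PAYLOAD_RSHIFT;
//   }
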
inline bool
objc_object::isClass()
{
    if (isTaggedPointer()) return false;
    return ISA()->isMetaClass();
}

#if SUPPORT_NONPOINTER_ISA

# if !SUPPORT_TAGGED_POINTERS
#   error sorry
# endif

inline Class 
objc_object::ISA() 
{
    assert(!isTaggedPointer());
    return (Class)(isa.bits & ISA_MASK);
}

inline bool 
objc_object::hasIndexedIsa()
{
    return isa.indexed;
}

inline Class 
objc_object::getIsa() 
{
    if (isTaggedPointer()) {
        uintptr_t slot = ((uintptr_t)this >> TAG_SLOT_SHIFT) & TAG_SLOT_MASK;
        return objc_tag_classes[slot];
    }
    return ISA();
}

inline void 
objc_object::initIsa(Class cls)
{
    initIsa(cls, false, false);
}

inline void 
objc_object::initClassIsa(Class cls)
{
    if (DisableIndexedIsa) {
        initIsa(cls, false, false);
    } else {
        initIsa(cls, true, false);
    }
}

inline void
objc_object::initProtocolIsa(Class cls)
{
    return initClassIsa(cls);
}

inline void 
objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
{
    assert(!cls->requiresRawIsa());
    assert(hasCxxDtor == cls->hasCxxDtor());

    initIsa(cls, true, hasCxxDtor);
}

inline void 
objc_object::initIsa(Class cls, bool indexed, bool hasCxxDtor)
{
    assert(!isTaggedPointer());

    if (!indexed) {
        isa.cls = cls;
    } else {
        assert(!DisableIndexedIsa);
        isa.bits = ISA_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.indexed is part of ISA_MAGIC_VALUE
        isa.has_cxx_dtor = hasCxxDtor;
        isa.shiftcls = (uintptr_t)cls >> 3;
    }
}

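// (Added note) shiftcls stores the class pointer shifted right by 3 because
// class objects are at least 8-byte aligned, so the low 3 bits are always
// zero; changeIsa() below recovers the pointer with
// (Class)((uintptr_t)isa.shiftcls << 3).
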
inline Class 
objc_object::changeIsa(Class newCls)
{
    // This is almost always true but there are 
    // enough edge cases that we can't assert it.
    // assert(newCls->isFuture()  ||  
    //        newCls->isInitializing()  ||  newCls->isInitialized());

    assert(!isTaggedPointer());

    isa_t oldisa;
    isa_t newisa;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        if ((oldisa.bits == 0  ||  oldisa.indexed)  &&
            !newCls->isFuture()  &&  newCls->canAllocIndexed())
        {
            // 0 -> indexed
            // indexed -> indexed
            if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.indexed is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            newisa.shiftcls = (uintptr_t)newCls >> 3;
        }
        else if (oldisa.indexed) {
            // indexed -> not indexed
            // Need to copy retain count et al to side table.
            // Acquire side table lock before setting isa to 
            // prevent races such as concurrent -release.
            if (!sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.cls = newCls;
        }
        else {
            // not indexed -> not indexed
            newisa.cls = newCls;
        }
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (transcribeToSideTable) {
        // Copy oldisa's retain count et al to side table.
        // oldisa.weakly_referenced: nothing to do
        // oldisa.has_assoc: nothing to do
        // oldisa.has_cxx_dtor: nothing to do
        sidetable_moveExtraRC_nolock(oldisa.extra_rc, 
                                     oldisa.deallocating, 
                                     oldisa.weakly_referenced);
    }

    if (sideTableLocked) sidetable_unlock();

    Class oldCls;
    if (oldisa.indexed) oldCls = (Class)((uintptr_t)oldisa.shiftcls << 3);
    else oldCls = oldisa.cls;

    return oldCls;
}

inline bool 
objc_object::isTaggedPointer() 
{
    return ((uintptr_t)this & TAG_MASK);
}

inline bool
objc_object::hasAssociatedObjects()
{
    if (isTaggedPointer()) return true;
    if (isa.indexed) return isa.has_assoc;
    return true;
}

inline void
objc_object::setHasAssociatedObjects()
{
    if (isTaggedPointer()) return;

 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (!newisa.indexed) return;
    if (newisa.has_assoc) return;
    newisa.has_assoc = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}

inline bool
objc_object::isWeaklyReferenced()
{
    assert(!isTaggedPointer());
    if (isa.indexed) return isa.weakly_referenced;
    else return sidetable_isWeaklyReferenced();
}

inline void
objc_object::setWeaklyReferenced_nolock()
{
 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (!newisa.indexed) return sidetable_setWeaklyReferenced_nolock();
    if (newisa.weakly_referenced) return;
    newisa.weakly_referenced = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}

inline bool
objc_object::hasCxxDtor()
{
    assert(!isTaggedPointer());
    if (isa.indexed) return isa.has_cxx_dtor;
    else return isa.cls->hasCxxDtor();
}

inline bool 
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    if (isa.indexed) return isa.deallocating;
    return sidetable_isDeallocating();
}

inline void 
objc_object::clearDeallocating()
{
    if (!isa.indexed) {
        // Slow path for raw pointer isa.
        sidetable_clearDeallocating();
    }
    else if (isa.weakly_referenced  ||  isa.has_sidetable_rc) {
        // Slow path for non-pointer isa with weak refs and/or side table data.
        clearDeallocating_slow();
    }

    assert(!sidetable_present());
}

inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;

    if (isa.indexed  &&  
        !isa.weakly_referenced  &&  
        !isa.has_assoc  &&  
        !isa.has_cxx_dtor  &&  
        !isa.has_sidetable_rc)
    {
        assert(!sidetable_present());
        free(this);
    } 
    else {
        object_dispose((id)this);
    }
}

// Equivalent to calling [this retain], with shortcuts if there is no override
inline id 
objc_object::retain()
{
    // UseGC is allowed here, but requires hasCustomRR.
    assert(!UseGC  ||  ISA()->hasCustomRR());
    assert(!isTaggedPointer());

    if (! ISA()->hasCustomRR()) {
        return rootRetain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
}

// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super retain].
//
// tryRetain=true is the -_tryRetain path.
// handleOverflow=false is the frameless fast path.
// handleOverflow=true is the framed slow path including overflow to side table
// The code is structured this way to prevent duplication.
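//
// (Added note) The inline wrappers below call rootRetain(tryRetain, false);
// when the inline extra_rc field overflows, the frameless path bails out
// through rootRetain_overflow(), which re-invokes rootRetain with
// handleOverflow=true so the side-table transfer runs on the framed slow path.
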
ALWAYS_INLINE id 
objc_object::rootRetain()
{
    return rootRetain(false, false);
}

ALWAYS_INLINE bool 
objc_object::rootTryRetain()
{
    return rootRetain(true, false) ? true : false;
}

ALWAYS_INLINE id 
objc_object::rootRetain(bool tryRetain, bool handleOverflow)
{
    if (isTaggedPointer()) return (id)this;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    isa_t oldisa;
    isa_t newisa;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (!newisa.indexed) goto unindexed;
        // don't check newisa.fast_rr; we already called any RR overrides
        if (tryRetain && newisa.deallocating) goto tryfail;
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc++

        if (carry) {
            // newisa.extra_rc++ overflowed
            if (!handleOverflow) return rootRetain_overflow(tryRetain);
            // Leave half of the retain counts inline and 
            // prepare to copy the other half to the side table.
            if (!tryRetain && !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (transcribeToSideTable) {
        // Copy the other half of the retain counts to the side table.
        sidetable_addExtraRC_nolock(RC_HALF);
    }

    if (!tryRetain && sideTableLocked) sidetable_unlock();
    return (id)this;

 tryfail:
    if (!tryRetain && sideTableLocked) sidetable_unlock();
    return nil;

 unindexed:
    if (!tryRetain && sideTableLocked) sidetable_unlock();
    if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
    else return sidetable_retain();
}

// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    // UseGC is allowed here, but requires hasCustomRR.
    assert(!UseGC  ||  ISA()->hasCustomRR());
    assert(!isTaggedPointer());

    if (! ISA()->hasCustomRR()) {
        rootRelease();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
}

// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super release].
//
// handleUnderflow=false is the frameless fast path.
// handleUnderflow=true is the framed slow path including side table borrow
// The code is structured this way to prevent duplication.
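//
// (Added note) Mirror of the retain case: rootRelease(performDealloc, false)
// is the frameless fast path; when the inline count underflows it returns
// through rootRelease_underflow(), i.e. handleUnderflow=true, so the borrow
// from the side table runs on the framed slow path.
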
ALWAYS_INLINE bool 
objc_object::rootRelease()
{
    return rootRelease(true, false);
}

ALWAYS_INLINE bool 
objc_object::rootReleaseShouldDealloc()
{
    return rootRelease(false, false);
}

ALWAYS_INLINE bool 
objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
{
    if (isTaggedPointer()) return false;

    bool sideTableLocked = false;

    isa_t oldisa;
    isa_t newisa;

 retry:
    do {
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (!newisa.indexed) goto unindexed;
        // don't check newisa.fast_rr; we already called any RR overrides
        uintptr_t carry;
        newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc--
        if (carry) goto underflow;
    } while (!StoreReleaseExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (sideTableLocked) sidetable_unlock();
    return false;

 underflow:
    // newisa.extra_rc-- underflowed: borrow from side table or deallocate

    // abandon newisa to undo the decrement
    newisa = oldisa;

    if (newisa.has_sidetable_rc) {
        if (!handleUnderflow) {
            return rootRelease_underflow(performDealloc);
        }

        // Transfer retain count from side table to inline storage.

        if (!sideTableLocked) {
            sidetable_lock();
            sideTableLocked = true;
            if (!isa.indexed) {
                // Lost a race vs the indexed -> not indexed transition
                // before we got the side table lock. Stop now to avoid 
                // breaking the safety checks in the sidetable ExtraRC code.
                goto unindexed;
            }
        }

        // Try to remove some retain counts from the side table.
        size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);

        // To avoid races, has_sidetable_rc must remain set 
        // even if the side table count is now zero.

        if (borrowed > 0) {
            // Side table retain count decreased.
            // Try to add them to the inline count.
            newisa.extra_rc = borrowed - 1;  // redo the original decrement too
            bool stored = StoreExclusive(&isa.bits, oldisa.bits, newisa.bits);
            if (!stored) {
                // Inline update failed. 
                // Try it again right now. This prevents livelock on LL/SC 
                // architectures where the side table access itself may have 
                // dropped the reservation.
                isa_t oldisa2 = LoadExclusive(&isa.bits);
                isa_t newisa2 = oldisa2;
                if (newisa2.indexed) {
                    uintptr_t overflow;
                    newisa2.bits = 
                        addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
                    if (!overflow) {
                        stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits, 
                                                       newisa2.bits);
                    }
                }
            }

            if (!stored) {
                // Inline update failed.
                // Put the retains back in the side table.
                sidetable_addExtraRC_nolock(borrowed);
                goto retry;
            }

            // Decrement successful after borrowing from side table.
            // This decrement cannot be the deallocating decrement - the side 
            // table lock and has_sidetable_rc bit ensure that if everyone 
            // else tried to -release while we worked, the last one would block.
            if (sideTableLocked) sidetable_unlock();
            return false;
        }
        else {
            // Side table is empty after all. Fall-through to the dealloc path.
        }
    }

    // Really deallocate.

    if (sideTableLocked) sidetable_unlock();

    if (newisa.deallocating) {
        return overrelease_error();
    }
    newisa.deallocating = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
    __sync_synchronize();
    if (performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
    }
    return true;

 unindexed:
    if (sideTableLocked) sidetable_unlock();
    return sidetable_release(performDealloc);
}

// Equivalent to [this autorelease], with shortcuts if there is no override
inline id 
objc_object::autorelease()
{
    // UseGC is allowed here, but requires hasCustomRR.
    assert(!UseGC  ||  ISA()->hasCustomRR());

    if (isTaggedPointer()) return (id)this;
    if (! ISA()->hasCustomRR()) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
}

// Base autorelease implementation, ignoring overrides.
inline id 
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}

inline uintptr_t 
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;

    sidetable_lock();
    isa_t bits = LoadExclusive(&isa.bits);
    if (bits.indexed) {
        uintptr_t rc = 1 + bits.extra_rc;
        if (bits.has_sidetable_rc) {
            rc += sidetable_getExtraRC_nolock();
        }
        sidetable_unlock();
        return rc;
    }

    sidetable_unlock();
    return sidetable_retainCount();
}

// SUPPORT_NONPOINTER_ISA
#else
// not SUPPORT_NONPOINTER_ISA


inline Class 
objc_object::ISA() 
{
    assert(!isTaggedPointer());
    return isa.cls;
}


inline bool 
objc_object::hasIndexedIsa()
{
    return false;
}

inline Class 
objc_object::getIsa() 
{
#if SUPPORT_TAGGED_POINTERS
    if (isTaggedPointer()) {
        uintptr_t slot = ((uintptr_t)this >> TAG_SLOT_SHIFT) & TAG_SLOT_MASK;
        return objc_tag_classes[slot];
    }
#endif
    return ISA();
}

inline void 
objc_object::initIsa(Class cls)
{
    assert(!isTaggedPointer());
    isa = (uintptr_t)cls;
}

inline void 
objc_object::initClassIsa(Class cls)
{
    initIsa(cls);
}


inline void 
objc_object::initProtocolIsa(Class cls)
{
    initIsa(cls);
}


inline void 
objc_object::initInstanceIsa(Class cls, bool)
{
    initIsa(cls);
}


inline void 
objc_object::initIsa(Class cls, bool, bool)
{
    initIsa(cls);
}

inline Class 
objc_object::changeIsa(Class cls)
{
    // This is almost always true but there are 
    // enough edge cases that we can't assert it.
    // assert(cls->isFuture()  ||  
    //        cls->isInitializing()  ||  cls->isInitialized());

    assert(!isTaggedPointer());

    isa_t oldisa, newisa;
    newisa.cls = cls;
    do {
        oldisa = LoadExclusive(&isa.bits);
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (oldisa.cls  &&  oldisa.cls->instancesHaveAssociatedObjects()) {
        cls->setInstancesHaveAssociatedObjects();
    }

    return oldisa.cls;
}

inline bool 
objc_object::isTaggedPointer() 
{
#if SUPPORT_TAGGED_POINTERS
    return ((uintptr_t)this & TAG_MASK);
#else
    return false;
#endif
}

inline bool
objc_object::hasAssociatedObjects()
{
    return getIsa()->instancesHaveAssociatedObjects();
}


inline void
objc_object::setHasAssociatedObjects()
{
    getIsa()->setInstancesHaveAssociatedObjects();
}

inline bool
objc_object::isWeaklyReferenced()
{
    assert(!isTaggedPointer());

    return sidetable_isWeaklyReferenced();
}


inline void
objc_object::setWeaklyReferenced_nolock()
{
    assert(!isTaggedPointer());

    sidetable_setWeaklyReferenced_nolock();
}

inline bool
objc_object::hasCxxDtor()
{
    assert(!isTaggedPointer());
    return isa.cls->hasCxxDtor();
}

inline bool 
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    return sidetable_isDeallocating();
}


inline void 
objc_object::clearDeallocating()
{
    sidetable_clearDeallocating();
}

inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;
    object_dispose((id)this);
}

// Equivalent to calling [this retain], with shortcuts if there is no override
inline id 
objc_object::retain()
{
    // UseGC is allowed here, but requires hasCustomRR.
    assert(!UseGC  ||  ISA()->hasCustomRR());
    assert(!isTaggedPointer());

    if (! ISA()->hasCustomRR()) {
        return sidetable_retain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
}

// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super retain].
inline id 
objc_object::rootRetain()
{
    if (isTaggedPointer()) return (id)this;
    return sidetable_retain();
}

// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    // UseGC is allowed here, but requires hasCustomRR.
    assert(!UseGC  ||  ISA()->hasCustomRR());
    assert(!isTaggedPointer());

    if (! ISA()->hasCustomRR()) {
        sidetable_release();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
}

// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super release].
inline bool
objc_object::rootRelease()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(true);
}

inline bool
objc_object::rootReleaseShouldDealloc()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(false);
}

// Equivalent to [this autorelease], with shortcuts if there is no override
inline id 
objc_object::autorelease()
{
    // UseGC is allowed here, but requires hasCustomRR.
    assert(!UseGC  ||  ISA()->hasCustomRR());

    if (isTaggedPointer()) return (id)this;
    if (! ISA()->hasCustomRR()) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
}

// Base autorelease implementation, ignoring overrides.
inline id 
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}

// Base tryRetain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then 
// it was already called and it chose to call [super _tryRetain].
inline bool 
objc_object::rootTryRetain()
{
    if (isTaggedPointer()) return true;
    return sidetable_tryRetain();
}

inline uintptr_t 
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;
    return sidetable_retainCount();
}


// not SUPPORT_NONPOINTER_ISA
#endif

#if SUPPORT_RETURN_AUTORELEASE

/***********************************************************************
  Fast handling of return through Cocoa's +0 autoreleasing convention.
  The caller and callee cooperate to keep the returned object 
  out of the autorelease pool and eliminate redundant retain/release pairs.

  An optimized callee looks at the caller's instructions following the 
  return. If the caller's instructions are also optimized then the callee 
  skips all retain count operations: no autorelease, no retain/autorelease.
  Instead it saves the result's current retain count (+0 or +1) in 
  thread-local storage. If the caller does not look optimized then 
  the callee performs autorelease or retain/autorelease as usual.

  An optimized caller looks at the thread-local storage. If the result 
  is set then it performs any retain or release needed to change the 
  result from the retain count left by the callee to the retain count 
  desired by the caller. Otherwise the caller assumes the result is 
  currently at +0 from an unoptimized callee and performs any retain 
  needed for that case.

  There are two optimized callees:
    objc_autoreleaseReturnValue
      result is currently +1. The unoptimized path autoreleases it.
    objc_retainAutoreleaseReturnValue
      result is currently +0. The unoptimized path retains and autoreleases it.

  There are two optimized callers:
    objc_retainAutoreleasedReturnValue
      caller wants the value at +1. The unoptimized path retains it.
    objc_unsafeClaimAutoreleasedReturnValue
      caller wants the value at +0 unsafely. The unoptimized path does nothing.

  Example:

    Callee:
      // compute ret at +1
      return objc_autoreleaseReturnValue(ret);

    Caller:
      ret = callee();
      ret = objc_retainAutoreleasedReturnValue(ret);
      // use ret at +1 here

    Callee sees the optimized caller, sets TLS, and leaves the result at +1.
    Caller sees the TLS, clears it, and accepts the result at +1 as-is.

  The callee's recognition of the optimized caller is architecture-dependent.
  i386 and x86_64: Callee looks for `mov rax, rdi` followed by a call or 
    jump instruction to objc_retainAutoreleasedReturnValue or 
    objc_unsafeClaimAutoreleasedReturnValue. 
  armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register). 
  arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register). 

  Tagged pointer objects do participate in the optimized return scheme, 
  because it saves message sends. They are not entered in the autorelease 
  pool in the unoptimized case.
**********************************************************************/

# if __x86_64__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void * const ra0)
{
    const uint8_t *ra1 = (const uint8_t *)ra0;
    const uint16_t *ra2;
    const uint32_t *ra4 = (const uint32_t *)ra1;
    const void **sym;

#define PREFER_GOTPCREL 0
#if PREFER_GOTPCREL
    // 48 89 c7    movq  %rax,%rdi
    // ff 15       callq *symbol@GOTPCREL(%rip)
    if (*ra4 != 0xffc78948) {
        return false;
    }
    if (ra1[4] != 0x15) {
        return false;
    }
    ra1 += 3;
#else
    // 48 89 c7    movq  %rax,%rdi
    // e8          callq symbol
    if (*ra4 != 0xe8c78948) {
        return false;
    }
    ra1 += (long)*(const int32_t *)(ra1 + 4) + 8l;
    ra2 = (const uint16_t *)ra1;
    // ff 25       jmpq *symbol@DYLDMAGIC(%rip)
    if (*ra2 != 0x25ff) {
        return false;
    }
#endif
    ra1 += 6l + (long)*(const int32_t *)(ra1 + 2);
    sym = (const void **)ra1;
    if (*sym != objc_retainAutoreleasedReturnValue  &&  
        *sym != objc_unsafeClaimAutoreleasedReturnValue) 
    {
        return false;
    }

    return true;
}

// __x86_64__
# elif __arm__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // if the low bit is set, we're returning to thumb mode
    if ((uintptr_t)ra & 1) {
        // 3f 46          mov r7, r7
        // we mask off the low bit via subtraction
        if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
            return true;
        }
    } else {
        // 07 70 a0 e1    mov r7, r7
        if (*(uint32_t *)ra == 0xe1a07007) {
            return true;
        }
    }
    return false;
}

// __arm__
# elif __arm64__

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    // fd 03 1d aa    mov fp, fp
    if (*(uint32_t *)ra == 0xaa1d03fd) {
        return true;
    }
    return false;
}

// __arm64__
# elif __i386__  &&  TARGET_IPHONE_SIMULATOR

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    return false;
}

// __i386__  &&  TARGET_IPHONE_SIMULATOR
# else

#warning unknown architecture

static ALWAYS_INLINE bool 
callerAcceptsOptimizedReturn(const void *ra)
{
    return false;
}

// unknown architecture
# endif

static ALWAYS_INLINE ReturnDisposition 
getReturnDisposition()
{
    return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
}

static ALWAYS_INLINE void 
setReturnDisposition(ReturnDisposition disposition)
{
    tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
}

// Try to prepare for optimized return with the given disposition (+0 or +1).
// Returns true if the optimized path is successful.
// Otherwise the return value must be retained and/or autoreleased as usual.
static ALWAYS_INLINE bool 
prepareOptimizedReturn(ReturnDisposition disposition)
{
    assert(getReturnDisposition() == ReturnAtPlus0);

    if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
        if (disposition) setReturnDisposition(disposition);
        return true;
    }

    return false;
}

// Try to accept an optimized return.
// Returns the disposition of the returned object (+0 or +1).
// An un-optimized return is +0.
static ALWAYS_INLINE ReturnDisposition 
acceptOptimizedReturn()
{
    ReturnDisposition disposition = getReturnDisposition();
    setReturnDisposition(ReturnAtPlus0);  // reset to the unoptimized state
    return disposition;
}

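// (Added) Illustrative sketch, not from the original header, of how the
// runtime entry points described above (defined elsewhere in the runtime)
// are expected to pair these helpers. Bodies are assumptions for clarity.
//
//   id objc_autoreleaseReturnValue_sketch(id obj) {        // optimized callee
//       if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;  // hand off +1 via TLS
//       return objc_autorelease(obj);                           // unoptimized path
//   }
//
//   id objc_retainAutoreleasedReturnValue_sketch(id obj) {  // optimized caller
//       if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;  // already +1
//       return objc_retain(obj);                                   // was at +0
//   }
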
// SUPPORT_RETURN_AUTORELEASE
#else
// not SUPPORT_RETURN_AUTORELEASE


static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition __unused)
{
    return false;
}


static ALWAYS_INLINE ReturnDisposition 
acceptOptimizedReturn()
{
    return ReturnAtPlus0;
}


// not SUPPORT_RETURN_AUTORELEASE
#endif


#endif