1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24
25 /***********************************************************************
26 * Inlineable parts of NSObject / objc_object implementation
27 **********************************************************************/
28
29 #ifndef _OBJC_OBJCOBJECT_H_
30 #define _OBJC_OBJCOBJECT_H_
31
32 #include "objc-private.h"
33
34
35 enum ReturnDisposition : bool {
36 ReturnAtPlus0 = false, ReturnAtPlus1 = true
37 };
38
39 static ALWAYS_INLINE
40 bool prepareOptimizedReturn(ReturnDisposition disposition);
41
42
43 #if SUPPORT_TAGGED_POINTERS
44
45 #define TAG_COUNT 8
46 #define TAG_SLOT_MASK 0xf
47
48 #if SUPPORT_MSB_TAGGED_POINTERS
49 # define TAG_MASK (1ULL<<63)
50 # define TAG_SLOT_SHIFT 60
51 # define TAG_PAYLOAD_LSHIFT 4
52 # define TAG_PAYLOAD_RSHIFT 4
53 #else
54 # define TAG_MASK 1
55 # define TAG_SLOT_SHIFT 0
56 # define TAG_PAYLOAD_LSHIFT 0
57 # define TAG_PAYLOAD_RSHIFT 4
58 #endif
59
60 extern "C" { extern Class objc_debug_taggedpointer_classes[TAG_COUNT*2]; }
61 #define objc_tag_classes objc_debug_taggedpointer_classes
62
63 #endif
64
65
66 inline bool
67 objc_object::isClass()
68 {
69 if (isTaggedPointer()) return false;
70 return ISA()->isMetaClass();
71 }
72
73 #if SUPPORT_NONPOINTER_ISA
74
75 # if !SUPPORT_TAGGED_POINTERS
76 # error sorry
77 # endif
78
79
80 inline Class
81 objc_object::ISA()
82 {
83 assert(!isTaggedPointer());
84 return (Class)(isa.bits & ISA_MASK);
85 }
86
87
88 inline bool
89 objc_object::hasIndexedIsa()
90 {
91 return isa.indexed;
92 }
93
94 inline Class
95 objc_object::getIsa()
96 {
97 if (isTaggedPointer()) {
98 uintptr_t slot = ((uintptr_t)this >> TAG_SLOT_SHIFT) & TAG_SLOT_MASK;
99 return objc_tag_classes[slot];
100 }
101 return ISA();
102 }
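// Illustrative decode of a hypothetical MSB-tagged pointer (the value below is
// made up; the payload layout is owned by the tagged pointer class itself):
//
//   uintptr_t ptr  = 0xb000000000000035;
//   bool tagged    = ptr & TAG_MASK;                             // bit 63 set -> true
//   uintptr_t slot = (ptr >> TAG_SLOT_SHIFT) & TAG_SLOT_MASK;    // == 0xb
//   Class cls      = objc_tag_classes[slot];
//   uintptr_t data = (ptr << TAG_PAYLOAD_LSHIFT) >> TAG_PAYLOAD_RSHIFT;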
103
104
105 inline void
106 objc_object::initIsa(Class cls)
107 {
108 initIsa(cls, false, false);
109 }
110
111 inline void
112 objc_object::initClassIsa(Class cls)
113 {
114 if (DisableIndexedIsa) {
115 initIsa(cls, false, false);
116 } else {
117 initIsa(cls, true, false);
118 }
119 }
120
121 inline void
122 objc_object::initProtocolIsa(Class cls)
123 {
124 return initClassIsa(cls);
125 }
126
127 inline void
128 objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
129 {
130 assert(!UseGC);
131 assert(!cls->requiresRawIsa());
132 assert(hasCxxDtor == cls->hasCxxDtor());
133
134 initIsa(cls, true, hasCxxDtor);
135 }
136
137 inline void
138 objc_object::initIsa(Class cls, bool indexed, bool hasCxxDtor)
139 {
140 assert(!isTaggedPointer());
141
142 if (!indexed) {
143 isa.cls = cls;
144 } else {
145 assert(!DisableIndexedIsa);
146 isa.bits = ISA_MAGIC_VALUE;
147 // isa.magic is part of ISA_MAGIC_VALUE
148 // isa.indexed is part of ISA_MAGIC_VALUE
149 isa.has_cxx_dtor = hasCxxDtor;
150 isa.shiftcls = (uintptr_t)cls >> 3;
151 }
152 }
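// Why shiftcls works (a sketch; the exact field layout comes from isa_t in
// objc-private.h): class objects are allocated with at least 8-byte alignment,
// so the low 3 bits of a Class pointer are always zero and can be dropped.
//
//   isa.shiftcls = (uintptr_t)cls >> 3;        // pack: discard the 3 zero bits
//   Class c = (Class)(isa.bits & ISA_MASK);    // unpack: what ISA() above does
//   assert(c == cls);                          // the round trip is lossless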
153
154
155 inline Class
156 objc_object::changeIsa(Class newCls)
157 {
158 // This is almost always true but there are
159 // enough edge cases that we can't assert it.
160 // assert(newCls->isFuture() ||
161 // newCls->isInitializing() || newCls->isInitialized());
162
163 assert(!isTaggedPointer());
164
165 isa_t oldisa;
166 isa_t newisa;
167
168 bool sideTableLocked = false;
169 bool transcribeToSideTable = false;
170
171 do {
172 transcribeToSideTable = false;
173 oldisa = LoadExclusive(&isa.bits);
174 if ((oldisa.bits == 0 || oldisa.indexed) &&
175 !newCls->isFuture() && newCls->canAllocIndexed())
176 {
177 // 0 -> indexed
178 // indexed -> indexed
179 if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
180 else newisa = oldisa;
181 // isa.magic is part of ISA_MAGIC_VALUE
182 // isa.indexed is part of ISA_MAGIC_VALUE
183 newisa.has_cxx_dtor = newCls->hasCxxDtor();
184 newisa.shiftcls = (uintptr_t)newCls >> 3;
185 }
186 else if (oldisa.indexed) {
187 // indexed -> not indexed
188 // Need to copy retain count et al to side table.
189 // Acquire side table lock before setting isa to
190 // prevent races such as concurrent -release.
191 if (!sideTableLocked) sidetable_lock();
192 sideTableLocked = true;
193 transcribeToSideTable = true;
194 newisa.cls = newCls;
195 }
196 else {
197 // not indexed -> not indexed
198 newisa.cls = newCls;
199 }
200 } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));
201
202 if (transcribeToSideTable) {
203 // Copy oldisa's retain count et al to side table.
204 // oldisa.weakly_referenced: nothing to do
205 // oldisa.has_assoc: nothing to do
206 // oldisa.has_cxx_dtor: nothing to do
207 sidetable_moveExtraRC_nolock(oldisa.extra_rc,
208 oldisa.deallocating,
209 oldisa.weakly_referenced);
210 }
211
212 if (sideTableLocked) sidetable_unlock();
213
214 Class oldCls;
215 if (oldisa.indexed) oldCls = (Class)((uintptr_t)oldisa.shiftcls << 3);
216 else oldCls = oldisa.cls;
217
218 return oldCls;
219 }
220
221
222 inline bool
223 objc_object::isTaggedPointer()
224 {
225 return ((uintptr_t)this & TAG_MASK);
226 }
227
228
229 inline bool
230 objc_object::hasAssociatedObjects()
231 {
232 if (isTaggedPointer()) return true;
233 if (isa.indexed) return isa.has_assoc;
234 return true;
235 }
236
237
238 inline void
239 objc_object::setHasAssociatedObjects()
240 {
241 if (isTaggedPointer()) return;
242
243 retry:
244 isa_t oldisa = LoadExclusive(&isa.bits);
245 isa_t newisa = oldisa;
246 if (!newisa.indexed) return;
247 if (newisa.has_assoc) return;
248 newisa.has_assoc = true;
249 if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
250 }
251
252
253 inline bool
254 objc_object::isWeaklyReferenced()
255 {
256 assert(!isTaggedPointer());
257 if (isa.indexed) return isa.weakly_referenced;
258 else return sidetable_isWeaklyReferenced();
259 }
260
261
262 inline void
263 objc_object::setWeaklyReferenced_nolock()
264 {
265 retry:
266 isa_t oldisa = LoadExclusive(&isa.bits);
267 isa_t newisa = oldisa;
268 if (!newisa.indexed) return sidetable_setWeaklyReferenced_nolock();
269 if (newisa.weakly_referenced) return;
270 newisa.weakly_referenced = true;
271 if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
272 }
273
274
275 inline bool
276 objc_object::hasCxxDtor()
277 {
278 assert(!isTaggedPointer());
279 if (isa.indexed) return isa.has_cxx_dtor;
280 else return isa.cls->hasCxxDtor();
281 }
282
283
284
285 inline bool
286 objc_object::rootIsDeallocating()
287 {
288 assert(!UseGC);
289
290 if (isTaggedPointer()) return false;
291 if (isa.indexed) return isa.deallocating;
292 return sidetable_isDeallocating();
293 }
294
295
296 inline void
297 objc_object::clearDeallocating()
298 {
299 if (!isa.indexed) {
300 // Slow path for raw pointer isa.
301 sidetable_clearDeallocating();
302 }
303 else if (isa.weakly_referenced || isa.has_sidetable_rc) {
304 // Slow path for non-pointer isa with weak refs and/or side table data.
305 clearDeallocating_slow();
306 }
307
308 assert(!sidetable_present());
309 }
310
311
312 inline void
313 objc_object::rootDealloc()
314 {
315 assert(!UseGC);
316 if (isTaggedPointer()) return;
317
318 if (isa.indexed &&
319 !isa.weakly_referenced &&
320 !isa.has_assoc &&
321 !isa.has_cxx_dtor &&
322 !isa.has_sidetable_rc)
323 {
324 assert(!sidetable_present());
325 free(this);
326 }
327 else {
328 object_dispose((id)this);
329 }
330 }
331
332
333 // Equivalent to calling [this retain], with shortcuts if there is no override
334 inline id
335 objc_object::retain()
336 {
337 // UseGC is allowed here, but requires hasCustomRR.
338 assert(!UseGC || ISA()->hasCustomRR());
339 assert(!isTaggedPointer());
340
341 if (! ISA()->hasCustomRR()) {
342 return rootRetain();
343 }
344
345 return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
346 }
347
348
349 // Base retain implementation, ignoring overrides.
350 // This does not check isa.fast_rr; if there is an RR override then
351 // it was already called and it chose to call [super retain].
352 //
353 // tryRetain=true is the -_tryRetain path.
354 // handleOverflow=false is the frameless fast path.
355 // handleOverflow=true is the framed slow path including overflow to side table
356 // The code is structured this way to prevent duplication.
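// A sketch of the out-of-line slow path (defined in NSObject.mm): it simply
// re-enters rootRetain() with handleOverflow=true from a non-inlined frame,
// so the fast path below stays frameless.
//
//   NEVER_INLINE id
//   objc_object::rootRetain_overflow(bool tryRetain)
//   {
//       return rootRetain(tryRetain, true);
//   }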
357
358 ALWAYS_INLINE id
359 objc_object::rootRetain()
360 {
361 return rootRetain(false, false);
362 }
363
364 ALWAYS_INLINE bool
365 objc_object::rootTryRetain()
366 {
367 return rootRetain(true, false) ? true : false;
368 }
369
370 ALWAYS_INLINE id
371 objc_object::rootRetain(bool tryRetain, bool handleOverflow)
372 {
373 assert(!UseGC);
374 if (isTaggedPointer()) return (id)this;
375
376 bool sideTableLocked = false;
377 bool transcribeToSideTable = false;
378
379 isa_t oldisa;
380 isa_t newisa;
381
382 do {
383 transcribeToSideTable = false;
384 oldisa = LoadExclusive(&isa.bits);
385 newisa = oldisa;
386 if (!newisa.indexed) goto unindexed;
387 // don't check newisa.fast_rr; we already called any RR overrides
388 if (tryRetain && newisa.deallocating) goto tryfail;
389 uintptr_t carry;
390 newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry); // extra_rc++
391
392 if (carry) {
393 // newisa.extra_rc++ overflowed
394 if (!handleOverflow) return rootRetain_overflow(tryRetain);
395 // Leave half of the retain counts inline and
396 // prepare to copy the other half to the side table.
397 if (!tryRetain && !sideTableLocked) sidetable_lock();
398 sideTableLocked = true;
399 transcribeToSideTable = true;
400 newisa.extra_rc = RC_HALF;
401 newisa.has_sidetable_rc = true;
402 }
403 } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));
404
405 if (transcribeToSideTable) {
406 // Copy the other half of the retain counts to the side table.
407 sidetable_addExtraRC_nolock(RC_HALF);
408 }
409
410 if (!tryRetain && sideTableLocked) sidetable_unlock();
411 return (id)this;
412
413 tryfail:
414 if (!tryRetain && sideTableLocked) sidetable_unlock();
415 return nil;
416
417 unindexed:
418 if (!tryRetain && sideTableLocked) sidetable_unlock();
419 if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
420 else return sidetable_retain();
421 }
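// Worked example of the overflow path, assuming an 8-bit extra_rc field as on
// x86_64 (the exact width is per-architecture; see isa_t):
//   before:  1 (implicit) + 255 inline                      == 256 total
//   retain:  extra_rc overflows, so keep RC_HALF (128) inline and move
//            RC_HALF to the side table via sidetable_addExtraRC_nolock()
//   after:   1 (implicit) + 128 inline + 128 side table     == 257 total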
422
423
424 // Equivalent to calling [this release], with shortcuts if there is no override
425 inline void
426 objc_object::release()
427 {
428 // UseGC is allowed here, but requires hasCustomRR.
429 assert(!UseGC || ISA()->hasCustomRR());
430 assert(!isTaggedPointer());
431
432 if (! ISA()->hasCustomRR()) {
433 rootRelease();
434 return;
435 }
436
437 ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
438 }
439
440
441 // Base release implementation, ignoring overrides.
442 // Does not call -dealloc.
443 // Returns true if the object should now be deallocated.
444 // This does not check isa.fast_rr; if there is an RR override then
445 // it was already called and it chose to call [super release].
446 //
447 // handleUnderflow=false is the frameless fast path.
448 // handleUnderflow=true is the framed slow path including side table borrow
449 // The code is structured this way to prevent duplication.
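// A sketch of the matching out-of-line slow path (defined in NSObject.mm): it
// re-enters rootRelease() with handleUnderflow=true from a non-inlined frame.
//
//   NEVER_INLINE bool
//   objc_object::rootRelease_underflow(bool performDealloc)
//   {
//       return rootRelease(performDealloc, true);
//   }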
450
451 ALWAYS_INLINE bool
452 objc_object::rootRelease()
453 {
454 return rootRelease(true, false);
455 }
456
457 ALWAYS_INLINE bool
458 objc_object::rootReleaseShouldDealloc()
459 {
460 return rootRelease(false, false);
461 }
462
463 ALWAYS_INLINE bool
464 objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
465 {
466 assert(!UseGC);
467 if (isTaggedPointer()) return false;
468
469 bool sideTableLocked = false;
470
471 isa_t oldisa;
472 isa_t newisa;
473
474 retry:
475 do {
476 oldisa = LoadExclusive(&isa.bits);
477 newisa = oldisa;
478 if (!newisa.indexed) goto unindexed;
479 // don't check newisa.fast_rr; we already called any RR overrides
480 uintptr_t carry;
481 newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc--
482 if (carry) goto underflow;
483 } while (!StoreReleaseExclusive(&isa.bits, oldisa.bits, newisa.bits));
484
485 if (sideTableLocked) sidetable_unlock();
486 return false;
487
488 underflow:
489 // newisa.extra_rc-- underflowed: borrow from side table or deallocate
490
491 // abandon newisa to undo the decrement
492 newisa = oldisa;
493
494 if (newisa.has_sidetable_rc) {
495 if (!handleUnderflow) {
496 return rootRelease_underflow(performDealloc);
497 }
498
499 // Transfer retain count from side table to inline storage.
500
501 if (!sideTableLocked) {
502 sidetable_lock();
503 sideTableLocked = true;
504 if (!isa.indexed) {
505 // Lost a race vs the indexed -> not indexed transition
506 // before we got the side table lock. Stop now to avoid
507 // breaking the safety checks in the sidetable ExtraRC code.
508 goto unindexed;
509 }
510 }
511
512 // Try to remove some retain counts from the side table.
513 size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);
514
515 // To avoid races, has_sidetable_rc must remain set
516 // even if the side table count is now zero.
517
518 if (borrowed > 0) {
519 // Side table retain count decreased.
520 // Try to add them to the inline count.
521 newisa.extra_rc = borrowed - 1; // redo the original decrement too
522 bool stored = StoreExclusive(&isa.bits, oldisa.bits, newisa.bits);
523 if (!stored) {
524 // Inline update failed.
525 // Try it again right now. This prevents livelock on LL/SC
526 // architectures where the side table access itself may have
527 // dropped the reservation.
528 isa_t oldisa2 = LoadExclusive(&isa.bits);
529 isa_t newisa2 = oldisa2;
530 if (newisa2.indexed) {
531 uintptr_t overflow;
532 newisa2.bits =
533 addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
534 if (!overflow) {
535 stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits,
536 newisa2.bits);
537 }
538 }
539 }
540
541 if (!stored) {
542 // Inline update failed.
543 // Put the retains back in the side table.
544 sidetable_addExtraRC_nolock(borrowed);
545 goto retry;
546 }
547
548 // Decrement successful after borrowing from side table.
549 // This decrement cannot be the deallocating decrement - the side
550 // table lock and has_sidetable_rc bit ensure that if everyone
551 // else tried to -release while we worked, the last one would block.
552 sidetable_unlock();
553 return false;
554 }
555 else {
556 // Side table is empty after all. Fall-through to the dealloc path.
557 }
558 }
559
560 // Really deallocate.
561
562 if (sideTableLocked) sidetable_unlock();
563
564 if (newisa.deallocating) {
565 return overrelease_error();
566 }
567 newisa.deallocating = true;
568 if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
569 __sync_synchronize();
570 if (performDealloc) {
571 ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
572 }
573 return true;
574
575 unindexed:
576 if (sideTableLocked) sidetable_unlock();
577 return sidetable_release(performDealloc);
578 }
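// Worked example of the borrow path, continuing the numbers used above for
// rootRetain (RC_HALF == 128 is an assumption for an 8-bit extra_rc field):
//   before:   1 (implicit) + 0 inline + 128 side table      == 129 total
//   release:  extra_rc-- underflows, so borrow RC_HALF from the side table
//             and store extra_rc = borrowed - 1 = 127 (the -1 redoes the
//             original decrement)
//   after:    1 (implicit) + 127 inline + 0 side table      == 128 total
//   has_sidetable_rc stays set even though the side table count is now zero.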
579
580
581 // Equivalent to [this autorelease], with shortcuts if there is no override
582 inline id
583 objc_object::autorelease()
584 {
585 // UseGC is allowed here, but requires hasCustomRR.
586 assert(!UseGC || ISA()->hasCustomRR());
587
588 if (isTaggedPointer()) return (id)this;
589 if (! ISA()->hasCustomRR()) return rootAutorelease();
590
591 return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
592 }
593
594
595 // Base autorelease implementation, ignoring overrides.
596 inline id
597 objc_object::rootAutorelease()
598 {
599 assert(!UseGC);
600
601 if (isTaggedPointer()) return (id)this;
602 if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;
603
604 return rootAutorelease2();
605 }
606
607
608 inline uintptr_t
609 objc_object::rootRetainCount()
610 {
611 assert(!UseGC);
612 if (isTaggedPointer()) return (uintptr_t)this;
613
614 sidetable_lock();
615 isa_t bits = LoadExclusive(&isa.bits);
616 if (bits.indexed) {
617 uintptr_t rc = 1 + bits.extra_rc;
618 if (bits.has_sidetable_rc) {
619 rc += sidetable_getExtraRC_nolock();
620 }
621 sidetable_unlock();
622 return rc;
623 }
624
625 sidetable_unlock();
626 return sidetable_retainCount();
627 }
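// Example: with isa.extra_rc == 2 and 5 more counts in the side table,
// rootRetainCount() returns 1 + 2 + 5 == 8; the leading 1 is the implicit
// retain that is never stored in extra_rc.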
628
629
630 // SUPPORT_NONPOINTER_ISA
631 #else
632 // not SUPPORT_NONPOINTER_ISA
633
634
635 inline Class
636 objc_object::ISA()
637 {
638 assert(!isTaggedPointer());
639 return isa.cls;
640 }
641
642
643 inline bool
644 objc_object::hasIndexedIsa()
645 {
646 return false;
647 }
648
649
650 inline Class
651 objc_object::getIsa()
652 {
653 #if SUPPORT_TAGGED_POINTERS
654 if (isTaggedPointer()) {
655 uintptr_t slot = ((uintptr_t)this >> TAG_SLOT_SHIFT) & TAG_SLOT_MASK;
656 return objc_tag_classes[slot];
657 }
658 #endif
659 return ISA();
660 }
661
662
663 inline void
664 objc_object::initIsa(Class cls)
665 {
666 assert(!isTaggedPointer());
667 isa = (uintptr_t)cls;
668 }
669
670
671 inline void
672 objc_object::initClassIsa(Class cls)
673 {
674 initIsa(cls);
675 }
676
677
678 inline void
679 objc_object::initProtocolIsa(Class cls)
680 {
681 initIsa(cls);
682 }
683
684
685 inline void
686 objc_object::initInstanceIsa(Class cls, bool)
687 {
688 initIsa(cls);
689 }
690
691
692 inline void
693 objc_object::initIsa(Class cls, bool, bool)
694 {
695 initIsa(cls);
696 }
697
698
699 inline Class
700 objc_object::changeIsa(Class cls)
701 {
702 // This is almost always true but there are
703 // enough edge cases that we can't assert it.
704 // assert(cls->isFuture() ||
705 // cls->isInitializing() || cls->isInitialized());
706
707 assert(!isTaggedPointer());
708
709 isa_t oldisa, newisa;
710 newisa.cls = cls;
711 do {
712 oldisa = LoadExclusive(&isa.bits);
713 } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));
714
715 if (oldisa.cls && oldisa.cls->instancesHaveAssociatedObjects()) {
716 cls->setInstancesHaveAssociatedObjects();
717 }
718
719 return oldisa.cls;
720 }
721
722
723 inline bool
724 objc_object::isTaggedPointer()
725 {
726 #if SUPPORT_TAGGED_POINTERS
727 return ((uintptr_t)this & TAG_MASK);
728 #else
729 return false;
730 #endif
731 }
732
733
734 inline bool
735 objc_object::hasAssociatedObjects()
736 {
737 assert(!UseGC);
738
739 return getIsa()->instancesHaveAssociatedObjects();
740 }
741
742
743 inline void
744 objc_object::setHasAssociatedObjects()
745 {
746 assert(!UseGC);
747
748 getIsa()->setInstancesHaveAssociatedObjects();
749 }
750
751
752 inline bool
753 objc_object::isWeaklyReferenced()
754 {
755 assert(!isTaggedPointer());
756 assert(!UseGC);
757
758 return sidetable_isWeaklyReferenced();
759 }
760
761
762 inline void
763 objc_object::setWeaklyReferenced_nolock()
764 {
765 assert(!isTaggedPointer());
766 assert(!UseGC);
767
768 sidetable_setWeaklyReferenced_nolock();
769 }
770
771
772 inline bool
773 objc_object::hasCxxDtor()
774 {
775 assert(!isTaggedPointer());
776 return isa.cls->hasCxxDtor();
777 }
778
779
780 inline bool
781 objc_object::rootIsDeallocating()
782 {
783 assert(!UseGC);
784
785 if (isTaggedPointer()) return false;
786 return sidetable_isDeallocating();
787 }
788
789
790 inline void
791 objc_object::clearDeallocating()
792 {
793 sidetable_clearDeallocating();
794 }
795
796
797 inline void
798 objc_object::rootDealloc()
799 {
800 if (isTaggedPointer()) return;
801 object_dispose((id)this);
802 }
803
804
805 // Equivalent to calling [this retain], with shortcuts if there is no override
806 inline id
807 objc_object::retain()
808 {
809 // UseGC is allowed here, but requires hasCustomRR.
810 assert(!UseGC || ISA()->hasCustomRR());
811 assert(!isTaggedPointer());
812
813 if (! ISA()->hasCustomRR()) {
814 return sidetable_retain();
815 }
816
817 return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
818 }
819
820
821 // Base retain implementation, ignoring overrides.
822 // This does not check isa.fast_rr; if there is an RR override then
823 // it was already called and it chose to call [super retain].
824 inline id
825 objc_object::rootRetain()
826 {
827 assert(!UseGC);
828
829 if (isTaggedPointer()) return (id)this;
830 return sidetable_retain();
831 }
832
833
834 // Equivalent to calling [this release], with shortcuts if there is no override
835 inline void
836 objc_object::release()
837 {
838 // UseGC is allowed here, but requires hasCustomRR.
839 assert(!UseGC || ISA()->hasCustomRR());
840 assert(!isTaggedPointer());
841
842 if (! ISA()->hasCustomRR()) {
843 sidetable_release();
844 return;
845 }
846
847 ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
848 }
849
850
851 // Base release implementation, ignoring overrides.
852 // Does not call -dealloc.
853 // Returns true if the object should now be deallocated.
854 // This does not check isa.fast_rr; if there is an RR override then
855 // it was already called and it chose to call [super release].
856 inline bool
857 objc_object::rootRelease()
858 {
859 assert(!UseGC);
860
861 if (isTaggedPointer()) return false;
862 return sidetable_release(true);
863 }
864
865 inline bool
866 objc_object::rootReleaseShouldDealloc()
867 {
868 if (isTaggedPointer()) return false;
869 return sidetable_release(false);
870 }
871
872
873 // Equivalent to [this autorelease], with shortcuts if there is no override
874 inline id
875 objc_object::autorelease()
876 {
877 // UseGC is allowed here, but requires hasCustomRR.
878 assert(!UseGC || ISA()->hasCustomRR());
879
880 if (isTaggedPointer()) return (id)this;
881 if (! ISA()->hasCustomRR()) return rootAutorelease();
882
883 return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
884 }
885
886
887 // Base autorelease implementation, ignoring overrides.
888 inline id
889 objc_object::rootAutorelease()
890 {
891 assert(!UseGC);
892
893 if (isTaggedPointer()) return (id)this;
894 if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;
895
896 return rootAutorelease2();
897 }
898
899
900 // Base tryRetain implementation, ignoring overrides.
901 // This does not check isa.fast_rr; if there is an RR override then
902 // it was already called and it chose to call [super _tryRetain].
903 inline bool
904 objc_object::rootTryRetain()
905 {
906 assert(!UseGC);
907
908 if (isTaggedPointer()) return true;
909 return sidetable_tryRetain();
910 }
911
912
913 inline uintptr_t
914 objc_object::rootRetainCount()
915 {
916 assert(!UseGC);
917
918 if (isTaggedPointer()) return (uintptr_t)this;
919 return sidetable_retainCount();
920 }
921
922
923 // not SUPPORT_NONPOINTER_ISA
924 #endif
925
926
927 #if SUPPORT_RETURN_AUTORELEASE
928
929 /***********************************************************************
930 Fast handling of return through Cocoa's +0 autoreleasing convention.
931 The caller and callee cooperate to keep the returned object
932 out of the autorelease pool and eliminate redundant retain/release pairs.
933
934 An optimized callee looks at the caller's instructions following the
935 return. If the caller's instructions are also optimized then the callee
936 skips all retain count operations: no autorelease, no retain/autorelease.
937 Instead it saves the result's current retain count (+0 or +1) in
938 thread-local storage. If the caller does not look optimized then
939 the callee performs autorelease or retain/autorelease as usual.
940
941 An optimized caller looks at the thread-local storage. If the result
942 is set then it performs any retain or release needed to change the
943 result from the retain count left by the callee to the retain count
944 desired by the caller. Otherwise the caller assumes the result is
945 currently at +0 from an unoptimized callee and performs any retain
946 needed for that case.
947
948 There are two optimized callees:
949 objc_autoreleaseReturnValue
950 result is currently +1. The unoptimized path autoreleases it.
951 objc_retainAutoreleaseReturnValue
952 result is currently +0. The unoptimized path retains and autoreleases it.
953
954 There are two optimized callers:
955 objc_retainAutoreleasedReturnValue
956 caller wants the value at +1. The unoptimized path retains it.
957 objc_unsafeClaimAutoreleasedReturnValue
958 caller wants the value at +0 unsafely. The unoptimized path does nothing.
959
960 Example:
961
962 Callee:
963 // compute ret at +1
964 return objc_autoreleaseReturnValue(ret);
965
966 Caller:
967 ret = callee();
968 ret = objc_retainAutoreleasedReturnValue(ret);
969 // use ret at +1 here
970
971 Callee sees the optimized caller, sets TLS, and leaves the result at +1.
972 Caller sees the TLS, clears it, and accepts the result at +1 as-is.
973
974 The callee's recognition of the optimized caller is architecture-dependent.
975 x86_64: Callee looks for `mov rax, rdi` followed by a call or
976 jump instruction to objc_retainAutoreleasedReturnValue or
977 objc_unsafeClaimAutoreleasedReturnValue.
978 armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register).
979 arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register).
980
981 Tagged pointer objects do participate in the optimized return scheme,
982 because it saves message sends. They are not entered in the autorelease
983 pool in the unoptimized case.
984 **********************************************************************/
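/***********************************************************************
  Illustrative only: roughly what a compiler emits at an optimized call site
  on arm64 (instruction selection and scheduling are compiler-dependent):

    bl      _callee                                 ; result arrives in x0
    mov     x29, x29                                ; marker nop at the return address
    bl      _objc_retainAutoreleasedReturnValue     ; caller claims the value at +1

  callerAcceptsOptimizedReturn() below inspects the caller's return address
  for exactly this marker instruction.
**********************************************************************/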
985
986 # if __x86_64__
987
988 static ALWAYS_INLINE bool
989 callerAcceptsOptimizedReturn(const void * const ra0)
990 {
991 const uint8_t *ra1 = (const uint8_t *)ra0;
992 const uint16_t *ra2;
993 const uint32_t *ra4 = (const uint32_t *)ra1;
994 const void **sym;
995
996 #define PREFER_GOTPCREL 0
997 #if PREFER_GOTPCREL
998 // 48 89 c7 movq %rax,%rdi
999 // ff 15 callq *symbol@GOTPCREL(%rip)
1000 if (*ra4 != 0xffc78948) {
1001 return false;
1002 }
1003 if (ra1[4] != 0x15) {
1004 return false;
1005 }
1006 ra1 += 3;
1007 #else
1008 // 48 89 c7 movq %rax,%rdi
1009 // e8 callq symbol
1010 if (*ra4 != 0xe8c78948) {
1011 return false;
1012 }
1013 ra1 += (long)*(const int32_t *)(ra1 + 4) + 8l;
1014 ra2 = (const uint16_t *)ra1;
1015 // ff 25 jmpq *symbol@DYLDMAGIC(%rip)
1016 if (*ra2 != 0x25ff) {
1017 return false;
1018 }
1019 #endif
1020 ra1 += 6l + (long)*(const int32_t *)(ra1 + 2);
1021 sym = (const void **)ra1;
1022 if (*sym != objc_retainAutoreleasedReturnValue &&
1023 *sym != objc_unsafeClaimAutoreleasedReturnValue)
1024 {
1025 return false;
1026 }
1027
1028 return true;
1029 }
1030
1031 // __x86_64__
1032 # elif __arm__
1033
1034 static ALWAYS_INLINE bool
1035 callerAcceptsOptimizedReturn(const void *ra)
1036 {
1037 // if the low bit is set, we're returning to thumb mode
1038 if ((uintptr_t)ra & 1) {
1039 // 3f 46 mov r7, r7
1040 // we mask off the low bit via subtraction
1041 if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
1042 return true;
1043 }
1044 } else {
1045 // 07 70 a0 e1 mov r7, r7
1046 if (*(uint32_t *)ra == 0xe1a07007) {
1047 return true;
1048 }
1049 }
1050 return false;
1051 }
1052
1053 // __arm__
1054 # elif __arm64__
1055
1056 static ALWAYS_INLINE bool
1057 callerAcceptsOptimizedReturn(const void *ra)
1058 {
1059 // fd 03 1d aa mov fp, fp
1060 if (*(uint32_t *)ra == 0xaa1d03fd) {
1061 return true;
1062 }
1063 return false;
1064 }
1065
1066 // __arm64__
1067 # elif __i386__ && TARGET_IPHONE_SIMULATOR
1068
1069 static inline bool
1070 callerAcceptsOptimizedReturn(const void *ra)
1071 {
1072 return false;
1073 }
1074
1075 // __i386__ && TARGET_IPHONE_SIMULATOR
1076 # else
1077
1078 #warning unknown architecture
1079
1080 static ALWAYS_INLINE bool
1081 callerAcceptsOptimizedReturn(const void *ra)
1082 {
1083 return false;
1084 }
1085
1086 // unknown architecture
1087 # endif
1088
1089
1090 static ALWAYS_INLINE ReturnDisposition
1091 getReturnDisposition()
1092 {
1093 return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
1094 }
1095
1096
1097 static ALWAYS_INLINE void
1098 setReturnDisposition(ReturnDisposition disposition)
1099 {
1100 tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
1101 }
1102
1103
1104 // Try to prepare for optimized return with the given disposition (+0 or +1).
1105 // Returns true if the optimized path is successful.
1106 // Otherwise the return value must be retained and/or autoreleased as usual.
1107 static ALWAYS_INLINE bool
1108 prepareOptimizedReturn(ReturnDisposition disposition)
1109 {
1110 assert(getReturnDisposition() == ReturnAtPlus0);
1111
1112 if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
1113 if (disposition) setReturnDisposition(disposition);
1114 return true;
1115 }
1116
1117 return false;
1118 }
1119
1120
1121 // Try to accept an optimized return.
1122 // Returns the disposition of the returned object (+0 or +1).
1123 // An un-optimized return is +0.
1124 static ALWAYS_INLINE ReturnDisposition
1125 acceptOptimizedReturn()
1126 {
1127 ReturnDisposition disposition = getReturnDisposition();
1128 setReturnDisposition(ReturnAtPlus0); // reset to the unoptimized state
1129 return disposition;
1130 }
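// A sketch of how the public entry points (defined in NSObject.mm) use the
// helpers above; paraphrased, not the verbatim definitions:
//
//   id objc_autoreleaseReturnValue(id obj)          // optimized callee at +1
//   {
//       if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
//       return objc_autorelease(obj);
//   }
//
//   id objc_retainAutoreleasedReturnValue(id obj)   // optimized caller wants +1
//   {
//       if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
//       return objc_retain(obj);
//   }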
1131
1132
1133 // SUPPORT_RETURN_AUTORELEASE
1134 #else
1135 // not SUPPORT_RETURN_AUTORELEASE
1136
1137
1138 static ALWAYS_INLINE bool
1139 prepareOptimizedReturn(ReturnDisposition disposition __unused)
1140 {
1141 return false;
1142 }
1143
1144
1145 static ALWAYS_INLINE ReturnDisposition
1146 acceptOptimizedReturn()
1147 {
1148 return ReturnAtPlus0;
1149 }
1150
1151
1152 // not SUPPORT_RETURN_AUTORELEASE
1153 #endif
1154
1155
1156 // _OBJC_OBJECT_H_
1157 #endif