/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


/***********************************************************************
* Inlineable parts of NSObject / objc_object implementation
**********************************************************************/

#ifndef _OBJC_OBJCOBJECT_H_
#define _OBJC_OBJCOBJECT_H_

#include "objc-private.h"


enum ReturnDisposition : bool {
    ReturnAtPlus0 = false, ReturnAtPlus1 = true
};

static ALWAYS_INLINE
bool prepareOptimizedReturn(ReturnDisposition disposition);


#if SUPPORT_TAGGED_POINTERS

extern "C" {
    extern Class objc_debug_taggedpointer_classes[_OBJC_TAG_SLOT_COUNT*2];
    extern Class objc_debug_taggedpointer_ext_classes[_OBJC_TAG_EXT_SLOT_COUNT];
}
#define objc_tag_classes objc_debug_taggedpointer_classes
#define objc_tag_ext_classes objc_debug_taggedpointer_ext_classes

#endif

#if SUPPORT_INDEXED_ISA

ALWAYS_INLINE Class &
classForIndex(uintptr_t index) {
    assert(index > 0);
    assert(index < (uintptr_t)objc_indexed_classes_count);
    return objc_indexed_classes[index];
}

#endif


inline bool
objc_object::isClass()
{
    if (isTaggedPointer()) return false;
    return ISA()->isMetaClass();
}


#if SUPPORT_TAGGED_POINTERS

inline Class
objc_object::getIsa()
{
    if (!isTaggedPointer()) return ISA();

    uintptr_t ptr = (uintptr_t)this;
    if (isExtTaggedPointer()) {
        uintptr_t slot =
            (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK;
        return objc_tag_ext_classes[slot];
    } else {
        uintptr_t slot =
            (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
        return objc_tag_classes[slot];
    }
}
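
// Illustrative sketch (not part of the original header): how a payload such
// as a tagged NSNumber resolves to its class through the slot tables above.
// The variable names are hypothetical.
//
//   id obj = @( 42 );                               // may be a tagged pointer on 64-bit
//   if (_objc_isTaggedPointer(obj)) {
//       uintptr_t bits = (uintptr_t)obj;
//       uintptr_t slot = (bits >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
//       Class cls = objc_tag_classes[slot];         // same lookup as getIsa()
//   }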


inline bool
objc_object::isTaggedPointer()
{
    return _objc_isTaggedPointer(this);
}

inline bool
objc_object::isBasicTaggedPointer()
{
    return isTaggedPointer() && !isExtTaggedPointer();
}

inline bool
objc_object::isExtTaggedPointer()
{
    return ((uintptr_t)this & _OBJC_TAG_EXT_MASK) == _OBJC_TAG_EXT_MASK;
}


// SUPPORT_TAGGED_POINTERS
#else
// not SUPPORT_TAGGED_POINTERS


inline Class
objc_object::getIsa()
{
    return ISA();
}


inline bool
objc_object::isTaggedPointer()
{
    return false;
}

inline bool
objc_object::isBasicTaggedPointer()
{
    return false;
}

inline bool
objc_object::isExtTaggedPointer()
{
    return false;
}


// not SUPPORT_TAGGED_POINTERS
#endif


#if SUPPORT_NONPOINTER_ISA

inline Class
objc_object::ISA()
{
    assert(!isTaggedPointer());
#if SUPPORT_INDEXED_ISA
    if (isa.nonpointer) {
        uintptr_t slot = isa.indexcls;
        return classForIndex((unsigned)slot);
    }
    return (Class)isa.bits;
#else
    return (Class)(isa.bits & ISA_MASK);
#endif
}


inline bool
objc_object::hasNonpointerIsa()
{
    return isa.nonpointer;
}


inline void
objc_object::initIsa(Class cls)
{
    initIsa(cls, false, false);
}

inline void
objc_object::initClassIsa(Class cls)
{
    if (DisableNonpointerIsa || cls->instancesRequireRawIsa()) {
        initIsa(cls, false/*not nonpointer*/, false);
    } else {
        initIsa(cls, true/*nonpointer*/, false);
    }
}

inline void
objc_object::initProtocolIsa(Class cls)
{
    return initClassIsa(cls);
}

inline void
objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
{
    assert(!cls->instancesRequireRawIsa());
    assert(hasCxxDtor == cls->hasCxxDtor());

    initIsa(cls, true, hasCxxDtor);
}

inline void
objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor)
{
    assert(!isTaggedPointer());

    if (!nonpointer) {
        isa.cls = cls;
    } else {
        assert(!DisableNonpointerIsa);
        assert(!cls->instancesRequireRawIsa());

        isa_t newisa(0);

#if SUPPORT_INDEXED_ISA
        assert(cls->classArrayIndex() > 0);
        newisa.bits = ISA_INDEX_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.indexcls = (uintptr_t)cls->classArrayIndex();
#else
        newisa.bits = ISA_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.shiftcls = (uintptr_t)cls >> 3;
#endif

        // This write must be performed in a single store in some cases
        // (for example when realizing a class because other threads
        // may simultaneously try to use the class).
        // fixme use atomics here to guarantee single-store and to
        // guarantee memory order w.r.t. the class index table
        // ...but not too atomic because we don't want to hurt instantiation
        isa = newisa;
    }
}
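
// Illustrative sketch (not from the original header), assuming the
// non-indexed layout used above: a nonpointer isa packs the class pointer
// into shiftcls and decodes it with ISA_MASK.
//
//   isa_t i(0);
//   i.bits     = ISA_MAGIC_VALUE;                  // sets nonpointer + magic bits
//   i.shiftcls = (uintptr_t)cls >> 3;              // class pointers are at least 8-byte aligned
//   Class decoded = (Class)(i.bits & ISA_MASK);    // same decode as ISA()
//   assert(decoded == cls);                        // for some non-raw-isa Class cls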


inline Class
objc_object::changeIsa(Class newCls)
{
    // This is almost always true but there are
    // enough edge cases that we can't assert it.
    // assert(newCls->isFuture() ||
    //        newCls->isInitializing() || newCls->isInitialized());

    assert(!isTaggedPointer());

    isa_t oldisa;
    isa_t newisa;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        if ((oldisa.bits == 0 || oldisa.nonpointer) &&
            !newCls->isFuture() && newCls->canAllocNonpointer())
        {
            // 0 -> nonpointer
            // nonpointer -> nonpointer
#if SUPPORT_INDEXED_ISA
            if (oldisa.bits == 0) newisa.bits = ISA_INDEX_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            assert(newCls->classArrayIndex() > 0);
            newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
#else
            if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            newisa.shiftcls = (uintptr_t)newCls >> 3;
#endif
        }
        else if (oldisa.nonpointer) {
            // nonpointer -> raw pointer
            // Need to copy retain count et al to side table.
            // Acquire side table lock before setting isa to
            // prevent races such as concurrent -release.
            if (!sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.cls = newCls;
        }
        else {
            // raw pointer -> raw pointer
            newisa.cls = newCls;
        }
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (transcribeToSideTable) {
        // Copy oldisa's retain count et al to side table.
        // oldisa.has_assoc: nothing to do
        // oldisa.has_cxx_dtor: nothing to do
        sidetable_moveExtraRC_nolock(oldisa.extra_rc,
                                     oldisa.deallocating,
                                     oldisa.weakly_referenced);
    }

    if (sideTableLocked) sidetable_unlock();

    if (oldisa.nonpointer) {
#if SUPPORT_INDEXED_ISA
        return classForIndex(oldisa.indexcls);
#else
        return (Class)((uintptr_t)oldisa.shiftcls << 3);
#endif
    }
    else {
        return oldisa.cls;
    }
}
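
// Simplified sketch (not from this header): object_setClass() in
// objc-class.mm is essentially a wrapper around changeIsa(); the extra
// handling there (e.g. around +initialize) is omitted here.
//
//   Class object_setClass(id obj, Class cls)
//   {
//       if (!obj) return nil;
//       return obj->changeIsa(cls);     // returns the previous class
//   }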


inline bool
objc_object::hasAssociatedObjects()
{
    if (isTaggedPointer()) return true;
    if (isa.nonpointer) return isa.has_assoc;
    return true;
}


inline void
objc_object::setHasAssociatedObjects()
{
    if (isTaggedPointer()) return;

 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (!newisa.nonpointer || newisa.has_assoc) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.has_assoc = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}


inline bool
objc_object::isWeaklyReferenced()
{
    assert(!isTaggedPointer());
    if (isa.nonpointer) return isa.weakly_referenced;
    else return sidetable_isWeaklyReferenced();
}


inline void
objc_object::setWeaklyReferenced_nolock()
{
 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (slowpath(!newisa.nonpointer)) {
        ClearExclusive(&isa.bits);
        sidetable_setWeaklyReferenced_nolock();
        return;
    }
    if (newisa.weakly_referenced) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.weakly_referenced = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}


inline bool
objc_object::hasCxxDtor()
{
    assert(!isTaggedPointer());
    if (isa.nonpointer) return isa.has_cxx_dtor;
    else return isa.cls->hasCxxDtor();
}



inline bool
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    if (isa.nonpointer) return isa.deallocating;
    return sidetable_isDeallocating();
}


inline void
objc_object::clearDeallocating()
{
    if (slowpath(!isa.nonpointer)) {
        // Slow path for raw pointer isa.
        sidetable_clearDeallocating();
    }
    else if (slowpath(isa.weakly_referenced || isa.has_sidetable_rc)) {
        // Slow path for non-pointer isa with weak refs and/or side table data.
        clearDeallocating_slow();
    }

    assert(!sidetable_present());
}


inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return; // fixme necessary?

    if (fastpath(isa.nonpointer &&
                 !isa.weakly_referenced &&
                 !isa.has_assoc &&
                 !isa.has_cxx_dtor &&
                 !isa.has_sidetable_rc))
    {
        assert(!sidetable_present());
        free(this);
    }
    else {
        object_dispose((id)this);
    }
}
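
// Rough sketch of the slow path above (object_dispose() is defined elsewhere
// in the runtime; simplified here for illustration): it undoes everything the
// fast path proves is absent before freeing the memory.
//
//   id object_dispose(id obj)
//   {
//       if (!obj) return nil;
//       objc_destructInstance(obj);   // C++ destructors, associated refs,
//                                     // weak refs and side table cleanup
//       free(obj);
//       return nil;
//   }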


// Equivalent to calling [this retain], with shortcuts if there is no override
inline id
objc_object::retain()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return rootRetain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
}


// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super retain].
//
// tryRetain=true is the -_tryRetain path.
// handleOverflow=false is the frameless fast path.
// handleOverflow=true is the framed slow path including overflow to side table
// The code is structured this way to prevent duplication.

ALWAYS_INLINE id
objc_object::rootRetain()
{
    return rootRetain(false, false);
}

ALWAYS_INLINE bool
objc_object::rootTryRetain()
{
    return rootRetain(true, false) ? true : false;
}

ALWAYS_INLINE id
objc_object::rootRetain(bool tryRetain, bool handleOverflow)
{
    if (isTaggedPointer()) return (id)this;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    isa_t oldisa;
    isa_t newisa;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
            else return sidetable_retain();
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        if (slowpath(tryRetain && newisa.deallocating)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            return nil;
        }
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry); // extra_rc++

        if (slowpath(carry)) {
            // newisa.extra_rc++ overflowed
            if (!handleOverflow) {
                ClearExclusive(&isa.bits);
                return rootRetain_overflow(tryRetain);
            }
            // Leave half of the retain counts inline and
            // prepare to copy the other half to the side table.
            if (!tryRetain && !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));

    if (slowpath(transcribeToSideTable)) {
        // Copy the other half of the retain counts to the side table.
        sidetable_addExtraRC_nolock(RC_HALF);
    }

    if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
    return (id)this;
}
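
// Worked example of the overflow path (illustrative; the exact field widths
// come from isa.h and differ per architecture): if extra_rc is an 8-bit
// field, it can hold 255 extra retains on top of the implicit +1. On the
// retain that would overflow it (count 256 -> 257), RC_HALF = 128 retains
// stay inline, another RC_HALF is added to the side table, and
// has_sidetable_rc is set, so
//
//   true retain count = 1 + extra_rc + side table count = 1 + 128 + 128 = 257
//
// is preserved across the transfer.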


// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        rootRelease();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
}


// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super release].
//
// handleUnderflow=false is the frameless fast path.
// handleUnderflow=true is the framed slow path including side table borrow
// The code is structured this way to prevent duplication.

ALWAYS_INLINE bool
objc_object::rootRelease()
{
    return rootRelease(true, false);
}

ALWAYS_INLINE bool
objc_object::rootReleaseShouldDealloc()
{
    return rootRelease(false, false);
}

ALWAYS_INLINE bool
objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
{
    if (isTaggedPointer()) return false;

    bool sideTableLocked = false;

    isa_t oldisa;
    isa_t newisa;

 retry:
    do {
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (sideTableLocked) sidetable_unlock();
            return sidetable_release(performDealloc);
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        uintptr_t carry;
        newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc--
        if (slowpath(carry)) {
            // don't ClearExclusive()
            goto underflow;
        }
    } while (slowpath(!StoreReleaseExclusive(&isa.bits,
                                             oldisa.bits, newisa.bits)));

    if (slowpath(sideTableLocked)) sidetable_unlock();
    return false;

 underflow:
    // newisa.extra_rc-- underflowed: borrow from side table or deallocate

    // abandon newisa to undo the decrement
    newisa = oldisa;

    if (slowpath(newisa.has_sidetable_rc)) {
        if (!handleUnderflow) {
            ClearExclusive(&isa.bits);
            return rootRelease_underflow(performDealloc);
        }

        // Transfer retain count from side table to inline storage.

        if (!sideTableLocked) {
            ClearExclusive(&isa.bits);
            sidetable_lock();
            sideTableLocked = true;
            // Need to start over to avoid a race against
            // the nonpointer -> raw pointer transition.
            goto retry;
        }

        // Try to remove some retain counts from the side table.
        size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);

        // To avoid races, has_sidetable_rc must remain set
        // even if the side table count is now zero.

        if (borrowed > 0) {
            // Side table retain count decreased.
            // Try to add them to the inline count.
            newisa.extra_rc = borrowed - 1;  // redo the original decrement too
            bool stored = StoreReleaseExclusive(&isa.bits,
                                                oldisa.bits, newisa.bits);
            if (!stored) {
                // Inline update failed.
                // Try it again right now. This prevents livelock on LL/SC
                // architectures where the side table access itself may have
                // dropped the reservation.
                isa_t oldisa2 = LoadExclusive(&isa.bits);
                isa_t newisa2 = oldisa2;
                if (newisa2.nonpointer) {
                    uintptr_t overflow;
                    newisa2.bits =
                        addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
                    if (!overflow) {
                        stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits,
                                                       newisa2.bits);
                    }
                }
            }

            if (!stored) {
                // Inline update failed.
                // Put the retains back in the side table.
                sidetable_addExtraRC_nolock(borrowed);
                goto retry;
            }

            // Decrement successful after borrowing from side table.
            // This decrement cannot be the deallocating decrement - the side
            // table lock and has_sidetable_rc bit ensure that if everyone
            // else tried to -release while we worked, the last one would block.
            sidetable_unlock();
            return false;
        }
        else {
            // Side table is empty after all. Fall-through to the dealloc path.
        }
    }

    // Really deallocate.

    if (slowpath(newisa.deallocating)) {
        ClearExclusive(&isa.bits);
        if (sideTableLocked) sidetable_unlock();
        return overrelease_error();
        // does not actually return
    }
    newisa.deallocating = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;

    if (slowpath(sideTableLocked)) sidetable_unlock();

    __sync_synchronize();
    if (performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
    }
    return true;
}
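
// Worked example of the borrow path above (illustrative; field widths come
// from isa.h): suppose inline extra_rc is 0 but has_sidetable_rc is set with
// 40 retains parked in the side table. A release underflows extra_rc, so the
// slow path borrows up to RC_HALF from the side table. With borrowed == 40 it
// stores extra_rc = 39 (the "- 1" redoes the release that triggered the
// underflow), leaving
//
//   true retain count = 1 + 39 + 0 = 40
//
// which is the old count (41) minus the release that started the underflow.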


// Equivalent to [this autorelease], with shortcuts if there is no override
inline id
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
}


// Base autorelease implementation, ignoring overrides.
inline id
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


inline uintptr_t
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;

    sidetable_lock();
    isa_t bits = LoadExclusive(&isa.bits);
    ClearExclusive(&isa.bits);
    if (bits.nonpointer) {
        uintptr_t rc = 1 + bits.extra_rc;
        if (bits.has_sidetable_rc) {
            rc += sidetable_getExtraRC_nolock();
        }
        sidetable_unlock();
        return rc;
    }

    sidetable_unlock();
    return sidetable_retainCount();
}
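
// For reference (sketch; the actual definition lives in NSObject.mm):
// -retainCount simply reports this value, which is why a freshly allocated
// object with no extra retains answers 1.
//
//   - (NSUInteger)retainCount {
//       return ((id)self)->rootRetainCount();   // 1 + inline + side table
//   }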


// SUPPORT_NONPOINTER_ISA
#else
// not SUPPORT_NONPOINTER_ISA


inline Class
objc_object::ISA()
{
    assert(!isTaggedPointer());
    return isa.cls;
}


inline bool
objc_object::hasNonpointerIsa()
{
    return false;
}


inline void
objc_object::initIsa(Class cls)
{
    assert(!isTaggedPointer());
    isa = (uintptr_t)cls;
}


inline void
objc_object::initClassIsa(Class cls)
{
    initIsa(cls);
}


inline void
objc_object::initProtocolIsa(Class cls)
{
    initIsa(cls);
}


inline void
objc_object::initInstanceIsa(Class cls, bool)
{
    initIsa(cls);
}


inline void
objc_object::initIsa(Class cls, bool, bool)
{
    initIsa(cls);
}


inline Class
objc_object::changeIsa(Class cls)
{
    // This is almost always true but there are
    // enough edge cases that we can't assert it.
    // assert(cls->isFuture() ||
    //        cls->isInitializing() || cls->isInitialized());

    assert(!isTaggedPointer());

    isa_t oldisa, newisa;
    newisa.cls = cls;
    do {
        oldisa = LoadExclusive(&isa.bits);
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (oldisa.cls && oldisa.cls->instancesHaveAssociatedObjects()) {
        cls->setInstancesHaveAssociatedObjects();
    }

    return oldisa.cls;
}


inline bool
objc_object::hasAssociatedObjects()
{
    return getIsa()->instancesHaveAssociatedObjects();
}


inline void
objc_object::setHasAssociatedObjects()
{
    getIsa()->setInstancesHaveAssociatedObjects();
}


inline bool
objc_object::isWeaklyReferenced()
{
    assert(!isTaggedPointer());

    return sidetable_isWeaklyReferenced();
}


inline void
objc_object::setWeaklyReferenced_nolock()
{
    assert(!isTaggedPointer());

    sidetable_setWeaklyReferenced_nolock();
}


inline bool
objc_object::hasCxxDtor()
{
    assert(!isTaggedPointer());
    return isa.cls->hasCxxDtor();
}


inline bool
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    return sidetable_isDeallocating();
}


inline void
objc_object::clearDeallocating()
{
    sidetable_clearDeallocating();
}


inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;
    object_dispose((id)this);
}


// Equivalent to calling [this retain], with shortcuts if there is no override
inline id
objc_object::retain()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return sidetable_retain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
}


// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super retain].
inline id
objc_object::rootRetain()
{
    if (isTaggedPointer()) return (id)this;
    return sidetable_retain();
}


// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        sidetable_release();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
}


// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super release].
inline bool
objc_object::rootRelease()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(true);
}

inline bool
objc_object::rootReleaseShouldDealloc()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(false);
}


// Equivalent to [this autorelease], with shortcuts if there is no override
inline id
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
}


// Base autorelease implementation, ignoring overrides.
inline id
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


// Base tryRetain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super _tryRetain].
inline bool
objc_object::rootTryRetain()
{
    if (isTaggedPointer()) return true;
    return sidetable_tryRetain();
}


inline uintptr_t
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;
    return sidetable_retainCount();
}


// not SUPPORT_NONPOINTER_ISA
#endif


#if SUPPORT_RETURN_AUTORELEASE

/***********************************************************************
  Fast handling of return through Cocoa's +0 autoreleasing convention.
  The caller and callee cooperate to keep the returned object
  out of the autorelease pool and eliminate redundant retain/release pairs.

  An optimized callee looks at the caller's instructions following the
  return. If the caller's instructions are also optimized then the callee
  skips all retain count operations: no autorelease, no retain/autorelease.
  Instead it saves the result's current retain count (+0 or +1) in
  thread-local storage. If the caller does not look optimized then
  the callee performs autorelease or retain/autorelease as usual.

  An optimized caller looks at the thread-local storage. If the result
  is set then it performs any retain or release needed to change the
  result from the retain count left by the callee to the retain count
  desired by the caller. Otherwise the caller assumes the result is
  currently at +0 from an unoptimized callee and performs any retain
  needed for that case.

  There are two optimized callees:
    objc_autoreleaseReturnValue
      result is currently +1. The unoptimized path autoreleases it.
    objc_retainAutoreleaseReturnValue
      result is currently +0. The unoptimized path retains and autoreleases it.

  There are two optimized callers:
    objc_retainAutoreleasedReturnValue
      caller wants the value at +1. The unoptimized path retains it.
    objc_unsafeClaimAutoreleasedReturnValue
      caller wants the value at +0 unsafely. The unoptimized path does nothing.

  Example:

    Callee:
      // compute ret at +1
      return objc_autoreleaseReturnValue(ret);

    Caller:
      ret = callee();
      ret = objc_retainAutoreleasedReturnValue(ret);
      // use ret at +1 here

  Callee sees the optimized caller, sets TLS, and leaves the result at +1.
  Caller sees the TLS, clears it, and accepts the result at +1 as-is.

  The callee's recognition of the optimized caller is architecture-dependent.
  x86_64: Callee looks for `mov rax, rdi` followed by a call or
    jump instruction to objc_retainAutoreleasedReturnValue or
    objc_unsafeClaimAutoreleasedReturnValue.
  i386:  Callee looks for a magic nop `movl %ebp, %ebp` (frame pointer register)
  armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register).
  arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register).

  Tagged pointer objects do participate in the optimized return scheme,
  because it saves message sends. They are not entered in the autorelease
  pool in the unoptimized case.
**********************************************************************/
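
// Sketch of how the public entry points in NSObject.mm tie into the helpers
// below (simplified here for illustration; see NSObject.mm for the real code):
//
//   // Callee side: hand the +1 result back without autoreleasing it
//   // if the caller is optimized.
//   id objc_autoreleaseReturnValue(id obj)
//   {
//       if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
//       return objc_autorelease(obj);
//   }
//
//   // Caller side: claim the result at +1, retaining only if the callee
//   // did not take the optimized path.
//   id objc_retainAutoreleasedReturnValue(id obj)
//   {
//       if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
//       return objc_retain(obj);
//   }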

# if __x86_64__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void * const ra0)
{
    const uint8_t *ra1 = (const uint8_t *)ra0;
    const unaligned_uint16_t *ra2;
    const unaligned_uint32_t *ra4 = (const unaligned_uint32_t *)ra1;
    const void **sym;

#define PREFER_GOTPCREL 0
#if PREFER_GOTPCREL
    // 48 89 c7    movq %rax,%rdi
    // ff 15       callq *symbol@GOTPCREL(%rip)
    if (*ra4 != 0xffc78948) {
        return false;
    }
    if (ra1[4] != 0x15) {
        return false;
    }
    ra1 += 3;
#else
    // 48 89 c7    movq %rax,%rdi
    // e8          callq symbol
    if (*ra4 != 0xe8c78948) {
        return false;
    }
    ra1 += (long)*(const unaligned_int32_t *)(ra1 + 4) + 8l;
    ra2 = (const unaligned_uint16_t *)ra1;
    // ff 25       jmpq *symbol@DYLDMAGIC(%rip)
    if (*ra2 != 0x25ff) {
        return false;
    }
#endif
    ra1 += 6l + (long)*(const unaligned_int32_t *)(ra1 + 2);
    sym = (const void **)ra1;
    if (*sym != objc_retainAutoreleasedReturnValue &&
        *sym != objc_unsafeClaimAutoreleasedReturnValue)
    {
        return false;
    }

    return true;
}

// __x86_64__
# elif __arm__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    // if the low bit is set, we're returning to thumb mode
    if ((uintptr_t)ra & 1) {
        // 3f 46          mov r7, r7
        // we mask off the low bit via subtraction
        // 16-bit instructions are well-aligned
        if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
            return true;
        }
    } else {
        // 07 70 a0 e1    mov r7, r7
        // 32-bit instructions may be only 16-bit aligned
        if (*(unaligned_uint32_t *)ra == 0xe1a07007) {
            return true;
        }
    }
    return false;
}

// __arm__
# elif __arm64__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    // fd 03 1d aa    mov fp, fp
    // arm64 instructions are well-aligned
    if (*(uint32_t *)ra == 0xaa1d03fd) {
        return true;
    }
    return false;
}

// __arm64__
# elif __i386__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    // 89 ed    movl %ebp, %ebp
    if (*(unaligned_uint16_t *)ra == 0xed89) {
        return true;
    }
    return false;
}

// __i386__
# else

#warning unknown architecture

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    return false;
}

// unknown architecture
# endif


static ALWAYS_INLINE ReturnDisposition
getReturnDisposition()
{
    return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
}


static ALWAYS_INLINE void
setReturnDisposition(ReturnDisposition disposition)
{
    tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
}


// Try to prepare for optimized return with the given disposition (+0 or +1).
// Returns true if the optimized path is successful.
// Otherwise the return value must be retained and/or autoreleased as usual.
static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition)
{
    assert(getReturnDisposition() == ReturnAtPlus0);

    if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
        if (disposition) setReturnDisposition(disposition);
        return true;
    }

    return false;
}


// Try to accept an optimized return.
// Returns the disposition of the returned object (+0 or +1).
// An un-optimized return is +0.
static ALWAYS_INLINE ReturnDisposition
acceptOptimizedReturn()
{
    ReturnDisposition disposition = getReturnDisposition();
    setReturnDisposition(ReturnAtPlus0);  // reset to the unoptimized state
    return disposition;
}


// SUPPORT_RETURN_AUTORELEASE
#else
// not SUPPORT_RETURN_AUTORELEASE


static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition __unused)
{
    return false;
}


static ALWAYS_INLINE ReturnDisposition
acceptOptimizedReturn()
{
    return ReturnAtPlus0;
}


// not SUPPORT_RETURN_AUTORELEASE
#endif


// _OBJC_OBJECT_H_
#endif