/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


/***********************************************************************
* Inlineable parts of NSObject / objc_object implementation
**********************************************************************/

#ifndef _OBJC_OBJCOBJECT_H_
#define _OBJC_OBJCOBJECT_H_

#include "objc-private.h"


enum ReturnDisposition : bool {
    ReturnAtPlus0 = false, ReturnAtPlus1 = true
};

static ALWAYS_INLINE
bool prepareOptimizedReturn(ReturnDisposition disposition);


#if SUPPORT_TAGGED_POINTERS

extern "C" {
    extern Class objc_debug_taggedpointer_classes[_OBJC_TAG_SLOT_COUNT];
    extern Class objc_debug_taggedpointer_ext_classes[_OBJC_TAG_EXT_SLOT_COUNT];
}
#define objc_tag_classes objc_debug_taggedpointer_classes
#define objc_tag_ext_classes objc_debug_taggedpointer_ext_classes

#endif

#if SUPPORT_INDEXED_ISA

ALWAYS_INLINE Class &
classForIndex(uintptr_t index) {
    assert(index > 0);
    assert(index < (uintptr_t)objc_indexed_classes_count);
    return objc_indexed_classes[index];
}

#endif


inline bool
objc_object::isClass()
{
    if (isTaggedPointer()) return false;
    return ISA()->isMetaClass();
}


#if SUPPORT_TAGGED_POINTERS

inline Class
objc_object::getIsa()
{
    if (!isTaggedPointer()) return ISA();

    uintptr_t ptr = (uintptr_t)this;
    if (isExtTaggedPointer()) {
        uintptr_t slot =
            (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK;
        return objc_tag_ext_classes[slot];
    } else {
        uintptr_t slot =
            (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
        return objc_tag_classes[slot];
    }
}


inline bool
objc_object::isTaggedPointer()
{
    return _objc_isTaggedPointer(this);
}

inline bool
objc_object::isBasicTaggedPointer()
{
    return isTaggedPointer() && !isExtTaggedPointer();
}

inline bool
objc_object::isExtTaggedPointer()
{
    uintptr_t ptr = _objc_decodeTaggedPointer(this);
    return (ptr & _OBJC_TAG_EXT_MASK) == _OBJC_TAG_EXT_MASK;
}

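// Illustrative sketch (not part of the original file): how the slot tables
// above resolve a tagged pointer's class. The tag constant and payload are
// hypothetical; the encoding comes from _objc_makeTaggedPointer() in
// objc-internal.h.
//
//     void *tagged = _objc_makeTaggedPointer(OBJC_TAG_NSNumber, 42);
//     Class cls = ((objc_object *)tagged)->getIsa();
//     // For a basic tag, cls == objc_tag_classes[slot], where slot is
//     // (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK.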

// SUPPORT_TAGGED_POINTERS
#else
// not SUPPORT_TAGGED_POINTERS


inline Class
objc_object::getIsa()
{
    return ISA();
}


inline bool
objc_object::isTaggedPointer()
{
    return false;
}

inline bool
objc_object::isBasicTaggedPointer()
{
    return false;
}

inline bool
objc_object::isExtTaggedPointer()
{
    return false;
}


// not SUPPORT_TAGGED_POINTERS
#endif


#if SUPPORT_NONPOINTER_ISA

inline Class
objc_object::ISA()
{
    assert(!isTaggedPointer());
#if SUPPORT_INDEXED_ISA
    if (isa.nonpointer) {
        uintptr_t slot = isa.indexcls;
        return classForIndex((unsigned)slot);
    }
    return (Class)isa.bits;
#else
    return (Class)(isa.bits & ISA_MASK);
#endif
}


inline bool
objc_object::hasNonpointerIsa()
{
    return isa.nonpointer;
}


inline void
objc_object::initIsa(Class cls)
{
    initIsa(cls, false, false);
}

inline void
objc_object::initClassIsa(Class cls)
{
    if (DisableNonpointerIsa || cls->instancesRequireRawIsa()) {
        initIsa(cls, false/*not nonpointer*/, false);
    } else {
        initIsa(cls, true/*nonpointer*/, false);
    }
}

inline void
objc_object::initProtocolIsa(Class cls)
{
    return initClassIsa(cls);
}

inline void
objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
{
    assert(!cls->instancesRequireRawIsa());
    assert(hasCxxDtor == cls->hasCxxDtor());

    initIsa(cls, true, hasCxxDtor);
}

inline void
objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor)
{
    assert(!isTaggedPointer());

    if (!nonpointer) {
        isa.cls = cls;
    } else {
        assert(!DisableNonpointerIsa);
        assert(!cls->instancesRequireRawIsa());

        isa_t newisa(0);

#if SUPPORT_INDEXED_ISA
        assert(cls->classArrayIndex() > 0);
        newisa.bits = ISA_INDEX_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.indexcls = (uintptr_t)cls->classArrayIndex();
#else
        newisa.bits = ISA_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.shiftcls = (uintptr_t)cls >> 3;
#endif

        // This write must be performed in a single store in some cases
        // (for example when realizing a class because other threads
        // may simultaneously try to use the class).
        // fixme use atomics here to guarantee single-store and to
        // guarantee memory order w.r.t. the class index table
        // ...but not too atomic because we don't want to hurt instantiation
        isa = newisa;
    }
}

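// Illustrative sketch (not part of the original file): on a 64-bit target
// without indexed isa, the class pointer round-trips through shiftcls,
// which is why ISA() above can recover it with a mask. `cls` here stands
// for any 8-byte-aligned Class value.
//
//     isa_t sketch(0);
//     sketch.bits = ISA_MAGIC_VALUE;
//     sketch.shiftcls = (uintptr_t)cls >> 3;
//     assert((Class)(sketch.bits & ISA_MASK) == cls);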

inline Class
objc_object::changeIsa(Class newCls)
{
    // This is almost always true but there are
    // enough edge cases that we can't assert it.
    // assert(newCls->isFuture() ||
    //        newCls->isInitializing() || newCls->isInitialized());

    assert(!isTaggedPointer());

    isa_t oldisa;
    isa_t newisa;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        if ((oldisa.bits == 0 || oldisa.nonpointer) &&
            !newCls->isFuture() && newCls->canAllocNonpointer())
        {
            // 0 -> nonpointer
            // nonpointer -> nonpointer
#if SUPPORT_INDEXED_ISA
            if (oldisa.bits == 0) newisa.bits = ISA_INDEX_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            assert(newCls->classArrayIndex() > 0);
            newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
#else
            if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            newisa.shiftcls = (uintptr_t)newCls >> 3;
#endif
        }
        else if (oldisa.nonpointer) {
            // nonpointer -> raw pointer
            // Need to copy retain count et al to side table.
            // Acquire side table lock before setting isa to
            // prevent races such as concurrent -release.
            if (!sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.cls = newCls;
        }
        else {
            // raw pointer -> raw pointer
            newisa.cls = newCls;
        }
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (transcribeToSideTable) {
        // Copy oldisa's retain count et al to side table.
        // oldisa.has_assoc: nothing to do
        // oldisa.has_cxx_dtor: nothing to do
        sidetable_moveExtraRC_nolock(oldisa.extra_rc,
                                     oldisa.deallocating,
                                     oldisa.weakly_referenced);
    }

    if (sideTableLocked) sidetable_unlock();

    if (oldisa.nonpointer) {
#if SUPPORT_INDEXED_ISA
        return classForIndex(oldisa.indexcls);
#else
        return (Class)((uintptr_t)oldisa.shiftcls << 3);
#endif
    }
    else {
        return oldisa.cls;
    }
}

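// Illustrative usage (hedged, not part of the original file): the public
// object_setClass() path ends up in changeIsa(), e.g.
//
//     Class previous = object_setClass(obj, newCls);
//     // `previous` is what changeIsa() reconstructed from the old isa:
//     // either the raw cls field or shiftcls/indexcls shifted back into
//     // a Class pointer.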

inline bool
objc_object::hasAssociatedObjects()
{
    if (isTaggedPointer()) return true;
    if (isa.nonpointer) return isa.has_assoc;
    return true;
}


inline void
objc_object::setHasAssociatedObjects()
{
    if (isTaggedPointer()) return;

 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (!newisa.nonpointer || newisa.has_assoc) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.has_assoc = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}


inline bool
objc_object::isWeaklyReferenced()
{
    assert(!isTaggedPointer());
    if (isa.nonpointer) return isa.weakly_referenced;
    else return sidetable_isWeaklyReferenced();
}


inline void
objc_object::setWeaklyReferenced_nolock()
{
 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (slowpath(!newisa.nonpointer)) {
        ClearExclusive(&isa.bits);
        sidetable_setWeaklyReferenced_nolock();
        return;
    }
    if (newisa.weakly_referenced) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.weakly_referenced = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}


inline bool
objc_object::hasCxxDtor()
{
    assert(!isTaggedPointer());
    if (isa.nonpointer) return isa.has_cxx_dtor;
    else return isa.cls->hasCxxDtor();
}



inline bool
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    if (isa.nonpointer) return isa.deallocating;
    return sidetable_isDeallocating();
}


inline void
objc_object::clearDeallocating()
{
    if (slowpath(!isa.nonpointer)) {
        // Slow path for raw pointer isa.
        sidetable_clearDeallocating();
    }
    else if (slowpath(isa.weakly_referenced || isa.has_sidetable_rc)) {
        // Slow path for non-pointer isa with weak refs and/or side table data.
        clearDeallocating_slow();
    }

    assert(!sidetable_present());
}


inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;  // fixme necessary?

    if (fastpath(isa.nonpointer &&
                 !isa.weakly_referenced &&
                 !isa.has_assoc &&
                 !isa.has_cxx_dtor &&
                 !isa.has_sidetable_rc))
    {
        assert(!sidetable_present());
        free(this);
    }
    else {
        object_dispose((id)this);
    }
}


// Equivalent to calling [this retain], with shortcuts if there is no override
inline id
objc_object::retain()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return rootRetain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
}


// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super retain].
//
// tryRetain=true is the -_tryRetain path.
// handleOverflow=false is the frameless fast path.
// handleOverflow=true is the framed slow path including overflow to side table
// The code is structured this way to prevent duplication.

ALWAYS_INLINE id
objc_object::rootRetain()
{
    return rootRetain(false, false);
}

ALWAYS_INLINE bool
objc_object::rootTryRetain()
{
    return rootRetain(true, false) ? true : false;
}

ALWAYS_INLINE id
objc_object::rootRetain(bool tryRetain, bool handleOverflow)
{
    if (isTaggedPointer()) return (id)this;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    isa_t oldisa;
    isa_t newisa;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
            else return sidetable_retain();
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        if (slowpath(tryRetain && newisa.deallocating)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            return nil;
        }
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc++

        if (slowpath(carry)) {
            // newisa.extra_rc++ overflowed
            if (!handleOverflow) {
                ClearExclusive(&isa.bits);
                return rootRetain_overflow(tryRetain);
            }
            // Leave half of the retain counts inline and
            // prepare to copy the other half to the side table.
            if (!tryRetain && !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));

    if (slowpath(transcribeToSideTable)) {
        // Copy the other half of the retain counts to the side table.
        sidetable_addExtraRC_nolock(RC_HALF);
    }

    if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
    return (id)this;
}

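// Worked example (illustrative, not part of the original file): extra_rc
// saturates at 2*RC_HALF - 1. One more retain overflows it, so the slow
// path above stores RC_HALF inline and moves RC_HALF to the side table.
// The logical retain count (1 + extra_rc + side table) stays consistent
// with the increment:
//
//     before overflow:  1 + (2*RC_HALF - 1) + 0
//     after the split:  1 + RC_HALF + RC_HALF  ==  1 + 2*RC_HALF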

// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        rootRelease();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
}


// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super release].
//
// handleUnderflow=false is the frameless fast path.
// handleUnderflow=true is the framed slow path including side table borrow
// The code is structured this way to prevent duplication.

ALWAYS_INLINE bool
objc_object::rootRelease()
{
    return rootRelease(true, false);
}

ALWAYS_INLINE bool
objc_object::rootReleaseShouldDealloc()
{
    return rootRelease(false, false);
}

ALWAYS_INLINE bool
objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
{
    if (isTaggedPointer()) return false;

    bool sideTableLocked = false;

    isa_t oldisa;
    isa_t newisa;

 retry:
    do {
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (sideTableLocked) sidetable_unlock();
            return sidetable_release(performDealloc);
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        uintptr_t carry;
        newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc--
        if (slowpath(carry)) {
            // don't ClearExclusive()
            goto underflow;
        }
    } while (slowpath(!StoreReleaseExclusive(&isa.bits,
                                             oldisa.bits, newisa.bits)));

    if (slowpath(sideTableLocked)) sidetable_unlock();
    return false;

 underflow:
    // newisa.extra_rc-- underflowed: borrow from side table or deallocate

    // abandon newisa to undo the decrement
    newisa = oldisa;

    if (slowpath(newisa.has_sidetable_rc)) {
        if (!handleUnderflow) {
            ClearExclusive(&isa.bits);
            return rootRelease_underflow(performDealloc);
        }

        // Transfer retain count from side table to inline storage.

        if (!sideTableLocked) {
            ClearExclusive(&isa.bits);
            sidetable_lock();
            sideTableLocked = true;
            // Need to start over to avoid a race against
            // the nonpointer -> raw pointer transition.
            goto retry;
        }

        // Try to remove some retain counts from the side table.
        size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);

        // To avoid races, has_sidetable_rc must remain set
        // even if the side table count is now zero.

        if (borrowed > 0) {
            // Side table retain count decreased.
            // Try to add them to the inline count.
            newisa.extra_rc = borrowed - 1;  // redo the original decrement too
            bool stored = StoreReleaseExclusive(&isa.bits,
                                                oldisa.bits, newisa.bits);
            if (!stored) {
                // Inline update failed.
                // Try it again right now. This prevents livelock on LL/SC
                // architectures where the side table access itself may have
                // dropped the reservation.
                isa_t oldisa2 = LoadExclusive(&isa.bits);
                isa_t newisa2 = oldisa2;
                if (newisa2.nonpointer) {
                    uintptr_t overflow;
                    newisa2.bits =
                        addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
                    if (!overflow) {
                        stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits,
                                                       newisa2.bits);
                    }
                }
            }

            if (!stored) {
                // Inline update failed.
                // Put the retains back in the side table.
                sidetable_addExtraRC_nolock(borrowed);
                goto retry;
            }

            // Decrement successful after borrowing from side table.
            // This decrement cannot be the deallocating decrement - the side
            // table lock and has_sidetable_rc bit ensure that if everyone
            // else tried to -release while we worked, the last one would block.
            sidetable_unlock();
            return false;
        }
        else {
            // Side table is empty after all. Fall-through to the dealloc path.
        }
    }

    // Really deallocate.

    if (slowpath(newisa.deallocating)) {
        ClearExclusive(&isa.bits);
        if (sideTableLocked) sidetable_unlock();
        return overrelease_error();
        // does not actually return
    }
    newisa.deallocating = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;

    if (slowpath(sideTableLocked)) sidetable_unlock();

    __sync_synchronize();
    if (performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
    }
    return true;
}

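// Worked example (illustrative, not part of the original file): when the
// inline count underflows but has_sidetable_rc is set, the slow path above
// borrows up to RC_HALF from the side table and stores borrowed - 1 inline,
// folding the pending decrement into the transfer:
//
//     before:  1 + 0 inline,             RC_HALF in the side table
//     after:   1 + (RC_HALF - 1) inline, 0 in the side table
//
// Either way the logical count drops by exactly one.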

// Equivalent to [this autorelease], with shortcuts if there is no override
inline id
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
}


// Base autorelease implementation, ignoring overrides.
inline id
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


inline uintptr_t
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;

    sidetable_lock();
    isa_t bits = LoadExclusive(&isa.bits);
    ClearExclusive(&isa.bits);
    if (bits.nonpointer) {
        uintptr_t rc = 1 + bits.extra_rc;
        if (bits.has_sidetable_rc) {
            rc += sidetable_getExtraRC_nolock();
        }
        sidetable_unlock();
        return rc;
    }

    sidetable_unlock();
    return sidetable_retainCount();
}

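// Worked example (illustrative, not part of the original file): an object
// retained three times with a nonpointer isa and no side table overflow
// reports
//
//     rootRetainCount() == 1 + extra_rc == 1 + 3 == 4
//
// i.e. the implicit +1 for being alive plus the inline extra retains, plus
// any side-table extra count when has_sidetable_rc is set.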

// SUPPORT_NONPOINTER_ISA
#else
// not SUPPORT_NONPOINTER_ISA


inline Class
objc_object::ISA()
{
    assert(!isTaggedPointer());
    return isa.cls;
}


inline bool
objc_object::hasNonpointerIsa()
{
    return false;
}


inline void
objc_object::initIsa(Class cls)
{
    assert(!isTaggedPointer());
    isa = (uintptr_t)cls;
}


inline void
objc_object::initClassIsa(Class cls)
{
    initIsa(cls);
}


inline void
objc_object::initProtocolIsa(Class cls)
{
    initIsa(cls);
}


inline void
objc_object::initInstanceIsa(Class cls, bool)
{
    initIsa(cls);
}


inline void
objc_object::initIsa(Class cls, bool, bool)
{
    initIsa(cls);
}


inline Class
objc_object::changeIsa(Class cls)
{
    // This is almost always true but there are
    // enough edge cases that we can't assert it.
    // assert(cls->isFuture() ||
    //        cls->isInitializing() || cls->isInitialized());

    assert(!isTaggedPointer());

    isa_t oldisa, newisa;
    newisa.cls = cls;
    do {
        oldisa = LoadExclusive(&isa.bits);
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (oldisa.cls && oldisa.cls->instancesHaveAssociatedObjects()) {
        cls->setInstancesHaveAssociatedObjects();
    }

    return oldisa.cls;
}


inline bool
objc_object::hasAssociatedObjects()
{
    return getIsa()->instancesHaveAssociatedObjects();
}


inline void
objc_object::setHasAssociatedObjects()
{
    getIsa()->setInstancesHaveAssociatedObjects();
}


inline bool
objc_object::isWeaklyReferenced()
{
    assert(!isTaggedPointer());

    return sidetable_isWeaklyReferenced();
}


inline void
objc_object::setWeaklyReferenced_nolock()
{
    assert(!isTaggedPointer());

    sidetable_setWeaklyReferenced_nolock();
}


inline bool
objc_object::hasCxxDtor()
{
    assert(!isTaggedPointer());
    return isa.cls->hasCxxDtor();
}


inline bool
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    return sidetable_isDeallocating();
}


inline void
objc_object::clearDeallocating()
{
    sidetable_clearDeallocating();
}


inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;
    object_dispose((id)this);
}


// Equivalent to calling [this retain], with shortcuts if there is no override
inline id
objc_object::retain()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return sidetable_retain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
}


// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super retain].
inline id
objc_object::rootRetain()
{
    if (isTaggedPointer()) return (id)this;
    return sidetable_retain();
}


// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    assert(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        sidetable_release();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
}


// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super release].
inline bool
objc_object::rootRelease()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(true);
}

inline bool
objc_object::rootReleaseShouldDealloc()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(false);
}


// Equivalent to [this autorelease], with shortcuts if there is no override
inline id
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
}


// Base autorelease implementation, ignoring overrides.
inline id
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


// Base tryRetain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super _tryRetain].
inline bool
objc_object::rootTryRetain()
{
    if (isTaggedPointer()) return true;
    return sidetable_tryRetain();
}


inline uintptr_t
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;
    return sidetable_retainCount();
}


// not SUPPORT_NONPOINTER_ISA
#endif


#if SUPPORT_RETURN_AUTORELEASE

/***********************************************************************
  Fast handling of return through Cocoa's +0 autoreleasing convention.
  The caller and callee cooperate to keep the returned object
  out of the autorelease pool and eliminate redundant retain/release pairs.

  An optimized callee looks at the caller's instructions following the
  return. If the caller's instructions are also optimized then the callee
  skips all retain count operations: no autorelease, no retain/autorelease.
  Instead it saves the result's current retain count (+0 or +1) in
  thread-local storage. If the caller does not look optimized then
  the callee performs autorelease or retain/autorelease as usual.

  An optimized caller looks at the thread-local storage. If the result
  is set then it performs any retain or release needed to change the
  result from the retain count left by the callee to the retain count
  desired by the caller. Otherwise the caller assumes the result is
  currently at +0 from an unoptimized callee and performs any retain
  needed for that case.

  There are two optimized callees:
    objc_autoreleaseReturnValue
      result is currently +1. The unoptimized path autoreleases it.
    objc_retainAutoreleaseReturnValue
      result is currently +0. The unoptimized path retains and autoreleases it.

  There are two optimized callers:
    objc_retainAutoreleasedReturnValue
      caller wants the value at +1. The unoptimized path retains it.
    objc_unsafeClaimAutoreleasedReturnValue
      caller wants the value at +0 unsafely. The unoptimized path does nothing.

  Example:

    Callee:
      // compute ret at +1
      return objc_autoreleaseReturnValue(ret);

    Caller:
      ret = callee();
      ret = objc_retainAutoreleasedReturnValue(ret);
      // use ret at +1 here

    Callee sees the optimized caller, sets TLS, and leaves the result at +1.
    Caller sees the TLS, clears it, and accepts the result at +1 as-is.

  The callee's recognition of the optimized caller is architecture-dependent.
  x86_64: Callee looks for `mov rax, rdi` followed by a call or
    jump instruction to objc_retainAutoreleasedReturnValue or
    objc_unsafeClaimAutoreleasedReturnValue.
  i386: Callee looks for a magic nop `movl %ebp, %ebp` (frame pointer register)
  armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register).
  arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register).

  Tagged pointer objects do participate in the optimized return scheme,
  because it saves message sends. They are not entered in the autorelease
  pool in the unoptimized case.
**********************************************************************/

# if __x86_64__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void * const ra0)
{
    const uint8_t *ra1 = (const uint8_t *)ra0;
    const unaligned_uint16_t *ra2;
    const unaligned_uint32_t *ra4 = (const unaligned_uint32_t *)ra1;
    const void **sym;

#define PREFER_GOTPCREL 0
#if PREFER_GOTPCREL
    // 48 89 c7    movq %rax,%rdi
    // ff 15       callq *symbol@GOTPCREL(%rip)
    if (*ra4 != 0xffc78948) {
        return false;
    }
    if (ra1[4] != 0x15) {
        return false;
    }
    ra1 += 3;
#else
    // 48 89 c7    movq %rax,%rdi
    // e8          callq symbol
    if (*ra4 != 0xe8c78948) {
        return false;
    }
    ra1 += (long)*(const unaligned_int32_t *)(ra1 + 4) + 8l;
    ra2 = (const unaligned_uint16_t *)ra1;
    // ff 25       jmpq *symbol@DYLDMAGIC(%rip)
    if (*ra2 != 0x25ff) {
        return false;
    }
#endif
    ra1 += 6l + (long)*(const unaligned_int32_t *)(ra1 + 2);
    sym = (const void **)ra1;
    if (*sym != objc_retainAutoreleasedReturnValue &&
        *sym != objc_unsafeClaimAutoreleasedReturnValue)
    {
        return false;
    }

    return true;
}

// __x86_64__
# elif __arm__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    // if the low bit is set, we're returning to thumb mode
    if ((uintptr_t)ra & 1) {
        // 3f 46          mov r7, r7
        // we mask off the low bit via subtraction
        // 16-bit instructions are well-aligned
        if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
            return true;
        }
    } else {
        // 07 70 a0 e1    mov r7, r7
        // 32-bit instructions may be only 16-bit aligned
        if (*(unaligned_uint32_t *)ra == 0xe1a07007) {
            return true;
        }
    }
    return false;
}

// __arm__
# elif __arm64__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    // fd 03 1d aa    mov fp, fp
    // arm64 instructions are well-aligned
    if (*(uint32_t *)ra == 0xaa1d03fd) {
        return true;
    }
    return false;
}

// __arm64__
# elif __i386__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    // 89 ed    movl %ebp, %ebp
    if (*(unaligned_uint16_t *)ra == 0xed89) {
        return true;
    }
    return false;
}

// __i386__
# else

#warning unknown architecture

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    return false;
}

// unknown architecture
# endif


static ALWAYS_INLINE ReturnDisposition
getReturnDisposition()
{
    return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
}


static ALWAYS_INLINE void
setReturnDisposition(ReturnDisposition disposition)
{
    tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
}


// Try to prepare for optimized return with the given disposition (+0 or +1).
// Returns true if the optimized path is successful.
// Otherwise the return value must be retained and/or autoreleased as usual.
static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition)
{
    assert(getReturnDisposition() == ReturnAtPlus0);

    if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
        if (disposition) setReturnDisposition(disposition);
        return true;
    }

    return false;
}


// Try to accept an optimized return.
// Returns the disposition of the returned object (+0 or +1).
// An un-optimized return is +0.
static ALWAYS_INLINE ReturnDisposition
acceptOptimizedReturn()
{
    ReturnDisposition disposition = getReturnDisposition();
    setReturnDisposition(ReturnAtPlus0);  // reset to the unoptimized state
    return disposition;
}

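// Illustrative pairing (hedged sketch, not necessarily the exact code in
// NSObject.mm): the two helpers above are meant to be used roughly as
//
//     // callee side, e.g. objc_autoreleaseReturnValue(obj):
//     if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;  // caller is optimized
//     return objc_autorelease(obj);                           // unoptimized path
//
//     // caller side, e.g. objc_retainAutoreleasedReturnValue(obj):
//     if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;  // already +1
//     return objc_retain(obj);                                   // bring +0 up to +1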

// SUPPORT_RETURN_AUTORELEASE
#else
// not SUPPORT_RETURN_AUTORELEASE


static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition __unused)
{
    return false;
}


static ALWAYS_INLINE ReturnDisposition
acceptOptimizedReturn()
{
    return ReturnAtPlus0;
}


// not SUPPORT_RETURN_AUTORELEASE
#endif


// _OBJC_OBJECT_H_
#endif