runtime/objc-object.h (apple/objc4, objc4-779.1)
/*
 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


/***********************************************************************
* Inlineable parts of NSObject / objc_object implementation
**********************************************************************/

#ifndef _OBJC_OBJCOBJECT_H_
#define _OBJC_OBJCOBJECT_H_

#include "objc-private.h"


enum ReturnDisposition : bool {
    ReturnAtPlus0 = false, ReturnAtPlus1 = true
};

static ALWAYS_INLINE
bool prepareOptimizedReturn(ReturnDisposition disposition);


#if SUPPORT_TAGGED_POINTERS

extern "C" {
    extern Class objc_debug_taggedpointer_classes[_OBJC_TAG_SLOT_COUNT];
    extern Class objc_debug_taggedpointer_ext_classes[_OBJC_TAG_EXT_SLOT_COUNT];
}
#define objc_tag_classes objc_debug_taggedpointer_classes
#define objc_tag_ext_classes objc_debug_taggedpointer_ext_classes

#endif

#if SUPPORT_INDEXED_ISA

ALWAYS_INLINE Class &
classForIndex(uintptr_t index) {
    ASSERT(index > 0);
    ASSERT(index < (uintptr_t)objc_indexed_classes_count);
    return objc_indexed_classes[index];
}

#endif


inline bool
objc_object::isClass()
{
    if (isTaggedPointer()) return false;
    return ISA()->isMetaClass();
}


#if SUPPORT_TAGGED_POINTERS

inline Class
objc_object::getIsa()
{
    if (fastpath(!isTaggedPointer())) return ISA();

    extern objc_class OBJC_CLASS_$___NSUnrecognizedTaggedPointer;
    uintptr_t slot, ptr = (uintptr_t)this;
    Class cls;

    slot = (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
    cls = objc_tag_classes[slot];
    if (slowpath(cls == (Class)&OBJC_CLASS_$___NSUnrecognizedTaggedPointer)) {
        slot = (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK;
        cls = objc_tag_ext_classes[slot];
    }
    return cls;
}

inline uintptr_t
objc_object::isaBits() const
{
    return isa.bits;
}

inline bool
objc_object::isTaggedPointer()
{
    return _objc_isTaggedPointer(this);
}

inline bool
objc_object::isBasicTaggedPointer()
{
    return isTaggedPointer() && !isExtTaggedPointer();
}

inline bool
objc_object::isExtTaggedPointer()
{
    uintptr_t ptr = _objc_decodeTaggedPointer(this);
    return (ptr & _OBJC_TAG_EXT_MASK) == _OBJC_TAG_EXT_MASK;
}


// SUPPORT_TAGGED_POINTERS
#else
// not SUPPORT_TAGGED_POINTERS


inline Class
objc_object::getIsa()
{
    return ISA();
}

inline uintptr_t
objc_object::isaBits() const
{
    return isa.bits;
}


inline bool
objc_object::isTaggedPointer()
{
    return false;
}

inline bool
objc_object::isBasicTaggedPointer()
{
    return false;
}

inline bool
objc_object::isExtTaggedPointer()
{
    return false;
}


// not SUPPORT_TAGGED_POINTERS
#endif


#if SUPPORT_NONPOINTER_ISA

inline Class
objc_object::ISA()
{
    ASSERT(!isTaggedPointer());
#if SUPPORT_INDEXED_ISA
    if (isa.nonpointer) {
        uintptr_t slot = isa.indexcls;
        return classForIndex((unsigned)slot);
    }
    return (Class)isa.bits;
#else
    return (Class)(isa.bits & ISA_MASK);
#endif
}

inline Class
objc_object::rawISA()
{
    ASSERT(!isTaggedPointer() && !isa.nonpointer);
    return (Class)isa.bits;
}

inline bool
objc_object::hasNonpointerIsa()
{
    return isa.nonpointer;
}


inline void
objc_object::initIsa(Class cls)
{
    initIsa(cls, false, false);
}

inline void
objc_object::initClassIsa(Class cls)
{
    if (DisableNonpointerIsa || cls->instancesRequireRawIsa()) {
        initIsa(cls, false/*not nonpointer*/, false);
    } else {
        initIsa(cls, true/*nonpointer*/, false);
    }
}

inline void
objc_object::initProtocolIsa(Class cls)
{
    return initClassIsa(cls);
}

inline void
objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
{
    ASSERT(!cls->instancesRequireRawIsa());
    ASSERT(hasCxxDtor == cls->hasCxxDtor());

    initIsa(cls, true, hasCxxDtor);
}

inline void
objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor)
{
    ASSERT(!isTaggedPointer());

    if (!nonpointer) {
        isa = isa_t((uintptr_t)cls);
    } else {
        ASSERT(!DisableNonpointerIsa);
        ASSERT(!cls->instancesRequireRawIsa());

        isa_t newisa(0);

#if SUPPORT_INDEXED_ISA
        ASSERT(cls->classArrayIndex() > 0);
        newisa.bits = ISA_INDEX_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.indexcls = (uintptr_t)cls->classArrayIndex();
#else
        newisa.bits = ISA_MAGIC_VALUE;
        // isa.magic is part of ISA_MAGIC_VALUE
        // isa.nonpointer is part of ISA_MAGIC_VALUE
        newisa.has_cxx_dtor = hasCxxDtor;
        newisa.shiftcls = (uintptr_t)cls >> 3;
#endif

        // This write must be performed in a single store in some cases
        // (for example when realizing a class because other threads
        // may simultaneously try to use the class).
        // fixme use atomics here to guarantee single-store and to
        // guarantee memory order w.r.t. the class index table
        // ...but not too atomic because we don't want to hurt instantiation
        isa = newisa;
    }
}


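// Illustrative note on the packed layout used above: on the shiftcls
// configurations, class pointers are at least 8-byte aligned, so the low
// three bits are always zero and can be dropped when the pointer is packed
// into the isa. Assuming the non-indexed layout, where shiftcls starts at
// bit 3 and ISA_MASK covers exactly those bits, the round trip is:
//
//     uintptr_t raw = (uintptr_t)cls;                // low 3 bits are 0
//     newisa.shiftcls = raw >> 3;                    // drop the zero bits
//     Class back = (Class)(newisa.bits & ISA_MASK);  // ISA()'s fast path
//     // back == cls
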
inline Class
objc_object::changeIsa(Class newCls)
{
    // This is almost always true but there are
    // enough edge cases that we can't assert it.
    // assert(newCls->isFuture() ||
    //        newCls->isInitializing() || newCls->isInitialized());

    ASSERT(!isTaggedPointer());

    isa_t oldisa;
    isa_t newisa;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        if ((oldisa.bits == 0 || oldisa.nonpointer) &&
            !newCls->isFuture() && newCls->canAllocNonpointer())
        {
            // 0 -> nonpointer
            // nonpointer -> nonpointer
#if SUPPORT_INDEXED_ISA
            if (oldisa.bits == 0) newisa.bits = ISA_INDEX_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            ASSERT(newCls->classArrayIndex() > 0);
            newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
#else
            if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
            else newisa = oldisa;
            // isa.magic is part of ISA_MAGIC_VALUE
            // isa.nonpointer is part of ISA_MAGIC_VALUE
            newisa.has_cxx_dtor = newCls->hasCxxDtor();
            newisa.shiftcls = (uintptr_t)newCls >> 3;
#endif
        }
        else if (oldisa.nonpointer) {
            // nonpointer -> raw pointer
            // Need to copy retain count et al to side table.
            // Acquire side table lock before setting isa to
            // prevent races such as concurrent -release.
            if (!sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.cls = newCls;
        }
        else {
            // raw pointer -> raw pointer
            newisa.cls = newCls;
        }
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (transcribeToSideTable) {
        // Copy oldisa's retain count et al to side table.
        // oldisa.has_assoc: nothing to do
        // oldisa.has_cxx_dtor: nothing to do
        sidetable_moveExtraRC_nolock(oldisa.extra_rc,
                                     oldisa.deallocating,
                                     oldisa.weakly_referenced);
    }

    if (sideTableLocked) sidetable_unlock();

    if (oldisa.nonpointer) {
#if SUPPORT_INDEXED_ISA
        return classForIndex(oldisa.indexcls);
#else
        return (Class)((uintptr_t)oldisa.shiftcls << 3);
#endif
    }
    else {
        return oldisa.cls;
    }
}


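// Illustrative sketch of how changeIsa() is reached: object_setClass() in
// objc-class.mm is the public entry point for isa swizzling and funnels into
// changeIsa() roughly as below. This is an approximation, not the verbatim
// definition; the real function performs additional checks (for example,
// provoking +initialize on the new class) before swapping the isa.
#if 0
Class object_setClass(id obj, Class cls)
{
    if (!obj) return nil;

    // changeIsa() preserves the inline retain count and related bits,
    // transcribing them to the side table if the new class requires a
    // raw-pointer isa.
    return obj->changeIsa(cls);
}
#endif
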
inline bool
objc_object::hasAssociatedObjects()
{
    if (isTaggedPointer()) return true;
    if (isa.nonpointer) return isa.has_assoc;
    return true;
}


inline void
objc_object::setHasAssociatedObjects()
{
    if (isTaggedPointer()) return;

 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (!newisa.nonpointer || newisa.has_assoc) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.has_assoc = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}


inline bool
objc_object::isWeaklyReferenced()
{
    ASSERT(!isTaggedPointer());
    if (isa.nonpointer) return isa.weakly_referenced;
    else return sidetable_isWeaklyReferenced();
}


inline void
objc_object::setWeaklyReferenced_nolock()
{
 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (slowpath(!newisa.nonpointer)) {
        ClearExclusive(&isa.bits);
        sidetable_setWeaklyReferenced_nolock();
        return;
    }
    if (newisa.weakly_referenced) {
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.weakly_referenced = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}


inline bool
objc_object::hasCxxDtor()
{
    ASSERT(!isTaggedPointer());
    if (isa.nonpointer) return isa.has_cxx_dtor;
    else return isa.cls->hasCxxDtor();
}



inline bool
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    if (isa.nonpointer) return isa.deallocating;
    return sidetable_isDeallocating();
}


inline void
objc_object::clearDeallocating()
{
    if (slowpath(!isa.nonpointer)) {
        // Slow path for raw pointer isa.
        sidetable_clearDeallocating();
    }
    else if (slowpath(isa.weakly_referenced || isa.has_sidetable_rc)) {
        // Slow path for non-pointer isa with weak refs and/or side table data.
        clearDeallocating_slow();
    }

    assert(!sidetable_present());
}


inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;  // fixme necessary?

    if (fastpath(isa.nonpointer &&
                 !isa.weakly_referenced &&
                 !isa.has_assoc &&
                 !isa.has_cxx_dtor &&
                 !isa.has_sidetable_rc))
    {
        assert(!sidetable_present());
        free(this);
    }
    else {
        object_dispose((id)this);
    }
}


// Equivalent to calling [this retain], with shortcuts if there is no override
inline id
objc_object::retain()
{
    ASSERT(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return rootRetain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
}


// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super retain].
//
// tryRetain=true is the -_tryRetain path.
// handleOverflow=false is the frameless fast path.
// handleOverflow=true is the framed slow path including overflow to side table
// The code is structured this way to prevent duplication.

ALWAYS_INLINE id
objc_object::rootRetain()
{
    return rootRetain(false, false);
}

ALWAYS_INLINE bool
objc_object::rootTryRetain()
{
    return rootRetain(true, false) ? true : false;
}

ALWAYS_INLINE id
objc_object::rootRetain(bool tryRetain, bool handleOverflow)
{
    if (isTaggedPointer()) return (id)this;

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    isa_t oldisa;
    isa_t newisa;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (rawISA()->isMetaClass()) return (id)this;
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
            else return sidetable_retain();
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        if (slowpath(tryRetain && newisa.deallocating)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            return nil;
        }
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc++

        if (slowpath(carry)) {
            // newisa.extra_rc++ overflowed
            if (!handleOverflow) {
                ClearExclusive(&isa.bits);
                return rootRetain_overflow(tryRetain);
            }
            // Leave half of the retain counts inline and
            // prepare to copy the other half to the side table.
            if (!tryRetain && !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));

    if (slowpath(transcribeToSideTable)) {
        // Copy the other half of the retain counts to the side table.
        sidetable_addExtraRC_nolock(RC_HALF);
    }

    if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
    return (id)this;
}


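// Worked example of the overflow handoff above (illustrative; the width of
// extra_rc, and therefore the value of RC_HALF, differs per architecture):
// when extra_rc++ carries out of the inline field, the slow path keeps
// RC_HALF retains inline, sets has_sidetable_rc, and banks the other RC_HALF
// in the side table, so the total retain count is preserved as
// 1 (implicit) + extra_rc + side table. The frameless fast path is not
// allowed to do that transfer itself; it calls an out-of-line helper that
// NSObject.mm defines roughly as follows (sketch, not verbatim):
#if 0
NEVER_INLINE id
objc_object::rootRetain_overflow(bool tryRetain)
{
    // Same algorithm, recompiled with a frame and with handleOverflow=true
    // so the side-table transfer above is permitted.
    return rootRetain(tryRetain, true);
}
#endif
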
// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    ASSERT(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        rootRelease();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
}


// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super release].
//
// handleUnderflow=false is the frameless fast path.
// handleUnderflow=true is the framed slow path including side table borrow
// The code is structured this way to prevent duplication.

ALWAYS_INLINE bool
objc_object::rootRelease()
{
    return rootRelease(true, false);
}

ALWAYS_INLINE bool
objc_object::rootReleaseShouldDealloc()
{
    return rootRelease(false, false);
}

ALWAYS_INLINE bool
objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
{
    if (isTaggedPointer()) return false;

    bool sideTableLocked = false;

    isa_t oldisa;
    isa_t newisa;

 retry:
    do {
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (rawISA()->isMetaClass()) return false;
            if (sideTableLocked) sidetable_unlock();
            return sidetable_release(performDealloc);
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        uintptr_t carry;
        newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc--
        if (slowpath(carry)) {
            // don't ClearExclusive()
            goto underflow;
        }
    } while (slowpath(!StoreReleaseExclusive(&isa.bits,
                                             oldisa.bits, newisa.bits)));

    if (slowpath(sideTableLocked)) sidetable_unlock();
    return false;

 underflow:
    // newisa.extra_rc-- underflowed: borrow from side table or deallocate

    // abandon newisa to undo the decrement
    newisa = oldisa;

    if (slowpath(newisa.has_sidetable_rc)) {
        if (!handleUnderflow) {
            ClearExclusive(&isa.bits);
            return rootRelease_underflow(performDealloc);
        }

        // Transfer retain count from side table to inline storage.

        if (!sideTableLocked) {
            ClearExclusive(&isa.bits);
            sidetable_lock();
            sideTableLocked = true;
            // Need to start over to avoid a race against
            // the nonpointer -> raw pointer transition.
            goto retry;
        }

        // Try to remove some retain counts from the side table.
        size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);

        // To avoid races, has_sidetable_rc must remain set
        // even if the side table count is now zero.

        if (borrowed > 0) {
            // Side table retain count decreased.
            // Try to add them to the inline count.
            newisa.extra_rc = borrowed - 1;  // redo the original decrement too
            bool stored = StoreReleaseExclusive(&isa.bits,
                                                oldisa.bits, newisa.bits);
            if (!stored) {
                // Inline update failed.
                // Try it again right now. This prevents livelock on LL/SC
                // architectures where the side table access itself may have
                // dropped the reservation.
                isa_t oldisa2 = LoadExclusive(&isa.bits);
                isa_t newisa2 = oldisa2;
                if (newisa2.nonpointer) {
                    uintptr_t overflow;
                    newisa2.bits =
                        addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
                    if (!overflow) {
                        stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits,
                                                       newisa2.bits);
                    }
                }
            }

            if (!stored) {
                // Inline update failed.
                // Put the retains back in the side table.
                sidetable_addExtraRC_nolock(borrowed);
                goto retry;
            }

            // Decrement successful after borrowing from side table.
            // This decrement cannot be the deallocating decrement - the side
            // table lock and has_sidetable_rc bit ensure that if everyone
            // else tried to -release while we worked, the last one would block.
            sidetable_unlock();
            return false;
        }
        else {
            // Side table is empty after all. Fall-through to the dealloc path.
        }
    }

    // Really deallocate.

    if (slowpath(newisa.deallocating)) {
        ClearExclusive(&isa.bits);
        if (sideTableLocked) sidetable_unlock();
        return overrelease_error();
        // does not actually return
    }
    newisa.deallocating = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;

    if (slowpath(sideTableLocked)) sidetable_unlock();

    __c11_atomic_thread_fence(__ATOMIC_ACQUIRE);

    if (performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
    }
    return true;
}


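// The mirror image of the retain overflow (illustrative summary): when
// extra_rc-- underflows and has_sidetable_rc is set, the code above borrows
// up to RC_HALF counts from the side table, stores borrowed-1 inline (the -1
// redoes the decrement that triggered the borrow), and only proceeds to
// deallocation if nothing could be borrowed. As with retain, the frameless
// fast path defers to an out-of-line helper that NSObject.mm defines roughly
// as follows (sketch, not verbatim):
#if 0
NEVER_INLINE bool
objc_object::rootRelease_underflow(bool performDealloc)
{
    // Same algorithm, recompiled with a frame and with handleUnderflow=true
    // so the side-table borrow above is permitted.
    return rootRelease(performDealloc, true);
}
#endif
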
// Equivalent to [this autorelease], with shortcuts if there is no override
inline id
objc_object::autorelease()
{
    ASSERT(!isTaggedPointer());
    if (fastpath(!ISA()->hasCustomRR())) {
        return rootAutorelease();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(autorelease));
}


// Base autorelease implementation, ignoring overrides.
inline id
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


inline uintptr_t
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;

    sidetable_lock();
    isa_t bits = LoadExclusive(&isa.bits);
    ClearExclusive(&isa.bits);
    if (bits.nonpointer) {
        uintptr_t rc = 1 + bits.extra_rc;
        if (bits.has_sidetable_rc) {
            rc += sidetable_getExtraRC_nolock();
        }
        sidetable_unlock();
        return rc;
    }

    sidetable_unlock();
    return sidetable_retainCount();
}


// SUPPORT_NONPOINTER_ISA
#else
// not SUPPORT_NONPOINTER_ISA


inline Class
objc_object::ISA()
{
    ASSERT(!isTaggedPointer());
    return isa.cls;
}

inline Class
objc_object::rawISA()
{
    return ISA();
}

inline bool
objc_object::hasNonpointerIsa()
{
    return false;
}


inline void
objc_object::initIsa(Class cls)
{
    ASSERT(!isTaggedPointer());
    isa = (uintptr_t)cls;
}


inline void
objc_object::initClassIsa(Class cls)
{
    initIsa(cls);
}


inline void
objc_object::initProtocolIsa(Class cls)
{
    initIsa(cls);
}


inline void
objc_object::initInstanceIsa(Class cls, bool)
{
    initIsa(cls);
}


inline void
objc_object::initIsa(Class cls, bool, bool)
{
    initIsa(cls);
}


inline Class
objc_object::changeIsa(Class cls)
{
    // This is almost always true but there are
    // enough edge cases that we can't assert it.
    // assert(cls->isFuture() ||
    //        cls->isInitializing() || cls->isInitialized());

    ASSERT(!isTaggedPointer());

    isa_t oldisa, newisa;
    newisa.cls = cls;
    do {
        oldisa = LoadExclusive(&isa.bits);
    } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));

    if (oldisa.cls && oldisa.cls->instancesHaveAssociatedObjects()) {
        cls->setInstancesHaveAssociatedObjects();
    }

    return oldisa.cls;
}


inline bool
objc_object::hasAssociatedObjects()
{
    return getIsa()->instancesHaveAssociatedObjects();
}


inline void
objc_object::setHasAssociatedObjects()
{
    getIsa()->setInstancesHaveAssociatedObjects();
}


inline bool
objc_object::isWeaklyReferenced()
{
    ASSERT(!isTaggedPointer());

    return sidetable_isWeaklyReferenced();
}


inline void
objc_object::setWeaklyReferenced_nolock()
{
    ASSERT(!isTaggedPointer());

    sidetable_setWeaklyReferenced_nolock();
}


inline bool
objc_object::hasCxxDtor()
{
    ASSERT(!isTaggedPointer());
    return isa.cls->hasCxxDtor();
}


inline bool
objc_object::rootIsDeallocating()
{
    if (isTaggedPointer()) return false;
    return sidetable_isDeallocating();
}


inline void
objc_object::clearDeallocating()
{
    sidetable_clearDeallocating();
}


inline void
objc_object::rootDealloc()
{
    if (isTaggedPointer()) return;
    object_dispose((id)this);
}


// Equivalent to calling [this retain], with shortcuts if there is no override
inline id
objc_object::retain()
{
    ASSERT(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        return sidetable_retain();
    }

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
}


// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super retain].
inline id
objc_object::rootRetain()
{
    if (isTaggedPointer()) return (id)this;
    return sidetable_retain();
}


// Equivalent to calling [this release], with shortcuts if there is no override
inline void
objc_object::release()
{
    ASSERT(!isTaggedPointer());

    if (fastpath(!ISA()->hasCustomRR())) {
        sidetable_release();
        return;
    }

    ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
}


// Base release implementation, ignoring overrides.
// Does not call -dealloc.
// Returns true if the object should now be deallocated.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super release].
inline bool
objc_object::rootRelease()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(true);
}

inline bool
objc_object::rootReleaseShouldDealloc()
{
    if (isTaggedPointer()) return false;
    return sidetable_release(false);
}


// Equivalent to [this autorelease], with shortcuts if there is no override
inline id
objc_object::autorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();

    return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(autorelease));
}


// Base autorelease implementation, ignoring overrides.
inline id
objc_object::rootAutorelease()
{
    if (isTaggedPointer()) return (id)this;
    if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;

    return rootAutorelease2();
}


// Base tryRetain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super _tryRetain].
inline bool
objc_object::rootTryRetain()
{
    if (isTaggedPointer()) return true;
    return sidetable_tryRetain();
}


inline uintptr_t
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this;
    return sidetable_retainCount();
}


// not SUPPORT_NONPOINTER_ISA
#endif


#if SUPPORT_RETURN_AUTORELEASE

/***********************************************************************
  Fast handling of return through Cocoa's +0 autoreleasing convention.
  The caller and callee cooperate to keep the returned object
  out of the autorelease pool and eliminate redundant retain/release pairs.

  An optimized callee looks at the caller's instructions following the
  return. If the caller's instructions are also optimized then the callee
  skips all retain count operations: no autorelease, no retain/autorelease.
  Instead it saves the result's current retain count (+0 or +1) in
  thread-local storage. If the caller does not look optimized then
  the callee performs autorelease or retain/autorelease as usual.

  An optimized caller looks at the thread-local storage. If the result
  is set then it performs any retain or release needed to change the
  result from the retain count left by the callee to the retain count
  desired by the caller. Otherwise the caller assumes the result is
  currently at +0 from an unoptimized callee and performs any retain
  needed for that case.

  There are two optimized callees:
    objc_autoreleaseReturnValue
      result is currently +1. The unoptimized path autoreleases it.
    objc_retainAutoreleaseReturnValue
      result is currently +0. The unoptimized path retains and autoreleases it.

  There are two optimized callers:
    objc_retainAutoreleasedReturnValue
      caller wants the value at +1. The unoptimized path retains it.
    objc_unsafeClaimAutoreleasedReturnValue
      caller wants the value at +0 unsafely. The unoptimized path does nothing.

  Example:

    Callee:
      // compute ret at +1
      return objc_autoreleaseReturnValue(ret);

    Caller:
      ret = callee();
      ret = objc_retainAutoreleasedReturnValue(ret);
      // use ret at +1 here

    Callee sees the optimized caller, sets TLS, and leaves the result at +1.
    Caller sees the TLS, clears it, and accepts the result at +1 as-is.

  The callee's recognition of the optimized caller is architecture-dependent.
  x86_64: Callee looks for `mov rax, rdi` followed by a call or
    jump instruction to objc_retainAutoreleasedReturnValue or
    objc_unsafeClaimAutoreleasedReturnValue.
  i386:  Callee looks for a magic nop `movl %ebp, %ebp` (frame pointer register)
  armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register).
  arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register).

  Tagged pointer objects do participate in the optimized return scheme,
  because it saves message sends. They are not entered in the autorelease
  pool in the unoptimized case.
**********************************************************************/

# if __x86_64__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void * const ra0)
{
    const uint8_t *ra1 = (const uint8_t *)ra0;
    const unaligned_uint16_t *ra2;
    const unaligned_uint32_t *ra4 = (const unaligned_uint32_t *)ra1;
    const void **sym;

#define PREFER_GOTPCREL 0
#if PREFER_GOTPCREL
    // 48 89 c7    movq  %rax,%rdi
    // ff 15       callq *symbol@GOTPCREL(%rip)
    if (*ra4 != 0xffc78948) {
        return false;
    }
    if (ra1[4] != 0x15) {
        return false;
    }
    ra1 += 3;
#else
    // 48 89 c7    movq  %rax,%rdi
    // e8          callq symbol
    if (*ra4 != 0xe8c78948) {
        return false;
    }
    ra1 += (long)*(const unaligned_int32_t *)(ra1 + 4) + 8l;
    ra2 = (const unaligned_uint16_t *)ra1;
    // ff 25       jmpq *symbol@DYLDMAGIC(%rip)
    if (*ra2 != 0x25ff) {
        return false;
    }
#endif
    ra1 += 6l + (long)*(const unaligned_int32_t *)(ra1 + 2);
    sym = (const void **)ra1;
    if (*sym != objc_retainAutoreleasedReturnValue &&
        *sym != objc_unsafeClaimAutoreleasedReturnValue)
    {
        return false;
    }

    return true;
}

// __x86_64__
# elif __arm__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    // if the low bit is set, we're returning to thumb mode
    if ((uintptr_t)ra & 1) {
        // 3f 46          mov r7, r7
        // we mask off the low bit via subtraction
        // 16-bit instructions are well-aligned
        if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
            return true;
        }
    } else {
        // 07 70 a0 e1    mov r7, r7
        // 32-bit instructions may be only 16-bit aligned
        if (*(unaligned_uint32_t *)ra == 0xe1a07007) {
            return true;
        }
    }
    return false;
}

// __arm__
# elif __arm64__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    // fd 03 1d aa    mov fp, fp
    // arm64 instructions are well-aligned
    if (*(uint32_t *)ra == 0xaa1d03fd) {
        return true;
    }
    return false;
}

// __arm64__
# elif __i386__

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    // 89 ed    movl %ebp, %ebp
    if (*(unaligned_uint16_t *)ra == 0xed89) {
        return true;
    }
    return false;
}

// __i386__
# else

#warning unknown architecture

static ALWAYS_INLINE bool
callerAcceptsOptimizedReturn(const void *ra)
{
    return false;
}

// unknown architecture
# endif


static ALWAYS_INLINE ReturnDisposition
getReturnDisposition()
{
    return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
}


static ALWAYS_INLINE void
setReturnDisposition(ReturnDisposition disposition)
{
    tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
}


// Try to prepare for optimized return with the given disposition (+0 or +1).
// Returns true if the optimized path is successful.
// Otherwise the return value must be retained and/or autoreleased as usual.
static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition)
{
    ASSERT(getReturnDisposition() == ReturnAtPlus0);

    if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
        if (disposition) setReturnDisposition(disposition);
        return true;
    }

    return false;
}


// Try to accept an optimized return.
// Returns the disposition of the returned object (+0 or +1).
// An un-optimized return is +0.
static ALWAYS_INLINE ReturnDisposition
acceptOptimizedReturn()
{
    ReturnDisposition disposition = getReturnDisposition();
    setReturnDisposition(ReturnAtPlus0);  // reset to the unoptimized state
    return disposition;
}


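// Illustrative sketch of how the two halves of the handshake are consumed.
// The real entry points live in NSObject.mm and differ in detail; the point
// here is only that the callee side calls prepareOptimizedReturn() and the
// caller side calls acceptOptimizedReturn(), pairing up through the
// RETURN_DISPOSITION_KEY thread-local slot.
#if 0
// Callee side: wraps a result that is currently at +1.
id objc_autoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;  // handshake armed

    return objc_autorelease(obj);  // unoptimized caller: autorelease as usual
}

// Caller side: claims a result at +1, retaining only if the callee did not
// take the optimized path.
id objc_retainAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;

    return objc_retain(obj);
}
#endif
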
// SUPPORT_RETURN_AUTORELEASE
#else
// not SUPPORT_RETURN_AUTORELEASE


static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition __unused)
{
    return false;
}


static ALWAYS_INLINE ReturnDisposition
acceptOptimizedReturn()
{
    return ReturnAtPlus0;
}


// not SUPPORT_RETURN_AUTORELEASE
#endif


// _OBJC_OBJECT_H_
#endif