runtime/objc-object.h (objc4-818.2)
1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24
25 /***********************************************************************
26 * Inlineable parts of NSObject / objc_object implementation
27 **********************************************************************/
28
29 #ifndef _OBJC_OBJCOBJECT_H_
30 #define _OBJC_OBJCOBJECT_H_
31
32 #include "objc-private.h"
33
34
35 enum ReturnDisposition : bool {
36 ReturnAtPlus0 = false, ReturnAtPlus1 = true
37 };
38
39 static ALWAYS_INLINE
40 bool prepareOptimizedReturn(ReturnDisposition disposition);
41
42
43 #if SUPPORT_TAGGED_POINTERS
44
45 extern "C" {
46 extern Class objc_debug_taggedpointer_classes[_OBJC_TAG_SLOT_COUNT];
47 extern Class objc_debug_taggedpointer_ext_classes[_OBJC_TAG_EXT_SLOT_COUNT];
48 }
49 #define objc_tag_classes objc_debug_taggedpointer_classes
50 #define objc_tag_ext_classes objc_debug_taggedpointer_ext_classes
51
52 #endif
53
54 #if SUPPORT_INDEXED_ISA
55
56 ALWAYS_INLINE Class &
57 classForIndex(uintptr_t index) {
58 ASSERT(index > 0);
59 ASSERT(index < (uintptr_t)objc_indexed_classes_count);
60 return objc_indexed_classes[index];
61 }
62
63 #endif
64
65
66 inline bool
67 objc_object::isClass()
68 {
69 if (isTaggedPointer()) return false;
70 return ISA()->isMetaClass();
71 }
72
73
74 #if SUPPORT_TAGGED_POINTERS
75
76 inline Class
77 objc_object::getIsa()
78 {
79 if (fastpath(!isTaggedPointer())) return ISA();
80
81 extern objc_class OBJC_CLASS_$___NSUnrecognizedTaggedPointer;
82 uintptr_t slot, ptr = (uintptr_t)this;
83 Class cls;
84
85 slot = (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
86 cls = objc_tag_classes[slot];
87 if (slowpath(cls == (Class)&OBJC_CLASS_$___NSUnrecognizedTaggedPointer)) {
88 slot = (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK;
89 cls = objc_tag_ext_classes[slot];
90 }
91 return cls;
92 }
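// For illustration, a minimal sketch of what the lookup above does for a
// basic tagged pointer (the literal and its class are only examples and
// are not guaranteed by this header):
//
//   id num = @42;                                  // may be tagged on 64-bit
//   if (_objc_isTaggedPointer(num)) {
//       Class cls = ((objc_object *)num)->getIsa();
//       // cls was read from objc_tag_classes[slot]; extended tags fall
//       // through to objc_tag_ext_classes[ext slot] instead.
//   }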
93
94 inline uintptr_t
95 objc_object::isaBits() const
96 {
97 return isa.bits;
98 }
99
100 inline bool
101 objc_object::isTaggedPointer()
102 {
103 return _objc_isTaggedPointer(this);
104 }
105
106 inline bool
107 objc_object::isTaggedPointerOrNil()
108 {
109 return _objc_isTaggedPointerOrNil(this);
110 }
111
112 inline bool
113 objc_object::isBasicTaggedPointer()
114 {
115 return isTaggedPointer() && !isExtTaggedPointer();
116 }
117
118 inline bool
119 objc_object::isExtTaggedPointer()
120 {
121 uintptr_t ptr = _objc_decodeTaggedPointer(this);
122 return (ptr & _OBJC_TAG_EXT_MASK) == _OBJC_TAG_EXT_MASK;
123 }
124
125
126 // SUPPORT_TAGGED_POINTERS
127 #else
128 // not SUPPORT_TAGGED_POINTERS
129
130 inline Class
131 objc_object::getIsa()
132 {
133 return ISA();
134 }
135
136 inline uintptr_t
137 objc_object::isaBits() const
138 {
139 return isa.bits;
140 }
141
142
143 inline bool
144 objc_object::isTaggedPointer()
145 {
146 return false;
147 }
148
149 inline bool
150 objc_object::isTaggedPointerOrNil()
151 {
152 return !this;
153 }
154
155 inline bool
156 objc_object::isBasicTaggedPointer()
157 {
158 return false;
159 }
160
161 inline bool
162 objc_object::isExtTaggedPointer()
163 {
164 return false;
165 }
166
167
168 // not SUPPORT_TAGGED_POINTERS
169 #endif
170
171
172 #if SUPPORT_NONPOINTER_ISA
173
174 // Set the class field in an isa. Takes both the class to set and
175 // a pointer to the object where the isa will ultimately be used.
176 // This is necessary to get the pointer signing right.
177 //
178 // Note: this method does not support setting an indexed isa. When
179 // indexed isas are in use, it can only be used to set the class of a
180 // raw isa.
181 inline void
182 isa_t::setClass(Class newCls, UNUSED_WITHOUT_PTRAUTH objc_object *obj)
183 {
184 // Match the conditional in isa.h.
185 #if __has_feature(ptrauth_calls) || TARGET_OS_SIMULATOR
186 # if ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_NONE
187 // No signing, just use the raw pointer.
188 uintptr_t signedCls = (uintptr_t)newCls;
189
190 # elif ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ONLY_SWIFT
191 // We're only signing Swift classes. Non-Swift classes just use
192 // the raw pointer
193 uintptr_t signedCls = (uintptr_t)newCls;
194 if (newCls->isSwiftStable())
195 signedCls = (uintptr_t)ptrauth_sign_unauthenticated((void *)newCls, ISA_SIGNING_KEY, ptrauth_blend_discriminator(obj, ISA_SIGNING_DISCRIMINATOR));
196
197 # elif ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ALL
198 // We're signing everything
199 uintptr_t signedCls = (uintptr_t)ptrauth_sign_unauthenticated((void *)newCls, ISA_SIGNING_KEY, ptrauth_blend_discriminator(obj, ISA_SIGNING_DISCRIMINATOR));
200
201 # else
202 # error Unknown isa signing mode.
203 # endif
204
205 shiftcls_and_sig = signedCls >> 3;
206
207 #elif SUPPORT_INDEXED_ISA
208 // Indexed isa only uses this method to set a raw pointer class.
209 // Setting an indexed class is handled separately.
210 cls = newCls;
211
212 #else // Nonpointer isa, no ptrauth
213 shiftcls = (uintptr_t)newCls >> 3;
214 #endif
215 }
216
217 // Get the class pointer out of an isa. When ptrauth is supported,
218 // this operation is optionally authenticated. Many code paths don't
219 // need the authentication, so it can be skipped in those cases for
220 // better performance.
221 //
222 // Note: this method does not support retrieving indexed isas. When
223 // indexed isas are in use, it can only be used to retrieve the class
224 // of a raw isa.
225 #if SUPPORT_INDEXED_ISA || (ISA_SIGNING_AUTH_MODE != ISA_SIGNING_AUTH)
226 #define MAYBE_UNUSED_AUTHENTICATED_PARAM __attribute__((unused))
227 #else
228 #define MAYBE_UNUSED_AUTHENTICATED_PARAM UNUSED_WITHOUT_PTRAUTH
229 #endif
230
231 inline Class
232 isa_t::getClass(MAYBE_UNUSED_AUTHENTICATED_PARAM bool authenticated) {
233 #if SUPPORT_INDEXED_ISA
234 return cls;
235 #else
236
237 uintptr_t clsbits = bits;
238
239 # if __has_feature(ptrauth_calls)
240 # if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
241 // Most callers aren't security critical, so skip the
242 // authentication unless they ask for it. Message sending and
243 // cache filling are protected by the auth code in msgSend.
244 if (authenticated) {
245 // Mask off all bits besides the class pointer and signature.
246 clsbits &= ISA_MASK;
247 if (clsbits == 0)
248 return Nil;
249 clsbits = (uintptr_t)ptrauth_auth_data((void *)clsbits, ISA_SIGNING_KEY, ptrauth_blend_discriminator(this, ISA_SIGNING_DISCRIMINATOR));
250 } else {
251 // If not authenticating, strip using the precomputed class mask.
252 clsbits &= objc_debug_isa_class_mask;
253 }
254 # else
255 // If not authenticating, strip using the precomputed class mask.
256 clsbits &= objc_debug_isa_class_mask;
257 # endif
258
259 # else
260 clsbits &= ISA_MASK;
261 # endif
262
263 return (Class)clsbits;
264 #endif
265 }
266
267 inline Class
268 isa_t::getDecodedClass(bool authenticated) {
269 #if SUPPORT_INDEXED_ISA
270 if (nonpointer) {
271 return classForIndex(indexcls);
272 }
273 return (Class)cls;
274 #else
275 return getClass(authenticated);
276 #endif
277 }
278
279 inline Class
280 objc_object::ISA(bool authenticated)
281 {
282 ASSERT(!isTaggedPointer());
283 return isa.getDecodedClass(authenticated);
284 }
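// A minimal usage sketch (assuming `obj` is a non-tagged objc_object *):
//
//   Class cls  = obj->ISA();       // plain read: strips the non-class bits
//   Class cls2 = obj->ISA(true);   // authenticated read for security-
//                                  // sensitive callers when ptrauth is on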
285
286 inline Class
287 objc_object::rawISA()
288 {
289 ASSERT(!isTaggedPointer() && !isa.nonpointer);
290 return (Class)isa.bits;
291 }
292
293 inline bool
294 objc_object::hasNonpointerIsa()
295 {
296 return isa.nonpointer;
297 }
298
299
300 inline void
301 objc_object::initIsa(Class cls)
302 {
303 initIsa(cls, false, false);
304 }
305
306 inline void
307 objc_object::initClassIsa(Class cls)
308 {
309 if (DisableNonpointerIsa || cls->instancesRequireRawIsa()) {
310 initIsa(cls, false/*not nonpointer*/, false);
311 } else {
312 initIsa(cls, true/*nonpointer*/, false);
313 }
314 }
315
316 inline void
317 objc_object::initProtocolIsa(Class cls)
318 {
319 return initClassIsa(cls);
320 }
321
322 inline void
323 objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
324 {
325 ASSERT(!cls->instancesRequireRawIsa());
326 ASSERT(hasCxxDtor == cls->hasCxxDtor());
327
328 initIsa(cls, true, hasCxxDtor);
329 }
330
331 #if !SUPPORT_INDEXED_ISA && !ISA_HAS_CXX_DTOR_BIT
332 #define UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT __attribute__((unused))
333 #else
334 #define UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT
335 #endif
336
337 inline void
338 objc_object::initIsa(Class cls, bool nonpointer, UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT bool hasCxxDtor)
339 {
340 ASSERT(!isTaggedPointer());
341
342 isa_t newisa(0);
343
344 if (!nonpointer) {
345 newisa.setClass(cls, this);
346 } else {
347 ASSERT(!DisableNonpointerIsa);
348 ASSERT(!cls->instancesRequireRawIsa());
349
350
351 #if SUPPORT_INDEXED_ISA
352 ASSERT(cls->classArrayIndex() > 0);
353 newisa.bits = ISA_INDEX_MAGIC_VALUE;
354 // isa.magic is part of ISA_MAGIC_VALUE
355 // isa.nonpointer is part of ISA_MAGIC_VALUE
356 newisa.has_cxx_dtor = hasCxxDtor;
357 newisa.indexcls = (uintptr_t)cls->classArrayIndex();
358 #else
359 newisa.bits = ISA_MAGIC_VALUE;
360 // isa.magic is part of ISA_MAGIC_VALUE
361 // isa.nonpointer is part of ISA_MAGIC_VALUE
362 # if ISA_HAS_CXX_DTOR_BIT
363 newisa.has_cxx_dtor = hasCxxDtor;
364 # endif
365 newisa.setClass(cls, this);
366 #endif
367 newisa.extra_rc = 1;
368 }
369
370 // This write must be performed in a single store in some cases
371 // (for example when realizing a class because other threads
372 // may simultaneously try to use the class).
373 // fixme use atomics here to guarantee single-store and to
374 // guarantee memory order w.r.t. the class index table
375 // ...but not too atomic because we don't want to hurt instantiation
376 isa = newisa;
377 }
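// Net effect for a typical instance on a nonpointer-isa platform, shown
// schematically (field names and widths are per-architecture; see isa.h):
//
//   newisa.nonpointer == 1       // part of ISA_MAGIC_VALUE
//   newisa.magic      != 0       // part of ISA_MAGIC_VALUE
//   class bits        == cls     // shiftcls / shiftcls_and_sig / indexcls
//   newisa.extra_rc   == 1       // inline retain count starts at +1
//   newisa.has_cxx_dtor          // set when the class has a C++ destructor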
378
379
380 inline Class
381 objc_object::changeIsa(Class newCls)
382 {
383 // This is almost always true but there are
384 // enough edge cases that we can't assert it.
385 // assert(newCls->isFuture() ||
386 // newCls->isInitializing() || newCls->isInitialized());
387
388 ASSERT(!isTaggedPointer());
389
390 isa_t oldisa;
391 isa_t newisa(0);
392
393 bool sideTableLocked = false;
394 bool transcribeToSideTable = false;
395
396 oldisa = LoadExclusive(&isa.bits);
397
398 do {
399 transcribeToSideTable = false;
400 if ((oldisa.bits == 0 || oldisa.nonpointer) &&
401 !newCls->isFuture() && newCls->canAllocNonpointer())
402 {
403 // 0 -> nonpointer
404 // nonpointer -> nonpointer
405 #if SUPPORT_INDEXED_ISA
406 if (oldisa.bits == 0) {
407 newisa.bits = ISA_INDEX_MAGIC_VALUE;
408 newisa.extra_rc = 1;
409 } else {
410 newisa = oldisa;
411 }
412 // isa.magic is part of ISA_MAGIC_VALUE
413 // isa.nonpointer is part of ISA_MAGIC_VALUE
414 newisa.has_cxx_dtor = newCls->hasCxxDtor();
415 ASSERT(newCls->classArrayIndex() > 0);
416 newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
417 #else
418 if (oldisa.bits == 0) {
419 newisa.bits = ISA_MAGIC_VALUE;
420 newisa.extra_rc = 1;
421 }
422 else {
423 newisa = oldisa;
424 }
425 // isa.magic is part of ISA_MAGIC_VALUE
426 // isa.nonpointer is part of ISA_MAGIC_VALUE
427 # if ISA_HAS_CXX_DTOR_BIT
428 newisa.has_cxx_dtor = newCls->hasCxxDtor();
429 # endif
430 newisa.setClass(newCls, this);
431 #endif
432 }
433 else if (oldisa.nonpointer) {
434 // nonpointer -> raw pointer
435 // Need to copy retain count et al to side table.
436 // Acquire side table lock before setting isa to
437 // prevent races such as concurrent -release.
438 if (!sideTableLocked) sidetable_lock();
439 sideTableLocked = true;
440 transcribeToSideTable = true;
441 newisa.setClass(newCls, this);
442 }
443 else {
444 // raw pointer -> raw pointer
445 newisa.setClass(newCls, this);
446 }
447 } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
448
449 if (transcribeToSideTable) {
450 // Copy oldisa's retain count et al to side table.
451 // oldisa.has_assoc: nothing to do
452 // oldisa.has_cxx_dtor: nothing to do
453 sidetable_moveExtraRC_nolock(oldisa.extra_rc,
454 oldisa.isDeallocating(),
455 oldisa.weakly_referenced);
456 }
457
458 if (sideTableLocked) sidetable_unlock();
459
460 return oldisa.getDecodedClass(false);
461 }
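// Sketch of the usual entry point that funnels here (simplified; the real
// object_setClass() in the runtime performs additional checks):
//
//   Class object_setClass(id obj, Class cls)
//   {
//       if (!obj) return nil;
//       return obj->changeIsa(cls);
//   }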
462
463 inline bool
464 objc_object::hasAssociatedObjects()
465 {
466 if (isTaggedPointer()) return true;
467 if (isa.nonpointer) return isa.has_assoc;
468 return true;
469 }
470
471
472 inline void
473 objc_object::setHasAssociatedObjects()
474 {
475 if (isTaggedPointer()) return;
476
477 if (slowpath(!hasNonpointerIsa() && ISA()->hasCustomRR()) && !ISA()->isFuture() && !ISA()->isMetaClass()) {
478 void(*setAssoc)(id, SEL) = (void(*)(id, SEL)) object_getMethodImplementation((id)this, @selector(_noteAssociatedObjects));
479 if ((IMP)setAssoc != _objc_msgForward) {
480 (*setAssoc)((id)this, @selector(_noteAssociatedObjects));
481 }
482 }
483
484 isa_t newisa, oldisa = LoadExclusive(&isa.bits);
485 do {
486 newisa = oldisa;
487 if (!newisa.nonpointer || newisa.has_assoc) {
488 ClearExclusive(&isa.bits);
489 return;
490 }
491 newisa.has_assoc = true;
492 } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
493 }
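// Illustrative call site, sketched: when the association machinery stores
// the first associated object on `obj`, it marks the owner so that dealloc
// knows to erase the associations later.
//
//   objc_setAssociatedObject(obj, &key, value, OBJC_ASSOCIATION_RETAIN);
//   // ...the store path ends up calling obj->setHasAssociatedObjects()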
494
495
496 inline bool
497 objc_object::isWeaklyReferenced()
498 {
499 ASSERT(!isTaggedPointer());
500 if (isa.nonpointer) return isa.weakly_referenced;
501 else return sidetable_isWeaklyReferenced();
502 }
503
504
505 inline void
506 objc_object::setWeaklyReferenced_nolock()
507 {
508 isa_t newisa, oldisa = LoadExclusive(&isa.bits);
509 do {
510 newisa = oldisa;
511 if (slowpath(!newisa.nonpointer)) {
512 ClearExclusive(&isa.bits);
513 sidetable_setWeaklyReferenced_nolock();
514 return;
515 }
516 if (newisa.weakly_referenced) {
517 ClearExclusive(&isa.bits);
518 return;
519 }
520 newisa.weakly_referenced = true;
521 } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
522 }
523
524
525 inline bool
526 objc_object::hasCxxDtor()
527 {
528 ASSERT(!isTaggedPointer());
529 #if ISA_HAS_CXX_DTOR_BIT
530 if (isa.nonpointer)
531 return isa.has_cxx_dtor;
532 else
533 #endif
534 return ISA()->hasCxxDtor();
535 }
536
537
538
539 inline bool
540 objc_object::rootIsDeallocating()
541 {
542 if (isTaggedPointer()) return false;
543 if (isa.nonpointer) return isa.isDeallocating();
544 return sidetable_isDeallocating();
545 }
546
547
548 inline void
549 objc_object::clearDeallocating()
550 {
551 if (slowpath(!isa.nonpointer)) {
552 // Slow path for raw pointer isa.
553 sidetable_clearDeallocating();
554 }
555 else if (slowpath(isa.weakly_referenced || isa.has_sidetable_rc)) {
556 // Slow path for non-pointer isa with weak refs and/or side table data.
557 clearDeallocating_slow();
558 }
559
560 assert(!sidetable_present());
561 }
562
563
564 inline void
565 objc_object::rootDealloc()
566 {
567 if (isTaggedPointer()) return; // fixme necessary?
568
569 if (fastpath(isa.nonpointer &&
570 !isa.weakly_referenced &&
571 !isa.has_assoc &&
572 #if ISA_HAS_CXX_DTOR_BIT
573 !isa.has_cxx_dtor &&
574 #else
575 !isa.getClass(false)->hasCxxDtor() &&
576 #endif
577 !isa.has_sidetable_rc))
578 {
579 assert(!sidetable_present());
580 free(this);
581 }
582 else {
583 object_dispose((id)this);
584 }
585 }
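// For reference, a simplified sketch of the usual route into rootDealloc()
// (as wired up in NSObject.mm):
//
//   - (void)dealloc {
//       _objc_rootDealloc(self);    // calls self->rootDealloc()
//   }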
586
587 extern explicit_atomic<id(*)(id)> swiftRetain;
588 extern explicit_atomic<void(*)(id)> swiftRelease;
589
590 // Equivalent to calling [this retain], with shortcuts if there is no override
591 inline id
592 objc_object::retain()
593 {
594 ASSERT(!isTaggedPointer());
595
596 return rootRetain(false, RRVariant::FastOrMsgSend);
597 }
598
599 // Base retain implementation, ignoring overrides.
600 // This does not check isa.fast_rr; if there is an RR override then
601 // it was already called and it chose to call [super retain].
602 //
603 // tryRetain=true is the -_tryRetain path.
604 // variant=Fast (or FastOrMsgSend) is the frameless fast path.
605 // variant=Full is the framed slow path, including overflow to the side table.
606 // The code is structured this way to prevent duplication.
607
608 ALWAYS_INLINE id
609 objc_object::rootRetain()
610 {
611 return rootRetain(false, RRVariant::Fast);
612 }
613
614 ALWAYS_INLINE bool
615 objc_object::rootTryRetain()
616 {
617 return rootRetain(true, RRVariant::Fast) ? true : false;
618 }
619
620 ALWAYS_INLINE id
621 objc_object::rootRetain(bool tryRetain, objc_object::RRVariant variant)
622 {
623 if (slowpath(isTaggedPointer())) return (id)this;
624
625 bool sideTableLocked = false;
626 bool transcribeToSideTable = false;
627
628 isa_t oldisa;
629 isa_t newisa;
630
631 oldisa = LoadExclusive(&isa.bits);
632
633 if (variant == RRVariant::FastOrMsgSend) {
634 // These checks are only meaningful for objc_retain()
635 // They are here so that we avoid a re-load of the isa.
636 if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) {
637 ClearExclusive(&isa.bits);
638 if (oldisa.getDecodedClass(false)->canCallSwiftRR()) {
639 return swiftRetain.load(memory_order_relaxed)((id)this);
640 }
641 return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
642 }
643 }
644
645 if (slowpath(!oldisa.nonpointer)) {
646 // a Class is a Class forever, so we can perform this check once
647 // outside of the CAS loop
648 if (oldisa.getDecodedClass(false)->isMetaClass()) {
649 ClearExclusive(&isa.bits);
650 return (id)this;
651 }
652 }
653
654 do {
655 transcribeToSideTable = false;
656 newisa = oldisa;
657 if (slowpath(!newisa.nonpointer)) {
658 ClearExclusive(&isa.bits);
659 if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
660 else return sidetable_retain(sideTableLocked);
661 }
662 // don't check newisa.fast_rr; we already called any RR overrides
663 if (slowpath(newisa.isDeallocating())) {
664 ClearExclusive(&isa.bits);
665 if (sideTableLocked) {
666 ASSERT(variant == RRVariant::Full);
667 sidetable_unlock();
668 }
669 if (slowpath(tryRetain)) {
670 return nil;
671 } else {
672 return (id)this;
673 }
674 }
675 uintptr_t carry;
676 newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry); // extra_rc++
677
678 if (slowpath(carry)) {
679 // newisa.extra_rc++ overflowed
680 if (variant != RRVariant::Full) {
681 ClearExclusive(&isa.bits);
682 return rootRetain_overflow(tryRetain);
683 }
684 // Leave half of the retain counts inline and
685 // prepare to copy the other half to the side table.
686 if (!tryRetain && !sideTableLocked) sidetable_lock();
687 sideTableLocked = true;
688 transcribeToSideTable = true;
689 newisa.extra_rc = RC_HALF;
690 newisa.has_sidetable_rc = true;
691 }
692 } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
693
694 if (variant == RRVariant::Full) {
695 if (slowpath(transcribeToSideTable)) {
696 // Copy the other half of the retain counts to the side table.
697 sidetable_addExtraRC_nolock(RC_HALF);
698 }
699
700 if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
701 } else {
702 ASSERT(!transcribeToSideTable);
703 ASSERT(!sideTableLocked);
704 }
705
706 return (id)this;
707 }
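// Worked example of the overflow path above, assuming (for illustration
// only) an 8-bit extra_rc field, i.e. RC_HALF == 128:
//   - the inline count is 255 and the object is retained again;
//   - extra_rc++ carries out of the field, so the runtime keeps RC_HALF
//     (128) inline, sets has_sidetable_rc, and adds the other RC_HALF
//     (128) to the side table: 128 + 128 == 256, the expected total.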
708
709
710 // Equivalent to calling [this release], with shortcuts if there is no override
711 inline void
712 objc_object::release()
713 {
714 ASSERT(!isTaggedPointer());
715
716 rootRelease(true, RRVariant::FastOrMsgSend);
717 }
718
719
720 // Base release implementation, ignoring overrides.
721 // Does not call -dealloc.
722 // Returns true if the object should now be deallocated.
723 // This does not check isa.fast_rr; if there is an RR override then
724 // it was already called and it chose to call [super release].
725 //
726 // handleUnderflow=false is the frameless fast path.
727 // variant=Fast (or FastOrMsgSend) is the frameless fast path.
728 // variant=Full is the framed slow path, including the side table borrow.
729
730 ALWAYS_INLINE bool
731 objc_object::rootRelease()
732 {
733 return rootRelease(true, RRVariant::Fast);
734 }
735
736 ALWAYS_INLINE bool
737 objc_object::rootReleaseShouldDealloc()
738 {
739 return rootRelease(false, RRVariant::Fast);
740 }
741
742 ALWAYS_INLINE bool
743 objc_object::rootRelease(bool performDealloc, objc_object::RRVariant variant)
744 {
745 if (slowpath(isTaggedPointer())) return false;
746
747 bool sideTableLocked = false;
748
749 isa_t newisa, oldisa;
750
751 oldisa = LoadExclusive(&isa.bits);
752
753 if (variant == RRVariant::FastOrMsgSend) {
754 // These checks are only meaningful for objc_release()
755 // They are here so that we avoid a re-load of the isa.
756 if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) {
757 ClearExclusive(&isa.bits);
758 if (oldisa.getDecodedClass(false)->canCallSwiftRR()) {
759 swiftRelease.load(memory_order_relaxed)((id)this);
760 return true;
761 }
762 ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
763 return true;
764 }
765 }
766
767 if (slowpath(!oldisa.nonpointer)) {
768 // a Class is a Class forever, so we can perform this check once
769 // outside of the CAS loop
770 if (oldisa.getDecodedClass(false)->isMetaClass()) {
771 ClearExclusive(&isa.bits);
772 return false;
773 }
774 }
775
776 retry:
777 do {
778 newisa = oldisa;
779 if (slowpath(!newisa.nonpointer)) {
780 ClearExclusive(&isa.bits);
781 return sidetable_release(sideTableLocked, performDealloc);
782 }
783 if (slowpath(newisa.isDeallocating())) {
784 ClearExclusive(&isa.bits);
785 if (sideTableLocked) {
786 ASSERT(variant == RRVariant::Full);
787 sidetable_unlock();
788 }
789 return false;
790 }
791
792 // don't check newisa.fast_rr; we already called any RR overrides
793 uintptr_t carry;
794 newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc--
795 if (slowpath(carry)) {
796 // don't ClearExclusive()
797 goto underflow;
798 }
799 } while (slowpath(!StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
800
801 if (slowpath(newisa.isDeallocating()))
802 goto deallocate;
803
804 if (variant == RRVariant::Full) {
805 if (slowpath(sideTableLocked)) sidetable_unlock();
806 } else {
807 ASSERT(!sideTableLocked);
808 }
809 return false;
810
811 underflow:
812 // newisa.extra_rc-- underflowed: borrow from side table or deallocate
813
814 // abandon newisa to undo the decrement
815 newisa = oldisa;
816
817 if (slowpath(newisa.has_sidetable_rc)) {
818 if (variant != RRVariant::Full) {
819 ClearExclusive(&isa.bits);
820 return rootRelease_underflow(performDealloc);
821 }
822
823 // Transfer retain count from side table to inline storage.
824
825 if (!sideTableLocked) {
826 ClearExclusive(&isa.bits);
827 sidetable_lock();
828 sideTableLocked = true;
829 // Need to start over to avoid a race against
830 // the nonpointer -> raw pointer transition.
831 oldisa = LoadExclusive(&isa.bits);
832 goto retry;
833 }
834
835 // Try to remove some retain counts from the side table.
836 auto borrow = sidetable_subExtraRC_nolock(RC_HALF);
837
838 bool emptySideTable = borrow.remaining == 0; // we'll clear the side table if no refcounts remain there
839
840 if (borrow.borrowed > 0) {
841 // Side table retain count decreased.
842 // Try to add them to the inline count.
843 bool didTransitionToDeallocating = false;
844 newisa.extra_rc = borrow.borrowed - 1; // redo the original decrement too
845 newisa.has_sidetable_rc = !emptySideTable;
846
847 bool stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits);
848
849 if (!stored && oldisa.nonpointer) {
850 // Inline update failed.
851 // Try it again right now. This prevents livelock on LL/SC
852 // architectures where the side table access itself may have
853 // dropped the reservation.
854 uintptr_t overflow;
855 newisa.bits =
856 addc(oldisa.bits, RC_ONE * (borrow.borrowed-1), 0, &overflow);
857 newisa.has_sidetable_rc = !emptySideTable;
858 if (!overflow) {
859 stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits);
860 if (stored) {
861 didTransitionToDeallocating = newisa.isDeallocating();
862 }
863 }
864 }
865
866 if (!stored) {
867 // Inline update failed.
868 // Put the retains back in the side table.
869 ClearExclusive(&isa.bits);
870 sidetable_addExtraRC_nolock(borrow.borrowed);
871 oldisa = LoadExclusive(&isa.bits);
872 goto retry;
873 }
874
875 // Decrement successful after borrowing from side table.
876 if (emptySideTable)
877 sidetable_clearExtraRC_nolock();
878
879 if (!didTransitionToDeallocating) {
880 if (slowpath(sideTableLocked)) sidetable_unlock();
881 return false;
882 }
883 }
884 else {
885 // Side table is empty after all. Fall-through to the dealloc path.
886 }
887 }
888
889 deallocate:
890 // Really deallocate.
891
892 ASSERT(newisa.isDeallocating());
893 ASSERT(isa.isDeallocating());
894
895 if (slowpath(sideTableLocked)) sidetable_unlock();
896
897 __c11_atomic_thread_fence(__ATOMIC_ACQUIRE);
898
899 if (performDealloc) {
900 ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
901 }
902 return true;
903 }
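// Worked example of the underflow path above, continuing the illustration
// of an 8-bit extra_rc (RC_HALF == 128):
//   - the inline count is 0 but has_sidetable_rc is set;
//   - the release borrows up to RC_HALF from the side table, redoes the
//     decrement (e.g. stores 128 - 1 == 127 inline), and clears
//     has_sidetable_rc if the side table is now empty.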
904
905
906 // Equivalent to [this autorelease], with shortcuts if there is no override
907 inline id
908 objc_object::autorelease()
909 {
910 ASSERT(!isTaggedPointer());
911 if (fastpath(!ISA()->hasCustomRR())) {
912 return rootAutorelease();
913 }
914
915 return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(autorelease));
916 }
917
918
919 // Base autorelease implementation, ignoring overrides.
920 inline id
921 objc_object::rootAutorelease()
922 {
923 if (isTaggedPointer()) return (id)this;
924 if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;
925
926 return rootAutorelease2();
927 }
928
929
930 inline uintptr_t
931 objc_object::rootRetainCount()
932 {
933 if (isTaggedPointer()) return (uintptr_t)this;
934
935 sidetable_lock();
936 isa_t bits = __c11_atomic_load((_Atomic uintptr_t *)&isa.bits, __ATOMIC_RELAXED);
937 if (bits.nonpointer) {
938 uintptr_t rc = bits.extra_rc;
939 if (bits.has_sidetable_rc) {
940 rc += sidetable_getExtraRC_nolock();
941 }
942 sidetable_unlock();
943 return rc;
944 }
945
946 sidetable_unlock();
947 return sidetable_retainCount();
948 }
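// For reference, a simplified sketch of how this surfaces publicly
// (as wired up in NSObject.mm):
//
//   - (NSUInteger)retainCount {
//       return _objc_rootRetainCount(self);   // calls obj->rootRetainCount()
//   }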
949
950
951 // SUPPORT_NONPOINTER_ISA
952 #else
953 // not SUPPORT_NONPOINTER_ISA
954
955 inline void
956 isa_t::setClass(Class cls, objc_object *obj)
957 {
958 this->cls = cls;
959 }
960
961 inline Class
962 isa_t::getClass(bool authenticated __unused)
963 {
964 return cls;
965 }
966
967 inline Class
968 isa_t::getDecodedClass(bool authenticated)
969 {
970 return getClass(authenticated);
971 }
972
973 inline Class
974 objc_object::ISA(bool authenticated __unused)
975 {
976 ASSERT(!isTaggedPointer());
977 return isa.getClass(/*authenticated*/false);
978 }
979
980 inline Class
981 objc_object::rawISA()
982 {
983 return ISA();
984 }
985
986 inline bool
987 objc_object::hasNonpointerIsa()
988 {
989 return false;
990 }
991
992
993 inline void
994 objc_object::initIsa(Class cls)
995 {
996 ASSERT(!isTaggedPointer());
997 isa.setClass(cls, this);
998 }
999
1000
1001 inline void
1002 objc_object::initClassIsa(Class cls)
1003 {
1004 initIsa(cls);
1005 }
1006
1007
1008 inline void
1009 objc_object::initProtocolIsa(Class cls)
1010 {
1011 initIsa(cls);
1012 }
1013
1014
1015 inline void
1016 objc_object::initInstanceIsa(Class cls, bool)
1017 {
1018 initIsa(cls);
1019 }
1020
1021
1022 inline void
1023 objc_object::initIsa(Class cls, bool, bool)
1024 {
1025 initIsa(cls);
1026 }
1027
1028
1029 inline Class
1030 objc_object::changeIsa(Class cls)
1031 {
1032 // This is almost always true but there are
1033 // enough edge cases that we can't assert it.
1034 // assert(cls->isFuture() ||
1035 // cls->isInitializing() || cls->isInitialized());
1036
1037 ASSERT(!isTaggedPointer());
1038
1039 isa_t newisa, oldisa;
1040 newisa.setClass(cls, this);
1041 oldisa.bits = __c11_atomic_exchange((_Atomic uintptr_t *)&isa.bits, newisa.bits, __ATOMIC_RELAXED);
1042
1043 Class oldcls = oldisa.getDecodedClass(/*authenticated*/false);
1044 if (oldcls && oldcls->instancesHaveAssociatedObjects()) {
1045 cls->setInstancesHaveAssociatedObjects();
1046 }
1047
1048 return oldcls;
1049 }
1050
1051
1052 inline bool
1053 objc_object::hasAssociatedObjects()
1054 {
1055 return getIsa()->instancesHaveAssociatedObjects();
1056 }
1057
1058
1059 inline void
1060 objc_object::setHasAssociatedObjects()
1061 {
1062 getIsa()->setInstancesHaveAssociatedObjects();
1063 }
1064
1065
1066 inline bool
1067 objc_object::isWeaklyReferenced()
1068 {
1069 ASSERT(!isTaggedPointer());
1070
1071 return sidetable_isWeaklyReferenced();
1072 }
1073
1074
1075 inline void
1076 objc_object::setWeaklyReferenced_nolock()
1077 {
1078 ASSERT(!isTaggedPointer());
1079
1080 sidetable_setWeaklyReferenced_nolock();
1081 }
1082
1083
1084 inline bool
1085 objc_object::hasCxxDtor()
1086 {
1087 ASSERT(!isTaggedPointer());
1088 return isa.getClass(/*authenticated*/false)->hasCxxDtor();
1089 }
1090
1091
1092 inline bool
1093 objc_object::rootIsDeallocating()
1094 {
1095 if (isTaggedPointer()) return false;
1096 return sidetable_isDeallocating();
1097 }
1098
1099
1100 inline void
1101 objc_object::clearDeallocating()
1102 {
1103 sidetable_clearDeallocating();
1104 }
1105
1106
1107 inline void
1108 objc_object::rootDealloc()
1109 {
1110 if (isTaggedPointer()) return;
1111 object_dispose((id)this);
1112 }
1113
1114
1115 // Equivalent to calling [this retain], with shortcuts if there is no override
1116 inline id
1117 objc_object::retain()
1118 {
1119 ASSERT(!isTaggedPointer());
1120
1121 if (fastpath(!ISA()->hasCustomRR())) {
1122 return sidetable_retain();
1123 }
1124
1125 return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
1126 }
1127
1128
1129 // Base retain implementation, ignoring overrides.
1130 // This does not check isa.fast_rr; if there is an RR override then
1131 // it was already called and it chose to call [super retain].
1132 inline id
1133 objc_object::rootRetain()
1134 {
1135 if (isTaggedPointer()) return (id)this;
1136 return sidetable_retain();
1137 }
1138
1139
1140 // Equivalent to calling [this release], with shortcuts if there is no override
1141 inline void
1142 objc_object::release()
1143 {
1144 ASSERT(!isTaggedPointer());
1145
1146 if (fastpath(!ISA()->hasCustomRR())) {
1147 sidetable_release();
1148 return;
1149 }
1150
1151 ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
1152 }
1153
1154
1155 // Base release implementation, ignoring overrides.
1156 // Does not call -dealloc.
1157 // Returns true if the object should now be deallocated.
1158 // This does not check isa.fast_rr; if there is an RR override then
1159 // it was already called and it chose to call [super release].
1160 inline bool
1161 objc_object::rootRelease()
1162 {
1163 if (isTaggedPointer()) return false;
1164 return sidetable_release();
1165 }
1166
1167 inline bool
1168 objc_object::rootReleaseShouldDealloc()
1169 {
1170 if (isTaggedPointer()) return false;
1171 return sidetable_release(/*locked*/false, /*performDealloc*/false);
1172 }
1173
1174
1175 // Equivalent to [this autorelease], with shortcuts if there is no override
1176 inline id
1177 objc_object::autorelease()
1178 {
1179 if (isTaggedPointer()) return (id)this;
1180 if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();
1181
1182 return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(autorelease));
1183 }
1184
1185
1186 // Base autorelease implementation, ignoring overrides.
1187 inline id
1188 objc_object::rootAutorelease()
1189 {
1190 if (isTaggedPointer()) return (id)this;
1191 if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;
1192
1193 return rootAutorelease2();
1194 }
1195
1196
1197 // Base tryRetain implementation, ignoring overrides.
1198 // This does not check isa.fast_rr; if there is an RR override then
1199 // it was already called and it chose to call [super _tryRetain].
1200 inline bool
1201 objc_object::rootTryRetain()
1202 {
1203 if (isTaggedPointer()) return true;
1204 return sidetable_tryRetain();
1205 }
1206
1207
1208 inline uintptr_t
1209 objc_object::rootRetainCount()
1210 {
1211 if (isTaggedPointer()) return (uintptr_t)this;
1212 return sidetable_retainCount();
1213 }
1214
1215
1216 // not SUPPORT_NONPOINTER_ISA
1217 #endif
1218
1219
1220 #if SUPPORT_RETURN_AUTORELEASE
1221
1222 /***********************************************************************
1223 Fast handling of return through Cocoa's +0 autoreleasing convention.
1224 The caller and callee cooperate to keep the returned object
1225 out of the autorelease pool and eliminate redundant retain/release pairs.
1226
1227 An optimized callee looks at the caller's instructions following the
1228 return. If the caller's instructions are also optimized then the callee
1229 skips all retain count operations: no autorelease, no retain/autorelease.
1230 Instead it saves the result's current retain count (+0 or +1) in
1231 thread-local storage. If the caller does not look optimized then
1232 the callee performs autorelease or retain/autorelease as usual.
1233
1234 An optimized caller looks at the thread-local storage. If the result
1235 is set then it performs any retain or release needed to change the
1236 result from the retain count left by the callee to the retain count
1237 desired by the caller. Otherwise the caller assumes the result is
1238 currently at +0 from an unoptimized callee and performs any retain
1239 needed for that case.
1240
1241 There are two optimized callees:
1242 objc_autoreleaseReturnValue
1243 result is currently +1. The unoptimized path autoreleases it.
1244 objc_retainAutoreleaseReturnValue
1245 result is currently +0. The unoptimized path retains and autoreleases it.
1246
1247 There are two optimized callers:
1248 objc_retainAutoreleasedReturnValue
1249 caller wants the value at +1. The unoptimized path retains it.
1250 objc_unsafeClaimAutoreleasedReturnValue
1251 caller wants the value at +0 unsafely. The unoptimized path does nothing.
1252
1253 Example:
1254
1255 Callee:
1256 // compute ret at +1
1257 return objc_autoreleaseReturnValue(ret);
1258
1259 Caller:
1260 ret = callee();
1261 ret = objc_retainAutoreleasedReturnValue(ret);
1262 // use ret at +1 here
1263
1264 Callee sees the optimized caller, sets TLS, and leaves the result at +1.
1265 Caller sees the TLS, clears it, and accepts the result at +1 as-is.
1266
1267 The callee's recognition of the optimized caller is architecture-dependent.
1268 x86_64: Callee looks for `mov rax, rdi` followed by a call or
1269 jump instruction to objc_retainAutoreleasedReturnValue or
1270 objc_unsafeClaimAutoreleasedReturnValue.
1271 i386: Callee looks for a magic nop `movl %ebp, %ebp` (frame pointer register)
1272 armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register).
1273 arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register).
1274
1275 Tagged pointer objects do participate in the optimized return scheme,
1276 because it saves message sends. They are not entered in the autorelease
1277 pool in the unoptimized case.
1278 **********************************************************************/
1279
1280 # if __x86_64__
1281
1282 static ALWAYS_INLINE bool
1283 callerAcceptsOptimizedReturn(const void * const ra0)
1284 {
1285 const uint8_t *ra1 = (const uint8_t *)ra0;
1286 const unaligned_uint16_t *ra2;
1287 const unaligned_uint32_t *ra4 = (const unaligned_uint32_t *)ra1;
1288 const void **sym;
1289
1290 #define PREFER_GOTPCREL 0
1291 #if PREFER_GOTPCREL
1292 // 48 89 c7 movq %rax,%rdi
1293 // ff 15 callq *symbol@GOTPCREL(%rip)
1294 if (*ra4 != 0xffc78948) {
1295 return false;
1296 }
1297 if (ra1[4] != 0x15) {
1298 return false;
1299 }
1300 ra1 += 3;
1301 #else
1302 // 48 89 c7 movq %rax,%rdi
1303 // e8 callq symbol
1304 if (*ra4 != 0xe8c78948) {
1305 return false;
1306 }
1307 ra1 += (long)*(const unaligned_int32_t *)(ra1 + 4) + 8l;
1308 ra2 = (const unaligned_uint16_t *)ra1;
1309 // ff 25 jmpq *symbol@DYLDMAGIC(%rip)
1310 if (*ra2 != 0x25ff) {
1311 return false;
1312 }
1313 #endif
1314 ra1 += 6l + (long)*(const unaligned_int32_t *)(ra1 + 2);
1315 sym = (const void **)ra1;
1316 if (*sym != objc_retainAutoreleasedReturnValue &&
1317 *sym != objc_unsafeClaimAutoreleasedReturnValue)
1318 {
1319 return false;
1320 }
1321
1322 return true;
1323 }
1324
1325 // __x86_64__
1326 # elif __arm__
1327
1328 static ALWAYS_INLINE bool
1329 callerAcceptsOptimizedReturn(const void *ra)
1330 {
1331 // if the low bit is set, we're returning to thumb mode
1332 if ((uintptr_t)ra & 1) {
1333 // 3f 46 mov r7, r7
1334 // we mask off the low bit via subtraction
1335 // 16-bit instructions are well-aligned
1336 if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) {
1337 return true;
1338 }
1339 } else {
1340 // 07 70 a0 e1 mov r7, r7
1341 // 32-bit instructions may be only 16-bit aligned
1342 if (*(unaligned_uint32_t *)ra == 0xe1a07007) {
1343 return true;
1344 }
1345 }
1346 return false;
1347 }
1348
1349 // __arm__
1350 # elif __arm64__
1351
1352 static ALWAYS_INLINE bool
1353 callerAcceptsOptimizedReturn(const void *ra)
1354 {
1355 // fd 03 1d aa mov fp, fp
1356 // arm64 instructions are well-aligned
1357 if (*(uint32_t *)ra == 0xaa1d03fd) {
1358 return true;
1359 }
1360 return false;
1361 }
1362
1363 // __arm64__
1364 # elif __i386__
1365
1366 static ALWAYS_INLINE bool
1367 callerAcceptsOptimizedReturn(const void *ra)
1368 {
1369 // 89 ed movl %ebp, %ebp
1370 if (*(unaligned_uint16_t *)ra == 0xed89) {
1371 return true;
1372 }
1373 return false;
1374 }
1375
1376 // __i386__
1377 # else
1378
1379 #warning unknown architecture
1380
1381 static ALWAYS_INLINE bool
1382 callerAcceptsOptimizedReturn(const void *ra)
1383 {
1384 return false;
1385 }
1386
1387 // unknown architecture
1388 # endif
1389
1390
1391 static ALWAYS_INLINE ReturnDisposition
1392 getReturnDisposition()
1393 {
1394 return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
1395 }
1396
1397
1398 static ALWAYS_INLINE void
1399 setReturnDisposition(ReturnDisposition disposition)
1400 {
1401 tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
1402 }
1403
1404
1405 // Try to prepare for optimized return with the given disposition (+0 or +1).
1406 // Returns true if the optimized path is successful.
1407 // Otherwise the return value must be retained and/or autoreleased as usual.
1408 static ALWAYS_INLINE bool
1409 prepareOptimizedReturn(ReturnDisposition disposition)
1410 {
1411 ASSERT(getReturnDisposition() == ReturnAtPlus0);
1412
1413 if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
1414 if (disposition) setReturnDisposition(disposition);
1415 return true;
1416 }
1417
1418 return false;
1419 }
1420
1421
1422 // Try to accept an optimized return.
1423 // Returns the disposition of the returned object (+0 or +1).
1424 // An un-optimized return is +0.
1425 static ALWAYS_INLINE ReturnDisposition
1426 acceptOptimizedReturn()
1427 {
1428 ReturnDisposition disposition = getReturnDisposition();
1429 setReturnDisposition(ReturnAtPlus0); // reset to the unoptimized state
1430 return disposition;
1431 }
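// A simplified sketch of how NSObject.mm pairs these helpers with the
// public entry points (details omitted):
//
//   id objc_autoreleaseReturnValue(id obj)         // optimized callee
//   {
//       if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
//       return objc_autorelease(obj);
//   }
//
//   id objc_retainAutoreleasedReturnValue(id obj)  // optimized caller
//   {
//       if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
//       return objc_retain(obj);
//   }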
1432
1433
1434 // SUPPORT_RETURN_AUTORELEASE
1435 #else
1436 // not SUPPORT_RETURN_AUTORELEASE
1437
1438
1439 static ALWAYS_INLINE bool
1440 prepareOptimizedReturn(ReturnDisposition disposition __unused)
1441 {
1442 return false;
1443 }
1444
1445
1446 static ALWAYS_INLINE ReturnDisposition
1447 acceptOptimizedReturn()
1448 {
1449 return ReturnAtPlus0;
1450 }
1451
1452
1453 // not SUPPORT_RETURN_AUTORELEASE
1454 #endif
1455
1456
1457 // _OBJC_OBJECT_H_
1458 #endif