/*
 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H

#include "PointerUnion.h"

// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.
// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
// class is a metaclass
#define RO_META               (1<<0)
// class is a root class
#define RO_ROOT               (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS  (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN             (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION          (1<<5)
// class has ro field for Swift metadata initializer callback
#define RO_HAS_SWIFT_INITIALIZER (1<<6)
// class compiled with ARC
#define RO_IS_ARC             (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY  (1<<8)
// class is not ARC but has ARC-style weak ivar layout
#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
// class does not allow associated objects on instances
#define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE        (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE             (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED           (1<<31)
// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.

// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED           (1<<31)
// class is unresolved future class
#define RW_FUTURE             (1<<30)
// class is initialized
#define RW_INITIALIZED        (1<<29)
// class is initializing
#define RW_INITIALIZING       (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO          (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING       (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED        (1<<25)
// available for use; was RW_FINALIZE_ON_MAIN_THREAD
// #define RW_24 (1<<24)
// class +load has been called
#define RW_LOADED             (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1<<21)
// class does not allow associated objects on its instances
#define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING          (1<<19)

// class is a metaclass (copied from ro)
#define RW_META               RO_META // (1<<0)

// NOTE: MORE RW_ FLAGS DEFINED BELOW
// Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
// or class_t->bits (FAST_*).
//
// FAST_* and FAST_CACHE_* are stored on the class, reducing pointer indirection.

#if __LP64__
// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY    (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE    (1UL<<1)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL

#if __arm64__
// class or superclass has .cxx_construct/.cxx_destruct implementation
//   FAST_CACHE_HAS_CXX_DTOR is the first bit so that setting it in
//   isa_t::has_cxx_dtor is a single bfi
#define FAST_CACHE_HAS_CXX_DTOR       (1<<0)
#define FAST_CACHE_HAS_CXX_CTOR       (1<<1)
// Denormalized RO_META to avoid an indirection
#define FAST_CACHE_META               (1<<2)
#else
// Denormalized RO_META to avoid an indirection
#define FAST_CACHE_META               (1<<0)
// class or superclass has .cxx_construct/.cxx_destruct implementation
//   FAST_CACHE_HAS_CXX_DTOR is chosen to alias with isa_t::has_cxx_dtor
#define FAST_CACHE_HAS_CXX_CTOR       (1<<1)
#define FAST_CACHE_HAS_CXX_DTOR       (1<<2)
#endif

// Fast Alloc fields:
//   This stores the word-aligned size of instances + "ALLOC_DELTA16",
//   or 0 if the instance size doesn't fit.
//
//   These bits occupy the same positions as the instance size bits, so that
//   the size can be extracted with a simple mask operation.
//
//   FAST_CACHE_ALLOC_MASK16 extracts the instance size rounded up to the
//   next 16-byte boundary, which is a fast path for _objc_rootAllocWithZone().
#define FAST_CACHE_ALLOC_MASK         0x1ff8
#define FAST_CACHE_ALLOC_MASK16       0x1ff0
#define FAST_CACHE_ALLOC_DELTA16      0x0008

// class's instances require raw isa
#define FAST_CACHE_REQUIRES_RAW_ISA   (1<<13)
// class or superclass has default alloc/allocWithZone: implementation
// Note: this is stored in the metaclass.
#define FAST_CACHE_HAS_DEFAULT_AWZ    (1<<14)
// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
#define FAST_CACHE_HAS_DEFAULT_CORE   (1<<15)
#else

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note: this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
#if SUPPORT_NONPOINTER_ISA
#define RW_REQUIRES_RAW_ISA   (1<<15)
#endif
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define RW_HAS_DEFAULT_RR     (1<<14)
// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
#define RW_HAS_DEFAULT_CORE   (1<<13)

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY  (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE  (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL

#endif // __LP64__

// The Swift ABI requires that these bits be defined like this on all platforms.
static_assert(FAST_IS_SWIFT_LEGACY == 1, "resistance is futile");
static_assert(FAST_IS_SWIFT_STABLE == 2, "resistance is futile");
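
/***********************************************************************
* Example (illustrative only, not part of the runtime): because class
* metadata pointers are at least 8-byte aligned, the low bits of the
* class_data_bits_t word can carry the FAST_IS_SWIFT_* flags asserted
* above while the pointer stays recoverable by masking. A minimal
* standalone sketch, using ~7 as a stand-in for FAST_DATA_MASK:
*
*   #include <cassert>
*   #include <cstdint>
*
*   int main() {
*       alignas(8) static int storage;          // stand-in for class_rw_t
*       uintptr_t bits = (uintptr_t)&storage;   // low 3 bits are guaranteed zero
*       bits |= (1UL << 1);                     // FAST_IS_SWIFT_STABLE
*       assert(bits & (1UL << 1));                      // flag is readable
*       assert((void *)(bits & ~7UL) == &storage);      // pointer still recoverable
*       return 0;
*   }
**********************************************************************/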
#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bit masks
#else
typedef uint16_t mask_t;
#endif
typedef uintptr_t SEL;

struct swift_class_t;

enum Atomicity { Atomic = true, NotAtomic = false };
enum IMPEncoding { Encoded = true, Raw = false };
struct bucket_t {
private:
    // IMP-first is better for arm64e ptrauth and no worse for arm64.
    // SEL-first is better for armv7* and i386 and x86_64.
#if __arm64__
    explicit_atomic<uintptr_t> _imp;
    explicit_atomic<SEL> _sel;
#else
    explicit_atomic<SEL> _sel;
    explicit_atomic<uintptr_t> _imp;
#endif

    // Compute the ptrauth signing modifier from &_imp, newSel, and cls.
    uintptr_t modifierForSEL(SEL newSel, Class cls) const {
        return (uintptr_t)&_imp ^ (uintptr_t)newSel ^ (uintptr_t)cls;
    }

    // Sign newImp, with &_imp, newSel, and cls as modifiers.
    uintptr_t encodeImp(IMP newImp, SEL newSel, Class cls) const {
        if (!newImp) return 0;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
        return (uintptr_t)
            ptrauth_auth_and_resign(newImp,
                                    ptrauth_key_function_pointer, 0,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(newSel, cls));
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        return (uintptr_t)newImp ^ (uintptr_t)cls;
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
        return (uintptr_t)newImp;
#else
#error Unknown method cache IMP encoding.
#endif
    }

public:
    inline SEL sel() const { return _sel.load(memory_order::memory_order_relaxed); }

    inline IMP rawImp(objc_class *cls) const {
        uintptr_t imp = _imp.load(memory_order::memory_order_relaxed);
        if (!imp) return nil;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        imp ^= (uintptr_t)cls;
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
#else
#error Unknown method cache IMP encoding.
#endif
        return (IMP)imp;
    }

    inline IMP imp(Class cls) const {
        uintptr_t imp = _imp.load(memory_order::memory_order_relaxed);
        if (!imp) return nil;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
        SEL sel = _sel.load(memory_order::memory_order_relaxed);
        return (IMP)
            ptrauth_auth_and_resign((const void *)imp,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(sel, cls),
                                    ptrauth_key_function_pointer, 0);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        return (IMP)(imp ^ (uintptr_t)cls);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
        return (IMP)imp;
#else
#error Unknown method cache IMP encoding.
#endif
    }

    template <Atomicity, IMPEncoding>
    void set(SEL newSel, IMP newImp, Class cls);
};
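
/***********************************************************************
* Example (illustrative only): under CACHE_IMP_ENCODING_ISA_XOR above,
* a bucket stores imp ^ cls rather than the raw IMP, so the value only
* decodes correctly against the owning class. A minimal standalone
* sketch with a plain function pointer and a dummy object standing in
* for IMP and Class:
*
*   #include <cassert>
*   #include <cstdint>
*
*   static void method(void) {}
*   static int fakeClass;                         // hypothetical stand-in for a Class
*
*   int main() {
*       uintptr_t cls = (uintptr_t)&fakeClass;
*       uintptr_t encoded = (uintptr_t)&method ^ cls;              // like encodeImp
*       void (*decoded)(void) = (void (*)(void))(encoded ^ cls);   // like imp(cls)
*       assert(decoded == &method);
*       return 0;
*   }
**********************************************************************/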
struct cache_t {
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
    explicit_atomic<struct bucket_t *> _buckets;
    explicit_atomic<mask_t> _mask;
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
    explicit_atomic<uintptr_t> _maskAndBuckets;

    // How much the mask is shifted by.
    static constexpr uintptr_t maskShift = 48;

    // Additional bits after the mask which must be zero. msgSend
    // takes advantage of these additional bits to construct the value
    // `mask << 4` from `_maskAndBuckets` in a single instruction.
    static constexpr uintptr_t maskZeroBits = 4;

    // The largest mask value we can store.
    static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;

    // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
    static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;

    // Ensure we have enough bits for the buckets pointer.
    static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS,
                  "Bucket field doesn't have enough bits for arbitrary pointers.");
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    // _maskAndBuckets stores the mask shift in the low 4 bits, and
    // the buckets pointer in the remainder of the value. The mask
    // shift is the value where (0xffff >> shift) produces the correct
    // mask. This is equal to 16 - log2(cache_size).
    explicit_atomic<uintptr_t> _maskAndBuckets;

    static constexpr uintptr_t maskBits = 4;
    static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
    static constexpr uintptr_t bucketsMask = ~maskMask;
#else
#error Unknown cache mask storage type.
#endif
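
    /*******************************************************************
    * Example (illustrative only): the CACHE_MASK_STORAGE_HIGH_16
    * layout above packs the mask into the top 16 bits and the buckets
    * pointer into the low bits of one word. A minimal standalone
    * sketch of that packing, using a synthetic bucket address small
    * enough to fit below bit 44 (maskShift - maskZeroBits):
    *
    *   #include <cassert>
    *   #include <cstdint>
    *
    *   int main() {
    *       uintptr_t mask = 7;                          // capacity 8 -> mask 7
    *       uintptr_t buckets = 0x100044000;             // pretend bucket address (< 2^44)
    *       uintptr_t packed = (mask << 48) | buckets;
    *
    *       uintptr_t bucketsMask = ((uintptr_t)1 << 44) - 1;
    *       assert((packed & bucketsMask) == buckets);   // recover the pointer
    *       assert((packed >> 48) == mask);              // recover the mask
    *       return 0;
    *   }
    *******************************************************************/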
    static bucket_t *emptyBuckets();

    struct bucket_t *buckets();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    bool isConstantEmptyCache();

    bool getBit(uint16_t flags) const {
        return _flags & flags;
    }
    void setBit(uint16_t set) {
        __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags, set, __ATOMIC_RELAXED);
    }
    void clearBit(uint16_t clear) {
        __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags, ~clear, __ATOMIC_RELAXED);
    }
#if FAST_CACHE_ALLOC_MASK
    bool hasFastInstanceSize(size_t extra) const
    {
        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        }
        return _flags & FAST_CACHE_ALLOC_MASK;
    }

    size_t fastInstanceSize(size_t extra) const
    {
        ASSERT(hasFastInstanceSize(extra));

        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        }
        size_t size = _flags & FAST_CACHE_ALLOC_MASK;
        // remove the FAST_CACHE_ALLOC_DELTA16 that was added
        // by setFastInstanceSize
        return align16(size + extra - FAST_CACHE_ALLOC_DELTA16);
    }

    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        uint16_t newBits = _flags & ~FAST_CACHE_ALLOC_MASK;
        uint16_t sizeBits;

        // Adding FAST_CACHE_ALLOC_DELTA16 allows for FAST_CACHE_ALLOC_MASK16
        // to yield the proper 16-byte aligned allocation size with a single mask
        sizeBits = word_align(newSize) + FAST_CACHE_ALLOC_DELTA16;
        sizeBits &= FAST_CACHE_ALLOC_MASK;
        if (newSize <= sizeBits) {
            newBits |= sizeBits;
        }
        _flags = newBits;
    }
#else
    bool hasFastInstanceSize(size_t extra) const {
        return false;
    }
    size_t fastInstanceSize(size_t extra) const {
        abort();
    }
    void setFastInstanceSize(size_t extra) {
        // nothing
    }
#endif
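
    /*******************************************************************
    * Example (illustrative only): the FAST_CACHE_ALLOC_DELTA16 trick
    * above, with concrete numbers. Storing word_align(size) + 8 lets a
    * single mask with FAST_CACHE_ALLOC_MASK16 (which clears the low 4
    * bits) produce the size already rounded up to 16 bytes:
    *
    *   #include <cassert>
    *   #include <cstdint>
    *
    *   int main() {
    *       uint16_t size = 40;                          // word-aligned instance size
    *       uint16_t stored = (size + 0x8) & 0x1ff8;     // like setFastInstanceSize
    *       assert((stored & 0x1ff0) == 48);             // 40 rounded up to 16 bytes
    *
    *       size = 48;                                   // already 16-byte aligned
    *       stored = (size + 0x8) & 0x1ff8;
    *       assert((stored & 0x1ff0) == 48);             // stays 48, not 64
    *       return 0;
    *   }
    *******************************************************************/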
    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t *endMarker(struct bucket_t *b, uint32_t cap);

    void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
    void insert(Class cls, SEL sel, IMP imp, id receiver);

    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn, cold));
};

// classref_t is unremapped class_t*
typedef struct classref *classref_t;
/***********************************************************************
* RelativePointer<T>
* A pointer stored as an offset from the address of that offset.
*
* The target address is computed by taking the address of this struct
* and adding the offset stored within it. This is a 32-bit signed
* offset giving ±2GB of range.
**********************************************************************/
template <typename T>
struct RelativePointer: nocopy_t {
    int32_t offset;

    T get() const {
        uintptr_t base = (uintptr_t)&offset;
        uintptr_t signExtendedOffset = (uintptr_t)(intptr_t)offset;
        uintptr_t pointer = base + signExtendedOffset;
        return (T)pointer;
    }
};
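
/***********************************************************************
* Example (illustrative only): a minimal standalone sketch of the
* relative-pointer technique used by RelativePointer<T>. A 32-bit
* signed offset is stored, and the target is recovered by adding that
* offset to the address of the offset field itself. The offset here is
* computed at runtime; in the runtime it is emitted by the compiler.
*
*   #include <cassert>
*   #include <cstdint>
*
*   static int target;
*   static int32_t offset;
*
*   int main() {
*       offset = (int32_t)((intptr_t)&target - (intptr_t)&offset);
*       // Equivalent of RelativePointer<int *>::get():
*       int *p = (int *)((uintptr_t)&offset + (uintptr_t)(intptr_t)offset);
*       assert(p == &target);
*       return 0;
*   }
**********************************************************************/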
#ifdef __PTRAUTH_INTRINSICS__
#   define StubClassInitializerPtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0xc671)
#else
#   define StubClassInitializerPtrauth
#endif

struct stub_class_t {
    uintptr_t isa;
    _objc_swiftMetadataInitializer StubClassInitializerPtrauth initializer;
};

// A pointer modifier that does nothing to the pointer.
struct PointerModifierNop {
    template <typename ListType, typename T>
    static T *modify(const ListType &list, T *ptr) { return ptr; }
};
/***********************************************************************
* entsize_list_tt<Element, List, FlagMask, PointerModifier>
* Generic implementation of an array of non-fragile structs.
*
* Element is the struct type (e.g. method_t)
* List is the specialization of entsize_list_tt (e.g. method_list_t)
* FlagMask is used to stash extra bits in the entsize field
*   (e.g. method list fixup markers)
* PointerModifier is applied to the element pointers retrieved from
*   the list.
**********************************************************************/
template <typename Element, typename List, uint32_t FlagMask, typename PointerModifier = PointerModifierNop>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;
    uint32_t count;

    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    Element& getOrEnd(uint32_t i) const {
        ASSERT(i <= count);
        return *PointerModifier::modify(*this, (Element *)((uint8_t *)this + sizeof(*this) + i*entsize()));
    }
    Element& get(uint32_t i) const {
        ASSERT(i < count);
        return getOrEnd(i);
    }

    size_t byteSize() const {
        return byteSize(entsize(), count);
    }

    static size_t byteSize(uint32_t entsize, uint32_t count) {
        return sizeof(entsize_list_tt) + count*entsize;
    }

    struct iterator;
    const iterator begin() const {
        return iterator(*static_cast<const List*>(this), 0);
    }
    iterator begin() {
        return iterator(*static_cast<const List*>(this), 0);
    }
    const iterator end() const {
        return iterator(*static_cast<const List*>(this), count);
    }
    iterator end() {
        return iterator(*static_cast<const List*>(this), count);
    }
    struct iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};
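
/***********************************************************************
* Example (illustrative only): the entsize_list_tt layout above is a
* small header followed by `count` elements spaced `entsize` bytes
* apart, so element i lives at (uint8_t *)list + sizeof(header) +
* i*entsize. A minimal standalone sketch with a hypothetical two-field
* header and FlagMask == 0:
*
*   #include <cassert>
*   #include <cstdint>
*
*   struct list_header { uint32_t entsizeAndFlags; uint32_t count; };
*
*   int main() {
*       // One buffer: an 8-byte header followed by 3 elements of 8 bytes each.
*       alignas(8) uint8_t buf[sizeof(list_header) + 3 * 8] = {};
*       list_header *h = (list_header *)buf;
*       h->entsizeAndFlags = 8;   // FlagMask is 0 here, so entsize == 8
*       h->count = 3;
*
*       for (uint32_t i = 0; i < h->count; i++)
*           *(uint64_t *)(buf + sizeof(*h) + i * h->entsizeAndFlags) = i + 1;
*
*       uint64_t sum = 0;
*       for (uint32_t i = 0; i < h->count; i++)
*           sum += *(uint64_t *)(buf + sizeof(*h) + i * h->entsizeAndFlags);
*       assert(sum == 6);
*       return 0;
*   }
**********************************************************************/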
struct method_t {
    static const uint32_t smallMethodListFlag = 0x80000000;

    method_t(const method_t &other) = delete;

    // The representation of a "big" method. This is the traditional
    // representation of three pointers storing the selector, types
    // and implementation.
    struct big {
        SEL name;
        const char *types;
        MethodListIMP imp;
    };

    bool isSmall() const {
        return ((uintptr_t)this & 1) == 1;
    }

    // The representation of a "small" method. This stores three
    // relative offsets to the name, types, and implementation.
    struct small {
        RelativePointer<SEL *> name;
        RelativePointer<const char *> types;
        RelativePointer<IMP> imp;
    };

    small &small() const {
        ASSERT(isSmall());
        return *(struct small *)((uintptr_t)this & ~(uintptr_t)1);
    }

    IMP remappedImp(bool needsLock) const;
    void remapImp(IMP imp);
    objc_method_description *getSmallDescription() const;

    static const auto bigSize = sizeof(struct big);
    static const auto smallSize = sizeof(struct small);

    // The pointer modifier used with method lists. When the method
    // list contains small methods, set the bottom bit of the pointer.
    // We use that bottom bit elsewhere to distinguish between big
    // and small methods.
    struct pointer_modifier {
        template <typename ListType>
        static method_t *modify(const ListType &list, method_t *ptr) {
            if (list.flags() & smallMethodListFlag)
                return (method_t *)((uintptr_t)ptr | 1);
            return ptr;
        }
    };

    big &big() const {
        ASSERT(!isSmall());
        return *(struct big *)this;
    }

    SEL name() const {
        return isSmall() ? *small().name.get() : big().name;
    }
    const char *types() const {
        return isSmall() ? small().types.get() : big().types;
    }

    IMP imp(bool needsLock) const {
        if (isSmall()) {
            IMP imp = remappedImp(needsLock);
            if (!imp)
                imp = ptrauth_sign_unauthenticated(small().imp.get(),
                                                   ptrauth_key_function_pointer, 0);
            return imp;
        }
        return big().imp;
    }

    void setImp(IMP imp) {
        if (isSmall()) {
            remapImp(imp);
        } else {
            big().imp = imp;
        }
    }

    objc_method_description *getDescription() const {
        return isSmall() ? getSmallDescription() : (struct objc_method_description *)this;
    }

    struct SortBySELAddress :
        public std::binary_function<const struct method_t::big&,
                                    const struct method_t::big&, bool>
    {
        bool operator() (const struct method_t::big& lhs,
                         const struct method_t::big& rhs)
        { return lhs.name < rhs.name; }
    };

    method_t &operator=(const method_t &other) {
        ASSERT(!isSmall());
        big().name = other.name();
        big().types = other.types();
        big().imp = other.imp(false);
        return *this;
    }
};
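
/***********************************************************************
* Example (illustrative only): method_t distinguishes its "big" and
* "small" representations by the low bit of the method_t pointer, set
* by pointer_modifier above. A minimal standalone sketch of low-bit
* tagging on a suitably aligned object (hypothetical `entry` type):
*
*   #include <cassert>
*   #include <cstdint>
*
*   struct alignas(4) entry { int payload; };
*
*   int main() {
*       static entry e;
*       entry *tagged = (entry *)((uintptr_t)&e | 1);           // mark as "small"
*       bool isSmall = ((uintptr_t)tagged & 1) == 1;            // like method_t::isSmall()
*       entry *real = (entry *)((uintptr_t)tagged & ~(uintptr_t)1);  // like method_t::small()
*       assert(isSmall && real == &e);
*       return 0;
*   }
**********************************************************************/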
struct ivar_t {
    // *offset was originally 64-bit on some x86_64 platforms.
    // We read and write only 32 bits of it.
    // Some metadata provides all 64 bits. This is harmless for unsigned
    // little-endian values.
    // Some code uses all 64 bits. class_addIvar() over-allocates the
    // offset for their benefit.
    int32_t *offset;
    const char *name;
    const char *type;
    // alignment is sometimes -1; use alignment() instead
    uint32_t alignment_raw;
    uint32_t size;

    uint32_t alignment() const {
        if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
        return 1 << alignment_raw;
    }
};

struct property_t {
    const char *name;
    const char *attributes;
};
// Two bits of entsize are used for fixup markers.
// Reserve the top half of entsize for more flags. We never
// need entry sizes anywhere close to 64kB.
//
// Currently there is one flag defined: the small method list flag,
// method_t::smallMethodListFlag. Other flags are currently ignored.
// (NOTE: these bits are only ignored on runtimes that support small
// method lists. Older runtimes will treat them as part of the entry
// size.)
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0xffff0003, method_t::pointer_modifier> {
    bool isUniqued() const;
    bool isFixedUp() const;

    uint32_t indexOfMethod(const method_t *meth) const {
        uint32_t i =
            (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
        ASSERT(i < count);
        return i;
    }

    bool isSmallList() const {
        return flags() & method_t::smallMethodListFlag;
    }

    bool isExpectedSize() const {
        if (isSmallList())
            return entsize() == method_t::smallSize;
        else
            return entsize() == method_t::bigSize;
    }

    method_list_t *duplicate() const {
        method_list_t *dup;
        if (isSmallList()) {
            dup = (method_list_t *)calloc(byteSize(method_t::bigSize, count), 1);
            dup->entsizeAndFlags = method_t::bigSize;
        } else {
            dup = (method_list_t *)calloc(this->byteSize(), 1);
            dup->entsizeAndFlags = this->entsizeAndFlags;
        }
        dup->count = this->count;
        std::copy(begin(), end(), dup->begin());
        return dup;
    }
};
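
/***********************************************************************
* Example (illustrative only): indexOfMethod above recovers an index
* from a method_t pointer by dividing the byte offset into the list by
* entsize. A minimal standalone sketch of the same arithmetic with a
* hypothetical fixed-size record type:
*
*   #include <cassert>
*   #include <cstdint>
*
*   struct rec { void *a, *b, *c; };                 // stand-in for method_t::big
*   struct list { uint32_t entsize, count; rec recs[4]; };
*
*   int main() {
*       static list l = { (uint32_t)sizeof(rec), 4, {} };
*       rec *third = &l.recs[2];
*       uint32_t i = (uint32_t)(((uintptr_t)third - (uintptr_t)&l.recs[0]) / l.entsize);
*       assert(i == 2);
*       return 0;
*   }
**********************************************************************/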
struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
    bool containsIvar(Ivar ivar) const {
        return (ivar >= (Ivar)&*begin() && ivar < (Ivar)&*end());
    }
};

struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};

typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// Values for protocol_t->flags
#define PROTOCOL_FIXED_UP_2     (1<<31)  // must never be set by compiler
#define PROTOCOL_FIXED_UP_1     (1<<30)  // must never be set by compiler
#define PROTOCOL_IS_CANONICAL   (1<<29)  // must never be set by compiler
// Bits 0..15 are reserved for Swift's use.

#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t)
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **_extendedMethodTypes;
    const char *_demangledName;
    property_list_t *_classProperties;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;

    bool isCanonical() const;
    void clearIsCanonical();

#   define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))

    bool hasExtendedMethodTypesField() const {
        return HAS_FIELD(_extendedMethodTypes);
    }
    bool hasDemangledNameField() const {
        return HAS_FIELD(_demangledName);
    }
    bool hasClassPropertiesField() const {
        return HAS_FIELD(_classProperties);
    }

#   undef HAS_FIELD

    const char **extendedMethodTypes() const {
        return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
    }

    property_list_t *classProperties() const {
        return hasClassPropertiesField() ? _classProperties : nil;
    }
};
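
/***********************************************************************
* Example (illustrative only): protocol_t gates access to its trailing
* fields on the on-disk `size`, via the HAS_FIELD pattern above. A
* minimal standalone sketch of that versioning idiom with hypothetical
* proto_v1/proto_v2 structs:
*
*   #include <cassert>
*   #include <cstddef>
*   #include <cstdint>
*
*   struct proto_v1 { uint32_t size; const char *name; };
*   struct proto_v2 { uint32_t size; const char *name; const char **extTypes; };
*
*   static bool hasExtTypes(const proto_v1 *p) {
*       // The field exists only if the emitter wrote enough bytes to cover it.
*       return p->size >= offsetof(proto_v2, extTypes) + sizeof(const char **);
*   }
*
*   int main() {
*       proto_v1 oldOne = { sizeof(proto_v1), "Old" };
*       proto_v2 newOne = { sizeof(proto_v2), "New", nullptr };
*       assert(!hasExtTypes(&oldOne));
*       assert(hasExtTypes((const proto_v1 *)&newOne));
*       return 0;
*   }
**********************************************************************/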
struct protocol_list_t {
    // count is pointer-sized by accident.
    uintptr_t count;
    protocol_ref_t list[0];  // variable-size

    size_t byteSize() const {
        return sizeof(*this) + count*sizeof(list[0]);
    }

    protocol_list_t *duplicate() const {
        return (protocol_list_t *)memdup(this, this->byteSize());
    }

    typedef protocol_ref_t* iterator;
    typedef const protocol_ref_t* const_iterator;

    const_iterator begin() const {
        return list;
    }
    const_iterator end() const {
        return list + count;
    }
};
struct class_ro_t {
    uint32_t flags;
    uint32_t instanceStart;
    uint32_t instanceSize;

    const uint8_t * ivarLayout;

    const char * name;
    WrappedPtr<method_list_t, PtrauthStrip> baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    // This field exists only when RO_HAS_SWIFT_INITIALIZER is set.
    _objc_swiftMetadataInitializer __ptrauth_objc_method_list_imp _swiftMetadataInitializer_NEVER_USE[0];

    _objc_swiftMetadataInitializer swiftMetadataInitializer() const {
        if (flags & RO_HAS_SWIFT_INITIALIZER) {
            return _swiftMetadataInitializer_NEVER_USE[0];
        } else {
            return nil;
        }
    }

    method_list_t *baseMethods() const {
        return baseMethodList;
    }

    class_ro_t *duplicate() const {
        if (flags & RO_HAS_SWIFT_INITIALIZER) {
            size_t size = sizeof(*this) + sizeof(_swiftMetadataInitializer_NEVER_USE[0]);
            class_ro_t *ro = (class_ro_t *)memdup(this, size);
            ro->_swiftMetadataInitializer_NEVER_USE[0] = this->_swiftMetadataInitializer_NEVER_USE[0];
            return ro;
        } else {
            size_t size = sizeof(*this);
            class_ro_t *ro = (class_ro_t *)memdup(this, size);
            return ro;
        }
    }
};
/***********************************************************************
* list_array_tt<Element, List, Ptr>
* Generic implementation for metadata that can be augmented by categories.
*
* Element is the underlying metadata type (e.g. method_t)
* List is the metadata's list type (e.g. method_list_t)
* Ptr is a template applied to Element to make Element*. Useful for
*   applying qualifiers to the pointer type.
*
* A list_array_tt has one of three values:
* - empty
* - a pointer to a single list
* - an array of pointers to lists
*
* countLists/beginLists/endLists iterate the metadata lists
* count/begin/end iterate the underlying metadata elements
**********************************************************************/
template <typename Element, typename List, template<typename> class Ptr>
class list_array_tt {
927 static size_t byteSize(uint32_t count
) {
928 return sizeof(array_t
) + count
*sizeof(lists
[0]);
931 return byteSize(count
);
937 const Ptr
<List
> *lists
;
938 const Ptr
<List
> *listsEnd
;
939 typename
List::iterator m
, mEnd
;
942 iterator(const Ptr
<List
> *begin
, const Ptr
<List
> *end
)
943 : lists(begin
), listsEnd(end
)
946 m
= (*begin
)->begin();
947 mEnd
= (*begin
)->end();
951 const Element
& operator * () const {
954 Element
& operator * () {
958 bool operator != (const iterator
& rhs
) const {
959 if (lists
!= rhs
.lists
) return true;
960 if (lists
== listsEnd
) return false; // m is undefined
961 if (m
!= rhs
.m
) return true;
965 const iterator
& operator ++ () {
969 ASSERT(lists
!= listsEnd
);
971 if (lists
!= listsEnd
) {
972 m
= (*lists
)->begin();
973 mEnd
= (*lists
)->end();
983 uintptr_t arrayAndFlag
;
986 bool hasArray() const {
987 return arrayAndFlag
& 1;
990 array_t
*array() const {
991 return (array_t
*)(arrayAndFlag
& ~1);
994 void setArray(array_t
*array
) {
995 arrayAndFlag
= (uintptr_t)array
| 1;
999 for (auto cursor
= beginLists(), end
= endLists(); cursor
!= end
; cursor
++)
1004 list_array_tt() : list(nullptr) { }
1005 list_array_tt(List
*l
) : list(l
) { }
1006 list_array_tt(const list_array_tt
&other
) {
1010 list_array_tt
&operator =(const list_array_tt
&other
) {
1011 if (other
.hasArray()) {
1012 arrayAndFlag
= other
.arrayAndFlag
;
1019 uint32_t count() const {
1020 uint32_t result
= 0;
1021 for (auto lists
= beginLists(), end
= endLists();
1025 result
+= (*lists
)->count
;
1030 iterator
begin() const {
1031 return iterator(beginLists(), endLists());
1034 iterator
end() const {
1035 auto e
= endLists();
1036 return iterator(e
, e
);
1040 uint32_t countLists() {
1042 return array()->count
;
1050 const Ptr
<List
>* beginLists() const {
1052 return array()->lists
;
1058 const Ptr
<List
>* endLists() const {
1060 return array()->lists
+ array()->count
;
1068 void attachLists(List
* const * addedLists
, uint32_t addedCount
) {
1069 if (addedCount
== 0) return;
1072 // many lists -> many lists
1073 uint32_t oldCount
= array()->count
;
1074 uint32_t newCount
= oldCount
+ addedCount
;
1075 array_t
*newArray
= (array_t
*)malloc(array_t::byteSize(newCount
));
1076 newArray
->count
= newCount
;
1077 array()->count
= newCount
;
1079 for (int i
= oldCount
- 1; i
>= 0; i
--)
1080 newArray
->lists
[i
+ addedCount
] = array()->lists
[i
];
1081 for (unsigned i
= 0; i
< addedCount
; i
++)
1082 newArray
->lists
[i
] = addedLists
[i
];
1087 else if (!list
&& addedCount
== 1) {
1088 // 0 lists -> 1 list
1089 list
= addedLists
[0];
1093 // 1 list -> many lists
1094 Ptr
<List
> oldList
= list
;
1095 uint32_t oldCount
= oldList
? 1 : 0;
1096 uint32_t newCount
= oldCount
+ addedCount
;
1097 setArray((array_t
*)malloc(array_t::byteSize(newCount
)));
1098 array()->count
= newCount
;
1099 if (oldList
) array()->lists
[addedCount
] = oldList
;
1100 for (unsigned i
= 0; i
< addedCount
; i
++)
1101 array()->lists
[i
] = addedLists
[i
];
1108 for (uint32_t i
= 0; i
< array()->count
; i
++) {
1109 try_free(array()->lists
[i
]);
1118 template<typename Other
>
1119 void duplicateInto(Other
&other
) {
1121 array_t
*a
= array();
1122 other
.setArray((array_t
*)memdup(a
, a
->byteSize()));
1123 for (uint32_t i
= 0; i
< a
->count
; i
++) {
1124 other
.array()->lists
[i
] = a
->lists
[i
]->duplicate();
1127 other
.list
= list
->duplicate();
1135 DECLARE_AUTHED_PTR_TEMPLATE(method_list_t
)
1137 class method_array_t
:
1138 public list_array_tt
<method_t
, method_list_t
, method_list_t_authed_ptr
>
1140 typedef list_array_tt
<method_t
, method_list_t
, method_list_t_authed_ptr
> Super
;
1143 method_array_t() : Super() { }
1144 method_array_t(method_list_t
*l
) : Super(l
) { }
1146 const method_list_t_authed_ptr
<method_list_t
> *beginCategoryMethodLists() const {
1147 return beginLists();
1150 const method_list_t_authed_ptr
<method_list_t
> *endCategoryMethodLists(Class cls
) const;
1154 class property_array_t
:
1155 public list_array_tt
<property_t
, property_list_t
, RawPtr
>
1157 typedef list_array_tt
<property_t
, property_list_t
, RawPtr
> Super
;
1160 property_array_t() : Super() { }
1161 property_array_t(property_list_t
*l
) : Super(l
) { }
1165 class protocol_array_t
:
1166 public list_array_tt
<protocol_ref_t
, protocol_list_t
, RawPtr
>
1168 typedef list_array_tt
<protocol_ref_t
, protocol_list_t
, RawPtr
> Super
;
1171 protocol_array_t() : Super() { }
1172 protocol_array_t(protocol_list_t
*l
) : Super(l
) { }
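
/***********************************************************************
* Example (illustrative only): list_array_tt above stores either nil,
* a single list pointer, or an (array-pointer | 1) where the low bit
* is the "has array" flag, as in hasArray()/array()/setArray(). A
* minimal standalone sketch of that tagged union with hypothetical
* `list` and `array` types:
*
*   #include <cassert>
*   #include <cstdint>
*
*   struct list  { int dummy; };
*   struct array { uint32_t count; list *lists[2]; };
*
*   int main() {
*       static list l1, l2;
*       static array a = { 2, { &l1, &l2 } };
*
*       uintptr_t storage = (uintptr_t)&l1;          // state: one list, flag clear
*       assert((storage & 1) == 0);
*
*       storage = (uintptr_t)&a | 1;                 // state: array of lists, flag set
*       assert(storage & 1);                                      // hasArray()
*       assert(((array *)(storage & ~(uintptr_t)1))->count == 2); // array()->count
*       return 0;
*   }
**********************************************************************/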
struct class_rw_ext_t {
    DECLARE_AUTHED_PTR_TEMPLATE(class_ro_t)
    class_ro_t_authed_ptr<const class_ro_t> ro;
    method_array_t methods;
    property_array_t properties;
    protocol_array_t protocols;
    char *demangledName;
};
struct class_rw_t {
    // Be warned that Symbolication knows the layout of this structure.
    uint32_t flags;
#if SUPPORT_INDEXED_ISA
    uint16_t index;
#endif

    explicit_atomic<uintptr_t> ro_or_rw_ext;

    Class firstSubclass;
    Class nextSiblingClass;

private:
    using ro_or_rw_ext_t = objc::PointerUnion<const class_ro_t, class_rw_ext_t, PTRAUTH_STR("class_ro_t"), PTRAUTH_STR("class_rw_ext_t")>;
1201 const ro_or_rw_ext_t
get_ro_or_rwe() const {
1202 return ro_or_rw_ext_t
{ro_or_rw_ext
};
1205 void set_ro_or_rwe(const class_ro_t
*ro
) {
1206 ro_or_rw_ext_t
{ro
, &ro_or_rw_ext
}.storeAt(ro_or_rw_ext
, memory_order_relaxed
);
1209 void set_ro_or_rwe(class_rw_ext_t
*rwe
, const class_ro_t
*ro
) {
1210 // the release barrier is so that the class_rw_ext_t::ro initialization
1211 // is visible to lockless readers
1213 ro_or_rw_ext_t
{rwe
, &ro_or_rw_ext
}.storeAt(ro_or_rw_ext
, memory_order_release
);
1216 class_rw_ext_t
*extAlloc(const class_ro_t
*ro
, bool deep
= false);
1219 void setFlags(uint32_t set
)
1221 __c11_atomic_fetch_or((_Atomic(uint32_t) *)&flags
, set
, __ATOMIC_RELAXED
);
1224 void clearFlags(uint32_t clear
)
1226 __c11_atomic_fetch_and((_Atomic(uint32_t) *)&flags
, ~clear
, __ATOMIC_RELAXED
);
    // set and clear must not overlap
    void changeFlags(uint32_t set, uint32_t clear)
    {
        ASSERT((set & clear) == 0);

        uint32_t oldf, newf;
        do {
            oldf = flags;
            newf = (oldf | set) & ~clear;
        } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
    }
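
    /*******************************************************************
    * Example (illustrative only): changeFlags above is a classic
    * read-modify-compare-and-swap loop. A portable standalone sketch
    * of the same loop using std::atomic in place of
    * OSAtomicCompareAndSwap32Barrier:
    *
    *   #include <atomic>
    *   #include <cassert>
    *   #include <cstdint>
    *
    *   static std::atomic<uint32_t> flags{0x80000000};   // e.g. RW_REALIZED already set
    *
    *   static void changeFlags(uint32_t set, uint32_t clear) {
    *       assert((set & clear) == 0);                   // set and clear must not overlap
    *       uint32_t oldf = flags.load();
    *       uint32_t newf;
    *       do {
    *           newf = (oldf | set) & ~clear;
    *       } while (!flags.compare_exchange_weak(oldf, newf));
    *   }
    *
    *   int main() {
    *       changeFlags(1U << 25 /+ constructed +/, 1U << 26 /+ constructing +/);
    *       assert(flags.load() == (0x80000000 | (1U << 25)));
    *       return 0;
    *   }
    *
    * (The /+ ... +/ markers stand in for nested comments, which C++
    * comments cannot contain.)
    *******************************************************************/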
1241 class_rw_ext_t
*ext() const {
1242 return get_ro_or_rwe().dyn_cast
<class_rw_ext_t
*>(&ro_or_rw_ext
);
1245 class_rw_ext_t
*extAllocIfNeeded() {
1246 auto v
= get_ro_or_rwe();
1247 if (fastpath(v
.is
<class_rw_ext_t
*>())) {
1248 return v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
);
1250 return extAlloc(v
.get
<const class_ro_t
*>(&ro_or_rw_ext
));
1254 class_rw_ext_t
*deepCopy(const class_ro_t
*ro
) {
1255 return extAlloc(ro
, true);
1258 const class_ro_t
*ro() const {
1259 auto v
= get_ro_or_rwe();
1260 if (slowpath(v
.is
<class_rw_ext_t
*>())) {
1261 return v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
)->ro
;
1263 return v
.get
<const class_ro_t
*>(&ro_or_rw_ext
);
1266 void set_ro(const class_ro_t
*ro
) {
1267 auto v
= get_ro_or_rwe();
1268 if (v
.is
<class_rw_ext_t
*>()) {
1269 v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
)->ro
= ro
;
1275 const method_array_t
methods() const {
1276 auto v
= get_ro_or_rwe();
1277 if (v
.is
<class_rw_ext_t
*>()) {
1278 return v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
)->methods
;
1280 return method_array_t
{v
.get
<const class_ro_t
*>(&ro_or_rw_ext
)->baseMethods()};
1284 const property_array_t
properties() const {
1285 auto v
= get_ro_or_rwe();
1286 if (v
.is
<class_rw_ext_t
*>()) {
1287 return v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
)->properties
;
1289 return property_array_t
{v
.get
<const class_ro_t
*>(&ro_or_rw_ext
)->baseProperties
};
1293 const protocol_array_t
protocols() const {
1294 auto v
= get_ro_or_rwe();
1295 if (v
.is
<class_rw_ext_t
*>()) {
1296 return v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
)->protocols
;
1298 return protocol_array_t
{v
.get
<const class_ro_t
*>(&ro_or_rw_ext
)->baseProtocols
};
struct class_data_bits_t {
    // Values are the FAST_ flags above.
    uintptr_t bits;

    bool getBit(uintptr_t bit) const
    {
        return bits & bit;
    }

    // Atomically set the bits in `set` and clear the bits in `clear`.
    // set and clear must not overlap.
    void setAndClearBits(uintptr_t set, uintptr_t clear)
    {
        ASSERT((set & clear) == 0);
        uintptr_t oldBits, newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = (oldBits | set) & ~clear;
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }
1328 void setBits(uintptr_t set
) {
1329 __c11_atomic_fetch_or((_Atomic(uintptr_t) *)&bits
, set
, __ATOMIC_RELAXED
);
1332 void clearBits(uintptr_t clear
) {
1333 __c11_atomic_fetch_and((_Atomic(uintptr_t) *)&bits
, ~clear
, __ATOMIC_RELAXED
);
1338 class_rw_t
* data() const {
1339 return (class_rw_t
*)(bits
& FAST_DATA_MASK
);
1341 void setData(class_rw_t
*newData
)
1343 ASSERT(!data() || (newData
->flags
& (RW_REALIZING
| RW_FUTURE
)));
1344 // Set during realization or construction only. No locking needed.
1345 // Use a store-release fence because there may be concurrent
1346 // readers of data and data's contents.
1347 uintptr_t newBits
= (bits
& ~FAST_DATA_MASK
) | (uintptr_t)newData
;
1348 atomic_thread_fence(memory_order_release
);
    // Get the class's ro data, even in the presence of concurrent realization.
    // fixme this isn't really safe without a compiler barrier at least
    // and probably a memory barrier when realizeClass changes the data field
    const class_ro_t *safe_ro() {
        class_rw_t *maybe_rw = data();
        if (maybe_rw->flags & RW_REALIZED) {
            return maybe_rw->ro();
        } else {
            // maybe_rw is actually ro
            return (class_ro_t *)maybe_rw;
        }
    }
1366 void setClassArrayIndex(unsigned Idx
) {
1367 #if SUPPORT_INDEXED_ISA
1368 // 0 is unused as then we can rely on zero-initialisation from calloc.
1370 data()->index
= Idx
;
1374 unsigned classArrayIndex() {
1375 #if SUPPORT_INDEXED_ISA
1376 return data()->index
;
1383 return isSwiftStable() || isSwiftLegacy();
1386 bool isSwiftStable() {
1387 return getBit(FAST_IS_SWIFT_STABLE
);
1389 void setIsSwiftStable() {
1390 setAndClearBits(FAST_IS_SWIFT_STABLE
, FAST_IS_SWIFT_LEGACY
);
1393 bool isSwiftLegacy() {
1394 return getBit(FAST_IS_SWIFT_LEGACY
);
1396 void setIsSwiftLegacy() {
1397 setAndClearBits(FAST_IS_SWIFT_LEGACY
, FAST_IS_SWIFT_STABLE
);
1400 // fixme remove this once the Swift runtime uses the stable bits
1401 bool isSwiftStable_ButAllowLegacyForNow() {
1402 return isAnySwift();
1405 _objc_swiftMetadataInitializer
swiftMetadataInitializer() {
1406 // This function is called on un-realized classes without
1407 // holding any locks.
1408 // Beware of races with other realizers.
1409 return safe_ro()->swiftMetadataInitializer();
struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;             // formerly cache pointer and vtable
    class_data_bits_t bits;    // class_rw_t * plus custom rr/alloc flags

    class_rw_t *data() const {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }
1427 void setInfo(uint32_t set
) {
1428 ASSERT(isFuture() || isRealized());
1429 data()->setFlags(set
);
1432 void clearInfo(uint32_t clear
) {
1433 ASSERT(isFuture() || isRealized());
1434 data()->clearFlags(clear
);
1437 // set and clear must not overlap
1438 void changeInfo(uint32_t set
, uint32_t clear
) {
1439 ASSERT(isFuture() || isRealized());
1440 ASSERT((set
& clear
) == 0);
1441 data()->changeFlags(set
, clear
);
1444 #if FAST_HAS_DEFAULT_RR
1445 bool hasCustomRR() const {
1446 return !bits
.getBit(FAST_HAS_DEFAULT_RR
);
1448 void setHasDefaultRR() {
1449 bits
.setBits(FAST_HAS_DEFAULT_RR
);
1451 void setHasCustomRR() {
1452 bits
.clearBits(FAST_HAS_DEFAULT_RR
);
1455 bool hasCustomRR() const {
1456 return !(bits
.data()->flags
& RW_HAS_DEFAULT_RR
);
1458 void setHasDefaultRR() {
1459 bits
.data()->setFlags(RW_HAS_DEFAULT_RR
);
1461 void setHasCustomRR() {
1462 bits
.data()->clearFlags(RW_HAS_DEFAULT_RR
);
1466 #if FAST_CACHE_HAS_DEFAULT_AWZ
1467 bool hasCustomAWZ() const {
1468 return !cache
.getBit(FAST_CACHE_HAS_DEFAULT_AWZ
);
1470 void setHasDefaultAWZ() {
1471 cache
.setBit(FAST_CACHE_HAS_DEFAULT_AWZ
);
1473 void setHasCustomAWZ() {
1474 cache
.clearBit(FAST_CACHE_HAS_DEFAULT_AWZ
);
1477 bool hasCustomAWZ() const {
1478 return !(bits
.data()->flags
& RW_HAS_DEFAULT_AWZ
);
1480 void setHasDefaultAWZ() {
1481 bits
.data()->setFlags(RW_HAS_DEFAULT_AWZ
);
1483 void setHasCustomAWZ() {
1484 bits
.data()->clearFlags(RW_HAS_DEFAULT_AWZ
);
1488 #if FAST_CACHE_HAS_DEFAULT_CORE
1489 bool hasCustomCore() const {
1490 return !cache
.getBit(FAST_CACHE_HAS_DEFAULT_CORE
);
1492 void setHasDefaultCore() {
1493 return cache
.setBit(FAST_CACHE_HAS_DEFAULT_CORE
);
1495 void setHasCustomCore() {
1496 return cache
.clearBit(FAST_CACHE_HAS_DEFAULT_CORE
);
1499 bool hasCustomCore() const {
1500 return !(bits
.data()->flags
& RW_HAS_DEFAULT_CORE
);
1502 void setHasDefaultCore() {
1503 bits
.data()->setFlags(RW_HAS_DEFAULT_CORE
);
1505 void setHasCustomCore() {
1506 bits
.data()->clearFlags(RW_HAS_DEFAULT_CORE
);
1510 #if FAST_CACHE_HAS_CXX_CTOR
1512 ASSERT(isRealized());
1513 return cache
.getBit(FAST_CACHE_HAS_CXX_CTOR
);
1515 void setHasCxxCtor() {
1516 cache
.setBit(FAST_CACHE_HAS_CXX_CTOR
);
1520 ASSERT(isRealized());
1521 return bits
.data()->flags
& RW_HAS_CXX_CTOR
;
1523 void setHasCxxCtor() {
1524 bits
.data()->setFlags(RW_HAS_CXX_CTOR
);
1528 #if FAST_CACHE_HAS_CXX_DTOR
1530 ASSERT(isRealized());
1531 return cache
.getBit(FAST_CACHE_HAS_CXX_DTOR
);
1533 void setHasCxxDtor() {
1534 cache
.setBit(FAST_CACHE_HAS_CXX_DTOR
);
1538 ASSERT(isRealized());
1539 return bits
.data()->flags
& RW_HAS_CXX_DTOR
;
1541 void setHasCxxDtor() {
1542 bits
.data()->setFlags(RW_HAS_CXX_DTOR
);
1546 #if FAST_CACHE_REQUIRES_RAW_ISA
1547 bool instancesRequireRawIsa() {
1548 return cache
.getBit(FAST_CACHE_REQUIRES_RAW_ISA
);
1550 void setInstancesRequireRawIsa() {
1551 cache
.setBit(FAST_CACHE_REQUIRES_RAW_ISA
);
1553 #elif SUPPORT_NONPOINTER_ISA
1554 bool instancesRequireRawIsa() {
1555 return bits
.data()->flags
& RW_REQUIRES_RAW_ISA
;
1557 void setInstancesRequireRawIsa() {
1558 bits
.data()->setFlags(RW_REQUIRES_RAW_ISA
);
1561 bool instancesRequireRawIsa() {
1564 void setInstancesRequireRawIsa() {
1568 void setInstancesRequireRawIsaRecursively(bool inherited
= false);
1569 void printInstancesRequireRawIsa(bool inherited
);
1571 bool canAllocNonpointer() {
1572 ASSERT(!isFuture());
1573 return !instancesRequireRawIsa();
1576 bool isSwiftStable() {
1577 return bits
.isSwiftStable();
1580 bool isSwiftLegacy() {
1581 return bits
.isSwiftLegacy();
1585 return bits
.isAnySwift();
1588 bool isSwiftStable_ButAllowLegacyForNow() {
1589 return bits
.isSwiftStable_ButAllowLegacyForNow();
    bool isStubClass() const {
        uintptr_t isa = (uintptr_t)isaBits();
        return 1 <= isa && isa < 16;
    }

    // Swift stable ABI built for old deployment targets looks weird.
    // The is-legacy bit is set for compatibility with old libobjc.
    // We are on a "new" deployment target so we need to rewrite that bit.
    // These stable-with-legacy-bit classes are distinguished from real
    // legacy classes using another bit in the Swift data
    // (ClassFlags::IsSwiftPreStableABI)

    bool isUnfixedBackwardDeployingStableSwift() {
        // Only classes marked as Swift legacy need apply.
        if (!bits.isSwiftLegacy()) return false;

        // Check the true legacy vs stable distinguisher.
        // The low bit of Swift's ClassFlags is SET for true legacy
        // and UNSET for stable pretending to be legacy.
        uint32_t swiftClassFlags = *(uint32_t *)(&bits + 1);
        bool isActuallySwiftLegacy = bool(swiftClassFlags & 1);
        return !isActuallySwiftLegacy;
    }

    void fixupBackwardDeployingStableSwift() {
        if (isUnfixedBackwardDeployingStableSwift()) {
            // Class really is stable Swift, pretending to be pre-stable.
            bits.setIsSwiftStable();
        }
    }
1624 _objc_swiftMetadataInitializer
swiftMetadataInitializer() {
1625 return bits
.swiftMetadataInitializer();
1628 // Return YES if the class's ivars are managed by ARC,
1629 // or the class is MRC but has ARC-style weak ivars.
1630 bool hasAutomaticIvars() {
1631 return data()->ro()->flags
& (RO_IS_ARC
| RO_HAS_WEAK_WITHOUT_ARC
);
1634 // Return YES if the class's ivars are managed by ARC.
1636 return data()->ro()->flags
& RO_IS_ARC
;
1640 bool forbidsAssociatedObjects() {
1641 return (data()->flags
& RW_FORBIDS_ASSOCIATED_OBJECTS
);
1644 #if SUPPORT_NONPOINTER_ISA
1645 // Tracked in non-pointer isas; not tracked otherwise
1647 bool instancesHaveAssociatedObjects() {
1648 // this may be an unrealized future class in the CF-bridged case
1649 ASSERT(isFuture() || isRealized());
1650 return data()->flags
& RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS
;
1653 void setInstancesHaveAssociatedObjects() {
1654 // this may be an unrealized future class in the CF-bridged case
1655 ASSERT(isFuture() || isRealized());
1656 setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS
);
1660 bool shouldGrowCache() {
1664 void setShouldGrowCache(bool) {
1665 // fixme good or bad for memory use?
1668 bool isInitializing() {
1669 return getMeta()->data()->flags
& RW_INITIALIZING
;
1672 void setInitializing() {
1673 ASSERT(!isMetaClass());
1674 ISA()->setInfo(RW_INITIALIZING
);
1677 bool isInitialized() {
1678 return getMeta()->data()->flags
& RW_INITIALIZED
;
1681 void setInitialized();
1684 ASSERT(isRealized());
1685 return true; // any class registered for +load is definitely loadable
1688 IMP
getLoadMethod();
1690 // Locking: To prevent concurrent realization, hold runtimeLock.
1691 bool isRealized() const {
1692 return !isStubClass() && (data()->flags
& RW_REALIZED
);
1695 // Returns true if this is an unrealized future class.
1696 // Locking: To prevent concurrent realization, hold runtimeLock.
1697 bool isFuture() const {
1698 return data()->flags
& RW_FUTURE
;
1701 bool isMetaClass() {
1703 ASSERT(isRealized());
1705 return cache
.getBit(FAST_CACHE_META
);
1707 return data()->flags
& RW_META
;
1711 // Like isMetaClass, but also valid on un-realized classes
1712 bool isMetaClassMaybeUnrealized() {
1713 static_assert(offsetof(class_rw_t
, flags
) == offsetof(class_ro_t
, flags
), "flags alias");
1714 static_assert(RO_META
== RW_META
, "flags alias");
1715 return data()->flags
& RW_META
;
1718 // NOT identical to this->ISA when this is a metaclass
1720 if (isMetaClass()) return (Class
)this;
1721 else return this->ISA();
1724 bool isRootClass() {
1725 return superclass
== nil
;
1727 bool isRootMetaclass() {
1728 return ISA() == (Class
)this;
1731 const char *mangledName() {
1732 // fixme can't assert locks here
1735 if (isRealized() || isFuture()) {
1736 return data()->ro()->name
;
1738 return ((const class_ro_t
*)data())->name
;
1742 const char *demangledName(bool needsLock
);
1743 const char *nameForLogging();
1745 // May be unaligned depending on class's ivars.
1746 uint32_t unalignedInstanceStart() const {
1747 ASSERT(isRealized());
1748 return data()->ro()->instanceStart
;
1751 // Class's instance start rounded up to a pointer-size boundary.
1752 // This is used for ARC layout bitmaps.
1753 uint32_t alignedInstanceStart() const {
1754 return word_align(unalignedInstanceStart());
1757 // May be unaligned depending on class's ivars.
1758 uint32_t unalignedInstanceSize() const {
1759 ASSERT(isRealized());
1760 return data()->ro()->instanceSize
;
1763 // Class's ivar size rounded up to a pointer-size boundary.
1764 uint32_t alignedInstanceSize() const {
1765 return word_align(unalignedInstanceSize());
    size_t instanceSize(size_t extraBytes) const {
        if (fastpath(cache.hasFastInstanceSize(extraBytes))) {
            return cache.fastInstanceSize(extraBytes);
        }

        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }
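
    /*******************************************************************
    * Example (illustrative only): instanceSize above is the aligned
    * ivar size plus any extra bytes, floored at 16 because CF requires
    * all objects to be at least 16 bytes. A minimal standalone sketch
    * of that arithmetic:
    *
    *   #include <cassert>
    *   #include <cstddef>
    *
    *   static size_t instanceSize(size_t alignedIvarSize, size_t extraBytes) {
    *       size_t size = alignedIvarSize + extraBytes;
    *       if (size < 16) size = 16;                    // CF minimum object size
    *       return size;
    *   }
    *
    *   int main() {
    *       assert(instanceSize(8, 0) == 16);            // tiny object padded up
    *       assert(instanceSize(40, 16) == 56);          // extra bytes just add on
    *       return 0;
    *   }
    *******************************************************************/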
1779 void setInstanceSize(uint32_t newSize
) {
1780 ASSERT(isRealized());
1781 ASSERT(data()->flags
& RW_REALIZING
);
1782 auto ro
= data()->ro();
1783 if (newSize
!= ro
->instanceSize
) {
1784 ASSERT(data()->flags
& RW_COPIED_RO
);
1785 *const_cast<uint32_t *>(&ro
->instanceSize
) = newSize
;
1787 cache
.setFastInstanceSize(newSize
);
1790 void chooseClassArrayIndex();
1792 void setClassArrayIndex(unsigned Idx
) {
1793 bits
.setClassArrayIndex(Idx
);
1796 unsigned classArrayIndex() {
1797 return bits
.classArrayIndex();
1802 struct swift_class_t
: objc_class
{
1804 uint32_t instanceAddressOffset
;
1805 uint32_t instanceSize
;
1806 uint16_t instanceAlignMask
;
1810 uint32_t classAddressOffset
;
1814 void *baseAddress() {
1815 return (void *)((uint8_t *)this - classAddressOffset
);
1823 WrappedPtr
<method_list_t
, PtrauthStrip
> instanceMethods
;
1824 WrappedPtr
<method_list_t
, PtrauthStrip
> classMethods
;
1825 struct protocol_list_t
*protocols
;
1826 struct property_list_t
*instanceProperties
;
1827 // Fields below this point are not always present on disk.
1828 struct property_list_t
*_classProperties
;
1830 method_list_t
*methodsForMeta(bool isMeta
) {
1831 if (isMeta
) return classMethods
;
1832 else return instanceMethods
;
1835 property_list_t
*propertiesForMeta(bool isMeta
, struct header_info
*hi
);
1837 protocol_list_t
*protocolsForMeta(bool isMeta
) {
1838 if (isMeta
) return nullptr;
1839 else return protocols
;
struct objc_super2 {
    id receiver;
    Class current_class;
};

struct message_ref_t {
    IMP imp;
    SEL sel;
};

extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);

#endif