2 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 #ifndef _OBJC_RUNTIME_NEW_H
25 #define _OBJC_RUNTIME_NEW_H
27 // class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
28 // The extra bits are optimized for the retain/release and alloc/dealloc paths.
30 // Values for class_ro_t->flags
31 // These are emitted by the compiler and are part of the ABI.
32 // Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
33 // class is a metaclass
34 #define RO_META (1<<0)
35 // class is a root class
36 #define RO_ROOT (1<<1)
37 // class has .cxx_construct/destruct implementations
38 #define RO_HAS_CXX_STRUCTORS (1<<2)
39 // class has +load implementation
40 // #define RO_HAS_LOAD_METHOD (1<<3)
41 // class has visibility=hidden set
42 #define RO_HIDDEN (1<<4)
43 // class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
44 #define RO_EXCEPTION (1<<5)
45 // class has ro field for Swift metadata initializer callback
46 #define RO_HAS_SWIFT_INITIALIZER (1<<6)
47 // class compiled with ARC
48 #define RO_IS_ARC (1<<7)
49 // class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
50 #define RO_HAS_CXX_DTOR_ONLY (1<<8)
51 // class is not ARC but has ARC-style weak ivar layout
52 #define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
53 // class does not allow associated objects on instances
54 #define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)
56 // class is in an unloadable bundle - must never be set by compiler
57 #define RO_FROM_BUNDLE (1<<29)
58 // class is unrealized future class - must never be set by compiler
59 #define RO_FUTURE (1<<30)
60 // class is realized - must never be set by compiler
61 #define RO_REALIZED (1<<31)
63 // Values for class_rw_t->flags
64 // These are not emitted by the compiler and are never used in class_ro_t.
65 // Their presence should be considered in future ABI versions.
66 // class_t->data is class_rw_t, not class_ro_t
67 #define RW_REALIZED (1<<31)
68 // class is unresolved future class
69 #define RW_FUTURE (1<<30)
70 // class is initialized
71 #define RW_INITIALIZED (1<<29)
72 // class is initializing
73 #define RW_INITIALIZING (1<<28)
74 // class_rw_t->ro is heap copy of class_ro_t
75 #define RW_COPIED_RO (1<<27)
76 // class allocated but not yet registered
77 #define RW_CONSTRUCTING (1<<26)
78 // class allocated and registered
79 #define RW_CONSTRUCTED (1<<25)
80 // available for use; was RW_FINALIZE_ON_MAIN_THREAD
81 // #define RW_24 (1<<24)
82 // class +load has been called
83 #define RW_LOADED (1<<23)
84 #if !SUPPORT_NONPOINTER_ISA
85 // class instances may have associative references
86 #define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
88 // class has instance-specific GC layout
89 #define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
90 // class does not allow associated objects on its instances
91 #define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
92 // class has started realizing but not yet completed it
93 #define RW_REALIZING (1<<19)
95 // NOTE: MORE RW_ FLAGS DEFINED BELOW
98 // Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
99 // or class_t->bits (FAST_*).
101 // FAST_* and FAST_CACHE_* are stored on the class, reducing pointer indirection.
105 // class is a Swift class from the pre-stable Swift ABI
106 #define FAST_IS_SWIFT_LEGACY (1UL<<0)
107 // class is a Swift class from the stable Swift ABI
108 #define FAST_IS_SWIFT_STABLE (1UL<<1)
109 // class or superclass has default retain/release/autorelease/retainCount/
110 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
111 #define FAST_HAS_DEFAULT_RR (1UL<<2)
113 #define FAST_DATA_MASK 0x00007ffffffffff8UL
116 // class or superclass has .cxx_construct/.cxx_destruct implementation
117 // FAST_CACHE_HAS_CXX_DTOR is the first bit so that setting it in
118 // isa_t::has_cxx_dtor is a single bfi
119 #define FAST_CACHE_HAS_CXX_DTOR (1<<0)
120 #define FAST_CACHE_HAS_CXX_CTOR (1<<1)
121 // Denormalized RO_META to avoid an indirection
122 #define FAST_CACHE_META (1<<2)
124 // Denormalized RO_META to avoid an indirection
125 #define FAST_CACHE_META (1<<0)
126 // class or superclass has .cxx_construct/.cxx_destruct implementation
127 // FAST_CACHE_HAS_CXX_DTOR is chosen to alias with isa_t::has_cxx_dtor
128 #define FAST_CACHE_HAS_CXX_CTOR (1<<1)
129 #define FAST_CACHE_HAS_CXX_DTOR (1<<2)
132 // Fast Alloc fields:
133 // This stores the word-aligned size of instances + "ALLOC_DELTA16",
134 // or 0 if the instance size doesn't fit.
136 // These bits occupy the same bits as the instance size, so that
137 // the size can be extracted with a simple mask operation.
139 // FAST_CACHE_ALLOC_MASK16 allows extracting the instance size rounded
140 // up to the next 16 byte boundary, which is a fastpath for
141 // _objc_rootAllocWithZone()
142 #define FAST_CACHE_ALLOC_MASK 0x1ff8
143 #define FAST_CACHE_ALLOC_MASK16 0x1ff0
144 #define FAST_CACHE_ALLOC_DELTA16 0x0008
146 // class's instances require raw isa
147 #define FAST_CACHE_REQUIRES_RAW_ISA (1<<13)
148 // class or superclass has default alloc/allocWithZone: implementation
149 // Note this is stored in the metaclass.
150 #define FAST_CACHE_HAS_DEFAULT_AWZ (1<<14)
151 // class or superclass has default new/self/class/respondsToSelector/isKindOfClass
152 #define FAST_CACHE_HAS_DEFAULT_CORE (1<<15)
156 // class or superclass has .cxx_construct implementation
157 #define RW_HAS_CXX_CTOR (1<<18)
158 // class or superclass has .cxx_destruct implementation
159 #define RW_HAS_CXX_DTOR (1<<17)
160 // class or superclass has default alloc/allocWithZone: implementation
161 // Note this is stored in the metaclass.
162 #define RW_HAS_DEFAULT_AWZ (1<<16)
163 // class's instances require raw isa
164 #if SUPPORT_NONPOINTER_ISA
165 #define RW_REQUIRES_RAW_ISA (1<<15)
167 // class or superclass has default retain/release/autorelease/retainCount/
168 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
169 #define RW_HAS_DEFAULT_RR (1<<14)
170 // class or superclass has default new/self/class/respondsToSelector/isKindOfClass
171 #define RW_HAS_DEFAULT_CORE (1<<13)
173 // class is a Swift class from the pre-stable Swift ABI
174 #define FAST_IS_SWIFT_LEGACY (1UL<<0)
175 // class is a Swift class from the stable Swift ABI
176 #define FAST_IS_SWIFT_STABLE (1UL<<1)
178 #define FAST_DATA_MASK 0xfffffffcUL
182 // The Swift ABI requires that these bits be defined like this on all platforms.
183 static_assert(FAST_IS_SWIFT_LEGACY
== 1, "resistance is futile");
184 static_assert(FAST_IS_SWIFT_STABLE
== 2, "resistance is futile");
188 typedef uint32_t mask_t
; // x86_64 & arm64 asm are less efficient with 16-bits
190 typedef uint16_t mask_t
;
192 typedef uintptr_t SEL
;
194 struct swift_class_t
;
// Tag used as a template parameter to bucket_t::set (see below):
// selects whether the bucket store must be performed atomically.
enum Atomicity {
    Atomic = true,
    NotAtomic = false
};
// Tag used as a template parameter to bucket_t::set (see below):
// selects whether the IMP being stored is already encoded or raw.
enum IMPEncoding {
    Encoded = true,
    Raw = false
};
201 // IMP-first is better for arm64e ptrauth and no worse for arm64.
202 // SEL-first is better for armv7* and i386 and x86_64.
204 explicit_atomic
<uintptr_t> _imp
;
205 explicit_atomic
<SEL
> _sel
;
207 explicit_atomic
<SEL
> _sel
;
208 explicit_atomic
<uintptr_t> _imp
;
211 // Compute the ptrauth signing modifier from &_imp, newSel, and cls.
212 uintptr_t modifierForSEL(SEL newSel
, Class cls
) const {
213 return (uintptr_t)&_imp
^ (uintptr_t)newSel
^ (uintptr_t)cls
;
216 // Sign newImp, with &_imp, newSel, and cls as modifiers.
217 uintptr_t encodeImp(IMP newImp
, SEL newSel
, Class cls
) const {
218 if (!newImp
) return 0;
219 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
221 ptrauth_auth_and_resign(newImp
,
222 ptrauth_key_function_pointer
, 0,
223 ptrauth_key_process_dependent_code
,
224 modifierForSEL(newSel
, cls
));
225 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
226 return (uintptr_t)newImp
^ (uintptr_t)cls
;
227 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
228 return (uintptr_t)newImp
;
230 #error Unknown method cache IMP encoding.
235 inline SEL
sel() const { return _sel
.load(memory_order::memory_order_relaxed
); }
237 inline IMP
imp(Class cls
) const {
238 uintptr_t imp
= _imp
.load(memory_order::memory_order_relaxed
);
239 if (!imp
) return nil
;
240 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
241 SEL sel
= _sel
.load(memory_order::memory_order_relaxed
);
243 ptrauth_auth_and_resign((const void *)imp
,
244 ptrauth_key_process_dependent_code
,
245 modifierForSEL(sel
, cls
),
246 ptrauth_key_function_pointer
, 0);
247 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
248 return (IMP
)(imp
^ (uintptr_t)cls
);
249 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
252 #error Unknown method cache IMP encoding.
256 template <Atomicity
, IMPEncoding
>
257 void set(SEL newSel
, IMP newImp
, Class cls
);
262 #if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
263 explicit_atomic
<struct bucket_t
*> _buckets
;
264 explicit_atomic
<mask_t
> _mask
;
265 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
266 explicit_atomic
<uintptr_t> _maskAndBuckets
;
269 // How much the mask is shifted by.
270 static constexpr uintptr_t maskShift
= 48;
272 // Additional bits after the mask which must be zero. msgSend
273 // takes advantage of these additional bits to construct the value
274 // `mask << 4` from `_maskAndBuckets` in a single instruction.
275 static constexpr uintptr_t maskZeroBits
= 4;
277 // The largest mask value we can store.
278 static constexpr uintptr_t maxMask
= ((uintptr_t)1 << (64 - maskShift
)) - 1;
280 // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
281 static constexpr uintptr_t bucketsMask
= ((uintptr_t)1 << (maskShift
- maskZeroBits
)) - 1;
283 // Ensure we have enough bits for the buckets pointer.
284 static_assert(bucketsMask
>= MACH_VM_MAX_ADDRESS
, "Bucket field doesn't have enough bits for arbitrary pointers.");
285 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
286 // _maskAndBuckets stores the mask shift in the low 4 bits, and
287 // the buckets pointer in the remainder of the value. The mask
288 // shift is the value where (0xffff >> shift) produces the correct
289 // mask. This is equal to 16 - log2(cache_size).
290 explicit_atomic
<uintptr_t> _maskAndBuckets
;
293 static constexpr uintptr_t maskBits
= 4;
294 static constexpr uintptr_t maskMask
= (1 << maskBits
) - 1;
295 static constexpr uintptr_t bucketsMask
= ~maskMask
;
297 #error Unknown cache mask storage type.
306 static bucket_t
*emptyBuckets();
308 struct bucket_t
*buckets();
311 void incrementOccupied();
312 void setBucketsAndMask(struct bucket_t
*newBuckets
, mask_t newMask
);
313 void initializeToEmpty();
316 bool isConstantEmptyCache();
320 bool getBit(uint16_t flags
) const {
321 return _flags
& flags
;
323 void setBit(uint16_t set
) {
324 __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags
, set
, __ATOMIC_RELAXED
);
326 void clearBit(uint16_t clear
) {
327 __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags
, ~clear
, __ATOMIC_RELAXED
);
331 #if FAST_CACHE_ALLOC_MASK
332 bool hasFastInstanceSize(size_t extra
) const
334 if (__builtin_constant_p(extra
) && extra
== 0) {
335 return _flags
& FAST_CACHE_ALLOC_MASK16
;
337 return _flags
& FAST_CACHE_ALLOC_MASK
;
340 size_t fastInstanceSize(size_t extra
) const
342 ASSERT(hasFastInstanceSize(extra
));
344 if (__builtin_constant_p(extra
) && extra
== 0) {
345 return _flags
& FAST_CACHE_ALLOC_MASK16
;
347 size_t size
= _flags
& FAST_CACHE_ALLOC_MASK
;
348 // remove the FAST_CACHE_ALLOC_DELTA16 that was added
349 // by setFastInstanceSize
350 return align16(size
+ extra
- FAST_CACHE_ALLOC_DELTA16
);
354 void setFastInstanceSize(size_t newSize
)
356 // Set during realization or construction only. No locking needed.
357 uint16_t newBits
= _flags
& ~FAST_CACHE_ALLOC_MASK
;
360 // Adding FAST_CACHE_ALLOC_DELTA16 allows for FAST_CACHE_ALLOC_MASK16
361 // to yield the proper 16byte aligned allocation size with a single mask
362 sizeBits
= word_align(newSize
) + FAST_CACHE_ALLOC_DELTA16
;
363 sizeBits
&= FAST_CACHE_ALLOC_MASK
;
364 if (newSize
<= sizeBits
) {
370 bool hasFastInstanceSize(size_t extra
) const {
373 size_t fastInstanceSize(size_t extra
) const {
376 void setFastInstanceSize(size_t extra
) {
381 static size_t bytesForCapacity(uint32_t cap
);
382 static struct bucket_t
* endMarker(struct bucket_t
*b
, uint32_t cap
);
384 void reallocate(mask_t oldCapacity
, mask_t newCapacity
, bool freeOld
);
385 void insert(Class cls
, SEL sel
, IMP imp
, id receiver
);
387 static void bad_cache(id receiver
, SEL sel
, Class isa
) __attribute__((noreturn
, cold
));
391 // classref_t is unremapped class_t*
392 typedef struct classref
* classref_t
;
395 #ifdef __PTRAUTH_INTRINSICS__
396 # define StubClassInitializerPtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0xc671)
398 # define StubClassInitializerPtrauth
400 struct stub_class_t
{
402 _objc_swiftMetadataInitializer StubClassInitializerPtrauth initializer
;
405 /***********************************************************************
406 * entsize_list_tt<Element, List, FlagMask>
407 * Generic implementation of an array of non-fragile structs.
409 * Element is the struct type (e.g. method_t)
410 * List is the specialization of entsize_list_tt (e.g. method_list_t)
411 * FlagMask is used to stash extra bits in the entsize field
412 * (e.g. method list fixup markers)
413 **********************************************************************/
414 template <typename Element
, typename List
, uint32_t FlagMask
>
415 struct entsize_list_tt
{
416 uint32_t entsizeAndFlags
;
420 uint32_t entsize() const {
421 return entsizeAndFlags
& ~FlagMask
;
423 uint32_t flags() const {
424 return entsizeAndFlags
& FlagMask
;
427 Element
& getOrEnd(uint32_t i
) const {
429 return *(Element
*)((uint8_t *)&first
+ i
*entsize());
431 Element
& get(uint32_t i
) const {
436 size_t byteSize() const {
437 return byteSize(entsize(), count
);
440 static size_t byteSize(uint32_t entsize
, uint32_t count
) {
441 return sizeof(entsize_list_tt
) + (count
-1)*entsize
;
444 List
*duplicate() const {
445 auto *dup
= (List
*)calloc(this->byteSize(), 1);
446 dup
->entsizeAndFlags
= this->entsizeAndFlags
;
447 dup
->count
= this->count
;
448 std::copy(begin(), end(), dup
->begin());
453 const iterator
begin() const {
454 return iterator(*static_cast<const List
*>(this), 0);
457 return iterator(*static_cast<const List
*>(this), 0);
459 const iterator
end() const {
460 return iterator(*static_cast<const List
*>(this), count
);
463 return iterator(*static_cast<const List
*>(this), count
);
468 uint32_t index
; // keeping track of this saves a divide in operator-
471 typedef std::random_access_iterator_tag iterator_category
;
472 typedef Element value_type
;
473 typedef ptrdiff_t difference_type
;
474 typedef Element
* pointer
;
475 typedef Element
& reference
;
479 iterator(const List
& list
, uint32_t start
= 0)
480 : entsize(list
.entsize())
482 , element(&list
.getOrEnd(start
))
485 const iterator
& operator += (ptrdiff_t delta
) {
486 element
= (Element
*)((uint8_t *)element
+ delta
*entsize
);
487 index
+= (int32_t)delta
;
490 const iterator
& operator -= (ptrdiff_t delta
) {
491 element
= (Element
*)((uint8_t *)element
- delta
*entsize
);
492 index
-= (int32_t)delta
;
495 const iterator
operator + (ptrdiff_t delta
) const {
496 return iterator(*this) += delta
;
498 const iterator
operator - (ptrdiff_t delta
) const {
499 return iterator(*this) -= delta
;
502 iterator
& operator ++ () { *this += 1; return *this; }
503 iterator
& operator -- () { *this -= 1; return *this; }
504 iterator
operator ++ (int) {
505 iterator
result(*this); *this += 1; return result
;
507 iterator
operator -- (int) {
508 iterator
result(*this); *this -= 1; return result
;
511 ptrdiff_t operator - (const iterator
& rhs
) const {
512 return (ptrdiff_t)this->index
- (ptrdiff_t)rhs
.index
;
515 Element
& operator * () const { return *element
; }
516 Element
* operator -> () const { return element
; }
518 operator Element
& () const { return *element
; }
520 bool operator == (const iterator
& rhs
) const {
521 return this->element
== rhs
.element
;
523 bool operator != (const iterator
& rhs
) const {
524 return this->element
!= rhs
.element
;
527 bool operator < (const iterator
& rhs
) const {
528 return this->element
< rhs
.element
;
530 bool operator > (const iterator
& rhs
) const {
531 return this->element
> rhs
.element
;
542 struct SortBySELAddress
:
543 public std::binary_function
<const method_t
&,
544 const method_t
&, bool>
546 bool operator() (const method_t
& lhs
,
548 { return lhs
.name
< rhs
.name
; }
554 // *offset was originally 64-bit on some x86_64 platforms.
555 // We read and write only 32 bits of it.
556 // Some metadata provides all 64 bits. This is harmless for unsigned
557 // little-endian values.
558 // Some code uses all 64 bits. class_addIvar() over-allocates the
559 // offset for their benefit.
564 // alignment is sometimes -1; use alignment() instead
565 uint32_t alignment_raw
;
568 uint32_t alignment() const {
569 if (alignment_raw
== ~(uint32_t)0) return 1U << WORD_SHIFT
;
570 return 1 << alignment_raw
;
576 const char *attributes
;
579 // Two bits of entsize are used for fixup markers.
580 struct method_list_t
: entsize_list_tt
<method_t
, method_list_t
, 0x3> {
581 bool isUniqued() const;
582 bool isFixedUp() const;
585 uint32_t indexOfMethod(const method_t
*meth
) const {
587 (uint32_t)(((uintptr_t)meth
- (uintptr_t)this) / entsize());
593 struct ivar_list_t
: entsize_list_tt
<ivar_t
, ivar_list_t
, 0> {
594 bool containsIvar(Ivar ivar
) const {
595 return (ivar
>= (Ivar
)&*begin() && ivar
< (Ivar
)&*end());
599 struct property_list_t
: entsize_list_tt
<property_t
, property_list_t
, 0> {
603 typedef uintptr_t protocol_ref_t
; // protocol_t *, but unremapped
605 // Values for protocol_t->flags
606 #define PROTOCOL_FIXED_UP_2 (1<<31) // must never be set by compiler
607 #define PROTOCOL_FIXED_UP_1 (1<<30) // must never be set by compiler
608 #define PROTOCOL_IS_CANONICAL (1<<29) // must never be set by compiler
609 // Bits 0..15 are reserved for Swift's use.
611 #define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
613 struct protocol_t
: objc_object
{
614 const char *mangledName
;
615 struct protocol_list_t
*protocols
;
616 method_list_t
*instanceMethods
;
617 method_list_t
*classMethods
;
618 method_list_t
*optionalInstanceMethods
;
619 method_list_t
*optionalClassMethods
;
620 property_list_t
*instanceProperties
;
621 uint32_t size
; // sizeof(protocol_t)
623 // Fields below this point are not always present on disk.
624 const char **_extendedMethodTypes
;
625 const char *_demangledName
;
626 property_list_t
*_classProperties
;
628 const char *demangledName();
630 const char *nameForLogging() {
631 return demangledName();
634 bool isFixedUp() const;
637 bool isCanonical() const;
638 void clearIsCanonical();
640 # define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))
642 bool hasExtendedMethodTypesField() const {
643 return HAS_FIELD(_extendedMethodTypes
);
645 bool hasDemangledNameField() const {
646 return HAS_FIELD(_demangledName
);
648 bool hasClassPropertiesField() const {
649 return HAS_FIELD(_classProperties
);
654 const char **extendedMethodTypes() const {
655 return hasExtendedMethodTypesField() ? _extendedMethodTypes
: nil
;
658 property_list_t
*classProperties() const {
659 return hasClassPropertiesField() ? _classProperties
: nil
;
663 struct protocol_list_t
{
664 // count is pointer-sized by accident.
666 protocol_ref_t list
[0]; // variable-size
668 size_t byteSize() const {
669 return sizeof(*this) + count
*sizeof(list
[0]);
672 protocol_list_t
*duplicate() const {
673 return (protocol_list_t
*)memdup(this, this->byteSize());
676 typedef protocol_ref_t
* iterator
;
677 typedef const protocol_ref_t
* const_iterator
;
679 const_iterator
begin() const {
685 const_iterator
end() const {
695 uint32_t instanceStart
;
696 uint32_t instanceSize
;
701 const uint8_t * ivarLayout
;
704 method_list_t
* baseMethodList
;
705 protocol_list_t
* baseProtocols
;
706 const ivar_list_t
* ivars
;
708 const uint8_t * weakIvarLayout
;
709 property_list_t
*baseProperties
;
711 // This field exists only when RO_HAS_SWIFT_INITIALIZER is set.
712 _objc_swiftMetadataInitializer __ptrauth_objc_method_list_imp _swiftMetadataInitializer_NEVER_USE
[0];
714 _objc_swiftMetadataInitializer
swiftMetadataInitializer() const {
715 if (flags
& RO_HAS_SWIFT_INITIALIZER
) {
716 return _swiftMetadataInitializer_NEVER_USE
[0];
722 method_list_t
*baseMethods() const {
723 return baseMethodList
;
726 class_ro_t
*duplicate() const {
727 if (flags
& RO_HAS_SWIFT_INITIALIZER
) {
728 size_t size
= sizeof(*this) + sizeof(_swiftMetadataInitializer_NEVER_USE
[0]);
729 class_ro_t
*ro
= (class_ro_t
*)memdup(this, size
);
730 ro
->_swiftMetadataInitializer_NEVER_USE
[0] = this->_swiftMetadataInitializer_NEVER_USE
[0];
733 size_t size
= sizeof(*this);
734 class_ro_t
*ro
= (class_ro_t
*)memdup(this, size
);
741 /***********************************************************************
742 * list_array_tt<Element, List>
743 * Generic implementation for metadata that can be augmented by categories.
745 * Element is the underlying metadata type (e.g. method_t)
746 * List is the metadata's list type (e.g. method_list_t)
748 * A list_array_tt has one of three values:
750 * - a pointer to a single list
751 * - an array of pointers to lists
753 * countLists/beginLists/endLists iterate the metadata lists
754 * count/begin/end iterate the underlying metadata elements
755 **********************************************************************/
756 template <typename Element
, typename List
>
757 class list_array_tt
{
762 static size_t byteSize(uint32_t count
) {
763 return sizeof(array_t
) + count
*sizeof(lists
[0]);
766 return byteSize(count
);
774 typename
List::iterator m
, mEnd
;
777 iterator(List
**begin
, List
**end
)
778 : lists(begin
), listsEnd(end
)
781 m
= (*begin
)->begin();
782 mEnd
= (*begin
)->end();
786 const Element
& operator * () const {
789 Element
& operator * () {
793 bool operator != (const iterator
& rhs
) const {
794 if (lists
!= rhs
.lists
) return true;
795 if (lists
== listsEnd
) return false; // m is undefined
796 if (m
!= rhs
.m
) return true;
800 const iterator
& operator ++ () {
804 ASSERT(lists
!= listsEnd
);
806 if (lists
!= listsEnd
) {
807 m
= (*lists
)->begin();
808 mEnd
= (*lists
)->end();
818 uintptr_t arrayAndFlag
;
821 bool hasArray() const {
822 return arrayAndFlag
& 1;
826 return (array_t
*)(arrayAndFlag
& ~1);
829 void setArray(array_t
*array
) {
830 arrayAndFlag
= (uintptr_t)array
| 1;
837 for (auto lists
= beginLists(), end
= endLists();
841 result
+= (*lists
)->count
;
847 return iterator(beginLists(), endLists());
851 List
**e
= endLists();
852 return iterator(e
, e
);
856 uint32_t countLists() {
858 return array()->count
;
866 List
** beginLists() {
868 return array()->lists
;
876 return array()->lists
+ array()->count
;
884 void attachLists(List
* const * addedLists
, uint32_t addedCount
) {
885 if (addedCount
== 0) return;
888 // many lists -> many lists
889 uint32_t oldCount
= array()->count
;
890 uint32_t newCount
= oldCount
+ addedCount
;
891 setArray((array_t
*)realloc(array(), array_t::byteSize(newCount
)));
892 array()->count
= newCount
;
893 memmove(array()->lists
+ addedCount
, array()->lists
,
894 oldCount
* sizeof(array()->lists
[0]));
895 memcpy(array()->lists
, addedLists
,
896 addedCount
* sizeof(array()->lists
[0]));
898 else if (!list
&& addedCount
== 1) {
900 list
= addedLists
[0];
903 // 1 list -> many lists
904 List
* oldList
= list
;
905 uint32_t oldCount
= oldList
? 1 : 0;
906 uint32_t newCount
= oldCount
+ addedCount
;
907 setArray((array_t
*)malloc(array_t::byteSize(newCount
)));
908 array()->count
= newCount
;
909 if (oldList
) array()->lists
[addedCount
] = oldList
;
910 memcpy(array()->lists
, addedLists
,
911 addedCount
* sizeof(array()->lists
[0]));
917 for (uint32_t i
= 0; i
< array()->count
; i
++) {
918 try_free(array()->lists
[i
]);
927 template<typename Result
>
932 array_t
*a
= array();
933 result
.setArray((array_t
*)memdup(a
, a
->byteSize()));
934 for (uint32_t i
= 0; i
< a
->count
; i
++) {
935 result
.array()->lists
[i
] = a
->lists
[i
]->duplicate();
938 result
.list
= list
->duplicate();
948 class method_array_t
:
949 public list_array_tt
<method_t
, method_list_t
>
951 typedef list_array_tt
<method_t
, method_list_t
> Super
;
954 method_list_t
**beginCategoryMethodLists() {
958 method_list_t
**endCategoryMethodLists(Class cls
);
960 method_array_t
duplicate() {
961 return Super::duplicate
<method_array_t
>();
966 class property_array_t
:
967 public list_array_tt
<property_t
, property_list_t
>
969 typedef list_array_tt
<property_t
, property_list_t
> Super
;
972 property_array_t
duplicate() {
973 return Super::duplicate
<property_array_t
>();
978 class protocol_array_t
:
979 public list_array_tt
<protocol_ref_t
, protocol_list_t
>
981 typedef list_array_tt
<protocol_ref_t
, protocol_list_t
> Super
;
984 protocol_array_t
duplicate() {
985 return Super::duplicate
<protocol_array_t
>();
991 // Be warned that Symbolication knows the layout of this structure.
996 const class_ro_t
*ro
;
998 method_array_t methods
;
999 property_array_t properties
;
1000 protocol_array_t protocols
;
1002 Class firstSubclass
;
1003 Class nextSiblingClass
;
1005 char *demangledName
;
1007 #if SUPPORT_INDEXED_ISA
1011 void setFlags(uint32_t set
)
1013 __c11_atomic_fetch_or((_Atomic(uint32_t) *)&flags
, set
, __ATOMIC_RELAXED
);
1016 void clearFlags(uint32_t clear
)
1018 __c11_atomic_fetch_and((_Atomic(uint32_t) *)&flags
, ~clear
, __ATOMIC_RELAXED
);
1021 // set and clear must not overlap
1022 void changeFlags(uint32_t set
, uint32_t clear
)
1024 ASSERT((set
& clear
) == 0);
1026 uint32_t oldf
, newf
;
1029 newf
= (oldf
| set
) & ~clear
;
1030 } while (!OSAtomicCompareAndSwap32Barrier(oldf
, newf
, (volatile int32_t *)&flags
));
1035 struct class_data_bits_t
{
1038 // Values are the FAST_ flags above.
1041 bool getBit(uintptr_t bit
) const
1046 // Atomically set the bits in `set` and clear the bits in `clear`.
1047 // set and clear must not overlap.
1048 void setAndClearBits(uintptr_t set
, uintptr_t clear
)
1050 ASSERT((set
& clear
) == 0);
1054 oldBits
= LoadExclusive(&bits
);
1055 newBits
= (oldBits
| set
) & ~clear
;
1056 } while (!StoreReleaseExclusive(&bits
, oldBits
, newBits
));
1059 void setBits(uintptr_t set
) {
1060 __c11_atomic_fetch_or((_Atomic(uintptr_t) *)&bits
, set
, __ATOMIC_RELAXED
);
1063 void clearBits(uintptr_t clear
) {
1064 __c11_atomic_fetch_and((_Atomic(uintptr_t) *)&bits
, ~clear
, __ATOMIC_RELAXED
);
1069 class_rw_t
* data() const {
1070 return (class_rw_t
*)(bits
& FAST_DATA_MASK
);
1072 void setData(class_rw_t
*newData
)
1074 ASSERT(!data() || (newData
->flags
& (RW_REALIZING
| RW_FUTURE
)));
1075 // Set during realization or construction only. No locking needed.
1076 // Use a store-release fence because there may be concurrent
1077 // readers of data and data's contents.
1078 uintptr_t newBits
= (bits
& ~FAST_DATA_MASK
) | (uintptr_t)newData
;
1079 atomic_thread_fence(memory_order_release
);
1083 // Get the class's ro data, even in the presence of concurrent realization.
1084 // fixme this isn't really safe without a compiler barrier at least
1085 // and probably a memory barrier when realizeClass changes the data field
1086 const class_ro_t
*safe_ro() {
1087 class_rw_t
*maybe_rw
= data();
1088 if (maybe_rw
->flags
& RW_REALIZED
) {
1090 return maybe_rw
->ro
;
1092 // maybe_rw is actually ro
1093 return (class_ro_t
*)maybe_rw
;
1097 void setClassArrayIndex(unsigned Idx
) {
1098 #if SUPPORT_INDEXED_ISA
1099 // 0 is unused as then we can rely on zero-initialisation from calloc.
1101 data()->index
= Idx
;
1105 unsigned classArrayIndex() {
1106 #if SUPPORT_INDEXED_ISA
1107 return data()->index
;
1114 return isSwiftStable() || isSwiftLegacy();
1117 bool isSwiftStable() {
1118 return getBit(FAST_IS_SWIFT_STABLE
);
1120 void setIsSwiftStable() {
1121 setAndClearBits(FAST_IS_SWIFT_STABLE
, FAST_IS_SWIFT_LEGACY
);
1124 bool isSwiftLegacy() {
1125 return getBit(FAST_IS_SWIFT_LEGACY
);
1127 void setIsSwiftLegacy() {
1128 setAndClearBits(FAST_IS_SWIFT_LEGACY
, FAST_IS_SWIFT_STABLE
);
1131 // fixme remove this once the Swift runtime uses the stable bits
1132 bool isSwiftStable_ButAllowLegacyForNow() {
1133 return isAnySwift();
1136 _objc_swiftMetadataInitializer
swiftMetadataInitializer() {
1137 // This function is called on un-realized classes without
1138 // holding any locks.
1139 // Beware of races with other realizers.
1140 return safe_ro()->swiftMetadataInitializer();
1145 struct objc_class
: objc_object
{
1148 cache_t cache
; // formerly cache pointer and vtable
1149 class_data_bits_t bits
; // class_rw_t * plus custom rr/alloc flags
1151 class_rw_t
*data() const {
1154 void setData(class_rw_t
*newData
) {
1155 bits
.setData(newData
);
1158 void setInfo(uint32_t set
) {
1159 ASSERT(isFuture() || isRealized());
1160 data()->setFlags(set
);
1163 void clearInfo(uint32_t clear
) {
1164 ASSERT(isFuture() || isRealized());
1165 data()->clearFlags(clear
);
1168 // set and clear must not overlap
1169 void changeInfo(uint32_t set
, uint32_t clear
) {
1170 ASSERT(isFuture() || isRealized());
1171 ASSERT((set
& clear
) == 0);
1172 data()->changeFlags(set
, clear
);
1175 #if FAST_HAS_DEFAULT_RR
1176 bool hasCustomRR() const {
1177 return !bits
.getBit(FAST_HAS_DEFAULT_RR
);
1179 void setHasDefaultRR() {
1180 bits
.setBits(FAST_HAS_DEFAULT_RR
);
1182 void setHasCustomRR() {
1183 bits
.clearBits(FAST_HAS_DEFAULT_RR
);
1186 bool hasCustomRR() const {
1187 return !(bits
.data()->flags
& RW_HAS_DEFAULT_RR
);
1189 void setHasDefaultRR() {
1190 bits
.data()->setFlags(RW_HAS_DEFAULT_RR
);
1192 void setHasCustomRR() {
1193 bits
.data()->clearFlags(RW_HAS_DEFAULT_RR
);
1197 #if FAST_CACHE_HAS_DEFAULT_AWZ
1198 bool hasCustomAWZ() const {
1199 return !cache
.getBit(FAST_CACHE_HAS_DEFAULT_AWZ
);
1201 void setHasDefaultAWZ() {
1202 cache
.setBit(FAST_CACHE_HAS_DEFAULT_AWZ
);
1204 void setHasCustomAWZ() {
1205 cache
.clearBit(FAST_CACHE_HAS_DEFAULT_AWZ
);
1208 bool hasCustomAWZ() const {
1209 return !(bits
.data()->flags
& RW_HAS_DEFAULT_AWZ
);
1211 void setHasDefaultAWZ() {
1212 bits
.data()->setFlags(RW_HAS_DEFAULT_AWZ
);
1214 void setHasCustomAWZ() {
1215 bits
.data()->clearFlags(RW_HAS_DEFAULT_AWZ
);
1219 #if FAST_CACHE_HAS_DEFAULT_CORE
1220 bool hasCustomCore() const {
1221 return !cache
.getBit(FAST_CACHE_HAS_DEFAULT_CORE
);
1223 void setHasDefaultCore() {
1224 return cache
.setBit(FAST_CACHE_HAS_DEFAULT_CORE
);
1226 void setHasCustomCore() {
1227 return cache
.clearBit(FAST_CACHE_HAS_DEFAULT_CORE
);
1230 bool hasCustomCore() const {
1231 return !(bits
.data()->flags
& RW_HAS_DEFAULT_CORE
);
1233 void setHasDefaultCore() {
1234 bits
.data()->setFlags(RW_HAS_DEFAULT_CORE
);
1236 void setHasCustomCore() {
1237 bits
.data()->clearFlags(RW_HAS_DEFAULT_CORE
);
1241 #if FAST_CACHE_HAS_CXX_CTOR
1243 ASSERT(isRealized());
1244 return cache
.getBit(FAST_CACHE_HAS_CXX_CTOR
);
1246 void setHasCxxCtor() {
1247 cache
.setBit(FAST_CACHE_HAS_CXX_CTOR
);
1251 ASSERT(isRealized());
1252 return bits
.data()->flags
& RW_HAS_CXX_CTOR
;
1254 void setHasCxxCtor() {
1255 bits
.data()->setFlags(RW_HAS_CXX_CTOR
);
1259 #if FAST_CACHE_HAS_CXX_DTOR
1261 ASSERT(isRealized());
1262 return cache
.getBit(FAST_CACHE_HAS_CXX_DTOR
);
1264 void setHasCxxDtor() {
1265 cache
.setBit(FAST_CACHE_HAS_CXX_DTOR
);
1269 ASSERT(isRealized());
1270 return bits
.data()->flags
& RW_HAS_CXX_DTOR
;
1272 void setHasCxxDtor() {
1273 bits
.data()->setFlags(RW_HAS_CXX_DTOR
);
#if FAST_CACHE_REQUIRES_RAW_ISA
    // True if instances of this class must use raw (pointer) isa
    // rather than a packed non-pointer isa.
    bool instancesRequireRawIsa() {
        return cache.getBit(FAST_CACHE_REQUIRES_RAW_ISA);
    }
    void setInstancesRequireRawIsa() {
        cache.setBit(FAST_CACHE_REQUIRES_RAW_ISA);
    }
#elif SUPPORT_NONPOINTER_ISA
    // No fast cache bit available; use class_rw_t->flags.
    bool instancesRequireRawIsa() {
        return bits.data()->flags & RW_REQUIRES_RAW_ISA;
    }
    void setInstancesRequireRawIsa() {
        bits.data()->setFlags(RW_REQUIRES_RAW_ISA);
    }
#else
    // Non-pointer isa unsupported on this target: raw isa always required.
    bool instancesRequireRawIsa() {
        return true;
    }
    void setInstancesRequireRawIsa() {
        // nothing
    }
#endif
    void setInstancesRequireRawIsaRecursively(bool inherited = false);
    void printInstancesRequireRawIsa(bool inherited);
1302 bool canAllocNonpointer() {
1303 ASSERT(!isFuture());
1304 return !instancesRequireRawIsa();
1307 bool isSwiftStable() {
1308 return bits
.isSwiftStable();
1311 bool isSwiftLegacy() {
1312 return bits
.isSwiftLegacy();
1316 return bits
.isAnySwift();
1319 bool isSwiftStable_ButAllowLegacyForNow() {
1320 return bits
.isSwiftStable_ButAllowLegacyForNow();
1323 bool isStubClass() const {
1324 uintptr_t isa
= (uintptr_t)isaBits();
1325 return 1 <= isa
&& isa
< 16;
1328 // Swift stable ABI built for old deployment targets looks weird.
1329 // The is-legacy bit is set for compatibility with old libobjc.
1330 // We are on a "new" deployment target so we need to rewrite that bit.
1331 // These stable-with-legacy-bit classes are distinguished from real
1332 // legacy classes using another bit in the Swift data
1333 // (ClassFlags::IsSwiftPreStableABI)
1335 bool isUnfixedBackwardDeployingStableSwift() {
1336 // Only classes marked as Swift legacy need apply.
1337 if (!bits
.isSwiftLegacy()) return false;
1339 // Check the true legacy vs stable distinguisher.
1340 // The low bit of Swift's ClassFlags is SET for true legacy
1341 // and UNSET for stable pretending to be legacy.
1342 uint32_t swiftClassFlags
= *(uint32_t *)(&bits
+ 1);
1343 bool isActuallySwiftLegacy
= bool(swiftClassFlags
& 1);
1344 return !isActuallySwiftLegacy
;
1347 void fixupBackwardDeployingStableSwift() {
1348 if (isUnfixedBackwardDeployingStableSwift()) {
1349 // Class really is stable Swift, pretending to be pre-stable.
1351 bits
.setIsSwiftStable();
1355 _objc_swiftMetadataInitializer
swiftMetadataInitializer() {
1356 return bits
.swiftMetadataInitializer();
1359 // Return YES if the class's ivars are managed by ARC,
1360 // or the class is MRC but has ARC-style weak ivars.
1361 bool hasAutomaticIvars() {
1362 return data()->ro
->flags
& (RO_IS_ARC
| RO_HAS_WEAK_WITHOUT_ARC
);
1365 // Return YES if the class's ivars are managed by ARC.
1367 return data()->ro
->flags
& RO_IS_ARC
;
1371 bool forbidsAssociatedObjects() {
1372 return (data()->flags
& RW_FORBIDS_ASSOCIATED_OBJECTS
);
1375 #if SUPPORT_NONPOINTER_ISA
1376 // Tracked in non-pointer isas; not tracked otherwise
1378 bool instancesHaveAssociatedObjects() {
1379 // this may be an unrealized future class in the CF-bridged case
1380 ASSERT(isFuture() || isRealized());
1381 return data()->flags
& RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS
;
1384 void setInstancesHaveAssociatedObjects() {
1385 // this may be an unrealized future class in the CF-bridged case
1386 ASSERT(isFuture() || isRealized());
1387 setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS
);
1391 bool shouldGrowCache() {
1395 void setShouldGrowCache(bool) {
1396 // fixme good or bad for memory use?
1399 bool isInitializing() {
1400 return getMeta()->data()->flags
& RW_INITIALIZING
;
1403 void setInitializing() {
1404 ASSERT(!isMetaClass());
1405 ISA()->setInfo(RW_INITIALIZING
);
1408 bool isInitialized() {
1409 return getMeta()->data()->flags
& RW_INITIALIZED
;
1412 void setInitialized();
1415 ASSERT(isRealized());
1416 return true; // any class registered for +load is definitely loadable
1419 IMP
getLoadMethod();
1421 // Locking: To prevent concurrent realization, hold runtimeLock.
1422 bool isRealized() const {
1423 return !isStubClass() && (data()->flags
& RW_REALIZED
);
1426 // Returns true if this is an unrealized future class.
1427 // Locking: To prevent concurrent realization, hold runtimeLock.
1428 bool isFuture() const {
1429 return data()->flags
& RW_FUTURE
;
1432 bool isMetaClass() {
1434 ASSERT(isRealized());
1436 return cache
.getBit(FAST_CACHE_META
);
1438 return data()->ro
->flags
& RO_META
;
1442 // Like isMetaClass, but also valid on un-realized classes
1443 bool isMetaClassMaybeUnrealized() {
1444 return bits
.safe_ro()->flags
& RO_META
;
1447 // NOT identical to this->ISA when this is a metaclass
1449 if (isMetaClass()) return (Class
)this;
1450 else return this->ISA();
1453 bool isRootClass() {
1454 return superclass
== nil
;
1456 bool isRootMetaclass() {
1457 return ISA() == (Class
)this;
1460 const char *mangledName() {
1461 // fixme can't assert locks here
1464 if (isRealized() || isFuture()) {
1465 return data()->ro
->name
;
1467 return ((const class_ro_t
*)data())->name
;
1471 const char *demangledName();
1472 const char *nameForLogging();
1474 // May be unaligned depending on class's ivars.
1475 uint32_t unalignedInstanceStart() const {
1476 ASSERT(isRealized());
1477 return data()->ro
->instanceStart
;
1480 // Class's instance start rounded up to a pointer-size boundary.
1481 // This is used for ARC layout bitmaps.
1482 uint32_t alignedInstanceStart() const {
1483 return word_align(unalignedInstanceStart());
1486 // May be unaligned depending on class's ivars.
1487 uint32_t unalignedInstanceSize() const {
1488 ASSERT(isRealized());
1489 return data()->ro
->instanceSize
;
1492 // Class's ivar size rounded up to a pointer-size boundary.
1493 uint32_t alignedInstanceSize() const {
1494 return word_align(unalignedInstanceSize());
1497 size_t instanceSize(size_t extraBytes
) const {
1498 if (fastpath(cache
.hasFastInstanceSize(extraBytes
))) {
1499 return cache
.fastInstanceSize(extraBytes
);
1502 size_t size
= alignedInstanceSize() + extraBytes
;
1503 // CF requires all objects be at least 16 bytes.
1504 if (size
< 16) size
= 16;
1508 void setInstanceSize(uint32_t newSize
) {
1509 ASSERT(isRealized());
1510 ASSERT(data()->flags
& RW_REALIZING
);
1511 if (newSize
!= data()->ro
->instanceSize
) {
1512 ASSERT(data()->flags
& RW_COPIED_RO
);
1513 *const_cast<uint32_t *>(&data()->ro
->instanceSize
) = newSize
;
1515 cache
.setFastInstanceSize(newSize
);
1518 void chooseClassArrayIndex();
1520 void setClassArrayIndex(unsigned Idx
) {
1521 bits
.setClassArrayIndex(Idx
);
1524 unsigned classArrayIndex() {
1525 return bits
.classArrayIndex();
1530 struct swift_class_t
: objc_class
{
1532 uint32_t instanceAddressOffset
;
1533 uint32_t instanceSize
;
1534 uint16_t instanceAlignMask
;
1538 uint32_t classAddressOffset
;
1542 void *baseAddress() {
1543 return (void *)((uint8_t *)this - classAddressOffset
);
1551 struct method_list_t
*instanceMethods
;
1552 struct method_list_t
*classMethods
;
1553 struct protocol_list_t
*protocols
;
1554 struct property_list_t
*instanceProperties
;
1555 // Fields below this point are not always present on disk.
1556 struct property_list_t
*_classProperties
;
1558 method_list_t
*methodsForMeta(bool isMeta
) {
1559 if (isMeta
) return classMethods
;
1560 else return instanceMethods
;
1563 property_list_t
*propertiesForMeta(bool isMeta
, struct header_info
*hi
);
1565 protocol_list_t
*protocolsForMeta(bool isMeta
) {
1566 if (isMeta
) return nullptr;
1567 else return protocols
;
1571 struct objc_super2
{
1573 Class current_class
;
1576 struct message_ref_t
{
1582 extern Method
protocol_getMethod(protocol_t
*p
, SEL sel
, bool isRequiredMethod
, bool isInstanceMethod
, bool recursive
);