2 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 #ifndef _OBJC_RUNTIME_NEW_H
25 #define _OBJC_RUNTIME_NEW_H
27 #include "PointerUnion.h"
28 #include <type_traits>
30 // class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
31 // The extra bits are optimized for the retain/release and alloc/dealloc paths.
33 // Values for class_ro_t->flags
34 // These are emitted by the compiler and are part of the ABI.
35 // Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
36 // class is a metaclass
37 #define RO_META (1<<0)
38 // class is a root class
39 #define RO_ROOT (1<<1)
40 // class has .cxx_construct/destruct implementations
41 #define RO_HAS_CXX_STRUCTORS (1<<2)
42 // class has +load implementation
43 // #define RO_HAS_LOAD_METHOD (1<<3)
44 // class has visibility=hidden set
45 #define RO_HIDDEN (1<<4)
46 // class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
47 #define RO_EXCEPTION (1<<5)
48 // class has ro field for Swift metadata initializer callback
49 #define RO_HAS_SWIFT_INITIALIZER (1<<6)
50 // class compiled with ARC
51 #define RO_IS_ARC (1<<7)
52 // class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
53 #define RO_HAS_CXX_DTOR_ONLY (1<<8)
54 // class is not ARC but has ARC-style weak ivar layout
55 #define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
56 // class does not allow associated objects on instances
57 #define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)
59 // class is in an unloadable bundle - must never be set by compiler
60 #define RO_FROM_BUNDLE (1<<29)
61 // class is unrealized future class - must never be set by compiler
62 #define RO_FUTURE (1<<30)
63 // class is realized - must never be set by compiler
64 #define RO_REALIZED (1<<31)
66 // Values for class_rw_t->flags
67 // These are not emitted by the compiler and are never used in class_ro_t.
68 // Their presence should be considered in future ABI versions.
69 // class_t->data is class_rw_t, not class_ro_t
70 #define RW_REALIZED (1<<31)
71 // class is unresolved future class
72 #define RW_FUTURE (1<<30)
73 // class is initialized
74 #define RW_INITIALIZED (1<<29)
75 // class is initializing
76 #define RW_INITIALIZING (1<<28)
77 // class_rw_t->ro is heap copy of class_ro_t
78 #define RW_COPIED_RO (1<<27)
79 // class allocated but not yet registered
80 #define RW_CONSTRUCTING (1<<26)
81 // class allocated and registered
82 #define RW_CONSTRUCTED (1<<25)
83 // available for use; was RW_FINALIZE_ON_MAIN_THREAD
84 // #define RW_24 (1<<24)
85 // class +load has been called
86 #define RW_LOADED (1<<23)
87 #if !SUPPORT_NONPOINTER_ISA
88 // class instances may have associative references
89 #define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
91 // class has instance-specific GC layout
92 #define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
93 // class does not allow associated objects on its instances
94 #define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
95 // class has started realizing but not yet completed it
96 #define RW_REALIZING (1<<19)
98 #if CONFIG_USE_PREOPT_CACHES
99 // this class and its descendants can't have preopt caches with inlined sels
100 #define RW_NOPREOPT_SELS (1<<2)
101 // this class and its descendants can't have preopt caches
102 #define RW_NOPREOPT_CACHE (1<<1)
105 // class is a metaclass (copied from ro)
106 #define RW_META RO_META // (1<<0)
109 // NOTE: MORE RW_ FLAGS DEFINED BELOW
111 // Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
112 // or class_t->bits (FAST_*).
114 // FAST_* and FAST_CACHE_* are stored on the class, reducing pointer indirection.
118 // class is a Swift class from the pre-stable Swift ABI
119 #define FAST_IS_SWIFT_LEGACY (1UL<<0)
120 // class is a Swift class from the stable Swift ABI
121 #define FAST_IS_SWIFT_STABLE (1UL<<1)
122 // class or superclass has default retain/release/autorelease/retainCount/
123 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
124 #define FAST_HAS_DEFAULT_RR (1UL<<2)
126 #define FAST_DATA_MASK 0x00007ffffffffff8UL
129 // class or superclass has .cxx_construct/.cxx_destruct implementation
130 // FAST_CACHE_HAS_CXX_DTOR is the first bit so that setting it in
131 // isa_t::has_cxx_dtor is a single bfi
132 #define FAST_CACHE_HAS_CXX_DTOR (1<<0)
133 #define FAST_CACHE_HAS_CXX_CTOR (1<<1)
134 // Denormalized RO_META to avoid an indirection
135 #define FAST_CACHE_META (1<<2)
137 // Denormalized RO_META to avoid an indirection
138 #define FAST_CACHE_META (1<<0)
139 // class or superclass has .cxx_construct/.cxx_destruct implementation
140 // FAST_CACHE_HAS_CXX_DTOR is chosen to alias with isa_t::has_cxx_dtor
141 #define FAST_CACHE_HAS_CXX_CTOR (1<<1)
142 #define FAST_CACHE_HAS_CXX_DTOR (1<<2)
145 // Fast Alloc fields:
146 // This stores the word-aligned size of instances + "ALLOC_DELTA16",
147 // or 0 if the instance size doesn't fit.
149 // These bits occupy the same bits as the instance size, so that
150 // the size can be extracted with a simple mask operation.
152 // FAST_CACHE_ALLOC_MASK16 allows extracting the instance size rounded
153 // up to the next 16 byte boundary, which is a fastpath for
154 // _objc_rootAllocWithZone()
155 #define FAST_CACHE_ALLOC_MASK 0x1ff8
156 #define FAST_CACHE_ALLOC_MASK16 0x1ff0
157 #define FAST_CACHE_ALLOC_DELTA16 0x0008
159 // class's instances require raw isa
160 #define FAST_CACHE_REQUIRES_RAW_ISA (1<<13)
161 // class or superclass has default alloc/allocWithZone: implementation
162 // Note this is stored in the metaclass.
163 #define FAST_CACHE_HAS_DEFAULT_AWZ (1<<14)
164 // class or superclass has default new/self/class/respondsToSelector/isKindOfClass
165 #define FAST_CACHE_HAS_DEFAULT_CORE (1<<15)
169 // class or superclass has .cxx_construct implementation
170 #define RW_HAS_CXX_CTOR (1<<18)
171 // class or superclass has .cxx_destruct implementation
172 #define RW_HAS_CXX_DTOR (1<<17)
173 // class or superclass has default alloc/allocWithZone: implementation
174 // Note this is stored in the metaclass.
175 #define RW_HAS_DEFAULT_AWZ (1<<16)
176 // class's instances require raw isa
177 #if SUPPORT_NONPOINTER_ISA
178 #define RW_REQUIRES_RAW_ISA (1<<15)
180 // class or superclass has default retain/release/autorelease/retainCount/
181 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
182 #define RW_HAS_DEFAULT_RR (1<<14)
183 // class or superclass has default new/self/class/respondsToSelector/isKindOfClass
184 #define RW_HAS_DEFAULT_CORE (1<<13)
186 // class is a Swift class from the pre-stable Swift ABI
187 #define FAST_IS_SWIFT_LEGACY (1UL<<0)
188 // class is a Swift class from the stable Swift ABI
189 #define FAST_IS_SWIFT_STABLE (1UL<<1)
191 #define FAST_DATA_MASK 0xfffffffcUL
195 // The Swift ABI requires that these bits be defined like this on all platforms.
196 static_assert(FAST_IS_SWIFT_LEGACY
== 1, "resistance is futile");
197 static_assert(FAST_IS_SWIFT_STABLE
== 2, "resistance is futile");
201 typedef uint32_t mask_t
; // x86_64 & arm64 asm are less efficient with 16-bits
203 typedef uint16_t mask_t
;
205 typedef uintptr_t SEL
;
207 struct swift_class_t
;
// Template flag for bucket_t::set: whether the bucket's stores must be atomic.
209 enum Atomicity
{ Atomic
= true, NotAtomic
= false };
// Template flag for bucket_t::set: distinguishes an IMP that must be encoded
// (see bucket_t::encodeImp) from one passed through raw.
210 enum IMPEncoding
{ Encoded
= true, Raw
= false };
214 // IMP-first is better for arm64e ptrauth and no worse for arm64.
215 // SEL-first is better for armv7* and i386 and x86_64.
217 explicit_atomic
<uintptr_t> _imp
;
218 explicit_atomic
<SEL
> _sel
;
220 explicit_atomic
<SEL
> _sel
;
221 explicit_atomic
<uintptr_t> _imp
;
224 // Compute the ptrauth signing modifier from &_imp, newSel, and cls.
225 uintptr_t modifierForSEL(bucket_t
*base
, SEL newSel
, Class cls
) const {
226 return (uintptr_t)base
^ (uintptr_t)newSel
^ (uintptr_t)cls
;
229 // Sign newImp, with &_imp, newSel, and cls as modifiers.
230 uintptr_t encodeImp(UNUSED_WITHOUT_PTRAUTH bucket_t
*base
, IMP newImp
, UNUSED_WITHOUT_PTRAUTH SEL newSel
, Class cls
) const {
231 if (!newImp
) return 0;
232 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
234 ptrauth_auth_and_resign(newImp
,
235 ptrauth_key_function_pointer
, 0,
236 ptrauth_key_process_dependent_code
,
237 modifierForSEL(base
, newSel
, cls
));
238 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
239 return (uintptr_t)newImp
^ (uintptr_t)cls
;
240 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
241 return (uintptr_t)newImp
;
243 #error Unknown method cache IMP encoding.
248 static inline size_t offsetOfSel() { return offsetof(bucket_t
, _sel
); }
249 inline SEL
sel() const { return _sel
.load(memory_order_relaxed
); }
251 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
252 #define MAYBE_UNUSED_ISA
254 #define MAYBE_UNUSED_ISA __attribute__((unused))
256 inline IMP
rawImp(MAYBE_UNUSED_ISA objc_class
*cls
) const {
257 uintptr_t imp
= _imp
.load(memory_order_relaxed
);
258 if (!imp
) return nil
;
259 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
260 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
261 imp
^= (uintptr_t)cls
;
262 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
264 #error Unknown method cache IMP encoding.
269 inline IMP
imp(UNUSED_WITHOUT_PTRAUTH bucket_t
*base
, Class cls
) const {
270 uintptr_t imp
= _imp
.load(memory_order_relaxed
);
271 if (!imp
) return nil
;
272 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
273 SEL sel
= _sel
.load(memory_order_relaxed
);
275 ptrauth_auth_and_resign((const void *)imp
,
276 ptrauth_key_process_dependent_code
,
277 modifierForSEL(base
, sel
, cls
),
278 ptrauth_key_function_pointer
, 0);
279 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
280 return (IMP
)(imp
^ (uintptr_t)cls
);
281 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
284 #error Unknown method cache IMP encoding.
288 template <Atomicity
, IMPEncoding
>
289 void set(bucket_t
*base
, SEL newSel
, IMP newImp
, Class cls
);
292 /* dyld_shared_cache_builder and obj-C agree on these definitions */
294 OBJC_OPT_METHODNAME_START
= 0,
295 OBJC_OPT_METHODNAME_END
= 1,
296 OBJC_OPT_INLINED_METHODS_START
= 2,
297 OBJC_OPT_INLINED_METHODS_END
= 3,
299 __OBJC_OPT_OFFSETS_COUNT
,
302 #if CONFIG_USE_PREOPT_CACHES
303 extern uintptr_t objc_opt_offsets
[__OBJC_OPT_OFFSETS_COUNT
];
306 /* dyld_shared_cache_builder and obj-C agree on these definitions */
307 struct preopt_cache_entry_t
{
312 /* dyld_shared_cache_builder and obj-C agree on these definitions */
313 struct preopt_cache_t
{
314 int32_t fallback_class_offset
;
320 uint16_t hash_params
;
322 uint16_t occupied
: 14;
323 uint16_t has_inlines
: 1;
324 uint16_t bit_one
: 1;
325 preopt_cache_entry_t entries
[];
327 inline int capacity() const {
333 // - the cached IMP when one is found
334 // - nil if there's no cached value and the cache is dynamic
335 // - `value_on_constant_cache_miss` if there's no cached value and the cache is preoptimized
336 extern "C" IMP
cache_getImp(Class cls
, SEL sel
, IMP value_on_constant_cache_miss
= nil
);
340 explicit_atomic
<uintptr_t> _bucketsAndMaybeMask
;
343 explicit_atomic
<mask_t
> _maybeMask
;
349 explicit_atomic
<preopt_cache_t
*> _originalPreoptCache
;
352 #if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
353 // _bucketsAndMaybeMask is a buckets_t pointer
354 // _maybeMask is the buckets mask
356 static constexpr uintptr_t bucketsMask
= ~0ul;
357 static_assert(!CONFIG_USE_PREOPT_CACHES
, "preoptimized caches not supported");
358 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
359 static constexpr uintptr_t maskShift
= 48;
360 static constexpr uintptr_t maxMask
= ((uintptr_t)1 << (64 - maskShift
)) - 1;
361 static constexpr uintptr_t bucketsMask
= ((uintptr_t)1 << maskShift
) - 1;
363 static_assert(bucketsMask
>= MACH_VM_MAX_ADDRESS
, "Bucket field doesn't have enough bits for arbitrary pointers.");
364 #if CONFIG_USE_PREOPT_CACHES
365 static constexpr uintptr_t preoptBucketsMarker
= 1ul;
366 static constexpr uintptr_t preoptBucketsMask
= bucketsMask
& ~preoptBucketsMarker
;
368 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
369 // _bucketsAndMaybeMask is a buckets_t pointer in the low 48 bits
370 // _maybeMask is unused, the mask is stored in the top 16 bits.
372 // How much the mask is shifted by.
373 static constexpr uintptr_t maskShift
= 48;
375 // Additional bits after the mask which must be zero. msgSend
376 // takes advantage of these additional bits to construct the value
377 // `mask << 4` from `_maskAndBuckets` in a single instruction.
378 static constexpr uintptr_t maskZeroBits
= 4;
380 // The largest mask value we can store.
381 static constexpr uintptr_t maxMask
= ((uintptr_t)1 << (64 - maskShift
)) - 1;
383 // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
384 static constexpr uintptr_t bucketsMask
= ((uintptr_t)1 << (maskShift
- maskZeroBits
)) - 1;
386 // Ensure we have enough bits for the buckets pointer.
387 static_assert(bucketsMask
>= MACH_VM_MAX_ADDRESS
,
388 "Bucket field doesn't have enough bits for arbitrary pointers.");
390 #if CONFIG_USE_PREOPT_CACHES
391 static constexpr uintptr_t preoptBucketsMarker
= 1ul;
392 #if __has_feature(ptrauth_calls)
393 // 63..60: hash_mask_shift
394 // 59..55: hash_shift
395 // 54.. 1: buckets ptr + auth
397 static constexpr uintptr_t preoptBucketsMask
= 0x007ffffffffffffe;
398 static inline uintptr_t preoptBucketsHashParams(const preopt_cache_t
*cache
) {
399 uintptr_t value
= (uintptr_t)cache
->shift
<< 55;
400 // masks have 11 bits but can be 0, so we compute
401 // the right shift for 0x7fff rather than 0xffff
402 return value
| ((objc::mask16ShiftBits(cache
->mask
) - 1) << 60);
406 // 52..48: hash_shift
407 // 47.. 1: buckets ptr
409 static constexpr uintptr_t preoptBucketsMask
= 0x0000fffffffffffe;
410 static inline uintptr_t preoptBucketsHashParams(const preopt_cache_t
*cache
) {
411 return (uintptr_t)cache
->hash_params
<< 48;
414 #endif // CONFIG_USE_PREOPT_CACHES
415 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
416 // _bucketsAndMaybeMask is a buckets_t pointer in the top 28 bits
417 // _maybeMask is unused, the mask length is stored in the low 4 bits
419 static constexpr uintptr_t maskBits
= 4;
420 static constexpr uintptr_t maskMask
= (1 << maskBits
) - 1;
421 static constexpr uintptr_t bucketsMask
= ~maskMask
;
422 static_assert(!CONFIG_USE_PREOPT_CACHES
, "preoptimized caches not supported");
424 #error Unknown cache mask storage type.
427 bool isConstantEmptyCache() const;
428 bool canBeFreed() const;
431 #if CONFIG_USE_PREOPT_CACHES
432 void initializeToPreoptCacheInDisguise(const preopt_cache_t
*cache
);
433 const preopt_cache_t
*disguised_preopt_cache() const;
436 void incrementOccupied();
437 void setBucketsAndMask(struct bucket_t
*newBuckets
, mask_t newMask
);
439 void reallocate(mask_t oldCapacity
, mask_t newCapacity
, bool freeOld
);
440 void collect_free(bucket_t
*oldBuckets
, mask_t oldCapacity
);
442 static bucket_t
*emptyBuckets();
443 static bucket_t
*allocateBuckets(mask_t newCapacity
);
444 static bucket_t
*emptyBucketsForCapacity(mask_t capacity
, bool allocate
= true);
445 static struct bucket_t
* endMarker(struct bucket_t
*b
, uint32_t cap
);
446 void bad_cache(id receiver
, SEL sel
) __attribute__((noreturn
, cold
));
449 // The following four fields are public for objcdt's use only.
450 // objcdt reaches into fields while the process is suspended
451 // hence doesn't care for locks and pesky little details like this
452 // and can safely use these.
453 unsigned capacity() const;
454 struct bucket_t
*buckets() const;
457 #if CONFIG_USE_PREOPT_CACHES
458 const preopt_cache_t
*preopt_cache() const;
461 mask_t
occupied() const;
462 void initializeToEmpty();
464 #if CONFIG_USE_PREOPT_CACHES
465 bool isConstantOptimizedCache(bool strict
= false, uintptr_t empty_addr
= (uintptr_t)&_objc_empty_cache
) const;
466 bool shouldFlush(SEL sel
, IMP imp
) const;
467 bool isConstantOptimizedCacheWithInlinedSels() const;
468 Class
preoptFallbackClass() const;
469 void maybeConvertToPreoptimized();
470 void initializeToEmptyOrPreoptimizedInDisguise();
472 inline bool isConstantOptimizedCache(bool strict
= false, uintptr_t empty_addr
= 0) const { return false; }
473 inline bool shouldFlush(SEL sel
, IMP imp
) const {
474 return cache_getImp(cls(), sel
) == imp
;
476 inline bool isConstantOptimizedCacheWithInlinedSels() const { return false; }
477 inline void initializeToEmptyOrPreoptimizedInDisguise() { initializeToEmpty(); }
480 void insert(SEL sel
, IMP imp
, id receiver
);
481 void copyCacheNolock(objc_imp_cache_entry
*buffer
, int len
);
483 void eraseNolock(const char *func
);
486 static void collectNolock(bool collectALot
);
487 static size_t bytesForCapacity(uint32_t cap
);
490 bool getBit(uint16_t flags
) const {
491 return _flags
& flags
;
493 void setBit(uint16_t set
) {
494 __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags
, set
, __ATOMIC_RELAXED
);
496 void clearBit(uint16_t clear
) {
497 __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags
, ~clear
, __ATOMIC_RELAXED
);
501 #if FAST_CACHE_ALLOC_MASK
502 bool hasFastInstanceSize(size_t extra
) const
504 if (__builtin_constant_p(extra
) && extra
== 0) {
505 return _flags
& FAST_CACHE_ALLOC_MASK16
;
507 return _flags
& FAST_CACHE_ALLOC_MASK
;
510 size_t fastInstanceSize(size_t extra
) const
512 ASSERT(hasFastInstanceSize(extra
));
514 if (__builtin_constant_p(extra
) && extra
== 0) {
515 return _flags
& FAST_CACHE_ALLOC_MASK16
;
517 size_t size
= _flags
& FAST_CACHE_ALLOC_MASK
;
518 // remove the FAST_CACHE_ALLOC_DELTA16 that was added
519 // by setFastInstanceSize
520 return align16(size
+ extra
- FAST_CACHE_ALLOC_DELTA16
);
524 void setFastInstanceSize(size_t newSize
)
526 // Set during realization or construction only. No locking needed.
527 uint16_t newBits
= _flags
& ~FAST_CACHE_ALLOC_MASK
;
530 // Adding FAST_CACHE_ALLOC_DELTA16 allows for FAST_CACHE_ALLOC_MASK16
531 // to yield the proper 16byte aligned allocation size with a single mask
532 sizeBits
= word_align(newSize
) + FAST_CACHE_ALLOC_DELTA16
;
533 sizeBits
&= FAST_CACHE_ALLOC_MASK
;
534 if (newSize
<= sizeBits
) {
540 bool hasFastInstanceSize(size_t extra
) const {
543 size_t fastInstanceSize(size_t extra
) const {
546 void setFastInstanceSize(size_t extra
) {
553 // classref_t is unremapped class_t*
554 typedef struct classref
* classref_t
;
557 /***********************************************************************
559 * A pointer stored as an offset from the address of that offset.
561 * The target address is computed by taking the address of this struct
562 * and adding the offset stored within it. This is a 32-bit signed
563 * offset giving ±2GB of range.
564 **********************************************************************/
565 template <typename T
>
566 struct RelativePointer
: nocopy_t
{
572 uintptr_t base
= (uintptr_t)&offset
;
573 uintptr_t signExtendedOffset
= (uintptr_t)(intptr_t)offset
;
574 uintptr_t pointer
= base
+ signExtendedOffset
;
580 #ifdef __PTRAUTH_INTRINSICS__
581 # define StubClassInitializerPtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0xc671)
583 # define StubClassInitializerPtrauth
585 struct stub_class_t
{
587 _objc_swiftMetadataInitializer StubClassInitializerPtrauth initializer
;
590 // A pointer modifier that does nothing to the pointer.
591 struct PointerModifierNop
{
592 template <typename ListType
, typename T
>
593 static T
*modify(__unused
const ListType
&list
, T
*ptr
) { return ptr
; }
596 /***********************************************************************
597 * entsize_list_tt<Element, List, FlagMask, PointerModifier>
598 * Generic implementation of an array of non-fragile structs.
600 * Element is the struct type (e.g. method_t)
601 * List is the specialization of entsize_list_tt (e.g. method_list_t)
602 * FlagMask is used to stash extra bits in the entsize field
603 * (e.g. method list fixup markers)
604 * PointerModifier is applied to the element pointers retrieved from
606 **********************************************************************/
607 template <typename Element
, typename List
, uint32_t FlagMask
, typename PointerModifier
= PointerModifierNop
>
608 struct entsize_list_tt
{
609 uint32_t entsizeAndFlags
;
612 uint32_t entsize() const {
613 return entsizeAndFlags
& ~FlagMask
;
615 uint32_t flags() const {
616 return entsizeAndFlags
& FlagMask
;
619 Element
& getOrEnd(uint32_t i
) const {
621 return *PointerModifier::modify(*this, (Element
*)((uint8_t *)this + sizeof(*this) + i
*entsize()));
623 Element
& get(uint32_t i
) const {
628 size_t byteSize() const {
629 return byteSize(entsize(), count
);
632 static size_t byteSize(uint32_t entsize
, uint32_t count
) {
633 return sizeof(entsize_list_tt
) + count
*entsize
;
637 const iterator
begin() const {
638 return iterator(*static_cast<const List
*>(this), 0);
641 return iterator(*static_cast<const List
*>(this), 0);
643 const iterator
end() const {
644 return iterator(*static_cast<const List
*>(this), count
);
647 return iterator(*static_cast<const List
*>(this), count
);
652 uint32_t index
; // keeping track of this saves a divide in operator-
655 typedef std::random_access_iterator_tag iterator_category
;
656 typedef Element value_type
;
657 typedef ptrdiff_t difference_type
;
658 typedef Element
* pointer
;
659 typedef Element
& reference
;
663 iterator(const List
& list
, uint32_t start
= 0)
664 : entsize(list
.entsize())
666 , element(&list
.getOrEnd(start
))
669 const iterator
& operator += (ptrdiff_t delta
) {
670 element
= (Element
*)((uint8_t *)element
+ delta
*entsize
);
671 index
+= (int32_t)delta
;
674 const iterator
& operator -= (ptrdiff_t delta
) {
675 element
= (Element
*)((uint8_t *)element
- delta
*entsize
);
676 index
-= (int32_t)delta
;
679 const iterator
operator + (ptrdiff_t delta
) const {
680 return iterator(*this) += delta
;
682 const iterator
operator - (ptrdiff_t delta
) const {
683 return iterator(*this) -= delta
;
// Pre-increment: advance the iterator by one element (via operator+=).
686 iterator
& operator ++ () { *this += 1; return *this; }
// Pre-decrement: move the iterator back by one element (via operator-=).
687 iterator
& operator -- () { *this -= 1; return *this; }
688 iterator
operator ++ (int) {
689 iterator
result(*this); *this += 1; return result
;
691 iterator
operator -- (int) {
692 iterator
result(*this); *this -= 1; return result
;
695 ptrdiff_t operator - (const iterator
& rhs
) const {
696 return (ptrdiff_t)this->index
- (ptrdiff_t)rhs
.index
;
// Dereference: return a reference to the current element.
699 Element
& operator * () const { return *element
; }
// Member access: return a pointer to the current element.
700 Element
* operator -> () const { return element
; }
702 operator Element
& () const { return *element
; }
704 bool operator == (const iterator
& rhs
) const {
705 return this->element
== rhs
.element
;
707 bool operator != (const iterator
& rhs
) const {
708 return this->element
!= rhs
.element
;
711 bool operator < (const iterator
& rhs
) const {
712 return this->element
< rhs
.element
;
714 bool operator > (const iterator
& rhs
) const {
715 return this->element
> rhs
.element
;
722 // Let method_t::small use this from objc-private.h.
723 static inline bool inSharedCache(uintptr_t ptr
);
727 static const uint32_t smallMethodListFlag
= 0x80000000;
729 method_t(const method_t
&other
) = delete;
731 // The representation of a "big" method. This is the traditional
732 // representation of three pointers storing the selector, types
733 // and implementation.
741 bool isSmall() const {
742 return ((uintptr_t)this & 1) == 1;
745 // The representation of a "small" method. This stores three
746 // relative offsets to the name, types, and implementation.
748 // The name field either refers to a selector (in the shared
749 // cache) or a selref (everywhere else).
750 RelativePointer
<const void *> name
;
751 RelativePointer
<const char *> types
;
752 RelativePointer
<IMP
> imp
;
754 bool inSharedCache() const {
755 return (CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS
&&
756 objc::inSharedCache((uintptr_t)this));
760 small
&small() const {
762 return *(struct small
*)((uintptr_t)this & ~(uintptr_t)1);
765 IMP
remappedImp(bool needsLock
) const;
766 void remapImp(IMP imp
);
767 objc_method_description
*getSmallDescription() const;
770 static const auto bigSize
= sizeof(struct big
);
771 static const auto smallSize
= sizeof(struct small
);
773 // The pointer modifier used with method lists. When the method
774 // list contains small methods, set the bottom bit of the pointer.
775 // We use that bottom bit elsewhere to distinguish between big
776 // and small methods.
777 struct pointer_modifier
{
778 template <typename ListType
>
779 static method_t
*modify(const ListType
&list
, method_t
*ptr
) {
780 if (list
.flags() & smallMethodListFlag
)
781 return (method_t
*)((uintptr_t)ptr
| 1);
788 return *(struct big
*)this;
793 return (small().inSharedCache()
794 ? (SEL
)small().name
.get()
795 : *(SEL
*)small().name
.get());
800 const char *types() const {
801 return isSmall() ? small().types
.get() : big().types
;
803 IMP
imp(bool needsLock
) const {
805 IMP imp
= remappedImp(needsLock
);
807 imp
= ptrauth_sign_unauthenticated(small().imp
.get(),
808 ptrauth_key_function_pointer
, 0);
814 SEL
getSmallNameAsSEL() const {
815 ASSERT(small().inSharedCache());
816 return (SEL
)small().name
.get();
819 SEL
getSmallNameAsSELRef() const {
820 ASSERT(!small().inSharedCache());
821 return *(SEL
*)small().name
.get();
824 void setName(SEL name
) {
826 ASSERT(!small().inSharedCache());
827 *(SEL
*)small().name
.get() = name
;
833 void setImp(IMP imp
) {
841 objc_method_description
*getDescription() const {
842 return isSmall() ? getSmallDescription() : (struct objc_method_description
*)this;
845 struct SortBySELAddress
:
846 public std::binary_function
<const struct method_t::big
&,
847 const struct method_t::big
&, bool>
849 bool operator() (const struct method_t::big
& lhs
,
850 const struct method_t::big
& rhs
)
851 { return lhs
.name
< rhs
.name
; }
854 method_t
&operator=(const method_t
&other
) {
856 big().name
= other
.name();
857 big().types
= other
.types();
858 big().imp
= other
.imp(false);
865 // *offset was originally 64-bit on some x86_64 platforms.
866 // We read and write only 32 bits of it.
867 // Some metadata provides all 64 bits. This is harmless for unsigned
868 // little-endian values.
869 // Some code uses all 64 bits. class_addIvar() over-allocates the
870 // offset for their benefit.
875 // alignment is sometimes -1; use alignment() instead
876 uint32_t alignment_raw
;
879 uint32_t alignment() const {
880 if (alignment_raw
== ~(uint32_t)0) return 1U << WORD_SHIFT
;
881 return 1 << alignment_raw
;
887 const char *attributes
;
890 // Two bits of entsize are used for fixup markers.
891 // Reserve the top half of entsize for more flags. We never
892 // need entry sizes anywhere close to 64kB.
894 // Currently there is one flag defined: the small method list flag,
895 // method_t::smallMethodListFlag. Other flags are currently ignored.
896 // (NOTE: these bits are only ignored on runtimes that support small
897 // method lists. Older runtimes will treat them as part of the entry
899 struct method_list_t
: entsize_list_tt
<method_t
, method_list_t
, 0xffff0003, method_t::pointer_modifier
> {
900 bool isUniqued() const;
901 bool isFixedUp() const;
904 uint32_t indexOfMethod(const method_t
*meth
) const {
906 (uint32_t)(((uintptr_t)meth
- (uintptr_t)this) / entsize());
911 bool isSmallList() const {
912 return flags() & method_t::smallMethodListFlag
;
915 bool isExpectedSize() const {
917 return entsize() == method_t::smallSize
;
919 return entsize() == method_t::bigSize
;
922 method_list_t
*duplicate() const {
925 dup
= (method_list_t
*)calloc(byteSize(method_t::bigSize
, count
), 1);
926 dup
->entsizeAndFlags
= method_t::bigSize
;
928 dup
= (method_list_t
*)calloc(this->byteSize(), 1);
929 dup
->entsizeAndFlags
= this->entsizeAndFlags
;
931 dup
->count
= this->count
;
932 std::copy(begin(), end(), dup
->begin());
937 struct ivar_list_t
: entsize_list_tt
<ivar_t
, ivar_list_t
, 0> {
938 bool containsIvar(Ivar ivar
) const {
939 return (ivar
>= (Ivar
)&*begin() && ivar
< (Ivar
)&*end());
943 struct property_list_t
: entsize_list_tt
<property_t
, property_list_t
, 0> {
947 typedef uintptr_t protocol_ref_t
; // protocol_t *, but unremapped
949 // Values for protocol_t->flags
950 #define PROTOCOL_FIXED_UP_2 (1<<31) // must never be set by compiler
951 #define PROTOCOL_FIXED_UP_1 (1<<30) // must never be set by compiler
952 #define PROTOCOL_IS_CANONICAL (1<<29) // must never be set by compiler
953 // Bits 0..15 are reserved for Swift's use.
955 #define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
957 struct protocol_t
: objc_object
{
958 const char *mangledName
;
959 struct protocol_list_t
*protocols
;
960 method_list_t
*instanceMethods
;
961 method_list_t
*classMethods
;
962 method_list_t
*optionalInstanceMethods
;
963 method_list_t
*optionalClassMethods
;
964 property_list_t
*instanceProperties
;
965 uint32_t size
; // sizeof(protocol_t)
967 // Fields below this point are not always present on disk.
968 const char **_extendedMethodTypes
;
969 const char *_demangledName
;
970 property_list_t
*_classProperties
;
972 const char *demangledName();
974 const char *nameForLogging() {
975 return demangledName();
978 bool isFixedUp() const;
981 bool isCanonical() const;
982 void clearIsCanonical();
984 # define HAS_FIELD(f) ((uintptr_t)(&f) < ((uintptr_t)this + size))
986 bool hasExtendedMethodTypesField() const {
987 return HAS_FIELD(_extendedMethodTypes
);
989 bool hasDemangledNameField() const {
990 return HAS_FIELD(_demangledName
);
992 bool hasClassPropertiesField() const {
993 return HAS_FIELD(_classProperties
);
998 const char **extendedMethodTypes() const {
999 return hasExtendedMethodTypesField() ? _extendedMethodTypes
: nil
;
1002 property_list_t
*classProperties() const {
1003 return hasClassPropertiesField() ? _classProperties
: nil
;
1007 struct protocol_list_t
{
1008 // count is pointer-sized by accident.
1010 protocol_ref_t list
[0]; // variable-size
1012 size_t byteSize() const {
1013 return sizeof(*this) + count
*sizeof(list
[0]);
1016 protocol_list_t
*duplicate() const {
1017 return (protocol_list_t
*)memdup(this, this->byteSize());
1020 typedef protocol_ref_t
* iterator
;
1021 typedef const protocol_ref_t
* const_iterator
;
1023 const_iterator
begin() const {
1029 const_iterator
end() const {
1030 return list
+ count
;
1033 return list
+ count
;
// class_ro_t interior (struct header is above this chunk): the compiler-emitted
// read-only class metadata — layout, names, base method/protocol/ivar/property
// lists — plus ptrauth-aware access to the method list pointer.
// (NOTE(review): re-wrapped extraction; flags field, several members and
// closing braces are outside/absent from this view.)
1039 uint32_t instanceStart
;
1040 uint32_t instanceSize
;
1046 const uint8_t * ivarLayout
;
// name is atomic because lazily-named (Swift) classes install it at runtime;
// see getName()'s acquire load below.
1050 explicit_atomic
<const char *> name
;
1051 // With ptrauth, this is signed if it points to a small list, but
1052 // may be unsigned if it points to a big list.
1053 void *baseMethodList
;
1054 protocol_list_t
* baseProtocols
;
1055 const ivar_list_t
* ivars
;
1057 const uint8_t * weakIvarLayout
;
1058 property_list_t
*baseProperties
;
1060 // This field exists only when RO_HAS_SWIFT_INITIALIZER is set.
1061 _objc_swiftMetadataInitializer __ptrauth_objc_method_list_imp _swiftMetadataInitializer_NEVER_USE
[0];
// Returns the Swift metadata-initializer callback, guarded by the flag that
// says the trailing field actually exists.
1063 _objc_swiftMetadataInitializer
swiftMetadataInitializer() const {
1064 if (flags
& RO_HAS_SWIFT_INITIALIZER
) {
1065 return _swiftMetadataInitializer_NEVER_USE
[0];
1071 const char *getName() const {
1072 return name
.load(std::memory_order_acquire
);
// Discriminator used when signing small method-list pointers.
1075 static const uint16_t methodListPointerDiscriminator
= 0xC310;
1076 #if 0 // FIXME: enable this when we get a non-empty definition of __ptrauth_objc_method_list_pointer from ptrauth.h.
1077 static_assert(std::is_same
<
1078 void * __ptrauth_objc_method_list_pointer
*,
1079 void * __ptrauth(ptrauth_key_method_list_pointer
, 1, methodListPointerDiscriminator
) *>::value
,
1080 "Method list pointer signing discriminator must match ptrauth.h");
// baseMethods(): returns the method list, authenticating the pointer when
// ptrauth is enabled — except when both this class_ro_t and the list live in
// the (read-only) shared cache, where unsigned pointers are permitted.
1083 method_list_t
*baseMethods() const {
1084 #if __has_feature(ptrauth_calls)
1085 method_list_t
*ptr
= ptrauth_strip((method_list_t
*)baseMethodList
, ptrauth_key_method_list_pointer
);
1089 // Don't auth if the class_ro and the method list are both in the shared cache.
1090 // This is secure since they'll be read-only, and this allows the shared cache
1091 // to cut down on the number of signed pointers it has.
1092 bool roInSharedCache
= objc::inSharedCache((uintptr_t)this);
1093 bool listInSharedCache
= objc::inSharedCache((uintptr_t)ptr
);
1094 if (roInSharedCache
&& listInSharedCache
)
1097 // Auth all other small lists.
1098 if (ptr
->isSmallList())
1099 ptr
= ptrauth_auth_data((method_list_t
*)baseMethodList
,
1100 ptrauth_key_method_list_pointer
,
1101 ptrauth_blend_discriminator(&baseMethodList
,
1102 methodListPointerDiscriminator
));
1105 return (method_list_t
*)baseMethodList
;
// Address-diversified discriminator for this instance's baseMethodList slot.
1109 uintptr_t baseMethodListPtrauthData() const {
1110 return ptrauth_blend_discriminator(&baseMethodList
,
1111 methodListPointerDiscriminator
);
// duplicate(): heap-copies this class_ro_t, including the optional trailing
// Swift-initializer field, then (under ptrauth) re-signs the copied method
// list pointer against the copy's own address.
1114 class_ro_t
*duplicate() const {
1115 bool hasSwiftInitializer
= flags
& RO_HAS_SWIFT_INITIALIZER
;
1117 size_t size
= sizeof(*this);
1118 if (hasSwiftInitializer
)
1119 size
+= sizeof(_swiftMetadataInitializer_NEVER_USE
[0]);
1121 class_ro_t
*ro
= (class_ro_t
*)memdup(this, size
);
1123 if (hasSwiftInitializer
)
1124 ro
->_swiftMetadataInitializer_NEVER_USE
[0] = this->_swiftMetadataInitializer_NEVER_USE
[0];
1126 #if __has_feature(ptrauth_calls)
1127 // Re-sign the method list pointer if it was signed.
1128 // NOTE: It is possible for a signed pointer to have a signature
1129 // that is all zeroes. This is indistinguishable from a raw pointer.
1130 // This code will treat such a pointer as signed and re-sign it. A
1131 // false positive is safe: method list pointers are either authed or
1132 // stripped, so if baseMethods() doesn't expect it to be signed, it
1133 // will ignore the signature.
1134 void *strippedBaseMethodList
= ptrauth_strip(baseMethodList
, ptrauth_key_method_list_pointer
);
1135 void *signedBaseMethodList
= ptrauth_sign_unauthenticated(strippedBaseMethodList
,
1136 ptrauth_key_method_list_pointer
,
1137 baseMethodListPtrauthData());
1138 if (baseMethodList
== signedBaseMethodList
) {
1139 ro
->baseMethodList
= ptrauth_auth_and_resign(baseMethodList
,
1140 ptrauth_key_method_list_pointer
,
1141 baseMethodListPtrauthData(),
1142 ptrauth_key_method_list_pointer
,
1143 ro
->baseMethodListPtrauthData());
1145 // Special case: a class_ro_t in the shared cache pointing to a
1146 // method list in the shared cache will not have a signed pointer,
1147 // but the duplicate will be expected to have a signed pointer since
1148 // it's not in the shared cache. Detect that and sign it.
1149 bool roInSharedCache
= objc::inSharedCache((uintptr_t)this);
1150 bool listInSharedCache
= objc::inSharedCache((uintptr_t)strippedBaseMethodList
);
1151 if (roInSharedCache
&& listInSharedCache
)
1152 ro
->baseMethodList
= ptrauth_sign_unauthenticated(strippedBaseMethodList
,
1153 ptrauth_key_method_list_pointer
,
1154 ro
->baseMethodListPtrauthData());
// Metaclass-only accessor (asserts RO_META).
1161 Class
getNonMetaclass() const {
1162 ASSERT(flags
& RO_META
);
1163 return nonMetaclass
;
1166 const uint8_t *getIvarLayout() const {
1167 if (flags
& RO_META
)
1174 /***********************************************************************
1175 * list_array_tt<Element, List, Ptr>
1176 * Generic implementation for metadata that can be augmented by categories.
1178 * Element is the underlying metadata type (e.g. method_t)
1179 * List is the metadata's list type (e.g. method_list_t)
1180 * List is a template applied to Element to make Element*. Useful for
1181 * applying qualifiers to the pointer type.
1183 * A list_array_tt has one of three values:
 * - empty
1185 * - a pointer to a single list
1186 * - an array of pointers to lists
1188 * countLists/beginLists/endLists iterate the metadata lists
1189 * count/begin/end iterate the underlying metadata elements
1190 **********************************************************************/
// list_array_tt: category-augmentable metadata container. Stores either a
// single List* (the `list` member, constructed at 1274/1275) or, tagged by the
// low bit of arrayAndFlag, a heap array_t of List pointers. Newer (category)
// lists are kept at the FRONT of the array — see attachLists().
// (NOTE(review): re-wrapped extraction; union declaration, several closing
// braces and some method bodies are absent from this view. Code kept
// byte-identical; comments only.)
1191 template <typename Element
, typename List
, template<typename
> class Ptr
>
1192 class list_array_tt
{
// array_t: count + inline array of list pointers; byteSize covers both.
1197 static size_t byteSize(uint32_t count
) {
1198 return sizeof(array_t
) + count
*sizeof(lists
[0]);
1201 return byteSize(count
);
// Flattening iterator: walks [lists, listsEnd) and, within each list, the
// element range [m, mEnd).
1207 const Ptr
<List
> *lists
;
1208 const Ptr
<List
> *listsEnd
;
1209 typename
List::iterator m
, mEnd
;
1212 iterator(const Ptr
<List
> *begin
, const Ptr
<List
> *end
)
1213 : lists(begin
), listsEnd(end
)
1216 m
= (*begin
)->begin();
1217 mEnd
= (*begin
)->end();
1221 const Element
& operator * () const {
1224 Element
& operator * () {
1228 bool operator != (const iterator
& rhs
) const {
1229 if (lists
!= rhs
.lists
) return true;
1230 if (lists
== listsEnd
) return false; // m is undefined
1231 if (m
!= rhs
.m
) return true;
// Advance one element; on exhausting the current list, step to the next.
1235 const iterator
& operator ++ () {
1239 ASSERT(lists
!= listsEnd
);
1241 if (lists
!= listsEnd
) {
1242 m
= (*lists
)->begin();
1243 mEnd
= (*lists
)->end();
// Tagged storage: low bit of arrayAndFlag set => pointer is an array_t*.
1253 uintptr_t arrayAndFlag
;
1256 bool hasArray() const {
1257 return arrayAndFlag
& 1;
1260 array_t
*array() const {
1261 return (array_t
*)(arrayAndFlag
& ~1);
1264 void setArray(array_t
*array
) {
1265 arrayAndFlag
= (uintptr_t)array
| 1;
1269 for (auto cursor
= beginLists(), end
= endLists(); cursor
!= end
; cursor
++)
// Constructors / assignment: default is the empty (single-null-list) state.
1274 list_array_tt() : list(nullptr) { }
1275 list_array_tt(List
*l
) : list(l
) { }
1276 list_array_tt(const list_array_tt
&other
) {
1280 list_array_tt
&operator =(const list_array_tt
&other
) {
1281 if (other
.hasArray()) {
1282 arrayAndFlag
= other
.arrayAndFlag
;
// Total element count across all lists.
1289 uint32_t count() const {
1290 uint32_t result
= 0;
1291 for (auto lists
= beginLists(), end
= endLists();
1295 result
+= (*lists
)->count
;
1300 iterator
begin() const {
1301 return iterator(beginLists(), endLists());
1304 iterator
end() const {
1305 auto e
= endLists();
1306 return iterator(e
, e
);
// countLists with a peek hook (used, presumably, so callers can remap the
// array pointer — TODO confirm against callers).
1309 inline uint32_t countLists(const std::function
<const array_t
* (const array_t
*)> & peek
) const {
1311 return peek(array())->count
;
1319 uint32_t countLists() {
1320 return countLists([](array_t
*x
) { return x
; });
1323 const Ptr
<List
>* beginLists() const {
1325 return array()->lists
;
1331 const Ptr
<List
>* endLists() const {
1333 return array()->lists
+ array()->count
;
// attachLists: prepend addedLists, growing storage through the three state
// transitions (many->many, 0->1, 1->many). Existing entries are shifted up
// by addedCount; new lists land at indices [0, addedCount).
1341 void attachLists(List
* const * addedLists
, uint32_t addedCount
) {
1342 if (addedCount
== 0) return;
1345 // many lists -> many lists
1346 uint32_t oldCount
= array()->count
;
1347 uint32_t newCount
= oldCount
+ addedCount
;
1348 array_t
*newArray
= (array_t
*)malloc(array_t::byteSize(newCount
));
1349 newArray
->count
= newCount
;
1350 array()->count
= newCount
;
// Copy old entries back-to-front into their shifted positions.
1352 for (int i
= oldCount
- 1; i
>= 0; i
--)
1353 newArray
->lists
[i
+ addedCount
] = array()->lists
[i
];
1354 for (unsigned i
= 0; i
< addedCount
; i
++)
1355 newArray
->lists
[i
] = addedLists
[i
];
1360 else if (!list
&& addedCount
== 1) {
1361 // 0 lists -> 1 list
1362 list
= addedLists
[0];
1366 // 1 list -> many lists
1367 Ptr
<List
> oldList
= list
;
1368 uint32_t oldCount
= oldList
? 1 : 0;
1369 uint32_t newCount
= oldCount
+ addedCount
;
1370 setArray((array_t
*)malloc(array_t::byteSize(newCount
)));
1371 array()->count
= newCount
;
1372 if (oldList
) array()->lists
[addedCount
] = oldList
;
1373 for (unsigned i
= 0; i
< addedCount
; i
++)
1374 array()->lists
[i
] = addedLists
[i
];
// Teardown: free each attached list (array case shown; fragment).
1381 for (uint32_t i
= 0; i
< array()->count
; i
++) {
1382 try_free(array()->lists
[i
]);
// duplicateInto: deep-copy this container into `other`, duplicating the
// array_t (when present) and every individual list.
1391 template<typename Other
>
1392 void duplicateInto(Other
&other
) {
1394 array_t
*a
= array();
1395 other
.setArray((array_t
*)memdup(a
, a
->byteSize()));
1396 for (uint32_t i
= 0; i
< a
->count
; i
++) {
1397 other
.array()->lists
[i
] = a
->lists
[i
]->duplicate();
1400 other
.list
= list
->duplicate();
// method_array_t: list_array_tt of method lists, with ptrauth-signed list
// pointers (method_list_t_authed_ptr). Category method lists sit at the front
// of the array (attachLists prepends), so beginLists() is where category
// lists start; endCategoryMethodLists (defined out-of-line) bounds them.
1408 DECLARE_AUTHED_PTR_TEMPLATE(method_list_t
)
1410 class method_array_t
:
1411 public list_array_tt
<method_t
, method_list_t
, method_list_t_authed_ptr
>
1413 typedef list_array_tt
<method_t
, method_list_t
, method_list_t_authed_ptr
> Super
;
1416 method_array_t() : Super() { }
1417 method_array_t(method_list_t
*l
) : Super(l
) { }
1419 const method_list_t_authed_ptr
<method_list_t
> *beginCategoryMethodLists() const {
1420 return beginLists();
1423 const method_list_t_authed_ptr
<method_list_t
> *endCategoryMethodLists(Class cls
) const;
// property_array_t: list_array_tt of property lists, using plain (unsigned)
// RawPtr list pointers.
1427 class property_array_t
:
1428 public list_array_tt
<property_t
, property_list_t
, RawPtr
>
1430 typedef list_array_tt
<property_t
, property_list_t
, RawPtr
> Super
;
1433 property_array_t() : Super() { }
1434 property_array_t(property_list_t
*l
) : Super(l
) { }
// protocol_array_t: list_array_tt of protocol lists, using plain (unsigned)
// RawPtr list pointers.
1438 class protocol_array_t
:
1439 public list_array_tt
<protocol_ref_t
, protocol_list_t
, RawPtr
>
1441 typedef list_array_tt
<protocol_ref_t
, protocol_list_t
, RawPtr
> Super
;
1444 protocol_array_t() : Super() { }
1445 protocol_array_t(protocol_list_t
*l
) : Super(l
) { }
// class_rw_ext_t: the lazily-allocated extension of class_rw_t, created when
// a class needs mutable method/property/protocol arrays (e.g. categories or
// runtime additions). Holds an authed pointer back to the read-only data plus
// the merged lists and a cached demangled name.
1448 struct class_rw_ext_t
{
1449 DECLARE_AUTHED_PTR_TEMPLATE(class_ro_t
)
1450 class_ro_t_authed_ptr
<const class_ro_t
> ro
;
1451 method_array_t methods
;
1452 property_array_t properties
;
1453 protocol_array_t protocols
;
1454 char *demangledName
;
// class_rw_t interior (struct header is absent from this extraction): the
// runtime-writable class data. ro_or_rw_ext is a tagged PointerUnion that is
// EITHER the compiler's class_ro_t (before any runtime mutation) OR a
// class_rw_ext_t allocated on demand; the methods()/properties()/protocols()
// accessors below transparently read through whichever is present.
1459 // Be warned that Symbolication knows the layout of this structure.
1462 #if SUPPORT_INDEXED_ISA
1466 explicit_atomic
<uintptr_t> ro_or_rw_ext
;
1468 Class firstSubclass
;
1469 Class nextSiblingClass
;
1472 using ro_or_rw_ext_t
= objc::PointerUnion
<const class_ro_t
, class_rw_ext_t
, PTRAUTH_STR("class_ro_t"), PTRAUTH_STR("class_rw_ext_t")>;
1474 const ro_or_rw_ext_t
get_ro_or_rwe() const {
1475 return ro_or_rw_ext_t
{ro_or_rw_ext
};
1478 void set_ro_or_rwe(const class_ro_t
*ro
) {
1479 ro_or_rw_ext_t
{ro
, &ro_or_rw_ext
}.storeAt(ro_or_rw_ext
, memory_order_relaxed
);
1482 void set_ro_or_rwe(class_rw_ext_t
*rwe
, const class_ro_t
*ro
) {
1483 // the release barrier is so that the class_rw_ext_t::ro initialization
1484 // is visible to lockless readers
1486 ro_or_rw_ext_t
{rwe
, &ro_or_rw_ext
}.storeAt(ro_or_rw_ext
, memory_order_release
);
// Out-of-line allocator for the rw extension; `deep` requests list copies.
1489 class_rw_ext_t
*extAlloc(const class_ro_t
*ro
, bool deep
= false);
// Atomic flag manipulation on the 32-bit flags word.
1492 void setFlags(uint32_t set
)
1494 __c11_atomic_fetch_or((_Atomic(uint32_t) *)&flags
, set
, __ATOMIC_RELAXED
);
1497 void clearFlags(uint32_t clear
)
1499 __c11_atomic_fetch_and((_Atomic(uint32_t) *)&flags
, ~clear
, __ATOMIC_RELAXED
);
1502 // set and clear must not overlap
1503 void changeFlags(uint32_t set
, uint32_t clear
)
1505 ASSERT((set
& clear
) == 0);
1507 uint32_t oldf
, newf
;
// CAS loop: apply set/clear atomically as one transition.
1510 newf
= (oldf
| set
) & ~clear
;
1511 } while (!OSAtomicCompareAndSwap32Barrier(oldf
, newf
, (volatile int32_t *)&flags
));
// ext(): the rw extension if allocated, else nullptr (dyn_cast).
1514 class_rw_ext_t
*ext() const {
1515 return get_ro_or_rwe().dyn_cast
<class_rw_ext_t
*>(&ro_or_rw_ext
);
1518 class_rw_ext_t
*extAllocIfNeeded() {
1519 auto v
= get_ro_or_rwe();
1520 if (fastpath(v
.is
<class_rw_ext_t
*>())) {
1521 return v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
);
1523 return extAlloc(v
.get
<const class_ro_t
*>(&ro_or_rw_ext
));
1527 class_rw_ext_t
*deepCopy(const class_ro_t
*ro
) {
1528 return extAlloc(ro
, true);
// ro(): reach the read-only data through the extension when present.
1531 const class_ro_t
*ro() const {
1532 auto v
= get_ro_or_rwe();
1533 if (slowpath(v
.is
<class_rw_ext_t
*>())) {
1534 return v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
)->ro
;
1536 return v
.get
<const class_ro_t
*>(&ro_or_rw_ext
);
1539 void set_ro(const class_ro_t
*ro
) {
1540 auto v
= get_ro_or_rwe();
1541 if (v
.is
<class_rw_ext_t
*>()) {
1542 v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
)->ro
= ro
;
// methods()/properties()/protocols(): return the mutable arrays from the
// extension when allocated, else wrap the immutable base lists from ro.
1548 const method_array_t
methods() const {
1549 auto v
= get_ro_or_rwe();
1550 if (v
.is
<class_rw_ext_t
*>()) {
1551 return v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
)->methods
;
1553 return method_array_t
{v
.get
<const class_ro_t
*>(&ro_or_rw_ext
)->baseMethods()};
1557 const property_array_t
properties() const {
1558 auto v
= get_ro_or_rwe();
1559 if (v
.is
<class_rw_ext_t
*>()) {
1560 return v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
)->properties
;
1562 return property_array_t
{v
.get
<const class_ro_t
*>(&ro_or_rw_ext
)->baseProperties
};
1566 const protocol_array_t
protocols() const {
1567 auto v
= get_ro_or_rwe();
1568 if (v
.is
<class_rw_ext_t
*>()) {
1569 return v
.get
<class_rw_ext_t
*>(&ro_or_rw_ext
)->protocols
;
1571 return protocol_array_t
{v
.get
<const class_ro_t
*>(&ro_or_rw_ext
)->baseProtocols
};
// class_data_bits_t: the objc_class `data` word — a class_rw_t* packed with
// FAST_ flag bits. Bit mutators are atomic; data() masks the pointer out with
// FAST_DATA_MASK.
// (NOTE(review): re-wrapped extraction; the `bits` member declaration and
// several braces/#else branches are absent from this view.)
1577 struct class_data_bits_t
{
1580 // Values are the FAST_ flags above.
1583 bool getBit(uintptr_t bit
) const
1588 // Atomically set the bits in `set` and clear the bits in `clear`.
1589 // set and clear must not overlap.
1590 void setAndClearBits(uintptr_t set
, uintptr_t clear
)
1592 ASSERT((set
& clear
) == 0);
1593 uintptr_t newBits
, oldBits
= LoadExclusive(&bits
);
// LL/SC-style retry loop until the combined update lands.
1595 newBits
= (oldBits
| set
) & ~clear
;
1596 } while (slowpath(!StoreReleaseExclusive(&bits
, &oldBits
, newBits
)));
1599 void setBits(uintptr_t set
) {
1600 __c11_atomic_fetch_or((_Atomic(uintptr_t) *)&bits
, set
, __ATOMIC_RELAXED
);
1603 void clearBits(uintptr_t clear
) {
1604 __c11_atomic_fetch_and((_Atomic(uintptr_t) *)&bits
, ~clear
, __ATOMIC_RELAXED
);
// Extract the class_rw_t pointer from the packed word.
1609 class_rw_t
* data() const {
1610 return (class_rw_t
*)(bits
& FAST_DATA_MASK
);
1612 void setData(class_rw_t
*newData
)
1614 ASSERT(!data() || (newData
->flags
& (RW_REALIZING
| RW_FUTURE
)));
1615 // Set during realization or construction only. No locking needed.
1616 // Use a store-release fence because there may be concurrent
1617 // readers of data and data's contents.
1618 uintptr_t newBits
= (bits
& ~FAST_DATA_MASK
) | (uintptr_t)newData
;
1619 atomic_thread_fence(memory_order_release
);
1623 // Get the class's ro data, even in the presence of concurrent realization.
1624 // fixme this isn't really safe without a compiler barrier at least
1625 // and probably a memory barrier when realizeClass changes the data field
1626 const class_ro_t
*safe_ro() const {
1627 class_rw_t
*maybe_rw
= data();
1628 if (maybe_rw
->flags
& RW_REALIZED
) {
1630 return maybe_rw
->ro();
1632 // maybe_rw is actually ro
1633 return (class_ro_t
*)maybe_rw
;
1637 #if SUPPORT_INDEXED_ISA
1638 void setClassArrayIndex(unsigned Idx
) {
1639 // 0 is unused as then we can rely on zero-initialisation from calloc.
1641 data()->index
= Idx
;
1644 void setClassArrayIndex(__unused
unsigned Idx
) {
1648 unsigned classArrayIndex() {
1649 #if SUPPORT_INDEXED_ISA
1650 return data()->index
;
// Swift-class markers. Stable and legacy are mutually exclusive:
// setIsSwiftStable/setIsSwiftLegacy each clear the other bit atomically.
1657 return isSwiftStable() || isSwiftLegacy();
1660 bool isSwiftStable() {
1661 return getBit(FAST_IS_SWIFT_STABLE
);
1663 void setIsSwiftStable() {
1664 setAndClearBits(FAST_IS_SWIFT_STABLE
, FAST_IS_SWIFT_LEGACY
);
1667 bool isSwiftLegacy() {
1668 return getBit(FAST_IS_SWIFT_LEGACY
);
1670 void setIsSwiftLegacy() {
1671 setAndClearBits(FAST_IS_SWIFT_LEGACY
, FAST_IS_SWIFT_STABLE
);
1674 // fixme remove this once the Swift runtime uses the stable bits
1675 bool isSwiftStable_ButAllowLegacyForNow() {
1676 return isAnySwift();
1679 _objc_swiftMetadataInitializer
swiftMetadataInitializer() {
1680 // This function is called on un-realized classes without
1681 // holding any locks.
1682 // Beware of races with other realizers.
1683 return safe_ro()->swiftMetadataInitializer();
// objc_class: the runtime's internal class object (inherits isa from
// objc_object). Layout: superclass (declared above/absent here), cache, bits.
// Most accessors below are thin wrappers that route flag state either through
// the fast bits/cache words or through class_rw_t flags, selected per-flag by
// the FAST_* / RW_* preprocessor configuration.
// (NOTE(review): re-wrapped extraction; many closing braces, #else/#endif
// lines and some declarations are absent from this view. Code kept
// byte-identical; comments only.)
1688 struct objc_class
: objc_object
{
// Classes are never copied or moved.
1689 objc_class(const objc_class
&) = delete;
1690 objc_class(objc_class
&&) = delete;
1691 void operator=(const objc_class
&) = delete;
1692 void operator=(objc_class
&&) = delete;
1695 cache_t cache
; // formerly cache pointer and vtable
1696 class_data_bits_t bits
; // class_rw_t * plus custom rr/alloc flags
// getSuperclass: under ptrauth, authenticate the signed superclass pointer
// (optionally tolerating unsigned values as nil per the TREAT_UNSIGNED_AS_NIL
// configuration); otherwise just strip/return it.
1698 Class
getSuperclass() const {
1699 #if __has_feature(ptrauth_calls)
1700 # if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
1701 if (superclass
== Nil
)
1704 #if SUPERCLASS_SIGNING_TREAT_UNSIGNED_AS_NIL
1705 void *stripped
= ptrauth_strip((void *)superclass
, ISA_SIGNING_KEY
);
1706 if ((void *)superclass
== stripped
) {
1707 void *resigned
= ptrauth_sign_unauthenticated(stripped
, ISA_SIGNING_KEY
, ptrauth_blend_discriminator(&superclass
, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS
));
1708 if ((void *)superclass
!= resigned
)
1713 void *result
= ptrauth_auth_data((void *)superclass
, ISA_SIGNING_KEY
, ptrauth_blend_discriminator(&superclass
, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS
));
1714 return (Class
)result
;
1717 return (Class
)ptrauth_strip((void *)superclass
, ISA_SIGNING_KEY
);
1724 void setSuperclass(Class newSuperclass
) {
1725 #if ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ALL
1726 superclass
= (Class
)ptrauth_sign_unauthenticated((void *)newSuperclass
, ISA_SIGNING_KEY
, ptrauth_blend_discriminator(&superclass
, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS
));
1728 superclass
= newSuperclass
;
1732 class_rw_t
*data() const {
1735 void setData(class_rw_t
*newData
) {
1736 bits
.setData(newData
);
// setInfo/clearInfo/changeInfo: RW flag mutation, valid only once the class
// is realized (or is a future class).
1739 void setInfo(uint32_t set
) {
1740 ASSERT(isFuture() || isRealized());
1741 data()->setFlags(set
);
1744 void clearInfo(uint32_t clear
) {
1745 ASSERT(isFuture() || isRealized());
1746 data()->clearFlags(clear
);
1749 // set and clear must not overlap
1750 void changeInfo(uint32_t set
, uint32_t clear
) {
1751 ASSERT(isFuture() || isRealized());
1752 ASSERT((set
& clear
) == 0);
1753 data()->changeFlags(set
, clear
);
// Custom retain/release tracking: fast path uses a bit in `bits`, fallback
// uses RW_HAS_DEFAULT_RR in class_rw_t. Note the inversion: the stored bit
// means "default", the query means "custom".
1756 #if FAST_HAS_DEFAULT_RR
1757 bool hasCustomRR() const {
1758 return !bits
.getBit(FAST_HAS_DEFAULT_RR
);
1760 void setHasDefaultRR() {
1761 bits
.setBits(FAST_HAS_DEFAULT_RR
);
1763 void setHasCustomRR() {
1764 bits
.clearBits(FAST_HAS_DEFAULT_RR
);
1767 bool hasCustomRR() const {
1768 return !(bits
.data()->flags
& RW_HAS_DEFAULT_RR
);
1770 void setHasDefaultRR() {
1771 bits
.data()->setFlags(RW_HAS_DEFAULT_RR
);
1773 void setHasCustomRR() {
1774 bits
.data()->clearFlags(RW_HAS_DEFAULT_RR
);
// Custom alloc/allocWithZone tracking: same default/custom inversion, fast
// path stored in the cache word.
1778 #if FAST_CACHE_HAS_DEFAULT_AWZ
1779 bool hasCustomAWZ() const {
1780 return !cache
.getBit(FAST_CACHE_HAS_DEFAULT_AWZ
);
1782 void setHasDefaultAWZ() {
1783 cache
.setBit(FAST_CACHE_HAS_DEFAULT_AWZ
);
1785 void setHasCustomAWZ() {
1786 cache
.clearBit(FAST_CACHE_HAS_DEFAULT_AWZ
);
1789 bool hasCustomAWZ() const {
1790 return !(bits
.data()->flags
& RW_HAS_DEFAULT_AWZ
);
1792 void setHasDefaultAWZ() {
1793 bits
.data()->setFlags(RW_HAS_DEFAULT_AWZ
);
1795 void setHasCustomAWZ() {
1796 bits
.data()->clearFlags(RW_HAS_DEFAULT_AWZ
);
// Custom "core" method tracking (same pattern).
1800 #if FAST_CACHE_HAS_DEFAULT_CORE
1801 bool hasCustomCore() const {
1802 return !cache
.getBit(FAST_CACHE_HAS_DEFAULT_CORE
);
1804 void setHasDefaultCore() {
1805 return cache
.setBit(FAST_CACHE_HAS_DEFAULT_CORE
);
1807 void setHasCustomCore() {
1808 return cache
.clearBit(FAST_CACHE_HAS_DEFAULT_CORE
);
1811 bool hasCustomCore() const {
1812 return !(bits
.data()->flags
& RW_HAS_DEFAULT_CORE
);
1814 void setHasDefaultCore() {
1815 bits
.data()->setFlags(RW_HAS_DEFAULT_CORE
);
1817 void setHasCustomCore() {
1818 bits
.data()->clearFlags(RW_HAS_DEFAULT_CORE
);
// C++ constructor/destructor presence (from .cxx_construct/.cxx_destruct).
1822 #if FAST_CACHE_HAS_CXX_CTOR
1824 ASSERT(isRealized());
1825 return cache
.getBit(FAST_CACHE_HAS_CXX_CTOR
);
1827 void setHasCxxCtor() {
1828 cache
.setBit(FAST_CACHE_HAS_CXX_CTOR
);
1832 ASSERT(isRealized());
1833 return bits
.data()->flags
& RW_HAS_CXX_CTOR
;
1835 void setHasCxxCtor() {
1836 bits
.data()->setFlags(RW_HAS_CXX_CTOR
);
1840 #if FAST_CACHE_HAS_CXX_DTOR
1842 ASSERT(isRealized());
1843 return cache
.getBit(FAST_CACHE_HAS_CXX_DTOR
);
1845 void setHasCxxDtor() {
1846 cache
.setBit(FAST_CACHE_HAS_CXX_DTOR
);
1850 ASSERT(isRealized());
1851 return bits
.data()->flags
& RW_HAS_CXX_DTOR
;
1853 void setHasCxxDtor() {
1854 bits
.data()->setFlags(RW_HAS_CXX_DTOR
);
// Raw-isa requirement (instances cannot use non-pointer isa).
1858 #if FAST_CACHE_REQUIRES_RAW_ISA
1859 bool instancesRequireRawIsa() {
1860 return cache
.getBit(FAST_CACHE_REQUIRES_RAW_ISA
);
1862 void setInstancesRequireRawIsa() {
1863 cache
.setBit(FAST_CACHE_REQUIRES_RAW_ISA
);
1865 #elif SUPPORT_NONPOINTER_ISA
1866 bool instancesRequireRawIsa() {
1867 return bits
.data()->flags
& RW_REQUIRES_RAW_ISA
;
1869 void setInstancesRequireRawIsa() {
1870 bits
.data()->setFlags(RW_REQUIRES_RAW_ISA
);
1873 bool instancesRequireRawIsa() {
1876 void setInstancesRequireRawIsa() {
1880 void setInstancesRequireRawIsaRecursively(bool inherited
= false);
1881 void printInstancesRequireRawIsa(bool inherited
);
// Preoptimized (shared-cache) method cache opt-outs.
1883 #if CONFIG_USE_PREOPT_CACHES
1884 bool allowsPreoptCaches() const {
1885 return !(bits
.data()->flags
& RW_NOPREOPT_CACHE
);
1887 bool allowsPreoptInlinedSels() const {
1888 return !(bits
.data()->flags
& RW_NOPREOPT_SELS
);
1890 void setDisallowPreoptCaches() {
1891 bits
.data()->setFlags(RW_NOPREOPT_CACHE
| RW_NOPREOPT_SELS
);
1893 void setDisallowPreoptInlinedSels() {
1894 bits
.data()->setFlags(RW_NOPREOPT_SELS
);
1896 void setDisallowPreoptCachesRecursively(const char *why
);
1897 void setDisallowPreoptInlinedSelsRecursively(const char *why
);
1899 bool allowsPreoptCaches() const { return false; }
1900 bool allowsPreoptInlinedSels() const { return false; }
1901 void setDisallowPreoptCaches() { }
1902 void setDisallowPreoptInlinedSels() { }
1903 void setDisallowPreoptCachesRecursively(const char *why
) { }
1904 void setDisallowPreoptInlinedSelsRecursively(const char *why
) { }
1907 bool canAllocNonpointer() {
1908 ASSERT(!isFuture());
1909 return !instancesRequireRawIsa();
// Swift-class queries forwarded to class_data_bits_t.
1912 bool isSwiftStable() {
1913 return bits
.isSwiftStable();
1916 bool isSwiftLegacy() {
1917 return bits
.isSwiftLegacy();
1921 return bits
.isAnySwift();
1924 bool isSwiftStable_ButAllowLegacyForNow() {
1925 return bits
.isSwiftStable_ButAllowLegacyForNow();
// Swift class flags live in the word immediately after `bits` in the Swift
// class layout (see swift_class_t below).
1928 uint32_t swiftClassFlags() {
1929 return *(uint32_t *)(&bits
+ 1);
1932 bool usesSwiftRefcounting() {
1933 if (!isSwiftStable()) return false;
1934 return bool(swiftClassFlags() & 2); //ClassFlags::UsesSwiftRefcounting
1937 bool canCallSwiftRR() {
1938 // !hasCustomCore() is being used as a proxy for isInitialized(). All
1939 // classes with Swift refcounting are !hasCustomCore() (unless there are
1940 // category or swizzling shenanigans), but that bit is not set until a
1941 // class is initialized. Checking isInitialized requires an extra
1942 // indirection that we want to avoid on RR fast paths.
1944 // In the unlikely event that someone causes a class with Swift
1945 // refcounting to be hasCustomCore(), we'll fall back to sending -retain
1946 // or -release, which is still correct.
1947 return !hasCustomCore() && usesSwiftRefcounting();
// Stub classes are encoded as tiny isa values in [1, 16).
1950 bool isStubClass() const {
1951 uintptr_t isa
= (uintptr_t)isaBits();
1952 return 1 <= isa
&& isa
< 16;
1955 // Swift stable ABI built for old deployment targets looks weird.
1956 // The is-legacy bit is set for compatibility with old libobjc.
1957 // We are on a "new" deployment target so we need to rewrite that bit.
1958 // These stable-with-legacy-bit classes are distinguished from real
1959 // legacy classes using another bit in the Swift data
1960 // (ClassFlags::IsSwiftPreStableABI)
1962 bool isUnfixedBackwardDeployingStableSwift() {
1963 // Only classes marked as Swift legacy need apply.
1964 if (!bits
.isSwiftLegacy()) return false;
1966 // Check the true legacy vs stable distinguisher.
1967 // The low bit of Swift's ClassFlags is SET for true legacy
1968 // and UNSET for stable pretending to be legacy.
1969 bool isActuallySwiftLegacy
= bool(swiftClassFlags() & 1);
1970 return !isActuallySwiftLegacy
;
1973 void fixupBackwardDeployingStableSwift() {
1974 if (isUnfixedBackwardDeployingStableSwift()) {
1975 // Class really is stable Swift, pretending to be pre-stable.
1977 bits
.setIsSwiftStable();
1981 _objc_swiftMetadataInitializer
swiftMetadataInitializer() {
1982 return bits
.swiftMetadataInitializer();
1985 // Return YES if the class's ivars are managed by ARC,
1986 // or the class is MRC but has ARC-style weak ivars.
1987 bool hasAutomaticIvars() {
1988 return data()->ro()->flags
& (RO_IS_ARC
| RO_HAS_WEAK_WITHOUT_ARC
);
1991 // Return YES if the class's ivars are managed by ARC.
1993 return data()->ro()->flags
& RO_IS_ARC
;
1997 bool forbidsAssociatedObjects() {
1998 return (data()->flags
& RW_FORBIDS_ASSOCIATED_OBJECTS
);
2001 #if SUPPORT_NONPOINTER_ISA
2002 // Tracked in non-pointer isas; not tracked otherwise
2004 bool instancesHaveAssociatedObjects() {
2005 // this may be an unrealized future class in the CF-bridged case
2006 ASSERT(isFuture() || isRealized());
2007 return data()->flags
& RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS
;
2010 void setInstancesHaveAssociatedObjects() {
2011 // this may be an unrealized future class in the CF-bridged case
2012 ASSERT(isFuture() || isRealized());
2013 setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS
);
2017 bool shouldGrowCache() {
2021 void setShouldGrowCache(bool) {
2022 // fixme good or bad for memory use?
// +initialize state lives on the metaclass.
2025 bool isInitializing() {
2026 return getMeta()->data()->flags
& RW_INITIALIZING
;
2029 void setInitializing() {
2030 ASSERT(!isMetaClass());
2031 ISA()->setInfo(RW_INITIALIZING
);
2034 bool isInitialized() {
2035 return getMeta()->data()->flags
& RW_INITIALIZED
;
2038 void setInitialized();
2041 ASSERT(isRealized());
2042 return true; // any class registered for +load is definitely loadable
2045 IMP
getLoadMethod();
2047 // Locking: To prevent concurrent realization, hold runtimeLock.
2048 bool isRealized() const {
2049 return !isStubClass() && (data()->flags
& RW_REALIZED
);
2052 // Returns true if this is an unrealized future class.
2053 // Locking: To prevent concurrent realization, hold runtimeLock.
2054 bool isFuture() const {
2057 return data()->flags
& RW_FUTURE
;
2060 bool isMetaClass() const {
2061 ASSERT_THIS_NOT_NULL
;
2062 ASSERT(isRealized());
2064 return cache
.getBit(FAST_CACHE_META
);
2066 return data()->flags
& RW_META
;
2070 // Like isMetaClass, but also valid on un-realized classes
2071 bool isMetaClassMaybeUnrealized() {
// These asserts guarantee RW_META can be read through either rw or ro flags.
2072 static_assert(offsetof(class_rw_t
, flags
) == offsetof(class_ro_t
, flags
), "flags alias");
2073 static_assert(RO_META
== RW_META
, "flags alias");
2076 return data()->flags
& RW_META
;
2079 // NOT identical to this->ISA when this is a metaclass
2081 if (isMetaClassMaybeUnrealized()) return (Class
)this;
2082 else return this->ISA();
2085 bool isRootClass() {
2086 return getSuperclass() == nil
;
2088 bool isRootMetaclass() {
2089 return ISA() == (Class
)this;
2092 // If this class does not have a name already, we can ask Swift to construct one for us.
2093 const char *installMangledNameForLazilyNamedClass();
2095 // Get the class's mangled name, or NULL if the class has a lazy
2096 // name that hasn't been created yet.
2097 const char *nonlazyMangledName() const {
2098 return bits
.safe_ro()->getName();
2101 const char *mangledName() {
2102 // fixme can't assert locks here
2103 ASSERT_THIS_NOT_NULL
;
2105 const char *result
= nonlazyMangledName();
2108 // This class lazily instantiates its name. Emplace and
2110 result
= installMangledNameForLazilyNamedClass();
2116 const char *demangledName(bool needsLock
);
2117 const char *nameForLogging();
2119 // May be unaligned depending on class's ivars.
2120 uint32_t unalignedInstanceStart() const {
2121 ASSERT(isRealized());
2122 return data()->ro()->instanceStart
;
2125 // Class's instance start rounded up to a pointer-size boundary.
2126 // This is used for ARC layout bitmaps.
2127 uint32_t alignedInstanceStart() const {
2128 return word_align(unalignedInstanceStart());
2131 // May be unaligned depending on class's ivars.
2132 uint32_t unalignedInstanceSize() const {
2133 ASSERT(isRealized());
2134 return data()->ro()->instanceSize
;
2137 // Class's ivar size rounded up to a pointer-size boundary.
2138 uint32_t alignedInstanceSize() const {
2139 return word_align(unalignedInstanceSize());
// instanceSize: fast path reads the cache's precomputed size; slow path
// aligns and applies CF's 16-byte minimum.
2142 inline size_t instanceSize(size_t extraBytes
) const {
2143 if (fastpath(cache
.hasFastInstanceSize(extraBytes
))) {
2144 return cache
.fastInstanceSize(extraBytes
);
2147 size_t size
= alignedInstanceSize() + extraBytes
;
2148 // CF requires all objects be at least 16 bytes.
2149 if (size
< 16) size
= 16;
// setInstanceSize: only legal mid-realization; writing through the const
// class_ro_t requires that it is a private copy (RW_COPIED_RO).
2153 void setInstanceSize(uint32_t newSize
) {
2154 ASSERT(isRealized());
2155 ASSERT(data()->flags
& RW_REALIZING
);
2156 auto ro
= data()->ro();
2157 if (newSize
!= ro
->instanceSize
) {
2158 ASSERT(data()->flags
& RW_COPIED_RO
);
2159 *const_cast<uint32_t *>(&ro
->instanceSize
) = newSize
;
2161 cache
.setFastInstanceSize(newSize
);
2164 void chooseClassArrayIndex();
2166 void setClassArrayIndex(unsigned Idx
) {
2167 bits
.setClassArrayIndex(Idx
);
2170 unsigned classArrayIndex() {
2171 return bits
.classArrayIndex();
// swift_class_t: Swift's class metadata layout extending objc_class with
// Swift-specific fields. baseAddress() recovers the start of the full Swift
// metadata record, which begins classAddressOffset bytes before `this`.
2176 struct swift_class_t
: objc_class
{
2178 uint32_t instanceAddressOffset
;
2179 uint32_t instanceSize
;
2180 uint16_t instanceAlignMask
;
2184 uint32_t classAddressOffset
;
2188 void *baseAddress() {
2189 return (void *)((uint8_t *)this - classAddressOffset
);
// category_t interior (struct header is above/absent in this extraction):
// a category's method/protocol/property lists. methodsForMeta and
// protocolsForMeta select the list that applies to the metaclass vs the
// class; categories cannot add protocols to metaclasses (nullptr).
2197 WrappedPtr
<method_list_t
, PtrauthStrip
> instanceMethods
;
2198 WrappedPtr
<method_list_t
, PtrauthStrip
> classMethods
;
2199 struct protocol_list_t
*protocols
;
2200 struct property_list_t
*instanceProperties
;
2201 // Fields below this point are not always present on disk.
2202 struct property_list_t
*_classProperties
;
2204 method_list_t
*methodsForMeta(bool isMeta
) {
2205 if (isMeta
) return classMethods
;
2206 else return instanceMethods
;
// Out-of-line: property selection also needs header_info to know whether the
// on-disk category actually carries the optional _classProperties field.
2209 property_list_t
*propertiesForMeta(bool isMeta
, struct header_info
*hi
);
2211 protocol_list_t
*protocolsForMeta(bool isMeta
) {
2212 if (isMeta
) return nullptr;
2213 else return protocols
;
// objc_super2 (fragment): super-dispatch descriptor carrying the current
// class. (Receiver field, if any, is absent from this extraction — confirm
// against the full header.)
2217 struct objc_super2
{
2219 Class current_class
;
// message_ref_t (fragment): member declarations are absent from this
// extraction; only the struct header is visible here.
2222 struct message_ref_t
{
// Looks up a method on a protocol, distinguishing required vs optional and
// instance vs class methods; `recursive` extends the search to inherited
// protocols.
2228 extern Method
protocol_getMethod(protocol_t
*p
, SEL sel
, bool isRequiredMethod
, bool isInstanceMethod
, bool recursive
);