/*
 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H

#include "PointerUnion.h"
#include <type_traits>

// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.
// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
// class is a metaclass
#define RO_META               (1<<0)
// class is a root class
#define RO_ROOT               (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS  (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN             (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION          (1<<5)
// class has ro field for Swift metadata initializer callback
#define RO_HAS_SWIFT_INITIALIZER (1<<6)
// class compiled with ARC
#define RO_IS_ARC             (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY  (1<<8)
// class is not ARC but has ARC-style weak ivar layout
#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
// class does not allow associated objects on instances
#define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE        (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE             (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED           (1<<31)

// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.
// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED           (1<<31)
// class is unresolved future class
#define RW_FUTURE             (1<<30)
// class is initialized
#define RW_INITIALIZED        (1<<29)
// class is initializing
#define RW_INITIALIZING       (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO          (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING       (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED        (1<<25)
// available for use; was RW_FINALIZE_ON_MAIN_THREAD
// #define RW_24 (1<<24)
// class +load has been called
#define RW_LOADED             (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1<<21)
// class does not allow associated objects on its instances
#define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING          (1<<19)

#if CONFIG_USE_PREOPT_CACHES
// this class and its descendants can't have preopt caches with inlined sels
#define RW_NOPREOPT_SELS      (1<<2)
// this class and its descendants can't have preopt caches
#define RW_NOPREOPT_CACHE     (1<<1)
#endif

// class is a metaclass (copied from ro)
#define RW_META               RO_META // (1<<0)


// NOTE: MORE RW_ FLAGS DEFINED BELOW

// Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
// or class_t->bits (FAST_*).
//
// FAST_* and FAST_CACHE_* are stored on the class, reducing pointer indirection.

#if __LP64__

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY    (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE    (1UL<<1)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL

#if __arm64__
// class or superclass has .cxx_construct/.cxx_destruct implementation
//   FAST_CACHE_HAS_CXX_DTOR is the first bit so that setting it in
//   isa_t::has_cxx_dtor is a single bfi
#define FAST_CACHE_HAS_CXX_DTOR       (1<<0)
#define FAST_CACHE_HAS_CXX_CTOR       (1<<1)
// Denormalized RO_META to avoid an indirection
#define FAST_CACHE_META               (1<<2)
#else
// Denormalized RO_META to avoid an indirection
#define FAST_CACHE_META               (1<<0)
// class or superclass has .cxx_construct/.cxx_destruct implementation
//   FAST_CACHE_HAS_CXX_DTOR is chosen to alias with isa_t::has_cxx_dtor
#define FAST_CACHE_HAS_CXX_CTOR       (1<<1)
#define FAST_CACHE_HAS_CXX_DTOR       (1<<2)
#endif

// Fast Alloc fields:
//   This stores the word-aligned size of instances + "ALLOC_DELTA16",
//   or 0 if the instance size doesn't fit.
//
//   These bits occupy the same bit positions as the instance size, so
//   the size can be extracted with a simple mask operation.
//
//   FAST_CACHE_ALLOC_MASK16 allows extracting the instance size rounded
//   up to the next 16-byte boundary, which is a fastpath for
//   _objc_rootAllocWithZone()
#define FAST_CACHE_ALLOC_MASK         0x1ff8
#define FAST_CACHE_ALLOC_MASK16       0x1ff0
#define FAST_CACHE_ALLOC_DELTA16      0x0008

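// Worked example of the DELTA16 trick above (editor's sketch, not part of
// the original header). For a word-aligned instance size of 40 bytes,
// setFastInstanceSize() (defined below in cache_t) stores
//     word_align(40) + FAST_CACHE_ALLOC_DELTA16 = 40 + 8 = 48 = 0x30.
// Adding 8 and then clearing the low 4 bits rounds up to a multiple of 16,
// so a single AND recovers the 16-byte-aligned allocation size:
//     0x30 & FAST_CACHE_ALLOC_MASK16 = 0x30 = 48 = align16(40)
// which is exactly the fastpath value _objc_rootAllocWithZone() wants.
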
// class's instances require raw isa
#define FAST_CACHE_REQUIRES_RAW_ISA   (1<<13)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define FAST_CACHE_HAS_DEFAULT_AWZ    (1<<14)
// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
#define FAST_CACHE_HAS_DEFAULT_CORE   (1<<15)

#else

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
#if SUPPORT_NONPOINTER_ISA
#define RW_REQUIRES_RAW_ISA   (1<<15)
#endif
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define RW_HAS_DEFAULT_RR     (1<<14)
// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
#define RW_HAS_DEFAULT_CORE   (1<<13)

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY  (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE  (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL

#endif // __LP64__

// The Swift ABI requires that these bits be defined like this on all platforms.
static_assert(FAST_IS_SWIFT_LEGACY == 1, "resistance is futile");
static_assert(FAST_IS_SWIFT_STABLE == 2, "resistance is futile");


#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bit values
#else
typedef uint16_t mask_t;
#endif
typedef uintptr_t SEL;

struct swift_class_t;

enum Atomicity { Atomic = true, NotAtomic = false };
enum IMPEncoding { Encoded = true, Raw = false };

struct bucket_t {
private:
    // IMP-first is better for arm64e ptrauth and no worse for arm64.
    // SEL-first is better for armv7* and i386 and x86_64.
#if __arm64__
    explicit_atomic<uintptr_t> _imp;
    explicit_atomic<SEL> _sel;
#else
    explicit_atomic<SEL> _sel;
    explicit_atomic<uintptr_t> _imp;
#endif

    // Compute the ptrauth signing modifier from &_imp, newSel, and cls.
    uintptr_t modifierForSEL(bucket_t *base, SEL newSel, Class cls) const {
        return (uintptr_t)base ^ (uintptr_t)newSel ^ (uintptr_t)cls;
    }

    // Sign newImp, with &_imp, newSel, and cls as modifiers.
    uintptr_t encodeImp(UNUSED_WITHOUT_PTRAUTH bucket_t *base, IMP newImp, UNUSED_WITHOUT_PTRAUTH SEL newSel, Class cls) const {
        if (!newImp) return 0;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
        return (uintptr_t)
            ptrauth_auth_and_resign(newImp,
                                    ptrauth_key_function_pointer, 0,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(base, newSel, cls));
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        return (uintptr_t)newImp ^ (uintptr_t)cls;
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
        return (uintptr_t)newImp;
#else
#error Unknown method cache IMP encoding.
#endif
    }

public:
    static inline size_t offsetOfSel() { return offsetof(bucket_t, _sel); }
    inline SEL sel() const { return _sel.load(memory_order_relaxed); }

#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
#define MAYBE_UNUSED_ISA
#else
#define MAYBE_UNUSED_ISA __attribute__((unused))
#endif
    inline IMP rawImp(MAYBE_UNUSED_ISA objc_class *cls) const {
        uintptr_t imp = _imp.load(memory_order_relaxed);
        if (!imp) return nil;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        imp ^= (uintptr_t)cls;
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
#else
#error Unknown method cache IMP encoding.
#endif
        return (IMP)imp;
    }

    inline IMP imp(UNUSED_WITHOUT_PTRAUTH bucket_t *base, Class cls) const {
        uintptr_t imp = _imp.load(memory_order_relaxed);
        if (!imp) return nil;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
        SEL sel = _sel.load(memory_order_relaxed);
        return (IMP)
            ptrauth_auth_and_resign((const void *)imp,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(base, sel, cls),
                                    ptrauth_key_function_pointer, 0);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        return (IMP)(imp ^ (uintptr_t)cls);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
        return (IMP)imp;
#else
#error Unknown method cache IMP encoding.
#endif
    }

    template <Atomicity, IMPEncoding>
    void set(bucket_t *base, SEL newSel, IMP newImp, Class cls);
};

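// Editor's note (illustrative, not part of the original header): under
// CACHE_IMP_ENCODING_ISA_XOR, encodeImp() and imp() round-trip because
// XOR with the class pointer is its own inverse:
//
//     uintptr_t stored  = (uintptr_t)newImp ^ (uintptr_t)cls;   // encode
//     IMP       decoded = (IMP)(stored ^ (uintptr_t)cls);       // decode
//     // decoded == newImp
//
// Decoding a bucket against the wrong class therefore yields a garbage
// pointer rather than a callable IMP.
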
/* dyld_shared_cache_builder and obj-C agree on these definitions */
enum {
    OBJC_OPT_METHODNAME_START      = 0,
    OBJC_OPT_METHODNAME_END        = 1,
    OBJC_OPT_INLINED_METHODS_START = 2,
    OBJC_OPT_INLINED_METHODS_END   = 3,

    __OBJC_OPT_OFFSETS_COUNT,
};

#if CONFIG_USE_PREOPT_CACHES
extern uintptr_t objc_opt_offsets[__OBJC_OPT_OFFSETS_COUNT];
#endif

/* dyld_shared_cache_builder and obj-C agree on these definitions */
struct preopt_cache_entry_t {
    uint32_t sel_offs;
    uint32_t imp_offs;
};

/* dyld_shared_cache_builder and obj-C agree on these definitions */
struct preopt_cache_t {
    int32_t  fallback_class_offset;
    union {
        struct {
            uint16_t shift       :  5;
            uint16_t mask        : 11;
        };
        uint16_t hash_params;
    };
    uint16_t occupied    : 14;
    uint16_t has_inlines :  1;
    uint16_t bit_one     :  1;
    preopt_cache_entry_t entries[];

    inline int capacity() const {
        return mask + 1;
    }
};

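// Editor's sketch of how a preoptimized cache is probed (the authoritative
// lookup lives in the objc_msgSend assembly, e.g. objc-msg-arm64.s; this is
// an approximation): the selector address is hashed with the stored
// shift/mask pair to index entries[]:
//
//     uintptr_t index = ((uintptr_t)sel >> cache->shift) & cache->mask;
//     preopt_cache_entry_t entry = cache->entries[index];
//
// sel_offs and imp_offs are offsets from known bases rather than raw
// pointers, which keeps the table valid wherever the shared cache slides.
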
// returns:
// - the cached IMP when one is found
// - nil if there's no cached value and the cache is dynamic
// - `value_on_constant_cache_miss` if there's no cached value and the cache is preoptimized
extern "C" IMP cache_getImp(Class cls, SEL sel, IMP value_on_constant_cache_miss = nil);

struct cache_t {
private:
    explicit_atomic<uintptr_t> _bucketsAndMaybeMask;
    union {
        struct {
            explicit_atomic<mask_t>    _maybeMask;
#if __LP64__
            uint16_t                   _flags;
#endif
            uint16_t                   _occupied;
        };
        explicit_atomic<preopt_cache_t *> _originalPreoptCache;
    };

#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
    // _bucketsAndMaybeMask is a buckets_t pointer
    // _maybeMask is the buckets mask

    static constexpr uintptr_t bucketsMask = ~0ul;
    static_assert(!CONFIG_USE_PREOPT_CACHES, "preoptimized caches not supported");
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
    static constexpr uintptr_t maskShift = 48;
    static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;
    static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << maskShift) - 1;

    static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
#if CONFIG_USE_PREOPT_CACHES
    static constexpr uintptr_t preoptBucketsMarker = 1ul;
    static constexpr uintptr_t preoptBucketsMask = bucketsMask & ~preoptBucketsMarker;
#endif
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
    // _bucketsAndMaybeMask is a buckets_t pointer in the low 48 bits
    // _maybeMask is unused, the mask is stored in the top 16 bits.

    // How much the mask is shifted by.
    static constexpr uintptr_t maskShift = 48;

    // Additional bits after the mask which must be zero. msgSend
    // takes advantage of these additional bits to construct the value
    // `mask << 4` from `_bucketsAndMaybeMask` in a single instruction.
    static constexpr uintptr_t maskZeroBits = 4;

    // The largest mask value we can store.
    static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;

    // The mask applied to `_bucketsAndMaybeMask` to retrieve the buckets pointer.
    static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;

    // Ensure we have enough bits for the buckets pointer.
    static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS,
                  "Bucket field doesn't have enough bits for arbitrary pointers.");

#if CONFIG_USE_PREOPT_CACHES
    static constexpr uintptr_t preoptBucketsMarker = 1ul;
#if __has_feature(ptrauth_calls)
    // 63..60: hash_mask_shift
    // 59..55: hash_shift
    // 54.. 1: buckets ptr + auth
    //      0: always 1
    static constexpr uintptr_t preoptBucketsMask = 0x007ffffffffffffe;
    static inline uintptr_t preoptBucketsHashParams(const preopt_cache_t *cache) {
        uintptr_t value = (uintptr_t)cache->shift << 55;
        // masks have 11 bits but can be 0, so we compute
        // the right shift for 0x7fff rather than 0xffff
        return value | ((objc::mask16ShiftBits(cache->mask) - 1) << 60);
    }
#else
    // 63..53: hash_mask
    // 52..48: hash_shift
    // 47.. 1: buckets ptr
    //      0: always 1
    static constexpr uintptr_t preoptBucketsMask = 0x0000fffffffffffe;
    static inline uintptr_t preoptBucketsHashParams(const preopt_cache_t *cache) {
        return (uintptr_t)cache->hash_params << 48;
    }
#endif
#endif // CONFIG_USE_PREOPT_CACHES
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    // _bucketsAndMaybeMask is a buckets_t pointer in the top 28 bits
    // _maybeMask is unused, the mask length is stored in the low 4 bits

    static constexpr uintptr_t maskBits = 4;
    static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
    static constexpr uintptr_t bucketsMask = ~maskMask;
    static_assert(!CONFIG_USE_PREOPT_CACHES, "preoptimized caches not supported");
#else
#error Unknown cache mask storage type.
#endif

    bool isConstantEmptyCache() const;
    bool canBeFreed() const;
    mask_t mask() const;

#if CONFIG_USE_PREOPT_CACHES
    void initializeToPreoptCacheInDisguise(const preopt_cache_t *cache);
    const preopt_cache_t *disguised_preopt_cache() const;
#endif

    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);

    void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
    void collect_free(bucket_t *oldBuckets, mask_t oldCapacity);

    static bucket_t *emptyBuckets();
    static bucket_t *allocateBuckets(mask_t newCapacity);
    static bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);
    void bad_cache(id receiver, SEL sel) __attribute__((noreturn, cold));

public:
    // The following four fields are public for objcdt's use only.
    // objcdt reaches into these fields while the process is suspended,
    // hence doesn't care about locks and other pesky little details,
    // and can safely use them.
    unsigned capacity() const;
    struct bucket_t *buckets() const;
    Class cls() const;

#if CONFIG_USE_PREOPT_CACHES
    const preopt_cache_t *preopt_cache() const;
#endif

    mask_t occupied() const;
    void initializeToEmpty();

#if CONFIG_USE_PREOPT_CACHES
    bool isConstantOptimizedCache(bool strict = false, uintptr_t empty_addr = (uintptr_t)&_objc_empty_cache) const;
    bool shouldFlush(SEL sel, IMP imp) const;
    bool isConstantOptimizedCacheWithInlinedSels() const;
    Class preoptFallbackClass() const;
    void maybeConvertToPreoptimized();
    void initializeToEmptyOrPreoptimizedInDisguise();
#else
    inline bool isConstantOptimizedCache(bool strict = false, uintptr_t empty_addr = 0) const { return false; }
    inline bool shouldFlush(SEL sel, IMP imp) const {
        return cache_getImp(cls(), sel) == imp;
    }
    inline bool isConstantOptimizedCacheWithInlinedSels() const { return false; }
    inline void initializeToEmptyOrPreoptimizedInDisguise() { initializeToEmpty(); }
#endif

    void insert(SEL sel, IMP imp, id receiver);
    void copyCacheNolock(objc_imp_cache_entry *buffer, int len);
    void destroy();
    void eraseNolock(const char *func);

    static void init();
    static void collectNolock(bool collectALot);
    static size_t bytesForCapacity(uint32_t cap);

#if __LP64__
    bool getBit(uint16_t flags) const {
        return _flags & flags;
    }
    void setBit(uint16_t set) {
        __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags, set, __ATOMIC_RELAXED);
    }
    void clearBit(uint16_t clear) {
        __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags, ~clear, __ATOMIC_RELAXED);
    }
#endif

#if FAST_CACHE_ALLOC_MASK
    bool hasFastInstanceSize(size_t extra) const
    {
        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        }
        return _flags & FAST_CACHE_ALLOC_MASK;
    }

    size_t fastInstanceSize(size_t extra) const
    {
        ASSERT(hasFastInstanceSize(extra));

        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        } else {
            size_t size = _flags & FAST_CACHE_ALLOC_MASK;
            // remove the FAST_CACHE_ALLOC_DELTA16 that was added
            // by setFastInstanceSize
            return align16(size + extra - FAST_CACHE_ALLOC_DELTA16);
        }
    }

    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        uint16_t newBits = _flags & ~FAST_CACHE_ALLOC_MASK;
        uint16_t sizeBits;

        // Adding FAST_CACHE_ALLOC_DELTA16 allows FAST_CACHE_ALLOC_MASK16
        // to yield the proper 16-byte aligned allocation size with a single mask
        sizeBits = word_align(newSize) + FAST_CACHE_ALLOC_DELTA16;
        sizeBits &= FAST_CACHE_ALLOC_MASK;
        if (newSize <= sizeBits) {
            newBits |= sizeBits;
        }
        _flags = newBits;
    }
#else
    bool hasFastInstanceSize(size_t extra) const {
        return false;
    }
    size_t fastInstanceSize(size_t extra) const {
        abort();
    }
    void setFastInstanceSize(size_t extra) {
        // nothing
    }
#endif
};


// classref_t is unremapped class_t*
typedef struct classref * classref_t;


/***********************************************************************
* RelativePointer<T>
* A pointer stored as an offset from the address of that offset.
*
* The target address is computed by taking the address of this struct
* and adding the offset stored within it. The offset is a signed
* 32-bit value, giving a range of ±2GB.
**********************************************************************/
template <typename T>
struct RelativePointer: nocopy_t {
    int32_t offset;

    T get() const {
        if (offset == 0)
            return nullptr;
        uintptr_t base = (uintptr_t)&offset;
        uintptr_t signExtendedOffset = (uintptr_t)(intptr_t)offset;
        uintptr_t pointer = base + signExtendedOffset;
        return (T)pointer;
    }
};

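// Worked example (editor's sketch, not part of the original header): if a
// RelativePointer<const char *>'s offset field lives at 0x100004000 and
// holds -0x1000, get() returns
//     0x100004000 + (intptr_t)-0x1000 = 0x100003000.
// Because the target is encoded relative to the field's own address, the
// containing image can slide without this field needing a rebase fixup.
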

#ifdef __PTRAUTH_INTRINSICS__
#   define StubClassInitializerPtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0xc671)
#else
#   define StubClassInitializerPtrauth
#endif
struct stub_class_t {
    uintptr_t isa;
    _objc_swiftMetadataInitializer StubClassInitializerPtrauth initializer;
};

// A pointer modifier that does nothing to the pointer.
struct PointerModifierNop {
    template <typename ListType, typename T>
    static T *modify(__unused const ListType &list, T *ptr) { return ptr; }
};

/***********************************************************************
* entsize_list_tt<Element, List, FlagMask, PointerModifier>
* Generic implementation of an array of non-fragile structs.
*
* Element is the struct type (e.g. method_t)
* List is the specialization of entsize_list_tt (e.g. method_list_t)
* FlagMask is used to stash extra bits in the entsize field
*   (e.g. method list fixup markers)
* PointerModifier is applied to the element pointers retrieved from
* the array.
**********************************************************************/
template <typename Element, typename List, uint32_t FlagMask, typename PointerModifier = PointerModifierNop>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;
    uint32_t count;

    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    Element& getOrEnd(uint32_t i) const {
        ASSERT(i <= count);
        return *PointerModifier::modify(*this, (Element *)((uint8_t *)this + sizeof(*this) + i*entsize()));
    }
    Element& get(uint32_t i) const {
        ASSERT(i < count);
        return getOrEnd(i);
    }

    size_t byteSize() const {
        return byteSize(entsize(), count);
    }

    static size_t byteSize(uint32_t entsize, uint32_t count) {
        return sizeof(entsize_list_tt) + count*entsize;
    }

    struct iterator;
    const iterator begin() const {
        return iterator(*static_cast<const List*>(this), 0);
    }
    iterator begin() {
        return iterator(*static_cast<const List*>(this), 0);
    }
    const iterator end() const {
        return iterator(*static_cast<const List*>(this), count);
    }
    iterator end() {
        return iterator(*static_cast<const List*>(this), count);
    }

    struct iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};

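// Usage sketch (editor's note, not part of the original header): elements
// are entsize() bytes apart, which need not equal sizeof(Element), so a
// list must be walked through its iterator rather than by plain pointer
// arithmetic. For a method_list_t (defined below) that looks like:
//
//     for (auto& meth : *mlist) {   // iterator advances by entsize()
//         SEL sel = meth.name();
//     }
//
// The same loop therefore works for both big and small method layouts.
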
namespace objc {
// Let method_t::small use this from objc-private.h.
static inline bool inSharedCache(uintptr_t ptr);
}

struct method_t {
    static const uint32_t smallMethodListFlag = 0x80000000;

    method_t(const method_t &other) = delete;

    // The representation of a "big" method. This is the traditional
    // representation of three pointers storing the selector, types
    // and implementation.
    struct big {
        SEL name;
        const char *types;
        MethodListIMP imp;
    };

private:
    bool isSmall() const {
        return ((uintptr_t)this & 1) == 1;
    }

    // The representation of a "small" method. This stores three
    // relative offsets to the name, types, and implementation.
    struct small {
        // The name field either refers to a selector (in the shared
        // cache) or a selref (everywhere else).
        RelativePointer<const void *> name;
        RelativePointer<const char *> types;
        RelativePointer<IMP> imp;

        bool inSharedCache() const {
            return (CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS &&
                    objc::inSharedCache((uintptr_t)this));
        }
    };

    small &small() const {
        ASSERT(isSmall());
        return *(struct small *)((uintptr_t)this & ~(uintptr_t)1);
    }

    IMP remappedImp(bool needsLock) const;
    void remapImp(IMP imp);
    objc_method_description *getSmallDescription() const;

public:
    static const auto bigSize = sizeof(struct big);
    static const auto smallSize = sizeof(struct small);

    // The pointer modifier used with method lists. When the method
    // list contains small methods, set the bottom bit of the pointer.
    // We use that bottom bit elsewhere to distinguish between big
    // and small methods.
    struct pointer_modifier {
        template <typename ListType>
        static method_t *modify(const ListType &list, method_t *ptr) {
            if (list.flags() & smallMethodListFlag)
                return (method_t *)((uintptr_t)ptr | 1);
            return ptr;
        }
    };

    big &big() const {
        ASSERT(!isSmall());
        return *(struct big *)this;
    }

    SEL name() const {
        if (isSmall()) {
            return (small().inSharedCache()
                    ? (SEL)small().name.get()
                    : *(SEL *)small().name.get());
        } else {
            return big().name;
        }
    }
    const char *types() const {
        return isSmall() ? small().types.get() : big().types;
    }
    IMP imp(bool needsLock) const {
        if (isSmall()) {
            IMP imp = remappedImp(needsLock);
            if (!imp)
                imp = ptrauth_sign_unauthenticated(small().imp.get(),
                                                   ptrauth_key_function_pointer, 0);
            return imp;
        }
        return big().imp;
    }

    SEL getSmallNameAsSEL() const {
        ASSERT(small().inSharedCache());
        return (SEL)small().name.get();
    }

    SEL getSmallNameAsSELRef() const {
        ASSERT(!small().inSharedCache());
        return *(SEL *)small().name.get();
    }

    void setName(SEL name) {
        if (isSmall()) {
            ASSERT(!small().inSharedCache());
            *(SEL *)small().name.get() = name;
        } else {
            big().name = name;
        }
    }

    void setImp(IMP imp) {
        if (isSmall()) {
            remapImp(imp);
        } else {
            big().imp = imp;
        }
    }

    objc_method_description *getDescription() const {
        return isSmall() ? getSmallDescription() : (struct objc_method_description *)this;
    }

    struct SortBySELAddress :
        public std::binary_function<const struct method_t::big&,
                                    const struct method_t::big&, bool>
    {
        bool operator() (const struct method_t::big& lhs,
                         const struct method_t::big& rhs)
        { return lhs.name < rhs.name; }
    };

    method_t &operator=(const method_t &other) {
        ASSERT(!isSmall());
        big().name = other.name();
        big().types = other.types();
        big().imp = other.imp(false);
        return *this;
    }
};
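
// Editor's note on the tagging scheme above (illustrative, not part of the
// original header): a method_t* produced by a method list iterator carries
// its own representation tag. pointer_modifier sets bit 0 of the pointer
// when the owning list has smallMethodListFlag, and the accessors branch
// on that bit:
//
//     method_t *m = ...;               // from an entsize_list_tt iterator
//     if ((uintptr_t)m & 1) {
//         // small(): strip the tag, read three 32-bit relative offsets
//     } else {
//         // big(): read three pointers in place
//     }
//
// This works because real method storage is always at least 4-byte
// aligned, leaving bit 0 free to carry the tag.
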
struct ivar_t {
#if __x86_64__
    // *offset was originally 64-bit on some x86_64 platforms.
    // We read and write only 32 bits of it.
    // Some metadata provides all 64 bits. This is harmless for unsigned
    // little-endian values.
    // Some code uses all 64 bits. class_addIvar() over-allocates the
    // offset for their benefit.
#endif
    int32_t *offset;
    const char *name;
    const char *type;
    // alignment is sometimes -1; use alignment() instead
    uint32_t alignment_raw;
    uint32_t size;

    uint32_t alignment() const {
        if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
        return 1 << alignment_raw;
    }
};

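// Worked example (editor's sketch, not part of the original header):
// alignment_raw holds log2 of the byte alignment, with ~0 as a sentinel
// meaning "unspecified, use word alignment":
//
//     alignment_raw == 3            -> alignment() == 1 << 3  == 8
//     alignment_raw == ~(uint32_t)0 -> alignment() == 1U << WORD_SHIFT
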
struct property_t {
    const char *name;
    const char *attributes;
};

// Two bits of entsize are used for fixup markers.
// Reserve the top half of entsize for more flags. We never
// need entry sizes anywhere close to 64kB.
//
// Currently there is one flag defined: the small method list flag,
// method_t::smallMethodListFlag. Other flags are currently ignored.
// (NOTE: these bits are only ignored on runtimes that support small
// method lists. Older runtimes will treat them as part of the entry
// size!)
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0xffff0003, method_t::pointer_modifier> {
    bool isUniqued() const;
    bool isFixedUp() const;
    void setFixedUp();

    uint32_t indexOfMethod(const method_t *meth) const {
        uint32_t i =
            (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
        ASSERT(i < count);
        return i;
    }

    bool isSmallList() const {
        return flags() & method_t::smallMethodListFlag;
    }

    bool isExpectedSize() const {
        if (isSmallList())
            return entsize() == method_t::smallSize;
        else
            return entsize() == method_t::bigSize;
    }

    method_list_t *duplicate() const {
        method_list_t *dup;
        if (isSmallList()) {
            dup = (method_list_t *)calloc(byteSize(method_t::bigSize, count), 1);
            dup->entsizeAndFlags = method_t::bigSize;
        } else {
            dup = (method_list_t *)calloc(this->byteSize(), 1);
            dup->entsizeAndFlags = this->entsizeAndFlags;
        }
        dup->count = this->count;
        std::copy(begin(), end(), dup->begin());
        return dup;
    }
};

struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
    bool containsIvar(Ivar ivar) const {
        return (ivar >= (Ivar)&*begin() && ivar < (Ivar)&*end());
    }
};

struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};


typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// Values for protocol_t->flags
#define PROTOCOL_FIXED_UP_2     (1<<31)  // must never be set by compiler
#define PROTOCOL_FIXED_UP_1     (1<<30)  // must never be set by compiler
#define PROTOCOL_IS_CANONICAL   (1<<29)  // must never be set by compiler
// Bits 0..15 are reserved for Swift's use.

#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)

struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t)
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **_extendedMethodTypes;
    const char *_demangledName;
    property_list_t *_classProperties;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;
    void setFixedUp();

    bool isCanonical() const;
    void clearIsCanonical();

#   define HAS_FIELD(f) ((uintptr_t)(&f) < ((uintptr_t)this + size))

    bool hasExtendedMethodTypesField() const {
        return HAS_FIELD(_extendedMethodTypes);
    }
    bool hasDemangledNameField() const {
        return HAS_FIELD(_demangledName);
    }
    bool hasClassPropertiesField() const {
        return HAS_FIELD(_classProperties);
    }

#   undef HAS_FIELD

    const char **extendedMethodTypes() const {
        return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
    }

    property_list_t *classProperties() const {
        return hasClassPropertiesField() ? _classProperties : nil;
    }
};

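// Editor's note on HAS_FIELD above (illustrative, not part of the original
// header): `size` records how many bytes of protocol_t were actually
// emitted on disk, so a trailing field exists only if its address falls
// inside those bytes. Metadata emitted before _classProperties was added
// might have size == offsetof(protocol_t, _classProperties), giving:
//
//     proto->hasDemangledNameField()    // true
//     proto->hasClassPropertiesField()  // false
//
// This is how the runtime stays compatible with metadata compiled against
// older, shorter definitions of protocol_t.
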
struct protocol_list_t {
    // count is pointer-sized by accident.
    uintptr_t count;
    protocol_ref_t list[0];  // variable-size

    size_t byteSize() const {
        return sizeof(*this) + count*sizeof(list[0]);
    }

    protocol_list_t *duplicate() const {
        return (protocol_list_t *)memdup(this, this->byteSize());
    }

    typedef protocol_ref_t* iterator;
    typedef const protocol_ref_t* const_iterator;

    const_iterator begin() const {
        return list;
    }
    iterator begin() {
        return list;
    }
    const_iterator end() const {
        return list + count;
    }
    iterator end() {
        return list + count;
    }
};

struct class_ro_t {
    uint32_t flags;
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;
#endif

    union {
        const uint8_t * ivarLayout;
        Class nonMetaclass;
    };

    explicit_atomic<const char *> name;
    // With ptrauth, this is signed if it points to a small list, but
    // may be unsigned if it points to a big list.
    void *baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    // This field exists only when RO_HAS_SWIFT_INITIALIZER is set.
    _objc_swiftMetadataInitializer __ptrauth_objc_method_list_imp _swiftMetadataInitializer_NEVER_USE[0];

    _objc_swiftMetadataInitializer swiftMetadataInitializer() const {
        if (flags & RO_HAS_SWIFT_INITIALIZER) {
            return _swiftMetadataInitializer_NEVER_USE[0];
        } else {
            return nil;
        }
    }

    const char *getName() const {
        return name.load(std::memory_order_acquire);
    }

    static const uint16_t methodListPointerDiscriminator = 0xC310;
#if 0 // FIXME: enable this when we get a non-empty definition of __ptrauth_objc_method_list_pointer from ptrauth.h.
    static_assert(std::is_same<
                  void * __ptrauth_objc_method_list_pointer *,
                  void * __ptrauth(ptrauth_key_method_list_pointer, 1, methodListPointerDiscriminator) *>::value,
                  "Method list pointer signing discriminator must match ptrauth.h");
#endif

    method_list_t *baseMethods() const {
#if __has_feature(ptrauth_calls)
        method_list_t *ptr = ptrauth_strip((method_list_t *)baseMethodList, ptrauth_key_method_list_pointer);
        if (ptr == nullptr)
            return nullptr;

        // Don't auth if the class_ro and the method list are both in the shared cache.
        // This is secure since they'll be read-only, and this allows the shared cache
        // to cut down on the number of signed pointers it has.
        bool roInSharedCache = objc::inSharedCache((uintptr_t)this);
        bool listInSharedCache = objc::inSharedCache((uintptr_t)ptr);
        if (roInSharedCache && listInSharedCache)
            return ptr;

        // Auth all other small lists.
        if (ptr->isSmallList())
            ptr = ptrauth_auth_data((method_list_t *)baseMethodList,
                                    ptrauth_key_method_list_pointer,
                                    ptrauth_blend_discriminator(&baseMethodList,
                                                                methodListPointerDiscriminator));
        return ptr;
#else
        return (method_list_t *)baseMethodList;
#endif
    }

    uintptr_t baseMethodListPtrauthData() const {
        return ptrauth_blend_discriminator(&baseMethodList,
                                           methodListPointerDiscriminator);
    }

    class_ro_t *duplicate() const {
        bool hasSwiftInitializer = flags & RO_HAS_SWIFT_INITIALIZER;

        size_t size = sizeof(*this);
        if (hasSwiftInitializer)
            size += sizeof(_swiftMetadataInitializer_NEVER_USE[0]);

        class_ro_t *ro = (class_ro_t *)memdup(this, size);

        if (hasSwiftInitializer)
            ro->_swiftMetadataInitializer_NEVER_USE[0] = this->_swiftMetadataInitializer_NEVER_USE[0];

#if __has_feature(ptrauth_calls)
        // Re-sign the method list pointer if it was signed.
        // NOTE: It is possible for a signed pointer to have a signature
        // that is all zeroes. This is indistinguishable from a raw pointer.
        // This code will treat such a pointer as signed and re-sign it. A
        // false positive is safe: method list pointers are either authed or
        // stripped, so if baseMethods() doesn't expect it to be signed, it
        // will ignore the signature.
        void *strippedBaseMethodList = ptrauth_strip(baseMethodList, ptrauth_key_method_list_pointer);
        void *signedBaseMethodList = ptrauth_sign_unauthenticated(strippedBaseMethodList,
                                                                  ptrauth_key_method_list_pointer,
                                                                  baseMethodListPtrauthData());
        if (baseMethodList == signedBaseMethodList) {
            ro->baseMethodList = ptrauth_auth_and_resign(baseMethodList,
                                                         ptrauth_key_method_list_pointer,
                                                         baseMethodListPtrauthData(),
                                                         ptrauth_key_method_list_pointer,
                                                         ro->baseMethodListPtrauthData());
        } else {
            // Special case: a class_ro_t in the shared cache pointing to a
            // method list in the shared cache will not have a signed pointer,
            // but the duplicate will be expected to have a signed pointer since
            // it's not in the shared cache. Detect that and sign it.
            bool roInSharedCache = objc::inSharedCache((uintptr_t)this);
            bool listInSharedCache = objc::inSharedCache((uintptr_t)strippedBaseMethodList);
            if (roInSharedCache && listInSharedCache)
                ro->baseMethodList = ptrauth_sign_unauthenticated(strippedBaseMethodList,
                                                                  ptrauth_key_method_list_pointer,
                                                                  ro->baseMethodListPtrauthData());
        }
#endif

        return ro;
    }

    Class getNonMetaclass() const {
        ASSERT(flags & RO_META);
        return nonMetaclass;
    }

    const uint8_t *getIvarLayout() const {
        if (flags & RO_META)
            return nullptr;
        return ivarLayout;
    }
};


1174 | /*********************************************************************** | |
bc4fafce | 1175 | * list_array_tt<Element, List, Ptr> |
31875a97 A |
1176 | * Generic implementation for metadata that can be augmented by categories. |
1177 | * | |
1178 | * Element is the underlying metadata type (e.g. method_t) | |
1179 | * List is the metadata's list type (e.g. method_list_t) | |
bc4fafce A |
1180 | * List is a template applied to Element to make Element*. Useful for |
1181 | * applying qualifiers to the pointer type. | |
31875a97 A |
1182 | * |
1183 | * A list_array_tt has one of three values: | |
1184 | * - empty | |
1185 | * - a pointer to a single list | |
1186 | * - an array of pointers to lists | |
1187 | * | |
1188 | * countLists/beginLists/endLists iterate the metadata lists | |
1189 | * count/begin/end iterate the underlying metadata elements | |
1190 | **********************************************************************/ | |
bc4fafce | 1191 | template <typename Element, typename List, template<typename> class Ptr> |
31875a97 A |
1192 | class list_array_tt { |
1193 | struct array_t { | |
1194 | uint32_t count; | |
bc4fafce | 1195 | Ptr<List> lists[0]; |
31875a97 A |
1196 | |
1197 | static size_t byteSize(uint32_t count) { | |
1198 | return sizeof(array_t) + count*sizeof(lists[0]); | |
1199 | } | |
1200 | size_t byteSize() { | |
1201 | return byteSize(count); | |
1202 | } | |
1203 | }; | |
1204 | ||
1205 | protected: | |
1206 | class iterator { | |
bc4fafce A |
1207 | const Ptr<List> *lists; |
1208 | const Ptr<List> *listsEnd; | |
31875a97 A |
1209 | typename List::iterator m, mEnd; |
1210 | ||
1211 | public: | |
bc4fafce | 1212 | iterator(const Ptr<List> *begin, const Ptr<List> *end) |
31875a97 A |
1213 | : lists(begin), listsEnd(end) |
1214 | { | |
1215 | if (begin != end) { | |
1216 | m = (*begin)->begin(); | |
1217 | mEnd = (*begin)->end(); | |
1218 | } | |
1219 | } | |
1220 | ||
1221 | const Element& operator * () const { | |
1222 | return *m; | |
1223 | } | |
1224 | Element& operator * () { | |
1225 | return *m; | |
1226 | } | |
1227 | ||
1228 | bool operator != (const iterator& rhs) const { | |
1229 | if (lists != rhs.lists) return true; | |
1230 | if (lists == listsEnd) return false; // m is undefined | |
1231 | if (m != rhs.m) return true; | |
1232 | return false; | |
1233 | } | |
1234 | ||
1235 | const iterator& operator ++ () { | |
1807f628 | 1236 | ASSERT(m != mEnd); |
31875a97 A |
1237 | m++; |
1238 | if (m == mEnd) { | |
1807f628 | 1239 | ASSERT(lists != listsEnd); |
31875a97 A |
1240 | lists++; |
1241 | if (lists != listsEnd) { | |
1242 | m = (*lists)->begin(); | |
1243 | mEnd = (*lists)->end(); | |
1244 | } | |
1245 | } | |
1246 | return *this; | |
1247 | } | |
1248 | }; | |
1249 | ||
1250 | private: | |
1251 | union { | |
bc4fafce | 1252 | Ptr<List> list; |
31875a97 A |
1253 | uintptr_t arrayAndFlag; |
1254 | }; | |
1255 | ||
1256 | bool hasArray() const { | |
1257 | return arrayAndFlag & 1; | |
1258 | } | |
1259 | ||
f192a3e2 | 1260 | array_t *array() const { |
31875a97 A |
1261 | return (array_t *)(arrayAndFlag & ~1); |
1262 | } | |
1263 | ||
1264 | void setArray(array_t *array) { | |
1265 | arrayAndFlag = (uintptr_t)array | 1; | |
1266 | } | |
1267 | ||
bc4fafce A |
1268 | void validate() { |
1269 | for (auto cursor = beginLists(), end = endLists(); cursor != end; cursor++) | |
1270 | cursor->validate(); | |
1271 | } | |
1272 | ||
31875a97 | 1273 | public: |
f192a3e2 A |
1274 | list_array_tt() : list(nullptr) { } |
1275 | list_array_tt(List *l) : list(l) { } | |
bc4fafce A |
1276 | list_array_tt(const list_array_tt &other) { |
1277 | *this = other; | |
1278 | } | |
1279 | ||
1280 | list_array_tt &operator =(const list_array_tt &other) { | |
1281 | if (other.hasArray()) { | |
1282 | arrayAndFlag = other.arrayAndFlag; | |
1283 | } else { | |
1284 | list = other.list; | |
1285 | } | |
1286 | return *this; | |
1287 | } | |
31875a97 | 1288 | |
f192a3e2 | 1289 | uint32_t count() const { |
31875a97 A |
1290 | uint32_t result = 0; |
1291 | for (auto lists = beginLists(), end = endLists(); | |
1292 | lists != end; | |
1293 | ++lists) | |
1294 | { | |
1295 | result += (*lists)->count; | |
1296 | } | |
1297 | return result; | |
1298 | } | |
1299 | ||
f192a3e2 | 1300 | iterator begin() const { |
31875a97 A |
1301 | return iterator(beginLists(), endLists()); |
1302 | } | |
1303 | ||
f192a3e2 | 1304 | iterator end() const { |
bc4fafce | 1305 | auto e = endLists(); |
31875a97 A |
1306 | return iterator(e, e); |
1307 | } | |
1308 | ||
34d5b5e8 | 1309 | inline uint32_t countLists(const std::function<const array_t * (const array_t *)> & peek) const { |
31875a97 | 1310 | if (hasArray()) { |
34d5b5e8 | 1311 | return peek(array())->count; |
31875a97 A |
1312 | } else if (list) { |
1313 | return 1; | |
1314 | } else { | |
1315 | return 0; | |
1316 | } | |
1317 | } | |
1318 | ||
34d5b5e8 A |
1319 | uint32_t countLists() { |
1320 | return countLists([](array_t *x) { return x; }); | |
1321 | } | |
1322 | ||
bc4fafce | 1323 | const Ptr<List>* beginLists() const { |
31875a97 A |
1324 | if (hasArray()) { |
1325 | return array()->lists; | |
1326 | } else { | |
1327 | return &list; | |
1328 | } | |
1329 | } | |
1330 | ||
bc4fafce | 1331 | const Ptr<List>* endLists() const { |
31875a97 A |
1332 | if (hasArray()) { |
1333 | return array()->lists + array()->count; | |
1334 | } else if (list) { | |
1335 | return &list + 1; | |
1336 | } else { | |
1337 | return &list; | |
1338 | } | |
1339 | } | |
1340 | ||
1341 | void attachLists(List* const * addedLists, uint32_t addedCount) { | |
1342 | if (addedCount == 0) return; | |
1343 | ||
1344 | if (hasArray()) { | |
1345 | // many lists -> many lists | |
1346 | uint32_t oldCount = array()->count; | |
1347 | uint32_t newCount = oldCount + addedCount; | |
bc4fafce A |
1348 | array_t *newArray = (array_t *)malloc(array_t::byteSize(newCount)); |
1349 | newArray->count = newCount; | |
31875a97 | 1350 | array()->count = newCount; |
bc4fafce A |
1351 | |
1352 | for (int i = oldCount - 1; i >= 0; i--) | |
1353 | newArray->lists[i + addedCount] = array()->lists[i]; | |
1354 | for (unsigned i = 0; i < addedCount; i++) | |
1355 | newArray->lists[i] = addedLists[i]; | |
1356 | free(array()); | |
1357 | setArray(newArray); | |
1358 | validate(); | |
31875a97 A |
1359 | } |
1360 | else if (!list && addedCount == 1) { | |
1361 | // 0 lists -> 1 list | |
1362 | list = addedLists[0]; | |
bc4fafce | 1363 | validate(); |
31875a97 A |
1364 | } |
1365 | else { | |
1366 | // 1 list -> many lists | |
bc4fafce | 1367 | Ptr<List> oldList = list; |
31875a97 A |
1368 | uint32_t oldCount = oldList ? 1 : 0; |
1369 | uint32_t newCount = oldCount + addedCount; | |
1370 | setArray((array_t *)malloc(array_t::byteSize(newCount))); | |
1371 | array()->count = newCount; | |
1372 | if (oldList) array()->lists[addedCount] = oldList; | |
bc4fafce A |
1373 | for (unsigned i = 0; i < addedCount; i++) |
1374 | array()->lists[i] = addedLists[i]; | |
1375 | validate(); | |
31875a97 A |
1376 | } |
1377 | } | |

    void tryFree() {
        if (hasArray()) {
            for (uint32_t i = 0; i < array()->count; i++) {
                try_free(array()->lists[i]);
            }
            try_free(array());
        }
        else if (list) {
            try_free(list);
        }
    }

    template<typename Other>
    void duplicateInto(Other &other) {
        if (hasArray()) {
            array_t *a = array();
            other.setArray((array_t *)memdup(a, a->byteSize()));
            for (uint32_t i = 0; i < a->count; i++) {
                other.array()->lists[i] = a->lists[i]->duplicate();
            }
        } else if (list) {
            other.list = list->duplicate();
        } else {
            other.list = nil;
        }
    }
};
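
// Illustrative sketch, not part of the original header: list_array_tt's
// begin()/end() iterator flattens the array of lists, so callers can walk
// every element without caring about the storage state. Hypothetical use:
//
//     for (auto& meth : cls->data()->methods()) {
//         // visits every method_t across all attached method lists,
//         // most recently attached lists first
//     }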


DECLARE_AUTHED_PTR_TEMPLATE(method_list_t)

class method_array_t :
    public list_array_tt<method_t, method_list_t, method_list_t_authed_ptr>
{
    typedef list_array_tt<method_t, method_list_t, method_list_t_authed_ptr> Super;

 public:
    method_array_t() : Super() { }
    method_array_t(method_list_t *l) : Super(l) { }

    const method_list_t_authed_ptr<method_list_t> *beginCategoryMethodLists() const {
        return beginLists();
    }

    const method_list_t_authed_ptr<method_list_t> *endCategoryMethodLists(Class cls) const;
};


class property_array_t :
    public list_array_tt<property_t, property_list_t, RawPtr>
{
    typedef list_array_tt<property_t, property_list_t, RawPtr> Super;

 public:
    property_array_t() : Super() { }
    property_array_t(property_list_t *l) : Super(l) { }
};


class protocol_array_t :
    public list_array_tt<protocol_ref_t, protocol_list_t, RawPtr>
{
    typedef list_array_tt<protocol_ref_t, protocol_list_t, RawPtr> Super;

 public:
    protocol_array_t() : Super() { }
    protocol_array_t(protocol_list_t *l) : Super(l) { }
};

struct class_rw_ext_t {
    DECLARE_AUTHED_PTR_TEMPLATE(class_ro_t)
    class_ro_t_authed_ptr<const class_ro_t> ro;
    method_array_t methods;
    property_array_t properties;
    protocol_array_t protocols;
    char *demangledName;
    uint32_t version;
};
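
// Illustrative note, not from the original header: class_rw_ext_t is
// allocated lazily, the first time a class needs mutable method, property,
// or protocol arrays (category attachment, method swizzling, demangled-name
// caching). Untouched classes keep pointing at their read-only class_ro_t,
// which keeps dirty memory down. A hedged sketch of the transition:
//
//     class_rw_t *rw = cls->data();
//     class_rw_ext_t *rwe = rw->extAllocIfNeeded();  // ro -> ro + rwe
//     rwe->methods.attachLists(&newList, 1);         // now safely mutable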

struct class_rw_t {
    // Be warned that Symbolication knows the layout of this structure.
    uint32_t flags;
    uint16_t witness;
#if SUPPORT_INDEXED_ISA
    uint16_t index;
#endif

    explicit_atomic<uintptr_t> ro_or_rw_ext;

    Class firstSubclass;
    Class nextSiblingClass;

private:
    using ro_or_rw_ext_t = objc::PointerUnion<const class_ro_t, class_rw_ext_t, PTRAUTH_STR("class_ro_t"), PTRAUTH_STR("class_rw_ext_t")>;

    const ro_or_rw_ext_t get_ro_or_rwe() const {
        return ro_or_rw_ext_t{ro_or_rw_ext};
    }

    void set_ro_or_rwe(const class_ro_t *ro) {
        ro_or_rw_ext_t{ro, &ro_or_rw_ext}.storeAt(ro_or_rw_ext, memory_order_relaxed);
    }

    void set_ro_or_rwe(class_rw_ext_t *rwe, const class_ro_t *ro) {
        // the release barrier is so that the class_rw_ext_t::ro initialization
        // is visible to lockless readers
        rwe->ro = ro;
        ro_or_rw_ext_t{rwe, &ro_or_rw_ext}.storeAt(ro_or_rw_ext, memory_order_release);
    }

    class_rw_ext_t *extAlloc(const class_ro_t *ro, bool deep = false);

public:
    void setFlags(uint32_t set)
    {
        __c11_atomic_fetch_or((_Atomic(uint32_t) *)&flags, set, __ATOMIC_RELAXED);
    }

    void clearFlags(uint32_t clear)
    {
        __c11_atomic_fetch_and((_Atomic(uint32_t) *)&flags, ~clear, __ATOMIC_RELAXED);
    }

    // set and clear must not overlap
    void changeFlags(uint32_t set, uint32_t clear)
    {
        ASSERT((set & clear) == 0);

        uint32_t oldf, newf;
        do {
            oldf = flags;
            newf = (oldf | set) & ~clear;
        } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
    }
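
    // Illustrative sketch, not from the original header: changeFlags retries
    // its compare-and-swap until no other thread races the update, so paired
    // state bits can be flipped atomically, e.g. (hypothetically):
    //
    //     rw->changeFlags(RW_INITIALIZED, RW_INITIALIZING);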

    class_rw_ext_t *ext() const {
        return get_ro_or_rwe().dyn_cast<class_rw_ext_t *>(&ro_or_rw_ext);
    }

    class_rw_ext_t *extAllocIfNeeded() {
        auto v = get_ro_or_rwe();
        if (fastpath(v.is<class_rw_ext_t *>())) {
            return v.get<class_rw_ext_t *>(&ro_or_rw_ext);
        } else {
            return extAlloc(v.get<const class_ro_t *>(&ro_or_rw_ext));
        }
    }

    class_rw_ext_t *deepCopy(const class_ro_t *ro) {
        return extAlloc(ro, true);
    }

    const class_ro_t *ro() const {
        auto v = get_ro_or_rwe();
        if (slowpath(v.is<class_rw_ext_t *>())) {
            return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->ro;
        }
        return v.get<const class_ro_t *>(&ro_or_rw_ext);
    }

    void set_ro(const class_ro_t *ro) {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            v.get<class_rw_ext_t *>(&ro_or_rw_ext)->ro = ro;
        } else {
            set_ro_or_rwe(ro);
        }
    }

    const method_array_t methods() const {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->methods;
        } else {
            return method_array_t{v.get<const class_ro_t *>(&ro_or_rw_ext)->baseMethods()};
        }
    }

    const property_array_t properties() const {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->properties;
        } else {
            return property_array_t{v.get<const class_ro_t *>(&ro_or_rw_ext)->baseProperties};
        }
    }

    const protocol_array_t protocols() const {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->protocols;
        } else {
            return protocol_array_t{v.get<const class_ro_t *>(&ro_or_rw_ext)->baseProtocols};
        }
    }
};
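
// Illustrative sketch, not part of the original header: the accessors above
// let callers ignore whether the class has been extended yet. Reading a
// class's methods works the same in either state:
//
//     class_rw_t *rw = cls->data();
//     const method_array_t mlists = rw->methods();  // rwe->methods, or a
//                                                   // one-list view of
//                                                   // ro->baseMethods()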


struct class_data_bits_t {
    friend objc_class;

    // Values are the FAST_ flags above.
    uintptr_t bits;
private:
    bool getBit(uintptr_t bit) const
    {
        return bits & bit;
    }

    // Atomically set the bits in `set` and clear the bits in `clear`.
    // set and clear must not overlap.
    void setAndClearBits(uintptr_t set, uintptr_t clear)
    {
        ASSERT((set & clear) == 0);
        uintptr_t newBits, oldBits = LoadExclusive(&bits);
        do {
            newBits = (oldBits | set) & ~clear;
        } while (slowpath(!StoreReleaseExclusive(&bits, &oldBits, newBits)));
    }

    void setBits(uintptr_t set) {
        __c11_atomic_fetch_or((_Atomic(uintptr_t) *)&bits, set, __ATOMIC_RELAXED);
    }

    void clearBits(uintptr_t clear) {
        __c11_atomic_fetch_and((_Atomic(uintptr_t) *)&bits, ~clear, __ATOMIC_RELAXED);
    }

public:

    class_rw_t* data() const {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }
    void setData(class_rw_t *newData)
    {
        ASSERT(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE)));
        // Set during realization or construction only. No locking needed.
        // Use a store-release fence because there may be concurrent
        // readers of data and data's contents.
        uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
        atomic_thread_fence(memory_order_release);
        bits = newBits;
    }

    // Get the class's ro data, even in the presence of concurrent realization.
    // fixme this isn't really safe without a compiler barrier at least
    // and probably a memory barrier when realizeClass changes the data field
    const class_ro_t *safe_ro() const {
        class_rw_t *maybe_rw = data();
        if (maybe_rw->flags & RW_REALIZED) {
            // maybe_rw is rw
            return maybe_rw->ro();
        } else {
            // maybe_rw is actually ro
            return (class_ro_t *)maybe_rw;
        }
    }

#if SUPPORT_INDEXED_ISA
    void setClassArrayIndex(unsigned Idx) {
        // Index 0 is unused so that zero-initialisation from calloc never
        // yields a valid index.
        ASSERT(Idx > 0);
        data()->index = Idx;
    }
#else
    void setClassArrayIndex(__unused unsigned Idx) {
    }
#endif

    unsigned classArrayIndex() {
#if SUPPORT_INDEXED_ISA
        return data()->index;
#else
        return 0;
#endif
    }

    bool isAnySwift() {
        return isSwiftStable() || isSwiftLegacy();
    }

    bool isSwiftStable() {
        return getBit(FAST_IS_SWIFT_STABLE);
    }
    void setIsSwiftStable() {
        setAndClearBits(FAST_IS_SWIFT_STABLE, FAST_IS_SWIFT_LEGACY);
    }

    bool isSwiftLegacy() {
        return getBit(FAST_IS_SWIFT_LEGACY);
    }
    void setIsSwiftLegacy() {
        setAndClearBits(FAST_IS_SWIFT_LEGACY, FAST_IS_SWIFT_STABLE);
    }

    // fixme remove this once the Swift runtime uses the stable bits
    bool isSwiftStable_ButAllowLegacyForNow() {
        return isAnySwift();
    }

    _objc_swiftMetadataInitializer swiftMetadataInitializer() {
        // This function is called on un-realized classes without
        // holding any locks.
        // Beware of races with other realizers.
        return safe_ro()->swiftMetadataInitializer();
    }
};
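
// Illustrative sketch, not from the original header: `bits` packs the
// class_rw_t pointer and the FAST_ flags into one word, so either costs a
// single load plus a mask to read. Hypothetically, on a 64-bit target:
//
//     class_rw_t *rw = (class_rw_t *)(cls->bits.bits & FAST_DATA_MASK);
//     bool isSwift   = cls->bits.bits & FAST_IS_SWIFT_STABLE;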


struct objc_class : objc_object {
    objc_class(const objc_class&) = delete;
    objc_class(objc_class&&) = delete;
    void operator=(const objc_class&) = delete;
    void operator=(objc_class&&) = delete;
    // Class ISA;
    Class superclass;
    cache_t cache;             // formerly cache pointer and vtable
    class_data_bits_t bits;    // class_rw_t * plus custom rr/alloc flags

    Class getSuperclass() const {
#if __has_feature(ptrauth_calls)
#   if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
        if (superclass == Nil)
            return Nil;

#if SUPERCLASS_SIGNING_TREAT_UNSIGNED_AS_NIL
        void *stripped = ptrauth_strip((void *)superclass, ISA_SIGNING_KEY);
        if ((void *)superclass == stripped) {
            void *resigned = ptrauth_sign_unauthenticated(stripped, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS));
            if ((void *)superclass != resigned)
                return Nil;
        }
#endif

        void *result = ptrauth_auth_data((void *)superclass, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS));
        return (Class)result;

#   else
        return (Class)ptrauth_strip((void *)superclass, ISA_SIGNING_KEY);
#   endif
#else
        return superclass;
#endif
    }

    void setSuperclass(Class newSuperclass) {
#if ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ALL
        superclass = (Class)ptrauth_sign_unauthenticated((void *)newSuperclass, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS));
#else
        superclass = newSuperclass;
#endif
    }
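
    // Illustrative note, not from the original header: with pointer
    // authentication, `superclass` is stored signed with a per-field
    // discriminator. The TREAT_UNSIGNED_AS_NIL path above guards against a
    // raw, never-signed pointer having been written into the field: if
    // stripping changes nothing but re-signing would, the value was not
    // signed in the first place, and getSuperclass() answers Nil rather
    // than authenticate (and trap on) a forged pointer.
    //
    //     cls->setSuperclass(newSuper);      // stores a signed pointer
    //     Class sup = cls->getSuperclass();  // authenticates, or Nil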

    class_rw_t *data() const {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }

    void setInfo(uint32_t set) {
        ASSERT(isFuture() || isRealized());
        data()->setFlags(set);
    }

    void clearInfo(uint32_t clear) {
        ASSERT(isFuture() || isRealized());
        data()->clearFlags(clear);
    }

    // set and clear must not overlap
    void changeInfo(uint32_t set, uint32_t clear) {
        ASSERT(isFuture() || isRealized());
        ASSERT((set & clear) == 0);
        data()->changeFlags(set, clear);
    }

    // RR: the retain/release family of methods.
#if FAST_HAS_DEFAULT_RR
    bool hasCustomRR() const {
        return !bits.getBit(FAST_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        bits.setBits(FAST_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        bits.clearBits(FAST_HAS_DEFAULT_RR);
    }
#else
    bool hasCustomRR() const {
        return !(bits.data()->flags & RW_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        bits.data()->setFlags(RW_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        bits.data()->clearFlags(RW_HAS_DEFAULT_RR);
    }
#endif

    // AWZ: +alloc / +allocWithZone:.
#if FAST_CACHE_HAS_DEFAULT_AWZ
    bool hasCustomAWZ() const {
        return !cache.getBit(FAST_CACHE_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        cache.setBit(FAST_CACHE_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        cache.clearBit(FAST_CACHE_HAS_DEFAULT_AWZ);
    }
#else
    bool hasCustomAWZ() const {
        return !(bits.data()->flags & RW_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        bits.data()->setFlags(RW_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        bits.data()->clearFlags(RW_HAS_DEFAULT_AWZ);
    }
#endif

    // Core: other core NSObject methods, such as +new and -self.
#if FAST_CACHE_HAS_DEFAULT_CORE
    bool hasCustomCore() const {
        return !cache.getBit(FAST_CACHE_HAS_DEFAULT_CORE);
    }
    void setHasDefaultCore() {
        return cache.setBit(FAST_CACHE_HAS_DEFAULT_CORE);
    }
    void setHasCustomCore() {
        return cache.clearBit(FAST_CACHE_HAS_DEFAULT_CORE);
    }
#else
    bool hasCustomCore() const {
        return !(bits.data()->flags & RW_HAS_DEFAULT_CORE);
    }
    void setHasDefaultCore() {
        bits.data()->setFlags(RW_HAS_DEFAULT_CORE);
    }
    void setHasCustomCore() {
        bits.data()->clearFlags(RW_HAS_DEFAULT_CORE);
    }
#endif

#if FAST_CACHE_HAS_CXX_CTOR
    bool hasCxxCtor() {
        ASSERT(isRealized());
        return cache.getBit(FAST_CACHE_HAS_CXX_CTOR);
    }
    void setHasCxxCtor() {
        cache.setBit(FAST_CACHE_HAS_CXX_CTOR);
    }
#else
    bool hasCxxCtor() {
        ASSERT(isRealized());
        return bits.data()->flags & RW_HAS_CXX_CTOR;
    }
    void setHasCxxCtor() {
        bits.data()->setFlags(RW_HAS_CXX_CTOR);
    }
#endif

#if FAST_CACHE_HAS_CXX_DTOR
    bool hasCxxDtor() {
        ASSERT(isRealized());
        return cache.getBit(FAST_CACHE_HAS_CXX_DTOR);
    }
    void setHasCxxDtor() {
        cache.setBit(FAST_CACHE_HAS_CXX_DTOR);
    }
#else
    bool hasCxxDtor() {
        ASSERT(isRealized());
        return bits.data()->flags & RW_HAS_CXX_DTOR;
    }
    void setHasCxxDtor() {
        bits.data()->setFlags(RW_HAS_CXX_DTOR);
    }
#endif

#if FAST_CACHE_REQUIRES_RAW_ISA
    bool instancesRequireRawIsa() {
        return cache.getBit(FAST_CACHE_REQUIRES_RAW_ISA);
    }
    void setInstancesRequireRawIsa() {
        cache.setBit(FAST_CACHE_REQUIRES_RAW_ISA);
    }
#elif SUPPORT_NONPOINTER_ISA
    bool instancesRequireRawIsa() {
        return bits.data()->flags & RW_REQUIRES_RAW_ISA;
    }
    void setInstancesRequireRawIsa() {
        bits.data()->setFlags(RW_REQUIRES_RAW_ISA);
    }
#else
    bool instancesRequireRawIsa() {
        return true;
    }
    void setInstancesRequireRawIsa() {
        // nothing
    }
#endif
    void setInstancesRequireRawIsaRecursively(bool inherited = false);
    void printInstancesRequireRawIsa(bool inherited);

#if CONFIG_USE_PREOPT_CACHES
    bool allowsPreoptCaches() const {
        return !(bits.data()->flags & RW_NOPREOPT_CACHE);
    }
    bool allowsPreoptInlinedSels() const {
        return !(bits.data()->flags & RW_NOPREOPT_SELS);
    }
    void setDisallowPreoptCaches() {
        bits.data()->setFlags(RW_NOPREOPT_CACHE | RW_NOPREOPT_SELS);
    }
    void setDisallowPreoptInlinedSels() {
        bits.data()->setFlags(RW_NOPREOPT_SELS);
    }
    void setDisallowPreoptCachesRecursively(const char *why);
    void setDisallowPreoptInlinedSelsRecursively(const char *why);
#else
    bool allowsPreoptCaches() const { return false; }
    bool allowsPreoptInlinedSels() const { return false; }
    void setDisallowPreoptCaches() { }
    void setDisallowPreoptInlinedSels() { }
    void setDisallowPreoptCachesRecursively(const char *why) { }
    void setDisallowPreoptInlinedSelsRecursively(const char *why) { }
#endif

    bool canAllocNonpointer() {
        ASSERT(!isFuture());
        return !instancesRequireRawIsa();
    }

    bool isSwiftStable() {
        return bits.isSwiftStable();
    }

    bool isSwiftLegacy() {
        return bits.isSwiftLegacy();
    }

    bool isAnySwift() {
        return bits.isAnySwift();
    }

    bool isSwiftStable_ButAllowLegacyForNow() {
        return bits.isSwiftStable_ButAllowLegacyForNow();
    }

    // Reads the uint32_t flags field that swift_class_t (declared below)
    // lays out immediately after `bits`.
    uint32_t swiftClassFlags() {
        return *(uint32_t *)(&bits + 1);
    }

    bool usesSwiftRefcounting() {
        if (!isSwiftStable()) return false;
        return bool(swiftClassFlags() & 2); // ClassFlags::UsesSwiftRefcounting
    }

    bool canCallSwiftRR() {
        // !hasCustomCore() is being used as a proxy for isInitialized(). All
        // classes with Swift refcounting are !hasCustomCore() (unless there are
        // category or swizzling shenanigans), but that bit is not set until a
        // class is initialized. Checking isInitialized requires an extra
        // indirection that we want to avoid on RR fast paths.
        //
        // In the unlikely event that someone causes a class with Swift
        // refcounting to be hasCustomCore(), we'll fall back to sending -retain
        // or -release, which is still correct.
        return !hasCustomCore() && usesSwiftRefcounting();
    }

    bool isStubClass() const {
        uintptr_t isa = (uintptr_t)isaBits();
        return 1 <= isa && isa < 16;
    }

    // Swift stable ABI built for old deployment targets looks weird.
    // The is-legacy bit is set for compatibility with old libobjc.
    // We are on a "new" deployment target so we need to rewrite that bit.
    // These stable-with-legacy-bit classes are distinguished from real
    // legacy classes using another bit in the Swift data
    // (ClassFlags::IsSwiftPreStableABI)

    bool isUnfixedBackwardDeployingStableSwift() {
        // Only classes marked as Swift legacy need apply.
        if (!bits.isSwiftLegacy()) return false;

        // Check the true legacy vs stable distinguisher.
        // The low bit of Swift's ClassFlags is SET for true legacy
        // and UNSET for stable pretending to be legacy.
        bool isActuallySwiftLegacy = bool(swiftClassFlags() & 1);
        return !isActuallySwiftLegacy;
    }

    void fixupBackwardDeployingStableSwift() {
        if (isUnfixedBackwardDeployingStableSwift()) {
            // Class really is stable Swift, pretending to be pre-stable.
            // Fix its lie.
            bits.setIsSwiftStable();
        }
    }

    _objc_swiftMetadataInitializer swiftMetadataInitializer() {
        return bits.swiftMetadataInitializer();
    }

    // Return YES if the class's ivars are managed by ARC,
    // or the class is MRC but has ARC-style weak ivars.
    bool hasAutomaticIvars() {
        return data()->ro()->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC);
    }

    // Return YES if the class's ivars are managed by ARC.
    bool isARC() {
        return data()->ro()->flags & RO_IS_ARC;
    }


    bool forbidsAssociatedObjects() {
        return (data()->flags & RW_FORBIDS_ASSOCIATED_OBJECTS);
    }

#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        ASSERT(isFuture() || isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        ASSERT(isFuture() || isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif

    bool shouldGrowCache() {
        return true;
    }

    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }

    // +initialize state is tracked on the metaclass's flags.
    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    void setInitializing() {
        ASSERT(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();

    bool isLoadable() {
        ASSERT(isRealized());
        return true;  // any class registered for +load is definitely loadable
    }

    IMP getLoadMethod();

    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() const {
        return !isStubClass() && (data()->flags & RW_REALIZED);
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() const {
        if (isStubClass())
            return false;
        return data()->flags & RW_FUTURE;
    }

    bool isMetaClass() const {
        ASSERT_THIS_NOT_NULL;
        ASSERT(isRealized());
#if FAST_CACHE_META
        return cache.getBit(FAST_CACHE_META);
#else
        return data()->flags & RW_META;
#endif
    }

    // Like isMetaClass, but also valid on un-realized classes
    bool isMetaClassMaybeUnrealized() {
        static_assert(offsetof(class_rw_t, flags) == offsetof(class_ro_t, flags), "flags alias");
        static_assert(RO_META == RW_META, "flags alias");
        if (isStubClass())
            return false;
        return data()->flags & RW_META;
    }

    // NOT identical to this->ISA when this is a metaclass
    Class getMeta() {
        if (isMetaClassMaybeUnrealized()) return (Class)this;
        else return this->ISA();
    }

    bool isRootClass() {
        return getSuperclass() == nil;
    }
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }

    // If this class does not have a name already, we can ask Swift to construct one for us.
    const char *installMangledNameForLazilyNamedClass();

    // Get the class's mangled name, or NULL if the class has a lazy
    // name that hasn't been created yet.
    const char *nonlazyMangledName() const {
        return bits.safe_ro()->getName();
    }

    const char *mangledName() {
        // fixme can't assert locks here
        ASSERT_THIS_NOT_NULL;

        const char *result = nonlazyMangledName();

        if (!result) {
            // This class lazily instantiates its name. Emplace and
            // return it.
            result = installMangledNameForLazilyNamedClass();
        }

        return result;
    }

    const char *demangledName(bool needsLock);
    const char *nameForLogging();
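
    // Illustrative sketch, not from the original header: Swift classes may
    // be emitted without a name, which the runtime materializes on first
    // use. Hypothetical flow:
    //
    //     const char *n = cls->nonlazyMangledName(); // NULL if lazily named
    //     n = cls->mangledName();                    // never NULL: installs
    //                                                // the name if needed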

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceStart() const {
        ASSERT(isRealized());
        return data()->ro()->instanceStart;
    }

    // Class's instance start rounded up to a pointer-size boundary.
    // This is used for ARC layout bitmaps.
    uint32_t alignedInstanceStart() const {
        return word_align(unalignedInstanceStart());
    }

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() const {
        ASSERT(isRealized());
        return data()->ro()->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() const {
        return word_align(unalignedInstanceSize());
    }

    inline size_t instanceSize(size_t extraBytes) const {
        if (fastpath(cache.hasFastInstanceSize(extraBytes))) {
            return cache.fastInstanceSize(extraBytes);
        }

        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }
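
    // Illustrative sketch, not from the original header: instanceSize() is
    // what allocation ultimately consults. Assuming a class whose ivars end
    // at 24 bytes on a 64-bit target:
    //
    //     cls->alignedInstanceSize();  // 24
    //     cls->instanceSize(0);        // 24 (never below the 16-byte minimum)
    //     cls->instanceSize(8);        // 32, e.g. for extra ivar space
    //                                  // requested via class_createInstance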

    void setInstanceSize(uint32_t newSize) {
        ASSERT(isRealized());
        ASSERT(data()->flags & RW_REALIZING);
        auto ro = data()->ro();
        if (newSize != ro->instanceSize) {
            ASSERT(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&ro->instanceSize) = newSize;
        }
        cache.setFastInstanceSize(newSize);
    }

    void chooseClassArrayIndex();

    void setClassArrayIndex(unsigned Idx) {
        bits.setClassArrayIndex(Idx);
    }

    unsigned classArrayIndex() {
        return bits.classArrayIndex();
    }
};


struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;
    // ...

    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};
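
// Illustrative sketch, not from the original header: a Swift class object is
// embedded inside a larger Swift metadata record; classAddressOffset records
// how far the ObjC-visible class sits from that record's start.
//
//     swift_class_t *scls = (swift_class_t *)someSwiftClass;  // hypothetical
//     void *metadata = scls->baseAddress();  // start of the full record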


struct category_t {
    const char *name;
    classref_t cls;
    WrappedPtr<method_list_t, PtrauthStrip> instanceMethods;
    WrappedPtr<method_list_t, PtrauthStrip> classMethods;
    struct protocol_list_t *protocols;
    struct property_list_t *instanceProperties;
    // Fields below this point are not always present on disk.
    struct property_list_t *_classProperties;

    method_list_t *methodsForMeta(bool isMeta) {
        if (isMeta) return classMethods;
        else return instanceMethods;
    }

    property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);

    protocol_list_t *protocolsForMeta(bool isMeta) {
        if (isMeta) return nullptr;
        else return protocols;
    }
};
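
// Illustrative sketch, not from the original header: category attachment asks
// a category for the lists that apply to the class or to its metaclass.
// Hypothetically, while attaching `cat` to `cls`:
//
//     bool meta = cls->isMetaClass();
//     method_list_t *mlist = cat->methodsForMeta(meta);     // class methods
//                                                           // go on the meta
//     protocol_list_t *plist = cat->protocolsForMeta(meta); // nullptr for
//                                                           // metaclasses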

struct objc_super2 {
    id receiver;
    Class current_class;
};

struct message_ref_t {
    IMP imp;
    SEL sel;
};


extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);

#endif