[apple/objc4.git] runtime/objc-runtime-new.h (objc4-787.1)
1 /*
2 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #ifndef _OBJC_RUNTIME_NEW_H
25 #define _OBJC_RUNTIME_NEW_H
26
27 #include "PointerUnion.h"
28
29 // class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
30 // The extra bits are optimized for the retain/release and alloc/dealloc paths.
31
32 // Values for class_ro_t->flags
33 // These are emitted by the compiler and are part of the ABI.
34 // Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
35 // class is a metaclass
36 #define RO_META (1<<0)
37 // class is a root class
38 #define RO_ROOT (1<<1)
39 // class has .cxx_construct/destruct implementations
40 #define RO_HAS_CXX_STRUCTORS (1<<2)
41 // class has +load implementation
42 // #define RO_HAS_LOAD_METHOD (1<<3)
43 // class has visibility=hidden set
44 #define RO_HIDDEN (1<<4)
45 // class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
46 #define RO_EXCEPTION (1<<5)
47 // class has ro field for Swift metadata initializer callback
48 #define RO_HAS_SWIFT_INITIALIZER (1<<6)
49 // class compiled with ARC
50 #define RO_IS_ARC (1<<7)
51 // class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
52 #define RO_HAS_CXX_DTOR_ONLY (1<<8)
53 // class is not ARC but has ARC-style weak ivar layout
54 #define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
55 // class does not allow associated objects on instances
56 #define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)
57
58 // class is in an unloadable bundle - must never be set by compiler
59 #define RO_FROM_BUNDLE (1<<29)
60 // class is unrealized future class - must never be set by compiler
61 #define RO_FUTURE (1<<30)
62 // class is realized - must never be set by compiler
63 #define RO_REALIZED (1<<31)
64
65 // Values for class_rw_t->flags
66 // These are not emitted by the compiler and are never used in class_ro_t.
67 // Their presence should be considered in future ABI versions.
68 // class_t->data is class_rw_t, not class_ro_t
69 #define RW_REALIZED (1<<31)
70 // class is unresolved future class
71 #define RW_FUTURE (1<<30)
72 // class is initialized
73 #define RW_INITIALIZED (1<<29)
74 // class is initializing
75 #define RW_INITIALIZING (1<<28)
76 // class_rw_t->ro is heap copy of class_ro_t
77 #define RW_COPIED_RO (1<<27)
78 // class allocated but not yet registered
79 #define RW_CONSTRUCTING (1<<26)
80 // class allocated and registered
81 #define RW_CONSTRUCTED (1<<25)
82 // available for use; was RW_FINALIZE_ON_MAIN_THREAD
83 // #define RW_24 (1<<24)
84 // class +load has been called
85 #define RW_LOADED (1<<23)
86 #if !SUPPORT_NONPOINTER_ISA
87 // class instances may have associative references
88 #define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
89 #endif
90 // class has instance-specific GC layout
91 #define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
92 // class does not allow associated objects on its instances
93 #define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
94 // class has started realizing but not yet completed it
95 #define RW_REALIZING (1<<19)
96
97 // class is a metaclass (copied from ro)
98 #define RW_META RO_META // (1<<0)
99
100
101 // NOTE: MORE RW_ FLAGS DEFINED BELOW
102
103
104 // Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
105 // or class_t->bits (FAST_*).
106 //
107 // FAST_* and FAST_CACHE_* are stored on the class, reducing pointer indirection.
108
109 #if __LP64__
110
111 // class is a Swift class from the pre-stable Swift ABI
112 #define FAST_IS_SWIFT_LEGACY (1UL<<0)
113 // class is a Swift class from the stable Swift ABI
114 #define FAST_IS_SWIFT_STABLE (1UL<<1)
115 // class or superclass has default retain/release/autorelease/retainCount/
116 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
117 #define FAST_HAS_DEFAULT_RR (1UL<<2)
118 // data pointer
119 #define FAST_DATA_MASK 0x00007ffffffffff8UL
120
121 #if __arm64__
122 // class or superclass has .cxx_construct/.cxx_destruct implementation
123 // FAST_CACHE_HAS_CXX_DTOR is the first bit so that setting it in
124 // isa_t::has_cxx_dtor is a single bfi
125 #define FAST_CACHE_HAS_CXX_DTOR (1<<0)
126 #define FAST_CACHE_HAS_CXX_CTOR (1<<1)
127 // Denormalized RO_META to avoid an indirection
128 #define FAST_CACHE_META (1<<2)
129 #else
130 // Denormalized RO_META to avoid an indirection
131 #define FAST_CACHE_META (1<<0)
132 // class or superclass has .cxx_construct/.cxx_destruct implementation
133 // FAST_CACHE_HAS_CXX_DTOR is chosen to alias with isa_t::has_cxx_dtor
134 #define FAST_CACHE_HAS_CXX_CTOR (1<<1)
135 #define FAST_CACHE_HAS_CXX_DTOR (1<<2)
136 #endif
137
138 // Fast Alloc fields:
139 // These store the word-aligned instance size + FAST_CACHE_ALLOC_DELTA16,
140 // or 0 if the instance size doesn't fit.
141 //
142 // These bits occupy the same bit positions as the instance size, so the
143 // size can be extracted with a simple mask operation.
144 //
145 // FAST_CACHE_ALLOC_MASK16 extracts the instance size rounded up to the
146 // next 16-byte boundary, which is a fast path for
147 // _objc_rootAllocWithZone()
148 #define FAST_CACHE_ALLOC_MASK 0x1ff8
149 #define FAST_CACHE_ALLOC_MASK16 0x1ff0
150 #define FAST_CACHE_ALLOC_DELTA16 0x0008
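// Illustrative sketch (editor's note, not runtime code): how the fast-alloc
// bits round-trip for a hypothetical 40-byte instance size.
//
//   uint16_t bits  = (uint16_t)(word_align(40) + FAST_CACHE_ALLOC_DELTA16); // 48
//   size_t rounded = bits & FAST_CACHE_ALLOC_MASK16;   // 48 == align16(40)
//
// So _objc_rootAllocWithZone() can read the 16-byte-rounded allocation size
// with a single AND; fastInstanceSize() subtracts the delta back out when
// extra bytes are requested.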
151
152 // class's instances require raw isa
153 #define FAST_CACHE_REQUIRES_RAW_ISA (1<<13)
154 // class or superclass has default alloc/allocWithZone: implementation
155 // Note this is stored in the metaclass.
156 #define FAST_CACHE_HAS_DEFAULT_AWZ (1<<14)
157 // class or superclass has default new/self/class/respondsToSelector/isKindOfClass
158 #define FAST_CACHE_HAS_DEFAULT_CORE (1<<15)
159
160 #else
161
162 // class or superclass has .cxx_construct implementation
163 #define RW_HAS_CXX_CTOR (1<<18)
164 // class or superclass has .cxx_destruct implementation
165 #define RW_HAS_CXX_DTOR (1<<17)
166 // class or superclass has default alloc/allocWithZone: implementation
167 // Note this is stored in the metaclass.
168 #define RW_HAS_DEFAULT_AWZ (1<<16)
169 // class's instances require raw isa
170 #if SUPPORT_NONPOINTER_ISA
171 #define RW_REQUIRES_RAW_ISA (1<<15)
172 #endif
173 // class or superclass has default retain/release/autorelease/retainCount/
174 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
175 #define RW_HAS_DEFAULT_RR (1<<14)
176 // class or superclass has default new/self/class/respondsToSelector/isKindOfClass
177 #define RW_HAS_DEFAULT_CORE (1<<13)
178
179 // class is a Swift class from the pre-stable Swift ABI
180 #define FAST_IS_SWIFT_LEGACY (1UL<<0)
181 // class is a Swift class from the stable Swift ABI
182 #define FAST_IS_SWIFT_STABLE (1UL<<1)
183 // data pointer
184 #define FAST_DATA_MASK 0xfffffffcUL
185
186 #endif // __LP64__
187
188 // The Swift ABI requires that these bits be defined like this on all platforms.
189 static_assert(FAST_IS_SWIFT_LEGACY == 1, "resistance is futile");
190 static_assert(FAST_IS_SWIFT_STABLE == 2, "resistance is futile");
191
192
193 #if __LP64__
194 typedef uint32_t mask_t; // x86_64 & arm64 asm are less efficient with 16-bits
195 #else
196 typedef uint16_t mask_t;
197 #endif
198 typedef uintptr_t SEL;
199
200 struct swift_class_t;
201
202 enum Atomicity { Atomic = true, NotAtomic = false };
203 enum IMPEncoding { Encoded = true, Raw = false };
204
205 struct bucket_t {
206 private:
207 // IMP-first is better for arm64e ptrauth and no worse for arm64.
208 // SEL-first is better for armv7* and i386 and x86_64.
209 #if __arm64__
210 explicit_atomic<uintptr_t> _imp;
211 explicit_atomic<SEL> _sel;
212 #else
213 explicit_atomic<SEL> _sel;
214 explicit_atomic<uintptr_t> _imp;
215 #endif
216
217 // Compute the ptrauth signing modifier from &_imp, newSel, and cls.
218 uintptr_t modifierForSEL(SEL newSel, Class cls) const {
219 return (uintptr_t)&_imp ^ (uintptr_t)newSel ^ (uintptr_t)cls;
220 }
221
222 // Sign newImp, with &_imp, newSel, and cls as modifiers.
223 uintptr_t encodeImp(IMP newImp, SEL newSel, Class cls) const {
224 if (!newImp) return 0;
225 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
226 return (uintptr_t)
227 ptrauth_auth_and_resign(newImp,
228 ptrauth_key_function_pointer, 0,
229 ptrauth_key_process_dependent_code,
230 modifierForSEL(newSel, cls));
231 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
232 return (uintptr_t)newImp ^ (uintptr_t)cls;
233 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
234 return (uintptr_t)newImp;
235 #else
236 #error Unknown method cache IMP encoding.
237 #endif
238 }
239
240 public:
241 inline SEL sel() const { return _sel.load(memory_order::memory_order_relaxed); }
242
243 inline IMP rawImp(objc_class *cls) const {
244 uintptr_t imp = _imp.load(memory_order::memory_order_relaxed);
245 if (!imp) return nil;
246 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
247 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
248 imp ^= (uintptr_t)cls;
249 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
250 #else
251 #error Unknown method cache IMP encoding.
252 #endif
253 return (IMP)imp;
254 }
255
256 inline IMP imp(Class cls) const {
257 uintptr_t imp = _imp.load(memory_order::memory_order_relaxed);
258 if (!imp) return nil;
259 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
260 SEL sel = _sel.load(memory_order::memory_order_relaxed);
261 return (IMP)
262 ptrauth_auth_and_resign((const void *)imp,
263 ptrauth_key_process_dependent_code,
264 modifierForSEL(sel, cls),
265 ptrauth_key_function_pointer, 0);
266 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
267 return (IMP)(imp ^ (uintptr_t)cls);
268 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
269 return (IMP)imp;
270 #else
271 #error Unknown method cache IMP encoding.
272 #endif
273 }
274
275 template <Atomicity, IMPEncoding>
276 void set(SEL newSel, IMP newImp, Class cls);
277 };
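// Illustrative sketch (editor's note, not runtime code): under the
// CACHE_IMP_ENCODING_ISA_XOR scheme above, the cached value is simply
// imp ^ cls, so decoding XORs with the same class again. someImp/someCls
// are hypothetical stand-ins for a cached method entry:
//
//   uintptr_t stored  = (uintptr_t)someImp ^ (uintptr_t)someCls; // encodeImp()
//   IMP       decoded = (IMP)(stored ^ (uintptr_t)someCls);      // imp(cls)
//   // decoded == someImp; decoding against the wrong class yields a
//   // garbage pointer rather than a valid IMP.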
278
279
280 struct cache_t {
281 #if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
282 explicit_atomic<struct bucket_t *> _buckets;
283 explicit_atomic<mask_t> _mask;
284 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
285 explicit_atomic<uintptr_t> _maskAndBuckets;
286 mask_t _mask_unused;
287
288 // How much the mask is shifted by.
289 static constexpr uintptr_t maskShift = 48;
290
291 // Additional bits after the mask which must be zero. msgSend
292 // takes advantage of these additional bits to construct the value
293 // `mask << 4` from `_maskAndBuckets` in a single instruction.
294 static constexpr uintptr_t maskZeroBits = 4;
295
296 // The largest mask value we can store.
297 static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;
298
299 // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
300 static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;
301
302 // Ensure we have enough bits for the buckets pointer.
303 static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
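// Illustrative sketch (editor's note): with CACHE_MASK_STORAGE_HIGH_16 the
// two fields pack into one word roughly like this (hypothetical values, not
// the actual setBucketsAndMask code):
//
//   uintptr_t packed   = (uintptr_t)newBuckets | ((uintptr_t)newMask << maskShift);
//   bucket_t *buckets  = (bucket_t *)(packed & bucketsMask);  // low 44 bits
//   mask_t    mask     = (mask_t)(packed >> maskShift);       // high 16 bits
//
// Because the maskZeroBits just below the mask are zero, objc_msgSend can
// form `mask << 4` by shifting `packed` right by (maskShift - maskZeroBits).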
304 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
305 // _maskAndBuckets stores the mask shift in the low 4 bits, and
306 // the buckets pointer in the remainder of the value. The mask
307 // shift is the value where (0xffff >> shift) produces the correct
308 // mask. This is equal to 16 - log2(cache_size).
309 explicit_atomic<uintptr_t> _maskAndBuckets;
310 mask_t _mask_unused;
311
312 static constexpr uintptr_t maskBits = 4;
313 static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
314 static constexpr uintptr_t bucketsMask = ~maskMask;
315 #else
316 #error Unknown cache mask storage type.
317 #endif
318
319 #if __LP64__
320 uint16_t _flags;
321 #endif
322 uint16_t _occupied;
323
324 public:
325 static bucket_t *emptyBuckets();
326
327 struct bucket_t *buckets();
328 mask_t mask();
329 mask_t occupied();
330 void incrementOccupied();
331 void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
332 void initializeToEmpty();
333
334 unsigned capacity();
335 bool isConstantEmptyCache();
336 bool canBeFreed();
337
338 #if __LP64__
339 bool getBit(uint16_t flags) const {
340 return _flags & flags;
341 }
342 void setBit(uint16_t set) {
343 __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags, set, __ATOMIC_RELAXED);
344 }
345 void clearBit(uint16_t clear) {
346 __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags, ~clear, __ATOMIC_RELAXED);
347 }
348 #endif
349
350 #if FAST_CACHE_ALLOC_MASK
351 bool hasFastInstanceSize(size_t extra) const
352 {
353 if (__builtin_constant_p(extra) && extra == 0) {
354 return _flags & FAST_CACHE_ALLOC_MASK16;
355 }
356 return _flags & FAST_CACHE_ALLOC_MASK;
357 }
358
359 size_t fastInstanceSize(size_t extra) const
360 {
361 ASSERT(hasFastInstanceSize(extra));
362
363 if (__builtin_constant_p(extra) && extra == 0) {
364 return _flags & FAST_CACHE_ALLOC_MASK16;
365 } else {
366 size_t size = _flags & FAST_CACHE_ALLOC_MASK;
367 // remove the FAST_CACHE_ALLOC_DELTA16 that was added
368 // by setFastInstanceSize
369 return align16(size + extra - FAST_CACHE_ALLOC_DELTA16);
370 }
371 }
372
373 void setFastInstanceSize(size_t newSize)
374 {
375 // Set during realization or construction only. No locking needed.
376 uint16_t newBits = _flags & ~FAST_CACHE_ALLOC_MASK;
377 uint16_t sizeBits;
378
379 // Adding FAST_CACHE_ALLOC_DELTA16 allows for FAST_CACHE_ALLOC_MASK16
380 // to yield the proper 16byte aligned allocation size with a single mask
381 sizeBits = word_align(newSize) + FAST_CACHE_ALLOC_DELTA16;
382 sizeBits &= FAST_CACHE_ALLOC_MASK;
383 if (newSize <= sizeBits) {
384 newBits |= sizeBits;
385 }
386 _flags = newBits;
387 }
388 #else
389 bool hasFastInstanceSize(size_t extra) const {
390 return false;
391 }
392 size_t fastInstanceSize(size_t extra) const {
393 abort();
394 }
395 void setFastInstanceSize(size_t extra) {
396 // nothing
397 }
398 #endif
399
400 static size_t bytesForCapacity(uint32_t cap);
401 static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);
402
403 void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
404 void insert(Class cls, SEL sel, IMP imp, id receiver);
405
406 static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn, cold));
407 };
408
409
410 // classref_t is unremapped class_t*
411 typedef struct classref * classref_t;
412
413
414 /***********************************************************************
415 * RelativePointer<T>
416 * A pointer stored as an offset from the address of that offset.
417 *
418 * The target address is computed by taking the address of this struct
419 * and adding the offset stored within it. This is a 32-bit signed
420 * offset giving ±2GB of range.
421 **********************************************************************/
422 template <typename T>
423 struct RelativePointer: nocopy_t {
424 int32_t offset;
425
426 T get() const {
427 uintptr_t base = (uintptr_t)&offset;
428 uintptr_t signExtendedOffset = (uintptr_t)(intptr_t)offset;
429 uintptr_t pointer = base + signExtendedOffset;
430 return (T)pointer;
431 }
432 };
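// Illustrative sketch (editor's note): the offset is relative to the field's
// own address, so a RelativePointer stored at address A with offset D
// resolves to A + D. Hypothetical values:
//
//   RelativePointer<const char *> p;   // suppose &p.offset == 0x1000
//   p.offset = -0x20;                  // target lives 32 bytes earlier
//   const char *s = p.get();           // == (const char *)0x0fe0
//
// The 32-bit signed offset limits targets to within ±2GB of the field.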
433
434
435 #ifdef __PTRAUTH_INTRINSICS__
436 # define StubClassInitializerPtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0xc671)
437 #else
438 # define StubClassInitializerPtrauth
439 #endif
440 struct stub_class_t {
441 uintptr_t isa;
442 _objc_swiftMetadataInitializer StubClassInitializerPtrauth initializer;
443 };
444
445 // A pointer modifier that does nothing to the pointer.
446 struct PointerModifierNop {
447 template <typename ListType, typename T>
448 static T *modify(const ListType &list, T *ptr) { return ptr; }
449 };
450
451 /***********************************************************************
452 * entsize_list_tt<Element, List, FlagMask, PointerModifier>
453 * Generic implementation of an array of non-fragile structs.
454 *
455 * Element is the struct type (e.g. method_t)
456 * List is the specialization of entsize_list_tt (e.g. method_list_t)
457 * FlagMask is used to stash extra bits in the entsize field
458 * (e.g. method list fixup markers)
459 * PointerModifier is applied to the element pointers retrieved from
460 * the array.
461 **********************************************************************/
462 template <typename Element, typename List, uint32_t FlagMask, typename PointerModifier = PointerModifierNop>
463 struct entsize_list_tt {
464 uint32_t entsizeAndFlags;
465 uint32_t count;
466
467 uint32_t entsize() const {
468 return entsizeAndFlags & ~FlagMask;
469 }
470 uint32_t flags() const {
471 return entsizeAndFlags & FlagMask;
472 }
473
474 Element& getOrEnd(uint32_t i) const {
475 ASSERT(i <= count);
476 return *PointerModifier::modify(*this, (Element *)((uint8_t *)this + sizeof(*this) + i*entsize()));
477 }
478 Element& get(uint32_t i) const {
479 ASSERT(i < count);
480 return getOrEnd(i);
481 }
482
483 size_t byteSize() const {
484 return byteSize(entsize(), count);
485 }
486
487 static size_t byteSize(uint32_t entsize, uint32_t count) {
488 return sizeof(entsize_list_tt) + count*entsize;
489 }
490
491 struct iterator;
492 const iterator begin() const {
493 return iterator(*static_cast<const List*>(this), 0);
494 }
495 iterator begin() {
496 return iterator(*static_cast<const List*>(this), 0);
497 }
498 const iterator end() const {
499 return iterator(*static_cast<const List*>(this), count);
500 }
501 iterator end() {
502 return iterator(*static_cast<const List*>(this), count);
503 }
504
505 struct iterator {
506 uint32_t entsize;
507 uint32_t index; // keeping track of this saves a divide in operator-
508 Element* element;
509
510 typedef std::random_access_iterator_tag iterator_category;
511 typedef Element value_type;
512 typedef ptrdiff_t difference_type;
513 typedef Element* pointer;
514 typedef Element& reference;
515
516 iterator() { }
517
518 iterator(const List& list, uint32_t start = 0)
519 : entsize(list.entsize())
520 , index(start)
521 , element(&list.getOrEnd(start))
522 { }
523
524 const iterator& operator += (ptrdiff_t delta) {
525 element = (Element*)((uint8_t *)element + delta*entsize);
526 index += (int32_t)delta;
527 return *this;
528 }
529 const iterator& operator -= (ptrdiff_t delta) {
530 element = (Element*)((uint8_t *)element - delta*entsize);
531 index -= (int32_t)delta;
532 return *this;
533 }
534 const iterator operator + (ptrdiff_t delta) const {
535 return iterator(*this) += delta;
536 }
537 const iterator operator - (ptrdiff_t delta) const {
538 return iterator(*this) -= delta;
539 }
540
541 iterator& operator ++ () { *this += 1; return *this; }
542 iterator& operator -- () { *this -= 1; return *this; }
543 iterator operator ++ (int) {
544 iterator result(*this); *this += 1; return result;
545 }
546 iterator operator -- (int) {
547 iterator result(*this); *this -= 1; return result;
548 }
549
550 ptrdiff_t operator - (const iterator& rhs) const {
551 return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
552 }
553
554 Element& operator * () const { return *element; }
555 Element* operator -> () const { return element; }
556
557 operator Element& () const { return *element; }
558
559 bool operator == (const iterator& rhs) const {
560 return this->element == rhs.element;
561 }
562 bool operator != (const iterator& rhs) const {
563 return this->element != rhs.element;
564 }
565
566 bool operator < (const iterator& rhs) const {
567 return this->element < rhs.element;
568 }
569 bool operator > (const iterator& rhs) const {
570 return this->element > rhs.element;
571 }
572 };
573 };
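// Illustrative sketch (editor's note): a minimal specialization showing how
// entsize_list_tt is meant to be used. thing_t/thing_list_t and the 0x3 flag
// mask are hypothetical, not runtime types; real lists (method_list_t,
// ivar_list_t, ...) follow the same pattern.
//
//   struct thing_t { const char *name; };
//   struct thing_list_t : entsize_list_tt<thing_t, thing_list_t, 0x3> { };
//
//   void dump(thing_list_t *list) {
//       // The iterator strides by entsize(), not sizeof(thing_t), so it
//       // still works if a newer compiler emits larger entries.
//       for (auto& t : *list) printf("%s\n", t.name);
//   }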
574
575
576 struct method_t {
577 static const uint32_t smallMethodListFlag = 0x80000000;
578
579 method_t(const method_t &other) = delete;
580
581 // The representation of a "big" method. This is the traditional
582 // representation of three pointers storing the selector, types
583 // and implementation.
584 struct big {
585 SEL name;
586 const char *types;
587 MethodListIMP imp;
588 };
589
590 private:
591 bool isSmall() const {
592 return ((uintptr_t)this & 1) == 1;
593 }
594
595 // The representation of a "small" method. This stores three
596 // relative offsets to the name, types, and implementation.
597 struct small {
598 RelativePointer<SEL *> name;
599 RelativePointer<const char *> types;
600 RelativePointer<IMP> imp;
601 };
602
603 small &small() const {
604 ASSERT(isSmall());
605 return *(struct small *)((uintptr_t)this & ~(uintptr_t)1);
606 }
607
608 IMP remappedImp(bool needsLock) const;
609 void remapImp(IMP imp);
610 objc_method_description *getSmallDescription() const;
611
612 public:
613 static const auto bigSize = sizeof(struct big);
614 static const auto smallSize = sizeof(struct small);
615
616 // The pointer modifier used with method lists. When the method
617 // list contains small methods, set the bottom bit of the pointer.
618 // We use that bottom bit elsewhere to distinguish between big
619 // and small methods.
620 struct pointer_modifier {
621 template <typename ListType>
622 static method_t *modify(const ListType &list, method_t *ptr) {
623 if (list.flags() & smallMethodListFlag)
624 return (method_t *)((uintptr_t)ptr | 1);
625 return ptr;
626 }
627 };
628
629 big &big() const {
630 ASSERT(!isSmall());
631 return *(struct big *)this;
632 }
633
634 SEL &name() const {
635 return isSmall() ? *small().name.get() : big().name;
636 }
637 const char *types() const {
638 return isSmall() ? small().types.get() : big().types;
639 }
640 IMP imp(bool needsLock) const {
641 if (isSmall()) {
642 IMP imp = remappedImp(needsLock);
643 if (!imp)
644 imp = ptrauth_sign_unauthenticated(small().imp.get(),
645 ptrauth_key_function_pointer, 0);
646 return imp;
647 }
648 return big().imp;
649 }
650
651 void setImp(IMP imp) {
652 if (isSmall()) {
653 remapImp(imp);
654 } else {
655 big().imp = imp;
656 }
657
658 }
659
660 objc_method_description *getDescription() const {
661 return isSmall() ? getSmallDescription() : (struct objc_method_description *)this;
662 }
663
664 struct SortBySELAddress :
665 public std::binary_function<const struct method_t::big&,
666 const struct method_t::big&, bool>
667 {
668 bool operator() (const struct method_t::big& lhs,
669 const struct method_t::big& rhs)
670 { return lhs.name < rhs.name; }
671 };
672
673 method_t &operator=(const method_t &other) {
674 ASSERT(!isSmall());
675 big().name = other.name();
676 big().types = other.types();
677 big().imp = other.imp(false);
678 return *this;
679 }
680 };
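// Illustrative sketch (editor's note): how the low-bit tagging above is meant
// to be read. `m` is a hypothetical method_t pointer handed out by a
// method_list_t with pointer_modifier applied.
//
//   method_t *m = ...;                 // tagged by pointer_modifier
//   if ((uintptr_t)m & 1) {
//       // small: three 32-bit relative offsets; strip the tag, then chase
//       // the RelativePointers to reach the real name/types/imp.
//   } else {
//       // big: three plain pointers; the struct can be used in place.
//   }
//
// In practice callers never branch by hand; name()/types()/imp() do it.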
681
682 struct ivar_t {
683 #if __x86_64__
684 // *offset was originally 64-bit on some x86_64 platforms.
685 // We read and write only 32 bits of it.
686 // Some metadata provides all 64 bits. This is harmless for unsigned
687 // little-endian values.
688 // Some code uses all 64 bits. class_addIvar() over-allocates the
689 // offset for their benefit.
690 #endif
691 int32_t *offset;
692 const char *name;
693 const char *type;
694 // alignment is sometimes -1; use alignment() instead
695 uint32_t alignment_raw;
696 uint32_t size;
697
698 uint32_t alignment() const {
699 if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
700 return 1 << alignment_raw;
701 }
702 };
703
704 struct property_t {
705 const char *name;
706 const char *attributes;
707 };
708
709 // Two bits of entsize are used for fixup markers.
710 // Reserve the top half of entsize for more flags. We never
711 // need entry sizes anywhere close to 64kB.
712 //
713 // Currently there is one flag defined: the small method list flag,
714 // method_t::smallMethodListFlag. Other flags are currently ignored.
715 // (NOTE: these bits are only ignored on runtimes that support small
716 // method lists. Older runtimes will treat them as part of the entry
717 // size!)
718 struct method_list_t : entsize_list_tt<method_t, method_list_t, 0xffff0003, method_t::pointer_modifier> {
719 bool isUniqued() const;
720 bool isFixedUp() const;
721 void setFixedUp();
722
723 uint32_t indexOfMethod(const method_t *meth) const {
724 uint32_t i =
725 (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
726 ASSERT(i < count);
727 return i;
728 }
729
730 bool isSmallList() const {
731 return flags() & method_t::smallMethodListFlag;
732 }
733
734 bool isExpectedSize() const {
735 if (isSmallList())
736 return entsize() == method_t::smallSize;
737 else
738 return entsize() == method_t::bigSize;
739 }
740
741 method_list_t *duplicate() const {
742 method_list_t *dup;
743 if (isSmallList()) {
744 dup = (method_list_t *)calloc(byteSize(method_t::bigSize, count), 1);
745 dup->entsizeAndFlags = method_t::bigSize;
746 } else {
747 dup = (method_list_t *)calloc(this->byteSize(), 1);
748 dup->entsizeAndFlags = this->entsizeAndFlags;
749 }
750 dup->count = this->count;
751 std::copy(begin(), end(), dup->begin());
752 return dup;
753 }
754 };
755
756 struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
757 bool containsIvar(Ivar ivar) const {
758 return (ivar >= (Ivar)&*begin() && ivar < (Ivar)&*end());
759 }
760 };
761
762 struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
763 };
764
765
766 typedef uintptr_t protocol_ref_t; // protocol_t *, but unremapped
767
768 // Values for protocol_t->flags
769 #define PROTOCOL_FIXED_UP_2 (1<<31) // must never be set by compiler
770 #define PROTOCOL_FIXED_UP_1 (1<<30) // must never be set by compiler
771 #define PROTOCOL_IS_CANONICAL (1<<29) // must never be set by compiler
772 // Bits 0..15 are reserved for Swift's use.
773
774 #define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
775
776 struct protocol_t : objc_object {
777 const char *mangledName;
778 struct protocol_list_t *protocols;
779 method_list_t *instanceMethods;
780 method_list_t *classMethods;
781 method_list_t *optionalInstanceMethods;
782 method_list_t *optionalClassMethods;
783 property_list_t *instanceProperties;
784 uint32_t size; // sizeof(protocol_t)
785 uint32_t flags;
786 // Fields below this point are not always present on disk.
787 const char **_extendedMethodTypes;
788 const char *_demangledName;
789 property_list_t *_classProperties;
790
791 const char *demangledName();
792
793 const char *nameForLogging() {
794 return demangledName();
795 }
796
797 bool isFixedUp() const;
798 void setFixedUp();
799
800 bool isCanonical() const;
801 void clearIsCanonical();
802
803 # define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))
804
805 bool hasExtendedMethodTypesField() const {
806 return HAS_FIELD(_extendedMethodTypes);
807 }
808 bool hasDemangledNameField() const {
809 return HAS_FIELD(_demangledName);
810 }
811 bool hasClassPropertiesField() const {
812 return HAS_FIELD(_classProperties);
813 }
814
815 # undef HAS_FIELD
816
817 const char **extendedMethodTypes() const {
818 return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
819 }
820
821 property_list_t *classProperties() const {
822 return hasClassPropertiesField() ? _classProperties : nil;
823 }
824 };
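// Illustrative sketch (editor's note): protocol_t grows over time and old
// binaries may carry the shorter layout, so trailing fields are only read
// when the recorded `size` proves they exist. A hypothetical accessor for a
// future field (_futureField does not exist) would follow the same pattern
// as the ones above:
//
//   bool hasFutureField() const {
//       return size >= offsetof(protocol_t, _futureField) + sizeof(_futureField);
//   }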
825
826 struct protocol_list_t {
827 // count is pointer-sized by accident.
828 uintptr_t count;
829 protocol_ref_t list[0]; // variable-size
830
831 size_t byteSize() const {
832 return sizeof(*this) + count*sizeof(list[0]);
833 }
834
835 protocol_list_t *duplicate() const {
836 return (protocol_list_t *)memdup(this, this->byteSize());
837 }
838
839 typedef protocol_ref_t* iterator;
840 typedef const protocol_ref_t* const_iterator;
841
842 const_iterator begin() const {
843 return list;
844 }
845 iterator begin() {
846 return list;
847 }
848 const_iterator end() const {
849 return list + count;
850 }
851 iterator end() {
852 return list + count;
853 }
854 };
855
856 struct class_ro_t {
857 uint32_t flags;
858 uint32_t instanceStart;
859 uint32_t instanceSize;
860 #ifdef __LP64__
861 uint32_t reserved;
862 #endif
863
864 const uint8_t * ivarLayout;
865
866 const char * name;
867 WrappedPtr<method_list_t, PtrauthStrip> baseMethodList;
868 protocol_list_t * baseProtocols;
869 const ivar_list_t * ivars;
870
871 const uint8_t * weakIvarLayout;
872 property_list_t *baseProperties;
873
874 // This field exists only when RO_HAS_SWIFT_INITIALIZER is set.
875 _objc_swiftMetadataInitializer __ptrauth_objc_method_list_imp _swiftMetadataInitializer_NEVER_USE[0];
876
877 _objc_swiftMetadataInitializer swiftMetadataInitializer() const {
878 if (flags & RO_HAS_SWIFT_INITIALIZER) {
879 return _swiftMetadataInitializer_NEVER_USE[0];
880 } else {
881 return nil;
882 }
883 }
884
885 method_list_t *baseMethods() const {
886 return baseMethodList;
887 }
888
889 class_ro_t *duplicate() const {
890 if (flags & RO_HAS_SWIFT_INITIALIZER) {
891 size_t size = sizeof(*this) + sizeof(_swiftMetadataInitializer_NEVER_USE[0]);
892 class_ro_t *ro = (class_ro_t *)memdup(this, size);
893 ro->_swiftMetadataInitializer_NEVER_USE[0] = this->_swiftMetadataInitializer_NEVER_USE[0];
894 return ro;
895 } else {
896 size_t size = sizeof(*this);
897 class_ro_t *ro = (class_ro_t *)memdup(this, size);
898 return ro;
899 }
900 }
901 };
902
903
904 /***********************************************************************
905 * list_array_tt<Element, List, Ptr>
906 * Generic implementation for metadata that can be augmented by categories.
907 *
908 * Element is the underlying metadata type (e.g. method_t)
909 * List is the metadata's list type (e.g. method_list_t)
910 * Ptr is a template applied to List to make List*. Useful for
911 * applying qualifiers to the pointer type.
912 *
913 * A list_array_tt has one of three values:
914 * - empty
915 * - a pointer to a single list
916 * - an array of pointers to lists
917 *
918 * countLists/beginLists/endLists iterate the metadata lists
919 * count/begin/end iterate the underlying metadata elements
920 **********************************************************************/
921 template <typename Element, typename List, template<typename> class Ptr>
922 class list_array_tt {
923 struct array_t {
924 uint32_t count;
925 Ptr<List> lists[0];
926
927 static size_t byteSize(uint32_t count) {
928 return sizeof(array_t) + count*sizeof(lists[0]);
929 }
930 size_t byteSize() {
931 return byteSize(count);
932 }
933 };
934
935 protected:
936 class iterator {
937 const Ptr<List> *lists;
938 const Ptr<List> *listsEnd;
939 typename List::iterator m, mEnd;
940
941 public:
942 iterator(const Ptr<List> *begin, const Ptr<List> *end)
943 : lists(begin), listsEnd(end)
944 {
945 if (begin != end) {
946 m = (*begin)->begin();
947 mEnd = (*begin)->end();
948 }
949 }
950
951 const Element& operator * () const {
952 return *m;
953 }
954 Element& operator * () {
955 return *m;
956 }
957
958 bool operator != (const iterator& rhs) const {
959 if (lists != rhs.lists) return true;
960 if (lists == listsEnd) return false; // m is undefined
961 if (m != rhs.m) return true;
962 return false;
963 }
964
965 const iterator& operator ++ () {
966 ASSERT(m != mEnd);
967 m++;
968 if (m == mEnd) {
969 ASSERT(lists != listsEnd);
970 lists++;
971 if (lists != listsEnd) {
972 m = (*lists)->begin();
973 mEnd = (*lists)->end();
974 }
975 }
976 return *this;
977 }
978 };
979
980 private:
981 union {
982 Ptr<List> list;
983 uintptr_t arrayAndFlag;
984 };
985
986 bool hasArray() const {
987 return arrayAndFlag & 1;
988 }
989
990 array_t *array() const {
991 return (array_t *)(arrayAndFlag & ~1);
992 }
993
994 void setArray(array_t *array) {
995 arrayAndFlag = (uintptr_t)array | 1;
996 }
997
998 void validate() {
999 for (auto cursor = beginLists(), end = endLists(); cursor != end; cursor++)
1000 cursor->validate();
1001 }
1002
1003 public:
1004 list_array_tt() : list(nullptr) { }
1005 list_array_tt(List *l) : list(l) { }
1006 list_array_tt(const list_array_tt &other) {
1007 *this = other;
1008 }
1009
1010 list_array_tt &operator =(const list_array_tt &other) {
1011 if (other.hasArray()) {
1012 arrayAndFlag = other.arrayAndFlag;
1013 } else {
1014 list = other.list;
1015 }
1016 return *this;
1017 }
1018
1019 uint32_t count() const {
1020 uint32_t result = 0;
1021 for (auto lists = beginLists(), end = endLists();
1022 lists != end;
1023 ++lists)
1024 {
1025 result += (*lists)->count;
1026 }
1027 return result;
1028 }
1029
1030 iterator begin() const {
1031 return iterator(beginLists(), endLists());
1032 }
1033
1034 iterator end() const {
1035 auto e = endLists();
1036 return iterator(e, e);
1037 }
1038
1039
1040 uint32_t countLists() {
1041 if (hasArray()) {
1042 return array()->count;
1043 } else if (list) {
1044 return 1;
1045 } else {
1046 return 0;
1047 }
1048 }
1049
1050 const Ptr<List>* beginLists() const {
1051 if (hasArray()) {
1052 return array()->lists;
1053 } else {
1054 return &list;
1055 }
1056 }
1057
1058 const Ptr<List>* endLists() const {
1059 if (hasArray()) {
1060 return array()->lists + array()->count;
1061 } else if (list) {
1062 return &list + 1;
1063 } else {
1064 return &list;
1065 }
1066 }
1067
1068 void attachLists(List* const * addedLists, uint32_t addedCount) {
1069 if (addedCount == 0) return;
1070
1071 if (hasArray()) {
1072 // many lists -> many lists
1073 uint32_t oldCount = array()->count;
1074 uint32_t newCount = oldCount + addedCount;
1075 array_t *newArray = (array_t *)malloc(array_t::byteSize(newCount));
1076 newArray->count = newCount;
1077 array()->count = newCount;
1078
1079 for (int i = oldCount - 1; i >= 0; i--)
1080 newArray->lists[i + addedCount] = array()->lists[i];
1081 for (unsigned i = 0; i < addedCount; i++)
1082 newArray->lists[i] = addedLists[i];
1083 free(array());
1084 setArray(newArray);
1085 validate();
1086 }
1087 else if (!list && addedCount == 1) {
1088 // 0 lists -> 1 list
1089 list = addedLists[0];
1090 validate();
1091 }
1092 else {
1093 // 1 list -> many lists
1094 Ptr<List> oldList = list;
1095 uint32_t oldCount = oldList ? 1 : 0;
1096 uint32_t newCount = oldCount + addedCount;
1097 setArray((array_t *)malloc(array_t::byteSize(newCount)));
1098 array()->count = newCount;
1099 if (oldList) array()->lists[addedCount] = oldList;
1100 for (unsigned i = 0; i < addedCount; i++)
1101 array()->lists[i] = addedLists[i];
1102 validate();
1103 }
1104 }
1105
1106 void tryFree() {
1107 if (hasArray()) {
1108 for (uint32_t i = 0; i < array()->count; i++) {
1109 try_free(array()->lists[i]);
1110 }
1111 try_free(array());
1112 }
1113 else if (list) {
1114 try_free(list);
1115 }
1116 }
1117
1118 template<typename Other>
1119 void duplicateInto(Other &other) {
1120 if (hasArray()) {
1121 array_t *a = array();
1122 other.setArray((array_t *)memdup(a, a->byteSize()));
1123 for (uint32_t i = 0; i < a->count; i++) {
1124 other.array()->lists[i] = a->lists[i]->duplicate();
1125 }
1126 } else if (list) {
1127 other.list = list->duplicate();
1128 } else {
1129 other.list = nil;
1130 }
1131 }
1132 };
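// Illustrative sketch (editor's note): the union above encodes three states.
// Addresses are hypothetical; the low bit of arrayAndFlag is the
// "this is an array" flag.
//
//   empty:        list == nullptr              (arrayAndFlag == 0)
//   one list:     list == 0x100003000          (low bit clear)
//   many lists:   arrayAndFlag == 0x100004001  (array_t * 0x100004000 | 1)
//
// attachLists() walks these states upward: 0 -> 1 list stores the pointer
// directly; 1 -> many and many -> many allocate a fresh array_t.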
1133
1134
1135 DECLARE_AUTHED_PTR_TEMPLATE(method_list_t)
1136
1137 class method_array_t :
1138 public list_array_tt<method_t, method_list_t, method_list_t_authed_ptr>
1139 {
1140 typedef list_array_tt<method_t, method_list_t, method_list_t_authed_ptr> Super;
1141
1142 public:
1143 method_array_t() : Super() { }
1144 method_array_t(method_list_t *l) : Super(l) { }
1145
1146 const method_list_t_authed_ptr<method_list_t> *beginCategoryMethodLists() const {
1147 return beginLists();
1148 }
1149
1150 const method_list_t_authed_ptr<method_list_t> *endCategoryMethodLists(Class cls) const;
1151 };
1152
1153
1154 class property_array_t :
1155 public list_array_tt<property_t, property_list_t, RawPtr>
1156 {
1157 typedef list_array_tt<property_t, property_list_t, RawPtr> Super;
1158
1159 public:
1160 property_array_t() : Super() { }
1161 property_array_t(property_list_t *l) : Super(l) { }
1162 };
1163
1164
1165 class protocol_array_t :
1166 public list_array_tt<protocol_ref_t, protocol_list_t, RawPtr>
1167 {
1168 typedef list_array_tt<protocol_ref_t, protocol_list_t, RawPtr> Super;
1169
1170 public:
1171 protocol_array_t() : Super() { }
1172 protocol_array_t(protocol_list_t *l) : Super(l) { }
1173 };
1174
1175 struct class_rw_ext_t {
1176 DECLARE_AUTHED_PTR_TEMPLATE(class_ro_t)
1177 class_ro_t_authed_ptr<const class_ro_t> ro;
1178 method_array_t methods;
1179 property_array_t properties;
1180 protocol_array_t protocols;
1181 char *demangledName;
1182 uint32_t version;
1183 };
1184
1185 struct class_rw_t {
1186 // Be warned that Symbolication knows the layout of this structure.
1187 uint32_t flags;
1188 uint16_t witness;
1189 #if SUPPORT_INDEXED_ISA
1190 uint16_t index;
1191 #endif
1192
1193 explicit_atomic<uintptr_t> ro_or_rw_ext;
1194
1195 Class firstSubclass;
1196 Class nextSiblingClass;
1197
1198 private:
1199 using ro_or_rw_ext_t = objc::PointerUnion<const class_ro_t, class_rw_ext_t, PTRAUTH_STR("class_ro_t"), PTRAUTH_STR("class_rw_ext_t")>;
1200
1201 const ro_or_rw_ext_t get_ro_or_rwe() const {
1202 return ro_or_rw_ext_t{ro_or_rw_ext};
1203 }
1204
1205 void set_ro_or_rwe(const class_ro_t *ro) {
1206 ro_or_rw_ext_t{ro, &ro_or_rw_ext}.storeAt(ro_or_rw_ext, memory_order_relaxed);
1207 }
1208
1209 void set_ro_or_rwe(class_rw_ext_t *rwe, const class_ro_t *ro) {
1210 // the release barrier is so that the class_rw_ext_t::ro initialization
1211 // is visible to lockless readers
1212 rwe->ro = ro;
1213 ro_or_rw_ext_t{rwe, &ro_or_rw_ext}.storeAt(ro_or_rw_ext, memory_order_release);
1214 }
1215
1216 class_rw_ext_t *extAlloc(const class_ro_t *ro, bool deep = false);
1217
1218 public:
1219 void setFlags(uint32_t set)
1220 {
1221 __c11_atomic_fetch_or((_Atomic(uint32_t) *)&flags, set, __ATOMIC_RELAXED);
1222 }
1223
1224 void clearFlags(uint32_t clear)
1225 {
1226 __c11_atomic_fetch_and((_Atomic(uint32_t) *)&flags, ~clear, __ATOMIC_RELAXED);
1227 }
1228
1229 // set and clear must not overlap
1230 void changeFlags(uint32_t set, uint32_t clear)
1231 {
1232 ASSERT((set & clear) == 0);
1233
1234 uint32_t oldf, newf;
1235 do {
1236 oldf = flags;
1237 newf = (oldf | set) & ~clear;
1238 } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
1239 }
1240
1241 class_rw_ext_t *ext() const {
1242 return get_ro_or_rwe().dyn_cast<class_rw_ext_t *>(&ro_or_rw_ext);
1243 }
1244
1245 class_rw_ext_t *extAllocIfNeeded() {
1246 auto v = get_ro_or_rwe();
1247 if (fastpath(v.is<class_rw_ext_t *>())) {
1248 return v.get<class_rw_ext_t *>(&ro_or_rw_ext);
1249 } else {
1250 return extAlloc(v.get<const class_ro_t *>(&ro_or_rw_ext));
1251 }
1252 }
1253
1254 class_rw_ext_t *deepCopy(const class_ro_t *ro) {
1255 return extAlloc(ro, true);
1256 }
1257
1258 const class_ro_t *ro() const {
1259 auto v = get_ro_or_rwe();
1260 if (slowpath(v.is<class_rw_ext_t *>())) {
1261 return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->ro;
1262 }
1263 return v.get<const class_ro_t *>(&ro_or_rw_ext);
1264 }
1265
1266 void set_ro(const class_ro_t *ro) {
1267 auto v = get_ro_or_rwe();
1268 if (v.is<class_rw_ext_t *>()) {
1269 v.get<class_rw_ext_t *>(&ro_or_rw_ext)->ro = ro;
1270 } else {
1271 set_ro_or_rwe(ro);
1272 }
1273 }
1274
1275 const method_array_t methods() const {
1276 auto v = get_ro_or_rwe();
1277 if (v.is<class_rw_ext_t *>()) {
1278 return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->methods;
1279 } else {
1280 return method_array_t{v.get<const class_ro_t *>(&ro_or_rw_ext)->baseMethods()};
1281 }
1282 }
1283
1284 const property_array_t properties() const {
1285 auto v = get_ro_or_rwe();
1286 if (v.is<class_rw_ext_t *>()) {
1287 return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->properties;
1288 } else {
1289 return property_array_t{v.get<const class_ro_t *>(&ro_or_rw_ext)->baseProperties};
1290 }
1291 }
1292
1293 const protocol_array_t protocols() const {
1294 auto v = get_ro_or_rwe();
1295 if (v.is<class_rw_ext_t *>()) {
1296 return v.get<class_rw_ext_t *>(&ro_or_rw_ext)->protocols;
1297 } else {
1298 return protocol_array_t{v.get<const class_ro_t *>(&ro_or_rw_ext)->baseProtocols};
1299 }
1300 }
1301 };
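// Illustrative sketch (editor's note): class_rw_t starts out pointing at the
// compiler-emitted class_ro_t and only grows a class_rw_ext_t when something
// (categories, added methods, demangled name, ...) needs writable storage.
// A reader therefore follows roughly this shape, mirroring methods() above:
//
//   const class_rw_t *rw = ...;          // hypothetical
//   if (auto *rwe = rw->ext()) {
//       // extension allocated: lists may include category additions
//   } else {
//       // fast path: still just the read-only data from the binary
//   }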
1302
1303
1304 struct class_data_bits_t {
1305 friend objc_class;
1306
1307 // Values are the FAST_ flags above.
1308 uintptr_t bits;
1309 private:
1310 bool getBit(uintptr_t bit) const
1311 {
1312 return bits & bit;
1313 }
1314
1315 // Atomically set the bits in `set` and clear the bits in `clear`.
1316 // set and clear must not overlap.
1317 void setAndClearBits(uintptr_t set, uintptr_t clear)
1318 {
1319 ASSERT((set & clear) == 0);
1320 uintptr_t oldBits;
1321 uintptr_t newBits;
1322 do {
1323 oldBits = LoadExclusive(&bits);
1324 newBits = (oldBits | set) & ~clear;
1325 } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
1326 }
1327
1328 void setBits(uintptr_t set) {
1329 __c11_atomic_fetch_or((_Atomic(uintptr_t) *)&bits, set, __ATOMIC_RELAXED);
1330 }
1331
1332 void clearBits(uintptr_t clear) {
1333 __c11_atomic_fetch_and((_Atomic(uintptr_t) *)&bits, ~clear, __ATOMIC_RELAXED);
1334 }
1335
1336 public:
1337
1338 class_rw_t* data() const {
1339 return (class_rw_t *)(bits & FAST_DATA_MASK);
1340 }
1341 void setData(class_rw_t *newData)
1342 {
1343 ASSERT(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE)));
1344 // Set during realization or construction only. No locking needed.
1345 // Use a store-release fence because there may be concurrent
1346 // readers of data and data's contents.
1347 uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
1348 atomic_thread_fence(memory_order_release);
1349 bits = newBits;
1350 }
1351
1352 // Get the class's ro data, even in the presence of concurrent realization.
1353 // fixme this isn't really safe without a compiler barrier at least
1354 // and probably a memory barrier when realizeClass changes the data field
1355 const class_ro_t *safe_ro() {
1356 class_rw_t *maybe_rw = data();
1357 if (maybe_rw->flags & RW_REALIZED) {
1358 // maybe_rw is rw
1359 return maybe_rw->ro();
1360 } else {
1361 // maybe_rw is actually ro
1362 return (class_ro_t *)maybe_rw;
1363 }
1364 }
1365
1366 void setClassArrayIndex(unsigned Idx) {
1367 #if SUPPORT_INDEXED_ISA
1368 // Index 0 is unused so that we can rely on zero-initialization from calloc.
1369 ASSERT(Idx > 0);
1370 data()->index = Idx;
1371 #endif
1372 }
1373
1374 unsigned classArrayIndex() {
1375 #if SUPPORT_INDEXED_ISA
1376 return data()->index;
1377 #else
1378 return 0;
1379 #endif
1380 }
1381
1382 bool isAnySwift() {
1383 return isSwiftStable() || isSwiftLegacy();
1384 }
1385
1386 bool isSwiftStable() {
1387 return getBit(FAST_IS_SWIFT_STABLE);
1388 }
1389 void setIsSwiftStable() {
1390 setAndClearBits(FAST_IS_SWIFT_STABLE, FAST_IS_SWIFT_LEGACY);
1391 }
1392
1393 bool isSwiftLegacy() {
1394 return getBit(FAST_IS_SWIFT_LEGACY);
1395 }
1396 void setIsSwiftLegacy() {
1397 setAndClearBits(FAST_IS_SWIFT_LEGACY, FAST_IS_SWIFT_STABLE);
1398 }
1399
1400 // fixme remove this once the Swift runtime uses the stable bits
1401 bool isSwiftStable_ButAllowLegacyForNow() {
1402 return isAnySwift();
1403 }
1404
1405 _objc_swiftMetadataInitializer swiftMetadataInitializer() {
1406 // This function is called on un-realized classes without
1407 // holding any locks.
1408 // Beware of races with other realizers.
1409 return safe_ro()->swiftMetadataInitializer();
1410 }
1411 };
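// Illustrative sketch (editor's note): `bits` packs the class_rw_t pointer
// together with the FAST_* flags, so data() is a single mask. Hypothetical
// LP64 value:
//
//   bits   == 0x00000001002ec5e6
//   data() == (class_rw_t *)(bits & FAST_DATA_MASK)  // 0x00000001002ec5e0
//   isSwiftStable() == (bits & FAST_IS_SWIFT_STABLE) != 0   // bit 1 set here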
1412
1413
1414 struct objc_class : objc_object {
1415 // Class ISA;
1416 Class superclass;
1417 cache_t cache; // formerly cache pointer and vtable
1418 class_data_bits_t bits; // class_rw_t * plus custom rr/alloc flags
1419
1420 class_rw_t *data() const {
1421 return bits.data();
1422 }
1423 void setData(class_rw_t *newData) {
1424 bits.setData(newData);
1425 }
1426
1427 void setInfo(uint32_t set) {
1428 ASSERT(isFuture() || isRealized());
1429 data()->setFlags(set);
1430 }
1431
1432 void clearInfo(uint32_t clear) {
1433 ASSERT(isFuture() || isRealized());
1434 data()->clearFlags(clear);
1435 }
1436
1437 // set and clear must not overlap
1438 void changeInfo(uint32_t set, uint32_t clear) {
1439 ASSERT(isFuture() || isRealized());
1440 ASSERT((set & clear) == 0);
1441 data()->changeFlags(set, clear);
1442 }
1443
1444 #if FAST_HAS_DEFAULT_RR
1445 bool hasCustomRR() const {
1446 return !bits.getBit(FAST_HAS_DEFAULT_RR);
1447 }
1448 void setHasDefaultRR() {
1449 bits.setBits(FAST_HAS_DEFAULT_RR);
1450 }
1451 void setHasCustomRR() {
1452 bits.clearBits(FAST_HAS_DEFAULT_RR);
1453 }
1454 #else
1455 bool hasCustomRR() const {
1456 return !(bits.data()->flags & RW_HAS_DEFAULT_RR);
1457 }
1458 void setHasDefaultRR() {
1459 bits.data()->setFlags(RW_HAS_DEFAULT_RR);
1460 }
1461 void setHasCustomRR() {
1462 bits.data()->clearFlags(RW_HAS_DEFAULT_RR);
1463 }
1464 #endif
1465
1466 #if FAST_CACHE_HAS_DEFAULT_AWZ
1467 bool hasCustomAWZ() const {
1468 return !cache.getBit(FAST_CACHE_HAS_DEFAULT_AWZ);
1469 }
1470 void setHasDefaultAWZ() {
1471 cache.setBit(FAST_CACHE_HAS_DEFAULT_AWZ);
1472 }
1473 void setHasCustomAWZ() {
1474 cache.clearBit(FAST_CACHE_HAS_DEFAULT_AWZ);
1475 }
1476 #else
1477 bool hasCustomAWZ() const {
1478 return !(bits.data()->flags & RW_HAS_DEFAULT_AWZ);
1479 }
1480 void setHasDefaultAWZ() {
1481 bits.data()->setFlags(RW_HAS_DEFAULT_AWZ);
1482 }
1483 void setHasCustomAWZ() {
1484 bits.data()->clearFlags(RW_HAS_DEFAULT_AWZ);
1485 }
1486 #endif
1487
1488 #if FAST_CACHE_HAS_DEFAULT_CORE
1489 bool hasCustomCore() const {
1490 return !cache.getBit(FAST_CACHE_HAS_DEFAULT_CORE);
1491 }
1492 void setHasDefaultCore() {
1493 return cache.setBit(FAST_CACHE_HAS_DEFAULT_CORE);
1494 }
1495 void setHasCustomCore() {
1496 return cache.clearBit(FAST_CACHE_HAS_DEFAULT_CORE);
1497 }
1498 #else
1499 bool hasCustomCore() const {
1500 return !(bits.data()->flags & RW_HAS_DEFAULT_CORE);
1501 }
1502 void setHasDefaultCore() {
1503 bits.data()->setFlags(RW_HAS_DEFAULT_CORE);
1504 }
1505 void setHasCustomCore() {
1506 bits.data()->clearFlags(RW_HAS_DEFAULT_CORE);
1507 }
1508 #endif
1509
1510 #if FAST_CACHE_HAS_CXX_CTOR
1511 bool hasCxxCtor() {
1512 ASSERT(isRealized());
1513 return cache.getBit(FAST_CACHE_HAS_CXX_CTOR);
1514 }
1515 void setHasCxxCtor() {
1516 cache.setBit(FAST_CACHE_HAS_CXX_CTOR);
1517 }
1518 #else
1519 bool hasCxxCtor() {
1520 ASSERT(isRealized());
1521 return bits.data()->flags & RW_HAS_CXX_CTOR;
1522 }
1523 void setHasCxxCtor() {
1524 bits.data()->setFlags(RW_HAS_CXX_CTOR);
1525 }
1526 #endif
1527
1528 #if FAST_CACHE_HAS_CXX_DTOR
1529 bool hasCxxDtor() {
1530 ASSERT(isRealized());
1531 return cache.getBit(FAST_CACHE_HAS_CXX_DTOR);
1532 }
1533 void setHasCxxDtor() {
1534 cache.setBit(FAST_CACHE_HAS_CXX_DTOR);
1535 }
1536 #else
1537 bool hasCxxDtor() {
1538 ASSERT(isRealized());
1539 return bits.data()->flags & RW_HAS_CXX_DTOR;
1540 }
1541 void setHasCxxDtor() {
1542 bits.data()->setFlags(RW_HAS_CXX_DTOR);
1543 }
1544 #endif
1545
1546 #if FAST_CACHE_REQUIRES_RAW_ISA
1547 bool instancesRequireRawIsa() {
1548 return cache.getBit(FAST_CACHE_REQUIRES_RAW_ISA);
1549 }
1550 void setInstancesRequireRawIsa() {
1551 cache.setBit(FAST_CACHE_REQUIRES_RAW_ISA);
1552 }
1553 #elif SUPPORT_NONPOINTER_ISA
1554 bool instancesRequireRawIsa() {
1555 return bits.data()->flags & RW_REQUIRES_RAW_ISA;
1556 }
1557 void setInstancesRequireRawIsa() {
1558 bits.data()->setFlags(RW_REQUIRES_RAW_ISA);
1559 }
1560 #else
1561 bool instancesRequireRawIsa() {
1562 return true;
1563 }
1564 void setInstancesRequireRawIsa() {
1565 // nothing
1566 }
1567 #endif
1568 void setInstancesRequireRawIsaRecursively(bool inherited = false);
1569 void printInstancesRequireRawIsa(bool inherited);
1570
1571 bool canAllocNonpointer() {
1572 ASSERT(!isFuture());
1573 return !instancesRequireRawIsa();
1574 }
1575
1576 bool isSwiftStable() {
1577 return bits.isSwiftStable();
1578 }
1579
1580 bool isSwiftLegacy() {
1581 return bits.isSwiftLegacy();
1582 }
1583
1584 bool isAnySwift() {
1585 return bits.isAnySwift();
1586 }
1587
1588 bool isSwiftStable_ButAllowLegacyForNow() {
1589 return bits.isSwiftStable_ButAllowLegacyForNow();
1590 }
1591
1592 bool isStubClass() const {
1593 uintptr_t isa = (uintptr_t)isaBits();
1594 return 1 <= isa && isa < 16;
1595 }
1596
1597 // Swift stable ABI built for old deployment targets looks weird.
1598 // The is-legacy bit is set for compatibility with old libobjc.
1599 // We are on a "new" deployment target so we need to rewrite that bit.
1600 // These stable-with-legacy-bit classes are distinguished from real
1601 // legacy classes using another bit in the Swift data
1602 // (ClassFlags::IsSwiftPreStableABI)
1603
1604 bool isUnfixedBackwardDeployingStableSwift() {
1605 // Only classes marked as Swift legacy need apply.
1606 if (!bits.isSwiftLegacy()) return false;
1607
1608 // Check the true legacy vs stable distinguisher.
1609 // The low bit of Swift's ClassFlags is SET for true legacy
1610 // and UNSET for stable pretending to be legacy.
1611 uint32_t swiftClassFlags = *(uint32_t *)(&bits + 1);
1612 bool isActuallySwiftLegacy = bool(swiftClassFlags & 1);
1613 return !isActuallySwiftLegacy;
1614 }
1615
1616 void fixupBackwardDeployingStableSwift() {
1617 if (isUnfixedBackwardDeployingStableSwift()) {
1618 // Class really is stable Swift, pretending to be pre-stable.
1619 // Fix its lie.
1620 bits.setIsSwiftStable();
1621 }
1622 }
1623
1624 _objc_swiftMetadataInitializer swiftMetadataInitializer() {
1625 return bits.swiftMetadataInitializer();
1626 }
1627
1628 // Return YES if the class's ivars are managed by ARC,
1629 // or the class is MRC but has ARC-style weak ivars.
1630 bool hasAutomaticIvars() {
1631 return data()->ro()->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC);
1632 }
1633
1634 // Return YES if the class's ivars are managed by ARC.
1635 bool isARC() {
1636 return data()->ro()->flags & RO_IS_ARC;
1637 }
1638
1639
1640 bool forbidsAssociatedObjects() {
1641 return (data()->flags & RW_FORBIDS_ASSOCIATED_OBJECTS);
1642 }
1643
1644 #if SUPPORT_NONPOINTER_ISA
1645 // Tracked in non-pointer isas; not tracked otherwise
1646 #else
1647 bool instancesHaveAssociatedObjects() {
1648 // this may be an unrealized future class in the CF-bridged case
1649 ASSERT(isFuture() || isRealized());
1650 return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
1651 }
1652
1653 void setInstancesHaveAssociatedObjects() {
1654 // this may be an unrealized future class in the CF-bridged case
1655 ASSERT(isFuture() || isRealized());
1656 setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
1657 }
1658 #endif
1659
1660 bool shouldGrowCache() {
1661 return true;
1662 }
1663
1664 void setShouldGrowCache(bool) {
1665 // fixme good or bad for memory use?
1666 }
1667
1668 bool isInitializing() {
1669 return getMeta()->data()->flags & RW_INITIALIZING;
1670 }
1671
1672 void setInitializing() {
1673 ASSERT(!isMetaClass());
1674 ISA()->setInfo(RW_INITIALIZING);
1675 }
1676
1677 bool isInitialized() {
1678 return getMeta()->data()->flags & RW_INITIALIZED;
1679 }
1680
1681 void setInitialized();
1682
1683 bool isLoadable() {
1684 ASSERT(isRealized());
1685 return true; // any class registered for +load is definitely loadable
1686 }
1687
1688 IMP getLoadMethod();
1689
1690 // Locking: To prevent concurrent realization, hold runtimeLock.
1691 bool isRealized() const {
1692 return !isStubClass() && (data()->flags & RW_REALIZED);
1693 }
1694
1695 // Returns true if this is an unrealized future class.
1696 // Locking: To prevent concurrent realization, hold runtimeLock.
1697 bool isFuture() const {
1698 return data()->flags & RW_FUTURE;
1699 }
1700
1701 bool isMetaClass() {
1702 ASSERT(this);
1703 ASSERT(isRealized());
1704 #if FAST_CACHE_META
1705 return cache.getBit(FAST_CACHE_META);
1706 #else
1707 return data()->flags & RW_META;
1708 #endif
1709 }
1710
1711 // Like isMetaClass, but also valid on un-realized classes
1712 bool isMetaClassMaybeUnrealized() {
1713 static_assert(offsetof(class_rw_t, flags) == offsetof(class_ro_t, flags), "flags alias");
1714 static_assert(RO_META == RW_META, "flags alias");
1715 return data()->flags & RW_META;
1716 }
1717
1718 // NOT identical to this->ISA when this is a metaclass
1719 Class getMeta() {
1720 if (isMetaClass()) return (Class)this;
1721 else return this->ISA();
1722 }
1723
1724 bool isRootClass() {
1725 return superclass == nil;
1726 }
1727 bool isRootMetaclass() {
1728 return ISA() == (Class)this;
1729 }
1730
1731 const char *mangledName() {
1732 // fixme can't assert locks here
1733 ASSERT(this);
1734
1735 if (isRealized() || isFuture()) {
1736 return data()->ro()->name;
1737 } else {
1738 return ((const class_ro_t *)data())->name;
1739 }
1740 }
1741
1742 const char *demangledName(bool needsLock);
1743 const char *nameForLogging();
1744
1745 // May be unaligned depending on class's ivars.
1746 uint32_t unalignedInstanceStart() const {
1747 ASSERT(isRealized());
1748 return data()->ro()->instanceStart;
1749 }
1750
1751 // Class's instance start rounded up to a pointer-size boundary.
1752 // This is used for ARC layout bitmaps.
1753 uint32_t alignedInstanceStart() const {
1754 return word_align(unalignedInstanceStart());
1755 }
1756
1757 // May be unaligned depending on class's ivars.
1758 uint32_t unalignedInstanceSize() const {
1759 ASSERT(isRealized());
1760 return data()->ro()->instanceSize;
1761 }
1762
1763 // Class's ivar size rounded up to a pointer-size boundary.
1764 uint32_t alignedInstanceSize() const {
1765 return word_align(unalignedInstanceSize());
1766 }
1767
1768 size_t instanceSize(size_t extraBytes) const {
1769 if (fastpath(cache.hasFastInstanceSize(extraBytes))) {
1770 return cache.fastInstanceSize(extraBytes);
1771 }
1772
1773 size_t size = alignedInstanceSize() + extraBytes;
1774 // CF requires all objects be at least 16 bytes.
1775 if (size < 16) size = 16;
1776 return size;
1777 }
1778
1779 void setInstanceSize(uint32_t newSize) {
1780 ASSERT(isRealized());
1781 ASSERT(data()->flags & RW_REALIZING);
1782 auto ro = data()->ro();
1783 if (newSize != ro->instanceSize) {
1784 ASSERT(data()->flags & RW_COPIED_RO);
1785 *const_cast<uint32_t *>(&ro->instanceSize) = newSize;
1786 }
1787 cache.setFastInstanceSize(newSize);
1788 }
1789
1790 void chooseClassArrayIndex();
1791
1792 void setClassArrayIndex(unsigned Idx) {
1793 bits.setClassArrayIndex(Idx);
1794 }
1795
1796 unsigned classArrayIndex() {
1797 return bits.classArrayIndex();
1798 }
1799 };
1800
1801
1802 struct swift_class_t : objc_class {
1803 uint32_t flags;
1804 uint32_t instanceAddressOffset;
1805 uint32_t instanceSize;
1806 uint16_t instanceAlignMask;
1807 uint16_t reserved;
1808
1809 uint32_t classSize;
1810 uint32_t classAddressOffset;
1811 void *description;
1812 // ...
1813
1814 void *baseAddress() {
1815 return (void *)((uint8_t *)this - classAddressOffset);
1816 }
1817 };
1818
1819
1820 struct category_t {
1821 const char *name;
1822 classref_t cls;
1823 WrappedPtr<method_list_t, PtrauthStrip> instanceMethods;
1824 WrappedPtr<method_list_t, PtrauthStrip> classMethods;
1825 struct protocol_list_t *protocols;
1826 struct property_list_t *instanceProperties;
1827 // Fields below this point are not always present on disk.
1828 struct property_list_t *_classProperties;
1829
1830 method_list_t *methodsForMeta(bool isMeta) {
1831 if (isMeta) return classMethods;
1832 else return instanceMethods;
1833 }
1834
1835 property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);
1836
1837 protocol_list_t *protocolsForMeta(bool isMeta) {
1838 if (isMeta) return nullptr;
1839 else return protocols;
1840 }
1841 };
1842
1843 struct objc_super2 {
1844 id receiver;
1845 Class current_class;
1846 };
1847
1848 struct message_ref_t {
1849 IMP imp;
1850 SEL sel;
1851 };
1852
1853
1854 extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
1855
1856 #endif