/*
 * Copyright (c) 2005-2007 Apple Inc.  All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H

#include "PointerUnion.h"

// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.
// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
// class is a metaclass
#define RO_META               (1<<0)
// class is a root class
#define RO_ROOT               (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS  (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD     (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN             (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION          (1<<5)
// class has ro field for Swift metadata initializer callback
#define RO_HAS_SWIFT_INITIALIZER (1<<6)
// class compiled with ARC
#define RO_IS_ARC             (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY  (1<<8)
// class is not ARC but has ARC-style weak ivar layout
#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
// class does not allow associated objects on instances
#define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE        (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE             (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED           (1<<31)

// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.
// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED           (1<<31)
// class is unresolved future class
#define RW_FUTURE             (1<<30)
// class is initialized
#define RW_INITIALIZED        (1<<29)
// class is initializing
#define RW_INITIALIZING       (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO          (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING       (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED        (1<<25)
// available for use; was RW_FINALIZE_ON_MAIN_THREAD
// #define RW_24 (1<<24)
// class +load has been called
#define RW_LOADED             (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
// class does not allow associated objects on its instances
#define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING          (1<<19)

// class is a metaclass (copied from ro)
#define RW_META               RO_META // (1<<0)


// NOTE: MORE RW_ FLAGS DEFINED BELOW


// Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
// or class_t->bits (FAST_*).
//
// FAST_* and FAST_CACHE_* are stored on the class, reducing pointer indirection.

#if __LP64__

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY    (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE    (1UL<<1)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL

#if __arm64__
// class or superclass has .cxx_construct/.cxx_destruct implementation
//   FAST_CACHE_HAS_CXX_DTOR is the first bit so that setting it in
//   isa_t::has_cxx_dtor is a single bfi
#define FAST_CACHE_HAS_CXX_DTOR       (1<<0)
#define FAST_CACHE_HAS_CXX_CTOR       (1<<1)
// Denormalized RO_META to avoid an indirection
#define FAST_CACHE_META               (1<<2)
#else
// Denormalized RO_META to avoid an indirection
#define FAST_CACHE_META               (1<<0)
// class or superclass has .cxx_construct/.cxx_destruct implementation
//   FAST_CACHE_HAS_CXX_DTOR is chosen to alias with isa_t::has_cxx_dtor
#define FAST_CACHE_HAS_CXX_CTOR       (1<<1)
#define FAST_CACHE_HAS_CXX_DTOR       (1<<2)
#endif

// Fast Alloc fields:
//   This stores the word-aligned size of instances + "ALLOC_DELTA16",
//   or 0 if the instance size doesn't fit.
//
//   These bits occupy the same bits as the instance size, so that
//   the size can be extracted with a simple mask operation.
//
//   FAST_CACHE_ALLOC_MASK16 allows the instance size, rounded up to the
//   next 16-byte boundary, to be extracted with a single mask, which is a
//   fastpath for _objc_rootAllocWithZone()
#define FAST_CACHE_ALLOC_MASK         0x1ff8
#define FAST_CACHE_ALLOC_MASK16       0x1ff0
#define FAST_CACHE_ALLOC_DELTA16      0x0008
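// Illustrative sketch (not part of the runtime): for an instance size of 33
// bytes, setFastInstanceSize() stores word_align(33) + FAST_CACHE_ALLOC_DELTA16
// = 40 + 8 = 48 in _flags. Then:
//     _flags & FAST_CACHE_ALLOC_MASK16                      // == 48 == align16(33)
//     align16((_flags & FAST_CACHE_ALLOC_MASK) + extra - FAST_CACHE_ALLOC_DELTA16)
// recovers the allocation size when extra bytes are requested.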

// class's instances require raw isa
#define FAST_CACHE_REQUIRES_RAW_ISA   (1<<13)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define FAST_CACHE_HAS_DEFAULT_AWZ    (1<<14)
// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
#define FAST_CACHE_HAS_DEFAULT_CORE   (1<<15)

#else

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
#if SUPPORT_NONPOINTER_ISA
#define RW_REQUIRES_RAW_ISA   (1<<15)
#endif
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define RW_HAS_DEFAULT_RR     (1<<14)
// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
#define RW_HAS_DEFAULT_CORE   (1<<13)

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY  (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE  (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL

#endif // __LP64__

// The Swift ABI requires that these bits be defined like this on all platforms.
static_assert(FAST_IS_SWIFT_LEGACY == 1, "resistance is futile");
static_assert(FAST_IS_SWIFT_STABLE == 2, "resistance is futile");


#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bits
#else
typedef uint16_t mask_t;
#endif
typedef uintptr_t SEL;

struct swift_class_t;

enum Atomicity { Atomic = true, NotAtomic = false };
enum IMPEncoding { Encoded = true, Raw = false };

struct bucket_t {
private:
    // IMP-first is better for arm64e ptrauth and no worse for arm64.
    // SEL-first is better for armv7* and i386 and x86_64.
#if __arm64__
    explicit_atomic<uintptr_t> _imp;
    explicit_atomic<SEL> _sel;
#else
    explicit_atomic<SEL> _sel;
    explicit_atomic<uintptr_t> _imp;
#endif

    // Compute the ptrauth signing modifier from &_imp, newSel, and cls.
    uintptr_t modifierForSEL(SEL newSel, Class cls) const {
        return (uintptr_t)&_imp ^ (uintptr_t)newSel ^ (uintptr_t)cls;
    }

    // Sign newImp, with &_imp, newSel, and cls as modifiers.
    uintptr_t encodeImp(IMP newImp, SEL newSel, Class cls) const {
        if (!newImp) return 0;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
        return (uintptr_t)
            ptrauth_auth_and_resign(newImp,
                                    ptrauth_key_function_pointer, 0,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(newSel, cls));
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        return (uintptr_t)newImp ^ (uintptr_t)cls;
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
        return (uintptr_t)newImp;
#else
#error Unknown method cache IMP encoding.
#endif
    }
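    // Illustrative sketch (not part of the runtime): with the ISA_XOR
    // encoding, encodeImp() and imp() are inverses because XOR undoes itself:
    //     uintptr_t stored  = (uintptr_t)newImp ^ (uintptr_t)cls;  // encode
    //     IMP       decoded = (IMP)(stored ^ (uintptr_t)cls);      // == newImp
    // so a raw IMP is never stored in the cache as-is.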

public:
    inline SEL sel() const { return _sel.load(memory_order::memory_order_relaxed); }

    inline IMP imp(Class cls) const {
        uintptr_t imp = _imp.load(memory_order::memory_order_relaxed);
        if (!imp) return nil;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
        SEL sel = _sel.load(memory_order::memory_order_relaxed);
        return (IMP)
            ptrauth_auth_and_resign((const void *)imp,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(sel, cls),
                                    ptrauth_key_function_pointer, 0);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        return (IMP)(imp ^ (uintptr_t)cls);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
        return (IMP)imp;
#else
#error Unknown method cache IMP encoding.
#endif
    }

    template <Atomicity, IMPEncoding>
    void set(SEL newSel, IMP newImp, Class cls);
};


struct cache_t {
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
    explicit_atomic<struct bucket_t *> _buckets;
    explicit_atomic<mask_t> _mask;
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
    explicit_atomic<uintptr_t> _maskAndBuckets;
    mask_t _mask_unused;

    // How much the mask is shifted by.
    static constexpr uintptr_t maskShift = 48;

    // Additional bits after the mask which must be zero. msgSend
    // takes advantage of these additional bits to construct the value
    // `mask << 4` from `_maskAndBuckets` in a single instruction.
    static constexpr uintptr_t maskZeroBits = 4;

    // The largest mask value we can store.
    static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;

    // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
    static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;

    // Ensure we have enough bits for the buckets pointer.
    static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
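
    // Illustrative sketch (not part of the runtime): with maskShift == 48 and
    // maskZeroBits == 4, the packed word decodes as:
    //     bucket_t *buckets = (bucket_t *)(_maskAndBuckets & bucketsMask);    // low 44 bits
    //     mask_t    mask    = _maskAndBuckets >> maskShift;                   // top 16 bits
    //     uintptr_t mask4   = _maskAndBuckets >> (maskShift - maskZeroBits);  // mask << 4
    // The last form works only because the 4 bits below the mask stay zero.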
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    // _maskAndBuckets stores the mask shift in the low 4 bits, and
    // the buckets pointer in the remainder of the value. The mask
    // shift is the value where (0xffff >> shift) produces the correct
    // mask. This is equal to 16 - log2(cache_size).
    explicit_atomic<uintptr_t> _maskAndBuckets;
    mask_t _mask_unused;

    static constexpr uintptr_t maskBits = 4;
    static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
    static constexpr uintptr_t bucketsMask = ~maskMask;
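
    // Illustrative sketch (not part of the runtime): a 32-entry cache stores
    // shift == 16 - log2(32) == 11 in the low 4 bits, so:
    //     mask_t    mask    = 0xffff >> (_maskAndBuckets & maskMask);  // == 31
    //     bucket_t *buckets = (bucket_t *)(_maskAndBuckets & bucketsMask);
    // This assumes bucket allocations are at least 16-byte aligned, leaving
    // the pointer's low 4 bits free for the shift.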
#else
#error Unknown cache mask storage type.
#endif

#if __LP64__
    uint16_t _flags;
#endif
    uint16_t _occupied;

public:
    static bucket_t *emptyBuckets();

    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    unsigned capacity();
    bool isConstantEmptyCache();
    bool canBeFreed();

#if __LP64__
    bool getBit(uint16_t flags) const {
        return _flags & flags;
    }
    void setBit(uint16_t set) {
        __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags, set, __ATOMIC_RELAXED);
    }
    void clearBit(uint16_t clear) {
        __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags, ~clear, __ATOMIC_RELAXED);
    }
#endif

#if FAST_CACHE_ALLOC_MASK
    bool hasFastInstanceSize(size_t extra) const
    {
        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        }
        return _flags & FAST_CACHE_ALLOC_MASK;
    }

    size_t fastInstanceSize(size_t extra) const
    {
        ASSERT(hasFastInstanceSize(extra));

        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        } else {
            size_t size = _flags & FAST_CACHE_ALLOC_MASK;
            // remove the FAST_CACHE_ALLOC_DELTA16 that was added
            // by setFastInstanceSize
            return align16(size + extra - FAST_CACHE_ALLOC_DELTA16);
        }
    }

    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        uint16_t newBits = _flags & ~FAST_CACHE_ALLOC_MASK;
        uint16_t sizeBits;

        // Adding FAST_CACHE_ALLOC_DELTA16 allows FAST_CACHE_ALLOC_MASK16
        // to yield the proper 16-byte aligned allocation size with a single mask
        sizeBits = word_align(newSize) + FAST_CACHE_ALLOC_DELTA16;
        sizeBits &= FAST_CACHE_ALLOC_MASK;
        if (newSize <= sizeBits) {
            newBits |= sizeBits;
        }
        _flags = newBits;
    }
#else
    bool hasFastInstanceSize(size_t extra) const {
        return false;
    }
    size_t fastInstanceSize(size_t extra) const {
        abort();
    }
    void setFastInstanceSize(size_t extra) {
        // nothing
    }
#endif

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
    void insert(Class cls, SEL sel, IMP imp, id receiver);

    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn, cold));
};


// classref_t is unremapped class_t*
typedef struct classref * classref_t;


#ifdef __PTRAUTH_INTRINSICS__
#   define StubClassInitializerPtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0xc671)
#else
#   define StubClassInitializerPtrauth
#endif
struct stub_class_t {
    uintptr_t isa;
    _objc_swiftMetadataInitializer StubClassInitializerPtrauth initializer;
};

/***********************************************************************
* entsize_list_tt<Element, List, FlagMask>
* Generic implementation of an array of non-fragile structs.
*
* Element is the struct type (e.g. method_t)
* List is the specialization of entsize_list_tt (e.g. method_list_t)
* FlagMask is used to stash extra bits in the entsize field
* (e.g. method list fixup markers)
**********************************************************************/
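// Illustrative sketch (not part of the runtime): because the stride comes from
// entsizeAndFlags rather than sizeof(Element), element i is located as
//     (Element *)((uint8_t *)&first + i * entsize())
// so a newer compiler may append fields to Element without breaking an older
// runtime that walks the list. FlagMask bits (e.g. 0x3 for method lists) are
// masked out of the entsize before it is used as a stride.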
template <typename Element, typename List, uint32_t FlagMask>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;
    uint32_t count;
    Element first;

    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    Element& getOrEnd(uint32_t i) const {
        ASSERT(i <= count);
        return *(Element *)((uint8_t *)&first + i*entsize());
    }
    Element& get(uint32_t i) const {
        ASSERT(i < count);
        return getOrEnd(i);
    }

    size_t byteSize() const {
        return byteSize(entsize(), count);
    }

    static size_t byteSize(uint32_t entsize, uint32_t count) {
        return sizeof(entsize_list_tt) + (count-1)*entsize;
    }

    List *duplicate() const {
        auto *dup = (List *)calloc(this->byteSize(), 1);
        dup->entsizeAndFlags = this->entsizeAndFlags;
        dup->count = this->count;
        std::copy(begin(), end(), dup->begin());
        return dup;
    }

    struct iterator;
    const iterator begin() const {
        return iterator(*static_cast<const List*>(this), 0);
    }
    iterator begin() {
        return iterator(*static_cast<const List*>(this), 0);
    }
    const iterator end() const {
        return iterator(*static_cast<const List*>(this), count);
    }
    iterator end() {
        return iterator(*static_cast<const List*>(this), count);
    }

    struct iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};


struct method_t {
    SEL name;
    const char *types;
    MethodListIMP imp;

    struct SortBySELAddress :
        public std::binary_function<const method_t&,
                                    const method_t&, bool>
    {
        bool operator() (const method_t& lhs,
                         const method_t& rhs)
        { return lhs.name < rhs.name; }
    };
};

struct ivar_t {
#if __x86_64__
    // *offset was originally 64-bit on some x86_64 platforms.
    // We read and write only 32 bits of it.
    // Some metadata provides all 64 bits. This is harmless for unsigned
    // little-endian values.
    // Some code uses all 64 bits. class_addIvar() over-allocates the
    // offset for their benefit.
#endif
    int32_t *offset;
    const char *name;
    const char *type;
    // alignment is sometimes -1; use alignment() instead
    uint32_t alignment_raw;
    uint32_t size;

    uint32_t alignment() const {
        if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
        return 1 << alignment_raw;
    }
};

struct property_t {
    const char *name;
    const char *attributes;
};

// Two bits of entsize are used for fixup markers.
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
    bool isUniqued() const;
    bool isFixedUp() const;
    void setFixedUp();

    uint32_t indexOfMethod(const method_t *meth) const {
        uint32_t i =
            (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
        ASSERT(i < count);
        return i;
    }
};

struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
    bool containsIvar(Ivar ivar) const {
        return (ivar >= (Ivar)&*begin()  &&  ivar < (Ivar)&*end());
    }
};

struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};


typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// Values for protocol_t->flags
#define PROTOCOL_FIXED_UP_2     (1<<31)  // must never be set by compiler
#define PROTOCOL_FIXED_UP_1     (1<<30)  // must never be set by compiler
#define PROTOCOL_IS_CANONICAL   (1<<29)  // must never be set by compiler
// Bits 0..15 are reserved for Swift's use.

#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)

struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t)
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **_extendedMethodTypes;
    const char *_demangledName;
    property_list_t *_classProperties;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;
    void setFixedUp();

    bool isCanonical() const;
    void clearIsCanonical();

#   define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))

    bool hasExtendedMethodTypesField() const {
        return HAS_FIELD(_extendedMethodTypes);
    }
    bool hasDemangledNameField() const {
        return HAS_FIELD(_demangledName);
    }
    bool hasClassPropertiesField() const {
        return HAS_FIELD(_classProperties);
    }

#   undef HAS_FIELD
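
    // Illustrative sketch (not part of the runtime): `size` records how many
    // bytes the compiler actually emitted for this protocol. A protocol built
    // before _classProperties existed has
    //     size < offsetof(protocol_t, _classProperties) + sizeof(_classProperties)
    // so hasClassPropertiesField() is false and classProperties() returns nil
    // rather than reading past the end of the on-disk structure.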

    const char **extendedMethodTypes() const {
        return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
    }

    property_list_t *classProperties() const {
        return hasClassPropertiesField() ? _classProperties : nil;
    }
};

struct protocol_list_t {
    // count is pointer-sized by accident.
    uintptr_t count;
    protocol_ref_t list[0]; // variable-size

    size_t byteSize() const {
        return sizeof(*this) + count*sizeof(list[0]);
    }

    protocol_list_t *duplicate() const {
        return (protocol_list_t *)memdup(this, this->byteSize());
    }

    typedef protocol_ref_t* iterator;
    typedef const protocol_ref_t* const_iterator;

    const_iterator begin() const {
        return list;
    }
    iterator begin() {
        return list;
    }
    const_iterator end() const {
        return list + count;
    }
    iterator end() {
        return list + count;
    }
};

struct class_ro_t {
    uint32_t flags;
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;
#endif

    const uint8_t * ivarLayout;

    const char * name;
    method_list_t * baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    // This field exists only when RO_HAS_SWIFT_INITIALIZER is set.
    _objc_swiftMetadataInitializer __ptrauth_objc_method_list_imp _swiftMetadataInitializer_NEVER_USE[0];

    _objc_swiftMetadataInitializer swiftMetadataInitializer() const {
        if (flags & RO_HAS_SWIFT_INITIALIZER) {
            return _swiftMetadataInitializer_NEVER_USE[0];
        } else {
            return nil;
        }
    }

    method_list_t *baseMethods() const {
        return baseMethodList;
    }

    class_ro_t *duplicate() const {
        if (flags & RO_HAS_SWIFT_INITIALIZER) {
            size_t size = sizeof(*this) + sizeof(_swiftMetadataInitializer_NEVER_USE[0]);
            class_ro_t *ro = (class_ro_t *)memdup(this, size);
            ro->_swiftMetadataInitializer_NEVER_USE[0] = this->_swiftMetadataInitializer_NEVER_USE[0];
            return ro;
        } else {
            size_t size = sizeof(*this);
            class_ro_t *ro = (class_ro_t *)memdup(this, size);
            return ro;
        }
    }
};


/***********************************************************************
* list_array_tt<Element, List>
* Generic implementation for metadata that can be augmented by categories.
*
* Element is the underlying metadata type (e.g. method_t)
* List is the metadata's list type (e.g. method_list_t)
*
* A list_array_tt has one of three values:
* - empty
* - a pointer to a single list
* - an array of pointers to lists
*
* countLists/beginLists/endLists iterate the metadata lists
* count/begin/end iterate the underlying metadata elements
**********************************************************************/
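// Illustrative sketch (not part of the runtime): the three states share one
// pointer-sized union, distinguished by the low tag bit:
//     empty       ->  list == nullptr
//     one list    ->  list == the single list pointer  (low bit clear)
//     many lists  ->  arrayAndFlag == (array_t *) | 1  (low bit set)
// hasArray()/array() below test and strip that tag bit.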
template <typename Element, typename List>
class list_array_tt {
    struct array_t {
        uint32_t count;
        List* lists[0];

        static size_t byteSize(uint32_t count) {
            return sizeof(array_t) + count*sizeof(lists[0]);
        }
        size_t byteSize() {
            return byteSize(count);
        }
    };

 protected:
    class iterator {
        List * const *lists;
        List * const *listsEnd;
        typename List::iterator m, mEnd;

     public:
        iterator(List *const *begin, List *const *end)
            : lists(begin), listsEnd(end)
        {
            if (begin != end) {
                m = (*begin)->begin();
                mEnd = (*begin)->end();
            }
        }

        const Element& operator * () const {
            return *m;
        }
        Element& operator * () {
            return *m;
        }

        bool operator != (const iterator& rhs) const {
            if (lists != rhs.lists) return true;
            if (lists == listsEnd) return false;  // m is undefined
            if (m != rhs.m) return true;
            return false;
        }

        const iterator& operator ++ () {
            ASSERT(m != mEnd);
            m++;
            if (m == mEnd) {
                ASSERT(lists != listsEnd);
                lists++;
                if (lists != listsEnd) {
                    m = (*lists)->begin();
                    mEnd = (*lists)->end();
                }
            }
            return *this;
        }
    };

 private:
    union {
        List* list;
        uintptr_t arrayAndFlag;
    };

    bool hasArray() const {
        return arrayAndFlag & 1;
    }

    array_t *array() const {
        return (array_t *)(arrayAndFlag & ~1);
    }

    void setArray(array_t *array) {
        arrayAndFlag = (uintptr_t)array | 1;
    }

 public:
    list_array_tt() : list(nullptr) { }
    list_array_tt(List *l) : list(l) { }

    uint32_t count() const {
        uint32_t result = 0;
        for (auto lists = beginLists(), end = endLists();
             lists != end;
             ++lists)
        {
            result += (*lists)->count;
        }
        return result;
    }

    iterator begin() const {
        return iterator(beginLists(), endLists());
    }

    iterator end() const {
        List * const *e = endLists();
        return iterator(e, e);
    }


    uint32_t countLists() {
        if (hasArray()) {
            return array()->count;
        } else if (list) {
            return 1;
        } else {
            return 0;
        }
    }

    List* const * beginLists() const {
        if (hasArray()) {
            return array()->lists;
        } else {
            return &list;
        }
    }

    List* const * endLists() const {
        if (hasArray()) {
            return array()->lists + array()->count;
        } else if (list) {
            return &list + 1;
        } else {
            return &list;
        }
    }

    void attachLists(List* const * addedLists, uint32_t addedCount) {
        if (addedCount == 0) return;

        if (hasArray()) {
            // many lists -> many lists
            uint32_t oldCount = array()->count;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
            array()->count = newCount;
            memmove(array()->lists + addedCount, array()->lists,
                    oldCount * sizeof(array()->lists[0]));
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
        else if (!list  &&  addedCount == 1) {
            // 0 lists -> 1 list
            list = addedLists[0];
        }
        else {
            // 1 list -> many lists
            List* oldList = list;
            uint32_t oldCount = oldList ? 1 : 0;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)malloc(array_t::byteSize(newCount)));
            array()->count = newCount;
            if (oldList) array()->lists[addedCount] = oldList;
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
    }
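
    // Illustrative sketch (not part of the runtime): attachLists() prepends.
    // Existing lists shift toward the end and the added lists land in front:
    //     before: [base]            attach [catA, catB]
    //     after:  [catA, catB, base]
    // Method lookup scans lists front to back, so category methods shadow
    // the class's base implementations.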

    void tryFree() {
        if (hasArray()) {
            for (uint32_t i = 0; i < array()->count; i++) {
                try_free(array()->lists[i]);
            }
            try_free(array());
        }
        else if (list) {
            try_free(list);
        }
    }

    template<typename Result>
    Result duplicate() {
        Result result;

        if (hasArray()) {
            array_t *a = array();
            result.setArray((array_t *)memdup(a, a->byteSize()));
            for (uint32_t i = 0; i < a->count; i++) {
                result.array()->lists[i] = a->lists[i]->duplicate();
            }
        } else if (list) {
            result.list = list->duplicate();
        } else {
            result.list = nil;
        }

        return result;
    }
};


class method_array_t :
    public list_array_tt<method_t, method_list_t>
{
    typedef list_array_tt<method_t, method_list_t> Super;

 public:
    method_array_t() : Super() { }
    method_array_t(method_list_t *l) : Super(l) { }

    method_list_t * const *beginCategoryMethodLists() const {
        return beginLists();
    }

    method_list_t * const *endCategoryMethodLists(Class cls) const;

    method_array_t duplicate() {
        return Super::duplicate<method_array_t>();
    }
};


class property_array_t :
    public list_array_tt<property_t, property_list_t>
{
    typedef list_array_tt<property_t, property_list_t> Super;

 public:
    property_array_t() : Super() { }
    property_array_t(property_list_t *l) : Super(l) { }

    property_array_t duplicate() {
        return Super::duplicate<property_array_t>();
    }
};


class protocol_array_t :
    public list_array_tt<protocol_ref_t, protocol_list_t>
{
    typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;

 public:
    protocol_array_t() : Super() { }
    protocol_array_t(protocol_list_t *l) : Super(l) { }

    protocol_array_t duplicate() {
        return Super::duplicate<protocol_array_t>();
    }
};

struct class_rw_ext_t {
    const class_ro_t *ro;
    method_array_t methods;
    property_array_t properties;
    protocol_array_t protocols;
    char *demangledName;
    uint32_t version;
};

struct class_rw_t {
    // Be warned that Symbolication knows the layout of this structure.
    uint32_t flags;
    uint16_t witness;
#if SUPPORT_INDEXED_ISA
    uint16_t index;
#endif

    explicit_atomic<uintptr_t> ro_or_rw_ext;

    Class firstSubclass;
    Class nextSiblingClass;

private:
    using ro_or_rw_ext_t = objc::PointerUnion<const class_ro_t *, class_rw_ext_t *>;

    const ro_or_rw_ext_t get_ro_or_rwe() const {
        return ro_or_rw_ext_t{ro_or_rw_ext};
    }

    void set_ro_or_rwe(const class_ro_t *ro) {
        ro_or_rw_ext_t{ro}.storeAt(ro_or_rw_ext, memory_order_relaxed);
    }

    void set_ro_or_rwe(class_rw_ext_t *rwe, const class_ro_t *ro) {
        // the release barrier is so that the class_rw_ext_t::ro initialization
        // is visible to lockless readers
        rwe->ro = ro;
        ro_or_rw_ext_t{rwe}.storeAt(ro_or_rw_ext, memory_order_release);
    }

    class_rw_ext_t *extAlloc(const class_ro_t *ro, bool deep = false);

public:
    void setFlags(uint32_t set)
    {
        __c11_atomic_fetch_or((_Atomic(uint32_t) *)&flags, set, __ATOMIC_RELAXED);
    }

    void clearFlags(uint32_t clear)
    {
        __c11_atomic_fetch_and((_Atomic(uint32_t) *)&flags, ~clear, __ATOMIC_RELAXED);
    }

    // set and clear must not overlap
    void changeFlags(uint32_t set, uint32_t clear)
    {
        ASSERT((set & clear) == 0);

        uint32_t oldf, newf;
        do {
            oldf = flags;
            newf = (oldf | set) & ~clear;
        } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
    }

    class_rw_ext_t *ext() const {
        return get_ro_or_rwe().dyn_cast<class_rw_ext_t *>();
    }

    class_rw_ext_t *extAllocIfNeeded() {
        auto v = get_ro_or_rwe();
        if (fastpath(v.is<class_rw_ext_t *>())) {
            return v.get<class_rw_ext_t *>();
        } else {
            return extAlloc(v.get<const class_ro_t *>());
        }
    }

    class_rw_ext_t *deepCopy(const class_ro_t *ro) {
        return extAlloc(ro, true);
    }

    const class_ro_t *ro() const {
        auto v = get_ro_or_rwe();
        if (slowpath(v.is<class_rw_ext_t *>())) {
            return v.get<class_rw_ext_t *>()->ro;
        }
        return v.get<const class_ro_t *>();
    }

    void set_ro(const class_ro_t *ro) {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            v.get<class_rw_ext_t *>()->ro = ro;
        } else {
            set_ro_or_rwe(ro);
        }
    }

    const method_array_t methods() const {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            return v.get<class_rw_ext_t *>()->methods;
        } else {
            return method_array_t{v.get<const class_ro_t *>()->baseMethods()};
        }
    }

    const property_array_t properties() const {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            return v.get<class_rw_ext_t *>()->properties;
        } else {
            return property_array_t{v.get<const class_ro_t *>()->baseProperties};
        }
    }

    const protocol_array_t protocols() const {
        auto v = get_ro_or_rwe();
        if (v.is<class_rw_ext_t *>()) {
            return v.get<class_rw_ext_t *>()->protocols;
        } else {
            return protocol_array_t{v.get<const class_ro_t *>()->baseProtocols};
        }
    }
};


struct class_data_bits_t {
    friend objc_class;

    // Values are the FAST_ flags above.
    uintptr_t bits;
private:
    bool getBit(uintptr_t bit) const
    {
        return bits & bit;
    }

    // Atomically set the bits in `set` and clear the bits in `clear`.
    // set and clear must not overlap.
    void setAndClearBits(uintptr_t set, uintptr_t clear)
    {
        ASSERT((set & clear) == 0);
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = (oldBits | set) & ~clear;
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

    void setBits(uintptr_t set) {
        __c11_atomic_fetch_or((_Atomic(uintptr_t) *)&bits, set, __ATOMIC_RELAXED);
    }

    void clearBits(uintptr_t clear) {
        __c11_atomic_fetch_and((_Atomic(uintptr_t) *)&bits, ~clear, __ATOMIC_RELAXED);
    }

public:

    class_rw_t* data() const {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }
    void setData(class_rw_t *newData)
    {
        ASSERT(!data()  ||  (newData->flags & (RW_REALIZING | RW_FUTURE)));
        // Set during realization or construction only. No locking needed.
        // Use a store-release fence because there may be concurrent
        // readers of data and data's contents.
        uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
        atomic_thread_fence(memory_order_release);
        bits = newBits;
    }

    // Get the class's ro data, even in the presence of concurrent realization.
    // fixme this isn't really safe without a compiler barrier at least
    // and probably a memory barrier when realizeClass changes the data field
    const class_ro_t *safe_ro() {
        class_rw_t *maybe_rw = data();
        if (maybe_rw->flags & RW_REALIZED) {
            // maybe_rw is rw
            return maybe_rw->ro();
        } else {
            // maybe_rw is actually ro
            return (class_ro_t *)maybe_rw;
        }
    }

    void setClassArrayIndex(unsigned Idx) {
#if SUPPORT_INDEXED_ISA
        // 0 is unused as then we can rely on zero-initialisation from calloc.
        ASSERT(Idx > 0);
        data()->index = Idx;
#endif
    }

    unsigned classArrayIndex() {
#if SUPPORT_INDEXED_ISA
        return data()->index;
#else
        return 0;
#endif
    }

    bool isAnySwift() {
        return isSwiftStable() || isSwiftLegacy();
    }

    bool isSwiftStable() {
        return getBit(FAST_IS_SWIFT_STABLE);
    }
    void setIsSwiftStable() {
        setAndClearBits(FAST_IS_SWIFT_STABLE, FAST_IS_SWIFT_LEGACY);
    }

    bool isSwiftLegacy() {
        return getBit(FAST_IS_SWIFT_LEGACY);
    }
    void setIsSwiftLegacy() {
        setAndClearBits(FAST_IS_SWIFT_LEGACY, FAST_IS_SWIFT_STABLE);
    }

    // fixme remove this once the Swift runtime uses the stable bits
    bool isSwiftStable_ButAllowLegacyForNow() {
        return isAnySwift();
    }

    _objc_swiftMetadataInitializer swiftMetadataInitializer() {
        // This function is called on un-realized classes without
        // holding any locks.
        // Beware of races with other realizers.
        return safe_ro()->swiftMetadataInitializer();
    }
};


struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;             // formerly cache pointer and vtable
    class_data_bits_t bits;    // class_rw_t * plus custom rr/alloc flags

    class_rw_t *data() const {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }

    void setInfo(uint32_t set) {
        ASSERT(isFuture()  ||  isRealized());
        data()->setFlags(set);
    }

    void clearInfo(uint32_t clear) {
        ASSERT(isFuture()  ||  isRealized());
        data()->clearFlags(clear);
    }

    // set and clear must not overlap
    void changeInfo(uint32_t set, uint32_t clear) {
        ASSERT(isFuture()  ||  isRealized());
        ASSERT((set & clear) == 0);
        data()->changeFlags(set, clear);
    }

#if FAST_HAS_DEFAULT_RR
    bool hasCustomRR() const {
        return !bits.getBit(FAST_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        bits.setBits(FAST_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        bits.clearBits(FAST_HAS_DEFAULT_RR);
    }
#else
    bool hasCustomRR() const {
        return !(bits.data()->flags & RW_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        bits.data()->setFlags(RW_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        bits.data()->clearFlags(RW_HAS_DEFAULT_RR);
    }
#endif

#if FAST_CACHE_HAS_DEFAULT_AWZ
    bool hasCustomAWZ() const {
        return !cache.getBit(FAST_CACHE_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        cache.setBit(FAST_CACHE_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        cache.clearBit(FAST_CACHE_HAS_DEFAULT_AWZ);
    }
#else
    bool hasCustomAWZ() const {
        return !(bits.data()->flags & RW_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        bits.data()->setFlags(RW_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        bits.data()->clearFlags(RW_HAS_DEFAULT_AWZ);
    }
#endif

#if FAST_CACHE_HAS_DEFAULT_CORE
    bool hasCustomCore() const {
        return !cache.getBit(FAST_CACHE_HAS_DEFAULT_CORE);
    }
    void setHasDefaultCore() {
        return cache.setBit(FAST_CACHE_HAS_DEFAULT_CORE);
    }
    void setHasCustomCore() {
        return cache.clearBit(FAST_CACHE_HAS_DEFAULT_CORE);
    }
#else
    bool hasCustomCore() const {
        return !(bits.data()->flags & RW_HAS_DEFAULT_CORE);
    }
    void setHasDefaultCore() {
        bits.data()->setFlags(RW_HAS_DEFAULT_CORE);
    }
    void setHasCustomCore() {
        bits.data()->clearFlags(RW_HAS_DEFAULT_CORE);
    }
#endif

#if FAST_CACHE_HAS_CXX_CTOR
    bool hasCxxCtor() {
        ASSERT(isRealized());
        return cache.getBit(FAST_CACHE_HAS_CXX_CTOR);
    }
    void setHasCxxCtor() {
        cache.setBit(FAST_CACHE_HAS_CXX_CTOR);
    }
#else
    bool hasCxxCtor() {
        ASSERT(isRealized());
        return bits.data()->flags & RW_HAS_CXX_CTOR;
    }
    void setHasCxxCtor() {
        bits.data()->setFlags(RW_HAS_CXX_CTOR);
    }
#endif

#if FAST_CACHE_HAS_CXX_DTOR
    bool hasCxxDtor() {
        ASSERT(isRealized());
        return cache.getBit(FAST_CACHE_HAS_CXX_DTOR);
    }
    void setHasCxxDtor() {
        cache.setBit(FAST_CACHE_HAS_CXX_DTOR);
    }
#else
    bool hasCxxDtor() {
        ASSERT(isRealized());
        return bits.data()->flags & RW_HAS_CXX_DTOR;
    }
    void setHasCxxDtor() {
        bits.data()->setFlags(RW_HAS_CXX_DTOR);
    }
#endif

#if FAST_CACHE_REQUIRES_RAW_ISA
    bool instancesRequireRawIsa() {
        return cache.getBit(FAST_CACHE_REQUIRES_RAW_ISA);
    }
    void setInstancesRequireRawIsa() {
        cache.setBit(FAST_CACHE_REQUIRES_RAW_ISA);
    }
#elif SUPPORT_NONPOINTER_ISA
    bool instancesRequireRawIsa() {
        return bits.data()->flags & RW_REQUIRES_RAW_ISA;
    }
    void setInstancesRequireRawIsa() {
        bits.data()->setFlags(RW_REQUIRES_RAW_ISA);
    }
#else
    bool instancesRequireRawIsa() {
        return true;
    }
    void setInstancesRequireRawIsa() {
        // nothing
    }
#endif
    void setInstancesRequireRawIsaRecursively(bool inherited = false);
    void printInstancesRequireRawIsa(bool inherited);

    bool canAllocNonpointer() {
        ASSERT(!isFuture());
        return !instancesRequireRawIsa();
    }

    bool isSwiftStable() {
        return bits.isSwiftStable();
    }

    bool isSwiftLegacy() {
        return bits.isSwiftLegacy();
    }

    bool isAnySwift() {
        return bits.isAnySwift();
    }

    bool isSwiftStable_ButAllowLegacyForNow() {
        return bits.isSwiftStable_ButAllowLegacyForNow();
    }

    bool isStubClass() const {
        uintptr_t isa = (uintptr_t)isaBits();
        return 1 <= isa && isa < 16;
    }

    // Swift stable ABI built for old deployment targets looks weird.
    // The is-legacy bit is set for compatibility with old libobjc.
    // We are on a "new" deployment target so we need to rewrite that bit.
    // These stable-with-legacy-bit classes are distinguished from real
    // legacy classes using another bit in the Swift data
    // (ClassFlags::IsSwiftPreStableABI)

    bool isUnfixedBackwardDeployingStableSwift() {
        // Only classes marked as Swift legacy need apply.
        if (!bits.isSwiftLegacy()) return false;

        // Check the true legacy vs stable distinguisher.
        // The low bit of Swift's ClassFlags is SET for true legacy
        // and UNSET for stable pretending to be legacy.
        uint32_t swiftClassFlags = *(uint32_t *)(&bits + 1);
        bool isActuallySwiftLegacy = bool(swiftClassFlags & 1);
        return !isActuallySwiftLegacy;
    }

    void fixupBackwardDeployingStableSwift() {
        if (isUnfixedBackwardDeployingStableSwift()) {
            // Class really is stable Swift, pretending to be pre-stable.
            // Fix its lie.
            bits.setIsSwiftStable();
        }
    }

    _objc_swiftMetadataInitializer swiftMetadataInitializer() {
        return bits.swiftMetadataInitializer();
    }

    // Return YES if the class's ivars are managed by ARC,
    // or the class is MRC but has ARC-style weak ivars.
    bool hasAutomaticIvars() {
        return data()->ro()->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC);
    }

    // Return YES if the class's ivars are managed by ARC.
    bool isARC() {
        return data()->ro()->flags & RO_IS_ARC;
    }


    bool forbidsAssociatedObjects() {
        return (data()->flags & RW_FORBIDS_ASSOCIATED_OBJECTS);
    }

#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        ASSERT(isFuture()  ||  isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        ASSERT(isFuture()  ||  isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif

    bool shouldGrowCache() {
        return true;
    }

    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }

    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    void setInitializing() {
        ASSERT(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();

    bool isLoadable() {
        ASSERT(isRealized());
        return true;  // any class registered for +load is definitely loadable
    }

    IMP getLoadMethod();

    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() const {
        return !isStubClass() && (data()->flags & RW_REALIZED);
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() const {
        return data()->flags & RW_FUTURE;
    }

    bool isMetaClass() {
        ASSERT(this);
        ASSERT(isRealized());
#if FAST_CACHE_META
        return cache.getBit(FAST_CACHE_META);
#else
        return data()->flags & RW_META;
#endif
    }

    // Like isMetaClass, but also valid on un-realized classes
    bool isMetaClassMaybeUnrealized() {
        static_assert(offsetof(class_rw_t, flags) == offsetof(class_ro_t, flags), "flags alias");
        static_assert(RO_META == RW_META, "flags alias");
        return data()->flags & RW_META;
    }

    // NOT identical to this->ISA when this is a metaclass
    Class getMeta() {
        if (isMetaClass()) return (Class)this;
        else return this->ISA();
    }

    bool isRootClass() {
        return superclass == nil;
    }
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }

    const char *mangledName() {
        // fixme can't assert locks here
        ASSERT(this);

        if (isRealized()  ||  isFuture()) {
            return data()->ro()->name;
        } else {
            return ((const class_ro_t *)data())->name;
        }
    }

    const char *demangledName(bool needsLock);
    const char *nameForLogging();

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceStart() const {
        ASSERT(isRealized());
        return data()->ro()->instanceStart;
    }

    // Class's instance start rounded up to a pointer-size boundary.
    // This is used for ARC layout bitmaps.
    uint32_t alignedInstanceStart() const {
        return word_align(unalignedInstanceStart());
    }

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() const {
        ASSERT(isRealized());
        return data()->ro()->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() const {
        return word_align(unalignedInstanceSize());
    }

    size_t instanceSize(size_t extraBytes) const {
        if (fastpath(cache.hasFastInstanceSize(extraBytes))) {
            return cache.fastInstanceSize(extraBytes);
        }

        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }
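
    // Illustrative sketch (not part of the runtime): a root class with only
    // an isa pointer has alignedInstanceSize() == 8 on LP64, but
    // instanceSize(0) returns 16 because CF requires at least 16 bytes.
    // The fast path agrees: setFastInstanceSize(8) stores 8 + 8 == 16, so
    // fastInstanceSize(0) also yields 16.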

    void setInstanceSize(uint32_t newSize) {
        ASSERT(isRealized());
        ASSERT(data()->flags & RW_REALIZING);
        auto ro = data()->ro();
        if (newSize != ro->instanceSize) {
            ASSERT(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&ro->instanceSize) = newSize;
        }
        cache.setFastInstanceSize(newSize);
    }

    void chooseClassArrayIndex();

    void setClassArrayIndex(unsigned Idx) {
        bits.setClassArrayIndex(Idx);
    }

    unsigned classArrayIndex() {
        return bits.classArrayIndex();
    }
};


struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;
    // ...

    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};


struct category_t {
    const char *name;
    classref_t cls;
    struct method_list_t *instanceMethods;
    struct method_list_t *classMethods;
    struct protocol_list_t *protocols;
    struct property_list_t *instanceProperties;
    // Fields below this point are not always present on disk.
    struct property_list_t *_classProperties;

    method_list_t *methodsForMeta(bool isMeta) {
        if (isMeta) return classMethods;
        else return instanceMethods;
    }

    property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);

    protocol_list_t *protocolsForMeta(bool isMeta) {
        if (isMeta) return nullptr;
        else return protocols;
    }
};

struct objc_super2 {
    id receiver;
    Class current_class;
};

struct message_ref_t {
    IMP imp;
    SEL sel;
};


extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);

#endif