1 /*
2 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #ifndef _OBJC_RUNTIME_NEW_H
25 #define _OBJC_RUNTIME_NEW_H
26
27 // class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
28 // The extra bits are optimized for the retain/release and alloc/dealloc paths.
29
30 // Values for class_ro_t->flags
31 // These are emitted by the compiler and are part of the ABI.
32 // Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
33 // class is a metaclass
34 #define RO_META (1<<0)
35 // class is a root class
36 #define RO_ROOT (1<<1)
37 // class has .cxx_construct/destruct implementations
38 #define RO_HAS_CXX_STRUCTORS (1<<2)
39 // class has +load implementation
40 // #define RO_HAS_LOAD_METHOD (1<<3)
41 // class has visibility=hidden set
42 #define RO_HIDDEN (1<<4)
43 // class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
44 #define RO_EXCEPTION (1<<5)
45 // class has ro field for Swift metadata initializer callback
46 #define RO_HAS_SWIFT_INITIALIZER (1<<6)
47 // class compiled with ARC
48 #define RO_IS_ARC (1<<7)
49 // class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
50 #define RO_HAS_CXX_DTOR_ONLY (1<<8)
51 // class is not ARC but has ARC-style weak ivar layout
52 #define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
53 // class does not allow associated objects on instances
54 #define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)
55
56 // class is in an unloadable bundle - must never be set by compiler
57 #define RO_FROM_BUNDLE (1<<29)
58 // class is unrealized future class - must never be set by compiler
59 #define RO_FUTURE (1<<30)
60 // class is realized - must never be set by compiler
61 #define RO_REALIZED (1<<31)
62
63 // Values for class_rw_t->flags
64 // These are not emitted by the compiler and are never used in class_ro_t.
65 // Their presence should be considered in future ABI versions.
66 // class_t->data is class_rw_t, not class_ro_t
67 #define RW_REALIZED (1<<31)
68 // class is unresolved future class
69 #define RW_FUTURE (1<<30)
70 // class is initialized
71 #define RW_INITIALIZED (1<<29)
72 // class is initializing
73 #define RW_INITIALIZING (1<<28)
74 // class_rw_t->ro is heap copy of class_ro_t
75 #define RW_COPIED_RO (1<<27)
76 // class allocated but not yet registered
77 #define RW_CONSTRUCTING (1<<26)
78 // class allocated and registered
79 #define RW_CONSTRUCTED (1<<25)
80 // available for use; was RW_FINALIZE_ON_MAIN_THREAD
81 // #define RW_24 (1<<24)
82 // class +load has been called
83 #define RW_LOADED (1<<23)
84 #if !SUPPORT_NONPOINTER_ISA
85 // class instances may have associative references
86 #define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
87 #endif
88 // class has instance-specific GC layout
89 #define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
90 // class does not allow associated objects on its instances
91 #define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
92 // class has started realizing but not yet completed it
93 #define RW_REALIZING (1<<19)
94
95 // NOTE: MORE RW_ FLAGS DEFINED BELOW
96
97
98 // Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
99 // or class_t->bits (FAST_*).
100 //
101 // FAST_* and FAST_CACHE_* are stored on the class, reducing pointer indirection.
102
103 #if __LP64__
104
105 // class is a Swift class from the pre-stable Swift ABI
106 #define FAST_IS_SWIFT_LEGACY (1UL<<0)
107 // class is a Swift class from the stable Swift ABI
108 #define FAST_IS_SWIFT_STABLE (1UL<<1)
109 // class or superclass has default retain/release/autorelease/retainCount/
110 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
111 #define FAST_HAS_DEFAULT_RR (1UL<<2)
112 // data pointer
113 #define FAST_DATA_MASK 0x00007ffffffffff8UL
114
115 #if __arm64__
116 // class or superclass has .cxx_construct/.cxx_destruct implementation
117 // FAST_CACHE_HAS_CXX_DTOR is the first bit so that setting it in
118 // isa_t::has_cxx_dtor is a single bfi
119 #define FAST_CACHE_HAS_CXX_DTOR (1<<0)
120 #define FAST_CACHE_HAS_CXX_CTOR (1<<1)
121 // Denormalized RO_META to avoid an indirection
122 #define FAST_CACHE_META (1<<2)
123 #else
124 // Denormalized RO_META to avoid an indirection
125 #define FAST_CACHE_META (1<<0)
126 // class or superclass has .cxx_construct/.cxx_destruct implementation
127 // FAST_CACHE_HAS_CXX_DTOR is chosen to alias with isa_t::has_cxx_dtor
128 #define FAST_CACHE_HAS_CXX_CTOR (1<<1)
129 #define FAST_CACHE_HAS_CXX_DTOR (1<<2)
130 #endif
131
132 // Fast Alloc fields:
133 // This stores the word-aligned size of instances + "ALLOC_DELTA16",
134 // or 0 if the instance size doesn't fit.
135 //
136 // These bits occupy the same bits as the instance size, so that
137 // the size can be extracted with a simple mask operation.
138 //
139 // FAST_CACHE_ALLOC_MASK16 allows the instance size to be extracted
140 // rounded up to the next 16 byte boundary, which is a fastpath for
141 // _objc_rootAllocWithZone()
142 #define FAST_CACHE_ALLOC_MASK 0x1ff8
143 #define FAST_CACHE_ALLOC_MASK16 0x1ff0
144 #define FAST_CACHE_ALLOC_DELTA16 0x0008
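// A minimal worked example of this encoding (illustrative only, assuming
// word_align() rounds up to 8 bytes and align16() to 16 bytes, as used by
// cache_t below):
//
//   setFastInstanceSize(24): word_align(24) + DELTA16 = 24 + 8 = 0x20
//                            0x20 & FAST_CACHE_ALLOC_MASK   -> stores 0x20
//   fastInstanceSize(0):     0x20 & FAST_CACHE_ALLOC_MASK16 -> 0x20 == align16(24)
//
//   setFastInstanceSize(32): word_align(32) + DELTA16 = 40 = 0x28
//   fastInstanceSize(0):     0x28 & FAST_CACHE_ALLOC_MASK16 -> 0x20 == align16(32)
//
// Adding the delta and then dropping bit 3 with MASK16 rounds an 8-byte
// aligned size up to the next 16-byte boundary with a single AND.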
145
146 // class's instances require raw isa
147 #define FAST_CACHE_REQUIRES_RAW_ISA (1<<13)
148 // class or superclass has default alloc/allocWithZone: implementation
149 // Note this is stored in the metaclass.
150 #define FAST_CACHE_HAS_DEFAULT_AWZ (1<<14)
151 // class or superclass has default new/self/class/respondsToSelector/isKindOfClass
152 #define FAST_CACHE_HAS_DEFAULT_CORE (1<<15)
153
154 #else
155
156 // class or superclass has .cxx_construct implementation
157 #define RW_HAS_CXX_CTOR (1<<18)
158 // class or superclass has .cxx_destruct implementation
159 #define RW_HAS_CXX_DTOR (1<<17)
160 // class or superclass has default alloc/allocWithZone: implementation
161 // Note this is stored in the metaclass.
162 #define RW_HAS_DEFAULT_AWZ (1<<16)
163 // class's instances require raw isa
164 #if SUPPORT_NONPOINTER_ISA
165 #define RW_REQUIRES_RAW_ISA (1<<15)
166 #endif
167 // class or superclass has default retain/release/autorelease/retainCount/
168 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
169 #define RW_HAS_DEFAULT_RR (1<<14)
170 // class or superclass has default new/self/class/respondsToSelector/isKindOfClass
171 #define RW_HAS_DEFAULT_CORE (1<<13)
172
173 // class is a Swift class from the pre-stable Swift ABI
174 #define FAST_IS_SWIFT_LEGACY (1UL<<0)
175 // class is a Swift class from the stable Swift ABI
176 #define FAST_IS_SWIFT_STABLE (1UL<<1)
177 // data pointer
178 #define FAST_DATA_MASK 0xfffffffcUL
179
180 #endif // __LP64__
181
182 // The Swift ABI requires that these bits be defined like this on all platforms.
183 static_assert(FAST_IS_SWIFT_LEGACY == 1, "resistance is futile");
184 static_assert(FAST_IS_SWIFT_STABLE == 2, "resistance is futile");
185
186
187 #if __LP64__
188 typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bit masks
189 #else
190 typedef uint16_t mask_t;
191 #endif
192 typedef uintptr_t SEL;
193
194 struct swift_class_t;
195
196 enum Atomicity { Atomic = true, NotAtomic = false };
197 enum IMPEncoding { Encoded = true, Raw = false };
198
199 struct bucket_t {
200 private:
201 // IMP-first is better for arm64e ptrauth and no worse for arm64.
202 // SEL-first is better for armv7* and i386 and x86_64.
203 #if __arm64__
204 explicit_atomic<uintptr_t> _imp;
205 explicit_atomic<SEL> _sel;
206 #else
207 explicit_atomic<SEL> _sel;
208 explicit_atomic<uintptr_t> _imp;
209 #endif
210
211 // Compute the ptrauth signing modifier from &_imp, newSel, and cls.
212 uintptr_t modifierForSEL(SEL newSel, Class cls) const {
213 return (uintptr_t)&_imp ^ (uintptr_t)newSel ^ (uintptr_t)cls;
214 }
215
216 // Sign newImp, with &_imp, newSel, and cls as modifiers.
217 uintptr_t encodeImp(IMP newImp, SEL newSel, Class cls) const {
218 if (!newImp) return 0;
219 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
220 return (uintptr_t)
221 ptrauth_auth_and_resign(newImp,
222 ptrauth_key_function_pointer, 0,
223 ptrauth_key_process_dependent_code,
224 modifierForSEL(newSel, cls));
225 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
226 return (uintptr_t)newImp ^ (uintptr_t)cls;
227 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
228 return (uintptr_t)newImp;
229 #else
230 #error Unknown method cache IMP encoding.
231 #endif
232 }
233
234 public:
235 inline SEL sel() const { return _sel.load(memory_order::memory_order_relaxed); }
236
237 inline IMP imp(Class cls) const {
238 uintptr_t imp = _imp.load(memory_order::memory_order_relaxed);
239 if (!imp) return nil;
240 #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
241 SEL sel = _sel.load(memory_order::memory_order_relaxed);
242 return (IMP)
243 ptrauth_auth_and_resign((const void *)imp,
244 ptrauth_key_process_dependent_code,
245 modifierForSEL(sel, cls),
246 ptrauth_key_function_pointer, 0);
247 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
248 return (IMP)(imp ^ (uintptr_t)cls);
249 #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
250 return (IMP)imp;
251 #else
252 #error Unknown method cache IMP encoding.
253 #endif
254 }
255
256 template <Atomicity, IMPEncoding>
257 void set(SEL newSel, IMP newImp, Class cls);
258 };
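// A small sketch of the CACHE_IMP_ENCODING_ISA_XOR round trip, with
// hypothetical pointer values purely for illustration:
//
//   encodeImp(newImp, sel, cls) = (uintptr_t)newImp ^ (uintptr_t)cls
//   imp(cls)                    = (IMP)(stored ^ (uintptr_t)cls)
//                               = (IMP)(newImp ^ cls ^ cls) = newImp
//
//   e.g. newImp = 0x100004000, cls = 0x100008000
//        stored  = 0x00000c000
//        decoded = 0x00000c000 ^ 0x100008000 = 0x100004000
//
// Storing newImp ^ cls means a cache bucket never holds a raw function
// pointer; the PTRAUTH variant instead re-signs the IMP with
// modifierForSEL(newSel, cls).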
259
260
261 struct cache_t {
262 #if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
263 explicit_atomic<struct bucket_t *> _buckets;
264 explicit_atomic<mask_t> _mask;
265 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
266 explicit_atomic<uintptr_t> _maskAndBuckets;
267 mask_t _mask_unused;
268
269 // How much the mask is shifted by.
270 static constexpr uintptr_t maskShift = 48;
271
272 // Additional bits after the mask which must be zero. msgSend
273 // takes advantage of these additional bits to construct the value
274 // `mask << 4` from `_maskAndBuckets` in a single instruction.
275 static constexpr uintptr_t maskZeroBits = 4;
276
277 // The largest mask value we can store.
278 static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;
279
280 // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
281 static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;
282
283 // Ensure we have enough bits for the buckets pointer.
284 static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
285 #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
286 // _maskAndBuckets stores the mask shift in the low 4 bits, and
287 // the buckets pointer in the remainder of the value. The mask
288 // shift is the value where (0xffff >> shift) produces the correct
289 // mask. This is equal to 16 - log2(cache_size).
290 explicit_atomic<uintptr_t> _maskAndBuckets;
291 mask_t _mask_unused;
292
293 static constexpr uintptr_t maskBits = 4;
294 static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
295 static constexpr uintptr_t bucketsMask = ~maskMask;
296 #else
297 #error Unknown cache mask storage type.
298 #endif
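// Layout sketch for the CACHE_MASK_STORAGE_HIGH_16 case, derived from the
// constants above (an informal picture, not an additional ABI guarantee):
//
//   _maskAndBuckets (64 bits):
//     [63..48]  mask            (maskShift == 48)
//     [47..44]  zero            (maskZeroBits)
//     [43..0]   buckets pointer (bucketsMask)
//
//   buckets pointer = _maskAndBuckets & bucketsMask
//   mask            = _maskAndBuckets >> maskShift
//
// Because bits 47..44 are zero, _maskAndBuckets >> (maskShift - maskZeroBits)
// is exactly mask << 4, which the msgSend fast path can form in one shift.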
299
300 #if __LP64__
301 uint16_t _flags;
302 #endif
303 uint16_t _occupied;
304
305 public:
306 static bucket_t *emptyBuckets();
307
308 struct bucket_t *buckets();
309 mask_t mask();
310 mask_t occupied();
311 void incrementOccupied();
312 void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
313 void initializeToEmpty();
314
315 unsigned capacity();
316 bool isConstantEmptyCache();
317 bool canBeFreed();
318
319 #if __LP64__
320 bool getBit(uint16_t flags) const {
321 return _flags & flags;
322 }
323 void setBit(uint16_t set) {
324 __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags, set, __ATOMIC_RELAXED);
325 }
326 void clearBit(uint16_t clear) {
327 __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags, ~clear, __ATOMIC_RELAXED);
328 }
329 #endif
330
331 #if FAST_CACHE_ALLOC_MASK
332 bool hasFastInstanceSize(size_t extra) const
333 {
334 if (__builtin_constant_p(extra) && extra == 0) {
335 return _flags & FAST_CACHE_ALLOC_MASK16;
336 }
337 return _flags & FAST_CACHE_ALLOC_MASK;
338 }
339
340 size_t fastInstanceSize(size_t extra) const
341 {
342 ASSERT(hasFastInstanceSize(extra));
343
344 if (__builtin_constant_p(extra) && extra == 0) {
345 return _flags & FAST_CACHE_ALLOC_MASK16;
346 } else {
347 size_t size = _flags & FAST_CACHE_ALLOC_MASK;
348 // remove the FAST_CACHE_ALLOC_DELTA16 that was added
349 // by setFastInstanceSize
350 return align16(size + extra - FAST_CACHE_ALLOC_DELTA16);
351 }
352 }
353
354 void setFastInstanceSize(size_t newSize)
355 {
356 // Set during realization or construction only. No locking needed.
357 uint16_t newBits = _flags & ~FAST_CACHE_ALLOC_MASK;
358 uint16_t sizeBits;
359
360 // Adding FAST_CACHE_ALLOC_DELTA16 allows FAST_CACHE_ALLOC_MASK16
361 // to yield the proper 16-byte aligned allocation size with a single mask
362 sizeBits = word_align(newSize) + FAST_CACHE_ALLOC_DELTA16;
363 sizeBits &= FAST_CACHE_ALLOC_MASK;
364 if (newSize <= sizeBits) {
365 newBits |= sizeBits;
366 }
367 _flags = newBits;
368 }
369 #else
370 bool hasFastInstanceSize(size_t extra) const {
371 return false;
372 }
373 size_t fastInstanceSize(size_t extra) const {
374 abort();
375 }
376 void setFastInstanceSize(size_t extra) {
377 // nothing
378 }
379 #endif
380
381 static size_t bytesForCapacity(uint32_t cap);
382 static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);
383
384 void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
385 void insert(Class cls, SEL sel, IMP imp, id receiver);
386
387 static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn, cold));
388 };
389
390
391 // classref_t is unremapped class_t*
392 typedef struct classref * classref_t;
393
394
395 #ifdef __PTRAUTH_INTRINSICS__
396 # define StubClassInitializerPtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0xc671)
397 #else
398 # define StubClassInitializerPtrauth
399 #endif
400 struct stub_class_t {
401 uintptr_t isa;
402 _objc_swiftMetadataInitializer StubClassInitializerPtrauth initializer;
403 };
404
405 /***********************************************************************
406 * entsize_list_tt<Element, List, FlagMask>
407 * Generic implementation of an array of non-fragile structs.
408 *
409 * Element is the struct type (e.g. method_t)
410 * List is the specialization of entsize_list_tt (e.g. method_list_t)
411 * FlagMask is used to stash extra bits in the entsize field
412 * (e.g. method list fixup markers)
413 **********************************************************************/
414 template <typename Element, typename List, uint32_t FlagMask>
415 struct entsize_list_tt {
416 uint32_t entsizeAndFlags;
417 uint32_t count;
418 Element first;
419
420 uint32_t entsize() const {
421 return entsizeAndFlags & ~FlagMask;
422 }
423 uint32_t flags() const {
424 return entsizeAndFlags & FlagMask;
425 }
426
427 Element& getOrEnd(uint32_t i) const {
428 ASSERT(i <= count);
429 return *(Element *)((uint8_t *)&first + i*entsize());
430 }
431 Element& get(uint32_t i) const {
432 ASSERT(i < count);
433 return getOrEnd(i);
434 }
435
436 size_t byteSize() const {
437 return byteSize(entsize(), count);
438 }
439
440 static size_t byteSize(uint32_t entsize, uint32_t count) {
441 return sizeof(entsize_list_tt) + (count-1)*entsize;
442 }
443
444 List *duplicate() const {
445 auto *dup = (List *)calloc(this->byteSize(), 1);
446 dup->entsizeAndFlags = this->entsizeAndFlags;
447 dup->count = this->count;
448 std::copy(begin(), end(), dup->begin());
449 return dup;
450 }
451
452 struct iterator;
453 const iterator begin() const {
454 return iterator(*static_cast<const List*>(this), 0);
455 }
456 iterator begin() {
457 return iterator(*static_cast<const List*>(this), 0);
458 }
459 const iterator end() const {
460 return iterator(*static_cast<const List*>(this), count);
461 }
462 iterator end() {
463 return iterator(*static_cast<const List*>(this), count);
464 }
465
466 struct iterator {
467 uint32_t entsize;
468 uint32_t index; // keeping track of this saves a divide in operator-
469 Element* element;
470
471 typedef std::random_access_iterator_tag iterator_category;
472 typedef Element value_type;
473 typedef ptrdiff_t difference_type;
474 typedef Element* pointer;
475 typedef Element& reference;
476
477 iterator() { }
478
479 iterator(const List& list, uint32_t start = 0)
480 : entsize(list.entsize())
481 , index(start)
482 , element(&list.getOrEnd(start))
483 { }
484
485 const iterator& operator += (ptrdiff_t delta) {
486 element = (Element*)((uint8_t *)element + delta*entsize);
487 index += (int32_t)delta;
488 return *this;
489 }
490 const iterator& operator -= (ptrdiff_t delta) {
491 element = (Element*)((uint8_t *)element - delta*entsize);
492 index -= (int32_t)delta;
493 return *this;
494 }
495 const iterator operator + (ptrdiff_t delta) const {
496 return iterator(*this) += delta;
497 }
498 const iterator operator - (ptrdiff_t delta) const {
499 return iterator(*this) -= delta;
500 }
501
502 iterator& operator ++ () { *this += 1; return *this; }
503 iterator& operator -- () { *this -= 1; return *this; }
504 iterator operator ++ (int) {
505 iterator result(*this); *this += 1; return result;
506 }
507 iterator operator -- (int) {
508 iterator result(*this); *this -= 1; return result;
509 }
510
511 ptrdiff_t operator - (const iterator& rhs) const {
512 return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
513 }
514
515 Element& operator * () const { return *element; }
516 Element* operator -> () const { return element; }
517
518 operator Element& () const { return *element; }
519
520 bool operator == (const iterator& rhs) const {
521 return this->element == rhs.element;
522 }
523 bool operator != (const iterator& rhs) const {
524 return this->element != rhs.element;
525 }
526
527 bool operator < (const iterator& rhs) const {
528 return this->element < rhs.element;
529 }
530 bool operator > (const iterator& rhs) const {
531 return this->element > rhs.element;
532 }
533 };
534 };
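// Usage sketch (illustrative only): because elements are addressed by
// entsize() rather than sizeof(Element), a list written with a larger
// per-entry size than this runtime expects can still be walked safely.
//
//   method_list_t *mlist = /* e.g. some class's baseMethodList */;
//   for (auto& meth : *mlist) {        // iterator strides by entsize()
//       // use meth.name, meth.types, meth.imp
//   }
//   // equivalently:
//   //   mlist->get(i) == *(method_t *)((uint8_t *)&mlist->first
//   //                                  + i * mlist->entsize());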
535
536
537 struct method_t {
538 SEL name;
539 const char *types;
540 MethodListIMP imp;
541
542 struct SortBySELAddress :
543 public std::binary_function<const method_t&,
544 const method_t&, bool>
545 {
546 bool operator() (const method_t& lhs,
547 const method_t& rhs)
548 { return lhs.name < rhs.name; }
549 };
550 };
551
552 struct ivar_t {
553 #if __x86_64__
554 // *offset was originally 64-bit on some x86_64 platforms.
555 // We read and write only 32 bits of it.
556 // Some metadata provides all 64 bits. This is harmless for unsigned
557 // little-endian values.
558 // Some code uses all 64 bits. class_addIvar() over-allocates the
559 // offset for their benefit.
560 #endif
561 int32_t *offset;
562 const char *name;
563 const char *type;
564 // alignment is sometimes -1; use alignment() instead
565 uint32_t alignment_raw;
566 uint32_t size;
567
568 uint32_t alignment() const {
569 if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
570 return 1 << alignment_raw;
571 }
572 };
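// For reference: alignment_raw is the log2 of the ivar's alignment
// (alignment_raw == 3 means 1<<3 == 8-byte alignment), and the ~0
// sentinel falls back to word alignment (1 << WORD_SHIFT), as
// alignment() above implements.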
573
574 struct property_t {
575 const char *name;
576 const char *attributes;
577 };
578
579 // Two bits of entsize are used for fixup markers.
580 struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
581 bool isUniqued() const;
582 bool isFixedUp() const;
583 void setFixedUp();
584
585 uint32_t indexOfMethod(const method_t *meth) const {
586 uint32_t i =
587 (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
588 ASSERT(i < count);
589 return i;
590 }
591 };
592
593 struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
594 bool containsIvar(Ivar ivar) const {
595 return (ivar >= (Ivar)&*begin() && ivar < (Ivar)&*end());
596 }
597 };
598
599 struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
600 };
601
602
603 typedef uintptr_t protocol_ref_t; // protocol_t *, but unremapped
604
605 // Values for protocol_t->flags
606 #define PROTOCOL_FIXED_UP_2 (1<<31) // must never be set by compiler
607 #define PROTOCOL_FIXED_UP_1 (1<<30) // must never be set by compiler
608 #define PROTOCOL_IS_CANONICAL (1<<29) // must never be set by compiler
609 // Bits 0..15 are reserved for Swift's use.
610
611 #define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
612
613 struct protocol_t : objc_object {
614 const char *mangledName;
615 struct protocol_list_t *protocols;
616 method_list_t *instanceMethods;
617 method_list_t *classMethods;
618 method_list_t *optionalInstanceMethods;
619 method_list_t *optionalClassMethods;
620 property_list_t *instanceProperties;
621 uint32_t size; // sizeof(protocol_t)
622 uint32_t flags;
623 // Fields below this point are not always present on disk.
624 const char **_extendedMethodTypes;
625 const char *_demangledName;
626 property_list_t *_classProperties;
627
628 const char *demangledName();
629
630 const char *nameForLogging() {
631 return demangledName();
632 }
633
634 bool isFixedUp() const;
635 void setFixedUp();
636
637 bool isCanonical() const;
638 void clearIsCanonical();
639
640 # define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))
641
642 bool hasExtendedMethodTypesField() const {
643 return HAS_FIELD(_extendedMethodTypes);
644 }
645 bool hasDemangledNameField() const {
646 return HAS_FIELD(_demangledName);
647 }
648 bool hasClassPropertiesField() const {
649 return HAS_FIELD(_classProperties);
650 }
651
652 # undef HAS_FIELD
653
654 const char **extendedMethodTypes() const {
655 return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
656 }
657
658 property_list_t *classProperties() const {
659 return hasClassPropertiesField() ? _classProperties : nil;
660 }
661 };
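// Behavior sketch of the size-based checks above (hypothetical sizes, for
// illustration): a protocol_t emitted before _classProperties existed has a
// smaller on-disk `size`, so hasClassPropertiesField() is false and
// classProperties() returns nil; a current protocol_t has
// size == sizeof(protocol_t) and exposes all trailing fields. The accessors
// here never read past `size` bytes.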
662
663 struct protocol_list_t {
664 // count is pointer-sized by accident.
665 uintptr_t count;
666 protocol_ref_t list[0]; // variable-size
667
668 size_t byteSize() const {
669 return sizeof(*this) + count*sizeof(list[0]);
670 }
671
672 protocol_list_t *duplicate() const {
673 return (protocol_list_t *)memdup(this, this->byteSize());
674 }
675
676 typedef protocol_ref_t* iterator;
677 typedef const protocol_ref_t* const_iterator;
678
679 const_iterator begin() const {
680 return list;
681 }
682 iterator begin() {
683 return list;
684 }
685 const_iterator end() const {
686 return list + count;
687 }
688 iterator end() {
689 return list + count;
690 }
691 };
692
693 struct class_ro_t {
694 uint32_t flags;
695 uint32_t instanceStart;
696 uint32_t instanceSize;
697 #ifdef __LP64__
698 uint32_t reserved;
699 #endif
700
701 const uint8_t * ivarLayout;
702
703 const char * name;
704 method_list_t * baseMethodList;
705 protocol_list_t * baseProtocols;
706 const ivar_list_t * ivars;
707
708 const uint8_t * weakIvarLayout;
709 property_list_t *baseProperties;
710
711 // This field exists only when RO_HAS_SWIFT_INITIALIZER is set.
712 _objc_swiftMetadataInitializer __ptrauth_objc_method_list_imp _swiftMetadataInitializer_NEVER_USE[0];
713
714 _objc_swiftMetadataInitializer swiftMetadataInitializer() const {
715 if (flags & RO_HAS_SWIFT_INITIALIZER) {
716 return _swiftMetadataInitializer_NEVER_USE[0];
717 } else {
718 return nil;
719 }
720 }
721
722 method_list_t *baseMethods() const {
723 return baseMethodList;
724 }
725
726 class_ro_t *duplicate() const {
727 if (flags & RO_HAS_SWIFT_INITIALIZER) {
728 size_t size = sizeof(*this) + sizeof(_swiftMetadataInitializer_NEVER_USE[0]);
729 class_ro_t *ro = (class_ro_t *)memdup(this, size);
730 ro->_swiftMetadataInitializer_NEVER_USE[0] = this->_swiftMetadataInitializer_NEVER_USE[0];
731 return ro;
732 } else {
733 size_t size = sizeof(*this);
734 class_ro_t *ro = (class_ro_t *)memdup(this, size);
735 return ro;
736 }
737 }
738 };
739
740
741 /***********************************************************************
742 * list_array_tt<Element, List>
743 * Generic implementation for metadata that can be augmented by categories.
744 *
745 * Element is the underlying metadata type (e.g. method_t)
746 * List is the metadata's list type (e.g. method_list_t)
747 *
748 * A list_array_tt has one of three values:
749 * - empty
750 * - a pointer to a single list
751 * - an array of pointers to lists
752 *
753 * countLists/beginLists/endLists iterate the metadata lists
754 * count/begin/end iterate the underlying metadata elements
755 **********************************************************************/
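// Representation sketch (informal, matching the union and accessors below):
// the single word either holds a List* directly or an array_t* tagged by
// setting its low bit.
//
//   empty:       list         == nullptr               hasArray() == false
//   one list:    list         == <some List *>         hasArray() == false
//   many lists:  arrayAndFlag == (uintptr_t)array | 1  hasArray() == true
//
// array() strips the tag again: (array_t *)(arrayAndFlag & ~1).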
756 template <typename Element, typename List>
757 class list_array_tt {
758 struct array_t {
759 uint32_t count;
760 List* lists[0];
761
762 static size_t byteSize(uint32_t count) {
763 return sizeof(array_t) + count*sizeof(lists[0]);
764 }
765 size_t byteSize() {
766 return byteSize(count);
767 }
768 };
769
770 protected:
771 class iterator {
772 List **lists;
773 List **listsEnd;
774 typename List::iterator m, mEnd;
775
776 public:
777 iterator(List **begin, List **end)
778 : lists(begin), listsEnd(end)
779 {
780 if (begin != end) {
781 m = (*begin)->begin();
782 mEnd = (*begin)->end();
783 }
784 }
785
786 const Element& operator * () const {
787 return *m;
788 }
789 Element& operator * () {
790 return *m;
791 }
792
793 bool operator != (const iterator& rhs) const {
794 if (lists != rhs.lists) return true;
795 if (lists == listsEnd) return false; // m is undefined
796 if (m != rhs.m) return true;
797 return false;
798 }
799
800 const iterator& operator ++ () {
801 ASSERT(m != mEnd);
802 m++;
803 if (m == mEnd) {
804 ASSERT(lists != listsEnd);
805 lists++;
806 if (lists != listsEnd) {
807 m = (*lists)->begin();
808 mEnd = (*lists)->end();
809 }
810 }
811 return *this;
812 }
813 };
814
815 private:
816 union {
817 List* list;
818 uintptr_t arrayAndFlag;
819 };
820
821 bool hasArray() const {
822 return arrayAndFlag & 1;
823 }
824
825 array_t *array() {
826 return (array_t *)(arrayAndFlag & ~1);
827 }
828
829 void setArray(array_t *array) {
830 arrayAndFlag = (uintptr_t)array | 1;
831 }
832
833 public:
834
835 uint32_t count() {
836 uint32_t result = 0;
837 for (auto lists = beginLists(), end = endLists();
838 lists != end;
839 ++lists)
840 {
841 result += (*lists)->count;
842 }
843 return result;
844 }
845
846 iterator begin() {
847 return iterator(beginLists(), endLists());
848 }
849
850 iterator end() {
851 List **e = endLists();
852 return iterator(e, e);
853 }
854
855
856 uint32_t countLists() {
857 if (hasArray()) {
858 return array()->count;
859 } else if (list) {
860 return 1;
861 } else {
862 return 0;
863 }
864 }
865
866 List** beginLists() {
867 if (hasArray()) {
868 return array()->lists;
869 } else {
870 return &list;
871 }
872 }
873
874 List** endLists() {
875 if (hasArray()) {
876 return array()->lists + array()->count;
877 } else if (list) {
878 return &list + 1;
879 } else {
880 return &list;
881 }
882 }
883
884 void attachLists(List* const * addedLists, uint32_t addedCount) {
885 if (addedCount == 0) return;
886
887 if (hasArray()) {
888 // many lists -> many lists
889 uint32_t oldCount = array()->count;
890 uint32_t newCount = oldCount + addedCount;
891 setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
892 array()->count = newCount;
893 memmove(array()->lists + addedCount, array()->lists,
894 oldCount * sizeof(array()->lists[0]));
895 memcpy(array()->lists, addedLists,
896 addedCount * sizeof(array()->lists[0]));
897 }
898 else if (!list && addedCount == 1) {
899 // 0 lists -> 1 list
900 list = addedLists[0];
901 }
902 else {
903 // 1 list -> many lists
904 List* oldList = list;
905 uint32_t oldCount = oldList ? 1 : 0;
906 uint32_t newCount = oldCount + addedCount;
907 setArray((array_t *)malloc(array_t::byteSize(newCount)));
908 array()->count = newCount;
909 if (oldList) array()->lists[addedCount] = oldList;
910 memcpy(array()->lists, addedLists,
911 addedCount * sizeof(array()->lists[0]));
912 }
913 }
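    // Worked example of the resulting order (hypothetical lists, for
    // illustration): starting from a single base list B and attaching
    // {C1, C2} from categories takes the "1 list -> many lists" branch and
    // leaves array()->lists == { C1, C2, B }: addedLists are copied to the
    // front and the old list is placed after them, so callers that scan the
    // lists in order see category entries before the base list.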
914
915 void tryFree() {
916 if (hasArray()) {
917 for (uint32_t i = 0; i < array()->count; i++) {
918 try_free(array()->lists[i]);
919 }
920 try_free(array());
921 }
922 else if (list) {
923 try_free(list);
924 }
925 }
926
927 template<typename Result>
928 Result duplicate() {
929 Result result;
930
931 if (hasArray()) {
932 array_t *a = array();
933 result.setArray((array_t *)memdup(a, a->byteSize()));
934 for (uint32_t i = 0; i < a->count; i++) {
935 result.array()->lists[i] = a->lists[i]->duplicate();
936 }
937 } else if (list) {
938 result.list = list->duplicate();
939 } else {
940 result.list = nil;
941 }
942
943 return result;
944 }
945 };
946
947
948 class method_array_t :
949 public list_array_tt<method_t, method_list_t>
950 {
951 typedef list_array_tt<method_t, method_list_t> Super;
952
953 public:
954 method_list_t **beginCategoryMethodLists() {
955 return beginLists();
956 }
957
958 method_list_t **endCategoryMethodLists(Class cls);
959
960 method_array_t duplicate() {
961 return Super::duplicate<method_array_t>();
962 }
963 };
964
965
966 class property_array_t :
967 public list_array_tt<property_t, property_list_t>
968 {
969 typedef list_array_tt<property_t, property_list_t> Super;
970
971 public:
972 property_array_t duplicate() {
973 return Super::duplicate<property_array_t>();
974 }
975 };
976
977
978 class protocol_array_t :
979 public list_array_tt<protocol_ref_t, protocol_list_t>
980 {
981 typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;
982
983 public:
984 protocol_array_t duplicate() {
985 return Super::duplicate<protocol_array_t>();
986 }
987 };
988
989
990 struct class_rw_t {
991 // Be warned that Symbolication knows the layout of this structure.
992 uint32_t flags;
993 uint16_t version;
994 uint16_t witness;
995
996 const class_ro_t *ro;
997
998 method_array_t methods;
999 property_array_t properties;
1000 protocol_array_t protocols;
1001
1002 Class firstSubclass;
1003 Class nextSiblingClass;
1004
1005 char *demangledName;
1006
1007 #if SUPPORT_INDEXED_ISA
1008 uint32_t index;
1009 #endif
1010
1011 void setFlags(uint32_t set)
1012 {
1013 __c11_atomic_fetch_or((_Atomic(uint32_t) *)&flags, set, __ATOMIC_RELAXED);
1014 }
1015
1016 void clearFlags(uint32_t clear)
1017 {
1018 __c11_atomic_fetch_and((_Atomic(uint32_t) *)&flags, ~clear, __ATOMIC_RELAXED);
1019 }
1020
1021 // set and clear must not overlap
1022 void changeFlags(uint32_t set, uint32_t clear)
1023 {
1024 ASSERT((set & clear) == 0);
1025
1026 uint32_t oldf, newf;
1027 do {
1028 oldf = flags;
1029 newf = (oldf | set) & ~clear;
1030 } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
1031 }
1032 };
1033
1034
1035 struct class_data_bits_t {
1036 friend objc_class;
1037
1038 // Values are the FAST_ flags above.
1039 uintptr_t bits;
1040 private:
1041 bool getBit(uintptr_t bit) const
1042 {
1043 return bits & bit;
1044 }
1045
1046 // Atomically set the bits in `set` and clear the bits in `clear`.
1047 // set and clear must not overlap.
1048 void setAndClearBits(uintptr_t set, uintptr_t clear)
1049 {
1050 ASSERT((set & clear) == 0);
1051 uintptr_t oldBits;
1052 uintptr_t newBits;
1053 do {
1054 oldBits = LoadExclusive(&bits);
1055 newBits = (oldBits | set) & ~clear;
1056 } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
1057 }
1058
1059 void setBits(uintptr_t set) {
1060 __c11_atomic_fetch_or((_Atomic(uintptr_t) *)&bits, set, __ATOMIC_RELAXED);
1061 }
1062
1063 void clearBits(uintptr_t clear) {
1064 __c11_atomic_fetch_and((_Atomic(uintptr_t) *)&bits, ~clear, __ATOMIC_RELAXED);
1065 }
1066
1067 public:
1068
1069 class_rw_t* data() const {
1070 return (class_rw_t *)(bits & FAST_DATA_MASK);
1071 }
1072 void setData(class_rw_t *newData)
1073 {
1074 ASSERT(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE)));
1075 // Set during realization or construction only. No locking needed.
1076 // Use a store-release fence because there may be concurrent
1077 // readers of data and data's contents.
1078 uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
1079 atomic_thread_fence(memory_order_release);
1080 bits = newBits;
1081 }
1082
1083 // Get the class's ro data, even in the presence of concurrent realization.
1084 // fixme this isn't really safe without a compiler barrier at least
1085 // and probably a memory barrier when realizeClass changes the data field
1086 const class_ro_t *safe_ro() {
1087 class_rw_t *maybe_rw = data();
1088 if (maybe_rw->flags & RW_REALIZED) {
1089 // maybe_rw is rw
1090 return maybe_rw->ro;
1091 } else {
1092 // maybe_rw is actually ro
1093 return (class_ro_t *)maybe_rw;
1094 }
1095 }
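    // Note on safe_ro(): before realization, the data field still points at
    // the compiler-emitted class_ro_t. RW_REALIZED and RO_REALIZED are both
    // bit 31, and the compiler must never set RO_REALIZED (see the class_ro_t
    // flag comments above), so testing that one bit distinguishes the two
    // cases whichever struct the pointer actually refers to.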
1096
1097 void setClassArrayIndex(unsigned Idx) {
1098 #if SUPPORT_INDEXED_ISA
1099 // 0 is unused so that we can rely on zero-initialisation from calloc.
1100 ASSERT(Idx > 0);
1101 data()->index = Idx;
1102 #endif
1103 }
1104
1105 unsigned classArrayIndex() {
1106 #if SUPPORT_INDEXED_ISA
1107 return data()->index;
1108 #else
1109 return 0;
1110 #endif
1111 }
1112
1113 bool isAnySwift() {
1114 return isSwiftStable() || isSwiftLegacy();
1115 }
1116
1117 bool isSwiftStable() {
1118 return getBit(FAST_IS_SWIFT_STABLE);
1119 }
1120 void setIsSwiftStable() {
1121 setAndClearBits(FAST_IS_SWIFT_STABLE, FAST_IS_SWIFT_LEGACY);
1122 }
1123
1124 bool isSwiftLegacy() {
1125 return getBit(FAST_IS_SWIFT_LEGACY);
1126 }
1127 void setIsSwiftLegacy() {
1128 setAndClearBits(FAST_IS_SWIFT_LEGACY, FAST_IS_SWIFT_STABLE);
1129 }
1130
1131 // fixme remove this once the Swift runtime uses the stable bits
1132 bool isSwiftStable_ButAllowLegacyForNow() {
1133 return isAnySwift();
1134 }
1135
1136 _objc_swiftMetadataInitializer swiftMetadataInitializer() {
1137 // This function is called on un-realized classes without
1138 // holding any locks.
1139 // Beware of races with other realizers.
1140 return safe_ro()->swiftMetadataInitializer();
1141 }
1142 };
1143
1144
1145 struct objc_class : objc_object {
1146 // Class ISA;
1147 Class superclass;
1148 cache_t cache; // formerly cache pointer and vtable
1149 class_data_bits_t bits; // class_rw_t * plus custom rr/alloc flags
1150
1151 class_rw_t *data() const {
1152 return bits.data();
1153 }
1154 void setData(class_rw_t *newData) {
1155 bits.setData(newData);
1156 }
1157
1158 void setInfo(uint32_t set) {
1159 ASSERT(isFuture() || isRealized());
1160 data()->setFlags(set);
1161 }
1162
1163 void clearInfo(uint32_t clear) {
1164 ASSERT(isFuture() || isRealized());
1165 data()->clearFlags(clear);
1166 }
1167
1168 // set and clear must not overlap
1169 void changeInfo(uint32_t set, uint32_t clear) {
1170 ASSERT(isFuture() || isRealized());
1171 ASSERT((set & clear) == 0);
1172 data()->changeFlags(set, clear);
1173 }
1174
1175 #if FAST_HAS_DEFAULT_RR
1176 bool hasCustomRR() const {
1177 return !bits.getBit(FAST_HAS_DEFAULT_RR);
1178 }
1179 void setHasDefaultRR() {
1180 bits.setBits(FAST_HAS_DEFAULT_RR);
1181 }
1182 void setHasCustomRR() {
1183 bits.clearBits(FAST_HAS_DEFAULT_RR);
1184 }
1185 #else
1186 bool hasCustomRR() const {
1187 return !(bits.data()->flags & RW_HAS_DEFAULT_RR);
1188 }
1189 void setHasDefaultRR() {
1190 bits.data()->setFlags(RW_HAS_DEFAULT_RR);
1191 }
1192 void setHasCustomRR() {
1193 bits.data()->clearFlags(RW_HAS_DEFAULT_RR);
1194 }
1195 #endif
1196
1197 #if FAST_CACHE_HAS_DEFAULT_AWZ
1198 bool hasCustomAWZ() const {
1199 return !cache.getBit(FAST_CACHE_HAS_DEFAULT_AWZ);
1200 }
1201 void setHasDefaultAWZ() {
1202 cache.setBit(FAST_CACHE_HAS_DEFAULT_AWZ);
1203 }
1204 void setHasCustomAWZ() {
1205 cache.clearBit(FAST_CACHE_HAS_DEFAULT_AWZ);
1206 }
1207 #else
1208 bool hasCustomAWZ() const {
1209 return !(bits.data()->flags & RW_HAS_DEFAULT_AWZ);
1210 }
1211 void setHasDefaultAWZ() {
1212 bits.data()->setFlags(RW_HAS_DEFAULT_AWZ);
1213 }
1214 void setHasCustomAWZ() {
1215 bits.data()->clearFlags(RW_HAS_DEFAULT_AWZ);
1216 }
1217 #endif
1218
1219 #if FAST_CACHE_HAS_DEFAULT_CORE
1220 bool hasCustomCore() const {
1221 return !cache.getBit(FAST_CACHE_HAS_DEFAULT_CORE);
1222 }
1223 void setHasDefaultCore() {
1224 return cache.setBit(FAST_CACHE_HAS_DEFAULT_CORE);
1225 }
1226 void setHasCustomCore() {
1227 return cache.clearBit(FAST_CACHE_HAS_DEFAULT_CORE);
1228 }
1229 #else
1230 bool hasCustomCore() const {
1231 return !(bits.data()->flags & RW_HAS_DEFAULT_CORE);
1232 }
1233 void setHasDefaultCore() {
1234 bits.data()->setFlags(RW_HAS_DEFAULT_CORE);
1235 }
1236 void setHasCustomCore() {
1237 bits.data()->clearFlags(RW_HAS_DEFAULT_CORE);
1238 }
1239 #endif
1240
1241 #if FAST_CACHE_HAS_CXX_CTOR
1242 bool hasCxxCtor() {
1243 ASSERT(isRealized());
1244 return cache.getBit(FAST_CACHE_HAS_CXX_CTOR);
1245 }
1246 void setHasCxxCtor() {
1247 cache.setBit(FAST_CACHE_HAS_CXX_CTOR);
1248 }
1249 #else
1250 bool hasCxxCtor() {
1251 ASSERT(isRealized());
1252 return bits.data()->flags & RW_HAS_CXX_CTOR;
1253 }
1254 void setHasCxxCtor() {
1255 bits.data()->setFlags(RW_HAS_CXX_CTOR);
1256 }
1257 #endif
1258
1259 #if FAST_CACHE_HAS_CXX_DTOR
1260 bool hasCxxDtor() {
1261 ASSERT(isRealized());
1262 return cache.getBit(FAST_CACHE_HAS_CXX_DTOR);
1263 }
1264 void setHasCxxDtor() {
1265 cache.setBit(FAST_CACHE_HAS_CXX_DTOR);
1266 }
1267 #else
1268 bool hasCxxDtor() {
1269 ASSERT(isRealized());
1270 return bits.data()->flags & RW_HAS_CXX_DTOR;
1271 }
1272 void setHasCxxDtor() {
1273 bits.data()->setFlags(RW_HAS_CXX_DTOR);
1274 }
1275 #endif
1276
1277 #if FAST_CACHE_REQUIRES_RAW_ISA
1278 bool instancesRequireRawIsa() {
1279 return cache.getBit(FAST_CACHE_REQUIRES_RAW_ISA);
1280 }
1281 void setInstancesRequireRawIsa() {
1282 cache.setBit(FAST_CACHE_REQUIRES_RAW_ISA);
1283 }
1284 #elif SUPPORT_NONPOINTER_ISA
1285 bool instancesRequireRawIsa() {
1286 return bits.data()->flags & RW_REQUIRES_RAW_ISA;
1287 }
1288 void setInstancesRequireRawIsa() {
1289 bits.data()->setFlags(RW_REQUIRES_RAW_ISA);
1290 }
1291 #else
1292 bool instancesRequireRawIsa() {
1293 return true;
1294 }
1295 void setInstancesRequireRawIsa() {
1296 // nothing
1297 }
1298 #endif
1299 void setInstancesRequireRawIsaRecursively(bool inherited = false);
1300 void printInstancesRequireRawIsa(bool inherited);
1301
1302 bool canAllocNonpointer() {
1303 ASSERT(!isFuture());
1304 return !instancesRequireRawIsa();
1305 }
1306
1307 bool isSwiftStable() {
1308 return bits.isSwiftStable();
1309 }
1310
1311 bool isSwiftLegacy() {
1312 return bits.isSwiftLegacy();
1313 }
1314
1315 bool isAnySwift() {
1316 return bits.isAnySwift();
1317 }
1318
1319 bool isSwiftStable_ButAllowLegacyForNow() {
1320 return bits.isSwiftStable_ButAllowLegacyForNow();
1321 }
1322
1323 bool isStubClass() const {
1324 uintptr_t isa = (uintptr_t)isaBits();
1325 return 1 <= isa && isa < 16;
1326 }
1327
1328 // Swift stable ABI built for old deployment targets looks weird.
1329 // The is-legacy bit is set for compatibility with old libobjc.
1330 // We are on a "new" deployment target so we need to rewrite that bit.
1331 // These stable-with-legacy-bit classes are distinguished from real
1332 // legacy classes using another bit in the Swift data
1333 // (ClassFlags::IsSwiftPreStableABI)
1334
1335 bool isUnfixedBackwardDeployingStableSwift() {
1336 // Only classes marked as Swift legacy need apply.
1337 if (!bits.isSwiftLegacy()) return false;
1338
1339 // Check the true legacy vs stable distinguisher.
1340 // The low bit of Swift's ClassFlags is SET for true legacy
1341 // and UNSET for stable pretending to be legacy.
1342 uint32_t swiftClassFlags = *(uint32_t *)(&bits + 1);
1343 bool isActuallySwiftLegacy = bool(swiftClassFlags & 1);
1344 return !isActuallySwiftLegacy;
1345 }
1346
1347 void fixupBackwardDeployingStableSwift() {
1348 if (isUnfixedBackwardDeployingStableSwift()) {
1349 // Class really is stable Swift, pretending to be pre-stable.
1350 // Fix its lie.
1351 bits.setIsSwiftStable();
1352 }
1353 }
1354
1355 _objc_swiftMetadataInitializer swiftMetadataInitializer() {
1356 return bits.swiftMetadataInitializer();
1357 }
1358
1359 // Return YES if the class's ivars are managed by ARC,
1360 // or the class is MRC but has ARC-style weak ivars.
1361 bool hasAutomaticIvars() {
1362 return data()->ro->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC);
1363 }
1364
1365 // Return YES if the class's ivars are managed by ARC.
1366 bool isARC() {
1367 return data()->ro->flags & RO_IS_ARC;
1368 }
1369
1370
1371 bool forbidsAssociatedObjects() {
1372 return (data()->flags & RW_FORBIDS_ASSOCIATED_OBJECTS);
1373 }
1374
1375 #if SUPPORT_NONPOINTER_ISA
1376 // Tracked in non-pointer isas; not tracked otherwise
1377 #else
1378 bool instancesHaveAssociatedObjects() {
1379 // this may be an unrealized future class in the CF-bridged case
1380 ASSERT(isFuture() || isRealized());
1381 return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
1382 }
1383
1384 void setInstancesHaveAssociatedObjects() {
1385 // this may be an unrealized future class in the CF-bridged case
1386 ASSERT(isFuture() || isRealized());
1387 setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
1388 }
1389 #endif
1390
1391 bool shouldGrowCache() {
1392 return true;
1393 }
1394
1395 void setShouldGrowCache(bool) {
1396 // fixme good or bad for memory use?
1397 }
1398
1399 bool isInitializing() {
1400 return getMeta()->data()->flags & RW_INITIALIZING;
1401 }
1402
1403 void setInitializing() {
1404 ASSERT(!isMetaClass());
1405 ISA()->setInfo(RW_INITIALIZING);
1406 }
1407
1408 bool isInitialized() {
1409 return getMeta()->data()->flags & RW_INITIALIZED;
1410 }
1411
1412 void setInitialized();
1413
1414 bool isLoadable() {
1415 ASSERT(isRealized());
1416 return true; // any class registered for +load is definitely loadable
1417 }
1418
1419 IMP getLoadMethod();
1420
1421 // Locking: To prevent concurrent realization, hold runtimeLock.
1422 bool isRealized() const {
1423 return !isStubClass() && (data()->flags & RW_REALIZED);
1424 }
1425
1426 // Returns true if this is an unrealized future class.
1427 // Locking: To prevent concurrent realization, hold runtimeLock.
1428 bool isFuture() const {
1429 return data()->flags & RW_FUTURE;
1430 }
1431
1432 bool isMetaClass() {
1433 ASSERT(this);
1434 ASSERT(isRealized());
1435 #if FAST_CACHE_META
1436 return cache.getBit(FAST_CACHE_META);
1437 #else
1438 return data()->ro->flags & RO_META;
1439 #endif
1440 }
1441
1442 // Like isMetaClass, but also valid on un-realized classes
1443 bool isMetaClassMaybeUnrealized() {
1444 return bits.safe_ro()->flags & RO_META;
1445 }
1446
1447 // NOT identical to this->ISA when this is a metaclass
1448 Class getMeta() {
1449 if (isMetaClass()) return (Class)this;
1450 else return this->ISA();
1451 }
1452
1453 bool isRootClass() {
1454 return superclass == nil;
1455 }
1456 bool isRootMetaclass() {
1457 return ISA() == (Class)this;
1458 }
1459
1460 const char *mangledName() {
1461 // fixme can't assert locks here
1462 ASSERT(this);
1463
1464 if (isRealized() || isFuture()) {
1465 return data()->ro->name;
1466 } else {
1467 return ((const class_ro_t *)data())->name;
1468 }
1469 }
1470
1471 const char *demangledName();
1472 const char *nameForLogging();
1473
1474 // May be unaligned depending on class's ivars.
1475 uint32_t unalignedInstanceStart() const {
1476 ASSERT(isRealized());
1477 return data()->ro->instanceStart;
1478 }
1479
1480 // Class's instance start rounded up to a pointer-size boundary.
1481 // This is used for ARC layout bitmaps.
1482 uint32_t alignedInstanceStart() const {
1483 return word_align(unalignedInstanceStart());
1484 }
1485
1486 // May be unaligned depending on class's ivars.
1487 uint32_t unalignedInstanceSize() const {
1488 ASSERT(isRealized());
1489 return data()->ro->instanceSize;
1490 }
1491
1492 // Class's ivar size rounded up to a pointer-size boundary.
1493 uint32_t alignedInstanceSize() const {
1494 return word_align(unalignedInstanceSize());
1495 }
1496
1497 size_t instanceSize(size_t extraBytes) const {
1498 if (fastpath(cache.hasFastInstanceSize(extraBytes))) {
1499 return cache.fastInstanceSize(extraBytes);
1500 }
1501
1502 size_t size = alignedInstanceSize() + extraBytes;
1503 // CF requires all objects be at least 16 bytes.
1504 if (size < 16) size = 16;
1505 return size;
1506 }
1507
1508 void setInstanceSize(uint32_t newSize) {
1509 ASSERT(isRealized());
1510 ASSERT(data()->flags & RW_REALIZING);
1511 if (newSize != data()->ro->instanceSize) {
1512 ASSERT(data()->flags & RW_COPIED_RO);
1513 *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
1514 }
1515 cache.setFastInstanceSize(newSize);
1516 }
1517
1518 void chooseClassArrayIndex();
1519
1520 void setClassArrayIndex(unsigned Idx) {
1521 bits.setClassArrayIndex(Idx);
1522 }
1523
1524 unsigned classArrayIndex() {
1525 return bits.classArrayIndex();
1526 }
1527 };
1528
1529
1530 struct swift_class_t : objc_class {
1531 uint32_t flags;
1532 uint32_t instanceAddressOffset;
1533 uint32_t instanceSize;
1534 uint16_t instanceAlignMask;
1535 uint16_t reserved;
1536
1537 uint32_t classSize;
1538 uint32_t classAddressOffset;
1539 void *description;
1540 // ...
1541
1542 void *baseAddress() {
1543 return (void *)((uint8_t *)this - classAddressOffset);
1544 }
1545 };
1546
1547
1548 struct category_t {
1549 const char *name;
1550 classref_t cls;
1551 struct method_list_t *instanceMethods;
1552 struct method_list_t *classMethods;
1553 struct protocol_list_t *protocols;
1554 struct property_list_t *instanceProperties;
1555 // Fields below this point are not always present on disk.
1556 struct property_list_t *_classProperties;
1557
1558 method_list_t *methodsForMeta(bool isMeta) {
1559 if (isMeta) return classMethods;
1560 else return instanceMethods;
1561 }
1562
1563 property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);
1564
1565 protocol_list_t *protocolsForMeta(bool isMeta) {
1566 if (isMeta) return nullptr;
1567 else return protocols;
1568 }
1569 };
1570
1571 struct objc_super2 {
1572 id receiver;
1573 Class current_class;
1574 };
1575
1576 struct message_ref_t {
1577 IMP imp;
1578 SEL sel;
1579 };
1580
1581
1582 extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
1583
1584 #endif