]> git.saurik.com Git - apple/objc4.git/blob - runtime/objc-runtime-new.h
19258f6981b4b35c55ceaaa9d2c883145eb32874
[apple/objc4.git] / runtime / objc-runtime-new.h
1 /*
2 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H

// mask_t is the integer type of cache_t's _mask and _occupied fields.
#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bits
#else
typedef uint16_t mask_t;
#endif
// Selectors are handled as plain integers here (presumably interned
// selector-name addresses — confirm against objc-sel code).
typedef uintptr_t SEL;

struct swift_class_t;

// Template argument for bucket_t::set: whether the store must be atomic.
enum Atomicity { Atomic = true, NotAtomic = false };
37
// One (SEL, IMP) entry in a class's method cache.
// On arm64e the IMP is stored signed with a bucket-specific modifier.
struct bucket_t {
private:
    // IMP-first is better for arm64e ptrauth and no worse for arm64.
    // SEL-first is better for armv7* and i386 and x86_64.
#if __arm64__
    uintptr_t _imp;
    SEL _sel;
#else
    SEL _sel;
    uintptr_t _imp;
#endif

    // Compute the ptrauth signing modifier from &_imp and newSel.
    // Mixing in the slot's own address means a signed IMP value cannot
    // be copied verbatim into a different bucket and still authenticate.
    uintptr_t modifierForSEL(SEL newSel) const {
        return (uintptr_t)&_imp ^ (uintptr_t)newSel;
    }

    // Sign newImp, with &_imp and newSel as modifiers.
    // Re-signs from the ordinary function-pointer key to the
    // process-dependent data key used for cache storage.
    uintptr_t signIMP(IMP newImp, SEL newSel) const {
        if (!newImp) return 0;
        return (uintptr_t)
            ptrauth_auth_and_resign(newImp,
                                    ptrauth_key_function_pointer, 0,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(newSel));
    }

public:
    // The cached selector (0 for an empty bucket).
    inline SEL sel() const { return _sel; }

    // The cached implementation, re-signed back to an ordinary
    // function pointer; nil if the bucket is empty.
    inline IMP imp() const {
        if (!_imp) return nil;
        return (IMP)
            ptrauth_auth_and_resign((const void *)_imp,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(_sel),
                                    ptrauth_key_function_pointer, 0);
    }

    // Store newSel/newImp into this bucket; the Atomicity template
    // parameter selects the store discipline. Defined out of line.
    template <Atomicity>
    void set(SEL newSel, IMP newImp);
};
80
81
// A class's method cache: a power-of-two hash table of bucket_t plus
// occupancy bookkeeping. All member functions are defined out of line.
struct cache_t {
    struct bucket_t *_buckets;   // hash table storage
    mask_t _mask;                // capacity - 1 (presumably; confirm in .mm)
    mask_t _occupied;            // number of filled buckets

public:
    struct bucket_t *buckets();            // accessor for _buckets
    mask_t mask();                         // accessor for _mask
    mask_t occupied();                     // accessor for _occupied
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    mask_t capacity();
    bool isConstantEmptyCache();           // true if sharing the static empty cache
    bool canBeFreed();

    static size_t bytesForCapacity(uint32_t cap);
    // End-marker bucket for a table of the given capacity.
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void expand();                         // grow the table
    void reallocate(mask_t oldCapacity, mask_t newCapacity);
    // Find the bucket for sel (hit or insertion slot).
    struct bucket_t * find(SEL sel, id receiver);

    // Diagnose a corrupt cache; never returns.
    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn));
};
108
109
// classref_t is unremapped class_t*
typedef struct classref * classref_t;

/***********************************************************************
* entsize_list_tt<Element, List, FlagMask>
* Generic implementation of an array of non-fragile structs.
*
* Element is the struct type (e.g. method_t)
* List is the specialization of entsize_list_tt (e.g. method_list_t)
* FlagMask is used to stash extra bits in the entsize field
*   (e.g. method list fixup markers)
**********************************************************************/
template <typename Element, typename List, uint32_t FlagMask>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;   // per-element byte size, ORed with flag bits
    uint32_t count;             // number of elements
    Element first;              // first element; the rest follow in memory

    // Per-element stride in bytes, with the flag bits masked off.
    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    // Element i, where i == count is allowed (one-past-the-end address
    // for iterators). Indexing is by byte stride, not Element[], because
    // on-disk entsize may differ from sizeof(Element).
    Element& getOrEnd(uint32_t i) const { 
        assert(i <= count);
        return *(Element *)((uint8_t *)&first + i*entsize()); 
    }
    // Element i; i must be a valid index.
    Element& get(uint32_t i) const { 
        assert(i < count);
        return getOrEnd(i);
    }

    // Total byte size of this list as laid out in memory.
    size_t byteSize() const {
        return byteSize(entsize(), count);
    }
    
    static size_t byteSize(uint32_t entsize, uint32_t count) {
        // sizeof(entsize_list_tt) already includes one Element (first).
        return sizeof(entsize_list_tt) + (count-1)*entsize;
    }

    // Heap copy of this list; caller owns the result.
    List *duplicate() const {
        auto *dup = (List *)calloc(this->byteSize(), 1);
        dup->entsizeAndFlags = this->entsizeAndFlags;
        dup->count = this->count;
        std::copy(begin(), end(), dup->begin());
        return dup;
    }

    struct iterator;
    const iterator begin() const { 
        return iterator(*static_cast<const List*>(this), 0); 
    }
    iterator begin() { 
        return iterator(*static_cast<const List*>(this), 0); 
    }
    const iterator end() const { 
        return iterator(*static_cast<const List*>(this), count); 
    }
    iterator end() { 
        return iterator(*static_cast<const List*>(this), count); 
    }

    // Random-access iterator that advances by the list's runtime entsize.
    struct iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};
243
244
245 struct method_t {
246 SEL name;
247 const char *types;
248 MethodListIMP imp;
249
250 struct SortBySELAddress :
251 public std::binary_function<const method_t&,
252 const method_t&, bool>
253 {
254 bool operator() (const method_t& lhs,
255 const method_t& rhs)
256 { return lhs.name < rhs.name; }
257 };
258 };
259
260 struct ivar_t {
261 #if __x86_64__
262 // *offset was originally 64-bit on some x86_64 platforms.
263 // We read and write only 32 bits of it.
264 // Some metadata provides all 64 bits. This is harmless for unsigned
265 // little-endian values.
266 // Some code uses all 64 bits. class_addIvar() over-allocates the
267 // offset for their benefit.
268 #endif
269 int32_t *offset;
270 const char *name;
271 const char *type;
272 // alignment is sometimes -1; use alignment() instead
273 uint32_t alignment_raw;
274 uint32_t size;
275
276 uint32_t alignment() const {
277 if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
278 return 1 << alignment_raw;
279 }
280 };
281
// One declared-property entry: name plus its @encode-style attribute string.
struct property_t {
    const char *name;
    const char *attributes;
};
286
287 // Two bits of entsize are used for fixup markers.
288 struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
289 bool isFixedUp() const;
290 void setFixedUp();
291
292 uint32_t indexOfMethod(const method_t *meth) const {
293 uint32_t i =
294 (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
295 assert(i < count);
296 return i;
297 }
298 };
299
300 struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
301 bool containsIvar(Ivar ivar) const {
302 return (ivar >= (Ivar)&*begin() && ivar < (Ivar)&*end());
303 }
304 };
305
// Plain property list; no flag bits are stashed in entsize.
struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};
308
309
typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// Values for protocol_t->flags
#define PROTOCOL_FIXED_UP_2     (1<<31)  // must never be set by compiler
#define PROTOCOL_FIXED_UP_1     (1<<30)  // must never be set by compiler
// Bits 0..15 are reserved for Swift's use.

#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
318
// Runtime representation of a protocol. Trailing fields may be absent
// in older binaries; `size` records how much of the struct is present
// on disk, and the hasXField() predicates gate access accordingly.
struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t) as emitted by the compiler
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **_extendedMethodTypes;
    const char *_demangledName;
    property_list_t *_classProperties;

    // Demangled (Swift-friendly) name; defined out of line.
    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;
    void setFixedUp();

// True if the on-disk struct is large enough to contain field f.
#   define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))

    bool hasExtendedMethodTypesField() const {
        return HAS_FIELD(_extendedMethodTypes);
    }
    bool hasDemangledNameField() const {
        return HAS_FIELD(_demangledName);
    }
    bool hasClassPropertiesField() const {
        return HAS_FIELD(_classProperties);
    }

#   undef HAS_FIELD

    const char **extendedMethodTypes() const {
        return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
    }

    property_list_t *classProperties() const {
        return hasClassPropertiesField() ? _classProperties : nil;
    }
};
365
// Variable-length list of (unremapped) protocol references.
struct protocol_list_t {
    // count is 64-bit by accident.
    uintptr_t count;
    protocol_ref_t list[0];  // variable-size

    size_t byteSize() const {
        return sizeof(*this) + count*sizeof(list[0]);
    }

    // Heap copy; caller owns the result.
    protocol_list_t *duplicate() const {
        return (protocol_list_t *)memdup(this, this->byteSize());
    }

    typedef protocol_ref_t* iterator;
    typedef const protocol_ref_t* const_iterator;

    const_iterator begin() const {
        return list;
    }
    iterator begin() {
        return list;
    }
    const_iterator end() const {
        return list + count;
    }
    iterator end() {
        return list + count;
    }
};
395
// A category paired with the image (header) it was loaded from.
struct locstamped_category_t {
    category_t *cat;
    struct header_info *hi;
};

// Variable-length list of locstamped categories.
struct locstamped_category_list_t {
    uint32_t count;
#if __LP64__
    uint32_t reserved;   // explicit padding so list[] stays pointer-aligned
#endif
    locstamped_category_t list[0];
};
408
409
// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.
// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
// class is a metaclass
#define RO_META               (1<<0)
// class is a root class
#define RO_ROOT               (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS  (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD    (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN             (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION          (1<<5)
// class has ro field for Swift metadata initializer callback
#define RO_HAS_SWIFT_INITIALIZER (1<<6)
// class compiled with ARC
#define RO_IS_ARC             (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY  (1<<8)
// class is not ARC but has ARC-style weak ivar layout
#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
// class does not allow associated objects on instances
#define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE        (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE             (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED           (1<<31)

// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.
// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED           (1<<31)
// class is unresolved future class
#define RW_FUTURE             (1<<30)
// class is initialized
#define RW_INITIALIZED        (1<<29)
// class is initializing
#define RW_INITIALIZING       (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO          (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING       (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED        (1<<25)
// available for use; was RW_FINALIZE_ON_MAIN_THREAD
// #define RW_24 (1<<24)
// class +load has been called
#define RW_LOADED             (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
// class does not allow associated objects on its instances
#define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING          (1<<19)

// NOTE: MORE RW_ FLAGS DEFINED BELOW

// Values for class_rw_t->flags or class_t->bits
// These flags are optimized for retain/release and alloc/dealloc
// 64-bit stores more of them in class_t->bits to reduce pointer indirection.

#if !__LP64__

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
#if SUPPORT_NONPOINTER_ISA
#define RW_REQUIRES_RAW_ISA   (1<<15)
#endif
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define RW_HAS_DEFAULT_RR     (1<<14)

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY  (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE  (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL

#elif 1
// Leaks-compatible version that steals low bits only.

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
#define RW_REQUIRES_RAW_ISA   (1<<15)

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY  (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE  (1UL<<1)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR   (1UL<<2)
// data pointer
#define FAST_DATA_MASK        0x00007ffffffffff8UL

#else
// Leaks-incompatible version that steals lots of bits.

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY  (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE  (1UL<<1)
// summary bit for fast alloc path: !hasCxxCtor and
//   !instancesRequireRawIsa and instanceSize fits into shiftedSize
#define FAST_ALLOC            (1UL<<2)
// data pointer
#define FAST_DATA_MASK        0x00007ffffffffff8UL
// class or superclass has .cxx_construct implementation
#define FAST_HAS_CXX_CTOR     (1UL<<47)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define FAST_HAS_DEFAULT_AWZ  (1UL<<48)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR   (1UL<<49)
// class's instances require raw isa
//   This bit is aligned with isa_t->hasCxxDtor to save an instruction.
#define FAST_REQUIRES_RAW_ISA (1UL<<50)
// class or superclass has .cxx_destruct implementation
#define FAST_HAS_CXX_DTOR     (1UL<<51)
// instance size in units of 16 bytes
//   or 0 if the instance size is too big in this field
//   This field must be LAST
#define FAST_SHIFTED_SIZE_SHIFT 52

// FAST_ALLOC means
//   FAST_HAS_CXX_CTOR is set
//   FAST_REQUIRES_RAW_ISA is not set
//   FAST_SHIFTED_SIZE is not zero
// FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that
// bit is stored on the metaclass.
#define FAST_ALLOC_MASK  (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA)
#define FAST_ALLOC_VALUE (0)

#endif

// The Swift ABI requires that these bits be defined like this on all platforms.
static_assert(FAST_IS_SWIFT_LEGACY == 1, "resistance is futile");
static_assert(FAST_IS_SWIFT_STABLE == 2, "resistance is futile");
577
// Compiler-emitted, read-only class data. The layout is ABI; a Swift
// metadata-initializer callback may optionally trail the struct
// (flexible array gated by RO_HAS_SWIFT_INITIALIZER).
struct class_ro_t {
    uint32_t flags;
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;
#endif

    const uint8_t * ivarLayout;

    const char * name;
    method_list_t * baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    // This field exists only when RO_HAS_SWIFT_INITIALIZER is set.
    _objc_swiftMetadataInitializer __ptrauth_objc_method_list_imp _swiftMetadataInitializer_NEVER_USE[0];

    // The Swift metadata initializer, or nil when the trailing field
    // is absent. Always go through this accessor; reading the flexible
    // array without the flag check would read past the struct.
    _objc_swiftMetadataInitializer swiftMetadataInitializer() const {
        if (flags & RO_HAS_SWIFT_INITIALIZER) {
            return _swiftMetadataInitializer_NEVER_USE[0];
        } else {
            return nil;
        }
    }

    method_list_t *baseMethods() const {
        return baseMethodList;
    }

    // Heap copy of this struct, including the optional trailing
    // initializer field when present. Caller owns the result.
    class_ro_t *duplicate() const {
        if (flags & RO_HAS_SWIFT_INITIALIZER) {
            size_t size = sizeof(*this) + sizeof(_swiftMetadataInitializer_NEVER_USE[0]);
            class_ro_t *ro = (class_ro_t *)memdup(this, size);
            // Re-store the callback so ptrauth re-signs it for the copy.
            ro->_swiftMetadataInitializer_NEVER_USE[0] = this->_swiftMetadataInitializer_NEVER_USE[0];
            return ro;
        } else {
            size_t size = sizeof(*this);
            class_ro_t *ro = (class_ro_t *)memdup(this, size);
            return ro;
        }
    }
};
624
625
/***********************************************************************
* list_array_tt<Element, List>
* Generic implementation for metadata that can be augmented by categories.
*
* Element is the underlying metadata type (e.g. method_t)
* List is the metadata's list type (e.g. method_list_t)
*
* A list_array_tt has one of three values:
* - empty
* - a pointer to a single list
* - an array of pointers to lists
*
* countLists/beginLists/endLists iterate the metadata lists
* count/begin/end iterate the underlying metadata elements
**********************************************************************/
template <typename Element, typename List>
class list_array_tt {
    // malloc'd variable-length array of list pointers.
    struct array_t {
        uint32_t count;
        List* lists[0];

        static size_t byteSize(uint32_t count) {
            return sizeof(array_t) + count*sizeof(lists[0]);
        }
        size_t byteSize() {
            return byteSize(count);
        }
    };

 protected:
    // Flattening iterator: walks every Element of every List in order.
    class iterator {
        List **lists;
        List **listsEnd;
        typename List::iterator m, mEnd;   // position within *lists

     public:
        iterator(List **begin, List **end)
            : lists(begin), listsEnd(end)
        {
            if (begin != end) {
                m = (*begin)->begin();
                mEnd = (*begin)->end();
            }
        }

        const Element& operator * () const {
            return *m;
        }
        Element& operator * () {
            return *m;
        }

        bool operator != (const iterator& rhs) const {
            if (lists != rhs.lists) return true;
            if (lists == listsEnd) return false;  // m is undefined
            if (m != rhs.m) return true;
            return false;
        }

        const iterator& operator ++ () {
            assert(m != mEnd);
            m++;
            // At the end of the current list, hop to the next one.
            if (m == mEnd) {
                assert(lists != listsEnd);
                lists++;
                if (lists != listsEnd) {
                    m = (*lists)->begin();
                    mEnd = (*lists)->end();
                }
            }
            return *this;
        }
    };

 private:
    // Tagged union: low bit of arrayAndFlag distinguishes the two forms.
    union {
        List* list;              // single-list form (or NULL for empty)
        uintptr_t arrayAndFlag;  // array_t* | 1 for the array form
    };

    bool hasArray() const {
        return arrayAndFlag & 1;
    }

    array_t *array() {
        return (array_t *)(arrayAndFlag & ~1);
    }

    void setArray(array_t *array) {
        arrayAndFlag = (uintptr_t)array | 1;
    }

 public:

    // Total number of Elements across all lists.
    uint32_t count() {
        uint32_t result = 0;
        for (auto lists = beginLists(), end = endLists();
             lists != end;
             ++lists)
        {
            result += (*lists)->count;
        }
        return result;
    }

    iterator begin() {
        return iterator(beginLists(), endLists());
    }

    iterator end() {
        List **e = endLists();
        return iterator(e, e);
    }


    uint32_t countLists() {
        if (hasArray()) {
            return array()->count;
        } else if (list) {
            return 1;
        } else {
            return 0;
        }
    }

    List** beginLists() {
        if (hasArray()) {
            return array()->lists;
        } else {
            return &list;
        }
    }

    List** endLists() {
        if (hasArray()) {
            return array()->lists + array()->count;
        } else if (list) {
            return &list + 1;
        } else {
            return &list;
        }
    }

    // Prepend addedLists (preserving their order) ahead of the existing
    // lists, promoting the representation as needed.
    void attachLists(List* const * addedLists, uint32_t addedCount) {
        if (addedCount == 0) return;

        if (hasArray()) {
            // many lists -> many lists
            uint32_t oldCount = array()->count;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
            array()->count = newCount;
            // Shift old lists up, then copy the new ones to the front.
            memmove(array()->lists + addedCount, array()->lists,
                    oldCount * sizeof(array()->lists[0]));
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
        else if (!list  &&  addedCount == 1) {
            // 0 lists -> 1 list
            list = addedLists[0];
        }
        else {
            // 1 list -> many lists
            List* oldList = list;
            uint32_t oldCount = oldList ? 1 : 0;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)malloc(array_t::byteSize(newCount)));
            array()->count = newCount;
            if (oldList) array()->lists[addedCount] = oldList;
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
    }

    // Free every malloc'd list and the array itself (try_free skips
    // non-malloc'd pointers — confirm in objc-runtime-new.mm).
    void tryFree() {
        if (hasArray()) {
            for (uint32_t i = 0; i < array()->count; i++) {
                try_free(array()->lists[i]);
            }
            try_free(array());
        }
        else if (list) {
            try_free(list);
        }
    }

    // Deep copy: duplicates the array (if any) and every list in it.
    template<typename Result>
    Result duplicate() {
        Result result;

        if (hasArray()) {
            array_t *a = array();
            result.setArray((array_t *)memdup(a, a->byteSize()));
            for (uint32_t i = 0; i < a->count; i++) {
                result.array()->lists[i] = a->lists[i]->duplicate();
            }
        } else if (list) {
            result.list = list->duplicate();
        } else {
            result.list = nil;
        }

        return result;
    }
};
831
832
// A class's method lists: base list plus lists attached by categories.
class method_array_t :
    public list_array_tt<method_t, method_list_t>
{
    typedef list_array_tt<method_t, method_list_t> Super;

 public:
    // Category lists start at the front of the list array; the cutoff
    // is computed out of line (see endCategoryMethodLists).
    method_list_t **beginCategoryMethodLists() {
        return beginLists();
    }

    method_list_t **endCategoryMethodLists(Class cls);

    method_array_t duplicate() {
        return Super::duplicate<method_array_t>();
    }
};
849
850
// A class's property lists (base plus categories).
class property_array_t :
    public list_array_tt<property_t, property_list_t>
{
    typedef list_array_tt<property_t, property_list_t> Super;

 public:
    property_array_t duplicate() {
        return Super::duplicate<property_array_t>();
    }
};
861
862
// A class's protocol lists (base plus categories).
class protocol_array_t :
    public list_array_tt<protocol_ref_t, protocol_list_t>
{
    typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;

 public:
    protocol_array_t duplicate() {
        return Super::duplicate<protocol_array_t>();
    }
};
873
874
875 struct class_rw_t {
876 // Be warned that Symbolication knows the layout of this structure.
877 uint32_t flags;
878 uint32_t version;
879
880 const class_ro_t *ro;
881
882 method_array_t methods;
883 property_array_t properties;
884 protocol_array_t protocols;
885
886 Class firstSubclass;
887 Class nextSiblingClass;
888
889 char *demangledName;
890
891 #if SUPPORT_INDEXED_ISA
892 uint32_t index;
893 #endif
894
895 void setFlags(uint32_t set)
896 {
897 OSAtomicOr32Barrier(set, &flags);
898 }
899
900 void clearFlags(uint32_t clear)
901 {
902 OSAtomicXor32Barrier(clear, &flags);
903 }
904
905 // set and clear must not overlap
906 void changeFlags(uint32_t set, uint32_t clear)
907 {
908 assert((set & clear) == 0);
909
910 uint32_t oldf, newf;
911 do {
912 oldf = flags;
913 newf = (oldf | set) & ~clear;
914 } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
915 }
916 };
917
918
919 struct class_data_bits_t {
920
921 // Values are the FAST_ flags above.
922 uintptr_t bits;
923 private:
924 bool getBit(uintptr_t bit)
925 {
926 return bits & bit;
927 }
928
929 #if FAST_ALLOC
930 // On entry, `newBits` is a bits value after setting and/or clearing
931 // the bits in `change`. Fix the fast-alloc parts of newBits if necessary
932 // and return the updated value.
933 static uintptr_t updateFastAlloc(uintptr_t newBits, uintptr_t change)
934 {
935 if (change & FAST_ALLOC_MASK) {
936 if (((newBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) &&
937 ((newBits >> FAST_SHIFTED_SIZE_SHIFT) != 0))
938 {
939 newBits |= FAST_ALLOC;
940 } else {
941 newBits &= ~FAST_ALLOC;
942 }
943 }
944 return newBits;
945 }
946 #else
947 static uintptr_t updateFastAlloc(uintptr_t newBits, uintptr_t change) {
948 return newBits;
949 }
950 #endif
951
952 // Atomically set the bits in `set` and clear the bits in `clear`.
953 // set and clear must not overlap.
954 void setAndClearBits(uintptr_t set, uintptr_t clear)
955 {
956 assert((set & clear) == 0);
957 uintptr_t oldBits;
958 uintptr_t newBits;
959 do {
960 oldBits = LoadExclusive(&bits);
961 newBits = updateFastAlloc((oldBits | set) & ~clear, set | clear);
962 } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
963 }
964
965 void setBits(uintptr_t set) {
966 setAndClearBits(set, 0);
967 }
968
969 void clearBits(uintptr_t clear) {
970 setAndClearBits(0, clear);
971 }
972
973 public:
974
975 class_rw_t* data() {
976 return (class_rw_t *)(bits & FAST_DATA_MASK);
977 }
978 void setData(class_rw_t *newData)
979 {
980 assert(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE)));
981 // Set during realization or construction only. No locking needed.
982 // Use a store-release fence because there may be concurrent
983 // readers of data and data's contents.
984 uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
985 atomic_thread_fence(memory_order_release);
986 bits = newBits;
987 }
988
989 // Get the class's ro data, even in the presence of concurrent realization.
990 // fixme this isn't really safe without a compiler barrier at least
991 // and probably a memory barrier when realizeClass changes the data field
992 const class_ro_t *safe_ro() {
993 class_rw_t *maybe_rw = data();
994 if (maybe_rw->flags & RW_REALIZED) {
995 // maybe_rw is rw
996 return maybe_rw->ro;
997 } else {
998 // maybe_rw is actually ro
999 return (class_ro_t *)maybe_rw;
1000 }
1001 }
1002
1003 #if FAST_HAS_DEFAULT_RR
1004 bool hasDefaultRR() {
1005 return getBit(FAST_HAS_DEFAULT_RR);
1006 }
1007 void setHasDefaultRR() {
1008 setBits(FAST_HAS_DEFAULT_RR);
1009 }
1010 void setHasCustomRR() {
1011 clearBits(FAST_HAS_DEFAULT_RR);
1012 }
1013 #else
1014 bool hasDefaultRR() {
1015 return data()->flags & RW_HAS_DEFAULT_RR;
1016 }
1017 void setHasDefaultRR() {
1018 data()->setFlags(RW_HAS_DEFAULT_RR);
1019 }
1020 void setHasCustomRR() {
1021 data()->clearFlags(RW_HAS_DEFAULT_RR);
1022 }
1023 #endif
1024
1025 #if FAST_HAS_DEFAULT_AWZ
1026 bool hasDefaultAWZ() {
1027 return getBit(FAST_HAS_DEFAULT_AWZ);
1028 }
1029 void setHasDefaultAWZ() {
1030 setBits(FAST_HAS_DEFAULT_AWZ);
1031 }
1032 void setHasCustomAWZ() {
1033 clearBits(FAST_HAS_DEFAULT_AWZ);
1034 }
1035 #else
1036 bool hasDefaultAWZ() {
1037 return data()->flags & RW_HAS_DEFAULT_AWZ;
1038 }
1039 void setHasDefaultAWZ() {
1040 data()->setFlags(RW_HAS_DEFAULT_AWZ);
1041 }
1042 void setHasCustomAWZ() {
1043 data()->clearFlags(RW_HAS_DEFAULT_AWZ);
1044 }
1045 #endif
1046
1047 #if FAST_HAS_CXX_CTOR
1048 bool hasCxxCtor() {
1049 return getBit(FAST_HAS_CXX_CTOR);
1050 }
1051 void setHasCxxCtor() {
1052 setBits(FAST_HAS_CXX_CTOR);
1053 }
1054 #else
1055 bool hasCxxCtor() {
1056 return data()->flags & RW_HAS_CXX_CTOR;
1057 }
1058 void setHasCxxCtor() {
1059 data()->setFlags(RW_HAS_CXX_CTOR);
1060 }
1061 #endif
1062
1063 #if FAST_HAS_CXX_DTOR
1064 bool hasCxxDtor() {
1065 return getBit(FAST_HAS_CXX_DTOR);
1066 }
1067 void setHasCxxDtor() {
1068 setBits(FAST_HAS_CXX_DTOR);
1069 }
1070 #else
1071 bool hasCxxDtor() {
1072 return data()->flags & RW_HAS_CXX_DTOR;
1073 }
1074 void setHasCxxDtor() {
1075 data()->setFlags(RW_HAS_CXX_DTOR);
1076 }
1077 #endif
1078
1079 #if FAST_REQUIRES_RAW_ISA
1080 bool instancesRequireRawIsa() {
1081 return getBit(FAST_REQUIRES_RAW_ISA);
1082 }
1083 void setInstancesRequireRawIsa() {
1084 setBits(FAST_REQUIRES_RAW_ISA);
1085 }
1086 #elif SUPPORT_NONPOINTER_ISA
1087 bool instancesRequireRawIsa() {
1088 return data()->flags & RW_REQUIRES_RAW_ISA;
1089 }
1090 void setInstancesRequireRawIsa() {
1091 data()->setFlags(RW_REQUIRES_RAW_ISA);
1092 }
1093 #else
1094 bool instancesRequireRawIsa() {
1095 return true;
1096 }
1097 void setInstancesRequireRawIsa() {
1098 // nothing
1099 }
1100 #endif
1101
#if FAST_ALLOC
    // Instance size cached in the high bits of 'bits', stored in
    // 16-byte units. Valid only while FAST_ALLOC is set.
    size_t fastInstanceSize()
    {
        assert(bits & FAST_ALLOC);
        return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16;
    }
    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        assert(data()->flags & RW_REALIZING);

        // Round up to 16-byte boundary, then divide to get 16-byte units
        newSize = ((newSize + 15) & ~15) / 16;

        uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT;
        if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) {
            // The size fits in the available high bits.
            // (bits << shift) >> shift zeroes the old size field,
            // keeping only the low FAST_SHIFTED_SIZE_SHIFT bits.
            int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT;
            uintptr_t oldBits = (bits << shift) >> shift;
            if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) {
                newBits |= FAST_ALLOC;
            }
            bits = oldBits | newBits;
        }
        // If the size does not fit, 'bits' is left unchanged;
        // presumably allocation then takes the non-fast path since
        // FAST_ALLOC was never set — TODO confirm against callers.
    }

    bool canAllocFast() {
        return bits & FAST_ALLOC;
    }
#else
    // No fast-alloc support on this target: the cached size must
    // never be read, and the fast path is permanently disabled.
    size_t fastInstanceSize() {
        abort();
    }
    void setFastInstanceSize(size_t) {
        // nothing
    }
    bool canAllocFast() {
        return false;
    }
#endif
1141
    // Record this class's slot in the global class table.
    // Only meaningful when indexed isas are supported; otherwise a no-op.
    void setClassArrayIndex(unsigned Idx) {
#if SUPPORT_INDEXED_ISA
        // 0 is unused as then we can rely on zero-initialisation from calloc.
        assert(Idx > 0);
        data()->index = Idx;
#endif
    }
1149
    // This class's slot in the global class table, or 0 when indexed
    // isas are unsupported (0 is the reserved "no index" value above).
    unsigned classArrayIndex() {
#if SUPPORT_INDEXED_ISA
        return data()->index;
#else
        return 0;
#endif
    }
1157
    // True for any Swift class, whether stable or legacy (pre-stable) ABI.
    bool isAnySwift() {
        return isSwiftStable() || isSwiftLegacy();
    }

    // Swift class using the stable Swift ABI.
    bool isSwiftStable() {
        return getBit(FAST_IS_SWIFT_STABLE);
    }
    void setIsSwiftStable() {
        // The two Swift bits are mutually exclusive: setting one
        // clears the other atomically.
        setAndClearBits(FAST_IS_SWIFT_STABLE, FAST_IS_SWIFT_LEGACY);
    }

    // Swift class using the legacy (pre-stable) Swift ABI.
    bool isSwiftLegacy() {
        return getBit(FAST_IS_SWIFT_LEGACY);
    }
    void setIsSwiftLegacy() {
        setAndClearBits(FAST_IS_SWIFT_LEGACY, FAST_IS_SWIFT_STABLE);
    }

    // fixme remove this once the Swift runtime uses the stable bits
    bool isSwiftStable_ButAllowLegacyForNow() {
        return isAnySwift();
    }

    _objc_swiftMetadataInitializer swiftMetadataInitializer() {
        // This function is called on un-realized classes without
        // holding any locks.
        // Beware of races with other realizers.
        return safe_ro()->swiftMetadataInitializer();
    }
1187 };
1188
1189
// The runtime's internal class representation. Inherits 'isa' from
// objc_object; field order here is ABI (see swift_class_t, which
// extends this layout, and the (&bits + 1) access below).
struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;              // formerly cache pointer and vtable
    class_data_bits_t bits;    // class_rw_t * plus custom rr/alloc flags

    // Accessors for the class's read-write data (class_rw_t),
    // stored inside 'bits'.
    class_rw_t *data() {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }

    // Set flag bits in class_rw_t::flags.
    void setInfo(uint32_t set) {
        assert(isFuture() || isRealized());
        data()->setFlags(set);
    }

    // Clear flag bits in class_rw_t::flags.
    void clearInfo(uint32_t clear) {
        assert(isFuture() || isRealized());
        data()->clearFlags(clear);
    }

    // set and clear must not overlap
    void changeInfo(uint32_t set, uint32_t clear) {
        assert(isFuture() || isRealized());
        assert((set & clear) == 0);
        data()->changeFlags(set, clear);
    }

    // RR = retain/release (per flag naming). "Custom" is simply the
    // inverse of the default bit kept in the class bits.
    bool hasCustomRR() {
        return ! bits.hasDefaultRR();
    }
    void setHasDefaultRR() {
        assert(isInitializing());
        bits.setHasDefaultRR();
    }
    void setHasCustomRR(bool inherited = false);
    void printCustomRR(bool inherited);

    // AWZ = +alloc/+allocWithZone: (per flag naming); same pattern as RR.
    bool hasCustomAWZ() {
        return ! bits.hasDefaultAWZ();
    }
    void setHasDefaultAWZ() {
        assert(isInitializing());
        bits.setHasDefaultAWZ();
    }
    void setHasCustomAWZ(bool inherited = false);
    void printCustomAWZ(bool inherited);

    bool instancesRequireRawIsa() {
        return bits.instancesRequireRawIsa();
    }
    void setInstancesRequireRawIsa(bool inherited = false);
    void printInstancesRequireRawIsa(bool inherited);

    // Non-pointer isa is allowed exactly when raw isa is not required.
    bool canAllocNonpointer() {
        assert(!isFuture());
        return !instancesRequireRawIsa();
    }
    bool canAllocFast() {
        assert(!isFuture());
        return bits.canAllocFast();
    }


    bool hasCxxCtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxCtor();
    }
    void setHasCxxCtor() {
        bits.setHasCxxCtor();
    }

    bool hasCxxDtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxDtor();
    }
    void setHasCxxDtor() {
        bits.setHasCxxDtor();
    }


    // Swift ABI queries, all forwarded to the class bits.
    bool isSwiftStable() {
        return bits.isSwiftStable();
    }

    bool isSwiftLegacy() {
        return bits.isSwiftLegacy();
    }

    bool isAnySwift() {
        return bits.isAnySwift();
    }

    bool isSwiftStable_ButAllowLegacyForNow() {
        return bits.isSwiftStable_ButAllowLegacyForNow();
    }

    // Swift stable ABI built for old deployment targets looks weird.
    // The is-legacy bit is set for compatibility with old libobjc.
    // We are on a "new" deployment target so we need to rewrite that bit.
    // These stable-with-legacy-bit classes are distinguished from real
    // legacy classes using another bit in the Swift data
    // (ClassFlags::IsSwiftPreStableABI)

    bool isUnfixedBackwardDeployingStableSwift() {
        // Only classes marked as Swift legacy need apply.
        if (!bits.isSwiftLegacy()) return false;

        // Check the true legacy vs stable distinguisher.
        // The low bit of Swift's ClassFlags is SET for true legacy
        // and UNSET for stable pretending to be legacy.
        // (&bits + 1) is the first word after the objc_class fields,
        // i.e. swift_class_t::flags — valid only for Swift classes.
        uint32_t swiftClassFlags = *(uint32_t *)(&bits + 1);
        bool isActuallySwiftLegacy = bool(swiftClassFlags & 1);
        return !isActuallySwiftLegacy;
    }

    void fixupBackwardDeployingStableSwift() {
        if (isUnfixedBackwardDeployingStableSwift()) {
            // Class really is stable Swift, pretending to be pre-stable.
            // Fix its lie.
            bits.setIsSwiftStable();
        }
    }

    _objc_swiftMetadataInitializer swiftMetadataInitializer() {
        return bits.swiftMetadataInitializer();
    }

    // Return YES if the class's ivars are managed by ARC,
    // or the class is MRC but has ARC-style weak ivars.
    bool hasAutomaticIvars() {
        return data()->ro->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC);
    }

    // Return YES if the class's ivars are managed by ARC.
    bool isARC() {
        return data()->ro->flags & RO_IS_ARC;
    }


    bool forbidsAssociatedObjects() {
        return (data()->flags & RW_FORBIDS_ASSOCIATED_OBJECTS);
    }

#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture() || isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture() || isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif

    bool shouldGrowCache() {
        return true;
    }

    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }

    // +initialize state is recorded on the metaclass, hence getMeta().
    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    void setInitializing() {
        assert(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();

    bool isLoadable() {
        assert(isRealized());
        return true;  // any class registered for +load is definitely loadable
    }

    IMP getLoadMethod();

    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() {
        return data()->flags & RW_REALIZED;
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() {
        return data()->flags & RW_FUTURE;
    }

    bool isMetaClass() {
        // NOTE(review): asserting on 'this' relies on a null receiver
        // reaching a member function, which is formally UB in C++ and
        // may be optimized away — confirm intent.
        assert(this);
        assert(isRealized());
        return data()->ro->flags & RO_META;
    }

    // Like isMetaClass, but also valid on un-realized classes
    bool isMetaClassMaybeUnrealized() {
        return bits.safe_ro()->flags & RO_META;
    }

    // NOT identical to this->ISA when this is a metaclass
    Class getMeta() {
        if (isMetaClass()) return (Class)this;
        else return this->ISA();
    }

    bool isRootClass() {
        return superclass == nil;
    }
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }

    const char *mangledName() {
        // fixme can't assert locks here
        assert(this);

        if (isRealized() || isFuture()) {
            return data()->ro->name;
        } else {
            // Un-realized class: data() still points at the on-disk
            // class_ro_t rather than a class_rw_t.
            return ((const class_ro_t *)data())->name;
        }
    }

    const char *demangledName();
    const char *nameForLogging();

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceStart() {
        assert(isRealized());
        return data()->ro->instanceStart;
    }

    // Class's instance start rounded up to a pointer-size boundary.
    // This is used for ARC layout bitmaps.
    uint32_t alignedInstanceStart() {
        return word_align(unalignedInstanceStart());
    }

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() {
        assert(isRealized());
        return data()->ro->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() {
        return word_align(unalignedInstanceSize());
    }

    // Allocation size for one instance plus extraBytes of trailing space.
    size_t instanceSize(size_t extraBytes) {
        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }

    void setInstanceSize(uint32_t newSize) {
        assert(isRealized());
        if (newSize != data()->ro->instanceSize) {
            // ro is nominally read-only; writing it is permitted only
            // after the runtime has made its own copy (RW_COPIED_RO).
            assert(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
        }
        // Keep the fast-alloc size cached in 'bits' in sync.
        bits.setFastInstanceSize(newSize);
    }

    void chooseClassArrayIndex();

    void setClassArrayIndex(unsigned Idx) {
        bits.setClassArrayIndex(Idx);
    }

    unsigned classArrayIndex() {
        return bits.classArrayIndex();
    }

};
1483
1484
// Swift class metadata as laid out after the objc_class fields.
// Field order/sizes presumably mirror the Swift runtime's class
// metadata layout — do not reorder. The low bit of 'flags'
// distinguishes true-legacy from stable-pretending-to-be-legacy
// classes (see isUnfixedBackwardDeployingStableSwift).
struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;
    // ...

    // Start of the full metadata allocation: this object sits
    // classAddressOffset bytes past its base address.
    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};
1501
1502
1503 struct category_t {
1504 const char *name;
1505 classref_t cls;
1506 struct method_list_t *instanceMethods;
1507 struct method_list_t *classMethods;
1508 struct protocol_list_t *protocols;
1509 struct property_list_t *instanceProperties;
1510 // Fields below this point are not always present on disk.
1511 struct property_list_t *_classProperties;
1512
1513 method_list_t *methodsForMeta(bool isMeta) {
1514 if (isMeta) return classMethods;
1515 else return instanceMethods;
1516 }
1517
1518 property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);
1519 };
1520
// Argument block for the objc_msgSendSuper2 family.
// NOTE(review): 'current_class' appears to be the class the calling
// method belongs to (the "_2" variants look up its superclass at
// dispatch time) rather than the superclass itself — confirm against
// the objc-msg-* sources.
struct objc_super2 {
    id receiver;
    Class current_class;
};
1525
// An (IMP, SEL) pair. NOTE(review): presumably the patchable message
// reference used by the fixup-dispatch machinery (objc_msgSend_fixup)
// — confirm against callers.
struct message_ref_t {
    IMP imp;
    SEL sel;
};
1530
1531
1532 extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
1533
// Visit 'top' and every realized subclass beneath it, depth-first and
// without recursion, calling 'code' on each. Stops early if 'code'
// returns false. 'count' is a caller-supplied iteration budget,
// decremented in place (so it can be shared across calls); exhausting
// it is treated as a corrupted class list and aborts the process.
static inline void
foreach_realized_class_and_subclass_2(Class top, unsigned& count,
                                      std::function<bool (Class)> code)
{
    // runtimeLock.assertLocked();
    assert(top);
    Class cls = top;
    while (1) {
        // NOTE(review): a budget of 0 on entry would wrap to UINT_MAX
        // here; callers are expected to pass a positive count.
        if (--count == 0) {
            _objc_fatal("Memory corruption in class list.");
        }
        if (!code(cls)) break;

        if (cls->data()->firstSubclass) {
            // Descend to the first child.
            cls = cls->data()->firstSubclass;
        } else {
            // Leaf: climb toward 'top' until some ancestor has an
            // unvisited sibling, then cross to that sibling.
            while (!cls->data()->nextSiblingClass && cls != top) {
                cls = cls->superclass;
                if (--count == 0) {
                    _objc_fatal("Memory corruption in class list.");
                }
            }
            if (cls == top) break;
            cls = cls->data()->nextSiblingClass;
        }
    }
}
1561
1562 extern Class firstRealizedClass();
1563 extern unsigned int unreasonableClassCount();
1564
1565 // Enumerates a class and all of its realized subclasses.
1566 static inline void
1567 foreach_realized_class_and_subclass(Class top,
1568 std::function<void (Class)> code)
1569 {
1570 unsigned int count = unreasonableClassCount();
1571
1572 foreach_realized_class_and_subclass_2(top, count,
1573 [&code](Class cls) -> bool
1574 {
1575 code(cls);
1576 return true;
1577 });
1578 }
1579
1580 // Enumerates all realized classes and metaclasses.
1581 static inline void
1582 foreach_realized_class_and_metaclass(std::function<void (Class)> code)
1583 {
1584 unsigned int count = unreasonableClassCount();
1585
1586 for (Class top = firstRealizedClass();
1587 top != nil;
1588 top = top->data()->nextSiblingClass)
1589 {
1590 foreach_realized_class_and_subclass_2(top, count,
1591 [&code](Class cls) -> bool
1592 {
1593 code(cls);
1594 return true;
1595 });
1596 }
1597
1598 }
1599
1600 #endif