/*
 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H

#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bit masks
#else
typedef uint16_t mask_t;
#endif
typedef uintptr_t cache_key_t;

struct swift_class_t;


struct bucket_t {
private:
    // IMP-first is better for arm64e ptrauth and no worse for arm64.
    // SEL-first is better for armv7* and i386 and x86_64.
#if __arm64__
    MethodCacheIMP _imp;
    cache_key_t _key;
#else
    cache_key_t _key;
    MethodCacheIMP _imp;
#endif

public:
    inline cache_key_t key() const { return _key; }
    inline IMP imp() const { return (IMP)_imp; }
    inline void setKey(cache_key_t newKey) { _key = newKey; }
    inline void setImp(IMP newImp) { _imp = newImp; }

    void set(cache_key_t newKey, IMP newImp);
};
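
// Illustrative sketch (editor's addition, not part of the original header):
// a cache key is just the selector pointer, and the method cache is an
// open-addressing hash table whose capacity is a power of two, so hashing
// reduces to a mask. The real helpers live in objc-cache.mm; the example_
// name here is hypothetical.
static inline mask_t example_cache_hash(cache_key_t key, mask_t mask)
{
    return (mask_t)(key & mask);  // mask == capacity-1 for a power-of-two table
}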

struct cache_t {
    struct bucket_t *_buckets;
    mask_t _mask;
    mask_t _occupied;

public:
    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    mask_t capacity();
    bool isConstantEmptyCache();
    bool canBeFreed();

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void expand();
    void reallocate(mask_t oldCapacity, mask_t newCapacity);
    struct bucket_t * find(cache_key_t key, id receiver);

    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn));
};
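
// Sketch (editor's addition) of how cache_t::find() probes the table; the
// real implementation lives in objc-cache.mm and its probe direction varies
// by architecture. The example_ names are hypothetical.
static inline bucket_t *example_cache_find(cache_t *cache, cache_key_t key)
{
    bucket_t *b = cache->buckets();
    mask_t m = cache->mask();
    mask_t begin = example_cache_hash(key, m);
    mask_t i = begin;
    do {
        // An empty slot (key 0) or a matching key ends the probe.
        if (b[i].key() == 0  ||  b[i].key() == key) {
            return &b[i];
        }
        i = (i+1) & m;  // wrap around the power-of-two table
    } while (i != begin);
    return nil;  // table full/corrupt; the real code calls bad_cache()
}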

// classref_t is unremapped class_t*
typedef struct classref * classref_t;

/***********************************************************************
* entsize_list_tt<Element, List, FlagMask>
* Generic implementation of an array of non-fragile structs.
*
* Element is the struct type (e.g. method_t)
* List is the specialization of entsize_list_tt (e.g. method_list_t)
* FlagMask is used to stash extra bits in the entsize field
*   (e.g. method list fixup markers)
**********************************************************************/
template <typename Element, typename List, uint32_t FlagMask>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;
    uint32_t count;
    Element first;

    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    Element& getOrEnd(uint32_t i) const {
        assert(i <= count);
        return *(Element *)((uint8_t *)&first + i*entsize());
    }
    Element& get(uint32_t i) const {
        assert(i < count);
        return getOrEnd(i);
    }

    size_t byteSize() const {
        return byteSize(entsize(), count);
    }

    static size_t byteSize(uint32_t entsize, uint32_t count) {
        return sizeof(entsize_list_tt) + (count-1)*entsize;
    }

    List *duplicate() const {
        auto *dup = (List *)calloc(this->byteSize(), 1);
        dup->entsizeAndFlags = this->entsizeAndFlags;
        dup->count = this->count;
        std::copy(begin(), end(), dup->begin());
        return dup;
    }

    struct iterator;
    const iterator begin() const {
        return iterator(*static_cast<const List*>(this), 0);
    }
    iterator begin() {
        return iterator(*static_cast<const List*>(this), 0);
    }
    const iterator end() const {
        return iterator(*static_cast<const List*>(this), count);
    }
    iterator end() {
        return iterator(*static_cast<const List*>(this), count);
    }

    struct iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};
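
// Usage sketch (editor's addition): entsize lists are walked by byte stride
// (entsize()) rather than by Element[] indexing, so a newer compiler can
// append fields to Element without breaking older readers. For example,
// iterating a method_list_t (declared below) visits each method_t in place:
//
//   for (auto& meth : *mlist) {
//       // meth.name, meth.types, meth.imp ...
//   }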


struct method_t {
    SEL name;
    const char *types;
    MethodListIMP imp;

    struct SortBySELAddress :
        public std::binary_function<const method_t&,
                                    const method_t&, bool>
    {
        bool operator() (const method_t& lhs,
                         const method_t& rhs)
        { return lhs.name < rhs.name; }
    };
};

struct ivar_t {
#if __x86_64__
    // *offset was originally 64-bit on some x86_64 platforms.
    // We read and write only 32 bits of it.
    // Some metadata provides all 64 bits. This is harmless for unsigned
    // little-endian values.
    // Some code uses all 64 bits. class_addIvar() over-allocates the
    // offset for their benefit.
#endif
    int32_t *offset;
    const char *name;
    const char *type;
    // alignment is sometimes -1; use alignment() instead
    uint32_t alignment_raw;
    uint32_t size;

    uint32_t alignment() const {
        if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
        return 1 << alignment_raw;
    }
};
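
// Worked example (editor's addition): alignment_raw stores log2(alignment),
// so alignment_raw == 3 means 1<<3 == 8-byte alignment, and the ~0 sentinel
// ("-1") falls back to word alignment (1<<WORD_SHIFT, i.e. 8 bytes on LP64).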

struct property_t {
    const char *name;
    const char *attributes;
};

// Two bits of entsize are used for fixup markers.
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
    bool isFixedUp() const;
    void setFixedUp();

    uint32_t indexOfMethod(const method_t *meth) const {
        uint32_t i =
            (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
        assert(i < count);
        return i;
    }
};

struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
    bool containsIvar(Ivar ivar) const {
        return (ivar >= (Ivar)&*begin()  &&  ivar < (Ivar)&*end());
    }
};

struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};


typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// Values for protocol_t->flags
#define PROTOCOL_FIXED_UP_2 (1<<31)  // must never be set by compiler
#define PROTOCOL_FIXED_UP_1 (1<<30)  // must never be set by compiler
// Bits 0..15 are reserved for Swift's use.

#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)

struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t)
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **_extendedMethodTypes;
    const char *_demangledName;
    property_list_t *_classProperties;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;
    void setFixedUp();

#   define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))

    bool hasExtendedMethodTypesField() const {
        return HAS_FIELD(_extendedMethodTypes);
    }
    bool hasDemangledNameField() const {
        return HAS_FIELD(_demangledName);
    }
    bool hasClassPropertiesField() const {
        return HAS_FIELD(_classProperties);
    }

#   undef HAS_FIELD

    const char **extendedMethodTypes() const {
        return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
    }

    property_list_t *classProperties() const {
        return hasClassPropertiesField() ? _classProperties : nil;
    }
};
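
// Worked example (editor's addition): protocol_t grew trailing fields over
// time, so metadata emitted by an older compiler may end right after `flags`
// (72 bytes on LP64). The HAS_FIELD checks above compare the recorded `size`
// against offsetof(field) + sizeof(field) before touching a trailing field,
// which is why extendedMethodTypes() and classProperties() return nil for
// old binaries instead of reading past the end of the metadata.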

struct protocol_list_t {
    // count is 64-bit by accident.
    uintptr_t count;
    protocol_ref_t list[0];  // variable-size

    size_t byteSize() const {
        return sizeof(*this) + count*sizeof(list[0]);
    }

    protocol_list_t *duplicate() const {
        return (protocol_list_t *)memdup(this, this->byteSize());
    }

    typedef protocol_ref_t* iterator;
    typedef const protocol_ref_t* const_iterator;

    const_iterator begin() const {
        return list;
    }
    iterator begin() {
        return list;
    }
    const_iterator end() const {
        return list + count;
    }
    iterator end() {
        return list + count;
    }
};

struct locstamped_category_t {
    category_t *cat;
    struct header_info *hi;
};

struct locstamped_category_list_t {
    uint32_t count;
#if __LP64__
    uint32_t reserved;
#endif
    locstamped_category_t list[0];
};


// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.
// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
// class is a metaclass
#define RO_META               (1<<0)
// class is a root class
#define RO_ROOT               (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS  (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD    (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN             (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION          (1<<5)
// this bit is available for reassignment
// #define RO_REUSE_ME           (1<<6)
// class compiled with ARC
#define RO_IS_ARC             (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY  (1<<8)
// class is not ARC but has ARC-style weak ivar layout
#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE        (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE             (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED           (1<<31)

// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.
// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED           (1<<31)
// class is unresolved future class
#define RW_FUTURE             (1<<30)
// class is initialized
#define RW_INITIALIZED        (1<<29)
// class is initializing
#define RW_INITIALIZING       (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO          (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING       (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED        (1<<25)
// available for use; was RW_FINALIZE_ON_MAIN_THREAD
// #define RW_24 (1<<24)
// class +load has been called
#define RW_LOADED             (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
// available for use
// #define RW_20       (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING          (1<<19)

// NOTE: MORE RW_ FLAGS DEFINED BELOW


// Values for class_rw_t->flags or class_t->bits
// These flags are optimized for retain/release and alloc/dealloc
// 64-bit stores more of them in class_t->bits to reduce pointer indirection.

#if !__LP64__

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
#if SUPPORT_NONPOINTER_ISA
#define RW_REQUIRES_RAW_ISA   (1<<15)
#endif
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define RW_HAS_DEFAULT_RR     (1<<14)

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY  (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE  (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL

#elif 1
// Leaks-compatible version that steals low bits only.

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
#define RW_REQUIRES_RAW_ISA   (1<<15)

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY    (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE    (1UL<<1)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL

#else
// Leaks-incompatible version that steals lots of bits.

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY    (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE    (1UL<<1)
// summary bit for fast alloc path: !hasCxxCtor and
//   !instancesRequireRawIsa and instanceSize fits into shiftedSize
#define FAST_ALLOC              (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL
// class or superclass has .cxx_construct implementation
#define FAST_HAS_CXX_CTOR       (1UL<<47)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define FAST_HAS_DEFAULT_AWZ    (1UL<<48)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<49)
// class's instances require raw isa
// This bit is aligned with isa_t->hasCxxDtor to save an instruction.
#define FAST_REQUIRES_RAW_ISA   (1UL<<50)
// class or superclass has .cxx_destruct implementation
#define FAST_HAS_CXX_DTOR       (1UL<<51)
// instance size in units of 16 bytes
//   or 0 if the instance size is too big to fit in this field
// This field must be LAST
#define FAST_SHIFTED_SIZE_SHIFT 52

// FAST_ALLOC means
//   FAST_HAS_CXX_CTOR is not set
//   FAST_REQUIRES_RAW_ISA is not set
//   FAST_SHIFTED_SIZE is not zero
// FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that
// bit is stored on the metaclass.
#define FAST_ALLOC_MASK  (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA)
#define FAST_ALLOC_VALUE (0)

#endif

// The Swift ABI requires that these bits be defined like this on all platforms.
static_assert(FAST_IS_SWIFT_LEGACY == 1, "resistance is futile");
static_assert(FAST_IS_SWIFT_STABLE == 2, "resistance is futile");
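
// Sketch (editor's addition): one class_data_bits_t word carries both the
// class_rw_t pointer and the FAST_ flags. class_rw_t allocations are at
// least 8-byte aligned (and 64-bit pointers don't use the high bits), so
// the pointer survives the masking:
//
//   class_rw_t *data = (class_rw_t *)(bits & FAST_DATA_MASK);
//   bool swiftStable = bits & FAST_IS_SWIFT_STABLE;   // low flag bit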


struct class_ro_t {
    uint32_t flags;
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;
#endif

    const uint8_t * ivarLayout;

    const char * name;
    method_list_t * baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    method_list_t *baseMethods() const {
        return baseMethodList;
    }
};


/***********************************************************************
* list_array_tt<Element, List>
* Generic implementation for metadata that can be augmented by categories.
*
* Element is the underlying metadata type (e.g. method_t)
* List is the metadata's list type (e.g. method_list_t)
*
* A list_array_tt has one of three values:
* - empty
* - a pointer to a single list
* - an array of pointers to lists
*
* countLists/beginLists/endLists iterate the metadata lists
* count/begin/end iterate the underlying metadata elements
**********************************************************************/
template <typename Element, typename List>
class list_array_tt {
    struct array_t {
        uint32_t count;
        List* lists[0];

        static size_t byteSize(uint32_t count) {
            return sizeof(array_t) + count*sizeof(lists[0]);
        }
        size_t byteSize() {
            return byteSize(count);
        }
    };

 protected:
    class iterator {
        List **lists;
        List **listsEnd;
        typename List::iterator m, mEnd;

     public:
        iterator(List **begin, List **end)
            : lists(begin), listsEnd(end)
        {
            if (begin != end) {
                m = (*begin)->begin();
                mEnd = (*begin)->end();
            }
        }

        const Element& operator * () const {
            return *m;
        }
        Element& operator * () {
            return *m;
        }

        bool operator != (const iterator& rhs) const {
            if (lists != rhs.lists) return true;
            if (lists == listsEnd) return false;  // m is undefined
            if (m != rhs.m) return true;
            return false;
        }

        const iterator& operator ++ () {
            assert(m != mEnd);
            m++;
            if (m == mEnd) {
                assert(lists != listsEnd);
                lists++;
                if (lists != listsEnd) {
                    m = (*lists)->begin();
                    mEnd = (*lists)->end();
                }
            }
            return *this;
        }
    };

 private:
    union {
        List* list;
        uintptr_t arrayAndFlag;
    };

    bool hasArray() const {
        return arrayAndFlag & 1;
    }

    array_t *array() {
        return (array_t *)(arrayAndFlag & ~1);
    }

    void setArray(array_t *array) {
        arrayAndFlag = (uintptr_t)array | 1;
    }

 public:

    uint32_t count() {
        uint32_t result = 0;
        for (auto lists = beginLists(), end = endLists();
             lists != end;
             ++lists)
        {
            result += (*lists)->count;
        }
        return result;
    }

    iterator begin() {
        return iterator(beginLists(), endLists());
    }

    iterator end() {
        List **e = endLists();
        return iterator(e, e);
    }


    uint32_t countLists() {
        if (hasArray()) {
            return array()->count;
        } else if (list) {
            return 1;
        } else {
            return 0;
        }
    }

    List** beginLists() {
        if (hasArray()) {
            return array()->lists;
        } else {
            return &list;
        }
    }

    List** endLists() {
        if (hasArray()) {
            return array()->lists + array()->count;
        } else if (list) {
            return &list + 1;
        } else {
            return &list;
        }
    }

    void attachLists(List* const * addedLists, uint32_t addedCount) {
        if (addedCount == 0) return;

        if (hasArray()) {
            // many lists -> many lists
            uint32_t oldCount = array()->count;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
            array()->count = newCount;
            memmove(array()->lists + addedCount, array()->lists,
                    oldCount * sizeof(array()->lists[0]));
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
        else if (!list  &&  addedCount == 1) {
            // 0 lists -> 1 list
            list = addedLists[0];
        }
        else {
            // 1 list -> many lists
            List* oldList = list;
            uint32_t oldCount = oldList ? 1 : 0;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)malloc(array_t::byteSize(newCount)));
            array()->count = newCount;
            if (oldList) array()->lists[addedCount] = oldList;
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
    }

    void tryFree() {
        if (hasArray()) {
            for (uint32_t i = 0; i < array()->count; i++) {
                try_free(array()->lists[i]);
            }
            try_free(array());
        }
        else if (list) {
            try_free(list);
        }
    }

    template<typename Result>
    Result duplicate() {
        Result result;

        if (hasArray()) {
            array_t *a = array();
            result.setArray((array_t *)memdup(a, a->byteSize()));
            for (uint32_t i = 0; i < a->count; i++) {
                result.array()->lists[i] = a->lists[i]->duplicate();
            }
        } else if (list) {
            result.list = list->duplicate();
        } else {
            result.list = nil;
        }

        return result;
    }
};
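
// Usage sketch (editor's addition, hypothetical values): attaching category
// method lists and then iterating every element across all lists:
//
//   method_array_t methods;              // zero-filled == empty
//   methods.attachLists(mlists, n);      // added lists are placed first
//   for (auto& meth : methods) { /* ... */ }
//
// Note that attachLists() prepends: memmove shifts the old lists up and
// memcpy copies the new ones to the front, so a category's methods are
// found before the class's own during lookup.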


class method_array_t :
    public list_array_tt<method_t, method_list_t>
{
    typedef list_array_tt<method_t, method_list_t> Super;

 public:
    method_list_t **beginCategoryMethodLists() {
        return beginLists();
    }

    method_list_t **endCategoryMethodLists(Class cls);

    method_array_t duplicate() {
        return Super::duplicate<method_array_t>();
    }
};


class property_array_t :
    public list_array_tt<property_t, property_list_t>
{
    typedef list_array_tt<property_t, property_list_t> Super;

 public:
    property_array_t duplicate() {
        return Super::duplicate<property_array_t>();
    }
};


class protocol_array_t :
    public list_array_tt<protocol_ref_t, protocol_list_t>
{
    typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;

 public:
    protocol_array_t duplicate() {
        return Super::duplicate<protocol_array_t>();
    }
};


struct class_rw_t {
    // Be warned that Symbolication knows the layout of this structure.
    uint32_t flags;
    uint32_t version;

    const class_ro_t *ro;

    method_array_t methods;
    property_array_t properties;
    protocol_array_t protocols;

    Class firstSubclass;
    Class nextSiblingClass;

    char *demangledName;

#if SUPPORT_INDEXED_ISA
    uint32_t index;
#endif

    void setFlags(uint32_t set)
    {
        OSAtomicOr32Barrier(set, &flags);
    }

    // Note: clears by XOR, so callers must only pass bits that are
    // currently set.
    void clearFlags(uint32_t clear)
    {
        OSAtomicXor32Barrier(clear, &flags);
    }

    // set and clear must not overlap
    void changeFlags(uint32_t set, uint32_t clear)
    {
        assert((set & clear) == 0);

        uint32_t oldf, newf;
        do {
            oldf = flags;
            newf = (oldf | set) & ~clear;
        } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
    }
};
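
// Usage sketch (editor's addition): the runtime flips class state through
// these helpers. For example, setInitialized() (declared on objc_class
// below) ultimately performs the equivalent of
//
//   meta->data()->changeFlags(RW_INITIALIZED, RW_INITIALIZING);
//
// and the CAS loop in changeFlags() keeps the set+clear pair atomic against
// concurrent setFlags()/clearFlags() callers.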


struct class_data_bits_t {

    // Values are the FAST_ flags above.
    uintptr_t bits;
 private:
    bool getBit(uintptr_t bit)
    {
        return bits & bit;
    }

#if FAST_ALLOC
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change)
    {
        if (change & FAST_ALLOC_MASK) {
            if (((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE)  &&
                ((oldBits >> FAST_SHIFTED_SIZE_SHIFT) != 0))
            {
                oldBits |= FAST_ALLOC;
            } else {
                oldBits &= ~FAST_ALLOC;
            }
        }
        return oldBits;
    }
#else
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) {
        return oldBits;
    }
#endif

    void setBits(uintptr_t set)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits | set, set);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

    void clearBits(uintptr_t clear)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits & ~clear, clear);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

 public:

    class_rw_t* data() {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }
    void setData(class_rw_t *newData)
    {
        assert(!data()  ||  (newData->flags & (RW_REALIZING | RW_FUTURE)));
        // Set during realization or construction only. No locking needed.
        // Use a store-release fence because there may be concurrent
        // readers of data and data's contents.
        uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
        atomic_thread_fence(memory_order_release);
        bits = newBits;
    }

#if FAST_HAS_DEFAULT_RR
    bool hasDefaultRR() {
        return getBit(FAST_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        setBits(FAST_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        clearBits(FAST_HAS_DEFAULT_RR);
    }
#else
    bool hasDefaultRR() {
        return data()->flags & RW_HAS_DEFAULT_RR;
    }
    void setHasDefaultRR() {
        data()->setFlags(RW_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        data()->clearFlags(RW_HAS_DEFAULT_RR);
    }
#endif

#if FAST_HAS_DEFAULT_AWZ
    bool hasDefaultAWZ() {
        return getBit(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        setBits(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        clearBits(FAST_HAS_DEFAULT_AWZ);
    }
#else
    bool hasDefaultAWZ() {
        return data()->flags & RW_HAS_DEFAULT_AWZ;
    }
    void setHasDefaultAWZ() {
        data()->setFlags(RW_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        data()->clearFlags(RW_HAS_DEFAULT_AWZ);
    }
#endif

#if FAST_HAS_CXX_CTOR
    bool hasCxxCtor() {
        return getBit(FAST_HAS_CXX_CTOR);
    }
    void setHasCxxCtor() {
        setBits(FAST_HAS_CXX_CTOR);
    }
#else
    bool hasCxxCtor() {
        return data()->flags & RW_HAS_CXX_CTOR;
    }
    void setHasCxxCtor() {
        data()->setFlags(RW_HAS_CXX_CTOR);
    }
#endif

#if FAST_HAS_CXX_DTOR
    bool hasCxxDtor() {
        return getBit(FAST_HAS_CXX_DTOR);
    }
    void setHasCxxDtor() {
        setBits(FAST_HAS_CXX_DTOR);
    }
#else
    bool hasCxxDtor() {
        return data()->flags & RW_HAS_CXX_DTOR;
    }
    void setHasCxxDtor() {
        data()->setFlags(RW_HAS_CXX_DTOR);
    }
#endif

#if FAST_REQUIRES_RAW_ISA
    bool instancesRequireRawIsa() {
        return getBit(FAST_REQUIRES_RAW_ISA);
    }
    void setInstancesRequireRawIsa() {
        setBits(FAST_REQUIRES_RAW_ISA);
    }
#elif SUPPORT_NONPOINTER_ISA
    bool instancesRequireRawIsa() {
        return data()->flags & RW_REQUIRES_RAW_ISA;
    }
    void setInstancesRequireRawIsa() {
        data()->setFlags(RW_REQUIRES_RAW_ISA);
    }
#else
    bool instancesRequireRawIsa() {
        return true;
    }
    void setInstancesRequireRawIsa() {
        // nothing
    }
#endif

#if FAST_ALLOC
    size_t fastInstanceSize()
    {
        assert(bits & FAST_ALLOC);
        return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16;
    }
    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        assert(data()->flags & RW_REALIZING);

        // Round up to 16-byte boundary, then divide to get 16-byte units
        newSize = ((newSize + 15) & ~15) / 16;

        uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT;
        if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) {
            int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT;
            uintptr_t oldBits = (bits << shift) >> shift;
            if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) {
                newBits |= FAST_ALLOC;
            }
            bits = oldBits | newBits;
        }
    }

    bool canAllocFast() {
        return bits & FAST_ALLOC;
    }
#else
    size_t fastInstanceSize() {
        abort();
    }
    void setFastInstanceSize(size_t) {
        // nothing
    }
    bool canAllocFast() {
        return false;
    }
#endif

    void setClassArrayIndex(unsigned Idx) {
#if SUPPORT_INDEXED_ISA
        // 0 is unused so that we can rely on zero-initialisation from calloc.
        assert(Idx > 0);
        data()->index = Idx;
#endif
    }

    unsigned classArrayIndex() {
#if SUPPORT_INDEXED_ISA
        return data()->index;
#else
        return 0;
#endif
    }

    bool isAnySwift() {
        return isSwiftStable() || isSwiftLegacy();
    }

    bool isSwiftStable() {
        return getBit(FAST_IS_SWIFT_STABLE);
    }
    void setIsSwiftStable() {
        setBits(FAST_IS_SWIFT_STABLE);
    }

    bool isSwiftLegacy() {
        return getBit(FAST_IS_SWIFT_LEGACY);
    }
    void setIsSwiftLegacy() {
        setBits(FAST_IS_SWIFT_LEGACY);
    }
};
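
// Sketch (editor's addition): LoadExclusive/StoreReleaseExclusive above are
// the runtime's portability wrappers over LL/SC or compare-and-swap. A rough
// C++11 atomics equivalent of setBits(), assuming std::atomic storage:
//
//   uintptr_t oldBits = atomicBits.load(std::memory_order_relaxed);
//   uintptr_t newBits;
//   do {
//       newBits = updateFastAlloc(oldBits | set, set);
//   } while (!atomicBits.compare_exchange_weak(oldBits, newBits,
//                                              std::memory_order_release));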


struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;             // formerly cache pointer and vtable
    class_data_bits_t bits;    // class_rw_t * plus custom rr/alloc flags

    class_rw_t *data() {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }

    void setInfo(uint32_t set) {
        assert(isFuture()  ||  isRealized());
        data()->setFlags(set);
    }

    void clearInfo(uint32_t clear) {
        assert(isFuture()  ||  isRealized());
        data()->clearFlags(clear);
    }

    // set and clear must not overlap
    void changeInfo(uint32_t set, uint32_t clear) {
        assert(isFuture()  ||  isRealized());
        assert((set & clear) == 0);
        data()->changeFlags(set, clear);
    }

    bool hasCustomRR() {
        return ! bits.hasDefaultRR();
    }
    void setHasDefaultRR() {
        assert(isInitializing());
        bits.setHasDefaultRR();
    }
    void setHasCustomRR(bool inherited = false);
    void printCustomRR(bool inherited);

    bool hasCustomAWZ() {
        return ! bits.hasDefaultAWZ();
    }
    void setHasDefaultAWZ() {
        assert(isInitializing());
        bits.setHasDefaultAWZ();
    }
    void setHasCustomAWZ(bool inherited = false);
    void printCustomAWZ(bool inherited);

    bool instancesRequireRawIsa() {
        return bits.instancesRequireRawIsa();
    }
    void setInstancesRequireRawIsa(bool inherited = false);
    void printInstancesRequireRawIsa(bool inherited);

    bool canAllocNonpointer() {
        assert(!isFuture());
        return !instancesRequireRawIsa();
    }
    bool canAllocFast() {
        assert(!isFuture());
        return bits.canAllocFast();
    }


    bool hasCxxCtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxCtor();
    }
    void setHasCxxCtor() {
        bits.setHasCxxCtor();
    }

    bool hasCxxDtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxDtor();
    }
    void setHasCxxDtor() {
        bits.setHasCxxDtor();
    }


    bool isSwiftStable() {
        return bits.isSwiftStable();
    }

    bool isSwiftLegacy() {
        return bits.isSwiftLegacy();
    }

    bool isAnySwift() {
        return bits.isAnySwift();
    }


    // Return YES if the class's ivars are managed by ARC,
    // or the class is MRC but has ARC-style weak ivars.
    bool hasAutomaticIvars() {
        return data()->ro->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC);
    }

    // Return YES if the class's ivars are managed by ARC.
    bool isARC() {
        return data()->ro->flags & RO_IS_ARC;
    }


#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture()  ||  isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture()  ||  isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif

    bool shouldGrowCache() {
        return true;
    }

    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }

    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    void setInitializing() {
        assert(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();

    bool isLoadable() {
        assert(isRealized());
        return true;  // any class registered for +load is definitely loadable
    }

    IMP getLoadMethod();

    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() {
        return data()->flags & RW_REALIZED;
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() {
        return data()->flags & RW_FUTURE;
    }

    bool isMetaClass() {
        assert(this);
        assert(isRealized());
        return data()->ro->flags & RO_META;
    }

    // NOT identical to this->ISA when this is a metaclass
    Class getMeta() {
        if (isMetaClass()) return (Class)this;
        else return this->ISA();
    }

    bool isRootClass() {
        return superclass == nil;
    }
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }

    const char *mangledName() {
        // fixme can't assert locks here
        assert(this);

        if (isRealized()  ||  isFuture()) {
            return data()->ro->name;
        } else {
            return ((const class_ro_t *)data())->name;
        }
    }

    const char *demangledName(bool realize = false);
    const char *nameForLogging();

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceStart() {
        assert(isRealized());
        return data()->ro->instanceStart;
    }

    // Class's instance start rounded up to a pointer-size boundary.
    // This is used for ARC layout bitmaps.
    uint32_t alignedInstanceStart() {
        return word_align(unalignedInstanceStart());
    }

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() {
        assert(isRealized());
        return data()->ro->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() {
        return word_align(unalignedInstanceSize());
    }

    size_t instanceSize(size_t extraBytes) {
        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }
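
    // Worked example (editor's addition): a class whose ivars end at byte 13
    // has unalignedInstanceSize() == 13, alignedInstanceSize() == 16 on LP64
    // (word-aligned), and instanceSize(0) == 16, which already satisfies the
    // 16-byte CF minimum above.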

    void setInstanceSize(uint32_t newSize) {
        assert(isRealized());
        if (newSize != data()->ro->instanceSize) {
            assert(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
        }
        bits.setFastInstanceSize(newSize);
    }

    void chooseClassArrayIndex();

    void setClassArrayIndex(unsigned Idx) {
        bits.setClassArrayIndex(Idx);
    }

    unsigned classArrayIndex() {
        return bits.classArrayIndex();
    }

};


struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;
    // ...

    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};


struct category_t {
    const char *name;
    classref_t cls;
    struct method_list_t *instanceMethods;
    struct method_list_t *classMethods;
    struct protocol_list_t *protocols;
    struct property_list_t *instanceProperties;
    // Fields below this point are not always present on disk.
    struct property_list_t *_classProperties;

    method_list_t *methodsForMeta(bool isMeta) {
        if (isMeta) return classMethods;
        else return instanceMethods;
    }

    property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);
};

struct objc_super2 {
    id receiver;
    Class current_class;
};

struct message_ref_t {
    IMP imp;
    SEL sel;
};


extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);

static inline void
foreach_realized_class_and_subclass_2(Class top, unsigned& count,
                                      std::function<bool (Class)> code)
{
    // runtimeLock.assertLocked();
    assert(top);
    Class cls = top;
    while (1) {
        if (--count == 0) {
            _objc_fatal("Memory corruption in class list.");
        }
        if (!code(cls)) break;

        // Depth-first walk: descend to the first subclass if there is one...
        if (cls->data()->firstSubclass) {
            cls = cls->data()->firstSubclass;
        } else {
            // ...otherwise climb back up until a sibling exists or we
            // return to the top of the subtree.
            while (!cls->data()->nextSiblingClass  &&  cls != top) {
                cls = cls->superclass;
                if (--count == 0) {
                    _objc_fatal("Memory corruption in class list.");
                }
            }
            if (cls == top) break;
            cls = cls->data()->nextSiblingClass;
        }
    }
}

extern Class firstRealizedClass();
extern unsigned int unreasonableClassCount();

// Enumerates a class and all of its realized subclasses.
static inline void
foreach_realized_class_and_subclass(Class top,
                                    std::function<void (Class)> code)
{
    unsigned int count = unreasonableClassCount();

    foreach_realized_class_and_subclass_2(top, count,
                                          [&code](Class cls) -> bool
    {
        code(cls);
        return true;
    });
}
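
// Usage sketch (editor's addition): callers hold runtimeLock and pass a
// lambda that is applied to the class and each realized subclass, e.g. in
// the spirit of setInstancesRequireRawIsa()'s flag propagation:
//
//   foreach_realized_class_and_subclass(cls, [](Class c) {
//       c->bits.setInstancesRequireRawIsa();
//   });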

// Enumerates all realized classes and metaclasses.
static inline void
foreach_realized_class_and_metaclass(std::function<void (Class)> code)
{
    unsigned int count = unreasonableClassCount();

    for (Class top = firstRealizedClass();
         top != nil;
         top = top->data()->nextSiblingClass)
    {
        foreach_realized_class_and_subclass_2(top, count,
                                              [&code](Class cls) -> bool
        {
            code(cls);
            return true;
        });
    }
}

#endif