]> git.saurik.com Git - apple/objc4.git/blob - runtime/objc-runtime-new.h
objc4-709.tar.gz
[apple/objc4.git] / runtime / objc-runtime-new.h
1 /*
2 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #ifndef _OBJC_RUNTIME_NEW_H
25 #define _OBJC_RUNTIME_NEW_H
26
27 #if __LP64__
28 typedef uint32_t mask_t; // x86_64 & arm64 asm are less efficient with 16-bits
29 #else
30 typedef uint16_t mask_t;
31 #endif
32 typedef uintptr_t cache_key_t;
33
34 struct swift_class_t;
35
36
37 struct bucket_t {
38 private:
39 cache_key_t _key;
40 IMP _imp;
41
42 public:
43 inline cache_key_t key() const { return _key; }
44 inline IMP imp() const { return (IMP)_imp; }
45 inline void setKey(cache_key_t newKey) { _key = newKey; }
46 inline void setImp(IMP newImp) { _imp = newImp; }
47
48 void set(cache_key_t newKey, IMP newImp);
49 };
50
51
// Per-class method cache: a table of bucket_t entries.
// All non-trivial member functions are defined out of line (cache code).
struct cache_t {
    struct bucket_t *_buckets;  // bucket storage
    mask_t _mask;               // presumably capacity-1 of a power-of-two table — confirm in the cache implementation
    mask_t _occupied;           // number of filled buckets

public:
    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    mask_t capacity();
    bool isConstantEmptyCache();
    bool canBeFreed();

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void expand();
    void reallocate(mask_t oldCapacity, mask_t newCapacity);
    struct bucket_t * find(cache_key_t key, id receiver);

    // Fatal diagnostic for a corrupt cache; never returns.
    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn));
};
78
79
80 // classref_t is unremapped class_t*
81 typedef struct classref * classref_t;
82
83 /***********************************************************************
84 * entsize_list_tt<Element, List, FlagMask>
85 * Generic implementation of an array of non-fragile structs.
86 *
87 * Element is the struct type (e.g. method_t)
88 * List is the specialization of entsize_list_tt (e.g. method_list_t)
89 * FlagMask is used to stash extra bits in the entsize field
90 * (e.g. method list fixup markers)
91 **********************************************************************/
template <typename Element, typename List, uint32_t FlagMask>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;  // element stride in bytes, with FlagMask bits stolen for flags
    uint32_t count;            // number of elements
    Element first;             // first element; the rest follow at entsize() strides

    // Stride between elements, in bytes (flag bits masked off).
    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    // The stolen flag bits only.
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    // Element i, where i == count is the one-past-the-end position.
    // Uses byte arithmetic because entsize() may differ from sizeof(Element).
    Element& getOrEnd(uint32_t i) const {
        assert(i <= count);
        return *(Element *)((uint8_t *)&first + i*entsize());
    }
    // Element i; i must be a valid index.
    Element& get(uint32_t i) const {
        assert(i < count);
        return getOrEnd(i);
    }

    // Total size in bytes: header plus count elements (one is inline in `first`).
    size_t byteSize() const {
        return sizeof(*this) + (count-1)*entsize();
    }

    List *duplicate() const {
        return (List *)memdup(this, this->byteSize());
    }

    struct iterator;
    const iterator begin() const {
        return iterator(*static_cast<const List*>(this), 0);
    }
    iterator begin() {
        return iterator(*static_cast<const List*>(this), 0);
    }
    const iterator end() const {
        return iterator(*static_cast<const List*>(this), count);
    }
    iterator end() {
        return iterator(*static_cast<const List*>(this), count);
    }

    // Random-access iterator that steps by the list's runtime entsize.
    struct iterator {
        uint32_t entsize;  // cached stride from the owning list
        uint32_t index;    // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        // Distance in elements, computed from the cached indices (no divide).
        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        // Comparisons use the raw element pointer, not the index.
        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};
205
206
// One method entry: selector, type-encoding string, implementation pointer.
struct method_t {
    SEL name;
    const char *types;  // presumably an @encode-style signature string — confirm against the compiler's method emission
    IMP imp;

    // Orders methods by ascending selector address.
    // NOTE(review): std::binary_function is deprecated in C++11 and removed
    // in C++17; replace the base class with explicit result/argument typedefs
    // before raising the -std level.
    struct SortBySELAddress :
        public std::binary_function<const method_t&,
                                    const method_t&, bool>
    {
        bool operator() (const method_t& lhs,
                         const method_t& rhs)
        { return lhs.name < rhs.name; }
    };
};
221
222 struct ivar_t {
223 #if __x86_64__
224 // *offset was originally 64-bit on some x86_64 platforms.
225 // We read and write only 32 bits of it.
226 // Some metadata provides all 64 bits. This is harmless for unsigned
227 // little-endian values.
228 // Some code uses all 64 bits. class_addIvar() over-allocates the
229 // offset for their benefit.
230 #endif
231 int32_t *offset;
232 const char *name;
233 const char *type;
234 // alignment is sometimes -1; use alignment() instead
235 uint32_t alignment_raw;
236 uint32_t size;
237
238 uint32_t alignment() const {
239 if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
240 return 1 << alignment_raw;
241 }
242 };
243
// One declared-property entry: its name and encoded attribute string.
struct property_t {
    const char *name;
    const char *attributes;  // presumably the "T...,V..." property attribute encoding — confirm with compiler output
};
248
249 // Two bits of entsize are used for fixup markers.
250 struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
251 bool isFixedUp() const;
252 void setFixedUp();
253
254 uint32_t indexOfMethod(const method_t *meth) const {
255 uint32_t i =
256 (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
257 assert(i < count);
258 return i;
259 }
260 };
261
262 struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
263 bool containsIvar(Ivar ivar) const {
264 return (ivar >= (Ivar)&*begin() && ivar < (Ivar)&*end());
265 }
266 };
267
// Plain entsize list of property_t; no flag bits are stolen from entsize.
struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};
270
271
272 typedef uintptr_t protocol_ref_t; // protocol_t *, but unremapped
273
274 // Values for protocol_t->flags
275 #define PROTOCOL_FIXED_UP_2 (1<<31) // must never be set by compiler
276 #define PROTOCOL_FIXED_UP_1 (1<<30) // must never be set by compiler
277 // Bits 0..15 are reserved for Swift's use.
278
279 #define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
280
// Protocol metadata. Instances emitted by older compilers may be shorter
// than the current struct; `size` records how many bytes are actually
// present, and the HAS_FIELD checks gate access to the trailing fields.
struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t)
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **_extendedMethodTypes;
    const char *_demangledName;
    property_list_t *_classProperties;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;
    void setFixedUp();

    // True when this on-disk protocol is large enough to contain field f.
#   define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))

    bool hasExtendedMethodTypesField() const {
        return HAS_FIELD(_extendedMethodTypes);
    }
    bool hasDemangledNameField() const {
        return HAS_FIELD(_demangledName);
    }
    bool hasClassPropertiesField() const {
        return HAS_FIELD(_classProperties);
    }

#   undef HAS_FIELD

    // nil when the field is absent from this (older) protocol layout.
    const char **extendedMethodTypes() const {
        return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
    }

    property_list_t *classProperties() const {
        return hasClassPropertiesField() ? _classProperties : nil;
    }
};
327
328 struct protocol_list_t {
329 // count is 64-bit by accident.
330 uintptr_t count;
331 protocol_ref_t list[0]; // variable-size
332
333 size_t byteSize() const {
334 return sizeof(*this) + count*sizeof(list[0]);
335 }
336
337 protocol_list_t *duplicate() const {
338 return (protocol_list_t *)memdup(this, this->byteSize());
339 }
340
341 typedef protocol_ref_t* iterator;
342 typedef const protocol_ref_t* const_iterator;
343
344 const_iterator begin() const {
345 return list;
346 }
347 iterator begin() {
348 return list;
349 }
350 const_iterator end() const {
351 return list + count;
352 }
353 iterator end() {
354 return list + count;
355 }
356 };
357
// A category paired with the image (header_info) it was loaded from.
struct locstamped_category_t {
    category_t *cat;
    struct header_info *hi;
};
362
// Variable-length list of categories tagged with their source images.
struct locstamped_category_list_t {
    uint32_t count;
#if __LP64__
    uint32_t reserved;  // presumably padding so list[] starts pointer-aligned — confirm
#endif
    locstamped_category_t list[0];  // variable-size
};
370
371
372 // class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
373 // The extra bits are optimized for the retain/release and alloc/dealloc paths.
374
375 // Values for class_ro_t->flags
376 // These are emitted by the compiler and are part of the ABI.
377 // Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
378 // class is a metaclass
379 #define RO_META (1<<0)
380 // class is a root class
381 #define RO_ROOT (1<<1)
382 // class has .cxx_construct/destruct implementations
383 #define RO_HAS_CXX_STRUCTORS (1<<2)
384 // class has +load implementation
385 // #define RO_HAS_LOAD_METHOD (1<<3)
386 // class has visibility=hidden set
387 #define RO_HIDDEN (1<<4)
388 // class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
389 #define RO_EXCEPTION (1<<5)
390 // this bit is available for reassignment
391 // #define RO_REUSE_ME (1<<6)
392 // class compiled with ARC
393 #define RO_IS_ARC (1<<7)
394 // class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
395 #define RO_HAS_CXX_DTOR_ONLY (1<<8)
396 // class is not ARC but has ARC-style weak ivar layout
397 #define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
398
399 // class is in an unloadable bundle - must never be set by compiler
400 #define RO_FROM_BUNDLE (1<<29)
401 // class is unrealized future class - must never be set by compiler
402 #define RO_FUTURE (1<<30)
403 // class is realized - must never be set by compiler
404 #define RO_REALIZED (1<<31)
405
406 // Values for class_rw_t->flags
407 // These are not emitted by the compiler and are never used in class_ro_t.
408 // Their presence should be considered in future ABI versions.
409 // class_t->data is class_rw_t, not class_ro_t
410 #define RW_REALIZED (1<<31)
411 // class is unresolved future class
412 #define RW_FUTURE (1<<30)
413 // class is initialized
414 #define RW_INITIALIZED (1<<29)
415 // class is initializing
416 #define RW_INITIALIZING (1<<28)
417 // class_rw_t->ro is heap copy of class_ro_t
418 #define RW_COPIED_RO (1<<27)
419 // class allocated but not yet registered
420 #define RW_CONSTRUCTING (1<<26)
421 // class allocated and registered
422 #define RW_CONSTRUCTED (1<<25)
423 // available for use; was RW_FINALIZE_ON_MAIN_THREAD
424 // #define RW_24 (1<<24)
425 // class +load has been called
426 #define RW_LOADED (1<<23)
427 #if !SUPPORT_NONPOINTER_ISA
428 // class instances may have associative references
429 #define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
430 #endif
431 // class has instance-specific GC layout
432 #define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
433 // available for use
434 // #define RW_20 (1<<20)
435 // class has started realizing but not yet completed it
436 #define RW_REALIZING (1<<19)
437
438 // NOTE: MORE RW_ FLAGS DEFINED BELOW
439
440
441 // Values for class_rw_t->flags or class_t->bits
442 // These flags are optimized for retain/release and alloc/dealloc
443 // 64-bit stores more of them in class_t->bits to reduce pointer indirection.
444
445 #if !__LP64__
446
447 // class or superclass has .cxx_construct implementation
448 #define RW_HAS_CXX_CTOR (1<<18)
449 // class or superclass has .cxx_destruct implementation
450 #define RW_HAS_CXX_DTOR (1<<17)
451 // class or superclass has default alloc/allocWithZone: implementation
452 // Note this is stored in the metaclass.
453 #define RW_HAS_DEFAULT_AWZ (1<<16)
454 // class's instances requires raw isa
455 #if SUPPORT_NONPOINTER_ISA
456 #define RW_REQUIRES_RAW_ISA (1<<15)
457 #endif
458
459 // class is a Swift class
460 #define FAST_IS_SWIFT (1UL<<0)
461 // class or superclass has default retain/release/autorelease/retainCount/
462 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
463 #define FAST_HAS_DEFAULT_RR (1UL<<1)
464 // data pointer
465 #define FAST_DATA_MASK 0xfffffffcUL
466
467 #elif 1
468 // Leaks-compatible version that steals low bits only.
469
470 // class or superclass has .cxx_construct implementation
471 #define RW_HAS_CXX_CTOR (1<<18)
472 // class or superclass has .cxx_destruct implementation
473 #define RW_HAS_CXX_DTOR (1<<17)
474 // class or superclass has default alloc/allocWithZone: implementation
475 // Note this is stored in the metaclass.
476 #define RW_HAS_DEFAULT_AWZ (1<<16)
477
478 // class is a Swift class
479 #define FAST_IS_SWIFT (1UL<<0)
480 // class or superclass has default retain/release/autorelease/retainCount/
481 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
482 #define FAST_HAS_DEFAULT_RR (1UL<<1)
483 // class's instances requires raw isa
484 #define FAST_REQUIRES_RAW_ISA (1UL<<2)
485 // data pointer
486 #define FAST_DATA_MASK 0x00007ffffffffff8UL
487
488 #else
489 // Leaks-incompatible version that steals lots of bits.
490
491 // class is a Swift class
492 #define FAST_IS_SWIFT (1UL<<0)
493 // class's instances requires raw isa
494 #define FAST_REQUIRES_RAW_ISA (1UL<<1)
495 // class or superclass has .cxx_destruct implementation
496 // This bit is aligned with isa_t->hasCxxDtor to save an instruction.
497 #define FAST_HAS_CXX_DTOR (1UL<<2)
498 // data pointer
499 #define FAST_DATA_MASK 0x00007ffffffffff8UL
500 // class or superclass has .cxx_construct implementation
501 #define FAST_HAS_CXX_CTOR (1UL<<47)
502 // class or superclass has default alloc/allocWithZone: implementation
503 // Note this is stored in the metaclass.
504 #define FAST_HAS_DEFAULT_AWZ (1UL<<48)
505 // class or superclass has default retain/release/autorelease/retainCount/
506 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
507 #define FAST_HAS_DEFAULT_RR (1UL<<49)
508 // summary bit for fast alloc path: !hasCxxCtor and
509 // !instancesRequireRawIsa and instanceSize fits into shiftedSize
510 #define FAST_ALLOC (1UL<<50)
511 // instance size in units of 16 bytes
512 // or 0 if the instance size is too big in this field
513 // This field must be LAST
514 #define FAST_SHIFTED_SIZE_SHIFT 51
515
516 // FAST_ALLOC means
517 //   FAST_HAS_CXX_CTOR is not set
518 // FAST_REQUIRES_RAW_ISA is not set
519 // FAST_SHIFTED_SIZE is not zero
520 // FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that
521 // bit is stored on the metaclass.
522 #define FAST_ALLOC_MASK (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA)
523 #define FAST_ALLOC_VALUE (0)
524
525 #endif
526
527
// Read-only class data emitted by the compiler (part of the ABI; see the
// RO_* flag comments above). The runtime never writes through this struct.
struct class_ro_t {
    uint32_t flags;          // RO_* flags
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;
#endif

    const uint8_t * ivarLayout;

    const char * name;
    method_list_t * baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    // Accessor for the compiler-emitted base method list.
    method_list_t *baseMethods() const {
        return baseMethodList;
    }
};
550
551
552 /***********************************************************************
553 * list_array_tt<Element, List>
554 * Generic implementation for metadata that can be augmented by categories.
555 *
556 * Element is the underlying metadata type (e.g. method_t)
557 * List is the metadata's list type (e.g. method_list_t)
558 *
559 * A list_array_tt has one of three values:
560 * - empty
561 * - a pointer to a single list
562 * - an array of pointers to lists
563 *
564 * countLists/beginLists/endLists iterate the metadata lists
565 * count/begin/end iterate the underlying metadata elements
566 **********************************************************************/
template <typename Element, typename List>
class list_array_tt {
    // Heap-allocated array-of-lists representation.
    struct array_t {
        uint32_t count;
        List* lists[0];

        static size_t byteSize(uint32_t count) {
            return sizeof(array_t) + count*sizeof(lists[0]);
        }
        size_t byteSize() {
            return byteSize(count);
        }
    };

 protected:
    // Flattening iterator: walks every Element across all lists.
    class iterator {
        List **lists;
        List **listsEnd;
        typename List::iterator m, mEnd;  // position within the current list

     public:
        iterator(List **begin, List **end)
            : lists(begin), listsEnd(end)
        {
            // Prime the inner iterator from the first list, if any.
            if (begin != end) {
                m = (*begin)->begin();
                mEnd = (*begin)->end();
            }
        }

        const Element& operator * () const {
            return *m;
        }
        Element& operator * () {
            return *m;
        }

        bool operator != (const iterator& rhs) const {
            if (lists != rhs.lists) return true;
            if (lists == listsEnd) return false;  // m is undefined
            if (m != rhs.m) return true;
            return false;
        }

        // Advance within the current list; hop to the next list when the
        // current one is exhausted.
        const iterator& operator ++ () {
            assert(m != mEnd);
            m++;
            if (m == mEnd) {
                assert(lists != listsEnd);
                lists++;
                if (lists != listsEnd) {
                    m = (*lists)->begin();
                    mEnd = (*lists)->end();
                }
            }
            return *this;
        }
    };

 private:
    // Discriminated by the low bit of arrayAndFlag:
    //   bit clear: `list` is a single list pointer (or nil for empty)
    //   bit set:   arrayAndFlag & ~1 is an array_t*
    union {
        List* list;
        uintptr_t arrayAndFlag;
    };

    bool hasArray() const {
        return arrayAndFlag & 1;
    }

    array_t *array() {
        return (array_t *)(arrayAndFlag & ~1);
    }

    void setArray(array_t *array) {
        arrayAndFlag = (uintptr_t)array | 1;
    }

 public:

    // Total number of elements across all lists.
    uint32_t count() {
        uint32_t result = 0;
        for (auto lists = beginLists(), end = endLists();
             lists != end;
             ++lists)
        {
            result += (*lists)->count;
        }
        return result;
    }

    iterator begin() {
        return iterator(beginLists(), endLists());
    }

    iterator end() {
        List **e = endLists();
        return iterator(e, e);
    }


    uint32_t countLists() {
        if (hasArray()) {
            return array()->count;
        } else if (list) {
            return 1;
        } else {
            return 0;
        }
    }

    List** beginLists() {
        if (hasArray()) {
            return array()->lists;
        } else {
            return &list;
        }
    }

    List** endLists() {
        if (hasArray()) {
            return array()->lists + array()->count;
        } else if (list) {
            return &list + 1;
        } else {
            return &list;
        }
    }

    // Prepend addedLists ahead of any existing lists, promoting the
    // representation (empty -> single -> array) as needed.
    // NOTE(review): the realloc+memmove sequence mutates the array in place;
    // confirm callers hold the appropriate lock, since concurrent readers of
    // the old array would observe a partially-shifted state.
    void attachLists(List* const * addedLists, uint32_t addedCount) {
        if (addedCount == 0) return;

        if (hasArray()) {
            // many lists -> many lists
            uint32_t oldCount = array()->count;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
            array()->count = newCount;
            // Shift existing lists up, then copy the new ones to the front.
            memmove(array()->lists + addedCount, array()->lists,
                    oldCount * sizeof(array()->lists[0]));
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
        else if (!list  &&  addedCount == 1) {
            // 0 lists -> 1 list
            list = addedLists[0];
        }
        else {
            // 1 list -> many lists
            List* oldList = list;
            uint32_t oldCount = oldList ? 1 : 0;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)malloc(array_t::byteSize(newCount)));
            array()->count = newCount;
            // Old list (if any) goes after the added ones.
            if (oldList) array()->lists[addedCount] = oldList;
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
    }

    // Free each owned list, then the array wrapper itself.
    void tryFree() {
        if (hasArray()) {
            for (uint32_t i = 0; i < array()->count; i++) {
                try_free(array()->lists[i]);
            }
            try_free(array());
        }
        else if (list) {
            try_free(list);
        }
    }

    // Deep copy: duplicates the array (if any) and every contained list.
    template<typename Result>
    Result duplicate() {
        Result result;

        if (hasArray()) {
            array_t *a = array();
            result.setArray((array_t *)memdup(a, a->byteSize()));
            for (uint32_t i = 0; i < a->count; i++) {
                result.array()->lists[i] = a->lists[i]->duplicate();
            }
        } else if (list) {
            result.list = list->duplicate();
        } else {
            result.list = nil;
        }

        return result;
    }
};
757
758
// The methods of a class: category-added lists plus the base list.
class method_array_t :
    public list_array_tt<method_t, method_list_t>
{
    typedef list_array_tt<method_t, method_list_t> Super;

 public:
    // Category lists sit at the front of the array (attachLists copies
    // added lists before existing ones), so they start at beginLists().
    method_list_t **beginCategoryMethodLists() {
        return beginLists();
    }

    // End of the category lists; defined out of line.
    method_list_t **endCategoryMethodLists(Class cls);

    method_array_t duplicate() {
        return Super::duplicate<method_array_t>();
    }
};
775
776
// The properties of a class, as a list-of-lists.
class property_array_t :
    public list_array_tt<property_t, property_list_t>
{
    typedef list_array_tt<property_t, property_list_t> Super;

 public:
    property_array_t duplicate() {
        return Super::duplicate<property_array_t>();
    }
};
787
788
// The protocols of a class, as a list-of-lists of unremapped references.
class protocol_array_t :
    public list_array_tt<protocol_ref_t, protocol_list_t>
{
    typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;

 public:
    protocol_array_t duplicate() {
        return Super::duplicate<protocol_array_t>();
    }
};
799
800
// Read-write class data allocated by the runtime at realization time.
struct class_rw_t {
    // Be warned that Symbolication knows the layout of this structure.
    uint32_t flags;    // RW_* flags above
    uint32_t version;

    const class_ro_t *ro;  // read-only data; may be a heap copy (see RW_COPIED_RO)

    method_array_t methods;
    property_array_t properties;
    protocol_array_t protocols;

    Class firstSubclass;
    Class nextSiblingClass;

    char *demangledName;

#if SUPPORT_INDEXED_ISA
    uint32_t index;
#endif

    // Atomically OR bits into flags.
    void setFlags(uint32_t set)
    {
        OSAtomicOr32Barrier(set, &flags);
    }

    // Atomically clear bits from flags.
    // NOTE(review): implemented as XOR rather than AND-NOT, so it only
    // clears bits that are currently set; passing an already-clear bit
    // would SET it instead. Callers must guarantee the bits are set.
    void clearFlags(uint32_t clear)
    {
        OSAtomicXor32Barrier(clear, &flags);
    }

    // set and clear must not overlap
    void changeFlags(uint32_t set, uint32_t clear)
    {
        assert((set & clear) == 0);

        // CAS loop: retry until no other thread has raced the update.
        uint32_t oldf, newf;
        do {
            oldf = flags;
            newf = (oldf | set) & ~clear;
        } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
    }
};
843
844
// The class's `bits` field: a class_rw_t pointer with FAST_ flags packed
// into the spare bits (see FAST_DATA_MASK and friends above).
struct class_data_bits_t {

    // Values are the FAST_ flags above.
    uintptr_t bits;
 private:
    bool getBit(uintptr_t bit)
    {
        return bits & bit;
    }

#if FAST_ALLOC
    // Recompute the FAST_ALLOC summary bit after a change to any bit in
    // FAST_ALLOC_MASK: set it only when the masked bits equal
    // FAST_ALLOC_VALUE and the shifted instance size is nonzero.
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change)
    {
        if (change & FAST_ALLOC_MASK) {
            if (((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE)  &&
                ((oldBits >> FAST_SHIFTED_SIZE_SHIFT) != 0))
            {
                oldBits |= FAST_ALLOC;
            } else {
                oldBits &= ~FAST_ALLOC;
            }
        }
        return oldBits;
    }
#else
    // No FAST_ALLOC bit in this configuration; nothing to recompute.
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) {
        return oldBits;
    }
#endif

    // Atomically set bits via a load-exclusive/store-release retry loop,
    // keeping FAST_ALLOC consistent with the new value.
    void setBits(uintptr_t set)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits | set, set);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

    // Atomically clear bits; same retry scheme as setBits.
    void clearBits(uintptr_t clear)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits & ~clear, clear);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

 public:

    // The class_rw_t pointer stored in the data bits.
    class_rw_t* data() {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }
    void setData(class_rw_t *newData)
    {
        assert(!data()  ||  (newData->flags & (RW_REALIZING | RW_FUTURE)));
        // Set during realization or construction only. No locking needed.
        // Use a store-release fence because there may be concurrent
        // readers of data and data's contents.
        uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
        atomic_thread_fence(memory_order_release);
        bits = newBits;
    }

    bool hasDefaultRR() {
        return getBit(FAST_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        setBits(FAST_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        clearBits(FAST_HAS_DEFAULT_RR);
    }

    // AWZ (+alloc/+allocWithZone:) flag lives either in the fast bits or
    // in class_rw_t->flags depending on configuration.
#if FAST_HAS_DEFAULT_AWZ
    bool hasDefaultAWZ() {
        return getBit(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        setBits(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        clearBits(FAST_HAS_DEFAULT_AWZ);
    }
#else
    bool hasDefaultAWZ() {
        return data()->flags & RW_HAS_DEFAULT_AWZ;
    }
    void setHasDefaultAWZ() {
        data()->setFlags(RW_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        data()->clearFlags(RW_HAS_DEFAULT_AWZ);
    }
#endif

#if FAST_HAS_CXX_CTOR
    bool hasCxxCtor() {
        return getBit(FAST_HAS_CXX_CTOR);
    }
    void setHasCxxCtor() {
        setBits(FAST_HAS_CXX_CTOR);
    }
#else
    bool hasCxxCtor() {
        return data()->flags & RW_HAS_CXX_CTOR;
    }
    void setHasCxxCtor() {
        data()->setFlags(RW_HAS_CXX_CTOR);
    }
#endif

#if FAST_HAS_CXX_DTOR
    bool hasCxxDtor() {
        return getBit(FAST_HAS_CXX_DTOR);
    }
    void setHasCxxDtor() {
        setBits(FAST_HAS_CXX_DTOR);
    }
#else
    bool hasCxxDtor() {
        return data()->flags & RW_HAS_CXX_DTOR;
    }
    void setHasCxxDtor() {
        data()->setFlags(RW_HAS_CXX_DTOR);
    }
#endif

    // Raw-isa requirement: fast bit, rw flag, or always-true depending on
    // whether non-pointer isa is supported at all.
#if FAST_REQUIRES_RAW_ISA
    bool instancesRequireRawIsa() {
        return getBit(FAST_REQUIRES_RAW_ISA);
    }
    void setInstancesRequireRawIsa() {
        setBits(FAST_REQUIRES_RAW_ISA);
    }
#elif SUPPORT_NONPOINTER_ISA
    bool instancesRequireRawIsa() {
        return data()->flags & RW_REQUIRES_RAW_ISA;
    }
    void setInstancesRequireRawIsa() {
        data()->setFlags(RW_REQUIRES_RAW_ISA);
    }
#else
    bool instancesRequireRawIsa() {
        return true;
    }
    void setInstancesRequireRawIsa() {
        // nothing
    }
#endif

#if FAST_ALLOC
    // Instance size in bytes, decoded from the 16-byte-unit field.
    size_t fastInstanceSize()
    {
        assert(bits & FAST_ALLOC);
        return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16;
    }
    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        assert(data()->flags & RW_REALIZING);

        // Round up to 16-byte boundary, then divide to get 16-byte units
        newSize = ((newSize + 15) & ~15) / 16;

        uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT;
        // Only store if the size fits in the field without truncation;
        // otherwise leave the field zero ("too big", see FAST_SHIFTED_SIZE_SHIFT).
        if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) {
            int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT;
            uintptr_t oldBits = (bits << shift) >> shift;  // clear old size field
            if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) {
                newBits |= FAST_ALLOC;
            }
            bits = oldBits | newBits;
        }
    }

    bool canAllocFast() {
        return bits & FAST_ALLOC;
    }
#else
    size_t fastInstanceSize() {
        abort();
    }
    void setFastInstanceSize(size_t) {
        // nothing
    }
    bool canAllocFast() {
        return false;
    }
#endif

    void setClassArrayIndex(unsigned Idx) {
#if SUPPORT_INDEXED_ISA
        // 0 is unused as then we can rely on zero-initialisation from calloc.
        assert(Idx > 0);
        data()->index = Idx;
#endif
    }

    unsigned classArrayIndex() {
#if SUPPORT_INDEXED_ISA
        return data()->index;
#else
        return 0;
#endif
    }

    bool isSwift() {
        return getBit(FAST_IS_SWIFT);
    }

    void setIsSwift() {
        setBits(FAST_IS_SWIFT);
    }
};
1062
1063
1064 struct objc_class : objc_object {
1065 // Class ISA;
1066 Class superclass;
1067 cache_t cache; // formerly cache pointer and vtable
1068 class_data_bits_t bits; // class_rw_t * plus custom rr/alloc flags
1069
1070 class_rw_t *data() {
1071 return bits.data();
1072 }
1073 void setData(class_rw_t *newData) {
1074 bits.setData(newData);
1075 }
1076
1077 void setInfo(uint32_t set) {
1078 assert(isFuture() || isRealized());
1079 data()->setFlags(set);
1080 }
1081
1082 void clearInfo(uint32_t clear) {
1083 assert(isFuture() || isRealized());
1084 data()->clearFlags(clear);
1085 }
1086
1087 // set and clear must not overlap
1088 void changeInfo(uint32_t set, uint32_t clear) {
1089 assert(isFuture() || isRealized());
1090 assert((set & clear) == 0);
1091 data()->changeFlags(set, clear);
1092 }
1093
1094 bool hasCustomRR() {
1095 return ! bits.hasDefaultRR();
1096 }
1097 void setHasDefaultRR() {
1098 assert(isInitializing());
1099 bits.setHasDefaultRR();
1100 }
1101 void setHasCustomRR(bool inherited = false);
1102 void printCustomRR(bool inherited);
1103
1104 bool hasCustomAWZ() {
1105 return ! bits.hasDefaultAWZ();
1106 }
1107 void setHasDefaultAWZ() {
1108 assert(isInitializing());
1109 bits.setHasDefaultAWZ();
1110 }
1111 void setHasCustomAWZ(bool inherited = false);
1112 void printCustomAWZ(bool inherited);
1113
1114 bool instancesRequireRawIsa() {
1115 return bits.instancesRequireRawIsa();
1116 }
1117 void setInstancesRequireRawIsa(bool inherited = false);
1118 void printInstancesRequireRawIsa(bool inherited);
1119
1120 bool canAllocNonpointer() {
1121 assert(!isFuture());
1122 return !instancesRequireRawIsa();
1123 }
1124 bool canAllocFast() {
1125 assert(!isFuture());
1126 return bits.canAllocFast();
1127 }
1128
1129
1130 bool hasCxxCtor() {
1131 // addSubclass() propagates this flag from the superclass.
1132 assert(isRealized());
1133 return bits.hasCxxCtor();
1134 }
1135 void setHasCxxCtor() {
1136 bits.setHasCxxCtor();
1137 }
1138
1139 bool hasCxxDtor() {
1140 // addSubclass() propagates this flag from the superclass.
1141 assert(isRealized());
1142 return bits.hasCxxDtor();
1143 }
1144 void setHasCxxDtor() {
1145 bits.setHasCxxDtor();
1146 }
1147
1148
1149 bool isSwift() {
1150 return bits.isSwift();
1151 }
1152
1153
1154 // Return YES if the class's ivars are managed by ARC,
1155 // or the class is MRC but has ARC-style weak ivars.
1156 bool hasAutomaticIvars() {
1157 return data()->ro->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC);
1158 }
1159
1160 // Return YES if the class's ivars are managed by ARC.
1161 bool isARC() {
1162 return data()->ro->flags & RO_IS_ARC;
1163 }
1164
1165
#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    // Whether any instance of this class has ever had an associated
    // object attached. Only maintained here when non-pointer isa is
    // unavailable; otherwise the bit lives in each object's isa.
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture() || isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    // Set sticky "has associated objects" flag (never cleared here).
    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture() || isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif
1181
    // Method-cache growth policy: currently always grow.
    bool shouldGrowCache() {
        return true;
    }

    // The setter is intentionally a no-op; the policy is fixed above.
    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }
1189
    // +initialize state is tracked on the METAclass, hence getMeta() here.
    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    // Mark the class as currently running +initialize.
    // Must be called on the non-meta class; the flag is set on its metaclass.
    void setInitializing() {
        assert(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    // True once +initialize has completed (flag on the metaclass).
    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();
1204
    // Whether this class's +load may be invoked.
    bool isLoadable() {
        assert(isRealized());
        return true; // any class registered for +load is definitely loadable
    }

    // Returns the IMP of this class's +load method (defined elsewhere).
    IMP getLoadMethod();
1211
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() {
        return data()->flags & RW_REALIZED;
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() {
        return data()->flags & RW_FUTURE;
    }
1222
    // True if this class object is a metaclass (RO_META in ro flags).
    // NOTE(review): assert(this) guards calls on a nil class; a conforming
    // compiler may assume `this` is non-null and drop the assert.
    bool isMetaClass() {
        assert(this);
        assert(isRealized());
        return data()->ro->flags & RO_META;
    }
1228
1229 // NOT identical to this->ISA when this is a metaclass
1230 Class getMeta() {
1231 if (isMetaClass()) return (Class)this;
1232 else return this->ISA();
1233 }
1234
    // A root class has no superclass (e.g. NSObject, NSProxy).
    bool isRootClass() {
        return superclass == nil;
    }
    // A root metaclass is its own isa.
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }
1241
1242 const char *mangledName() {
1243 // fixme can't assert locks here
1244 assert(this);
1245
1246 if (isRealized() || isFuture()) {
1247 return data()->ro->name;
1248 } else {
1249 return ((const class_ro_t *)data())->name;
1250 }
1251 }
1252
1253 const char *demangledName(bool realize = false);
1254 const char *nameForLogging();
1255
    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceStart() {
        assert(isRealized());
        return data()->ro->instanceStart;
    }

    // Class's instance start rounded up to a pointer-size boundary.
    // This is used for ARC layout bitmaps.
    uint32_t alignedInstanceStart() {
        return word_align(unalignedInstanceStart());
    }
1267
    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() {
        assert(isRealized());
        return data()->ro->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() {
        return word_align(unalignedInstanceSize());
    }
1278
1279 size_t instanceSize(size_t extraBytes) {
1280 size_t size = alignedInstanceSize() + extraBytes;
1281 // CF requires all objects be at least 16 bytes.
1282 if (size < 16) size = 16;
1283 return size;
1284 }
1285
    // Overwrites the instance size in the (heap-copied) ro and mirrors it
    // into the fast-path size stored in the class bits.
    void setInstanceSize(uint32_t newSize) {
        assert(isRealized());
        if (newSize != data()->ro->instanceSize) {
            // Only a runtime-owned copy of the ro may be mutated; the
            // const_cast is safe only because RW_COPIED_RO guarantees it.
            assert(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
        }
        bits.setFastInstanceSize(newSize);
    }
1294
1295 void chooseClassArrayIndex();
1296
    // Index of this class in the global class array (stored in class bits;
    // chosen by chooseClassArrayIndex(), declared above).
    void setClassArrayIndex(unsigned Idx) {
        bits.setClassArrayIndex(Idx);
    }

    unsigned classArrayIndex() {
        return bits.classArrayIndex();
    }
1304
1305 };
1306
1307
// Extended class layout emitted by the Swift compiler, following the
// ordinary objc_class fields. Field order and sizes are ABI —
// presumably mirroring the Swift class-metadata layout (verify against
// the Swift runtime headers); do not reorder or resize.
struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;
    // ...

    // Start of the metadata allocation: this object sits
    // classAddressOffset bytes past the allocation's base.
    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};
1324
1325
// On-disk representation of an Objective-C category. Matches the
// compiler-emitted layout; do not reorder fields.
struct category_t {
    const char *name;
    classref_t cls;
    struct method_list_t *instanceMethods;
    struct method_list_t *classMethods;
    struct protocol_list_t *protocols;
    struct property_list_t *instanceProperties;
    // Fields below this point are not always present on disk.
    struct property_list_t *_classProperties;

    // Selects the method list that applies to the class (isMeta) or to
    // its instances.
    method_list_t *methodsForMeta(bool isMeta) {
        if (isMeta) return classMethods;
        else return instanceMethods;
    }

    // Property-list accessor needs the header_info because
    // _classProperties may be absent on disk (defined elsewhere).
    property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);
};
1343
// Variant of objc_super carrying the current class rather than the
// superclass; layout is ABI for the corresponding msgSendSuper2 path.
struct objc_super2 {
    id receiver;
    Class current_class;
};
1348
// Compiler-emitted (imp, sel) pair used by vtable-style dispatch
// references; layout is ABI — do not reorder.
struct message_ref_t {
    IMP imp;
    SEL sel;
};
1353
1354
1355 extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
1356
// Non-recursive pre-order walk of `top` and all of its realized
// subclasses, using the firstSubclass / nextSiblingClass / superclass
// links in class_rw_t. `code` returning false stops the walk early.
// `count` is a caller-supplied upper bound on visited/backtracked nodes
// (see unreasonableClassCount below); hitting zero means the subclass
// links form a cycle or dangle, and aborts. Note `count` is passed by
// reference so one budget can span several calls.
static inline void
foreach_realized_class_and_subclass_2(Class top, unsigned& count,
                                      std::function<bool (Class)> code)
{
    // runtimeLock.assertWriting();
    assert(top);
    Class cls = top;
    while (1) {
        if (--count == 0) {
            _objc_fatal("Memory corruption in class list.");
        }
        if (!code(cls)) break;

        if (cls->data()->firstSubclass) {
            // Descend to the first child.
            cls = cls->data()->firstSubclass;
        } else {
            // No children: backtrack toward `top` until a node has an
            // unvisited sibling, charging the budget for each step up.
            while (!cls->data()->nextSiblingClass  &&  cls != top) {
                cls = cls->superclass;
                if (--count == 0) {
                    _objc_fatal("Memory corruption in class list.");
                }
            }
            if (cls == top) break;
            cls = cls->data()->nextSiblingClass;
        }
    }
}
1384
1385 extern Class firstRealizedClass();
1386 extern unsigned int unreasonableClassCount();
1387
1388 // Enumerates a class and all of its realized subclasses.
1389 static inline void
1390 foreach_realized_class_and_subclass(Class top,
1391 std::function<void (Class)> code)
1392 {
1393 unsigned int count = unreasonableClassCount();
1394
1395 foreach_realized_class_and_subclass_2(top, count,
1396 [&code](Class cls) -> bool
1397 {
1398 code(cls);
1399 return true;
1400 });
1401 }
1402
1403 // Enumerates all realized classes and metaclasses.
1404 static inline void
1405 foreach_realized_class_and_metaclass(std::function<void (Class)> code)
1406 {
1407 unsigned int count = unreasonableClassCount();
1408
1409 for (Class top = firstRealizedClass();
1410 top != nil;
1411 top = top->data()->nextSiblingClass)
1412 {
1413 foreach_realized_class_and_subclass_2(top, count,
1414 [&code](Class cls) -> bool
1415 {
1416 code(cls);
1417 return true;
1418 });
1419 }
1420
1421 }
1422
1423 #endif