// apple/objc4: runtime/objc-runtime-new.h
/*
 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H

#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bits
#else
typedef uint16_t mask_t;
#endif
typedef uintptr_t cache_key_t;

struct swift_class_t;

struct bucket_t {
private:
    cache_key_t _key;
    IMP _imp;

public:
    inline cache_key_t key() const { return _key; }
    inline IMP imp() const { return (IMP)_imp; }
    inline void setKey(cache_key_t newKey) { _key = newKey; }
    inline void setImp(IMP newImp) { _imp = newImp; }

    void set(cache_key_t newKey, IMP newImp);
};


struct cache_t {
    struct bucket_t *_buckets;
    mask_t _mask;
    mask_t _occupied;

public:
    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    mask_t capacity();
    bool isConstantEmptyCache();
    bool canBeFreed();

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void expand();
    void reallocate(mask_t oldCapacity, mask_t newCapacity);
    struct bucket_t * find(cache_key_t key, id receiver);

    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn));
};
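
// Illustrative sketch (editorial, not part of the runtime): find() probes
// the bucket array starting at (key & mask) and steps to the next slot
// until it hits a matching key or an empty one; capacity is a power of
// two, so the mask is a cheap modulo. A hypothetical probe, ignoring the
// architecture-specific step direction of the real implementation:
//
//     bucket_t *example_find(cache_t *cache, cache_key_t key)
//     {
//         bucket_t *b = cache->buckets();
//         mask_t m = cache->mask();
//         mask_t begin = key & m;
//         mask_t i = begin;
//         do {
//             // Return hits and empty slots alike; on a miss the
//             // caller fills the empty slot it was handed.
//             if (b[i].key() == 0  ||  b[i].key() == key) return &b[i];
//         } while ((i = (i+1) & m) != begin);
//         cache_t::bad_cache(nil, (SEL)key, nil);  // unreachable unless corrupt
//     }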


// classref_t is unremapped class_t*
typedef struct classref * classref_t;

/***********************************************************************
* entsize_list_tt<Element, List, FlagMask>
* Generic implementation of an array of non-fragile structs.
*
* Element is the struct type (e.g. method_t)
* List is the specialization of entsize_list_tt (e.g. method_list_t)
* FlagMask is used to stash extra bits in the entsize field
*   (e.g. method list fixup markers)
**********************************************************************/
template <typename Element, typename List, uint32_t FlagMask>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;
    uint32_t count;
    Element first;

    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    Element& getOrEnd(uint32_t i) const {
        assert(i <= count);
        return *(Element *)((uint8_t *)&first + i*entsize());
    }
    Element& get(uint32_t i) const {
        assert(i < count);
        return getOrEnd(i);
    }

    size_t byteSize() const {
        return sizeof(*this) + (count-1)*entsize();
    }

    List *duplicate() const {
        return (List *)memdup(this, this->byteSize());
    }

    struct iterator;
    const iterator begin() const {
        return iterator(*static_cast<const List*>(this), 0);
    }
    iterator begin() {
        return iterator(*static_cast<const List*>(this), 0);
    }
    const iterator end() const {
        return iterator(*static_cast<const List*>(this), count);
    }
    iterator end() {
        return iterator(*static_cast<const List*>(this), count);
    }

    struct iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};
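
// Illustrative note (editorial): each list records its own entry size, so
// iteration steps by entsize() rather than sizeof(Element); the iterator
// above hides that. A hypothetical walk over a method_list_t (declared
// below):
//
//     for (auto& meth : *mlist) {     // ranged-for via begin()/end()
//         use(meth.name, meth.imp);   // use() is a placeholder
//     }
//
// This stays correct even when newer metadata appends fields to each
// entry, which is the point of the non-fragile layout.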


struct method_t {
    SEL name;
    const char *types;
    IMP imp;

    struct SortBySELAddress :
        public std::binary_function<const method_t&,
                                    const method_t&, bool>
    {
        bool operator() (const method_t& lhs,
                         const method_t& rhs)
        { return lhs.name < rhs.name; }
    };
};
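
// Illustrative sketch (editorial): method list fixup sorts entries by
// selector address so lookup can binary-search on SEL. Assuming mlist is
// a writable method_list_t, a sort using the comparator above would be:
//
//     std::stable_sort(mlist->begin(), mlist->end(),
//                      method_t::SortBySELAddress());
//
// The entsize-aware iterators keep this valid even if each entry is
// larger than sizeof(method_t).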

struct ivar_t {
#if __x86_64__
    // *offset was originally 64-bit on some x86_64 platforms.
    // We read and write only 32 bits of it.
    // Some metadata provides all 64 bits. This is harmless for unsigned
    // little-endian values.
    // Some code uses all 64 bits. class_addIvar() over-allocates the
    // offset for their benefit.
#endif
    int32_t *offset;
    const char *name;
    const char *type;
    // alignment is sometimes -1; use alignment() instead
    uint32_t alignment_raw;
    uint32_t size;

    uint32_t alignment() const {
        if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
        return 1 << alignment_raw;
    }
};
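
// Worked example (editorial): alignment_raw holds log2 of the byte
// alignment, so alignment_raw == 3 means 1<<3 == 8-byte alignment. The
// all-ones sentinel (the "-1" mentioned above) means "default", i.e.
// word alignment: 1 << WORD_SHIFT, 8 bytes on 64-bit and 4 on 32-bit.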

struct property_t {
    const char *name;
    const char *attributes;
};

// Two bits of entsize are used for fixup markers.
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
    bool isFixedUp() const;
    void setFixedUp();

    uint32_t indexOfMethod(const method_t *meth) const {
        // The 8-byte list header is smaller than entsize(), so the
        // truncating division still yields the right index.
        uint32_t i =
            (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
        assert(i < count);
        return i;
    }
};

struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
};

struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};


typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// Values for protocol_t->flags
#define PROTOCOL_FIXED_UP_2 (1<<31)  // must never be set by compiler
#define PROTOCOL_FIXED_UP_1 (1<<30)  // must never be set by compiler

#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)

struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;  // sizeof(protocol_t)
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **extendedMethodTypes;
    const char *_demangledName;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;
    void setFixedUp();

    bool hasExtendedMethodTypesField() const {
        return size >= (offsetof(protocol_t, extendedMethodTypes)
                        + sizeof(extendedMethodTypes));
    }
    bool hasExtendedMethodTypes() const {
        return hasExtendedMethodTypesField() && extendedMethodTypes;
    }
};
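
// Illustrative note (editorial): the size field makes protocol_t
// self-describing, so a newer runtime can read metadata compiled before
// extendedMethodTypes existed: such a binary records a smaller size, and
// hasExtendedMethodTypesField() reports false because size stops short of
// that field's offset plus width (72 + 8 == 80 on a typical LP64 layout).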

struct protocol_list_t {
    // count is 64-bit by accident.
    uintptr_t count;
    protocol_ref_t list[0];  // variable-size

    size_t byteSize() const {
        return sizeof(*this) + count*sizeof(list[0]);
    }

    protocol_list_t *duplicate() const {
        return (protocol_list_t *)memdup(this, this->byteSize());
    }

    typedef protocol_ref_t* iterator;
    typedef const protocol_ref_t* const_iterator;

    const_iterator begin() const {
        return list;
    }
    iterator begin() {
        return list;
    }
    const_iterator end() const {
        return list + count;
    }
    iterator end() {
        return list + count;
    }
};

struct locstamped_category_t {
    category_t *cat;
    struct header_info *hi;
};

struct locstamped_category_list_t {
    uint32_t count;
#if __LP64__
    uint32_t reserved;
#endif
    locstamped_category_t list[0];
};


// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.
// class is a metaclass
#define RO_META               (1<<0)
// class is a root class
#define RO_ROOT               (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS  (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN             (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION          (1<<5)
// this bit is available for reassignment
// #define RO_REUSE_ME        (1<<6)
// class compiled with -fobjc-arc (automatic retain/release)
#define RO_IS_ARR             (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY  (1<<8)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE        (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE             (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED           (1<<31)

// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.
// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED           (1<<31)
// class is unresolved future class
#define RW_FUTURE             (1<<30)
// class is initialized
#define RW_INITIALIZED        (1<<29)
// class is initializing
#define RW_INITIALIZING       (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO          (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING       (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED        (1<<25)
// GC: class has unsafe finalize method
#define RW_FINALIZE_ON_MAIN_THREAD (1<<24)
// class +load has been called
#define RW_LOADED             (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
// available for use
// #define RW_20              (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING          (1<<19)

// NOTE: MORE RW_ FLAGS DEFINED BELOW


// Values for class_rw_t->flags or class_t->bits
// These flags are optimized for retain/release and alloc/dealloc
// 64-bit stores more of them in class_t->bits to reduce pointer indirection.

#if !__LP64__

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note: this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
// not tracked for 32-bit because it only applies to non-pointer isa
// #define RW_REQUIRES_RAW_ISA

// class is a Swift class
#define FAST_IS_SWIFT         (1UL<<0)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR   (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL

#elif 1
// Leaks-compatible version that steals low bits only.

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note: this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)

// class is a Swift class
#define FAST_IS_SWIFT         (1UL<<0)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR   (1UL<<1)
// class's instances require raw isa
#define FAST_REQUIRES_RAW_ISA (1UL<<2)
// data pointer
#define FAST_DATA_MASK        0x00007ffffffffff8UL

#else
// Leaks-incompatible version that steals lots of bits.

// class is a Swift class
#define FAST_IS_SWIFT         (1UL<<0)
// class's instances require raw isa
#define FAST_REQUIRES_RAW_ISA (1UL<<1)
// class or superclass has .cxx_destruct implementation
//   This bit is aligned with isa_t->hasCxxDtor to save an instruction.
#define FAST_HAS_CXX_DTOR     (1UL<<2)
// data pointer
#define FAST_DATA_MASK        0x00007ffffffffff8UL
// class or superclass has .cxx_construct implementation
#define FAST_HAS_CXX_CTOR     (1UL<<47)
// class or superclass has default alloc/allocWithZone: implementation
// Note: this is stored in the metaclass.
#define FAST_HAS_DEFAULT_AWZ  (1UL<<48)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR   (1UL<<49)
// summary bit for fast alloc path: !hasCxxCtor and
//   !requiresRawIsa and instanceSize fits into shiftedSize
#define FAST_ALLOC            (1UL<<50)
// instance size in units of 16 bytes
//   or 0 if the instance size is too big in this field
//   This field must be LAST
#define FAST_SHIFTED_SIZE_SHIFT 51

// FAST_ALLOC means
//   FAST_HAS_CXX_CTOR is not set
//   FAST_REQUIRES_RAW_ISA is not set
//   FAST_SHIFTED_SIZE is not zero
// FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that
// bit is stored on the metaclass.
#define FAST_ALLOC_MASK  (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA)
#define FAST_ALLOC_VALUE (0)

#endif
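
// Illustrative sketch (editorial, not part of the runtime): with the
// "#elif 1" layout above, one uintptr_t packs the class_rw_t pointer and
// the flag bits; decoding is mask-and-test. Hypothetical helper:
//
//     class_rw_t *example_decode(uintptr_t bits)
//     {
//         bool isSwift = bits & FAST_IS_SWIFT;           // bit 0
//         bool rawIsa  = bits & FAST_REQUIRES_RAW_ISA;   // bit 2
//         (void)isSwift; (void)rawIsa;
//         return (class_rw_t *)(bits & FAST_DATA_MASK);  // bits 3-46
//     }
//
// The low three bits are free because class_rw_t allocations are at least
// 8-byte aligned; the high bits are free because user-space pointers fit
// below 2^47 on these platforms.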


struct class_ro_t {
    uint32_t flags;
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;
#endif

    const uint8_t * ivarLayout;

    const char * name;
    method_list_t * baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    method_list_t *baseMethods() const {
        return baseMethodList;
    }
};


/***********************************************************************
* list_array_tt<Element, List>
* Generic implementation for metadata that can be augmented by categories.
*
* Element is the underlying metadata type (e.g. method_t)
* List is the metadata's list type (e.g. method_list_t)
*
* A list_array_tt has one of three values:
* - empty
* - a pointer to a single list
* - an array of pointers to lists
*
* countLists/beginLists/endLists iterate the metadata lists
* count/begin/end iterate the underlying metadata elements
**********************************************************************/
template <typename Element, typename List>
class list_array_tt {
    struct array_t {
        uint32_t count;
        List* lists[0];

        static size_t byteSize(uint32_t count) {
            return sizeof(array_t) + count*sizeof(lists[0]);
        }
        size_t byteSize() {
            return byteSize(count);
        }
    };

protected:
    class iterator {
        List **lists;
        List **listsEnd;
        typename List::iterator m, mEnd;

    public:
        iterator(List **begin, List **end)
            : lists(begin), listsEnd(end)
        {
            if (begin != end) {
                m = (*begin)->begin();
                mEnd = (*begin)->end();
            }
        }

        const Element& operator * () const {
            return *m;
        }
        Element& operator * () {
            return *m;
        }

        bool operator != (const iterator& rhs) const {
            if (lists != rhs.lists) return true;
            if (lists == listsEnd) return false;  // m is undefined
            if (m != rhs.m) return true;
            return false;
        }

        const iterator& operator ++ () {
            assert(m != mEnd);
            m++;
            if (m == mEnd) {
                assert(lists != listsEnd);
                lists++;
                if (lists != listsEnd) {
                    m = (*lists)->begin();
                    mEnd = (*lists)->end();
                }
            }
            return *this;
        }
    };

private:
    union {
        List* list;
        uintptr_t arrayAndFlag;
    };

    bool hasArray() const {
        return arrayAndFlag & 1;
    }

    array_t *array() {
        return (array_t *)(arrayAndFlag & ~1);
    }

    void setArray(array_t *array) {
        arrayAndFlag = (uintptr_t)array | 1;
    }

public:

    uint32_t count() {
        uint32_t result = 0;
        for (auto lists = beginLists(), end = endLists();
             lists != end;
             ++lists)
        {
            result += (*lists)->count;
        }
        return result;
    }

    iterator begin() {
        return iterator(beginLists(), endLists());
    }

    iterator end() {
        List **e = endLists();
        return iterator(e, e);
    }


    uint32_t countLists() {
        if (hasArray()) {
            return array()->count;
        } else if (list) {
            return 1;
        } else {
            return 0;
        }
    }

    List** beginLists() {
        if (hasArray()) {
            return array()->lists;
        } else {
            return &list;
        }
    }

    List** endLists() {
        if (hasArray()) {
            return array()->lists + array()->count;
        } else if (list) {
            return &list + 1;
        } else {
            return &list;
        }
    }

    void attachLists(List* const * addedLists, uint32_t addedCount) {
        if (addedCount == 0) return;

        if (hasArray()) {
            // many lists -> many lists
            uint32_t oldCount = array()->count;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
            array()->count = newCount;
            memmove(array()->lists + addedCount, array()->lists,
                    oldCount * sizeof(array()->lists[0]));
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
        else if (!list  &&  addedCount == 1) {
            // 0 lists -> 1 list
            list = addedLists[0];
        }
        else {
            // 1 list -> many lists
            List* oldList = list;
            uint32_t oldCount = oldList ? 1 : 0;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)malloc(array_t::byteSize(newCount)));
            array()->count = newCount;
            if (oldList) array()->lists[addedCount] = oldList;
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
    }

    void tryFree() {
        if (hasArray()) {
            for (uint32_t i = 0; i < array()->count; i++) {
                try_free(array()->lists[i]);
            }
            try_free(array());
        }
        else if (list) {
            try_free(list);
        }
    }

    template<typename Result>
    Result duplicate() {
        Result result;

        if (hasArray()) {
            array_t *a = array();
            result.setArray((array_t *)memdup(a, a->byteSize()));
            for (uint32_t i = 0; i < a->count; i++) {
                result.array()->lists[i] = a->lists[i]->duplicate();
            }
        } else if (list) {
            result.list = list->duplicate();
        } else {
            result.list = nil;
        }

        return result;
    }
};
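
// Illustrative note (editorial): attachLists() prepends. The memmove/memcpy
// pair slides the existing lists toward the end and copies the added lists
// to the front, so lists attached later (e.g. from categories) are searched
// first; that ordering is what lets a category override a method. A
// hypothetical sequence, with mlist1/mlist2 as method list pointers:
//
//     method_array_t methods;           // zero-filled: empty
//     methods.attachLists(&mlist1, 1);  // 0 lists -> 1 list
//     methods.attachLists(&mlist2, 1);  // 1 list -> array; mlist2 first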


class method_array_t :
    public list_array_tt<method_t, method_list_t>
{
    typedef list_array_tt<method_t, method_list_t> Super;

public:
    method_list_t **beginCategoryMethodLists() {
        return beginLists();
    }

    method_list_t **endCategoryMethodLists(Class cls);

    method_array_t duplicate() {
        return Super::duplicate<method_array_t>();
    }
};


class property_array_t :
    public list_array_tt<property_t, property_list_t>
{
    typedef list_array_tt<property_t, property_list_t> Super;

public:
    property_array_t duplicate() {
        return Super::duplicate<property_array_t>();
    }
};


class protocol_array_t :
    public list_array_tt<protocol_ref_t, protocol_list_t>
{
    typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;

public:
    protocol_array_t duplicate() {
        return Super::duplicate<protocol_array_t>();
    }
};


struct class_rw_t {
    uint32_t flags;
    uint32_t version;

    const class_ro_t *ro;

    method_array_t methods;
    property_array_t properties;
    protocol_array_t protocols;

    Class firstSubclass;
    Class nextSiblingClass;

    char *demangledName;

    void setFlags(uint32_t set)
    {
        OSAtomicOr32Barrier(set, &flags);
    }

    void clearFlags(uint32_t clear)
    {
        // XOR only clears bits that are currently set;
        // callers must guarantee that invariant.
        OSAtomicXor32Barrier(clear, &flags);
    }

    // set and clear must not overlap
    void changeFlags(uint32_t set, uint32_t clear)
    {
        assert((set & clear) == 0);

        uint32_t oldf, newf;
        do {
            oldf = flags;
            newf = (oldf | set) & ~clear;
        } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
    }
};
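
// Illustrative sketch (editorial): changeFlags() is the standard lock-free
// read-modify-write loop: snapshot flags, compute the new word, retry if
// another thread's compare-and-swap won. For example, class registration
// could flip construction state in one atomic step:
//
//     rw->changeFlags(RW_CONSTRUCTED, RW_CONSTRUCTING);
//
// Observers then see either the old word or the new one, never a
// half-updated mix, provided (as the assert requires) set and clear do
// not overlap.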


struct class_data_bits_t {

    // Values are the FAST_ flags above.
    uintptr_t bits;
private:
    bool getBit(uintptr_t bit)
    {
        return bits & bit;
    }

#if FAST_ALLOC
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change)
    {
        if (change & FAST_ALLOC_MASK) {
            if (((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE)  &&
                ((oldBits >> FAST_SHIFTED_SIZE_SHIFT) != 0))
            {
                oldBits |= FAST_ALLOC;
            } else {
                oldBits &= ~FAST_ALLOC;
            }
        }
        return oldBits;
    }
#else
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) {
        return oldBits;
    }
#endif

    void setBits(uintptr_t set)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits | set, set);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

    void clearBits(uintptr_t clear)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits & ~clear, clear);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

public:

    class_rw_t* data() {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }
    void setData(class_rw_t *newData)
    {
        assert(!data()  ||  (newData->flags & (RW_REALIZING | RW_FUTURE)));
        // Set during realization or construction only. No locking needed.
        bits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
    }

    bool hasDefaultRR() {
        return getBit(FAST_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        setBits(FAST_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        clearBits(FAST_HAS_DEFAULT_RR);
    }

#if FAST_HAS_DEFAULT_AWZ
    bool hasDefaultAWZ() {
        return getBit(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        setBits(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        clearBits(FAST_HAS_DEFAULT_AWZ);
    }
#else
    bool hasDefaultAWZ() {
        return data()->flags & RW_HAS_DEFAULT_AWZ;
    }
    void setHasDefaultAWZ() {
        data()->setFlags(RW_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        data()->clearFlags(RW_HAS_DEFAULT_AWZ);
    }
#endif

#if FAST_HAS_CXX_CTOR
    bool hasCxxCtor() {
        return getBit(FAST_HAS_CXX_CTOR);
    }
    void setHasCxxCtor() {
        setBits(FAST_HAS_CXX_CTOR);
    }
#else
    bool hasCxxCtor() {
        return data()->flags & RW_HAS_CXX_CTOR;
    }
    void setHasCxxCtor() {
        data()->setFlags(RW_HAS_CXX_CTOR);
    }
#endif

#if FAST_HAS_CXX_DTOR
    bool hasCxxDtor() {
        return getBit(FAST_HAS_CXX_DTOR);
    }
    void setHasCxxDtor() {
        setBits(FAST_HAS_CXX_DTOR);
    }
#else
    bool hasCxxDtor() {
        return data()->flags & RW_HAS_CXX_DTOR;
    }
    void setHasCxxDtor() {
        data()->setFlags(RW_HAS_CXX_DTOR);
    }
#endif

#if FAST_REQUIRES_RAW_ISA
    bool requiresRawIsa() {
        return getBit(FAST_REQUIRES_RAW_ISA);
    }
    void setRequiresRawIsa() {
        setBits(FAST_REQUIRES_RAW_ISA);
    }
#else
# if SUPPORT_NONPOINTER_ISA
#   error oops
# endif
    bool requiresRawIsa() {
        return true;
    }
    void setRequiresRawIsa() {
        // nothing
    }
#endif

#if FAST_ALLOC
    size_t fastInstanceSize()
    {
        assert(bits & FAST_ALLOC);
        return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16;
    }
    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        assert(data()->flags & RW_REALIZING);

        // Round up to 16-byte boundary, then divide to get 16-byte units
        newSize = ((newSize + 15) & ~15) / 16;

        uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT;
        if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) {
            int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT;
            uintptr_t oldBits = (bits << shift) >> shift;
            if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) {
                newBits |= FAST_ALLOC;
            }
            bits = oldBits | newBits;
        }
    }

    bool canAllocFast() {
        return bits & FAST_ALLOC;
    }
#else
    size_t fastInstanceSize() {
        abort();
    }
    void setFastInstanceSize(size_t) {
        // nothing
    }
    bool canAllocFast() {
        return false;
    }
#endif

    bool isSwift() {
        return getBit(FAST_IS_SWIFT);
    }

    void setIsSwift() {
        setBits(FAST_IS_SWIFT);
    }
};
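
// Worked example (editorial): setFastInstanceSize() stores the size in
// 16-byte units above FAST_SHIFTED_SIZE_SHIFT. For a hypothetical 40-byte
// instance: (40 + 15) & ~15 == 48, then 48 / 16 == 3 is stored, and
// fastInstanceSize() later returns 3 * 16 == 48. The round-trip check
// ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) rejects sizes too
// large for the field; those classes keep a zero size field, never gain
// FAST_ALLOC, and fall back to the slow allocation path.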


struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;           // formerly cache pointer and vtable
    class_data_bits_t bits;  // class_rw_t * plus custom rr/alloc flags

    class_rw_t *data() {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }

    void setInfo(uint32_t set) {
        assert(isFuture()  ||  isRealized());
        data()->setFlags(set);
    }

    void clearInfo(uint32_t clear) {
        assert(isFuture()  ||  isRealized());
        data()->clearFlags(clear);
    }

    // set and clear must not overlap
    void changeInfo(uint32_t set, uint32_t clear) {
        assert(isFuture()  ||  isRealized());
        assert((set & clear) == 0);
        data()->changeFlags(set, clear);
    }

    bool hasCustomRR() {
        return ! bits.hasDefaultRR();
    }
    void setHasDefaultRR() {
        assert(isInitializing());
        bits.setHasDefaultRR();
    }
    void setHasCustomRR(bool inherited = false);
    void printCustomRR(bool inherited);

    bool hasCustomAWZ() {
        return ! bits.hasDefaultAWZ();
    }
    void setHasDefaultAWZ() {
        assert(isInitializing());
        bits.setHasDefaultAWZ();
    }
    void setHasCustomAWZ(bool inherited = false);
    void printCustomAWZ(bool inherited);

    bool requiresRawIsa() {
        return bits.requiresRawIsa();
    }
    void setRequiresRawIsa(bool inherited = false);
    void printRequiresRawIsa(bool inherited);

    bool canAllocIndexed() {
        assert(!isFuture());
        return !requiresRawIsa();
    }
    bool canAllocFast() {
        assert(!isFuture());
        return bits.canAllocFast();
    }


    bool hasCxxCtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxCtor();
    }
    void setHasCxxCtor() {
        bits.setHasCxxCtor();
    }

    bool hasCxxDtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxDtor();
    }
    void setHasCxxDtor() {
        bits.setHasCxxDtor();
    }


    bool isSwift() {
        return bits.isSwift();
    }


#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture()  ||  isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture()  ||  isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif

    bool shouldGrowCache() {
        return true;
    }

    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }

    bool shouldFinalizeOnMainThread() {
        // finishInitializing() propagates this flag from the superclass.
        assert(isRealized());
        return data()->flags & RW_FINALIZE_ON_MAIN_THREAD;
    }

    void setShouldFinalizeOnMainThread() {
        assert(isRealized());
        setInfo(RW_FINALIZE_ON_MAIN_THREAD);
    }

    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    void setInitializing() {
        assert(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();

    bool isLoadable() {
        assert(isRealized());
        return true;  // any class registered for +load is definitely loadable
    }

    IMP getLoadMethod();

    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() {
        return data()->flags & RW_REALIZED;
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() {
        return data()->flags & RW_FUTURE;
    }

    bool isMetaClass() {
        assert(this);
        assert(isRealized());
        return data()->ro->flags & RO_META;
    }

    // NOT identical to this->ISA when this is a metaclass
    Class getMeta() {
        if (isMetaClass()) return (Class)this;
        else return this->ISA();
    }

    bool isRootClass() {
        return superclass == nil;
    }
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }

    const char *mangledName() {
        // fixme can't assert locks here
        assert(this);

        if (isRealized()  ||  isFuture()) {
            return data()->ro->name;
        } else {
            return ((const class_ro_t *)data())->name;
        }
    }

    const char *demangledName(bool realize = false);
    const char *nameForLogging();

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() {
        assert(isRealized());
        return data()->ro->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() {
        return word_align(unalignedInstanceSize());
    }

    size_t instanceSize(size_t extraBytes) {
        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }

    void setInstanceSize(uint32_t newSize) {
        assert(isRealized());
        if (newSize != data()->ro->instanceSize) {
            assert(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
        }
        bits.setFastInstanceSize(newSize);
    }
};
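
// Worked example (editorial): for a hypothetical 64-bit class whose ivars
// end at byte 20, unalignedInstanceSize() is 20, alignedInstanceSize()
// rounds up to 24, and instanceSize(0) returns 24. A class with only the
// isa pointer yields 8, which the 16-byte CF minimum bumps to 16:
//
//     size_t size = cls->instanceSize(extraBytes);  // what allocation uses
//     id obj = (id)calloc(1, size);                 // simplified sketch
//
// The real path also consults canAllocIndexed()/canAllocFast() before
// deciding how to initialize the isa.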


struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;
    // ...

    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};


struct category_t {
    const char *name;
    classref_t cls;
    struct method_list_t *instanceMethods;
    struct method_list_t *classMethods;
    struct protocol_list_t *protocols;
    struct property_list_t *instanceProperties;

    method_list_t *methodsForMeta(bool isMeta) {
        if (isMeta) return classMethods;
        else return instanceMethods;
    }

    property_list_t *propertiesForMeta(bool isMeta) {
        if (isMeta) return nil;  // classProperties;
        else return instanceProperties;
    }
};

struct objc_super2 {
    id receiver;
    Class current_class;
};

struct message_ref_t {
    IMP imp;
    SEL sel;
};


extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);

static inline void
foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class))
{
    // runtimeLock.assertWriting();
    assert(top);
    Class cls = top;
    while (1) {
        if (!code(cls)) break;

        if (cls->data()->firstSubclass) {
            cls = cls->data()->firstSubclass;
        } else {
            while (!cls->data()->nextSiblingClass  &&  cls != top) {
                cls = cls->superclass;
            }
            if (cls == top) break;
            cls = cls->data()->nextSiblingClass;
        }
    }
}

static inline void
foreach_realized_class_and_subclass(Class top, void (^code)(Class))
{
    foreach_realized_class_and_subclass_2(top, ^bool(Class cls) {
        code(cls); return true;
    });
}

#endif