/*
 * Source provenance: mirror of apple/objc4.git, runtime/objc-runtime-new.h
 * (blob 5825de7e2f9bd5ab52c54232bbe9f31ebe99486b, via git.saurik.com).
 */
1 /*
2 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #ifndef _OBJC_RUNTIME_NEW_H
25 #define _OBJC_RUNTIME_NEW_H
26
27 __BEGIN_DECLS
28
// mask_t is the integer type used for the method cache's mask and
// occupancy fields (see cache_t below).
#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bits
#else
typedef uint16_t mask_t;
#endif
// Method cache key. Presumably a SEL stored as an integer
// (see bucket_t::_key) -- NOTE(review): confirm against cache fill code.
typedef uintptr_t cache_key_t;

struct swift_class_t;  // forward declaration; defined later in this file
37
38
// One entry of a method cache: a key and the IMP it maps to.
// NOTE(review): the mask_t comment above implies this layout is read by
// hand-written assembly fast paths; do not reorder fields -- confirm
// against the runtime's asm before changing.
struct bucket_t {
private:
    cache_key_t _key;
    IMP _imp;

public:
    inline cache_key_t key() const { return _key; }
    inline IMP imp() const { return (IMP)_imp; }
    inline void setKey(cache_key_t newKey) { _key = newKey; }
    inline void setImp(IMP newImp) { _imp = newImp; }

    // Sets both fields together; defined out of line.
    void set(cache_key_t newKey, IMP newImp);
};
52
53
// Per-class method cache: a table of bucket_t plus mask and occupancy.
// All non-trivial operations (lookup, growth) are defined out of line.
struct cache_t {
    struct bucket_t *_buckets;
    mask_t _mask;      // NOTE(review): presumably capacity-1 (see capacity()); confirm
    mask_t _occupied;  // number of filled buckets

public:
    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void setEmpty();

    mask_t capacity();
    bool canBeFreed();

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void expand();
    void reallocate(mask_t oldCapacity, mask_t newCapacity);
    struct bucket_t * find(cache_key_t key);

    // Diagnose a corrupt cache and abort; never returns.
    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn));
};
79
80
81 // classref_t is unremapped class_t*
82 typedef struct classref * classref_t;
83
84 struct method_t {
85 SEL name;
86 const char *types;
87 IMP imp;
88
89 struct SortBySELAddress :
90 public std::binary_function<const method_t&,
91 const method_t&, bool>
92 {
93 bool operator() (const method_t& lhs,
94 const method_t& rhs)
95 { return lhs.name < rhs.name; }
96 };
97 };
98
// Variable-length list of method_t, prefixed by entry size and count.
// Entries may be larger than sizeof(method_t), so all indexing must use
// getEntsize(); the two low bits of entsize_NEVER_USE are fixup markers
// and are masked off.
struct method_list_t {
    uint32_t entsize_NEVER_USE;  // high bits used for fixup markers
    uint32_t count;
    method_t first;              // first entry; the rest follow contiguously

    // Entry size with the low fixup-marker bits stripped.
    uint32_t getEntsize() const {
        return entsize_NEVER_USE & ~(uint32_t)3;
    }
    uint32_t getCount() const {
        return count;
    }
    // i == count yields the one-past-the-end entry (used by end()).
    method_t& getOrEnd(uint32_t i) const {
        assert(i <= count);
        return *(method_t *)((uint8_t *)&first + i*getEntsize());
    }
    method_t& get(uint32_t i) const {
        assert(i < count);
        return getOrEnd(i);
    }

    // iterate methods, taking entsize into account
    // fixme need a proper const_iterator
    struct method_iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        method_t* method;

        typedef std::random_access_iterator_tag iterator_category;
        typedef method_t value_type;
        typedef ptrdiff_t difference_type;
        typedef method_t* pointer;
        typedef method_t& reference;

        method_iterator() { }

        method_iterator(const method_list_t& mlist, uint32_t start = 0)
            : entsize(mlist.getEntsize())
            , index(start)
            , method(&mlist.getOrEnd(start))
        { }

        // Advance by delta entries of `entsize` bytes each.
        const method_iterator& operator += (ptrdiff_t delta) {
            method = (method_t*)((uint8_t *)method + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const method_iterator& operator -= (ptrdiff_t delta) {
            method = (method_t*)((uint8_t *)method - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const method_iterator operator + (ptrdiff_t delta) const {
            return method_iterator(*this) += delta;
        }
        const method_iterator operator - (ptrdiff_t delta) const {
            return method_iterator(*this) -= delta;
        }

        method_iterator& operator ++ () { *this += 1; return *this; }
        method_iterator& operator -- () { *this -= 1; return *this; }
        method_iterator operator ++ (int) {
            method_iterator result(*this); *this += 1; return result;
        }
        method_iterator operator -- (int) {
            method_iterator result(*this); *this -= 1; return result;
        }

        // Distance in entries, using the cached index (no division).
        ptrdiff_t operator - (const method_iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        method_t& operator * () const { return *method; }
        method_t* operator -> () const { return method; }

        operator method_t& () const { return *method; }

        bool operator == (const method_iterator& rhs) {
            return this->method == rhs.method;
        }
        bool operator != (const method_iterator& rhs) {
            return this->method != rhs.method;
        }

        bool operator < (const method_iterator& rhs) {
            return this->method < rhs.method;
        }
        bool operator > (const method_iterator& rhs) {
            return this->method > rhs.method;
        }
    };

    method_iterator begin() const { return method_iterator(*this, 0); }
    method_iterator end() const { return method_iterator(*this, getCount()); }

};
194
195 struct ivar_t {
196 #if __x86_64__
197 // *offset was originally 64-bit on some x86_64 platforms.
198 // We read and write only 32 bits of it.
199 // Some metadata provides all 64 bits. This is harmless for unsigned
200 // little-endian values.
201 // Some code uses all 64 bits. class_addIvar() over-allocates the
202 // offset for their benefit.
203 #endif
204 int32_t *offset;
205 const char *name;
206 const char *type;
207 // alignment is sometimes -1; use alignment() instead
208 uint32_t alignment_raw;
209 uint32_t size;
210
211 uint32_t alignment() {
212 if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
213 return 1 << alignment_raw;
214 }
215 };
216
// Variable-length list of ivar_t (same layout pattern as method_list_t).
struct ivar_list_t {
    uint32_t entsize;  // size of each entry in bytes
    uint32_t count;
    ivar_t first;      // first entry; the rest follow contiguously
};

// A declared property: name plus its attribute string.
struct property_t {
    const char *name;
    const char *attributes;
};

// Variable-length list of property_t.
struct property_list_t {
    uint32_t entsize;
    uint32_t count;
    property_t first;
};

typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// NOTE(review): (1<<31) shifts into the sign bit of int; (1U<<31) would
// be cleaner when tested against the uint32_t flags field.
#define PROTOCOL_FIXED_UP (1<<31)  // must never be set by compiler
237
// Runtime representation of an @protocol. Everything up to and including
// extendedMethodTypes may come from the binary; `size` records how much
// of the struct the compiler actually emitted, so access to newer fields
// must be guarded (see hasExtendedMethodTypesField()).
struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;  // protocols this protocol adopts
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t)
    uint32_t flags;  // PROTOCOL_FIXED_UP etc.
    const char **extendedMethodTypes;

    // Fields below this point are allocated at runtime
    // and are not present on disk.
    const char *_demangledName;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const {
        return flags & PROTOCOL_FIXED_UP;
    }

    // True if the emitted struct is large enough to contain the
    // extendedMethodTypes field (older binaries may stop short of it).
    bool hasExtendedMethodTypesField() const {
        return size >= (offsetof(protocol_t, extendedMethodTypes)
                        + sizeof(extendedMethodTypes));
    }
    bool hasExtendedMethodTypes() const {
        return hasExtendedMethodTypesField() && extendedMethodTypes;
    }
};
272
// Variable-length list of (unremapped) protocol references.
struct protocol_list_t {
    // count is 64-bit by accident.
    uintptr_t count;
    protocol_ref_t list[0];  // variable-size
};
278
// Compiler-emitted, read-only class metadata. Field order is part of the
// ABI; do not reorder.
struct class_ro_t {
    uint32_t flags;      // RO_* flags below
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;   // alignment padding on 64-bit
#endif

    const uint8_t * ivarLayout;

    const char * name;
    const method_list_t * baseMethods;
    const protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    const property_list_t *baseProperties;
};
297
298 struct class_rw_t {
299 uint32_t flags;
300 uint32_t version;
301
302 const class_ro_t *ro;
303
304 union {
305 method_list_t **method_lists; // RW_METHOD_ARRAY == 1
306 method_list_t *method_list; // RW_METHOD_ARRAY == 0
307 };
308 struct chained_property_list *properties;
309 const protocol_list_t ** protocols;
310
311 Class firstSubclass;
312 Class nextSiblingClass;
313
314 char *demangledName;
315
316 void setFlags(uint32_t set)
317 {
318 OSAtomicOr32Barrier(set, &flags);
319 }
320
321 void clearFlags(uint32_t clear)
322 {
323 OSAtomicXor32Barrier(clear, &flags);
324 }
325
326 // set and clear must not overlap
327 void changeFlags(uint32_t set, uint32_t clear)
328 {
329 assert((set & clear) == 0);
330
331 uint32_t oldf, newf;
332 do {
333 oldf = flags;
334 newf = (oldf | set) & ~clear;
335 } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
336 }
337 };
338
339
340 // class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
341 // The extra bits are optimized for the retain/release and alloc/dealloc paths.
342
343 // Values for class_ro_t->flags
344 // These are emitted by the compiler and are part of the ABI.
345 // class is a metaclass
346 #define RO_META (1<<0)
347 // class is a root class
348 #define RO_ROOT (1<<1)
349 // class has .cxx_construct/destruct implementations
350 #define RO_HAS_CXX_STRUCTORS (1<<2)
351 // class has +load implementation
352 // #define RO_HAS_LOAD_METHOD (1<<3)
353 // class has visibility=hidden set
354 #define RO_HIDDEN (1<<4)
355 // class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
356 #define RO_EXCEPTION (1<<5)
357 // this bit is available for reassignment
358 // #define RO_REUSE_ME (1<<6)
359 // class compiled with -fobjc-arc (automatic retain/release)
360 #define RO_IS_ARR (1<<7)
361 // class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
362 #define RO_HAS_CXX_DTOR_ONLY (1<<8)
363
364 // class is in an unloadable bundle - must never be set by compiler
365 #define RO_FROM_BUNDLE (1<<29)
366 // class is unrealized future class - must never be set by compiler
367 #define RO_FUTURE (1<<30)
368 // class is realized - must never be set by compiler
369 #define RO_REALIZED (1<<31)
370
371 // Values for class_rw_t->flags
372 // These are not emitted by the compiler and are never used in class_ro_t.
373 // Their presence should be considered in future ABI versions.
374 // class_t->data is class_rw_t, not class_ro_t
375 #define RW_REALIZED (1<<31)
376 // class is unresolved future class
377 #define RW_FUTURE (1<<30)
378 // class is initialized
379 #define RW_INITIALIZED (1<<29)
380 // class is initializing
381 #define RW_INITIALIZING (1<<28)
382 // class_rw_t->ro is heap copy of class_ro_t
383 #define RW_COPIED_RO (1<<27)
384 // class allocated but not yet registered
385 #define RW_CONSTRUCTING (1<<26)
386 // class allocated and registered
387 #define RW_CONSTRUCTED (1<<25)
388 // GC: class has unsafe finalize method
389 #define RW_FINALIZE_ON_MAIN_THREAD (1<<24)
390 // class +load has been called
391 #define RW_LOADED (1<<23)
392 #if !SUPPORT_NONPOINTER_ISA
393 // class instances may have associative references
394 #define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
395 #endif
396 // class has instance-specific GC layout
397 #define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
398 // class's method list is an array of method lists
399 #define RW_METHOD_ARRAY (1<<20)
400 // class has started realizing but not yet completed it
401 #define RW_REALIZING (1<<19)
402
403 // NOTE: MORE RW_ FLAGS DEFINED BELOW
404
405
406 // Values for class_rw_t->flags or class_t->bits
407 // These flags are optimized for retain/release and alloc/dealloc
408 // 64-bit stores more of them in class_t->bits to reduce pointer indirection.
409
410 #if !__LP64__
411
412 // class or superclass has .cxx_construct implementation
413 #define RW_HAS_CXX_CTOR (1<<18)
414 // class or superclass has .cxx_destruct implementation
415 #define RW_HAS_CXX_DTOR (1<<17)
416 // class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
418 #define RW_HAS_DEFAULT_AWZ (1<<16)
419 // class's instances requires raw isa
420 // not tracked for 32-bit because it only applies to non-pointer isa
421 // #define RW_REQUIRES_RAW_ISA
422
423 // class is a Swift class
424 #define FAST_IS_SWIFT (1UL<<0)
425 // class or superclass has default retain/release/autorelease/retainCount/
426 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
427 #define FAST_HAS_DEFAULT_RR (1UL<<1)
428 // data pointer
429 #define FAST_DATA_MASK 0xfffffffcUL
430
431 #elif 1
432 // Leaks-compatible version that steals low bits only.
433
434 // class or superclass has .cxx_construct implementation
435 #define RW_HAS_CXX_CTOR (1<<18)
436 // class or superclass has .cxx_destruct implementation
437 #define RW_HAS_CXX_DTOR (1<<17)
438 // class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
440 #define RW_HAS_DEFAULT_AWZ (1<<16)
441
442 // class is a Swift class
443 #define FAST_IS_SWIFT (1UL<<0)
444 // class or superclass has default retain/release/autorelease/retainCount/
445 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
446 #define FAST_HAS_DEFAULT_RR (1UL<<1)
447 // class's instances requires raw isa
448 #define FAST_REQUIRES_RAW_ISA (1UL<<2)
449 // data pointer
450 #define FAST_DATA_MASK 0x00007ffffffffff8UL
451
452 #else
453 // Leaks-incompatible version that steals lots of bits.
454
455 // class is a Swift class
456 #define FAST_IS_SWIFT (1UL<<0)
457 // class's instances requires raw isa
458 #define FAST_REQUIRES_RAW_ISA (1UL<<1)
459 // class or superclass has .cxx_destruct implementation
460 // This bit is aligned with isa_t->hasCxxDtor to save an instruction.
461 #define FAST_HAS_CXX_DTOR (1UL<<2)
462 // data pointer
463 #define FAST_DATA_MASK 0x00007ffffffffff8UL
464 // class or superclass has .cxx_construct implementation
465 #define FAST_HAS_CXX_CTOR (1UL<<47)
466 // class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
468 #define FAST_HAS_DEFAULT_AWZ (1UL<<48)
469 // class or superclass has default retain/release/autorelease/retainCount/
470 // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
471 #define FAST_HAS_DEFAULT_RR (1UL<<49)
472 // summary bit for fast alloc path: !hasCxxCtor and
473 // !requiresRawIsa and instanceSize fits into shiftedSize
474 #define FAST_ALLOC (1UL<<50)
475 // instance size in units of 16 bytes
476 // or 0 if the instance size is too big in this field
477 // This field must be LAST
478 #define FAST_SHIFTED_SIZE_SHIFT 51
479
480 // FAST_ALLOC means
// FAST_HAS_CXX_CTOR is not set
482 // FAST_REQUIRES_RAW_ISA is not set
483 // FAST_SHIFTED_SIZE is not zero
484 // FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that
485 // bit is stored on the metaclass.
486 #define FAST_ALLOC_MASK (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA)
487 #define FAST_ALLOC_VALUE (0)
488
489 #endif
490
491
// class_data_bits_t is the objc_class->data field: a class_rw_t pointer
// packed together with the FAST_* flag bits defined above. Flag mutation
// uses load/store-exclusive loops because the field is read concurrently.
struct class_data_bits_t {

    // Values are the FAST_ flags above.
    uintptr_t bits;
private:
    bool getBit(uintptr_t bit)
    {
        return bits & bit;
    }

#if FAST_ALLOC
    // Recompute the FAST_ALLOC summary bit after `change` has been folded
    // into oldBits: FAST_ALLOC is set only when no FAST_ALLOC_MASK bits
    // are set and the shifted instance size is nonzero.
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change)
    {
        if (change & FAST_ALLOC_MASK) {
            if (((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) &&
                ((oldBits >> FAST_SHIFTED_SIZE_SHIFT) != 0))
            {
                oldBits |= FAST_ALLOC;
            } else {
                oldBits &= ~FAST_ALLOC;
            }
        }
        return oldBits;
    }
#else
    // No FAST_ALLOC bit on this configuration; nothing to maintain.
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) {
        return oldBits;
    }
#endif

    // Atomically OR bits in, keeping FAST_ALLOC consistent.
    void setBits(uintptr_t set)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits | set, set);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

    // Atomically clear bits, keeping FAST_ALLOC consistent.
    void clearBits(uintptr_t clear)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits & ~clear, clear);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

public:

    // The class_rw_t pointer stored in the masked-off data bits.
    class_rw_t* data() {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }
    void setData(class_rw_t *newData)
    {
        assert(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE)));
        // Set during realization or construction only. No locking needed.
        bits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
    }

    // RR = retain/release family; stored as a "has default" bit.
    bool hasDefaultRR() {
        return getBit(FAST_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        setBits(FAST_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        clearBits(FAST_HAS_DEFAULT_RR);
    }

    // AWZ = alloc/allocWithZone:. Stored in bits when the configuration
    // has a FAST_ bit for it, otherwise in class_rw_t->flags.
#if FAST_HAS_DEFAULT_AWZ
    bool hasDefaultAWZ() {
        return getBit(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        setBits(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        clearBits(FAST_HAS_DEFAULT_AWZ);
    }
#else
    bool hasDefaultAWZ() {
        return data()->flags & RW_HAS_DEFAULT_AWZ;
    }
    void setHasDefaultAWZ() {
        data()->setFlags(RW_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        data()->clearFlags(RW_HAS_DEFAULT_AWZ);
    }
#endif

#if FAST_HAS_CXX_CTOR
    bool hasCxxCtor() {
        return getBit(FAST_HAS_CXX_CTOR);
    }
    void setHasCxxCtor() {
        setBits(FAST_HAS_CXX_CTOR);
    }
#else
    bool hasCxxCtor() {
        return data()->flags & RW_HAS_CXX_CTOR;
    }
    void setHasCxxCtor() {
        data()->setFlags(RW_HAS_CXX_CTOR);
    }
#endif

#if FAST_HAS_CXX_DTOR
    bool hasCxxDtor() {
        return getBit(FAST_HAS_CXX_DTOR);
    }
    void setHasCxxDtor() {
        setBits(FAST_HAS_CXX_DTOR);
    }
#else
    bool hasCxxDtor() {
        return data()->flags & RW_HAS_CXX_DTOR;
    }
    void setHasCxxDtor() {
        data()->setFlags(RW_HAS_CXX_DTOR);
    }
#endif

#if FAST_REQUIRES_RAW_ISA
    bool requiresRawIsa() {
        return getBit(FAST_REQUIRES_RAW_ISA);
    }
    void setRequiresRawIsa() {
        setBits(FAST_REQUIRES_RAW_ISA);
    }
#else
# if SUPPORT_NONPOINTER_ISA
# error oops
# endif
    // Without non-pointer isa support, every class uses a raw isa.
    bool requiresRawIsa() {
        return true;
    }
    void setRequiresRawIsa() {
        // nothing
    }
#endif

#if FAST_ALLOC
    // Instance size in bytes for the fast alloc path.
    // Valid only when the FAST_ALLOC summary bit is set.
    size_t fastInstanceSize()
    {
        assert(bits & FAST_ALLOC);
        return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16;
    }
    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        assert(data()->flags & RW_REALIZING);

        // Round up to 16-byte boundary, then divide to get 16-byte units
        newSize = ((newSize + 15) & ~15) / 16;

        uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT;
        if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) {
            // Size fits in the field: strip the old size bits and
            // recompute FAST_ALLOC. If it doesn't fit, the size field
            // stays zero and the slow path is used instead.
            int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT;
            uintptr_t oldBits = (bits << shift) >> shift;
            if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) {
                newBits |= FAST_ALLOC;
            }
            bits = oldBits | newBits;
        }
    }

    bool canAllocFast() {
        return bits & FAST_ALLOC;
    }
#else
    // Fast alloc is unsupported on this configuration.
    size_t fastInstanceSize() {
        abort();
    }
    void setFastInstanceSize(size_t) {
        // nothing
    }
    bool canAllocFast() {
        return false;
    }
#endif

    bool isSwift() {
        return getBit(FAST_IS_SWIFT);
    }

    void setIsSwift() {
        setBits(FAST_IS_SWIFT);
    }
};
685
686
// The runtime's class object. Inherits isa from objc_object; most state
// lives behind bits (class_rw_t / class_ro_t).
struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;           // formerly cache pointer and vtable
    class_data_bits_t bits;  // class_rw_t * plus custom rr/alloc flags

    class_rw_t *data() {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }

    // Atomically set RW_* flags in class_rw_t.
    void setInfo(uint32_t set) {
        assert(isFuture() || isRealized());
        data()->setFlags(set);
    }

    void clearInfo(uint32_t clear) {
        assert(isFuture() || isRealized());
        data()->clearFlags(clear);
    }

    // set and clear must not overlap
    void changeInfo(uint32_t set, uint32_t clear) {
        assert(isFuture() || isRealized());
        assert((set & clear) == 0);
        data()->changeFlags(set, clear);
    }

    // RR = retain/release family; "custom" is the inverse of the stored
    // "default" bit.
    bool hasCustomRR() {
        return ! bits.hasDefaultRR();
    }
    void setHasDefaultRR() {
        assert(isInitializing());
        bits.setHasDefaultRR();
    }
    void setHasCustomRR(bool inherited = false);
    void printCustomRR(bool inherited);

    // AWZ = alloc/allocWithZone:.
    bool hasCustomAWZ() {
        return ! bits.hasDefaultAWZ();
    }
    void setHasDefaultAWZ() {
        assert(isInitializing());
        bits.setHasDefaultAWZ();
    }
    void setHasCustomAWZ(bool inherited = false);
    void printCustomAWZ(bool inherited);

    bool requiresRawIsa() {
        return bits.requiresRawIsa();
    }
    void setRequiresRawIsa(bool inherited = false);
    void printRequiresRawIsa(bool inherited);

    bool canAllocIndexed() {
        return !requiresRawIsa();
    }
    bool canAllocFast() {
        return bits.canAllocFast();
    }


    bool hasCxxCtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxCtor();
    }
    void setHasCxxCtor() {
        bits.setHasCxxCtor();
    }

    bool hasCxxDtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxDtor();
    }
    void setHasCxxDtor() {
        bits.setHasCxxDtor();
    }


    bool isSwift() {
        return bits.isSwift();
    }


#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture() || isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture() || isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif

    bool shouldGrowCache() {
        return true;
    }

    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }

    bool shouldFinalizeOnMainThread() {
        // finishInitializing() propagates this flag from the superclass.
        assert(isRealized());
        return data()->flags & RW_FINALIZE_ON_MAIN_THREAD;
    }

    void setShouldFinalizeOnMainThread() {
        assert(isRealized());
        setInfo(RW_FINALIZE_ON_MAIN_THREAD);
    }

    // +initialize state is tracked on the metaclass (hence getMeta()).
    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    void setInitializing() {
        assert(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();

    bool isLoadable() {
        assert(isRealized());
        return true;  // any class registered for +load is definitely loadable
    }

    IMP getLoadMethod();

    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() {
        return data()->flags & RW_REALIZED;
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() {
        return data()->flags & RW_FUTURE;
    }

    bool isMetaClass() {
        assert(this);
        assert(isRealized());
        return data()->ro->flags & RO_META;
    }

    // NOT identical to this->ISA when this is a metaclass
    Class getMeta() {
        if (isMetaClass()) return (Class)this;
        else return this->ISA();
    }

    bool isRootClass() {
        return superclass == nil;
    }
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }

    const char *mangledName() {
        // fixme can't assert locks here
        assert(this);

        // Before realization data() still points at the read-only
        // class_ro_t, so read the name through that type instead.
        if (isRealized() || isFuture()) {
            return data()->ro->name;
        } else {
            return ((const class_ro_t *)data())->name;
        }
    }

    const char *demangledName(bool realize = false);
    const char *nameForLogging();

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() {
        assert(isRealized());
        return data()->ro->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() {
        return word_align(unalignedInstanceSize());
    }

    size_t instanceSize(size_t extraBytes) {
        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }

    void setInstanceSize(uint32_t newSize) {
        assert(isRealized());
        // Writing requires a heap-copied ro (RW_COPIED_RO); the original
        // compiler-emitted ro is read-only.
        if (newSize != data()->ro->instanceSize) {
            assert(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
        }
        bits.setFastInstanceSize(newSize);
    }
};
903
904
// Prefix of Swift's class object layout: the Objective-C class header
// followed by Swift-specific fields. Only a prefix is declared ("...").
struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;
    // ...

    // Start of the full Swift class object, which begins
    // classAddressOffset bytes before `this`.
    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};
921
922
// Compiler-emitted category metadata.
struct category_t {
    const char *name;
    classref_t cls;  // the extended class (unremapped classref)
    struct method_list_t *instanceMethods;
    struct method_list_t *classMethods;
    struct protocol_list_t *protocols;
    struct property_list_t *instanceProperties;
};

// Receiver plus a class for super dispatch.
// NOTE(review): the field name suggests the *current* class rather than
// the superclass (unlike objc_super) -- confirm against objc_msgSendSuper2.
struct objc_super2 {
    id receiver;
    Class current_class;
};

// An IMP/SEL pair. NOTE(review): usage is not visible in this header;
// presumably the cached message-ref used by the fixup dispatch path.
struct message_ref_t {
    IMP imp;
    SEL sel;
};
941
942
943 extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
944
// Depth-first, pre-order traversal of `top` and all of its subclasses,
// following class_rw_t's firstSubclass/nextSiblingClass links.
// The visitor `code` returns false to stop the entire traversal early.
// The commented-out assertion suggests the caller must hold runtimeLock
// for writing so the subclass lists stay stable -- NOTE(review): confirm.
static inline void
foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class))
{
    // rwlock_assert_writing(&runtimeLock);
    assert(top);
    Class cls = top;
    while (1) {
        if (!code(cls)) break;

        if (cls->data()->firstSubclass) {
            // Descend to the first child.
            cls = cls->data()->firstSubclass;
        } else {
            // No children: climb until a sibling exists or we are back
            // at the root.
            while (!cls->data()->nextSiblingClass && cls != top) {
                cls = cls->superclass;
            }
            if (cls == top) break;
            cls = cls->data()->nextSiblingClass;
        }
    }
}
965
966 static inline void
967 foreach_realized_class_and_subclass(Class top, void (^code)(Class))
968 {
969 foreach_realized_class_and_subclass_2(top, ^bool(Class cls) {
970 code(cls); return true;
971 });
972 }
973
974 __END_DECLS
975
976 #endif