/*
 * Copyright (c) 2005-2007 Apple Inc.  All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H
#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bits
#else
typedef uint16_t mask_t;
#endif

typedef uintptr_t cache_key_t;
struct bucket_t {
private:
    cache_key_t _key;
    IMP _imp;

public:
    inline cache_key_t key() const { return _key; }
    inline IMP imp() const { return (IMP)_imp; }
    inline void setKey(cache_key_t newKey) { _key = newKey; }
    inline void setImp(IMP newImp) { _imp = newImp; }

    void set(cache_key_t newKey, IMP newImp);
};
struct cache_t {
    struct bucket_t *_buckets;
    mask_t _mask;
    mask_t _occupied;

public:
    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    bool isConstantEmptyCache();

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void reallocate(mask_t oldCapacity, mask_t newCapacity);
    struct bucket_t * find(cache_key_t key, id receiver);

    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn));
};
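// Illustrative sketch (not part of this header): roughly how cache_t::find()
// probes _buckets with a mask-based hash. The hash (key & mask) and the
// linear-probe step are assumptions for illustration; the real lookup and
// its assembly fast paths live in objc-cache.mm and the objc-msg-*.s files.
//
//   bucket_t *find_sketch(bucket_t *buckets, mask_t mask, cache_key_t key)
//   {
//       mask_t begin = (mask_t)(key & mask);   // assumed hash function
//       mask_t i = begin;
//       do {
//           if (buckets[i].key() == 0  ||  buckets[i].key() == key) {
//               return &buckets[i];            // empty slot or hit
//           }
//           i = (i+1) & mask;                  // assumed wraparound probe
//       } while (i != begin);
//       bad_cache(nil, (SEL)key, nil);         // full cache: fail hard
//   }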
// classref_t is unremapped class_t*
typedef struct classref * classref_t;
/***********************************************************************
* entsize_list_tt<Element, List, FlagMask>
* Generic implementation of an array of non-fragile structs.
*
* Element is the struct type (e.g. method_t)
* List is the specialization of entsize_list_tt (e.g. method_list_t)
* FlagMask is used to stash extra bits in the entsize field
*   (e.g. method list fixup markers)
**********************************************************************/
template <typename Element, typename List, uint32_t FlagMask>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;
    uint32_t count;
    Element first;

    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    Element& getOrEnd(uint32_t i) const {
        assert(i <= count);
        return *(Element *)((uint8_t *)&first + i*entsize());
    }
    Element& get(uint32_t i) const {
        assert(i < count);
        return getOrEnd(i);
    }

    size_t byteSize() const {
        return sizeof(*this) + (count-1)*entsize();
    }

    List *duplicate() const {
        return (List *)memdup(this, this->byteSize());
    }

    struct iterator;
    const iterator begin() const {
        return iterator(*static_cast<const List*>(this), 0);
    }
    iterator begin() {
        return iterator(*static_cast<const List*>(this), 0);
    }
    const iterator end() const {
        return iterator(*static_cast<const List*>(this), count);
    }
    iterator end() {
        return iterator(*static_cast<const List*>(this), count);
    }
    struct iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};
struct method_t {
    SEL name;
    const char *types;
    IMP imp;

    struct SortBySELAddress :
        public std::binary_function<const method_t&,
                                    const method_t&, bool>
    {
        bool operator() (const method_t& lhs,
                         const method_t& rhs)
        { return lhs.name < rhs.name; }
    };
};
struct ivar_t {
#if __x86_64__
    // *offset was originally 64-bit on some x86_64 platforms.
    // We read and write only 32 bits of it.
    // Some metadata provides all 64 bits. This is harmless for unsigned
    // little-endian values.
    // Some code uses all 64 bits. class_addIvar() over-allocates the
    // offset for their benefit.
#endif
    int32_t *offset;
    const char *name;
    const char *type;
    // alignment is sometimes -1; use alignment() instead
    uint32_t alignment_raw;
    uint32_t size;

    uint32_t alignment() const {
        if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
        return 1 << alignment_raw;
    }
};
struct property_t {
    const char *name;
    const char *attributes;
};
// Two bits of entsize are used for fixup markers.
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
    bool isFixedUp() const;
    void setFixedUp();

    uint32_t indexOfMethod(const method_t *meth) const {
        uint32_t i =
            (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
        assert(i < count);
        return i;
    }
};
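// Usage sketch (illustrative only): entsize_list_tt gives method_list_t its
// count, entsize-strided get(), and random-access iterators, so a list can
// be walked like a container regardless of the per-entry size on disk:
//
//   method_list_t *mlist = /* e.g. from class_ro_t->baseMethodList */;
//   for (auto& meth : *mlist) {       // iterator strides by entsize()
//       use(meth.name);               // meth is a method_t&; use() is
//   }                                 // a hypothetical caller
//   method_t& third = mlist->get(2);  // asserts the index is in bounds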
struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
};

struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};
typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// Values for protocol_t->flags
#define PROTOCOL_FIXED_UP_2 (1<<31)  // must never be set by compiler
#define PROTOCOL_FIXED_UP_1 (1<<30)  // must never be set by compiler

#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t)
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **extendedMethodTypes;
    const char *_demangledName;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;
    void setFixedUp();

    bool hasExtendedMethodTypesField() const {
        return size >= (offsetof(protocol_t, extendedMethodTypes)
                        + sizeof(extendedMethodTypes));
    }
    bool hasExtendedMethodTypes() const {
        return hasExtendedMethodTypesField() && extendedMethodTypes;
    }
};
struct protocol_list_t {
    // count is 64-bit by accident.
    uintptr_t count;
    protocol_ref_t list[0];  // variable-size

    size_t byteSize() const {
        return sizeof(*this) + count*sizeof(list[0]);
    }

    protocol_list_t *duplicate() const {
        return (protocol_list_t *)memdup(this, this->byteSize());
    }

    typedef protocol_ref_t* iterator;
    typedef const protocol_ref_t* const_iterator;

    const_iterator begin() const {
        return list;
    }
    iterator begin() {
        return list;
    }
    const_iterator end() const {
        return list + count;
    }
    iterator end() {
        return list + count;
    }
};
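// Usage sketch (illustrative only): entries are unremapped protocol_ref_t
// values, so callers translate each one before use. remapProtocol() is the
// runtime's internal helper in objc-runtime-new.mm; it is assumed here.
//
//   protocol_list_t *plist = /* e.g. protocol_t->protocols */;
//   for (protocol_list_t::const_iterator i = plist->begin();
//        i != plist->end();
//        ++i)
//   {
//       protocol_t *proto = remapProtocol(*i);
//   }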
struct locstamped_category_t {
    category_t *cat;
    struct header_info *hi;
};

struct locstamped_category_list_t {
    uint32_t count;
#if __LP64__
    uint32_t reserved;
#endif
    locstamped_category_t list[0];
};
// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.

// class is a metaclass
#define RO_META               (1<<0)
// class is a root class
#define RO_ROOT               (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS  (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN             (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION          (1<<5)
// this bit is available for reassignment
// #define RO_REUSE_ME        (1<<6)
// class compiled with -fobjc-arc (automatic retain/release)
#define RO_IS_ARR             (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY  (1<<8)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE        (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE             (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED           (1<<31)
// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.

// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED           (1<<31)
// class is unresolved future class
#define RW_FUTURE             (1<<30)
// class is initialized
#define RW_INITIALIZED        (1<<29)
// class is initializing
#define RW_INITIALIZING       (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO          (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING       (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED        (1<<25)
// GC: class has unsafe finalize method
#define RW_FINALIZE_ON_MAIN_THREAD (1<<24)
// class +load has been called
#define RW_LOADED             (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
// #define RW_20              (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING          (1<<19)

// NOTE: MORE RW_ FLAGS DEFINED BELOW
// Values for class_rw_t->flags or class_t->bits
// These flags are optimized for retain/release and alloc/dealloc
// 64-bit stores more of them in class_t->bits to reduce pointer indirection.

#if !__LP64__

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
//   not tracked for 32-bit because it only applies to non-pointer isa
// #define RW_REQUIRES_RAW_ISA

// class is a Swift class
#define FAST_IS_SWIFT         (1UL<<0)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR   (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL
#elif 1
// Leaks-compatible version that steals low bits only.

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)

// class is a Swift class
#define FAST_IS_SWIFT         (1UL<<0)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR   (1UL<<1)
// class's instances require raw isa
#define FAST_REQUIRES_RAW_ISA (1UL<<2)
// data pointer
#define FAST_DATA_MASK        0x00007ffffffffff8UL
#else
// Leaks-incompatible version that steals lots of bits.

// class is a Swift class
#define FAST_IS_SWIFT           (1UL<<0)
// class's instances require raw isa
#define FAST_REQUIRES_RAW_ISA   (1UL<<1)
// class or superclass has .cxx_destruct implementation
//   This bit is aligned with isa_t->hasCxxDtor to save an instruction.
#define FAST_HAS_CXX_DTOR       (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL
// class or superclass has .cxx_construct implementation
#define FAST_HAS_CXX_CTOR       (1UL<<47)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define FAST_HAS_DEFAULT_AWZ    (1UL<<48)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<49)
// summary bit for fast alloc path: !hasCxxCtor and
//   !requiresRawIsa and instanceSize fits into shiftedSize
#define FAST_ALLOC              (1UL<<50)
// instance size in units of 16 bytes
//   or 0 if the instance size is too big in this field
//   This field must be LAST
#define FAST_SHIFTED_SIZE_SHIFT 51

// FAST_ALLOC means
//   FAST_HAS_CXX_CTOR is not set
//   FAST_REQUIRES_RAW_ISA is not set
//   FAST_SHIFTED_SIZE is not zero
// FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that
// bit is stored on the metaclass.
#define FAST_ALLOC_MASK  (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA)
#define FAST_ALLOC_VALUE (0)

#endif
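// Worked example (illustrative): a class with no .cxx_construct, a non-raw
// isa, and a 48-byte instance size has both FAST_ALLOC_MASK bits clear
// ((bits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) and 48/16 == 3 in the
// shifted-size field, so updateFastAlloc() below can set FAST_ALLOC and the
// allocator never has to chase the class_rw_t pointer.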
struct class_ro_t {
    uint32_t flags;
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;
#endif

    const uint8_t * ivarLayout;

    const char * name;
    method_list_t * baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    method_list_t *baseMethods() const {
        return baseMethodList;
    }
};
/***********************************************************************
* list_array_tt<Element, List>
* Generic implementation for metadata that can be augmented by categories.
*
* Element is the underlying metadata type (e.g. method_t)
* List is the metadata's list type (e.g. method_list_t)
*
* A list_array_tt has one of three values:
* - empty
* - a pointer to a single list
* - an array of pointers to lists
*
* countLists/beginLists/endLists iterate the metadata lists
* count/begin/end iterate the underlying metadata elements
**********************************************************************/
template <typename Element, typename List>
class list_array_tt {
    struct array_t {
        uint32_t count;
        List* lists[0];

        static size_t byteSize(uint32_t count) {
            return sizeof(array_t) + count*sizeof(lists[0]);
        }
        size_t byteSize() {
            return byteSize(count);
        }
    };
 protected:
    class iterator {
        List **lists;
        List **listsEnd;
        typename List::iterator m, mEnd;

     public:
        iterator(List **begin, List **end)
            : lists(begin), listsEnd(end)
        {
            if (begin != end) {
                m = (*begin)->begin();
                mEnd = (*begin)->end();
            }
        }

        const Element& operator * () const {
            return *m;
        }
        Element& operator * () {
            return *m;
        }

        bool operator != (const iterator& rhs) const {
            if (lists != rhs.lists) return true;
            if (lists == listsEnd) return false;  // m is undefined
            if (m != rhs.m) return true;
            return false;
        }

        const iterator& operator ++ () {
            assert(m != mEnd);
            m++;
            if (m == mEnd) {
                assert(lists != listsEnd);
                lists++;
                if (lists != listsEnd) {
                    m = (*lists)->begin();
                    mEnd = (*lists)->end();
                }
            }
            return *this;
        }
    };
 private:
    union {
        List* list;
        uintptr_t arrayAndFlag;
    };

    bool hasArray() const {
        return arrayAndFlag & 1;
    }

    array_t *array() {
        return (array_t *)(arrayAndFlag & ~1);
    }

    void setArray(array_t *array) {
        arrayAndFlag = (uintptr_t)array | 1;
    }
 public:

    uint32_t count() {
        uint32_t result = 0;
        for (auto lists = beginLists(), end = endLists();
             lists != end;
             ++lists)
        {
            result += (*lists)->count;
        }
        return result;
    }

    iterator begin() {
        return iterator(beginLists(), endLists());
    }

    iterator end() {
        List **e = endLists();
        return iterator(e, e);
    }

    uint32_t countLists() {
        if (hasArray()) {
            return array()->count;
        } else if (list) {
            return 1;
        } else {
            return 0;
        }
    }

    List** beginLists() {
        if (hasArray()) {
            return array()->lists;
        } else {
            return &list;
        }
    }

    List** endLists() {
        if (hasArray()) {
            return array()->lists + array()->count;
        } else if (list) {
            return &list + 1;
        } else {
            return &list;
        }
    }
    void attachLists(List* const * addedLists, uint32_t addedCount) {
        if (addedCount == 0) return;

        if (hasArray()) {
            // many lists -> many lists
            uint32_t oldCount = array()->count;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
            array()->count = newCount;
            memmove(array()->lists + addedCount, array()->lists,
                    oldCount * sizeof(array()->lists[0]));
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
        else if (!list  &&  addedCount == 1) {
            // 0 lists -> 1 list
            list = addedLists[0];
        }
        else {
            // 1 list -> many lists
            List* oldList = list;
            uint32_t oldCount = oldList ? 1 : 0;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)malloc(array_t::byteSize(newCount)));
            array()->count = newCount;
            if (oldList) array()->lists[addedCount] = oldList;
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
    }

    void tryFree() {
        if (hasArray()) {
            for (uint32_t i = 0; i < array()->count; i++) {
                try_free(array()->lists[i]);
            }
            try_free(array());
        }
        else if (list) {
            try_free(list);
        }
    }
    template<typename Result>
    Result duplicate() {
        Result result;

        if (hasArray()) {
            array_t *a = array();
            result.setArray((array_t *)memdup(a, a->byteSize()));
            for (uint32_t i = 0; i < a->count; i++) {
                result.array()->lists[i] = a->lists[i]->duplicate();
            }
        } else if (list) {
            result.list = list->duplicate();
        }

        return result;
    }
};
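// Usage sketch (illustrative only; names are hypothetical): the three
// storage states in action. Attaching one list to an empty array stores the
// pointer inline; attaching more promotes storage to a heap-allocated
// array_t with the low flag bit set in arrayAndFlag:
//
//   list_array_tt<method_t, method_list_t> mlists;  // empty: list == nil
//   mlists.attachLists(&baseList, 1);   // 0 lists -> 1 list (stored inline)
//   mlists.attachLists(catLists, 2);    // 1 list -> array of 3 lists,
//                                       // newly attached lists first
//   assert(mlists.countLists() == 3);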
class method_array_t :
    public list_array_tt<method_t, method_list_t>
{
    typedef list_array_tt<method_t, method_list_t> Super;

 public:
    method_list_t **beginCategoryMethodLists() {
        return beginLists();
    }

    method_list_t **endCategoryMethodLists(Class cls);

    method_array_t duplicate() {
        return Super::duplicate<method_array_t>();
    }
};


class property_array_t :
    public list_array_tt<property_t, property_list_t>
{
    typedef list_array_tt<property_t, property_list_t> Super;

 public:
    property_array_t duplicate() {
        return Super::duplicate<property_array_t>();
    }
};


class protocol_array_t :
    public list_array_tt<protocol_ref_t, protocol_list_t>
{
    typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;

 public:
    protocol_array_t duplicate() {
        return Super::duplicate<protocol_array_t>();
    }
};
struct class_rw_t {
    uint32_t flags;
    uint32_t version;

    const class_ro_t *ro;

    method_array_t methods;
    property_array_t properties;
    protocol_array_t protocols;

    Class firstSubclass;
    Class nextSiblingClass;

    void setFlags(uint32_t set)
    {
        OSAtomicOr32Barrier(set, &flags);
    }

    void clearFlags(uint32_t clear)
    {
        OSAtomicXor32Barrier(clear, &flags);
    }

    // set and clear must not overlap
    void changeFlags(uint32_t set, uint32_t clear)
    {
        assert((set & clear) == 0);

        uint32_t oldf, newf;
        do {
            oldf = flags;
            newf = (oldf | set) & ~clear;
        } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
    }
};
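// Usage sketch (illustrative): changeFlags() flips disjoint bit sets in one
// atomic compare-and-swap step, e.g. marking a class constructed once its
// registration finishes (compare objc_registerClassPair() in
// objc-runtime-new.mm):
//
//   cls->data()->changeFlags(RW_CONSTRUCTED, RW_CONSTRUCTING);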
struct class_data_bits_t {

    // Values are the FAST_ flags above.
    uintptr_t bits;
 private:
    bool getBit(uintptr_t bit)
    {
        return bits & bit;
    }

#if FAST_ALLOC
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change)
    {
        if (change & FAST_ALLOC_MASK) {
            if (((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE)  &&
                ((oldBits >> FAST_SHIFTED_SIZE_SHIFT) != 0))
            {
                oldBits |= FAST_ALLOC;
            } else {
                oldBits &= ~FAST_ALLOC;
            }
        }
        return oldBits;
    }
#else
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) {
        return oldBits;
    }
#endif
    void setBits(uintptr_t set)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits | set, set);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

    void clearBits(uintptr_t clear)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits & ~clear, clear);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }
 public:

    class_rw_t* data() {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }
    void setData(class_rw_t *newData)
    {
        assert(!data()  ||  (newData->flags & (RW_REALIZING | RW_FUTURE)));
        // Set during realization or construction only. No locking needed.
        bits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
    }
    bool hasDefaultRR() {
        return getBit(FAST_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        setBits(FAST_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        clearBits(FAST_HAS_DEFAULT_RR);
    }

#if FAST_HAS_DEFAULT_AWZ
    bool hasDefaultAWZ() {
        return getBit(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        setBits(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        clearBits(FAST_HAS_DEFAULT_AWZ);
    }
#else
    bool hasDefaultAWZ() {
        return data()->flags & RW_HAS_DEFAULT_AWZ;
    }
    void setHasDefaultAWZ() {
        data()->setFlags(RW_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        data()->clearFlags(RW_HAS_DEFAULT_AWZ);
    }
#endif
#if FAST_HAS_CXX_CTOR
    bool hasCxxCtor() {
        return getBit(FAST_HAS_CXX_CTOR);
    }
    void setHasCxxCtor() {
        setBits(FAST_HAS_CXX_CTOR);
    }
#else
    bool hasCxxCtor() {
        return data()->flags & RW_HAS_CXX_CTOR;
    }
    void setHasCxxCtor() {
        data()->setFlags(RW_HAS_CXX_CTOR);
    }
#endif

#if FAST_HAS_CXX_DTOR
    bool hasCxxDtor() {
        return getBit(FAST_HAS_CXX_DTOR);
    }
    void setHasCxxDtor() {
        setBits(FAST_HAS_CXX_DTOR);
    }
#else
    bool hasCxxDtor() {
        return data()->flags & RW_HAS_CXX_DTOR;
    }
    void setHasCxxDtor() {
        data()->setFlags(RW_HAS_CXX_DTOR);
    }
#endif

#if FAST_REQUIRES_RAW_ISA
    bool requiresRawIsa() {
        return getBit(FAST_REQUIRES_RAW_ISA);
    }
    void setRequiresRawIsa() {
        setBits(FAST_REQUIRES_RAW_ISA);
    }
#else
# if SUPPORT_NONPOINTER_ISA
#   error oops
# endif
    bool requiresRawIsa() {
        return true;
    }
    void setRequiresRawIsa() {
        // nothing
    }
#endif
#if FAST_ALLOC
    size_t fastInstanceSize()
    {
        assert(bits & FAST_ALLOC);
        return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16;
    }
    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        assert(data()->flags & RW_REALIZING);

        // Round up to 16-byte boundary, then divide to get 16-byte units
        newSize = ((newSize + 15) & ~15) / 16;

        uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT;
        if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) {
            int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT;
            uintptr_t oldBits = (bits << shift) >> shift;
            if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) {
                newBits |= FAST_ALLOC;
            }
            bits = oldBits | newBits;
        }
    }
    bool canAllocFast() {
        return bits & FAST_ALLOC;
    }
#else
    size_t fastInstanceSize() {
        abort();
    }
    void setFastInstanceSize(size_t) {
        // nothing
    }
    bool canAllocFast() {
        return false;
    }
#endif

    bool isSwift() {
        return getBit(FAST_IS_SWIFT);
    }

    void setIsSwift() {
        setBits(FAST_IS_SWIFT);
    }
};
struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;             // formerly cache pointer and vtable
    class_data_bits_t bits;    // class_rw_t * plus custom rr/alloc flags

    class_rw_t *data() {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }
    void setInfo(uint32_t set) {
        assert(isFuture()  ||  isRealized());
        data()->setFlags(set);
    }

    void clearInfo(uint32_t clear) {
        assert(isFuture()  ||  isRealized());
        data()->clearFlags(clear);
    }

    // set and clear must not overlap
    void changeInfo(uint32_t set, uint32_t clear) {
        assert(isFuture()  ||  isRealized());
        assert((set & clear) == 0);
        data()->changeFlags(set, clear);
    }
    bool hasCustomRR() {
        return ! bits.hasDefaultRR();
    }
    void setHasDefaultRR() {
        assert(isInitializing());
        bits.setHasDefaultRR();
    }
    void setHasCustomRR(bool inherited = false);
    void printCustomRR(bool inherited);

    bool hasCustomAWZ() {
        return ! bits.hasDefaultAWZ();
    }
    void setHasDefaultAWZ() {
        assert(isInitializing());
        bits.setHasDefaultAWZ();
    }
    void setHasCustomAWZ(bool inherited = false);
    void printCustomAWZ(bool inherited);

    bool requiresRawIsa() {
        return bits.requiresRawIsa();
    }
    void setRequiresRawIsa(bool inherited = false);
    void printRequiresRawIsa(bool inherited);
    bool canAllocIndexed() {
        assert(!isFuture());
        return !requiresRawIsa();
    }
    bool canAllocFast() {
        assert(!isFuture());
        return bits.canAllocFast();
    }

    bool hasCxxCtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxCtor();
    }
    void setHasCxxCtor() {
        bits.setHasCxxCtor();
    }

    bool hasCxxDtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxDtor();
    }
    void setHasCxxDtor() {
        bits.setHasCxxDtor();
    }

    bool isSwift() {
        return bits.isSwift();
    }
#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture()  ||  isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture()  ||  isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif

    bool shouldGrowCache() {
        return true;
    }

    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }
    bool shouldFinalizeOnMainThread() {
        // finishInitializing() propagates this flag from the superclass.
        assert(isRealized());
        return data()->flags & RW_FINALIZE_ON_MAIN_THREAD;
    }

    void setShouldFinalizeOnMainThread() {
        assert(isRealized());
        setInfo(RW_FINALIZE_ON_MAIN_THREAD);
    }
    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    void setInitializing() {
        assert(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();

    bool isLoadable() {
        assert(isRealized());
        return true;  // any class registered for +load is definitely loadable
    }
    IMP getLoadMethod();

    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() {
        return data()->flags & RW_REALIZED;
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() {
        return data()->flags & RW_FUTURE;
    }
    bool isMetaClass() {
        assert(isRealized());
        return data()->ro->flags & RO_META;
    }

    // NOT identical to this->ISA when this is a metaclass
    Class getMeta() {
        if (isMetaClass()) return (Class)this;
        else return this->ISA();
    }
    bool isRootClass() {
        return superclass == nil;
    }
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }
    const char *mangledName() {
        // fixme can't assert locks here
        if (isRealized()  ||  isFuture()) {
            return data()->ro->name;
        } else {
            return ((const class_ro_t *)data())->name;
        }
    }

    const char *demangledName(bool realize = false);
    const char *nameForLogging();
    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() {
        assert(isRealized());
        return data()->ro->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() {
        return word_align(unalignedInstanceSize());
    }

    size_t instanceSize(size_t extraBytes) {
        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }
    void setInstanceSize(uint32_t newSize) {
        assert(isRealized());
        if (newSize != data()->ro->instanceSize) {
            assert(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
        }
        bits.setFastInstanceSize(newSize);
    }
};
struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;
    // ...

    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};
struct category_t {
    const char *name;
    classref_t cls;
    struct method_list_t *instanceMethods;
    struct method_list_t *classMethods;
    struct protocol_list_t *protocols;
    struct property_list_t *instanceProperties;

    method_list_t *methodsForMeta(bool isMeta) {
        if (isMeta) return classMethods;
        else return instanceMethods;
    }

    property_list_t *propertiesForMeta(bool isMeta) {
        if (isMeta) return nil;  // classProperties;
        else return instanceProperties;
    }
};
struct objc_super2 {
    id receiver;
    Class current_class;
};

struct message_ref_t {
    IMP imp;
    SEL sel;
};
extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
static inline void
foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class))
{
    // runtimeLock.assertWriting();
    assert(top);
    Class cls = top;
    while (1) {
        if (!code(cls)) break;

        if (cls->data()->firstSubclass) {
            cls = cls->data()->firstSubclass;
        } else {
            while (!cls->data()->nextSiblingClass  &&  cls != top) {
                cls = cls->superclass;
            }
            if (cls == top) break;
            cls = cls->data()->nextSiblingClass;
        }
    }
}
1304 foreach_realized_class_and_subclass(Class top
, void (^code
)(Class
))
1306 foreach_realized_class_and_subclass_2(top
, ^bool(Class cls
) {
1307 code(cls
); return true;