/*
 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H
#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bits
#else
typedef uint16_t mask_t;
#endif
typedef uintptr_t cache_key_t;
struct bucket_t {
private:
    cache_key_t _key;
    IMP _imp;

public:
    inline cache_key_t key() const { return _key; }
    inline IMP imp() const { return (IMP)_imp; }
    inline void setKey(cache_key_t newKey) { _key = newKey; }
    inline void setImp(IMP newImp) { _imp = newImp; }

    void set(cache_key_t newKey, IMP newImp);
};
struct cache_t {
    struct bucket_t *_buckets;
    mask_t _mask;
    mask_t _occupied;

public:
    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    bool isConstantEmptyCache();

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void reallocate(mask_t oldCapacity, mask_t newCapacity);
    struct bucket_t * find(cache_key_t key, id receiver);

    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn));
};
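// Usage sketch (editor's illustration, not part of this header): a
// simplified version of the probe loop that objc_msgSend implements in
// assembly. The selector address is hashed with the power-of-two mask,
// then probed from there; the exact probe order differs per architecture.
//
//   IMP lookup(cache_t *cache, cache_key_t key) {
//       bucket_t *b = cache->buckets();
//       mask_t m = cache->mask();                      // capacity - 1
//       for (mask_t i = (mask_t)(key & m); ; i = (i+1) & m) {
//           if (b[i].key() == key) return b[i].imp();  // hit
//           if (b[i].key() == 0) return nil;           // miss: empty slot
//       }
//   }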
// classref_t is unremapped class_t*
typedef struct classref * classref_t;
/***********************************************************************
* entsize_list_tt<Element, List, FlagMask>
* Generic implementation of an array of non-fragile structs.
*
* Element is the struct type (e.g. method_t)
* List is the specialization of entsize_list_tt (e.g. method_list_t)
* FlagMask is used to stash extra bits in the entsize field
*   (e.g. method list fixup markers)
**********************************************************************/
template <typename Element, typename List, uint32_t FlagMask>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;
    uint32_t count;
    Element first;

    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    Element& getOrEnd(uint32_t i) const {
        assert(i <= count);
        return *(Element *)((uint8_t *)&first + i*entsize());
    }
    Element& get(uint32_t i) const {
        assert(i < count);
        return getOrEnd(i);
    }

    size_t byteSize() const {
        return sizeof(*this) + (count-1)*entsize();
    }

    List *duplicate() const {
        return (List *)memdup(this, this->byteSize());
    }

    struct iterator;
    const iterator begin() const {
        return iterator(*static_cast<const List*>(this), 0);
    }
    iterator begin() {
        return iterator(*static_cast<const List*>(this), 0);
    }
    const iterator end() const {
        return iterator(*static_cast<const List*>(this), count);
    }
    iterator end() {
        return iterator(*static_cast<const List*>(this), count);
    }
    struct iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};
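// Usage sketch (editor's illustration, not part of the original header):
// any specialization such as method_list_t (declared below) can be walked
// with the iterator pair above, even when the on-disk entsize is larger
// than sizeof(Element):
//
//   void dumpSELs(method_list_t *mlist) {
//       for (auto& meth : *mlist) {       // begin()/end() from entsize_list_tt
//           printf("%s\n", sel_getName(meth.name));
//       }
//   }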
struct method_t {
    SEL name;
    const char *types;
    IMP imp;

    struct SortBySELAddress :
        public std::binary_function<const method_t&,
                                    const method_t&, bool>
    {
        bool operator() (const method_t& lhs,
                         const method_t& rhs)
        { return lhs.name < rhs.name; }
    };
};
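// Usage sketch (editor's illustration): the runtime's fixup pass sorts a
// method list by selector address so lookups can binary-search it, e.g.:
//
//   method_t::SortBySELAddress sorter;
//   std::stable_sort(mlist->begin(), mlist->end(), sorter);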
struct ivar_t {
#if __x86_64__
    // *offset was originally 64-bit on some x86_64 platforms.
    // We read and write only 32 bits of it.
    // Some metadata provides all 64 bits. This is harmless for unsigned
    // little-endian values.
    // Some code uses all 64 bits. class_addIvar() over-allocates the
    // offset for their benefit.
#endif
    int32_t *offset;
    const char *name;
    const char *type;
    // alignment is sometimes -1; use alignment() instead
    uint32_t alignment_raw;
    uint32_t size;

    uint32_t alignment() const {
        if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
        return 1 << alignment_raw;
    }
};
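// Worked example (editor's illustration): alignment_raw stores log2 of
// the byte alignment, so a raw value of 3 means 1<<3 == 8-byte alignment.
// The compiler sometimes emits ~0 (printed as -1); for that sentinel,
// alignment() falls back to pointer alignment, 1 << WORD_SHIFT.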
struct property_t {
    const char *name;
    const char *attributes;
};
// Two bits of entsize are used for fixup markers.
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
    bool isFixedUp() const;
    void setFixedUp();

    uint32_t indexOfMethod(const method_t *meth) const {
        uint32_t i =
            (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
        assert(i < count);
        return i;
    }
};
struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
    bool containsIvar(Ivar ivar) const {
        return (ivar >= (Ivar)&*begin()  &&  ivar < (Ivar)&*end());
    }
};
struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};
typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// Values for protocol_t->flags
#define PROTOCOL_FIXED_UP_2 (1<<31)  // must never be set by compiler
#define PROTOCOL_FIXED_UP_1 (1<<30)  // must never be set by compiler
// Bits 0..15 are reserved for Swift's use.

#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t)
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **_extendedMethodTypes;
    const char *_demangledName;
    property_list_t *_classProperties;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;
    void setFixedUp();

#   define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))

    bool hasExtendedMethodTypesField() const {
        return HAS_FIELD(_extendedMethodTypes);
    }
    bool hasDemangledNameField() const {
        return HAS_FIELD(_demangledName);
    }
    bool hasClassPropertiesField() const {
        return HAS_FIELD(_classProperties);
    }

#   undef HAS_FIELD
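    // Worked example (editor's illustration): a protocol emitted by an
    // older compiler may have size == offsetof(protocol_t, _extendedMethodTypes),
    // i.e. its on-disk layout ends just before the extended-method-types
    // field. For it, hasExtendedMethodTypesField() is false and readers
    // must not dereference _extendedMethodTypes; protocols recorded with
    // a larger size pass the check and expose the trailing fields safely.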
    const char **extendedMethodTypes() const {
        return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
    }

    property_list_t *classProperties() const {
        return hasClassPropertiesField() ? _classProperties : nil;
    }
};
struct protocol_list_t {
    // count is 64-bit by accident.
    uintptr_t count;
    protocol_ref_t list[0]; // variable-size

    size_t byteSize() const {
        return sizeof(*this) + count*sizeof(list[0]);
    }

    protocol_list_t *duplicate() const {
        return (protocol_list_t *)memdup(this, this->byteSize());
    }

    typedef protocol_ref_t* iterator;
    typedef const protocol_ref_t* const_iterator;

    const_iterator begin() const {
        return list;
    }
    iterator begin() {
        return list;
    }
    const_iterator end() const {
        return list + count;
    }
    iterator end() {
        return list + count;
    }
};
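// Worked example (editor's illustration): for a protocol_list_t with
// count == 3 on a 64-bit system, byteSize() is
// sizeof(protocol_list_t) + 3*sizeof(protocol_ref_t) == 8 + 3*8 == 32
// bytes: the fixed header plus the variable-size list[] payload.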
struct locstamped_category_t {
    category_t *cat;
    struct header_info *hi;
};

struct locstamped_category_list_t {
    uint32_t count;
#if __LP64__
    uint32_t reserved;
#endif
    locstamped_category_t list[0];
};
// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.
// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
// class is a metaclass
#define RO_META               (1<<0)
// class is a root class
#define RO_ROOT               (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS  (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD    (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN             (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION          (1<<5)
// this bit is available for reassignment
// #define RO_REUSE_ME           (1<<6)
// class compiled with ARC
#define RO_IS_ARC             (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY  (1<<8)
// class is not ARC but has ARC-style weak ivar layout
#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE        (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE             (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED           (1<<31)
// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.
// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED           (1<<31)
// class is unresolved future class
#define RW_FUTURE             (1<<30)
// class is initialized
#define RW_INITIALIZED        (1<<29)
// class is initializing
#define RW_INITIALIZING       (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO          (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING       (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED        (1<<25)
// available for use; was RW_FINALIZE_ON_MAIN_THREAD
// #define RW_24 (1<<24)
// class +load has been called
#define RW_LOADED             (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
// available for use
// #define RW_20       (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING          (1<<19)

// NOTE: MORE RW_ FLAGS DEFINED BELOW
// Values for class_rw_t->flags or class_t->bits
// These flags are optimized for retain/release and alloc/dealloc
// 64-bit stores more of them in class_t->bits to reduce pointer indirection.

#if !__LP64__

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
#if SUPPORT_NONPOINTER_ISA
#define RW_REQUIRES_RAW_ISA   (1<<15)
#endif

// class is a Swift class
#define FAST_IS_SWIFT         (1UL<<0)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR   (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL

#elif 1
// Leaks-compatible version that steals low bits only.

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)

// class is a Swift class
#define FAST_IS_SWIFT           (1UL<<0)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<1)
// class's instances require raw isa
#define FAST_REQUIRES_RAW_ISA   (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL

#else
// Leaks-incompatible version that steals lots of bits.

// class is a Swift class
#define FAST_IS_SWIFT           (1UL<<0)
// class's instances require raw isa
#define FAST_REQUIRES_RAW_ISA   (1UL<<1)
// class or superclass has .cxx_destruct implementation
//   This bit is aligned with isa_t->hasCxxDtor to save an instruction.
#define FAST_HAS_CXX_DTOR       (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL
// class or superclass has .cxx_construct implementation
#define FAST_HAS_CXX_CTOR       (1UL<<47)
// class or superclass has default alloc/allocWithZone: implementation
//   Note this is stored in the metaclass.
#define FAST_HAS_DEFAULT_AWZ    (1UL<<48)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<49)
// summary bit for fast alloc path: !hasCxxCtor and
//   !instancesRequireRawIsa and instanceSize fits into shiftedSize
#define FAST_ALLOC              (1UL<<50)
// instance size in units of 16 bytes
//   or 0 if the instance size is too big in this field
//   This field must be LAST
#define FAST_SHIFTED_SIZE_SHIFT 51

// FAST_ALLOC means
//   FAST_HAS_CXX_CTOR is not set
//   FAST_REQUIRES_RAW_ISA is not set
//   FAST_SHIFTED_SIZE is not zero
// FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that
// bit is stored on the metaclass.
#define FAST_ALLOC_MASK  (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA)
#define FAST_ALLOC_VALUE (0)

#endif
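// Worked example (editor's illustration): a class with no .cxx_construct,
// a nonpointer isa, and a 48-byte instance size has neither
// FAST_HAS_CXX_CTOR nor FAST_REQUIRES_RAW_ISA set, so
//   (bits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE    // both bits clear
//   (bits >> FAST_SHIFTED_SIZE_SHIFT) == 3          // 48/16 units
// and updateFastAlloc() below turns on FAST_ALLOC, letting the alloc
// path skip the slow case entirely.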
struct class_ro_t {
    uint32_t flags;
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;
#endif

    const uint8_t * ivarLayout;

    const char * name;
    method_list_t * baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    method_list_t *baseMethods() const {
        return baseMethodList;
    }
};
/***********************************************************************
* list_array_tt<Element, List>
* Generic implementation for metadata that can be augmented by categories.
*
* Element is the underlying metadata type (e.g. method_t)
* List is the metadata's list type (e.g. method_list_t)
*
* A list_array_tt has one of three values:
* - empty
* - a pointer to a single list
* - an array of pointers to lists
*
* countLists/beginLists/endLists iterate the metadata lists
* count/begin/end iterate the underlying metadata elements
**********************************************************************/
template <typename Element, typename List>
class list_array_tt {
    struct array_t {
        uint32_t count;
        List* lists[0];

        static size_t byteSize(uint32_t count) {
            return sizeof(array_t) + count*sizeof(lists[0]);
        }
        size_t byteSize() {
            return byteSize(count);
        }
    };

 protected:
    class iterator {
        List **lists;
        List **listsEnd;
        typename List::iterator m, mEnd;

     public:
        iterator(List **begin, List **end)
            : lists(begin), listsEnd(end)
        {
            if (begin != end) {
                m = (*begin)->begin();
                mEnd = (*begin)->end();
            }
        }

        const Element& operator * () const {
            return *m;
        }
        Element& operator * () {
            return *m;
        }

        bool operator != (const iterator& rhs) const {
            if (lists != rhs.lists) return true;
            if (lists == listsEnd) return false;  // m is undefined
            if (m != rhs.m) return true;
            return false;
        }

        const iterator& operator ++ () {
            assert(m != mEnd);
            m++;
            if (m == mEnd) {
                assert(lists != listsEnd);
                lists++;
                if (lists != listsEnd) {
                    m = (*lists)->begin();
                    mEnd = (*lists)->end();
                }
            }
            return *this;
        }
    };

 private:
    union {
        List* list;
        uintptr_t arrayAndFlag;
    };

    bool hasArray() const {
        return arrayAndFlag & 1;
    }

    array_t *array() {
        return (array_t *)(arrayAndFlag & ~1);
    }

    void setArray(array_t *array) {
        arrayAndFlag = (uintptr_t)array | 1;
    }
 public:
    uint32_t count() {
        uint32_t result = 0;
        for (auto lists = beginLists(), end = endLists();
             lists != end;
             ++lists)
        {
            result += (*lists)->count;
        }
        return result;
    }

    iterator begin() {
        return iterator(beginLists(), endLists());
    }

    iterator end() {
        List **e = endLists();
        return iterator(e, e);
    }

    uint32_t countLists() {
        if (hasArray()) {
            return array()->count;
        } else if (list) {
            return 1;
        } else {
            return 0;
        }
    }

    List** beginLists() {
        if (hasArray()) {
            return array()->lists;
        } else {
            return &list;
        }
    }

    List** endLists() {
        if (hasArray()) {
            return array()->lists + array()->count;
        } else if (list) {
            return &list + 1;
        } else {
            return &list;
        }
    }
    void attachLists(List* const * addedLists, uint32_t addedCount) {
        if (addedCount == 0) return;

        if (hasArray()) {
            // many lists -> many lists
            uint32_t oldCount = array()->count;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
            array()->count = newCount;
            memmove(array()->lists + addedCount, array()->lists,
                    oldCount * sizeof(array()->lists[0]));
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
        else if (!list  &&  addedCount == 1) {
            // 0 lists -> 1 list
            list = addedLists[0];
        }
        else {
            // 1 list -> many lists
            List* oldList = list;
            uint32_t oldCount = oldList ? 1 : 0;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)malloc(array_t::byteSize(newCount)));
            array()->count = newCount;
            if (oldList) array()->lists[addedCount] = oldList;
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
    }
    void tryFree() {
        if (hasArray()) {
            for (uint32_t i = 0; i < array()->count; i++) {
                try_free(array()->lists[i]);
            }
            try_free(array());
        }
        else if (list) {
            try_free(list);
        }
    }

    template<typename Result>
    Result duplicate() {
        Result result;

        if (hasArray()) {
            array_t *a = array();
            result.setArray((array_t *)memdup(a, a->byteSize()));
            for (uint32_t i = 0; i < a->count; i++) {
                result.array()->lists[i] = a->lists[i]->duplicate();
            }
        } else if (list) {
            result.list = list->duplicate();
        } else {
            result.list = nil;
        }

        return result;
    }
};
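// Worked example (editor's illustration): the union above gives a
// list_array_tt its three states, distinguished by the low bit of
// arrayAndFlag:
//   - empty:      list == nil
//   - one list:   list points directly at a List (low bit clear)
//   - many lists: arrayAndFlag holds an array_t* with the low bit set
// hasArray()/array()/setArray() encode and decode that tag bit.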
class method_array_t :
    public list_array_tt<method_t, method_list_t>
{
    typedef list_array_tt<method_t, method_list_t> Super;

 public:
    method_list_t **beginCategoryMethodLists() {
        return beginLists();
    }

    method_list_t **endCategoryMethodLists(Class cls);

    method_array_t duplicate() {
        return Super::duplicate<method_array_t>();
    }
};
class property_array_t :
    public list_array_tt<property_t, property_list_t>
{
    typedef list_array_tt<property_t, property_list_t> Super;

 public:
    property_array_t duplicate() {
        return Super::duplicate<property_array_t>();
    }
};
class protocol_array_t :
    public list_array_tt<protocol_ref_t, protocol_list_t>
{
    typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;

 public:
    protocol_array_t duplicate() {
        return Super::duplicate<protocol_array_t>();
    }
};
struct class_rw_t {
    // Be warned that Symbolication knows the layout of this structure.
    uint32_t flags;
    uint32_t version;

    const class_ro_t *ro;

    method_array_t methods;
    property_array_t properties;
    protocol_array_t protocols;

    Class firstSubclass;
    Class nextSiblingClass;

    char *demangledName;
#if SUPPORT_INDEXED_ISA
    uint32_t index;
#endif

    void setFlags(uint32_t set)
    {
        OSAtomicOr32Barrier(set, &flags);
    }

    void clearFlags(uint32_t clear)
    {
        OSAtomicXor32Barrier(clear, &flags);
    }

    // set and clear must not overlap
    void changeFlags(uint32_t set, uint32_t clear)
    {
        assert((set & clear) == 0);

        uint32_t oldf, newf;
        do {
            oldf = flags;
            newf = (oldf | set) & ~clear;
        } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
    }
};
struct class_data_bits_t {

    // Values are the FAST_ flags above.
    uintptr_t bits;
 private:
    bool getBit(uintptr_t bit)
    {
        return bits & bit;
    }

#if FAST_ALLOC
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change)
    {
        if (change & FAST_ALLOC_MASK) {
            if (((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE)  &&
                ((oldBits >> FAST_SHIFTED_SIZE_SHIFT) != 0))
            {
                oldBits |= FAST_ALLOC;
            } else {
                oldBits &= ~FAST_ALLOC;
            }
        }
        return oldBits;
    }
#else
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) {
        return oldBits;
    }
#endif
 public:

    void setBits(uintptr_t set)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits | set, set);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

    void clearBits(uintptr_t clear)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits & ~clear, clear);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }
    class_rw_t* data() {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }
    void setData(class_rw_t *newData)
    {
        assert(!data()  ||  (newData->flags & (RW_REALIZING | RW_FUTURE)));
        // Set during realization or construction only. No locking needed.
        // Use a store-release fence because there may be concurrent
        // readers of data and data's contents.
        uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
        atomic_thread_fence(memory_order_release);
        bits = newBits;
    }
    bool hasDefaultRR() {
        return getBit(FAST_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        setBits(FAST_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        clearBits(FAST_HAS_DEFAULT_RR);
    }
#if FAST_HAS_DEFAULT_AWZ
    bool hasDefaultAWZ() {
        return getBit(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        setBits(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        clearBits(FAST_HAS_DEFAULT_AWZ);
    }
#else
    bool hasDefaultAWZ() {
        return data()->flags & RW_HAS_DEFAULT_AWZ;
    }
    void setHasDefaultAWZ() {
        data()->setFlags(RW_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        data()->clearFlags(RW_HAS_DEFAULT_AWZ);
    }
#endif
#if FAST_HAS_CXX_CTOR
    bool hasCxxCtor() {
        return getBit(FAST_HAS_CXX_CTOR);
    }
    void setHasCxxCtor() {
        setBits(FAST_HAS_CXX_CTOR);
    }
#else
    bool hasCxxCtor() {
        return data()->flags & RW_HAS_CXX_CTOR;
    }
    void setHasCxxCtor() {
        data()->setFlags(RW_HAS_CXX_CTOR);
    }
#endif
#if FAST_HAS_CXX_DTOR
    bool hasCxxDtor() {
        return getBit(FAST_HAS_CXX_DTOR);
    }
    void setHasCxxDtor() {
        setBits(FAST_HAS_CXX_DTOR);
    }
#else
    bool hasCxxDtor() {
        return data()->flags & RW_HAS_CXX_DTOR;
    }
    void setHasCxxDtor() {
        data()->setFlags(RW_HAS_CXX_DTOR);
    }
#endif
#if FAST_REQUIRES_RAW_ISA
    bool instancesRequireRawIsa() {
        return getBit(FAST_REQUIRES_RAW_ISA);
    }
    void setInstancesRequireRawIsa() {
        setBits(FAST_REQUIRES_RAW_ISA);
    }
#elif SUPPORT_NONPOINTER_ISA
    bool instancesRequireRawIsa() {
        return data()->flags & RW_REQUIRES_RAW_ISA;
    }
    void setInstancesRequireRawIsa() {
        data()->setFlags(RW_REQUIRES_RAW_ISA);
    }
#else
    bool instancesRequireRawIsa() {
        return true;
    }
    void setInstancesRequireRawIsa() {
        // nothing to do
    }
#endif
#if FAST_ALLOC
    size_t fastInstanceSize()
    {
        assert(bits & FAST_ALLOC);
        return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16;
    }
    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        assert(data()->flags & RW_REALIZING);

        // Round up to 16-byte boundary, then divide to get 16-byte units
        newSize = ((newSize + 15) & ~15) / 16;

        uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT;
        if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) {
            int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT;
            uintptr_t oldBits = (bits << shift) >> shift;
            if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) {
                newBits |= FAST_ALLOC;
            }
            bits = oldBits | newBits;
        }
    }
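    // Worked example (editor's illustration): newSize == 41 rounds up to
    // ((41+15) & ~15) == 48 bytes, i.e. 3 sixteen-byte units, so 3 is
    // stored at FAST_SHIFTED_SIZE_SHIFT and fastInstanceSize() returns
    // 3 * 16 == 48.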
    bool canAllocFast() {
        return bits & FAST_ALLOC;
    }
#else
    size_t fastInstanceSize() {
        abort();
    }
    void setFastInstanceSize(size_t) {
        // nothing to do
    }
    bool canAllocFast() {
        return false;
    }
#endif
    void setClassArrayIndex(unsigned Idx) {
#if SUPPORT_INDEXED_ISA
        // Index 0 is left unused so we can rely on zero-initialization from calloc.
        assert(Idx > 0);
        data()->index = Idx;
#endif
    }

    unsigned classArrayIndex() {
#if SUPPORT_INDEXED_ISA
        return data()->index;
#else
        return 0;
#endif
    }

    bool isSwift() {
        return getBit(FAST_IS_SWIFT);
    }

    void setIsSwift() {
        setBits(FAST_IS_SWIFT);
    }
};
struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;             // formerly cache pointer and vtable
    class_data_bits_t bits;    // class_rw_t * plus custom rr/alloc flags

    class_rw_t *data() {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }
) {
1078 assert(isFuture() || isRealized());
1079 data()->setFlags(set
);
1082 void clearInfo(uint32_t clear
) {
1083 assert(isFuture() || isRealized());
1084 data()->clearFlags(clear
);
1087 // set and clear must not overlap
1088 void changeInfo(uint32_t set
, uint32_t clear
) {
1089 assert(isFuture() || isRealized());
1090 assert((set
& clear
) == 0);
1091 data()->changeFlags(set
, clear
);
    bool hasCustomRR() {
        return ! bits.hasDefaultRR();
    }
    void setHasDefaultRR() {
        assert(isInitializing());
        bits.setHasDefaultRR();
    }
    void setHasCustomRR(bool inherited = false);
    void printCustomRR(bool inherited);

    bool hasCustomAWZ() {
        return ! bits.hasDefaultAWZ();
    }
    void setHasDefaultAWZ() {
        assert(isInitializing());
        bits.setHasDefaultAWZ();
    }
    void setHasCustomAWZ(bool inherited = false);
    void printCustomAWZ(bool inherited);
    bool instancesRequireRawIsa() {
        return bits.instancesRequireRawIsa();
    }
    void setInstancesRequireRawIsa(bool inherited = false);
    void printInstancesRequireRawIsa(bool inherited);

    bool canAllocNonpointer() {
        assert(!isFuture());
        return !instancesRequireRawIsa();
    }
    bool canAllocFast() {
        assert(!isFuture());
        return bits.canAllocFast();
    }
    bool hasCxxCtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxCtor();
    }
    void setHasCxxCtor() {
        bits.setHasCxxCtor();
    }

    bool hasCxxDtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxDtor();
    }
    void setHasCxxDtor() {
        bits.setHasCxxDtor();
    }

    bool isSwift() {
        return bits.isSwift();
    }
    // Return YES if the class's ivars are managed by ARC,
    // or the class is MRC but has ARC-style weak ivars.
    bool hasAutomaticIvars() {
        return data()->ro->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC);
    }

    // Return YES if the class's ivars are managed by ARC.
    bool isARC() {
        return data()->ro->flags & RO_IS_ARC;
    }
#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture()  ||  isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture()  ||  isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif

    bool shouldGrowCache() {
        return true;
    }

    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }
    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    void setInitializing() {
        assert(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();
    bool isLoadable() {
        assert(isRealized());
        return true;  // any class registered for +load is definitely loadable
    }

    IMP getLoadMethod();
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() {
        return data()->flags & RW_REALIZED;
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() {
        return data()->flags & RW_FUTURE;
    }
    bool isMetaClass() {
        assert(isRealized());
        return data()->ro->flags & RO_META;
    }

    // NOT identical to this->ISA when this is a metaclass
    Class getMeta() {
        if (isMetaClass()) return (Class)this;
        else return this->ISA();
    }
    bool isRootClass() {
        return superclass == nil;
    }
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }
    const char *mangledName() {
        // fixme can't assert locks here
        if (isRealized()  ||  isFuture()) {
            return data()->ro->name;
        } else {
            return ((const class_ro_t *)data())->name;
        }
    }

    const char *demangledName(bool realize = false);
    const char *nameForLogging();
    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceStart() {
        assert(isRealized());
        return data()->ro->instanceStart;
    }

    // Class's instance start rounded up to a pointer-size boundary.
    // This is used for ARC layout bitmaps.
    uint32_t alignedInstanceStart() {
        return word_align(unalignedInstanceStart());
    }

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() {
        assert(isRealized());
        return data()->ro->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() {
        return word_align(unalignedInstanceSize());
    }
    size_t instanceSize(size_t extraBytes) {
        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }
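    // Worked example (editor's illustration): a root class holding only
    // its isa pointer has alignedInstanceSize() == 8 on 64-bit, so
    // instanceSize(0) is bumped to the 16-byte CF minimum, while
    // instanceSize(24) returns 8 + 24 == 32 with no bump.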
    void setInstanceSize(uint32_t newSize) {
        assert(isRealized());
        if (newSize != data()->ro->instanceSize) {
            assert(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
        }
        bits.setFastInstanceSize(newSize);
    }
    void chooseClassArrayIndex();

    void setClassArrayIndex(unsigned Idx) {
        bits.setClassArrayIndex(Idx);
    }

    unsigned classArrayIndex() {
        return bits.classArrayIndex();
    }
};
struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;
    // ...

    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};
struct category_t {
    const char *name;
    classref_t cls;
    struct method_list_t *instanceMethods;
    struct method_list_t *classMethods;
    struct protocol_list_t *protocols;
    struct property_list_t *instanceProperties;
    // Fields below this point are not always present on disk.
    struct property_list_t *_classProperties;

    method_list_t *methodsForMeta(bool isMeta) {
        if (isMeta) return classMethods;
        else return instanceMethods;
    }

    property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);
};
struct objc_super2 {
    id receiver;
    Class current_class;
};

struct message_ref_t {
    IMP imp;
    SEL sel;
};
extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
static inline void
foreach_realized_class_and_subclass_2(Class top, unsigned& count,
                                      std::function<bool (Class)> code)
{
    // runtimeLock.assertWriting();
    assert(top);
    Class cls = top;
    while (1) {
        if (--count == 0) {
            _objc_fatal("Memory corruption in class list.");
        }
        if (!code(cls)) break;

        if (cls->data()->firstSubclass) {
            cls = cls->data()->firstSubclass;
        } else {
            while (!cls->data()->nextSiblingClass  &&  cls != top) {
                cls = cls->superclass;
                if (--count == 0) {
                    _objc_fatal("Memory corruption in class list.");
                }
            }
            if (cls == top) break;
            cls = cls->data()->nextSiblingClass;
        }
    }
}
extern Class firstRealizedClass();
extern unsigned int unreasonableClassCount();
// Enumerates a class and all of its realized subclasses.
static inline void
foreach_realized_class_and_subclass(Class top,
                                    std::function<void (Class)> code)
{
    unsigned int count = unreasonableClassCount();

    foreach_realized_class_and_subclass_2(top, count,
                                          [&code](Class cls) -> bool
    {
        code(cls);
        return true;
    });
}
// Enumerates all realized classes and metaclasses.
static inline void
foreach_realized_class_and_metaclass(std::function<void (Class)> code)
{
    unsigned int count = unreasonableClassCount();

    for (Class top = firstRealizedClass();
         top != nil;
         top = top->data()->nextSiblingClass)
    {
        foreach_realized_class_and_subclass_2(top, count,
                                              [&code](Class cls) -> bool
        {
            code(cls);
            code(cls->ISA());
            return true;
        });
    }
}

#endif
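// Usage sketch (editor's illustration, not part of this header): counting
// every realized class and metaclass with the enumerator above.
//
//   unsigned n = 0;
//   foreach_realized_class_and_metaclass([&n](Class cls) {
//       n++;
//       (void)cls;
//   });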