/*
 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H

#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bit masks
#else
typedef uint16_t mask_t;
#endif
typedef uintptr_t cache_key_t;
struct bucket_t {
private:
    cache_key_t _key;
    IMP _imp;

public:
    inline cache_key_t key() const { return _key; }
    inline IMP imp() const { return (IMP)_imp; }
    inline void setKey(cache_key_t newKey) { _key = newKey; }
    inline void setImp(IMP newImp) { _imp = newImp; }

    void set(cache_key_t newKey, IMP newImp);
};
struct cache_t {
    struct bucket_t *_buckets;
    mask_t _mask;
    mask_t _occupied;

public:
    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    bool isConstantEmptyCache();

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void reallocate(mask_t oldCapacity, mask_t newCapacity);
    struct bucket_t * find(cache_key_t key, id receiver);

    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn));
};
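
// Illustrative sketch (not part of the runtime): how bucket_t and cache_t
// fit together on a method-cache lookup. cache_hash() and the linear probe
// here are simplified stand-ins; the real lookup lives in assembly and in
// objc-cache.mm, and relies on the cache never being completely full.
//
//   static inline mask_t cache_hash(cache_key_t key, mask_t mask) {
//       return (mask_t)(key & mask);
//   }
//
//   static IMP cache_lookup_sketch(cache_t *cache, cache_key_t key) {
//       bucket_t *b = cache->buckets();
//       mask_t m = cache->mask();
//       for (mask_t i = cache_hash(key, m); b[i].key() != 0; i = (i+1) & m) {
//           if (b[i].key() == key) return b[i].imp();  // hit
//       }
//       return nil;  // miss: caller falls through to the slow path
//   }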
// classref_t is unremapped class_t*
typedef struct classref * classref_t;
/***********************************************************************
* entsize_list_tt<Element, List, FlagMask>
* Generic implementation of an array of non-fragile structs.
*
* Element is the struct type (e.g. method_t)
* List is the specialization of entsize_list_tt (e.g. method_list_t)
* FlagMask is used to stash extra bits in the entsize field
*   (e.g. method list fixup markers)
**********************************************************************/
template <typename Element, typename List, uint32_t FlagMask>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;
    uint32_t count;
    Element first;

    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    Element& getOrEnd(uint32_t i) const {
        assert(i <= count);
        return *(Element *)((uint8_t *)&first + i*entsize());
    }
    Element& get(uint32_t i) const {
        assert(i < count);
        return getOrEnd(i);
    }

    size_t byteSize() const {
        return sizeof(*this) + (count-1)*entsize();
    }

    List *duplicate() const {
        return (List *)memdup(this, this->byteSize());
    }

    struct iterator;
    const iterator begin() const {
        return iterator(*static_cast<const List*>(this), 0);
    }
    iterator begin() {
        return iterator(*static_cast<const List*>(this), 0);
    }
    const iterator end() const {
        return iterator(*static_cast<const List*>(this), count);
    }
    iterator end() {
        return iterator(*static_cast<const List*>(this), count);
    }
    struct iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};
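
// Usage sketch (illustrative): a specialization names itself as List so the
// begin()/end() helpers can cast back to the subclass. example_list_t here
// is hypothetical; method_list_t below is the real analogue.
//
//   struct example_list_t : entsize_list_tt<method_t, example_list_t, 0x3> { };
//
//   static void printSelectors(example_list_t *mlist) {
//       // Iteration strides by entsize(), not sizeof(method_t), so it
//       // stays correct even if on-disk entries are larger than Element.
//       for (auto& meth : *mlist) {
//           printf("%s\n", sel_getName(meth.name));
//       }
//   }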
struct method_t {
    SEL name;
    const char *types;
    IMP imp;

    struct SortBySELAddress :
        public std::binary_function<const method_t&,
                                    const method_t&, bool>
    {
        bool operator() (const method_t& lhs,
                         const method_t& rhs)
        { return lhs.name < rhs.name; }
    };
};
struct ivar_t {
    // *offset was originally 64-bit on some x86_64 platforms.
    // We read and write only 32 bits of it.
    // Some metadata provides all 64 bits. This is harmless for unsigned
    // little-endian values.
    // Some code uses all 64 bits. class_addIvar() over-allocates the
    // offset for their benefit.
    int32_t *offset;
    const char *name;
    const char *type;
    // alignment is sometimes -1; use alignment() instead
    uint32_t alignment_raw;
    uint32_t size;

    uint32_t alignment() const {
        if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
        return 1 << alignment_raw;
    }
};

struct property_t {
    const char *name;
    const char *attributes;
};
// Two bits of entsize are used for fixup markers.
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
    bool isFixedUp() const;
    void setFixedUp();

    uint32_t indexOfMethod(const method_t *meth) const {
        uint32_t i =
            (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
        assert(i < count);
        return i;
    }
};
struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
    bool containsIvar(Ivar ivar) const {
        return (ivar >= (Ivar)&*begin()  &&  ivar < (Ivar)&*end());
    }
};
struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};


typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// Values for protocol_t->flags
#define PROTOCOL_FIXED_UP_2 (1<<31)  // must never be set by compiler
#define PROTOCOL_FIXED_UP_1 (1<<30)  // must never be set by compiler
// Bits 0..15 are reserved for Swift's use.

#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t)
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **_extendedMethodTypes;
    const char *_demangledName;
    property_list_t *_classProperties;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;
    void setFixedUp();

#   define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))

    bool hasExtendedMethodTypesField() const {
        return HAS_FIELD(_extendedMethodTypes);
    }
    bool hasDemangledNameField() const {
        return HAS_FIELD(_demangledName);
    }
    bool hasClassPropertiesField() const {
        return HAS_FIELD(_classProperties);
    }

#   undef HAS_FIELD

    const char **extendedMethodTypes() const {
        return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
    }

    property_list_t *classProperties() const {
        return hasClassPropertiesField() ? _classProperties : nil;
    }
};
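
// Usage sketch (illustrative): reading an extended field through the
// size-checked accessors, so protocol metadata emitted before that field
// existed (whose size stops short of it) is handled gracefully.
//
//   static const char *demangledIfPresent(protocol_t *proto) {
//       // Hypothetical helper: returns nil for old on-disk protocols.
//       return proto->hasDemangledNameField() ? proto->_demangledName : nil;
//   }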
struct protocol_list_t {
    // count is 64-bit by accident.
    uintptr_t count;
    protocol_ref_t list[0]; // variable-size

    size_t byteSize() const {
        return sizeof(*this) + count*sizeof(list[0]);
    }

    protocol_list_t *duplicate() const {
        return (protocol_list_t *)memdup(this, this->byteSize());
    }

    typedef protocol_ref_t* iterator;
    typedef const protocol_ref_t* const_iterator;

    const_iterator begin() const {
        return list;
    }
    const_iterator end() const {
        return list + count;
    }
};
struct locstamped_category_t {
    category_t *cat;
    struct header_info *hi;
};

struct locstamped_category_list_t {
    uint32_t count;
    locstamped_category_t list[0];
};
// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.
// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang

// class is a metaclass
#define RO_META                 (1<<0)
// class is a root class
#define RO_ROOT                 (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS    (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD   (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN               (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION            (1<<5)
// this bit is available for reassignment
// #define RO_REUSE_ME          (1<<6)
// class compiled with ARC
#define RO_IS_ARC               (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY    (1<<8)
// class is not ARC but has ARC-style weak ivar layout
#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE          (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE               (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED             (1<<31)

// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.

// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED             (1<<31)
// class is unresolved future class
#define RW_FUTURE               (1<<30)
// class is initialized
#define RW_INITIALIZED          (1<<29)
// class is initializing
#define RW_INITIALIZING         (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO            (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING         (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED          (1<<25)
// available for use; was RW_FINALIZE_ON_MAIN_THREAD
// #define RW_24                (1<<24)
// class +load has been called
#define RW_LOADED               (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1<<21)
// available for use
// #define RW_20                (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING            (1<<19)
// NOTE: MORE RW_ FLAGS DEFINED BELOW


// Values for class_rw_t->flags or class_t->bits
// These flags are optimized for retain/release and alloc/dealloc
// 64-bit stores more of them in class_t->bits to reduce pointer indirection.

#if !__LP64__

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
#if SUPPORT_NONPOINTER_ISA
#define RW_REQUIRES_RAW_ISA   (1<<15)
#endif

// class is a Swift class
#define FAST_IS_SWIFT         (1UL<<0)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR   (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL

#elif 1
// Leaks-compatible version that steals low bits only.

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)

// class is a Swift class
#define FAST_IS_SWIFT           (1UL<<0)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<1)
// class's instances require raw isa
#define FAST_REQUIRES_RAW_ISA   (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL

#else
// Leaks-incompatible version that steals lots of bits.

// class is a Swift class
#define FAST_IS_SWIFT           (1UL<<0)
// class's instances require raw isa
#define FAST_REQUIRES_RAW_ISA   (1UL<<1)
// class or superclass has .cxx_destruct implementation
//   This bit is aligned with isa_t->hasCxxDtor to save an instruction.
#define FAST_HAS_CXX_DTOR       (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL
// class or superclass has .cxx_construct implementation
#define FAST_HAS_CXX_CTOR       (1UL<<47)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define FAST_HAS_DEFAULT_AWZ    (1UL<<48)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<49)
// summary bit for fast alloc path: !hasCxxCtor and
//   !instancesRequireRawIsa and instanceSize fits into shiftedSize
#define FAST_ALLOC              (1UL<<50)
// instance size in units of 16 bytes
//   or 0 if the instance size is too big in this field
//   This field must be LAST
#define FAST_SHIFTED_SIZE_SHIFT 51

// FAST_ALLOC means
//   FAST_HAS_CXX_CTOR is not set
//   FAST_REQUIRES_RAW_ISA is not set
//   FAST_SHIFTED_SIZE is not zero
// FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that
// bit is stored on the metaclass.
#define FAST_ALLOC_MASK  (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA)
#define FAST_ALLOC_VALUE (0)

#endif
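
// Illustrative sketch (not part of this header): the eligibility test that
// updateFastAlloc() in class_data_bits_t applies. A class is fast-alloc
// eligible when both mask bits are clear and the shifted-size field is
// nonzero; e.g. a 32-byte instance stores shiftedSize 2.
//
//   static inline bool wouldBeFastAlloc(uintptr_t bits) {
//       return ((bits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) &&
//              ((bits >> FAST_SHIFTED_SIZE_SHIFT) != 0);
//   }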
struct class_ro_t {
    uint32_t flags;
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;
#endif

    const uint8_t * ivarLayout;

    const char * name;
    method_list_t * baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    method_list_t *baseMethods() const {
        return baseMethodList;
    }
};
/***********************************************************************
* list_array_tt<Element, List>
* Generic implementation for metadata that can be augmented by categories.
*
* Element is the underlying metadata type (e.g. method_t)
* List is the metadata's list type (e.g. method_list_t)
*
* A list_array_tt has one of three values:
* - empty
* - a pointer to a single list
* - an array of pointers to lists
*
* countLists/beginLists/endLists iterate the metadata lists
* count/begin/end iterate the underlying metadata elements
**********************************************************************/
template <typename Element, typename List>
class list_array_tt {
    struct array_t {
        uint32_t count;
        List* lists[0];

        static size_t byteSize(uint32_t count) {
            return sizeof(array_t) + count*sizeof(lists[0]);
        }
        size_t byteSize() {
            return byteSize(count);
        }
    };
 protected:
    class iterator {
        List **lists;
        List **listsEnd;
        typename List::iterator m, mEnd;

     public:
        iterator(List **begin, List **end)
            : lists(begin), listsEnd(end)
        {
            if (begin != end) {
                m = (*begin)->begin();
                mEnd = (*begin)->end();
            }
        }

        const Element& operator * () const {
            return *m;
        }
        Element& operator * () {
            return *m;
        }

        bool operator != (const iterator& rhs) const {
            if (lists != rhs.lists) return true;
            if (lists == listsEnd) return false;  // m is undefined
            if (m != rhs.m) return true;
            return false;
        }

        const iterator& operator ++ () {
            assert(m != mEnd);
            m++;
            if (m == mEnd) {
                assert(lists != listsEnd);
                lists++;
                if (lists != listsEnd) {
                    m = (*lists)->begin();
                    mEnd = (*lists)->end();
                }
            }
            return *this;
        }
    };
 private:
    union {
        List* list;
        uintptr_t arrayAndFlag;
    };

    bool hasArray() const {
        return arrayAndFlag & 1;
    }

    array_t *array() {
        return (array_t *)(arrayAndFlag & ~1);
    }

    void setArray(array_t *array) {
        arrayAndFlag = (uintptr_t)array | 1;
    }

 public:

    uint32_t count() {
        uint32_t result = 0;
        for (auto lists = beginLists(), end = endLists();
             lists != end;
             ++lists)
        {
            result += (*lists)->count;
        }
        return result;
    }

    iterator begin() {
        return iterator(beginLists(), endLists());
    }

    iterator end() {
        List **e = endLists();
        return iterator(e, e);
    }

    uint32_t countLists() {
        if (hasArray()) {
            return array()->count;
        } else if (list) {
            return 1;
        } else {
            return 0;
        }
    }

    List** beginLists() {
        if (hasArray()) {
            return array()->lists;
        } else {
            return &list;
        }
    }

    List** endLists() {
        if (hasArray()) {
            return array()->lists + array()->count;
        } else if (list) {
            return &list + 1;
        } else {
            return &list;
        }
    }
    void attachLists(List* const * addedLists, uint32_t addedCount) {
        if (addedCount == 0) return;

        if (hasArray()) {
            // many lists -> many lists
            uint32_t oldCount = array()->count;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
            array()->count = newCount;
            memmove(array()->lists + addedCount, array()->lists,
                    oldCount * sizeof(array()->lists[0]));
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
        else if (!list  &&  addedCount == 1) {
            // 0 lists -> 1 list
            list = addedLists[0];
        }
        else {
            // 1 list -> many lists
            List* oldList = list;
            uint32_t oldCount = oldList ? 1 : 0;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)malloc(array_t::byteSize(newCount)));
            array()->count = newCount;
            if (oldList) array()->lists[addedCount] = oldList;
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
    }

    void tryFree() {
        if (hasArray()) {
            for (uint32_t i = 0; i < array()->count; i++) {
                try_free(array()->lists[i]);
            }
            try_free(array());
        }
        else if (list) {
            try_free(list);
        }
    }
    template<typename Result>
    Result duplicate() {
        Result result;

        if (hasArray()) {
            array_t *a = array();
            result.setArray((array_t *)memdup(a, a->byteSize()));
            for (uint32_t i = 0; i < a->count; i++) {
                result.array()->lists[i] = a->lists[i]->duplicate();
            }
        } else if (list) {
            result.list = list->duplicate();
        } else {
            result.list = nil;
        }

        return result;
    }
};
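
// Usage sketch (illustrative): attachLists() prepends, so lists added later
// (e.g. from categories) are found first during method lookup.
//
//   // Hypothetical helper: attach one category method list to a class's
//   // method_array_t (defined below).
//   static void attachOneList(method_array_t& methods, method_list_t *mlist) {
//       methods.attachLists(&mlist, 1);
//       // mlist is now the first list returned by beginLists().
//   }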
class method_array_t :
    public list_array_tt<method_t, method_list_t>
{
    typedef list_array_tt<method_t, method_list_t> Super;

 public:
    method_list_t **beginCategoryMethodLists() {
        return beginLists();
    }

    method_list_t **endCategoryMethodLists(Class cls);

    method_array_t duplicate() {
        return Super::duplicate<method_array_t>();
    }
};


class property_array_t :
    public list_array_tt<property_t, property_list_t>
{
    typedef list_array_tt<property_t, property_list_t> Super;

 public:
    property_array_t duplicate() {
        return Super::duplicate<property_array_t>();
    }
};


class protocol_array_t :
    public list_array_tt<protocol_ref_t, protocol_list_t>
{
    typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;

 public:
    protocol_array_t duplicate() {
        return Super::duplicate<protocol_array_t>();
    }
};
struct class_rw_t {
    // Be warned that Symbolication knows the layout of this structure.
    uint32_t flags;
    uint32_t version;

    const class_ro_t *ro;

    method_array_t methods;
    property_array_t properties;
    protocol_array_t protocols;

    Class firstSubclass;
    Class nextSiblingClass;

    char *demangledName;

#if SUPPORT_INDEXED_ISA
    uint32_t index;
#endif

    void setFlags(uint32_t set)
    {
        OSAtomicOr32Barrier(set, &flags);
    }

    void clearFlags(uint32_t clear)
    {
        OSAtomicXor32Barrier(clear, &flags);
    }

    // set and clear must not overlap
    void changeFlags(uint32_t set, uint32_t clear)
    {
        assert((set & clear) == 0);

        uint32_t oldf, newf;
        do {
            oldf = flags;
            newf = (oldf | set) & ~clear;
        } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
    }
};
struct class_data_bits_t {

    // Values are the FAST_ flags above.
    uintptr_t bits;
private:
    bool getBit(uintptr_t bit)
    {
        return bits & bit;
    }

#if FAST_ALLOC
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change)
    {
        if (change & FAST_ALLOC_MASK) {
            if (((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE)  &&
                ((oldBits >> FAST_SHIFTED_SIZE_SHIFT) != 0))
            {
                oldBits |= FAST_ALLOC;
            } else {
                oldBits &= ~FAST_ALLOC;
            }
        }
        return oldBits;
    }
#else
    static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) {
        return oldBits;
    }
#endif

    void setBits(uintptr_t set)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits | set, set);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

    void clearBits(uintptr_t clear)
    {
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = updateFastAlloc(oldBits & ~clear, clear);
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }
public:

    class_rw_t* data() {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }
    void setData(class_rw_t *newData)
    {
        assert(!data()  ||  (newData->flags & (RW_REALIZING | RW_FUTURE)));
        // Set during realization or construction only. No locking needed.
        bits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
    }

    bool hasDefaultRR() {
        return getBit(FAST_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        setBits(FAST_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        clearBits(FAST_HAS_DEFAULT_RR);
    }

#if FAST_HAS_DEFAULT_AWZ
    bool hasDefaultAWZ() {
        return getBit(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        setBits(FAST_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        clearBits(FAST_HAS_DEFAULT_AWZ);
    }
#else
    bool hasDefaultAWZ() {
        return data()->flags & RW_HAS_DEFAULT_AWZ;
    }
    void setHasDefaultAWZ() {
        data()->setFlags(RW_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        data()->clearFlags(RW_HAS_DEFAULT_AWZ);
    }
#endif

#if FAST_HAS_CXX_CTOR
    bool hasCxxCtor() {
        return getBit(FAST_HAS_CXX_CTOR);
    }
    void setHasCxxCtor() {
        setBits(FAST_HAS_CXX_CTOR);
    }
#else
    bool hasCxxCtor() {
        return data()->flags & RW_HAS_CXX_CTOR;
    }
    void setHasCxxCtor() {
        data()->setFlags(RW_HAS_CXX_CTOR);
    }
#endif

#if FAST_HAS_CXX_DTOR
    bool hasCxxDtor() {
        return getBit(FAST_HAS_CXX_DTOR);
    }
    void setHasCxxDtor() {
        setBits(FAST_HAS_CXX_DTOR);
    }
#else
    bool hasCxxDtor() {
        return data()->flags & RW_HAS_CXX_DTOR;
    }
    void setHasCxxDtor() {
        data()->setFlags(RW_HAS_CXX_DTOR);
    }
#endif

#if FAST_REQUIRES_RAW_ISA
    bool instancesRequireRawIsa() {
        return getBit(FAST_REQUIRES_RAW_ISA);
    }
    void setInstancesRequireRawIsa() {
        setBits(FAST_REQUIRES_RAW_ISA);
    }
#elif SUPPORT_NONPOINTER_ISA
    bool instancesRequireRawIsa() {
        return data()->flags & RW_REQUIRES_RAW_ISA;
    }
    void setInstancesRequireRawIsa() {
        data()->setFlags(RW_REQUIRES_RAW_ISA);
    }
#else
    bool instancesRequireRawIsa() {
        return true;
    }
    void setInstancesRequireRawIsa() {
        // nothing
    }
#endif

#if FAST_ALLOC
    size_t fastInstanceSize()
    {
        assert(bits & FAST_ALLOC);
        return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16;
    }
    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        assert(data()->flags & RW_REALIZING);

        // Round up to 16-byte boundary, then divide to get 16-byte units
        newSize = ((newSize + 15) & ~15) / 16;

        uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT;
        if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) {
            int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT;
            uintptr_t oldBits = (bits << shift) >> shift;
            if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) {
                newBits |= FAST_ALLOC;
            }
            bits = oldBits | newBits;
        }
    }
    bool canAllocFast() {
        return bits & FAST_ALLOC;
    }
#else
    size_t fastInstanceSize() {
        abort();
    }
    void setFastInstanceSize(size_t) {
        // nothing
    }
    bool canAllocFast() {
        return false;
    }
#endif

    void setClassArrayIndex(unsigned Idx) {
#if SUPPORT_INDEXED_ISA
        // 0 is unused as then we can rely on zero-initialisation from calloc.
        assert(Idx > 0);
        data()->index = Idx;
#endif
    }

    unsigned classArrayIndex() {
#if SUPPORT_INDEXED_ISA
        return data()->index;
#else
        return 0;
#endif
    }

    bool isSwift() {
        return getBit(FAST_IS_SWIFT);
    }

    void setIsSwift() {
        setBits(FAST_IS_SWIFT);
    }
};
struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;             // formerly cache pointer and vtable
    class_data_bits_t bits;    // class_rw_t * plus custom rr/alloc flags

    class_rw_t *data() {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }

    void setInfo(uint32_t set) {
        assert(isFuture()  ||  isRealized());
        data()->setFlags(set);
    }

    void clearInfo(uint32_t clear) {
        assert(isFuture()  ||  isRealized());
        data()->clearFlags(clear);
    }

    // set and clear must not overlap
    void changeInfo(uint32_t set, uint32_t clear) {
        assert(isFuture()  ||  isRealized());
        assert((set & clear) == 0);
        data()->changeFlags(set, clear);
    }

    bool hasCustomRR() {
        return ! bits.hasDefaultRR();
    }
    void setHasDefaultRR() {
        assert(isInitializing());
        bits.setHasDefaultRR();
    }
    void setHasCustomRR(bool inherited = false);
    void printCustomRR(bool inherited);

    bool hasCustomAWZ() {
        return ! bits.hasDefaultAWZ();
    }
    void setHasDefaultAWZ() {
        assert(isInitializing());
        bits.setHasDefaultAWZ();
    }
    void setHasCustomAWZ(bool inherited = false);
    void printCustomAWZ(bool inherited);

    bool instancesRequireRawIsa() {
        return bits.instancesRequireRawIsa();
    }
    void setInstancesRequireRawIsa(bool inherited = false);
    void printInstancesRequireRawIsa(bool inherited);

    bool canAllocNonpointer() {
        assert(!isFuture());
        return !instancesRequireRawIsa();
    }
    bool canAllocFast() {
        assert(!isFuture());
        return bits.canAllocFast();
    }

    bool hasCxxCtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxCtor();
    }
    void setHasCxxCtor() {
        bits.setHasCxxCtor();
    }

    bool hasCxxDtor() {
        // addSubclass() propagates this flag from the superclass.
        assert(isRealized());
        return bits.hasCxxDtor();
    }
    void setHasCxxDtor() {
        bits.setHasCxxDtor();
    }

    bool isSwift() {
        return bits.isSwift();
    }

    // Return YES if the class's ivars are managed by ARC,
    // or the class is MRC but has ARC-style weak ivars.
    bool hasAutomaticIvars() {
        return data()->ro->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC);
    }

    // Return YES if the class's ivars are managed by ARC.
    bool isARC() {
        return data()->ro->flags & RO_IS_ARC;
    }

#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture()  ||  isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        assert(isFuture()  ||  isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif

    bool shouldGrowCache() {
        return true;
    }

    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }

    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    void setInitializing() {
        assert(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();

    bool isLoadable() {
        assert(isRealized());
        return true;  // any class registered for +load is definitely loadable
    }

    IMP getLoadMethod();

    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() {
        return data()->flags & RW_REALIZED;
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() {
        return data()->flags & RW_FUTURE;
    }

    bool isMetaClass() {
        assert(isRealized());
        return data()->ro->flags & RO_META;
    }

    // NOT identical to this->ISA when this is a metaclass
    Class getMeta() {
        if (isMetaClass()) return (Class)this;
        else return this->ISA();
    }

    bool isRootClass() {
        return superclass == nil;
    }
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }

    const char *mangledName() {
        // fixme can't assert locks here
        if (isRealized()  ||  isFuture()) {
            return data()->ro->name;
        } else {
            return ((const class_ro_t *)data())->name;
        }
    }

    const char *demangledName(bool realize = false);
    const char *nameForLogging();

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceStart() {
        assert(isRealized());
        return data()->ro->instanceStart;
    }

    // Class's instance start rounded up to a pointer-size boundary.
    // This is used for ARC layout bitmaps.
    uint32_t alignedInstanceStart() {
        return word_align(unalignedInstanceStart());
    }

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() {
        assert(isRealized());
        return data()->ro->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() {
        return word_align(unalignedInstanceSize());
    }

    size_t instanceSize(size_t extraBytes) {
        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }

    void setInstanceSize(uint32_t newSize) {
        assert(isRealized());
        if (newSize != data()->ro->instanceSize) {
            assert(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
        }
        bits.setFastInstanceSize(newSize);
    }

    void chooseClassArrayIndex();

    void setClassArrayIndex(unsigned Idx) {
        bits.setClassArrayIndex(Idx);
    }

    unsigned classArrayIndex() {
        return bits.classArrayIndex();
    }
};
struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;

    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};
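
// Illustrative sketch (not part of this header): classAddressOffset is the
// distance from the start of the Swift class metadata to this embedded
// objc_class, so baseAddress() recovers the metadata base by subtraction.
//
//   static void *swiftMetadataBase(Class cls) {
//       // Assumes cls is already known to be Swift (cls->isSwift()).
//       return ((swift_class_t *)cls)->baseAddress();
//   }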
struct category_t {
    const char *name;
    classref_t cls;
    struct method_list_t *instanceMethods;
    struct method_list_t *classMethods;
    struct protocol_list_t *protocols;
    struct property_list_t *instanceProperties;
    // Fields below this point are not always present on disk.
    struct property_list_t *_classProperties;

    method_list_t *methodsForMeta(bool isMeta) {
        if (isMeta) return classMethods;
        else return instanceMethods;
    }

    property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);
};
struct objc_super2 {
    id receiver;
    Class current_class;
};

struct message_ref_t {
    IMP imp;
    SEL sel;
};
extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
// Scans a class and all of its realized subclasses; stops early if the
// block returns false.
static inline void
foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class))
{
    // runtimeLock.assertWriting();
    assert(top);
    Class cls = top;
    while (1) {
        if (!code(cls)) break;

        if (cls->data()->firstSubclass) {
            cls = cls->data()->firstSubclass;
        } else {
            while (!cls->data()->nextSiblingClass  &&  cls != top) {
                cls = cls->superclass;
            }
            if (cls == top) break;
            cls = cls->data()->nextSiblingClass;
        }
    }
}
// Enumerates a class and all of its realized subclasses.
static inline void
foreach_realized_class_and_subclass(Class top, void (^code)(Class))
{
    foreach_realized_class_and_subclass_2(top, ^bool(Class cls) {
        code(cls); return true;
    });
}
// Enumerates all realized classes and metaclasses.
extern Class firstRealizedClass();
static inline void
foreach_realized_class_and_metaclass(void (^code)(Class))
{
    for (Class top = firstRealizedClass();
         top != nil;
         top = top->data()->nextSiblingClass)
    {
        foreach_realized_class_and_subclass_2(top, ^bool(Class cls) {
            code(cls); return true;
        });
    }
}

#endif