__objc_init
_environ_init
_tls_init
-_lock_init
+_runtime_init
_recursive_mutex_init
_exception_init
_map_images
__ZNSt3__119__merge_move_assignIRN8method_t16SortBySELAddressEPS1_S4_N13method_list_t15method_iteratorEEEvT0_S7_T1_S8_T2_T_
_NXPtrIsEqual
__getObjc2CategoryList
+__getObjc2CategoryList2
__Z29addUnattachedCategoryForClassP10category_tP7class_tP12_header_info
__Z16remethodizeClassP7class_t
__Z11flushCachesP7class_t
_objc_msgSend_fixup
__objc_fixupMessageRef
_objc_msgSend
-__class_lookupMethodAndLoadCache3
_lookUpMethod
_prepareForMethodLookup
__class_initialize
_sel_getUid
__ZN12_GLOBAL__N_119AutoreleasePoolPage11tls_deallocEPv
__ZN12_GLOBAL__N_119AutoreleasePoolPage4killEv
-__objc_constructOrFree
_object_cxxConstruct
_object_cxxConstructFromClass
__class_hasCxxStructors
393CEAC60DC69E67000B69DE /* objc-references.h in Headers */ = {isa = PBXBuildFile; fileRef = 393CEAC50DC69E67000B69DE /* objc-references.h */; };
39ABD72312F0B61800D1054C /* objc-weak.h in Headers */ = {isa = PBXBuildFile; fileRef = 39ABD71F12F0B61800D1054C /* objc-weak.h */; };
39ABD72412F0B61800D1054C /* objc-weak.mm in Sources */ = {isa = PBXBuildFile; fileRef = 39ABD72012F0B61800D1054C /* objc-weak.mm */; };
+ 6E1475EA21DFDB1B001357EA /* llvm-AlignOf.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E1475E521DFDB1A001357EA /* llvm-AlignOf.h */; };
+ 6E1475EB21DFDB1B001357EA /* llvm-DenseMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E1475E621DFDB1B001357EA /* llvm-DenseMap.h */; };
+ 6E1475EC21DFDB1B001357EA /* llvm-DenseMapInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E1475E721DFDB1B001357EA /* llvm-DenseMapInfo.h */; };
+ 6E1475ED21DFDB1B001357EA /* llvm-type_traits.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E1475E821DFDB1B001357EA /* llvm-type_traits.h */; };
+ 6E1475EE21DFDB1B001357EA /* llvm-MathExtras.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E1475E921DFDB1B001357EA /* llvm-MathExtras.h */; };
+ 6ECD0B1F2244999E00910D88 /* llvm-DenseSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 6ECD0B1E2244999E00910D88 /* llvm-DenseSet.h */; };
+ 7213C36321FA7C730090A271 /* NSObject-internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 7213C36221FA7C730090A271 /* NSObject-internal.h */; settings = {ATTRIBUTES = (Private, ); }; };
7593EC58202248E50046AB96 /* objc-object.h in Headers */ = {isa = PBXBuildFile; fileRef = 7593EC57202248DF0046AB96 /* objc-object.h */; };
75A9504F202BAA0600D7D56F /* objc-locks-new.h in Headers */ = {isa = PBXBuildFile; fileRef = 75A9504E202BAA0300D7D56F /* objc-locks-new.h */; };
75A95051202BAA9A00D7D56F /* objc-locks.h in Headers */ = {isa = PBXBuildFile; fileRef = 75A95050202BAA9A00D7D56F /* objc-locks.h */; };
83F550E0155E030800E95D3B /* objc-cache-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83F550DF155E030800E95D3B /* objc-cache-old.mm */; };
87BB4EA70EC39854005D08E1 /* objc-probes.d in Sources */ = {isa = PBXBuildFile; fileRef = 87BB4E900EC39633005D08E1 /* objc-probes.d */; };
9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9672F7ED14D5F488007CEC96 /* NSObject.mm */; };
+ C2E6D3FC2225DCF00059DFAA /* DenseMapExtras.h in Headers */ = {isa = PBXBuildFile; fileRef = C2E6D3FB2225DCF00059DFAA /* DenseMapExtras.h */; };
E8923DA5116AB2820071B552 /* objc-block-trampolines.mm in Sources */ = {isa = PBXBuildFile; fileRef = E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */; };
F9BCC71B205C68E800DD9AFC /* objc-blocktramps-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 8379996D13CBAF6F007C2B5F /* objc-blocktramps-arm64.s */; };
/* End PBXBuildFile section */
393CEAC50DC69E67000B69DE /* objc-references.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-references.h"; path = "runtime/objc-references.h"; sourceTree = "<group>"; };
39ABD71F12F0B61800D1054C /* objc-weak.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-weak.h"; path = "runtime/objc-weak.h"; sourceTree = "<group>"; };
39ABD72012F0B61800D1054C /* objc-weak.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-weak.mm"; path = "runtime/objc-weak.mm"; sourceTree = "<group>"; };
+ 6E1475E521DFDB1A001357EA /* llvm-AlignOf.h */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.h; name = "llvm-AlignOf.h"; path = "runtime/llvm-AlignOf.h"; sourceTree = "<group>"; tabWidth = 2; };
+ 6E1475E621DFDB1B001357EA /* llvm-DenseMap.h */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.h; name = "llvm-DenseMap.h"; path = "runtime/llvm-DenseMap.h"; sourceTree = "<group>"; tabWidth = 2; };
+ 6E1475E721DFDB1B001357EA /* llvm-DenseMapInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.h; name = "llvm-DenseMapInfo.h"; path = "runtime/llvm-DenseMapInfo.h"; sourceTree = "<group>"; tabWidth = 2; };
+ 6E1475E821DFDB1B001357EA /* llvm-type_traits.h */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.h; name = "llvm-type_traits.h"; path = "runtime/llvm-type_traits.h"; sourceTree = "<group>"; tabWidth = 2; };
+ 6E1475E921DFDB1B001357EA /* llvm-MathExtras.h */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.h; name = "llvm-MathExtras.h"; path = "runtime/llvm-MathExtras.h"; sourceTree = "<group>"; tabWidth = 2; };
+ 6ECD0B1E2244999E00910D88 /* llvm-DenseSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "llvm-DenseSet.h"; path = "runtime/llvm-DenseSet.h"; sourceTree = "<group>"; };
+ 7213C36221FA7C730090A271 /* NSObject-internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "NSObject-internal.h"; path = "runtime/NSObject-internal.h"; sourceTree = "<group>"; };
7593EC57202248DF0046AB96 /* objc-object.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-object.h"; path = "runtime/objc-object.h"; sourceTree = "<group>"; };
75A9504E202BAA0300D7D56F /* objc-locks-new.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-locks-new.h"; path = "runtime/objc-locks-new.h"; sourceTree = "<group>"; };
75A95050202BAA9A00D7D56F /* objc-locks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-locks.h"; path = "runtime/objc-locks.h"; sourceTree = "<group>"; };
87BB4E900EC39633005D08E1 /* objc-probes.d */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.dtrace; name = "objc-probes.d"; path = "runtime/objc-probes.d"; sourceTree = "<group>"; };
9672F7ED14D5F488007CEC96 /* NSObject.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = NSObject.mm; path = runtime/NSObject.mm; sourceTree = "<group>"; };
BC8B5D1212D3D48100C78A5B /* libauto.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libauto.dylib; path = /usr/lib/libauto.dylib; sourceTree = "<absolute>"; };
+ C2E6D3FB2225DCF00059DFAA /* DenseMapExtras.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DenseMapExtras.h; path = runtime/DenseMapExtras.h; sourceTree = "<group>"; };
D2AAC0630554660B00DB518D /* libobjc.A.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libobjc.A.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
E8923D9C116AB2820071B552 /* objc-blocktramps-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-blocktramps-i386.s"; path = "runtime/objc-blocktramps-i386.s"; sourceTree = "<group>"; };
E8923D9D116AB2820071B552 /* objc-blocktramps-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-blocktramps-x86_64.s"; path = "runtime/objc-blocktramps-x86_64.s"; sourceTree = "<group>"; };
838485C70D6D688200CEA253 /* Private Headers */ = {
isa = PBXGroup;
children = (
+ 7213C36221FA7C730090A271 /* NSObject-internal.h */,
83112ED30F00599600A5FBAF /* objc-internal.h */,
834EC0A311614167009B2563 /* objc-abi.h */,
838485BB0D6D687300CEA253 /* maptable.h */,
8384862A0D6D6ABC00CEA253 /* Project Headers */ = {
isa = PBXGroup;
children = (
+ 6E1475E521DFDB1A001357EA /* llvm-AlignOf.h */,
+ 6E1475E621DFDB1B001357EA /* llvm-DenseMap.h */,
+ 6E1475E721DFDB1B001357EA /* llvm-DenseMapInfo.h */,
+ 6ECD0B1E2244999E00910D88 /* llvm-DenseSet.h */,
+ 6E1475E921DFDB1B001357EA /* llvm-MathExtras.h */,
+ 6E1475E821DFDB1B001357EA /* llvm-type_traits.h */,
+ C2E6D3FB2225DCF00059DFAA /* DenseMapExtras.h */,
83D9269721225A7400299F69 /* arm64-asm.h */,
83D92695212254CF00299F69 /* isa.h */,
838485CF0D6D68A200CEA253 /* objc-config.h */,
files = (
83A4AEDE1EA08C7200ACADDE /* ObjectiveC.apinotes in Headers */,
75A95051202BAA9A00D7D56F /* objc-locks.h in Headers */,
+ 6E1475ED21DFDB1B001357EA /* llvm-type_traits.h in Headers */,
83A4AEDC1EA0840800ACADDE /* module.modulemap in Headers */,
830F2A980D738DC200392440 /* hashtable.h in Headers */,
+ 6E1475EA21DFDB1B001357EA /* llvm-AlignOf.h in Headers */,
838485BF0D6D687300CEA253 /* hashtable2.h in Headers */,
+ 6E1475EC21DFDB1B001357EA /* llvm-DenseMapInfo.h in Headers */,
+ C2E6D3FC2225DCF00059DFAA /* DenseMapExtras.h in Headers */,
838486260D6D68F000CEA253 /* List.h in Headers */,
838485C30D6D687300CEA253 /* maptable.h in Headers */,
838486280D6D6A2400CEA253 /* message.h in Headers */,
838485F00D6D68A200CEA253 /* objc-auto.h in Headers */,
838485F40D6D68A200CEA253 /* objc-class.h in Headers */,
838485F60D6D68A200CEA253 /* objc-config.h in Headers */,
+ 6E1475EE21DFDB1B001357EA /* llvm-MathExtras.h in Headers */,
838485F80D6D68A200CEA253 /* objc-exception.h in Headers */,
83BE02E80FCCB24D00661494 /* objc-file-old.h in Headers */,
83BE02E90FCCB24D00661494 /* objc-file.h in Headers */,
831C85D50E10CF850066E64C /* objc-os.h in Headers */,
838486030D6D68A200CEA253 /* objc-private.h in Headers */,
393CEAC60DC69E67000B69DE /* objc-references.h in Headers */,
+ 6E1475EB21DFDB1B001357EA /* llvm-DenseMap.h in Headers */,
838486070D6D68A200CEA253 /* objc-runtime-new.h in Headers */,
83BE02EA0FCCB24D00661494 /* objc-runtime-old.h in Headers */,
8384860A0D6D68A200CEA253 /* objc-runtime.h in Headers */,
8384860C0D6D68A200CEA253 /* objc-sel-set.h in Headers */,
+ 7213C36321FA7C730090A271 /* NSObject-internal.h in Headers */,
838486100D6D68A200CEA253 /* objc-sync.h in Headers */,
83D92696212254CF00299F69 /* isa.h in Headers */,
838486130D6D68A200CEA253 /* objc.h in Headers */,
838486200D6D68A800CEA253 /* runtime.h in Headers */,
39ABD72312F0B61800D1054C /* objc-weak.h in Headers */,
83F4B52815E843B100E0926F /* NSObjCRuntime.h in Headers */,
+ 6ECD0B1F2244999E00910D88 /* llvm-DenseSet.h in Headers */,
83F4B52915E843B100E0926F /* NSObject.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
1DEB914B08733D8E0010E9CD /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
- ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
COPY_HEADERS_RUN_UNIFDEF = YES;
COPY_HEADERS_UNIFDEF_FLAGS = "-UBUILD_FOR_OSX";
"COPY_HEADERS_UNIFDEF_FLAGS[sdk=macosx*]" = "-DBUILD_FOR_OSX";
COPY_PHASE_STRIP = NO;
+ DEPLOYMENT_LOCATION = YES;
DYLIB_CURRENT_VERSION = 228;
EXECUTABLE_PREFIX = lib;
GCC_CW_ASM_SYNTAX = NO;
);
INSTALL_PATH = /usr/lib;
IS_ZIPPERED = YES;
+ LLVM_LTO = NO;
ORDER_FILE = "$(SDKROOT)/AppleInternal/OrderFiles/libobjc.order";
"ORDER_FILE[sdk=iphonesimulator*]" = "";
OTHER_CFLAGS = (
"-fdollars-in-identifiers",
+ "-fno-objc-convert-messages-to-runtime-calls",
"$(OTHER_CFLAGS)",
);
"OTHER_LDFLAGS[sdk=iphoneos*][arch=*]" = (
COPY_HEADERS_RUN_UNIFDEF = YES;
COPY_HEADERS_UNIFDEF_FLAGS = "-UBUILD_FOR_OSX";
"COPY_HEADERS_UNIFDEF_FLAGS[sdk=macosx*]" = "-DBUILD_FOR_OSX";
+ DEPLOYMENT_LOCATION = YES;
DYLIB_CURRENT_VERSION = 228;
EXECUTABLE_PREFIX = lib;
GCC_CW_ASM_SYNTAX = NO;
"ORDER_FILE[sdk=iphonesimulator*]" = "";
OTHER_CFLAGS = (
"-fdollars-in-identifiers",
+ "-fno-objc-convert-messages-to-runtime-calls",
"$(OTHER_CFLAGS)",
);
"OTHER_LDFLAGS[sdk=iphoneos*][arch=*]" = (
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_SHADOW = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
+ LLVM_LTO = YES;
OTHER_CFLAGS = "";
"OTHER_CFLAGS[arch=x86_64]" = "-fobjc-legacy-dispatch";
OTHER_CPLUSPLUSFLAGS = (
"$(OTHER_CFLAGS)",
"-D_LIBCPP_VISIBLE=\"\"",
);
+ SDKROOT = macosx.internal;
+ SUPPORTED_PLATFORMS = "macosx iphoneos";
WARNING_CFLAGS = (
"-Wall",
"-Wextra",
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_SHADOW = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
- "OTHER_CFLAGS[arch=i386]" = "-momit-leaf-frame-pointer";
+ LLVM_LTO = YES;
+ OTHER_CFLAGS = "-momit-leaf-frame-pointer";
"OTHER_CFLAGS[arch=x86_64]" = (
"-momit-leaf-frame-pointer",
"-fobjc-legacy-dispatch",
"$(OTHER_CFLAGS)",
"-D_LIBCPP_VISIBLE=\"\"",
);
+ SDKROOT = macosx.internal;
+ SUPPORTED_PLATFORMS = "macosx iphoneos";
WARNING_CFLAGS = (
"-Wall",
"-Wextra",
F9BCC725205C68E800DD9AFC /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
- ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
COPY_HEADERS_RUN_UNIFDEF = YES;
COPY_HEADERS_UNIFDEF_FLAGS = "-UBUILD_FOR_OSX";
"COPY_HEADERS_UNIFDEF_FLAGS[sdk=macosx*]" = "-DBUILD_FOR_OSX";
--- /dev/null
+/*
+ * Copyright (c) 2019 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef DENSEMAPEXTRAS_H
+#define DENSEMAPEXTRAS_H
+
+#include "llvm-DenseMap.h"
+#include "llvm-DenseSet.h"
+
+namespace objc {
+
+// We cannot use a C++ static initializer to initialize certain globals because
+// libc calls us before our C++ initializers run. We also don't want a global
+// pointer to some globals because of the extra indirection.
+//
+// ExplicitInit / LazyInit wrap doing it the hard way.
+template <typename Type>
+class ExplicitInit {
+ alignas(Type) uint8_t _storage[sizeof(Type)];
+
+public:
+ template <typename... Ts>
+ void init(Ts &&... Args) {
+ new (_storage) Type(std::forward<Ts>(Args)...);
+ }
+
+ Type &get() {
+ return *reinterpret_cast<Type *>(_storage);
+ }
+};
+
+template <typename Type>
+class LazyInit {
+ alignas(Type) uint8_t _storage[sizeof(Type)];
+ bool _didInit;
+
+public:
+ template <typename... Ts>
+ Type *get(bool allowCreate, Ts &&... Args) {
+ if (!_didInit) {
+ if (!allowCreate) {
+ return nullptr;
+ }
+ new (_storage) Type(std::forward<Ts>(Args)...);
+ _didInit = true;
+ }
+ return reinterpret_cast<Type *>(_storage);
+ }
+};
+
+// Convenience class for Dense Maps & Sets
+template <typename Key, typename Value>
+class ExplicitInitDenseMap : public ExplicitInit<DenseMap<Key, Value>> { };
+
+template <typename Key, typename Value>
+class LazyInitDenseMap : public LazyInit<DenseMap<Key, Value>> { };
+
+template <typename Value>
+class ExplicitInitDenseSet : public ExplicitInit<DenseSet<Value>> { };
+
+template <typename Value>
+class LazyInitDenseSet : public LazyInit<DenseSet<Value>> { };
+
+} // namespace objc
+
+#endif /* DENSEMAPEXTRAS_H */
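
The ExplicitInit pattern above exists because libc calls into libobjc before libobjc's own C++ initializers have run, so map-shaped globals have to be plain aligned storage that the runtime constructs by hand at a moment of its choosing. A minimal sketch of how such a global is meant to be used, assuming only the DenseMapExtras.h shown above; runtime_init_sketch, remember_class_sketch and namedClassesSketch are illustrative names, not symbols from this patch:

    // Sketch only: illustrates the ExplicitInit usage pattern.
    #include "DenseMapExtras.h"

    // No static constructor runs for this global; until init() it is just aligned raw bytes.
    static objc::ExplicitInitDenseMap<const char *, void *> namedClassesSketch;

    void runtime_init_sketch(void) {
        // Construct the DenseMap in place, once, during early runtime startup.
        namedClassesSketch.init();
    }

    void remember_class_sketch(const char *name, void *cls) {
        // get() assumes init() already ran; there is deliberately no guard.
        namedClassesSketch.get()[name] = cls;
    }

LazyInit differs only in that get() constructs on first use when allowCreate is true, for globals that may legitimately be queried before anything has initialized them.
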
-// _objc_entryPoints and _objc_exitPoints are used by method dispatch
+// _objc_restartableRanges is used by method dispatch
// caching code to figure out whether any threads are actively
// in the cache for dispatching. The labels surround the asm code
// that do cache lookups. The tables are zero-terminated.
-.align 2
-.private_extern _objc_entryPoints
-_objc_entryPoints:
- .long _cache_getImp
- .long _objc_msgSend
- .long _objc_msgSend_stret
- .long _objc_msgSendSuper
- .long _objc_msgSendSuper_stret
- .long _objc_msgSendSuper2
- .long _objc_msgSendSuper2_stret
- .long _objc_msgLookup
- .long _objc_msgLookup_stret
- .long _objc_msgLookupSuper2
- .long _objc_msgLookupSuper2_stret
- .long 0
-
-.private_extern _objc_exitPoints
-_objc_exitPoints:
- .long LExit_cache_getImp
- .long LExit_objc_msgSend
- .long LExit_objc_msgSend_stret
- .long LExit_objc_msgSendSuper
- .long LExit_objc_msgSendSuper_stret
- .long LExit_objc_msgSendSuper2
- .long LExit_objc_msgSendSuper2_stret
- .long LExit_objc_msgLookup
- .long LExit_objc_msgLookup_stret
- .long LExit_objc_msgLookupSuper2
- .long LExit_objc_msgLookupSuper2_stret
- .long 0
+.macro RestartableEntry
+ .long LLookupStart$0
+ .long 0
+ .short LLookupEnd$0 - LLookupStart$0
+ .short 0xffff // poor ol' armv7 doesn't support kernel based recovery
+ .long 0
+.endmacro
+
+ .align 4
+ .private_extern _objc_restartableRanges
+_objc_restartableRanges:
+ RestartableEntry _cache_getImp
+ RestartableEntry _objc_msgSend
+ RestartableEntry _objc_msgSend_stret
+ RestartableEntry _objc_msgSendSuper
+ RestartableEntry _objc_msgSendSuper_stret
+ RestartableEntry _objc_msgSendSuper2
+ RestartableEntry _objc_msgSendSuper2_stret
+ RestartableEntry _objc_msgLookup
+ RestartableEntry _objc_msgLookup_stret
+ RestartableEntry _objc_msgLookupSuper2
+ RestartableEntry _objc_msgLookupSuper2_stret
+ .fill 16, 1, 0
/********************************************************************
/////////////////////////////////////////////////////////////////////
//
-// CacheLookup NORMAL|STRET
-// CacheLookup2 NORMAL|STRET
+// CacheLookup NORMAL|STRET <function>
+// CacheLookup2 NORMAL|STRET <function>
//
// Locate the implementation for a selector in a class's method cache.
//
/////////////////////////////////////////////////////////////////////
.macro CacheLookup
-
+ //
+ // Restart protocol:
+ //
+ // As soon as we're past the LLookupStart$1 label we may have loaded
+ // an invalid cache pointer or mask.
+ //
+ // When task_restartable_ranges_synchronize() is called,
+ // (or when a signal hits us) before we're past LLookupEnd$1,
+ // then our PC will be reset to LCacheMiss$1 which forcefully
+ // jumps to the cache-miss codepath.
+ //
+ // It is assumed that the CacheMiss codepath starts right at the end
+ // of CacheLookup2 and will re-setup the registers to meet the cache-miss
+ // requirements:
+ //
+ // GETIMP:
+ // The cache-miss is just returning NULL (setting r9 to 0)
+ //
+ // NORMAL and STRET:
+ // - r0 or r1 (STRET) contains the receiver
+ // - r1 or r2 (STRET) contains the selector
+ // - r9 contains the isa (reloaded from r0/r1)
+ // - other registers are set as per calling conventions
+ //
+LLookupStart$1:
+
ldrh r12, [r9, #CACHE_MASK] // r12 = mask
ldr r9, [r9, #CACHE] // r9 = buckets
.if $0 == STRET
#endif
8:
cmp r12, #1
- blo 8f // if (bucket->sel == 0) cache miss
+ blo LCacheMiss$1 // if (bucket->sel == 0) cache miss
it eq // if (bucket->sel == 1) cache wrap
ldreq r9, [r9, #CACHED_IMP] // bucket->imp is before first bucket
ldr r12, [r9, #8]! // r12 = (++bucket)->sel
b 6b
-8:
+
+LLookupEnd$1:
+LCacheMiss$1:
.endmacro
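
The restart protocol described above also has a consumer side in the runtime's C++ code: before bucket arrays discarded by a cache resize can be freed, every thread still executing between an LLookupStart and LLookupEnd label has to be kicked onto its cache-miss path. A hedged sketch of that sequence; task_restartable_ranges_synchronize() is the call named in the comment (assumed to be declared by <mach/mach.h> on current SDKs), and the remaining identifiers are illustrative:

    // Sketch of freeing cache garbage safely under the restart protocol.
    #include <mach/mach.h>
    #include <cstdlib>
    #include <vector>

    static std::vector<void *> garbageBucketsSketch;  // old bucket arrays awaiting free()

    static void collect_cache_garbage_sketch(void) {
        if (garbageBucketsSketch.empty()) return;
        // Any thread caught between LLookupStart and LLookupEnd is restarted at its
        // cache-miss label, so after this call nobody can still be reading old buckets.
        task_restartable_ranges_synchronize(mach_task_self());
        for (void *buckets : garbageBucketsSketch) free(buckets);
        garbageBucketsSketch.clear();
    }
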
STATIC_ENTRY _cache_getImp
mov r9, r0
- CacheLookup NORMAL
+ CacheLookup NORMAL, _cache_getImp
// cache hit, IMP in r12
mov r0, r12
bx lr // return imp
- CacheLookup2 GETIMP
+ CacheLookup2 GETIMP, _cache_getImp
// cache miss, return nil
mov r0, #0
bx lr
ldr r9, [r0] // r9 = self->isa
GetClassFromIsa // r9 = class
- CacheLookup NORMAL
+ CacheLookup NORMAL, _objc_msgSend
// cache hit, IMP in r12, eq already set for nonstret forwarding
bx r12 // call imp
- CacheLookup2 NORMAL
+ CacheLookup2 NORMAL, _objc_msgSend
// cache miss
ldr r9, [r0] // r9 = self->isa
GetClassFromIsa // r9 = class
ldr r9, [r0] // r9 = self->isa
GetClassFromIsa // r9 = class
- CacheLookup NORMAL
+ CacheLookup NORMAL, _objc_msgLookup
// cache hit, IMP in r12, eq already set for nonstret forwarding
bx lr
- CacheLookup2 NORMAL
+ CacheLookup2 NORMAL, _objc_msgLookup
// cache miss
ldr r9, [r0] // r9 = self->isa
GetClassFromIsa // r9 = class
ldr r9, [r1] // r9 = self->isa
GetClassFromIsa // r9 = class
- CacheLookup STRET
+ CacheLookup STRET, _objc_msgSend_stret
// cache hit, IMP in r12, ne already set for stret forwarding
bx r12
- CacheLookup2 STRET
+ CacheLookup2 STRET, _objc_msgSend_stret
// cache miss
ldr r9, [r1] // r9 = self->isa
GetClassFromIsa // r9 = class
ldr r9, [r1] // r9 = self->isa
GetClassFromIsa // r9 = class
- CacheLookup STRET
+ CacheLookup STRET, _objc_msgLookup_stret
// cache hit, IMP in r12, ne already set for stret forwarding
bx lr
- CacheLookup2 STRET
+ CacheLookup2 STRET, _objc_msgLookup_stret
// cache miss
ldr r9, [r1] // r9 = self->isa
GetClassFromIsa // r9 = class
ENTRY _objc_msgSendSuper
ldr r9, [r0, #CLASS] // r9 = struct super->class
- CacheLookup NORMAL
+ CacheLookup NORMAL, _objc_msgSendSuper
// cache hit, IMP in r12, eq already set for nonstret forwarding
ldr r0, [r0, #RECEIVER] // load real receiver
bx r12 // call imp
- CacheLookup2 NORMAL
+ CacheLookup2 NORMAL, _objc_msgSendSuper
// cache miss
ldr r9, [r0, #CLASS] // r9 = struct super->class
ldr r0, [r0, #RECEIVER] // load real receiver
ldr r9, [r0, #CLASS] // class = struct super->class
ldr r9, [r9, #SUPERCLASS] // class = class->superclass
- CacheLookup NORMAL
+ CacheLookup NORMAL, _objc_msgSendSuper2
// cache hit, IMP in r12, eq already set for nonstret forwarding
ldr r0, [r0, #RECEIVER] // load real receiver
bx r12 // call imp
- CacheLookup2 NORMAL
+ CacheLookup2 NORMAL, _objc_msgSendSuper2
// cache miss
ldr r9, [r0, #CLASS] // class = struct super->class
ldr r9, [r9, #SUPERCLASS] // class = class->superclass
ldr r9, [r0, #CLASS] // class = struct super->class
ldr r9, [r9, #SUPERCLASS] // class = class->superclass
- CacheLookup NORMAL
+ CacheLookup NORMAL, _objc_msgLookupSuper2
// cache hit, IMP in r12, eq already set for nonstret forwarding
ldr r0, [r0, #RECEIVER] // load real receiver
bx lr
- CacheLookup2 NORMAL
+ CacheLookup2 NORMAL, _objc_msgLookupSuper2
// cache miss
ldr r9, [r0, #CLASS]
ldr r9, [r9, #SUPERCLASS] // r9 = class to search
ENTRY _objc_msgSendSuper_stret
ldr r9, [r1, #CLASS] // r9 = struct super->class
- CacheLookup STRET
+ CacheLookup STRET, _objc_msgSendSuper_stret
// cache hit, IMP in r12, ne already set for stret forwarding
ldr r1, [r1, #RECEIVER] // load real receiver
bx r12 // call imp
- CacheLookup2 STRET
+ CacheLookup2 STRET, _objc_msgSendSuper_stret
// cache miss
ldr r9, [r1, #CLASS] // r9 = struct super->class
ldr r1, [r1, #RECEIVER] // load real receiver
ldr r9, [r1, #CLASS] // class = struct super->class
ldr r9, [r9, #SUPERCLASS] // class = class->superclass
- CacheLookup STRET
+ CacheLookup STRET, _objc_msgSendSuper2_stret
// cache hit, IMP in r12, ne already set for stret forwarding
ldr r1, [r1, #RECEIVER] // load real receiver
bx r12 // call imp
- CacheLookup2 STRET
+ CacheLookup2 STRET, _objc_msgSendSuper2_stret
// cache miss
ldr r9, [r1, #CLASS] // class = struct super->class
ldr r9, [r9, #SUPERCLASS] // class = class->superclass
ldr r9, [r1, #CLASS] // class = struct super->class
ldr r9, [r9, #SUPERCLASS] // class = class->superclass
- CacheLookup STRET
+ CacheLookup STRET, _objc_msgLookupSuper2_stret
// cache hit, IMP in r12, ne already set for stret forwarding
ldr r1, [r1, #RECEIVER] // load real receiver
bx lr
- CacheLookup2 STRET
+ CacheLookup2 STRET, _objc_msgLookupSuper2_stret
// cache miss
ldr r9, [r1, #CLASS]
ldr r9, [r9, #SUPERCLASS] // r9 = class to search
sub sp, #8 // align stack
FP_SAVE
+ // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
.if $0 == NORMAL
// receiver already in r0
// selector already in r1
mov r1, r2 // selector
.endif
mov r2, r9 // class to search
-
- blx __class_lookupMethodAndLoadCache3
+ mov r3, #3 // LOOKUP_INITIALIZE | LOOKUP_RESOLVER
+ blx _lookUpImpOrForward
mov r12, r0 // r12 = IMP
.if $0 == NORMAL
#include <arm/arch.h>
#include "isa.h"
#include "arm64-asm.h"
+#include "objc-config.h"
.data
-// _objc_entryPoints and _objc_exitPoints are used by method dispatch
+// _objc_restartableRanges is used by method dispatch
// caching code to figure out whether any threads are actively
// in the cache for dispatching. The labels surround the asm code
// that do cache lookups. The tables are zero-terminated.
-.align 4
-.private_extern _objc_entryPoints
-_objc_entryPoints:
- PTR _cache_getImp
- PTR _objc_msgSend
- PTR _objc_msgSendSuper
- PTR _objc_msgSendSuper2
- PTR _objc_msgLookup
- PTR _objc_msgLookupSuper2
- PTR 0
-
-.private_extern _objc_exitPoints
-_objc_exitPoints:
- PTR LExit_cache_getImp
- PTR LExit_objc_msgSend
- PTR LExit_objc_msgSendSuper
- PTR LExit_objc_msgSendSuper2
- PTR LExit_objc_msgLookup
- PTR LExit_objc_msgLookupSuper2
- PTR 0
+.macro RestartableEntry
+#if __LP64__
+ .quad LLookupStart$0
+#else
+ .long LLookupStart$0
+ .long 0
+#endif
+ .short LLookupEnd$0 - LLookupStart$0
+ .short LLookupRecover$0 - LLookupStart$0
+ .long 0
+.endmacro
+
+ .align 4
+ .private_extern _objc_restartableRanges
+_objc_restartableRanges:
+ RestartableEntry _cache_getImp
+ RestartableEntry _objc_msgSend
+ RestartableEntry _objc_msgSendSuper
+ RestartableEntry _objc_msgSendSuper2
+ RestartableEntry _objc_msgLookup
+ RestartableEntry _objc_msgLookupSuper2
+ .fill 16, 1, 0
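
Each RestartableEntry above packs into 16 bytes: a 64-bit start address (emitted as two .longs on 32-bit targets), a 16-bit length, a 16-bit recovery offset (0xffff where kernel-based recovery is unsupported), and 32 bits of flags, with one all-zero entry terminating the table. A hedged C view of that layout; restartable_range_sketch is a local mirror of what the kernel is assumed to expect, not an official header, and count_ranges_sketch is illustrative:

    // Sketch of the per-entry layout _objc_restartableRanges emits.
    #include <stdint.h>

    struct restartable_range_sketch {
        uint64_t location;       // LLookupStart<fn>; .quad, or .long value + .long 0 on 32-bit
        uint16_t length;         // LLookupEnd<fn> - LLookupStart<fn>
        uint16_t recovery_offs;  // LLookupRecover<fn> - LLookupStart<fn>, or 0xffff if unsupported
        uint32_t flags;          // currently always 0
    };
    static_assert(sizeof(restartable_range_sketch) == 16, "one entry per 16 bytes");

    // The assembly table, seen from C. The runtime presumably hands this to the
    // kernel through a registration call that pairs with the
    // task_restartable_ranges_synchronize() named in the comments; that plumbing
    // is not part of this excerpt.
    extern "C" const restartable_range_sketch objc_restartableRanges[];

    static unsigned count_ranges_sketch(void) {
        unsigned n = 0;
        while (objc_restartableRanges[n].location) n++;   // the terminator is all zeroes
        return n;
    }
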
/* objc_super parameter to sendSuper */
/********************************************************************
*
- * CacheLookup NORMAL|GETIMP|LOOKUP
- *
+ * CacheLookup NORMAL|GETIMP|LOOKUP <function>
+ *
* Locate the implementation for a selector in a class method cache.
*
+ * When this is used in a function that doesn't hold the runtime lock,
+ * this represents the critical section that may access dead memory.
+ * If the kernel causes one of these functions to go down the recovery
+ * path, we pretend the lookup failed by jumping to the JumpMiss branch.
+ *
* Takes:
* x1 = selector
* x16 = class to be searched
#define GETIMP 1
#define LOOKUP 2
-// CacheHit: x17 = cached IMP, x12 = address of cached IMP, x1 = SEL
+// CacheHit: x17 = cached IMP, x12 = address of cached IMP, x1 = SEL, x16 = isa
.macro CacheHit
.if $0 == NORMAL
- TailCallCachedImp x17, x12, x1 // authenticate and call imp
+ TailCallCachedImp x17, x12, x1, x16 // authenticate and call imp
.elseif $0 == GETIMP
mov p0, p17
cbz p0, 9f // don't ptrauth a nil imp
- AuthAndResignAsIMP x0, x12, x1 // authenticate imp and re-sign as IMP
+ AuthAndResignAsIMP x0, x12, x1, x16 // authenticate imp and re-sign as IMP
9: ret // return IMP
.elseif $0 == LOOKUP
// No nil check for ptrauth: the caller would crash anyway when they
// jump to a nil IMP. We don't care if that jump also fails ptrauth.
- AuthAndResignAsIMP x17, x12, x1 // authenticate imp and re-sign as IMP
+ AuthAndResignAsIMP x17, x12, x1, x16 // authenticate imp and re-sign as IMP
ret // return imp via x17
.else
.abort oops
.endmacro
.macro CacheLookup
+ //
+ // Restart protocol:
+ //
+ // As soon as we're past the LLookupStart$1 label we may have loaded
+ // an invalid cache pointer or mask.
+ //
+ // When task_restartable_ranges_synchronize() is called,
+ // (or when a signal hits us) before we're past LLookupEnd$1,
+ // then our PC will be reset to LLookupRecover$1 which forcefully
+ // jumps to the cache-miss codepath, which has the following
+ // requirements:
+ //
+ // GETIMP:
+ // The cache-miss is just returning NULL (setting x0 to 0)
+ //
+ // NORMAL and LOOKUP:
+ // - x0 contains the receiver
+ // - x1 contains the selector
+ // - x16 contains the isa
+ // - other registers are set as per calling conventions
+ //
+LLookupStart$1:
+
// p1 = SEL, p16 = isa
- ldp p10, p11, [x16, #CACHE] // p10 = buckets, p11 = occupied|mask
-#if !__LP64__
- and w11, w11, 0xffff // p11 = mask
+ ldr p11, [x16, #CACHE] // p11 = mask|buckets
+
+#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
+ and p10, p11, #0x0000ffffffffffff // p10 = buckets
+ and p12, p1, p11, LSR #48 // x12 = _cmd & mask
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
+ and p10, p11, #~0xf // p10 = buckets
+ and p11, p11, #0xf // p11 = maskShift
+ mov p12, #0xffff
+ lsr p11, p12, p11 // p11 = mask = 0xffff >> p11
+ and p12, p1, p11 // x12 = _cmd & mask
+#else
+#error Unsupported cache mask storage for ARM64.
#endif
- and w12, w1, w11 // x12 = _cmd & mask
+
+
add p12, p10, p12, LSL #(1+PTRSHIFT)
// p12 = buckets + ((_cmd & mask) << (1+PTRSHIFT))
b 1b // loop
3: // wrap: p12 = first bucket, w11 = mask
- add p12, p12, w11, UXTW #(1+PTRSHIFT)
- // p12 = buckets + (mask << 1+PTRSHIFT)
+#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
+ add p12, p12, p11, LSR #(48 - (1+PTRSHIFT))
+ // p12 = buckets + (mask << 1+PTRSHIFT)
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
+ add p12, p12, p11, LSL #(1+PTRSHIFT)
+ // p12 = buckets + (mask << 1+PTRSHIFT)
+#else
+#error Unsupported cache mask storage for ARM64.
+#endif
// Clone scanning loop to miss instead of hang when cache is corrupt.
// The slow path may detect any corruption and halt later.
ldp p17, p9, [x12, #-BUCKET_SIZE]! // {imp, sel} = *--bucket
b 1b // loop
+LLookupEnd$1:
+LLookupRecover$1:
3: // double wrap
JumpMiss $0
-
+
.endmacro
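
Both CACHE_MASK_STORAGE layouts decoded above pack the bucket pointer and the capacity mask into the single word loaded from [x16, #CACHE]. A hedged C sketch of that decoding, with the bit placement read straight off the assembly; bucket_sketch and the helper names are illustrative:

    // Sketch of decoding the packed mask|buckets word the arm64 fast path loads.
    #include <stdint.h>

    struct bucket_sketch { uintptr_t imp; uintptr_t sel; };   // field order per the ldp above

    // CACHE_MASK_STORAGE_HIGH_16: mask in bits 63..48, bucket pointer in bits 47..0.
    static inline bucket_sketch *decode_high16(uintptr_t maskAndBuckets, uintptr_t *mask) {
        *mask = maskAndBuckets >> 48;
        return (bucket_sketch *)(maskAndBuckets & 0x0000ffffffffffffULL);
    }

    // CACHE_MASK_STORAGE_LOW_4: a 4-bit shift in the low bits; mask = 0xffff >> shift,
    // and the buckets pointer is 16-byte aligned so the low nibble can be masked off.
    static inline bucket_sketch *decode_low4(uintptr_t maskAndBuckets, uintptr_t *mask) {
        uintptr_t maskShift = maskAndBuckets & 0xf;
        *mask = (uintptr_t)0xffff >> maskShift;
        return (bucket_sketch *)(maskAndBuckets & ~(uintptr_t)0xf);
    }

    // Either way the first probe slot is buckets[sel & mask], which is exactly what
    // the assembly computes into p12 before scaling by the bucket size.
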
ldr p13, [x0] // p13 = isa
GetClassFromIsa_p16 p13 // p16 = class
LGetIsaDone:
- CacheLookup NORMAL // calls imp or objc_msgSend_uncached
+ // calls imp or objc_msgSend_uncached
+ CacheLookup NORMAL, _objc_msgSend
#if SUPPORT_TAGGED_POINTERS
LNilOrTagged:
ldr p13, [x0] // p13 = isa
GetClassFromIsa_p16 p13 // p16 = class
LLookup_GetIsaDone:
- CacheLookup LOOKUP // returns imp
+ // returns imp
+ CacheLookup LOOKUP, _objc_msgLookup
#if SUPPORT_TAGGED_POINTERS
LLookup_NilOrTagged:
b.eq LLookup_Nil // nil check
// tagged
- mov x10, #0xf000000000000000
- cmp x0, x10
- b.hs LLookup_ExtTag
adrp x10, _objc_debug_taggedpointer_classes@PAGE
add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF
ubfx x11, x0, #60, #4
ldr x16, [x10, x11, LSL #3]
- b LLookup_GetIsaDone
+ adrp x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGE
+ add x10, x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGEOFF
+ cmp x10, x16
+ b.ne LLookup_GetIsaDone
LLookup_ExtTag:
adrp x10, _objc_debug_taggedpointer_ext_classes@PAGE
UNWIND _objc_msgSendSuper, NoFrame
ldp p0, p16, [x0] // p0 = real receiver, p16 = class
- CacheLookup NORMAL // calls imp or objc_msgSend_uncached
+ // calls imp or objc_msgSend_uncached
+ CacheLookup NORMAL, _objc_msgSendSuper
END_ENTRY _objc_msgSendSuper
ldp p0, p16, [x0] // p0 = real receiver, p16 = class
ldr p16, [x16, #SUPERCLASS] // p16 = class->superclass
- CacheLookup NORMAL
+ CacheLookup NORMAL, _objc_msgSendSuper2
END_ENTRY _objc_msgSendSuper2
ldp p0, p16, [x0] // p0 = real receiver, p16 = class
ldr p16, [x16, #SUPERCLASS] // p16 = class->superclass
- CacheLookup LOOKUP
+ CacheLookup LOOKUP, _objc_msgLookupSuper2
END_ENTRY _objc_msgLookupSuper2
stp x6, x7, [sp, #(8*16+6*8)]
str x8, [sp, #(8*16+8*8)]
+ // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
// receiver and selector already in x0 and x1
mov x2, x16
- bl __class_lookupMethodAndLoadCache3
+ mov x3, #3
+ bl _lookUpImpOrForward
// IMP in x0
mov x17, x0
STATIC_ENTRY _cache_getImp
GetClassFromIsa_p16 p0
- CacheLookup GETIMP
+ CacheLookup GETIMP, _cache_getImp
LGetImpMiss:
mov p0, #0
.data
-// _objc_entryPoints and _objc_exitPoints are used by objc
+// _objc_restartableRanges is used by method dispatch
// to get the critical regions for which method caches
// cannot be garbage collected.
-.align 2
-.private_extern _objc_entryPoints
-_objc_entryPoints:
- .long __cache_getImp
- .long __cache_getMethod
- .long _objc_msgSend
- .long _objc_msgSend_fpret
- .long _objc_msgSend_stret
- .long _objc_msgSendSuper
- .long _objc_msgSendSuper_stret
+.macro RestartableEntry
+ .long $0
.long 0
-
-.private_extern _objc_exitPoints
-_objc_exitPoints:
- .long LGetImpExit
- .long LGetMethodExit
- .long LMsgSendExit
- .long LMsgSendFpretExit
- .long LMsgSendStretExit
- .long LMsgSendSuperExit
- .long LMsgSendSuperStretExit
+ .short $1 - $0
+ .short 0xffff // The old runtime doesn't support kernel based recovery
.long 0
+.endmacro
+
+ .align 4
+ .private_extern _objc_restartableRanges
+_objc_restartableRanges:
+ RestartableEntry __cache_getImp, LGetImpExit
+ RestartableEntry __cache_getMethod, LGetMethodExit
+ RestartableEntry _objc_msgSend, LMsgSendExit
+ RestartableEntry _objc_msgSend_fpret, LMsgSendFpretExit
+ RestartableEntry _objc_msgSend_stret, LMsgSendStretExit
+ RestartableEntry _objc_msgSendSuper, LMsgSendSuperExit
+ RestartableEntry _objc_msgSendSuper_stret, LMsgSendSuperStretExit
+ .fill 16, 1, 0
/********************************************************************
movdqa %xmm1, 2*16(%esp)
movdqa %xmm0, 1*16(%esp)
+ // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
+ movl $$3, 12(%esp) // LOOKUP_INITIALIZE | LOOKUP_RESOLVER
movl %eax, 8(%esp) // class
movl %ecx, 4(%esp) // selector
movl %edx, 0(%esp) // receiver
- call __class_lookupMethodAndLoadCache3
+ call _lookUpImpOrForward
movdqa 4*16(%esp), %xmm3
movdqa 3*16(%esp), %xmm2
.data
-// _objc_entryPoints and _objc_exitPoints are used by objc
+// _objc_restartableRanges is used by method dispatch
// to get the critical regions for which method caches
// cannot be garbage collected.
-.align 2
-.private_extern _objc_entryPoints
-_objc_entryPoints:
- .long _cache_getImp
- .long _objc_msgSend
- .long _objc_msgSend_fpret
- .long _objc_msgSend_stret
- .long _objc_msgSendSuper
- .long _objc_msgSendSuper2
- .long _objc_msgSendSuper_stret
- .long _objc_msgSendSuper2_stret
- .long _objc_msgLookup
- .long _objc_msgLookup_fpret
- .long _objc_msgLookup_stret
- .long _objc_msgLookupSuper2
- .long _objc_msgLookupSuper2_stret
+.macro RestartableEntry
+ .long $0
.long 0
-
-.private_extern _objc_exitPoints
-_objc_exitPoints:
- .long LExit_cache_getImp
- .long LExit_objc_msgSend
- .long LExit_objc_msgSend_fpret
- .long LExit_objc_msgSend_stret
- .long LExit_objc_msgSendSuper
- .long LExit_objc_msgSendSuper2
- .long LExit_objc_msgSendSuper_stret
- .long LExit_objc_msgSendSuper2_stret
- .long LExit_objc_msgLookup
- .long LExit_objc_msgLookup_fpret
- .long LExit_objc_msgLookup_stret
- .long LExit_objc_msgLookupSuper2
- .long LExit_objc_msgLookupSuper2_stret
+ .short LExit$0 - $0
+ .short 0xffff // The simulator doesn't support kernel based recovery
.long 0
+.endmacro
+
+ .align 4
+ .private_extern _objc_restartableRanges
+_objc_restartableRanges:
+ RestartableEntry _cache_getImp
+ RestartableEntry _objc_msgSend
+ RestartableEntry _objc_msgSend_fpret
+ RestartableEntry _objc_msgSend_stret
+ RestartableEntry _objc_msgSendSuper
+ RestartableEntry _objc_msgSendSuper2
+ RestartableEntry _objc_msgSendSuper_stret
+ RestartableEntry _objc_msgSendSuper2_stret
+ RestartableEntry _objc_msgLookup
+ RestartableEntry _objc_msgLookup_fpret
+ RestartableEntry _objc_msgLookup_stret
+ RestartableEntry _objc_msgLookupSuper2
+ RestartableEntry _objc_msgLookupSuper2_stret
+ .fill 16, 1, 0
/********************************************************************
.if $1 == GETIMP
movl cached_imp(%eax), %eax // return imp
- ret
+ cmpl $$0, %eax
+ jz 9f // don't xor a nil imp
+ xorl %edx, %eax // xor the isa with the imp
+9: ret
.else
+.if $1 == CALL
+ xorl cached_imp(%eax), %edx // xor imp and isa
.if $0 != STRET
- // eq already set for forwarding by `jne`
+ // ne already set for forwarding by `xor`
.else
- test %eax, %eax // set ne for stret forwarding
+ cmp %eax, %eax // set eq for stret forwarding
.endif
-
-.if $1 == CALL
- jmp *cached_imp(%eax) // call imp
+ jmp *%edx // call imp
.elseif $1 == LOOKUP
movl cached_imp(%eax), %eax // return imp
+ xorl %edx, %eax // xor isa into imp
ret
.else
movdqa %xmm1, 2*16(%esp)
movdqa %xmm0, 1*16(%esp)
+ // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
+ movl $$3, 12(%esp) // LOOKUP_INITIALIZE | LOOKUP_RESOLVER
movl %edx, 8(%esp) // class
movl %ecx, 4(%esp) // selector
movl %eax, 0(%esp) // receiver
- call __class_lookupMethodAndLoadCache3
+ call _lookUpImpOrForward
// imp in eax
movdqa 1*16(%esp), %xmm0
.if $0 == NORMAL
- cmp %eax, %eax // set eq for nonstret forwarding
-.else
test %eax, %eax // set ne for stret forwarding
+.else
+ cmp %eax, %eax // set eq for stret forwarding
.endif
leave
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band condition register is NE for stret, EQ otherwise.
- jne __objc_msgForward_stret
+ je __objc_msgForward_stret
jmp __objc_msgForward
END_ENTRY _objc_msgForward_impcache
.data
-// _objc_entryPoints and _objc_exitPoints are used by objc
+// _objc_restartableRanges is used by method dispatch
// to get the critical regions for which method caches
// cannot be garbage collected.
-.align 4
-.private_extern _objc_entryPoints
-_objc_entryPoints:
- .quad _cache_getImp
- .quad _objc_msgSend
- .quad _objc_msgSend_fpret
- .quad _objc_msgSend_fp2ret
- .quad _objc_msgSend_stret
- .quad _objc_msgSendSuper
- .quad _objc_msgSendSuper_stret
- .quad _objc_msgSendSuper2
- .quad _objc_msgSendSuper2_stret
- .quad _objc_msgLookup
- .quad _objc_msgLookup_fpret
- .quad _objc_msgLookup_fp2ret
- .quad _objc_msgLookup_stret
- .quad _objc_msgLookupSuper2
- .quad _objc_msgLookupSuper2_stret
- .quad 0
-
-.private_extern _objc_exitPoints
-_objc_exitPoints:
- .quad LExit_cache_getImp
- .quad LExit_objc_msgSend
- .quad LExit_objc_msgSend_fpret
- .quad LExit_objc_msgSend_fp2ret
- .quad LExit_objc_msgSend_stret
- .quad LExit_objc_msgSendSuper
- .quad LExit_objc_msgSendSuper_stret
- .quad LExit_objc_msgSendSuper2
- .quad LExit_objc_msgSendSuper2_stret
- .quad LExit_objc_msgLookup
- .quad LExit_objc_msgLookup_fpret
- .quad LExit_objc_msgLookup_fp2ret
- .quad LExit_objc_msgLookup_stret
- .quad LExit_objc_msgLookupSuper2
- .quad LExit_objc_msgLookupSuper2_stret
- .quad 0
+.macro RestartableEntry
+ .quad $0
+ .short LExit$0 - $0
+ .short 0xffff // The simulator doesn't support kernel based recovery
+ .long 0
+.endmacro
+
+ .align 4
+ .private_extern _objc_restartableRanges
+_objc_restartableRanges:
+ RestartableEntry _cache_getImp
+ RestartableEntry _objc_msgSend
+ RestartableEntry _objc_msgSend_fpret
+ RestartableEntry _objc_msgSend_fp2ret
+ RestartableEntry _objc_msgSend_stret
+ RestartableEntry _objc_msgSendSuper
+ RestartableEntry _objc_msgSendSuper_stret
+ RestartableEntry _objc_msgSendSuper2
+ RestartableEntry _objc_msgSendSuper2_stret
+ RestartableEntry _objc_msgLookup
+ RestartableEntry _objc_msgLookup_fpret
+ RestartableEntry _objc_msgLookup_fp2ret
+ RestartableEntry _objc_msgLookup_stret
+ RestartableEntry _objc_msgLookupSuper2
+ RestartableEntry _objc_msgLookupSuper2_stret
+ .fill 16, 1, 0
/********************************************************************
.macro CacheHit
- // CacheHit must always be preceded by a not-taken `jne` instruction
- // in order to set the correct flags for _objc_msgForward_impcache.
-
// r11 = found bucket
.if $1 == GETIMP
movq cached_imp(%r11), %rax // return imp
- ret
+ cmpq $$0, %rax
+ jz 9f // don't xor a nil imp
+ xorq %r10, %rax // xor the isa with the imp
+9: ret
.else
+.if $1 == CALL
+ movq cached_imp(%r11), %r11 // load imp
+ xorq %r10, %r11 // xor imp and isa
.if $0 != STRET
- // eq already set for forwarding by `jne`
+ // ne already set for forwarding by `xor`
.else
- test %r11, %r11 // set ne for stret forwarding
+ cmp %r11, %r11 // set eq for stret forwarding
.endif
+ jmp *%r11 // call imp
-.if $1 == CALL
- jmp *cached_imp(%r11) // call imp
-
.elseif $1 == LOOKUP
- movq cached_imp(%r11), %r11 // return imp
+ movq cached_imp(%r11), %r11
+ xorq %r10, %r11 // return imp ^ isa
ret
.else
cmpq cached_sel(%r11), %a3 // if (bucket->sel != _cmd)
.endif
jne 1f // scan more
- // CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0, $1 // call or return imp
1:
cmpq cached_sel(%r11), %a3 // if (bucket->sel != _cmd)
.endif
jne 1b // scan more
- // CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0, $1 // call or return imp
3:
cmpq cached_sel(%r11), %a3 // if (bucket->sel != _cmd)
.endif
jne 1b // scan more
- // CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0, $1 // call or return imp
3:
push %a6
movdqa %xmm7, -0x10(%rbp)
- // _class_lookupMethodAndLoadCache3(receiver, selector, class)
-
+ // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
.if $0 == NORMAL
// receiver already in a1
// selector already in a2
movq %a3, %a2
.endif
movq %r10, %a3
- call __class_lookupMethodAndLoadCache3
+ movl $$3, %a4d
+ call _lookUpImpOrForward
// IMP is now in %rax
movq %rax, %r11
movdqa -0x10(%rbp), %xmm7
.if $0 == NORMAL
- cmp %r11, %r11 // set eq for nonstret forwarding
-.else
test %r11, %r11 // set ne for stret forwarding
+.else
+ cmp %r11, %r11 // set eq for nonstret forwarding
.endif
leave
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band condition register is NE for stret, EQ otherwise.
- jne __objc_msgForward_stret
+ je __objc_msgForward_stret
jmp __objc_msgForward
END_ENTRY __objc_msgForward_impcache
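
Everywhere the old __class_lookupMethodAndLoadCache3 call appeared, the slow path now calls lookUpImpOrForward with a fourth "behavior" argument of 3. A hedged sketch of that call from C; the two flag values are inferred from the immediate 3 together with the LOOKUP_INITIALIZE | LOOKUP_RESOLVER comments in this patch, and any other behavior bits the runtime defines are not shown:

    // Sketch of the C-level call the rewritten MethodTableLookup macros make.
    #include <objc/objc.h>

    enum : int {
        LOOKUP_INITIALIZE = 1,   // run +initialize on the class if needed
        LOOKUP_RESOLVER   = 2,   // give +resolveInstanceMethod:/+resolveClassMethod: a chance
    };

    extern "C" IMP lookUpImpOrForward(id obj, SEL sel, Class cls, int behavior);

    static IMP method_table_lookup_sketch(id obj, SEL sel, Class cls) {
        // Equivalent of "mov r3, #3" (arm) or the "movl $3" stores in the i386/x86_64 macros.
        return lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER);
    }
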
mov eax, isa[edx]\r
\r
// MethodTableLookup WORD_RETURN, MSG_SEND\r
+ push $3\r
push eax\r
push ecx\r
push edx\r
- call _class_lookupMethodAndLoadCache3\r
+ call lookUpImpOrForward\r
\r
mov edx, kFwdMsgSend\r
leave\r
mov eax, isa[edx]\r
\r
// MethodTableLookup WORD_RETURN, MSG_SEND\r
+ push $3\r
push eax\r
push ecx\r
push edx\r
- call _class_lookupMethodAndLoadCache3\r
+ call lookUpImpOrForward\r
\r
mov edx, kFwdMsgSend\r
leave\r
mov eax, super_class[eax]\r
\r
// MethodTableLookup WORD_RETURN, MSG_SENDSUPER\r
+ push $3\r
push eax\r
push ecx\r
push edx\r
- call _class_lookupMethodAndLoadCache3\r
+ call lookUpImpOrForward\r
\r
mov edx, kFwdMsgSend\r
leave\r
mov eax, isa[edx]\r
\r
// MethodTableLookup WORD_RETURN, MSG_SEND\r
+ push $3\r
push eax\r
push ecx\r
push edx\r
- call _class_lookupMethodAndLoadCache3\r
+ call lookUpImpOrForward\r
\r
mov edx, kFwdMsgSendStret\r
leave\r
mov eax, super_class[eax]\r
\r
// MethodTableLookup WORD_RETURN, MSG_SENDSUPER\r
+ push $3\r
push eax\r
push ecx\r
push edx\r
- call _class_lookupMethodAndLoadCache3\r
+ call lookUpImpOrForward\r
\r
mov edx, kFwdMsgSendStret\r
leave\r
.data
-// _objc_entryPoints and _objc_exitPoints are used by objc
+// _objc_restartableRanges is used by method dispatch
// to get the critical regions for which method caches
// cannot be garbage collected.
-.align 4
-.private_extern _objc_entryPoints
-_objc_entryPoints:
- .quad _cache_getImp
- .quad _objc_msgSend
- .quad _objc_msgSend_fpret
- .quad _objc_msgSend_fp2ret
- .quad _objc_msgSend_stret
- .quad _objc_msgSendSuper
- .quad _objc_msgSendSuper_stret
- .quad _objc_msgSendSuper2
- .quad _objc_msgSendSuper2_stret
- .quad _objc_msgLookup
- .quad _objc_msgLookup_fpret
- .quad _objc_msgLookup_fp2ret
- .quad _objc_msgLookup_stret
- .quad _objc_msgLookupSuper2
- .quad _objc_msgLookupSuper2_stret
- .quad 0
-
-.private_extern _objc_exitPoints
-_objc_exitPoints:
- .quad LExit_cache_getImp
- .quad LExit_objc_msgSend
- .quad LExit_objc_msgSend_fpret
- .quad LExit_objc_msgSend_fp2ret
- .quad LExit_objc_msgSend_stret
- .quad LExit_objc_msgSendSuper
- .quad LExit_objc_msgSendSuper_stret
- .quad LExit_objc_msgSendSuper2
- .quad LExit_objc_msgSendSuper2_stret
- .quad LExit_objc_msgLookup
- .quad LExit_objc_msgLookup_fpret
- .quad LExit_objc_msgLookup_fp2ret
- .quad LExit_objc_msgLookup_stret
- .quad LExit_objc_msgLookupSuper2
- .quad LExit_objc_msgLookupSuper2_stret
- .quad 0
+.macro RestartableEntry
+ .quad LLookupStart$0
+ .short LLookupEnd$0 - LLookupStart$0
+ .short LCacheMiss$0 - LLookupStart$0
+ .long 0
+.endmacro
+
+ .align 4
+ .private_extern _objc_restartableRanges
+_objc_restartableRanges:
+ RestartableEntry _cache_getImp
+ RestartableEntry _objc_msgSend
+ RestartableEntry _objc_msgSend_fpret
+ RestartableEntry _objc_msgSend_fp2ret
+ RestartableEntry _objc_msgSend_stret
+ RestartableEntry _objc_msgSendSuper
+ RestartableEntry _objc_msgSendSuper_stret
+ RestartableEntry _objc_msgSendSuper2
+ RestartableEntry _objc_msgSendSuper2_stret
+ RestartableEntry _objc_msgLookup
+ RestartableEntry _objc_msgLookup_fpret
+ RestartableEntry _objc_msgLookup_fp2ret
+ RestartableEntry _objc_msgLookup_stret
+ RestartableEntry _objc_msgLookupSuper2
+ RestartableEntry _objc_msgLookupSuper2_stret
+ .fill 16, 1, 0
/********************************************************************
* DO NOT USE THESE LABELS ELSEWHERE
* Reserved labels: 6: 7: 8: 9:
********************************************************************/
-#define LCacheMiss 6
-#define LCacheMiss_f 6f
-#define LCacheMiss_b 6b
#define LNilTestSlow 7
#define LNilTestSlow_f 7f
#define LNilTestSlow_b 7b
/////////////////////////////////////////////////////////////////////
//
-// CacheLookup return-type, caller
+// CacheLookup return-type, caller, function
//
// Locate the implementation for a class in a selector's method cache.
//
+// When this is used in a function that doesn't hold the runtime lock,
+// this represents the critical section that may access dead memory.
+// If the kernel causes one of these functions to go down the recovery
+// path, we pretend the lookup failed by jumping to the JumpMiss branch.
+//
// Takes:
// $0 = NORMAL, FPRET, FP2RET, STRET
// $1 = CALL, LOOKUP, GETIMP
.macro CacheHit
- // CacheHit must always be preceded by a not-taken `jne` instruction
- // in order to set the correct flags for _objc_msgForward_impcache.
-
// r11 = found bucket
.if $1 == GETIMP
movq cached_imp(%r11), %rax // return imp
- ret
+ cmpq $$0, %rax
+ jz 9f // don't xor a nil imp
+ xorq %r10, %rax // xor the isa with the imp
+9: ret
.else
+.if $1 == CALL
+ movq cached_imp(%r11), %r11 // load imp
+ xorq %r10, %r11 // xor imp and isa
.if $0 != STRET
- // eq already set for forwarding by `jne`
+ // ne already set for forwarding by `xor`
.else
- test %r11, %r11 // set ne for stret forwarding
+ cmp %r11, %r11 // set eq for stret forwarding
.endif
+ jmp *%r11 // call imp
-.if $1 == CALL
- jmp *cached_imp(%r11) // call imp
-
.elseif $1 == LOOKUP
- movq cached_imp(%r11), %r11 // return imp
+ movq cached_imp(%r11), %r11
+ xorq %r10, %r11 // return imp ^ isa
ret
.else
.macro CacheLookup
+ //
+ // Restart protocol:
+ //
+ // As soon as we're past the LLookupStart$1 label we may have loaded
+ // an invalid cache pointer or mask.
+ //
+ // When task_restartable_ranges_synchronize() is called,
+ // (or when a signal hits us) before we're past LLookupEnd$1,
+ // then our PC will be reset to LCacheMiss$1 which forcefully
+ // jumps to the cache-miss codepath, which has the following
+ // requirements:
+ //
+ // GETIMP:
+ // The cache-miss is just returning NULL (setting %rax to 0)
+ //
+ // NORMAL and STRET:
+ // - a1 or a2 (STRET) contains the receiver
+ // - a2 or a3 (STRET) contains the selector
+ // - r10 contains the isa
+ // - other registers are set as per calling conventions
+ //
+LLookupStart$2:
+
.if $0 != STRET
movq %a2, %r11 // r11 = _cmd
.else
cmpq cached_sel(%r11), %a3 // if (bucket->sel != _cmd)
.endif
jne 1f // scan more
- // CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0, $1 // call or return imp
1:
cmpq cached_sel(%r11), %a3 // if (bucket->sel != _cmd)
.endif
jne 1b // scan more
- // CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0, $1 // call or return imp
3:
// wrap or miss
- jb LCacheMiss_f // if (bucket->sel < 1) cache miss
+ jb LCacheMiss$2 // if (bucket->sel < 1) cache miss
// wrap
movq cached_imp(%r11), %r11 // bucket->imp is really first bucket
jmp 2f
cmpq cached_sel(%r11), %a3 // if (bucket->sel != _cmd)
.endif
jne 1b // scan more
- // CacheHit must always be preceded by a not-taken `jne` instruction
CacheHit $0, $1 // call or return imp
3:
// double wrap or miss
- jmp LCacheMiss_f
+ jmp LCacheMiss$2
+LLookupEnd$2:
.endmacro
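
The CacheHit rewrite above stops jumping through a raw cached imp: what a bucket stores is now the IMP xor-ed with the class (the isa in %r10), so the method cache never holds a plain function pointer and an entry only decodes correctly against the class it was cached for; the arm64 path appears to get the same property by folding the isa (x16) into its ptrauth modifier instead. A hedged sketch of the x86_64 encoding; the helper names are illustrative:

    // Sketch of the bucket encoding implied by the xorq instructions in CacheHit.
    #include <objc/objc.h>
    #include <stdint.h>

    static inline uintptr_t cache_encode_imp_sketch(IMP imp, Class cls) {
        if (!imp) return 0;                          // a nil imp is stored as 0
        return (uintptr_t)imp ^ (uintptr_t)cls;
    }

    static inline IMP cache_decode_imp_sketch(uintptr_t stored, Class cls) {
        if (!stored) return (IMP)0;                  // GETIMP path: "don't xor a nil imp"
        return (IMP)(stored ^ (uintptr_t)cls);       // CALL/LOOKUP path: xorq %r10, ...
    }
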
push %a6
movdqa %xmm7, -0x10(%rbp)
- // _class_lookupMethodAndLoadCache3(receiver, selector, class)
-
+ // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
.if $0 == NORMAL
// receiver already in a1
// selector already in a2
movq %a3, %a2
.endif
movq %r10, %a3
- call __class_lookupMethodAndLoadCache3
+ movl $$3, %a4d
+ call _lookUpImpOrForward
// IMP is now in %rax
movq %rax, %r11
movdqa -0x10(%rbp), %xmm7
.if $0 == NORMAL
- cmp %r11, %r11 // set eq for nonstret forwarding
+ test %r11, %r11 // set ne for nonstret forwarding
.else
- test %r11, %r11 // set ne for stret forwarding
+ cmp %r11, %r11 // set eq for stret forwarding
.endif
leave
// do lookup
movq %a1, %r10 // move class to r10 for CacheLookup
- CacheLookup NORMAL, GETIMP // returns IMP on success
+ // returns IMP on success
+ CacheLookup NORMAL, GETIMP, _cache_getImp
-LCacheMiss:
+LCacheMiss_cache_getImp:
// cache miss, return nil
xorl %eax, %eax
ret
NilTest NORMAL
GetIsaFast NORMAL // r10 = self->isa
- CacheLookup NORMAL, CALL // calls IMP on success
+ // calls IMP on success
+ CacheLookup NORMAL, CALL, _objc_msgSend
NilTestReturnZero NORMAL
GetIsaSupport NORMAL
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgSend:
// isa still in r10
jmp __objc_msgSend_uncached
NilTest NORMAL
GetIsaFast NORMAL // r10 = self->isa
- CacheLookup NORMAL, LOOKUP // returns IMP on success
+ // returns IMP on success
+ CacheLookup NORMAL, LOOKUP, _objc_msgLookup
NilTestReturnIMP NORMAL
GetIsaSupport NORMAL
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgLookup:
// isa still in r10
jmp __objc_msgLookup_uncached
// search the cache (objc_super in %a1)
movq class(%a1), %r10 // class = objc_super->class
movq receiver(%a1), %a1 // load real receiver
- CacheLookup NORMAL, CALL // calls IMP on success
+ // calls IMP on success
+ CacheLookup NORMAL, CALL, _objc_msgSendSuper
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgSendSuper:
// class still in r10
jmp __objc_msgSend_uncached
movq class(%a1), %r10 // cls = objc_super->class
movq receiver(%a1), %a1 // load real receiver
movq 8(%r10), %r10 // cls = class->superclass
- CacheLookup NORMAL, CALL // calls IMP on success
+ // calls IMP on success
+ CacheLookup NORMAL, CALL, _objc_msgSendSuper2
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgSendSuper2:
// superclass still in r10
jmp __objc_msgSend_uncached
movq class(%a1), %r10 // cls = objc_super->class
movq receiver(%a1), %a1 // load real receiver
movq 8(%r10), %r10 // cls = class->superclass
- CacheLookup NORMAL, LOOKUP // returns IMP on success
+ // returns IMP on success
+ CacheLookup NORMAL, LOOKUP, _objc_msgLookupSuper2
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgLookupSuper2:
// superclass still in r10
jmp __objc_msgLookup_uncached
NilTest FPRET
GetIsaFast FPRET // r10 = self->isa
- CacheLookup FPRET, CALL // calls IMP on success
+ // calls IMP on success
+ CacheLookup FPRET, CALL, _objc_msgSend_fpret
NilTestReturnZero FPRET
GetIsaSupport FPRET
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgSend_fpret:
// isa still in r10
jmp __objc_msgSend_uncached
NilTest FPRET
GetIsaFast FPRET // r10 = self->isa
- CacheLookup FPRET, LOOKUP // returns IMP on success
+ // returns IMP on success
+ CacheLookup FPRET, LOOKUP, _objc_msgLookup_fpret
NilTestReturnIMP FPRET
GetIsaSupport FPRET
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgLookup_fpret:
// isa still in r10
jmp __objc_msgLookup_uncached
NilTest FP2RET
GetIsaFast FP2RET // r10 = self->isa
- CacheLookup FP2RET, CALL // calls IMP on success
+ // calls IMP on success
+ CacheLookup FP2RET, CALL, _objc_msgSend_fp2ret
NilTestReturnZero FP2RET
GetIsaSupport FP2RET
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgSend_fp2ret:
// isa still in r10
jmp __objc_msgSend_uncached
NilTest FP2RET
GetIsaFast FP2RET // r10 = self->isa
- CacheLookup FP2RET, LOOKUP // returns IMP on success
+ // returns IMP on success
+ CacheLookup FP2RET, LOOKUP, _objc_msgLookup_fp2ret
NilTestReturnIMP FP2RET
GetIsaSupport FP2RET
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgLookup_fp2ret:
// isa still in r10
jmp __objc_msgLookup_uncached
NilTest STRET
GetIsaFast STRET // r10 = self->isa
- CacheLookup STRET, CALL // calls IMP on success
+ // calls IMP on success
+ CacheLookup STRET, CALL, _objc_msgSend_stret
NilTestReturnZero STRET
GetIsaSupport STRET
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgSend_stret:
// isa still in r10
jmp __objc_msgSend_stret_uncached
NilTest STRET
GetIsaFast STRET // r10 = self->isa
- CacheLookup STRET, LOOKUP // returns IMP on success
+ // returns IMP on success
+ CacheLookup STRET, LOOKUP, _objc_msgLookup_stret
NilTestReturnIMP STRET
GetIsaSupport STRET
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgLookup_stret:
// isa still in r10
jmp __objc_msgLookup_stret_uncached
// search the cache (objc_super in %a2)
movq class(%a2), %r10 // class = objc_super->class
movq receiver(%a2), %a2 // load real receiver
- CacheLookup STRET, CALL // calls IMP on success
+ // calls IMP on success
+ CacheLookup STRET, CALL, _objc_msgSendSuper_stret
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgSendSuper_stret:
// class still in r10
jmp __objc_msgSend_stret_uncached
movq class(%a2), %r10 // class = objc_super->class
movq receiver(%a2), %a2 // load real receiver
movq 8(%r10), %r10 // class = class->superclass
- CacheLookup STRET, CALL // calls IMP on success
+ // calls IMP on success
+ CacheLookup STRET, CALL, _objc_msgSendSuper2_stret
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgSendSuper2_stret:
// superclass still in r10
jmp __objc_msgSend_stret_uncached
movq class(%a2), %r10 // class = objc_super->class
movq receiver(%a2), %a2 // load real receiver
movq 8(%r10), %r10 // class = class->superclass
- CacheLookup STRET, LOOKUP // returns IMP on success
+ // returns IMP on success
+ CacheLookup STRET, LOOKUP, _objc_msgLookupSuper2_stret
// cache miss: go search the method lists
-LCacheMiss:
+LCacheMiss_objc_msgLookupSuper2_stret:
// superclass still in r10
jmp __objc_msgLookup_stret_uncached
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band condition register is NE for stret, EQ otherwise.
- jne __objc_msgForward_stret
+ je __objc_msgForward_stret
jmp __objc_msgForward
END_ENTRY __objc_msgForward_impcache
--- /dev/null
+/*
+ * Copyright (c) 2019 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _NSOBJECT_INTERNAL_H
+#define _NSOBJECT_INTERNAL_H
+
+/*
+ * WARNING DANGER HAZARD BEWARE EEK
+ *
+ * Everything in this file is for Apple Internal use only.
+ * These will change in arbitrary OS updates and in unpredictable ways.
+ * When your program breaks, you get to keep both pieces.
+ */
+
+/*
+ * NSObject-internal.h: Private SPI for use by other system frameworks.
+ */
+
+/***********************************************************************
+ Autorelease pool implementation
+
+ A thread's autorelease pool is a stack of pointers.
+ Each pointer is either an object to release, or POOL_BOUNDARY which is
+ an autorelease pool boundary.
+ A pool token is a pointer to the POOL_BOUNDARY for that pool. When
+ the pool is popped, every object hotter than the sentinel is released.
+ The stack is divided into a doubly-linked list of pages. Pages are added
+ and deleted as necessary.
+ Thread-local storage points to the hot page, where newly autoreleased
+ objects are stored.
+**********************************************************************/
+
+// structure version number. Only bump if ABI compatibility is broken
+#define AUTORELEASEPOOL_VERSION 1
+
+// Set this to 1 to mprotect() autorelease pool contents
+#define PROTECT_AUTORELEASEPOOL 0
+
+// Set this to 1 to validate the entire autorelease pool header all the time
+// (i.e. use check() instead of fastcheck() everywhere)
+#define CHECK_AUTORELEASEPOOL (DEBUG)
+
+#ifdef __cplusplus
+#include <string.h>
+#include <assert.h>
+#include <objc/objc.h>
+#include <pthread.h>
+
+
+#ifndef C_ASSERT
+ #if __has_feature(cxx_static_assert)
+ #define C_ASSERT(expr) static_assert(expr, "(" #expr ")!")
+ #elif __has_feature(c_static_assert)
+ #define C_ASSERT(expr) _Static_assert(expr, "(" #expr ")!")
+ #else
+ #define C_ASSERT(expr)
+ #endif
+#endif
+
+// Make ASSERT work when objc-private.h hasn't been included.
+#ifndef ASSERT
+#define ASSERT(x) assert(x)
+#endif
+
+struct magic_t {
+ static const uint32_t M0 = 0xA1A1A1A1;
+# define M1 "AUTORELEASE!"
+ static const size_t M1_len = 12;
+ uint32_t m[4];
+
+ magic_t() {
+ ASSERT(M1_len == strlen(M1));
+ ASSERT(M1_len == 3 * sizeof(m[1]));
+
+ m[0] = M0;
+ strncpy((char *)&m[1], M1, M1_len);
+ }
+
+ ~magic_t() {
+ // Clear magic before deallocation.
+ // This prevents some false positives in memory debugging tools.
+ // fixme semantically this should be memset_s(), but the
+ // compiler doesn't optimize that at all (rdar://44856676).
+ volatile uint64_t *p = (volatile uint64_t *)m;
+ p[0] = 0; p[1] = 0;
+ }
+
+ bool check() const {
+ return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
+ }
+
+ bool fastcheck() const {
+#if CHECK_AUTORELEASEPOOL
+ return check();
+#else
+ return (m[0] == M0);
+#endif
+ }
+
+# undef M1
+};
+
+class AutoreleasePoolPage;
+struct AutoreleasePoolPageData
+{
+ magic_t const magic;
+ __unsafe_unretained id *next;
+ pthread_t const thread;
+ AutoreleasePoolPage * const parent;
+ AutoreleasePoolPage *child;
+ uint32_t const depth;
+ uint32_t hiwat;
+
+ AutoreleasePoolPageData(__unsafe_unretained id* _next, pthread_t _thread, AutoreleasePoolPage* _parent, uint32_t _depth, uint32_t _hiwat)
+ : magic(), next(_next), thread(_thread),
+ parent(_parent), child(nil),
+ depth(_depth), hiwat(_hiwat)
+ {
+ }
+};
+
+
+struct thread_data_t
+{
+#ifdef __LP64__
+ pthread_t const thread;
+ uint32_t const hiwat;
+ uint32_t const depth;
+#else
+ pthread_t const thread;
+ uint32_t const hiwat;
+ uint32_t const depth;
+ uint32_t padding;
+#endif
+};
+C_ASSERT(sizeof(thread_data_t) == 16);
+
+#undef C_ASSERT
+
+#endif
+#endif
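The page layout declared in this new header is easiest to see from the caller's side. Here is a minimal sketch of the token model it describes, using the public runtime entry points defined later in NSObject.mm rather than anything introduced by this diff (the object parameter is illustrative):

    // Sketch only: the pool-token model described in the header above.
    static void pool_token_example(id obj) {
        void *token = objc_autoreleasePoolPush();  // pushes POOL_BOUNDARY, returns its slot
        objc_autorelease(obj);                     // stored in the hot page, hotter than the boundary
        objc_autoreleasePoolPop(token);            // releases everything hotter than the boundary
    }

The EMPTY_POOL_PLACEHOLDER path that appears further down covers the common case where a pool is pushed and popped without ever holding an object, so no page has to be allocated at all.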
#include "NSObject.h"
#include "objc-weak.h"
-#include "llvm-DenseMap.h"
-#include "NSObject.h"
+#include "DenseMapExtras.h"
#include <malloc/malloc.h>
#include <stdint.h>
#include <mach-o/nlist.h>
#include <sys/types.h>
#include <sys/mman.h>
-#include <libkern/OSAtomic.h>
#include <Block.h>
#include <map>
#include <execinfo.h>
+#include "NSObject-internal.h"
@interface NSInvocation
- (SEL)selector;
@end
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_magic_offset = __builtin_offsetof(AutoreleasePoolPageData, magic);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_next_offset = __builtin_offsetof(AutoreleasePoolPageData, next);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_thread_offset = __builtin_offsetof(AutoreleasePoolPageData, thread);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset = __builtin_offsetof(AutoreleasePoolPageData, parent);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset = __builtin_offsetof(AutoreleasePoolPageData, child);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset = __builtin_offsetof(AutoreleasePoolPageData, depth);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset = __builtin_offsetof(AutoreleasePoolPageData, hiwat);
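These objc_debug_autoreleasepoolpage_*_offset constants are exported so out-of-process tools can decode a pool page without hard-coding the struct layout. A hedged sketch of the intended use follows; read_remote_pointer and target_page_address stand in for whatever remote-memory primitives the tool actually has:

    // Hypothetical debugger-side use of the exported offsets.
    uintptr_t page   = target_page_address;  // illustrative input
    uintptr_t next   = read_remote_pointer(page + objc_debug_autoreleasepoolpage_next_offset);
    uintptr_t parent = read_remote_pointer(page + objc_debug_autoreleasepoolpage_parent_offset);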
/***********************************************************************
* Weak ivar support
cls->nameForLogging());
}
-static id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
+id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
-static id callBadAllocHandler(Class cls)
+id _objc_callBadAllocHandler(Class cls)
{
// fixme add re-entrancy protection in case allocation fails inside handler
return (*badAllocHandler)(cls);
#define SIDE_TABLE_RC_SHIFT 2
#define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
+struct RefcountMapValuePurgeable {
+ static inline bool isPurgeable(size_t x) {
+ return x == 0;
+ }
+};
+
// RefcountMap disguises its pointers because we
// don't want the table to act as a root for `leaks`.
-typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;
+typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,RefcountMapValuePurgeable> RefcountMap;
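Two notes on the template arguments above, both sketched under assumptions rather than copied from this diff: DisguisedPtr stores an arithmetic transform of the address so the table never holds a raw pointer that tools like `leaks` would treat as a root, and the value-info trait tells DenseMap which stored values it may discard while rehashing (here, entries whose refcount word is 0).

    // Rough sketch of the disguising idea; the real DisguisedPtr lives in objc-private.h.
    template <typename T>
    struct DisguisedPtrSketch {
        uintptr_t value;
        DisguisedPtrSketch(T *p) : value(-(uintptr_t)p) {}  // hide the raw address
        T *ptr() const { return (T *)-value; }              // recover it on use
    };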
// Template parameters.
enum HaveOld { DontHaveOld = false, DoHaveOld = true };
lock2->unlock();
}
-
-// We cannot use a C++ static initializer to initialize SideTables because
-// libc calls us before our C++ initializers run. We also don't want a global
-// pointer to this struct because of the extra indirection.
-// Do it the hard way.
-alignas(StripedMap<SideTable>) static uint8_t
- SideTableBuf[sizeof(StripedMap<SideTable>)];
-
-static void SideTableInit() {
- new (SideTableBuf) StripedMap<SideTable>();
-}
+static objc::ExplicitInit<StripedMap<SideTable>> SideTablesMap;
static StripedMap<SideTable>& SideTables() {
- return *reinterpret_cast<StripedMap<SideTable>*>(SideTableBuf);
+ return SideTablesMap.get();
}
// anonymous namespace
static id
storeWeak(id *location, objc_object *newObj)
{
- assert(haveOld || haveNew);
- if (!haveNew) assert(newObj == nil);
+ ASSERT(haveOld || haveNew);
+ if (!haveNew) ASSERT(newObj == nil);
Class previouslyInitializedClass = nil;
id oldObj;
if (! cls->hasCustomRR()) {
// Fast case. We know +initialize is complete because
// default-RR can never be set before then.
- assert(cls->isInitialized());
+ ASSERT(cls->isInitialized());
if (! obj->rootTryRetain()) {
result = nil;
}
// the lock if necessary in order to avoid deadlocks.
if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
- class_getMethodImplementation(cls, SEL_retainWeakReference);
+ class_getMethodImplementation(cls, @selector(retainWeakReference));
if ((IMP)tryRetain == _objc_msgForward) {
result = nil;
}
- else if (! (*tryRetain)(obj, SEL_retainWeakReference)) {
+ else if (! (*tryRetain)(obj, @selector(retainWeakReference))) {
result = nil;
}
}
objects are stored.
**********************************************************************/
-// Set this to 1 to mprotect() autorelease pool contents
-#define PROTECT_AUTORELEASEPOOL 0
-
-// Set this to 1 to validate the entire autorelease pool header all the time
-// (i.e. use check() instead of fastcheck() everywhere)
-#define CHECK_AUTORELEASEPOOL (DEBUG)
-
BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
-namespace {
-
-struct magic_t {
- static const uint32_t M0 = 0xA1A1A1A1;
-# define M1 "AUTORELEASE!"
- static const size_t M1_len = 12;
- uint32_t m[4];
-
- magic_t() {
- assert(M1_len == strlen(M1));
- assert(M1_len == 3 * sizeof(m[1]));
-
- m[0] = M0;
- strncpy((char *)&m[1], M1, M1_len);
- }
-
- ~magic_t() {
- // Clear magic before deallocation.
- // This prevents some false positives in memory debugging tools.
- // fixme semantically this should be memset_s(), but the
- // compiler doesn't optimize that at all (rdar://44856676).
- volatile uint64_t *p = (volatile uint64_t *)m;
- p[0] = 0; p[1] = 0;
- }
-
- bool check() const {
- return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
- }
+class AutoreleasePoolPage : private AutoreleasePoolPageData
+{
+ friend struct thread_data_t;
- bool fastcheck() const {
-#if CHECK_AUTORELEASEPOOL
- return check();
+public:
+ static size_t const SIZE =
+#if PROTECT_AUTORELEASEPOOL
+ PAGE_MAX_SIZE; // must be multiple of vm page size
#else
- return (m[0] == M0);
+ PAGE_MIN_SIZE; // size and alignment, power of 2
#endif
- }
-
-# undef M1
-};
+private:
+ static pthread_key_t const key = AUTORELEASE_POOL_KEY;
+ static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
+ static size_t const COUNT = SIZE / sizeof(id);
-class AutoreleasePoolPage
-{
// EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
// pushed and it has never contained any objects. This saves memory
// when the top level (i.e. libdispatch) pushes and pops pools but
# define EMPTY_POOL_PLACEHOLDER ((id*)1)
# define POOL_BOUNDARY nil
- static pthread_key_t const key = AUTORELEASE_POOL_KEY;
- static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
- static size_t const SIZE =
-#if PROTECT_AUTORELEASEPOOL
- PAGE_MAX_SIZE; // must be multiple of vm page size
-#else
- PAGE_MAX_SIZE; // size and alignment, power of 2
-#endif
- static size_t const COUNT = SIZE / sizeof(id);
-
- magic_t const magic;
- id *next;
- pthread_t const thread;
- AutoreleasePoolPage * const parent;
- AutoreleasePoolPage *child;
- uint32_t const depth;
- uint32_t hiwat;
// SIZE-sizeof(*this) bytes of contents follow
#endif
}
- AutoreleasePoolPage(AutoreleasePoolPage *newParent)
- : magic(), next(begin()), thread(pthread_self()),
- parent(newParent), child(nil),
- depth(parent ? 1+parent->depth : 0),
- hiwat(parent ? parent->hiwat : 0)
+ AutoreleasePoolPage(AutoreleasePoolPage *newParent) :
+ AutoreleasePoolPageData(begin(),
+ objc_thread_self(),
+ newParent,
+ newParent ? 1+newParent->depth : 0,
+ newParent ? newParent->hiwat : 0)
{
if (parent) {
parent->check();
- assert(!parent->child);
+ ASSERT(!parent->child);
parent->unprotect();
parent->child = this;
parent->protect();
{
check();
unprotect();
- assert(empty());
+ ASSERT(empty());
// Not recursive: we don't want to blow out the stack
// if a thread accumulates a stupendous amount of garbage
- assert(!child);
+ ASSERT(!child);
}
-
- void busted(bool die = true)
+ template<typename Fn>
+ void
+ busted(Fn log) const
{
magic_t right;
- (die ? _objc_fatal : _objc_inform)
- ("autorelease pool page %p corrupted\n"
+ log("autorelease pool page %p corrupted\n"
" magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
" should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
" pthread %p\n"
this,
magic.m[0], magic.m[1], magic.m[2], magic.m[3],
right.m[0], right.m[1], right.m[2], right.m[3],
- this->thread, pthread_self());
+ this->thread, objc_thread_self());
}
- void check(bool die = true)
+ __attribute__((noinline, cold, noreturn))
+ void
+ busted_die() const
{
- if (!magic.check() || !pthread_equal(thread, pthread_self())) {
- busted(die);
+ busted(_objc_fatal);
+ __builtin_unreachable();
+ }
+
+ inline void
+ check(bool die = true) const
+ {
+ if (!magic.check() || thread != objc_thread_self()) {
+ if (die) {
+ busted_die();
+ } else {
+ busted(_objc_inform);
+ }
}
}
- void fastcheck(bool die = true)
+ inline void
+ fastcheck() const
{
#if CHECK_AUTORELEASEPOOL
- check(die);
+ check();
#else
if (! magic.fastcheck()) {
- busted(die);
+ busted_die();
}
#endif
}
id *add(id obj)
{
- assert(!full());
+ ASSERT(!full());
unprotect();
id *ret = next; // faster than `return next-1` because of aliasing
*next++ = obj;
#if DEBUG
// we expect any children to be completely empty
for (AutoreleasePoolPage *page = child; page; page = page->child) {
- assert(page->empty());
+ ASSERT(page->empty());
}
#endif
}
setHotPage((AutoreleasePoolPage *)p);
if (AutoreleasePoolPage *page = coldPage()) {
- if (!page->empty()) pop(page->begin()); // pop all of the pools
- if (DebugMissingPools || DebugPoolAllocation) {
+ if (!page->empty()) objc_autoreleasePoolPop(page->begin()); // pop all of the pools
+ if (slowpath(DebugMissingPools || DebugPoolAllocation)) {
// pop() killed the pages already
} else {
page->kill(); // free all of the pages
AutoreleasePoolPage *result;
uintptr_t offset = p % SIZE;
- assert(offset >= sizeof(AutoreleasePoolPage));
+ ASSERT(offset >= sizeof(AutoreleasePoolPage));
result = (AutoreleasePoolPage *)(p - offset);
result->fastcheck();
static inline id* setEmptyPoolPlaceholder()
{
- assert(tls_get_direct(key) == nil);
+ ASSERT(tls_get_direct(key) == nil);
tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
return EMPTY_POOL_PLACEHOLDER;
}
// The hot page is full.
// Step to the next non-full page, adding a new page if necessary.
// Then add the object to that page.
- assert(page == hotPage());
- assert(page->full() || DebugPoolAllocation);
+ ASSERT(page == hotPage());
+ ASSERT(page->full() || DebugPoolAllocation);
do {
if (page->child) page = page->child;
{
// "No page" could mean no pool has been pushed
// or an empty placeholder pool has been pushed and has no contents yet
- assert(!hotPage());
+ ASSERT(!hotPage());
bool pushExtraBoundary = false;
if (haveEmptyPoolPlaceholder()) {
"autoreleased with no pool in place - "
"just leaking - break on "
"objc_autoreleaseNoPool() to debug",
- pthread_self(), (void*)obj, object_getClassName(obj));
+ objc_thread_self(), (void*)obj, object_getClassName(obj));
objc_autoreleaseNoPool(obj);
return nil;
}
public:
static inline id autorelease(id obj)
{
- assert(obj);
- assert(!obj->isTaggedPointer());
+ ASSERT(obj);
+ ASSERT(!obj->isTaggedPointer());
id *dest __unused = autoreleaseFast(obj);
- assert(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
+ ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
return obj;
}
static inline void *push()
{
id *dest;
- if (DebugPoolAllocation) {
+ if (slowpath(DebugPoolAllocation)) {
// Each autorelease pool starts on a new pool page.
dest = autoreleaseNewPage(POOL_BOUNDARY);
} else {
dest = autoreleaseFast(POOL_BOUNDARY);
}
- assert(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
+ ASSERT(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
return dest;
}
+ __attribute__((noinline, cold))
static void badPop(void *token)
{
// Error. For bincompat purposes this is not
}
objc_autoreleasePoolInvalid(token);
}
-
- static inline void pop(void *token)
+
+ template<bool allowDebug>
+ static void
+ popPage(void *token, AutoreleasePoolPage *page, id *stop)
+ {
+ if (allowDebug && PrintPoolHiwat) printHiwat();
+
+ page->releaseUntil(stop);
+
+ // memory: delete empty children
+ if (allowDebug && DebugPoolAllocation && page->empty()) {
+ // special case: delete everything during page-per-pool debugging
+ AutoreleasePoolPage *parent = page->parent;
+ page->kill();
+ setHotPage(parent);
+ } else if (allowDebug && DebugMissingPools && page->empty() && !page->parent) {
+ // special case: delete everything for pop(top)
+ // when debugging missing autorelease pools
+ page->kill();
+ setHotPage(nil);
+ } else if (page->child) {
+ // hysteresis: keep one empty child if page is more than half full
+ if (page->lessThanHalfFull()) {
+ page->child->kill();
+ }
+ else if (page->child->child) {
+ page->child->child->kill();
+ }
+ }
+ }
+
+ __attribute__((noinline, cold))
+ static void
+ popPageDebug(void *token, AutoreleasePoolPage *page, id *stop)
+ {
+ popPage<true>(token, page, stop);
+ }
+
+ static inline void
+ pop(void *token)
{
AutoreleasePoolPage *page;
id *stop;
-
if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
// Popping the top-level placeholder pool.
- if (hotPage()) {
- // Pool was used. Pop its contents normally.
- // Pool pages remain allocated for re-use as usual.
- pop(coldPage()->begin());
- } else {
+ page = hotPage();
+ if (!page) {
// Pool was never used. Clear the placeholder.
- setHotPage(nil);
+ return setHotPage(nil);
}
- return;
+ // Pool was used. Pop its contents normally.
+ // Pool pages remain allocated for re-use as usual.
+ page = coldPage();
+ token = page->begin();
+ } else {
+ page = pageForPointer(token);
}
- page = pageForPointer(token);
stop = (id *)token;
if (*stop != POOL_BOUNDARY) {
if (stop == page->begin() && !page->parent) {
}
}
- if (PrintPoolHiwat) printHiwat();
-
- page->releaseUntil(stop);
-
- // memory: delete empty children
- if (DebugPoolAllocation && page->empty()) {
- // special case: delete everything during page-per-pool debugging
- AutoreleasePoolPage *parent = page->parent;
- page->kill();
- setHotPage(parent);
- } else if (DebugMissingPools && page->empty() && !page->parent) {
- // special case: delete everything for pop(top)
- // when debugging missing autorelease pools
- page->kill();
- setHotPage(nil);
- }
- else if (page->child) {
- // hysteresis: keep one empty child if page is more than half full
- if (page->lessThanHalfFull()) {
- page->child->kill();
- }
- else if (page->child->child) {
- page->child->child->kill();
- }
+ if (slowpath(PrintPoolHiwat || DebugPoolAllocation || DebugMissingPools)) {
+ return popPageDebug(token, page, stop);
}
+
+ return popPage<false>(token, page, stop);
}
static void init()
{
int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
AutoreleasePoolPage::tls_dealloc);
- assert(r == 0);
+ ASSERT(r == 0);
}
- void print()
+ __attribute__((noinline, cold))
+ void print()
{
_objc_inform("[%p] ................ PAGE %s %s %s", this,
full() ? "(full)" : "",
}
}
+ __attribute__((noinline, cold))
static void printAll()
- {
+ {
_objc_inform("##############");
- _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());
+ _objc_inform("AUTORELEASE POOLS for thread %p", objc_thread_self());
AutoreleasePoolPage *page;
ptrdiff_t objects = 0;
_objc_inform("##############");
}
+ __attribute__((noinline, cold))
static void printHiwat()
{
// Check and propagate high water mark
p->hiwat = mark;
p->protect();
}
-
+
_objc_inform("POOL HIGHWATER: new high water mark of %u "
- "pending releases for thread %p:",
- mark, pthread_self());
-
+ "pending releases for thread %p:",
+ mark, objc_thread_self());
+
void *stack[128];
int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
char **sym = backtrace_symbols(stack, count);
#undef POOL_BOUNDARY
};
-// anonymous namespace
-};
-
-
/***********************************************************************
* Slow paths for inline control
**********************************************************************/
}
-NEVER_INLINE bool
+NEVER_INLINE uintptr_t
objc_object::rootRelease_underflow(bool performDealloc)
{
return rootRelease(performDealloc, true);
NEVER_INLINE void
objc_object::clearDeallocating_slow()
{
- assert(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));
+ ASSERT(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));
SideTable& table = SideTables()[this];
table.lock();
id
objc_object::rootAutorelease2()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
return AutoreleasePoolPage::autorelease((id)this);
}
);
-NEVER_INLINE
-bool
+NEVER_INLINE uintptr_t
objc_object::overrelease_error()
{
_objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
objc_overrelease_during_dealloc_error();
- return false; // allow rootRelease() to tail-call this
+ return 0; // allow rootRelease() to tail-call this
}
bool isDeallocating,
bool weaklyReferenced)
{
- assert(!isa.nonpointer); // should already be changed to raw pointer
+ ASSERT(!isa.nonpointer); // should already be changed to raw pointer
SideTable& table = SideTables()[this];
size_t& refcntStorage = table.refcnts[this];
size_t oldRefcnt = refcntStorage;
// not deallocating - that was in the isa
- assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
- assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
+ ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
+ ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
uintptr_t carry;
size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
bool
objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
{
- assert(isa.nonpointer);
+ ASSERT(isa.nonpointer);
SideTable& table = SideTables()[this];
size_t& refcntStorage = table.refcnts[this];
size_t oldRefcnt = refcntStorage;
// isa-side bits should not be set here
- assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
- assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
+ ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
+ ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
size_t
objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
{
- assert(isa.nonpointer);
+ ASSERT(isa.nonpointer);
SideTable& table = SideTables()[this];
RefcountMap::iterator it = table.refcnts.find(this);
size_t oldRefcnt = it->second;
// isa-side bits should not be set here
- assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
- assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
+ ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
+ ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
- assert(oldRefcnt > newRefcnt); // shouldn't underflow
+ ASSERT(oldRefcnt > newRefcnt); // shouldn't underflow
it->second = newRefcnt;
return delta_rc;
}
size_t
objc_object::sidetable_getExtraRC_nolock()
{
- assert(isa.nonpointer);
+ ASSERT(isa.nonpointer);
SideTable& table = SideTables()[this];
RefcountMap::iterator it = table.refcnts.find(this);
if (it == table.refcnts.end()) return 0;
objc_object::sidetable_retain()
{
#if SUPPORT_NONPOINTER_ISA
- assert(!isa.nonpointer);
+ ASSERT(!isa.nonpointer);
#endif
SideTable& table = SideTables()[this];
objc_object::sidetable_tryRetain()
{
#if SUPPORT_NONPOINTER_ISA
- assert(!isa.nonpointer);
+ ASSERT(!isa.nonpointer);
#endif
SideTable& table = SideTables()[this];
// }
bool result = true;
- RefcountMap::iterator it = table.refcnts.find(this);
- if (it == table.refcnts.end()) {
- table.refcnts[this] = SIDE_TABLE_RC_ONE;
- } else if (it->second & SIDE_TABLE_DEALLOCATING) {
+ auto it = table.refcnts.try_emplace(this, SIDE_TABLE_RC_ONE);
+ auto &refcnt = it.first->second;
+ if (it.second) {
+ // there was no entry
+ } else if (refcnt & SIDE_TABLE_DEALLOCATING) {
result = false;
- } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
- it->second += SIDE_TABLE_RC_ONE;
+ } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
+ refcnt += SIDE_TABLE_RC_ONE;
}
return result;
objc_object::sidetable_setWeaklyReferenced_nolock()
{
#if SUPPORT_NONPOINTER_ISA
- assert(!isa.nonpointer);
+ ASSERT(!isa.nonpointer);
#endif
SideTable& table = SideTables()[this];
objc_object::sidetable_release(bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
- assert(!isa.nonpointer);
+ ASSERT(!isa.nonpointer);
#endif
SideTable& table = SideTables()[this];
bool do_dealloc = false;
table.lock();
- RefcountMap::iterator it = table.refcnts.find(this);
- if (it == table.refcnts.end()) {
+ auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
+ auto &refcnt = it.first->second;
+ if (it.second) {
do_dealloc = true;
- table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
- } else if (it->second < SIDE_TABLE_DEALLOCATING) {
+ } else if (refcnt < SIDE_TABLE_DEALLOCATING) {
// SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
do_dealloc = true;
- it->second |= SIDE_TABLE_DEALLOCATING;
- } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
- it->second -= SIDE_TABLE_RC_ONE;
+ refcnt |= SIDE_TABLE_DEALLOCATING;
+ } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
+ refcnt -= SIDE_TABLE_RC_ONE;
}
table.unlock();
if (do_dealloc && performDealloc) {
- ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
+ ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
}
return do_dealloc;
}
#if __OBJC2__
-__attribute__((aligned(16)))
+__attribute__((aligned(16), flatten, noinline))
id
objc_retain(id obj)
{
}
-__attribute__((aligned(16)))
+__attribute__((aligned(16), flatten, noinline))
void
objc_release(id obj)
{
}
-__attribute__((aligned(16)))
+__attribute__((aligned(16), flatten, noinline))
id
objc_autorelease(id obj)
{
bool
_objc_rootTryRetain(id obj)
{
- assert(obj);
+ ASSERT(obj);
return obj->rootTryRetain();
}
bool
_objc_rootIsDeallocating(id obj)
{
- assert(obj);
+ ASSERT(obj);
return obj->rootIsDeallocating();
}
void
objc_clear_deallocating(id obj)
{
- assert(obj);
+ ASSERT(obj);
if (obj->isTaggedPointer()) return;
obj->clearDeallocating();
bool
_objc_rootReleaseWasZero(id obj)
{
- assert(obj);
+ ASSERT(obj);
return obj->rootReleaseShouldDealloc();
}
-id
+NEVER_INLINE id
_objc_rootAutorelease(id obj)
{
- assert(obj);
+ ASSERT(obj);
return obj->rootAutorelease();
}
uintptr_t
_objc_rootRetainCount(id obj)
{
- assert(obj);
+ ASSERT(obj);
return obj->rootRetainCount();
}
-id
+NEVER_INLINE id
_objc_rootRetain(id obj)
{
- assert(obj);
+ ASSERT(obj);
return obj->rootRetain();
}
-void
+NEVER_INLINE void
_objc_rootRelease(id obj)
{
- assert(obj);
+ ASSERT(obj);
obj->rootRelease();
}
-id
-_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
-{
- id obj;
-
-#if __OBJC2__
- // allocWithZone under __OBJC2__ ignores the zone parameter
- (void)zone;
- obj = class_createInstance(cls, 0);
-#else
- if (!zone) {
- obj = class_createInstance(cls, 0);
- }
- else {
- obj = class_createInstanceFromZone(cls, 0, zone);
- }
-#endif
-
- if (slowpath(!obj)) obj = callBadAllocHandler(cls);
- return obj;
-}
-
-
// Call [cls alloc] or [cls allocWithZone:nil], with appropriate
// shortcutting optimizations.
static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
{
- if (slowpath(checkNil && !cls)) return nil;
-
#if __OBJC2__
+ if (slowpath(checkNil && !cls)) return nil;
if (fastpath(!cls->ISA()->hasCustomAWZ())) {
- // No alloc/allocWithZone implementation. Go straight to the allocator.
- // fixme store hasCustomAWZ in the non-meta class and
- // add it to canAllocFast's summary
- if (fastpath(cls->canAllocFast())) {
- // No ctors, raw isa, etc. Go straight to the metal.
- bool dtor = cls->hasCxxDtor();
- id obj = (id)calloc(1, cls->bits.fastInstanceSize());
- if (slowpath(!obj)) return callBadAllocHandler(cls);
- obj->initInstanceIsa(cls, dtor);
- return obj;
- }
- else {
- // Has ctor or raw isa or something. Use the slower path.
- id obj = class_createInstance(cls, 0);
- if (slowpath(!obj)) return callBadAllocHandler(cls);
- return obj;
- }
+ return _objc_rootAllocWithZone(cls, nil);
}
#endif
// No shortcuts available.
- if (allocWithZone) return [cls allocWithZone:nil];
- return [cls alloc];
+ if (allocWithZone) {
+ return ((id(*)(id, SEL, struct _NSZone *))objc_msgSend)(cls, @selector(allocWithZone:), nil);
+ }
+ return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(alloc));
}
return [callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/) init];
}
+// Calls [cls new]
+id
+objc_opt_new(Class cls)
+{
+#if __OBJC2__
+ if (fastpath(cls && !cls->ISA()->hasCustomCore())) {
+ return [callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/) init];
+ }
+#endif
+ return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new));
+}
+
+// Calls [obj self]
+id
+objc_opt_self(id obj)
+{
+#if __OBJC2__
+ if (fastpath(!obj || obj->isTaggedPointer() || !obj->ISA()->hasCustomCore())) {
+ return obj;
+ }
+#endif
+ return ((id(*)(id, SEL))objc_msgSend)(obj, @selector(self));
+}
+
+// Calls [obj class]
+Class
+objc_opt_class(id obj)
+{
+#if __OBJC2__
+ if (slowpath(!obj)) return nil;
+ Class cls = obj->getIsa();
+ if (fastpath(!cls->hasCustomCore())) {
+ return cls->isMetaClass() ? obj : cls;
+ }
+#endif
+ return ((Class(*)(id, SEL))objc_msgSend)(obj, @selector(class));
+}
+
+// Calls [obj isKindOfClass:]
+BOOL
+objc_opt_isKindOfClass(id obj, Class otherClass)
+{
+#if __OBJC2__
+ if (slowpath(!obj)) return NO;
+ Class cls = obj->getIsa();
+ if (fastpath(!cls->hasCustomCore())) {
+ for (Class tcls = cls; tcls; tcls = tcls->superclass) {
+ if (tcls == otherClass) return YES;
+ }
+ return NO;
+ }
+#endif
+ return ((BOOL(*)(id, SEL, Class))objc_msgSend)(obj, @selector(isKindOfClass:), otherClass);
+}
+
+// Calls [obj respondsToSelector:]
+BOOL
+objc_opt_respondsToSelector(id obj, SEL sel)
+{
+#if __OBJC2__
+ if (slowpath(!obj)) return NO;
+ Class cls = obj->getIsa();
+ if (fastpath(!cls->hasCustomCore())) {
+ return class_respondsToSelector_inst(obj, sel, cls);
+ }
+#endif
+ return ((BOOL(*)(id, SEL, SEL))objc_msgSend)(obj, @selector(respondsToSelector:), sel);
+}
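Taken together, the objc_opt_* entry points added above are message-send-free fast paths for a few very common NSObject methods: each checks hasCustomCore() and falls back to a real objc_msgSend when the class overrides the default behavior. A hedged sketch of the equivalences follows; whether a compiler actually lowers message sends to these calls depends on the toolchain and deployment target, not on this diff:

    // Illustrative only: what the fast paths compute for a class with no
    // custom core methods (any plain NSObject subclass).
    static void opt_fast_path_example(id obj) {
        Class c = objc_opt_class(obj);              // same result as [obj class]
        BOOL  k = objc_opt_isKindOfClass(obj, c);   // same result as [obj isKindOfClass:c]
        BOOL  r = objc_opt_respondsToSelector(obj, @selector(description));
        (void)c; (void)k; (void)r;
    }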
void
_objc_rootDealloc(id obj)
{
- assert(obj);
+ ASSERT(obj);
obj->rootDealloc();
}
void
_objc_rootFinalize(id obj __unused)
{
- assert(obj);
+ ASSERT(obj);
_objc_fatal("_objc_rootFinalize called with garbage collection off");
}
return AutoreleasePoolPage::push();
}
+NEVER_INLINE
void
objc_autoreleasePoolPop(void *ctxt)
{
void arr_init(void)
{
AutoreleasePoolPage::init();
- SideTableInit();
+ SideTablesMap.init();
+ _objc_associations_init();
}
@interface __NSUnrecognizedTaggedPointer : NSObject
@end
+__attribute__((objc_nonlazy_class))
@implementation __NSUnrecognizedTaggedPointer
-+(void) load { }
-(id) retain { return self; }
-(oneway void) release { }
-(id) autorelease { return self; }
#endif
-
+__attribute__((objc_nonlazy_class))
@implementation NSObject
-+ (void)load {
-}
-
+ (void)initialize {
}
}
+ (BOOL)isMemberOfClass:(Class)cls {
- return object_getClass((id)self) == cls;
+ return self->ISA() == cls;
}
- (BOOL)isMemberOfClass:(Class)cls {
}
+ (BOOL)isKindOfClass:(Class)cls {
- for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
+ for (Class tcls = self->ISA(); tcls; tcls = tcls->superclass) {
if (tcls == cls) return YES;
}
return NO;
}
+ (BOOL)instancesRespondToSelector:(SEL)sel {
- if (!sel) return NO;
- return class_respondsToSelector(self, sel);
+ return class_respondsToSelector_inst(nil, sel, self);
}
+ (BOOL)respondsToSelector:(SEL)sel {
- if (!sel) return NO;
- return class_respondsToSelector_inst(object_getClass(self), sel, self);
+ return class_respondsToSelector_inst(self, sel, self->ISA());
}
- (BOOL)respondsToSelector:(SEL)sel {
- if (!sel) return NO;
- return class_respondsToSelector_inst([self class], sel, self);
+ return class_respondsToSelector_inst(self, sel, [self class]);
}
+ (BOOL)conformsToProtocol:(Protocol *)protocol {
// Replaced by ObjectAlloc
- (id)retain {
- return ((id)self)->rootRetain();
+ return _objc_rootRetain(self);
}
// Replaced by ObjectAlloc
- (BOOL)_tryRetain {
- return ((id)self)->rootTryRetain();
+ return _objc_rootTryRetain(self);
}
+ (BOOL)_isDeallocating {
}
- (BOOL)_isDeallocating {
- return ((id)self)->rootIsDeallocating();
+ return _objc_rootIsDeallocating(self);
}
+ (BOOL)allowsWeakReference {
return YES;
}
-+ (BOOL)retainWeakReference {
++ (BOOL)retainWeakReference {
return YES;
}
// Replaced by ObjectAlloc
- (oneway void)release {
- ((id)self)->rootRelease();
+ _objc_rootRelease(self);
}
+ (id)autorelease {
// Replaced by ObjectAlloc
- (id)autorelease {
- return ((id)self)->rootAutorelease();
+ return _objc_rootAutorelease(self);
}
+ (NSUInteger)retainCount {
}
- (NSUInteger)retainCount {
- return ((id)self)->rootRetainCount();
+ return _objc_rootRetainCount(self);
}
+ (id)alloc {
__IOS_DEPRECATED(2.0, 2.0, "use protocol_getMethodDescription instead")
__TVOS_DEPRECATED(9.0, 9.0, "use protocol_getMethodDescription instead")
__WATCHOS_DEPRECATED(1.0, 1.0, "use protocol_getMethodDescription instead")
- __BRIDGEOS_DEPRECATED(2.0, 2.0, "use protocol_getMethodDescription instead");
+#ifndef __APPLE_BLEACH_SDK__
+ __BRIDGEOS_DEPRECATED(2.0, 2.0, "use protocol_getMethodDescription instead")
+#endif
+;
- (struct objc_method_description *) descriptionForClassMethod:(SEL)aSel
__OSX_DEPRECATED(10.0, 10.5, "use protocol_getMethodDescription instead")
__IOS_DEPRECATED(2.0, 2.0, "use protocol_getMethodDescription instead")
__TVOS_DEPRECATED(9.0, 9.0, "use protocol_getMethodDescription instead")
__WATCHOS_DEPRECATED(1.0, 1.0, "use protocol_getMethodDescription instead")
- __BRIDGEOS_DEPRECATED(2.0, 2.0, "use protocol_getMethodDescription instead");
+#ifndef __APPLE_BLEACH_SDK__
+ __BRIDGEOS_DEPRECATED(2.0, 2.0, "use protocol_getMethodDescription instead")
+#endif
+;
@end
// by CF, so __IncompleteProtocol would be left without an R/R implementation
// otherwise, which would break ARC.
-@interface __IncompleteProtocol : NSObject @end
-@implementation __IncompleteProtocol
+@interface __IncompleteProtocol : NSObject
+@end
+
#if __OBJC2__
-// fixme hack - make __IncompleteProtocol a non-lazy class
-+ (void) load { }
+__attribute__((objc_nonlazy_class))
#endif
+@implementation __IncompleteProtocol
@end
-@implementation Protocol
-
#if __OBJC2__
-// fixme hack - make Protocol a non-lazy class
-+ (void) load { }
+__attribute__((objc_nonlazy_class))
#endif
-
+@implementation Protocol
- (BOOL) conformsTo: (Protocol *)aProtocolObj
{
.endmacro
.macro TailCallCachedImp
- // $0 = cached imp, $1 = address of cached imp, $2 = SEL
+ // $0 = cached imp, $1 = address of cached imp, $2 = SEL, $3 = isa
eor $1, $1, $2 // mix SEL into ptrauth modifier
+ eor $1, $1, $3 // mix isa into ptrauth modifier
brab $0, $1
.endmacro
.endmacro
.macro AuthAndResignAsIMP
- // $0 = cached imp, $1 = address of cached imp, $2 = SEL
+ // $0 = cached imp, $1 = address of cached imp, $2 = SEL, $3 = isa
// note: assumes the imp is not nil
eor $1, $1, $2 // mix SEL into ptrauth modifier
+ eor $1, $1, $3 // mix isa into ptrauth modifier
autib $0, $1 // authenticate cached imp
ldr xzr, [$0] // crash if authentication failed
paciza $0 // resign cached imp as IMP
.endmacro
.macro TailCallCachedImp
- // $0 = cached imp, $1 = address of cached imp, $2 = SEL
+ // $0 = cached imp, $1 = address of cached imp, $2 = SEL, $3 = isa
+ eor $0, $0, $3
br $0
.endmacro
.macro AuthAndResignAsIMP
// $0 = cached imp, $1 = address of cached imp, $2 = SEL
- // empty
+ eor $0, $0, $3
.endmacro
// not JOP
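The extra eor above folds the class into the macro's diversity data: on arm64e the cached IMP's pointer-authentication modifier now covers the (bucket address, SEL, class) triple, and on plain arm64 (the second pair of macros) the cached imp is stored XORed with the class and un-XORed at call time, so a bucket replayed into another class's cache no longer yields a usable imp. A rough C-level sketch of the modifier computation; the field naming is illustrative and the real work is the assembly above:

    // Sketch of the diversity data mixed into a cached IMP's signature (arm64e).
    static uintptr_t imp_auth_modifier(void *imp_slot, SEL sel, Class cls) {
        return (uintptr_t)imp_slot   // where the imp is stored
             ^ (uintptr_t)sel        // the selector it answers
             ^ (uintptr_t)cls;       // the class whose cache this is
    }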
#define _OBJC_LITTLE_HASHTABLE_H_
#ifndef _OBJC_PRIVATE_H_
-# define OBJC_HASH_AVAILABILITY \
- __OSX_DEPRECATED(10.0, 10.1, "NXHashTable is deprecated") \
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE \
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE
+# define OBJC_HASH_AVAILABILITY \
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.0, 10.1, "NXHashTable is deprecated")
#else
# define OBJC_HASH_AVAILABILITY
#endif
//
//===----------------------------------------------------------------------===//
-// Taken from llvmCore-3425.0.31.
+// Taken from clang-1100.247.11.10.9
#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H
#include "llvm-AlignOf.h"
#include "llvm-DenseMapInfo.h"
#include <algorithm>
-#include <iterator>
-#include <new>
-#include <utility>
#include <cassert>
-#include <climits>
#include <cstddef>
#include <cstring>
+#include <iterator>
+#include <new>
+#include <type_traits>
+#include <utility>
#include <TargetConditionals.h>
#include "objc-private.h"
-// From llvm/Support/Compiler.h
-#define LLVM_USE_RVALUE_REFERENCES 1
-#define llvm_move(value) (::std::move(value))
-
#define MIN_BUCKETS 4
#define MIN_COMPACT 1024
+#define LLVM_UNLIKELY slowpath
+#define LLVM_LIKELY fastpath
namespace objc {
-template<typename KeyT, typename ValueT,
- typename KeyInfoT = DenseMapInfo<KeyT>,
- bool IsConst = false>
+namespace detail {
+
+// We extend a pair to allow users to override the bucket type with their own
+// implementation without requiring two members.
+template <typename KeyT, typename ValueT>
+struct DenseMapPair : public std::pair<KeyT, ValueT> {
+
+ // FIXME: Switch to inheriting constructors when we drop support for older
+ // clang versions.
+ // NOTE: This default constructor is declared with '{}' rather than
+ // '= default' to work around a separate bug in clang-3.8. This can
+ // also go when we switch to inheriting constructors.
+ DenseMapPair() {}
+
+ DenseMapPair(const KeyT &Key, const ValueT &Value)
+ : std::pair<KeyT, ValueT>(Key, Value) {}
+
+ DenseMapPair(KeyT &&Key, ValueT &&Value)
+ : std::pair<KeyT, ValueT>(std::move(Key), std::move(Value)) {}
+
+ template <typename AltKeyT, typename AltValueT>
+ DenseMapPair(AltKeyT &&AltKey, AltValueT &&AltValue,
+ typename std::enable_if<
+ std::is_convertible<AltKeyT, KeyT>::value &&
+ std::is_convertible<AltValueT, ValueT>::value>::type * = 0)
+ : std::pair<KeyT, ValueT>(std::forward<AltKeyT>(AltKey),
+ std::forward<AltValueT>(AltValue)) {}
+
+ template <typename AltPairT>
+ DenseMapPair(AltPairT &&AltPair,
+ typename std::enable_if<std::is_convertible<
+ AltPairT, std::pair<KeyT, ValueT>>::value>::type * = 0)
+ : std::pair<KeyT, ValueT>(std::forward<AltPairT>(AltPair)) {}
+
+ KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
+ const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
+ ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
+ const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
+};
+
+} // end namespace detail
+
+template <
+ typename KeyT, typename ValueT,
+ typename ValueInfoT = DenseMapValueInfo<ValueT>,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename Bucket = detail::DenseMapPair<KeyT, ValueT>,
+ bool IsConst = false>
class DenseMapIterator;
-// ZeroValuesArePurgeable=true is used by the refcount table.
+// ValueInfoT is used by the refcount table.
// A key/value pair with value==0 is not required to be stored
// in the refcount table; it could correctly be erased instead.
// For performance, we do keep zero values in the table when the
// true refcount decreases to 1: this makes any future retain faster.
// For memory size, we allow rehashes and table insertions to
// remove a zero value as if it were a tombstone.
-
-template<typename DerivedT,
- typename KeyT, typename ValueT, typename KeyInfoT,
- bool ZeroValuesArePurgeable = false>
+template <typename DerivedT, typename KeyT, typename ValueT,
+ typename ValueInfoT, typename KeyInfoT, typename BucketT>
class DenseMapBase {
-protected:
- typedef std::pair<KeyT, ValueT> BucketT;
+ template <typename T>
+ using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
public:
- typedef KeyT key_type;
- typedef ValueT mapped_type;
- typedef BucketT value_type;
+ using size_type = unsigned;
+ using key_type = KeyT;
+ using mapped_type = ValueT;
+ using value_type = BucketT;
+
+ using iterator = DenseMapIterator<KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT>;
+ using const_iterator =
+ DenseMapIterator<KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT, true>;
- typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
- typedef DenseMapIterator<KeyT, ValueT,
- KeyInfoT, true> const_iterator;
inline iterator begin() {
- // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
- return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
+ // When the map is empty, avoid the overhead of advancing/retreating past
+ // empty buckets.
+ if (empty())
+ return end();
+ return makeIterator(getBuckets(), getBucketsEnd());
}
inline iterator end() {
- return iterator(getBucketsEnd(), getBucketsEnd(), true);
+ return makeIterator(getBucketsEnd(), getBucketsEnd(), true);
}
inline const_iterator begin() const {
- return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
+ if (empty())
+ return end();
+ return makeConstIterator(getBuckets(), getBucketsEnd());
}
inline const_iterator end() const {
- return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
+ return makeConstIterator(getBucketsEnd(), getBucketsEnd(), true);
}
- bool empty() const { return getNumEntries() == 0; }
+ bool empty() const {
+ return getNumEntries() == 0;
+ }
unsigned size() const { return getNumEntries(); }
- /// Grow the densemap so that it has at least Size buckets. Does not shrink
- void resize(size_t Size) {
- if (Size > getNumBuckets())
- grow(Size);
+ /// Grow the densemap so that it can contain at least \p NumEntries items
+ /// before resizing again.
+ void reserve(size_type NumEntries) {
+ auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
+ if (NumBuckets > getNumBuckets())
+ grow(NumBuckets);
}
void clear() {
if (getNumEntries() == 0 && getNumTombstones() == 0) return;
-
+
// If the capacity of the array is huge, and the # elements used is small,
// shrink the array.
- if (getNumEntries() * 4 < getNumBuckets() &&
- getNumBuckets() > MIN_BUCKETS) {
+ if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > MIN_BUCKETS) {
shrink_and_clear();
return;
}
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ if (is_trivially_copyable<KeyT>::value &&
+ is_trivially_copyable<ValueT>::value) {
+ // Use a simpler loop when these are trivial types.
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
+ P->getFirst() = EmptyKey;
+ } else {
+ unsigned NumEntries = getNumEntries();
for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
- if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
- if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
- P->second.~ValueT();
- decrementNumEntries();
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
+ if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+ P->getSecond().~ValueT();
+ --NumEntries;
+ }
+ P->getFirst() = EmptyKey;
}
- P->first = EmptyKey;
}
+ ASSERT(NumEntries == 0 && "Node count imbalance!");
}
- assert(getNumEntries() == 0 && "Node count imbalance!");
+ setNumEntries(0);
setNumTombstones(0);
}
- /// count - Return true if the specified key is in the map.
- bool count(const KeyT &Val) const {
+ /// Return 1 if the specified key is in the map, 0 otherwise.
+ size_type count(const_arg_type_t<KeyT> Val) const {
const BucketT *TheBucket;
- return LookupBucketFor(Val, TheBucket);
+ return LookupBucketFor(Val, TheBucket) ? 1 : 0;
}
- iterator find(const KeyT &Val) {
+ iterator find(const_arg_type_t<KeyT> Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return iterator(TheBucket, getBucketsEnd(), true);
+ return makeIterator(TheBucket, getBucketsEnd(), true);
return end();
}
- const_iterator find(const KeyT &Val) const {
+ const_iterator find(const_arg_type_t<KeyT> Val) const {
const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return const_iterator(TheBucket, getBucketsEnd(), true);
+ return makeConstIterator(TheBucket, getBucketsEnd(), true);
return end();
}
iterator find_as(const LookupKeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return iterator(TheBucket, getBucketsEnd(), true);
+ return makeIterator(TheBucket, getBucketsEnd(), true);
return end();
}
template<class LookupKeyT>
const_iterator find_as(const LookupKeyT &Val) const {
const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return const_iterator(TheBucket, getBucketsEnd(), true);
+ return makeConstIterator(TheBucket, getBucketsEnd(), true);
return end();
}
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
- ValueT lookup(const KeyT &Val) const {
+ ValueT lookup(const_arg_type_t<KeyT> Val) const {
const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return TheBucket->second;
+ return TheBucket->getSecond();
return ValueT();
}
// If the key is already in the map, it returns false and doesn't update the
// value.
std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+ return try_emplace(KV.first, KV.second);
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+ return try_emplace(std::move(KV.first), std::move(KV.second));
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return std::make_pair(
+ makeIterator(TheBucket, getBucketsEnd(), true),
+ false); // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket =
+ InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
+ return std::make_pair(
+ makeIterator(TheBucket, getBucketsEnd(), true),
+ true);
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return std::make_pair(
+ makeIterator(TheBucket, getBucketsEnd(), true),
+ false); // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
+ return std::make_pair(
+ makeIterator(TheBucket, getBucketsEnd(), true),
+ true);
+ }
+
+ /// Alternate version of insert() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template <typename LookupKeyT>
+ std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
+ const LookupKeyT &Val) {
BucketT *TheBucket;
- if (LookupBucketFor(KV.first, TheBucket))
- return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
- false); // Already in map.
+ if (LookupBucketFor(Val, TheBucket))
+ return std::make_pair(
+ makeIterator(TheBucket, getBucketsEnd(), *this, true),
+ false); // Already in map.
// Otherwise, insert the new element.
- TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
- return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
+ TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
+ std::move(KV.second), Val);
+ return std::make_pair(
+ makeIterator(TheBucket, getBucketsEnd(), *this, true),
+ true);
}
/// insert - Range insertion of pairs.
void compact() {
if (getNumEntries() == 0) {
shrink_and_clear();
- }
- else if (getNumBuckets() / 16 > getNumEntries() &&
- getNumBuckets() > MIN_COMPACT)
+ }
+ else if (getNumBuckets() / 16 > getNumEntries() &&
+ getNumBuckets() > MIN_COMPACT)
{
grow(getNumEntries() * 2);
}
if (!LookupBucketFor(Val, TheBucket))
return false; // not in map.
- TheBucket->second.~ValueT();
- TheBucket->first = getTombstoneKey();
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
decrementNumEntries();
incrementNumTombstones();
compact();
}
void erase(iterator I) {
BucketT *TheBucket = &*I;
- TheBucket->second.~ValueT();
- TheBucket->first = getTombstoneKey();
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
decrementNumEntries();
incrementNumTombstones();
compact();
if (LookupBucketFor(Key, TheBucket))
return *TheBucket;
- return *InsertIntoBucket(Key, ValueT(), TheBucket);
+ return *InsertIntoBucket(TheBucket, Key);
}
ValueT &operator[](const KeyT &Key) {
return FindAndConstruct(Key).second;
}
-#if LLVM_USE_RVALUE_REFERENCES
value_type& FindAndConstruct(KeyT &&Key) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return *TheBucket;
- return *InsertIntoBucket(Key, ValueT(), TheBucket);
+ return *InsertIntoBucket(TheBucket, std::move(Key));
}
ValueT &operator[](KeyT &&Key) {
- return FindAndConstruct(Key).second;
+ return FindAndConstruct(std::move(Key)).second;
}
-#endif
/// isPointerIntoBucketsArray - Return true if the specified pointer points
/// somewhere into the DenseMap's array of buckets (i.e. either to a key or
const void *getPointerIntoBucketsArray() const { return getBuckets(); }
protected:
- DenseMapBase() {}
+ DenseMapBase() = default;
void destroyAll() {
if (getNumBuckets() == 0) // Nothing to do.
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
- if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
- !KeyInfoT::isEqual(P->first, TombstoneKey))
- P->second.~ValueT();
- P->first.~KeyT();
- }
-
-#ifndef NDEBUG
- memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
-#endif
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
+ P->getSecond().~ValueT();
+ P->getFirst().~KeyT();
}
+ }
void initEmpty() {
setNumEntries(0);
setNumTombstones(0);
- assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
+ ASSERT((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
"# initial buckets must be a power of two!");
const KeyT EmptyKey = getEmptyKey();
for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
- new (&B->first) KeyT(EmptyKey);
+ ::new (&B->getFirst()) KeyT(EmptyKey);
+ }
+
+ /// Returns the number of buckets to allocate to ensure that the DenseMap can
+ /// accommodate \p NumEntries without need to grow().
+ unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
+ // Ensure that "NumEntries * 4 < NumBuckets * 3"
+ if (NumEntries == 0)
+ return 0;
+ // +1 is required because of the strict equality.
+ // For example if NumEntries is 48 this computes 65, which NextPowerOf2 rounds up to 128.
+ return NextPowerOf2(NumEntries * 4 / 3 + 1);
}
void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
- if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
- !KeyInfoT::isEqual(B->first, TombstoneKey) &&
- !(ZeroValuesArePurgeable && B->second == 0)) {
+ if (ValueInfoT::isPurgeable(B->getSecond())) {
+ // Free the value.
+ B->getSecond().~ValueT();
+ } else if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
// Insert the key/value into the new table.
BucketT *DestBucket;
- bool FoundVal = LookupBucketFor(B->first, DestBucket);
+ bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
(void)FoundVal; // silence warning.
- assert(!FoundVal && "Key already in new map?");
- DestBucket->first = llvm_move(B->first);
- new (&DestBucket->second) ValueT(llvm_move(B->second));
+ ASSERT(!FoundVal && "Key already in new map?");
+ DestBucket->getFirst() = std::move(B->getFirst());
+ ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
incrementNumEntries();
-
+
// Free the value.
- B->second.~ValueT();
+ B->getSecond().~ValueT();
}
- B->first.~KeyT();
+ B->getFirst().~KeyT();
}
-
-#ifndef NDEBUG
- if (OldBucketsBegin != OldBucketsEnd)
- memset((void*)OldBucketsBegin, 0x5a,
- sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
-#endif
}
template <typename OtherBaseT>
- void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) {
- assert(getNumBuckets() == other.getNumBuckets());
+ void copyFrom(
+ const DenseMapBase<OtherBaseT, KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT> &other) {
+ ASSERT(&other != this);
+ ASSERT(getNumBuckets() == other.getNumBuckets());
setNumEntries(other.getNumEntries());
setNumTombstones(other.getNumTombstones());
- if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
- memcpy(getBuckets(), other.getBuckets(),
+ if (is_trivially_copyable<KeyT>::value &&
+ is_trivially_copyable<ValueT>::value)
+ memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
getNumBuckets() * sizeof(BucketT));
else
for (size_t i = 0; i < getNumBuckets(); ++i) {
- new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first);
- if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) &&
- !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey()))
- new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second);
+ ::new (&getBuckets()[i].getFirst())
+ KeyT(other.getBuckets()[i].getFirst());
+ if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
+ !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
+ ::new (&getBuckets()[i].getSecond())
+ ValueT(other.getBuckets()[i].getSecond());
}
}
- void swap(DenseMapBase& RHS) {
- std::swap(getNumEntries(), RHS.getNumEntries());
- std::swap(getNumTombstones(), RHS.getNumTombstones());
- }
-
static unsigned getHashValue(const KeyT &Val) {
return KeyInfoT::getHashValue(Val);
}
+
template<typename LookupKeyT>
static unsigned getHashValue(const LookupKeyT &Val) {
return KeyInfoT::getHashValue(Val);
}
+
static const KeyT getEmptyKey() {
+ static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
+ "Must pass the derived type to this template!");
return KeyInfoT::getEmptyKey();
}
+
static const KeyT getTombstoneKey() {
return KeyInfoT::getTombstoneKey();
}
private:
+ iterator makeIterator(BucketT *P, BucketT *E,
+ bool NoAdvance=false) {
+ return iterator(P, E, NoAdvance);
+ }
+
+ const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
+ const bool NoAdvance=false) const {
+ return const_iterator(P, E, NoAdvance);
+ }
+
unsigned getNumEntries() const {
return static_cast<const DerivedT *>(this)->getNumEntries();
}
+
void setNumEntries(unsigned Num) {
static_cast<DerivedT *>(this)->setNumEntries(Num);
}
+
void incrementNumEntries() {
setNumEntries(getNumEntries() + 1);
}
+
void decrementNumEntries() {
setNumEntries(getNumEntries() - 1);
}
+
unsigned getNumTombstones() const {
return static_cast<const DerivedT *>(this)->getNumTombstones();
}
+
void setNumTombstones(unsigned Num) {
static_cast<DerivedT *>(this)->setNumTombstones(Num);
}
+
void incrementNumTombstones() {
setNumTombstones(getNumTombstones() + 1);
}
+
void decrementNumTombstones() {
setNumTombstones(getNumTombstones() - 1);
}
+
const BucketT *getBuckets() const {
return static_cast<const DerivedT *>(this)->getBuckets();
}
+
BucketT *getBuckets() {
return static_cast<DerivedT *>(this)->getBuckets();
}
+
unsigned getNumBuckets() const {
return static_cast<const DerivedT *>(this)->getNumBuckets();
}
+
BucketT *getBucketsEnd() {
return getBuckets() + getNumBuckets();
}
+
const BucketT *getBucketsEnd() const {
return getBuckets() + getNumBuckets();
}
static_cast<DerivedT *>(this)->shrink_and_clear();
}
+ template <typename KeyArg, typename... ValueArgs>
+ BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
+ ValueArgs &&... Values) {
+ TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
- BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
- BucketT *TheBucket) {
- TheBucket = InsertIntoBucketImpl(Key, TheBucket);
-
- TheBucket->first = Key;
- new (&TheBucket->second) ValueT(Value);
+ TheBucket->getFirst() = std::forward<KeyArg>(Key);
+ ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
return TheBucket;
}
-#if LLVM_USE_RVALUE_REFERENCES
- BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
- BucketT *TheBucket) {
- TheBucket = InsertIntoBucketImpl(Key, TheBucket);
-
- TheBucket->first = Key;
- new (&TheBucket->second) ValueT(std::move(Value));
- return TheBucket;
- }
-
- BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
- TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+ template <typename LookupKeyT>
+ BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
+ ValueT &&Value, LookupKeyT &Lookup) {
+ TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
- TheBucket->first = std::move(Key);
- new (&TheBucket->second) ValueT(std::move(Value));
+ TheBucket->getFirst() = std::move(Key);
+ ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
return TheBucket;
}
-#endif
- BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
- // If the load of the hash table is more than 3/4, grow the table.
- // If fewer than 1/8 of the buckets are empty (meaning that many are
- // filled with tombstones), rehash the table without growing.
+ template <typename LookupKeyT>
+ BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
+ BucketT *TheBucket) {
+ // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
+ // the buckets are empty (meaning that many are filled with tombstones),
+ // grow the table.
//
// The latter case is tricky. For example, if we had one empty bucket with
// tons of tombstones, failing lookups (e.g. for insertion) would have to
// causing infinite loops in lookup.
unsigned NewNumEntries = getNumEntries() + 1;
unsigned NumBuckets = getNumBuckets();
- if (NewNumEntries*4 >= NumBuckets*3) {
+ if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
this->grow(NumBuckets * 2);
- LookupBucketFor(Key, TheBucket);
+ LookupBucketFor(Lookup, TheBucket);
NumBuckets = getNumBuckets();
- }
- if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
+ } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
+ NumBuckets/8)) {
this->grow(NumBuckets);
- LookupBucketFor(Key, TheBucket);
+ LookupBucketFor(Lookup, TheBucket);
}
- assert(TheBucket);
+ ASSERT(TheBucket);
// Only update the state after we've grown our bucket space appropriately
// so that when growing buckets we have self-consistent entry count.
// If we are writing over a tombstone or zero value, remember this.
- if (KeyInfoT::isEqual(TheBucket->first, getEmptyKey())) {
+ if (KeyInfoT::isEqual(TheBucket->getFirst(), getEmptyKey())) {
// Replacing an empty bucket.
- incrementNumEntries();
- }
- else if (KeyInfoT::isEqual(TheBucket->first, getTombstoneKey())) {
+ incrementNumEntries();
+ } else if (KeyInfoT::isEqual(TheBucket->getFirst(), getTombstoneKey())) {
// Replacing a tombstone.
incrementNumEntries();
decrementNumTombstones();
- }
- else if (ZeroValuesArePurgeable && TheBucket->second == 0) {
- // Purging a zero. No accounting changes.
- TheBucket->second.~ValueT();
} else {
- // Updating an existing entry. No accounting changes.
+ // We should be purging a zero. No accounting changes.
+ ASSERT(ValueInfoT::isPurgeable(TheBucket->getSecond()));
+ TheBucket->getSecond().~ValueT();
}
return TheBucket;
}
+ __attribute__((noinline, noreturn, cold))
+ void FatalCorruptHashTables(const BucketT *BucketsPtr, unsigned NumBuckets) const
+ {
+ _objc_fatal("Hash table corrupted. This is probably a memory error "
+ "somewhere. (table at %p, buckets at %p (%zu bytes), "
+ "%u buckets, %u entries, %u tombstones, "
+ "data %p %p %p %p)",
+ this, BucketsPtr, malloc_size(BucketsPtr),
+ NumBuckets, getNumEntries(), getNumTombstones(),
+ ((void**)BucketsPtr)[0], ((void**)BucketsPtr)[1],
+ ((void**)BucketsPtr)[2], ((void**)BucketsPtr)[3]);
+ }
+
/// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
/// FoundBucket. If the bucket contains the key and a value, this returns
- /// true, otherwise it returns a bucket with an empty marker or tombstone
- /// or zero value and returns false.
+ /// true, otherwise it returns a bucket with an empty marker or tombstone and
+ /// returns false.
template<typename LookupKeyT>
bool LookupBucketFor(const LookupKeyT &Val,
const BucketT *&FoundBucket) const {
const unsigned NumBuckets = getNumBuckets();
if (NumBuckets == 0) {
- FoundBucket = 0;
+ FoundBucket = nullptr;
return false;
}
- // FoundTombstone - Keep track of whether we find a tombstone or zero value while probing.
- const BucketT *FoundTombstone = 0;
+ // FoundTombstone - Keep track of whether we find a tombstone while probing.
+ const BucketT *FoundTombstone = nullptr;
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
unsigned ProbeAmt = 1;
- while (1) {
+ while (true) {
const BucketT *ThisBucket = BucketsPtr + BucketNo;
// Found Val's bucket? If so, return it.
- if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
+ if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
FoundBucket = ThisBucket;
return true;
}
// If we found an empty bucket, the key doesn't exist in the set.
// Insert it and return the default value.
- if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) {
+ if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
// If we've already seen a tombstone while probing, fill it in instead
// of the empty bucket we eventually probed to.
- if (FoundTombstone) ThisBucket = FoundTombstone;
FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
return false;
}
// If this is a tombstone, remember it. If Val ends up not in the map, we
// prefer to return it than something that would require more probing.
// Ditto for zero values.
- if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone)
+ if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
+ !FoundTombstone)
FoundTombstone = ThisBucket; // Remember the first tombstone found.
- if (ZeroValuesArePurgeable &&
- ThisBucket->second == 0 && !FoundTombstone)
+ if (ValueInfoT::isPurgeable(ThisBucket->getSecond()) && !FoundTombstone)
FoundTombstone = ThisBucket;
// Otherwise, it's a hash collision or a tombstone, continue quadratic
// probing.
if (ProbeAmt > NumBuckets) {
- // No empty buckets in table. Die.
- _objc_fatal("Hash table corrupted. This is probably a memory error "
- "somewhere. (table at %p, buckets at %p (%zu bytes), "
- "%u buckets, %u entries, %u tombstones, "
- "data %p %p %p %p)",
- this, BucketsPtr, malloc_size(BucketsPtr),
- NumBuckets, getNumEntries(), getNumTombstones(),
- ((void**)BucketsPtr)[0], ((void**)BucketsPtr)[1],
- ((void**)BucketsPtr)[2], ((void**)BucketsPtr)[3]);
+ FatalCorruptHashTables(BucketsPtr, NumBuckets);
}
BucketNo += ProbeAmt++;
- BucketNo&= (NumBuckets-1);
+ BucketNo &= (NumBuckets-1);
}
}
}
};
-template<typename KeyT, typename ValueT,
- bool ZeroValuesArePurgeable = false,
- typename KeyInfoT = DenseMapInfo<KeyT> >
-class DenseMap
- : public DenseMapBase<DenseMap<KeyT, ValueT, ZeroValuesArePurgeable, KeyInfoT>,
- KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> {
+/// Equality comparison for DenseMap.
+///
+/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
+/// is also in RHS, and that no additional pairs are in RHS.
+/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
+/// complexity is linear, worst case is O(N^2) (if every hash collides).
+template <typename DerivedT, typename KeyT, typename ValueT,
+ typename ValueInfoT, typename KeyInfoT, typename BucketT>
+bool operator==(
+ const DenseMapBase<DerivedT, KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT> &LHS,
+ const DenseMapBase<DerivedT, KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ for (auto &KV : LHS) {
+ auto I = RHS.find(KV.first);
+ if (I == RHS.end() || I->second != KV.second)
+ return false;
+ }
+
+ return true;
+}
+
+/// Inequality comparison for DenseMap.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename DerivedT, typename KeyT, typename ValueT,
+ typename ValueInfoT, typename KeyInfoT, typename BucketT>
+bool operator!=(
+ const DenseMapBase<DerivedT, KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT> &LHS,
+ const DenseMapBase<DerivedT, KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT> &RHS) {
+ return !(LHS == RHS);
+}
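A minimal usage sketch of the equality operators defined above (illustrative only, not from the patched header; operator[] and try_emplace are assumed to behave as in upstream LLVM's DenseMap):

    void equalitySketch() {
        objc::DenseMap<int, int> A, B;
        A[1] = 10; A[2] = 20;
        B[2] = 20; B[1] = 10;        // insertion order is irrelevant
        bool same = (A == B);        // true: identical (key, value) pairs
        B.try_emplace(3, 30);
        bool differ = (A != B);      // true once B gains an extra pair
        (void)same; (void)differ;
    }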
+
+template <typename KeyT, typename ValueT,
+ typename ValueInfoT = DenseMapValueInfo<ValueT>,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT>,
+ KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT> {
+ friend class DenseMapBase<DenseMap, KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT>;
+
// Lift some types from the dependent base class into this class for
// simplicity of referring to them.
- typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> BaseT;
- typedef typename BaseT::BucketT BucketT;
- friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable>;
+ using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT>;
BucketT *Buckets;
unsigned NumEntries;
unsigned NumBuckets;
public:
- explicit DenseMap(unsigned NumInitBuckets = 0) {
- init(NumInitBuckets);
- }
+ /// Create a DenseMap with an optional \p InitialReserve that guarantees that
+ /// this number of elements can be inserted in the map without grow().
+ explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
- DenseMap(const DenseMap &other) {
+ DenseMap(const DenseMap &other) : BaseT() {
init(0);
copyFrom(other);
}
-#if LLVM_USE_RVALUE_REFERENCES
- DenseMap(DenseMap &&other) {
+ DenseMap(DenseMap &&other) : BaseT() {
init(0);
swap(other);
}
-#endif
template<typename InputIt>
DenseMap(const InputIt &I, const InputIt &E) {
- init(NextPowerOf2(std::distance(I, E)));
+ init(std::distance(I, E));
this->insert(I, E);
}
+ DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
+ init(Vals.size());
+ this->insert(Vals.begin(), Vals.end());
+ }
+
~DenseMap() {
this->destroyAll();
operator delete(Buckets);
}
DenseMap& operator=(const DenseMap& other) {
- copyFrom(other);
+ if (&other != this)
+ copyFrom(other);
return *this;
}
-#if LLVM_USE_RVALUE_REFERENCES
DenseMap& operator=(DenseMap &&other) {
this->destroyAll();
operator delete(Buckets);
swap(other);
return *this;
}
-#endif
void copyFrom(const DenseMap& other) {
this->destroyAll();
}
}
- void init(unsigned InitBuckets) {
+ void init(unsigned InitNumEntries) {
+ auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
if (allocateBuckets(InitBuckets)) {
this->BaseT::initEmpty();
} else {
unsigned OldNumBuckets = NumBuckets;
BucketT *OldBuckets = Buckets;
- allocateBuckets(std::max<unsigned>(MIN_BUCKETS, NextPowerOf2(AtLeast)));
- assert(Buckets);
+ allocateBuckets(std::max<unsigned>(MIN_BUCKETS, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
+ ASSERT(Buckets);
if (!OldBuckets) {
this->BaseT::initEmpty();
return;
unsigned getNumEntries() const {
return NumEntries;
}
+
void setNumEntries(unsigned Num) {
NumEntries = Num;
}
unsigned getNumTombstones() const {
return NumTombstones;
}
+
void setNumTombstones(unsigned Num) {
NumTombstones = Num;
}
bool allocateBuckets(unsigned Num) {
NumBuckets = Num;
if (NumBuckets == 0) {
- Buckets = 0;
+ Buckets = nullptr;
return false;
}
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*NumBuckets));
+ Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
return true;
}
};
-template<typename KeyT, typename ValueT,
- unsigned InlineBuckets = 4,
- bool ZeroValuesArePurgeable = false,
- typename KeyInfoT = DenseMapInfo<KeyT> >
+template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
+ typename ValueInfoT = DenseMapValueInfo<ValueT>,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
- : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, ZeroValuesArePurgeable, KeyInfoT>,
- KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> {
+ : public DenseMapBase<
+ SmallDenseMap<KeyT, ValueT, InlineBuckets, ValueInfoT, KeyInfoT, BucketT>, KeyT,
+ ValueT, ValueInfoT, KeyInfoT, BucketT> {
+ friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT>;
+
// Lift some types from the dependent base class into this class for
// simplicity of referring to them.
- typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> BaseT;
- typedef typename BaseT::BucketT BucketT;
- friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable>;
+ using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, ValueInfoT, KeyInfoT, BucketT>;
+
+ static_assert(powerof2(InlineBuckets),
+ "InlineBuckets must be a power of 2.");
unsigned Small : 1;
unsigned NumEntries : 31;
init(NumInitBuckets);
}
- SmallDenseMap(const SmallDenseMap &other) {
+ SmallDenseMap(const SmallDenseMap &other) : BaseT() {
init(0);
copyFrom(other);
}
-#if LLVM_USE_RVALUE_REFERENCES
- SmallDenseMap(SmallDenseMap &&other) {
+ SmallDenseMap(SmallDenseMap &&other) : BaseT() {
init(0);
swap(other);
}
-#endif
template<typename InputIt>
SmallDenseMap(const InputIt &I, const InputIt &E) {
for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
BucketT *LHSB = &getInlineBuckets()[i],
*RHSB = &RHS.getInlineBuckets()[i];
- bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) &&
- !KeyInfoT::isEqual(LHSB->first, TombstoneKey));
- bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) &&
- !KeyInfoT::isEqual(RHSB->first, TombstoneKey));
+ bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
+ bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
if (hasLHSValue && hasRHSValue) {
// Swap together if we can...
std::swap(*LHSB, *RHSB);
continue;
}
// Swap separately and handle any asymmetry.
- std::swap(LHSB->first, RHSB->first);
+ std::swap(LHSB->getFirst(), RHSB->getFirst());
if (hasLHSValue) {
- new (&RHSB->second) ValueT(llvm_move(LHSB->second));
- LHSB->second.~ValueT();
+ ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
+ LHSB->getSecond().~ValueT();
} else if (hasRHSValue) {
- new (&LHSB->second) ValueT(llvm_move(RHSB->second));
- RHSB->second.~ValueT();
+ ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
+ RHSB->getSecond().~ValueT();
}
}
return;
SmallDenseMap &LargeSide = Small ? RHS : *this;
// First stash the large side's rep and move the small side across.
- LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep());
+ LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
LargeSide.getLargeRep()->~LargeRep();
LargeSide.Small = true;
// This is similar to the standard move-from-old-buckets, but the bucket
for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
BucketT *NewB = &LargeSide.getInlineBuckets()[i],
*OldB = &SmallSide.getInlineBuckets()[i];
- new (&NewB->first) KeyT(llvm_move(OldB->first));
- OldB->first.~KeyT();
- if (!KeyInfoT::isEqual(NewB->first, EmptyKey) &&
- !KeyInfoT::isEqual(NewB->first, TombstoneKey)) {
- new (&NewB->second) ValueT(llvm_move(OldB->second));
- OldB->second.~ValueT();
+ ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
+ OldB->getFirst().~KeyT();
+ if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
+ ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
+ OldB->getSecond().~ValueT();
}
}
// The hard part of moving the small buckets across is done, just move
// the TmpRep into its new home.
SmallSide.Small = false;
- new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep));
+ new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
}
SmallDenseMap& operator=(const SmallDenseMap& other) {
- copyFrom(other);
+ if (&other != this)
+ copyFrom(other);
return *this;
}
-#if LLVM_USE_RVALUE_REFERENCES
SmallDenseMap& operator=(SmallDenseMap &&other) {
this->destroyAll();
deallocateBuckets();
swap(other);
return *this;
}
-#endif
void copyFrom(const SmallDenseMap& other) {
this->destroyAll();
Small = true;
if (other.getNumBuckets() > InlineBuckets) {
Small = false;
- allocateBuckets(other.getNumBuckets());
+ new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
}
this->BaseT::copyFrom(other);
}
}
void grow(unsigned AtLeast) {
- if (AtLeast > InlineBuckets)
+ if (AtLeast >= InlineBuckets)
AtLeast = std::max<unsigned>(MIN_BUCKETS, NextPowerOf2(AtLeast));
if (Small) {
- if (AtLeast <= InlineBuckets)
+ if (AtLeast < InlineBuckets)
return; // Nothing to do.
// First move the inline buckets into a temporary storage.
const KeyT EmptyKey = this->getEmptyKey();
const KeyT TombstoneKey = this->getTombstoneKey();
for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
- if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
- !KeyInfoT::isEqual(P->first, TombstoneKey)) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
"Too many inline buckets!");
- new (&TmpEnd->first) KeyT(llvm_move(P->first));
- new (&TmpEnd->second) ValueT(llvm_move(P->second));
+ ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
+ ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
++TmpEnd;
- P->second.~ValueT();
+ P->getSecond().~ValueT();
}
- P->first.~KeyT();
+ P->getFirst().~KeyT();
}
// Now make this map use the large rep, and move all the entries back
return;
}
- LargeRep OldRep = llvm_move(*getLargeRep());
+ LargeRep OldRep = std::move(*getLargeRep());
getLargeRep()->~LargeRep();
if (AtLeast <= InlineBuckets) {
Small = true;
unsigned getNumEntries() const {
return NumEntries;
}
+
void setNumEntries(unsigned Num) {
- assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
+ // NumEntries is hardcoded to be 31 bits wide.
+ ASSERT(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
NumEntries = Num;
}
unsigned getNumTombstones() const {
return NumTombstones;
}
+
void setNumTombstones(unsigned Num) {
NumTombstones = Num;
}
const BucketT *getInlineBuckets() const {
- assert(Small);
+ ASSERT(Small);
// Note that this cast does not violate aliasing rules as we assert that
// the memory's dynamic type is the small, inline bucket buffer, and the
// 'storage.buffer' static type is 'char *'.
return reinterpret_cast<const BucketT *>(storage.buffer);
}
+
BucketT *getInlineBuckets() {
return const_cast<BucketT *>(
const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
}
+
const LargeRep *getLargeRep() const {
- assert(!Small);
+ ASSERT(!Small);
// Note, same rule about aliasing as with getInlineBuckets.
return reinterpret_cast<const LargeRep *>(storage.buffer);
}
+
LargeRep *getLargeRep() {
return const_cast<LargeRep *>(
const_cast<const SmallDenseMap *>(this)->getLargeRep());
const BucketT *getBuckets() const {
return Small ? getInlineBuckets() : getLargeRep()->Buckets;
}
+
BucketT *getBuckets() {
return const_cast<BucketT *>(
const_cast<const SmallDenseMap *>(this)->getBuckets());
}
+
unsigned getNumBuckets() const {
return Small ? InlineBuckets : getLargeRep()->NumBuckets;
}
}
LargeRep allocateBuckets(unsigned Num) {
- assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
+ ASSERT(Num > InlineBuckets && "Must allocate more buckets than are inline");
LargeRep Rep = {
static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
-};
+ };
return Rep;
}
};
-template<typename KeyT, typename ValueT,
- typename KeyInfoT, bool IsConst>
+template <typename KeyT, typename ValueT, typename ValueInfoT,
+ typename KeyInfoT, typename Bucket, bool IsConst>
class DenseMapIterator {
- typedef std::pair<KeyT, ValueT> Bucket;
- typedef DenseMapIterator<KeyT, ValueT,
- KeyInfoT, true> ConstIterator;
- friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>;
+ friend class DenseMapIterator<KeyT, ValueT, ValueInfoT, KeyInfoT, Bucket, true>;
+ friend class DenseMapIterator<KeyT, ValueT, ValueInfoT, KeyInfoT, Bucket, false>;
+
+ using ConstIterator = DenseMapIterator<KeyT, ValueT, ValueInfoT, KeyInfoT, Bucket, true>;
+
public:
- typedef ptrdiff_t difference_type;
- typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type;
- typedef value_type *pointer;
- typedef value_type &reference;
- typedef std::forward_iterator_tag iterator_category;
+ using difference_type = ptrdiff_t;
+ using value_type =
+ typename std::conditional<IsConst, const Bucket, Bucket>::type;
+ using pointer = value_type *;
+ using reference = value_type &;
+ using iterator_category = std::forward_iterator_tag;
+
private:
- pointer Ptr, End;
+ pointer Ptr = nullptr;
+ pointer End = nullptr;
+
public:
- DenseMapIterator() : Ptr(0), End(0) {}
+ DenseMapIterator() = default;
- DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
- : Ptr(Pos), End(E) {
- if (!NoAdvance) AdvancePastEmptyBuckets();
+ DenseMapIterator(pointer Pos, pointer E,
+ bool NoAdvance = false)
+ : Ptr(Pos), End(E) {
+ if (NoAdvance) return;
+ AdvancePastEmptyBuckets();
}
- // If IsConst is true this is a converting constructor from iterator to
- // const_iterator and the default copy constructor is used.
- // Otherwise this is a copy constructor for iterator.
- DenseMapIterator(const DenseMapIterator<KeyT, ValueT,
- KeyInfoT, false>& I)
- : Ptr(I.Ptr), End(I.End) {}
+ // Converting ctor from non-const iterators to const iterators. SFINAE'd out
+ // for const iterator destinations so it doesn't end up as a user defined copy
+ // constructor.
+ template <bool IsConstSrc,
+ typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
+ DenseMapIterator(
+ const DenseMapIterator<KeyT, ValueT, ValueInfoT, KeyInfoT, Bucket, IsConstSrc> &I)
+ : Ptr(I.Ptr), End(I.End) {}
reference operator*() const {
return *Ptr;
}
bool operator==(const ConstIterator &RHS) const {
- return Ptr == RHS.operator->();
+ return Ptr == RHS.Ptr;
}
bool operator!=(const ConstIterator &RHS) const {
- return Ptr != RHS.operator->();
+ return Ptr != RHS.Ptr;
}
inline DenseMapIterator& operator++() { // Preincrement
private:
void AdvancePastEmptyBuckets() {
+ ASSERT(Ptr <= End);
const KeyT Empty = KeyInfoT::getEmptyKey();
const KeyT Tombstone = KeyInfoT::getTombstoneKey();
- while (Ptr != End &&
- (KeyInfoT::isEqual(Ptr->first, Empty) ||
- KeyInfoT::isEqual(Ptr->first, Tombstone)))
+ while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
+ KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
++Ptr;
}
+
+ void RetreatPastEmptyBuckets() {
+ ASSERT(Ptr >= End);
+ const KeyT Empty = KeyInfoT::getEmptyKey();
+ const KeyT Tombstone = KeyInfoT::getTombstoneKey();
+
+ while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
+ KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
+ --Ptr;
+ }
};
+template <typename KeyT, typename ValueT, typename KeyInfoT>
+inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
+ return X.getMemorySize();
+}
+
} // end namespace objc
-#endif
+#endif // LLVM_ADT_DENSEMAP_H
return _objc_strhash(Val);
}
static bool isEqual(const char* const &LHS, const char* const &RHS) {
+ if (LHS == RHS) {
+ return true;
+ }
+ if (LHS == getEmptyKey() || RHS == getEmptyKey()) {
+ return false;
+ }
+ if (LHS == getTombstoneKey() || RHS == getTombstoneKey()) {
+ return false;
+ }
return 0 == strcmp(LHS, RHS);
}
};
}
};
+template<typename T>
+struct DenseMapValueInfo {
+ static inline bool isPurgeable(const T &value) {
+ return false;
+ }
+};
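The default DenseMapValueInfo above is what replaces the old ZeroValuesArePurgeable template flag. A hedged sketch of how a counter-style map could opt back into purgeable zero values; the size_t specialization below is hypothetical and not part of the patch:

    // Hypothetical: lets DenseMap reuse zero-valued buckets the way
    // ZeroValuesArePurgeable=true used to, without tombstone accounting.
    template <>
    struct DenseMapValueInfo<size_t> {
        static inline bool isPurgeable(const size_t &value) {
            return value == 0;
        }
    };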
+
} // end namespace objc
#endif
--- /dev/null
+//===- llvm/ADT/DenseSet.h - Dense probed hash table ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DenseSet and SmallDenseSet classes.
+//
+//===----------------------------------------------------------------------===//
+
+// Taken from clang-1100.247.11.10.9
+
+#ifndef LLVM_ADT_DENSESET_H
+#define LLVM_ADT_DENSESET_H
+
+#include "llvm-DenseMap.h"
+#include "llvm-DenseMapInfo.h"
+#include "llvm-type_traits.h"
+#include <algorithm>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+#include <TargetConditionals.h>
+
+#include "objc-private.h"
+
+namespace objc {
+
+namespace detail {
+
+struct DenseSetEmpty {};
+
+// Use the empty base class trick so we can create a DenseMap where the buckets
+// contain only a single item.
+template <typename KeyT> class DenseSetPair : public DenseSetEmpty {
+ KeyT key;
+
+public:
+ KeyT &getFirst() { return key; }
+ const KeyT &getFirst() const { return key; }
+ DenseSetEmpty &getSecond() { return *this; }
+ const DenseSetEmpty &getSecond() const { return *this; }
+};
+
+/// Base class for DenseSet and SmallDenseSet.
+///
+/// MapTy should be either
+///
+/// DenseMap<ValueT, detail::DenseSetEmpty,
+/// DenseMapValueInfo<detail::DenseSetEmpty>,
+/// ValueInfoT, detail::DenseSetPair<ValueT>>
+///
+/// or the equivalent SmallDenseMap type. ValueInfoT must implement the
+/// DenseMapInfo "concept".
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+class DenseSetImpl {
+ static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
+ "DenseMap buckets unexpectedly large!");
+ MapTy TheMap;
+
+ template <typename T>
+ using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
+
+public:
+ using key_type = ValueT;
+ using value_type = ValueT;
+ using size_type = unsigned;
+
+ explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}
+
+ DenseSetImpl(std::initializer_list<ValueT> Elems)
+ : DenseSetImpl(PowerOf2Ceil(Elems.size())) {
+ insert(Elems.begin(), Elems.end());
+ }
+
+ bool empty() const { return TheMap.empty(); }
+ size_type size() const { return TheMap.size(); }
+ size_t getMemorySize() const { return TheMap.getMemorySize(); }
+
+ /// Grow the DenseSet so that it has at least Size buckets. Will not shrink
+ /// the size of the set.
+ void resize(size_t Size) { TheMap.resize(Size); }
+
+ /// Grow the DenseSet so that it can contain at least \p NumEntries items
+ /// before resizing again.
+ void reserve(size_t Size) { TheMap.reserve(Size); }
+
+ void clear() {
+ TheMap.clear();
+ }
+
+ /// Return 1 if the specified key is in the set, 0 otherwise.
+ size_type count(const_arg_type_t<ValueT> V) const {
+ return TheMap.count(V);
+ }
+
+ bool erase(const ValueT &V) {
+ return TheMap.erase(V);
+ }
+
+ void swap(DenseSetImpl &RHS) { TheMap.swap(RHS.TheMap); }
+
+ // Iterators.
+
+ class ConstIterator;
+
+ class Iterator {
+ typename MapTy::iterator I;
+ friend class DenseSetImpl;
+ friend class ConstIterator;
+
+ public:
+ using difference_type = typename MapTy::iterator::difference_type;
+ using value_type = ValueT;
+ using pointer = value_type *;
+ using reference = value_type &;
+ using iterator_category = std::forward_iterator_tag;
+
+ Iterator() = default;
+ Iterator(const typename MapTy::iterator &i) : I(i) {}
+
+ ValueT &operator*() { return I->getFirst(); }
+ const ValueT &operator*() const { return I->getFirst(); }
+ ValueT *operator->() { return &I->getFirst(); }
+ const ValueT *operator->() const { return &I->getFirst(); }
+
+ Iterator& operator++() { ++I; return *this; }
+ Iterator operator++(int) { auto T = *this; ++I; return T; }
+ bool operator==(const ConstIterator& X) const { return I == X.I; }
+ bool operator!=(const ConstIterator& X) const { return I != X.I; }
+ };
+
+ class ConstIterator {
+ typename MapTy::const_iterator I;
+ friend class DenseSetImpl;
+ friend class Iterator;
+
+ public:
+ using difference_type = typename MapTy::const_iterator::difference_type;
+ using value_type = ValueT;
+ using pointer = const value_type *;
+ using reference = const value_type &;
+ using iterator_category = std::forward_iterator_tag;
+
+ ConstIterator() = default;
+ ConstIterator(const Iterator &B) : I(B.I) {}
+ ConstIterator(const typename MapTy::const_iterator &i) : I(i) {}
+
+ const ValueT &operator*() const { return I->getFirst(); }
+ const ValueT *operator->() const { return &I->getFirst(); }
+
+ ConstIterator& operator++() { ++I; return *this; }
+ ConstIterator operator++(int) { auto T = *this; ++I; return T; }
+ bool operator==(const ConstIterator& X) const { return I == X.I; }
+ bool operator!=(const ConstIterator& X) const { return I != X.I; }
+ };
+
+ using iterator = Iterator;
+ using const_iterator = ConstIterator;
+
+ iterator begin() { return Iterator(TheMap.begin()); }
+ iterator end() { return Iterator(TheMap.end()); }
+
+ const_iterator begin() const { return ConstIterator(TheMap.begin()); }
+ const_iterator end() const { return ConstIterator(TheMap.end()); }
+
+ iterator find(const_arg_type_t<ValueT> V) { return Iterator(TheMap.find(V)); }
+ const_iterator find(const_arg_type_t<ValueT> V) const {
+ return ConstIterator(TheMap.find(V));
+ }
+
+ /// Alternative version of find() which allows a different, and possibly less
+ /// expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type
+ /// used.
+ template <class LookupKeyT>
+ iterator find_as(const LookupKeyT &Val) {
+ return Iterator(TheMap.find_as(Val));
+ }
+ template <class LookupKeyT>
+ const_iterator find_as(const LookupKeyT &Val) const {
+ return ConstIterator(TheMap.find_as(Val));
+ }
+
+ void erase(Iterator I) { return TheMap.erase(I.I); }
+ void erase(ConstIterator CI) { return TheMap.erase(CI.I); }
+
+ std::pair<iterator, bool> insert(const ValueT &V) {
+ detail::DenseSetEmpty Empty;
+ return TheMap.try_emplace(V, Empty);
+ }
+
+ std::pair<iterator, bool> insert(ValueT &&V) {
+ detail::DenseSetEmpty Empty;
+ return TheMap.try_emplace(std::move(V), Empty);
+ }
+
+ /// Alternative version of insert that uses a different (and possibly less
+ /// expensive) key type.
+ template <typename LookupKeyT>
+ std::pair<iterator, bool> insert_as(const ValueT &V,
+ const LookupKeyT &LookupKey) {
+ return TheMap.insert_as({V, detail::DenseSetEmpty()}, LookupKey);
+ }
+ template <typename LookupKeyT>
+ std::pair<iterator, bool> insert_as(ValueT &&V, const LookupKeyT &LookupKey) {
+ return TheMap.insert_as({std::move(V), detail::DenseSetEmpty()}, LookupKey);
+ }
+
+ // Range insertion of values.
+ template<typename InputIt>
+ void insert(InputIt I, InputIt E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+};
+
+/// Equality comparison for DenseSet.
+///
+/// Iterates over elements of LHS confirming that each element is also a member
+/// of RHS, and that RHS contains no additional values.
+/// Equivalent to N calls to RHS.count. Amortized complexity is linear, worst
+/// case is O(N^2) (if every hash collides).
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+bool operator==(const DenseSetImpl<ValueT, MapTy, ValueInfoT> &LHS,
+ const DenseSetImpl<ValueT, MapTy, ValueInfoT> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ for (auto &E : LHS)
+ if (!RHS.count(E))
+ return false;
+
+ return true;
+}
+
+/// Inequality comparison for DenseSet.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+bool operator!=(const DenseSetImpl<ValueT, MapTy, ValueInfoT> &LHS,
+ const DenseSetImpl<ValueT, MapTy, ValueInfoT> &RHS) {
+ return !(LHS == RHS);
+}
+
+} // end namespace detail
+
+/// Implements a dense probed hash-table based set.
+template <typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT>>
+class DenseSet : public detail::DenseSetImpl<
+ ValueT, DenseMap<ValueT, detail::DenseSetEmpty,
+ DenseMapValueInfo<detail::DenseSetEmpty>,
+ ValueInfoT, detail::DenseSetPair<ValueT>>,
+ ValueInfoT> {
+ using BaseT =
+ detail::DenseSetImpl<ValueT,
+ DenseMap<ValueT, detail::DenseSetEmpty,
+ DenseMapValueInfo<detail::DenseSetEmpty>,
+ ValueInfoT, detail::DenseSetPair<ValueT>>,
+ ValueInfoT>;
+
+public:
+ using BaseT::BaseT;
+};
+
+/// Implements a dense probed hash-table based set with some number of buckets
+/// stored inline.
+template <typename ValueT, unsigned InlineBuckets = 4,
+ typename ValueInfoT = DenseMapInfo<ValueT>>
+class SmallDenseSet
+ : public detail::DenseSetImpl<
+ ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+ DenseMapValueInfo<detail::DenseSetEmpty>,
+ ValueInfoT, detail::DenseSetPair<ValueT>>,
+ ValueInfoT> {
+ using BaseT = detail::DenseSetImpl<
+ ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+ DenseMapValueInfo<detail::DenseSetEmpty>,
+ ValueInfoT, detail::DenseSetPair<ValueT>>,
+ ValueInfoT>;
+
+public:
+ using BaseT::BaseT;
+};
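An illustrative sketch of the two set types (not part of the original header; it assumes the DenseMapInfo<unsigned> specialization provided by llvm-DenseMapInfo.h):

    void setSketch() {
        objc::DenseSet<unsigned> seen;                     // always heap-allocated buckets
        seen.insert(1);
        seen.insert(2);

        objc::SmallDenseSet<unsigned, 8> small{1, 2, 3};   // stays inline until it outgrows 8 buckets
        bool ok = (seen.count(2) == 1) && (small.count(3) == 1);
        (void)ok;
    }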
+
+} // end namespace objc
+
+#endif // LLVM_ADT_DENSESET_H
//===- llvm/Support/type_traits.h - Simplified type traits ------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
-// This file provides a template class that determines if a type is a class or
-// not. The basic mechanism, based on using the pointer to member function of
-// a zero argument to a function was "boosted" from the boost type_traits
-// library. See http://www.boost.org/ for all the gory details.
+// This file provides useful additions to the standard type_traits library.
//
//===----------------------------------------------------------------------===//
-// Taken from llvmCore-3425.0.31.
+// Taken from clang-1100.247.11.10.9
#ifndef LLVM_SUPPORT_TYPE_TRAITS_H
#define LLVM_SUPPORT_TYPE_TRAITS_H
+#define HAVE_STD_IS_TRIVIALLY_COPYABLE 1
+
#include <cstddef>
#include <utility>
#define __has_feature(x) 0
#endif
-// This is actually the conforming implementation which works with abstract
-// classes. However, enough compilers have trouble with it that most will use
-// the one in boost/type_traits/object_traits.hpp. This implementation actually
-// works with VC7.0, but other interactions seem to fail when we use it.
-
namespace objc {
-
-namespace dont_use
-{
- // These two functions should never be used. They are helpers to
- // the is_class template below. They cannot be located inside
- // is_class because doing so causes at least GCC to think that
- // the value of the "value" enumerator is not constant. Placing
- // them out here (for some strange reason) allows the sizeof
- // operator against them to magically be constant. This is
- // important to make the is_class<T>::value idiom zero cost. it
- // evaluates to a constant 1 or 0 depending on whether the
- // parameter T is a class or not (respectively).
- template<typename T> char is_class_helper(void(T::*)());
- template<typename T> double is_class_helper(...);
-}
-template <typename T>
-struct is_class
-{
- // is_class<> metafunction due to Paul Mensonides (leavings@attbi.com). For
- // more details:
- // http://groups.google.com/groups?hl=en&selm=000001c1cc83%24e154d5e0%247772e50c%40c161550a&rnum=1
+
+/// Metafunction that determines whether the given type is either an
+/// integral type or an enumeration type, including enum classes.
+///
+/// Note that this accepts potentially more integral types than is_integral
+/// because it is based on being implicitly convertible to an integral type.
+/// Also note that enum classes aren't implicitly convertible to integral types;
+/// the value may therefore need to be explicitly converted before being used.
+template <typename T> class is_integral_or_enum {
+ using UnderlyingT = typename std::remove_reference<T>::type;
+
public:
static const bool value =
- sizeof(char) == sizeof(dont_use::is_class_helper<T>(0));
-};
-
-
-/// isPodLike - This is a type trait that is used to determine whether a given
-/// type can be copied around with memcpy instead of running ctors etc.
-template <typename T>
-struct isPodLike {
-#if __has_feature(is_trivially_copyable)
- // If the compiler supports the is_trivially_copyable trait use it, as it
- // matches the definition of isPodLike closely.
- static const bool value = __is_trivially_copyable(T);
-#else
- // If we don't know anything else, we can (at least) assume that all non-class
- // types are PODs.
- static const bool value = !is_class<T>::value;
-#endif
+ !std::is_class<UnderlyingT>::value && // Filter conversion operators.
+ !std::is_pointer<UnderlyingT>::value &&
+ !std::is_floating_point<UnderlyingT>::value &&
+ (std::is_enum<UnderlyingT>::value ||
+ std::is_convertible<UnderlyingT, unsigned long long>::value);
};
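A quick illustration of what the trait accepts and rejects (example-only, not from the header; ExampleTag is a hypothetical enum introduced just for these checks):

    enum class ExampleTag : unsigned { A, B };
    static_assert(is_integral_or_enum<int>::value, "integral types qualify");
    static_assert(is_integral_or_enum<ExampleTag>::value, "enum classes qualify");
    static_assert(!is_integral_or_enum<float>::value, "floating point does not");
    static_assert(!is_integral_or_enum<int *>::value, "pointers do not");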
-// std::pair's are pod-like if their elements are.
-template<typename T, typename U>
-struct isPodLike<std::pair<T, U> > {
- static const bool value = isPodLike<T>::value && isPodLike<U>::value;
-};
-
-
-template <class T, T v>
-struct integral_constant {
- typedef T value_type;
- static const value_type value = v;
- typedef integral_constant<T,v> type;
- operator value_type() { return value; }
+/// If T is a pointer, just return it. If it is not, return T&.
+template<typename T, typename Enable = void>
+struct add_lvalue_reference_if_not_pointer { using type = T &; };
+
+template <typename T>
+struct add_lvalue_reference_if_not_pointer<
+ T, typename std::enable_if<std::is_pointer<T>::value>::type> {
+ using type = T;
};
-typedef integral_constant<bool, true> true_type;
-typedef integral_constant<bool, false> false_type;
+/// If T is a pointer to X, return a pointer to const X. If it is not,
+/// return const T.
+template<typename T, typename Enable = void>
+struct add_const_past_pointer { using type = const T; };
-/// \brief Metafunction that determines whether the two given types are
-/// equivalent.
-template<typename T, typename U> struct is_same : public false_type {};
-template<typename T> struct is_same<T, T> : public true_type {};
+template <typename T>
+struct add_const_past_pointer<
+ T, typename std::enable_if<std::is_pointer<T>::value>::type> {
+ using type = const typename std::remove_pointer<T>::type *;
+};
-/// \brief Metafunction that removes const qualification from a type.
-template <typename T> struct remove_const { typedef T type; };
-template <typename T> struct remove_const<const T> { typedef T type; };
+template <typename T, typename Enable = void>
+struct const_pointer_or_const_ref {
+ using type = const T &;
+};
+template <typename T>
+struct const_pointer_or_const_ref<
+ T, typename std::enable_if<std::is_pointer<T>::value>::type> {
+ using type = typename add_const_past_pointer<T>::type;
+};
-/// \brief Metafunction that removes volatile qualification from a type.
-template <typename T> struct remove_volatile { typedef T type; };
-template <typename T> struct remove_volatile<volatile T> { typedef T type; };
+namespace detail {
+/// Internal utility to detect trivial copy construction.
+template<typename T> union copy_construction_triviality_helper {
+ T t;
+ copy_construction_triviality_helper() = default;
+ copy_construction_triviality_helper(const copy_construction_triviality_helper&) = default;
+ ~copy_construction_triviality_helper() = default;
+};
+/// Internal utility to detect trivial move construction.
+template<typename T> union move_construction_triviality_helper {
+ T t;
+ move_construction_triviality_helper() = default;
+ move_construction_triviality_helper(move_construction_triviality_helper&&) = default;
+ ~move_construction_triviality_helper() = default;
+};
-/// \brief Metafunction that removes both const and volatile qualification from
-/// a type.
-template <typename T> struct remove_cv {
- typedef typename remove_const<typename remove_volatile<T>::type>::type type;
+template<class T>
+union trivial_helper {
+ T t;
};
-/// \brief Helper to implement is_integral metafunction.
-template <typename T> struct is_integral_impl : false_type {};
-template <> struct is_integral_impl< bool> : true_type {};
-template <> struct is_integral_impl< char> : true_type {};
-template <> struct is_integral_impl< signed char> : true_type {};
-template <> struct is_integral_impl<unsigned char> : true_type {};
-template <> struct is_integral_impl< wchar_t> : true_type {};
-template <> struct is_integral_impl< short> : true_type {};
-template <> struct is_integral_impl<unsigned short> : true_type {};
-template <> struct is_integral_impl< int> : true_type {};
-template <> struct is_integral_impl<unsigned int> : true_type {};
-template <> struct is_integral_impl< long> : true_type {};
-template <> struct is_integral_impl<unsigned long> : true_type {};
-template <> struct is_integral_impl< long long> : true_type {};
-template <> struct is_integral_impl<unsigned long long> : true_type {};
-
-/// \brief Metafunction that determines whether the given type is an integral
-/// type.
+} // end namespace detail
+
+/// An implementation of `std::is_trivially_copy_constructible` since we have
+/// users with STLs that don't yet include it.
template <typename T>
-struct is_integral : is_integral_impl<T> {};
-
-/// \brief Metafunction to remove reference from a type.
-template <typename T> struct remove_reference { typedef T type; };
-template <typename T> struct remove_reference<T&> { typedef T type; };
-
-/// \brief Metafunction that determines whether the given type is a pointer
-/// type.
-template <typename T> struct is_pointer : false_type {};
-template <typename T> struct is_pointer<T*> : true_type {};
-template <typename T> struct is_pointer<T* const> : true_type {};
-template <typename T> struct is_pointer<T* volatile> : true_type {};
-template <typename T> struct is_pointer<T* const volatile> : true_type {};
-
-/// \brief Metafunction that determines whether the given type is either an
-/// integral type or an enumeration type.
-///
-/// Note that this accepts potentially more integral types than we whitelist
-/// above for is_integral because it is based on merely being convertible
-/// implicitly to an integral type.
-template <typename T> class is_integral_or_enum {
- // Provide an overload which can be called with anything implicitly
- // convertible to an unsigned long long. This should catch integer types and
- // enumeration types at least. We blacklist classes with conversion operators
- // below.
- static double check_int_convertible(unsigned long long);
- static char check_int_convertible(...);
+struct is_trivially_copy_constructible
+ : std::is_copy_constructible<
+ ::objc::detail::copy_construction_triviality_helper<T>> {};
+template <typename T>
+struct is_trivially_copy_constructible<T &> : std::true_type {};
+template <typename T>
+struct is_trivially_copy_constructible<T &&> : std::false_type {};
- typedef typename remove_reference<T>::type UnderlyingT;
- static UnderlyingT &nonce_instance;
+/// An implementation of `std::is_trivially_move_constructible` since we have
+/// users with STLs that don't yet include it.
+template <typename T>
+struct is_trivially_move_constructible
+ : std::is_move_constructible<
+ ::objc::detail::move_construction_triviality_helper<T>> {};
+template <typename T>
+struct is_trivially_move_constructible<T &> : std::true_type {};
+template <typename T>
+struct is_trivially_move_constructible<T &&> : std::true_type {};
-public:
- static const bool
- value = (!is_class<UnderlyingT>::value && !is_pointer<UnderlyingT>::value &&
- !is_same<UnderlyingT, float>::value &&
- !is_same<UnderlyingT, double>::value &&
- sizeof(char) != sizeof(check_int_convertible(nonce_instance)));
-};
-// enable_if_c - Enable/disable a template based on a metafunction
-template<bool Cond, typename T = void>
-struct enable_if_c {
- typedef T type;
+template <typename T>
+struct is_copy_assignable {
+ template<class F>
+ static auto get(F*) -> decltype(std::declval<F &>() = std::declval<const F &>(), std::true_type{});
+ static std::false_type get(...);
+ static constexpr bool value = decltype(get((T*)nullptr))::value;
};
-template<typename T> struct enable_if_c<false, T> { };
-
-// enable_if - Enable/disable a template based on a metafunction
-template<typename Cond, typename T = void>
-struct enable_if : public enable_if_c<Cond::value, T> { };
-
-namespace dont_use {
- template<typename Base> char base_of_helper(const volatile Base*);
- template<typename Base> double base_of_helper(...);
-}
-
-/// is_base_of - Metafunction to determine whether one type is a base class of
-/// (or identical to) another type.
-template<typename Base, typename Derived>
-struct is_base_of {
- static const bool value
- = is_class<Base>::value && is_class<Derived>::value &&
- sizeof(char) == sizeof(dont_use::base_of_helper<Base>((Derived*)0));
+template <typename T>
+struct is_move_assignable {
+ template<class F>
+ static auto get(F*) -> decltype(std::declval<F &>() = std::declval<F &&>(), std::true_type{});
+ static std::false_type get(...);
+ static constexpr bool value = decltype(get((T*)nullptr))::value;
};
-// remove_pointer - Metafunction to turn Foo* into Foo. Defined in
-// C++0x [meta.trans.ptr].
-template <typename T> struct remove_pointer { typedef T type; };
-template <typename T> struct remove_pointer<T*> { typedef T type; };
-template <typename T> struct remove_pointer<T*const> { typedef T type; };
-template <typename T> struct remove_pointer<T*volatile> { typedef T type; };
-template <typename T> struct remove_pointer<T*const volatile> {
- typedef T type; };
-template <bool, typename T, typename F>
-struct conditional { typedef T type; };
+// An implementation of `std::is_trivially_copyable` since STL version
+// is not equally supported by all compilers, especially GCC 4.9.
+// Uniform implementation of this trait is important for ABI compatibility
+// as it has an impact on SmallVector's ABI (among others).
+template <typename T>
+class is_trivially_copyable {
+
+ // copy constructors
+ static constexpr bool has_trivial_copy_constructor =
+ std::is_copy_constructible<detail::trivial_helper<T>>::value;
+ static constexpr bool has_deleted_copy_constructor =
+ !std::is_copy_constructible<T>::value;
+
+ // move constructors
+ static constexpr bool has_trivial_move_constructor =
+ std::is_move_constructible<detail::trivial_helper<T>>::value;
+ static constexpr bool has_deleted_move_constructor =
+ !std::is_move_constructible<T>::value;
+
+ // copy assign
+ static constexpr bool has_trivial_copy_assign =
+ is_copy_assignable<detail::trivial_helper<T>>::value;
+ static constexpr bool has_deleted_copy_assign =
+ !is_copy_assignable<T>::value;
+
+ // move assign
+ static constexpr bool has_trivial_move_assign =
+ is_move_assignable<detail::trivial_helper<T>>::value;
+ static constexpr bool has_deleted_move_assign =
+ !is_move_assignable<T>::value;
+
+ // destructor
+ static constexpr bool has_trivial_destructor =
+ std::is_destructible<detail::trivial_helper<T>>::value;
+
+ public:
+
+ static constexpr bool value =
+ has_trivial_destructor &&
+ (has_deleted_move_assign || has_trivial_move_assign) &&
+ (has_deleted_move_constructor || has_trivial_move_constructor) &&
+ (has_deleted_copy_assign || has_trivial_copy_assign) &&
+ (has_deleted_copy_constructor || has_trivial_copy_constructor);
+
+#ifdef HAVE_STD_IS_TRIVIALLY_COPYABLE
+ static_assert(value == std::is_trivially_copyable<T>::value,
+ "inconsistent behavior between llvm:: and std:: implementation of is_trivially_copyable");
+#endif
+};
+template <typename T>
+class is_trivially_copyable<T*> : public std::true_type {
+};
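Example-only sanity checks for the trait above (a minimal sketch, assuming scalar types and raw pointers; not part of the header):

    static_assert(is_trivially_copyable<int>::value,
                  "scalars are trivially copyable");
    static_assert(is_trivially_copyable<void *>::value,
                  "raw pointers use the specialization above");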
-template <typename T, typename F>
-struct conditional<false, T, F> { typedef F type; };
-}
+} // end namespace objc
#ifdef LLVM_DEFINED_HAS_FEATURE
#undef __has_feature
#endif
-#endif
+#endif // LLVM_SUPPORT_TYPE_TRAITS_H
#define _OBJC_MAPTABLE_H_
#ifndef _OBJC_PRIVATE_H_
-# define OBJC_MAP_AVAILABILITY \
- __OSX_DEPRECATED(10.0, 10.1, "NXMapTable is deprecated") \
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE \
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE
+# define OBJC_MAP_AVAILABILITY \
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.0, 10.1, "NXMapTable is deprecated")
#else
# define OBJC_MAP_AVAILABILITY
#endif
}
}
-static int mapRemove = 0;
-
void *NXMapRemove(NXMapTable *table, const void *key) {
MapPair *pairs = (MapPair *)table->buckets;
unsigned index = bucketOf(table, key);
int found = 0;
const void *old = NULL;
if (pair->key == NX_MAPNOTAKEY) return NULL;
- mapRemove ++;
/* compute chain */
{
unsigned index2 = index;
* before being called.
*/
#if !OBJC_OLD_DISPATCH_PROTOTYPES
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wincompatible-library-redeclaration"
OBJC_EXPORT void
objc_msgSend(void /* id self, SEL op, ... */ )
OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0, 2.0);
OBJC_EXPORT void
objc_msgSendSuper(void /* struct objc_super *super, SEL op, ... */ )
OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0, 2.0);
+#pragma clang diagnostic pop
#else
/**
* Sends a message with a simple return value to an instance of a class.
* before being called.
*/
#if !OBJC_OLD_DISPATCH_PROTOTYPES
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wincompatible-library-redeclaration"
OBJC_EXPORT void
objc_msgSend_stret(void /* id self, SEL op, ... */ )
OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0, 2.0)
objc_msgSendSuper_stret(void /* struct objc_super *super, SEL op, ... */ )
OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0, 2.0)
OBJC_ARM64_UNAVAILABLE;
+#pragma clang diagnostic pop
#else
/**
* Sends a message with a data-structure return value to an instance of a class.
* before being called.
*/
#if !OBJC_OLD_DISPATCH_PROTOTYPES
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wincompatible-library-redeclaration"
# if defined(__i386__)
objc_msgSend_fp2ret(void /* id self, SEL op, ... */ )
OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0, 2.0);
+#pragma clang diagnostic pop
# endif
// !OBJC_OLD_DISPATCH_PROTOTYPES
* before being called.
*/
#if !OBJC_OLD_DISPATCH_PROTOTYPES
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wincompatible-library-redeclaration"
OBJC_EXPORT void
method_invoke(void /* id receiver, Method m, ... */ )
OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0, 2.0);
method_invoke_stret(void /* id receiver, Method m, ... */ )
OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0, 2.0)
OBJC_ARM64_UNAVAILABLE;
+#pragma clang diagnostic pop
#else
OBJC_EXPORT id _Nullable
method_invoke(id _Nullable receiver, Method _Nonnull m, ...)
* but may be compared to other IMP values.
*/
#if !OBJC_OLD_DISPATCH_PROTOTYPES
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wincompatible-library-redeclaration"
OBJC_EXPORT void
_objc_msgForward(void /* id receiver, SEL sel, ... */ )
OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0, 2.0);
_objc_msgForward_stret(void /* id receiver, SEL sel, ... */ )
OBJC_AVAILABLE(10.6, 3.0, 9.0, 1.0, 2.0)
OBJC_ARM64_UNAVAILABLE;
+#pragma clang diagnostic pop
#else
OBJC_EXPORT id _Nullable
_objc_msgForward(id _Nonnull receiver, SEL _Nonnull sel, ...)
CorrectedSynthesize = 1<<4, // used for an old workaround, now ignored
IsSimulated = 1<<5, // image compiled for a simulator platform
HasCategoryClassProperties = 1<<6, // class properties in category_t
- // not yet used = 1<<7
+ OptimizedByDyldClosure = 1 << 7, // dyld (not the shared cache) optimized this.
// 1 byte Swift unstable ABI version number
SwiftUnstableVersionMaskShift = 8,
bool requiresGC() const { return flags & RequiresGC; }
bool optimizedByDyld() const { return flags & OptimizedByDyld; }
bool hasCategoryClassProperties() const { return flags & HasCategoryClassProperties; }
+ bool optimizedByDyldClosure() const { return flags & OptimizedByDyldClosure; }
bool containsSwift() const { return (flags & SwiftUnstableVersionMask) != 0; }
uint32_t swiftUnstableVersion() const { return (flags & SwiftUnstableVersionMask) >> SwiftUnstableVersionMaskShift; }
#endif
#if !defined(OBJC_DECLARE_SYMBOLS)
__OSX_AVAILABLE(10.0)
__IOS_UNAVAILABLE __TVOS_UNAVAILABLE
-__WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE
+__WATCHOS_UNAVAILABLE
+# ifndef __APPLE_BLEACH_SDK__
+__BRIDGEOS_UNAVAILABLE
+# endif
#endif
OBJC_ROOT_CLASS
@interface Object {
// Variadic IMP is comparable via OpaquePointer; non-variadic IMP isn't.
# define OBJC_OLD_DISPATCH_PROTOTYPES 1
# else
-# define OBJC_OLD_DISPATCH_PROTOTYPES 1
+# define OBJC_OLD_DISPATCH_PROTOTYPES 0
# endif
#endif
/* OBJC_AVAILABLE: shorthand for all-OS availability */
-#if !defined(OBJC_AVAILABLE)
-# define OBJC_AVAILABLE(x, i, t, w, b) \
- __OSX_AVAILABLE(x) __IOS_AVAILABLE(i) __TVOS_AVAILABLE(t) \
- __WATCHOS_AVAILABLE(w) __BRIDGEOS_AVAILABLE(b)
+#ifndef __APPLE_BLEACH_SDK__
+# if !defined(OBJC_AVAILABLE)
+# define OBJC_AVAILABLE(x, i, t, w, b) \
+ __OSX_AVAILABLE(x) __IOS_AVAILABLE(i) __TVOS_AVAILABLE(t) \
+ __WATCHOS_AVAILABLE(w) __BRIDGEOS_AVAILABLE(b)
+# endif
+#else
+# if !defined(OBJC_AVAILABLE)
+# define OBJC_AVAILABLE(x, i, t, w, b) \
+ __OSX_AVAILABLE(x) __IOS_AVAILABLE(i) __TVOS_AVAILABLE(t) \
+ __WATCHOS_AVAILABLE(w)
+# endif
+#endif
+
+
+/* OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE: Deprecated on OS X,
+ * unavailable everywhere else. */
+#ifndef __APPLE_BLEACH_SDK__
+# if !defined(OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE)
+# define OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(_start, _dep, _msg) \
+ __OSX_DEPRECATED(_start, _dep, _msg) \
+ __IOS_UNAVAILABLE __TVOS_UNAVAILABLE \
+ __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE
+# endif
+#else
+# if !defined(OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE)
+# define OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(_start, _dep, _msg) \
+ __OSX_DEPRECATED(_start, _dep, _msg) \
+ __IOS_UNAVAILABLE __TVOS_UNAVAILABLE \
+ __WATCHOS_UNAVAILABLE
+# endif
+#endif
+
+
+/* OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE: Available on OS X,
+ * unavailable everywhere else. */
+#ifndef __APPLE_BLEACH_SDK__
+# if !defined(OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE)
+# define OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(vers) \
+ __OSX_AVAILABLE(vers) \
+ __IOS_UNAVAILABLE __TVOS_UNAVAILABLE \
+ __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE
+# endif
+#else
+# if !defined(OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE)
+# define OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(vers) \
+ __OSX_AVAILABLE(vers) \
+ __IOS_UNAVAILABLE __TVOS_UNAVAILABLE \
+ __WATCHOS_UNAVAILABLE
+# endif
#endif
/* Out-of-line declarations */
OBJC_EXPORT void objc_collect(unsigned long options)
- __OSX_DEPRECATED(10.6, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.6, 10.8, "it does nothing");
OBJC_EXPORT BOOL objc_collectingEnabled(void)
- __OSX_DEPRECATED(10.5, 10.8, "it always returns NO") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.5, 10.8, "it always returns NO");
OBJC_EXPORT malloc_zone_t *objc_collectableZone(void)
- __OSX_DEPRECATED(10.7, 10.8, "it always returns nil") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.7, 10.8, "it always returns nil");
OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold)
- __OSX_DEPRECATED(10.5, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.5, 10.8, "it does nothing");
OBJC_EXPORT void objc_setCollectionRatio(size_t ratio)
- __OSX_DEPRECATED(10.5, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.5, 10.8, "it does nothing");
OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation)
- __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtr instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.6, 10.8, "use OSAtomicCompareAndSwapPtr instead");
OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation)
- __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtrBarrier instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.6, 10.8, "use OSAtomicCompareAndSwapPtrBarrier instead");
OBJC_EXPORT BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation)
- __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtr instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.6, 10.8, "use OSAtomicCompareAndSwapPtr instead");
OBJC_EXPORT BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation)
- __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtrBarrier instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.6, 10.8, "use OSAtomicCompareAndSwapPtrBarrier instead");
OBJC_EXPORT BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation)
- __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtr instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.6, 10.8, "use OSAtomicCompareAndSwapPtr instead");
OBJC_EXPORT BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation)
- __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtrBarrier instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.6, 10.8, "use OSAtomicCompareAndSwapPtrBarrier instead");
OBJC_EXPORT id objc_assign_strongCast(id val, id *dest)
- __OSX_DEPRECATED(10.4, 10.8, "use a simple assignment instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.8, "use a simple assignment instead");
OBJC_EXPORT id objc_assign_global(id val, id *dest)
- __OSX_DEPRECATED(10.4, 10.8, "use a simple assignment instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.8, "use a simple assignment instead");
OBJC_EXPORT id objc_assign_threadlocal(id val, id *dest)
- __OSX_DEPRECATED(10.7, 10.8, "use a simple assignment instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.7, 10.8, "use a simple assignment instead");
OBJC_EXPORT id objc_assign_ivar(id value, id dest, ptrdiff_t offset)
- __OSX_DEPRECATED(10.4, 10.8, "use a simple assignment instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.8, "use a simple assignment instead");
OBJC_EXPORT void *objc_memmove_collectable(void *dst, const void *src, size_t size)
- __OSX_DEPRECATED(10.4, 10.8, "use memmove instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.8, "use memmove instead");
OBJC_EXPORT id objc_read_weak(id *location)
- __OSX_DEPRECATED(10.5, 10.8, "use a simple read instead, or convert to zeroing __weak") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.5, 10.8, "use a simple read instead, or convert to zeroing __weak");
OBJC_EXPORT id objc_assign_weak(id value, id *location)
- __OSX_DEPRECATED(10.5, 10.8, "use a simple assignment instead, or convert to zeroing __weak") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.5, 10.8, "use a simple assignment instead, or convert to zeroing __weak");
OBJC_EXPORT void objc_registerThreadWithCollector(void)
- __OSX_DEPRECATED(10.6, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.6, 10.8, "it does nothing");
OBJC_EXPORT void objc_unregisterThreadWithCollector(void)
- __OSX_DEPRECATED(10.6, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.6, 10.8, "it does nothing");
OBJC_EXPORT void objc_assertRegisteredThreadWithCollector(void)
- __OSX_DEPRECATED(10.6, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.6, 10.8, "it does nothing");
OBJC_EXPORT void objc_clear_stack(unsigned long options)
- __OSX_DEPRECATED(10.5, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.5, 10.8, "it does nothing");
OBJC_EXPORT BOOL objc_is_finalized(void *ptr)
- __OSX_DEPRECATED(10.4, 10.8, "it always returns NO") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.8, "it always returns NO");
OBJC_EXPORT void objc_finalizeOnMainThread(Class cls)
- __OSX_DEPRECATED(10.5, 10.5, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.5, 10.5, "it does nothing");
OBJC_EXPORT BOOL objc_collecting_enabled(void)
- __OSX_DEPRECATED(10.4, 10.5, "it always returns NO") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.5, "it always returns NO");
OBJC_EXPORT void objc_set_collection_threshold(size_t threshold)
- __OSX_DEPRECATED(10.4, 10.5, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.5, "it does nothing");
OBJC_EXPORT void objc_set_collection_ratio(size_t ratio)
- __OSX_DEPRECATED(10.4, 10.5, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.5, "it does nothing");
OBJC_EXPORT void objc_start_collector_thread(void)
- __OSX_DEPRECATED(10.4, 10.5, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.5, "it does nothing");
OBJC_EXPORT void objc_startCollectorThread(void)
- __OSX_DEPRECATED(10.5, 10.7, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.5, 10.7, "it does nothing");
OBJC_EXPORT id objc_allocate_object(Class cls, int extra)
- __OSX_DEPRECATED(10.4, 10.4, "use class_createInstance instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.4, "use class_createInstance instead");
/* !defined(OBJC_NO_GC) */
void check() {
#if DEBUG
- assert(impl.address() == textSegment + PAGE_MAX_SIZE);
- assert(impl.address() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE
+ ASSERT(impl.address() == textSegment + PAGE_MAX_SIZE);
+ ASSERT(impl.address() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE
assert(impl.address() + PAGE_MAX_SIZE ==
last.address() + SLOT_SIZE);
- assert(last.address()+8 < textSegment + textSegmentSize);
- assert((last.address() - start.address()) % SLOT_SIZE == 0);
+ ASSERT(last.address()+8 < textSegment + textSegmentSize);
+ ASSERT((last.address() - start.address()) % SLOT_SIZE == 0);
# if SUPPORT_STRET
- assert(impl_stret.address() == textSegment + 2*PAGE_MAX_SIZE);
- assert(impl_stret.address() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE
+ ASSERT(impl_stret.address() == textSegment + 2*PAGE_MAX_SIZE);
+ ASSERT(impl_stret.address() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE
assert(impl_stret.address() + PAGE_MAX_SIZE ==
last_stret.address() + SLOT_SIZE);
assert(start.address() - impl.address() ==
}
Payload *payload(uintptr_t index) {
- assert(validIndex(index));
+ ASSERT(validIndex(index));
return (Payload *)((char *)this + index*slotSize());
}
}
IMP trampoline(int aMode, uintptr_t index) {
- assert(validIndex(index));
+ ASSERT(validIndex(index));
char *base = (char *)trampolinesForMode(aMode);
char *imp = base + index*slotSize();
#if __arm__
}
static void check() {
- assert(TrampolineBlockPageGroup::headerSize() >= sizeof(TrampolineBlockPageGroup));
- assert(TrampolineBlockPageGroup::headerSize() % TrampolineBlockPageGroup::slotSize() == 0);
+ ASSERT(TrampolineBlockPageGroup::headerSize() >= sizeof(TrampolineBlockPageGroup));
+ ASSERT(TrampolineBlockPageGroup::headerSize() % TrampolineBlockPageGroup::slotSize() == 0);
}
};
// We assume that our code begins on the second TEXT page, but are robust
// against other additions to the end of the TEXT segment.
- assert(HeadPageGroup == nil || HeadPageGroup->nextAvailablePage == nil);
+ ASSERT(HeadPageGroup == nil || HeadPageGroup->nextAvailablePage == nil);
auto textSource = Trampolines.textSegment();
auto textSourceSize = Trampolines.textSegmentSize();
if (_Block_has_signature(block) && _Block_use_stret(block))
aMode = ReturnValueOnStackArgumentMode;
#else
- assert(! (_Block_has_signature(block) && _Block_use_stret(block)));
+ ASSERT(! (_Block_has_signature(block) && _Block_use_stret(block)));
#endif
return aMode;
}
+/// Initialize the trampoline machinery. Normally this does nothing, as
+/// everything is initialized lazily, but for certain processes we eagerly load
+/// the trampolines dylib.
+void
+_imp_implementationWithBlock_init(void)
+{
+#if TARGET_OS_OSX
+ // Eagerly load libobjc-trampolines.dylib in certain processes. Some
+ // programs (most notably QtWebEngineProcess used by older versions of
+ // embedded Chromium) enable a highly restrictive sandbox profile which
+ // blocks access to that dylib. If anything calls
+ // imp_implementationWithBlock (as AppKit has started doing) then we'll
+ // crash trying to load it. Loading it here sets it up before the sandbox
+ // profile is enabled and blocks it.
+ //
+ // This fixes EA Origin (rdar://problem/50813789)
+ // and Steam (rdar://problem/55286131)
+ if (__progname &&
+ (strcmp(__progname, "QtWebEngineProcess") == 0 ||
+ strcmp(__progname, "Steam Helper") == 0)) {
+ Trampolines.Initialize();
+ }
+#endif
+}
+
// `block` must already have been copied
IMP
getOrAllocatePageGroupWithNextAvailable();
uintptr_t index = pageGroup->nextAvailable;
- assert(index >= pageGroup->startIndex() && index < pageGroup->endIndex());
+ ASSERT(index >= pageGroup->startIndex() && index < pageGroup->endIndex());
TrampolineBlockPageGroup::Payload *payload = pageGroup->payload(index);
uintptr_t nextAvailableIndex = payload->nextAvailable;
extern IMP _cache_getImp(Class cls, SEL sel);
extern Method _cache_getMethod(Class cls, SEL sel, IMP objc_msgForward_internal_imp);
+extern void cache_init(void);
extern void flush_cache(Class cls);
extern bool _cache_fill(Class cls, Method meth, SEL sel);
extern void _cache_addForwardEntry(Class cls, SEL sel);
* reading function is in progress because it might still be using
* the garbage memory.
**********************************************************************/
-extern "C" uintptr_t objc_entryPoints[];
-extern "C" uintptr_t objc_exitPoints[];
+typedef struct {
+ uint64_t location;
+ unsigned short length;
+ unsigned short recovery_offs;
+ unsigned int flags;
+} task_restartable_range_t;
+
+extern "C" task_restartable_range_t objc_restartableRanges[];
static int _collecting_in_critical(void)
{
kern_return_t ret;
int result;
- mach_port_t mythread = pthread_mach_thread_np(pthread_self());
+ mach_port_t mythread = pthread_mach_thread_np(objc_thread_self());
// Get a list of all the threads in the current task
ret = task_threads (mach_task_self (), &threads, &number);
}
// Check whether it is in the cache lookup code
- for (region = 0; objc_entryPoints[region] != 0; region++)
+ for (region = 0; objc_restartableRanges[region].location != 0; region++)
{
- if ((pc >= objc_entryPoints[region]) &&
- (pc <= objc_exitPoints[region]))
+ uint32_t loc = (uint32_t)objc_restartableRanges[region].location;
+ if ((pc > loc) &&
+ (pc - loc) < objc_restartableRanges[region].length)
{
result = TRUE;
goto done;
#endif
+void cache_init()
+{
+}
// !__OBJC2__
#endif
__BEGIN_DECLS
+extern void cache_init(void);
+
extern IMP cache_getImp(Class cls, SEL sel);
extern void cache_fill(Class cls, SEL sel, IMP imp, id receiver);
/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
enum {
INIT_CACHE_SIZE_LOG2 = 2,
- INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2)
+ INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2),
+ MAX_CACHE_SIZE_LOG2 = 16,
+ MAX_CACHE_SIZE = (1 << MAX_CACHE_SIZE_LOG2),
};
static void cache_collect_free(struct bucket_t *data, mask_t capacity);
"\n .globl __objc_empty_vtable"
"\n .set __objc_empty_vtable, 0"
"\n .globl __objc_empty_cache"
+#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
+ "\n .align 4"
+ "\n L__objc_empty_cache: .space " stringize2(EMPTY_BYTES)
+ "\n .set __objc_empty_cache, L__objc_empty_cache + 0xf"
+#else
"\n .align 3"
"\n __objc_empty_cache: .space " stringize2(EMPTY_BYTES)
+#endif
);
#endif
-// copied from dispatch_atomic_maximally_synchronizing_barrier
-// fixme verify that this barrier hack does in fact work here
-#if __x86_64__
-#define mega_barrier() \
- do { unsigned long _clbr; __asm__ __volatile__( \
- "cpuid" \
- : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \
- ); } while(0)
-
-#elif __i386__
-#define mega_barrier() \
- do { unsigned long _clbr; __asm__ __volatile__( \
- "cpuid" \
- : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \
- ); } while(0)
-
-#elif __arm__ || __arm64__
+// mega_barrier doesn't really work, but it works enough on ARM that
+// we leave well enough alone and keep using it there.
+#if __arm__
#define mega_barrier() \
__asm__ __volatile__( \
"dsb ish" \
: : : "memory")
-#else
-#error unknown architecture
#endif
#if __arm64__
cache_t *getCache(Class cls)
{
- assert(cls);
+ ASSERT(cls);
return &cls->cache;
}
#if __arm64__
-template<Atomicity atomicity>
-void bucket_t::set(SEL newSel, IMP newImp)
+template<Atomicity atomicity, IMPEncoding impEncoding>
+void bucket_t::set(SEL newSel, IMP newImp, Class cls)
{
- assert(_sel == 0 || _sel == newSel);
+ ASSERT(_sel.load(memory_order::memory_order_relaxed) == 0 ||
+ _sel.load(memory_order::memory_order_relaxed) == newSel);
static_assert(offsetof(bucket_t,_imp) == 0 &&
offsetof(bucket_t,_sel) == sizeof(void *),
"bucket_t layout doesn't match arm64 bucket_t::set()");
- uintptr_t signedImp = signIMP(newImp, newSel);
+ uintptr_t encodedImp = (impEncoding == Encoded
+ ? encodeImp(newImp, newSel, cls)
+ : (uintptr_t)newImp);
- if (atomicity == Atomic) {
- // LDP/STP guarantees that all observers get
- // either imp/sel or newImp/newSel
- stp(signedImp, (uintptr_t)newSel, this);
- } else {
- _sel = newSel;
- _imp = signedImp;
- }
+ // LDP/STP guarantees that all observers get
+ // either imp/sel or newImp/newSel
+ stp(encodedImp, (uintptr_t)newSel, this);
}
#else
-template<Atomicity atomicity>
-void bucket_t::set(SEL newSel, IMP newImp)
+template<Atomicity atomicity, IMPEncoding impEncoding>
+void bucket_t::set(SEL newSel, IMP newImp, Class cls)
{
- assert(_sel == 0 || _sel == newSel);
+ ASSERT(_sel.load(memory_order::memory_order_relaxed) == 0 ||
+ _sel.load(memory_order::memory_order_relaxed) == newSel);
// objc_msgSend uses sel and imp with no locks.
// It is safe for objc_msgSend to see new imp but NULL sel
// It is unsafe for objc_msgSend to see old imp and new sel.
// Therefore we write new imp, wait a lot, then write new sel.
- _imp = (uintptr_t)newImp;
-
- if (_sel != newSel) {
- if (atomicity == Atomic) {
+ uintptr_t newIMP = (impEncoding == Encoded
+ ? encodeImp(newImp, newSel, cls)
+ : (uintptr_t)newImp);
+
+ if (atomicity == Atomic) {
+ _imp.store(newIMP, memory_order::memory_order_relaxed);
+
+ if (_sel.load(memory_order::memory_order_relaxed) != newSel) {
+#ifdef __arm__
mega_barrier();
+ _sel.store(newSel, memory_order::memory_order_relaxed);
+#elif __x86_64__ || __i386__
+ _sel.store(newSel, memory_order::memory_order_release);
+#else
+#error Don't know how to do bucket_t::set on this architecture.
+#endif
}
- _sel = newSel;
+ } else {
+ _imp.store(newIMP, memory_order::memory_order_relaxed);
+ _sel.store(newSel, memory_order::memory_order_relaxed);
}
}
#endif
+#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
+
void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
// objc_msgSend uses mask and buckets with no locks.
// Therefore we write new buckets, wait a lot, then write new mask.
// objc_msgSend reads mask first, then buckets.
+#ifdef __arm__
// ensure other threads see buckets contents before buckets pointer
mega_barrier();
- _buckets = newBuckets;
+ _buckets.store(newBuckets, memory_order::memory_order_relaxed);
// ensure other threads see new buckets before new mask
mega_barrier();
- _mask = newMask;
+ _mask.store(newMask, memory_order::memory_order_relaxed);
_occupied = 0;
+#elif __x86_64__ || __i386__
+ // ensure other threads see buckets contents before buckets pointer
+ _buckets.store(newBuckets, memory_order::memory_order_release);
+
+ // ensure other threads see new buckets before new mask
+ _mask.store(newMask, memory_order::memory_order_release);
+ _occupied = 0;
+#else
+#error Don't know how to do setBucketsAndMask on this architecture.
+#endif
}
+struct bucket_t *cache_t::emptyBuckets()
+{
+ return (bucket_t *)&_objc_empty_cache;
+}
struct bucket_t *cache_t::buckets()
{
- return _buckets;
+ return _buckets.load(memory_order::memory_order_relaxed);
}
mask_t cache_t::mask()
{
- return _mask;
+ return _mask.load(memory_order::memory_order_relaxed);
}
-mask_t cache_t::occupied()
+void cache_t::initializeToEmpty()
{
- return _occupied;
+ bzero(this, sizeof(*this));
+ _buckets.store((bucket_t *)&_objc_empty_cache, memory_order::memory_order_relaxed);
}
-void cache_t::incrementOccupied()
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
+
+void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
- _occupied++;
+ uintptr_t buckets = (uintptr_t)newBuckets;
+ uintptr_t mask = (uintptr_t)newMask;
+
+ ASSERT(buckets <= bucketsMask);
+ ASSERT(mask <= maxMask);
+
+ _maskAndBuckets.store(((uintptr_t)newMask << maskShift) | (uintptr_t)newBuckets, std::memory_order_relaxed);
+ _occupied = 0;
+}
+
+struct bucket_t *cache_t::emptyBuckets()
+{
+ return (bucket_t *)&_objc_empty_cache;
+}
+
+struct bucket_t *cache_t::buckets()
+{
+ uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
+ return (bucket_t *)(maskAndBuckets & bucketsMask);
+}
+
+mask_t cache_t::mask()
+{
+ uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
+ return maskAndBuckets >> maskShift;
}
void cache_t::initializeToEmpty()
{
bzero(this, sizeof(*this));
- _buckets = (bucket_t *)&_objc_empty_cache;
+ _maskAndBuckets.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
+}
+
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
+
+void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
+{
+ uintptr_t buckets = (uintptr_t)newBuckets;
+ unsigned mask = (unsigned)newMask;
+
+ ASSERT(buckets == (buckets & bucketsMask));
+ ASSERT(mask <= 0xffff);
+
+ // The shift amount is equal to the number of leading zeroes in
+ // the last 16 bits of mask. Count all the leading zeroes, then
+ // subtract to ignore the top half.
+ uintptr_t maskShift = __builtin_clz(mask) - (sizeof(mask) * CHAR_BIT - 16);
+ ASSERT(mask == (0xffff >> maskShift));
+
+ _maskAndBuckets.store(buckets | maskShift, memory_order::memory_order_relaxed);
+ _occupied = 0;
+
+ ASSERT(this->buckets() == newBuckets);
+ ASSERT(this->mask() == newMask);
+}
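
// Editor's sketch (not part of Apple's diff): a worked example of the
// CACHE_MASK_STORAGE_LOW_4 shift arithmetic above. The value 7 (a mask for
// capacity 8) is a hypothetical input chosen only for illustration.
#include <assert.h>
#include <limits.h>

static void low4_shift_example(void)
{
    unsigned mask = 7;   // 16-bit view: 0b0000'0000'0000'0111

    // Same computation as setBucketsAndMask: count leading zeroes over the
    // full word, then subtract the top half to look at only the low 16 bits.
    unsigned shift = __builtin_clz(mask) - (unsigned)(sizeof(mask) * CHAR_BIT - 16);

    assert(shift == 13);                // 13 leading zeroes in the 16-bit view
    assert((0xffff >> shift) == mask);  // round-trips, as the ASSERT above checks
    assert(shift < 16);                 // small enough to live in the low 4 bits
}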
+
+struct bucket_t *cache_t::emptyBuckets()
+{
+ return (bucket_t *)((uintptr_t)&_objc_empty_cache & bucketsMask);
+}
+
+struct bucket_t *cache_t::buckets()
+{
+ uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
+ return (bucket_t *)(maskAndBuckets & bucketsMask);
+}
+
+mask_t cache_t::mask()
+{
+ uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
+ uintptr_t maskShift = (maskAndBuckets & maskMask);
+ return 0xffff >> maskShift;
+}
+
+void cache_t::initializeToEmpty()
+{
+ bzero(this, sizeof(*this));
+ _maskAndBuckets.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
+}
+
+#else
+#error Unknown cache mask storage type.
+#endif
+
+mask_t cache_t::occupied()
+{
+ return _occupied;
}
+void cache_t::incrementOccupied()
+{
+ _occupied++;
+}
-mask_t cache_t::capacity()
+unsigned cache_t::capacity()
{
return mask() ? mask()+1 : 0;
}
#if __arm__
// End marker's sel is 1 and imp points BEFORE the first bucket.
// This saves an instruction in objc_msgSend.
- end->set<NotAtomic>((SEL)(uintptr_t)1, (IMP)(newBuckets - 1));
+ end->set<NotAtomic, Raw>((SEL)(uintptr_t)1, (IMP)(newBuckets - 1), nil);
#else
// End marker's sel is 1 and imp points to the first bucket.
- end->set<NotAtomic>((SEL)(uintptr_t)1, (IMP)newBuckets);
+ end->set<NotAtomic, Raw>((SEL)(uintptr_t)1, (IMP)newBuckets, nil);
#endif
if (PrintCaches) recordNewCache(newCapacity);
bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true)
{
+#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
+#else
+ runtimeLock.assertLocked();
+#endif
size_t bytes = cache_t::bytesForCapacity(capacity);
// Use _objc_empty_cache if the buckets is small enough.
if (bytes <= EMPTY_BYTES) {
- return (bucket_t *)&_objc_empty_cache;
+ return cache_t::emptyBuckets();
}
// Use shared empty buckets allocated on the heap.
return !isConstantEmptyCache();
}
-
-void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity)
+ALWAYS_INLINE
+void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld)
{
- bool freeOld = canBeFreed();
-
bucket_t *oldBuckets = buckets();
bucket_t *newBuckets = allocateBuckets(newCapacity);
// This is thought to save cache memory at the cost of extra cache fills.
// fixme re-measure this
- assert(newCapacity > 0);
- assert((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);
+ ASSERT(newCapacity > 0);
+ ASSERT((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);
setBucketsAndMask(newBuckets, newCapacity - 1);
if (freeOld) {
cache_collect_free(oldBuckets, oldCapacity);
- cache_collect(false);
}
}
("Method cache corrupted. This may be a message to an "
"invalid object, or a memory error somewhere else.");
cache_t *cache = &isa->cache;
+#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
+ bucket_t *buckets = cache->_buckets.load(memory_order::memory_order_relaxed);
_objc_inform_now_and_on_crash
("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
"mask 0x%x, occupied 0x%x",
receiver ? "receiver" : "unused", receiver,
- sel, isa, cache, cache->_buckets,
- cache->_mask, cache->_occupied);
+ sel, isa, cache, buckets,
+ cache->_mask.load(memory_order::memory_order_relaxed),
+ cache->_occupied);
_objc_inform_now_and_on_crash
("%s %zu bytes, buckets %zu bytes",
receiver ? "receiver" : "unused", malloc_size(receiver),
- malloc_size(cache->_buckets));
+ malloc_size(buckets));
+#elif (CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 || \
+ CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4)
+ uintptr_t maskAndBuckets = cache->_maskAndBuckets.load(memory_order::memory_order_relaxed);
+ _objc_inform_now_and_on_crash
+ ("%s %p, SEL %p, isa %p, cache %p, buckets and mask 0x%lx, "
+ "occupied 0x%x",
+ receiver ? "receiver" : "unused", receiver,
+ sel, isa, cache, maskAndBuckets,
+ cache->_occupied);
+ _objc_inform_now_and_on_crash
+ ("%s %zu bytes, buckets %zu bytes",
+ receiver ? "receiver" : "unused", malloc_size(receiver),
+ malloc_size(cache->buckets()));
+#else
+#error Unknown cache mask storage type.
+#endif
_objc_inform_now_and_on_crash
("selector '%s'", sel_getName(sel));
_objc_inform_now_and_on_crash
"invalid object, or a memory error somewhere else.");
}
-
-bucket_t * cache_t::find(SEL s, id receiver)
-{
- assert(s != 0);
-
- bucket_t *b = buckets();
- mask_t m = mask();
- mask_t begin = cache_hash(s, m);
- mask_t i = begin;
- do {
- if (b[i].sel() == 0 || b[i].sel() == s) {
- return &b[i];
- }
- } while ((i = cache_next(i, m)) != begin);
-
- // hack
- Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache));
- cache_t::bad_cache(receiver, (SEL)s, cls);
-}
-
-
-void cache_t::expand()
-{
- cacheUpdateLock.assertLocked();
-
- uint32_t oldCapacity = capacity();
- uint32_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE;
-
- if ((uint32_t)(mask_t)newCapacity != newCapacity) {
- // mask overflow - can't grow further
- // fixme this wastes one bit of mask
- newCapacity = oldCapacity;
- }
-
- reallocate(oldCapacity, newCapacity);
-}
-
-
-static void cache_fill_nolock(Class cls, SEL sel, IMP imp, id receiver)
+ALWAYS_INLINE
+void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
{
+#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
+#else
+ runtimeLock.assertLocked();
+#endif
- // Never cache before +initialize is done
- if (!cls->isInitialized()) return;
-
- // Make sure the entry wasn't added to the cache by some other thread
- // before we grabbed the cacheUpdateLock.
- if (cache_getImp(cls, sel)) return;
-
- cache_t *cache = getCache(cls);
+ ASSERT(sel != 0 && cls->isInitialized());
// Use the cache as-is if it is less than 3/4 full
- mask_t newOccupied = cache->occupied() + 1;
- mask_t capacity = cache->capacity();
- if (cache->isConstantEmptyCache()) {
+ mask_t newOccupied = occupied() + 1;
+ unsigned oldCapacity = capacity(), capacity = oldCapacity;
+ if (slowpath(isConstantEmptyCache())) {
// Cache is read-only. Replace it.
- cache->reallocate(capacity, capacity ?: INIT_CACHE_SIZE);
+ if (!capacity) capacity = INIT_CACHE_SIZE;
+ reallocate(oldCapacity, capacity, /* freeOld */false);
}
- else if (newOccupied <= capacity / 4 * 3) {
+ else if (fastpath(newOccupied <= capacity / 4 * 3)) {
// Cache is less than 3/4 full. Use it as-is.
}
else {
- // Cache is too full. Expand it.
- cache->expand();
+ capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;
+ if (capacity > MAX_CACHE_SIZE) {
+ capacity = MAX_CACHE_SIZE;
+ }
+ reallocate(oldCapacity, capacity, true);
}
+ bucket_t *b = buckets();
+ mask_t m = capacity - 1;
+ mask_t begin = cache_hash(sel, m);
+ mask_t i = begin;
+
// Scan for the first unused slot and insert there.
- // There is guaranteed to be an empty slot because the
+ // There is guaranteed to be an empty slot because the
// minimum size is 4 and we resized at 3/4 full.
- bucket_t *bucket = cache->find(sel, receiver);
- if (bucket->sel() == 0) cache->incrementOccupied();
- bucket->set<Atomic>(sel, imp);
+ do {
+ if (fastpath(b[i].sel() == 0)) {
+ incrementOccupied();
+ b[i].set<Atomic, Encoded>(sel, imp, cls);
+ return;
+ }
+ if (b[i].sel() == sel) {
+ // The entry was added to the cache by some other thread
+ // before we grabbed the cacheUpdateLock.
+ return;
+ }
+ } while (fastpath((i = cache_next(i, m)) != begin));
+
+ cache_t::bad_cache(receiver, (SEL)sel, cls);
}
void cache_fill(Class cls, SEL sel, IMP imp, id receiver)
{
+ runtimeLock.assertLocked();
+
#if !DEBUG_TASK_THREADS
- mutex_locker_t lock(cacheUpdateLock);
- cache_fill_nolock(cls, sel, imp, receiver);
+ // Never cache before +initialize is done
+ if (cls->isInitialized()) {
+ cache_t *cache = getCache(cls);
+#if CONFIG_USE_CACHE_LOCK
+ mutex_locker_t lock(cacheUpdateLock);
+#endif
+ cache->insert(cls, sel, imp, receiver);
+ }
#else
_collecting_in_critical();
- return;
#endif
}
// This must not shrink the cache - that breaks the lock-free scheme.
void cache_erase_nolock(Class cls)
{
+#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
+#else
+ runtimeLock.assertLocked();
+#endif
cache_t *cache = getCache(cls);
cache->setBucketsAndMask(buckets, capacity - 1); // also clears occupied
cache_collect_free(oldBuckets, capacity);
- cache_collect(false);
}
}
void cache_delete(Class cls)
{
+#if CONFIG_USE_CACHE_LOCK
mutex_locker_t lock(cacheUpdateLock);
+#else
+ runtimeLock.assertLocked();
+#endif
if (cls->cache.canBeFreed()) {
if (PrintCaches) recordDeadCache(cls->cache.capacity());
free(cls->cache.buckets());
arm_thread_state64_t state;
unsigned int count = ARM_THREAD_STATE64_COUNT;
kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count);
- return (okay == KERN_SUCCESS) ? arm_thread_state64_get_pc(state) : PC_SENTINEL;
+ return (okay == KERN_SUCCESS) ? (uintptr_t)arm_thread_state64_get_pc(state) : PC_SENTINEL;
}
#else
{
* reading function is in progress because it might still be using
* the garbage memory.
**********************************************************************/
-extern "C" uintptr_t objc_entryPoints[];
-extern "C" uintptr_t objc_exitPoints[];
+#if HAVE_TASK_RESTARTABLE_RANGES
+#include <kern/restartable.h>
+#else
+typedef struct {
+ uint64_t location;
+ unsigned short length;
+ unsigned short recovery_offs;
+ unsigned int flags;
+} task_restartable_range_t;
+#endif
+
+extern "C" task_restartable_range_t objc_restartableRanges[];
+
+#if HAVE_TASK_RESTARTABLE_RANGES
+static bool shouldUseRestartableRanges = true;
+#endif
+
+void cache_init()
+{
+#if HAVE_TASK_RESTARTABLE_RANGES
+ mach_msg_type_number_t count = 0;
+ kern_return_t kr;
+
+ while (objc_restartableRanges[count].location) {
+ count++;
+ }
+
+ kr = task_restartable_ranges_register(mach_task_self(),
+ objc_restartableRanges, count);
+ if (kr == KERN_SUCCESS) return;
+ _objc_fatal("task_restartable_ranges_register failed (result 0x%x: %s)",
+ kr, mach_error_string(kr));
+#endif // HAVE_TASK_RESTARTABLE_RANGES
+}
static int _collecting_in_critical(void)
{
#if TARGET_OS_WIN32
return TRUE;
-#else
+#elif HAVE_TASK_RESTARTABLE_RANGES
+ // Only use restartable ranges if we registered them earlier.
+ if (shouldUseRestartableRanges) {
+ kern_return_t kr = task_restartable_ranges_synchronize(mach_task_self());
+ if (kr == KERN_SUCCESS) return FALSE;
+ _objc_fatal("task_restartable_ranges_synchronize failed (result 0x%x: %s)",
+ kr, mach_error_string(kr));
+ }
+#endif // HAVE_TASK_RESTARTABLE_RANGES
+
+ // Fallthrough if we didn't use restartable ranges.
+
thread_act_port_array_t threads;
unsigned number;
unsigned count;
kern_return_t ret;
int result;
- mach_port_t mythread = pthread_mach_thread_np(pthread_self());
+ mach_port_t mythread = pthread_mach_thread_np(objc_thread_self());
// Get a list of all the threads in the current task
#if !DEBUG_TASK_THREADS
}
// Check whether it is in the cache lookup code
- for (region = 0; objc_entryPoints[region] != 0; region++)
+ for (region = 0; objc_restartableRanges[region].location != 0; region++)
{
- if ((pc >= objc_entryPoints[region]) &&
- (pc <= objc_exitPoints[region]))
+ uint64_t loc = objc_restartableRanges[region].location;
+ if ((pc > loc) &&
+ (pc - loc < (uint64_t)objc_restartableRanges[region].length))
{
result = TRUE;
goto done;
// Return our finding
return result;
-#endif
}
**********************************************************************/
static void cache_collect_free(bucket_t *data, mask_t capacity)
{
+#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
+#else
+ runtimeLock.assertLocked();
+#endif
if (PrintCaches) recordDeadCache(capacity);
_garbage_make_room ();
garbage_byte_size += cache_t::bytesForCapacity(capacity);
garbage_refs[garbage_count++] = data;
+ cache_collect(false);
}
**********************************************************************/
void cache_collect(bool collectALot)
{
+#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
+#else
+ runtimeLock.assertLocked();
+#endif
// Done if the garbage is not full
if (garbage_byte_size < garbage_threshold && !collectALot) {
* cls should be a metaclass.
* Does not check if the method already exists.
**********************************************************************/
-static void _class_resolveClassMethod(Class cls, SEL sel, id inst)
+static void _class_resolveClassMethod(id inst, SEL sel, Class cls)
{
- assert(cls->isMetaClass());
+ ASSERT(cls->isMetaClass());
+ SEL resolve_sel = @selector(resolveClassMethod:);
- if (! lookUpImpOrNil(cls, SEL_resolveClassMethod, inst,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/))
- {
+ if (!lookUpImpOrNil(inst, resolve_sel, cls)) {
// Resolver not implemented.
return;
}
BOOL (*msg)(Class, SEL, SEL) = (typeof(msg))objc_msgSend;
- bool resolved = msg(_class_getNonMetaClass(cls, inst),
- SEL_resolveClassMethod, sel);
+ bool resolved = msg(_class_getNonMetaClass(cls, inst), resolve_sel, sel);
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveClassMethod adds to self->ISA() a.k.a. cls
- IMP imp = lookUpImpOrNil(cls, sel, inst,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/);
-
+ IMP imp = lookUpImpOrNil(inst, sel, cls);
if (resolved && PrintResolving) {
if (imp) {
_objc_inform("RESOLVE: method %c[%s %s] "
* cls may be a metaclass or a non-meta class.
* Does not check if the method already exists.
**********************************************************************/
-static void _class_resolveInstanceMethod(Class cls, SEL sel, id inst)
+static void _class_resolveInstanceMethod(id inst, SEL sel, Class cls)
{
- if (! lookUpImpOrNil(cls->ISA(), SEL_resolveInstanceMethod, cls,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/))
- {
+ SEL resolve_sel = @selector(resolveInstanceMethod:);
+
+ if (! lookUpImpOrNil(cls, resolve_sel, cls->ISA())) {
// Resolver not implemented.
return;
}
BOOL (*msg)(Class, SEL, SEL) = (typeof(msg))objc_msgSend;
- bool resolved = msg(cls, SEL_resolveInstanceMethod, sel);
+ bool resolved = msg(cls, resolve_sel, sel);
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveInstanceMethod adds to self a.k.a. cls
- IMP imp = lookUpImpOrNil(cls, sel, inst,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/);
+ IMP imp = lookUpImpOrNil(inst, sel, cls);
if (resolved && PrintResolving) {
if (imp) {
* Returns nothing; any result would be potentially out-of-date already.
* Does not check if the method already exists.
**********************************************************************/
-void _class_resolveMethod(Class cls, SEL sel, id inst)
+static void
+_class_resolveMethod(id inst, SEL sel, Class cls)
{
if (! cls->isMetaClass()) {
// try [cls resolveInstanceMethod:sel]
- _class_resolveInstanceMethod(cls, sel, inst);
+ _class_resolveInstanceMethod(inst, sel, cls);
}
else {
// try [nonMetaClass resolveClassMethod:sel]
// and [cls resolveInstanceMethod:sel]
- _class_resolveClassMethod(cls, sel, inst);
- if (!lookUpImpOrNil(cls, sel, inst,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/))
- {
- _class_resolveInstanceMethod(cls, sel, inst);
+ _class_resolveClassMethod(inst, sel, cls);
+ if (!lookUpImpOrNil(inst, sel, cls)) {
+ _class_resolveInstanceMethod(inst, sel, cls);
}
}
}
}
-/***********************************************************************
-* _class_lookupMethodAndLoadCache.
-* Method lookup for dispatchers ONLY. OTHER CODE SHOULD USE lookUpImp().
-* This lookup avoids optimistic cache scan because the dispatcher
-* already tried that.
-**********************************************************************/
-IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls)
-{
- return lookUpImpOrForward(cls, sel, obj,
- YES/*initialize*/, NO/*cache*/, YES/*resolver*/);
-}
-
-
/***********************************************************************
* lookUpImpOrForward.
* The standard IMP lookup.
-* initialize==NO tries to avoid +initialize (but sometimes fails)
-* cache==NO skips optimistic unlocked lookup (but uses cache elsewhere)
-* Most callers should use initialize==YES and cache==YES.
-* inst is an instance of cls or a subclass thereof, or nil if none is known.
+* Without LOOKUP_INITIALIZE: tries to avoid +initialize (but sometimes fails)
+* Without LOOKUP_CACHE: skips optimistic unlocked lookup (but uses cache elsewhere)
+* Most callers should use LOOKUP_INITIALIZE and LOOKUP_CACHE
+* inst is an instance of cls or a subclass thereof, or nil if none is known.
* If cls is an un-initialized metaclass then a non-nil inst is faster.
* May return _objc_msgForward_impcache. IMPs destined for external use
* must be converted to _objc_msgForward or _objc_msgForward_stret.
-* If you don't want forwarding at all, use lookUpImpOrNil() instead.
+* If you don't want forwarding at all, use LOOKUP_NIL.
**********************************************************************/
-IMP lookUpImpOrForward(Class cls, SEL sel, id inst,
- bool initialize, bool cache, bool resolver)
+IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior)
{
Class curClass;
IMP methodPC = nil;
methodListLock.assertUnlocked();
// Optimistic cache lookup
- if (cache) {
+ if (behavior & LOOKUP_CACHE) {
methodPC = _cache_getImp(cls, sel);
- if (methodPC) return methodPC;
+ if (methodPC) goto out_nolock;
}
// Check for freed class
return (IMP) _freedHandler;
// Check for +initialize
- if (initialize && !cls->isInitialized()) {
+ if ((behavior & LOOKUP_INITIALIZE) && !cls->isInitialized()) {
initializeNonMetaClass (_class_getNonMetaClass(cls, inst));
// If sel == initialize, initializeNonMetaClass will send +initialize
// and then the messenger will send +initialize again after this
// No implementation found. Try method resolver once.
- if (resolver && !triedResolver) {
+ if ((behavior & LOOKUP_RESOLVER) && !triedResolver) {
methodListLock.unlock();
- _class_resolveMethod(cls, sel, inst);
+ _class_resolveMethod(inst, sel, cls);
triedResolver = YES;
done:
methodListLock.unlock();
+ out_nolock:
+ if ((behavior & LOOKUP_NIL) && methodPC == (IMP)_objc_msgForward_impcache) {
+ return nil;
+ }
return methodPC;
}
-/***********************************************************************
-* lookUpImpOrNil.
-* Like lookUpImpOrForward, but returns nil instead of _objc_msgForward_impcache
-**********************************************************************/
-IMP lookUpImpOrNil(Class cls, SEL sel, id inst,
- bool initialize, bool cache, bool resolver)
-{
- IMP imp = lookUpImpOrForward(cls, sel, inst, initialize, cache, resolver);
- if (imp == _objc_msgForward_impcache) return nil;
- else return imp;
-}
-
-
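
// Editor's sketch (not part of Apple's diff): with the LOOKUP_* behavior flags,
// the removed lookUpImpOrNil() can survive as a thin inline wrapper. The exact
// default flag set below is an assumption for illustration; the essential part
// is LOOKUP_NIL, which the out_nolock path above turns into a nil return
// instead of _objc_msgForward_impcache.
static inline IMP
lookUpImpOrNil(id inst, SEL sel, Class cls, int behavior = 0)
{
    return lookUpImpOrForward(inst, sel, cls, behavior | LOOKUP_CACHE | LOOKUP_NIL);
}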
/***********************************************************************
* lookupMethodInClassAndLoadCache.
* Like _class_lookupMethodAndLoadCache, but does not search superclasses.
else {
cls = objc_getClass(cls->name);
}
- assert(cls);
+ ASSERT(cls);
}
return cls;
}
// Search method lists, try method resolver, etc.
- lookUpImpOrNil(cls, sel, nil,
- NO/*initialize*/, NO/*cache*/, YES/*resolver*/);
+ lookUpImpOrForward(nil, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER);
meth = _cache_getMethod(cls, sel, _objc_msgForward_impcache);
if (meth == (Method)1) {
obj->initIsa(cls);
if (cls->hasCxxCtor()) {
- return object_cxxConstructFromClass(obj, cls);
+ return object_cxxConstructFromClass(obj, cls, OBJECT_CONSTRUCT_NONE);
} else {
return obj;
}
return (*_zoneAlloc)(cls, extraBytes, z);
}
+id
+_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
+{
+ id obj;
+
+ if (fastpath(!zone)) {
+ obj = class_createInstance(cls, 0);
+ } else {
+ obj = class_createInstanceFromZone(cls, 0, zone);
+ }
+
+ if (slowpath(!obj)) obj = _objc_callBadAllocHandler(cls);
+ return obj;
+}
+
unsigned class_createInstances(Class cls, size_t extraBytes,
id *results, unsigned num_requested)
{
// weakly-referenced object has an un-+initialized isa.
// Unresolved future classes are not so protected.
if (!cls->isFuture() && !cls->isInitialized()) {
- // use lookUpImpOrForward to indirectly provoke +initialize
+ // use lookUpImpOrNil to indirectly provoke +initialize
// to avoid duplicating the code to actually send +initialize
- lookUpImpOrForward(cls, SEL_initialize, nil,
- YES/*initialize*/, YES/*cache*/, NO/*resolver*/);
+ lookUpImpOrNil(nil, @selector(initialize), cls, LOOKUP_INITIALIZE);
}
return obj->changeIsa(cls);
* return nil: construction failed because a C++ constructor threw an exception
**********************************************************************/
id
-object_cxxConstructFromClass(id obj, Class cls)
+object_cxxConstructFromClass(id obj, Class cls, int flags)
{
- assert(cls->hasCxxCtor()); // required for performance, not correctness
+ ASSERT(cls->hasCxxCtor()); // required for performance, not correctness
id (*ctor)(id);
Class supercls;
// Call superclasses' ctors first, if any.
if (supercls && supercls->hasCxxCtor()) {
- bool ok = object_cxxConstructFromClass(obj, supercls);
- if (!ok) return nil; // some superclass's ctor failed - give up
+ bool ok = object_cxxConstructFromClass(obj, supercls, flags);
+ if (slowpath(!ok)) return nil; // some superclass's ctor failed - give up
}
// Find this class's ctor, if any.
_objc_inform("CXX: calling C++ constructors for class %s",
cls->nameForLogging());
}
- if ((*ctor)(obj)) return obj; // ctor called and succeeded - ok
+ if (fastpath((*ctor)(obj))) return obj; // ctor called and succeeded - ok
- // This class's ctor was called and failed.
+ supercls = cls->superclass; // this reload avoids a spill on the stack
+
+ // This class's ctor was called and failed.
// Call superclasses's dtors to clean up.
if (supercls) object_cxxDestructFromClass(obj, supercls);
+ if (flags & OBJECT_CONSTRUCT_FREE_ONFAILURE) free(obj);
+ if (flags & OBJECT_CONSTRUCT_CALL_BADALLOC) {
+ return _objc_callBadAllocHandler(cls);
+ }
return nil;
}
BOOL class_respondsToSelector(Class cls, SEL sel)
{
- return class_respondsToSelector_inst(cls, sel, nil);
+ return class_respondsToSelector_inst(nil, sel, cls);
}
// inst is an instance of cls or a subclass thereof, or nil if none is known.
// Non-nil inst is faster in some cases. See lookUpImpOrForward() for details.
-bool class_respondsToSelector_inst(Class cls, SEL sel, id inst)
+NEVER_INLINE BOOL
+class_respondsToSelector_inst(id inst, SEL sel, Class cls)
{
- IMP imp;
-
- if (!sel || !cls) return NO;
-
// Avoids +initialize because it historically did so.
// We're not returning a callable IMP anyway.
- imp = lookUpImpOrNil(cls, sel, inst,
- NO/*initialize*/, YES/*cache*/, YES/*resolver*/);
- return bool(imp);
+ return sel && cls && lookUpImpOrNil(inst, sel, cls, LOOKUP_RESOLVER);
}
if (!cls || !sel) return nil;
- imp = lookUpImpOrNil(cls, sel, nil,
- YES/*initialize*/, YES/*cache*/, YES/*resolver*/);
+ imp = lookUpImpOrNil(nil, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER);
// Translate forwarding function to C-callable external version
if (!imp) {
return encoding_copyArgumentType(method_getTypeEncoding(m), index);
}
-
-/***********************************************************************
-* _objc_constructOrFree
-* Call C++ constructors, and free() if they fail.
-* bytes->isa must already be set.
-* cls must have cxx constructors.
-* Returns the object, or nil.
-**********************************************************************/
-id
-_objc_constructOrFree(id bytes, Class cls)
-{
- assert(cls->hasCxxCtor()); // for performance, not correctness
-
- id obj = object_cxxConstructFromClass(bytes, cls);
- if (!obj) free(bytes);
-
- return obj;
-}
-
-
/***********************************************************************
* _class_createInstancesFromZone
* Batch-allocating version of _class_createInstanceFromZone.
for (unsigned i = 0; i < num_allocated; i++) {
id obj = results[i];
obj->initIsa(cls); // fixme allow nonpointer
- if (ctor) obj = _objc_constructOrFree(obj, cls);
-
+ if (ctor) {
+ obj = object_cxxConstructFromClass(obj, cls,
+ OBJECT_CONSTRUCT_FREE_ONFAILURE);
+ }
if (obj) {
results[i-shift] = obj;
} else {
#if DEBUG
// debug build: sanitize input
for (i = 0; i < count; i++) {
- assert(attrs[i].name);
- assert(strlen(attrs[i].name) > 0);
- assert(! strchr(attrs[i].name, ','));
- assert(! strchr(attrs[i].name, '"'));
- if (attrs[i].value) assert(! strchr(attrs[i].value, ','));
+ ASSERT(attrs[i].name);
+ ASSERT(strlen(attrs[i].name) > 0);
+ ASSERT(! strchr(attrs[i].name, ','));
+ ASSERT(! strchr(attrs[i].name, '"'));
+ if (attrs[i].value) ASSERT(! strchr(attrs[i].value, ','));
}
#endif
const char *nameStart;
const char *nameEnd;
- assert(start < end);
- assert(*start);
+ ASSERT(start < end);
+ ASSERT(*start);
if (*start != '\"') {
// single-char short name
nameStart = start;
const char *valueStart;
const char *valueEnd;
- assert(start <= end);
+ ASSERT(start <= end);
valueStart = start;
valueEnd = end;
attrcount = iteratePropertyAttributes(attrs, copyOneAttribute, &ra, &rs);
- assert((uint8_t *)(ra+1) <= (uint8_t *)result+size);
- assert((uint8_t *)rs <= (uint8_t *)result+size);
+ ASSERT((uint8_t *)(ra+1) <= (uint8_t *)result+size);
+ ASSERT((uint8_t *)rs <= (uint8_t *)result+size);
if (attrcount == 0) {
free(result);
#endif
// Define SUPPORT_PREOPT=1 to enable dyld shared cache optimizations
-#if TARGET_OS_WIN32 || TARGET_OS_SIMULATOR
+#if TARGET_OS_WIN32
# define SUPPORT_PREOPT 0
#else
# define SUPPORT_PREOPT 1
# define SUPPORT_MESSAGE_LOGGING 1
#endif
+// Define HAVE_TASK_RESTARTABLE_RANGES to enable usage of
+// task_restartable_ranges_synchronize()
+#if TARGET_OS_SIMULATOR || defined(__i386__) || defined(__arm__) || !TARGET_OS_MAC
+# define HAVE_TASK_RESTARTABLE_RANGES 0
+#else
+# define HAVE_TASK_RESTARTABLE_RANGES 1
+#endif
+
// OBJC_INSTRUMENTED controls whether message dispatching is dynamically
// monitored. Monitoring introduces substantial overhead.
// NOTE: To define this condition, do so in the build command, NOT by
// because objc-class.h is public and objc-config.h is not.
//#define OBJC_INSTRUMENTED
+// In __OBJC2__, the runtimeLock is a mutex that is always held,
+// so the cache lock is redundant and can be elided.
+//
+// If the runtime lock ever becomes a rwlock again,
+// the cache lock would need to be used again
+#if __OBJC2__
+#define CONFIG_USE_CACHE_LOCK 0
+#else
+#define CONFIG_USE_CACHE_LOCK 1
+#endif
+
+// Determine how the method cache stores IMPs.
+#define CACHE_IMP_ENCODING_NONE 1 // Method cache contains raw IMP.
+#define CACHE_IMP_ENCODING_ISA_XOR 2 // Method cache contains ISA ^ IMP.
+#define CACHE_IMP_ENCODING_PTRAUTH 3 // Method cache contains ptrauth'd IMP.
+
+#if __PTRAUTH_INTRINSICS__
+// Always use ptrauth when it's supported.
+#define CACHE_IMP_ENCODING CACHE_IMP_ENCODING_PTRAUTH
+#elif defined(__arm__)
+// 32-bit ARM uses no encoding.
+#define CACHE_IMP_ENCODING CACHE_IMP_ENCODING_NONE
+#else
+// Everything else uses ISA ^ IMP.
+#define CACHE_IMP_ENCODING CACHE_IMP_ENCODING_ISA_XOR
+#endif
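
// Editor's sketch (not part of Apple's diff): what CACHE_IMP_ENCODING_ISA_XOR
// means in practice. A bucket stores imp ^ cls, so a raw bucket value is not a
// directly callable function pointer; xoring with the same class recovers it.
// The helper names and signatures below are illustrative stand-ins, not the
// runtime's real bucket_t members.
#include <objc/objc.h>
#include <stdint.h>

static inline uintptr_t example_encodeImp(IMP imp, Class cls)
{
    return (uintptr_t)imp ^ (uintptr_t)cls;     // ISA ^ IMP
}

static inline IMP example_decodeImp(uintptr_t stored, Class cls)
{
    return (IMP)(stored ^ (uintptr_t)cls);      // xor again restores imp
}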
+
+#define CACHE_MASK_STORAGE_OUTLINED 1
+#define CACHE_MASK_STORAGE_HIGH_16 2
+#define CACHE_MASK_STORAGE_LOW_4 3
+
+#if defined(__arm64__) && __LP64__
+#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_HIGH_16
+#elif defined(__arm64__) && !__LP64__
+#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_LOW_4
+#else
+#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_OUTLINED
+#endif
+
#endif
OPTION( PrintReplacedMethods, OBJC_PRINT_REPLACED_METHODS, "log methods replaced by category implementations")
OPTION( PrintDeprecation, OBJC_PRINT_DEPRECATION_WARNINGS, "warn about calls to deprecated runtime functions")
OPTION( PrintPoolHiwat, OBJC_PRINT_POOL_HIGHWATER, "log high-water marks for autorelease pools")
-OPTION( PrintCustomRR, OBJC_PRINT_CUSTOM_RR, "log classes with un-optimized custom retain/release methods")
-OPTION( PrintCustomAWZ, OBJC_PRINT_CUSTOM_AWZ, "log classes with un-optimized custom allocWithZone methods")
+OPTION( PrintCustomCore, OBJC_PRINT_CUSTOM_CORE, "log classes with custom core methods")
+OPTION( PrintCustomRR, OBJC_PRINT_CUSTOM_RR, "log classes with custom retain/release methods")
+OPTION( PrintCustomAWZ, OBJC_PRINT_CUSTOM_AWZ, "log classes with custom allocWithZone methods")
OPTION( PrintRawIsa, OBJC_PRINT_RAW_ISA, "log classes that require raw pointer isa fields")
OPTION( DebugUnload, OBJC_DEBUG_UNLOAD, "warn about poorly-behaving bundles when unloaded")
#if !__OBJC2__
// used by ExceptionHandling.framework
#endif
-__attribute__((noreturn))
+__attribute__((noreturn, cold))
void _objc_error(id self, const char *fmt, va_list ap)
{
char *buf;
va_end(vp);
}
-static __attribute__((noreturn))
+static __attribute__((noreturn, cold))
void _objc_fatalv(uint64_t reason, uint64_t flags, const char *fmt, va_list ap)
{
char *buf1;
_Exit(1);
}
else {
+ _objc_crashlog(buf1);
abort_with_reason(OS_REASON_OBJC, reason, buf1, flags);
}
}
OBJC_EXPORT void
objc_exception_throw(id _Nonnull exception)
- __OSX_AVAILABLE(10.3)
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(10.3);
OBJC_EXPORT void
objc_exception_try_enter(void * _Nonnull localExceptionData)
- __OSX_AVAILABLE(10.3)
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(10.3);
OBJC_EXPORT void
objc_exception_try_exit(void * _Nonnull localExceptionData)
- __OSX_AVAILABLE(10.3)
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(10.3);
OBJC_EXPORT id _Nonnull
objc_exception_extract(void * _Nonnull localExceptionData)
- __OSX_AVAILABLE(10.3)
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(10.3);
OBJC_EXPORT int objc_exception_match(Class _Nonnull exceptionClass,
id _Nonnull exception)
- __OSX_AVAILABLE(10.3)
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(10.3);
typedef struct {
// get table; version tells how many
OBJC_EXPORT void
objc_exception_get_functions(objc_exception_functions_t * _Nullable table)
- __OSX_AVAILABLE(10.3)
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(10.3);
// set table
OBJC_EXPORT void
objc_exception_set_functions(objc_exception_functions_t * _Nullable table)
- __OSX_AVAILABLE(10.3)
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(10.3);
// !__OBJC2__
OBJC_EXPORT uintptr_t
objc_addExceptionHandler(objc_exception_handler _Nonnull fn,
void * _Nullable context)
- __OSX_AVAILABLE(10.5)
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(10.5);
OBJC_EXPORT void
objc_removeExceptionHandler(uintptr_t token)
- __OSX_AVAILABLE(10.5)
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_AVAILABLE_OTHERS_UNAVAILABLE(10.5);
// __OBJC2__
#endif
static ThreadChainLink_t ThreadChainLink;
static ThreadChainLink_t *getChainLink() {
- // follow links until thread_self() found (someday) XXX
- objc_thread_t self = thread_self();
+ // follow links until objc_thread_self() found (someday) XXX
+ objc_thread_t self = objc_thread_self();
ThreadChainLink_t *walker = &ThreadChainLink;
while (walker->perThreadID != self) {
if (walker->next != nil) {
struct objc_typeinfo tinfo;
};
+extern "C" {
-extern "C" void _objc_exception_noop(void) { }
-extern "C" bool _objc_exception_false(void) { return 0; }
-// extern "C" bool _objc_exception_true(void) { return 1; }
-extern "C" void _objc_exception_abort1(void) {
- _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 1);
-}
-extern "C" void _objc_exception_abort2(void) {
- _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 2);
-}
-extern "C" void _objc_exception_abort3(void) {
- _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 3);
-}
-extern "C" void _objc_exception_abort4(void) {
- _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 4);
-}
-
-extern "C" bool _objc_exception_do_catch(struct objc_typeinfo *catch_tinfo,
- struct objc_typeinfo *throw_tinfo,
- void **throw_obj_p,
- unsigned outer);
+__attribute__((used))
+void _objc_exception_noop(void) { }
+__attribute__((used))
+bool _objc_exception_false(void) { return 0; }
+// bool _objc_exception_true(void) { return 1; }
+__attribute__((used))
+void _objc_exception_abort1(void) {
+ _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 1);
+}
+__attribute__((used))
+void _objc_exception_abort2(void) {
+ _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 2);
+}
+__attribute__((used))
+void _objc_exception_abort3(void) {
+ _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 3);
+}
+__attribute__((used))
+void _objc_exception_abort4(void) {
+ _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 4);
+}
+__attribute__((used))
+bool _objc_exception_do_catch(struct objc_typeinfo *catch_tinfo,
+ struct objc_typeinfo *throw_tinfo,
+ void **throw_obj_p,
+ unsigned outer);
+}
// C++ pointers to vtables are signed with no extra data.
// C++ vtable entries are signed with a number derived from the function name.
bzero(data->debug, sizeof(*data->debug));
}
- pthread_getname_np(pthread_self(), data->debug->thread, THREADNAME_COUNT);
- strlcpy(data->debug->queue,
- dispatch_queue_get_label(dispatch_get_current_queue()),
+ pthread_getname_np(objc_thread_self(), data->debug->thread, THREADNAME_COUNT);
+ strlcpy(data->debug->queue,
+ dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL),
THREADNAME_COUNT);
data->debug->backtraceSize =
backtrace(data->debug->backtrace, BACKTRACE_COUNT);
// classref_t is not fixed up at launch; use remapClass() to convert
+// The classlist, catlist, and protolist sections are marked const here
+// because those sections may be in read-only __DATA_CONST segments.
+
extern SEL *_getObjc2SelectorRefs(const header_info *hi, size_t *count);
extern message_ref_t *_getObjc2MessageRefs(const header_info *hi, size_t *count);
extern Class*_getObjc2ClassRefs(const header_info *hi, size_t *count);
extern Class*_getObjc2SuperRefs(const header_info *hi, size_t *count);
-extern classref_t *_getObjc2ClassList(const header_info *hi, size_t *count);
-extern classref_t *_getObjc2NonlazyClassList(const header_info *hi, size_t *count);
-extern category_t **_getObjc2CategoryList(const header_info *hi, size_t *count);
-extern category_t **_getObjc2NonlazyCategoryList(const header_info *hi, size_t *count);
-extern protocol_t **_getObjc2ProtocolList(const header_info *hi, size_t *count);
+extern classref_t const *_getObjc2ClassList(const header_info *hi, size_t *count);
+extern classref_t const *_getObjc2NonlazyClassList(const header_info *hi, size_t *count);
+extern category_t * const *_getObjc2CategoryList(const header_info *hi, size_t *count);
+extern category_t * const *_getObjc2CategoryList2(const header_info *hi, size_t *count);
+extern category_t * const *_getObjc2NonlazyCategoryList(const header_info *hi, size_t *count);
+extern protocol_t * const *_getObjc2ProtocolList(const header_info *hi, size_t *count);
extern protocol_t **_getObjc2ProtocolRefs(const header_info *hi, size_t *count);
// FIXME: rdar://29241917&33734254 clang doesn't sign static initializers.
extern UnsignedInitializer *getLibobjcInitializers(const header_info *hi, size_t *count);
-extern classref_t *_getObjc2NonlazyClassList(const headerType *mhdr, size_t *count);
-extern category_t **_getObjc2NonlazyCategoryList(const headerType *mhdr, size_t *count);
+extern classref_t const *_getObjc2NonlazyClassList(const headerType *mhdr, size_t *count);
+extern category_t * const *_getObjc2NonlazyCategoryList(const headerType *mhdr, size_t *count);
extern UnsignedInitializer *getLibobjcInitializers(const headerType *mhdr, size_t *count);
static inline void
GETSECT(_getObjc2MessageRefs, message_ref_t, "__objc_msgrefs");
GETSECT(_getObjc2ClassRefs, Class, "__objc_classrefs");
GETSECT(_getObjc2SuperRefs, Class, "__objc_superrefs");
-GETSECT(_getObjc2ClassList, classref_t, "__objc_classlist");
-GETSECT(_getObjc2NonlazyClassList, classref_t, "__objc_nlclslist");
-GETSECT(_getObjc2CategoryList, category_t *, "__objc_catlist");
-GETSECT(_getObjc2NonlazyCategoryList, category_t *, "__objc_nlcatlist");
-GETSECT(_getObjc2ProtocolList, protocol_t *, "__objc_protolist");
+GETSECT(_getObjc2ClassList, classref_t const, "__objc_classlist");
+GETSECT(_getObjc2NonlazyClassList, classref_t const, "__objc_nlclslist");
+GETSECT(_getObjc2CategoryList, category_t * const, "__objc_catlist");
+GETSECT(_getObjc2CategoryList2, category_t * const, "__objc_catlist2");
+GETSECT(_getObjc2NonlazyCategoryList, category_t * const, "__objc_nlcatlist");
+GETSECT(_getObjc2ProtocolList, protocol_t * const, "__objc_protolist");
GETSECT(_getObjc2ProtocolRefs, protocol_t *, "__objc_protorefs");
GETSECT(getLibobjcInitializers, UnsignedInitializer, "__objc_init_func");
OBJC_EXPORT Class _Nonnull gdb_object_getClass(id _Nullable obj)
OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0, 2.0);
+/***********************************************************************
+* Class inspection
+**********************************************************************/
+
+#if __OBJC2__
+
+// Return the raw, mangled name of cls.
+OBJC_EXPORT const char * _Nonnull
+objc_debug_class_getNameRaw(Class _Nullable cls)
+OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+
+#endif
/***********************************************************************
* Class lists for heap.
OBJC_EXPORT NXMapTable * _Nullable gdb_objc_realized_classes
OBJC_AVAILABLE(10.6, 3.1, 9.0, 1.0, 2.0);
+// A generation count of realized classes. Increments when new classes
+// are realized. This is NOT an exact count of the number of classes.
+// There is no guarantee of how much it increases when classes are
+// realized, only that it increases. When classes are
+// removed (unloading bundles or destroying dynamically allocated
+// classes) the number will also increase to signal that there has
+// been a change.
+OBJC_EXPORT uintptr_t objc_debug_realized_class_generation_count;
+
#else
// Hashes Classes, for all known classes. Custom prototype.
**********************************************************************/
#if __OBJC2__
OBJC_EXPORT const uintptr_t objc_debug_swift_stable_abi_bit
-OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0);
+OBJC_AVAILABLE(10.14.4, 12.2, 12.2, 5.2, 3.2);
#endif
+
+
+/***********************************************************************
+* AutoreleasePoolPage
+**********************************************************************/
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_magic_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_next_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_thread_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+
__END_DECLS
// APPLE_API_PRIVATE
#include "objc-private.h"
#include "message.h"
#include "objc-initialize.h"
+#include "DenseMapExtras.h"
/* classInitLock protects CLS_INITIALIZED and CLS_INITIALIZING, and
* is signalled when any class is done initializing.
monitor_t classInitLock;
+struct _objc_willInitializeClassCallback {
+ _objc_func_willInitializeClass f;
+ void *context;
+};
+static GlobalSmallVector<_objc_willInitializeClassCallback, 1> willInitializeFuncs;
+
+
/***********************************************************************
* struct _objc_initializing_classes
* Per-thread list of classes currently being initialized by that thread.
typedef struct PendingInitialize {
Class subclass;
struct PendingInitialize *next;
+
+ PendingInitialize(Class cls) : subclass(cls), next(nullptr) { }
} PendingInitialize;
-static NXMapTable *pendingInitializeMap;
+typedef objc::DenseMap<Class, PendingInitialize *> PendingInitializeMap;
+static PendingInitializeMap *pendingInitializeMap;
/***********************************************************************
* _finishInitializing
PendingInitialize *pending;
classInitLock.assertLocked();
- assert(!supercls || supercls->isInitialized());
+ ASSERT(!supercls || supercls->isInitialized());
if (PrintInitializing) {
_objc_inform("INITIALIZE: thread %p: %s is fully +initialized",
- pthread_self(), cls->nameForLogging());
+ objc_thread_self(), cls->nameForLogging());
}
// mark this class as fully +initialized
// mark any subclasses that were merely waiting for this class
if (!pendingInitializeMap) return;
- pending = (PendingInitialize *)NXMapGet(pendingInitializeMap, cls);
- if (!pending) return;
- NXMapRemove(pendingInitializeMap, cls);
-
+ auto it = pendingInitializeMap->find(cls);
+ if (it == pendingInitializeMap->end()) return;
+
+ pending = it->second;
+ pendingInitializeMap->erase(it);
+
// Destroy the pending table if it's now empty, to save memory.
- if (NXCountMapTable(pendingInitializeMap) == 0) {
- NXFreeMapTable(pendingInitializeMap);
+ if (pendingInitializeMap->size() == 0) {
+ delete pendingInitializeMap;
pendingInitializeMap = nil;
}
while (pending) {
PendingInitialize *next = pending->next;
if (pending->subclass) _finishInitializing(pending->subclass, cls);
- free(pending);
+ delete pending;
pending = next;
}
}
**********************************************************************/
static void _finishInitializingAfter(Class cls, Class supercls)
{
- PendingInitialize *pending;
classInitLock.assertLocked();
if (PrintInitializing) {
_objc_inform("INITIALIZE: thread %p: class %s will be marked as fully "
"+initialized after superclass +[%s initialize] completes",
- pthread_self(), cls->nameForLogging(),
+ objc_thread_self(), cls->nameForLogging(),
supercls->nameForLogging());
}
if (!pendingInitializeMap) {
- pendingInitializeMap =
- NXCreateMapTable(NXPtrValueMapPrototype, 10);
+ pendingInitializeMap = new PendingInitializeMap{10};
// fixme pre-size this table for CF/NSObject +initialize
}
- pending = (PendingInitialize *)malloc(sizeof(*pending));
- pending->subclass = cls;
- pending->next = (PendingInitialize *)
- NXMapGet(pendingInitializeMap, supercls);
- NXMapInsert(pendingInitializeMap, supercls, pending);
+ PendingInitialize *pending = new PendingInitialize{cls};
+ auto result = pendingInitializeMap->try_emplace(supercls, pending);
+ if (!result.second) {
+ pending->next = result.first->second;
+ result.first->second = pending;
+ }
}
{
if (PrintInitializing) {
_objc_inform("INITIALIZE: thread %p: blocking until +[%s initialize] "
- "completes", pthread_self(), cls->nameForLogging());
+ "completes", objc_thread_self(), cls->nameForLogging());
}
monitor_locker_t lock(classInitLock);
void callInitialize(Class cls)
{
- ((void(*)(Class, SEL))objc_msgSend)(cls, SEL_initialize);
+ ((void(*)(Class, SEL))objc_msgSend)(cls, @selector(initialize));
asm("");
}
Class rootCls = cls->ISA()->ISA()->superclass;
- IMP rootImp = lookUpImpOrNil(rootCls->ISA(), SEL_initialize, rootCls,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/);
- IMP imp = lookUpImpOrNil(cls->ISA(), SEL_initialize, cls,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/);
+ IMP rootImp = lookUpImpOrNil(rootCls, @selector(initialize), rootCls->ISA());
+ IMP imp = lookUpImpOrNil(cls, @selector(initialize), cls->ISA());
return (imp == nil || imp == (IMP)&objc_noop_imp || imp == rootImp);
}
if (PrintInitializing) {
_objc_inform("INITIALIZE: thread %p: skipping trivial +[%s "
"initialize] in fork() child process",
- pthread_self(), cls->nameForLogging());
+ objc_thread_self(), cls->nameForLogging());
}
lockAndFinishInitializing(cls, supercls);
}
_objc_inform("INITIALIZE: thread %p: refusing to call +[%s "
"initialize] in fork() child process because "
"it may have been in progress when fork() was called",
- pthread_self(), cls->nameForLogging());
+ objc_thread_self(), cls->nameForLogging());
}
_objc_inform_now_and_on_crash
("+[%s initialize] may have been in progress in another thread "
**********************************************************************/
void initializeNonMetaClass(Class cls)
{
- assert(!cls->isMetaClass());
+ ASSERT(!cls->isMetaClass());
Class supercls;
bool reallyInitialize = NO;
}
// Try to atomically set CLS_INITIALIZING.
+ SmallVector<_objc_willInitializeClassCallback, 1> localWillInitializeFuncs;
{
monitor_locker_t lock(classInitLock);
if (!cls->isInitialized() && !cls->isInitializing()) {
cls->setInitializing();
reallyInitialize = YES;
+
+ // Grab a copy of the will-initialize funcs with the lock held.
+ localWillInitializeFuncs.initFrom(willInitializeFuncs);
}
}
return;
}
+ for (auto callback : localWillInitializeFuncs)
+ callback.f(callback.context, cls);
+
// Send the +initialize message.
// Note that +initialize is sent to the superclass (again) if
// this class doesn't implement +initialize. 2157218
if (PrintInitializing) {
_objc_inform("INITIALIZE: thread %p: calling +[%s initialize]",
- pthread_self(), cls->nameForLogging());
+ objc_thread_self(), cls->nameForLogging());
}
// Exceptions: A +initialize call that throws an exception
if (PrintInitializing) {
_objc_inform("INITIALIZE: thread %p: finished +[%s initialize]",
- pthread_self(), cls->nameForLogging());
+ objc_thread_self(), cls->nameForLogging());
}
}
#if __OBJC2__
if (PrintInitializing) {
_objc_inform("INITIALIZE: thread %p: +[%s initialize] "
"threw an exception",
- pthread_self(), cls->nameForLogging());
+ objc_thread_self(), cls->nameForLogging());
}
@throw;
}
_objc_fatal("thread-safe class init in objc runtime is buggy!");
}
}
+
+void _objc_addWillInitializeClassFunc(_objc_func_willInitializeClass _Nonnull func, void * _Nullable context) {
+#if __OBJC2__
+ unsigned count;
+ Class *realizedClasses;
+
+ // Fetch all currently initialized classes. Do this with classInitLock held
+ // so we don't race with setting those flags.
+ {
+ monitor_locker_t initLock(classInitLock);
+ realizedClasses = objc_copyRealizedClassList(&count);
+ for (unsigned i = 0; i < count; i++) {
+ // Remove uninitialized classes from the array.
+ if (!realizedClasses[i]->isInitializing() && !realizedClasses[i]->isInitialized())
+ realizedClasses[i] = Nil;
+ }
+
+ willInitializeFuncs.append({func, context});
+ }
+
+ // Invoke the callback for all realized classes that weren't cleared out.
+ for (unsigned i = 0; i < count; i++) {
+ if (Class cls = realizedClasses[i]) {
+ func(context, cls);
+ }
+ }
+
+ free(realizedClasses);
+#endif
+}
// Return YES if GC is on and `object` is a GC allocation.
OBJC_EXPORT BOOL
objc_isAuto(id _Nullable object)
- __OSX_DEPRECATED(10.4, 10.8, "it always returns NO")
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.8, "it always returns NO");
// GC debugging
OBJC_EXPORT BOOL
objc_dumpHeap(char * _Nonnull filename, unsigned long length)
- __OSX_DEPRECATED(10.4, 10.8, "it always returns NO")
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.8, "it always returns NO");
// GC startup callback from Foundation
OBJC_EXPORT malloc_zone_t * _Nullable
objc_collect_init(int (* _Nonnull callback)(void))
- __OSX_DEPRECATED(10.4, 10.8, "it does nothing")
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.4, 10.8, "it does nothing");
+
+#if __OBJC2__
+// Copies the list of currently realized classes.
+// Intended for introspection only;
+// most users will want objc_copyClassList instead.
+OBJC_EXPORT
+Class _Nonnull * _Nullable
+objc_copyRealizedClassList(unsigned int *_Nullable outCount)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+
+typedef struct objc_imp_cache_entry {
+ SEL _Nonnull sel;
+ IMP _Nonnull imp;
+} objc_imp_cache_entry;
+
+OBJC_EXPORT
+objc_imp_cache_entry *_Nullable
+class_copyImpCache(Class _Nonnull cls, int * _Nullable outCount)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+#endif
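
// Illustrative sketch (not part of this patch): dumping a class's method
// cache with the SPI declared above. The returned buffer is assumed to
// follow the usual copy-function convention, i.e. the caller frees it.

#include <objc/runtime.h>
#include <stdio.h>
#include <stdlib.h>

static void dumpImpCache(Class cls) {
    int count = 0;
    objc_imp_cache_entry *entries = class_copyImpCache(cls, &count);
    for (int i = 0; i < count; i++) {
        printf("  %s -> %p\n", sel_getName(entries[i].sel), (void *)entries[i].imp);
    }
    free(entries);
}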
// Plainly-implemented GC barriers. Rosetta used to use these.
OBJC_EXPORT id _Nullable
objc_appRequiresGC(int fd)
__OSX_AVAILABLE(10.11)
__IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ __WATCHOS_UNAVAILABLE
+#ifndef __APPLE_BLEACH_SDK__
+ __BRIDGEOS_UNAVAILABLE
+#endif
+;
// Install missing-class callback. Used by the late unlamented ZeroLink.
OBJC_EXPORT void
OBJC2_UNAVAILABLE;
#if !(TARGET_OS_OSX && !TARGET_OS_IOSMAC && __i386__)
+// Add a class copy fixup handler. The name is a misnomer, as
+// multiple calls will install multiple handlers. Older versions
+// of the Swift runtime call it by name, and it's only used by Swift
+// so it's not worth deprecating this name in favor of a better one.
OBJC_EXPORT void
_objc_setClassCopyFixupHandler(void (* _Nonnull newFixupHandler)
- (Class _Nonnull oldClass, Class _Nonnull newClass));
-// fixme work around bug in Swift
-// OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0)
+ (Class _Nonnull oldClass, Class _Nonnull newClass))
+ OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0);
#endif
// Install handler for allocation failures.
#if !__OBJC2__
OBJC_EXPORT void
_objc_error(id _Nullable rcv, const char * _Nonnull fmt, va_list args)
- __attribute__((noreturn))
- __OSX_DEPRECATED(10.0, 10.5, "use other logging facilities instead")
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE;
+ __attribute__((noreturn, cold))
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.0, 10.5, "use other logging facilities instead");
#endif
OBJC_TAG_XPC_2 = 13,
OBJC_TAG_XPC_3 = 14,
OBJC_TAG_XPC_4 = 15,
+ OBJC_TAG_NSColor = 16,
+ OBJC_TAG_UIColor = 17,
+ OBJC_TAG_CGColor = 18,
+ OBJC_TAG_NSIndexSet = 19,
OBJC_TAG_First60BitPayload = 0,
OBJC_TAG_Last60BitPayload = 6,
// PAYLOAD_LSHIFT and PAYLOAD_RSHIFT are the payload extraction shifts.
// They are reversed here for payload insertion.
- // assert(_objc_taggedPointersEnabled());
+ // ASSERT(_objc_taggedPointersEnabled());
if (tag <= OBJC_TAG_Last60BitPayload) {
- // assert(((value << _OBJC_TAG_PAYLOAD_RSHIFT) >> _OBJC_TAG_PAYLOAD_LSHIFT) == value);
+ // ASSERT(((value << _OBJC_TAG_PAYLOAD_RSHIFT) >> _OBJC_TAG_PAYLOAD_LSHIFT) == value);
uintptr_t result =
(_OBJC_TAG_MASK |
((uintptr_t)tag << _OBJC_TAG_INDEX_SHIFT) |
((value << _OBJC_TAG_PAYLOAD_RSHIFT) >> _OBJC_TAG_PAYLOAD_LSHIFT));
return _objc_encodeTaggedPointer(result);
} else {
- // assert(tag >= OBJC_TAG_First52BitPayload);
- // assert(tag <= OBJC_TAG_Last52BitPayload);
- // assert(((value << _OBJC_TAG_EXT_PAYLOAD_RSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_LSHIFT) == value);
+ // ASSERT(tag >= OBJC_TAG_First52BitPayload);
+ // ASSERT(tag <= OBJC_TAG_Last52BitPayload);
+ // ASSERT(((value << _OBJC_TAG_EXT_PAYLOAD_RSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_LSHIFT) == value);
uintptr_t result =
(_OBJC_TAG_EXT_MASK |
((uintptr_t)(tag - OBJC_TAG_First52BitPayload) << _OBJC_TAG_EXT_INDEX_SHIFT) |
static inline objc_tag_index_t
_objc_getTaggedPointerTag(const void * _Nullable ptr)
{
- // assert(_objc_isTaggedPointer(ptr));
+ // ASSERT(_objc_isTaggedPointer(ptr));
uintptr_t value = _objc_decodeTaggedPointer(ptr);
uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
uintptr_t extTag = (value >> _OBJC_TAG_EXT_INDEX_SHIFT) & _OBJC_TAG_EXT_INDEX_MASK;
static inline uintptr_t
_objc_getTaggedPointerValue(const void * _Nullable ptr)
{
- // assert(_objc_isTaggedPointer(ptr));
+ // ASSERT(_objc_isTaggedPointer(ptr));
uintptr_t value = _objc_decodeTaggedPointer(ptr);
uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
if (basicTag == _OBJC_TAG_INDEX_MASK) {
static inline intptr_t
_objc_getTaggedPointerSignedValue(const void * _Nullable ptr)
{
- // assert(_objc_isTaggedPointer(ptr));
+ // ASSERT(_objc_isTaggedPointer(ptr));
uintptr_t value = _objc_decodeTaggedPointer(ptr);
uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
if (basicTag == _OBJC_TAG_INDEX_MASK) {
OBJC_EXPORT id _Nullable
objc_alloc_init(Class _Nullable cls)
- OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0);
-// rdar://44986431 fixme correct availability for objc_alloc_init()
+ OBJC_AVAILABLE(10.14.4, 12.2, 12.2, 5.2, 3.2);
+
+OBJC_EXPORT id _Nullable
+objc_opt_new(Class _Nullable cls)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+
+OBJC_EXPORT id _Nullable
+objc_opt_self(id _Nullable obj)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+
+OBJC_EXPORT Class _Nullable
+objc_opt_class(id _Nullable obj)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+
+OBJC_EXPORT BOOL
+objc_opt_respondsToSelector(id _Nullable obj, SEL _Nullable sel)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+
+OBJC_EXPORT BOOL
+objc_opt_isKindOfClass(id _Nullable obj, Class _Nullable cls)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+
+
+OBJC_EXPORT BOOL
+objc_sync_try_enter(id _Nonnull obj)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
OBJC_EXPORT id _Nullable
objc_retain(id _Nullable obj)
OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0, 2.0);
+/**
+ * Load a classref, which is a chunk of data containing a class
+ * pointer. May perform initialization and rewrite the classref to
+ * point to a new object, if needed. Returns the loaded Class.
+ *
+ * In particular, if the classref points to a stub class (indicated
+ * by setting the bottom bit of the class pointer to 1), then this
+ * will call the stub's initializer and then replace the classref
+ * value with the value returned by the initializer.
+ *
+ * @param clsref The classref to load.
+ * @return The loaded Class pointer.
+ */
+#if __OBJC2__
+OBJC_EXPORT _Nullable Class
+objc_loadClassref(_Nullable Class * _Nonnull clsref)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+#endif
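
// Illustrative sketch (not part of this patch): resolving a classref before
// use. Real classrefs are emitted by the compiler (for example for Swift
// stub classes); myClassRef below is a hand-written stand-in.

static Class myClassRef;   // hypothetical classref storage

static Class resolveMyClass(void) {
    // If myClassRef holds a stub class (low bit set), this runs the stub
    // initializer and rewrites myClassRef before returning the result.
    return objc_loadClassref(&myClassRef);
}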
+
+
// Extra @encode data for XPC, or NULL
OBJC_EXPORT const char * _Nullable
_protocol_getMethodTypeEncoding(Protocol * _Nonnull proto, SEL _Nonnull sel,
OBJC_AVAILABLE(10.8, 6.0, 9.0, 1.0, 2.0);
+/**
+ * Function type for a function that is called when a realized class
+ * is about to be initialized.
+ *
+ * @param context The context pointer the function was registered with.
+ * @param cls The class that's about to be initialized.
+ */
+struct mach_header;
+typedef void (*_objc_func_willInitializeClass)(void * _Nullable context, Class _Nonnull cls);
+
+/**
+ * Add a function to be called when a realized class is about to be
+ * initialized. The class can be queried and manipulated using runtime
+ * functions. Don't message it.
+ *
+ * When adding a new function, that function is immediately called with all
+ * realized classes that are already initialized or are in the process
+ * of initialization.
+ *
+ * @param func The function to add.
+ * @param context A context pointer that will be passed to the function when called.
+ */
+#define OBJC_WILLINITIALIZECLASSFUNC_DEFINED 1
+OBJC_EXPORT void _objc_addWillInitializeClassFunc(_objc_func_willInitializeClass _Nonnull func, void * _Nullable context)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0);
+
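// Illustrative sketch (not part of this patch): registering a
// will-initialize observer. The callback may inspect the class with runtime
// functions but must not message it.

#include <objc/runtime.h>
#include <stdio.h>

static void noteWillInitialize(void * _Nullable context, Class _Nonnull cls) {
    (void)context;   // unused in this sketch
    fprintf(stderr, "about to run +initialize for %s\n", class_getName(cls));
}

static void installWillInitializeObserver(void) {
#if OBJC_WILLINITIALIZECLASSFUNC_DEFINED
    // Also fires immediately for classes already initialized or initializing.
    _objc_addWillInitializeClassFunc(noteWillInitialize, NULL);
#endif
}
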
// API to only be called by classes that provide their own reference count storage
OBJC_EXPORT void
realloc(bits->bits, (newAllocated+7) / 8);
bits->bitsAllocated = newAllocated;
}
- assert(bits->bitsAllocated >= bits->bitCount);
- assert(bits->bitsAllocated >= newCount);
+ ASSERT(bits->bitsAllocated >= bits->bitCount);
+ ASSERT(bits->bitsAllocated >= newCount);
}
if (PrintLoading) {
_objc_inform("LOAD: +[%s load]\n", cls->nameForLogging());
}
- (*load_method)(cls, SEL_load);
+ (*load_method)(cls, @selector(load));
}
// Destroy the detached list.
cls->nameForLogging(),
_category_getName(cat));
}
- (*load_method)(cls, SEL_load);
+ (*load_method)(cls, @selector(load));
cats[i].cat = nil;
}
}
extern monitor_t classInitLock;
extern mutex_t selLock;
+#if CONFIG_USE_CACHE_LOCK
extern mutex_t cacheUpdateLock;
+#endif
extern recursive_mutex_t loadMethodLock;
extern mutex_t crashlog_lock;
extern spinlock_t objcMsgLogLock;
ALWAYS_INLINE Class &
classForIndex(uintptr_t index) {
- assert(index > 0);
- assert(index < (uintptr_t)objc_indexed_classes_count);
+ ASSERT(index > 0);
+ ASSERT(index < (uintptr_t)objc_indexed_classes_count);
return objc_indexed_classes[index];
}
inline Class
objc_object::getIsa()
{
- if (!isTaggedPointer()) return ISA();
+ if (fastpath(!isTaggedPointer())) return ISA();
- uintptr_t ptr = (uintptr_t)this;
- if (isExtTaggedPointer()) {
- uintptr_t slot =
- (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK;
- return objc_tag_ext_classes[slot];
- } else {
- uintptr_t slot =
- (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
- return objc_tag_classes[slot];
+ extern objc_class OBJC_CLASS_$___NSUnrecognizedTaggedPointer;
+ uintptr_t slot, ptr = (uintptr_t)this;
+ Class cls;
+
+ slot = (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK;
+ cls = objc_tag_classes[slot];
+ if (slowpath(cls == (Class)&OBJC_CLASS_$___NSUnrecognizedTaggedPointer)) {
+ slot = (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK;
+ cls = objc_tag_ext_classes[slot];
}
+ return cls;
}
+inline uintptr_t
+objc_object::isaBits() const
+{
+ return isa.bits;
+}
inline bool
objc_object::isTaggedPointer()
return ISA();
}
+inline uintptr_t
+objc_object::isaBits() const
+{
+ return isa.bits;
+}
+
inline bool
objc_object::isTaggedPointer()
inline Class
objc_object::ISA()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
#if SUPPORT_INDEXED_ISA
if (isa.nonpointer) {
uintptr_t slot = isa.indexcls;
#endif
}
+inline Class
+objc_object::rawISA()
+{
+ ASSERT(!isTaggedPointer() && !isa.nonpointer);
+ return (Class)isa.bits;
+}
inline bool
objc_object::hasNonpointerIsa()
inline void
objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
{
- assert(!cls->instancesRequireRawIsa());
- assert(hasCxxDtor == cls->hasCxxDtor());
+ ASSERT(!cls->instancesRequireRawIsa());
+ ASSERT(hasCxxDtor == cls->hasCxxDtor());
initIsa(cls, true, hasCxxDtor);
}
inline void
objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor)
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
if (!nonpointer) {
- isa.cls = cls;
+ isa = isa_t((uintptr_t)cls);
} else {
- assert(!DisableNonpointerIsa);
- assert(!cls->instancesRequireRawIsa());
+ ASSERT(!DisableNonpointerIsa);
+ ASSERT(!cls->instancesRequireRawIsa());
isa_t newisa(0);
#if SUPPORT_INDEXED_ISA
- assert(cls->classArrayIndex() > 0);
+ ASSERT(cls->classArrayIndex() > 0);
newisa.bits = ISA_INDEX_MAGIC_VALUE;
// isa.magic is part of ISA_MAGIC_VALUE
// isa.nonpointer is part of ISA_MAGIC_VALUE
// assert(newCls->isFuture() ||
// newCls->isInitializing() || newCls->isInitialized());
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
isa_t oldisa;
isa_t newisa;
// isa.magic is part of ISA_MAGIC_VALUE
// isa.nonpointer is part of ISA_MAGIC_VALUE
newisa.has_cxx_dtor = newCls->hasCxxDtor();
- assert(newCls->classArrayIndex() > 0);
+ ASSERT(newCls->classArrayIndex() > 0);
newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
#else
if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
inline bool
objc_object::isWeaklyReferenced()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
if (isa.nonpointer) return isa.weakly_referenced;
else return sidetable_isWeaklyReferenced();
}
inline bool
objc_object::hasCxxDtor()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
if (isa.nonpointer) return isa.has_cxx_dtor;
else return isa.cls->hasCxxDtor();
}
inline id
objc_object::retain()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
if (fastpath(!ISA()->hasCustomRR())) {
return rootRetain();
}
- return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
+ return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
}
newisa = oldisa;
if (slowpath(!newisa.nonpointer)) {
ClearExclusive(&isa.bits);
+ if (rawISA()->isMetaClass()) return (id)this;
if (!tryRetain && sideTableLocked) sidetable_unlock();
if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
else return sidetable_retain();
inline void
objc_object::release()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
if (fastpath(!ISA()->hasCustomRR())) {
rootRelease();
return;
}
- ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
+ ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
}
newisa = oldisa;
if (slowpath(!newisa.nonpointer)) {
ClearExclusive(&isa.bits);
+ if (rawISA()->isMetaClass()) return false;
if (sideTableLocked) sidetable_unlock();
return sidetable_release(performDealloc);
}
if (slowpath(sideTableLocked)) sidetable_unlock();
- __sync_synchronize();
+ __c11_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
if (performDealloc) {
- ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
+ ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
}
return true;
}
inline id
objc_object::autorelease()
{
- if (isTaggedPointer()) return (id)this;
- if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();
+ ASSERT(!isTaggedPointer());
+ if (fastpath(!ISA()->hasCustomRR())) {
+ return rootAutorelease();
+ }
- return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
+ return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(autorelease));
}
inline Class
objc_object::ISA()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
return isa.cls;
}
+inline Class
+objc_object::rawISA()
+{
+ return ISA();
+}
inline bool
objc_object::hasNonpointerIsa()
inline void
objc_object::initIsa(Class cls)
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
isa = (uintptr_t)cls;
}
// assert(cls->isFuture() ||
// cls->isInitializing() || cls->isInitialized());
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
isa_t oldisa, newisa;
newisa.cls = cls;
inline bool
objc_object::isWeaklyReferenced()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
return sidetable_isWeaklyReferenced();
}
inline void
objc_object::setWeaklyReferenced_nolock()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
sidetable_setWeaklyReferenced_nolock();
}
inline bool
objc_object::hasCxxDtor()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
return isa.cls->hasCxxDtor();
}
inline id
objc_object::retain()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
if (fastpath(!ISA()->hasCustomRR())) {
return sidetable_retain();
}
- return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain);
+ return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
}
inline void
objc_object::release()
{
- assert(!isTaggedPointer());
+ ASSERT(!isTaggedPointer());
if (fastpath(!ISA()->hasCustomRR())) {
sidetable_release();
return;
}
- ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release);
+ ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
}
if (isTaggedPointer()) return (id)this;
if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease();
- return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease);
+ return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(autorelease));
}
static ALWAYS_INLINE bool
prepareOptimizedReturn(ReturnDisposition disposition)
{
- assert(getReturnDisposition() == ReturnAtPlus0);
+ ASSERT(getReturnDisposition() == ReturnAtPlus0);
if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
if (disposition) setReturnDisposition(disposition);
return false;
}
+bool header_info::hasPreoptimizedSelectors() const
+{
+ return false;
+}
+
+bool header_info::hasPreoptimizedClasses() const
+{
+ return false;
+}
+
+bool header_info::hasPreoptimizedProtocols() const
+{
+ return false;
+}
+
objc_selopt_t *preoptimizedSelectors(void)
{
return nil;
}
+bool sharedCacheSupportsProtocolRoots(void)
+{
+ return false;
+}
+
Protocol *getPreoptimizedProtocol(const char *name)
{
return nil;
return nil;
}
-bool sharedRegionContains(const void *ptr)
-{
- return false;
-}
-
header_info *preoptimizedHinfoForHeader(const headerType *mhdr)
{
return nil;
using objc_opt::objc_stringhash_offset_t;
using objc_opt::objc_protocolopt_t;
+using objc_opt::objc_protocolopt2_t;
using objc_opt::objc_clsopt_t;
using objc_opt::objc_headeropt_ro_t;
using objc_opt::objc_headeropt_rw_t;
// opt is initialized to ~0 to detect incorrect use before preopt_init()
static const objc_opt_t *opt = (objc_opt_t *)~0;
-static uintptr_t shared_cache_start;
-static uintptr_t shared_cache_end;
static bool preoptimized;
extern const objc_opt_t _objc_opt_data; // in __TEXT, __objc_opt_ro
return YES;
}
+bool header_info::hasPreoptimizedSelectors() const
+{
+ // preoptimization disabled for some reason
+ if (!preoptimized) return NO;
+
+ return info()->optimizedByDyld() || info()->optimizedByDyldClosure();
+}
+
+bool header_info::hasPreoptimizedClasses() const
+{
+ // preoptimization disabled for some reason
+ if (!preoptimized) return NO;
+
+ return info()->optimizedByDyld() || info()->optimizedByDyldClosure();
+}
+
+bool header_info::hasPreoptimizedProtocols() const
+{
+ // preoptimization disabled for some reason
+ if (!preoptimized) return NO;
+
+ return info()->optimizedByDyld() || info()->optimizedByDyldClosure();
+}
+
objc_selopt_t *preoptimizedSelectors(void)
{
return opt ? opt->selopt() : nil;
}
+bool sharedCacheSupportsProtocolRoots(void)
+{
+ return (opt != nil) && (opt->protocolopt2() != nil);
+}
-Protocol *getPreoptimizedProtocol(const char *name)
+
+Protocol *getSharedCachePreoptimizedProtocol(const char *name)
{
+ // Look in the new table if we have it
+ if (objc_protocolopt2_t *protocols2 = opt ? opt->protocolopt2() : nil) {
+ // Note, we have to pass the lambda directly here as otherwise we would try
+ // message copy and autorelease.
+ return (Protocol *)protocols2->getProtocol(name, [](const void* hi) -> bool {
+ return ((header_info *)hi)->isLoaded();
+ });
+ }
+
objc_protocolopt_t *protocols = opt ? opt->protocolopt() : nil;
if (!protocols) return nil;
}
+Protocol *getPreoptimizedProtocol(const char *name)
+{
+ // Try table from dyld closure first. It was built to ignore the dupes it
+ // knows will come from the cache, so anything left in here was there when
+ // we launched
+ Protocol *result = nil;
+ // Note, we have to pass the lambda directly here as otherwise we would try
+ // message copy and autorelease.
+ _dyld_for_each_objc_protocol(name, [&result](void* protocolPtr, bool isLoaded, bool* stop) {
+ // Skip images which aren't loaded. This supports the case where dyld
+        // might soft link an image from the main binary so it's possibly not
+ // loaded yet.
+ if (!isLoaded)
+ return;
+
+        // Found a loaded image with this protocol name, so stop the search
+ result = (Protocol *)protocolPtr;
+ *stop = true;
+ });
+ if (result) return result;
+
+ return getSharedCachePreoptimizedProtocol(name);
+}
+
+
unsigned int getPreoptimizedClassUnreasonableCount()
{
objc_clsopt_t *classes = opt ? opt->clsopt() : nil;
objc_clsopt_t *classes = opt ? opt->clsopt() : nil;
if (!classes) return nil;
+ // Try table from dyld closure first. It was built to ignore the dupes it
+ // knows will come from the cache, so anything left in here was there when
+ // we launched
+ Class result = nil;
+ // Note, we have to pass the lambda directly here as otherwise we would try
+ // message copy and autorelease.
+ _dyld_for_each_objc_class(name, [&result](void* classPtr, bool isLoaded, bool* stop) {
+ // Skip images which aren't loaded. This supports the case where dyld
+        // might soft link an image from the main binary so it's possibly not
+ // loaded yet.
+ if (!isLoaded)
+ return;
+
+ // Found a loaded image with this class name, so stop the search
+ result = (Class)classPtr;
+ *stop = true;
+ });
+ if (result) return result;
+
void *cls;
void *hi;
uint32_t count = classes->getClassAndHeader(name, cls, hi);
return nil;
}
-/***********************************************************************
-* Return YES if the given pointer lies within the shared cache.
-* If the shared cache is not set up or is not valid,
-**********************************************************************/
-bool sharedRegionContains(const void *ptr)
-{
- uintptr_t address = (uintptr_t)ptr;
- return shared_cache_start <= address && address < shared_cache_end;
-}
-
namespace objc_opt {
struct objc_headeropt_ro_t {
uint32_t count;
header_info *get(const headerType *mhdr)
{
- assert(entsize == sizeof(header_info));
+ ASSERT(entsize == sizeof(header_info));
int32_t start = 0;
int32_t end = count;
hdr->fname(), hdr, hinfoRO, hinfoRW);
}
int32_t index = (int32_t)(hdr - hinfoRO->headers);
- assert(hinfoRW->entsize == sizeof(header_info_rw));
+ ASSERT(hinfoRW->entsize == sizeof(header_info_rw));
return &hinfoRW->headers[index];
}
{
// Get the memory region occupied by the shared cache.
size_t length;
- const void *start = _dyld_get_shared_cache_range(&length);
+ const uintptr_t start = (uintptr_t)_dyld_get_shared_cache_range(&length);
+
if (start) {
- shared_cache_start = (uintptr_t)start;
- shared_cache_end = shared_cache_start + length;
- } else {
- shared_cache_start = shared_cache_end = 0;
+ objc::dataSegmentsRanges.add(start, start + length);
}
// `opt` not set at compile time in order to detect too-early usage
#ifndef _OBJC_OS_H
#define _OBJC_OS_H
+#include <atomic>
#include <TargetConditionals.h>
#include "objc-config.h"
+#include "objc-private.h"
#ifdef __LP64__
# define WORD_SHIFT 3UL
static inline size_t word_align(size_t x) {
return (x + WORD_MASK) & ~WORD_MASK;
}
-
+static inline size_t align16(size_t x) {
+ return (x + size_t(15)) & ~size_t(15);
+}
// Mix-in for classes that must not be copied.
class nocopy_t {
~nocopy_t() = default;
};
+// Version of std::atomic that does not allow implicit conversions
+// to/from the wrapped type, and requires an explicit memory order
+// be passed to load() and store().
+template <typename T>
+struct explicit_atomic : public std::atomic<T> {
+ explicit explicit_atomic(T initial) noexcept : std::atomic<T>(std::move(initial)) {}
+ operator T() const = delete;
+
+ T load(std::memory_order order) const noexcept {
+ return std::atomic<T>::load(order);
+ }
+ void store(T desired, std::memory_order order) noexcept {
+ std::atomic<T>::store(desired, order);
+ }
+
+ // Convert a normal pointer to an atomic pointer. This is a
+ // somewhat dodgy thing to do, but if the atomic type is lock
+ // free and the same size as the non-atomic type, we know the
+ // representations are the same, and the compiler generates good
+ // code.
+ static explicit_atomic<T> *from_pointer(T *ptr) {
+ static_assert(sizeof(explicit_atomic<T> *) == sizeof(T *),
+ "Size of atomic must match size of original");
+ explicit_atomic<T> *atomic = (explicit_atomic<T> *)ptr;
+ ASSERT(atomic->is_lock_free());
+ return atomic;
+ }
+};
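
// Illustrative sketch (not part of this patch): intended usage. Every load
// and store must name its memory order, so relaxed vs. acquire/release
// choices stay visible at the call site instead of hiding behind defaults.

static explicit_atomic<int> gExampleFlag{0};

static void examplePublish() {
    gExampleFlag.store(1, std::memory_order_release);
}

static bool exampleObserve() {
    return gExampleFlag.load(std::memory_order_acquire) == 1;
}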
#if TARGET_OS_MAC
#define ALWAYS_INLINE inline __attribute__((always_inline))
-#define NEVER_INLINE inline __attribute__((noinline))
+#define NEVER_INLINE __attribute__((noinline))
#define fastpath(x) (__builtin_expect(bool(x), 1))
#define slowpath(x) (__builtin_expect(bool(x), 0))
#include <objc/objc-api.h>
extern void _objc_fatal(const char *fmt, ...)
- __attribute__((noreturn, format (printf, 1, 2)));
+ __attribute__((noreturn, cold, format (printf, 1, 2)));
extern void _objc_fatal_with_reason(uint64_t reason, uint64_t flags,
const char *fmt, ...)
- __attribute__((noreturn, format (printf, 3, 4)));
+ __attribute__((noreturn, cold, format (printf, 3, 4)));
#define INIT_ONCE_PTR(var, create, delete) \
do { \
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
return t1 == t2;
}
-static __inline objc_thread_t thread_self(void) {
+static __inline objc_thread_t objc_thread_self(void) {
return GetCurrentThreadId();
}
#define RECURSIVE_MUTEX_NOT_LOCKED 1
extern void recursive_mutex_init(recursive_mutex_t *m);
static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
+ ASSERT(m->mutex);
return WaitForSingleObject(m->mutex, INFINITE);
}
static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
+ ASSERT(m->mutex);
return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
}
static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
+ ASSERT(m->mutex);
return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
}
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
return pthread_equal(t1, t2);
}
-static __inline objc_thread_t thread_self(void) {
- return pthread_self();
-}
-
typedef pthread_key_t tls_key_t;
return k;
}
static inline void *tls_get(tls_key_t k) {
- return pthread_getspecific(k);
+ return pthread_getspecific(k);
}
static inline void tls_set(tls_key_t k, void *value) {
pthread_setspecific(k, value);
#if SUPPORT_DIRECT_THREAD_KEYS
-#if DEBUG
-static bool is_valid_direct_key(tls_key_t k) {
+static inline bool is_valid_direct_key(tls_key_t k) {
return ( k == SYNC_DATA_DIRECT_KEY
|| k == SYNC_COUNT_DIRECT_KEY
|| k == AUTORELEASE_POOL_KEY
+ || k == _PTHREAD_TSD_SLOT_PTHREAD_SELF
# if SUPPORT_RETURN_AUTORELEASE
|| k == RETURN_DISPOSITION_KEY
# endif
);
}
-#endif
-static inline void *tls_get_direct(tls_key_t k)
+static inline void *tls_get_direct(tls_key_t k)
{
- assert(is_valid_direct_key(k));
+ ASSERT(is_valid_direct_key(k));
if (_pthread_has_direct_tsd()) {
return _pthread_getspecific_direct(k);
}
static inline void tls_set_direct(tls_key_t k, void *value)
{
- assert(is_valid_direct_key(k));
+ ASSERT(is_valid_direct_key(k));
if (_pthread_has_direct_tsd()) {
_pthread_setspecific_direct(k, value);
}
}
-// SUPPORT_DIRECT_THREAD_KEYS
-#endif
-
-
-static inline pthread_t pthread_self_direct()
+__attribute__((const))
+static inline pthread_t objc_thread_self()
{
- return (pthread_t)
- _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
+ return (pthread_t)tls_get_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
}
-
-static inline mach_port_t mach_thread_self_direct()
+#else
+__attribute__((const))
+static inline pthread_t objc_thread_self()
{
- return (mach_port_t)(uintptr_t)
- _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
+ return pthread_self();
}
+#endif // SUPPORT_DIRECT_THREAD_KEYS
template <bool Debug> class mutex_tt;
void lock() {
lockdebug_mutex_lock(this);
+ // <rdar://problem/50384154>
+ uint32_t opts = OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | OS_UNFAIR_LOCK_ADAPTIVE_SPIN;
os_unfair_lock_lock_with_options_inline
- (&mLock, OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
+ (&mLock, (os_unfair_lock_options_t)opts);
}
void unlock() {
mLock = os_unfair_recursive_lock OS_UNFAIR_RECURSIVE_LOCK_INIT;
}
+ bool tryLock()
+ {
+ if (os_unfair_recursive_lock_trylock(&mLock)) {
+ lockdebug_recursive_mutex_lock(this);
+ return true;
+ }
+ return false;
+ }
+
bool tryUnlock()
{
if (os_unfair_recursive_lock_tryunlock4objc(&mLock)) {
#include "objc-private.h"
#include "objc-loadmethod.h"
+#include "objc-cache.h"
#if TARGET_OS_WIN32
case DLL_PROCESS_ATTACH:
environ_init();
tls_init();
- lock_init();
+ runtime_init();
sel_init(3500); // old selector heuristic
exception_init();
break;
// Verify image_info
size_t info_size = 0;
const objc_image_info *image_info = _getObjcImageInfo(mhdr,&info_size);
- assert(image_info == hi->info());
+ ASSERT(image_info == hi->info());
#endif
}
else
{
// Didn't find an hinfo in the dyld shared cache.
- // Weed out duplicates
- for (hi = FirstHeader; hi; hi = hi->getNext()) {
- if (mhdr == hi->mhdr()) return NULL;
- }
-
// Locate the __OBJC segment
size_t info_size = 0;
unsigned long seg_size;
**********************************************************************/
static bool shouldRejectGCApp(const header_info *hi)
{
- assert(hi->mhdr()->filetype == MH_EXECUTE);
+ ASSERT(hi->mhdr()->filetype == MH_EXECUTE);
if (!hi->info()->supportsGC()) {
// App does not use GC. Don't reject it.
**********************************************************************/
static bool shouldRejectGCImage(const headerType *mhdr)
{
- assert(mhdr->filetype != MH_EXECUTE);
+ ASSERT(mhdr->filetype != MH_EXECUTE);
objc_image_info *image_info;
size_t size;
#endif
+// Swift currently adds 4 callbacks.
+static GlobalSmallVector<objc_func_loadImage, 4> loadImageFuncs;
+
+void objc_addLoadImageFunc(objc_func_loadImage _Nonnull func) {
+ // Not supported on the old runtime. Not that the old runtime is supported anyway.
+#if __OBJC2__
+ mutex_locker_t lock(runtimeLock);
+
+ // Call it with all the existing images first.
+ for (auto header = FirstHeader; header; header = header->getNext()) {
+ func((struct mach_header *)header->mhdr());
+ }
+
+ // Add it to the vector for future loads.
+ loadImageFuncs.append(func);
+#endif
+}
+
+
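// Illustrative sketch (not part of this patch): a client registering a
// load-image hook. Already-loaded images are delivered immediately, then the
// hook runs for each image mapped later. The const mach_header parameter is
// an assumption about the objc_func_loadImage typedef.

#include <mach-o/loader.h>

static void noteObjCImageLoaded(const struct mach_header *mh) {
    // Inspect the image's Objective-C metadata here; keep this cheap,
    // since it runs during image loading.
    (void)mh;
}

static void installLoadImageHook(void) {
    objc_addLoadImageFunc(noteObjCImageLoaded);
}
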
/***********************************************************************
* map_images_nolock
* Process the given images which are being mapped in by dyld.
}
firstTime = NO;
+
+ // Call image load funcs after everything is set up.
+ for (auto func : loadImageFuncs) {
+ for (uint32_t i = 0; i < mhCount; i++) {
+ func(mhdrs[i]);
+ }
+ }
}
lockdebug_lock_precedes_lock(&impLock, &crashlog_lock);
#endif
lockdebug_lock_precedes_lock(&selLock, &crashlog_lock);
+#if CONFIG_USE_CACHE_LOCK
lockdebug_lock_precedes_lock(&cacheUpdateLock, &crashlog_lock);
+#endif
lockdebug_lock_precedes_lock(&objcMsgLogLock, &crashlog_lock);
lockdebug_lock_precedes_lock(&AltHandlerDebugLock, &crashlog_lock);
lockdebug_lock_precedes_lock(&AssociationsManagerLock, &crashlog_lock);
lockdebug_lock_precedes_lock(&loadMethodLock, &impLock);
#endif
lockdebug_lock_precedes_lock(&loadMethodLock, &selLock);
+#if CONFIG_USE_CACHE_LOCK
lockdebug_lock_precedes_lock(&loadMethodLock, &cacheUpdateLock);
+#endif
lockdebug_lock_precedes_lock(&loadMethodLock, &objcMsgLogLock);
lockdebug_lock_precedes_lock(&loadMethodLock, &AltHandlerDebugLock);
lockdebug_lock_precedes_lock(&loadMethodLock, &AssociationsManagerLock);
#endif
PropertyAndCppObjectAndAssocLocksPrecedeLock(&classInitLock);
PropertyAndCppObjectAndAssocLocksPrecedeLock(&selLock);
+#if CONFIG_USE_CACHE_LOCK
PropertyAndCppObjectAndAssocLocksPrecedeLock(&cacheUpdateLock);
+#endif
PropertyAndCppObjectAndAssocLocksPrecedeLock(&objcMsgLogLock);
PropertyAndCppObjectAndAssocLocksPrecedeLock(&AltHandlerDebugLock);
SideTableLocksPrecedeLock(&classInitLock);
// Some operations may occur inside runtimeLock.
lockdebug_lock_precedes_lock(&runtimeLock, &selLock);
+#if CONFIG_USE_CACHE_LOCK
lockdebug_lock_precedes_lock(&runtimeLock, &cacheUpdateLock);
+#endif
lockdebug_lock_precedes_lock(&runtimeLock, &DemangleCacheLock);
#else
// Runtime operations may occur inside SideTable locks
// Method lookup and fixup.
lockdebug_lock_precedes_lock(&methodListLock, &classLock);
lockdebug_lock_precedes_lock(&methodListLock, &selLock);
+#if CONFIG_USE_CACHE_LOCK
lockdebug_lock_precedes_lock(&methodListLock, &cacheUpdateLock);
+#endif
lockdebug_lock_precedes_lock(&methodListLock, &impLock);
lockdebug_lock_precedes_lock(&classLock, &selLock);
lockdebug_lock_precedes_lock(&classLock, &cacheUpdateLock);
impLock.lock();
#endif
selLock.lock();
+#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.lock();
+#endif
objcMsgLogLock.lock();
AltHandlerDebugLock.lock();
StructLocks.lockAll();
objcMsgLogLock.unlock();
crashlog_lock.unlock();
loadMethodLock.unlock();
+#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.unlock();
+#endif
selLock.unlock();
SideTableUnlockAll();
#if __OBJC2__
objcMsgLogLock.forceReset();
crashlog_lock.forceReset();
loadMethodLock.forceReset();
+#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.forceReset();
+#endif
selLock.forceReset();
SideTableForceResetAll();
#if __OBJC2__
environ_init();
tls_init();
static_init();
- lock_init();
+ runtime_init();
exception_init();
+ cache_init();
+ _imp_implementationWithBlock_init();
_dyld_objc_notify_register(&map_images, load_images, unmap_image);
}
#include <stdint.h>
#include <assert.h>
+// An assert that's disabled for release builds but still ensures the expression compiles.
+#ifdef NDEBUG
+#define ASSERT(x) (void)sizeof(!(x))
+#else
+#define ASSERT(x) assert(x)
+#endif
+
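// Illustrative sketch (not part of this patch): what the NDEBUG definition
// buys. The expression is still parsed and type-checked inside sizeof(), but
// never evaluated, so renaming a symbol used only in assertions still breaks
// a release build instead of rotting silently.

static int checkedElement(const int *values, int count, int i) {
    ASSERT(i >= 0 && i < count);   // compiles (but does nothing) when NDEBUG is set
    return values[i];
}
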
struct objc_class;
struct objc_object;
// ISA() assumes this is NOT a tagged pointer object
Class ISA();
+    // rawISA() assumes this is NOT a tagged pointer object or a nonpointer isa
+ Class rawISA();
+
// getIsa() allows this to be a tagged pointer object
Class getIsa();
+
+ uintptr_t isaBits() const;
// initIsa() should be used to init the isa of new objects only.
// If this object already has an isa, use changeIsa() for correctness.
// Slow paths for inline control
id rootAutorelease2();
- bool overrelease_error();
+ uintptr_t overrelease_error();
#if SUPPORT_NONPOINTER_ISA
// Unified retain count manipulation for nonpointer isa
id rootRetain(bool tryRetain, bool handleOverflow);
bool rootRelease(bool performDealloc, bool handleUnderflow);
id rootRetain_overflow(bool tryRetain);
- bool rootRelease_underflow(bool performDealloc);
+ uintptr_t rootRelease_underflow(bool performDealloc);
void clearDeallocating_slow();
__BEGIN_DECLS
+namespace objc {
+
+struct SafeRanges {
+private:
+ struct Range {
+ uintptr_t start;
+ uintptr_t end;
+
+ inline bool contains(uintptr_t ptr) const {
+ uintptr_t m_start, m_end;
+#if __arm64__
+ // <rdar://problem/48304934> Force the compiler to use ldp
+ // we really don't want 2 loads and 2 jumps.
+ __asm__(
+# if __LP64__
+ "ldp %x[one], %x[two], [%x[src]]"
+# else
+ "ldp %w[one], %w[two], [%x[src]]"
+# endif
+ : [one] "=r" (m_start), [two] "=r" (m_end)
+ : [src] "r" (this)
+ );
+#else
+ m_start = start;
+ m_end = end;
+#endif
+ return m_start <= ptr && ptr < m_end;
+ }
+ };
+
+ struct Range *ranges;
+ uint32_t count;
+ uint32_t size : 31;
+ uint32_t sorted : 1;
+
+public:
+ inline bool contains(uint16_t witness, uintptr_t ptr) const {
+ return witness < count && ranges[witness].contains(ptr);
+ }
+
+ bool find(uintptr_t ptr, uint32_t &pos);
+ void add(uintptr_t start, uintptr_t end);
+ void remove(uintptr_t start, uintptr_t end);
+};
+
+extern struct SafeRanges dataSegmentsRanges;
+
+} // objc
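
// Illustrative sketch (not part of this patch): querying the registered data
// segment ranges. find() locates the range index for a pointer; that index
// can be kept as a cheap "witness" hint and re-validated later with
// contains(), which re-checks the pointer against the recorded range.

static bool isInRegisteredDataSegment(uintptr_t ptr, uint16_t &witnessHint) {
    uint32_t index;
    if (!objc::dataSegmentsRanges.find(ptr, index)) return false;
    witnessHint = (uint16_t)index;   // sketch: assumes the index fits a witness
    return true;
}

static bool stillInSameSegment(uintptr_t ptr, uint16_t witnessHint) {
    return objc::dataSegmentsRanges.contains(witnessHint, ptr);
}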
+
struct header_info;
// Split out the rw data from header info. For now put it in a huge array
bool isPreoptimized() const;
+ bool hasPreoptimizedSelectors() const;
+
+ bool hasPreoptimizedClasses() const;
+
+ bool hasPreoptimizedProtocols() const;
+
#if !__OBJC2__
struct old_protocol **proto_refs;
struct objc_module *mod_ptr;
extern header_info *FirstHeader;
extern header_info *LastHeader;
-extern int HeaderCount;
extern void appendHeader(header_info *hi);
extern void removeHeader(header_info *hi);
extern void sel_init(size_t selrefCount);
extern SEL sel_registerNameNoLock(const char *str, bool copy);
-extern SEL SEL_load;
-extern SEL SEL_initialize;
-extern SEL SEL_resolveClassMethod;
-extern SEL SEL_resolveInstanceMethod;
extern SEL SEL_cxx_construct;
extern SEL SEL_cxx_destruct;
-extern SEL SEL_retain;
-extern SEL SEL_release;
-extern SEL SEL_autorelease;
-extern SEL SEL_retainCount;
-extern SEL SEL_alloc;
-extern SEL SEL_allocWithZone;
-extern SEL SEL_dealloc;
-extern SEL SEL_copy;
-extern SEL SEL_new;
-extern SEL SEL_forwardInvocation;
-extern SEL SEL_tryRetain;
-extern SEL SEL_isDeallocating;
-extern SEL SEL_retainWeakReference;
-extern SEL SEL_allowsWeakReference;
/* preoptimization */
extern void preopt_init(void);
extern objc_selopt_t *preoptimizedSelectors(void);
+extern bool sharedCacheSupportsProtocolRoots(void);
extern Protocol *getPreoptimizedProtocol(const char *name);
+extern Protocol *getSharedCachePreoptimizedProtocol(const char *name);
extern unsigned getPreoptimizedClassUnreasonableCount();
extern Class getPreoptimizedClass(const char *name);
extern Class* copyPreoptimizedClasses(const char *name, int *outCount);
-extern bool sharedRegionContains(const void *ptr);
-
extern Class _calloc_class(size_t size);
/* method lookup */
-extern IMP lookUpImpOrNil(Class, SEL, id obj, bool initialize, bool cache, bool resolver);
-extern IMP lookUpImpOrForward(Class, SEL, id obj, bool initialize, bool cache, bool resolver);
+enum {
+ LOOKUP_INITIALIZE = 1,
+ LOOKUP_RESOLVER = 2,
+ LOOKUP_CACHE = 4,
+ LOOKUP_NIL = 8,
+};
+extern IMP lookUpImpOrForward(id obj, SEL, Class cls, int behavior);
+
+static inline IMP
+lookUpImpOrNil(id obj, SEL sel, Class cls, int behavior = 0)
+{
+ return lookUpImpOrForward(obj, sel, cls, behavior | LOOKUP_CACHE | LOOKUP_NIL);
+}
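
// Illustrative sketch (not part of this patch): how the behavior flags
// compose. A hypothetical caller that wants lazy +initialize and method
// resolver support asks for those bits explicitly; a pure introspection
// path uses lookUpImpOrNil(), which adds LOOKUP_CACHE | LOOKUP_NIL itself.

static IMP findImpAllowingInitialize(id obj, SEL sel, Class cls) {
    return lookUpImpOrForward(obj, sel, cls,
                              LOOKUP_INITIALIZE | LOOKUP_RESOLVER | LOOKUP_CACHE);
}

static bool peekResponds(id obj, SEL sel, Class cls) {
    return lookUpImpOrNil(obj, sel, cls) != nullptr;
}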
extern IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);
-extern bool class_respondsToSelector_inst(Class cls, SEL sel, id inst);
+extern BOOL class_respondsToSelector_inst(id inst, SEL sel, Class cls);
extern Class class_initialize(Class cls, id inst);
extern bool objcMsgLogEnabled;
SEL selector);
/* message dispatcher */
-extern IMP _class_lookupMethodAndLoadCache3(id, SEL, Class);
#if !OBJC_OLD_DISPATCH_PROTOTYPES
extern void _objc_msgForward_impcache(void);
#endif
/* errors */
-extern void __objc_error(id, const char *, ...) __attribute__((format (printf, 2, 3), noreturn));
-extern void _objc_inform(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
-extern void _objc_inform_on_crash(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
-extern void _objc_inform_now_and_on_crash(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
-extern void _objc_inform_deprecated(const char *oldname, const char *newname) __attribute__((noinline));
+extern id(*badAllocHandler)(Class);
+extern id _objc_callBadAllocHandler(Class cls) __attribute__((cold, noinline));
+extern void __objc_error(id, const char *, ...) __attribute__((cold, format (printf, 2, 3), noreturn));
+extern void _objc_inform(const char *fmt, ...) __attribute__((cold, format(printf, 1, 2)));
+extern void _objc_inform_on_crash(const char *fmt, ...) __attribute__((cold, format (printf, 1, 2)));
+extern void _objc_inform_now_and_on_crash(const char *fmt, ...) __attribute__((cold, format (printf, 1, 2)));
+extern void _objc_inform_deprecated(const char *oldname, const char *newname) __attribute__((cold, noinline));
extern void inform_duplicate(const char *name, Class oldCls, Class cls);
/* magic */
extern char *copyPropertyAttributeValue(const char *attrs, const char *name);
/* locking */
-extern void lock_init(void);
class monitor_locker_t : nocopy_t {
monitor_t& lock;
#undef OPTION
extern void environ_init(void);
+extern void runtime_init(void);
extern void logReplacedMethod(const char *className, SEL s, bool isMeta, const char *catName, IMP oldImp, IMP newImp);
extern id objc_autoreleaseReturnValue(id obj);
// block trampolines
+extern void _imp_implementationWithBlock_init(void);
extern IMP _imp_implementationWithBlockNoCopy(id block);
// layout.h
extern Ivar _class_getVariable(Class cls, const char *name);
extern unsigned _class_createInstancesFromZone(Class cls, size_t extraBytes, void *zone, id *results, unsigned num_requested);
-extern id _objc_constructOrFree(id bytes, Class cls);
extern const char *_category_getName(Category cat);
extern const char *_category_getClassName(Category cat);
extern Class _category_getClass(Category cat);
extern IMP _category_getLoadMethod(Category cat);
-extern id object_cxxConstructFromClass(id obj, Class cls);
+enum {
+ OBJECT_CONSTRUCT_NONE = 0,
+ OBJECT_CONSTRUCT_FREE_ONFAILURE = 1,
+ OBJECT_CONSTRUCT_CALL_BADALLOC = 2,
+};
+extern id object_cxxConstructFromClass(id obj, Class cls, int flags);
extern void object_cxxDestruct(id obj);
extern void fixupCopiedIvars(id newObject, id oldObject);
// Verify alignment expectations.
uintptr_t base = (uintptr_t)&array[0].value;
uintptr_t delta = (uintptr_t)&array[1].value - base;
- assert(delta % CacheLineSize == 0);
- assert(base % CacheLineSize == 0);
+ ASSERT(delta % CacheLineSize == 0);
+ ASSERT(base % CacheLineSize == 0);
}
#else
constexpr StripedMap() {}
};
+// A small vector for use as a global variable. Only supports appending and
+// iteration. Stores up to InlineCount elements inline, and further elements
+// in a heap allocation. There is no attempt to amortize reallocation cost;
+// this is intended for situations where zero or one element is common, two
+// might happen, and three or more is very rare.
+//
+// This does not clean up its allocation, and thus cannot be used as a local
+// variable or member of something with limited lifetime.
+
+template <typename T, unsigned InlineCount>
+class GlobalSmallVector {
+ static_assert(std::is_pod<T>::value, "SmallVector requires POD types");
+
+protected:
+ unsigned count{0};
+ union {
+ T inlineElements[InlineCount];
+ T *elements;
+ };
+
+public:
+ void append(const T &val) {
+ if (count < InlineCount) {
+ // We have space. Store the new value inline.
+ inlineElements[count] = val;
+ } else if (count == InlineCount) {
+ // Inline storage is full. Switch to a heap allocation.
+ T *newElements = (T *)malloc((count + 1) * sizeof(T));
+ memcpy(newElements, inlineElements, count * sizeof(T));
+ newElements[count] = val;
+ elements = newElements;
+ } else {
+ // Resize the heap allocation and append.
+ elements = (T *)realloc(elements, (count + 1) * sizeof(T));
+ elements[count] = val;
+ }
+ count++;
+ }
+
+ const T *begin() const {
+ return count <= InlineCount ? inlineElements : elements;
+ }
+
+ const T *end() const {
+ return begin() + count;
+ }
+};
+
+// A small vector that cleans up its internal memory allocation when destroyed.
+template <typename T, unsigned InlineCount>
+class SmallVector: public GlobalSmallVector<T, InlineCount> {
+public:
+ ~SmallVector() {
+ if (this->count > InlineCount)
+ free(this->elements);
+ }
+
+ template <unsigned OtherCount>
+ void initFrom(const GlobalSmallVector<T, OtherCount> &other) {
+ ASSERT(this->count == 0);
+ this->count = (unsigned)(other.end() - other.begin());
+ if (this->count > InlineCount) {
+ this->elements = (T *)memdup(other.begin(), this->count * sizeof(T));
+ } else {
+ memcpy(this->inlineElements, other.begin(), this->count * sizeof(T));
+ }
+ }
+};
+
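// Illustrative sketch (not part of this patch): the intended usage pattern.
// A GlobalSmallVector holds process-lifetime registrations; a SmallVector
// can snapshot it (typically while a lock is held) and be iterated after the
// lock is dropped, cleaning up its own heap storage when it goes out of scope.

typedef void (*example_callback_t)(void *context);   // hypothetical callback type

static GlobalSmallVector<example_callback_t, 1> exampleCallbacks;

static void registerExampleCallback(example_callback_t cb) {
    exampleCallbacks.append(cb);          // global storage, never freed
}

static void fireExampleCallbacks(void *context) {
    SmallVector<example_callback_t, 1> snapshot;
    snapshot.initFrom(exampleCallbacks);  // copy, typically under a lock
    for (auto cb : snapshot) {
        cb(context);
    }
}
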
// Pointer hash function.
// This is not a terrific hash, but it is fast
// and not outrageously flawed for our purposes.
__BEGIN_DECLS
-extern void _object_set_associative_reference(id object, void *key, id value, uintptr_t policy);
-extern id _object_get_associative_reference(id object, void *key);
+extern void _objc_associations_init();
+extern void _object_set_associative_reference(id object, const void *key, id value, uintptr_t policy);
+extern id _object_get_associative_reference(id object, const void *key);
extern void _object_remove_assocations(id object);
__END_DECLS
#include "objc-private.h"
#include <objc/message.h>
#include <map>
+#include "DenseMapExtras.h"
-#if _LIBCPP_VERSION
-# include <unordered_map>
-#else
-# include <tr1/unordered_map>
- using namespace tr1;
-#endif
+// expanded policy bits.
+enum {
+ OBJC_ASSOCIATION_SETTER_ASSIGN = 0,
+ OBJC_ASSOCIATION_SETTER_RETAIN = 1,
+ OBJC_ASSOCIATION_SETTER_COPY = 3, // NOTE: both bits are set, so we can simply test 1 bit in releaseValue below.
+ OBJC_ASSOCIATION_GETTER_READ = (0 << 8),
+ OBJC_ASSOCIATION_GETTER_RETAIN = (1 << 8),
+ OBJC_ASSOCIATION_GETTER_AUTORELEASE = (2 << 8)
+};
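
// Illustrative sketch (not part of this patch): the public association API
// that these expanded bits back. Client code only ever sees the
// <objc/runtime.h> policies; the setter/getter bit split above is internal.

#import <objc/runtime.h>

static const void *kNameKey = &kNameKey;   // hypothetical association key

static void attachName(id object, id name) {
    objc_setAssociatedObject(object, kNameKey, name,
                             OBJC_ASSOCIATION_RETAIN_NONATOMIC);
}

static id attachedName(id object) {
    return objc_getAssociatedObject(object, kNameKey);
}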
-// wrap all the murky C++ details in a namespace to get them out of the way.
+spinlock_t AssociationsManagerLock;
-namespace objc_references_support {
- struct DisguisedPointerEqual {
- bool operator()(uintptr_t p1, uintptr_t p2) const {
- return p1 == p2;
- }
- };
-
- struct DisguisedPointerHash {
- uintptr_t operator()(uintptr_t k) const {
- // borrowed from CFSet.c
-#if __LP64__
- uintptr_t a = 0x4368726973746F70ULL;
- uintptr_t b = 0x686572204B616E65ULL;
-#else
- uintptr_t a = 0x4B616E65UL;
- uintptr_t b = 0x4B616E65UL;
-#endif
- uintptr_t c = 1;
- a += k;
-#if __LP64__
- a -= b; a -= c; a ^= (c >> 43);
- b -= c; b -= a; b ^= (a << 9);
- c -= a; c -= b; c ^= (b >> 8);
- a -= b; a -= c; a ^= (c >> 38);
- b -= c; b -= a; b ^= (a << 23);
- c -= a; c -= b; c ^= (b >> 5);
- a -= b; a -= c; a ^= (c >> 35);
- b -= c; b -= a; b ^= (a << 49);
- c -= a; c -= b; c ^= (b >> 11);
- a -= b; a -= c; a ^= (c >> 12);
- b -= c; b -= a; b ^= (a << 18);
- c -= a; c -= b; c ^= (b >> 22);
-#else
- a -= b; a -= c; a ^= (c >> 13);
- b -= c; b -= a; b ^= (a << 8);
- c -= a; c -= b; c ^= (b >> 13);
- a -= b; a -= c; a ^= (c >> 12);
- b -= c; b -= a; b ^= (a << 16);
- c -= a; c -= b; c ^= (b >> 5);
- a -= b; a -= c; a ^= (c >> 3);
- b -= c; b -= a; b ^= (a << 10);
- c -= a; c -= b; c ^= (b >> 15);
-#endif
- return c;
- }
- };
-
- struct ObjectPointerLess {
- bool operator()(const void *p1, const void *p2) const {
- return p1 < p2;
- }
- };
-
- struct ObjcPointerHash {
- uintptr_t operator()(void *p) const {
- return DisguisedPointerHash()(uintptr_t(p));
- }
- };
-
- // STL allocator that uses the runtime's internal allocator.
-
- template <typename T> struct ObjcAllocator {
- typedef T value_type;
- typedef value_type* pointer;
- typedef const value_type *const_pointer;
- typedef value_type& reference;
- typedef const value_type& const_reference;
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
-
- template <typename U> struct rebind { typedef ObjcAllocator<U> other; };
-
- template <typename U> ObjcAllocator(const ObjcAllocator<U>&) {}
- ObjcAllocator() {}
- ObjcAllocator(const ObjcAllocator&) {}
- ~ObjcAllocator() {}
-
- pointer address(reference x) const { return &x; }
- const_pointer address(const_reference x) const {
- return x;
- }
+namespace objc {
- pointer allocate(size_type n, const_pointer = 0) {
- return static_cast<pointer>(::malloc(n * sizeof(T)));
- }
+class ObjcAssociation {
+ uintptr_t _policy;
+ id _value;
+public:
+ ObjcAssociation(uintptr_t policy, id value) : _policy(policy), _value(value) {}
+ ObjcAssociation() : _policy(0), _value(nil) {}
+ ObjcAssociation(const ObjcAssociation &other) = default;
+ ObjcAssociation &operator=(const ObjcAssociation &other) = default;
+ ObjcAssociation(ObjcAssociation &&other) : ObjcAssociation() {
+ swap(other);
+ }
+
+ inline void swap(ObjcAssociation &other) {
+ std::swap(_policy, other._policy);
+ std::swap(_value, other._value);
+ }
- void deallocate(pointer p, size_type) { ::free(p); }
+ inline uintptr_t policy() const { return _policy; }
+ inline id value() const { return _value; }
+
+ inline void acquireValue() {
+ if (_value) {
+ switch (_policy & 0xFF) {
+ case OBJC_ASSOCIATION_SETTER_RETAIN:
+ _value = objc_retain(_value);
+ break;
+ case OBJC_ASSOCIATION_SETTER_COPY:
+ _value = ((id(*)(id, SEL))objc_msgSend)(_value, @selector(copy));
+ break;
+ }
+ }
+ }
- size_type max_size() const {
- return static_cast<size_type>(-1) / sizeof(T);
+ inline void releaseHeldValue() {
+ if (_value && (_policy & OBJC_ASSOCIATION_SETTER_RETAIN)) {
+ objc_release(_value);
}
+ }
- void construct(pointer p, const value_type& x) {
- new(p) value_type(x);
+ inline void retainReturnedValue() {
+ if (_value && (_policy & OBJC_ASSOCIATION_GETTER_RETAIN)) {
+ objc_retain(_value);
}
+ }
- void destroy(pointer p) { p->~value_type(); }
-
- void operator=(const ObjcAllocator&);
-
- };
-
- template<> struct ObjcAllocator<void> {
- typedef void value_type;
- typedef void* pointer;
- typedef const void *const_pointer;
- template <typename U> struct rebind { typedef ObjcAllocator<U> other; };
- };
-
- typedef uintptr_t disguised_ptr_t;
- inline disguised_ptr_t DISGUISE(id value) { return ~uintptr_t(value); }
- inline id UNDISGUISE(disguised_ptr_t dptr) { return id(~dptr); }
-
- class ObjcAssociation {
- uintptr_t _policy;
- id _value;
- public:
- ObjcAssociation(uintptr_t policy, id value) : _policy(policy), _value(value) {}
- ObjcAssociation() : _policy(0), _value(nil) {}
-
- uintptr_t policy() const { return _policy; }
- id value() const { return _value; }
-
- bool hasValue() { return _value != nil; }
- };
-
-#if TARGET_OS_WIN32
- typedef hash_map<void *, ObjcAssociation> ObjectAssociationMap;
- typedef hash_map<disguised_ptr_t, ObjectAssociationMap *> AssociationsHashMap;
-#else
- typedef ObjcAllocator<std::pair<void * const, ObjcAssociation> > ObjectAssociationMapAllocator;
- class ObjectAssociationMap : public std::map<void *, ObjcAssociation, ObjectPointerLess, ObjectAssociationMapAllocator> {
- public:
- void *operator new(size_t n) { return ::malloc(n); }
- void operator delete(void *ptr) { ::free(ptr); }
- };
- typedef ObjcAllocator<std::pair<const disguised_ptr_t, ObjectAssociationMap*> > AssociationsHashMapAllocator;
- class AssociationsHashMap : public unordered_map<disguised_ptr_t, ObjectAssociationMap *, DisguisedPointerHash, DisguisedPointerEqual, AssociationsHashMapAllocator> {
- public:
- void *operator new(size_t n) { return ::malloc(n); }
- void operator delete(void *ptr) { ::free(ptr); }
- };
-#endif
-}
+ inline id autoreleaseReturnedValue() {
+ if (slowpath(_value && (_policy & OBJC_ASSOCIATION_GETTER_AUTORELEASE))) {
+ return objc_autorelease(_value);
+ }
+ return _value;
+ }
+};
-using namespace objc_references_support;
+typedef DenseMap<const void *, ObjcAssociation> ObjectAssociationMap;
+typedef DenseMap<DisguisedPtr<objc_object>, ObjectAssociationMap> AssociationsHashMap;
// class AssociationsManager manages a lock / hash table singleton pair.
-// Allocating an instance acquires the lock, and calling its assocations()
-// method lazily allocates the hash table.
-
-spinlock_t AssociationsManagerLock;
+// Allocating an instance acquires the lock.
class AssociationsManager {
- // associative references: object pointer -> PtrPtrHashMap.
- static AssociationsHashMap *_map;
+ using Storage = ExplicitInitDenseMap<DisguisedPtr<objc_object>, ObjectAssociationMap>;
+ static Storage _mapStorage;
+
public:
AssociationsManager() { AssociationsManagerLock.lock(); }
~AssociationsManager() { AssociationsManagerLock.unlock(); }
-
- AssociationsHashMap &associations() {
- if (_map == NULL)
- _map = new AssociationsHashMap();
- return *_map;
+
+ AssociationsHashMap &get() {
+ return _mapStorage.get();
+ }
+
+ static void init() {
+ _mapStorage.init();
}
};
-AssociationsHashMap *AssociationsManager::_map = NULL;
+AssociationsManager::Storage AssociationsManager::_mapStorage;
-// expanded policy bits.
+} // namespace objc
-enum {
- OBJC_ASSOCIATION_SETTER_ASSIGN = 0,
- OBJC_ASSOCIATION_SETTER_RETAIN = 1,
- OBJC_ASSOCIATION_SETTER_COPY = 3, // NOTE: both bits are set, so we can simply test 1 bit in releaseValue below.
- OBJC_ASSOCIATION_GETTER_READ = (0 << 8),
- OBJC_ASSOCIATION_GETTER_RETAIN = (1 << 8),
- OBJC_ASSOCIATION_GETTER_AUTORELEASE = (2 << 8)
-};
+using namespace objc;
+
+void
+_objc_associations_init()
+{
+ AssociationsManager::init();
+}
+
+id
+_object_get_associative_reference(id object, const void *key)
+{
+ ObjcAssociation association{};
-id _object_get_associative_reference(id object, void *key) {
- id value = nil;
- uintptr_t policy = OBJC_ASSOCIATION_ASSIGN;
{
AssociationsManager manager;
- AssociationsHashMap &associations(manager.associations());
- disguised_ptr_t disguised_object = DISGUISE(object);
- AssociationsHashMap::iterator i = associations.find(disguised_object);
+ AssociationsHashMap &associations(manager.get());
+ AssociationsHashMap::iterator i = associations.find((objc_object *)object);
if (i != associations.end()) {
- ObjectAssociationMap *refs = i->second;
- ObjectAssociationMap::iterator j = refs->find(key);
- if (j != refs->end()) {
- ObjcAssociation &entry = j->second;
- value = entry.value();
- policy = entry.policy();
- if (policy & OBJC_ASSOCIATION_GETTER_RETAIN) {
- objc_retain(value);
- }
+ ObjectAssociationMap &refs = i->second;
+ ObjectAssociationMap::iterator j = refs.find(key);
+ if (j != refs.end()) {
+ association = j->second;
+ association.retainReturnedValue();
}
}
}
- if (value && (policy & OBJC_ASSOCIATION_GETTER_AUTORELEASE)) {
- objc_autorelease(value);
- }
- return value;
-}
-
-static id acquireValue(id value, uintptr_t policy) {
- switch (policy & 0xFF) {
- case OBJC_ASSOCIATION_SETTER_RETAIN:
- return objc_retain(value);
- case OBJC_ASSOCIATION_SETTER_COPY:
- return ((id(*)(id, SEL))objc_msgSend)(value, SEL_copy);
- }
- return value;
-}
-static void releaseValue(id value, uintptr_t policy) {
- if (policy & OBJC_ASSOCIATION_SETTER_RETAIN) {
- return objc_release(value);
- }
+ return association.autoreleaseReturnedValue();
}
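// Note on the ordering above: retainReturnedValue() runs while the
// AssociationsManager lock is still held, so a concurrent setter or
// _object_remove_assocations() cannot release the value out from under the
// reader; autoreleaseReturnedValue() then runs after the lock is dropped, so
// the autorelease (and any dealloc it may trigger) never happens inside the lock.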
-struct ReleaseValue {
- void operator() (ObjcAssociation &association) {
- releaseValue(association.value(), association.policy());
- }
-};
-
-void _object_set_associative_reference(id object, void *key, id value, uintptr_t policy) {
+void
+_object_set_associative_reference(id object, const void *key, id value, uintptr_t policy)
+{
// This code used to work when nil was passed for object and key. Some code
// probably relies on that to not crash. Check and handle it explicitly.
// rdar://problem/44094390
if (!object && !value) return;
-
- assert(object);
-
+
if (object->getIsa()->forbidsAssociatedObjects())
_objc_fatal("objc_setAssociatedObject called on instance (%p) of class %s which does not allow associated objects", object, object_getClassName(object));
-
+
+ DisguisedPtr<objc_object> disguised{(objc_object *)object};
+ ObjcAssociation association{policy, value};
+
// retain the new value (if any) outside the lock.
- ObjcAssociation old_association(0, nil);
- id new_value = value ? acquireValue(value, policy) : nil;
+ association.acquireValue();
+
{
AssociationsManager manager;
- AssociationsHashMap &associations(manager.associations());
- disguised_ptr_t disguised_object = DISGUISE(object);
- if (new_value) {
- // break any existing association.
- AssociationsHashMap::iterator i = associations.find(disguised_object);
- if (i != associations.end()) {
- // secondary table exists
- ObjectAssociationMap *refs = i->second;
- ObjectAssociationMap::iterator j = refs->find(key);
- if (j != refs->end()) {
- old_association = j->second;
- j->second = ObjcAssociation(policy, new_value);
- } else {
- (*refs)[key] = ObjcAssociation(policy, new_value);
- }
- } else {
- // create the new association (first time).
- ObjectAssociationMap *refs = new ObjectAssociationMap;
- associations[disguised_object] = refs;
- (*refs)[key] = ObjcAssociation(policy, new_value);
+ AssociationsHashMap &associations(manager.get());
+
+ if (value) {
+ auto refs_result = associations.try_emplace(disguised, ObjectAssociationMap{});
+ if (refs_result.second) {
+ /* it's the first association we make */
object->setHasAssociatedObjects();
}
+
+ /* establish or replace the association */
+ auto &refs = refs_result.first->second;
+ auto result = refs.try_emplace(key, std::move(association));
+ if (!result.second) {
+ association.swap(result.first->second);
+ }
} else {
- // setting the association to nil breaks the association.
- AssociationsHashMap::iterator i = associations.find(disguised_object);
- if (i != associations.end()) {
- ObjectAssociationMap *refs = i->second;
- ObjectAssociationMap::iterator j = refs->find(key);
- if (j != refs->end()) {
- old_association = j->second;
- refs->erase(j);
+ auto refs_it = associations.find(disguised);
+ if (refs_it != associations.end()) {
+ auto &refs = refs_it->second;
+ auto it = refs.find(key);
+ if (it != refs.end()) {
+ association.swap(it->second);
+ refs.erase(it);
+ if (refs.size() == 0) {
+ associations.erase(refs_it);
+
+ }
}
}
}
}
+
// release the old value (outside of the lock).
- if (old_association.hasValue()) ReleaseValue()(old_association);
+ association.releaseHeldValue();
}
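// Usage sketch (hypothetical category, illustrative only): the public
// objc_setAssociatedObject / objc_getAssociatedObject entry points funnel
// into the two functions above.
#import <Foundation/Foundation.h>
#import <objc/runtime.h>

@interface NSObject (AssociatedTag)
- (NSString *)tag;
- (void)setTag:(NSString *)tag;
@end

@implementation NSObject (AssociatedTag)
static const void *kTagKey = &kTagKey;   // unique address used as the association key

- (void)setTag:(NSString *)tag {
    objc_setAssociatedObject(self, kTagKey, tag, OBJC_ASSOCIATION_COPY_NONATOMIC);
}
- (NSString *)tag {
    return objc_getAssociatedObject(self, kTagKey);
}
@end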
-void _object_remove_assocations(id object) {
- vector< ObjcAssociation,ObjcAllocator<ObjcAssociation> > elements;
+// Unlike setting/getting an associated reference,
+// this function is performance sensitive because of
+// raw isa objects (such as OS Objects) that can't track
+// whether they have associated objects.
+void
+_object_remove_assocations(id object)
+{
+ ObjectAssociationMap refs{};
+
{
AssociationsManager manager;
- AssociationsHashMap &associations(manager.associations());
- if (associations.size() == 0) return;
- disguised_ptr_t disguised_object = DISGUISE(object);
- AssociationsHashMap::iterator i = associations.find(disguised_object);
+ AssociationsHashMap &associations(manager.get());
+ AssociationsHashMap::iterator i = associations.find((objc_object *)object);
if (i != associations.end()) {
- // copy all of the associations that need to be removed.
- ObjectAssociationMap *refs = i->second;
- for (ObjectAssociationMap::iterator j = refs->begin(), end = refs->end(); j != end; ++j) {
- elements.push_back(j->second);
- }
- // remove the secondary table.
- delete refs;
+ refs.swap(i->second);
associations.erase(i);
}
}
- // the calls to releaseValue() happen outside of the lock.
- for_each(elements.begin(), elements.end(), ReleaseValue());
+
+ // release everything (outside of the lock).
+ for (auto &i: refs) {
+ i.second.releaseHeldValue();
+ }
}
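// Call-site sketch (simplified from objc_destructInstance elsewhere in the
// runtime): deallocation only takes this path when the object reports
// associated objects; raw-isa objects report hasAssociatedObjects()
// unconditionally, hence the performance note above.
//   bool assoc = obj->hasAssociatedObjects();
//   if (assoc) _object_remove_assocations(obj);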
#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H
+// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
+// The extra bits are optimized for the retain/release and alloc/dealloc paths.
+
+// Values for class_ro_t->flags
+// These are emitted by the compiler and are part of the ABI.
+// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
+// class is a metaclass
+#define RO_META (1<<0)
+// class is a root class
+#define RO_ROOT (1<<1)
+// class has .cxx_construct/destruct implementations
+#define RO_HAS_CXX_STRUCTORS (1<<2)
+// class has +load implementation
+// #define RO_HAS_LOAD_METHOD (1<<3)
+// class has visibility=hidden set
+#define RO_HIDDEN (1<<4)
+// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
+#define RO_EXCEPTION (1<<5)
+// class has ro field for Swift metadata initializer callback
+#define RO_HAS_SWIFT_INITIALIZER (1<<6)
+// class compiled with ARC
+#define RO_IS_ARC (1<<7)
+// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
+#define RO_HAS_CXX_DTOR_ONLY (1<<8)
+// class is not ARC but has ARC-style weak ivar layout
+#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
+// class does not allow associated objects on instances
+#define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)
+
+// class is in an unloadable bundle - must never be set by compiler
+#define RO_FROM_BUNDLE (1<<29)
+// class is unrealized future class - must never be set by compiler
+#define RO_FUTURE (1<<30)
+// class is realized - must never be set by compiler
+#define RO_REALIZED (1<<31)
+
+// Values for class_rw_t->flags
+// These are not emitted by the compiler and are never used in class_ro_t.
+// Their presence should be considered in future ABI versions.
+// class_t->data is class_rw_t, not class_ro_t
+#define RW_REALIZED (1<<31)
+// class is unresolved future class
+#define RW_FUTURE (1<<30)
+// class is initialized
+#define RW_INITIALIZED (1<<29)
+// class is initializing
+#define RW_INITIALIZING (1<<28)
+// class_rw_t->ro is heap copy of class_ro_t
+#define RW_COPIED_RO (1<<27)
+// class allocated but not yet registered
+#define RW_CONSTRUCTING (1<<26)
+// class allocated and registered
+#define RW_CONSTRUCTED (1<<25)
+// available for use; was RW_FINALIZE_ON_MAIN_THREAD
+// #define RW_24 (1<<24)
+// class +load has been called
+#define RW_LOADED (1<<23)
+#if !SUPPORT_NONPOINTER_ISA
+// class instances may have associative references
+#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
+#endif
+// class has instance-specific GC layout
+#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
+// class does not allow associated objects on its instances
+#define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
+// class has started realizing but not yet completed it
+#define RW_REALIZING (1<<19)
+
+// NOTE: MORE RW_ FLAGS DEFINED BELOW
+
+
+// Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
+// or class_t->bits (FAST_*).
+//
+// FAST_* and FAST_CACHE_* are stored on the class, reducing pointer indirection.
+
+#if __LP64__
+
+// class is a Swift class from the pre-stable Swift ABI
+#define FAST_IS_SWIFT_LEGACY (1UL<<0)
+// class is a Swift class from the stable Swift ABI
+#define FAST_IS_SWIFT_STABLE (1UL<<1)
+// class or superclass has default retain/release/autorelease/retainCount/
+// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
+#define FAST_HAS_DEFAULT_RR (1UL<<2)
+// data pointer
+#define FAST_DATA_MASK 0x00007ffffffffff8UL
+
+#if __arm64__
+// class or superclass has .cxx_construct/.cxx_destruct implementation
+// FAST_CACHE_HAS_CXX_DTOR is the first bit so that setting it in
+// isa_t::has_cxx_dtor is a single bfi
+#define FAST_CACHE_HAS_CXX_DTOR (1<<0)
+#define FAST_CACHE_HAS_CXX_CTOR (1<<1)
+// Denormalized RO_META to avoid an indirection
+#define FAST_CACHE_META (1<<2)
+#else
+// Denormalized RO_META to avoid an indirection
+#define FAST_CACHE_META (1<<0)
+// class or superclass has .cxx_construct/.cxx_destruct implementation
+// FAST_CACHE_HAS_CXX_DTOR is chosen to alias with isa_t::has_cxx_dtor
+#define FAST_CACHE_HAS_CXX_CTOR (1<<1)
+#define FAST_CACHE_HAS_CXX_DTOR (1<<2)
+#endif
+
+// Fast Alloc fields:
+// This stores the word-aligned size of instances + "ALLOC_DELTA16",
+// or 0 if the instance size doesn't fit.
+//
+// These bits occupy the same bits as the instance size, so that
+// the size can be extracted with a simple mask operation.
+//
+// FAST_CACHE_ALLOC_MASK16 allows extracting the instance size rounded
+// up to the next 16-byte boundary, which is a fastpath for
+// _objc_rootAllocWithZone()
+#define FAST_CACHE_ALLOC_MASK 0x1ff8
+#define FAST_CACHE_ALLOC_MASK16 0x1ff0
+#define FAST_CACHE_ALLOC_DELTA16 0x0008
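// Worked example of the encoding above (illustrative): for a class whose
// instances are 40 bytes,
//   word_align(40) + FAST_CACHE_ALLOC_DELTA16 = 40 + 8 = 0x30
//   0x30 & FAST_CACHE_ALLOC_MASK              = 0x30  (value stored in _flags)
// and the alloc fastpath recovers the 16-byte-rounded size with a single AND:
//   _flags & FAST_CACHE_ALLOC_MASK16          = 0x30  = 48 = align16(40)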
+
+// class's instances requires raw isa
+#define FAST_CACHE_REQUIRES_RAW_ISA (1<<13)
+// class or superclass has default alloc/allocWithZone: implementation
+// Note this is stored in the metaclass.
+#define FAST_CACHE_HAS_DEFAULT_AWZ (1<<14)
+// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
+#define FAST_CACHE_HAS_DEFAULT_CORE (1<<15)
+
+#else
+
+// class or superclass has .cxx_construct implementation
+#define RW_HAS_CXX_CTOR (1<<18)
+// class or superclass has .cxx_destruct implementation
+#define RW_HAS_CXX_DTOR (1<<17)
+// class or superclass has default alloc/allocWithZone: implementation
+// Note this is stored in the metaclass.
+#define RW_HAS_DEFAULT_AWZ (1<<16)
+// class's instances requires raw isa
+#if SUPPORT_NONPOINTER_ISA
+#define RW_REQUIRES_RAW_ISA (1<<15)
+#endif
+// class or superclass has default retain/release/autorelease/retainCount/
+// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
+#define RW_HAS_DEFAULT_RR (1<<14)
+// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
+#define RW_HAS_DEFAULT_CORE (1<<13)
+
+// class is a Swift class from the pre-stable Swift ABI
+#define FAST_IS_SWIFT_LEGACY (1UL<<0)
+// class is a Swift class from the stable Swift ABI
+#define FAST_IS_SWIFT_STABLE (1UL<<1)
+// data pointer
+#define FAST_DATA_MASK 0xfffffffcUL
+
+#endif // __LP64__
+
+// The Swift ABI requires that these bits be defined like this on all platforms.
+static_assert(FAST_IS_SWIFT_LEGACY == 1, "resistance is futile");
+static_assert(FAST_IS_SWIFT_STABLE == 2, "resistance is futile");
+
+
#if __LP64__
typedef uint32_t mask_t; // x86_64 & arm64 asm are less efficient with 16-bits
#else
struct swift_class_t;
enum Atomicity { Atomic = true, NotAtomic = false };
+enum IMPEncoding { Encoded = true, Raw = false };
struct bucket_t {
private:
// IMP-first is better for arm64e ptrauth and no worse for arm64.
// SEL-first is better for armv7* and i386 and x86_64.
#if __arm64__
- uintptr_t _imp;
- SEL _sel;
+ explicit_atomic<uintptr_t> _imp;
+ explicit_atomic<SEL> _sel;
#else
- SEL _sel;
- uintptr_t _imp;
+ explicit_atomic<SEL> _sel;
+ explicit_atomic<uintptr_t> _imp;
#endif
- // Compute the ptrauth signing modifier from &_imp and newSel
- uintptr_t modifierForSEL(SEL newSel) const {
- return (uintptr_t)&_imp ^ (uintptr_t)newSel;
+ // Compute the ptrauth signing modifier from &_imp, newSel, and cls.
+ uintptr_t modifierForSEL(SEL newSel, Class cls) const {
+ return (uintptr_t)&_imp ^ (uintptr_t)newSel ^ (uintptr_t)cls;
}
- // Sign newImp, with &_imp and newSel as modifiers.
- uintptr_t signIMP(IMP newImp, SEL newSel) const {
+ // Sign newImp, with &_imp, newSel, and cls as modifiers.
+ uintptr_t encodeImp(IMP newImp, SEL newSel, Class cls) const {
if (!newImp) return 0;
+#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
return (uintptr_t)
ptrauth_auth_and_resign(newImp,
ptrauth_key_function_pointer, 0,
ptrauth_key_process_dependent_code,
- modifierForSEL(newSel));
+ modifierForSEL(newSel, cls));
+#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
+ return (uintptr_t)newImp ^ (uintptr_t)cls;
+#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
+ return (uintptr_t)newImp;
+#else
+#error Unknown method cache IMP encoding.
+#endif
}
public:
- inline SEL sel() const { return _sel; }
+ inline SEL sel() const { return _sel.load(memory_order::memory_order_relaxed); }
- inline IMP imp() const {
- if (!_imp) return nil;
+ inline IMP imp(Class cls) const {
+ uintptr_t imp = _imp.load(memory_order::memory_order_relaxed);
+ if (!imp) return nil;
+#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
+ SEL sel = _sel.load(memory_order::memory_order_relaxed);
return (IMP)
- ptrauth_auth_and_resign((const void *)_imp,
+ ptrauth_auth_and_resign((const void *)imp,
ptrauth_key_process_dependent_code,
- modifierForSEL(_sel),
+ modifierForSEL(sel, cls),
ptrauth_key_function_pointer, 0);
+#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
+ return (IMP)(imp ^ (uintptr_t)cls);
+#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
+ return (IMP)imp;
+#else
+#error Unknown method cache IMP encoding.
+#endif
}
- template <Atomicity>
- void set(SEL newSel, IMP newImp);
+ template <Atomicity, IMPEncoding>
+ void set(SEL newSel, IMP newImp, Class cls);
};
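// Minimal sketch of the ISA_XOR round-trip used by encodeImp()/imp() above
// (assumes CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR; helper names
// are illustrative, not part of the runtime):
static inline uintptr_t xor_encode(IMP imp, Class cls) {
    return (uintptr_t)imp ^ (uintptr_t)cls;      // what gets stored in _imp
}
static inline IMP xor_decode(uintptr_t stored, Class cls) {
    return (IMP)(stored ^ (uintptr_t)cls);       // (imp ^ cls) ^ cls == imp
}
// Reading a cached entry against the wrong class therefore yields a garbage
// pointer rather than a callable IMP, which is the point of keying the
// encoding on cls.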
struct cache_t {
- struct bucket_t *_buckets;
- mask_t _mask;
- mask_t _occupied;
+#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
+ explicit_atomic<struct bucket_t *> _buckets;
+ explicit_atomic<mask_t> _mask;
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
+ explicit_atomic<uintptr_t> _maskAndBuckets;
+ mask_t _mask_unused;
+
+ // How much the mask is shifted by.
+ static constexpr uintptr_t maskShift = 48;
+
+ // Additional bits after the mask which must be zero. msgSend
+ // takes advantage of these additional bits to construct the value
+ // `mask << 4` from `_maskAndBuckets` in a single instruction.
+ static constexpr uintptr_t maskZeroBits = 4;
+
+ // The largest mask value we can store.
+ static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;
+
+ // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
+ static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;
+
+ // Ensure we have enough bits for the buckets pointer.
+ static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
+ // _maskAndBuckets stores the mask shift in the low 4 bits, and
+ // the buckets pointer in the remainder of the value. The mask
+ // shift is the value where (0xffff >> shift) produces the correct
+ // mask. This is equal to 16 - log2(cache_size).
+ explicit_atomic<uintptr_t> _maskAndBuckets;
+ mask_t _mask_unused;
+
+ static constexpr uintptr_t maskBits = 4;
+ static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
+ static constexpr uintptr_t bucketsMask = ~maskMask;
+#else
+#error Unknown cache mask storage type.
+#endif
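// Illustrative decode under CACHE_MASK_STORAGE_HIGH_16 (this mirrors what
// buckets() and mask() are expected to do with the fields above):
//   uintptr_t raw = _maskAndBuckets.load(memory_order_relaxed);
//   bucket_t *b   = (bucket_t *)(raw & bucketsMask);  // low 44 bits: pointer
//   mask_t    m   = (mask_t)(raw >> maskShift);       // high 16 bits: mask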
+
+#if __LP64__
+ uint16_t _flags;
+#endif
+ uint16_t _occupied;
public:
+ static bucket_t *emptyBuckets();
+
struct bucket_t *buckets();
mask_t mask();
mask_t occupied();
void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
void initializeToEmpty();
- mask_t capacity();
+ unsigned capacity();
bool isConstantEmptyCache();
bool canBeFreed();
+#if __LP64__
+ bool getBit(uint16_t flags) const {
+ return _flags & flags;
+ }
+ void setBit(uint16_t set) {
+ __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags, set, __ATOMIC_RELAXED);
+ }
+ void clearBit(uint16_t clear) {
+ __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags, ~clear, __ATOMIC_RELAXED);
+ }
+#endif
+
+#if FAST_CACHE_ALLOC_MASK
+ bool hasFastInstanceSize(size_t extra) const
+ {
+ if (__builtin_constant_p(extra) && extra == 0) {
+ return _flags & FAST_CACHE_ALLOC_MASK16;
+ }
+ return _flags & FAST_CACHE_ALLOC_MASK;
+ }
+
+ size_t fastInstanceSize(size_t extra) const
+ {
+ ASSERT(hasFastInstanceSize(extra));
+
+ if (__builtin_constant_p(extra) && extra == 0) {
+ return _flags & FAST_CACHE_ALLOC_MASK16;
+ } else {
+ size_t size = _flags & FAST_CACHE_ALLOC_MASK;
+ // remove the FAST_CACHE_ALLOC_DELTA16 that was added
+ // by setFastInstanceSize
+ return align16(size + extra - FAST_CACHE_ALLOC_DELTA16);
+ }
+ }
+
+ void setFastInstanceSize(size_t newSize)
+ {
+ // Set during realization or construction only. No locking needed.
+ uint16_t newBits = _flags & ~FAST_CACHE_ALLOC_MASK;
+ uint16_t sizeBits;
+
+ // Adding FAST_CACHE_ALLOC_DELTA16 allows for FAST_CACHE_ALLOC_MASK16
+ // to yield the proper 16-byte aligned allocation size with a single mask
+ sizeBits = word_align(newSize) + FAST_CACHE_ALLOC_DELTA16;
+ sizeBits &= FAST_CACHE_ALLOC_MASK;
+ if (newSize <= sizeBits) {
+ newBits |= sizeBits;
+ }
+ _flags = newBits;
+ }
+#else
+ bool hasFastInstanceSize(size_t extra) const {
+ return false;
+ }
+ size_t fastInstanceSize(size_t extra) const {
+ abort();
+ }
+ void setFastInstanceSize(size_t extra) {
+ // nothing
+ }
+#endif
+
static size_t bytesForCapacity(uint32_t cap);
static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);
- void expand();
- void reallocate(mask_t oldCapacity, mask_t newCapacity);
- struct bucket_t * find(SEL sel, id receiver);
+ void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
+ void insert(Class cls, SEL sel, IMP imp, id receiver);
- static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn));
+ static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn, cold));
};
// classref_t is unremapped class_t*
typedef struct classref * classref_t;
+
+#ifdef __PTRAUTH_INTRINSICS__
+# define StubClassInitializerPtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0xc671)
+#else
+# define StubClassInitializerPtrauth
+#endif
+struct stub_class_t {
+ uintptr_t isa;
+ _objc_swiftMetadataInitializer StubClassInitializerPtrauth initializer;
+};
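// A stub class is a Swift-emitted placeholder whose isa field holds a small
// tag rather than a real metaclass pointer (see isStubClass() further down:
// 1 <= isa < 16). When the runtime encounters one, it invokes the
// ptrauth-signed `initializer` callback to build and return the real class
// lazily instead of reading class data from the stub itself.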
+
/***********************************************************************
* entsize_list_tt<Element, List, FlagMask>
* Generic implementation of an array of non-fragile structs.
}
Element& getOrEnd(uint32_t i) const {
- assert(i <= count);
+ ASSERT(i <= count);
return *(Element *)((uint8_t *)&first + i*entsize());
}
Element& get(uint32_t i) const {
- assert(i < count);
+ ASSERT(i < count);
return getOrEnd(i);
}
// Two bits of entsize are used for fixup markers.
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
+ bool isUniqued() const;
bool isFixedUp() const;
void setFixedUp();
uint32_t indexOfMethod(const method_t *meth) const {
uint32_t i =
(uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
- assert(i < count);
+ ASSERT(i < count);
return i;
}
};
typedef uintptr_t protocol_ref_t; // protocol_t *, but unremapped
// Values for protocol_t->flags
-#define PROTOCOL_FIXED_UP_2 (1<<31) // must never be set by compiler
-#define PROTOCOL_FIXED_UP_1 (1<<30) // must never be set by compiler
+#define PROTOCOL_FIXED_UP_2 (1<<31) // must never be set by compiler
+#define PROTOCOL_FIXED_UP_1 (1<<30) // must never be set by compiler
+#define PROTOCOL_IS_CANONICAL (1<<29) // must never be set by compiler
// Bits 0..15 are reserved for Swift's use.
#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
bool isFixedUp() const;
void setFixedUp();
+ bool isCanonical() const;
+ void clearIsCanonical();
+
# define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))
bool hasExtendedMethodTypesField() const {
};
struct protocol_list_t {
- // count is 64-bit by accident.
+ // count is pointer-sized by accident.
uintptr_t count;
protocol_ref_t list[0]; // variable-size
}
};
-struct locstamped_category_t {
- category_t *cat;
- struct header_info *hi;
-};
-
-struct locstamped_category_list_t {
- uint32_t count;
-#if __LP64__
- uint32_t reserved;
-#endif
- locstamped_category_t list[0];
-};
-
-
-// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
-// The extra bits are optimized for the retain/release and alloc/dealloc paths.
-
-// Values for class_ro_t->flags
-// These are emitted by the compiler and are part of the ABI.
-// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang
-// class is a metaclass
-#define RO_META (1<<0)
-// class is a root class
-#define RO_ROOT (1<<1)
-// class has .cxx_construct/destruct implementations
-#define RO_HAS_CXX_STRUCTORS (1<<2)
-// class has +load implementation
-// #define RO_HAS_LOAD_METHOD (1<<3)
-// class has visibility=hidden set
-#define RO_HIDDEN (1<<4)
-// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
-#define RO_EXCEPTION (1<<5)
-// class has ro field for Swift metadata initializer callback
-#define RO_HAS_SWIFT_INITIALIZER (1<<6)
-// class compiled with ARC
-#define RO_IS_ARC (1<<7)
-// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
-#define RO_HAS_CXX_DTOR_ONLY (1<<8)
-// class is not ARC but has ARC-style weak ivar layout
-#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
-// class does not allow associated objects on instances
-#define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)
-
-// class is in an unloadable bundle - must never be set by compiler
-#define RO_FROM_BUNDLE (1<<29)
-// class is unrealized future class - must never be set by compiler
-#define RO_FUTURE (1<<30)
-// class is realized - must never be set by compiler
-#define RO_REALIZED (1<<31)
-
-// Values for class_rw_t->flags
-// These are not emitted by the compiler and are never used in class_ro_t.
-// Their presence should be considered in future ABI versions.
-// class_t->data is class_rw_t, not class_ro_t
-#define RW_REALIZED (1<<31)
-// class is unresolved future class
-#define RW_FUTURE (1<<30)
-// class is initialized
-#define RW_INITIALIZED (1<<29)
-// class is initializing
-#define RW_INITIALIZING (1<<28)
-// class_rw_t->ro is heap copy of class_ro_t
-#define RW_COPIED_RO (1<<27)
-// class allocated but not yet registered
-#define RW_CONSTRUCTING (1<<26)
-// class allocated and registered
-#define RW_CONSTRUCTED (1<<25)
-// available for use; was RW_FINALIZE_ON_MAIN_THREAD
-// #define RW_24 (1<<24)
-// class +load has been called
-#define RW_LOADED (1<<23)
-#if !SUPPORT_NONPOINTER_ISA
-// class instances may have associative references
-#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
-#endif
-// class has instance-specific GC layout
-#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
-// class does not allow associated objects on its instances
-#define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
-// class has started realizing but not yet completed it
-#define RW_REALIZING (1<<19)
-
-// NOTE: MORE RW_ FLAGS DEFINED BELOW
-
-
-// Values for class_rw_t->flags or class_t->bits
-// These flags are optimized for retain/release and alloc/dealloc
-// 64-bit stores more of them in class_t->bits to reduce pointer indirection.
-
-#if !__LP64__
-
-// class or superclass has .cxx_construct implementation
-#define RW_HAS_CXX_CTOR (1<<18)
-// class or superclass has .cxx_destruct implementation
-#define RW_HAS_CXX_DTOR (1<<17)
-// class or superclass has default alloc/allocWithZone: implementation
-// Note this is is stored in the metaclass.
-#define RW_HAS_DEFAULT_AWZ (1<<16)
-// class's instances requires raw isa
-#if SUPPORT_NONPOINTER_ISA
-#define RW_REQUIRES_RAW_ISA (1<<15)
-#endif
-// class or superclass has default retain/release/autorelease/retainCount/
-// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
-#define RW_HAS_DEFAULT_RR (1<<14)
-
-// class is a Swift class from the pre-stable Swift ABI
-#define FAST_IS_SWIFT_LEGACY (1UL<<0)
-// class is a Swift class from the stable Swift ABI
-#define FAST_IS_SWIFT_STABLE (1UL<<1)
-// data pointer
-#define FAST_DATA_MASK 0xfffffffcUL
-
-#elif 1
-// Leaks-compatible version that steals low bits only.
-
-// class or superclass has .cxx_construct implementation
-#define RW_HAS_CXX_CTOR (1<<18)
-// class or superclass has .cxx_destruct implementation
-#define RW_HAS_CXX_DTOR (1<<17)
-// class or superclass has default alloc/allocWithZone: implementation
-// Note this is is stored in the metaclass.
-#define RW_HAS_DEFAULT_AWZ (1<<16)
-// class's instances requires raw isa
-#define RW_REQUIRES_RAW_ISA (1<<15)
-
-// class is a Swift class from the pre-stable Swift ABI
-#define FAST_IS_SWIFT_LEGACY (1UL<<0)
-// class is a Swift class from the stable Swift ABI
-#define FAST_IS_SWIFT_STABLE (1UL<<1)
-// class or superclass has default retain/release/autorelease/retainCount/
-// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
-#define FAST_HAS_DEFAULT_RR (1UL<<2)
-// data pointer
-#define FAST_DATA_MASK 0x00007ffffffffff8UL
-
-#else
-// Leaks-incompatible version that steals lots of bits.
-
-// class is a Swift class from the pre-stable Swift ABI
-#define FAST_IS_SWIFT_LEGACY (1UL<<0)
-// class is a Swift class from the stable Swift ABI
-#define FAST_IS_SWIFT_STABLE (1UL<<1)
-// summary bit for fast alloc path: !hasCxxCtor and
-// !instancesRequireRawIsa and instanceSize fits into shiftedSize
-#define FAST_ALLOC (1UL<<2)
-// data pointer
-#define FAST_DATA_MASK 0x00007ffffffffff8UL
-// class or superclass has .cxx_construct implementation
-#define FAST_HAS_CXX_CTOR (1UL<<47)
-// class or superclass has default alloc/allocWithZone: implementation
-// Note this is is stored in the metaclass.
-#define FAST_HAS_DEFAULT_AWZ (1UL<<48)
-// class or superclass has default retain/release/autorelease/retainCount/
-// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
-#define FAST_HAS_DEFAULT_RR (1UL<<49)
-// class's instances requires raw isa
-// This bit is aligned with isa_t->hasCxxDtor to save an instruction.
-#define FAST_REQUIRES_RAW_ISA (1UL<<50)
-// class or superclass has .cxx_destruct implementation
-#define FAST_HAS_CXX_DTOR (1UL<<51)
-// instance size in units of 16 bytes
-// or 0 if the instance size is too big in this field
-// This field must be LAST
-#define FAST_SHIFTED_SIZE_SHIFT 52
-
-// FAST_ALLOC means
-// FAST_HAS_CXX_CTOR is set
-// FAST_REQUIRES_RAW_ISA is not set
-// FAST_SHIFTED_SIZE is not zero
-// FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that
-// bit is stored on the metaclass.
-#define FAST_ALLOC_MASK (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA)
-#define FAST_ALLOC_VALUE (0)
-
-#endif
-
-// The Swift ABI requires that these bits be defined like this on all platforms.
-static_assert(FAST_IS_SWIFT_LEGACY == 1, "resistance is futile");
-static_assert(FAST_IS_SWIFT_STABLE == 2, "resistance is futile");
-
-
struct class_ro_t {
uint32_t flags;
uint32_t instanceStart;
}
const iterator& operator ++ () {
- assert(m != mEnd);
+ ASSERT(m != mEnd);
m++;
if (m == mEnd) {
- assert(lists != listsEnd);
+ ASSERT(lists != listsEnd);
lists++;
if (lists != listsEnd) {
m = (*lists)->begin();
struct class_rw_t {
// Be warned that Symbolication knows the layout of this structure.
uint32_t flags;
- uint32_t version;
+ uint16_t version;
+ uint16_t witness;
const class_ro_t *ro;
void setFlags(uint32_t set)
{
- OSAtomicOr32Barrier(set, &flags);
+ __c11_atomic_fetch_or((_Atomic(uint32_t) *)&flags, set, __ATOMIC_RELAXED);
}
void clearFlags(uint32_t clear)
{
- OSAtomicXor32Barrier(clear, &flags);
+ __c11_atomic_fetch_and((_Atomic(uint32_t) *)&flags, ~clear, __ATOMIC_RELAXED);
}
// set and clear must not overlap
void changeFlags(uint32_t set, uint32_t clear)
{
- assert((set & clear) == 0);
+ ASSERT((set & clear) == 0);
uint32_t oldf, newf;
do {
struct class_data_bits_t {
+ friend objc_class;
// Values are the FAST_ flags above.
uintptr_t bits;
private:
- bool getBit(uintptr_t bit)
+ bool getBit(uintptr_t bit) const
{
return bits & bit;
}
-#if FAST_ALLOC
- // On entry, `newBits` is a bits value after setting and/or clearing
- // the bits in `change`. Fix the fast-alloc parts of newBits if necessary
- // and return the updated value.
- static uintptr_t updateFastAlloc(uintptr_t newBits, uintptr_t change)
- {
- if (change & FAST_ALLOC_MASK) {
- if (((newBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) &&
- ((newBits >> FAST_SHIFTED_SIZE_SHIFT) != 0))
- {
- newBits |= FAST_ALLOC;
- } else {
- newBits &= ~FAST_ALLOC;
- }
- }
- return newBits;
- }
-#else
- static uintptr_t updateFastAlloc(uintptr_t newBits, uintptr_t change) {
- return newBits;
- }
-#endif
-
// Atomically set the bits in `set` and clear the bits in `clear`.
// set and clear must not overlap.
void setAndClearBits(uintptr_t set, uintptr_t clear)
{
- assert((set & clear) == 0);
+ ASSERT((set & clear) == 0);
uintptr_t oldBits;
uintptr_t newBits;
do {
oldBits = LoadExclusive(&bits);
- newBits = updateFastAlloc((oldBits | set) & ~clear, set | clear);
+ newBits = (oldBits | set) & ~clear;
} while (!StoreReleaseExclusive(&bits, oldBits, newBits));
}
void setBits(uintptr_t set) {
- setAndClearBits(set, 0);
+ __c11_atomic_fetch_or((_Atomic(uintptr_t) *)&bits, set, __ATOMIC_RELAXED);
}
void clearBits(uintptr_t clear) {
- setAndClearBits(0, clear);
+ __c11_atomic_fetch_and((_Atomic(uintptr_t) *)&bits, ~clear, __ATOMIC_RELAXED);
}
public:
- class_rw_t* data() {
+ class_rw_t* data() const {
return (class_rw_t *)(bits & FAST_DATA_MASK);
}
void setData(class_rw_t *newData)
{
- assert(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE)));
+ ASSERT(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE)));
// Set during realization or construction only. No locking needed.
// Use a store-release fence because there may be concurrent
// readers of data and data's contents.
}
}
-#if FAST_HAS_DEFAULT_RR
- bool hasDefaultRR() {
- return getBit(FAST_HAS_DEFAULT_RR);
- }
- void setHasDefaultRR() {
- setBits(FAST_HAS_DEFAULT_RR);
- }
- void setHasCustomRR() {
- clearBits(FAST_HAS_DEFAULT_RR);
- }
-#else
- bool hasDefaultRR() {
- return data()->flags & RW_HAS_DEFAULT_RR;
- }
- void setHasDefaultRR() {
- data()->setFlags(RW_HAS_DEFAULT_RR);
- }
- void setHasCustomRR() {
- data()->clearFlags(RW_HAS_DEFAULT_RR);
- }
-#endif
-
-#if FAST_HAS_DEFAULT_AWZ
- bool hasDefaultAWZ() {
- return getBit(FAST_HAS_DEFAULT_AWZ);
- }
- void setHasDefaultAWZ() {
- setBits(FAST_HAS_DEFAULT_AWZ);
- }
- void setHasCustomAWZ() {
- clearBits(FAST_HAS_DEFAULT_AWZ);
- }
-#else
- bool hasDefaultAWZ() {
- return data()->flags & RW_HAS_DEFAULT_AWZ;
- }
- void setHasDefaultAWZ() {
- data()->setFlags(RW_HAS_DEFAULT_AWZ);
- }
- void setHasCustomAWZ() {
- data()->clearFlags(RW_HAS_DEFAULT_AWZ);
- }
-#endif
-
-#if FAST_HAS_CXX_CTOR
- bool hasCxxCtor() {
- return getBit(FAST_HAS_CXX_CTOR);
- }
- void setHasCxxCtor() {
- setBits(FAST_HAS_CXX_CTOR);
- }
-#else
- bool hasCxxCtor() {
- return data()->flags & RW_HAS_CXX_CTOR;
- }
- void setHasCxxCtor() {
- data()->setFlags(RW_HAS_CXX_CTOR);
- }
-#endif
-
-#if FAST_HAS_CXX_DTOR
- bool hasCxxDtor() {
- return getBit(FAST_HAS_CXX_DTOR);
- }
- void setHasCxxDtor() {
- setBits(FAST_HAS_CXX_DTOR);
- }
-#else
- bool hasCxxDtor() {
- return data()->flags & RW_HAS_CXX_DTOR;
- }
- void setHasCxxDtor() {
- data()->setFlags(RW_HAS_CXX_DTOR);
- }
-#endif
-
-#if FAST_REQUIRES_RAW_ISA
- bool instancesRequireRawIsa() {
- return getBit(FAST_REQUIRES_RAW_ISA);
- }
- void setInstancesRequireRawIsa() {
- setBits(FAST_REQUIRES_RAW_ISA);
- }
-#elif SUPPORT_NONPOINTER_ISA
- bool instancesRequireRawIsa() {
- return data()->flags & RW_REQUIRES_RAW_ISA;
- }
- void setInstancesRequireRawIsa() {
- data()->setFlags(RW_REQUIRES_RAW_ISA);
- }
-#else
- bool instancesRequireRawIsa() {
- return true;
- }
- void setInstancesRequireRawIsa() {
- // nothing
- }
-#endif
-
-#if FAST_ALLOC
- size_t fastInstanceSize()
- {
- assert(bits & FAST_ALLOC);
- return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16;
- }
- void setFastInstanceSize(size_t newSize)
- {
- // Set during realization or construction only. No locking needed.
- assert(data()->flags & RW_REALIZING);
-
- // Round up to 16-byte boundary, then divide to get 16-byte units
- newSize = ((newSize + 15) & ~15) / 16;
-
- uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT;
- if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) {
- int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT;
- uintptr_t oldBits = (bits << shift) >> shift;
- if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) {
- newBits |= FAST_ALLOC;
- }
- bits = oldBits | newBits;
- }
- }
-
- bool canAllocFast() {
- return bits & FAST_ALLOC;
- }
-#else
- size_t fastInstanceSize() {
- abort();
- }
- void setFastInstanceSize(size_t) {
- // nothing
- }
- bool canAllocFast() {
- return false;
- }
-#endif
-
void setClassArrayIndex(unsigned Idx) {
#if SUPPORT_INDEXED_ISA
// 0 is unused as then we can rely on zero-initialisation from calloc.
- assert(Idx > 0);
+ ASSERT(Idx > 0);
data()->index = Idx;
#endif
}
cache_t cache; // formerly cache pointer and vtable
class_data_bits_t bits; // class_rw_t * plus custom rr/alloc flags
- class_rw_t *data() {
+ class_rw_t *data() const {
return bits.data();
}
void setData(class_rw_t *newData) {
}
void setInfo(uint32_t set) {
- assert(isFuture() || isRealized());
+ ASSERT(isFuture() || isRealized());
data()->setFlags(set);
}
void clearInfo(uint32_t clear) {
- assert(isFuture() || isRealized());
+ ASSERT(isFuture() || isRealized());
data()->clearFlags(clear);
}
// set and clear must not overlap
void changeInfo(uint32_t set, uint32_t clear) {
- assert(isFuture() || isRealized());
- assert((set & clear) == 0);
+ ASSERT(isFuture() || isRealized());
+ ASSERT((set & clear) == 0);
data()->changeFlags(set, clear);
}
- bool hasCustomRR() {
- return ! bits.hasDefaultRR();
+#if FAST_HAS_DEFAULT_RR
+ bool hasCustomRR() const {
+ return !bits.getBit(FAST_HAS_DEFAULT_RR);
}
void setHasDefaultRR() {
- assert(isInitializing());
- bits.setHasDefaultRR();
+ bits.setBits(FAST_HAS_DEFAULT_RR);
}
- void setHasCustomRR(bool inherited = false);
- void printCustomRR(bool inherited);
+ void setHasCustomRR() {
+ bits.clearBits(FAST_HAS_DEFAULT_RR);
+ }
+#else
+ bool hasCustomRR() const {
+ return !(bits.data()->flags & RW_HAS_DEFAULT_RR);
+ }
+ void setHasDefaultRR() {
+ bits.data()->setFlags(RW_HAS_DEFAULT_RR);
+ }
+ void setHasCustomRR() {
+ bits.data()->clearFlags(RW_HAS_DEFAULT_RR);
+ }
+#endif
- bool hasCustomAWZ() {
- return ! bits.hasDefaultAWZ();
+#if FAST_CACHE_HAS_DEFAULT_AWZ
+ bool hasCustomAWZ() const {
+ return !cache.getBit(FAST_CACHE_HAS_DEFAULT_AWZ);
}
void setHasDefaultAWZ() {
- assert(isInitializing());
- bits.setHasDefaultAWZ();
+ cache.setBit(FAST_CACHE_HAS_DEFAULT_AWZ);
}
- void setHasCustomAWZ(bool inherited = false);
- void printCustomAWZ(bool inherited);
-
- bool instancesRequireRawIsa() {
- return bits.instancesRequireRawIsa();
+ void setHasCustomAWZ() {
+ cache.clearBit(FAST_CACHE_HAS_DEFAULT_AWZ);
}
- void setInstancesRequireRawIsa(bool inherited = false);
- void printInstancesRequireRawIsa(bool inherited);
-
- bool canAllocNonpointer() {
- assert(!isFuture());
- return !instancesRequireRawIsa();
+#else
+ bool hasCustomAWZ() const {
+ return !(bits.data()->flags & RW_HAS_DEFAULT_AWZ);
+ }
+ void setHasDefaultAWZ() {
+ bits.data()->setFlags(RW_HAS_DEFAULT_AWZ);
}
- bool canAllocFast() {
- assert(!isFuture());
- return bits.canAllocFast();
+ void setHasCustomAWZ() {
+ bits.data()->clearFlags(RW_HAS_DEFAULT_AWZ);
}
+#endif
+#if FAST_CACHE_HAS_DEFAULT_CORE
+ bool hasCustomCore() const {
+ return !cache.getBit(FAST_CACHE_HAS_DEFAULT_CORE);
+ }
+ void setHasDefaultCore() {
+ return cache.setBit(FAST_CACHE_HAS_DEFAULT_CORE);
+ }
+ void setHasCustomCore() {
+ return cache.clearBit(FAST_CACHE_HAS_DEFAULT_CORE);
+ }
+#else
+ bool hasCustomCore() const {
+ return !(bits.data()->flags & RW_HAS_DEFAULT_CORE);
+ }
+ void setHasDefaultCore() {
+ bits.data()->setFlags(RW_HAS_DEFAULT_CORE);
+ }
+ void setHasCustomCore() {
+ bits.data()->clearFlags(RW_HAS_DEFAULT_CORE);
+ }
+#endif
+#if FAST_CACHE_HAS_CXX_CTOR
+ bool hasCxxCtor() {
+ ASSERT(isRealized());
+ return cache.getBit(FAST_CACHE_HAS_CXX_CTOR);
+ }
+ void setHasCxxCtor() {
+ cache.setBit(FAST_CACHE_HAS_CXX_CTOR);
+ }
+#else
bool hasCxxCtor() {
- // addSubclass() propagates this flag from the superclass.
- assert(isRealized());
- return bits.hasCxxCtor();
+ ASSERT(isRealized());
+ return bits.data()->flags & RW_HAS_CXX_CTOR;
}
- void setHasCxxCtor() {
- bits.setHasCxxCtor();
+ void setHasCxxCtor() {
+ bits.data()->setFlags(RW_HAS_CXX_CTOR);
}
+#endif
+#if FAST_CACHE_HAS_CXX_DTOR
+ bool hasCxxDtor() {
+ ASSERT(isRealized());
+ return cache.getBit(FAST_CACHE_HAS_CXX_DTOR);
+ }
+ void setHasCxxDtor() {
+ cache.setBit(FAST_CACHE_HAS_CXX_DTOR);
+ }
+#else
bool hasCxxDtor() {
- // addSubclass() propagates this flag from the superclass.
- assert(isRealized());
- return bits.hasCxxDtor();
+ ASSERT(isRealized());
+ return bits.data()->flags & RW_HAS_CXX_DTOR;
}
- void setHasCxxDtor() {
- bits.setHasCxxDtor();
+ void setHasCxxDtor() {
+ bits.data()->setFlags(RW_HAS_CXX_DTOR);
}
+#endif
+#if FAST_CACHE_REQUIRES_RAW_ISA
+ bool instancesRequireRawIsa() {
+ return cache.getBit(FAST_CACHE_REQUIRES_RAW_ISA);
+ }
+ void setInstancesRequireRawIsa() {
+ cache.setBit(FAST_CACHE_REQUIRES_RAW_ISA);
+ }
+#elif SUPPORT_NONPOINTER_ISA
+ bool instancesRequireRawIsa() {
+ return bits.data()->flags & RW_REQUIRES_RAW_ISA;
+ }
+ void setInstancesRequireRawIsa() {
+ bits.data()->setFlags(RW_REQUIRES_RAW_ISA);
+ }
+#else
+ bool instancesRequireRawIsa() {
+ return true;
+ }
+ void setInstancesRequireRawIsa() {
+ // nothing
+ }
+#endif
+ void setInstancesRequireRawIsaRecursively(bool inherited = false);
+ void printInstancesRequireRawIsa(bool inherited);
+
+ bool canAllocNonpointer() {
+ ASSERT(!isFuture());
+ return !instancesRequireRawIsa();
+ }
bool isSwiftStable() {
return bits.isSwiftStable();
return bits.isSwiftStable_ButAllowLegacyForNow();
}
+ bool isStubClass() const {
+ uintptr_t isa = (uintptr_t)isaBits();
+ return 1 <= isa && isa < 16;
+ }
+
// Swift stable ABI built for old deployment targets looks weird.
// The is-legacy bit is set for compatibility with old libobjc.
// We are on a "new" deployment target so we need to rewrite that bit.
#else
bool instancesHaveAssociatedObjects() {
// this may be an unrealized future class in the CF-bridged case
- assert(isFuture() || isRealized());
+ ASSERT(isFuture() || isRealized());
return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
}
void setInstancesHaveAssociatedObjects() {
// this may be an unrealized future class in the CF-bridged case
- assert(isFuture() || isRealized());
+ ASSERT(isFuture() || isRealized());
setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
}
#endif
}
void setInitializing() {
- assert(!isMetaClass());
+ ASSERT(!isMetaClass());
ISA()->setInfo(RW_INITIALIZING);
}
void setInitialized();
bool isLoadable() {
- assert(isRealized());
+ ASSERT(isRealized());
return true; // any class registered for +load is definitely loadable
}
IMP getLoadMethod();
// Locking: To prevent concurrent realization, hold runtimeLock.
- bool isRealized() {
- return data()->flags & RW_REALIZED;
+ bool isRealized() const {
+ return !isStubClass() && (data()->flags & RW_REALIZED);
}
// Returns true if this is an unrealized future class.
// Locking: To prevent concurrent realization, hold runtimeLock.
- bool isFuture() {
+ bool isFuture() const {
return data()->flags & RW_FUTURE;
}
bool isMetaClass() {
- assert(this);
- assert(isRealized());
+ ASSERT(this);
+ ASSERT(isRealized());
+#if FAST_CACHE_META
+ return cache.getBit(FAST_CACHE_META);
+#else
return data()->ro->flags & RO_META;
+#endif
}
// Like isMetaClass, but also valid on un-realized classes
const char *mangledName() {
// fixme can't assert locks here
- assert(this);
+ ASSERT(this);
if (isRealized() || isFuture()) {
return data()->ro->name;
const char *nameForLogging();
// May be unaligned depending on class's ivars.
- uint32_t unalignedInstanceStart() {
- assert(isRealized());
+ uint32_t unalignedInstanceStart() const {
+ ASSERT(isRealized());
return data()->ro->instanceStart;
}
// Class's instance start rounded up to a pointer-size boundary.
// This is used for ARC layout bitmaps.
- uint32_t alignedInstanceStart() {
+ uint32_t alignedInstanceStart() const {
return word_align(unalignedInstanceStart());
}
// May be unaligned depending on class's ivars.
- uint32_t unalignedInstanceSize() {
- assert(isRealized());
+ uint32_t unalignedInstanceSize() const {
+ ASSERT(isRealized());
return data()->ro->instanceSize;
}
// Class's ivar size rounded up to a pointer-size boundary.
- uint32_t alignedInstanceSize() {
+ uint32_t alignedInstanceSize() const {
return word_align(unalignedInstanceSize());
}
- size_t instanceSize(size_t extraBytes) {
+ size_t instanceSize(size_t extraBytes) const {
+ if (fastpath(cache.hasFastInstanceSize(extraBytes))) {
+ return cache.fastInstanceSize(extraBytes);
+ }
+
size_t size = alignedInstanceSize() + extraBytes;
// CF requires all objects be at least 16 bytes.
if (size < 16) size = 16;
}
void setInstanceSize(uint32_t newSize) {
- assert(isRealized());
+ ASSERT(isRealized());
+ ASSERT(data()->flags & RW_REALIZING);
if (newSize != data()->ro->instanceSize) {
- assert(data()->flags & RW_COPIED_RO);
+ ASSERT(data()->flags & RW_COPIED_RO);
*const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
}
- bits.setFastInstanceSize(newSize);
+ cache.setFastInstanceSize(newSize);
}
void chooseClassArrayIndex();
unsigned classArrayIndex() {
return bits.classArrayIndex();
}
-
};
}
property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);
+
+ protocol_list_t *protocolsForMeta(bool isMeta) {
+ if (isMeta) return nullptr;
+ else return protocols;
+ }
};
struct objc_super2 {
extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
-static inline void
-foreach_realized_class_and_subclass_2(Class top, unsigned& count,
- std::function<bool (Class)> code)
-{
- // runtimeLock.assertLocked();
- assert(top);
- Class cls = top;
- while (1) {
- if (--count == 0) {
- _objc_fatal("Memory corruption in class list.");
- }
- if (!code(cls)) break;
-
- if (cls->data()->firstSubclass) {
- cls = cls->data()->firstSubclass;
- } else {
- while (!cls->data()->nextSiblingClass && cls != top) {
- cls = cls->superclass;
- if (--count == 0) {
- _objc_fatal("Memory corruption in class list.");
- }
- }
- if (cls == top) break;
- cls = cls->data()->nextSiblingClass;
- }
- }
-}
-
-extern Class firstRealizedClass();
-extern unsigned int unreasonableClassCount();
-
-// Enumerates a class and all of its realized subclasses.
-static inline void
-foreach_realized_class_and_subclass(Class top,
- std::function<void (Class)> code)
-{
- unsigned int count = unreasonableClassCount();
-
- foreach_realized_class_and_subclass_2(top, count,
- [&code](Class cls) -> bool
- {
- code(cls);
- return true;
- });
-}
-
-// Enumerates all realized classes and metaclasses.
-static inline void
-foreach_realized_class_and_metaclass(std::function<void (Class)> code)
-{
- unsigned int count = unreasonableClassCount();
-
- for (Class top = firstRealizedClass();
- top != nil;
- top = top->data()->nextSiblingClass)
- {
- foreach_realized_class_and_subclass_2(top, count,
- [&code](Class cls) -> bool
- {
- code(cls);
- return true;
- });
- }
-
-}
-
#endif
#if __OBJC2__
+#include "DenseMapExtras.h"
#include "objc-private.h"
#include "objc-runtime-new.h"
#include "objc-file.h"
static void disableTaggedPointers();
static void detach_class(Class cls, bool isMeta);
static void free_class(Class cls);
-static Class setSuperclass(Class cls, Class newSuper);
-static method_t *getMethodNoSuper_nolock(Class cls, SEL sel);
-static method_t *getMethod_nolock(Class cls, SEL sel);
static IMP addMethod(Class cls, SEL name, IMP imp, const char *types, bool replace);
-static bool isRRSelector(SEL sel);
-static bool isAWZSelector(SEL sel);
-static bool methodListImplementsRR(const method_list_t *mlist);
-static bool methodListImplementsAWZ(const method_list_t *mlist);
-static void updateCustomRR_AWZ(Class cls, method_t *meth);
+static void adjustCustomFlagsForMethodChange(Class cls, method_t *meth);
static method_t *search_method_list(const method_list_t *mlist, SEL sel);
+static bool method_lists_contains_any(method_list_t **mlists, method_list_t **end,
+ SEL sels[], size_t selcount);
static void flushCaches(Class cls);
static void initializeTaggedPointerObfuscator(void);
#if SUPPORT_FIXUP
static Class realizeClassMaybeSwiftAndUnlock(Class cls, mutex_t& lock);
static Class readClass(Class cls, bool headerIsBundle, bool headerIsPreoptimized);
-static bool MetaclassNSObjectAWZSwizzled;
-static bool ClassNSObjectRRSwizzled;
+struct locstamped_category_t {
+ category_t *cat;
+ struct header_info *hi;
+};
+enum {
+ ATTACH_CLASS = 1 << 0,
+ ATTACH_METACLASS = 1 << 1,
+ ATTACH_CLASS_AND_METACLASS = 1 << 2,
+ ATTACH_EXISTING = 1 << 3,
+};
+static void attachCategories(Class cls, const struct locstamped_category_t *cats_list, uint32_t cats_count, int flags);
/***********************************************************************
**********************************************************************/
mutex_t runtimeLock;
mutex_t selLock;
+#if CONFIG_USE_CACHE_LOCK
mutex_t cacheUpdateLock;
+#endif
recursive_mutex_t loadMethodLock;
-void lock_init(void)
-{
-}
-
-
/***********************************************************************
* Class structure decoding
**********************************************************************/
* A table of all classes (and metaclasses) which have been allocated
* with objc_allocateClassPair.
**********************************************************************/
-static NXHashTable *allocatedClasses = nil;
-
-
-typedef locstamped_category_list_t category_list;
+namespace objc {
+static ExplicitInitDenseSet<Class> allocatedClasses;
+}
+/***********************************************************************
+* _firstRealizedClass
+* The root of all realized classes
+**********************************************************************/
+static Class _firstRealizedClass = nil;
/*
  Low two bits of mlist->entsize are used as the fixed-up marker.
*/
static uint32_t fixed_up_method_list = 3;
+static uint32_t uniqued_method_list = 1;
static uint32_t fixed_up_protocol = PROTOCOL_FIXED_UP_1;
+static uint32_t canonical_protocol = PROTOCOL_IS_CANONICAL;
void
disableSharedCacheOptimizations(void)
{
fixed_up_method_list = 2;
+ // It is safe to set uniqued method lists to 0 as we'll never call it unless
+ // the method list was already in need of being fixed up
+ uniqued_method_list = 0;
fixed_up_protocol = PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2;
+ // It's safe to just set canonical protocol to 0 as we'll never call
+ // clearIsCanonical() unless isCanonical() returned true, which can't happen
+ // with a 0 mask
+ canonical_protocol = 0;
+}
+
+bool method_list_t::isUniqued() const {
+ return (flags() & uniqued_method_list) != 0;
}
bool method_list_t::isFixedUp() const {
void method_list_t::setFixedUp() {
runtimeLock.assertLocked();
- assert(!isFixedUp());
+ ASSERT(!isFixedUp());
entsizeAndFlags = entsize() | fixed_up_method_list;
}
void protocol_t::setFixedUp() {
runtimeLock.assertLocked();
- assert(!isFixedUp());
+ ASSERT(!isFixedUp());
flags = (flags & ~PROTOCOL_FIXED_UP_MASK) | fixed_up_protocol;
}
+bool protocol_t::isCanonical() const {
+ return (flags & canonical_protocol) != 0;
+}
+
+void protocol_t::clearIsCanonical() {
+ runtimeLock.assertLocked();
+ ASSERT(isCanonical());
+ flags = flags & ~canonical_protocol;
+}
+
method_list_t **method_array_t::endCategoryMethodLists(Class cls)
{
}
-static void (*classCopyFixupHandler)(Class _Nonnull oldClass,
- Class _Nonnull newClass);
+using ClassCopyFixupHandler = void (*)(Class _Nonnull oldClass,
+ Class _Nonnull newClass);
+// Normally there's only one handler registered.
+static GlobalSmallVector<ClassCopyFixupHandler, 1> classCopyFixupHandlers;
void _objc_setClassCopyFixupHandler(void (* _Nonnull newFixupHandler)
(Class _Nonnull oldClass, Class _Nonnull newClass)) {
- classCopyFixupHandler = newFixupHandler;
+ mutex_locker_t lock(runtimeLock);
+
+ classCopyFixupHandlers.append(newFixupHandler);
}
static Class
bzero(swcls, sizeof(objc_class));
swcls->description = nil;
- if (classCopyFixupHandler) {
- classCopyFixupHandler(supercls, (Class)swcls);
+ for (auto handler : classCopyFixupHandlers) {
+ handler(supercls, (Class)swcls);
}
// Mark this class as Swift-enhanced.
}
-/***********************************************************************
-* unattachedCategories
-* Returns the class => categories map of unattached categories.
-* Locking: runtimeLock must be held by the caller.
-**********************************************************************/
-static NXMapTable *unattachedCategories(void)
-{
- runtimeLock.assertLocked();
-
- static NXMapTable *category_map = nil;
-
- if (category_map) return category_map;
-
- // fixme initial map size
- category_map = NXCreateMapTable(NXPtrValueMapPrototype, 16);
-
- return category_map;
-}
-
-
/***********************************************************************
* dataSegmentsContain
* Returns true if the given address lies within a data segment in any
* loaded image.
-*
-* This is optimized for use where the return value is expected to be
-* true. A call where the return value is false always results in a
-* slow linear search of all loaded images. A call where the return
-* value is fast will often be fast due to caching.
**********************************************************************/
-static bool dataSegmentsContain(const void *ptr) {
- struct Range {
- uintptr_t start, end;
- bool contains(uintptr_t ptr) {
- return start <= ptr && ptr <= end;
- }
- };
-
- // This is a really simple linear searched cache. On a cache hit,
- // the hit entry is moved to the front of the array. On a cache
- // miss where a range is successfully found on the slow path, the
- // found range is inserted at the beginning of the cache. This gives
- // us fast access to the most recently used elements, and LRU
- // eviction.
- enum { cacheCount = 16 };
- static Range cache[cacheCount];
-
- uintptr_t addr = (uintptr_t)ptr;
-
- // Special case a hit on the first entry of the cache. No
- // bookkeeping is required at all in this case.
- if (cache[0].contains(addr)) {
- return true;
- }
-
- // Search the rest of the cache.
- for (unsigned i = 1; i < cacheCount; i++) {
- if (cache[i].contains(addr)) {
- // Cache hit. Move all preceding entries down one element,
- // then place this entry at the front.
- Range r = cache[i];
- memmove(&cache[1], &cache[0], i * sizeof(cache[0]));
- cache[0] = r;
- return true;
+NEVER_INLINE
+static bool
+dataSegmentsContain(Class cls)
+{
+ uint32_t index;
+ if (objc::dataSegmentsRanges.find((uintptr_t)cls, index)) {
+ // if the class is realized (hence has a class_rw_t),
+ // memorize where we found the range
+ if (cls->isRealized()) {
+ cls->data()->witness = (uint16_t)index;
}
- }
-
- // Cache miss. Find the image header containing the given address.
- // If there isn't one, then we're definitely not in any image,
- // so return false.
- Range found = { 0, 0 };
- auto *h = (headerType *)dyld_image_header_containing_address(ptr);
- if (h == nullptr)
- return false;
-
- // Iterate over the data segments in the found image. If the address
- // lies within one, note the data segment range in `found`.
- // TODO: this is more work than we'd like to do. All we really need
- // is the full range of the image. Addresses within the TEXT segment
- // would also be acceptable for our use case. If possible, we should
- // change this to work with the full address range of the found
- // image header. Another possibility would be to use the range
- // from `h` to the end of the page containing `addr`.
- foreach_data_segment(h, [&](const segmentType *seg, intptr_t slide) {
- Range r;
- r.start = seg->vmaddr + slide;
- r.end = r.start + seg->vmsize;
- if (r.contains(addr))
- found = r;
- });
-
- if (found.start != 0) {
- memmove(&cache[1], &cache[0], (cacheCount - 1) * sizeof(cache[0]));
- cache[0] = found;
return true;
}
-
return false;
}
* Return true if the class is known to the runtime (located within the
* shared cache, within the data segment of a loaded image, or has been
* allocated with objc_allocateClassPair).
+*
+* The result of this operation is cached on the class in a "witness"
+* value that is cheaply checked in the fastpath.
**********************************************************************/
-static bool isKnownClass(Class cls) {
- // The order of conditionals here is important for speed. We want to
- // put the most common cases first, but also the fastest cases
- // first. Checking the shared region is both fast and common.
- // Checking allocatedClasses is fast, but may not be common,
- // depending on what the program is doing. Checking if data segments
- // contain the address is slow, so do it last.
- return (sharedRegionContains(cls) ||
- NXHashMember(allocatedClasses, cls) ||
- dataSegmentsContain(cls));
+ALWAYS_INLINE
+static bool
+isKnownClass(Class cls)
+{
+ if (fastpath(objc::dataSegmentsRanges.contains(cls->data()->witness, (uintptr_t)cls))) {
+ return true;
+ }
+ auto &set = objc::allocatedClasses.get();
+ return set.find(cls) != set.end() || dataSegmentsContain(cls);
}
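// The fastpath above works because dataSegmentsContain() (above) stashes the
// index of the matching data-segment range into cls->data()->witness;
// contains(witness, addr) then only re-checks that one cached range instead
// of searching every loaded image.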
* automatically adds the metaclass of the class as well.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static void addClassTableEntry(Class cls, bool addMeta = true) {
+static void
+addClassTableEntry(Class cls, bool addMeta = true)
+{
runtimeLock.assertLocked();
// This class is allowed to be a known class via the shared cache or via
// data segments, but it is not allowed to be in the dynamic table already.
- assert(!NXHashMember(allocatedClasses, cls));
+ auto &set = objc::allocatedClasses.get();
+
+ ASSERT(set.find(cls) == set.end());
if (!isKnownClass(cls))
- NXHashInsert(allocatedClasses, cls);
+ set.insert(cls);
if (addMeta)
addClassTableEntry(cls->ISA(), false);
}
* with a fatal error if the class is not known.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static void checkIsKnownClass(Class cls)
+ALWAYS_INLINE
+static void
+checkIsKnownClass(Class cls)
{
- if (!isKnownClass(cls))
+ if (slowpath(!isKnownClass(cls))) {
_objc_fatal("Attempt to use unknown class %p.", cls);
-}
-
-
-/***********************************************************************
-* addUnattachedCategoryForClass
-* Records an unattached category.
-* Locking: runtimeLock must be held by the caller.
-**********************************************************************/
-static void addUnattachedCategoryForClass(category_t *cat, Class cls,
- header_info *catHeader)
-{
- runtimeLock.assertLocked();
-
- // DO NOT use cat->cls! cls may be cat->cls->isa instead
- NXMapTable *cats = unattachedCategories();
- category_list *list;
-
- list = (category_list *)NXMapGet(cats, cls);
- if (!list) {
- list = (category_list *)
- calloc(sizeof(*list) + sizeof(list->list[0]), 1);
- } else {
- list = (category_list *)
- realloc(list, sizeof(*list) + sizeof(list->list[0]) * (list->count + 1));
}
- list->list[list->count++] = (locstamped_category_t){cat, catHeader};
- NXMapInsert(cats, cls, list);
}
-
-/***********************************************************************
-* removeUnattachedCategoryForClass
-* Removes an unattached category.
-* Locking: runtimeLock must be held by the caller.
-**********************************************************************/
-static void removeUnattachedCategoryForClass(category_t *cat, Class cls)
-{
- runtimeLock.assertLocked();
-
- // DO NOT use cat->cls! cls may be cat->cls->isa instead
- NXMapTable *cats = unattachedCategories();
- category_list *list;
-
- list = (category_list *)NXMapGet(cats, cls);
- if (!list) return;
-
- uint32_t i;
- for (i = 0; i < list->count; i++) {
- if (list->list[i].cat == cat) {
- // shift entries to preserve list order
- memmove(&list->list[i], &list->list[i+1],
- (list->count-i-1) * sizeof(list->list[i]));
- list->count--;
- return;
- }
- }
-}
-
-
-/***********************************************************************
-* unattachedCategoriesForClass
-* Returns the list of unattached categories for a class, and
-* deletes them from the list.
-* The result must be freed by the caller.
-* Locking: runtimeLock must be held by the caller.
-**********************************************************************/
-static category_list *
-unattachedCategoriesForClass(Class cls, bool realizing)
-{
- runtimeLock.assertLocked();
- return (category_list *)NXMapRemove(unattachedCategories(), cls);
-}
-
-
-/***********************************************************************
-* removeAllUnattachedCategoriesForClass
-* Deletes all unattached categories (loaded or not) for a class.
-* Locking: runtimeLock must be held by the caller.
-**********************************************************************/
-static void removeAllUnattachedCategoriesForClass(Class cls)
-{
- runtimeLock.assertLocked();
-
- void *list = NXMapRemove(unattachedCategories(), cls);
- if (list) free(list);
-}
-
-
/***********************************************************************
* classNSObject
* Returns class NSObject.
return (Class)&OBJC_CLASS_$_NSObject;
}
+static Class metaclassNSObject(void)
+{
+ extern objc_class OBJC_METACLASS_$_NSObject;
+ return (Class)&OBJC_METACLASS_$_NSObject;
+}
/***********************************************************************
* printReplacements
* Warn about methods from cats that override other methods in cats or cls.
* Assumes no methods from cats have been added to cls yet.
**********************************************************************/
-static void printReplacements(Class cls, category_list *cats)
+__attribute__((cold, noinline))
+static void
+printReplacements(Class cls, const locstamped_category_t *cats_list, uint32_t cats_count)
{
uint32_t c;
bool isMeta = cls->isMetaClass();
- if (!cats) return;
-
// Newest categories are LAST in cats
// Later categories override earlier ones.
- for (c = 0; c < cats->count; c++) {
- category_t *cat = cats->list[c].cat;
+ for (c = 0; c < cats_count; c++) {
+ category_t *cat = cats_list[c].cat;
method_list_t *mlist = cat->methodsForMeta(isMeta);
if (!mlist) continue;
// Look for method in earlier categories
for (uint32_t c2 = 0; c2 < c; c2++) {
- category_t *cat2 = cats->list[c2].cat;
+ category_t *cat2 = cats_list[c2].cat;
const method_list_t *mlist2 = cat2->methodsForMeta(isMeta);
if (!mlist2) continue;
}
+/***********************************************************************
+* unreasonableClassCount
+* Provides an upper bound for any iteration of classes,
+* to prevent spins when runtime metadata is corrupted.
+**********************************************************************/
+static unsigned unreasonableClassCount()
+{
+ runtimeLock.assertLocked();
+
+ int base = NXCountMapTable(gdb_objc_realized_classes) +
+ getPreoptimizedClassUnreasonableCount();
+
+ // Provide lots of slack here. Some iterations touch metaclasses too.
+ // Some iterations backtrack (like realized class iteration).
+ // We don't need an efficient bound, merely one that prevents spins.
+ return (base + 1) * 16;
+}
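Illustrative arithmetic only: with 5,000 named classes outside the shared cache and a preoptimized bound of 20,000, the limit is (5,000 + 20,000 + 1) * 16 = 400,016 iterations; far larger than any legitimate walk, yet small enough to trap a corrupted subclass/sibling cycle promptly.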
+
+
+/***********************************************************************
+* Class enumerators
+* The passed-in block returns `false` if that class's subclasses can be skipped.
+* Locking: runtimeLock must be held by the caller.
+**********************************************************************/
+static inline void
+foreach_realized_class_and_subclass_2(Class top, unsigned &count,
+ bool skip_metaclass,
+ bool (^code)(Class) __attribute((noescape)))
+{
+ Class cls = top;
+
+ runtimeLock.assertLocked();
+ ASSERT(top);
+
+ while (1) {
+ if (--count == 0) {
+ _objc_fatal("Memory corruption in class list.");
+ }
+
+ bool skip_subclasses;
+
+ if (skip_metaclass && cls->isMetaClass()) {
+ skip_subclasses = true;
+ } else {
+ skip_subclasses = !code(cls);
+ }
+
+ if (!skip_subclasses && cls->data()->firstSubclass) {
+ cls = cls->data()->firstSubclass;
+ } else {
+ while (!cls->data()->nextSiblingClass && cls != top) {
+ cls = cls->superclass;
+ if (--count == 0) {
+ _objc_fatal("Memory corruption in class list.");
+ }
+ }
+ if (cls == top) break;
+ cls = cls->data()->nextSiblingClass;
+ }
+ }
+}
+
+// Enumerates a class and all of its realized subclasses.
+static void
+foreach_realized_class_and_subclass(Class top, bool (^code)(Class) __attribute((noescape)))
+{
+ unsigned int count = unreasonableClassCount();
+
+ foreach_realized_class_and_subclass_2(top, count, false, code);
+}
+
+// Enumerates all realized classes and metaclasses.
+static void
+foreach_realized_class_and_metaclass(bool (^code)(Class) __attribute((noescape)))
+{
+ unsigned int count = unreasonableClassCount();
+
+ for (Class top = _firstRealizedClass;
+ top != nil;
+ top = top->data()->nextSiblingClass)
+ {
+ foreach_realized_class_and_subclass_2(top, count, false, code);
+ }
+}
+
+// Enumerates all realized classes (ignoring metaclasses).
+static void
+foreach_realized_class(bool (^code)(Class) __attribute((noescape)))
+{
+ unsigned int count = unreasonableClassCount();
+
+ for (Class top = _firstRealizedClass;
+ top != nil;
+ top = top->data()->nextSiblingClass)
+ {
+ foreach_realized_class_and_subclass_2(top, count, true, code);
+ }
+}
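A minimal usage sketch (not from the diff) for the enumerators above; the helper name is hypothetical, and the caller must hold runtimeLock as the header comment requires:

static void logRealizedClasses(void)
{
    mutex_locker_t lock(runtimeLock);
    foreach_realized_class([](Class c) {
        _objc_inform("realized: %s", c->nameForLogging());
        return true;   // returning false would prune c's subclasses from the walk
    });
}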
+
+
+/***********************************************************************
+ * Method Scanners / Optimization tracking
+ * Implementation of scanning for various implementations of methods.
+ **********************************************************************/
+
+namespace objc {
+
+enum SelectorBundle {
+ AWZ,
+ RR,
+ Core,
+};
+
+namespace scanner {
+
+// The current state of NSObject swizzling for every scanner
+//
+// It allows for cheap checks of global swizzles, and also remembers
+// IMP swizzling that happens before NSObject is initialized, which
+// setInitialized() would otherwise miss.
+//
+// Every pair of bits describes a SelectorBundle.
+// even bits: is NSObject class swizzled for this bundle
+// odd bits: is NSObject meta class swizzled for this bundle
+static uintptr_t NSObjectSwizzledMask;
+
+static ALWAYS_INLINE uintptr_t
+swizzlingBit(SelectorBundle bundle, bool isMeta)
+{
+ return 1UL << (2 * bundle + isMeta);
+}
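With the enum order declared above (AWZ = 0, RR = 1, Core = 2), the layout works out as:

// swizzlingBit(AWZ,  /*isMeta*/false) == 1 << 0
// swizzlingBit(AWZ,  /*isMeta*/true)  == 1 << 1
// swizzlingBit(RR,   /*isMeta*/false) == 1 << 2
// swizzlingBit(Core, /*isMeta*/true)  == 1 << 5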
+
+static void __attribute__((cold, noinline))
+printCustom(Class cls, SelectorBundle bundle, bool inherited)
+{
+ static char const * const SelectorBundleName[] = {
+ [AWZ] = "CUSTOM AWZ",
+ [RR] = "CUSTOM RR",
+ [Core] = "CUSTOM Core",
+ };
+
+ _objc_inform("%s: %s%s%s", SelectorBundleName[bundle],
+ cls->nameForLogging(),
+ cls->isMetaClass() ? " (meta)" : "",
+ inherited ? " (inherited)" : "");
+}
+
+enum class Scope { Instances, Classes, Both };
+
+template <typename Traits, SelectorBundle Bundle, bool &ShouldPrint, Scope Domain = Scope::Both>
+class Mixin {
+
+ // work around compiler being broken with templates using Class/objc_class,
+ // probably some weird confusion with Class being builtin
+ ALWAYS_INLINE static objc_class *as_objc_class(Class cls) {
+ return (objc_class *)cls;
+ }
+
+ static void
+ setCustomRecursively(Class cls, bool inherited = false)
+ {
+ foreach_realized_class_and_subclass(cls, [=](Class c){
+ if (c != cls && !as_objc_class(c)->isInitialized()) {
+ // Subclass not yet initialized. Wait for setInitialized() to do it
+ return false;
+ }
+ if (Traits::isCustom(c)) {
+ return false;
+ }
+ Traits::setCustom(c);
+ if (ShouldPrint) {
+ printCustom(cls, Bundle, inherited || c != cls);
+ }
+ return true;
+ });
+ }
+
+ static bool
+ isNSObjectSwizzled(bool isMeta)
+ {
+ return NSObjectSwizzledMask & swizzlingBit(Bundle, isMeta);
+ }
+
+ static void
+ setNSObjectSwizzled(Class NSOClass, bool isMeta)
+ {
+ NSObjectSwizzledMask |= swizzlingBit(Bundle, isMeta);
+ if (as_objc_class(NSOClass)->isInitialized()) {
+ setCustomRecursively(NSOClass);
+ }
+ }
+
+ static void
+ scanChangedMethodForUnknownClass(const method_t *meth)
+ {
+ Class cls;
+
+ cls = classNSObject();
+ if (Domain != Scope::Classes && !isNSObjectSwizzled(NO)) {
+ for (const auto &meth2: as_objc_class(cls)->data()->methods) {
+ if (meth == &meth2) {
+ setNSObjectSwizzled(cls, NO);
+ break;
+ }
+ }
+ }
+
+ cls = metaclassNSObject();
+ if (Domain != Scope::Instances && !isNSObjectSwizzled(YES)) {
+ for (const auto &meth2: as_objc_class(cls)->data()->methods) {
+ if (meth == &meth2) {
+ setNSObjectSwizzled(cls, YES);
+ break;
+ }
+ }
+ }
+ }
+
+ static void
+ scanAddedClassImpl(Class cls, bool isMeta)
+ {
+ Class NSOClass = (isMeta ? metaclassNSObject() : classNSObject());
+ bool setCustom = NO, inherited = NO;
+
+ if (isNSObjectSwizzled(isMeta)) {
+ setCustom = YES;
+ } else if (cls == NSOClass) {
+ // NSObject is default but we need to check categories
+ auto &methods = as_objc_class(cls)->data()->methods;
+ setCustom = Traits::scanMethodLists(methods.beginCategoryMethodLists(),
+ methods.endCategoryMethodLists(cls));
+ } else if (!isMeta && !as_objc_class(cls)->superclass) {
+ // Custom Root class
+ setCustom = YES;
+ } else if (Traits::isCustom(as_objc_class(cls)->superclass)) {
+ // Superclass is custom, therefore we are too.
+ setCustom = YES;
+ inherited = YES;
+ } else {
+ // Not NSObject.
+ auto &methods = as_objc_class(cls)->data()->methods;
+ setCustom = Traits::scanMethodLists(methods.beginLists(),
+ methods.endLists());
+ }
+ if (slowpath(setCustom)) {
+ if (ShouldPrint) printCustom(cls, Bundle, inherited);
+ } else {
+ Traits::setDefault(cls);
+ }
+ }
+
+public:
+ // Scan a class that is about to be marked Initialized for particular
+ // bundles of selectors, and mark the class and its children
+ // accordingly.
+ //
+ // This also handles inheriting properties from its superclass.
+ //
+ // Caller: objc_class::setInitialized()
+ static void
+ scanInitializedClass(Class cls, Class metacls)
+ {
+ if (Domain != Scope::Classes) {
+ scanAddedClassImpl(cls, false);
+ }
+ if (Domain != Scope::Instances) {
+ scanAddedClassImpl(metacls, true);
+ }
+ }
+
+ // Inherit various properties from the superclass when a class
+ // is being added to the graph.
+ //
+ // Caller: addSubclass()
+ static void
+ scanAddedSubClass(Class subcls, Class supercls)
+ {
+ if (slowpath(Traits::isCustom(supercls) && !Traits::isCustom(subcls))) {
+ setCustomRecursively(subcls, true);
+ }
+ }
+
+ // Scan Method lists for selectors that would override things
+ // in a Bundle.
+ //
+ // This is used to detect when categories that override problematic
+ // selectors are injected into a class after it has been initialized.
+ //
+ // Caller: prepareMethodLists()
+ static void
+ scanAddedMethodLists(Class cls, method_list_t **mlists, int count)
+ {
+ if (slowpath(Traits::isCustom(cls))) {
+ return;
+ }
+ if (slowpath(Traits::scanMethodLists(mlists, mlists + count))) {
+ setCustomRecursively(cls);
+ }
+ }
+
+ // Handle IMP Swizzling (the IMP for an existing method being changed).
+ //
+ // In almost all cases, IMP swizzling does not affect custom bits.
+ // Custom search will already find the method whether or not
+ // it is swizzled, so it does not transition from non-custom to custom.
+ //
+ // The only cases where IMP swizzling can affect the custom bits is
+ // if the swizzled method is one of the methods that is assumed to be
+ // non-custom. These special cases are listed in setInitialized().
+ // We look for such cases here.
+ //
+ // Caller: Swizzling methods via adjustCustomFlagsForMethodChange()
+ static void
+ scanChangedMethod(Class cls, const method_t *meth)
+ {
+ if (fastpath(!Traits::isInterestingSelector(meth->name))) {
+ return;
+ }
+
+ if (cls) {
+ bool isMeta = as_objc_class(cls)->isMetaClass();
+ if (isMeta && Domain != Scope::Instances) {
+ if (cls == metaclassNSObject() && !isNSObjectSwizzled(isMeta)) {
+ setNSObjectSwizzled(cls, isMeta);
+ }
+ }
+ if (!isMeta && Domain != Scope::Classes) {
+ if (cls == classNSObject() && !isNSObjectSwizzled(isMeta)) {
+ setNSObjectSwizzled(cls, isMeta);
+ }
+ }
+ } else {
+ // We're called from method_exchangeImplementations; only the NSObject
+ // class and metaclass may be problematic (exchanging the default
+ // builtin IMP of an interesting selector is a swizzle that may flip
+ // our scanned property; for other classes, the previous value had
+ // already flipped the property).
+ //
+ // However, as we don't know the class, we need to scan all of
+ // NSObject class and metaclass methods (this is SLOW).
+ scanChangedMethodForUnknownClass(meth);
+ }
+ }
+};
+
+} // namespace scanner
+
+// AWZ methods: +alloc / +allocWithZone:
+struct AWZScanner : scanner::Mixin<AWZScanner, AWZ, PrintCustomAWZ, scanner::Scope::Classes> {
+ static bool isCustom(Class cls) {
+ return cls->hasCustomAWZ();
+ }
+ static void setCustom(Class cls) {
+ cls->setHasCustomAWZ();
+ }
+ static void setDefault(Class cls) {
+ cls->setHasDefaultAWZ();
+ }
+ static bool isInterestingSelector(SEL sel) {
+ return sel == @selector(alloc) || sel == @selector(allocWithZone:);
+ }
+ static bool scanMethodLists(method_list_t **mlists, method_list_t **end) {
+ SEL sels[2] = { @selector(alloc), @selector(allocWithZone:), };
+ return method_lists_contains_any(mlists, end, sels, 2);
+ }
+};
+
+// Retain/Release methods that are extremely rarely overridden
+//
+// retain/release/autorelease/retainCount/
+// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
+struct RRScanner : scanner::Mixin<RRScanner, RR, PrintCustomRR
+#if !SUPPORT_NONPOINTER_ISA
+, scanner::Scope::Instances
+#endif
+> {
+ static bool isCustom(Class cls) {
+ return cls->hasCustomRR();
+ }
+ static void setCustom(Class cls) {
+ cls->setHasCustomRR();
+ }
+ static void setDefault(Class cls) {
+ cls->setHasDefaultRR();
+ }
+ static bool isInterestingSelector(SEL sel) {
+ return sel == @selector(retain) ||
+ sel == @selector(release) ||
+ sel == @selector(autorelease) ||
+ sel == @selector(_tryRetain) ||
+ sel == @selector(_isDeallocating) ||
+ sel == @selector(retainCount) ||
+ sel == @selector(allowsWeakReference) ||
+ sel == @selector(retainWeakReference);
+ }
+ static bool scanMethodLists(method_list_t **mlists, method_list_t **end) {
+ SEL sels[8] = {
+ @selector(retain),
+ @selector(release),
+ @selector(autorelease),
+ @selector(_tryRetain),
+ @selector(_isDeallocating),
+ @selector(retainCount),
+ @selector(allowsWeakReference),
+ @selector(retainWeakReference),
+ };
+ return method_lists_contains_any(mlists, end, sels, 8);
+ }
+};
+
+// Core NSObject methods that are extremely rarely overridden
+//
+// +new, ±class, ±self, ±isKindOfClass:, ±respondsToSelector:
+struct CoreScanner : scanner::Mixin<CoreScanner, Core, PrintCustomCore> {
+ static bool isCustom(Class cls) {
+ return cls->hasCustomCore();
+ }
+ static void setCustom(Class cls) {
+ cls->setHasCustomCore();
+ }
+ static void setDefault(Class cls) {
+ cls->setHasDefaultCore();
+ }
+ static bool isInterestingSelector(SEL sel) {
+ return sel == @selector(new) ||
+ sel == @selector(self) ||
+ sel == @selector(class) ||
+ sel == @selector(isKindOfClass:) ||
+ sel == @selector(respondsToSelector:);
+ }
+ static bool scanMethodLists(method_list_t **mlists, method_list_t **end) {
+ SEL sels[5] = {
+ @selector(new),
+ @selector(self),
+ @selector(class),
+ @selector(isKindOfClass:),
+ @selector(respondsToSelector:)
+ };
+ return method_lists_contains_any(mlists, end, sels, 5);
+ }
+};
+
+class category_list : nocopy_t {
+ union {
+ locstamped_category_t lc;
+ struct {
+ locstamped_category_t *array;
+ // these bitfields alias locstamped_category_t::hi, a pointer whose
+ // alignment leaves the low bit free for is_array
+ uint32_t is_array : 1;
+ uint32_t count : 31;
+ uint32_t size : 32;
+ };
+ } _u;
+
+public:
+ category_list() : _u{{nullptr, nullptr}} { }
+ category_list(locstamped_category_t lc) : _u{{lc}} { }
+ category_list(category_list &&other) : category_list() {
+ std::swap(_u, other._u);
+ }
+ ~category_list()
+ {
+ if (_u.is_array) {
+ free(_u.array);
+ }
+ }
+
+ uint32_t count() const
+ {
+ if (_u.is_array) return _u.count;
+ return _u.lc.cat ? 1 : 0;
+ }
+
+ uint32_t arrayByteSize(uint32_t size) const
+ {
+ return sizeof(locstamped_category_t) * size;
+ }
+
+ const locstamped_category_t *array() const
+ {
+ return _u.is_array ? _u.array : &_u.lc;
+ }
+
+ void append(locstamped_category_t lc)
+ {
+ if (_u.is_array) {
+ if (_u.count == _u.size) {
+ // Have a typical malloc growth:
+ // - size < 8: grow by 2
+ // - size < 16: grow by 4
+ // - size < 32: grow by 8
+ // ... etc
+ _u.size += _u.size < 8 ? 2 : 1 << (fls(_u.size) - 2);
+ _u.array = (locstamped_category_t *)reallocf(_u.array, arrayByteSize(_u.size));
+ }
+ _u.array[_u.count++] = lc;
+ } else if (_u.lc.cat == NULL) {
+ _u.lc = lc;
+ } else {
+ locstamped_category_t *arr = (locstamped_category_t *)malloc(arrayByteSize(2));
+ arr[0] = _u.lc;
+ arr[1] = lc;
+
+ _u.array = arr;
+ _u.is_array = true;
+ _u.count = 2;
+ _u.size = 2;
+ }
+ }
+
+ void erase(category_t *cat)
+ {
+ if (_u.is_array) {
+ for (int i = 0; i < _u.count; i++) {
+ if (_u.array[i].cat == cat) {
+ // shift entries to preserve list order
+ memmove(&_u.array[i], &_u.array[i+1], arrayByteSize(_u.count - i - 1));
+ return;
+ }
+ }
+ } else if (_u.lc.cat == cat) {
+ _u.lc.cat = NULL;
+ _u.lc.hi = NULL;
+ }
+ }
+};
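A minimal sketch (not from the diff) of the inline-versus-array behaviour; the wrapper function, cat1, cat2 and hdr are hypothetical:

static void addTwo(category_t *cat1, category_t *cat2, header_info *hdr)
{
    objc::category_list list;
    list.append({cat1, hdr});   // first entry stored inline in _u.lc, no allocation
    list.append({cat2, hdr});   // second entry moves both into a malloc'd array of two
    const locstamped_category_t *entries = list.array();  // oldest first; list.count() == 2
    (void)entries;
}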
+
+class UnattachedCategories : public ExplicitInitDenseMap<Class, category_list>
+{
+public:
+ void addForClass(locstamped_category_t lc, Class cls)
+ {
+ runtimeLock.assertLocked();
+
+ if (slowpath(PrintConnecting)) {
+ _objc_inform("CLASS: found category %c%s(%s)",
+ cls->isMetaClass() ? '+' : '-',
+ cls->nameForLogging(), lc.cat->name);
+ }
+
+ auto result = get().try_emplace(cls, lc);
+ if (!result.second) {
+ result.first->second.append(lc);
+ }
+ }
+
+ void attachToClass(Class cls, Class previously, int flags)
+ {
+ runtimeLock.assertLocked();
+ ASSERT((flags & ATTACH_CLASS) ||
+ (flags & ATTACH_METACLASS) ||
+ (flags & ATTACH_CLASS_AND_METACLASS));
+
+ auto &map = get();
+ auto it = map.find(previously);
+
+ if (it != map.end()) {
+ category_list &list = it->second;
+ if (flags & ATTACH_CLASS_AND_METACLASS) {
+ int otherFlags = flags & ~ATTACH_CLASS_AND_METACLASS;
+ attachCategories(cls, list.array(), list.count(), otherFlags | ATTACH_CLASS);
+ attachCategories(cls->ISA(), list.array(), list.count(), otherFlags | ATTACH_METACLASS);
+ } else {
+ attachCategories(cls, list.array(), list.count(), flags);
+ }
+ map.erase(it);
+ }
+ }
+
+ void eraseCategoryForClass(category_t *cat, Class cls)
+ {
+ runtimeLock.assertLocked();
+
+ auto &map = get();
+ auto it = map.find(cls);
+ if (it != map.end()) {
+ category_list &list = it->second;
+ list.erase(cat);
+ if (list.count() == 0) {
+ map.erase(it);
+ }
+ }
+ }
+
+ void eraseClass(Class cls)
+ {
+ runtimeLock.assertLocked();
+
+ get().erase(cls);
+ }
+};
+
+static UnattachedCategories unattachedCategories;
+
+} // namespace objc
+
static bool isBundleClass(Class cls)
{
return cls->data()->ro->flags & RO_FROM_BUNDLE;
fixupMethodList(method_list_t *mlist, bool bundleCopy, bool sort)
{
runtimeLock.assertLocked();
- assert(!mlist->isFixedUp());
+ ASSERT(!mlist->isFixedUp());
// fixme lock less in attachMethodLists ?
- {
+ // dyld3 may have already uniqued, but not sorted, the list
+ if (!mlist->isUniqued()) {
mutex_locker_t lock(selLock);
// Unique selectors in list.
static void
-prepareMethodLists(Class cls, method_list_t **addedLists, int addedCount,
+prepareMethodLists(Class cls, method_list_t **addedLists, int addedCount,
bool baseMethods, bool methodsFromBundle)
{
runtimeLock.assertLocked();
if (addedCount == 0) return;
- // Don't scan redundantly
- bool scanForCustomRR = !cls->hasCustomRR();
- bool scanForCustomAWZ = !cls->hasCustomAWZ();
-
- // There exist RR/AWZ special cases for some class's base methods.
- // But this code should never need to scan base methods for RR/AWZ:
- // default RR/AWZ cannot be set before setInitialized().
+ // There exist RR/AWZ/Core special cases for some class's base methods.
+ // But this code should never need to scan base methods for RR/AWZ/Core:
+ // default RR/AWZ/Core cannot be set before setInitialized().
// Therefore we need not handle any special cases here.
if (baseMethods) {
- assert(!scanForCustomRR && !scanForCustomAWZ);
+ ASSERT(cls->hasCustomAWZ() && cls->hasCustomRR() && cls->hasCustomCore());
}
// Add method lists to array.
for (int i = 0; i < addedCount; i++) {
method_list_t *mlist = addedLists[i];
- assert(mlist);
+ ASSERT(mlist);
// Fixup selectors if necessary
if (!mlist->isFixedUp()) {
fixupMethodList(mlist, methodsFromBundle, true/*sort*/);
}
+ }
- // Scan for method implementations tracked by the class's flags
- if (scanForCustomRR && methodListImplementsRR(mlist)) {
- cls->setHasCustomRR();
- scanForCustomRR = false;
- }
- if (scanForCustomAWZ && methodListImplementsAWZ(mlist)) {
- cls->setHasCustomAWZ();
- scanForCustomAWZ = false;
- }
+ // If the class is initialized, then scan for method implementations
+ // tracked by the class's flags. If it's not initialized yet,
+ // then objc_class::setInitialized() will take care of it.
+ if (cls->isInitialized()) {
+ objc::AWZScanner::scanAddedMethodLists(cls, addedLists, addedCount);
+ objc::RRScanner::scanAddedMethodLists(cls, addedLists, addedCount);
+ objc::CoreScanner::scanAddedMethodLists(cls, addedLists, addedCount);
}
}
// Attach method lists and properties and protocols from categories to a class.
// Assumes the categories in cats are all loaded and sorted by load order,
// oldest categories first.
-static void
-attachCategories(Class cls, category_list *cats, bool flush_caches)
-{
- if (!cats) return;
- if (PrintReplacedMethods) printReplacements(cls, cats);
-
- bool isMeta = cls->isMetaClass();
-
- // fixme rearrange to remove these intermediate allocations
- method_list_t **mlists = (method_list_t **)
- malloc(cats->count * sizeof(*mlists));
- property_list_t **proplists = (property_list_t **)
- malloc(cats->count * sizeof(*proplists));
- protocol_list_t **protolists = (protocol_list_t **)
- malloc(cats->count * sizeof(*protolists));
-
- // Count backwards through cats to get newest categories first
- int mcount = 0;
- int propcount = 0;
- int protocount = 0;
- int i = cats->count;
+static void
+attachCategories(Class cls, const locstamped_category_t *cats_list, uint32_t cats_count,
+ int flags)
+{
+ if (slowpath(PrintReplacedMethods)) {
+ printReplacements(cls, cats_list, cats_count);
+ }
+ if (slowpath(PrintConnecting)) {
+ _objc_inform("CLASS: attaching %d categories to%s class '%s'%s",
+ cats_count, (flags & ATTACH_EXISTING) ? " existing" : "",
+ cls->nameForLogging(), (flags & ATTACH_METACLASS) ? " (meta)" : "");
+ }
+
+ /*
+ * Only a few classes have more than 64 categories during launch.
+ * This uses a little stack, and avoids malloc.
+ *
+ * Categories must be added in the proper order, which is back
+ * to front. To do that with the chunking, we iterate cats_list
+ * from front to back, build up the local buffers backwards,
+ * and call attachLists on the chunks. attachLists prepends the
+ * lists, so the final result is in the expected order.
+ */
+ constexpr uint32_t ATTACH_BUFSIZ = 64;
+ method_list_t *mlists[ATTACH_BUFSIZ];
+ property_list_t *proplists[ATTACH_BUFSIZ];
+ protocol_list_t *protolists[ATTACH_BUFSIZ];
+
+ uint32_t mcount = 0;
+ uint32_t propcount = 0;
+ uint32_t protocount = 0;
bool fromBundle = NO;
- while (i--) {
- auto& entry = cats->list[i];
+ bool isMeta = (flags & ATTACH_METACLASS);
+ auto rw = cls->data();
+
+ for (uint32_t i = 0; i < cats_count; i++) {
+ auto& entry = cats_list[i];
method_list_t *mlist = entry.cat->methodsForMeta(isMeta);
if (mlist) {
- mlists[mcount++] = mlist;
+ if (mcount == ATTACH_BUFSIZ) {
+ prepareMethodLists(cls, mlists, mcount, NO, fromBundle);
+ rw->methods.attachLists(mlists, mcount);
+ mcount = 0;
+ }
+ mlists[ATTACH_BUFSIZ - ++mcount] = mlist;
fromBundle |= entry.hi->isBundle();
}
- property_list_t *proplist =
+ property_list_t *proplist =
entry.cat->propertiesForMeta(isMeta, entry.hi);
if (proplist) {
- proplists[propcount++] = proplist;
+ if (propcount == ATTACH_BUFSIZ) {
+ rw->properties.attachLists(proplists, propcount);
+ propcount = 0;
+ }
+ proplists[ATTACH_BUFSIZ - ++propcount] = proplist;
}
- protocol_list_t *protolist = entry.cat->protocols;
+ protocol_list_t *protolist = entry.cat->protocolsForMeta(isMeta);
if (protolist) {
- protolists[protocount++] = protolist;
+ if (protocount == ATTACH_BUFSIZ) {
+ rw->protocols.attachLists(protolists, protocount);
+ protocount = 0;
+ }
+ protolists[ATTACH_BUFSIZ - ++protocount] = protolist;
}
}
- auto rw = cls->data();
-
- prepareMethodLists(cls, mlists, mcount, NO, fromBundle);
- rw->methods.attachLists(mlists, mcount);
- free(mlists);
- if (flush_caches && mcount > 0) flushCaches(cls);
+ if (mcount > 0) {
+ prepareMethodLists(cls, mlists + ATTACH_BUFSIZ - mcount, mcount, NO, fromBundle);
+ rw->methods.attachLists(mlists + ATTACH_BUFSIZ - mcount, mcount);
+ if (flags & ATTACH_EXISTING) flushCaches(cls);
+ }
- rw->properties.attachLists(proplists, propcount);
- free(proplists);
+ rw->properties.attachLists(proplists + ATTACH_BUFSIZ - propcount, propcount);
- rw->protocols.attachLists(protolists, protocount);
- free(protolists);
+ rw->protocols.attachLists(protolists + ATTACH_BUFSIZ - protocount, protocount);
}
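To make the back-to-front buffering concrete: with three categories (oldest first) contributing method lists A, B and C, the loop fills mlists[63] = A, mlists[62] = B, mlists[61] = C, and attachLists(mlists + 64 - 3, 3) receives {C, B, A}; the newest category's methods are therefore searched first, matching the order the removed code produced by counting backwards through cats.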
* Attaches any outstanding categories.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void methodizeClass(Class cls)
+static void methodizeClass(Class cls, Class previously)
{
runtimeLock.assertLocked();
// them already. These apply before category replacements.
if (cls->isRootMetaclass()) {
// root metaclass
- addMethod(cls, SEL_initialize, (IMP)&objc_noop_imp, "", NO);
+ addMethod(cls, @selector(initialize), (IMP)&objc_noop_imp, "", NO);
}
// Attach categories.
- category_list *cats = unattachedCategoriesForClass(cls, true /*realizing*/);
- attachCategories(cls, cats, false /*don't flush caches*/);
-
- if (PrintConnecting) {
- if (cats) {
- for (uint32_t i = 0; i < cats->count; i++) {
- _objc_inform("CLASS: attached category %c%s(%s)",
- isMeta ? '+' : '-',
- cls->nameForLogging(), cats->list[i].cat->name);
- }
+ if (previously) {
+ if (isMeta) {
+ objc::unattachedCategories.attachToClass(cls, previously,
+ ATTACH_METACLASS);
+ } else {
+ // When a class relocates, categories with class methods
+ // may be registered on the class itself rather than on
+ // the metaclass. Tell attachToClass to look for those.
+ objc::unattachedCategories.attachToClass(cls, previously,
+ ATTACH_CLASS_AND_METACLASS);
}
}
-
- if (cats) free(cats);
+ objc::unattachedCategories.attachToClass(cls, cls,
+ isMeta ? ATTACH_METACLASS : ATTACH_CLASS);
#if DEBUG
// Debug: sanity-check all SELs; log method list contents
_objc_inform("METHOD %c[%s %s]", isMeta ? '+' : '-',
cls->nameForLogging(), sel_getName(meth.name));
}
- assert(sel_registerName(sel_getName(meth.name)) == meth.name);
+ ASSERT(sel_registerName(sel_getName(meth.name)) == meth.name);
}
#endif
}
-/***********************************************************************
-* remethodizeClass
-* Attach outstanding categories to an existing class.
-* Fixes up cls's method list, protocol list, and property list.
-* Updates method caches for cls and its subclasses.
-* Locking: runtimeLock must be held by the caller
-**********************************************************************/
-static void remethodizeClass(Class cls)
-{
- category_list *cats;
- bool isMeta;
-
- runtimeLock.assertLocked();
-
- isMeta = cls->isMetaClass();
-
- // Re-methodizing: check for more categories
- if ((cats = unattachedCategoriesForClass(cls, false/*not realizing*/))) {
- if (PrintConnecting) {
- _objc_inform("CLASS: attaching categories to class '%s' %s",
- cls->nameForLogging(), isMeta ? "(meta)" : "");
- }
-
- attachCategories(cls, cats, true /*flush caches*/);
- free(cats);
- }
-}
-
-
/***********************************************************************
* nonMetaClasses
* Returns the secondary metaclass => class map
void *old;
old = NXMapInsert(nonMetaClasses(), cls->ISA(), cls);
- assert(!cls->isMetaClassMaybeUnrealized());
- assert(cls->ISA()->isMetaClassMaybeUnrealized());
- assert(!old);
+ ASSERT(!cls->isMetaClassMaybeUnrealized());
+ ASSERT(cls->ISA()->isMetaClassMaybeUnrealized());
+ ASSERT(!old);
}
// This is a misnomer: gdb_objc_realized_classes is actually a list of
// named classes not in the dyld shared cache, whether realized or not.
NXMapTable *gdb_objc_realized_classes; // exported for debuggers in objc-gdb.h
+uintptr_t objc_debug_realized_class_generation_count;
static Class getClass_impl(const char *name)
{
runtimeLock.assertLocked();
// allocated in _read_images
- assert(gdb_objc_realized_classes);
+ ASSERT(gdb_objc_realized_classes);
// Try runtime-allocated table
Class result = (Class)NXMapGet(gdb_objc_realized_classes, name);
if (result) return result;
- // Try table from dyld shared cache
+ // Try table from dyld shared cache.
+ // Note we do this last to handle the case where we dlopen'ed a shared cache
+ // dylib with duplicates of classes already present in the main executable.
+ // In that case, we put the class from the main executable in
+ // gdb_objc_realized_classes and want to check that before considering any
+ // newly loaded shared cache binaries.
return getPreoptimizedClass(name);
}
} else {
NXMapInsert(gdb_objc_realized_classes, name, cls);
}
- assert(!(cls->data()->flags & RO_META));
+ ASSERT(!(cls->data()->flags & RO_META));
// wrong: constructed classes are already realized when they get here
- // assert(!cls->isRealized());
+ // ASSERT(!cls->isRealized());
}
static void removeNamedClass(Class cls, const char *name)
{
runtimeLock.assertLocked();
- assert(!(cls->data()->flags & RO_META));
+ ASSERT(!(cls->data()->flags & RO_META));
if (cls == NXMapGet(gdb_objc_realized_classes, name)) {
NXMapRemove(gdb_objc_realized_classes, name);
} else {
}
-/***********************************************************************
-* unreasonableClassCount
-* Provides an upper bound for any iteration of classes,
-* to prevent spins when runtime metadata is corrupted.
-**********************************************************************/
-unsigned unreasonableClassCount()
-{
- runtimeLock.assertLocked();
-
- int base = NXCountMapTable(gdb_objc_realized_classes) +
- getPreoptimizedClassUnreasonableCount();
-
- // Provide lots of slack here. Some iterations touch metaclasses too.
- // Some iterations backtrack (like realized class iteration).
- // We don't need an efficient bound, merely one that prevents spins.
- return (base + 1) * 16;
-}
-
-
/***********************************************************************
* futureNamedClasses
* Returns the classname => future class map for unrealized future classes.
cls->data()->flags = RO_FUTURE;
old = NXMapKeyCopyingInsert(futureNamedClasses(), name, cls);
- assert(!old);
+ ASSERT(!old);
}
* Returns the oldClass => nil map for ignored weak-linked classes.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
-static NXMapTable *remappedClasses(bool create)
+static objc::DenseMap<Class, Class> *remappedClasses(bool create)
{
- static NXMapTable *remapped_class_map = nil;
+ static objc::LazyInitDenseMap<Class, Class> remapped_class_map;
runtimeLock.assertLocked();
- if (remapped_class_map) return remapped_class_map;
- if (!create) return nil;
-
- // remapped_class_map is big enough to hold CF's classes and a few others
- INIT_ONCE_PTR(remapped_class_map,
- NXCreateMapTable(NXPtrValueMapPrototype, 32),
- NXFreeMapTable(v));
-
- return remapped_class_map;
+ // start big enough to hold CF's classes and a few others
+ return remapped_class_map.get(create, 32);
}
bool result = (remappedClasses(NO) == nil);
#if DEBUG
// Catch construction of an empty table, which defeats optimization.
- NXMapTable *map = remappedClasses(NO);
- if (map) assert(NXCountMapTable(map) > 0);
+ auto *map = remappedClasses(NO);
+ if (map) ASSERT(map->size() > 0);
#endif
return result;
}
(void*)newcls, (void*)oldcls, oldcls->nameForLogging());
}
- void *old;
- old = NXMapInsert(remappedClasses(YES), oldcls, newcls);
- assert(!old);
+ auto result = remappedClasses(YES)->insert({ oldcls, newcls });
+#if DEBUG
+ if (!std::get<1>(result)) {
+ // An existing mapping was overwritten. This is not allowed
+ // unless it was to nil.
+ auto iterator = std::get<0>(result);
+ auto value = std::get<1>(*iterator);
+ ASSERT(value == nil);
+ }
+#else
+ (void)result;
+#endif
}
{
runtimeLock.assertLocked();
- Class c2;
-
if (!cls) return nil;
- NXMapTable *map = remappedClasses(NO);
- if (!map || NXMapMember(map, cls, (void**)&c2) == NX_MAPNOTAKEY) {
+ auto *map = remappedClasses(NO);
+ if (!map)
return cls;
- } else {
- return c2;
- }
+
+ auto iterator = map->find(cls);
+ if (iterator == map->end())
+ return cls;
+ return std::get<1>(*iterator);
}
static Class remapClass(classref_t cls)
}
+_Nullable Class
+objc_loadClassref(_Nullable Class * _Nonnull clsref)
+{
+ auto *atomicClsref = explicit_atomic<uintptr_t>::from_pointer((uintptr_t *)clsref);
+
+ uintptr_t cls = atomicClsref->load(std::memory_order_relaxed);
+ if (fastpath((cls & 1) == 0))
+ return (Class)cls;
+
+ auto stub = (stub_class_t *)(cls & ~1ULL);
+ Class initialized = stub->initializer((Class)stub, nil);
+ atomicClsref->store((uintptr_t)initialized, std::memory_order_relaxed);
+ return initialized;
+}
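A sketch of the tagging this relies on (assumptions, not from the diff: someStub and the wrapper function are hypothetical); a class reference holds either a real Class or a stub pointer with its low bit set:

extern stub_class_t someStub;   // hypothetical: a class stub emitted by the compiler

static Class resolveStubExample(void)
{
    Class ref = (Class)((uintptr_t)&someStub | 1);  // tagged reference to the stub
    Class cls = objc_loadClassref(&ref);   // low bit set: calls someStub.initializer, caches result
    (void)cls;
    return objc_loadClassref(&ref);        // low bit now clear: plain load, no callback
}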
+
+
/***********************************************************************
* getMaybeUnrealizedNonMetaClass
* Return the ordinary class for this class or metaclass.
**********************************************************************/
static Class getMaybeUnrealizedNonMetaClass(Class metacls, id inst)
{
- static int total, named, secondary, sharedcache;
+ static int total, named, secondary, sharedcache, dyld3;
runtimeLock.assertLocked();
- assert(metacls->isRealized());
+ ASSERT(metacls->isRealized());
total++;
// where inst == inst->ISA() == metacls is possible
if (metacls->ISA() == metacls) {
Class cls = metacls->superclass;
- assert(cls->isRealized());
- assert(!cls->isMetaClass());
- assert(cls->ISA() == metacls);
+ ASSERT(cls->isRealized());
+ ASSERT(!cls->isMetaClass());
+ ASSERT(cls->ISA() == metacls);
if (cls->ISA() == metacls) return cls;
}
// reallocating classes if cls is unrealized.
while (cls) {
if (cls->ISA() == metacls) {
- assert(!cls->isMetaClassMaybeUnrealized());
+ ASSERT(!cls->isMetaClassMaybeUnrealized());
return cls;
}
cls = cls->superclass;
// try name lookup
{
Class cls = getClassExceptSomeSwift(metacls->mangledName());
- if (cls->ISA() == metacls) {
+ if (cls && cls->ISA() == metacls) {
named++;
if (PrintInitializing) {
_objc_inform("INITIALIZE: %d/%d (%g%%) "
secondary, total, secondary*100.0/total);
}
- assert(cls->ISA() == metacls);
+ ASSERT(cls->ISA() == metacls);
+ return cls;
+ }
+ }
+
+ // try the dyld closure table
+ if (isPreoptimized())
+ {
+ // Try table from dyld closure first. It was built to ignore the dupes it
+ // knows will come from the cache, so anything left in here was there when
+ // we launched
+ Class cls = nil;
+ // Note, we have to pass the lambda directly here as otherwise we would try
+ // message copy and autorelease.
+ _dyld_for_each_objc_class(metacls->mangledName(),
+ [&cls, metacls](void* classPtr, bool isLoaded, bool* stop) {
+ // Skip images which aren't loaded. This supports the case where dyld
+ // might soft-link an image from the main binary, so it's possibly not
+ // loaded yet.
+ if (!isLoaded)
+ return;
+
+ // Found a loaded image with this class name, so check if it's the right one
+ Class result = (Class)classPtr;
+ if (result->ISA() == metacls) {
+ cls = result;
+ *stop = true;
+ }
+ });
+
+ if (cls) {
+ dyld3++;
+ if (PrintInitializing) {
+ _objc_inform("INITIALIZE: %d/%d (%g%%) "
+ "successful dyld closure metaclass lookups",
+ dyld3, total, dyld3*100.0/total);
+ }
+
return cls;
}
}
mutex_t& lock, bool leaveLocked)
{
lock.assertLocked();
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
if (cls->isInitialized()) {
if (!leaveLocked) lock.unlock();
}
// runtimeLock is now unlocked, for +initialize dispatch
- assert(nonmeta->isRealized());
+ ASSERT(nonmeta->isRealized());
initializeNonMetaClass(nonmeta);
if (leaveLocked) runtimeLock.lock();
* Adds cls as a new realized root class.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static Class _firstRealizedClass = nil;
-Class firstRealizedClass()
-{
- runtimeLock.assertLocked();
- return _firstRealizedClass;
-}
-
static void addRootClass(Class cls)
{
runtimeLock.assertLocked();
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
+
+ objc_debug_realized_class_generation_count++;
+
cls->data()->nextSiblingClass = _firstRealizedClass;
_firstRealizedClass = cls;
}
{
runtimeLock.assertLocked();
+ objc_debug_realized_class_generation_count++;
+
Class *classp;
for (classp = &_firstRealizedClass;
*classp != cls;
runtimeLock.assertLocked();
if (supercls && subcls) {
- assert(supercls->isRealized());
- assert(subcls->isRealized());
+ ASSERT(supercls->isRealized());
+ ASSERT(subcls->isRealized());
+
+ objc_debug_realized_class_generation_count++;
+
subcls->data()->nextSiblingClass = supercls->data()->firstSubclass;
supercls->data()->firstSubclass = subcls;
subcls->setHasCxxDtor();
}
- if (supercls->hasCustomRR()) {
- subcls->setHasCustomRR(true);
- }
-
- if (supercls->hasCustomAWZ()) {
- subcls->setHasCustomAWZ(true);
- }
+ objc::AWZScanner::scanAddedSubClass(subcls, supercls);
+ objc::RRScanner::scanAddedSubClass(subcls, supercls);
+ objc::CoreScanner::scanAddedSubClass(subcls, supercls);
- // Special case: instancesRequireRawIsa does not propagate
+ // Special case: instancesRequireRawIsa does not propagate
// from root class to root metaclass
if (supercls->instancesRequireRawIsa() && supercls->superclass) {
- subcls->setInstancesRequireRawIsa(true);
+ subcls->setInstancesRequireRawIsaRecursively(true);
}
}
}
static void removeSubclass(Class supercls, Class subcls)
{
runtimeLock.assertLocked();
- assert(supercls->isRealized());
- assert(subcls->isRealized());
- assert(subcls->superclass == supercls);
+ ASSERT(supercls->isRealized());
+ ASSERT(subcls->isRealized());
+ ASSERT(subcls->superclass == supercls);
+ objc_debug_realized_class_generation_count++;
+
Class *cp;
for (cp = &supercls->data()->firstSubclass;
*cp && *cp != subcls;
cp = &(*cp)->data()->nextSiblingClass)
;
- assert(*cp == subcls);
+ ASSERT(*cp == subcls);
*cp = subcls->data()->nextSiblingClass;
}
* Looks up a protocol by name. Demangled Swift names are recognized.
* Locking: runtimeLock must be read- or write-locked by the caller.
**********************************************************************/
-static Protocol *getProtocol(const char *name)
+static NEVER_INLINE Protocol *getProtocol(const char *name)
{
runtimeLock.assertLocked();
if (char *swName = copySwiftV1MangledName(name, true/*isProtocol*/)) {
result = (Protocol *)NXMapGet(protocols(), swName);
free(swName);
- return result;
+ if (result) return result;
+ }
+
+ // Try table from dyld shared cache
+ // Temporarily check that we are using the new table. Eventually this check
+ // will always be true.
+ // FIXME: Remove this check when we can
+ if (sharedCacheSupportsProtocolRoots()) {
+ result = getPreoptimizedProtocol(name);
+ if (result) return result;
}
return nil;
* a protocol struct that has been reallocated.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
-static protocol_t *remapProtocol(protocol_ref_t proto)
+static ALWAYS_INLINE protocol_t *remapProtocol(protocol_ref_t proto)
{
runtimeLock.assertLocked();
+ // Protocols in shared cache images have a canonical bit to mark that they
+ // are the definition we should use
+ if (((protocol_t *)proto)->isCanonical())
+ return (protocol_t *)proto;
+
protocol_t *newproto = (protocol_t *)
getProtocol(((protocol_t *)proto)->mangledName);
return newproto ? newproto : (protocol_t *)proto;
uint32_t diff;
- assert(superSize > ro->instanceStart);
+ ASSERT(superSize > ro->instanceStart);
diff = superSize - ro->instanceStart;
if (ro->ivars) {
{
class_rw_t *rw = cls->data();
- assert(supercls);
- assert(!cls->isMetaClass());
+ ASSERT(supercls);
+ ASSERT(!cls->isMetaClass());
/* debug: print them all before sliding
if (ro->ivars) {
* Returns the real class structure for the class.
* Locking: runtimeLock must be write-locked by the caller
**********************************************************************/
-static Class realizeClassWithoutSwift(Class cls)
+static Class realizeClassWithoutSwift(Class cls, Class previously)
{
runtimeLock.assertLocked();
if (!cls) return nil;
if (cls->isRealized()) return cls;
- assert(cls == remapClass(cls));
+ ASSERT(cls == remapClass(cls));
// fixme verify class is not in an un-dlopened part of the shared cache?
}
isMeta = ro->flags & RO_META;
-
+#if FAST_CACHE_META
+ if (isMeta) cls->cache.setBit(FAST_CACHE_META);
+#endif
rw->version = isMeta ? 7 : 0; // old runtime went up to 6
// or that Swift's initializers have already been called.
// fixme that assumption will be wrong if we add support
// for ObjC subclasses of Swift classes.
- supercls = realizeClassWithoutSwift(remapClass(cls->superclass));
- metacls = realizeClassWithoutSwift(remapClass(cls->ISA()));
+ supercls = realizeClassWithoutSwift(remapClass(cls->superclass), nil);
+ metacls = realizeClassWithoutSwift(remapClass(cls->ISA()), nil);
#if SUPPORT_NONPOINTER_ISA
- // Disable non-pointer isa for some classes and/or platforms.
- // Set instancesRequireRawIsa.
- bool instancesRequireRawIsa = cls->instancesRequireRawIsa();
- bool rawIsaIsInherited = false;
- static bool hackedDispatch = false;
-
- if (DisableNonpointerIsa) {
- // Non-pointer isa disabled by environment or app SDK version
- instancesRequireRawIsa = true;
- }
- else if (!hackedDispatch && !(ro->flags & RO_META) &&
- 0 == strcmp(ro->name, "OS_object"))
- {
- // hack for libdispatch et al - isa also acts as vtable pointer
- hackedDispatch = true;
- instancesRequireRawIsa = true;
- }
- else if (supercls && supercls->superclass &&
- supercls->instancesRequireRawIsa())
- {
- // This is also propagated by addSubclass()
- // but nonpointer isa setup needs it earlier.
- // Special case: instancesRequireRawIsa does not propagate
- // from root class to root metaclass
- instancesRequireRawIsa = true;
- rawIsaIsInherited = true;
- }
-
- if (instancesRequireRawIsa) {
- cls->setInstancesRequireRawIsa(rawIsaIsInherited);
+ if (isMeta) {
+ // Metaclasses do not need any features from non-pointer isa.
+ // This allows for a fastpath for classes in objc_retain/objc_release.
+ cls->setInstancesRequireRawIsa();
+ } else {
+ // Disable non-pointer isa for some classes and/or platforms.
+ // Set instancesRequireRawIsa.
+ bool instancesRequireRawIsa = cls->instancesRequireRawIsa();
+ bool rawIsaIsInherited = false;
+ static bool hackedDispatch = false;
+
+ if (DisableNonpointerIsa) {
+ // Non-pointer isa disabled by environment or app SDK version
+ instancesRequireRawIsa = true;
+ }
+ else if (!hackedDispatch && 0 == strcmp(ro->name, "OS_object"))
+ {
+ // hack for libdispatch et al - isa also acts as vtable pointer
+ hackedDispatch = true;
+ instancesRequireRawIsa = true;
+ }
+ else if (supercls && supercls->superclass &&
+ supercls->instancesRequireRawIsa())
+ {
+ // This is also propagated by addSubclass()
+ // but nonpointer isa setup needs it earlier.
+ // Special case: instancesRequireRawIsa does not propagate
+ // from root class to root metaclass
+ instancesRequireRawIsa = true;
+ rawIsaIsInherited = true;
+ }
+
+ if (instancesRequireRawIsa) {
+ cls->setInstancesRequireRawIsaRecursively(rawIsaIsInherited);
+ }
}
// SUPPORT_NONPOINTER_ISA
#endif
}
// Attach categories
- methodizeClass(cls);
+ methodizeClass(cls, previously);
return cls;
}
if (cls) {
if (previously && previously != (void*)cls) {
// #3: relocation
- // In the future this will mean remapping the old address
- // to the new class, and installing dispatch forwarding
- // machinery at the old address
- _objc_fatal("Swift requested that class %p be reallocated, "
- "but libobjc does not support that.", previously);
+ mutex_locker_t lock(runtimeLock);
+ addRemappedClass((Class)previously, cls);
+ addClassTableEntry(cls);
+ addNamedClass(cls, cls->mangledName(), /*replacing*/nil);
+ return realizeClassWithoutSwift(cls, (Class)previously);
} else {
// #1 and #2: realization in place, or new class
mutex_locker_t lock(runtimeLock);
// #1 and #2: realization in place, or new class
// We ignore the Swift metadata initializer callback.
// We assume that's all handled since we're being called from Swift.
- return realizeClassWithoutSwift(cls);
+ return realizeClassWithoutSwift(cls, nil);
}
}
else {
#if DEBUG
runtimeLock.lock();
- assert(remapClass(cls) == cls);
- assert(cls->isSwiftStable_ButAllowLegacyForNow());
- assert(!cls->isMetaClassMaybeUnrealized());
- assert(cls->superclass);
+ ASSERT(remapClass(cls) == cls);
+ ASSERT(cls->isSwiftStable_ButAllowLegacyForNow());
+ ASSERT(!cls->isMetaClassMaybeUnrealized());
+ ASSERT(cls->superclass);
runtimeLock.unlock();
#endif
// fixme someday Swift will need to relocate classes at this point,
// but we don't accept that yet.
if (cls != newcls) {
- _objc_fatal("Swift metadata initializer moved a class "
- "from %p to %p, but libobjc does not yet allow that.",
- cls, newcls);
+ mutex_locker_t lock(runtimeLock);
+ addRemappedClass(cls, newcls);
}
return newcls;
// No Swift-side initialization callback.
// Perform our own realization directly.
mutex_locker_t lock(runtimeLock);
- return realizeClassWithoutSwift(cls);
+ return realizeClassWithoutSwift(cls, nil);
}
}
if (!cls->isSwiftStable_ButAllowLegacyForNow()) {
// Non-Swift class. Realize it now with the lock still held.
// fixme wrong in the future for objc subclasses of swift classes
- realizeClassWithoutSwift(cls);
+ realizeClassWithoutSwift(cls, nil);
if (!leaveLocked) lock.unlock();
} else {
// Swift class. We need to drop locks and call the Swift
// runtime to initialize it.
lock.unlock();
cls = realizeSwiftClass(cls);
- assert(cls->isRealized()); // callback must have provoked realization
+ ASSERT(cls->isRealized()); // callback must have provoked realization
if (leaveLocked) lock.lock();
}
static bool
missingWeakSuperclass(Class cls)
{
- assert(!cls->isRealized());
+ ASSERT(!cls->isRealized());
if (!cls->superclass) {
// superclass nil. This is normal for root classes only.
} else {
// superclass not nil. Check if a higher superclass is missing.
Class supercls = remapClass(cls->superclass);
- assert(cls != cls->superclass);
- assert(cls != supercls);
+ ASSERT(cls != cls->superclass);
+ ASSERT(cls != supercls);
if (!supercls) return YES;
if (supercls->isRealized()) return NO;
return missingWeakSuperclass(supercls);
runtimeLock.assertLocked();
size_t count, i;
- classref_t *classlist;
+ classref_t const *classlist;
if (hi->areAllClassesRealized()) return;
static void flushCaches(Class cls)
{
runtimeLock.assertLocked();
-
+#if CONFIG_USE_CACHE_LOCK
mutex_locker_t lock(cacheUpdateLock);
+#endif
if (cls) {
- foreach_realized_class_and_subclass(cls, ^(Class c){
+ foreach_realized_class_and_subclass(cls, [](Class c){
cache_erase_nolock(c);
+ return true;
});
}
else {
- foreach_realized_class_and_metaclass(^(Class c){
+ foreach_realized_class_and_metaclass([](Class c){
cache_erase_nolock(c);
+ return true;
});
}
}
if (!cls) {
// collectALot if cls==nil
+#if CONFIG_USE_CACHE_LOCK
mutex_locker_t lock(cacheUpdateLock);
+#else
+ mutex_locker_t lock(runtimeLock);
+#endif
cache_collect(true);
}
}
}
-
-
/***********************************************************************
* mustReadClasses
* Preflight check in advance of readClass() from an image.
**********************************************************************/
-bool mustReadClasses(header_info *hi)
+bool mustReadClasses(header_info *hi, bool hasDyldRoots)
{
const char *reason;
// If the image is not preoptimized then we must read classes.
- if (!hi->isPreoptimized()) {
+ if (!hi->hasPreoptimizedClasses()) {
reason = nil; // Don't log this one because it is noisy.
goto readthem;
}
goto readthem;
#endif
- assert(!hi->isBundle()); // no MH_BUNDLE in shared cache
+ ASSERT(!hi->isBundle()); // no MH_BUNDLE in shared cache
// If the image may have missing weak superclasses then we must read classes
- if (!noMissingWeakSuperclasses()) {
+ if (!noMissingWeakSuperclasses() || hasDyldRoots) {
reason = "the image may contain classes with missing weak superclasses";
goto readthem;
}
#if DEBUG
{
size_t count;
- classref_t *classlist = _getObjc2ClassList(hi, &count);
+ classref_t const *classlist = _getObjc2ClassList(hi, &count);
for (size_t i = 0; i < count; i++) {
Class cls = remapClass(classlist[i]);
- assert(!cls->isUnfixedBackwardDeployingStableSwift());
+ ASSERT(!cls->isUnfixedBackwardDeployingStableSwift());
}
}
#endif
return nil;
}
- // Note: Class __ARCLite__'s hack does not go through here.
- // Class structure fixups that apply to it also need to be
- // performed in non-lazy realization below.
-
- // These fields should be set to zero because of the
- // binding of _objc_empty_vtable, but OS X 10.8's dyld
- // does not bind shared cache absolute symbols as expected.
- // This (and the __ARCLite__ hack below) can be removed
- // once the simulator drops 10.8 support.
-#if TARGET_OS_SIMULATOR
- if (cls->cache._mask) cls->cache._mask = 0;
- if (cls->cache._occupied) cls->cache._occupied = 0;
- if (cls->ISA()->cache._mask) cls->ISA()->cache._mask = 0;
- if (cls->ISA()->cache._occupied) cls->ISA()->cache._occupied = 0;
-#endif
-
cls->fixupBackwardDeployingStableSwift();
Class replacing = nil;
if (headerIsPreoptimized && !replacing) {
// class list built in shared cache
// fixme strict assert doesn't work because of duplicates
- // assert(cls == getClass(name));
- assert(getClassExceptSomeSwift(mangledName));
+ // ASSERT(cls == getClass(name));
+ ASSERT(getClassExceptSomeSwift(mangledName));
} else {
addNamedClass(cls, mangledName, replacing);
addClassTableEntry(cls);
protocol_t *oldproto = (protocol_t *)getProtocol(newproto->mangledName);
if (oldproto) {
- // Some other definition already won.
- if (PrintProtocols) {
- _objc_inform("PROTOCOLS: protocol at %p is %s "
- "(duplicate of %p)",
- newproto, oldproto->nameForLogging(), oldproto);
+ if (oldproto != newproto) {
+ // Some other definition already won.
+ if (PrintProtocols) {
+ _objc_inform("PROTOCOLS: protocol at %p is %s "
+ "(duplicate of %p)",
+ newproto, oldproto->nameForLogging(), oldproto);
+ }
+
+ // If we are a shared cache binary then we have a definition of this
+ // protocol, but if another one was chosen then we need to clear our
+ // isCanonical bit so that no-one trusts it.
+ // Note, if getProtocol returned a shared cache protocol then the
+ // canonical definition is already in the shared cache and we don't
+ // need to do anything.
+ if (headerIsPreoptimized && !oldproto->isCanonical()) {
+ // Note newproto is an entry in our __objc_protolist section which
+ // for shared cache binaries points to the original protocol in
+ // that binary, not the shared cache uniqued one.
+ auto cacheproto = (protocol_t *)
+ getSharedCachePreoptimizedProtocol(newproto->mangledName);
+ if (cacheproto && cacheproto->isCanonical())
+ cacheproto->clearIsCanonical();
+ }
}
}
else if (headerIsPreoptimized) {
installedproto = newproto;
}
- assert(installedproto->getIsa() == protocol_class);
- assert(installedproto->size >= sizeof(protocol_t));
+ ASSERT(installedproto->getIsa() == protocol_class);
+ ASSERT(installedproto->size >= sizeof(protocol_t));
insertFn(protocol_map, installedproto->mangledName,
installedproto);
Class *resolvedFutureClasses = nil;
size_t resolvedFutureClassCount = 0;
static bool doneOnce;
+ bool launchTime = NO;
TimeLogger ts(PrintImageTimes);
runtimeLock.assertLocked();
if (!doneOnce) {
doneOnce = YES;
+ launchTime = YES;
#if SUPPORT_NONPOINTER_ISA
// Disable non-pointer isa under some conditions.
(isPreoptimized() ? unoptimizedTotalClasses : totalClasses) * 4 / 3;
gdb_objc_realized_classes =
NXCreateMapTable(NXStrValueMapPrototype, namedClassesSize);
-
- allocatedClasses = NXCreateHashTable(NXPtrPrototype, 0, nil);
-
+
ts.log("IMAGE TIMES: first time tasks");
}
+ // Fix up @selector references
+ static size_t UnfixedSelectors;
+ {
+ mutex_locker_t lock(selLock);
+ for (EACH_HEADER) {
+ if (hi->hasPreoptimizedSelectors()) continue;
+
+ bool isBundle = hi->isBundle();
+ SEL *sels = _getObjc2SelectorRefs(hi, &count);
+ UnfixedSelectors += count;
+ for (i = 0; i < count; i++) {
+ const char *name = sel_cname(sels[i]);
+ SEL sel = sel_registerNameNoLock(name, isBundle);
+ if (sels[i] != sel) {
+ sels[i] = sel;
+ }
+ }
+ }
+ }
+
+ ts.log("IMAGE TIMES: fix up selector references");
// Discover classes. Fix up unresolved future classes. Mark bundle classes.
+ bool hasDyldRoots = dyld_shared_cache_some_image_overridden();
for (EACH_HEADER) {
- classref_t *classlist = _getObjc2ClassList(hi, &count);
-
- if (! mustReadClasses(hi)) {
+ if (! mustReadClasses(hi, hasDyldRoots)) {
// Image is sufficiently optimized that we need not call readClass()
continue;
}
+ classref_t const *classlist = _getObjc2ClassList(hi, &count);
+
bool headerIsBundle = hi->isBundle();
- bool headerIsPreoptimized = hi->isPreoptimized();
+ bool headerIsPreoptimized = hi->hasPreoptimizedClasses();
for (i = 0; i < count; i++) {
Class cls = (Class)classlist[i];
ts.log("IMAGE TIMES: remap classes");
- // Fix up @selector references
- static size_t UnfixedSelectors;
- {
- mutex_locker_t lock(selLock);
- for (EACH_HEADER) {
- if (hi->isPreoptimized()) continue;
-
- bool isBundle = hi->isBundle();
- SEL *sels = _getObjc2SelectorRefs(hi, &count);
- UnfixedSelectors += count;
- for (i = 0; i < count; i++) {
- const char *name = sel_cname(sels[i]);
- sels[i] = sel_registerNameNoLock(name, isBundle);
- }
- }
- }
-
- ts.log("IMAGE TIMES: fix up selector references");
-
#if SUPPORT_FIXUP
// Fix up old objc_msgSend_fixup call sites
for (EACH_HEADER) {
ts.log("IMAGE TIMES: fix up objc_msgSend_fixup");
#endif
+ bool cacheSupportsProtocolRoots = sharedCacheSupportsProtocolRoots();
+
// Discover protocols. Fix up protocol refs.
for (EACH_HEADER) {
extern objc_class OBJC_CLASS_$_Protocol;
Class cls = (Class)&OBJC_CLASS_$_Protocol;
- assert(cls);
+ ASSERT(cls);
NXMapTable *protocol_map = protocols();
- bool isPreoptimized = hi->isPreoptimized();
+ bool isPreoptimized = hi->hasPreoptimizedProtocols();
+
+ // Skip reading protocols if this is an image from the shared cache
+ // and we support roots
+ // Note, after launch we do need to walk the protocols, as the protocol
+ // in the shared cache is marked with isCanonical() and that may not
+ // be true if some non-shared cache binary was chosen as the canonical
+ // definition
+ if (launchTime && isPreoptimized && cacheSupportsProtocolRoots) {
+ if (PrintProtocols) {
+ _objc_inform("PROTOCOLS: Skipping reading protocols in image: %s",
+ hi->fname());
+ }
+ continue;
+ }
+
bool isBundle = hi->isBundle();
- protocol_t **protolist = _getObjc2ProtocolList(hi, &count);
+ protocol_t * const *protolist = _getObjc2ProtocolList(hi, &count);
for (i = 0; i < count; i++) {
readProtocol(protolist[i], cls, protocol_map,
isPreoptimized, isBundle);
// Preoptimized images may have the right
// answer already but we don't know for sure.
for (EACH_HEADER) {
+ // At launch time, we know preoptimized image refs are pointing at the
+ // shared cache definition of a protocol. We can skip the check on
+ // launch, but have to visit @protocol refs for shared cache images
+ // loaded later.
+ if (launchTime && cacheSupportsProtocolRoots && hi->isPreoptimized())
+ continue;
protocol_t **protolist = _getObjc2ProtocolRefs(hi, &count);
for (i = 0; i < count; i++) {
remapProtocolRef(&protolist[i]);
ts.log("IMAGE TIMES: fix up @protocol references");
+ // Discover categories.
+ for (EACH_HEADER) {
+ bool hasClassProperties = hi->info()->hasCategoryClassProperties();
+
+ auto processCatlist = [&](category_t * const *catlist) {
+ for (i = 0; i < count; i++) {
+ category_t *cat = catlist[i];
+ Class cls = remapClass(cat->cls);
+ locstamped_category_t lc{cat, hi};
+
+ if (!cls) {
+ // Category's target class is missing (probably weak-linked).
+ // Ignore the category.
+ if (PrintConnecting) {
+ _objc_inform("CLASS: IGNORING category \?\?\?(%s) %p with "
+ "missing weak-linked target class",
+ cat->name, cat);
+ }
+ continue;
+ }
+
+ // Process this category.
+ if (cls->isStubClass()) {
+ // Stub classes are never realized. Stub classes
+ // don't know their metaclass until they're
+ // initialized, so we have to add categories with
+ // class methods or properties to the stub itself.
+ // methodizeClass() will find them and add them to
+ // the metaclass as appropriate.
+ if (cat->instanceMethods ||
+ cat->protocols ||
+ cat->instanceProperties ||
+ cat->classMethods ||
+ cat->protocols ||
+ (hasClassProperties && cat->_classProperties))
+ {
+ objc::unattachedCategories.addForClass(lc, cls);
+ }
+ } else {
+ // First, register the category with its target class.
+ // Then, rebuild the class's method lists (etc) if
+ // the class is realized.
+ if (cat->instanceMethods || cat->protocols
+ || cat->instanceProperties)
+ {
+ if (cls->isRealized()) {
+ attachCategories(cls, &lc, 1, ATTACH_EXISTING);
+ } else {
+ objc::unattachedCategories.addForClass(lc, cls);
+ }
+ }
+
+ if (cat->classMethods || cat->protocols
+ || (hasClassProperties && cat->_classProperties))
+ {
+ if (cls->ISA()->isRealized()) {
+ attachCategories(cls->ISA(), &lc, 1, ATTACH_EXISTING | ATTACH_METACLASS);
+ } else {
+ objc::unattachedCategories.addForClass(lc, cls->ISA());
+ }
+ }
+ }
+ }
+ };
+ processCatlist(_getObjc2CategoryList(hi, &count));
+ processCatlist(_getObjc2CategoryList2(hi, &count));
+ }
+
+ ts.log("IMAGE TIMES: discover categories");
+
+ // Category discovery MUST BE LATE to avoid potential races
+ // when other threads call the new category code before
+ // this thread finishes its fixups.
+
+ // +load handled by prepare_load_methods()
+
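
Note how processCatlist above splits each category: instance-side entries (instance methods, protocols, instance properties) are attached to the class itself, while class-side entries go to the metaclass via cls->ISA(). A minimal illustration of a category carrying both kinds, not part of the diff and using made-up names:

#import <Foundation/Foundation.h>

// Hypothetical category used only to illustrate the instance/class split.
@interface NSObject (EditorDemo)
+ (NSString *)demoClassSide;      // ends up in the metaclass's method list
- (NSString *)demoInstanceSide;   // ends up in the class's method list
@end

@implementation NSObject (EditorDemo)
+ (NSString *)demoClassSide    { return @"attached to NSObject's metaclass"; }
- (NSString *)demoInstanceSide { return @"attached to NSObject"; }
@end
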
// Realize non-lazy classes (for +load methods and static instances)
for (EACH_HEADER) {
- classref_t *classlist =
+ classref_t const *classlist =
_getObjc2NonlazyClassList(hi, &count);
for (i = 0; i < count; i++) {
Class cls = remapClass(classlist[i]);
if (!cls) continue;
- // hack for class __ARCLite__, which didn't get this above
-#if TARGET_OS_SIMULATOR
- if (cls->cache._buckets == (void*)&_objc_empty_cache &&
- (cls->cache._mask || cls->cache._occupied))
- {
- cls->cache._mask = 0;
- cls->cache._occupied = 0;
- }
- if (cls->ISA()->cache._buckets == (void*)&_objc_empty_cache &&
- (cls->ISA()->cache._mask || cls->ISA()->cache._occupied))
- {
- cls->ISA()->cache._mask = 0;
- cls->ISA()->cache._occupied = 0;
- }
-#endif
-
addClassTableEntry(cls);
if (cls->isSwiftStable()) {
// We can't disallow all Swift classes because of
// classes like Swift.__EmptyArrayStorage
}
- realizeClassWithoutSwift(cls);
+ realizeClassWithoutSwift(cls, nil);
}
}
if (cls->isSwiftStable()) {
_objc_fatal("Swift class is not allowed to be future");
}
- realizeClassWithoutSwift(cls);
- cls->setInstancesRequireRawIsa(false/*inherited*/);
+ realizeClassWithoutSwift(cls, nil);
+ cls->setInstancesRequireRawIsaRecursively(false/*inherited*/);
}
free(resolvedFutureClasses);
- }
-
- ts.log("IMAGE TIMES: realize future classes");
-
- // Discover categories.
- for (EACH_HEADER) {
- category_t **catlist =
- _getObjc2CategoryList(hi, &count);
- bool hasClassProperties = hi->info()->hasCategoryClassProperties();
-
- for (i = 0; i < count; i++) {
- category_t *cat = catlist[i];
- Class cls = remapClass(cat->cls);
-
- if (!cls) {
- // Category's target class is missing (probably weak-linked).
- // Disavow any knowledge of this category.
- catlist[i] = nil;
- if (PrintConnecting) {
- _objc_inform("CLASS: IGNORING category \?\?\?(%s) %p with "
- "missing weak-linked target class",
- cat->name, cat);
- }
- continue;
- }
-
- // Process this category.
- // First, register the category with its target class.
- // Then, rebuild the class's method lists (etc) if
- // the class is realized.
- bool classExists = NO;
- if (cat->instanceMethods || cat->protocols
- || cat->instanceProperties)
- {
- addUnattachedCategoryForClass(cat, cls, hi);
- if (cls->isRealized()) {
- remethodizeClass(cls);
- classExists = YES;
- }
- if (PrintConnecting) {
- _objc_inform("CLASS: found category -%s(%s) %s",
- cls->nameForLogging(), cat->name,
- classExists ? "on existing class" : "");
- }
- }
-
- if (cat->classMethods || cat->protocols
- || (hasClassProperties && cat->_classProperties))
- {
- addUnattachedCategoryForClass(cat, cls->ISA(), hi);
- if (cls->ISA()->isRealized()) {
- remethodizeClass(cls->ISA());
- }
- if (PrintConnecting) {
- _objc_inform("CLASS: found category +%s(%s)",
- cls->nameForLogging(), cat->name);
- }
- }
- }
}
- ts.log("IMAGE TIMES: discover categories");
-
- // Category discovery MUST BE LAST to avoid potential races
- // when other threads call the new category code before
- // this thread finishes its fixups.
-
- // +load handled by prepare_load_methods()
+ ts.log("IMAGE TIMES: realize future classes");
if (DebugNonFragileIvars) {
realizeAllClasses();
static unsigned int PreoptOptimizedClasses;
for (EACH_HEADER) {
- if (hi->isPreoptimized()) {
+ if (hi->hasPreoptimizedSelectors()) {
_objc_inform("PREOPTIMIZATION: honoring preoptimized selectors "
"in %s", hi->fname());
}
"in %s", hi->fname());
}
- classref_t *classlist = _getObjc2ClassList(hi, &count);
+ classref_t const *classlist = _getObjc2ClassList(hi, &count);
for (i = 0; i < count; i++) {
Class cls = remapClass(classlist[i]);
if (!cls) continue;
PreoptTotalClasses++;
- if (hi->isPreoptimized()) {
+ if (hi->hasPreoptimizedClasses()) {
PreoptOptimizedClasses++;
}
static void schedule_class_load(Class cls)
{
if (!cls) return;
- assert(cls->isRealized()); // _read_images should realize
+ ASSERT(cls->isRealized()); // _read_images should realize
if (cls->data()->flags & RW_LOADED) return;
runtimeLock.assertLocked();
- classref_t *classlist =
+ classref_t const *classlist =
_getObjc2NonlazyClassList(mhdr, &count);
for (i = 0; i < count; i++) {
schedule_class_load(remapClass(classlist[i]));
}
- category_t **categorylist = _getObjc2NonlazyCategoryList(mhdr, &count);
+ category_t * const *categorylist = _getObjc2NonlazyCategoryList(mhdr, &count);
for (i = 0; i < count; i++) {
category_t *cat = categorylist[i];
Class cls = remapClass(cat->cls);
_objc_fatal("Swift class extensions and categories on Swift "
"classes are not allowed to have +load methods");
}
- realizeClassWithoutSwift(cls);
- assert(cls->ISA()->isRealized());
+ realizeClassWithoutSwift(cls, nil);
+ ASSERT(cls->ISA()->isRealized());
add_category_to_loadable_list(cat);
}
}
// Unload unattached categories and categories waiting for +load.
- category_t **catlist = _getObjc2CategoryList(hi, &count);
+ // Ignore __objc_catlist2. We don't support unloading Swift
+ // and we never will.
+ category_t * const *catlist = _getObjc2CategoryList(hi, &count);
for (i = 0; i < count; i++) {
category_t *cat = catlist[i];
- if (!cat) continue; // category for ignored weak-linked class
Class cls = remapClass(cat->cls);
- assert(cls); // shouldn't have live category for dead class
+ if (!cls) continue; // category for ignored weak-linked class
// fixme for MH_DYLIB cat's class may have been unloaded already
// unattached list
- removeUnattachedCategoryForClass(cat, cls);
+ objc::unattachedCategories.eraseCategoryForClass(cat, cls);
// +load queue
remove_category_from_loadable_list(cat);
// and __DATA,__objc_nlclslist. arclite's hack puts a class in the latter
// only, and we need to unload that class if we unload an arclite image.
- NXHashTable *classes = NXCreateHashTable(NXPtrPrototype, 0, nil);
- classref_t *classlist;
+ objc::DenseSet<Class> classes{};
+ classref_t const *classlist;
classlist = _getObjc2ClassList(hi, &count);
for (i = 0; i < count; i++) {
Class cls = remapClass(classlist[i]);
- if (cls) NXHashInsert(classes, cls);
+ if (cls) classes.insert(cls);
}
classlist = _getObjc2NonlazyClassList(hi, &count);
for (i = 0; i < count; i++) {
Class cls = remapClass(classlist[i]);
- if (cls) NXHashInsert(classes, cls);
+ if (cls) classes.insert(cls);
}
// First detach classes from each other. Then free each class.
// This avoid bugs where this loop unloads a subclass before its superclass
- NXHashState hs;
- Class cls;
-
- hs = NXInitHashState(classes);
- while (NXNextHashState(classes, &hs, (void**)&cls)) {
+ for (Class cls: classes) {
remove_class_from_loadable_list(cls);
detach_class(cls->ISA(), YES);
detach_class(cls, NO);
}
- hs = NXInitHashState(classes);
- while (NXNextHashState(classes, &hs, (void**)&cls)) {
+ for (Class cls: classes) {
free_class(cls->ISA());
free_class(cls);
}
- NXFreeHashTable(classes);
-
// XXX FIXME -- Clean up protocols:
// <rdar://problem/9033191> Support unloading protocols at dylib/image unload time
{
if (!m) return nil;
- assert(m->name == sel_registerName(sel_getName(m->name)));
+ ASSERT(m->name == sel_registerName(sel_getName(m->name)));
return m->name;
}
flushCaches(cls);
- updateCustomRR_AWZ(cls, m);
+ adjustCustomFlagsForMethodChange(cls, m);
return old;
}
flushCaches(nil);
- updateCustomRR_AWZ(nil, m1);
- updateCustomRR_AWZ(nil, m2);
+ adjustCustomFlagsForMethodChange(nil, m1);
+ adjustCustomFlagsForMethodChange(nil, m2);
}
fixupProtocolIfNeeded(protocol_t *proto)
{
runtimeLock.assertUnlocked();
- assert(proto);
+ ASSERT(proto);
if (!proto->isFixedUp()) {
mutex_locker_t lock(runtimeLock);
if (!proto || !sel) return nil;
- assert(proto->isFixedUp());
+ ASSERT(proto->isFixedUp());
method_list_t *mlist =
getProtocolMethodList(proto, isRequiredMethod, isInstanceMethod);
if (!proto) return nil;
if (!proto->extendedMethodTypes()) return nil;
- assert(proto->isFixedUp());
+ ASSERT(proto->isFixedUp());
method_t *m =
protocol_getMethod_nolock(proto, sel,
const char *
protocol_t::demangledName()
{
- assert(hasDemangledNameField());
+ ASSERT(hasDemangledNameField());
if (! _demangledName) {
char *de = copySwiftV1DemangledName(mangledName, true/*isProtocol*/);
uintptr_t i;
for (i = 0; i < self->protocols->count; i++) {
protocol_t *proto = remapProtocol(self->protocols->list[i]);
+ if (other == proto) {
+ return YES;
+ }
if (0 == strcmp(other->mangledName, proto->mangledName)) {
return YES;
}
// have been retained and we must preserve that count.
proto->changeIsa(cls);
- NXMapKeyCopyingInsert(protocols(), proto->mangledName, proto);
+ // Don't add this protocol if we already have it.
+ // Should we warn on duplicates?
+ if (getProtocol(proto->mangledName) == nil) {
+ NXMapKeyCopyingInsert(protocols(), proto->mangledName, proto);
+ }
}
//}
}
+static int
+objc_getRealizedClassList_nolock(Class *buffer, int bufferLen)
+{
+ int count = 0;
+
+ if (buffer) {
+ int c = 0;
+ foreach_realized_class([=, &count, &c](Class cls) {
+ count++;
+ if (c < bufferLen) {
+ buffer[c++] = cls;
+ }
+ return true;
+ });
+ } else {
+ foreach_realized_class([&count](Class cls) {
+ count++;
+ return true;
+ });
+ }
+
+ return count;
+}
+
+static Class *
+objc_copyRealizedClassList_nolock(unsigned int *outCount)
+{
+ Class *result = nil;
+ unsigned int count = 0;
+
+ foreach_realized_class([&count](Class cls) {
+ count++;
+ return true;
+ });
+
+ if (count > 0) {
+ unsigned int c = 0;
+
+ result = (Class *)malloc((1+count) * sizeof(Class));
+ foreach_realized_class([=, &c](Class cls) {
+ result[c++] = cls;
+ return true;
+ });
+ result[c] = nil;
+ }
+
+ if (outCount) *outCount = count;
+ return result;
+}
+
+static void
+class_getImpCache_nolock(Class cls, cache_t &cache, objc_imp_cache_entry *buffer, int len)
+{
+ bucket_t *buckets = cache.buckets();
+
+ uintptr_t count = cache.capacity();
+ uintptr_t index;
+ int wpos = 0;
+
+ for (index = 0; index < count && wpos < len; index += 1) {
+ if (buckets[index].sel()) {
+ buffer[wpos].imp = buckets[index].imp(cls);
+ buffer[wpos].sel = buckets[index].sel();
+ wpos++;
+ }
+ }
+}
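
class_copyImpCache() below wraps this helper and is private SPI (its declaration lives in objc-internal.h, which is not part of this excerpt). A caller-side sketch, with the entry layout assumed from how class_getImpCache_nolock() fills the buffer:

#include <objc/runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Assumed SPI declarations; the real ones are in objc-internal.h.
typedef struct objc_imp_cache_entry {
    SEL sel;
    IMP imp;
} objc_imp_cache_entry;
extern objc_imp_cache_entry *class_copyImpCache(Class cls, int *outCount);

static void dumpImpCache(Class cls)
{
    int count = 0;
    objc_imp_cache_entry *entries = class_copyImpCache(cls, &count);
    for (int i = 0; i < count; i++) {
        printf("%s cached %s -> %p\n", class_getName(cls),
               sel_getName(entries[i].sel), (void *)entries[i].imp);
    }
    free(entries);  // buffer is allocated by the runtime; the caller frees it
}
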
/***********************************************************************
-* objc_getClassList
-* Returns pointers to all classes.
-* This requires all classes be realized, which is regretfully non-lazy.
-* Locking: acquires runtimeLock
-**********************************************************************/
-int
-objc_getClassList(Class *buffer, int bufferLen)
+ * objc_getClassList
+ * Returns pointers to all classes.
+ * This requires all classes be realized, which is regretfully non-lazy.
+ * Locking: acquires runtimeLock
+ **********************************************************************/
+int
+objc_getClassList(Class *buffer, int bufferLen)
{
mutex_locker_t lock(runtimeLock);
realizeAllClasses();
- __block int count = 0;
- foreach_realized_class_and_metaclass(^(Class cls) {
- if (!cls->isMetaClass()) count++;
- });
+ return objc_getRealizedClassList_nolock(buffer, bufferLen);
+}
- if (buffer) {
- __block int c = 0;
- foreach_realized_class_and_metaclass(^(Class cls) {
- if (c < bufferLen && !cls->isMetaClass()) {
- buffer[c++] = cls;
- }
- });
- }
+/***********************************************************************
+ * objc_copyRealizedClassList
+ * Returns pointers to all realized classes.
+ *
+ * outCount may be nil. *outCount is the number of classes returned.
+ * If the returned array is not nil, it is nil-terminated and must be
+ * freed with free().
+ * Locking: write-locks runtimeLock
+ **********************************************************************/
+Class *
+objc_copyRealizedClassList(unsigned int *outCount)
+{
+ mutex_locker_t lock(runtimeLock);
- return count;
+ return objc_copyRealizedClassList_nolock(outCount);
}
realizeAllClasses();
- Class *result = nil;
+ return objc_copyRealizedClassList_nolock(outCount);
+}
- __block unsigned int count = 0;
- foreach_realized_class_and_metaclass(^(Class cls) {
- if (!cls->isMetaClass()) count++;
- });
+/***********************************************************************
+ * class_copyImpCache
+ * Returns the current content of the Class IMP Cache
+ *
+ * outCount may be nil. *outCount is the number of entries returned.
+ * If the returned array is not nil, it is nil-terminated and must be
+ * freed with free().
+ * Locking: write-locks cacheUpdateLock
+ **********************************************************************/
+objc_imp_cache_entry *
+class_copyImpCache(Class cls, int *outCount)
+{
+ objc_imp_cache_entry *buffer = nullptr;
- if (count > 0) {
- result = (Class *)malloc((1+count) * sizeof(Class));
- __block unsigned int c = 0;
- foreach_realized_class_and_metaclass(^(Class cls) {
- if (!cls->isMetaClass()) {
- result[c++] = cls;
- }
- });
- result[c] = nil;
+#if CONFIG_USE_CACHE_LOCK
+ mutex_locker_t lock(cacheUpdateLock);
+#else
+ mutex_locker_t lock(runtimeLock);
+#endif
+
+ cache_t &cache = cls->cache;
+ int count = (int)cache.occupied();
+
+ if (count) {
+ buffer = (objc_imp_cache_entry *)calloc(1+count, sizeof(objc_imp_cache_entry));
+ class_getImpCache_nolock(cls, cache, buffer, count);
}
if (outCount) *outCount = count;
- return result;
+ return buffer;
}
NXMapTable *protocol_map = protocols();
- unsigned int count = NXCountMapTable(protocol_map);
+ // Find all the protocols from the pre-optimized images. These protocols
+ // won't be in the protocol map.
+ objc::DenseMap<const char*, Protocol*> preoptimizedProtocols;
+ if (sharedCacheSupportsProtocolRoots()) {
+ header_info *hi;
+ for (hi = FirstHeader; hi; hi = hi->getNext()) {
+ if (!hi->hasPreoptimizedProtocols())
+ continue;
+
+ size_t count, i;
+ const protocol_t * const *protolist = _getObjc2ProtocolList(hi, &count);
+ for (i = 0; i < count; i++) {
+ const protocol_t* protocol = protolist[i];
+
+ // Skip protocols we have in the run time map. These likely
+ // correspond to protocols added dynamically which have the same
+ // name as a protocol found later in a dlopen'ed shared cache image.
+ if (NXMapGet(protocol_map, protocol->mangledName) != nil)
+ continue;
+
+ // The protocols in the shared cache protolist point to their
+ // original on-disk object, not the optimized one. We can use the name
+ // to find the optimized one.
+ Protocol* optimizedProto = getPreoptimizedProtocol(protocol->mangledName);
+ preoptimizedProtocols.insert({ protocol->mangledName, optimizedProto });
+ }
+ }
+ }
+
+ unsigned int count = NXCountMapTable(protocol_map) + (unsigned int)preoptimizedProtocols.size();
if (count == 0) {
if (outCount) *outCount = 0;
return nil;
{
result[i++] = proto;
}
+
+ // Add any protocols found in the pre-optimized table
+ for (auto it : preoptimizedProtocols) {
+ result[i++] = it.second;
+ }
result[i++] = nil;
- assert(i == count+1);
+ ASSERT(i == count+1);
if (outCount) *outCount = count;
return result;
mutex_locker_t lock(runtimeLock);
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
count = cls->data()->methods.count();
mutex_locker_t lock(runtimeLock);
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
if ((ivars = cls->data()->ro->ivars) && ivars->count) {
result = (Ivar *)malloc((ivars->count+1) * sizeof(Ivar));
mutex_locker_t lock(runtimeLock);
checkIsKnownClass(cls);
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
auto rw = cls->data();
const method_list_t *mlist;
- assert(isRealized());
- assert(ISA()->isRealized());
- assert(!isMetaClass());
- assert(ISA()->isMetaClass());
+ ASSERT(isRealized());
+ ASSERT(ISA()->isRealized());
+ ASSERT(!isMetaClass());
+ ASSERT(ISA()->isMetaClass());
mlist = ISA()->data()->ro->baseMethods();
if (mlist) {
{
mutex_locker_t lock(runtimeLock);
Class result = remapClass(cat->cls);
- assert(result->isRealized()); // ok for call_category_loads' usage
+ ASSERT(result->isRealized()); // ok for call_category_loads' usage
return result;
}
checkIsKnownClass(cls);
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
count = cls->data()->protocols.count();
const char **objc_copyImageNames(unsigned int *outCount)
{
mutex_locker_t lock(runtimeLock);
-
+
+ int HeaderCount = 0;
+ for (header_info *hi = FirstHeader; hi != nil; hi = hi->getNext()) {
+ HeaderCount++;
+ }
+
#if TARGET_OS_WIN32
const TCHAR **names = (const TCHAR **)
malloc((HeaderCount+1) * sizeof(TCHAR *));
copyClassNamesForImage_nolock(header_info *hi, unsigned int *outCount)
{
runtimeLock.assertLocked();
- assert(hi);
+ ASSERT(hi);
size_t count;
- classref_t *classlist = _getObjc2ClassList(hi, &count);
+ classref_t const *classlist = _getObjc2ClassList(hi, &count);
const char **names = (const char **)
malloc((count+1) * sizeof(const char *));
* Locking: runtimeLock may or may not be held by the caller.
**********************************************************************/
mutex_t DemangleCacheLock;
-static NXHashTable *DemangleCache;
+static objc::DenseSet<const char *> *DemangleCache;
const char *
objc_class::demangledName()
{
// fixme lldb's calls to class_getName() can also get here when
// interrogating the dyld shared cache. (rdar://27258517)
// fixme runtimeLock.assertLocked();
- // fixme assert(realize);
+ // fixme ASSERT(realize);
- char *cached;
+ const char *cached;
{
mutex_locker_t lock(DemangleCacheLock);
if (!DemangleCache) {
- DemangleCache = NXCreateHashTable(NXStrPrototype, 0, nil);
+ DemangleCache = new objc::DenseSet<const char *>{};
}
- cached = (char *)NXHashInsertIfAbsent(DemangleCache, de);
+ cached = *DemangleCache->insert(de).first;
}
if (cached != de) free(de);
return cached;
/***********************************************************************
* class_getName
* fixme
-* Locking: acquires runtimeLock
+* Locking: may acquire DemangleCacheLock
**********************************************************************/
const char *class_getName(Class cls)
{
if (!cls) return "nil";
// fixme lldb calls class_getName() on unrealized classes (rdar://27258517)
- // assert(cls->isRealized() || cls->isFuture());
+ // ASSERT(cls->isRealized() || cls->isFuture());
return cls->demangledName();
}
+/***********************************************************************
+* objc_debug_class_getNameRaw
+* fixme
+* Locking: none
+**********************************************************************/
+const char *objc_debug_class_getNameRaw(Class cls)
+{
+ if (!cls) return "nil";
+ return cls->mangledName();
+}
+
/***********************************************************************
* class_getVersion
class_getVersion(Class cls)
{
if (!cls) return 0;
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
return cls->data()->version;
}
class_setVersion(Class cls, int version)
{
if (!cls) return;
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
cls->data()->version = version;
}
-
-static method_t *findMethodInSortedMethodList(SEL key, const method_list_t *list)
+/***********************************************************************
+ * search_method_list_inline
+ **********************************************************************/
+ALWAYS_INLINE static method_t *
+findMethodInSortedMethodList(SEL key, const method_list_t *list)
{
- assert(list);
+ ASSERT(list);
const method_t * const first = &list->first;
const method_t *base = first;
return nil;
}
-/***********************************************************************
-* getMethodNoSuper_nolock
-* fixme
-* Locking: runtimeLock must be read- or write-locked by the caller
-**********************************************************************/
-static method_t *search_method_list(const method_list_t *mlist, SEL sel)
+ALWAYS_INLINE static method_t *
+search_method_list_inline(const method_list_t *mlist, SEL sel)
{
int methodListIsFixedUp = mlist->isFixedUp();
int methodListHasExpectedSize = mlist->entsize() == sizeof(method_t);
- if (__builtin_expect(methodListIsFixedUp && methodListHasExpectedSize, 1)) {
+ if (fastpath(methodListIsFixedUp && methodListHasExpectedSize)) {
return findMethodInSortedMethodList(sel, mlist);
} else {
// Linear search of unsorted method list
return nil;
}
+NEVER_INLINE static method_t *
+search_method_list(const method_list_t *mlist, SEL sel)
+{
+ return search_method_list_inline(mlist, sel);
+}
+
+/***********************************************************************
+ * method_lists_contains_any
+ **********************************************************************/
+static NEVER_INLINE bool
+method_lists_contains_any(method_list_t **mlists, method_list_t **end,
+ SEL sels[], size_t selcount)
+{
+ while (mlists < end) {
+ const method_list_t *mlist = *mlists++;
+ int methodListIsFixedUp = mlist->isFixedUp();
+ int methodListHasExpectedSize = mlist->entsize() == sizeof(method_t);
+
+ if (fastpath(methodListIsFixedUp && methodListHasExpectedSize)) {
+ for (size_t i = 0; i < selcount; i++) {
+ if (findMethodInSortedMethodList(sels[i], mlist)) {
+ return true;
+ }
+ }
+ } else {
+ for (auto& meth : *mlist) {
+ for (size_t i = 0; i < selcount; i++) {
+ if (meth.name == sels[i]) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ return false;
+}
+
+/***********************************************************************
+ * getMethodNoSuper_nolock
+ * fixme
+ * Locking: runtimeLock must be read- or write-locked by the caller
+ **********************************************************************/
static method_t *
getMethodNoSuper_nolock(Class cls, SEL sel)
{
runtimeLock.assertLocked();
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
// fixme nil cls?
// fixme nil sel?
mlists != end;
++mlists)
{
- method_t *m = search_method_list(*mlists, sel);
+ // <rdar://problem/46904873> getMethodNoSuper_nolock is the hottest
+ // caller of search_method_list; inlining it turns
+ // getMethodNoSuper_nolock into a frame-less function and eliminates
+ // any store from this codepath.
+ method_t *m = search_method_list_inline(*mlists, sel);
if (m) return m;
}
// fixme nil cls?
// fixme nil sel?
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
while (cls && ((m = getMethodNoSuper_nolock(cls, sel))) == nil) {
cls = cls->superclass;
#warning fixme build and search caches
// Search method lists, try method resolver, etc.
- lookUpImpOrNil(cls, sel, nil,
- NO/*initialize*/, NO/*cache*/, YES/*resolver*/);
+ lookUpImpOrForward(nil, sel, cls, LOOKUP_RESOLVER);
#warning fixme build and search caches
* cls should be a metaclass.
* Does not check if the method already exists.
**********************************************************************/
-static void resolveClassMethod(Class cls, SEL sel, id inst)
+static void resolveClassMethod(id inst, SEL sel, Class cls)
{
runtimeLock.assertUnlocked();
- assert(cls->isRealized());
- assert(cls->isMetaClass());
+ ASSERT(cls->isRealized());
+ ASSERT(cls->isMetaClass());
- if (! lookUpImpOrNil(cls, SEL_resolveClassMethod, inst,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/))
- {
+ if (!lookUpImpOrNil(inst, @selector(resolveClassMethod:), cls)) {
// Resolver not implemented.
return;
}
}
}
BOOL (*msg)(Class, SEL, SEL) = (typeof(msg))objc_msgSend;
- bool resolved = msg(nonmeta, SEL_resolveClassMethod, sel);
+ bool resolved = msg(nonmeta, @selector(resolveClassMethod:), sel);
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveClassMethod adds to self->ISA() a.k.a. cls
- IMP imp = lookUpImpOrNil(cls, sel, inst,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/);
+ IMP imp = lookUpImpOrNil(inst, sel, cls);
if (resolved && PrintResolving) {
if (imp) {
* cls may be a metaclass or a non-meta class.
* Does not check if the method already exists.
**********************************************************************/
-static void resolveInstanceMethod(Class cls, SEL sel, id inst)
+static void resolveInstanceMethod(id inst, SEL sel, Class cls)
{
runtimeLock.assertUnlocked();
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
+ SEL resolve_sel = @selector(resolveInstanceMethod:);
- if (! lookUpImpOrNil(cls->ISA(), SEL_resolveInstanceMethod, cls,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/))
- {
+ if (!lookUpImpOrNil(cls, resolve_sel, cls->ISA())) {
// Resolver not implemented.
return;
}
BOOL (*msg)(Class, SEL, SEL) = (typeof(msg))objc_msgSend;
- bool resolved = msg(cls, SEL_resolveInstanceMethod, sel);
+ bool resolved = msg(cls, resolve_sel, sel);
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveInstanceMethod adds to self a.k.a. cls
- IMP imp = lookUpImpOrNil(cls, sel, inst,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/);
+ IMP imp = lookUpImpOrNil(inst, sel, cls);
if (resolved && PrintResolving) {
if (imp) {
/***********************************************************************
-* resolveMethod
+* resolveMethod_locked
* Call +resolveClassMethod or +resolveInstanceMethod.
-* Returns nothing; any result would be potentially out-of-date already.
-* Does not check if the method already exists.
+*
+* Called with the runtimeLock held to avoid pressure in the caller
+* Tail calls into lookUpImpOrForward, also to avoid pressure in the caller
**********************************************************************/
-static void resolveMethod(Class cls, SEL sel, id inst)
+static NEVER_INLINE IMP
+resolveMethod_locked(id inst, SEL sel, Class cls, int behavior)
{
- runtimeLock.assertUnlocked();
- assert(cls->isRealized());
+ runtimeLock.assertLocked();
+ ASSERT(cls->isRealized());
+
+ runtimeLock.unlock();
if (! cls->isMetaClass()) {
// try [cls resolveInstanceMethod:sel]
- resolveInstanceMethod(cls, sel, inst);
+ resolveInstanceMethod(inst, sel, cls);
}
else {
// try [nonMetaClass resolveClassMethod:sel]
// and [cls resolveInstanceMethod:sel]
- resolveClassMethod(cls, sel, inst);
- if (!lookUpImpOrNil(cls, sel, inst,
- NO/*initialize*/, YES/*cache*/, NO/*resolver*/))
- {
- resolveInstanceMethod(cls, sel, inst);
+ resolveClassMethod(inst, sel, cls);
+ if (!lookUpImpOrNil(inst, sel, cls)) {
+ resolveInstanceMethod(inst, sel, cls);
}
}
+
+ // chances are that calling the resolver has populated the cache
+ // so attempt using it
+ return lookUpImpOrForward(inst, sel, cls, behavior | LOOKUP_CACHE);
}
log_and_fill_cache(Class cls, IMP imp, SEL sel, id receiver, Class implementer)
{
#if SUPPORT_MESSAGE_LOGGING
- if (objcMsgLogEnabled) {
+ if (slowpath(objcMsgLogEnabled && implementer)) {
bool cacheIt = logMessageSend(implementer->isMetaClass(),
cls->nameForLogging(),
implementer->nameForLogging(),
if (!cacheIt) return;
}
#endif
- cache_fill (cls, sel, imp, receiver);
-}
-
-
-/***********************************************************************
-* _class_lookupMethodAndLoadCache.
-* Method lookup for dispatchers ONLY. OTHER CODE SHOULD USE lookUpImp().
-* This lookup avoids optimistic cache scan because the dispatcher
-* already tried that.
-**********************************************************************/
-IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls)
-{
- return lookUpImpOrForward(cls, sel, obj,
- YES/*initialize*/, NO/*cache*/, YES/*resolver*/);
+ cache_fill(cls, sel, imp, receiver);
}
/***********************************************************************
* lookUpImpOrForward.
* The standard IMP lookup.
-* initialize==NO tries to avoid +initialize (but sometimes fails)
-* cache==NO skips optimistic unlocked lookup (but uses cache elsewhere)
-* Most callers should use initialize==YES and cache==YES.
+* Without LOOKUP_INITIALIZE: tries to avoid +initialize (but sometimes fails)
+* Without LOOKUP_CACHE: skips optimistic unlocked lookup (but uses cache elsewhere)
+* Most callers should use LOOKUP_INITIALIZE and LOOKUP_CACHE
* inst is an instance of cls or a subclass thereof, or nil if none is known.
* If cls is an un-initialized metaclass then a non-nil inst is faster.
* May return _objc_msgForward_impcache. IMPs destined for external use
* must be converted to _objc_msgForward or _objc_msgForward_stret.
-* If you don't want forwarding at all, use lookUpImpOrNil() instead.
+* If you don't want forwarding at all, use LOOKUP_NIL.
**********************************************************************/
-IMP lookUpImpOrForward(Class cls, SEL sel, id inst,
- bool initialize, bool cache, bool resolver)
+IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior)
{
+ const IMP forward_imp = (IMP)_objc_msgForward_impcache;
IMP imp = nil;
- bool triedResolver = NO;
+ Class curClass;
runtimeLock.assertUnlocked();
// Optimistic cache lookup
- if (cache) {
+ if (fastpath(behavior & LOOKUP_CACHE)) {
imp = cache_getImp(cls, sel);
- if (imp) return imp;
+ if (imp) goto done_nolock;
}
// runtimeLock is held during isRealized and isInitialized checking
// behalf of the category.
runtimeLock.lock();
+
+ // We don't want people to be able to craft a binary blob that looks like
+ // a class but really isn't one and do a CFI attack.
+ //
+ // To make these harder we want to make sure this is a class that was
+ // either built into the binary or legitimately registered through
+ // objc_duplicateClass, objc_initializeClassPair or objc_allocateClassPair.
+ //
+ // TODO: this check is quite costly during process startup.
checkIsKnownClass(cls);
- if (!cls->isRealized()) {
+ if (slowpath(!cls->isRealized())) {
cls = realizeClassMaybeSwiftAndLeaveLocked(cls, runtimeLock);
// runtimeLock may have been dropped but is now locked again
}
- if (initialize && !cls->isInitialized()) {
+ if (slowpath((behavior & LOOKUP_INITIALIZE) && !cls->isInitialized())) {
cls = initializeAndLeaveLocked(cls, inst, runtimeLock);
// runtimeLock may have been dropped but is now locked again
// from the messenger then it won't happen. 2778172
}
-
- retry:
runtimeLock.assertLocked();
-
- // Try this class's cache.
-
- imp = cache_getImp(cls, sel);
- if (imp) goto done;
-
- // Try this class's method lists.
- {
- Method meth = getMethodNoSuper_nolock(cls, sel);
+ curClass = cls;
+
+ // The code used to look up the class's cache again right after
+ // taking the lock, but evidence shows that for the vast majority
+ // of cases this is a miss, hence a waste of time.
+ //
+ // The only codepath calling into this without having performed some
+ // kind of cache lookup is class_getInstanceMethod().
+
+ for (unsigned attempts = unreasonableClassCount();;) {
+ // curClass method list.
+ Method meth = getMethodNoSuper_nolock(curClass, sel);
if (meth) {
- log_and_fill_cache(cls, meth->imp, sel, inst, cls);
imp = meth->imp;
goto done;
}
- }
- // Try superclass caches and method lists.
- {
- unsigned attempts = unreasonableClassCount();
- for (Class curClass = cls->superclass;
- curClass != nil;
- curClass = curClass->superclass)
- {
- // Halt if there is a cycle in the superclass chain.
- if (--attempts == 0) {
- _objc_fatal("Memory corruption in class list.");
- }
-
- // Superclass cache.
- imp = cache_getImp(curClass, sel);
- if (imp) {
- if (imp != (IMP)_objc_msgForward_impcache) {
- // Found the method in a superclass. Cache it in this class.
- log_and_fill_cache(cls, imp, sel, inst, curClass);
- goto done;
- }
- else {
- // Found a forward:: entry in a superclass.
- // Stop searching, but don't cache yet; call method
- // resolver for this class first.
- break;
- }
- }
-
- // Superclass method list.
- Method meth = getMethodNoSuper_nolock(curClass, sel);
- if (meth) {
- log_and_fill_cache(cls, meth->imp, sel, inst, curClass);
- imp = meth->imp;
- goto done;
- }
+ if (slowpath((curClass = curClass->superclass) == nil)) {
+ // No implementation found, and method resolver didn't help.
+ // Use forwarding.
+ imp = forward_imp;
+ break;
}
- }
- // No implementation found. Try method resolver once.
+ // Halt if there is a cycle in the superclass chain.
+ if (slowpath(--attempts == 0)) {
+ _objc_fatal("Memory corruption in class list.");
+ }
- if (resolver && !triedResolver) {
- runtimeLock.unlock();
- resolveMethod(cls, sel, inst);
- runtimeLock.lock();
- // Don't cache the result; we don't hold the lock so it may have
- // changed already. Re-do the search from scratch instead.
- triedResolver = YES;
- goto retry;
+ // Superclass cache.
+ imp = cache_getImp(curClass, sel);
+ if (slowpath(imp == forward_imp)) {
+ // Found a forward:: entry in a superclass.
+ // Stop searching, but don't cache yet; call method
+ // resolver for this class first.
+ break;
+ }
+ if (fastpath(imp)) {
+ // Found the method in a superclass. Cache it in this class.
+ goto done;
+ }
}
- // No implementation found, and method resolver didn't help.
- // Use forwarding.
+ // No implementation found. Try method resolver once.
- imp = (IMP)_objc_msgForward_impcache;
- cache_fill(cls, sel, imp, inst);
+ if (slowpath(behavior & LOOKUP_RESOLVER)) {
+ behavior ^= LOOKUP_RESOLVER;
+ return resolveMethod_locked(inst, sel, cls, behavior);
+ }
done:
+ log_and_fill_cache(cls, imp, sel, inst, curClass);
runtimeLock.unlock();
-
+ done_nolock:
+ if (slowpath((behavior & LOOKUP_NIL) && imp == forward_imp)) {
+ return nil;
+ }
return imp;
}
-
-/***********************************************************************
-* lookUpImpOrNil.
-* Like lookUpImpOrForward, but returns nil instead of _objc_msgForward_impcache
-**********************************************************************/
-IMP lookUpImpOrNil(Class cls, SEL sel, id inst,
- bool initialize, bool cache, bool resolver)
-{
- IMP imp = lookUpImpOrForward(cls, sel, inst, initialize, cache, resolver);
- if (imp == _objc_msgForward_impcache) return nil;
- else return imp;
-}
-
-
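
The standalone lookUpImpOrNil() removed here presumably survives as an inline wrapper over the new behavior flags in the runtime's headers, which are not shown in this excerpt. A C++ sketch of what such a wrapper looks like, assuming the LOOKUP_* names used above:

// Sketch only; the actual inline would live in objc-runtime-new.h.
static inline IMP
lookUpImpOrNil(id inst, SEL sel, Class cls, int behavior = 0)
{
    return lookUpImpOrForward(inst, sel, cls,
                              behavior | LOOKUP_CACHE | LOOKUP_INITIALIZE |
                              LOOKUP_RESOLVER | LOOKUP_NIL);
}
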
/***********************************************************************
* lookupMethodInClassAndLoadCache.
-* Like _class_lookupMethodAndLoadCache, but does not search superclasses.
+* Like lookUpImpOrForward, but does not search superclasses.
* Caches and returns objc_msgForward if the method is not found in the class.
**********************************************************************/
IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel)
// fixme this is incomplete - no resolver, +initialize -
// but it's only used for .cxx_construct/destruct so we don't care
- assert(sel == SEL_cxx_construct || sel == SEL_cxx_destruct);
+ ASSERT(sel == SEL_cxx_construct || sel == SEL_cxx_destruct);
// Search cache first.
imp = cache_getImp(cls, sel);
checkIsKnownClass(cls);
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
for ( ; cls; cls = cls->superclass) {
for (auto& prop : cls->data()->properties) {
Class metacls;
Class cls;
- assert(!isMetaClass());
+ ASSERT(!isMetaClass());
cls = (Class)this;
metacls = cls->ISA();
mutex_locker_t lock(runtimeLock);
- // Scan metaclass for custom AWZ.
- // Scan metaclass for custom RR.
- // Scan class for custom RR.
- // Also print custom RR/AWZ because we probably haven't done it yet.
-
// Special cases:
- // NSObject AWZ class methods are default.
- // NSObject RR instance methods are default.
- // updateCustomRR_AWZ() also knows these special cases.
+ // - NSObject AWZ class methods are default.
+ // - NSObject RR class and instance methods are default.
+ // - NSObject Core class and instance methods are default.
+ // adjustCustomFlagsForMethodChange() also knows these special cases.
// attachMethodLists() also knows these special cases.
- bool inherited;
- bool metaCustomAWZ = NO;
- if (MetaclassNSObjectAWZSwizzled) {
- // Somebody already swizzled NSObject's methods
- metaCustomAWZ = YES;
- inherited = NO;
- }
- else if (metacls == classNSObject()->ISA()) {
- // NSObject's metaclass AWZ is default, but we still need to check cats
- auto& methods = metacls->data()->methods;
- for (auto mlists = methods.beginCategoryMethodLists(),
- end = methods.endCategoryMethodLists(metacls);
- mlists != end;
- ++mlists)
- {
- if (methodListImplementsAWZ(*mlists)) {
- metaCustomAWZ = YES;
- inherited = NO;
- break;
- }
- }
- }
- else if (metacls->superclass->hasCustomAWZ()) {
- // Superclass is custom AWZ, therefore we are too.
- metaCustomAWZ = YES;
- inherited = YES;
- }
- else {
- // Not metaclass NSObject.
- auto& methods = metacls->data()->methods;
- for (auto mlists = methods.beginLists(),
- end = methods.endLists();
- mlists != end;
- ++mlists)
- {
- if (methodListImplementsAWZ(*mlists)) {
- metaCustomAWZ = YES;
- inherited = NO;
- break;
- }
- }
- }
- if (!metaCustomAWZ) metacls->setHasDefaultAWZ();
-
- if (PrintCustomAWZ && metaCustomAWZ) metacls->printCustomAWZ(inherited);
- // metacls->printCustomRR();
-
-
- bool clsCustomRR = NO;
- if (ClassNSObjectRRSwizzled) {
- // Somebody already swizzled NSObject's methods
- clsCustomRR = YES;
- inherited = NO;
- }
- if (cls == classNSObject()) {
- // NSObject's RR is default, but we still need to check categories
- auto& methods = cls->data()->methods;
- for (auto mlists = methods.beginCategoryMethodLists(),
- end = methods.endCategoryMethodLists(cls);
- mlists != end;
- ++mlists)
- {
- if (methodListImplementsRR(*mlists)) {
- clsCustomRR = YES;
- inherited = NO;
- break;
- }
- }
- }
- else if (!cls->superclass) {
- // Custom root class
- clsCustomRR = YES;
- inherited = NO;
- }
- else if (cls->superclass->hasCustomRR()) {
- // Superclass is custom RR, therefore we are too.
- clsCustomRR = YES;
- inherited = YES;
- }
- else {
- // Not class NSObject.
- auto& methods = cls->data()->methods;
- for (auto mlists = methods.beginLists(),
- end = methods.endLists();
- mlists != end;
- ++mlists)
- {
- if (methodListImplementsRR(*mlists)) {
- clsCustomRR = YES;
- inherited = NO;
- break;
- }
- }
- }
- if (!clsCustomRR) cls->setHasDefaultRR();
-
- // cls->printCustomAWZ();
- if (PrintCustomRR && clsCustomRR) cls->printCustomRR(inherited);
+ objc::AWZScanner::scanInitializedClass(cls, metacls);
+ objc::RRScanner::scanInitializedClass(cls, metacls);
+ objc::CoreScanner::scanInitializedClass(cls, metacls);
// Update the +initialize flags.
// Do this last.
}
-/***********************************************************************
-* Return YES if sel is used by retain/release implementors
-**********************************************************************/
-static bool
-isRRSelector(SEL sel)
-{
- return (sel == SEL_retain || sel == SEL_release ||
- sel == SEL_autorelease || sel == SEL_retainCount ||
- sel == SEL_tryRetain || sel == SEL_retainWeakReference ||
- sel == SEL_isDeallocating || sel == SEL_allowsWeakReference);
-}
-
-
-/***********************************************************************
-* Return YES if mlist implements one of the isRRSelector() methods
-**********************************************************************/
-static bool
-methodListImplementsRR(const method_list_t *mlist)
-{
- return (search_method_list(mlist, SEL_retain) ||
- search_method_list(mlist, SEL_release) ||
- search_method_list(mlist, SEL_autorelease) ||
- search_method_list(mlist, SEL_retainCount) ||
- search_method_list(mlist, SEL_tryRetain) ||
- search_method_list(mlist, SEL_isDeallocating) ||
- search_method_list(mlist, SEL_retainWeakReference) ||
- search_method_list(mlist, SEL_allowsWeakReference));
-}
-
-
-/***********************************************************************
-* Return YES if sel is used by alloc or allocWithZone implementors
-**********************************************************************/
-static bool
-isAWZSelector(SEL sel)
-{
- return (sel == SEL_allocWithZone || sel == SEL_alloc);
-}
-
-
-/***********************************************************************
-* Return YES if mlist implements one of the isAWZSelector() methods
-**********************************************************************/
-static bool
-methodListImplementsAWZ(const method_list_t *mlist)
-{
- return (search_method_list(mlist, SEL_allocWithZone) ||
- search_method_list(mlist, SEL_alloc));
-}
-
-
-void
-objc_class::printCustomRR(bool inherited)
-{
- assert(PrintCustomRR);
- assert(hasCustomRR());
- _objc_inform("CUSTOM RR: %s%s%s", nameForLogging(),
- isMetaClass() ? " (meta)" : "",
- inherited ? " (inherited)" : "");
-}
-
-void
-objc_class::printCustomAWZ(bool inherited)
-{
- assert(PrintCustomAWZ);
- assert(hasCustomAWZ());
- _objc_inform("CUSTOM AWZ: %s%s%s", nameForLogging(),
- isMetaClass() ? " (meta)" : "",
- inherited ? " (inherited)" : "");
-}
-
-void
+void
objc_class::printInstancesRequireRawIsa(bool inherited)
{
- assert(PrintRawIsa);
- assert(instancesRequireRawIsa());
+ ASSERT(PrintRawIsa);
+ ASSERT(instancesRequireRawIsa());
_objc_inform("RAW ISA: %s%s%s", nameForLogging(),
isMetaClass() ? " (meta)" : "",
inherited ? " (inherited)" : "");
}
-
-/***********************************************************************
-* Mark this class and all of its subclasses as implementors or
-* inheritors of custom RR (retain/release/autorelease/retainCount)
-**********************************************************************/
-void objc_class::setHasCustomRR(bool inherited)
-{
- Class cls = (Class)this;
- runtimeLock.assertLocked();
-
- if (hasCustomRR()) return;
-
- foreach_realized_class_and_subclass(cls, ^(Class c){
- if (c != cls && !c->isInitialized()) {
- // Subclass not yet initialized. Wait for setInitialized() to do it
- // fixme short circuit recursion?
- return;
- }
- if (c->hasCustomRR()) {
- // fixme short circuit recursion?
- return;
- }
-
- c->bits.setHasCustomRR();
-
- if (PrintCustomRR) c->printCustomRR(inherited || c != cls);
- });
-}
-
-/***********************************************************************
-* Mark this class and all of its subclasses as implementors or
-* inheritors of custom alloc/allocWithZone:
-**********************************************************************/
-void objc_class::setHasCustomAWZ(bool inherited)
-{
- Class cls = (Class)this;
- runtimeLock.assertLocked();
-
- if (hasCustomAWZ()) return;
-
- foreach_realized_class_and_subclass(cls, ^(Class c){
- if (c != cls && !c->isInitialized()) {
- // Subclass not yet initialized. Wait for setInitialized() to do it
- // fixme short circuit recursion?
- return;
- }
- if (c->hasCustomAWZ()) {
- // fixme short circuit recursion?
- return;
- }
-
- c->bits.setHasCustomAWZ();
-
- if (PrintCustomAWZ) c->printCustomAWZ(inherited || c != cls);
- });
-}
-
-
/***********************************************************************
* Mark this class and all of its subclasses as requiring raw isa pointers
**********************************************************************/
-void objc_class::setInstancesRequireRawIsa(bool inherited)
+void objc_class::setInstancesRequireRawIsaRecursively(bool inherited)
{
Class cls = (Class)this;
runtimeLock.assertLocked();
if (instancesRequireRawIsa()) return;
- foreach_realized_class_and_subclass(cls, ^(Class c){
+ foreach_realized_class_and_subclass(cls, [=](Class c){
if (c->instancesRequireRawIsa()) {
- // fixme short circuit recursion?
- return;
+ return false;
}
- c->bits.setInstancesRequireRawIsa();
+ c->setInstancesRequireRawIsa();
if (PrintRawIsa) c->printInstancesRequireRawIsa(inherited || c != cls);
+ return true;
});
}
if (objc_indexed_classes_count >= ISA_INDEX_COUNT) {
// No more indexes available.
- assert(cls->classArrayIndex() == 0);
- cls->setInstancesRequireRawIsa(false/*not inherited*/);
+ ASSERT(cls->classArrayIndex() == 0);
+ cls->setInstancesRequireRawIsaRecursively(false/*not inherited*/);
return;
}
* Update custom RR and AWZ when a method changes its IMP
**********************************************************************/
static void
-updateCustomRR_AWZ(Class cls, method_t *meth)
+adjustCustomFlagsForMethodChange(Class cls, method_t *meth)
{
- // In almost all cases, IMP swizzling does not affect custom RR/AWZ bits.
- // Custom RR/AWZ search will already find the method whether or not
- // it is swizzled, so it does not transition from non-custom to custom.
- //
- // The only cases where IMP swizzling can affect the RR/AWZ bits is
- // if the swizzled method is one of the methods that is assumed to be
- // non-custom. These special cases are listed in setInitialized().
- // We look for such cases here.
-
- if (isRRSelector(meth->name)) {
-
- if ((classNSObject()->isInitialized() &&
- classNSObject()->hasCustomRR())
- ||
- ClassNSObjectRRSwizzled)
- {
- // already custom, nothing would change
- return;
- }
-
- bool swizzlingNSObject = NO;
- if (cls == classNSObject()) {
- swizzlingNSObject = YES;
- } else {
- // Don't know the class.
- // The only special case is class NSObject.
- for (const auto& meth2 : classNSObject()->data()->methods) {
- if (meth == &meth2) {
- swizzlingNSObject = YES;
- break;
- }
- }
- }
- if (swizzlingNSObject) {
- if (classNSObject()->isInitialized()) {
- classNSObject()->setHasCustomRR();
- } else {
- // NSObject not yet +initialized, so custom RR has not yet
- // been checked, and setInitialized() will not notice the
- // swizzle.
- ClassNSObjectRRSwizzled = YES;
- }
- }
- }
- else if (isAWZSelector(meth->name)) {
- Class metaclassNSObject = classNSObject()->ISA();
-
- if ((metaclassNSObject->isInitialized() &&
- metaclassNSObject->hasCustomAWZ())
- ||
- MetaclassNSObjectAWZSwizzled)
- {
- // already custom, nothing would change
- return;
- }
-
- bool swizzlingNSObject = NO;
- if (cls == metaclassNSObject) {
- swizzlingNSObject = YES;
- } else {
- // Don't know the class.
- // The only special case is metaclass NSObject.
- for (const auto& meth2 : metaclassNSObject->data()->methods) {
- if (meth == &meth2) {
- swizzlingNSObject = YES;
- break;
- }
- }
- }
- if (swizzlingNSObject) {
- if (metaclassNSObject->isInitialized()) {
- metaclassNSObject->setHasCustomAWZ();
- } else {
- // NSObject not yet +initialized, so custom RR has not yet
- // been checked, and setInitialized() will not notice the
- // swizzle.
- MetaclassNSObjectAWZSwizzled = YES;
- }
- }
- }
+ objc::AWZScanner::scanChangedMethod(cls, meth);
+ objc::RRScanner::scanChangedMethod(cls, meth);
+ objc::CoreScanner::scanChangedMethod(cls, meth);
}
runtimeLock.assertLocked();
const ivar_list_t *ivars;
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
if ((ivars = cls->data()->ro->ivars)) {
for (auto& ivar : *ivars) {
if (!ivar.offset) continue; // anonymous bitfield
checkIsKnownClass(cls);
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
for (const auto& proto_ref : cls->data()->protocols) {
protocol_t *p = remapProtocol(proto_ref);
checkIsKnownClass(cls);
- assert(types);
- assert(cls->isRealized());
+ ASSERT(types);
+ ASSERT(cls->isRealized());
method_t *m;
if ((m = getMethodNoSuper_nolock(cls, name))) {
{
runtimeLock.assertLocked();
- assert(names);
- assert(imps);
- assert(types);
- assert(cls->isRealized());
+ ASSERT(names);
+ ASSERT(imps);
+ ASSERT(types);
+ ASSERT(cls->isRealized());
method_list_t *newlist;
size_t newlistSize = method_list_t::byteSize(sizeof(method_t), count);
mutex_locker_t lock(runtimeLock);
checkIsKnownClass(cls);
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
// No class variables
if (cls->isMetaClass()) {
mutex_locker_t lock(runtimeLock);
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
// fixme optimize
protocol_list_t *protolist = (protocol_list_t *)
else {
mutex_locker_t lock(runtimeLock);
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
property_list_t *proplist = (property_list_t *)
malloc(sizeof(*proplist));
// Call the hook.
Class swiftcls = nil;
if (GetClassHook.get()(name, &swiftcls)) {
- assert(swiftcls->isRealized());
+ ASSERT(swiftcls->isRealized());
result = swiftcls;
}
// Erase the name from tls.
unsigned slot = --tls->classNameLookupsUsed;
- assert(slot >= 0 && slot < tls->classNameLookupsAllocated);
- assert(name == tls->classNameLookups[slot]);
+ ASSERT(slot >= 0 && slot < tls->classNameLookupsAllocated);
+ ASSERT(name == tls->classNameLookups[slot]);
tls->classNameLookups[slot] = nil;
}
checkIsKnownClass(original);
- assert(original->isRealized());
- assert(!original->isMetaClass());
+ ASSERT(original->isRealized());
+ ASSERT(!original->isMetaClass());
duplicate = alloc_class_for_subclass(original, extraBytes);
meta->chooseClassArrayIndex();
cls->chooseClassArrayIndex();
+ // This absolutely needs to be done before addSubclass
+ // as initializeToEmpty() clobbers the FAST_CACHE bits
+ cls->cache.initializeToEmpty();
+ meta->cache.initializeToEmpty();
+
+#if FAST_CACHE_META
+ meta->cache.setBit(FAST_CACHE_META);
+#endif
+ meta->setInstancesRequireRawIsa();
+
// Connect to superclasses and metaclasses
cls->initClassIsa(meta);
+
if (superclass) {
meta->initClassIsa(superclass->ISA()->ISA());
cls->superclass = superclass;
addSubclass(cls, meta);
}
- cls->cache.initializeToEmpty();
- meta->cache.initializeToEmpty();
-
addClassTableEntry(cls);
}
// The only client of this function is old Swift.
// Stable Swift won't use it.
// fixme once Swift in the OS settles we can assert(!cls->isSwiftStable()).
- cls = realizeClassWithoutSwift(cls);
+ cls = realizeClassWithoutSwift(cls, nil);
return cls;
}
runtimeLock.assertLocked();
// categories not yet attached to this class
- removeAllUnattachedCategoriesForClass(cls);
+ objc::unattachedCategories.eraseClass(cls);
// superclass's subclass list
if (cls->isRealized()) {
if (!isMeta) {
removeNamedClass(cls, cls->mangledName());
}
- NXHashRemove(allocatedClasses, cls);
+ objc::allocatedClasses.get().erase(cls);
}
* Note: class_createInstance() and class_createInstances() preflight this.
**********************************************************************/
id
-objc_constructInstance(Class cls, void *bytes)
+objc_constructInstance(Class cls, void *bytes)
{
if (!cls || !bytes) return nil;
}
if (hasCxxCtor) {
- return object_cxxConstructFromClass(obj, cls);
+ return object_cxxConstructFromClass(obj, cls, OBJECT_CONSTRUCT_NONE);
} else {
return obj;
}
* class_createInstance
* fixme
* Locking: none
+*
+* Note: this function has been carefully written so that the fastpath
+* takes no branch.
**********************************************************************/
-
-static __attribute__((always_inline))
-id
-_class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone,
- bool cxxConstruct = true,
+static ALWAYS_INLINE id
+_class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone,
+ int construct_flags = OBJECT_CONSTRUCT_NONE,
+ bool cxxConstruct = true,
size_t *outAllocatedSize = nil)
{
- if (!cls) return nil;
-
- assert(cls->isRealized());
+ ASSERT(cls->isRealized());
// Read class's info bits all at once for performance
- bool hasCxxCtor = cls->hasCxxCtor();
+ bool hasCxxCtor = cxxConstruct && cls->hasCxxCtor();
bool hasCxxDtor = cls->hasCxxDtor();
bool fast = cls->canAllocNonpointer();
+ size_t size;
- size_t size = cls->instanceSize(extraBytes);
+ size = cls->instanceSize(extraBytes);
if (outAllocatedSize) *outAllocatedSize = size;
id obj;
- if (!zone && fast) {
+ if (zone) {
+ obj = (id)malloc_zone_calloc((malloc_zone_t *)zone, 1, size);
+ } else {
obj = (id)calloc(1, size);
- if (!obj) return nil;
- obj->initInstanceIsa(cls, hasCxxDtor);
- }
- else {
- if (zone) {
- obj = (id)malloc_zone_calloc ((malloc_zone_t *)zone, 1, size);
- } else {
- obj = (id)calloc(1, size);
+ }
+ if (slowpath(!obj)) {
+ if (construct_flags & OBJECT_CONSTRUCT_CALL_BADALLOC) {
+ return _objc_callBadAllocHandler(cls);
}
- if (!obj) return nil;
+ return nil;
+ }
- // Use raw pointer isa on the assumption that they might be
+ if (!zone && fast) {
+ obj->initInstanceIsa(cls, hasCxxDtor);
+ } else {
+ // Use raw pointer isa on the assumption that they might be
// doing something weird with the zone or RR.
obj->initIsa(cls);
}
- if (cxxConstruct && hasCxxCtor) {
- obj = _objc_constructOrFree(obj, cls);
+ if (fastpath(!hasCxxCtor)) {
+ return obj;
}
- return obj;
+ construct_flags |= OBJECT_CONSTRUCT_FREE_ONFAILURE;
+ return object_cxxConstructFromClass(obj, cls, construct_flags);
}
-
-id
+id
class_createInstance(Class cls, size_t extraBytes)
{
+ if (!cls) return nil;
return _class_createInstanceFromZone(cls, extraBytes, nil);
}
+NEVER_INLINE
+id
+_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone __unused)
+{
+ // allocWithZone under __OBJC2__ ignores the zone parameter
+ return _class_createInstanceFromZone(cls, 0, nil,
+ OBJECT_CONSTRUCT_CALL_BADALLOC);
+}
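
_objc_rootAllocWithZone() and class_createInstance() both funnel into _class_createInstanceFromZone() above; the zone is ignored under __OBJC2__, and OBJECT_CONSTRUCT_CALL_BADALLOC routes allocation failures to the bad-alloc handler. A caller-side sketch using only public API:

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

int main(void)
{
    // Both allocations end up in _class_createInstanceFromZone().
    id a = [NSObject alloc];                            // message-send path
    id b = class_createInstance([NSObject class], 0);   // C runtime API, 0 extra bytes
    (void)a; (void)b;
    return 0;
}
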
/***********************************************************************
* class_createInstances
Class cls = oldObj->ISA();
size_t size;
- id obj = _class_createInstanceFromZone(cls, extraBytes, zone, false, &size);
+ id obj = _class_createInstanceFromZone(cls, extraBytes, zone,
+ OBJECT_CONSTRUCT_NONE, false, &size);
if (!obj) return nil;
// Copy everything except the isa, which was already set above.
id
class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone)
{
+ if (!cls) return nil;
return _class_createInstanceFromZone(cls, extraBytes, zone);
}
msg->sel = sel_registerName((const char *)msg->sel);
if (msg->imp == &objc_msgSend_fixup) {
- if (msg->sel == SEL_alloc) {
+ if (msg->sel == @selector(alloc)) {
msg->imp = (IMP)&objc_alloc;
- } else if (msg->sel == SEL_allocWithZone) {
+ } else if (msg->sel == @selector(allocWithZone:)) {
msg->imp = (IMP)&objc_allocWithZone;
- } else if (msg->sel == SEL_retain) {
+ } else if (msg->sel == @selector(retain)) {
msg->imp = (IMP)&objc_retain;
- } else if (msg->sel == SEL_release) {
+ } else if (msg->sel == @selector(release)) {
msg->imp = (IMP)&objc_release;
- } else if (msg->sel == SEL_autorelease) {
+ } else if (msg->sel == @selector(autorelease)) {
msg->imp = (IMP)&objc_autorelease;
} else {
msg->imp = &objc_msgSend_fixedup;
runtimeLock.assertLocked();
- assert(cls->isRealized());
- assert(newSuper->isRealized());
+ ASSERT(cls->isRealized());
+ ASSERT(newSuper->isRealized());
oldSuper = cls->superclass;
removeSubclass(oldSuper, cls);
return setSuperclass(cls, newSuper);
}
+void runtime_init(void)
+{
+ objc::unattachedCategories.init(32);
+ objc::allocatedClasses.init();
+}
// __OBJC2__
#endif
// set and clear must not overlap
void changeInfo(uint32_t set, uint32_t clear) {
- assert((set & clear) == 0);
+ ASSERT((set & clear) == 0);
uint32_t oldf, newf;
do {
return info & CLS_IS_ARC;
}
- bool hasCustomRR() {
+ bool hasCustomRR() {
return true;
}
- void setHasCustomRR(bool = false) { }
- void setHasDefaultRR() { }
- void printCustomRR(bool) { }
- bool hasCustomAWZ() {
+ bool hasCustomAWZ() {
return true;
}
- void setHasCustomAWZ(bool = false) { }
- void setHasDefaultAWZ() { }
- void printCustomAWZ(bool) { }
bool forbidsAssociatedObjects() {
// Old runtime doesn't support forbidding associated objects.
}
result[i++] = nil;
- assert(i == count+1);
+ ASSERT(i == count+1);
if (outCount) *outCount = count;
return result;
// No duplicate classes allowed.
// Duplicates should have been rejected by _objc_read_classes_from_image
- assert(!oldCls);
+ ASSERT(!oldCls);
}
// Fix up pended class refs to this class, if any
objc_allocateProtocol(const char *name)
{
Class cls = objc_getClass("__IncompleteProtocol");
- assert(cls);
+ ASSERT(cls);
mutex_locker_t lock(classLock);
// Parts of this order are important for correctness or performance.
+ // Fix up selector refs from all images.
+ for (i = 0; i < hCount; i++) {
+ _objc_fixup_selector_refs(hList[i]);
+ }
+
// Read classes from all images.
for (i = 0; i < hCount; i++) {
_objc_read_classes_from_image(hList[i]);
_objc_connect_classes_from_image(hList[i]);
}
- // Fix up class refs, selector refs, and protocol objects from all images.
+ // Fix up class refs, and protocol objects from all images.
for (i = 0; i < hCount; i++) {
_objc_map_class_refs_for_image(hList[i]);
- _objc_fixup_selector_refs(hList[i]);
_objc_fixup_protocol_objects_for_image(hList[i]);
}
{
header_info *hi;
int count = 0;
- int max = HeaderCount;
+ int max = 0;
+ for (hi = FirstHeader; hi != nil; hi = hi->getNext()) {
+ max++;
+ }
#if TARGET_OS_WIN32
const TCHAR **names = (const TCHAR **)calloc(max+1, sizeof(TCHAR *));
#else
mutex_t cacheUpdateLock;
recursive_mutex_t loadMethodLock;
-void lock_init(void)
+void runtime_init(void)
{
}
#include "objc-private.h"
#include "objc-loadmethod.h"
+#include "objc-file.h"
#include "message.h"
/***********************************************************************
// objc's key for pthread_getspecific
+#if SUPPORT_DIRECT_THREAD_KEYS
+#define _objc_pthread_key TLS_DIRECT_KEY
+#else
static tls_key_t _objc_pthread_key;
+#endif
// Selectors
-SEL SEL_load = NULL;
-SEL SEL_initialize = NULL;
-SEL SEL_resolveInstanceMethod = NULL;
-SEL SEL_resolveClassMethod = NULL;
SEL SEL_cxx_construct = NULL;
SEL SEL_cxx_destruct = NULL;
-SEL SEL_retain = NULL;
-SEL SEL_release = NULL;
-SEL SEL_autorelease = NULL;
-SEL SEL_retainCount = NULL;
-SEL SEL_alloc = NULL;
-SEL SEL_allocWithZone = NULL;
-SEL SEL_dealloc = NULL;
-SEL SEL_copy = NULL;
-SEL SEL_new = NULL;
-SEL SEL_forwardInvocation = NULL;
-SEL SEL_tryRetain = NULL;
-SEL SEL_isDeallocating = NULL;
-SEL SEL_retainWeakReference = NULL;
-SEL SEL_allowsWeakReference = NULL;
-
+struct objc::SafeRanges objc::dataSegmentsRanges;
header_info *FirstHeader = 0; // NULL means empty list
header_info *LastHeader = 0; // NULL means invalid; recompute it
-int HeaderCount = 0;
-
// Set to true on the child side of fork()
// if the parent process was multithreaded when fork() was called.
return cls->ISA();
}
+/***********************************************************************
+ * objc::SafeRanges::find. Find an image data segment that contains address
+ **********************************************************************/
+bool
+objc::SafeRanges::find(uintptr_t ptr, uint32_t &pos)
+{
+ if (!sorted) {
+ std::sort(ranges, ranges + count, [](const Range &s1, const Range &s2){
+ return s1.start < s2.start;
+ });
+ sorted = true;
+ }
+
+ uint32_t l = 0, r = count;
+ while (l < r) {
+ uint32_t i = (l + r) / 2;
+
+ if (ptr < ranges[i].start) {
+ r = i;
+ } else if (ptr >= ranges[i].end) {
+ l = i + 1;
+ } else {
+ pos = i;
+ return true;
+ }
+ }
+
+ pos = UINT32_MAX;
+ return false;
+}
+
+/***********************************************************************
+ * objc::SafeRanges::add. Register a new well known data segment.
+ **********************************************************************/
+void
+objc::SafeRanges::add(uintptr_t start, uintptr_t end)
+{
+ if (count == size) {
+ // Grow in a typical malloc-like pattern:
+ // - size < 32: grow by 4
+ // - size < 64: grow by 8
+ // - size < 128: grow by 16
+ // ... etc
+ size += size < 16 ? 4 : 1 << (fls(size) - 3);
+ ranges = (Range *)realloc(ranges, sizeof(Range) * size);
+ }
+ ranges[count++] = Range{ start, end };
+ sorted = false;
+}
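
// Illustrative note (not in the original source): with the growth rule above,
// successive capacities run 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64,
// 80, 96, ...: steps of 4 below 32, steps of 8 below 64, steps of 16 below
// 128, with the step doubling each time the size doubles.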
+
+/***********************************************************************
+ * objc::SafeRanges::remove. Remove a previously known data segment.
+ **********************************************************************/
+void
+objc::SafeRanges::remove(uintptr_t start, uintptr_t end)
+{
+ uint32_t pos;
+
+ if (!find(start, pos) || ranges[pos].end != end) {
+ _objc_fatal("Cannot find range %#lx..%#lx", start, end);
+ }
+ if (pos < --count) {
+ ranges[pos] = ranges[count];
+ sorted = false;
+ }
+}
/***********************************************************************
* appendHeader. Add a newly-constructed header_info to the list.
{
// Add the header to the header list.
// The header is appended to the list, to preserve the bottom-up order.
- HeaderCount++;
hi->setNext(NULL);
if (!FirstHeader) {
// list is empty
LastHeader->setNext(hi);
LastHeader = hi;
}
+
+#if __OBJC2__
+ if ((hi->mhdr()->flags & MH_DYLIB_IN_CACHE) == 0) {
+ foreach_data_segment(hi->mhdr(), [](const segmentType *seg, intptr_t slide) {
+ uintptr_t start = (uintptr_t)seg->vmaddr + slide;
+ objc::dataSegmentsRanges.add(start, start + seg->vmsize);
+ });
+ }
+#endif
}
if (LastHeader == deadHead) {
LastHeader = NULL; // will be recomputed next time it's used
}
-
- HeaderCount--;
break;
}
prev = current;
}
+
+#if __OBJC2__
+ if ((hi->mhdr()->flags & MH_DYLIB_IN_CACHE) == 0) {
+ foreach_data_segment(hi->mhdr(), [](const segmentType *seg, intptr_t slide) {
+ uintptr_t start = (uintptr_t)seg->vmaddr + slide;
+ objc::dataSegmentsRanges.remove(start, start + seg->vmsize);
+ });
+ }
+#endif
}
const char *newImage = "??";
// Silently ignore +load replacement because category +load is special
- if (s == SEL_load) return;
+ if (s == @selector(load)) return;
#if TARGET_OS_WIN32
// don't know dladdr()/dli_fname equivalent
void tls_init(void)
{
#if SUPPORT_DIRECT_THREAD_KEYS
- _objc_pthread_key = TLS_DIRECT_KEY;
pthread_key_init_np(TLS_DIRECT_KEY, &_objc_pthread_destroyspecific);
#else
_objc_pthread_key = tls_create(&_objc_pthread_destroyspecific);
#else
// Default forward handler halts the process.
-__attribute__((noreturn)) void
+__attribute__((noreturn, cold)) void
objc_defaultForwardHandler(id self, SEL sel)
{
_objc_fatal("%c[%s %s]: unrecognized selector sent to instance %p "
#if SUPPORT_STRET
struct stret { int i[100]; };
-__attribute__((noreturn)) struct stret
+__attribute__((noreturn, cold)) struct stret
objc_defaultForwardStretHandler(id self, SEL sel)
{
objc_defaultForwardHandler(self, sel);
* Associative Reference Support
**********************************************************************/
-id objc_getAssociatedObject(id object, const void *key) {
- return _object_get_associative_reference(object, (void *)key);
+id
+objc_getAssociatedObject(id object, const void *key)
+{
+ return _object_get_associative_reference(object, key);
+}
+
+static void
+_base_objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy)
+{
+ _object_set_associative_reference(object, key, value, policy);
}
+static ChainedHookFunction<objc_hook_setAssociatedObject> SetAssocHook{_base_objc_setAssociatedObject};
-void objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy) {
- _object_set_associative_reference(object, (void *)key, value, policy);
+void
+objc_setHook_setAssociatedObject(objc_hook_setAssociatedObject _Nonnull newValue,
+ objc_hook_setAssociatedObject _Nullable * _Nonnull outOldValue) {
+ SetAssocHook.set(newValue, outOldValue);
+}
+
+void
+objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy)
+{
+ SetAssocHook.get()(object, key, value, policy);
}
// Register selectors used by libobjc
-#define s(x) SEL_##x = sel_registerNameNoLock(#x, NO)
-#define t(x,y) SEL_##y = sel_registerNameNoLock(#x, NO)
-
mutex_locker_t lock(selLock);
- s(load);
- s(initialize);
- t(resolveInstanceMethod:, resolveInstanceMethod);
- t(resolveClassMethod:, resolveClassMethod);
- t(.cxx_construct, cxx_construct);
- t(.cxx_destruct, cxx_destruct);
- s(retain);
- s(release);
- s(autorelease);
- s(retainCount);
- s(alloc);
- t(allocWithZone:, allocWithZone);
- s(dealloc);
- s(copy);
- s(new);
- t(forwardInvocation:, forwardInvocation);
- t(_tryRetain, tryRetain);
- t(_isDeallocating, isDeallocating);
- s(retainWeakReference);
- s(allowsWeakReference);
+ SEL_cxx_construct = sel_registerNameNoLock(".cxx_construct", NO);
+ SEL_cxx_destruct = sel_registerNameNoLock(".cxx_destruct", NO);
extern SEL FwdSel;
FwdSel = sel_registerNameNoLock("forward::", NO);
-
-#undef s
-#undef t
}
__END_DECLS
# define PTR(x) .long x
#endif
+// These offsets are populated by the dyld shared cache builder.
+// They point to memory allocated elsewhere in the shared cache.
+
.section __TEXT,__objc_opt_ro
.align 3
.private_extern __objc_opt_data
.long 0 /* table.headeropt_rw_offset */
.space PAGE_MAX_SIZE-28
-/* space for selopt, smax/capacity=1048576, blen/mask=524287+1 */
-.space 4*(8+256) /* header and scramble */
-.space 524288 /* mask tab */
-.space 1048576 /* checkbytes */
-.space 1048576*4 /* offsets */
-
-/* space for clsopt, smax/capacity=131072, blen/mask=32767+1 */
-.space 4*(8+256) /* header and scramble */
-.space 32768 /* mask tab */
-.space 131072 /* checkbytes */
-.space 131072*12 /* offsets to name and class and header_info */
-.space 512*8 /* some duplicate classes */
-
-/* space for some demangled protocol names */
-.space 1024
-
-/* space for protocolopt, smax/capacity=16384, blen/mask=8191+1 */
-.space 4*(8+256) /* header and scramble */
-.space 8192 /* mask tab */
-.space 16384 /* checkbytes */
-.space 16384*8 /* offsets */
-
-/* space for 2048 header_info (RO) structures */
-.space 8 + (2048*16)
-
-
-.section __DATA,__objc_opt_rw
-.align 3
-.private_extern __objc_opt_rw_data
-__objc_opt_rw_data:
-
-/* space for 2048 header_info (RW) structures */
-.space 8 + (2048*8)
-
-/* space for 16384 protocols */
-#if __LP64__
-.space 16384 * 12 * 8
-#else
-.space 16384 * 12 * 4
-#endif
-
/* section of pointers that the shared cache optimizer wants to know about */
.section __DATA,__objc_opt_ptrs
#include "objc-private.h"
#include "objc-cache.h"
+#include "DenseMapExtras.h"
#if SUPPORT_PREOPT
static const objc_selopt_t *builtins = NULL;
+static bool useDyldSelectorLookup = false;
#endif
-static size_t SelrefCount = 0;
-
-static NXMapTable *namedSelectors;
-
+static objc::ExplicitInitDenseSet<const char *> namedSelectors;
static SEL search_builtins(const char *key);
**********************************************************************/
void sel_init(size_t selrefCount)
{
- // save this value for later
- SelrefCount = selrefCount;
-
#if SUPPORT_PREOPT
- builtins = preoptimizedSelectors();
+ // If dyld finds a known shared cache selector, then it must also be looking
+ // in the shared cache table.
+ if (_dyld_get_objc_selector("retain") != nil)
+ useDyldSelectorLookup = true;
+ else
+ builtins = preoptimizedSelectors();
+
+ if (PrintPreopt && useDyldSelectorLookup) {
+ _objc_inform("PREOPTIMIZATION: using dyld selector opt");
+ }
if (PrintPreopt && builtins) {
uint32_t occupied = builtins->occupied;
_objc_inform("PREOPTIMIZATION: %u/%u (%u%%) hash table occupancy",
occupied, capacity,
(unsigned)(occupied/(double)capacity*100));
- }
+ }
+ namedSelectors.init(useDyldSelectorLookup ? 0 : (unsigned)selrefCount);
+#else
+ namedSelectors.init((unsigned)selrefCount);
#endif
// Register selectors used by libobjc
-#define s(x) SEL_##x = sel_registerNameNoLock(#x, NO)
-#define t(x,y) SEL_##y = sel_registerNameNoLock(#x, NO)
-
mutex_locker_t lock(selLock);
- s(load);
- s(initialize);
- t(resolveInstanceMethod:, resolveInstanceMethod);
- t(resolveClassMethod:, resolveClassMethod);
- t(.cxx_construct, cxx_construct);
- t(.cxx_destruct, cxx_destruct);
- s(retain);
- s(release);
- s(autorelease);
- s(retainCount);
- s(alloc);
- t(allocWithZone:, allocWithZone);
- s(dealloc);
- s(copy);
- s(new);
- t(forwardInvocation:, forwardInvocation);
- t(_tryRetain, tryRetain);
- t(_isDeallocating, isDeallocating);
- s(retainWeakReference);
- s(allowsWeakReference);
-
-#undef s
-#undef t
+ SEL_cxx_construct = sel_registerNameNoLock(".cxx_construct", NO);
+ SEL_cxx_destruct = sel_registerNameNoLock(".cxx_destruct", NO);
}
if (sel == search_builtins(name)) return YES;
mutex_locker_t lock(selLock);
- if (namedSelectors) {
- return (sel == (SEL)NXMapGet(namedSelectors, name));
- }
- return false;
+ auto it = namedSelectors.get().find(name);
+ return it != namedSelectors.get().end() && (SEL)*it == sel;
}
static SEL search_builtins(const char *name)
{
#if SUPPORT_PREOPT
- if (builtins) return (SEL)builtins->get(name);
+ if (builtins) {
+ SEL result = 0;
+ if ((result = (SEL)builtins->get(name)))
+ return result;
+
+ if ((result = (SEL)_dyld_get_objc_selector(name)))
+ return result;
+ } else if (useDyldSelectorLookup) {
+ if (SEL result = (SEL)_dyld_get_objc_selector(name))
+ return result;
+ }
#endif
return nil;
}
if (result) return result;
conditional_mutex_locker_t lock(selLock, shouldLock);
- if (namedSelectors) {
- result = (SEL)NXMapGet(namedSelectors, name);
- }
- if (result) return result;
-
- // No match. Insert.
-
- if (!namedSelectors) {
- namedSelectors = NXCreateMapTable(NXStrValueMapPrototype,
- (unsigned)SelrefCount);
- }
- if (!result) {
- result = sel_alloc(name, copy);
- // fixme choose a better container (hash not map for starters)
- NXMapInsert(namedSelectors, sel_getName(result), result);
- }
-
- return result;
+ auto it = namedSelectors.get().insert(name);
+ if (it.second) {
+ // No match. Insert.
+ *it.first = (const char *)sel_alloc(name, copy);
+ }
+ return (SEL)*it.first;
}
if (obj) {
SyncData* data = id2data(obj, ACQUIRE);
- assert(data);
+ ASSERT(data);
data->mutex.lock();
} else {
// @synchronized(nil) does nothing
return result;
}
+BOOL objc_sync_try_enter(id obj)
+{
+ BOOL result = YES;
+
+ if (obj) {
+ SyncData* data = id2data(obj, ACQUIRE);
+ ASSERT(data);
+ result = data->mutex.tryLock();
+ } else {
+ // @synchronized(nil) does nothing
+ if (DebugNilSync) {
+ _objc_inform("NIL SYNC DEBUG: @synchronized(nil); set a breakpoint on objc_sync_nil to debug");
+ }
+ objc_sync_nil();
+ }
+
+ return result;
+}
+
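
// Illustrative use of the new try-enter SPI (a sketch, not part of this patch):
// a non-blocking critical section. objc_sync_exit() is the existing exit call
// and must only be invoked when the try-enter succeeded. The helper name
// withLockIfUncontended is hypothetical.
static BOOL withLockIfUncontended(id obj, void (^work)(void))
{
    if (!objc_sync_try_enter(obj)) return NO;  // lock is contended; don't block
    work();
    objc_sync_exit(obj);
    return YES;
}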
// End synchronizing on 'obj'.
// Returns OBJC_SYNC_SUCCESS or OBJC_SYNC_NOT_OWNING_THREAD_ERROR
static void grow_refs_and_insert(weak_entry_t *entry,
objc_object **new_referrer)
{
- assert(entry->out_of_line());
+ ASSERT(entry->out_of_line());
size_t old_size = TABLE_SIZE(entry);
size_t new_size = old_size ? old_size * 2 : 8;
entry->max_hash_displacement = 0;
}
- assert(entry->out_of_line());
+ ASSERT(entry->out_of_line());
if (entry->num_refs >= TABLE_SIZE(entry) * 3/4) {
return grow_refs_and_insert(entry, new_referrer);
static void weak_entry_insert(weak_table_t *weak_table, weak_entry_t *new_entry)
{
weak_entry_t *weak_entries = weak_table->weak_entries;
- assert(weak_entries != nil);
+ ASSERT(weak_entries != nil);
size_t begin = hash_pointer(new_entry->referent) & (weak_table->mask);
size_t index = begin;
static weak_entry_t *
weak_entry_for_referent(weak_table_t *weak_table, objc_object *referent)
{
- assert(referent);
+ ASSERT(referent);
weak_entry_t *weak_entries = weak_table->weak_entries;
BOOL (*allowsWeakReference)(objc_object *, SEL) =
(BOOL(*)(objc_object *, SEL))
object_getMethodImplementation((id)referent,
- SEL_allowsWeakReference);
+ @selector(allowsWeakReference));
if ((IMP)allowsWeakReference == _objc_msgForward) {
return nil;
}
deallocating =
- ! (*allowsWeakReference)(referent, SEL_allowsWeakReference);
+ ! (*allowsWeakReference)(referent, @selector(allowsWeakReference));
}
if (deallocating) {
# endif
#else
// __OBJC_BOOL_IS_BOOL not set.
-# if TARGET_OS_OSX || TARGET_OS_IOSMAC || (TARGET_OS_IOS && !__LP64__ && !__ARM_ARCH_7K)
+# if TARGET_OS_OSX || TARGET_OS_IOSMAC || ((TARGET_OS_IOS || TARGET_OS_BRIDGE) && !__LP64__ && !__ARM_ARCH_7K)
# define OBJC_BOOL_IS_BOOL 0
# else
# define OBJC_BOOL_IS_BOOL 1
*/
OBJC_EXPORT Class _Nonnull
class_setSuperclass(Class _Nonnull cls, Class _Nonnull newSuper)
- __OSX_DEPRECATED(10.5, 10.5, "not recommended")
- __IOS_DEPRECATED(2.0, 2.0, "not recommended")
- __TVOS_DEPRECATED(9.0, 9.0, "not recommended")
+ __OSX_DEPRECATED(10.5, 10.5, "not recommended")
+ __IOS_DEPRECATED(2.0, 2.0, "not recommended")
+ __TVOS_DEPRECATED(9.0, 9.0, "not recommended")
__WATCHOS_DEPRECATED(1.0, 1.0, "not recommended")
- __BRIDGEOS_DEPRECATED(2.0, 2.0, "not recommended");
+#ifndef __APPLE_BLEACH_SDK__
+ __BRIDGEOS_DEPRECATED(2.0, 2.0, "not recommended")
+#endif
+;
/**
* Returns the version number of a class definition.
*
* @return A C array of protocols adopted by \e proto. The array contains \e *outCount pointers
* followed by a \c NULL terminator. You must free the array with \c free().
- * If the protocol declares no properties, \c NULL is returned and \c *outCount is \c 0.
+ * If the protocol adopts no other protocols, \c NULL is returned and \c *outCount is \c 0.
*/
OBJC_EXPORT Protocol * __unsafe_unretained _Nonnull * _Nullable
protocol_copyProtocolList(Protocol * _Nonnull proto,
#define OBJC_GETCLASSHOOK_DEFINED 1
OBJC_EXPORT void objc_setHook_getClass(objc_hook_getClass _Nonnull newValue,
objc_hook_getClass _Nullable * _Nonnull outOldValue)
- OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0);
-// rdar://44986431 fixme correct availability for _objc_realizeClassFromSwift
+ OBJC_AVAILABLE(10.14.4, 12.2, 12.2, 5.2, 3.2);
#endif
+/**
+ * Function type for a hook that assists objc_setAssociatedObject().
+ *
+ * @param object The source object for the association.
+ * @param key The key for the association.
+ * @param value The value to associate with the key \e key for \e object. Pass nil to clear an existing association.
+ * @param policy The policy for the association. For possible values, see “Associative Object Behaviors.”
+ *
+ * @see objc_setAssociatedObject
+ * @see objc_setHook_setAssociatedObject
+ */
+typedef void (*objc_hook_setAssociatedObject)(id _Nonnull object, const void * _Nonnull key,
+ id _Nullable value, objc_AssociationPolicy policy);
+
+/**
+ * Install a hook for objc_setAssociatedObject().
+ *
+ * @param newValue The hook function to install.
+ * @param outOldValue The address of a function pointer variable. On return,
+ * the old hook function is stored in the variable.
+ *
+ * @note The store to *outOldValue is thread-safe: the variable will be
+ * updated before objc_setAssociatedObject() calls your new hook to read it,
+ * even if your new hook is called from another thread before this
+ * setter completes.
+ * @note Your hook should always call the previous hook.
+ *
+ * @see objc_setAssociatedObject
+ * @see objc_hook_setAssociatedObject
+ */
+#if !(TARGET_OS_OSX && __i386__)
+#define OBJC_SETASSOCIATEDOBJECTHOOK_DEFINED 1
+OBJC_EXPORT void objc_setHook_setAssociatedObject(objc_hook_setAssociatedObject _Nonnull newValue,
+ objc_hook_setAssociatedObject _Nullable * _Nonnull outOldValue)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0);
+#endif
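
// Illustrative client-side sketch (not part of this header): a hook that does
// its own bookkeeping and then chains to the previously installed hook, as the
// notes above require. The names LoggingSetAssocHook and OldSetAssocHook are
// hypothetical.
static objc_hook_setAssociatedObject OldSetAssocHook;
static void LoggingSetAssocHook(id object, const void *key,
                                id value, objc_AssociationPolicy policy)
{
    // client-side bookkeeping would go here
    OldSetAssocHook(object, key, value, policy);  // always call the previous hook
}
// Installed once, early during startup:
//   objc_setHook_setAssociatedObject(LoggingSetAssocHook, &OldSetAssocHook);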
+
+/**
+ * Function type for a function that is called when an image is loaded.
+ *
+ * @param header The newly loaded header.
+ */
+struct mach_header;
+typedef void (*objc_func_loadImage)(const struct mach_header * _Nonnull header);
+
+/**
+ * Add a function to be called when a new image is loaded. The function is
+ * called after ObjC has scanned and fixed up the image. It is called
+ * BEFORE +load methods are invoked.
+ *
+ * When adding a new function, that function is immediately called with all
+ * images that are currently loaded. It is then called as needed for images
+ * that are loaded afterwards.
+ *
+ * Note: the function is called with ObjC's internal runtime lock held.
+ * Be VERY careful with what the function does to avoid deadlocks or
+ * poor performance.
+ *
+ * @param func The function to add.
+ */
+#define OBJC_ADDLOADIMAGEFUNC_DEFINED 1
+OBJC_EXPORT void objc_addLoadImageFunc(objc_func_loadImage _Nonnull func)
+ OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0);
+
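
// Illustrative client-side sketch (not part of this header): a load-image
// callback. Per the note above it runs with the runtime lock held, so keep it
// cheap and avoid calling back into arbitrary Objective-C code. The name
// NoteImageLoaded is hypothetical.
static void NoteImageLoaded(const struct mach_header *mh)
{
    (void)mh;  // record or inspect the newly loaded image here; keep this fast
}
// Registered once, e.g. from an initializer:
//   objc_addLoadImageFunc(NoteImageLoaded);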
/**
* Callback from Objective-C to Swift to perform Swift class initialization.
*/
#define OBJC_REALIZECLASSFROMSWIFT_DEFINED 1
OBJC_EXPORT Class _Nullable
_objc_realizeClassFromSwift(Class _Nullable cls, void * _Nullable previously)
- OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0);
-// rdar://44986431 fixme correct availability for _objc_realizeClassFromSwift
+ OBJC_AVAILABLE(10.14.4, 12.2, 12.2, 5.2, 3.2);
#endif
OBJC_EXPORT IMP _Nullable
class_lookupMethod(Class _Nullable cls, SEL _Nonnull sel)
- __OSX_DEPRECATED(10.0, 10.5, "use class_getMethodImplementation instead")
- __IOS_DEPRECATED(2.0, 2.0, "use class_getMethodImplementation instead")
- __TVOS_DEPRECATED(9.0, 9.0, "use class_getMethodImplementation instead")
+ __OSX_DEPRECATED(10.0, 10.5, "use class_getMethodImplementation instead")
+ __IOS_DEPRECATED(2.0, 2.0, "use class_getMethodImplementation instead")
+ __TVOS_DEPRECATED(9.0, 9.0, "use class_getMethodImplementation instead")
__WATCHOS_DEPRECATED(1.0, 1.0, "use class_getMethodImplementation instead")
- __BRIDGEOS_DEPRECATED(2.0, 2.0, "use class_getMethodImplementation instead");
+#ifndef __APPLE_BLEACH_SDK__
+ __BRIDGEOS_DEPRECATED(2.0, 2.0, "use class_getMethodImplementation instead")
+#endif
+;
OBJC_EXPORT BOOL
class_respondsToMethod(Class _Nullable cls, SEL _Nonnull sel)
- __OSX_DEPRECATED(10.0, 10.5, "use class_respondsToSelector instead")
- __IOS_DEPRECATED(2.0, 2.0, "use class_respondsToSelector instead")
- __TVOS_DEPRECATED(9.0, 9.0, "use class_respondsToSelector instead")
+ __OSX_DEPRECATED(10.0, 10.5, "use class_respondsToSelector instead")
+ __IOS_DEPRECATED(2.0, 2.0, "use class_respondsToSelector instead")
+ __TVOS_DEPRECATED(9.0, 9.0, "use class_respondsToSelector instead")
__WATCHOS_DEPRECATED(1.0, 1.0, "use class_respondsToSelector instead")
- __BRIDGEOS_DEPRECATED(2.0, 2.0, "use class_respondsToSelector instead");
+#ifndef __APPLE_BLEACH_SDK__
+ __BRIDGEOS_DEPRECATED(2.0, 2.0, "use class_respondsToSelector instead")
+#endif
+;
OBJC_EXPORT void
_objc_flush_caches(Class _Nullable cls)
- __OSX_DEPRECATED(10.0, 10.5, "not recommended")
- __IOS_DEPRECATED(2.0, 2.0, "not recommended")
- __TVOS_DEPRECATED(9.0, 9.0, "not recommended")
+ __OSX_DEPRECATED(10.0, 10.5, "not recommended")
+ __IOS_DEPRECATED(2.0, 2.0, "not recommended")
+ __TVOS_DEPRECATED(9.0, 9.0, "not recommended")
__WATCHOS_DEPRECATED(1.0, 1.0, "not recommended")
- __BRIDGEOS_DEPRECATED(2.0, 2.0, "not recommended");
+#ifndef __APPLE_BLEACH_SDK__
+ __BRIDGEOS_DEPRECATED(2.0, 2.0, "not recommended")
+#endif
+;
OBJC_EXPORT id _Nullable
object_copyFromZone(id _Nullable anObject, size_t nBytes, void * _Nullable z)
- __OSX_DEPRECATED(10.0, 10.5, "use object_copy instead")
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE
- OBJC_ARC_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.0, 10.5, "use object_copy instead");
OBJC_EXPORT id _Nullable
object_realloc(id _Nullable anObject, size_t nBytes)
OBJC_EXPORT id _Nullable
class_createInstanceFromZone(Class _Nullable, size_t idxIvars,
void * _Nullable z)
- __OSX_DEPRECATED(10.0, 10.5, "use class_createInstance instead")
- __IOS_UNAVAILABLE __TVOS_UNAVAILABLE
- __WATCHOS_UNAVAILABLE __BRIDGEOS_UNAVAILABLE
- OBJC_ARC_UNAVAILABLE;
+ OBJC_OSX_DEPRECATED_OTHERS_UNAVAILABLE(10.0, 10.5, "use class_createInstance instead");
OBJC_EXPORT void
class_addMethods(Class _Nullable, struct objc_method_list * _Nonnull)
// Macros for array construction.
// ten IMPs
-#define IMPS10 fn0, fn1, fn2, fn3, fn4, fn5, fn6, fn7, fn8, fn9
+#define IMPS10 (IMP)fn0, (IMP)fn1, (IMP)fn2, (IMP)fn3, (IMP)fn4, \
+ (IMP)fn5, (IMP)fn6, (IMP)fn7, (IMP)fn8, (IMP)fn9
// ten method types
#define TYPES10 "", "", "", "", "", "", "", "", "", ""
// ten selectors of the form name0..name9
// similar to dummyIMPs but with different values in each slot
IMP dummyIMPs2[130] = {
- fn5, fn6, fn7, fn8, fn9,
+ (IMP)fn5, (IMP)fn6, (IMP)fn7, (IMP)fn8, (IMP)fn9,
IMPS10, IMPS10, IMPS10, IMPS10, IMPS10,
IMPS10, IMPS10, IMPS10, IMPS10, IMPS10,
IMPS10, IMPS10,
- fn0, fn1, fn2, fn3, fn4,
+ (IMP)fn0, (IMP)fn1, (IMP)fn2, (IMP)fn3, (IMP)fn4,
};
const char *dummyTypes[130] = {
// rdar://44094390 tolerate nil object and nil value
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnonnull"
- objc_setAssociatedObject(nil, &key, nil, 0);
+ objc_setAssociatedObject(nil, &key, nil, OBJC_ASSOCIATION_ASSIGN);
#pragma clang diagnostic pop
succeed(__FILE__);
};
struct cache_t {
- struct bucket_t *buckets;
+ uintptr_t buckets;
mask_t mask;
mask_t occupied;
};
[obj self];
struct cache_t *cache = &((__bridge struct class_t *)cls)->cache;
-
+
+ // Figure out which cache mask scheme is in use by examining the existing bits.
+ int low4 = 0;
+#if __LP64__
+ int top16 = 0;
+#endif
+ int outlined = 0;
+
+ if (cache->buckets & 0xf) {
+ low4 = 1;
+#if __LP64__
+ } else if ((cache->buckets & (0xffffULL << 48))) {
+ top16 = 1;
+#endif
+ } else {
+ outlined = 1;
+ }
+
# define COUNT 4
+# define COUNTSHIFT 14
struct bucket_t *buckets = (struct bucket_t *)calloc(sizeof(struct bucket_t), COUNT+1);
for (int i = 0; i < COUNT; i++) {
buckets[i].sel = ~0;
buckets[COUNT].sel = 1;
buckets[COUNT].imp = (uintptr_t)buckets;
- cache->mask = COUNT-1;
- cache->occupied = 0;
- cache->buckets = buckets;
+ if (low4) {
+ cache->buckets = (uintptr_t)buckets | COUNTSHIFT;
+#if __LP64__
+ } else if (top16) {
+ cache->buckets = ((uintptr_t)(COUNT - 1) << 48) | (uintptr_t)buckets;
+#endif
+ } else if (outlined) {
+ cache->mask = COUNT-1;
+ cache->buckets = (uintptr_t)buckets;
+ }
+ cache->occupied = 0;
+
fprintf(stderr, "crash now\n");
[obj self];
#if TARGET_OS_OSX
# define RealBool 0
-#elif TARGET_OS_IOS
+#elif TARGET_OS_IOS || TARGET_OS_BRIDGE
# if (__arm__ && !__armv7k__) || __i386__
# define RealBool 0
# else
#include <string.h>
#include <objc/runtime.h>
+#if __LP64__
+# define PTR " .quad "
+#else
+# define PTR " .long "
+#endif
+
static int state = 0;
@interface Super : TestRoot @end
@end
+// Manually build a category that goes in __objc_catlist2.
+#if __has_feature(ptrauth_calls)
+#define SIGNED_CATEGORY_IMP "@AUTH(ia,0,addr)"
+#else
+#define SIGNED_CATEGORY_IMP
+#endif
+asm(
+" .section __DATA,__objc_const \n"
+"L_catlist2CategoryName: \n"
+" .asciz \"Category_catlist2\" \n"
+"L_catlist2MethodString: \n"
+" .asciz \"catlist2Method\" \n"
+"L_catlist2MethodTypes: \n"
+" .asciz \"i16@0:8\" \n"
+
+" .p2align 3 \n"
+"l_OBJC_$_CATEGORY_INSTANCE_METHODS_Super_$_Category_catlist2: \n"
+" .long 24 \n"
+" .long 1 \n"
+" "PTR" L_catlist2MethodString \n"
+" "PTR" L_catlist2MethodTypes \n"
+" "PTR" _catlist2MethodImplementation"SIGNED_CATEGORY_IMP" \n"
+
+" .p2align 3 \n"
+"l_OBJC_$_CATEGORY_Super_$_Category_catlist2: \n"
+" "PTR" L_catlist2CategoryName \n"
+" "PTR" _OBJC_CLASS_$_Super \n"
+" "PTR" l_OBJC_$_CATEGORY_INSTANCE_METHODS_Super_$_Category_catlist2 \n"
+" "PTR" 0 \n"
+" "PTR" 0 \n"
+" "PTR" 0 \n"
+" "PTR" 0 \n"
+" .long 64 \n"
+" .space 4 \n"
+
+" .section __DATA,__objc_catlist2 \n"
+" .p2align 3 \n"
+" "PTR" l_OBJC_$_CATEGORY_Super_$_Category_catlist2 \n"
+
+" .text \n"
+);
+
+@interface Super (Category_catlist2)
+- (int)catlist2Method;
+@end
+
+EXTERN_C int catlist2MethodImplementation(id self __unused, SEL _cmd __unused) {
+ return 0;
+}
+
+
int main()
{
{
testassert(!plist[2]);
free(plist);
+ // method introduced by category in catlist2
+ testassert([[Super new] catlist2Method] == 0);
+
succeed(__FILE__);
}
+++ /dev/null
-// TEST_CONFIG
-
-#include "test.h"
-
-#include "testroot.i"
-#include <objc/runtime.h>
-#include <string.h>
-
-@protocol Proto
--(void) instanceMethod;
-+(void) classMethod;
-@optional
--(void) instanceMethod2;
-+(void) classMethod2;
-@end
-
-@protocol Proto2
--(void) instanceMethod;
-+(void) classMethod;
-@optional
--(void) instanceMethod2;
-+(void) classMethod_that_does_not_exist;
-@end
-
-@protocol Proto3
--(void) instanceMethod;
-+(void) classMethod_that_does_not_exist;
-@optional
--(void) instanceMethod2;
-+(void) classMethod2;
-@end
-
-static int super_initialize;
-
-@interface Super : TestRoot
-@property int superProp;
-@end
-@implementation Super
-@dynamic superProp;
-+(void)initialize { super_initialize++; }
-
-+(void) classMethod { fail("+[Super classMethod] called"); }
-+(void) classMethod2 { fail("+[Super classMethod2] called"); }
--(void) instanceMethod { fail("-[Super instanceMethod] called"); }
--(void) instanceMethod2 { fail("-[Super instanceMethod2] called"); }
-@end
-
-static int state;
-
-static void instance_fn(id self, SEL _cmd __attribute__((unused)))
-{
- testassert(!class_isMetaClass(object_getClass(self)));
- state++;
-}
-
-static void class_fn(id self, SEL _cmd __attribute__((unused)))
-{
- testassert(class_isMetaClass(object_getClass(self)));
- state++;
-}
-
-static void fail_fn(id self __attribute__((unused)), SEL _cmd)
-{
- fail("fail_fn '%s' called", sel_getName(_cmd));
-}
-
-
-static void cycle(void)
-{
- Class cls;
- BOOL ok;
- objc_property_t prop;
- char namebuf[256];
-
- testassert(!objc_getClass("Sub"));
- testassert([Super class]);
-
- // Test subclass with bells and whistles
-
- cls = objc_allocateClassPair([Super class], "Sub", 0);
- testassert(cls);
-
- class_addMethod(cls, @selector(instanceMethod),
- (IMP)&instance_fn, "v@:");
- class_addMethod(object_getClass(cls), @selector(classMethod),
- (IMP)&class_fn, "v@:");
- class_addMethod(object_getClass(cls), @selector(initialize),
- (IMP)&class_fn, "v@:");
- class_addMethod(object_getClass(cls), @selector(load),
- (IMP)&fail_fn, "v@:");
-
- ok = class_addProtocol(cls, @protocol(Proto));
- testassert(ok);
- ok = class_addProtocol(cls, @protocol(Proto));
- testassert(!ok);
-
- char attrname[2];
- char attrvalue[2];
- objc_property_attribute_t attrs[1];
- unsigned int attrcount = sizeof(attrs) / sizeof(attrs[0]);
-
- attrs[0].name = attrname;
- attrs[0].value = attrvalue;
- strcpy(attrname, "T");
- strcpy(attrvalue, "x");
-
- strcpy(namebuf, "subProp");
- ok = class_addProperty(cls, namebuf, attrs, attrcount);
- testassert(ok);
- strcpy(namebuf, "subProp");
- ok = class_addProperty(cls, namebuf, attrs, attrcount);
- testassert(!ok);
- strcpy(attrvalue, "i");
- class_replaceProperty(cls, namebuf, attrs, attrcount);
- strcpy(namebuf, "superProp");
- ok = class_addProperty(cls, namebuf, attrs, attrcount);
- testassert(!ok);
- bzero(namebuf, sizeof(namebuf));
- bzero(attrs, sizeof(attrs));
- bzero(attrname, sizeof(attrname));
- bzero(attrvalue, sizeof(attrvalue));
-
-#ifndef __LP64__
-# define size 4
-# define align 2
-#else
-#define size 8
-# define align 3
-#endif
-
- /*
- {
- int ivar;
- id ivarid;
- id* ivaridstar;
- Block_t ivarblock;
- }
- */
- ok = class_addIvar(cls, "ivar", 4, 2, "i");
- testassert(ok);
- ok = class_addIvar(cls, "ivarid", size, align, "@");
- testassert(ok);
- ok = class_addIvar(cls, "ivaridstar", size, align, "^@");
- testassert(ok);
- ok = class_addIvar(cls, "ivarblock", size, align, "@?");
- testassert(ok);
-
- ok = class_addIvar(cls, "ivar", 4, 2, "i");
- testassert(!ok);
- ok = class_addIvar(object_getClass(cls), "classvar", 4, 2, "i");
- testassert(!ok);
-
- objc_registerClassPair(cls);
-
- // should call cls's +initialize, not super's
- // Provoke +initialize using class_getMethodImplementation(class method)
- // in order to test getNonMetaClass's slow case
- super_initialize = 0;
- state = 0;
- class_getMethodImplementation(object_getClass(cls), @selector(class));
- testassert(super_initialize == 0);
- testassert(state == 1);
-
- testassert(cls == [cls class]);
- testassert(cls == objc_getClass("Sub"));
-
- testassert(!class_isMetaClass(cls));
- testassert(class_isMetaClass(object_getClass(cls)));
-
- testassert(class_getSuperclass(cls) == [Super class]);
- testassert(class_getSuperclass(object_getClass(cls)) == object_getClass([Super class]));
-
- testassert(class_getInstanceSize(cls) >= sizeof(Class) + 4 + 3*size);
- testassert(class_conformsToProtocol(cls, @protocol(Proto)));
-
- class_addMethod(cls, @selector(instanceMethod2),
- (IMP)&instance_fn, "v@:");
- class_addMethod(object_getClass(cls), @selector(classMethod2),
- (IMP)&class_fn, "v@:");
-
- ok = class_addIvar(cls, "ivar2", 4, 4, "i");
- testassert(!ok);
- ok = class_addIvar(object_getClass(cls), "classvar2", 4, 4, "i");
- testassert(!ok);
-
- ok = class_addProtocol(cls, @protocol(Proto2));
- testassert(ok);
- ok = class_addProtocol(cls, @protocol(Proto2));
- testassert(!ok);
- ok = class_addProtocol(cls, @protocol(Proto));
- testassert(!ok);
-
- attrs[0].name = attrname;
- attrs[0].value = attrvalue;
- strcpy(attrname, "T");
- strcpy(attrvalue, "i");
-
- strcpy(namebuf, "subProp2");
- ok = class_addProperty(cls, namebuf, attrs, attrcount);
- testassert(ok);
- strcpy(namebuf, "subProp");
- ok = class_addProperty(cls, namebuf, attrs, attrcount);
- testassert(!ok);
- strcpy(namebuf, "superProp");
- ok = class_addProperty(cls, namebuf, attrs, attrcount);
- testassert(!ok);
- bzero(namebuf, sizeof(namebuf));
- bzero(attrs, sizeof(attrs));
- bzero(attrname, sizeof(attrname));
- bzero(attrvalue, sizeof(attrvalue));
-
- prop = class_getProperty(cls, "subProp");
- testassert(prop);
- testassert(0 == strcmp(property_getName(prop), "subProp"));
- testassert(0 == strcmp(property_getAttributes(prop), "Ti"));
- prop = class_getProperty(cls, "subProp2");
- testassert(prop);
- testassert(0 == strcmp(property_getName(prop), "subProp2"));
- testassert(0 == strcmp(property_getAttributes(prop), "Ti"));
-
- // note: adding more methods here causes a false leak check failure
- state = 0;
- [cls classMethod];
- [cls classMethod2];
- testassert(state == 2);
-
- // put instance tests on a separate thread so they
- // are reliably deallocated before class destruction
- testonthread(^{
- id obj = [cls new];
- state = 0;
- [obj instanceMethod];
- [obj instanceMethod2];
- testassert(state == 2);
- RELEASE_VAR(obj);
- });
-
- // Test ivar layouts of sub-subclass
- Class cls2 = objc_allocateClassPair(cls, "SubSub", 0);
- testassert(cls2);
-
- /*
- {
- id ivarid2;
- id idarray[16];
- void* ptrarray[16];
- char a;
- char b;
- char c;
- }
- */
- ok = class_addIvar(cls2, "ivarid2", size, align, "@");
- testassert(ok);
- ok = class_addIvar(cls2, "idarray", 16*sizeof(id), align, "[16@]");
- testassert(ok);
- ok = class_addIvar(cls2, "ptrarray", 16*sizeof(void*), align, "[16^]");
- testassert(ok);
- ok = class_addIvar(cls2, "a", 1, 0, "c");
- testassert(ok);
- ok = class_addIvar(cls2, "b", 1, 0, "c");
- testassert(ok);
- ok = class_addIvar(cls2, "c", 1, 0, "c");
- testassert(ok);
-
- objc_registerClassPair(cls2);
-
- // 1-byte ivars should be well packed
- testassert(ivar_getOffset(class_getInstanceVariable(cls2, "b")) ==
- ivar_getOffset(class_getInstanceVariable(cls2, "a")) + 1);
- testassert(ivar_getOffset(class_getInstanceVariable(cls2, "c")) ==
- ivar_getOffset(class_getInstanceVariable(cls2, "b")) + 1);
-
- objc_disposeClassPair(cls2);
- objc_disposeClassPair(cls);
-
- testassert(!objc_getClass("Sub"));
-
- // fixme test layout setters
-}
-
-int main()
-{
- int count = 5000;
-
- // fixme even with this long warmup we still
- // suffer false 4096-byte leaks occasionally.
- for (int i = 0; i < 500; i++) {
- testonthread(^{ cycle(); });
- }
-
- leak_mark();
- while (count--) {
- testonthread(^{ cycle(); });
- }
- leak_check(4096);
-
- succeed(__FILE__);
-}
-
--- /dev/null
+// TEST_CONFIG
+
+#include "test.h"
+
+#include "testroot.i"
+#include <objc/runtime.h>
+#include <string.h>
+
+@protocol Proto
+-(void) instanceMethod;
++(void) classMethod;
+@optional
+-(void) instanceMethod2;
++(void) classMethod2;
+@end
+
+@protocol Proto2
+-(void) instanceMethod;
++(void) classMethod;
+@optional
+-(void) instanceMethod2;
++(void) classMethod_that_does_not_exist;
+@end
+
+@protocol Proto3
+-(void) instanceMethod;
++(void) classMethod_that_does_not_exist;
+@optional
+-(void) instanceMethod2;
++(void) classMethod2;
+@end
+
+static int super_initialize;
+static int super_cxxctor;
+static int super_cxxdtor;
+
+struct super_cxx {
+ int foo;
+ super_cxx() : foo(0) {
+ super_cxxctor++;
+ }
+ ~super_cxx() {
+ super_cxxdtor++;
+ }
+};
+
+@interface Super : TestRoot
+@property int superProp;
+@end
+@implementation Super {
+ super_cxx _foo;
+}
+@dynamic superProp;
++(void)initialize { super_initialize++; }
+
++(void) classMethod { fail("+[Super classMethod] called"); }
++(void) classMethod2 { fail("+[Super classMethod2] called"); }
+-(void) instanceMethod { fail("-[Super instanceMethod] called"); }
+-(void) instanceMethod2 { fail("-[Super instanceMethod2] called"); }
+@end
+
+static int state;
+
+static void instance_fn(id self, SEL _cmd __attribute__((unused)))
+{
+ testassert(!class_isMetaClass(object_getClass(self)));
+ state++;
+}
+
+static void class_fn(id self, SEL _cmd __attribute__((unused)))
+{
+ testassert(class_isMetaClass(object_getClass(self)));
+ state++;
+}
+
+static void fail_fn(id self __attribute__((unused)), SEL _cmd)
+{
+ fail("fail_fn '%s' called", sel_getName(_cmd));
+}
+
+
+static void cycle(void)
+{
+ Class cls;
+ BOOL ok;
+ objc_property_t prop;
+ char namebuf[256];
+
+ testassert(!objc_getClass("Sub"));
+ testassert([Super class]);
+
+ // Test subclass with bells and whistles
+
+ cls = objc_allocateClassPair([Super class], "Sub", 0);
+ testassert(cls);
+
+ class_addMethod(cls, @selector(instanceMethod),
+ (IMP)&instance_fn, "v@:");
+ class_addMethod(object_getClass(cls), @selector(classMethod),
+ (IMP)&class_fn, "v@:");
+ class_addMethod(object_getClass(cls), @selector(initialize),
+ (IMP)&class_fn, "v@:");
+ class_addMethod(object_getClass(cls), @selector(load),
+ (IMP)&fail_fn, "v@:");
+
+ ok = class_addProtocol(cls, @protocol(Proto));
+ testassert(ok);
+ ok = class_addProtocol(cls, @protocol(Proto));
+ testassert(!ok);
+
+ char attrname[2];
+ char attrvalue[2];
+ objc_property_attribute_t attrs[1];
+ unsigned int attrcount = sizeof(attrs) / sizeof(attrs[0]);
+
+ attrs[0].name = attrname;
+ attrs[0].value = attrvalue;
+ strcpy(attrname, "T");
+ strcpy(attrvalue, "x");
+
+ strcpy(namebuf, "subProp");
+ ok = class_addProperty(cls, namebuf, attrs, attrcount);
+ testassert(ok);
+ strcpy(namebuf, "subProp");
+ ok = class_addProperty(cls, namebuf, attrs, attrcount);
+ testassert(!ok);
+ strcpy(attrvalue, "i");
+ class_replaceProperty(cls, namebuf, attrs, attrcount);
+ strcpy(namebuf, "superProp");
+ ok = class_addProperty(cls, namebuf, attrs, attrcount);
+ testassert(!ok);
+ bzero(namebuf, sizeof(namebuf));
+ bzero(attrs, sizeof(attrs));
+ bzero(attrname, sizeof(attrname));
+ bzero(attrvalue, sizeof(attrvalue));
+
+#ifndef __LP64__
+# define size 4
+# define align 2
+#else
+#define size 8
+# define align 3
+#endif
+
+ /*
+ {
+ int ivar;
+ id ivarid;
+ id* ivaridstar;
+ Block_t ivarblock;
+ }
+ */
+ ok = class_addIvar(cls, "ivar", 4, 2, "i");
+ testassert(ok);
+ ok = class_addIvar(cls, "ivarid", size, align, "@");
+ testassert(ok);
+ ok = class_addIvar(cls, "ivaridstar", size, align, "^@");
+ testassert(ok);
+ ok = class_addIvar(cls, "ivarblock", size, align, "@?");
+ testassert(ok);
+
+ ok = class_addIvar(cls, "ivar", 4, 2, "i");
+ testassert(!ok);
+ ok = class_addIvar(object_getClass(cls), "classvar", 4, 2, "i");
+ testassert(!ok);
+
+ objc_registerClassPair(cls);
+
+ // should call cls's +initialize, not super's
+ // Provoke +initialize using class_getMethodImplementation(class method)
+ // in order to test getNonMetaClass's slow case
+ super_initialize = 0;
+ state = 0;
+ class_getMethodImplementation(object_getClass(cls), @selector(class));
+ testassert(super_initialize == 0);
+ testassert(state == 1);
+
+ testassert(cls == [cls class]);
+ testassert(cls == objc_getClass("Sub"));
+
+ testassert(!class_isMetaClass(cls));
+ testassert(class_isMetaClass(object_getClass(cls)));
+
+ testassert(class_getSuperclass(cls) == [Super class]);
+ testassert(class_getSuperclass(object_getClass(cls)) == object_getClass([Super class]));
+
+ testassert(class_getInstanceSize(cls) >= sizeof(Class) + 4 + 3*size);
+ testassert(class_conformsToProtocol(cls, @protocol(Proto)));
+
+ class_addMethod(cls, @selector(instanceMethod2),
+ (IMP)&instance_fn, "v@:");
+ class_addMethod(object_getClass(cls), @selector(classMethod2),
+ (IMP)&class_fn, "v@:");
+
+ ok = class_addIvar(cls, "ivar2", 4, 4, "i");
+ testassert(!ok);
+ ok = class_addIvar(object_getClass(cls), "classvar2", 4, 4, "i");
+ testassert(!ok);
+
+ ok = class_addProtocol(cls, @protocol(Proto2));
+ testassert(ok);
+ ok = class_addProtocol(cls, @protocol(Proto2));
+ testassert(!ok);
+ ok = class_addProtocol(cls, @protocol(Proto));
+ testassert(!ok);
+
+ attrs[0].name = attrname;
+ attrs[0].value = attrvalue;
+ strcpy(attrname, "T");
+ strcpy(attrvalue, "i");
+
+ strcpy(namebuf, "subProp2");
+ ok = class_addProperty(cls, namebuf, attrs, attrcount);
+ testassert(ok);
+ strcpy(namebuf, "subProp");
+ ok = class_addProperty(cls, namebuf, attrs, attrcount);
+ testassert(!ok);
+ strcpy(namebuf, "superProp");
+ ok = class_addProperty(cls, namebuf, attrs, attrcount);
+ testassert(!ok);
+ bzero(namebuf, sizeof(namebuf));
+ bzero(attrs, sizeof(attrs));
+ bzero(attrname, sizeof(attrname));
+ bzero(attrvalue, sizeof(attrvalue));
+
+ prop = class_getProperty(cls, "subProp");
+ testassert(prop);
+ testassert(0 == strcmp(property_getName(prop), "subProp"));
+ testassert(0 == strcmp(property_getAttributes(prop), "Ti"));
+ prop = class_getProperty(cls, "subProp2");
+ testassert(prop);
+ testassert(0 == strcmp(property_getName(prop), "subProp2"));
+ testassert(0 == strcmp(property_getAttributes(prop), "Ti"));
+
+ // note: adding more methods here causes a false leak check failure
+ state = 0;
+ [cls classMethod];
+ [cls classMethod2];
+ testassert(state == 2);
+
+ // put instance tests on a separate thread so they
+ // are reliably deallocated before class destruction
+ testonthread(^{
+ super_cxxctor = 0;
+ super_cxxdtor = 0;
+ id obj = [cls new];
+ testassert(super_cxxctor == 1);
+ testassert(super_cxxdtor == 0);
+ state = 0;
+ [obj instanceMethod];
+ [obj instanceMethod2];
+ testassert(state == 2);
+ RELEASE_VAR(obj);
+ testassert(super_cxxctor == 1);
+ testassert(super_cxxdtor == 1);
+ });
+
+ // Test ivar layouts of sub-subclass
+ Class cls2 = objc_allocateClassPair(cls, "SubSub", 0);
+ testassert(cls2);
+
+ /*
+ {
+ id ivarid2;
+ id idarray[16];
+ void* ptrarray[16];
+ char a;
+ char b;
+ char c;
+ }
+ */
+ ok = class_addIvar(cls2, "ivarid2", size, align, "@");
+ testassert(ok);
+ ok = class_addIvar(cls2, "idarray", 16*sizeof(id), align, "[16@]");
+ testassert(ok);
+ ok = class_addIvar(cls2, "ptrarray", 16*sizeof(void*), align, "[16^]");
+ testassert(ok);
+ ok = class_addIvar(cls2, "a", 1, 0, "c");
+ testassert(ok);
+ ok = class_addIvar(cls2, "b", 1, 0, "c");
+ testassert(ok);
+ ok = class_addIvar(cls2, "c", 1, 0, "c");
+ testassert(ok);
+
+ objc_registerClassPair(cls2);
+
+ // 1-byte ivars should be well packed
+ testassert(ivar_getOffset(class_getInstanceVariable(cls2, "b")) ==
+ ivar_getOffset(class_getInstanceVariable(cls2, "a")) + 1);
+ testassert(ivar_getOffset(class_getInstanceVariable(cls2, "c")) ==
+ ivar_getOffset(class_getInstanceVariable(cls2, "b")) + 1);
+
+ objc_disposeClassPair(cls2);
+ objc_disposeClassPair(cls);
+
+ testassert(!objc_getClass("Sub"));
+
+ // fixme test layout setters
+}
+
+int main()
+{
+ int count = 5000;
+
+ // fixme even with this long warmup we still
+ // suffer false 4096-byte leaks occasionally.
+ for (int i = 0; i < 500; i++) {
+ testonthread(^{ cycle(); });
+ }
+
+ leak_mark();
+ while (count--) {
+ testonthread(^{ cycle(); });
+ }
+ leak_check(4096);
+
+ succeed(__FILE__);
+}
+
--- /dev/null
+// TEST_CONFIG MEM=mrc
+
+#define TEST_CALLS_OPERATOR_NEW
+#include "test.h"
+#include "testroot.i"
+#include "swift-class-def.m"
+
+#include <objc/objc-internal.h>
+
+#include <vector>
+
+static Class expectedOldClass;
+
+static std::vector<Class> observedNewClasses1;
+static void handler1(Class _Nonnull oldClass, Class _Nonnull newClass) {
+ testprintf("%s(%p, %p)", __func__, oldClass, newClass);
+ testassert(oldClass == expectedOldClass);
+ observedNewClasses1.push_back(newClass);
+}
+
+static std::vector<Class> observedNewClasses2;
+static void handler2(Class _Nonnull oldClass, Class _Nonnull newClass) {
+ testprintf("%s(%p, %p)", __func__, oldClass, newClass);
+ testassert(oldClass == expectedOldClass);
+ observedNewClasses2.push_back(newClass);
+}
+
+static std::vector<Class> observedNewClasses3;
+static void handler3(Class _Nonnull oldClass, Class _Nonnull newClass) {
+ testprintf("%s(%p, %p)", __func__, oldClass, newClass);
+ testassert(oldClass == expectedOldClass);
+ observedNewClasses3.push_back(newClass);
+}
+
+EXTERN_C Class _objc_realizeClassFromSwift(Class, void *);
+
+EXTERN_C Class init(Class cls, void *arg) {
+ (void)arg;
+ _objc_realizeClassFromSwift(cls, cls);
+ return cls;
+}
+
+@interface SwiftRoot: TestRoot @end
+SWIFT_CLASS(SwiftRoot, TestRoot, init);
+
+int main()
+{
+ expectedOldClass = [SwiftRoot class];
+ Class A = objc_allocateClassPair([RawSwiftRoot class], "A", 0);
+ objc_registerClassPair(A);
+ testassert(observedNewClasses1.size() == 0);
+ testassert(observedNewClasses2.size() == 0);
+ testassert(observedNewClasses3.size() == 0);
+
+ _objc_setClassCopyFixupHandler(handler1);
+
+ expectedOldClass = A;
+ Class B = objc_allocateClassPair(A, "B", 0);
+ objc_registerClassPair(B);
+ testassert(observedNewClasses1.size() == 2);
+ testassert(observedNewClasses2.size() == 0);
+ testassert(observedNewClasses3.size() == 0);
+ testassert(observedNewClasses1[0] == B);
+
+ _objc_setClassCopyFixupHandler(handler2);
+
+ expectedOldClass = B;
+ Class C = objc_allocateClassPair(B, "C", 0);
+ objc_registerClassPair(C);
+ testassert(observedNewClasses1.size() == 4);
+ testassert(observedNewClasses2.size() == 2);
+ testassert(observedNewClasses3.size() == 0);
+ testassert(observedNewClasses1[2] == C);
+ testassert(observedNewClasses2[0] == C);
+
+ _objc_setClassCopyFixupHandler(handler3);
+
+ expectedOldClass = C;
+ Class D = objc_allocateClassPair(C, "D", 0);
+ objc_registerClassPair(D);
+ testassert(observedNewClasses1.size() == 6);
+ testassert(observedNewClasses2.size() == 4);
+ testassert(observedNewClasses3.size() == 2);
+ testassert(observedNewClasses1[4] == D);
+ testassert(observedNewClasses2[2] == D);
+ testassert(observedNewClasses3[0] == D);
+
+ succeed(__FILE__);
+}
--- /dev/null
+// TEST_CONFIG
+
+#include "test.h"
+#include <string.h>
+#include <malloc/malloc.h>
+#include <objc/objc-runtime.h>
+
+@protocol Proto1
++(id)proto1ClassMethod;
+-(id)proto1InstanceMethod;
+@end
+
+void noNullEntries(Protocol * _Nonnull __unsafe_unretained * _Nullable protolist,
+ unsigned int count)
+{
+ for (unsigned int i = 0; i != count; ++i) {
+ testassert(protolist[i]);
+ testassert(protocol_getName(protolist[i]));
+ testprintf("Protocol[%d/%d]: %p %s\n", i, count, protolist[i], protocol_getName(protolist[i]));
+ }
+}
+
+Protocol* getProtocol(Protocol * _Nonnull __unsafe_unretained * _Nullable protolist,
+ unsigned int count, const char* name) {
+ for (unsigned int i = 0; i != count; ++i) {
+ if (!strcmp(protocol_getName(protolist[i]), name))
+ return protolist[i];
+ }
+ return nil;
+}
+
+int main()
+{
+ Protocol * _Nonnull __unsafe_unretained * _Nullable protolist;
+ unsigned int count;
+
+ count = 100;
+ protolist = objc_copyProtocolList(&count);
+ testassert(protolist);
+ testassert(count != 0);
+ testassert(malloc_size(protolist) >= (count * sizeof(Protocol*)));
+ noNullEntries(protolist, count);
+ testassert(protolist[count] == nil);
+ // Check for a shared cache protocol, ie, the one we know comes from libobjc
+ testassert(getProtocol(protolist, count, "NSObject"));
+ // Test for a protocol we know isn't in the cache
+ testassert(getProtocol(protolist, count, "Proto1") == @protocol(Proto1));
+ // Test for a protocol we know isn't there
+ testassert(!getProtocol(protolist, count, "Proto2"));
+ free(protolist);
+
+ // Now add it
+ Protocol* newproto = objc_allocateProtocol("Proto2");
+ objc_registerProtocol(newproto);
+
+ Protocol * _Nonnull __unsafe_unretained * _Nullable newProtolist;
+ unsigned int newCount;
+
+ newCount = 100;
+ newProtolist = objc_copyProtocolList(&newCount);
+ testassert(newProtolist);
+ testassert(newCount == (count + 1));
+ testassert(getProtocol(newProtolist, newCount, "Proto2"));
+ free(newProtolist);
+
+
+ succeed(__FILE__);
+ return 0;
+}
-/*
+/*
TEST_CONFIG MEM=mrc
-TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES
+TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES OBJC_PRINT_CUSTOM_CORE=YES
TEST_BUILD
- $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-awz.exe -DSWIZZLE_AWZ=1
+ $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-awz.exe -DSWIZZLE_AWZ=1 -fno-objc-convert-messages-to-runtime-calls
END
TEST_RUN_OUTPUT
-objc\[\d+\]: CUSTOM AWZ: NSObject \(meta\)
+objc\[\d+\]: CUSTOM AWZ: NSObject \(meta\)
OK: customrr-nsobject-awz.exe
END
--- /dev/null
+/*
+
+TEST_CONFIG MEM=mrc
+TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES OBJC_PRINT_CUSTOM_CORE=YES
+
+TEST_BUILD
+ $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-core.exe -DSWIZZLE_CORE=1 -fno-objc-convert-messages-to-runtime-calls
+END
+
+TEST_RUN_OUTPUT
+objc\[\d+\]: CUSTOM Core: NSObject
+objc\[\d+\]: CUSTOM Core: NSObject \(meta\)
+OK: customrr-nsobject-core.exe
+END
+
+*/
+
-/*
+/*
TEST_CONFIG MEM=mrc
-TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES
+TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES OBJC_PRINT_CUSTOM_CORE=YES
TEST_BUILD
- $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-none.exe
+ $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-none.exe -fno-objc-convert-messages-to-runtime-calls
END
TEST_RUN_OUTPUT
-/*
+/*
TEST_CONFIG MEM=mrc
-TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES
+TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES OBJC_PRINT_CUSTOM_CORE=YES
TEST_BUILD
- $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-rr.exe -DSWIZZLE_RELEASE=1
+ $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-rr.exe -DSWIZZLE_RELEASE=1 -fno-objc-convert-messages-to-runtime-calls
END
TEST_RUN_OUTPUT
-objc\[\d+\]: CUSTOM RR: NSObject
+objc\[\d+\]: CUSTOM RR: NSObject
OK: customrr-nsobject-rr.exe
END
-/*
+/*
TEST_CONFIG MEM=mrc
-TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES
+TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES OBJC_PRINT_CUSTOM_CORE=YES
TEST_BUILD
- $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-rrawz.exe -DSWIZZLE_RELEASE=1 -DSWIZZLE_AWZ=1
+ $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-rrawz.exe -DSWIZZLE_RELEASE=1 -DSWIZZLE_AWZ=1 -fno-objc-convert-messages-to-runtime-calls
END
TEST_RUN_OUTPUT
-objc\[\d+\]: CUSTOM AWZ: NSObject \(meta\)
-objc\[\d+\]: CUSTOM RR: NSObject
+objc\[\d+\]: CUSTOM AWZ: NSObject \(meta\)
+objc\[\d+\]: CUSTOM RR: NSObject
OK: customrr-nsobject-rrawz.exe
END
#include "test.h"
#include <objc/NSObject.h>
+#include <objc/objc-internal.h>
#if __has_feature(ptrauth_calls)
typedef IMP __ptrauth_objc_method_list_imp MethodListIMP;
static int Allocs;
static int AllocWithZones;
static int Inits;
+static int PlusNew;
+static int Self;
+static int PlusSelf;
id (*RealRetain)(id self, SEL _cmd);
void (*RealRelease)(id self, SEL _cmd);
id (*RealAutorelease)(id self, SEL _cmd);
id (*RealAlloc)(id self, SEL _cmd);
id (*RealAllocWithZone)(id self, SEL _cmd, void *zone);
+id (*RealPlusNew)(id self, SEL _cmd);
+id (*RealSelf)(id self);
+id (*RealPlusSelf)(id self);
id HackRetain(id self, SEL _cmd) { Retains++; return RealRetain(self, _cmd); }
void HackRelease(id self, SEL _cmd) { Releases++; return RealRelease(self, _cmd); }
id HackInit(id self, SEL _cmd __unused) { Inits++; return self; }
+id HackPlusNew(id self, SEL _cmd __unused) { PlusNew++; return RealPlusNew(self, _cmd); }
+id HackSelf(id self) { Self++; return RealSelf(self); }
+id HackPlusSelf(id self) { PlusSelf++; return RealPlusSelf(self); }
+
int main(int argc __unused, char **argv)
{
((MethodListIMP *)meth)[2] = (IMP)HackAllocWithZone;
#endif
+ meth = class_getClassMethod(cls, @selector(new));
+ RealPlusNew = (typeof(RealPlusNew))method_getImplementation(meth);
+#if SWIZZLE_CORE
+ method_setImplementation(meth, (IMP)HackPlusNew);
+#else
+ ((MethodListIMP *)meth)[2] = (IMP)HackPlusNew;
+#endif
+
+ meth = class_getClassMethod(cls, @selector(self));
+ RealPlusSelf = (typeof(RealPlusSelf))method_getImplementation(meth);
+#if SWIZZLE_CORE
+ method_setImplementation(meth, (IMP)HackPlusSelf);
+#else
+ ((MethodListIMP *)meth)[2] = (IMP)HackPlusSelf;
+#endif
+
+ meth = class_getInstanceMethod(cls, @selector(self));
+ RealSelf = (typeof(RealSelf))method_getImplementation(meth);
+#if SWIZZLE_CORE
+ method_setImplementation(meth, (IMP)HackSelf);
+#else
+ ((MethodListIMP *)meth)[2] = (IMP)HackSelf;
+#endif
+
meth = class_getInstanceMethod(cls, @selector(release));
RealRelease = (typeof(RealRelease))method_getImplementation(meth);
#if SWIZZLE_RELEASE
testassert(Releases == 0);
#endif
+ PlusNew = 0;
+ Self = 0;
+ PlusSelf = 0;
+ Class nso = objc_opt_self([NSObject class]);
+ obj = objc_opt_new(nso);
+ obj = objc_opt_self(obj);
+#if SWIZZLE_CORE
+ testprintf("swizzled Core should be called\n");
+ testassert(PlusNew == 1);
+ testassert(Self == 1);
+ testassert(PlusSelf == 1);
+#else
+ testprintf("unswizzled CORE should be bypassed\n");
+ testassert(PlusNew == 0);
+ testassert(Self == 0);
+ testassert(PlusSelf == 0);
+#endif
+ testassert([obj isKindOfClass:nso]);
+
succeed(basename(argv[0]));
}
// TEST_CONFIG MEM=mrc
/*
TEST_BUILD
- $C{COMPILE} $DIR/customrr.m -fvisibility=default -o customrr.exe
+ $C{COMPILE} $DIR/customrr.m -fvisibility=default -o customrr.exe -fno-objc-convert-messages-to-runtime-calls
$C{COMPILE} -bundle -bundle_loader customrr.exe $DIR/customrr-cat1.m -o customrr-cat1.bundle
$C{COMPILE} -bundle -bundle_loader customrr.exe $DIR/customrr-cat2.m -o customrr-cat2.bundle
END
objc_autorelease(obj);
testassert(Autoreleases == 0);
+#if SUPPORT_NONPOINTER_ISA
+ objc_retain(cls);
+ testassert(PlusRetains == 0);
+ objc_release(cls);
+ testassert(PlusReleases == 0);
+ objc_autorelease(cls);
+ testassert(PlusAutoreleases == 0);
+#else
objc_retain(cls);
testassert(PlusRetains == 1);
objc_release(cls);
testassert(PlusReleases == 1);
objc_autorelease(cls);
testassert(PlusAutoreleases == 1);
+#endif
objc_retain(inh);
testassert(Retains == 0);
objc_autorelease(inh);
testassert(Autoreleases == 0);
+#if SUPPORT_NONPOINTER_ISA
+ objc_retain(icl);
+ testassert(PlusRetains == 0);
+ objc_release(icl);
+ testassert(PlusReleases == 0);
+ objc_autorelease(icl);
+ testassert(PlusAutoreleases == 0);
+#else
objc_retain(icl);
testassert(PlusRetains == 2);
objc_release(icl);
testassert(PlusReleases == 2);
objc_autorelease(icl);
testassert(PlusAutoreleases == 2);
+#endif
objc_retain(ovr);
testassert(SubRetains == 1);
objc_autorelease(ocl);
testassert(SubPlusAutoreleases == 1);
+#if SUPPORT_NONPOINTER_ISA
+ objc_retain((Class)&OBJC_CLASS_$_UnrealizedSubC1);
+ testassert(PlusRetains == 1);
+ objc_release((Class)&OBJC_CLASS_$_UnrealizedSubC2);
+ testassert(PlusReleases == 1);
+ objc_autorelease((Class)&OBJC_CLASS_$_UnrealizedSubC3);
+ testassert(PlusAutoreleases == 1);
+#else
objc_retain((Class)&OBJC_CLASS_$_UnrealizedSubC1);
testassert(PlusRetains == 3);
objc_release((Class)&OBJC_CLASS_$_UnrealizedSubC2);
testassert(PlusReleases == 3);
objc_autorelease((Class)&OBJC_CLASS_$_UnrealizedSubC3);
testassert(PlusAutoreleases == 3);
-
+#endif
testprintf("unrelated addMethod does not clobber\n");
zero();
// TEST_CONFIG MEM=mrc
/*
TEST_BUILD
- $C{COMPILE} $DIR/customrr.m -fvisibility=default -o customrr2.exe -DTEST_EXCHANGEIMPLEMENTATIONS=1
+ $C{COMPILE} $DIR/customrr.m -fvisibility=default -o customrr2.exe -DTEST_EXCHANGEIMPLEMENTATIONS=1 -fno-objc-convert-messages-to-runtime-calls
$C{COMPILE} -bundle -bundle_loader customrr2.exe $DIR/customrr-cat1.m -o customrr-cat1.bundle
$C{COMPILE} -bundle -bundle_loader customrr2.exe $DIR/customrr-cat2.m -o customrr-cat2.bundle
END
--- /dev/null
+
+#include "test.h"
+
+// This test assumes
+// - that on launch we don't have NSCoding, NSSecureCoding, or NSDictionary, i.e., libSystem doesn't contain them
+// - that after dlopening CF, we get NSDictionary and it conforms to NSSecureCoding which conforms to NSCoding
+// - that our NSCoding will be used if we ask either NSSecureCoding or NSDictionary if they conform to our test protocol
+
+@protocol NewNSCodingSuperProto
+@end
+
+@protocol NSCoding <NewNSCodingSuperProto>
+@end
+
+int main()
+{
+ // Before we dlopen, make sure we are using our NSCoding, not the shared cache version
+ Protocol* codingSuperProto = objc_getProtocol("NewNSCodingSuperProto");
+ Protocol* codingProto = objc_getProtocol("NSCoding");
+ if (@protocol(NewNSCodingSuperProto) != codingSuperProto) fail("Protocol mismatch");
+ if (@protocol(NSCoding) != codingProto) fail("Protocol mismatch");
+ if (!protocol_conformsToProtocol(codingProto, codingSuperProto)) fail("Our NSCoding should conform to NewNSCodingSuperProto");
+
+ // Also make sure we don't yet have an NSSecureCoding or NSDictionary
+ if (objc_getProtocol("NSSecureCoding")) fail("Test assumes we don't have NSSecureCoding yet");
+ if (objc_getClass("NSDictionary")) fail("Test assumes we don't have NSDictionary yet");
+
+ void *dl = dlopen("/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation", RTLD_LAZY);
+ if (!dl) fail("couldn't open CoreFoundation");
+
+ // We should now have NSSecureCoding and NSDictionary
+ Protocol* secureCodingProto = objc_getProtocol("NSSecureCoding");
+ id dictionaryClass = objc_getClass("NSDictionary");
+ if (!secureCodingProto) fail("Should have got NSSecureCoding from CoreFoundation");
+ if (!dictionaryClass) fail("Should have got NSDictionary from CoreFoundation");
+
+ // Now make sure that NSDictionary and NSSecureCoding find our new protocols
+ if (!protocol_conformsToProtocol(secureCodingProto, codingProto)) fail("NSSecureCoding should conform to our NSCoding");
+ if (!protocol_conformsToProtocol(secureCodingProto, codingSuperProto)) fail("NSSecureCoding should conform to our NewNSCodingSuperProto");
+ if (!class_conformsToProtocol(dictionaryClass, codingProto)) fail("NSDictionary should conform to our NSCoding");
+ if (!class_conformsToProtocol(dictionaryClass, codingSuperProto)) fail("NSDictionary should conform to our NewNSCodingSuperProto");
+
+ succeed(__FILE__);
+}
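
For reference, the conformance walk this test relies on can also be seen in isolation with protocols defined in a single file. The following is only a minimal sketch in the same harness style; DemoBaseProto, DemoSubProto, and checkConformanceChain are illustrative names, not part of the test suite.

#include "test.h"

@protocol DemoBaseProto
@end

@protocol DemoSubProto <DemoBaseProto>
@end

static void checkConformanceChain(void)
{
    Protocol *base = @protocol(DemoBaseProto);
    Protocol *sub = @protocol(DemoSubProto);
    // Conformance follows the declared chain, but only in one direction.
    testassert(protocol_conformsToProtocol(sub, base));
    testassert(!protocol_conformsToProtocol(base, sub));
}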
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-category-0.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none -DNOT_EVIL libevil.dylib -o evil-category-0.exe
-END
-*/
-
-// NOT EVIL version
-
-#define EVIL_INSTANCE_METHOD 0
-#define EVIL_CLASS_METHOD 0
-
-#define OMIT_CAT 0
-#define OMIT_NL_CAT 0
-
-#include "evil-category-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-category-00.m $DIR/evil-main.m -o evil-category-00.exe
-END
-
-TEST_RUN_OUTPUT
-CRASHED: SIGABRT
-END
-*/
-
-// NOT EVIL version: apps are allowed through (then crash in +load)
-
-#define EVIL_INSTANCE_METHOD 1
-#define EVIL_CLASS_METHOD 1
-
-#define OMIT_CAT 0
-#define OMIT_NL_CAT 0
-
-#include "evil-category-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-category-000.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none -DNOT_EVIL libevil.dylib -o evil-category-000.exe
-END
-*/
-
-// NOT EVIL version: category omitted from all lists
-
-#define EVIL_INSTANCE_METHOD 1
-#define EVIL_CLASS_METHOD 1
-
-#define OMIT_CAT 1
-#define OMIT_NL_CAT 1
-
-#include "evil-category-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-category-1.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none libevil.dylib -o evil-category-1.exe
-END
-
-TEST_RUN_OUTPUT
-objc\[\d+\]: bad method implementation \(0x[0-9a-f]+ at 0x[0-9a-f]+\)
-objc\[\d+\]: HALTED
-END
-*/
-
-#define EVIL_INSTANCE_METHOD 1
-#define EVIL_CLASS_METHOD 0
-
-#define OMIT_CAT 0
-#define OMIT_NL_CAT 0
-
-#include "evil-category-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-category-2.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none libevil.dylib -o evil-category-2.exe
-END
-
-TEST_RUN_OUTPUT
-objc\[\d+\]: bad method implementation \(0x[0-9a-f]+ at 0x[0-9a-f]+\)
-objc\[\d+\]: HALTED
-END
-*/
-
-#define EVIL_INSTANCE_METHOD 0
-#define EVIL_CLASS_METHOD 1
-
-#define OMIT_CAT 0
-#define OMIT_NL_CAT 0
-
-#include "evil-category-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-category-3.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none libevil.dylib -o evil-category-3.exe
-END
-
-TEST_RUN_OUTPUT
-objc\[\d+\]: bad method implementation \(0x[0-9a-f]+ at 0x[0-9a-f]+\)
-objc\[\d+\]: HALTED
-END
-*/
-
-#define EVIL_INSTANCE_METHOD 0
-#define EVIL_CLASS_METHOD 1
-
-#define OMIT_CAT 1
-#define OMIT_NL_CAT 0
-
-#include "evil-category-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-category-4.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none libevil.dylib -o evil-category-4.exe
-END
-
-TEST_RUN_OUTPUT
-objc\[\d+\]: bad method implementation \(0x[0-9a-f]+ at 0x[0-9a-f]+\)
-objc\[\d+\]: HALTED
-END
-*/
-
-#define EVIL_INSTANCE_METHOD 0
-#define EVIL_CLASS_METHOD 1
-
-#define OMIT_CAT 0
-#define OMIT_NL_CAT 1
-
-#include "evil-category-def.m"
+++ /dev/null
-#include <sys/cdefs.h>
-
-#if __LP64__
-# define PTR " .quad "
-#else
-# define PTR " .long "
-#endif
-
-#if __has_feature(ptrauth_calls)
-# define SIGNED_METHOD_LIST_IMP "@AUTH(ia,0,addr) "
-#else
-# define SIGNED_METHOD_LIST_IMP
-#endif
-
-#define str(x) #x
-#define str2(x) str(x)
-
-__BEGIN_DECLS
-void nop(void) { }
-__END_DECLS
-
-asm(
- ".section __DATA,__objc_data \n"
- ".align 3 \n"
- "L_category: \n"
- PTR "L_cat_name \n"
- PTR "_OBJC_CLASS_$_NSObject \n"
-#if EVIL_INSTANCE_METHOD
- PTR "L_evil_methods \n"
-#else
- PTR "L_good_methods \n"
-#endif
-#if EVIL_CLASS_METHOD
- PTR "L_evil_methods \n"
-#else
- PTR "L_good_methods \n"
-#endif
- PTR "0 \n"
- PTR "0 \n"
-
- "L_evil_methods: \n"
- ".long 24 \n"
- ".long 1 \n"
- PTR "L_load \n"
- PTR "L_load \n"
- PTR "_abort" SIGNED_METHOD_LIST_IMP "\n"
- // assumes that abort is inside the dyld shared cache
-
- "L_good_methods: \n"
- ".long 24 \n"
- ".long 1 \n"
- PTR "L_load \n"
- PTR "L_load \n"
- PTR "_nop" SIGNED_METHOD_LIST_IMP "\n"
-
- ".cstring \n"
- "L_cat_name: .ascii \"Evil\\0\" \n"
- "L_load: .ascii \"load\\0\" \n"
-
- ".section __DATA,__objc_catlist \n"
-#if !OMIT_CAT
- PTR "L_category \n"
-#endif
-
- ".section __DATA,__objc_nlcatlist \n"
-#if !OMIT_NL_CAT
- PTR "L_category \n"
-#endif
-
- ".text \n"
- );
-
-void fn(void) { }
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-class-0.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none -DNOT_EVIL libevil.dylib -o evil-class-0.exe
-END
-*/
-
-// NOT EVIL version
-
-#define EVIL_SUPER 0
-#define EVIL_SUPER_META 0
-#define EVIL_SUB 0
-#define EVIL_SUB_META 0
-
-#define OMIT_SUPER 0
-#define OMIT_NL_SUPER 0
-#define OMIT_SUB 0
-#define OMIT_NL_SUB 0
-
-#include "evil-class-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-class-00.m $DIR/evil-main.m -o evil-class-00.exe
-END
-
-TEST_RUN_OUTPUT
-CRASHED: SIGABRT
-END
-*/
-
-// NOT EVIL version: apps are allowed through (then crash in +load)
-
-#define EVIL_SUPER 0
-#define EVIL_SUPER_META 1
-#define EVIL_SUB 0
-#define EVIL_SUB_META 0
-
-#define OMIT_SUPER 1
-#define OMIT_NL_SUPER 1
-#define OMIT_SUB 1
-#define OMIT_NL_SUB 0
-
-#include "evil-class-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-class-000.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none -DNOT_EVIL libevil.dylib -o evil-class-000.exe
-END
-*/
-
-// NOT EVIL version: all classes omitted from all lists
-
-#define EVIL_SUPER 1
-#define EVIL_SUPER_META 1
-#define EVIL_SUB 1
-#define EVIL_SUB_META 1
-
-#define OMIT_SUPER 1
-#define OMIT_NL_SUPER 1
-#define OMIT_SUB 1
-#define OMIT_NL_SUB 1
-
-#include "evil-class-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-class-1.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none libevil.dylib -o evil-class-1.exe
-END
-
-TEST_RUN_OUTPUT
-objc\[\d+\]: bad method implementation \(0x[0-9a-f]+ at 0x[0-9a-f]+\)
-objc\[\d+\]: HALTED
-END
-*/
-
-#define EVIL_SUPER 1
-#define EVIL_SUPER_META 0
-#define EVIL_SUB 0
-#define EVIL_SUB_META 0
-
-#define OMIT_SUPER 0
-#define OMIT_NL_SUPER 0
-#define OMIT_SUB 0
-#define OMIT_NL_SUB 0
-
-#include "evil-class-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-class-2.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none libevil.dylib -o evil-class-2.exe
-END
-
-TEST_RUN_OUTPUT
-objc\[\d+\]: bad method implementation \(0x[0-9a-f]+ at 0x[0-9a-f]+\)
-objc\[\d+\]: HALTED
-END
-*/
-
-#define EVIL_SUPER 0
-#define EVIL_SUPER_META 1
-#define EVIL_SUB 0
-#define EVIL_SUB_META 0
-
-#define OMIT_SUPER 0
-#define OMIT_NL_SUPER 0
-#define OMIT_SUB 0
-#define OMIT_NL_SUB 0
-
-#include "evil-class-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-class-3.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none libevil.dylib -o evil-class-3.exe
-END
-
-TEST_RUN_OUTPUT
-objc\[\d+\]: bad method implementation \(0x[0-9a-f]+ at 0x[0-9a-f]+\)
-objc\[\d+\]: HALTED
-END
-*/
-
-#define EVIL_SUPER 0
-#define EVIL_SUPER_META 0
-#define EVIL_SUB 1
-#define EVIL_SUB_META 0
-
-#define OMIT_SUPER 0
-#define OMIT_NL_SUPER 0
-#define OMIT_SUB 0
-#define OMIT_NL_SUB 0
-
-#include "evil-class-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-class-4.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none libevil.dylib -o evil-class-4.exe
-END
-
-TEST_RUN_OUTPUT
-objc\[\d+\]: bad method implementation \(0x[0-9a-f]+ at 0x[0-9a-f]+\)
-objc\[\d+\]: HALTED
-END
-*/
-
-#define EVIL_SUPER 0
-#define EVIL_SUPER_META 0
-#define EVIL_SUB 0
-#define EVIL_SUB_META 1
-
-#define OMIT_SUPER 0
-#define OMIT_NL_SUPER 0
-#define OMIT_SUB 0
-#define OMIT_NL_SUB 0
-
-#include "evil-class-def.m"
+++ /dev/null
-/*
-rdar://8553305
-
-TEST_DISABLED rdar://19200100
-
-TEST_CONFIG OS=iphoneos
-TEST_CRASHES
-
-TEST_BUILD
- $C{COMPILE} $DIR/evil-class-5.m -dynamiclib -o libevil.dylib
- $C{COMPILE} $DIR/evil-main.m -x none libevil.dylib -o evil-class-5.exe
-END
-
-TEST_RUN_OUTPUT
-objc\[\d+\]: bad method implementation \(0x[0-9a-f]+ at 0x[0-9a-f]+\)
-objc\[\d+\]: HALTED
-END
-*/
-
-#define EVIL_SUPER 0
-#define EVIL_SUPER_META 1
-#define EVIL_SUB 0
-#define EVIL_SUB_META 0
-
-#define OMIT_SUPER 1
-#define OMIT_NL_SUPER 1
-#define OMIT_SUB 1
-#define OMIT_NL_SUB 0
-
-#include "evil-class-def.m"
"L_ro: \n"
".long 2 \n"
".long 0 \n"
- ".long "PTRSIZE" \n"
+ ".long " PTRSIZE " \n"
#if __LP64__
".long 0 \n"
#endif
"L_sub_ro: \n"
".long 2 \n"
".long 0 \n"
- ".long "PTRSIZE" \n"
+ ".long " PTRSIZE " \n"
#if __LP64__
".long 0 \n"
#endif
PTR "0 \n"
"L_evil_methods: \n"
- ".long 3*"PTRSIZE" \n"
+ ".long 3*" PTRSIZE " \n"
".long 1 \n"
PTR "L_load \n"
PTR "L_load \n"
// assumes that abort is inside the dyld shared cache
"L_good_methods: \n"
- ".long 3*"PTRSIZE" \n"
+ ".long 3*" PTRSIZE " \n"
".long 2 \n"
PTR "L_load \n"
PTR "L_load \n"
PTR "_nop" SIGNED_METHOD_LIST_IMP "\n"
"L_super_ivars: \n"
- ".long 4*"PTRSIZE" \n"
+ ".long 4*" PTRSIZE " \n"
".long 1 \n"
PTR "L_super_ivar_offset \n"
PTR "L_super_ivar_name \n"
PTR "L_super_ivar_type \n"
- ".long "LOGPTRSIZE" \n"
- ".long "PTRSIZE" \n"
+ ".long " LOGPTRSIZE " \n"
+ ".long " PTRSIZE " \n"
"L_sub_ivars: \n"
- ".long 4*"PTRSIZE" \n"
+ ".long 4*" PTRSIZE " \n"
".long 1 \n"
PTR "L_sub_ivar_offset \n"
PTR "L_sub_ivar_name \n"
PTR "L_sub_ivar_type \n"
- ".long "LOGPTRSIZE" \n"
- ".long "PTRSIZE" \n"
+ ".long " LOGPTRSIZE " \n"
+ ".long " PTRSIZE " \n"
"L_super_ivar_offset: \n"
".long 0 \n"
"L_sub_ivar_offset: \n"
- ".long "PTRSIZE" \n"
+ ".long " PTRSIZE " \n"
".cstring \n"
"L_super_name: .ascii \"Super\\0\" \n"
+++ /dev/null
-#include "test.h"
-
-extern void fn(void);
-
-int main(int argc __unused, char **argv)
-{
- fn();
-
-#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR && !defined(NOT_EVIL)
-#pragma unused (argv)
- fail("All that is necessary for the triumph of evil is that good men do nothing.");
-#else
- succeed(basename(argv[0]));
-#endif
-}
dyld: Library not loaded: librequiresgc\.dylib
Referenced from: .*gcenforcer-dylib-requiresgc.exe
Reason: no suitable image found\. Did find:
- .*librequiresgc\.dylib: cannot load '.*librequiresgc\.dylib' because Objective-C garbage collection is not supported
- librequiresgc.dylib: cannot load 'librequiresgc\.dylib' because Objective-C garbage collection is not supported
+ (.*librequiresgc\.dylib: cannot load '.*librequiresgc\.dylib' because Objective-C garbage collection is not supported(\n)?)+
+ librequiresgc.dylib: cannot load 'librequiresgc\.dylib' because Objective-C garbage collection is not supported(
+ .*librequiresgc\.dylib: cannot load '.*librequiresgc\.dylib' because Objective-C garbage collection is not supported(\n)?)*
END
TEST_BUILD
#include <objc/objc-gdb.h>
#include <objc/runtime.h>
+#define SwiftV1MangledName4 "_TtC6Swiftt13SwiftV1Class4"
+__attribute__((objc_runtime_name(SwiftV1MangledName4)))
+@interface SwiftV1Class4 : TestRoot @end
+@implementation SwiftV1Class4 @end
+
int main()
{
// Class hashes
uintptr_t *maskp = (uintptr_t *)dlsym(RTLD_DEFAULT, "objc_debug_class_rw_data_mask");
testassert(maskp);
+
+ // Raw class names
+ testassert(strcmp(objc_debug_class_getNameRaw([SwiftV1Class4 class]), SwiftV1MangledName4) == 0);
+ testassert(strcmp(objc_debug_class_getNameRaw([TestRoot class]), "TestRoot") == 0);
+
succeed(__FILE__);
}
___cxa_atexit (C++ static destructor)
weak external (any weak externals, including operators new and delete)
+Whitelisted imports:
+weak external ____chkstk_darwin (from libSystem)
+
Disallowed exports (nm -U):
__Z* (any C++-mangled export)
weak external (any weak externals, including operators new and delete)
/*
TEST_BUILD
echo $C{XCRUN} nm -m -arch $C{ARCH} $C{TESTLIB}
-$C{XCRUN} nm -u -m -arch $C{ARCH} $C{TESTLIB} | egrep '(weak external| external (___cxa_atexit|___cxa_guard_acquire|___cxa_guard_release))' || true
+$C{XCRUN} nm -u -m -arch $C{ARCH} $C{TESTLIB} | grep -v 'weak external ____chkstk_darwin \(from libSystem\)' | egrep '(weak external| external (___cxa_atexit|___cxa_guard_acquire|___cxa_guard_release))' || true
$C{XCRUN} nm -U -m -arch $C{ARCH} $C{TESTLIB} | egrep '(weak external| external __Z)' || true
$C{COMPILE_C} $DIR/imports.c -o imports.exe
END
/*
TEST_BUILD
- $C{COMPILE} $DIR/include-warnings.c -o include-warnings.exe -Wsystem-headers -Weverything -Wno-undef -Wno-old-style-cast -Wno-nullability-extension 2>&1 | grep -v 'In file' | grep objc || true
+ $C{COMPILE} $DIR/include-warnings.c -o include-warnings.exe -Wsystem-headers -Weverything -Wno-undef -Wno-old-style-cast -Wno-nullability-extension -Wno-c++98-compat 2>&1 | grep -v 'In file' | grep objc || true
END
TEST_RUN_OUTPUT
// -Wno-old-style-cast is tough to avoid in mixed C/C++ code.
// -Wno-nullability-extension disables a warning about non-portable
// _Nullable etc which we already handle correctly in objc-abi.h.
+// -Wno-c++98-compat disables warnings about things that already
+// have guards against C++98.
#include "includes.c"
extern char **environ;
-id dummyIMP(id self, SEL _cmd, ...) { (void)_cmd; return self; }
+id dummyIMP(id self, SEL _cmd) { (void)_cmd; return self; }
char *dupeName(Class cls) {
char *name;
TESTCASE(free(class_copyProtocolList(cls, NULL))),
TESTCASE(class_getProperty(cls, "x")),
TESTCASE(free(class_copyPropertyList(cls, NULL))),
- TESTCASE(class_addMethod(cls, @selector(nop), dummyIMP, "v@:")),
- TESTCASE(class_replaceMethod(cls, @selector(nop), dummyIMP, "v@:")),
+ TESTCASE(class_addMethod(cls, @selector(nop), (IMP)dummyIMP, "v@:")),
+ TESTCASE(class_replaceMethod(cls, @selector(nop), (IMP)dummyIMP, "v@:")),
TESTCASE(class_addIvar(cls, "x", sizeof(int), sizeof(int), @encode(int))),
TESTCASE(class_addProtocol(cls, @protocol(P))),
TESTCASE(class_addProperty(cls, "x", NULL, 0)),
--- /dev/null
+#import "test.h"
+
+#import <Foundation/Foundation.h>
+
+@interface CLASSNAME: NSObject @end
+@implementation CLASSNAME @end
+
--- /dev/null
+/*
+TEST_BUILD
+ $C{COMPILE} -DCLASSNAME=Class1 $DIR/load-image-notification-dylib.m -o load-image-notification1.dylib -dynamiclib
+ $C{COMPILE} -DCLASSNAME=Class2 $DIR/load-image-notification-dylib.m -o load-image-notification2.dylib -dynamiclib
+ $C{COMPILE} -DCLASSNAME=Class3 $DIR/load-image-notification-dylib.m -o load-image-notification3.dylib -dynamiclib
+ $C{COMPILE} -DCLASSNAME=Class4 $DIR/load-image-notification-dylib.m -o load-image-notification4.dylib -dynamiclib
+ $C{COMPILE} -DCLASSNAME=Class5 $DIR/load-image-notification-dylib.m -o load-image-notification5.dylib -dynamiclib
+ $C{COMPILE} $DIR/load-image-notification.m -o load-image-notification.exe
+END
+*/
+
+#include "test.h"
+
+#include <dlfcn.h>
+
+#define ADD_IMAGE_CALLBACK(n) \
+int called ## n = 0; \
+static void add_image ## n(const struct mach_header * mh __unused) { \
+ called ## n++; \
+}
+
+ADD_IMAGE_CALLBACK(1)
+ADD_IMAGE_CALLBACK(2)
+ADD_IMAGE_CALLBACK(3)
+ADD_IMAGE_CALLBACK(4)
+ADD_IMAGE_CALLBACK(5)
+
+int main()
+{
+ objc_addLoadImageFunc(add_image1);
+ testassert(called1 > 0);
+ int oldcalled = called1;
+ void *handle = dlopen("load-image-notification1.dylib", RTLD_LAZY);
+ testassert(handle);
+ testassert(called1 > oldcalled);
+
+ objc_addLoadImageFunc(add_image2);
+ testassert(called2 == called1);
+ oldcalled = called1;
+ handle = dlopen("load-image-notification2.dylib", RTLD_LAZY);
+ testassert(handle);
+ testassert(called1 > oldcalled);
+ testassert(called2 == called1);
+
+ objc_addLoadImageFunc(add_image3);
+ testassert(called3 == called1);
+ oldcalled = called1;
+ handle = dlopen("load-image-notification3.dylib", RTLD_LAZY);
+ testassert(handle);
+ testassert(called1 > oldcalled);
+ testassert(called2 == called1);
+ testassert(called3 == called1);
+
+ objc_addLoadImageFunc(add_image4);
+ testassert(called4 == called1);
+ oldcalled = called1;
+ handle = dlopen("load-image-notification4.dylib", RTLD_LAZY);
+ testassert(handle);
+ testassert(called1 > oldcalled);
+ testassert(called2 == called1);
+ testassert(called3 == called1);
+ testassert(called4 == called1);
+
+ objc_addLoadImageFunc(add_image5);
+ testassert(called5 == called1);
+ oldcalled = called1;
+ handle = dlopen("load-image-notification5.dylib", RTLD_LAZY);
+ testassert(handle);
+ testassert(called1 > oldcalled);
+ testassert(called2 == called1);
+ testassert(called3 == called1);
+ testassert(called4 == called1);
+ testassert(called5 == called1);
+
+ succeed(__FILE__);
+}
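
The API exercised above has a small shape: objc_addLoadImageFunc() registers a callback that is invoked for images the runtime already knows about and then for each image loaded afterwards, which is why called1 is nonzero immediately after registration. A minimal standalone sketch follows, assuming the declaration in <objc/objc-internal.h>; noteImage and imagesSeen are illustrative names.

#include <mach-o/loader.h>
#include <objc/objc-internal.h>

static int imagesSeen = 0;

static void noteImage(const struct mach_header *mh __attribute__((unused)))
{
    // Called for every already-loaded image, then again for each later dlopen.
    imagesSeen++;
}

int main(void)
{
    objc_addLoadImageFunc(noteImage);
    // As with called1 above, imagesSeen is already nonzero at this point.
    return imagesSeen > 0 ? 0 : 1;
}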
--- /dev/null
+// TEST_CONFIG MEM=mrc LANGUAGE=objective-c
+/*
+TEST_RUN_OUTPUT
+[\S\s]*0 leaks for 0 total leaked bytes[\S\s]*
+END
+*/
+
+#include "test.h"
+#include "testroot.i"
+
+#include <spawn.h>
+#include <stdio.h>
+
+void noopIMP(id self __unused, SEL _cmd __unused) {}
+
+id test(int n, int methodCount) {
+ char *name;
+ asprintf(&name, "TestClass%d", n);
+ Class c = objc_allocateClassPair([TestRoot class], name, 0);
+ free(name);
+
+ SEL *sels = malloc(methodCount * sizeof(*sels));
+ for(int i = 0; i < methodCount; i++) {
+ asprintf(&name, "selector%d", i);
+ sels[i] = sel_getUid(name);
+ free(name);
+ }
+
+ for(int i = 0; i < methodCount; i++) {
+ class_addMethod(c, sels[i], (IMP)noopIMP, "v@:");
+ }
+
+ objc_registerClassPair(c);
+
+ id obj = [[c alloc] init];
+ for (int i = 0; i < methodCount; i++) {
+ ((void (*)(id, SEL))objc_msgSend)(obj, sels[i]);
+ }
+ free(sels);
+ return obj;
+}
+
+int main()
+{
+ int classCount = 16;
+ id *objs = malloc(classCount * sizeof(*objs));
+ for (int i = 0; i < classCount; i++) {
+ objs[i] = test(i, 1 << i);
+ }
+
+ char *pidstr;
+ int result = asprintf(&pidstr, "%u", getpid());
+ testassert(result);
+
+ extern char **environ;
+ char *argv[] = { "/usr/bin/leaks", pidstr, NULL };
+ pid_t pid;
+ result = posix_spawn(&pid, "/usr/bin/leaks", NULL, NULL, argv, environ);
+ if (result) {
+ perror("posix_spawn");
+ exit(1);
+ }
+ wait4(pid, NULL, 0, NULL);
+ printf("objs=%p\n", objs);
+}
#include "test.h"
#include "testroot.i"
+#include <libkern/OSCacheControl.h>
#include <sys/stat.h>
#include <objc/objc.h>
#include <objc/runtime.h>
uintptr_t fnaddr(void *fn) { return (uintptr_t)fn; }
#endif
+void flushICache(uintptr_t addr) {
+ sys_icache_invalidate((void *)addr, sizeof(insn_t));
+}
+
insn_t set(uintptr_t dst, insn_t newvalue)
{
uintptr_t start = dst & ~(PAGE_MAX_SIZE-1);
*(insn_t *)dst = newvalue;
err = mprotect((void*)start, PAGE_MAX_SIZE, PROT_READ|PROT_EXEC);
if (err) fail("mprotect(%p, R-X) failed (%d)", start, errno);
+ flushICache(dst);
return oldvalue;
}
if (fstat(placeholder, &st) < 0) {
fail("couldn't stat asm-placeholder.exe (%d)", errno);
}
- char *buf = (char *)malloc(st.st_size);
- if (pread(placeholder, buf, st.st_size, 0) != st.st_size) {
+ ssize_t sz = (ssize_t)st.st_size;
+ char *buf = (char *)malloc(sz);
+ if (pread(placeholder, buf, sz, 0) != sz) {
fail("couldn't read asm-placeholder.exe (%d)", errno);
}
- if (pwrite(fd, buf, st.st_size, 0) != st.st_size) {
+ if (pwrite(fd, buf, sz, 0) != sz) {
fail("couldn't write asm temp file %s (%d)", tempname, errno);
}
free(buf);
if (!initialized) {
initialized = true;
testprintf("Nonpointer during +initialize\n");
- testassert(NONPOINTER(self));
+ testassert(!NONPOINTER(self));
id o = [Fake_OS_object new];
check_nonpointer(o, self);
[o release];
--- /dev/null
+// TEST_CONFIG
+
+#include "test.h"
+#include "testroot.i"
+
+#include <dlfcn.h>
+
+extern uintptr_t objc_debug_realized_class_generation_count;
+
+int main()
+{
+ testassert(objc_debug_realized_class_generation_count > 0);
+ uintptr_t prev = objc_debug_realized_class_generation_count;
+
+ void *handle = dlopen("/System/Library/Frameworks/Foundation.framework/Foundation", RTLD_LAZY);
+ testassert(handle);
+ Class c = objc_getClass("NSFileManager");
+ testassert(c);
+ testassert(objc_debug_realized_class_generation_count > prev);
+
+ prev = objc_debug_realized_class_generation_count;
+ c = objc_allocateClassPair([TestRoot class], "Dynamic", 0);
+ testassert(objc_debug_realized_class_generation_count > prev);
+ prev = objc_debug_realized_class_generation_count;
+ objc_registerClassPair(c);
+ testassert(objc_debug_realized_class_generation_count == prev);
+
+ succeed(__FILE__);
+}
\ No newline at end of file
--- /dev/null
+// TEST_CONFIG OS=macosx,iphoneos,tvos,watchos
+
+// This test checks that objc_msgSend's recovery path works correctly.
+// It continuously runs objc_msgSend on background threads while the main
+// thread repeatedly triggers the recovery path, as a stress test.
+
+#include "test.h"
+#include "testroot.i"
+#include <dispatch/dispatch.h>
+
+struct Big {
+ uintptr_t a, b, c, d, e, f, g;
+};
+
+@interface C1: TestRoot
+@end
+@implementation C1
+- (id)idret { return nil; }
+- (double)fpret { return 0.0; }
+- (long double)lfpret { return 0.0; }
+- (struct Big)stret { return (struct Big){}; }
+@end
+
+@interface C2: C1
+@end
+@implementation C2
+- (id)idret { return [super idret]; }
+- (double)fpret { return [super fpret]; }
+- (long double)lfpret { return [super lfpret]; }
+- (struct Big)stret { return [super stret]; }
+@end
+
+EXTERN_C kern_return_t task_restartable_ranges_synchronize(task_t task);
+
+EXTERN_C void sendWithMsgLookup(id self, SEL _cmd);
+
+#if defined(__arm64__) && !__has_feature(ptrauth_calls)
+asm(
+"_sendWithMsgLookup: \n"
+" stp fp, lr, [sp, #-16]! \n"
+" mov fp, sp \n"
+" bl _objc_msgLookup \n"
+" mov sp, fp \n"
+" ldp fp, lr, [sp], #16 \n"
+" br x17 \n"
+);
+#elif defined(__x86_64__)
+asm(
+"_sendWithMsgLookup: \n"
+" pushq %rbp \n"
+" movq %rsp, %rbp \n"
+" callq _objc_msgLookup \n"
+" popq %rbp \n"
+" jmpq *%r11 \n"
+);
+#else
+// Just skip it.
+void sendWithMsgLookup(id self __unused, SEL _cmd __unused) {}
+#endif
+
+int main() {
+ id obj = [C2 new];
+ for(int i = 0; i < 2; i++) {
+ dispatch_async(dispatch_get_global_queue(0, 0), ^{
+ while(1) {
+ [obj idret];
+ [obj fpret];
+ [obj lfpret];
+ [obj stret];
+ sendWithMsgLookup(obj, @selector(idret));
+ }
+ });
+ }
+ for(int i = 0; i < 1000000; i++) {
+ task_restartable_ranges_synchronize(mach_task_self());
+ }
+ succeed(__FILE__);
+}
--- /dev/null
+// TEST_CONFIG
+
+#include "test.h"
+#include "testroot.i"
+
+id sawObject;
+const void *sawKey;
+id sawValue;
+objc_AssociationPolicy sawPolicy;
+
+objc_hook_setAssociatedObject originalSetAssociatedObject;
+
+void hook(id _Nonnull object, const void * _Nonnull key, id _Nullable value, objc_AssociationPolicy policy) {
+ sawObject = object;
+ sawKey = key;
+ sawValue = value;
+ sawPolicy = policy;
+ originalSetAssociatedObject(object, key, value, policy);
+}
+
+int main() {
+ id obj = [TestRoot new];
+ id value = [TestRoot new];
+ const void *key = "key";
+ objc_setAssociatedObject(obj, key, value, OBJC_ASSOCIATION_RETAIN);
+ testassert(sawObject == nil);
+ testassert(sawKey == nil);
+ testassert(sawValue == nil);
+ testassert(sawPolicy == 0);
+
+ id out = objc_getAssociatedObject(obj, key);
+ testassert(out == value);
+
+ objc_setHook_setAssociatedObject(hook, &originalSetAssociatedObject);
+
+ key = "key2";
+ objc_setAssociatedObject(obj, key, value, OBJC_ASSOCIATION_RETAIN);
+ testassert(sawObject == obj);
+ testassert(sawKey == key);
+ testassert(sawValue == value);
+ testassert(sawPolicy == OBJC_ASSOCIATION_RETAIN);
+
+ out = objc_getAssociatedObject(obj, key);
+ testassert(out == value);
+
+ succeed(__FILE__);
+}
\ No newline at end of file
# define PTR " .quad "
# define PTRSIZE "8"
# define LOGPTRSIZE "3"
+# define ONLY_LP64(x) x
#else
# define PTR " .long "
# define PTRSIZE "4"
# define LOGPTRSIZE "2"
+# define ONLY_LP64(x)
#endif
#if __has_feature(ptrauth_calls)
# define SIGNED_METHOD_LIST_IMP "@AUTH(ia,0,addr) "
+# define SIGNED_STUB_INITIALIZER "@AUTH(ia,0xc671,addr) "
#else
# define SIGNED_METHOD_LIST_IMP
+# define SIGNED_STUB_INITIALIZER
#endif
#define str(x) #x
void* nop(void* self) { return self; }
__END_DECLS
-asm(
- ".globl _OBJC_CLASS_$_SwiftSuper \n"
- ".section __DATA,__objc_data \n"
- ".align 3 \n"
- "_OBJC_CLASS_$_SwiftSuper: \n"
- PTR "_OBJC_METACLASS_$_SwiftSuper \n"
- PTR "_OBJC_CLASS_$_NSObject \n"
- PTR "__objc_empty_cache \n"
- PTR "0 \n"
- PTR "L_ro + 2 \n"
- // pad to OBJC_MAX_CLASS_SIZE
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- ""
- "_OBJC_METACLASS_$_SwiftSuper: \n"
- PTR "_OBJC_METACLASS_$_NSObject \n"
- PTR "_OBJC_METACLASS_$_NSObject \n"
- PTR "__objc_empty_cache \n"
- PTR "0 \n"
- PTR "L_meta_ro \n"
- // pad to OBJC_MAX_CLASS_SIZE
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- ""
- "L_ro: \n"
- ".long (1<<6)\n"
- ".long 0 \n"
- ".long "PTRSIZE" \n"
-#if __LP64__
- ".long 0 \n"
-#endif
- PTR "0 \n"
- PTR "L_super_name \n"
- PTR "L_good_methods \n"
- PTR "0 \n"
- PTR "L_super_ivars \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "_initSuper" SIGNED_METHOD_LIST_IMP "\n"
- ""
- "L_meta_ro: \n"
- ".long 1 \n"
- ".long 40 \n"
- ".long 40 \n"
-#if __LP64__
- ".long 0 \n"
-#endif
- PTR "0 \n"
- PTR "L_super_name \n"
- PTR "L_good_methods \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
-
- ".globl _OBJC_CLASS_$_SwiftSub \n"
- ".section __DATA,__objc_data \n"
- ".align 3 \n"
- "_OBJC_CLASS_$_SwiftSub: \n"
- PTR "_OBJC_METACLASS_$_SwiftSub \n"
- PTR "_OBJC_CLASS_$_SwiftSuper \n"
- PTR "__objc_empty_cache \n"
- PTR "0 \n"
- PTR "L_sub_ro + 2 \n"
- // pad to OBJC_MAX_CLASS_SIZE
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- ""
- "_OBJC_METACLASS_$_SwiftSub: \n"
- PTR "_OBJC_METACLASS_$_NSObject \n"
- PTR "_OBJC_METACLASS_$_SwiftSuper \n"
- PTR "__objc_empty_cache \n"
- PTR "0 \n"
- PTR "L_sub_meta_ro \n"
- // pad to OBJC_MAX_CLASS_SIZE
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- ""
- "L_sub_ro: \n"
- ".long (1<<6)\n"
- ".long 0 \n"
- ".long "PTRSIZE" \n"
-#if __LP64__
- ".long 0 \n"
-#endif
- PTR "0 \n"
- PTR "L_sub_name \n"
- PTR "L_good_methods \n"
- PTR "0 \n"
- PTR "L_sub_ivars \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "_initSub" SIGNED_METHOD_LIST_IMP "\n"
- ""
- "L_sub_meta_ro: \n"
- ".long 1 \n"
- ".long 40 \n"
- ".long 40 \n"
-#if __LP64__
- ".long 0 \n"
-#endif
- PTR "0 \n"
- PTR "L_sub_name \n"
- PTR "L_good_methods \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
- PTR "0 \n"
-
- "L_good_methods: \n"
- ".long 3*"PTRSIZE" \n"
- ".long 1 \n"
- PTR "L_self \n"
- PTR "L_self \n"
- PTR "_nop" SIGNED_METHOD_LIST_IMP "\n"
-
- "L_super_ivars: \n"
- ".long 4*"PTRSIZE" \n"
- ".long 1 \n"
- PTR "L_super_ivar_offset \n"
- PTR "L_super_ivar_name \n"
- PTR "L_super_ivar_type \n"
- ".long "LOGPTRSIZE" \n"
- ".long "PTRSIZE" \n"
-
- "L_sub_ivars: \n"
- ".long 4*"PTRSIZE" \n"
- ".long 1 \n"
- PTR "L_sub_ivar_offset \n"
- PTR "L_sub_ivar_name \n"
- PTR "L_sub_ivar_type \n"
- ".long "LOGPTRSIZE" \n"
- ".long "PTRSIZE" \n"
-
- "L_super_ivar_offset: \n"
- ".long 0 \n"
- "L_sub_ivar_offset: \n"
- ".long "PTRSIZE" \n"
-
- ".cstring \n"
- "L_super_name: .ascii \"SwiftSuper\\0\" \n"
- "L_sub_name: .ascii \"SwiftSub\\0\" \n"
- "L_load: .ascii \"load\\0\" \n"
- "L_self: .ascii \"self\\0\" \n"
- "L_super_ivar_name: .ascii \"super_ivar\\0\" \n"
- "L_super_ivar_type: .ascii \"c\\0\" \n"
- "L_sub_ivar_name: .ascii \"sub_ivar\\0\" \n"
- "L_sub_ivar_type: .ascii \"@\\0\" \n"
-
-
- ".section __DATA,__objc_classlist \n"
- PTR "_OBJC_CLASS_$_SwiftSuper \n"
- PTR "_OBJC_CLASS_$_SwiftSub \n"
-
- ".text \n"
-);
+#define SWIFT_CLASS(name, superclass, swiftInit) \
+asm( \
+ ".globl _OBJC_CLASS_$_" #name "\n" \
+ ".section __DATA,__objc_data \n" \
+ ".align 3 \n" \
+ "_OBJC_CLASS_$_" #name ": \n" \
+ PTR "_OBJC_METACLASS_$_" #name "\n" \
+ PTR "_OBJC_CLASS_$_" #superclass "\n" \
+ PTR "__objc_empty_cache \n" \
+ PTR "0 \n" \
+ PTR "L_" #name "_ro + 2 \n" \
+ /* Swift class fields. */ \
+ ".long 0 \n" /* flags */ \
+ ".long 0 \n" /* instanceAddressOffset */ \
+ ".long 16 \n" /* instanceSize */ \
+ ".short 15 \n" /* instanceAlignMask */ \
+ ".short 0 \n" /* reserved */ \
+ ".long 256 \n" /* classSize */ \
+ ".long 0 \n" /* classAddressOffset */ \
+ PTR "0 \n" /* description */ \
+ /* pad to OBJC_MAX_CLASS_SIZE */ \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ \
+ "_OBJC_METACLASS_$_" #name ": \n" \
+ PTR "_OBJC_METACLASS_$_" #superclass "\n" \
+ PTR "_OBJC_METACLASS_$_" #superclass "\n" \
+ PTR "__objc_empty_cache \n" \
+ PTR "0 \n" \
+ PTR "L_" #name "_meta_ro \n" \
+ /* pad to OBJC_MAX_CLASS_SIZE */ \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ \
+ "L_" #name "_ro: \n" \
+ ".long (1<<6) \n" \
+ ".long 0 \n" \
+ ".long " PTRSIZE " \n" \
+ ONLY_LP64(".long 0 \n") \
+ PTR "0 \n" \
+ PTR "L_" #name "_name \n" \
+ PTR "L_" #name "_methods \n" \
+ PTR "0 \n" \
+ PTR "L_" #name "_ivars \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "_" #swiftInit SIGNED_METHOD_LIST_IMP "\n" \
+ \
+ "L_" #name "_meta_ro: \n" \
+ ".long 1 \n" \
+ ".long 40 \n" \
+ ".long 40 \n" \
+ ONLY_LP64(".long 0 \n") \
+ PTR "0 \n" \
+ PTR "L_" #name "_name \n" \
+ PTR "L_" #name "_meta_methods \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ PTR "0 \n" \
+ \
+ "L_" #name "_methods: \n" \
+ "L_" #name "_meta_methods: \n" \
+ ".long 3*" PTRSIZE "\n" \
+ ".long 1 \n" \
+ PTR "L_" #name "_self \n" \
+ PTR "L_" #name "_self \n" \
+ PTR "_nop" SIGNED_METHOD_LIST_IMP "\n" \
+ \
+ "L_" #name "_ivars: \n" \
+ ".long 4*" PTRSIZE " \n" \
+ ".long 1 \n" \
+ PTR "L_" #name "_ivar_offset \n" \
+ PTR "L_" #name "_ivar_name \n" \
+ PTR "L_" #name "_ivar_type \n" \
+ ".long " LOGPTRSIZE "\n" \
+ ".long " PTRSIZE "\n" \
+ \
+ "L_" #name "_ivar_offset: \n" \
+ ".long 0 \n" \
+ \
+ ".cstring \n" \
+ "L_" #name "_name: .ascii \"" #name "\\0\" \n" \
+ "L_" #name "_self: .ascii \"self\\0\" \n" \
+ "L_" #name "_ivar_name: " \
+ " .ascii \"" #name "_ivar\\0\" \n" \
+ "L_" #name "_ivar_type: .ascii \"c\\0\" \n" \
+ \
+ \
+ ".text \n" \
+); \
+extern char OBJC_CLASS_$_ ## name; \
+Class Raw ## name = (Class)&OBJC_CLASS_$_ ## name
+
+#define SWIFT_STUB_CLASSREF(name) \
+extern char OBJC_CLASS_$_ ## name; \
+static Class name ## Classref = (Class)(&OBJC_CLASS_$_ ## name + 1)
+
+#define SWIFT_STUB_CLASS(name, initializer) \
+asm( \
+ ".globl _OBJC_CLASS_$_" #name "\n" \
+ ".section __DATA,__objc_data \n" \
+ ".align 3 \n" \
+ "_dummy" #name ": \n" \
+ PTR "0 \n" \
+ ".alt_entry _OBJC_CLASS_$_" #name "\n" \
+ "_OBJC_CLASS_$_" #name ": \n" \
+ PTR "1 \n" \
+ PTR "_" #initializer SIGNED_STUB_INITIALIZER "\n" \
+ ".text" \
+); \
+extern char OBJC_CLASS_$_ ## name; \
+Class Raw ## name = (Class)&OBJC_CLASS_$_ ## name; \
+SWIFT_STUB_CLASSREF(name)
+
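
The `+ 1` in SWIFT_STUB_CLASSREF is the tagging step: the emitted class symbol is pointer-aligned, so offsetting it by one byte yields an odd address, and the tests below treat that low bit as "not yet resolved by objc_loadClassref()". A minimal sketch of just that arithmetic; demoStubSymbol is an illustrative stand-in, not an emitted class.

#include <assert.h>
#include <stdint.h>

// Stand-in for a pointer-aligned class symbol emitted by SWIFT_STUB_CLASS.
static __attribute__((aligned(8))) char demoStubSymbol;

int main(void)
{
    // SWIFT_STUB_CLASSREF stores &symbol + 1, so the low bit starts out set.
    uintptr_t tagged = (uintptr_t)(&demoStubSymbol + 1);
    assert(tagged & 1);

    // objc_loadClassref() later rewrites the ref to the realized class,
    // whose address is aligned, so the tests see the bit cleared.
    uintptr_t resolved = (uintptr_t)&demoStubSymbol;
    assert((resolved & 1) == 0);
    return 0;
}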
void fn(void) { }
#include "swift-class-def.m"
-// _objc_swiftMetadataInitializer hooks for the classes in swift-class-def.m
+SWIFT_CLASS(SwiftSuper, NSObject, initSuper);
+SWIFT_CLASS(SwiftSub, SwiftSuper, initSub);
+
+// _objc_swiftMetadataInitializer hooks for the fake Swift classes
Class initSuper(Class cls __unused, void *arg __unused)
{
{
testprintf("initSub callback\n");
- extern uintptr_t OBJC_CLASS_$_SwiftSuper;
- extern uintptr_t OBJC_CLASS_$_SwiftSub;
- Class RawSwiftSuper = (Class)&OBJC_CLASS_$_SwiftSuper;
- Class RawSwiftSub = (Class)&OBJC_CLASS_$_SwiftSub;
-
testassert(SubInits == 0);
SubInits++;
testassert(arg == nil);
--- /dev/null
+#include "test.h"
+#include "swift-class-def.m"
+
+SWIFT_CLASS(RealSwiftDylib1A, NSObject, nop);
+SWIFT_STUB_CLASS(SwiftDylib1A, initSwiftDylib1A);
+
+SWIFT_CLASS(RealSwiftDylib1B, NSObject, nop);
+SWIFT_STUB_CLASS(SwiftDylib1B, initSwiftDylib1B);
+
+int Dylib1AInits = 0;
+
+@interface SwiftDylib1A: NSObject @end
+@interface SwiftDylib1B: NSObject @end
+
+@implementation SwiftDylib1A (Category)
+- (const char *)dylib1ACategoryInSameDylib { return "dylib1ACategoryInSameDylib"; }
+@end
+@implementation SwiftDylib1B (Category)
+- (const char *)dylib1BCategoryInSameDylib { return "dylib1BCategoryInSameDylib"; }
+@end
+
+EXTERN_C Class initSwiftDylib1A(Class cls, void *arg)
+{
+ Dylib1AInits++;
+ testassert(arg == nil);
+ testassert(cls == RawSwiftDylib1A);
+
+ if (Dylib1AInits == 1)
+ _objc_realizeClassFromSwift(RawRealSwiftDylib1A, cls);
+
+ return RawRealSwiftDylib1A;
+}
+
+int Dylib1BInits = 0;
+
+EXTERN_C Class initSwiftDylib1B(Class cls, void *arg)
+{
+ Dylib1BInits++;
+ testassert(arg == nil);
+ testassert(cls == RawSwiftDylib1B);
+
+ if (Dylib1BInits == 1)
+ _objc_realizeClassFromSwift(RawRealSwiftDylib1B, cls);
+
+ return RawRealSwiftDylib1B;
+}
+
+EXTERN_C Class objc_loadClassref(_Nullable Class * _Nonnull clsref);
+
+void Dylib1Test(void) {
+ testassert((uintptr_t)SwiftDylib1AClassref & 1);
+ Class SwiftDylib1A = objc_loadClassref(&SwiftDylib1AClassref);
+ testassert(((uintptr_t)SwiftDylib1AClassref & 1) == 0);
+ testassert(SwiftDylib1A == [SwiftDylib1A class]);
+ testassert(SwiftDylib1A == SwiftDylib1AClassref);
+ testassert(Dylib1AInits == 2);
+
+ testassert((uintptr_t)SwiftDylib1BClassref & 1);
+ Class SwiftDylib1B = objc_loadClassref(&SwiftDylib1BClassref);
+ testassert(((uintptr_t)SwiftDylib1BClassref & 1) == 0);
+ testassert(SwiftDylib1B == [SwiftDylib1B class]);
+ testassert(SwiftDylib1B == SwiftDylib1BClassref);
+ testassert(Dylib1BInits == 2);
+}
--- /dev/null
+#include "test.h"
+#include "swift-class-def.m"
+
+@interface SwiftDylib1A: NSObject @end
+@interface SwiftDylib1B: NSObject @end
+
+@interface NSObject (DylibCategories)
+- (const char *)dylib1ACategoryInSameDylib;
+- (const char *)dylib1BCategoryInSameDylib;
+- (const char *)dylib1ACategoryInOtherDylib;
+- (const char *)dylib1BCategoryInOtherDylib;
+- (const char *)dylib1ACategoryInApp;
+- (const char *)dylib1BCategoryInApp;
++ (void)testFromOtherDylib;
+@end
+
+@implementation SwiftDylib1A (Category)
+- (const char *)dylib1ACategoryInOtherDylib { return "dylib1ACategoryInOtherDylib"; }
+@end
+@implementation SwiftDylib1B (Category)
+- (const char *)dylib1BCategoryInOtherDylib { return "dylib1BCategoryInOtherDylib"; }
+@end
+
+SWIFT_STUB_CLASSREF(SwiftDylib1A);
+SWIFT_STUB_CLASSREF(SwiftDylib1B);
+
+Class objc_loadClassref(_Nullable Class * _Nonnull clsref);
+
+@implementation SwiftDylib1A (Test)
++ (void)testFromOtherDylib {
+ Class SwiftDylib1A = objc_loadClassref(&SwiftDylib1AClassref);
+ Class SwiftDylib1B = objc_loadClassref(&SwiftDylib1BClassref);
+ testassert(strcmp([[SwiftDylib1A new] dylib1ACategoryInSameDylib], "dylib1ACategoryInSameDylib") == 0);
+ testassert(strcmp([[SwiftDylib1B new] dylib1BCategoryInSameDylib], "dylib1BCategoryInSameDylib") == 0);
+ testassert(strcmp([[SwiftDylib1A new] dylib1ACategoryInApp], "dylib1ACategoryInApp") == 0);
+ testassert(strcmp([[SwiftDylib1B new] dylib1BCategoryInApp], "dylib1BCategoryInApp") == 0);
+ testassert(strcmp([[SwiftDylib1A new] dylib1ACategoryInOtherDylib], "dylib1ACategoryInOtherDylib") == 0);
+ testassert(strcmp([[SwiftDylib1B new] dylib1BCategoryInOtherDylib], "dylib1BCategoryInOtherDylib") == 0);
+}
+@end
--- /dev/null
+/*
+TEST_CONFIG MEM=mrc
+TEST_BUILD
+ $C{COMPILE} $DIR/swiftMetadataInitializerRealloc-dylib1.m -o libswiftMetadataInitializerRealloc-dylib1.dylib -dynamiclib -Wno-deprecated-objc-pointer-introspection
+ $C{COMPILE} $DIR/swiftMetadataInitializerRealloc-dylib2.m -o libswiftMetadataInitializerRealloc-dylib2.dylib -dynamiclib -L. -lswiftMetadataInitializerRealloc-dylib1
+ $C{COMPILE} $DIR/swiftMetadataInitializerRealloc.m -o swiftMetadataInitializerRealloc.exe -L. -lswiftMetadataInitializerRealloc-dylib1 -Wno-deprecated-objc-pointer-introspection
+END
+*/
+
+#include "test.h"
+#include "swift-class-def.m"
+
+
+// _objc_swiftMetadataInitializer hooks for the classes in swift-class-def.m
+
+Class initSuper(Class cls __unused, void *arg __unused)
+{
+ // This test provokes objc's callback out of superclass order.
+ // SwiftSub's init is first. SwiftSuper's init is never called.
+
+ fail("SwiftSuper's init should not have been called");
+}
+
+bool isRealized(Class cls)
+{
+ // check the is-realized bits directly
+
+#if __LP64__
+# define mask (~(uintptr_t)7)
+#else
+# define mask (~(uintptr_t)3)
+#endif
+#define RW_REALIZED (1<<31)
+
+ uintptr_t rw = ((uintptr_t *)cls)[4] & mask; // class_t->data
+ return ((uint32_t *)rw)[0] & RW_REALIZED; // class_rw_t->flags
+}
+
+SWIFT_CLASS(SwiftSuper, NSObject, initSuper);
+SWIFT_CLASS(RealSwiftSub, SwiftSuper, initSub);
+
+SWIFT_STUB_CLASS(SwiftSub, initSub);
+
+OBJC_EXPORT _Nullable Class
+objc_loadClassref(_Nullable Class * _Nonnull clsref);
+
+static int SubInits = 0;
+Class initSub(Class cls, void *arg)
+{
+ testprintf("initSub callback\n");
+
+ testassert(SubInits == 0);
+ SubInits++;
+ testassert(arg == nil);
+ testassert(cls == RawSwiftSub);
+ testassert(!isRealized(RawSwiftSuper));
+
+ // Copy the class to the heap to ensure it's registered properly.
+ // Classes in the data segment are automatically "known" even if not
+ // added as a known class. Swift dynamically allocates classes from
+ // a statically allocated space in the dylib, then allocates from
+ // the heap after it runs out of room there. Code that only works
+ // when the class is in a dylib can fail a long time down the road
+ // when something finally exceeds the capacity of that space.
+ // Example: rdar://problem/50707074
+ Class HeapSwiftSub = (Class)malloc(OBJC_MAX_CLASS_SIZE);
+ memcpy(HeapSwiftSub, RawRealSwiftSub, OBJC_MAX_CLASS_SIZE);
+
+ testprintf("initSub beginning _objc_realizeClassFromSwift\n");
+ _objc_realizeClassFromSwift(HeapSwiftSub, cls);
+ testprintf("initSub finished _objc_realizeClassFromSwift\n");
+
+ testassert(isRealized(RawSwiftSuper));
+ testassert(isRealized(HeapSwiftSub));
+
+ testprintf("Returning reallocated class %p\n", HeapSwiftSub);
+
+ return HeapSwiftSub;
+}
+
+
+@interface SwiftSub (Addition)
+- (int)number;
+@end
+@implementation SwiftSub (Addition)
+- (int)number { return 42; }
+@end
+
+@interface NSObject (DylibCategories)
+- (const char *)dylib1ACategoryInSameDylib;
+- (const char *)dylib1BCategoryInSameDylib;
+- (const char *)dylib1ACategoryInOtherDylib;
+- (const char *)dylib1BCategoryInOtherDylib;
+- (const char *)dylib1ACategoryInApp;
+- (const char *)dylib1BCategoryInApp;
++ (const char *)dylib1ACategoryInAppClassMethod;
++ (const char *)dylib1BCategoryInAppClassMethod;
++ (void)testFromOtherDylib;
+@end
+
+extern int Dylib1AInits;
+extern int Dylib1BInits;
+SWIFT_STUB_CLASSREF(SwiftDylib1A);
+SWIFT_STUB_CLASSREF(SwiftDylib1B);
+void Dylib1Test(void);
+
+@interface SwiftDylib1A: NSObject @end
+@interface SwiftDylib1B: NSObject @end
+
+@implementation SwiftDylib1A (Category)
+- (const char *)dylib1ACategoryInApp { return "dylib1ACategoryInApp"; }
++ (const char *)dylib1ACategoryInAppClassMethod { return "dylib1ACategoryInAppClassMethod"; }
+@end
+@implementation SwiftDylib1B (Category)
+- (const char *)dylib1BCategoryInApp { return "dylib1BCategoryInApp"; }
++ (const char *)dylib1BCategoryInAppClassMethod { return "dylib1BCategoryInAppClassMethod"; }
+@end
+
+
+int main()
+{
+#define LOG(fmt, expr) testprintf(#expr " is " #fmt "\n", expr);
+ LOG(%p, SwiftSubClassref);
+ Class loadedSwiftSub = objc_loadClassref(&SwiftSubClassref);
+ LOG(%p, SwiftSubClassref);
+ LOG(%p, loadedSwiftSub);
+ LOG(%p, [loadedSwiftSub class]);
+ LOG(%p, [loadedSwiftSub superclass]);
+ LOG(%p, [RawSwiftSuper class]);
+
+ id obj = [[loadedSwiftSub alloc] init];
+ LOG(%p, obj);
+ LOG(%d, [obj number]);
+
+ LOG(%p, SwiftDylib1AClassref);
+ testassert(Dylib1AInits == 0);
+ testassert((uintptr_t)SwiftDylib1AClassref & 1);
+ Class SwiftDylib1A = objc_loadClassref(&SwiftDylib1AClassref);
+ testassert(((uintptr_t)SwiftDylib1AClassref & 1) == 0);
+ testassert(SwiftDylib1A == [SwiftDylib1A class]);
+ testassert(SwiftDylib1A == SwiftDylib1AClassref);
+ testassert(Dylib1AInits == 1);
+ LOG(%p, SwiftDylib1A);
+
+ LOG(%p, SwiftDylib1BClassref);
+ testassert(Dylib1BInits == 0);
+ testassert((uintptr_t)SwiftDylib1BClassref & 1);
+ Class SwiftDylib1B = objc_loadClassref(&SwiftDylib1BClassref);
+ testassert(((uintptr_t)SwiftDylib1BClassref & 1) == 0);
+ testassert(SwiftDylib1B == [SwiftDylib1B class]);
+ testassert(SwiftDylib1B == SwiftDylib1BClassref);
+ testassert(Dylib1BInits == 1);
+ LOG(%p, SwiftDylib1B);
+
+ Dylib1Test();
+
+ testassert(strcmp([[SwiftDylib1A new] dylib1ACategoryInSameDylib], "dylib1ACategoryInSameDylib") == 0);
+ testassert(strcmp([[SwiftDylib1B new] dylib1BCategoryInSameDylib], "dylib1BCategoryInSameDylib") == 0);
+ testassert(strcmp([[SwiftDylib1A new] dylib1ACategoryInApp], "dylib1ACategoryInApp") == 0);
+ testassert(strcmp([[SwiftDylib1B new] dylib1BCategoryInApp], "dylib1BCategoryInApp") == 0);
+
+ void *handle = dlopen("libswiftMetadataInitializerRealloc-dylib2.dylib", RTLD_LAZY);
+ testassert(handle);
+
+ testassert(strcmp([[SwiftDylib1A new] dylib1ACategoryInOtherDylib], "dylib1ACategoryInOtherDylib") == 0);
+ testassert(strcmp([[SwiftDylib1B new] dylib1BCategoryInOtherDylib], "dylib1BCategoryInOtherDylib") == 0);
+ testassert(strcmp([SwiftDylib1A dylib1ACategoryInAppClassMethod], "dylib1ACategoryInAppClassMethod") == 0);
+ testassert(strcmp([SwiftDylib1B dylib1BCategoryInAppClassMethod], "dylib1BCategoryInAppClassMethod") == 0);
+ [SwiftDylib1A testFromOtherDylib];
+
+ testassert(objc_getClass("RealSwiftSub"));
+ testassert(objc_getClass("RealSwiftDylib1A"));
+ testassert(objc_getClass("RealSwiftDylib1B"));
+
+ succeed(__FILE__);
+}
void *thread(void *arg __unused)
{
int err;
+ BOOL locked;
// non-blocking sync_enter
err = objc_sync_enter(obj);
testassert(err == OBJC_SYNC_SUCCESS);
+ // recursive try_sync_enter
+ locked = objc_sync_try_enter(obj);
+ testassert(locked);
+ err = objc_sync_exit(obj);
+ testassert(err == OBJC_SYNC_SUCCESS);
+
semaphore_signal(go);
// main thread: sync_exit of object locked on some other thread
semaphore_wait(stop);
pthread_t th;
int err;
struct timeval start, end;
+ BOOL locked;
obj = [[NSObject alloc] init];
semaphore_signal(stop);
semaphore_wait(go);
+ // contended try_sync_enter
+ locked = objc_sync_try_enter(obj);
+ testassert(!locked);
+
// blocking sync_enter
gettimeofday(&start, NULL);
err = objc_sync_enter(obj);
my $VERBOSE;
my $BATS;
+my $HOST;
+my $PORT;
+
my @TESTLIBNAMES = ("libobjc.A.dylib", "libobjc-trampolines.dylib");
my $TESTLIBDIR = "/usr/lib";
$output =~ s/^warning: key: [^\n]+\n//g;
$output =~ s/^warning: discriminator: [^\n]+\n//g;
$output =~ s/^warning: callee: [^\n]+\n//g;
+ # rdar://38710948
+ $output =~ s/ld: warning: ignoring file [^\n]*libclang_rt\.bridgeos\.a[^\n]*\n//g;
+ # ignore compiler logging of CCC_OVERRIDE_OPTIONS effects
+ if (defined $ENV{CCC_OVERRIDE_OPTIONS}) {
+ $output =~ s/### (CCC_OVERRIDE_OPTIONS:|Adding argument|Deleting argument|Replacing) [^\n]*\n//g;
+ }
my $ok;
if (my $builderror = $T{TEST_BUILD_OUTPUT}) {
# check for expected output and ignore $?
if ($output =~ /$builderror/) {
$ok = 1;
+ } elsif (defined $ENV{CCC_OVERRIDE_OPTIONS} && $builderror =~ /warning:/) {
+ # CCC_OVERRIDE_OPTIONS manipulates compiler diagnostics.
+ # Don't enforce any TEST_BUILD_OUTPUT that looks for warnings.
+ colorprint $yellow, "WARN: /// test '$name' \\\\\\";
+ colorprefix $yellow, $output;
+ colorprint $yellow, "WARN: \\\\\\ test '$name' ///";
+ colorprint $yellow, "WARN: $name (build output does not match TEST_BUILD_OUTPUT; not fatal because CCC_OVERRIDE_OPTIONS is set)";
+ $ok = 1;
} else {
colorprint $red, "FAIL: /// test '$name' \\\\\\";
colorprefix $red, $output;
$env .= " DYLD_INSERT_LIBRARIES=$remotedir/libcrashcatch.dylib";
}
- my $cmd = "ssh iphone 'cd $remotedir && env $env ./$name.exe'";
+ my $cmd = "ssh -p $PORT $HOST 'cd $remotedir && env $env ./$name.exe'";
$output = make("$cmd");
}
elsif ($C{OS} =~ /simulator/) {
# fixme selection of simulated OS version
my $simdevice;
if ($C{OS} =~ /iphonesimulator/) {
- $simdevice = 'iPhone 6';
+ $simdevice = 'iPhone X';
} elsif ($C{OS} =~ /watchsimulator/) {
$simdevice = 'Apple Watch Series 4 - 40mm';
} elsif ($C{OS} =~ /tvsimulator/) {
# libarclite no longer available on i386
# fixme need an archived copy for bincompat testing
$C{FORCE_LOAD_ARCLITE} = "";
+ } elsif ($C{OS} eq "bridgeos") {
+ # no libarclite on bridgeOS
+ $C{FORCE_LOAD_ARCLITE} = "";
} else {
$C{FORCE_LOAD_ARCLITE} = "-Xlinker -force_load -Xlinker " . dirname($C{CC}) . "/../lib/arc/libarclite_$C{OS}.a";
}
sub rsync_ios {
my ($src, $timeout) = @_;
for (my $i = 0; $i < 10; $i++) {
- make("$DIR/timeout.pl $timeout env RSYNC_PASSWORD=alpine rsync -av $src rsync://root\@localhost:10873/root/$REMOTEBASE/");
+ make("$DIR/timeout.pl $timeout rsync -e 'ssh -p $PORT' -av $src $HOST:/$REMOTEBASE/");
return if $? == 0;
colorprint $yellow, "WARN: RETRY\n" if $VERBOSE;
}
$args{OSVERSION} = getargs("OS", "macosx-default-default");
-$args{MEM} = getargs("MEM", "mrc");
-$args{LANGUAGE} = [ map { lc($_) } @{getargs("LANGUAGE", "objective-c")} ];
+$args{MEM} = getargs("MEM", "mrc,arc");
+$args{LANGUAGE} = [ map { lc($_) } @{getargs("LANGUAGE", "c,objective-c,c++,objective-c++")} ];
$args{CC} = getargs("CC", "clang");
+$HOST = getarg("HOST", "iphone");
+$PORT = getarg("PORT", "10022");
+
{
my $guardmalloc = getargs("GUARDMALLOC", 0);
# GUARDMALLOC=1 is the same as GUARDMALLOC=before,after
TEST_BUILD
$C{COMPILE} $DIR/unload4.m -o unload4.dylib -dynamiclib
$C{COMPILE_C} $DIR/unload3.c -o unload3.dylib -dynamiclib
- $C{COMPILE} $DIR/unload2.m -o unload2.bundle -bundle $C{FORCE_LOAD_ARCLITE}
+ $C{COMPILE} $DIR/unload2.m -o unload2.bundle -bundle $C{FORCE_LOAD_ARCLITE} -Xlinker -undefined -Xlinker dynamic_lookup
$C{COMPILE} $DIR/unload.m -o unload.exe -framework Foundation
END
*/
/*
-i386 Mac doesn't have libarclite
TEST_BUILD_OUTPUT
-ld: warning: ignoring file .* which is not the architecture being linked \(i386\).*
+ld: warning: -undefined dynamic_lookup is deprecated on .*
OR
END
*/
Class small = objc_getClass("SmallClass");
Class big = objc_getClass("BigClass");
+ Class missing = objc_getClass("SubclassOfMissingWeakImport");
testassert(small);
testassert(big);
+ testassert(!missing);
name = class_getImageName(small);
testassert(name);
@implementation SmallClass (Category)
-(void)unload2_category_method { }
@end
+
+// This isn't really weak-import: we link with `-undefined dynamic_lookup`
+// instead of providing a valid definition at link time.
+// But it looks the same to the runtime.
+__attribute__((weak_import))
+@interface ClassThatIsWeakImportAndMissing : TestRoot @end
+
+@interface SubclassOfMissingWeakImport : ClassThatIsWeakImportAndMissing <SmallProtocol> @end
+@implementation SubclassOfMissingWeakImport
+-(void)unload2_category_method { }
+@end
+
+@interface ClassThatIsWeakImportAndMissing (Category) <SmallProtocol> @end
+@implementation ClassThatIsWeakImportAndMissing (Category)
+-(void)unload2_category_method { }
+@end
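
The new classes above verify that a subclass of a missing weak-import superclass is discarded wholesale, so objc_getClass() returns Nil for it (see the testassert(!missing) added to the main test). A minimal sketch of the defensive lookup a client would do, using the same class name; optionalClassAvailable is an illustrative helper.

#include <objc/runtime.h>
#include <stdbool.h>

// If the weak-imported superclass was missing at load time, the runtime never
// registers the subclass, so the lookup yields Nil and the feature is skipped.
static bool optionalClassAvailable(void)
{
    Class cls = objc_getClass("SubclassOfMissingWeakImport");
    return cls != Nil;
}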
--- /dev/null
+// TEST_CONFIG MEM=mrc
+
+#import "test.h"
+#import "testroot.i"
+
+#import <objc/objc-internal.h>
+
+#import <stdio.h>
+
+char dummy;
+
+Class *seenClasses;
+size_t seenClassesCount;
+
+static void clear(void) {
+ free(seenClasses);
+ seenClasses = NULL;
+ seenClassesCount = 0;
+}
+
+static void willInitializeClass(void *context, Class cls) {
+ testprintf("Will initialize %s\n", class_getName(cls));
+ seenClassesCount++;
+ seenClasses = (Class *)realloc(seenClasses, seenClassesCount * sizeof(*seenClasses));
+ seenClasses[seenClassesCount - 1] = cls;
+ testassert(context == &dummy);
+}
+
+int initializedC;
+@interface C: TestRoot @end
+@implementation C
++ (void)initialize {
+ testprintf("C initialize\n");
+ initializedC = 1;
+}
+@end
+
+int initializedD;
+@interface D: TestRoot @end
+@implementation D
++ (void)initialize {
+ testprintf("D initialize\n");
+ initializedD = 1;
+}
+@end
+
+int initializedE;
+@interface E: TestRoot @end
+@implementation E
++ (void)initialize {
+ testprintf("E initialize\n");
+ initializedE = 1;
+}
+@end
+
+int main()
+{
+ _objc_addWillInitializeClassFunc(willInitializeClass, &dummy);
+
+ // Merely getting a class should not trigger the callback.
+ clear();
+ size_t oldCount = seenClassesCount;
+ Class c = objc_getClass("C");
+ testassert(seenClassesCount == oldCount);
+ testassert(initializedC == 0);
+
+ // Sending a message to C should trigger the callback and the superclass's callback.
+ [c class];
+ testassert(seenClassesCount == oldCount + 2);
+ testassert(seenClasses[seenClassesCount - 2] == [TestRoot class]);
+ testassert(seenClasses[seenClassesCount - 1] == [C class]);
+
+ // Sending a message to D should trigger the callback only for D, since the
+ // superclass is already initialized.
+ oldCount = seenClassesCount;
+ [D class];
+ testassert(seenClassesCount == oldCount + 1);
+ testassert(seenClasses[seenClassesCount - 1] == [D class]);
+
+ // Registering a second callback should inform us of all three exactly once.
+ clear();
+ _objc_addWillInitializeClassFunc(willInitializeClass, &dummy);
+ testassert(seenClassesCount == 3);
+
+ int foundRoot = 0;
+ int foundC = 0;
+ int foundD = 0;
+ for (size_t i = 0; i < seenClassesCount; i++) {
+ if (seenClasses[i] == [TestRoot class])
+ foundRoot++;
+ if (seenClasses[i] == [C class])
+ foundC++;
+ if (seenClasses[i] == [D class])
+ foundD++;
+ }
+ testassert(foundRoot == 1);
+ testassert(foundC == 1);
+ testassert(foundD == 1);
+
+ // Both callbacks should fire when sending a message to E.
+ clear();
+ [E class];
+ testassert(initializedE);
+ testassert(seenClassesCount == 2);
+ testassert(seenClasses[0] == [E class]);
+ testassert(seenClasses[1] == [E class]);
+
+ succeed(__FILE__);
+}