sect->set_sectname("__objc_init_func");
if (debug) printf("disabled __mod_init_func section\n");
}
+ if (segnameStartsWith(sect->segname(), "__TEXT") &&
+ sectnameEquals(sect->sectname(), "__init_offsets"))
+ {
+ // section type 0 is S_REGULAR
+ sect->set_flags(sect->flags() & ~SECTION_TYPE);
+ sect->set_sectname("__objc_init_offs");
+ if (debug) printf("disabled __mod_init_func section\n");
+ }
if (segnameStartsWith(sect->segname(), "__DATA") &&
sectnameEquals(sect->sectname(), "__mod_term_func"))
{
6EF877E22325D93200963DBB /* Symbolication.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 6EF877E12325D93200963DBB /* Symbolication.framework */; };
6EF877E52325FAC400963DBB /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 6EF877E42325FAC400963DBB /* Foundation.framework */; };
6EF877E82326184000963DBB /* json.mm in Sources */ = {isa = PBXBuildFile; fileRef = 6EF877E72326184000963DBB /* json.mm */; };
- 6EF877E923261D3E00963DBB /* objc-cache.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CB0D6D68A200CEA253 /* objc-cache.mm */; };
6EF877EC232635A700963DBB /* objcdt.1 in Install Manpages */ = {isa = PBXBuildFile; fileRef = 6EF877EA232633CC00963DBB /* objcdt.1 */; };
7213C36321FA7C730090A271 /* NSObject-internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 7213C36221FA7C730090A271 /* NSObject-internal.h */; settings = {ATTRIBUTES = (Private, ); }; };
7593EC58202248E50046AB96 /* objc-object.h in Headers */ = {isa = PBXBuildFile; fileRef = 7593EC57202248DF0046AB96 /* objc-object.h */; };
9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9672F7ED14D5F488007CEC96 /* NSObject.mm */; };
C22F5208230EF38B001BFE14 /* objc-ptrauth.h in Headers */ = {isa = PBXBuildFile; fileRef = C22F5207230EF38B001BFE14 /* objc-ptrauth.h */; };
C2E6D3FC2225DCF00059DFAA /* DenseMapExtras.h in Headers */ = {isa = PBXBuildFile; fileRef = C2E6D3FB2225DCF00059DFAA /* DenseMapExtras.h */; };
+ C2EB731D23D8A38A0040672B /* dummy-library-mac-i386.c in Sources */ = {isa = PBXBuildFile; fileRef = C2EB731C23D8A38A0040672B /* dummy-library-mac-i386.c */; };
E8923DA5116AB2820071B552 /* objc-block-trampolines.mm in Sources */ = {isa = PBXBuildFile; fileRef = E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */; };
+ E934A9F123E996D00088F26F /* objc4.plist in CopyFiles */ = {isa = PBXBuildFile; fileRef = E934A9EF23E9967D0088F26F /* objc4.plist */; settings = {ATTRIBUTES = (CodeSignOnCopy, ); }; };
F9BCC71B205C68E800DD9AFC /* objc-blocktramps-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 8379996D13CBAF6F007C2B5F /* objc-blocktramps-arm64.s */; };
/* End PBXBuildFile section */
name = "Install Manpages";
runOnlyForDeploymentPostprocessing = 1;
};
+ E934A9F023E996CC0088F26F /* CopyFiles */ = {
+ isa = PBXCopyFilesBuildPhase;
+ buildActionMask = 8;
+ dstPath = /System/Library/FeatureFlags/Domain;
+ dstSubfolderSpec = 0;
+ files = (
+ E934A9F123E996D00088F26F /* objc4.plist in CopyFiles */,
+ );
+ runOnlyForDeploymentPostprocessing = 1;
+ };
/* End PBXCopyFilesBuildPhase section */
/* Begin PBXFileReference section */
9672F7ED14D5F488007CEC96 /* NSObject.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = NSObject.mm; path = runtime/NSObject.mm; sourceTree = "<group>"; };
BC8B5D1212D3D48100C78A5B /* libauto.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libauto.dylib; path = /usr/lib/libauto.dylib; sourceTree = "<absolute>"; };
C217B55222DE556D004369BA /* objc-env.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-env.h"; path = "runtime/objc-env.h"; sourceTree = "<group>"; };
+ C2296C682457336C003FAE61 /* objc-bp-assist.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-bp-assist.h"; path = "runtime/objc-bp-assist.h"; sourceTree = "<group>"; };
C22F5207230EF38B001BFE14 /* objc-ptrauth.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-ptrauth.h"; path = "runtime/objc-ptrauth.h"; sourceTree = "<group>"; };
C2E6D3FB2225DCF00059DFAA /* DenseMapExtras.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DenseMapExtras.h; path = runtime/DenseMapExtras.h; sourceTree = "<group>"; };
+ C2EB731C23D8A38A0040672B /* dummy-library-mac-i386.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "dummy-library-mac-i386.c"; path = "runtime/dummy-library-mac-i386.c"; sourceTree = "<group>"; };
D2AAC0630554660B00DB518D /* libobjc.A.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libobjc.A.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
E8923D9C116AB2820071B552 /* objc-blocktramps-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-blocktramps-i386.s"; path = "runtime/objc-blocktramps-i386.s"; sourceTree = "<group>"; };
E8923D9D116AB2820071B552 /* objc-blocktramps-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-blocktramps-x86_64.s"; path = "runtime/objc-blocktramps-x86_64.s"; sourceTree = "<group>"; };
E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-block-trampolines.mm"; path = "runtime/objc-block-trampolines.mm"; sourceTree = "<group>"; };
+ E934A9EF23E9967D0088F26F /* objc4.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = objc4.plist; sourceTree = "<group>"; };
+ E97047552497CC5300781D29 /* check_preopt_caches.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = check_preopt_caches.entitlements; sourceTree = "<group>"; };
+ E9AD465924925261002AF1DB /* check_preopt_caches.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = check_preopt_caches.mm; sourceTree = "<group>"; };
F9BCC727205C68E800DD9AFC /* libobjc-trampolines.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = "libobjc-trampolines.dylib"; sourceTree = BUILT_PRODUCTS_DIR; };
/* End PBXFileReference section */
08FB7795FE84155DC02AAC07 /* Source */,
838485B20D6D67F900CEA253 /* Other */,
6EF877D82325D62600963DBB /* objcdt */,
+ E9AD465824925261002AF1DB /* check-preopt-caches */,
1AB674ADFE9D54B511CA2CBB /* Products */,
F9BCC72A205C6A1600DD9AFC /* Frameworks */,
);
08FB7795FE84155DC02AAC07 /* Source */ = {
isa = PBXGroup;
children = (
+ C2EB731C23D8A38A0040672B /* dummy-library-mac-i386.c */,
838485B80D6D687300CEA253 /* hashtable2.mm */,
838485BC0D6D687300CEA253 /* maptable.mm */,
9672F7ED14D5F488007CEC96 /* NSObject.mm */,
838485B40D6D683300CEA253 /* APPLE_LICENSE */,
838485B50D6D683300CEA253 /* ReleaseNotes.rtf */,
83CE671D1E6E76B60095A33E /* interposable.txt */,
+ E934A9EF23E9967D0088F26F /* objc4.plist */,
838485B30D6D682B00CEA253 /* libobjc.order */,
);
name = Other;
83D9269721225A7400299F69 /* arm64-asm.h */,
83D92695212254CF00299F69 /* isa.h */,
838485CF0D6D68A200CEA253 /* objc-config.h */,
+ C2296C682457336C003FAE61 /* objc-bp-assist.h */,
C217B55222DE556D004369BA /* objc-env.h */,
83BE02E50FCCB24D00661494 /* objc-file-old.h */,
83BE02E60FCCB24D00661494 /* objc-file.h */,
name = "Project Headers";
sourceTree = "<group>";
};
+ E9AD465824925261002AF1DB /* check-preopt-caches */ = {
+ isa = PBXGroup;
+ children = (
+ E97047552497CC5300781D29 /* check_preopt_caches.entitlements */,
+ E9AD465924925261002AF1DB /* check_preopt_caches.mm */,
+ );
+ path = "check-preopt-caches";
+ sourceTree = "<group>";
+ };
F9BCC72A205C6A1600DD9AFC /* Frameworks */ = {
isa = PBXGroup;
children = (
D289988505E68E00004EDB86 /* Frameworks */,
830F2AB60D739AB600392440 /* Run Script (markgc) */,
830F2AFA0D73BC5800392440 /* Run Script (symlink) */,
+ E934A9F023E996CC0088F26F /* CopyFiles */,
);
buildRules = (
);
08FB7793FE84155DC02AAC07 /* Project object */ = {
isa = PBXProject;
attributes = {
- BuildIndependentTargetsInParallel = NO;
LastUpgradeCheck = 0440;
TargetAttributes = {
6EF877D62325D62600963DBB = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
- 6EF877E923261D3E00963DBB /* objc-cache.mm in Sources */,
6EF877E82326184000963DBB /* json.mm in Sources */,
6EF877DA2325D62600963DBB /* objcdt.mm in Sources */,
6EF877DE2325D79000963DBB /* objc-probes.d in Sources */,
83B1A8BE0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s in Sources */,
83EB007B121C9EC200B92C16 /* objc-sel-table.s in Sources */,
39ABD72412F0B61800D1054C /* objc-weak.mm in Sources */,
+ C2EB731D23D8A38A0040672B /* dummy-library-mac-i386.c in Sources */,
83D49E4F13C7C84F0057F1DD /* objc-msg-arm64.s in Sources */,
9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */,
83725F4A14CA5BFA0014370E /* objc-opt.mm in Sources */,
COPY_PHASE_STRIP = NO;
DEPLOYMENT_LOCATION = YES;
DYLIB_CURRENT_VERSION = 228;
+ EXCLUDED_SOURCE_FILE_NAMES = "dummy-library-mac-i386.c";
+ "EXCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "*";
EXECUTABLE_PREFIX = lib;
GCC_CW_ASM_SYNTAX = NO;
GCC_OPTIMIZATION_LEVEL = 0;
"$(CONFIGURATION_BUILD_DIR)/usr/local/include/**",
/System/Library/Frameworks/System.framework/PrivateHeaders,
);
+ "INCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "dummy-library-mac-i386.c";
INSTALL_PATH = /usr/lib;
IS_ZIPPERED = YES;
LLVM_LTO = NO;
"-interposable_list",
"-Xlinker",
interposable.txt,
+ "-Xlinker",
+ "-headerpad",
+ "-Xlinker",
+ 0x100,
);
"OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = (
"-lc++abi",
"-interposable_list",
"-Xlinker",
interposable.txt,
+ "-loah",
);
+ "OTHER_LDFLAGS[sdk=macosx*][arch=i386]" = "-nodefaultlibs";
OTHER_TAPI_FLAGS = "-exclude-public-header $(DSTROOT)/usr/include/objc/ObjectiveC.apinotes -exclude-public-header $(DSTROOT)/usr/include/objc/module.modulemap -Xparser -Wno-deprecated-declarations -Xparser -Wno-unavailable-declarations -Xparser -D_OBJC_PRIVATE_H_=1 -DOBJC_DECLARE_SYMBOLS=1";
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc;
PRODUCT_NAME = objc.A;
"COPY_HEADERS_UNIFDEF_FLAGS[sdk=macosx*]" = "-DBUILD_FOR_OSX";
DEPLOYMENT_LOCATION = YES;
DYLIB_CURRENT_VERSION = 228;
+ EXCLUDED_SOURCE_FILE_NAMES = "dummy-library-mac-i386.c";
+ "EXCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "*";
EXECUTABLE_PREFIX = lib;
GCC_CW_ASM_SYNTAX = NO;
GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = NO;
"$(CONFIGURATION_BUILD_DIR)/usr/local/include/**",
/System/Library/Frameworks/System.framework/PrivateHeaders,
);
+ "INCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "dummy-library-mac-i386.c";
INSTALL_PATH = /usr/lib;
IS_ZIPPERED = YES;
ORDER_FILE = "$(SDKROOT)/AppleInternal/OrderFiles/libobjc.order";
"-interposable_list",
"-Xlinker",
interposable.txt,
+ "-Xlinker",
+ "-headerpad",
+ "-Xlinker",
+ 0x100,
);
"OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = (
"-lc++abi",
"-interposable_list",
"-Xlinker",
interposable.txt,
+ "-loah",
);
+ "OTHER_LDFLAGS[sdk=macosx*][arch=i386]" = "-nodefaultlibs";
OTHER_TAPI_FLAGS = "-exclude-public-header $(DSTROOT)/usr/include/objc/ObjectiveC.apinotes -exclude-public-header $(DSTROOT)/usr/include/objc/module.modulemap -Xparser -Wno-deprecated-declarations -Xparser -Wno-unavailable-declarations -Xparser -D_OBJC_PRIVATE_H_=1 -DOBJC_DECLARE_SYMBOLS=1";
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc;
PRODUCT_NAME = objc.A;
CLANG_CXX_LIBRARY = "libc++";
CLANG_LINK_OBJC_RUNTIME = NO;
CLANG_OBJC_RUNTIME = NO;
+ CODE_SIGN_IDENTITY = "-";
DEBUG_INFORMATION_FORMAT = dwarf;
GCC_ENABLE_CPP_EXCEPTIONS = NO;
GCC_ENABLE_CPP_RTTI = NO;
CLANG_CXX_LIBRARY = "libc++";
CLANG_LINK_OBJC_RUNTIME = NO;
CLANG_OBJC_RUNTIME = NO;
+ CODE_SIGN_IDENTITY = "-";
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
GCC_ENABLE_CPP_EXCEPTIONS = NO;
GCC_ENABLE_CPP_RTTI = NO;
buildSettings = {
CODE_SIGN_ENTITLEMENTS = "objcdt/objcdt-entitlements.plist";
CODE_SIGN_IDENTITY = "-";
+ DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
GCC_PREPROCESSOR_DEFINITIONS = (
"__BUILDING_OBJCDT__=1",
"$(inherited)",
OTHER_LDFLAGS = (
"-Xlinker",
"-not_for_dyld_shared_cache",
+ "-nodefaultlibs",
);
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc;
PRODUCT_NAME = "$(TARGET_NAME)";
OTHER_LDFLAGS = (
"-Xlinker",
"-not_for_dyld_shared_cache",
+ "-nodefaultlibs",
);
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc;
PRODUCT_NAME = "$(TARGET_NAME)";
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>preoptimizedCaches</key>
+ <dict>
+ <key>Enabled</key>
+ <true/>
+ </dict>
+</dict>
+</plist>
Help can be obtained using
.Nm
.Ar help
-.Ed
#include <sysexits.h>
#include <getopt.h>
#include <pthread.h>
+#include <sys/sysctl.h>
+#include <string>
int main(int argc, const char *argv[])
{
mov r1, r2 // selector
.endif
mov r2, r9 // class to search
- mov r3, #3 // LOOKUP_INITIALIZE | LOOKUP_INITIALIZE
+ mov r3, #3 // LOOKUP_INITIALIZE | LOOKUP_RESOLVER
blx _lookUpImpOrForward
mov r12, r0 // r12 = IMP
#include <arm/arch.h>
#include "isa.h"
-#include "arm64-asm.h"
#include "objc-config.h"
+#include "arm64-asm.h"
+
+#if TARGET_OS_IPHONE && __LP64__
+ .section __TEXT,__objc_methname,cstring_literals
+l_MagicSelector: /* the shared cache builder knows about this value */
+ .byte 0xf0, 0x9f, 0xa4, 0xaf, 0
+
+ .section __DATA,__objc_selrefs,literal_pointers,no_dead_strip
+ .p2align 3
+_MagicSelRef:
+ .quad l_MagicSelector
+#endif
.data
_objc_restartableRanges:
RestartableEntry _cache_getImp
RestartableEntry _objc_msgSend
- RestartableEntry _objc_msgSendSuper
RestartableEntry _objc_msgSendSuper2
RestartableEntry _objc_msgLookup
RestartableEntry _objc_msgLookupSuper2
/********************************************************************
- * GetClassFromIsa_p16 src
+ * GetClassFromIsa_p16 src, needs_auth, auth_address
* src is a raw isa field. Sets p16 to the corresponding class pointer.
* The raw isa might be an indexed isa to be decoded, or a
* packed isa that needs to be masked.
*
* On exit:
- * $0 is unchanged
+ * src is unchanged
* p16 is a class pointer
* x10 is clobbered
********************************************************************/
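+//
+// For example, _objc_msgSend below decodes the receiver's isa with
+//     GetClassFromIsa_p16 p13, 1, x0      // p16 = class
+// where p13 is the raw isa loaded from the receiver and x0 (the receiver
+// address) is used as the ptrauth authentication address.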
.fill ISA_INDEX_COUNT, PTRSIZE, 0
#endif
-.macro GetClassFromIsa_p16 /* src */
+.macro GetClassFromIsa_p16 src, needs_auth, auth_address /* note: auth_address is not required if !needs_auth */
#if SUPPORT_INDEXED_ISA
// Indexed isa
- mov p16, $0 // optimistically set dst = src
+ mov p16, \src // optimistically set dst = src
tbz p16, #ISA_INDEX_IS_NPI_BIT, 1f // done if not non-pointer isa
// isa in p16 is indexed
adrp x10, _objc_indexed_classes@PAGE
1:
#elif __LP64__
+.if \needs_auth == 0 // _cache_getImp takes an authed class already
+ mov p16, \src
+.else
// 64-bit packed isa
- and p16, $0, #ISA_MASK
-
+ ExtractISA p16, \src, \auth_address
+.endif
#else
// 32-bit raw isa
- mov p16, $0
+ mov p16, \src
#endif
#define FrameWithNoSaves 0x04000000 // frame, no non-volatile saves
+#define MSGSEND 100
+#define METHOD_INVOKE 101
+
//////////////////////////////////////////////////////////////////////
//
// SAVE_REGS
// for a function call.
//////////////////////////////////////////////////////////////////////
-.macro SAVE_REGS
+.macro SAVE_REGS kind
// push frame
SignLR
mov fp, sp
// save parameter registers: x0..x8, q0..q7
- sub sp, sp, #(10*8 + 8*16)
- stp q0, q1, [sp, #(0*16)]
- stp q2, q3, [sp, #(2*16)]
- stp q4, q5, [sp, #(4*16)]
- stp q6, q7, [sp, #(6*16)]
- stp x0, x1, [sp, #(8*16+0*8)]
- stp x2, x3, [sp, #(8*16+2*8)]
- stp x4, x5, [sp, #(8*16+4*8)]
- stp x6, x7, [sp, #(8*16+6*8)]
- str x8, [sp, #(8*16+8*8)]
+ sub sp, sp, #(10*8 + 8*16)
+ stp q0, q1, [sp, #(0*16)]
+ stp q2, q3, [sp, #(2*16)]
+ stp q4, q5, [sp, #(4*16)]
+ stp q6, q7, [sp, #(6*16)]
+ stp x0, x1, [sp, #(8*16+0*8)]
+ stp x2, x3, [sp, #(8*16+2*8)]
+ stp x4, x5, [sp, #(8*16+4*8)]
+ stp x6, x7, [sp, #(8*16+6*8)]
+.if \kind == MSGSEND
+ stp x8, x15, [sp, #(8*16+8*8)]
+ mov x16, x15 // stashed by CacheLookup, restore to x16
+.elseif \kind == METHOD_INVOKE
+ str x8, [sp, #(8*16+8*8)]
+.else
+.abort Unknown kind.
+.endif
.endmacro
// SAVE_REGS.
//////////////////////////////////////////////////////////////////////
-.macro RESTORE_REGS
-
- ldp q0, q1, [sp, #(0*16)]
- ldp q2, q3, [sp, #(2*16)]
- ldp q4, q5, [sp, #(4*16)]
- ldp q6, q7, [sp, #(6*16)]
- ldp x0, x1, [sp, #(8*16+0*8)]
- ldp x2, x3, [sp, #(8*16+2*8)]
- ldp x4, x5, [sp, #(8*16+4*8)]
- ldp x6, x7, [sp, #(8*16+6*8)]
- ldr x8, [sp, #(8*16+8*8)]
+.macro RESTORE_REGS kind
+
+ ldp q0, q1, [sp, #(0*16)]
+ ldp q2, q3, [sp, #(2*16)]
+ ldp q4, q5, [sp, #(4*16)]
+ ldp q6, q7, [sp, #(6*16)]
+ ldp x0, x1, [sp, #(8*16+0*8)]
+ ldp x2, x3, [sp, #(8*16+2*8)]
+ ldp x4, x5, [sp, #(8*16+4*8)]
+ ldp x6, x7, [sp, #(8*16+6*8)]
+.if \kind == MSGSEND
+ ldp x8, x16, [sp, #(8*16+8*8)]
+ orr x16, x16, #2 // for the sake of instrumentation, remember it was the slowpath
+.elseif \kind == METHOD_INVOKE
+ ldr x8, [sp, #(8*16+8*8)]
+.else
+.abort Unknown kind.
+.endif
mov sp, fp
ldp fp, lr, [sp], #16
/********************************************************************
*
- * CacheLookup NORMAL|GETIMP|LOOKUP <function>
+ * CacheLookup NORMAL|GETIMP|LOOKUP <function> MissLabelDynamic MissLabelConstant
+ *
+ * MissLabelConstant is only used for the GETIMP variant.
*
* Locate the implementation for a selector in a class method cache.
*
* x16 = class to be searched
*
* Kills:
- * x9,x10,x11,x12, x17
+ * x9,x10,x11,x12,x13,x15,x17
+ *
+ * Untouched:
+ * x14
*
* On exit: (found) calls or returns IMP
* with x16 = class, x17 = IMP
+ * In LOOKUP mode, the two low bits are set to 0x3
+ * if we hit a constant cache (used in objc_trace)
* (not found) jumps to LCacheMiss
+ * with x15 = class
+ * For constant caches in LOOKUP mode, the low bit
+ * of x16 is set to 0x1 to indicate we had to fall back.
+ * In addition, when LCacheMiss is __objc_msgSend_uncached or
+ * __objc_msgLookup_uncached, 0x2 will be set in x16
+ * to remember we took the slowpath.
+ * So the two low bits of x16 on exit mean:
+ * 0: dynamic hit
+ * 1: fallback to the parent class, when there is a preoptimized cache
+ * 2: slowpath
+ * 3: preoptimized cache hit
*
********************************************************************/
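+//
+// Example invocations from later in this file:
+//     CacheLookup NORMAL, _objc_msgSend, __objc_msgSend_uncached
+//     CacheLookup LOOKUP, _objc_msgLookup, __objc_msgLookup_uncached
+//     CacheLookup GETIMP, _cache_getImp, LGetImpMissDynamic, LGetImpMissConstant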
#define GETIMP 1
#define LOOKUP 2
-// CacheHit: x17 = cached IMP, x12 = address of cached IMP, x1 = SEL, x16 = isa
+// CacheHit: x17 = cached IMP, x10 = address of buckets, x1 = SEL, x16 = isa
.macro CacheHit
.if $0 == NORMAL
- TailCallCachedImp x17, x12, x1, x16 // authenticate and call imp
+ TailCallCachedImp x17, x10, x1, x16 // authenticate and call imp
.elseif $0 == GETIMP
mov p0, p17
cbz p0, 9f // don't ptrauth a nil imp
- AuthAndResignAsIMP x0, x12, x1, x16 // authenticate imp and re-sign as IMP
+ AuthAndResignAsIMP x0, x10, x1, x16 // authenticate imp and re-sign as IMP
9: ret // return IMP
.elseif $0 == LOOKUP
// No nil check for ptrauth: the caller would crash anyway when they
// jump to a nil IMP. We don't care if that jump also fails ptrauth.
- AuthAndResignAsIMP x17, x12, x1, x16 // authenticate imp and re-sign as IMP
+ AuthAndResignAsIMP x17, x10, x1, x16 // authenticate imp and re-sign as IMP
+ cmp x16, x15
+ cinc x16, x16, ne // x16 += 1 when x15 != x16 (for instrumentation; fallback to the parent class)
ret // return imp via x17
.else
.abort oops
.endif
.endmacro
-.macro CheckMiss
- // miss if bucket->sel == 0
-.if $0 == GETIMP
- cbz p9, LGetImpMiss
-.elseif $0 == NORMAL
- cbz p9, __objc_msgSend_uncached
-.elseif $0 == LOOKUP
- cbz p9, __objc_msgLookup_uncached
-.else
-.abort oops
-.endif
-.endmacro
-
-.macro JumpMiss
-.if $0 == GETIMP
- b LGetImpMiss
-.elseif $0 == NORMAL
- b __objc_msgSend_uncached
-.elseif $0 == LOOKUP
- b __objc_msgLookup_uncached
-.else
-.abort oops
-.endif
-.endmacro
-
-.macro CacheLookup
+.macro CacheLookup Mode, Function, MissLabelDynamic, MissLabelConstant
//
// Restart protocol:
//
- // As soon as we're past the LLookupStart$1 label we may have loaded
- // an invalid cache pointer or mask.
+ // As soon as we're past the LLookupStart\Function label we may have
+ // loaded an invalid cache pointer or mask.
//
// When task_restartable_ranges_synchronize() is called,
- // (or when a signal hits us) before we're past LLookupEnd$1,
- // then our PC will be reset to LLookupRecover$1 which forcefully
+ // (or when a signal hits us) before we're past LLookupEnd\Function,
+ // then our PC will be reset to LLookupRecover\Function which forcefully
// jumps to the cache-miss codepath which has the following
// requirements:
//
// - x16 contains the isa
// - other registers are set as per calling conventions
//
-LLookupStart$1:
+ mov x15, x16 // stash the original isa
+LLookupStart\Function:
// p1 = SEL, p16 = isa
- ldr p11, [x16, #CACHE] // p11 = mask|buckets
-
-#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
+#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
+ ldr p10, [x16, #CACHE] // p10 = mask|buckets
+ lsr p11, p10, #48 // p11 = mask
+ and p10, p10, #0xffffffffffff // p10 = buckets
+ and w12, w1, w11 // x12 = _cmd & mask
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
+ ldr p11, [x16, #CACHE] // p11 = mask|buckets
+#if CONFIG_USE_PREOPT_CACHES
+#if __has_feature(ptrauth_calls)
+ tbnz p11, #0, LLookupPreopt\Function
+ and p10, p11, #0x0000ffffffffffff // p10 = buckets
+#else
+ and p10, p11, #0x0000fffffffffffe // p10 = buckets
+ tbnz p11, #0, LLookupPreopt\Function
+#endif
+ eor p12, p1, p1, LSR #7
+ and p12, p12, p11, LSR #48 // x12 = (_cmd ^ (_cmd >> 7)) & mask
+#else
and p10, p11, #0x0000ffffffffffff // p10 = buckets
and p12, p1, p11, LSR #48 // x12 = _cmd & mask
+#endif // CONFIG_USE_PREOPT_CACHES
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
+ ldr p11, [x16, #CACHE] // p11 = mask|buckets
and p10, p11, #~0xf // p10 = buckets
and p11, p11, #0xf // p11 = maskShift
mov p12, #0xffff
- lsr p11, p12, p11 // p11 = mask = 0xffff >> p11
- and p12, p1, p11 // x12 = _cmd & mask
+ lsr p11, p12, p11 // p11 = mask = 0xffff >> p11
+ and p12, p1, p11 // x12 = _cmd & mask
#else
#error Unsupported cache mask storage for ARM64.
#endif
+ add p13, p10, p12, LSL #(1+PTRSHIFT)
+ // p13 = buckets + ((_cmd & mask) << (1+PTRSHIFT))
+
+ // do {
+1: ldp p17, p9, [x13], #-BUCKET_SIZE // {imp, sel} = *bucket--
+ cmp p9, p1 // if (sel != _cmd) {
+ b.ne 3f // scan more
+ // } else {
+2: CacheHit \Mode // hit: call or return imp
+ // }
+3: cbz p9, \MissLabelDynamic // if (sel == 0) goto Miss;
+ cmp p13, p10 // } while (bucket >= buckets)
+ b.hs 1b
+
+ // wrap-around:
+ // p10 = first bucket
+ // p11 = mask (and maybe other bits on LP64)
+ // p12 = _cmd & mask
+ //
+ // A full cache can happen with CACHE_ALLOW_FULL_UTILIZATION.
+ // So stop when we circle back to the first probed bucket
+ // rather than when hitting the first bucket again.
+ //
+ // Note that we might probe the initial bucket twice
+ // when the first probed slot is the last entry.
- add p12, p10, p12, LSL #(1+PTRSHIFT)
- // p12 = buckets + ((_cmd & mask) << (1+PTRSHIFT))
- ldp p17, p9, [x12] // {imp, sel} = *bucket
-1: cmp p9, p1 // if (bucket->sel != _cmd)
- b.ne 2f // scan more
- CacheHit $0 // call or return imp
-
-2: // not hit: p12 = not-hit bucket
- CheckMiss $0 // miss if bucket->sel == 0
- cmp p12, p10 // wrap if bucket == buckets
- b.eq 3f
- ldp p17, p9, [x12, #-BUCKET_SIZE]! // {imp, sel} = *--bucket
- b 1b // loop
-
-3: // wrap: p12 = first bucket, w11 = mask
-#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
- add p12, p12, p11, LSR #(48 - (1+PTRSHIFT))
- // p12 = buckets + (mask << 1+PTRSHIFT)
+#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
+ add p13, p10, w11, UXTW #(1+PTRSHIFT)
+ // p13 = buckets + (mask << 1+PTRSHIFT)
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
+ add p13, p10, p11, LSR #(48 - (1+PTRSHIFT))
+ // p13 = buckets + (mask << 1+PTRSHIFT)
+ // see comment about maskZeroBits
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
- add p12, p12, p11, LSL #(1+PTRSHIFT)
- // p12 = buckets + (mask << 1+PTRSHIFT)
+ add p13, p10, p11, LSL #(1+PTRSHIFT)
+ // p13 = buckets + (mask << 1+PTRSHIFT)
#else
#error Unsupported cache mask storage for ARM64.
+#endif
+ add p12, p10, p12, LSL #(1+PTRSHIFT)
+ // p12 = first probed bucket
+
+ // do {
+4: ldp p17, p9, [x13], #-BUCKET_SIZE // {imp, sel} = *bucket--
+ cmp p9, p1 // if (sel == _cmd)
+ b.eq 2b // goto hit
+ cmp p9, #0 // } while (sel != 0 &&
+ ccmp p13, p12, #0, ne // bucket > first_probed)
+ b.hi 4b
+
+LLookupEnd\Function:
+LLookupRecover\Function:
+ b \MissLabelDynamic
+
+#if CONFIG_USE_PREOPT_CACHES
+#if CACHE_MASK_STORAGE != CACHE_MASK_STORAGE_HIGH_16
+#error config unsupported
+#endif
+LLookupPreopt\Function:
+#if __has_feature(ptrauth_calls)
+ and p10, p11, #0x007ffffffffffffe // p10 = buckets
+ autdb x10, x16 // auth as early as possible
#endif
- // Clone scanning loop to miss instead of hang when cache is corrupt.
- // The slow path may detect any corruption and halt later.
+ // x12 = (_cmd - first_shared_cache_sel)
+ adrp x9, _MagicSelRef@PAGE
+ ldr p9, [x9, _MagicSelRef@PAGEOFF]
+ sub p12, p1, p9
- ldp p17, p9, [x12] // {imp, sel} = *bucket
-1: cmp p9, p1 // if (bucket->sel != _cmd)
- b.ne 2f // scan more
- CacheHit $0 // call or return imp
-
-2: // not hit: p12 = not-hit bucket
- CheckMiss $0 // miss if bucket->sel == 0
- cmp p12, p10 // wrap if bucket == buckets
- b.eq 3f
- ldp p17, p9, [x12, #-BUCKET_SIZE]! // {imp, sel} = *--bucket
- b 1b // loop
-
-LLookupEnd$1:
-LLookupRecover$1:
-3: // double wrap
- JumpMiss $0
+ // w9 = ((_cmd - first_shared_cache_sel) >> hash_shift & hash_mask)
+#if __has_feature(ptrauth_calls)
+ // bits 63..60 of x11 are the number of bits in hash_mask
+ // bits 59..55 of x11 are hash_shift
+
+ lsr x17, x11, #55 // w17 = (hash_shift, ...)
+ lsr w9, w12, w17 // >>= shift
+
+ lsr x17, x11, #60 // w17 = mask_bits
+ mov x11, #0x7fff
+ lsr x11, x11, x17 // p11 = mask (0x7fff >> mask_bits)
+ and x9, x9, x11 // &= mask
+#else
+ // bits 63..53 of x11 are hash_mask
+ // bits 52..48 of x11 are hash_shift
+ lsr x17, x11, #48 // w17 = (hash_shift, hash_mask)
+ lsr w9, w12, w17 // >>= shift
+ and x9, x9, x11, LSR #53 // &= mask
+#endif
+
+ ldr x17, [x10, x9, LSL #3] // x17 == sel_offs | (imp_offs << 32)
+ cmp x12, w17, uxtw
+
+.if \Mode == GETIMP
+ b.ne \MissLabelConstant // cache miss
+ sub x0, x16, x17, LSR #32 // imp = isa - imp_offs
+ SignAsImp x0
+ ret
+.else
+ b.ne 5f // cache miss
+ sub x17, x16, x17, LSR #32 // imp = isa - imp_offs
+.if \Mode == NORMAL
+ br x17
+.elseif \Mode == LOOKUP
+ orr x16, x16, #3 // for instrumentation, note that we hit a constant cache
+ SignAsImp x17
+ ret
+.else
+.abort unhandled mode \Mode
+.endif
+
+5: ldursw x9, [x10, #-8] // offset -8 is the fallback offset
+ add x16, x16, x9 // compute the fallback isa
+ b LLookupStart\Function // lookup again with a new isa
+.endif
+#endif // CONFIG_USE_PREOPT_CACHES
.endmacro
#if SUPPORT_TAGGED_POINTERS
.data
.align 3
- .globl _objc_debug_taggedpointer_classes
-_objc_debug_taggedpointer_classes:
- .fill 16, 8, 0
.globl _objc_debug_taggedpointer_ext_classes
_objc_debug_taggedpointer_ext_classes:
.fill 256, 8, 0
+
+// Dispatch for split tagged pointers takes advantage of the fact that
+// the extended tag classes array immediately precedes the standard
+// tag array. The .alt_entry directive ensures that the two stay
+// together. This is harmless when using non-split tagged pointers.
+ .globl _objc_debug_taggedpointer_classes
+ .alt_entry _objc_debug_taggedpointer_classes
+_objc_debug_taggedpointer_classes:
+ .fill 16, 8, 0
+
+// Look up the class for a tagged pointer in x0, placing it in x16.
+.macro GetTaggedClass
+
+ and x10, x0, #0x7 // x10 = small tag
+ asr x11, x0, #55 // x11 = large tag with 1s filling the top (because bit 63 is 1 on a tagged pointer)
+ cmp x10, #7 // tag == 7?
+ csel x12, x11, x10, eq // x12 = index in tagged pointer classes array, negative for extended tags.
+ // The extended tag array is placed immediately before the basic tag array
+ // so this looks into the right place either way. The sign extension done
+ // by the asr instruction produces the value extended_tag - 256, which produces
+ // the correct index in the extended tagged pointer classes array.
+
+ // x16 = _objc_debug_taggedpointer_classes[x12]
+ adrp x10, _objc_debug_taggedpointer_classes@PAGE
+ add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF
+ ldr x16, [x10, x12, LSL #3]
+
+.endmacro
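+// For example, an extended tag of 8 yields x12 = 8 - 256 = -248, so the load
+// reads 248 slots below _objc_debug_taggedpointer_classes, i.e. slot 8 of the
+// 256-entry extended array that immediately precedes it; a basic tag of 2
+// yields x12 = 2 and reads slot 2 of the basic array.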
#endif
ENTRY _objc_msgSend
b.eq LReturnZero
#endif
ldr p13, [x0] // p13 = isa
- GetClassFromIsa_p16 p13 // p16 = class
+ GetClassFromIsa_p16 p13, 1, x0 // p16 = class
LGetIsaDone:
// calls imp or objc_msgSend_uncached
- CacheLookup NORMAL, _objc_msgSend
+ CacheLookup NORMAL, _objc_msgSend, __objc_msgSend_uncached
#if SUPPORT_TAGGED_POINTERS
LNilOrTagged:
b.eq LReturnZero // nil check
-
- // tagged
- adrp x10, _objc_debug_taggedpointer_classes@PAGE
- add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF
- ubfx x11, x0, #60, #4
- ldr x16, [x10, x11, LSL #3]
- adrp x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGE
- add x10, x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGEOFF
- cmp x10, x16
- b.ne LGetIsaDone
-
- // ext tagged
- adrp x10, _objc_debug_taggedpointer_ext_classes@PAGE
- add x10, x10, _objc_debug_taggedpointer_ext_classes@PAGEOFF
- ubfx x11, x0, #52, #8
- ldr x16, [x10, x11, LSL #3]
+ GetTaggedClass
b LGetIsaDone
// SUPPORT_TAGGED_POINTERS
#endif
b.eq LLookup_Nil
#endif
ldr p13, [x0] // p13 = isa
- GetClassFromIsa_p16 p13 // p16 = class
+ GetClassFromIsa_p16 p13, 1, x0 // p16 = class
LLookup_GetIsaDone:
// returns imp
- CacheLookup LOOKUP, _objc_msgLookup
+ CacheLookup LOOKUP, _objc_msgLookup, __objc_msgLookup_uncached
#if SUPPORT_TAGGED_POINTERS
LLookup_NilOrTagged:
b.eq LLookup_Nil // nil check
-
- // tagged
- adrp x10, _objc_debug_taggedpointer_classes@PAGE
- add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF
- ubfx x11, x0, #60, #4
- ldr x16, [x10, x11, LSL #3]
- adrp x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGE
- add x10, x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGEOFF
- cmp x10, x16
- b.ne LLookup_GetIsaDone
-
-LLookup_ExtTag:
- adrp x10, _objc_debug_taggedpointer_ext_classes@PAGE
- add x10, x10, _objc_debug_taggedpointer_ext_classes@PAGEOFF
- ubfx x11, x0, #52, #8
- ldr x16, [x10, x11, LSL #3]
+ GetTaggedClass
b LLookup_GetIsaDone
// SUPPORT_TAGGED_POINTERS
#endif
LLookup_Nil:
- adrp x17, __objc_msgNil@PAGE
- add x17, x17, __objc_msgNil@PAGEOFF
+ adr x17, __objc_msgNil
+ SignAsImp x17
ret
END_ENTRY _objc_msgLookup
UNWIND _objc_msgSendSuper, NoFrame
ldp p0, p16, [x0] // p0 = real receiver, p16 = class
- // calls imp or objc_msgSend_uncached
- CacheLookup NORMAL, _objc_msgSendSuper
+ b L_objc_msgSendSuper2_body
END_ENTRY _objc_msgSendSuper
ENTRY _objc_msgSendSuper2
UNWIND _objc_msgSendSuper2, NoFrame
+#if __has_feature(ptrauth_calls)
+ ldp x0, x17, [x0] // x0 = real receiver, x17 = class
+ add x17, x17, #SUPERCLASS // x17 = &class->superclass
+ ldr x16, [x17] // x16 = class->superclass
+ AuthISASuper x16, x17, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS
+LMsgSendSuperResume:
+#else
ldp p0, p16, [x0] // p0 = real receiver, p16 = class
ldr p16, [x16, #SUPERCLASS] // p16 = class->superclass
- CacheLookup NORMAL, _objc_msgSendSuper2
+#endif
+L_objc_msgSendSuper2_body:
+ CacheLookup NORMAL, _objc_msgSendSuper2, __objc_msgSend_uncached
END_ENTRY _objc_msgSendSuper2
ENTRY _objc_msgLookupSuper2
UNWIND _objc_msgLookupSuper2, NoFrame
+#if __has_feature(ptrauth_calls)
+ ldp x0, x17, [x0] // x0 = real receiver, x17 = class
+ add x17, x17, #SUPERCLASS // x17 = &class->superclass
+ ldr x16, [x17] // x16 = class->superclass
+ AuthISASuper x16, x17, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS
+LMsgLookupSuperResume:
+#else
ldp p0, p16, [x0] // p0 = real receiver, p16 = class
ldr p16, [x16, #SUPERCLASS] // p16 = class->superclass
- CacheLookup LOOKUP, _objc_msgLookupSuper2
+#endif
+ CacheLookup LOOKUP, _objc_msgLookupSuper2, __objc_msgLookup_uncached
END_ENTRY _objc_msgLookupSuper2
.macro MethodTableLookup
- SAVE_REGS
+ SAVE_REGS MSGSEND
// lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
// receiver and selector already in x0 and x1
// IMP in x0
mov x17, x0
- RESTORE_REGS
+ RESTORE_REGS MSGSEND
.endmacro
UNWIND __objc_msgSend_uncached, FrameWithNoSaves
// THIS IS NOT A CALLABLE C FUNCTION
- // Out-of-band p16 is the class to search
+ // Out-of-band p15 is the class to search
MethodTableLookup
TailCallFunctionPointer x17
UNWIND __objc_msgLookup_uncached, FrameWithNoSaves
// THIS IS NOT A CALLABLE C FUNCTION
- // Out-of-band p16 is the class to search
+ // Out-of-band p15 is the class to search
MethodTableLookup
ret
STATIC_ENTRY _cache_getImp
- GetClassFromIsa_p16 p0
- CacheLookup GETIMP, _cache_getImp
+ GetClassFromIsa_p16 p0, 0
+ CacheLookup GETIMP, _cache_getImp, LGetImpMissDynamic, LGetImpMissConstant
-LGetImpMiss:
+LGetImpMissDynamic:
mov p0, #0
ret
+LGetImpMissConstant:
+ mov p0, p2
+ ret
+
END_ENTRY _cache_getImp
L_method_invoke_small:
// Small methods require a call to handle swizzling.
- SAVE_REGS
+ SAVE_REGS METHOD_INVOKE
mov p0, p1
bl __method_getImplementationAndName
// ARM64_32 packs both return values into x0, with SEL in the high bits and IMP in the low.
#if __LP64__
mov x16, x1
#endif
- RESTORE_REGS
+ RESTORE_REGS METHOD_INVOKE
#if __LP64__
mov x1, x16
#else
*/
#include <TargetConditionals.h>
-#if __x86_64__ && TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC
+#if __x86_64__ && TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST
/********************************************************************
********************************************************************
#define GETIMP 101
#define LOOKUP 102
+#define MSGSEND 200
+#define METHOD_INVOKE 201
+#define METHOD_INVOKE_STRET 202
+
/********************************************************************
*
// for a function call.
//////////////////////////////////////////////////////////////////////
-.macro SAVE_REGS
+.macro SAVE_REGS kind
+.if \kind != MSGSEND && \kind != METHOD_INVOKE && \kind != METHOD_INVOKE_STRET
+.abort Unknown kind.
+.endif
push %rbp
mov %rsp, %rbp
- sub $$0x80+8, %rsp // +8 for alignment
+ sub $0x80, %rsp
movdqa %xmm0, -0x80(%rbp)
push %rax // might be xmm parameter count
movdqa %xmm1, -0x70(%rbp)
push %a1
movdqa %xmm2, -0x60(%rbp)
+.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET
push %a2
+.endif
movdqa %xmm3, -0x50(%rbp)
+.if \kind == MSGSEND || \kind == METHOD_INVOKE
push %a3
+.endif
movdqa %xmm4, -0x40(%rbp)
push %a4
movdqa %xmm5, -0x30(%rbp)
movdqa %xmm6, -0x20(%rbp)
push %a6
movdqa %xmm7, -0x10(%rbp)
+.if \kind == MSGSEND
+ push %r10
+.endif
.endmacro
// SAVE_REGS.
//////////////////////////////////////////////////////////////////////
-.macro RESTORE_REGS
+.macro RESTORE_REGS kind
+.if \kind == MSGSEND
+ pop %r10
+ orq $2, %r10 // for the sake of instrumentation, remember it was the slowpath
+.endif
movdqa -0x80(%rbp), %xmm0
pop %a6
movdqa -0x70(%rbp), %xmm1
movdqa -0x60(%rbp), %xmm2
pop %a4
movdqa -0x50(%rbp), %xmm3
+.if \kind == MSGSEND || \kind == METHOD_INVOKE
pop %a3
+.endif
movdqa -0x40(%rbp), %xmm4
+.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET
pop %a2
+.endif
movdqa -0x30(%rbp), %xmm5
pop %a1
movdqa -0x20(%rbp), %xmm6
.macro MethodTableLookup
- SAVE_REGS
+ SAVE_REGS MSGSEND
// lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
.if $0 == NORMAL
// IMP is now in %rax
movq %rax, %r11
- RESTORE_REGS
+ RESTORE_REGS MSGSEND
.if $0 == NORMAL
test %r11, %r11 // set ne for stret forwarding
L_method_invoke_small:
// Small methods require a call to handle swizzling.
- SAVE_REGS
+ SAVE_REGS METHOD_INVOKE
movq %a2, %a1
call __method_getImplementationAndName
- movq %rdx, %r10
+ movq %rdx, %a2
movq %rax, %r11
- RESTORE_REGS
- movq %r10, %a2
+ RESTORE_REGS METHOD_INVOKE
jmp *%r11
END_ENTRY _method_invoke
L_method_invoke_stret_small:
// Small methods require a call to handle swizzling.
- SAVE_REGS
+ SAVE_REGS METHOD_INVOKE_STRET
movq %a3, %a1
call __method_getImplementationAndName
- movq %rdx, %r10
+ movq %rdx, %a3
movq %rax, %r11
- RESTORE_REGS
- movq %r10, %a3
+ RESTORE_REGS METHOD_INVOKE_STRET
jmp *%r11
END_ENTRY _method_invoke_stret
*/
#include <TargetConditionals.h>
-#if __x86_64__ && !(TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC)
+#if __x86_64__ && !(TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST)
#include "isa.h"
#define GETIMP 101
#define LOOKUP 102
+#define MSGSEND 200
+#define METHOD_INVOKE 201
+#define METHOD_INVOKE_STRET 202
+
/********************************************************************
*
// for a function call.
//////////////////////////////////////////////////////////////////////
-.macro SAVE_REGS
+.macro SAVE_REGS kind
+.if \kind != MSGSEND && \kind != METHOD_INVOKE && \kind != METHOD_INVOKE_STRET
+.abort Unknown kind.
+.endif
push %rbp
mov %rsp, %rbp
- sub $$0x80+8, %rsp // +8 for alignment
+ sub $0x80, %rsp
movdqa %xmm0, -0x80(%rbp)
push %rax // might be xmm parameter count
movdqa %xmm1, -0x70(%rbp)
push %a1
movdqa %xmm2, -0x60(%rbp)
+.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET
push %a2
+.endif
movdqa %xmm3, -0x50(%rbp)
+.if \kind == MSGSEND || \kind == METHOD_INVOKE
push %a3
+.endif
movdqa %xmm4, -0x40(%rbp)
push %a4
movdqa %xmm5, -0x30(%rbp)
movdqa %xmm6, -0x20(%rbp)
push %a6
movdqa %xmm7, -0x10(%rbp)
+.if \kind == MSGSEND
+ push %r10
+.endif
.endmacro
// SAVE_REGS.
//////////////////////////////////////////////////////////////////////
-.macro RESTORE_REGS
+.macro RESTORE_REGS kind
+.if \kind == MSGSEND
+ pop %r10
+ orq $2, %r10 // for the sake of instrumentation, remember it was the slowpath
+.endif
movdqa -0x80(%rbp), %xmm0
pop %a6
movdqa -0x70(%rbp), %xmm1
movdqa -0x60(%rbp), %xmm2
pop %a4
movdqa -0x50(%rbp), %xmm3
+.if \kind == MSGSEND || \kind == METHOD_INVOKE
pop %a3
+.endif
movdqa -0x40(%rbp), %xmm4
+.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET
pop %a2
+.endif
movdqa -0x30(%rbp), %xmm5
pop %a1
movdqa -0x20(%rbp), %xmm6
.macro MethodTableLookup
- SAVE_REGS
+ SAVE_REGS MSGSEND
// lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
.if $0 == NORMAL
// IMP is now in %rax
movq %rax, %r11
- RESTORE_REGS
+ RESTORE_REGS MSGSEND
.if $0 == NORMAL
test %r11, %r11 // set ne for nonstret forwarding
L_method_invoke_small:
// Small methods require a call to handle swizzling.
- SAVE_REGS
+ SAVE_REGS METHOD_INVOKE
movq %a2, %a1
call __method_getImplementationAndName
- movq %rdx, %r10
+ movq %rdx, %a2
movq %rax, %r11
- RESTORE_REGS
- movq %r10, %a2
+ RESTORE_REGS METHOD_INVOKE
jmp *%r11
END_ENTRY _method_invoke
L_method_invoke_stret_small:
// Small methods require a call to handle swizzling.
- SAVE_REGS
+ SAVE_REGS METHOD_INVOKE_STRET
movq %a3, %a1
call __method_getImplementationAndName
- movq %rdx, %r10
+ movq %rdx, %a3
movq %rax, %r11
- RESTORE_REGS
- movq %r10, %a3
+ RESTORE_REGS METHOD_INVOKE_STRET
jmp *%r11
END_ENTRY _method_invoke_stret
class AutoreleasePoolPage;
struct AutoreleasePoolPageData
{
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ struct AutoreleasePoolEntry {
+ uintptr_t ptr: 48;
+ uintptr_t count: 16;
+
+ static const uintptr_t maxCount = 65535; // 2^16 - 1
+ };
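+ // For example, an object autoreleased three times in a row may be stored as
+ // a single entry with ptr = the object and count = 2 (the autoreleases beyond
+ // the first); popping such an entry releases the object count+1 times.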
+ static_assert((AutoreleasePoolEntry){ .ptr = MACH_VM_MAX_ADDRESS }.ptr == MACH_VM_MAX_ADDRESS, "MACH_VM_MAX_ADDRESS doesn't fit into AutoreleasePoolEntry::ptr!");
+#endif
+
magic_t const magic;
__unsafe_unretained id *next;
pthread_t const thread;
#include <map>
#include <execinfo.h>
#include "NSObject-internal.h"
+#include <os/feature_private.h>
+
+extern "C" {
+#include <os/reason_private.h>
+#include <os/variant_private.h>
+}
@interface NSInvocation
- (SEL)selector;
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset = __builtin_offsetof(AutoreleasePoolPageData, child);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset = __builtin_offsetof(AutoreleasePoolPageData, depth);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset = __builtin_offsetof(AutoreleasePoolPageData, hiwat);
+OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_begin_offset = sizeof(AutoreleasePoolPageData);
#if __OBJC2__
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask = (AutoreleasePoolPageData::AutoreleasePoolEntry){ .ptr = ~(uintptr_t)0 }.ptr;
+#else
+OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask = ~(uintptr_t)0;
+#endif
OBJC_EXTERN const uint32_t objc_class_abi_version = OBJC_CLASS_ABI_VERSION_MAX;
#endif
}
+static id _initializeSwiftRefcountingThenCallRetain(id objc);
+static void _initializeSwiftRefcountingThenCallRelease(id objc);
+
+explicit_atomic<id(*)(id)> swiftRetain{&_initializeSwiftRefcountingThenCallRetain};
+explicit_atomic<void(*)(id)> swiftRelease{&_initializeSwiftRefcountingThenCallRelease};
+
+static void _initializeSwiftRefcounting() {
+ void *const token = dlopen("/usr/lib/swift/libswiftCore.dylib", RTLD_LAZY | RTLD_LOCAL);
+ ASSERT(token);
+ swiftRetain.store((id(*)(id))dlsym(token, "swift_retain"), memory_order_relaxed);
+ ASSERT(swiftRetain.load(memory_order_relaxed));
+ swiftRelease.store((void(*)(id))dlsym(token, "swift_release"), memory_order_relaxed);
+ ASSERT(swiftRelease.load(memory_order_relaxed));
+ dlclose(token);
+}
+
+static id _initializeSwiftRefcountingThenCallRetain(id objc) {
+ _initializeSwiftRefcounting();
+ return swiftRetain.load(memory_order_relaxed)(objc);
+}
+
+static void _initializeSwiftRefcountingThenCallRelease(id objc) {
+ _initializeSwiftRefcounting();
+ swiftRelease.load(memory_order_relaxed)(objc);
+}
+
+namespace objc {
+ extern int PageCountWarning;
+}
+
namespace {
+#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
+uint32_t numFaults = 0;
+#endif
+
// The order of these bits is important.
#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
#define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
}
}
+// Call out to the _setWeaklyReferenced method on obj, if implemented.
+static void callSetWeaklyReferenced(id obj) {
+ if (!obj)
+ return;
+
+ Class cls = obj->getIsa();
+
+ if (slowpath(cls->hasCustomRR() && !object_isClass(obj))) {
+ ASSERT(((objc_class *)cls)->isInitializing() || ((objc_class *)cls)->isInitialized());
+ void (*setWeaklyReferenced)(id, SEL) = (void(*)(id, SEL))
+ class_getMethodImplementation(cls, @selector(_setWeaklyReferenced));
+ if ((IMP)setWeaklyReferenced != _objc_msgForward) {
+ (*setWeaklyReferenced)(obj, @selector(_setWeaklyReferenced));
+ }
+ }
+}
+
//
// The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
//
DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
};
template <HaveOld haveOld, HaveNew haveNew,
- CrashIfDeallocating crashIfDeallocating>
+ enum CrashIfDeallocating crashIfDeallocating>
static id
storeWeak(id *location, objc_object *newObj)
{
if (haveNew) {
newObj = (objc_object *)
weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
- crashIfDeallocating);
+ crashIfDeallocating ? CrashIfDeallocating : ReturnNilIfDeallocating);
// weak_register_no_lock returns nil if weak store should be rejected
// Set is-weakly-referenced bit in refcount table.
- if (newObj && !newObj->isTaggedPointer()) {
+ if (!newObj->isTaggedPointerOrNil()) {
newObj->setWeaklyReferenced_nolock();
}
SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
+ // This must be called without the locks held, as it can invoke
+ // arbitrary code. In particular, even if _setWeaklyReferenced
+ // is not implemented, resolveInstanceMethod: may be, and may
+ // call back into the weak reference machinery.
+ callSetWeaklyReferenced((id)newObj);
+
return (id)newObj;
}
retry:
// fixme std::atomic this load
obj = *location;
- if (!obj) return nil;
- if (obj->isTaggedPointer()) return obj;
+ if (obj->isTaggedPointerOrNil()) return obj;
table = &SideTables()[obj];
else {
// Slow case. We must check for +initialize and call it outside
// the lock if necessary in order to avoid deadlocks.
+ // Use lookUpImpOrForwardTryCache so we can avoid the assert in
+ // class_getInstanceMethod, since we intentionally make this
+ // callout with the lock held.
if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
- class_getMethodImplementation(cls, @selector(retainWeakReference));
+ lookUpImpOrForwardTryCache(obj, @selector(retainWeakReference), cls);
if ((IMP)tryRetain == _objc_msgForward) {
result = nil;
}
void
objc_moveWeak(id *dst, id *src)
{
- objc_copyWeak(dst, src);
- objc_destroyWeak(src);
+ id obj;
+ SideTable *table;
+
+retry:
+ obj = *src;
+ if (obj == nil) {
+ *dst = nil;
+ return;
+ }
+
+ table = &SideTables()[obj];
+ table->lock();
+ if (*src != obj) {
+ table->unlock();
+ goto retry;
+ }
+
+ weak_unregister_no_lock(&table->weak_table, obj, src);
+ weak_register_no_lock(&table->weak_table, obj, dst, DontCheckDeallocating);
+ *dst = obj;
*src = nil;
+ table->unlock();
}
static pthread_key_t const key = AUTORELEASE_POOL_KEY;
static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
static size_t const COUNT = SIZE / sizeof(id);
+ static size_t const MAX_FAULTS = 2;
// EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
// pushed and it has never contained any objects. This saves memory
#endif
}
+ void checkTooMuchAutorelease()
+ {
+#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
+ bool objcModeNoFaults = DisableFaults || getpid() == 1 ||
+ !os_variant_has_internal_diagnostics("com.apple.obj-c");
+ if (!objcModeNoFaults) {
+ if (depth+1 >= (uint32_t)objc::PageCountWarning && numFaults < MAX_FAULTS) { // depth is 0 when the first page is allocated
+ os_fault_with_payload(OS_REASON_LIBSYSTEM,
+ OS_REASON_LIBSYSTEM_CODE_FAULT,
+ NULL, 0, "Large Autorelease Pool", 0);
+ numFaults++;
+ }
+ }
+#endif
+ }
+
AutoreleasePoolPage(AutoreleasePoolPage *newParent) :
AutoreleasePoolPageData(begin(),
objc_thread_self(),
newParent,
newParent ? 1+newParent->depth : 0,
newParent ? newParent->hiwat : 0)
- {
+ {
+ if (objc::PageCountWarning != -1) {
+ checkTooMuchAutorelease();
+ }
+
if (parent) {
parent->check();
ASSERT(!parent->child);
{
ASSERT(!full());
unprotect();
- id *ret = next; // faster than `return next-1` because of aliasing
+ id *ret;
+
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ if (!DisableAutoreleaseCoalescing || !DisableAutoreleaseCoalescingLRU) {
+ if (!DisableAutoreleaseCoalescingLRU) {
+ if (!empty() && (obj != POOL_BOUNDARY)) {
+ AutoreleasePoolEntry *topEntry = (AutoreleasePoolEntry *)next - 1;
+ for (uintptr_t offset = 0; offset < 4; offset++) {
+ AutoreleasePoolEntry *offsetEntry = topEntry - offset;
+ if (offsetEntry <= (AutoreleasePoolEntry*)begin() || *(id *)offsetEntry == POOL_BOUNDARY) {
+ break;
+ }
+ if (offsetEntry->ptr == (uintptr_t)obj && offsetEntry->count < AutoreleasePoolEntry::maxCount) {
+ if (offset > 0) {
+ AutoreleasePoolEntry found = *offsetEntry;
+ memmove(offsetEntry, offsetEntry + 1, offset * sizeof(*offsetEntry));
+ *topEntry = found;
+ }
+ topEntry->count++;
+ ret = (id *)topEntry; // need to reset ret
+ goto done;
+ }
+ }
+ }
+ } else {
+ if (!empty() && (obj != POOL_BOUNDARY)) {
+ AutoreleasePoolEntry *prevEntry = (AutoreleasePoolEntry *)next - 1;
+ if (prevEntry->ptr == (uintptr_t)obj && prevEntry->count < AutoreleasePoolEntry::maxCount) {
+ prevEntry->count++;
+ ret = (id *)prevEntry; // need to reset ret
+ goto done;
+ }
+ }
+ }
+ }
+#endif
+ ret = next; // faster than `return next-1` because of aliasing
*next++ = obj;
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ // Make sure obj fits in the bits available for it
+ ASSERT(((AutoreleasePoolEntry *)ret)->ptr == (uintptr_t)obj);
+#endif
+ done:
protect();
return ret;
}
}
page->unprotect();
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ AutoreleasePoolEntry* entry = (AutoreleasePoolEntry*) --page->next;
+
+ // create an obj with the zeroed-out top bits and release that
+ id obj = (id)entry->ptr;
+ int count = (int)entry->count; // grab these before memset
+#else
id obj = *--page->next;
+#endif
memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
page->protect();
if (obj != POOL_BOUNDARY) {
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ // release count+1 times since count is the number of additional
+ // autoreleases beyond the first one
+ for (int i = 0; i < count + 1; i++) {
+ objc_release(obj);
+ }
+#else
objc_release(obj);
+#endif
}
}
public:
static inline id autorelease(id obj)
{
- ASSERT(obj);
- ASSERT(!obj->isTaggedPointer());
+ ASSERT(!obj->isTaggedPointerOrNil());
id *dest __unused = autoreleaseFast(obj);
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || (id)((AutoreleasePoolEntry *)dest)->ptr == obj);
+#else
ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
+#endif
return obj;
}
_objc_inform_now_and_on_crash
("Invalid or prematurely-freed autorelease pool %p. "
"Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
- "Proceeding anyway because the app is old "
- "(SDK version " SDK_FORMAT "). Memory errors are likely.",
- token, FORMAT_SDK(sdkVersion()));
+ "Proceeding anyway because the app is old. Memory errors "
+ "are likely.",
+ token);
}
objc_autoreleasePoolInvalid(token);
}
if (*p == POOL_BOUNDARY) {
_objc_inform("[%p] ################ POOL %p", p, p);
} else {
- _objc_inform("[%p] %#16lx %s",
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ AutoreleasePoolEntry *entry = (AutoreleasePoolEntry *)p;
+ if (entry->count > 0) {
+ id obj = (id)entry->ptr;
+ _objc_inform("[%p] %#16lx %s autorelease count %u",
+ p, (unsigned long)obj, object_getClassName(obj),
+ entry->count + 1);
+ goto done;
+ }
+#endif
+ _objc_inform("[%p] %#16lx %s",
p, (unsigned long)*p, object_getClassName(*p));
+ done:;
}
}
}
_objc_inform("##############");
}
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ __attribute__((noinline, cold))
+ unsigned sumOfExtraReleases()
+ {
+ unsigned sumOfExtraReleases = 0;
+ for (id *p = begin(); p < next; p++) {
+ if (*p != POOL_BOUNDARY) {
+ sumOfExtraReleases += ((AutoreleasePoolEntry *)p)->count;
+ }
+ }
+ return sumOfExtraReleases;
+ }
+#endif
+
__attribute__((noinline, cold))
static void printHiwat()
{
// Ignore high water marks under 256 to suppress noise.
AutoreleasePoolPage *p = hotPage();
uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
- if (mark > p->hiwat && mark > 256) {
+ if (mark > p->hiwat + 256) {
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ unsigned sumOfExtraReleases = 0;
+#endif
for( ; p; p = p->parent) {
p->unprotect();
p->hiwat = mark;
p->protect();
+
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ sumOfExtraReleases += p->sumOfExtraReleases();
+#endif
}
_objc_inform("POOL HIGHWATER: new high water mark of %u "
"pending releases for thread %p:",
mark, objc_thread_self());
+#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
+ if (sumOfExtraReleases > 0) {
+ _objc_inform("POOL HIGHWATER: extra sequential autoreleases of objects: %u",
+ sumOfExtraReleases);
+ }
+#endif
void *stack[128];
int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
NEVER_INLINE id
objc_object::rootRetain_overflow(bool tryRetain)
{
- return rootRetain(tryRetain, true);
+ return rootRetain(tryRetain, RRVariant::Full);
}
NEVER_INLINE uintptr_t
objc_object::rootRelease_underflow(bool performDealloc)
{
- return rootRelease(performDealloc, true);
+ return rootRelease(performDealloc, RRVariant::Full);
}
ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
uintptr_t carry;
- size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
+ size_t refcnt = addc(oldRefcnt, (extra_rc - 1) << SIDE_TABLE_RC_SHIFT, 0, &carry);
if (carry) refcnt = SIDE_TABLE_RC_PINNED;
if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
// Move some retain counts from the side table to the isa field.
// Returns the actual count subtracted, which may be less than the request.
-size_t
+objc_object::SidetableBorrow
objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
{
ASSERT(isa.nonpointer);
RefcountMap::iterator it = table.refcnts.find(this);
if (it == table.refcnts.end() || it->second == 0) {
// Side table retain count is zero. Can't borrow.
- return 0;
+ return { 0, 0 };
}
size_t oldRefcnt = it->second;
size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
ASSERT(oldRefcnt > newRefcnt); // shouldn't underflow
it->second = newRefcnt;
- return delta_rc;
+ return { delta_rc, newRefcnt >> SIDE_TABLE_RC_SHIFT };
}
}
+void
+objc_object::sidetable_clearExtraRC_nolock()
+{
+ ASSERT(isa.nonpointer);
+ SideTable& table = SideTables()[this];
+ RefcountMap::iterator it = table.refcnts.find(this);
+ table.refcnts.erase(it);
+}
+
+
// SUPPORT_NONPOINTER_ISA
#endif
id
-objc_object::sidetable_retain()
+objc_object::sidetable_retain(bool locked)
{
#if SUPPORT_NONPOINTER_ISA
ASSERT(!isa.nonpointer);
#endif
SideTable& table = SideTables()[this];
- table.lock();
+ if (!locked) table.lock();
size_t& refcntStorage = table.refcnts[this];
if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
refcntStorage += SIDE_TABLE_RC_ONE;
return result;
}
+#if OBJC_WEAK_FORMATION_CALLOUT_DEFINED
+// Clients can dlsym() for this symbol to see if an ObjC runtime supporting
+// -_setWeaklyReferenced is present
+OBJC_EXPORT const uintptr_t _objc_has_weak_formation_callout = 0;
+static_assert(SUPPORT_NONPOINTER_ISA, "Weak formation callout must only be defined when nonpointer isa is supported.");
+#else
+static_assert(!SUPPORT_NONPOINTER_ISA, "If weak callout is not present then we must not support nonpointer isas.");
+#endif
void
objc_object::sidetable_setWeaklyReferenced_nolock()
#if SUPPORT_NONPOINTER_ISA
ASSERT(!isa.nonpointer);
#endif
-
+
SideTable& table = SideTables()[this];
-
+
table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
}
// return uintptr_t instead of bool so that the various raw-isa
// -release paths all return zero in eax
uintptr_t
-objc_object::sidetable_release(bool performDealloc)
+objc_object::sidetable_release(bool locked, bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
ASSERT(!isa.nonpointer);
bool do_dealloc = false;
- table.lock();
+ if (!locked) table.lock();
auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
auto &refcnt = it.first->second;
if (it.second) {
id
objc_retain(id obj)
{
- if (!obj) return obj;
- if (obj->isTaggedPointer()) return obj;
+ if (obj->isTaggedPointerOrNil()) return obj;
return obj->retain();
}
void
objc_release(id obj)
{
- if (!obj) return;
- if (obj->isTaggedPointer()) return;
+ if (obj->isTaggedPointerOrNil()) return;
return obj->release();
}
id
objc_autorelease(id obj)
{
- if (!obj) return obj;
- if (obj->isTaggedPointer()) return obj;
+ if (obj->isTaggedPointerOrNil()) return obj;
return obj->autorelease();
}
obj->rootRelease();
}
-
-// Call [cls alloc] or [cls allocWithZone:nil], with appropriate
+// Call [cls alloc] or [cls allocWithZone:nil], with appropriate
// shortcutting optimizations.
static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
}
// Calls [cls allocWithZone:nil].
-id
+id
objc_allocWithZone(Class cls)
{
return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
{
#if __OBJC2__
if (fastpath(cls && !cls->ISA()->hasCustomCore())) {
- return [callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/) init];
+ return [callAlloc(cls, false/*checkNil*/) init];
}
#endif
return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new));
objc_opt_self(id obj)
{
#if __OBJC2__
- if (fastpath(!obj || obj->isTaggedPointer() || !obj->ISA()->hasCustomCore())) {
+ if (fastpath(obj->isTaggedPointerOrNil() || !obj->ISA()->hasCustomCore())) {
return obj;
}
#endif
if (slowpath(!obj)) return NO;
Class cls = obj->getIsa();
if (fastpath(!cls->hasCustomCore())) {
- for (Class tcls = cls; tcls; tcls = tcls->superclass) {
+ for (Class tcls = cls; tcls; tcls = tcls->getSuperclass()) {
if (tcls == otherClass) return YES;
}
return NO;
}
+ (Class)superclass {
- return self->superclass;
+ return self->getSuperclass();
}
- (Class)superclass {
- return [self class]->superclass;
+ return [self class]->getSuperclass();
}
+ (BOOL)isMemberOfClass:(Class)cls {
}
+ (BOOL)isKindOfClass:(Class)cls {
- for (Class tcls = self->ISA(); tcls; tcls = tcls->superclass) {
+ for (Class tcls = self->ISA(); tcls; tcls = tcls->getSuperclass()) {
if (tcls == cls) return YES;
}
return NO;
}
- (BOOL)isKindOfClass:(Class)cls {
- for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
+ for (Class tcls = [self class]; tcls; tcls = tcls->getSuperclass()) {
if (tcls == cls) return YES;
}
return NO;
}
+ (BOOL)isSubclassOfClass:(Class)cls {
- for (Class tcls = self; tcls; tcls = tcls->superclass) {
+ for (Class tcls = self; tcls; tcls = tcls->getSuperclass()) {
if (tcls == cls) return YES;
}
return NO;
}
+ (BOOL)isAncestorOfObject:(NSObject *)obj {
- for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
+ for (Class tcls = [obj class]; tcls; tcls = tcls->getSuperclass()) {
if (tcls == self) return YES;
}
return NO;
+ (BOOL)conformsToProtocol:(Protocol *)protocol {
if (!protocol) return NO;
- for (Class tcls = self; tcls; tcls = tcls->superclass) {
+ for (Class tcls = self; tcls; tcls = tcls->getSuperclass()) {
if (class_conformsToProtocol(tcls, protocol)) return YES;
}
return NO;
- (BOOL)conformsToProtocol:(Protocol *)protocol {
if (!protocol) return NO;
- for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
+ for (Class tcls = [self class]; tcls; tcls = tcls->getSuperclass()) {
if (class_conformsToProtocol(tcls, protocol)) return YES;
}
return NO;
// check isKindOf:
Class cls;
Class protoClass = objc_getClass("Protocol");
- for (cls = object_getClass(other); cls; cls = cls->superclass) {
+ for (cls = object_getClass(other); cls; cls = cls->getSuperclass()) {
if (cls == protoClass) break;
}
if (!cls) return NO;
#if __arm64__
+#include "objc-config.h"
+
#if __LP64__
// true arm64
// note: assumes the imp is not nil
eor $1, $1, $2 // mix SEL into ptrauth modifier
eor $1, $1, $3 // mix isa into ptrauth modifier
- autib $0, $1 // authenticate cached imp
+ autib $0, $1 // authenticate cached imp
ldr xzr, [$0] // crash if authentication failed
paciza $0 // resign cached imp as IMP
.endmacro
+.macro ExtractISA
+ and $0, $1, #ISA_MASK
+#if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_STRIP
+ xpacd $0
+#elif ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
+ mov x10, $2
+ movk x10, #ISA_SIGNING_DISCRIMINATOR, LSL #48
+ autda $0, x10
+#endif
+.endmacro
+
+.macro AuthISASuper dst, addr_mutable, discriminator
+#if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
+ movk \addr_mutable, #\discriminator, LSL #48
+ autda \dst, \addr_mutable
+#elif ISA_SIGNING_AUTH_MODE == ISA_SIGNING_STRIP
+ xpacd \dst
+#endif
+.endmacro
+
+.macro SignAsImp
+ paciza $0
+.endmacro
+
// JOP
#else
// not JOP
.macro AuthAndResignAsIMP
// $0 = cached imp, $1 = address of cached imp, $2 = SEL
eor $0, $0, $3
-.endmacro
+.endmacro
+
+.macro SignAsImp
+.endmacro
+
+.macro ExtractISA
+ and $0, $1, #ISA_MASK
+.endmacro
// not JOP
#endif
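// A C-level sketch of what the ExtractISA macro above computes, assuming the
// ISA_SIGNING_STRIP mode (illustrative only; the runtime performs this in
// assembly, and the signing key and mode are configuration-dependent):
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif
static inline uintptr_t example_extractISA(uintptr_t isaBits)
{
    uintptr_t clsBits = isaBits & ISA_MASK;   // keep only the class-pointer bits
#if __has_feature(ptrauth_calls)
    // Strip (rather than authenticate) any data-key signature on the pointer.
    clsBits = (uintptr_t)ptrauth_strip((void *)clsBits,
                                       ptrauth_key_process_independent_data);
#endif
    return clsBits;
}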
--- /dev/null
+// This file contains stubs matching the symbols previously exported by libobjc
+// when i386 Mac was actually supported. These stubs allow us to tease apart the
+// dependencies to prepare for removing i386 Mac libobjc entirely.
+//
+// This file is not built when building for any other arch/OS combination. When
+// building for i386 Mac, no other source files are built, just this one. This
+// is handled using the Included/Excluded Source File Names settings in Xcode,
+// with arch/OS-specific overrides.
+//
+// rdar://problem/58541885
+
+#pragma GCC visibility push(default)
+const char ___ld_hide_os10_5__objc_class_name_NSObject __asm__("$ld$hide$os10.5$.objc_class_name_NSObject");
+const char ___ld_hide_os10_6__objc_class_name_NSObject __asm__("$ld$hide$os10.6$.objc_class_name_NSObject");
+const char ___ld_hide_os10_7__objc_class_name_NSObject __asm__("$ld$hide$os10.7$.objc_class_name_NSObject");
+const char ___objc_class_name_List __asm__(".objc_class_name_List");
+const char ___objc_class_name_NSObject __asm__(".objc_class_name_NSObject");
+const char ___objc_class_name_Object __asm__(".objc_class_name_Object");
+const char ___objc_class_name_Protocol __asm__(".objc_class_name_Protocol");
+void NXCompareHashTables(void) {}
+void NXCompareMapTables(void) {}
+void NXCopyHashTable(void) {}
+void NXCopyStringBuffer(void) {}
+void NXCopyStringBufferFromZone(void) {}
+void NXCountHashTable(void) {}
+void NXCountMapTable(void) {}
+void NXCreateHashTable(void) {}
+void NXCreateHashTableFromZone(void) {}
+void NXCreateMapTable(void) {}
+void NXCreateMapTableFromZone(void) {}
+void NXEmptyHashTable(void) {}
+void NXFreeHashTable(void) {}
+void NXFreeMapTable(void) {}
+void NXHashGet(void) {}
+void NXHashInsert(void) {}
+void NXHashInsertIfAbsent(void) {}
+void NXHashMember(void) {}
+void NXHashRemove(void) {}
+void NXInitHashState(void) {}
+void NXInitMapState(void) {}
+void NXMapGet(void) {}
+void NXMapInsert(void) {}
+void NXMapMember(void) {}
+void NXMapRemove(void) {}
+void NXNextHashState(void) {}
+void NXNextMapState(void) {}
+void NXNoEffectFree(void) {}
+const char NXObjectMapPrototype;
+void NXPtrHash(void) {}
+void NXPtrIsEqual(void) {}
+const char NXPtrPrototype;
+const char NXPtrStructKeyPrototype;
+const char NXPtrValueMapPrototype;
+void NXReallyFree(void) {}
+void NXResetHashTable(void) {}
+void NXResetMapTable(void) {}
+void NXStrHash(void) {}
+void NXStrIsEqual(void) {}
+const char NXStrPrototype;
+const char NXStrStructKeyPrototype;
+const char NXStrValueMapPrototype;
+void NXUniqueString(void) {}
+void NXUniqueStringNoCopy(void) {}
+void NXUniqueStringWithLength(void) {}
+char _alloc;
+void _class_getIvarMemoryManagement(void) {}
+void _class_isFutureClass(void) {}
+void _class_isSwift(void) {}
+char _copy;
+char _dealloc;
+char _error;
+void _objcInit(void) {}
+void _objc_addWillInitializeClassFunc(void) {}
+void _objc_atfork_child(void) {}
+void _objc_atfork_parent(void) {}
+void _objc_atfork_prepare(void) {}
+void _objc_autoreleasePoolPop(void) {}
+void _objc_autoreleasePoolPrint(void) {}
+void _objc_autoreleasePoolPush(void) {}
+void _objc_deallocOnMainThreadHelper(void) {}
+const char _objc_debug_class_hash;
+const char _objc_empty_cache;
+void _objc_error(void) {}
+void _objc_flush_caches(void) {}
+void _objc_getFreedObjectClass(void) {}
+void _objc_init(void) {}
+void _objc_msgForward(void) {}
+void _objc_msgForward_stret(void) {}
+void _objc_resolve_categories_for_class(void) {}
+void _objc_rootAlloc(void) {}
+void _objc_rootAllocWithZone(void) {}
+void _objc_rootAutorelease(void) {}
+void _objc_rootDealloc(void) {}
+void _objc_rootFinalize(void) {}
+void _objc_rootHash(void) {}
+void _objc_rootInit(void) {}
+void _objc_rootIsDeallocating(void) {}
+void _objc_rootRelease(void) {}
+void _objc_rootReleaseWasZero(void) {}
+void _objc_rootRetain(void) {}
+void _objc_rootRetainCount(void) {}
+void _objc_rootTryRetain(void) {}
+void _objc_rootZone(void) {}
+void _objc_setBadAllocHandler(void) {}
+void _objc_setClassLoader(void) {}
+void _protocol_getMethodTypeEncoding(void) {}
+char _realloc;
+char _zoneAlloc;
+char _zoneCopy;
+char _zoneRealloc;
+void class_addIvar(void) {}
+void class_addMethod(void) {}
+void class_addMethods(void) {}
+void class_addProperty(void) {}
+void class_addProtocol(void) {}
+void class_conformsToProtocol(void) {}
+void class_copyIvarList(void) {}
+void class_copyMethodList(void) {}
+void class_copyPropertyList(void) {}
+void class_copyProtocolList(void) {}
+void class_createInstance(void) {}
+void class_createInstanceFromZone(void) {}
+void class_createInstances(void) {}
+void class_getClassMethod(void) {}
+void class_getClassVariable(void) {}
+void class_getImageName(void) {}
+void class_getInstanceMethod(void) {}
+void class_getInstanceSize(void) {}
+void class_getInstanceVariable(void) {}
+void class_getIvarLayout(void) {}
+void class_getMethodImplementation(void) {}
+void class_getMethodImplementation_stret(void) {}
+void class_getName(void) {}
+void class_getProperty(void) {}
+void class_getSuperclass(void) {}
+void class_getVersion(void) {}
+void class_getWeakIvarLayout(void) {}
+void class_isMetaClass(void) {}
+void class_lookupMethod(void) {}
+void class_nextMethodList(void) {}
+void class_poseAs(void) {}
+void class_removeMethods(void) {}
+void class_replaceMethod(void) {}
+void class_replaceProperty(void) {}
+void class_respondsToMethod(void) {}
+void class_respondsToSelector(void) {}
+void class_setIvarLayout(void) {}
+void class_setSuperclass(void) {}
+void class_setVersion(void) {}
+void class_setWeakIvarLayout(void) {}
+void gdb_class_getClass(void) {}
+void gdb_object_getClass(void) {}
+void imp_getBlock(void) {}
+void imp_implementationWithBlock(void) {}
+void imp_removeBlock(void) {}
+void instrumentObjcMessageSends(void) {}
+void ivar_getName(void) {}
+void ivar_getOffset(void) {}
+void ivar_getTypeEncoding(void) {}
+void method_copyArgumentType(void) {}
+void method_copyReturnType(void) {}
+void method_exchangeImplementations(void) {}
+void method_getArgumentType(void) {}
+void method_getDescription(void) {}
+void method_getImplementation(void) {}
+void method_getName(void) {}
+void method_getNumberOfArguments(void) {}
+void method_getReturnType(void) {}
+void method_getSizeOfArguments(void) {}
+void method_getTypeEncoding(void) {}
+void method_invoke(void) {}
+void method_invoke_stret(void) {}
+void method_setImplementation(void) {}
+void objc_addClass(void) {}
+void objc_addLoadImageFunc(void) {}
+void objc_alloc(void) {}
+void objc_allocWithZone(void) {}
+void objc_alloc_init(void) {}
+void objc_allocateClassPair(void) {}
+void objc_allocateProtocol(void) {}
+void objc_allocate_object(void) {}
+void objc_appRequiresGC(void) {}
+void objc_assertRegisteredThreadWithCollector(void) {}
+void objc_assign_global(void) {}
+void objc_assign_ivar(void) {}
+void objc_assign_strongCast(void) {}
+void objc_assign_threadlocal(void) {}
+void objc_assign_weak(void) {}
+void objc_atomicCompareAndSwapGlobal(void) {}
+void objc_atomicCompareAndSwapGlobalBarrier(void) {}
+void objc_atomicCompareAndSwapInstanceVariable(void) {}
+void objc_atomicCompareAndSwapInstanceVariableBarrier(void) {}
+void objc_atomicCompareAndSwapPtr(void) {}
+void objc_atomicCompareAndSwapPtrBarrier(void) {}
+void objc_autorelease(void) {}
+void objc_autoreleasePoolPop(void) {}
+void objc_autoreleasePoolPush(void) {}
+void objc_autoreleaseReturnValue(void) {}
+void objc_clear_deallocating(void) {}
+void objc_clear_stack(void) {}
+void objc_collect(void) {}
+void objc_collect_init(void) {}
+void objc_collectableZone(void) {}
+void objc_collectingEnabled(void) {}
+void objc_collecting_enabled(void) {}
+void objc_constructInstance(void) {}
+void objc_copyClassList(void) {}
+void objc_copyClassNamesForImage(void) {}
+void objc_copyClassNamesForImageHeader(void) {}
+void objc_copyCppObjectAtomic(void) {}
+void objc_copyImageNames(void) {}
+void objc_copyProtocolList(void) {}
+void objc_copyStruct(void) {}
+void objc_copyWeak(void) {}
+const char objc_debug_autoreleasepoolpage_child_offset;
+const char objc_debug_autoreleasepoolpage_depth_offset;
+const char objc_debug_autoreleasepoolpage_hiwat_offset;
+const char objc_debug_autoreleasepoolpage_magic_offset;
+const char objc_debug_autoreleasepoolpage_next_offset;
+const char objc_debug_autoreleasepoolpage_parent_offset;
+const char objc_debug_autoreleasepoolpage_thread_offset;
+void objc_destroyWeak(void) {}
+void objc_destructInstance(void) {}
+void objc_disposeClassPair(void) {}
+void objc_dumpHeap(void) {}
+void objc_duplicateClass(void) {}
+void objc_enumerationMutation(void) {}
+void objc_exception_extract(void) {}
+void objc_exception_get_functions(void) {}
+void objc_exception_match(void) {}
+void objc_exception_set_functions(void) {}
+void objc_exception_throw(void) {}
+void objc_exception_try_enter(void) {}
+void objc_exception_try_exit(void) {}
+void objc_finalizeOnMainThread(void) {}
+void objc_getAssociatedObject(void) {}
+void objc_getClass(void) {}
+void objc_getClassList(void) {}
+void objc_getClasses(void) {}
+void objc_getFutureClass(void) {}
+void objc_getMetaClass(void) {}
+void objc_getOrigClass(void) {}
+void objc_getProperty(void) {}
+void objc_getProtocol(void) {}
+void objc_getRequiredClass(void) {}
+void objc_initWeak(void) {}
+void objc_initWeakOrNil(void) {}
+void objc_initializeClassPair(void) {}
+void objc_isAuto(void) {}
+void objc_is_finalized(void) {}
+void objc_loadModule(void) {}
+void objc_loadModules(void) {}
+void objc_loadWeak(void) {}
+void objc_loadWeakRetained(void) {}
+void objc_lookUpClass(void) {}
+void objc_memmove_collectable(void) {}
+void objc_moveWeak(void) {}
+void objc_msgSend(void) {}
+void objc_msgSendSuper(void) {}
+void objc_msgSendSuper_stret(void) {}
+void objc_msgSend_fpret(void) {}
+void objc_msgSend_stret(void) {}
+void objc_msgSendv(void) {}
+void objc_msgSendv_fpret(void) {}
+void objc_msgSendv_stret(void) {}
+void objc_opt_class(void) {}
+void objc_opt_isKindOfClass(void) {}
+void objc_opt_new(void) {}
+void objc_opt_respondsToSelector(void) {}
+void objc_opt_self(void) {}
+void objc_read_weak(void) {}
+void objc_registerClassPair(void) {}
+void objc_registerProtocol(void) {}
+void objc_registerThreadWithCollector(void) {}
+void objc_release(void) {}
+void objc_removeAssociatedObjects(void) {}
+void objc_retain(void) {}
+void objc_retainAutorelease(void) {}
+void objc_retainAutoreleaseReturnValue(void) {}
+void objc_retainAutoreleasedReturnValue(void) {}
+void objc_retainBlock(void) {}
+void objc_retain_autorelease(void) {}
+void objc_retainedObject(void) {}
+void objc_setAssociatedObject(void) {}
+void objc_setClassHandler(void) {}
+void objc_setCollectionRatio(void) {}
+void objc_setCollectionThreshold(void) {}
+void objc_setEnumerationMutationHandler(void) {}
+void objc_setForwardHandler(void) {}
+void objc_setHook_getImageName(void) {}
+void objc_setMultithreaded(void) {}
+void objc_setProperty(void) {}
+void objc_setProperty_atomic(void) {}
+void objc_setProperty_atomic_copy(void) {}
+void objc_setProperty_nonatomic(void) {}
+void objc_setProperty_nonatomic_copy(void) {}
+void objc_set_collection_ratio(void) {}
+void objc_set_collection_threshold(void) {}
+void objc_should_deallocate(void) {}
+void objc_startCollectorThread(void) {}
+void objc_start_collector_thread(void) {}
+void objc_storeStrong(void) {}
+void objc_storeWeak(void) {}
+void objc_storeWeakOrNil(void) {}
+void objc_sync_enter(void) {}
+void objc_sync_exit(void) {}
+void objc_sync_try_enter(void) {}
+void objc_unloadModules(void) {}
+void objc_unregisterThreadWithCollector(void) {}
+void objc_unretainedObject(void) {}
+void objc_unretainedPointer(void) {}
+void objc_unsafeClaimAutoreleasedReturnValue(void) {}
+void object_copy(void) {}
+void object_copyFromZone(void) {}
+void object_dispose(void) {}
+void object_getClass(void) {}
+void object_getClassName(void) {}
+void object_getIndexedIvars(void) {}
+void object_getInstanceVariable(void) {}
+void object_getIvar(void) {}
+void object_getMethodImplementation(void) {}
+void object_getMethodImplementation_stret(void) {}
+void object_isClass(void) {}
+void object_realloc(void) {}
+void object_reallocFromZone(void) {}
+void object_setClass(void) {}
+void object_setInstanceVariable(void) {}
+void object_setInstanceVariableWithStrongDefault(void) {}
+void object_setIvar(void) {}
+void object_setIvarWithStrongDefault(void) {}
+void property_copyAttributeList(void) {}
+void property_copyAttributeValue(void) {}
+void property_getAttributes(void) {}
+void property_getName(void) {}
+void protocol_addMethodDescription(void) {}
+void protocol_addProperty(void) {}
+void protocol_addProtocol(void) {}
+void protocol_conformsToProtocol(void) {}
+void protocol_copyMethodDescriptionList(void) {}
+void protocol_copyPropertyList(void) {}
+void protocol_copyPropertyList2(void) {}
+void protocol_copyProtocolList(void) {}
+void protocol_getMethodDescription(void) {}
+void protocol_getName(void) {}
+void protocol_getProperty(void) {}
+void protocol_isEqual(void) {}
+void sel_getName(void) {}
+void sel_getUid(void) {}
+void sel_isEqual(void) {}
+void sel_isMapped(void) {}
+void sel_registerName(void) {}
+void objc_cache_buckets(void) {}
+void objc_cache_bytesForCapacity(void) {}
+void objc_cache_capacity(void) {}
+void objc_cache_occupied(void) {}
+void objc_copyClassesForImage(void) {}
// uintptr_t extraBytes : 1; // allocated with extra bytes
# if __arm64__
-# define ISA_MASK 0x0000000ffffffff8ULL
-# define ISA_MAGIC_MASK 0x000003f000000001ULL
-# define ISA_MAGIC_VALUE 0x000001a000000001ULL
-# define ISA_BITFIELD \
- uintptr_t nonpointer : 1; \
- uintptr_t has_assoc : 1; \
- uintptr_t has_cxx_dtor : 1; \
- uintptr_t shiftcls : 33; /*MACH_VM_MAX_ADDRESS 0x1000000000*/ \
- uintptr_t magic : 6; \
- uintptr_t weakly_referenced : 1; \
- uintptr_t deallocating : 1; \
- uintptr_t has_sidetable_rc : 1; \
- uintptr_t extra_rc : 19
-# define RC_ONE (1ULL<<45)
-# define RC_HALF (1ULL<<18)
+// ARM64 simulators have a larger address space, so use the ARM64e
+// scheme even when simulators build for ARM64-not-e.
+# if __has_feature(ptrauth_calls) || TARGET_OS_SIMULATOR
+# define ISA_MASK 0x007ffffffffffff8ULL
+# define ISA_MAGIC_MASK 0x0000000000000001ULL
+# define ISA_MAGIC_VALUE 0x0000000000000001ULL
+# define ISA_HAS_CXX_DTOR_BIT 0
+# define ISA_BITFIELD \
+ uintptr_t nonpointer : 1; \
+ uintptr_t has_assoc : 1; \
+ uintptr_t weakly_referenced : 1; \
+ uintptr_t shiftcls_and_sig : 52; \
+ uintptr_t has_sidetable_rc : 1; \
+ uintptr_t extra_rc : 8
+# define RC_ONE (1ULL<<56)
+# define RC_HALF (1ULL<<7)
+# else
+# define ISA_MASK 0x0000000ffffffff8ULL
+# define ISA_MAGIC_MASK 0x000003f000000001ULL
+# define ISA_MAGIC_VALUE 0x000001a000000001ULL
+# define ISA_HAS_CXX_DTOR_BIT 1
+# define ISA_BITFIELD \
+ uintptr_t nonpointer : 1; \
+ uintptr_t has_assoc : 1; \
+ uintptr_t has_cxx_dtor : 1; \
+ uintptr_t shiftcls : 33; /*MACH_VM_MAX_ADDRESS 0x1000000000*/ \
+ uintptr_t magic : 6; \
+ uintptr_t weakly_referenced : 1; \
+ uintptr_t unused : 1; \
+ uintptr_t has_sidetable_rc : 1; \
+ uintptr_t extra_rc : 19
+# define RC_ONE (1ULL<<45)
+# define RC_HALF (1ULL<<18)
+# endif
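// A quick sanity check of the layouts above (illustrative only): each packed
// isa must fill exactly 64 bits, and RC_ONE must select the low bit of the
// extra_rc field.
#if __has_feature(ptrauth_calls) || TARGET_OS_SIMULATOR
static_assert(1 + 1 + 1 + 52 + 1 + 8 == 64, "isa bitfield must fill 64 bits");
static_assert(RC_ONE == 1ULL << (64 - 8), "RC_ONE is the low bit of extra_rc");
#else
static_assert(1 + 1 + 1 + 33 + 6 + 1 + 1 + 1 + 19 == 64, "isa bitfield must fill 64 bits");
static_assert(RC_ONE == 1ULL << (64 - 19), "RC_ONE is the low bit of extra_rc");
#endif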
# elif __x86_64__
# define ISA_MASK 0x00007ffffffffff8ULL
# define ISA_MAGIC_MASK 0x001f800000000001ULL
# define ISA_MAGIC_VALUE 0x001d800000000001ULL
+# define ISA_HAS_CXX_DTOR_BIT 1
# define ISA_BITFIELD \
uintptr_t nonpointer : 1; \
uintptr_t has_assoc : 1; \
uintptr_t shiftcls : 44; /*MACH_VM_MAX_ADDRESS 0x7fffffe00000*/ \
uintptr_t magic : 6; \
uintptr_t weakly_referenced : 1; \
- uintptr_t deallocating : 1; \
+ uintptr_t unused : 1; \
uintptr_t has_sidetable_rc : 1; \
uintptr_t extra_rc : 8
# define RC_ONE (1ULL<<56)
# define ISA_INDEX_COUNT (1 << ISA_INDEX_BITS)
# define ISA_INDEX_MAGIC_MASK 0x001E0001
# define ISA_INDEX_MAGIC_VALUE 0x001C0001
+# define ISA_HAS_CXX_DTOR_BIT 1
# define ISA_BITFIELD \
uintptr_t nonpointer : 1; \
uintptr_t has_assoc : 1; \
uintptr_t magic : 4; \
uintptr_t has_cxx_dtor : 1; \
uintptr_t weakly_referenced : 1; \
- uintptr_t deallocating : 1; \
+ uintptr_t unused : 1; \
uintptr_t has_sidetable_rc : 1; \
uintptr_t extra_rc : 7
# define RC_ONE (1ULL<<25)
/* Linker metadata symbols */
// NSObject was in Foundation/CF on macOS < 10.8.
-#if TARGET_OS_OSX
+#if TARGET_OS_OSX && (__x86_64__ || __i386__)
#if __OBJC2__
OBJC_EXPORT const char __objc_nsobject_class_10_5
Old ABI: Set by some compilers. Not used by the runtime.
*/
+// Description of an expected duplicate class name.
+// __DATA,__objc_dupclass stores one of these. Only the main image is
+// consulted for these purposes.
+typedef struct _objc_duplicate_class {
+ uint32_t version;
+ uint32_t flags;
+ const char name[64];
+} objc_duplicate_class;
+#define OBJC_HAS_DUPLICATE_CLASS 1
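// A hedged usage sketch (the class name and field values are illustrative;
// the header above does not define version numbers): an image that knowingly
// ships a duplicate class could emit one of these records into the section
// named in the comment above.
__attribute__((used, section("__DATA,__objc_dupclass")))
static const objc_duplicate_class ExampleDupClass = {
    .version = 1,               // assumed value, for illustration only
    .flags   = 0,
    .name    = "MyDuplicateClass",
};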
/* Properties */
// Extract class pointer from an isa field.
-#if TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC
+#if TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST && !__arm64__
// No simulators use nonpointer isa yet.
#elif __LP64__
# define NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER 1
#endif
+/* The arm64 ABI requires proper casting to ensure arguments are passed
+ * correctly. */
+#if defined(__arm64__) && !__swift__
+# undef OBJC_OLD_DISPATCH_PROTOTYPES
+# define OBJC_OLD_DISPATCH_PROTOTYPES 0
+#endif
/* OBJC_OLD_DISPATCH_PROTOTYPES == 0 enforces the rule that the dispatch
* functions must be cast to an appropriate function pointer type. */
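// A minimal example of the rule described above: with
// OBJC_OLD_DISPATCH_PROTOTYPES == 0, objc_msgSend must be cast to a properly
// typed function pointer before it is called.
#include <objc/message.h>
static id example_makeObject(Class cls)
{
    id obj = ((id (*)(id, SEL))objc_msgSend)((id)cls, sel_registerName("alloc"));
    return ((id (*)(id, SEL))objc_msgSend)(obj, sel_registerName("init"));
}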
# define TrampolinePtrauth
#endif
+// A page of trampolines is as big as the maximum supported page size
+// everywhere except i386. i386 only exists for the watch simulator
+// now, and we know it really only has 4kB pages. Also see comments
+// below about PAGE_SIZE and PAGE_MAX_SIZE.
+#ifdef __i386__
+#define TRAMPOLINE_PAGE_SIZE PAGE_MIN_SIZE
+#else
+#define TRAMPOLINE_PAGE_SIZE PAGE_MAX_SIZE
+#endif
+
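// A sketch of how a trampoline finds its data, based on the "data is -2 pages
// from the trampoline" comments in the trampoline assembly elsewhere in this
// change (illustrative only):
static inline uintptr_t example_trampolineDataSlot(uintptr_t returnAddress)
{
    uintptr_t trampoline = returnAddress - 5;        // the callq is 5 bytes long
    return trampoline - 2 * TRAMPOLINE_PAGE_SIZE;    // data page precedes the text pages
}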
class TrampolinePointerWrapper {
struct TrampolinePointers {
class TrampolineAddress {
void check() {
#if DEBUG
- ASSERT(impl.address() == textSegment + PAGE_MAX_SIZE);
- ASSERT(impl.address() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE
- assert(impl.address() + PAGE_MAX_SIZE ==
+ ASSERT(impl.address() == textSegment + TRAMPOLINE_PAGE_SIZE);
+ ASSERT(impl.address() % PAGE_SIZE == 0); // not TRAMPOLINE_PAGE_SIZE
+ ASSERT(impl.address() + TRAMPOLINE_PAGE_SIZE ==
last.address() + SLOT_SIZE);
ASSERT(last.address()+8 < textSegment + textSegmentSize);
ASSERT((last.address() - start.address()) % SLOT_SIZE == 0);
# if SUPPORT_STRET
- ASSERT(impl_stret.address() == textSegment + 2*PAGE_MAX_SIZE);
- ASSERT(impl_stret.address() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE
- assert(impl_stret.address() + PAGE_MAX_SIZE ==
+ ASSERT(impl_stret.address() == textSegment + 2*TRAMPOLINE_PAGE_SIZE);
+ ASSERT(impl_stret.address() % PAGE_SIZE == 0); // not TRAMPOLINE_PAGE_SIZE
+ ASSERT(impl_stret.address() + TRAMPOLINE_PAGE_SIZE ==
last_stret.address() + SLOT_SIZE);
- assert(start.address() - impl.address() ==
+ ASSERT(start.address() - impl.address() ==
start_stret.address() - impl_stret.address());
- assert(last_stret.address() + SLOT_SIZE <
+ ASSERT(last_stret.address() + SLOT_SIZE <
textSegment + textSegmentSize);
- assert((last_stret.address() - start_stret.address())
+ ASSERT((last_stret.address() - start_stret.address())
% SLOT_SIZE == 0);
# endif
#endif
uintptr_t textSegment() { return get()->textSegment; }
uintptr_t textSegmentSize() { return get()->textSegmentSize; }
- // See comments below about PAGE_SIZE and PAGE_MAX_SIZE.
- uintptr_t dataSize() { return PAGE_MAX_SIZE; }
+ uintptr_t dataSize() { return TRAMPOLINE_PAGE_SIZE; }
uintptr_t impl() { return get()->impl.address(); }
uintptr_t start() { return get()->start.address(); }
// We must take care with our data layout on architectures that support
// multiple page sizes.
//
-// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE.
-// On some platforms this requires additional linker flags.
+// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE,
+// except on i386 which is a weird special case that uses PAGE_MIN_SIZE.
+// The TRAMPOLINE_PAGE_SIZE macro handles this difference. On some platforms,
+// aligning to PAGE_MAX_SIZE requires additional linker flags.
//
-// When we allocate a page group, we use PAGE_MAX_SIZE size.
-// This allows trampoline code to find its data by subtracting PAGE_MAX_SIZE.
+// When we allocate a page group, we use TRAMPOLINE_PAGE_SIZE size.
+// This allows trampoline code to find its data by subtracting TRAMPOLINE_PAGE_SIZE.
//
// When we allocate a page group, we use the process's page alignment.
// This simplifies allocation because we don't need to force greater than
// Payload data: block pointers and free list.
// Bytes parallel with trampoline header code are the fields above or unused
- // uint8_t payloads[PAGE_MAX_SIZE - sizeof(TrampolineBlockPageGroup)]
+ // uint8_t payloads[TRAMPOLINE_PAGE_SIZE - sizeof(TrampolineBlockPageGroup)]
// Code: Mach-O header, then trampoline header followed by trampolines.
// On platforms with struct return we have non-stret trampolines and
// stret trampolines. The stret and non-stret trampolines at a given
// index share the same data page.
- // uint8_t macho[PAGE_MAX_SIZE];
- // uint8_t trampolines[ArgumentModeCount][PAGE_MAX_SIZE];
+ // uint8_t macho[TRAMPOLINE_PAGE_SIZE];
+ // uint8_t trampolines[ArgumentModeCount][TRAMPOLINE_PAGE_SIZE];
// Per-trampoline block data format:
// initial value is 0 while page data is filled sequentially
// Skip over the data area, one page of Mach-O headers,
// and one text page for each mode before this one.
return (uintptr_t)this + Trampolines.dataSize() +
- PAGE_MAX_SIZE * (1 + aMode);
+ TRAMPOLINE_PAGE_SIZE * (1 + aMode);
}
IMP trampoline(int aMode, uintptr_t index) {
.globl __objc_blockTrampolineStart
.globl __objc_blockTrampolineLast
-.align PAGE_SHIFT
+.align 12 /* PAGE_SHIFT */
__objc_blockTrampolineImpl:
movl (%esp), %eax // return address pushed by trampoline
// 4(%esp) is return address pushed by the call site
movl 8(%esp), %ecx // self -> ecx
movl %ecx, 12(%esp) // ecx -> _cmd
- movl -2*PAGE_SIZE-5(%eax), %ecx // block object pointer -> ecx
+ movl -2*4096/*PAGE_SIZE*/-5(%eax), %ecx // block object pointer -> ecx
// trampoline is -5 bytes from the return address
// data is -2 pages from the trampoline
movl %ecx, 8(%esp) // ecx -> self
.globl __objc_blockTrampolineStart_stret
.globl __objc_blockTrampolineLast_stret
-.align PAGE_SHIFT
+.align 12 /* PAGE_SHIFT */
__objc_blockTrampolineImpl_stret:
movl (%esp), %eax // return address pushed by trampoline
// 4(%esp) is return address pushed by the call site
// 8(%esp) is struct-return address
movl 12(%esp), %ecx // self -> ecx
movl %ecx, 16(%esp) // ecx -> _cmd
- movl -3*PAGE_SIZE-5(%eax), %ecx // block object pointer -> ecx
+ movl -3*4096/*PAGE_SIZE*/-5(%eax), %ecx // block object pointer -> ecx
// trampoline is -5 bytes from the return address
// data is -3 pages from the trampoline
movl %ecx, 12(%esp) // ecx -> self
.globl __objc_blockTrampolineStart
.globl __objc_blockTrampolineLast
-.align PAGE_SHIFT
+.align PAGE_MAX_SHIFT
__objc_blockTrampolineImpl:
movq (%rsp), %r10 // read return address pushed by TrampolineEntry's callq
movq %rdi, %rsi // arg1 -> arg2
- movq -2*PAGE_SIZE-5(%r10), %rdi // block object pointer -> arg1
+ movq -2*PAGE_MAX_SIZE-5(%r10), %rdi // block object pointer -> arg1
// trampoline is -5 bytes from the return address
// data is -2 pages from the trampoline
ret // back to TrampolineEntry to preserve CPU's return stack
-.macro TrampolineEntry
+.macro TrampolineEntry1
// This trampoline is 8 bytes long.
// This callq is 5 bytes long.
callq __objc_blockTrampolineImpl
jmp *16(%rdi)
.endmacro
+.macro TrampolineEntry4
+ TrampolineEntry1
+ TrampolineEntry1
+ TrampolineEntry1
+ TrampolineEntry1
+.endmacro
+
+#if PAGE_MAX_SHIFT == 12
+#define TrampolineEntry TrampolineEntry1
+#elif PAGE_MAX_SHIFT == 14
+#define TrampolineEntry TrampolineEntry4
+#else
+#error "unknown PAGE_MAX_SHIFT value"
+#endif
+
.align 5
__objc_blockTrampolineStart:
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry
+
+// The above is 507 entries.
+#if PAGE_MAX_SHIFT == 14
+// With 16kB pages, we need (4096*4-32)/8 = 2044 single entries, or
+// 511 "quad" entries as above. We need 3 more quad entries, then
+// 3 more single entries, and finally a single entry labeled Last.
+ TrampolineEntry
+ TrampolineEntry
+ TrampolineEntry
+ TrampolineEntry1
+ TrampolineEntry1
+ TrampolineEntry1
+__objc_blockTrampolineLast:
+ TrampolineEntry1
+#else
+// With 4kB pages, we need (4096-32)/8 = 508 entries. We have one
+// more at the end with the Last label for a total of 508.
__objc_blockTrampolineLast:
TrampolineEntry
+#endif
.text
.globl __objc_blockTrampolineStart_stret
.globl __objc_blockTrampolineLast_stret
-.align PAGE_SHIFT
+.align PAGE_MAX_SHIFT
__objc_blockTrampolineImpl_stret:
// %rdi -- arg1 -- is address of return value's space. Don't mess with it.
movq (%rsp), %r10 // read return address pushed by TrampolineEntry's callq
movq %rsi, %rdx // arg2 -> arg3
- movq -3*PAGE_SIZE-5(%r10), %rsi // block object pointer -> arg2
+ movq -3*PAGE_MAX_SIZE-5(%r10), %rsi // block object pointer -> arg2
// trampoline is -5 bytes from the return address
// data is -3 pages from the trampoline
ret // back to TrampolineEntry to preserve CPU's return stack
-.macro TrampolineEntry_stret
+.macro TrampolineEntry_stret1
// This trampoline is 8 bytes long.
// This callq is 5 bytes long.
callq __objc_blockTrampolineImpl_stret
jmp *16(%rsi)
.endmacro
+.macro TrampolineEntry_stret4
+ TrampolineEntry_stret1
+ TrampolineEntry_stret1
+ TrampolineEntry_stret1
+ TrampolineEntry_stret1
+.endmacro
+
+#if PAGE_MAX_SHIFT == 12
+#define TrampolineEntry_stret TrampolineEntry_stret1
+#elif PAGE_MAX_SHIFT == 14
+#define TrampolineEntry_stret TrampolineEntry_stret4
+#else
+#error "unknown PAGE_MAX_SHIFT value"
+#endif
+
.align 5
__objc_blockTrampolineStart_stret:
TrampolineEntry_stret
TrampolineEntry_stret
TrampolineEntry_stret
TrampolineEntry_stret
+
+// See the comment on non-stret's Last for why we have additional
+// entries here.
+#if PAGE_MAX_SHIFT == 14
+ TrampolineEntry_stret
+ TrampolineEntry_stret
+ TrampolineEntry_stret
+ TrampolineEntry_stret1
+ TrampolineEntry_stret1
+ TrampolineEntry_stret1
+__objc_blockTrampolineLast_stret:
+ TrampolineEntry_stret1
+#else
__objc_blockTrampolineLast_stret:
TrampolineEntry_stret
+#endif
#endif
#endif
-void cache_init()
-{
-}
-
// !__OBJC2__
#endif
+++ /dev/null
-
-#ifndef _OBJC_CACHE_H
-#define _OBJC_CACHE_H
-
-#include "objc-private.h"
-
-__BEGIN_DECLS
-
-extern void cache_init(void);
-
-extern IMP cache_getImp(Class cls, SEL sel);
-
-extern void cache_fill(Class cls, SEL sel, IMP imp, id receiver);
-
-extern void cache_erase_nolock(Class cls);
-
-extern void cache_delete(Class cls);
-
-extern void cache_collect(bool collectALot);
-
-__END_DECLS
-
-#endif
* objc_msgSend*
* cache_getImp
*
- * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
- * cache_fill (acquires lock)
- * cache_expand (only called from cache_fill)
- * cache_create (only called from cache_expand)
- * bcopy (only called from instrumented cache_expand)
- * flush_caches (acquires lock)
- * cache_flush (only called from cache_fill and flush_caches)
- * cache_collect_free (only called from cache_expand and cache_flush)
+ * Cache readers/writers (hold cacheUpdateLock during access; not PC-checked)
+ * cache_t::copyCacheNolock (caller must hold the lock)
+ * cache_t::eraseNolock (caller must hold the lock)
+ * cache_t::collectNolock (caller must hold the lock)
+ * cache_t::insert (acquires lock)
+ * cache_t::destroy (acquires lock)
*
* UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
* cache_print
#if __OBJC2__
#include "objc-private.h"
-#include "objc-cache.h"
+#if TARGET_OS_OSX
+#include <Cambria/Traps.h>
+#include <Cambria/Cambria.h>
+#endif
+
+#if __arm__ || __x86_64__ || __i386__
+
+// objc_msgSend has few registers available.
+// Cache scan increments and wraps at special end-marking bucket.
+#define CACHE_END_MARKER 1
+
+// Historical fill ratio of 75% (since the new objc runtime was introduced).
+static inline mask_t cache_fill_ratio(mask_t capacity) {
+ return capacity * 3 / 4;
+}
+
+#elif __arm64__ && !__LP64__
+
+// objc_msgSend has lots of registers available.
+// Cache scan decrements. No end marker needed.
+#define CACHE_END_MARKER 0
+
+// Historical fill ratio of 75% (since the new objc runtime was introduced).
+static inline mask_t cache_fill_ratio(mask_t capacity) {
+ return capacity * 3 / 4;
+}
+
+#elif __arm64__ && __LP64__
+
+// objc_msgSend has lots of registers available.
+// Cache scan decrements. No end marker needed.
+#define CACHE_END_MARKER 0
+
+// Allow 87.5% fill ratio in the fast path for all cache sizes.
+// Increasing the cache fill ratio reduces fragmentation and wasted space in
+// imp-caches, at the cost of potentially increasing the average lookup time
+// of a selector by lengthening collision chains. It also shifts the points at
+// which cache tables are resized or reset.
+static inline mask_t cache_fill_ratio(mask_t capacity) {
+ return capacity * 7 / 8;
+}
+
+// Allow 100% cache utilization for smaller cache sizes. This has the same
+// advantages and disadvantages as raising the fill ratio. A very large
+// percentage of caches end up with very few entries, and the worst-case
+// collision chains in small tables are short.
+// NOTE: objc_msgSend properly handles a cache lookup with a full cache.
+#define CACHE_ALLOW_FULL_UTILIZATION 1
+
+#else
+#error unknown architecture
+#endif
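// Worked numbers for the policies above: for a 32-bucket cache the historical
// policy resizes once 24 entries are occupied (75%), while the arm64 LP64
// policy waits until 28 entries (87.5%).
static_assert(32 * 3 / 4 == 24, "75% fill ratio of a 32-bucket cache");
static_assert(32 * 7 / 8 == 28, "87.5% fill ratio of a 32-bucket cache");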
/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
enum {
+#if CACHE_END_MARKER || (__arm64__ && !__LP64__)
+ // When we have a cache end marker it fills a bucket slot, so an initial
+ // cache size of 2 buckets would not be efficient when one of the slots is
+ // always filled with the end marker. So start with a cache size of
+ // 4 buckets.
INIT_CACHE_SIZE_LOG2 = 2,
+#else
+ // Allow an initial bucket size of 2 buckets, since a large number of
+ // classes, especially metaclasses, have very few imps, and we support
+ // the ability to fill 100% of the cache before resizing.
+ INIT_CACHE_SIZE_LOG2 = 1,
+#endif
INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2),
MAX_CACHE_SIZE_LOG2 = 16,
MAX_CACHE_SIZE = (1 << MAX_CACHE_SIZE_LOG2),
+ FULL_UTILIZATION_CACHE_SIZE_LOG2 = 3,
+ FULL_UTILIZATION_CACHE_SIZE = (1 << FULL_UTILIZATION_CACHE_SIZE_LOG2),
};
-static void cache_collect_free(struct bucket_t *data, mask_t capacity);
static int _collecting_in_critical(void);
static void _garbage_make_room(void);
#endif
);
+#if CONFIG_USE_PREOPT_CACHES
+__attribute__((used, section("__DATA_CONST,__objc_scoffs")))
+uintptr_t objc_opt_offsets[__OBJC_OPT_OFFSETS_COUNT];
+#endif
-#if __arm__ || __x86_64__ || __i386__
-// objc_msgSend has few registers available.
-// Cache scan increments and wraps at special end-marking bucket.
-#define CACHE_END_MARKER 1
+#if CACHE_END_MARKER
static inline mask_t cache_next(mask_t i, mask_t mask) {
return (i+1) & mask;
}
-
#elif __arm64__
-// objc_msgSend has lots of registers available.
-// Cache scan decrements. No end marker needed.
-#define CACHE_END_MARKER 0
static inline mask_t cache_next(mask_t i, mask_t mask) {
return i ? i-1 : mask;
}
-
#else
-#error unknown architecture
+#error unexpected configuration
#endif
static inline mask_t cache_hash(SEL sel, mask_t mask)
{
- return (mask_t)(uintptr_t)sel & mask;
-}
-
-cache_t *getCache(Class cls)
-{
- ASSERT(cls);
- return &cls->cache;
+ uintptr_t value = (uintptr_t)sel;
+#if CONFIG_USE_PREOPT_CACHES
+ value ^= value >> 7;
+#endif
+ return (mask_t)(value & mask);
}
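// A worked example of the mixing above (CONFIG_USE_PREOPT_CACHES builds),
// with mask == 0x7:
//   sel 0x1000: (0x1000 ^ (0x1000 >> 7)) & 0x7 == 0x1020 & 0x7 == 0
//   sel 0x1080: (0x1080 ^ (0x1080 >> 7)) & 0x7 == 0x10A1 & 0x7 == 1
// Without the ">> 7" fold both selectors would land in bucket 0, since they
// differ only in bits above the mask.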
#if __arm64__
template<Atomicity atomicity, IMPEncoding impEncoding>
-void bucket_t::set(SEL newSel, IMP newImp, Class cls)
+void bucket_t::set(bucket_t *base, SEL newSel, IMP newImp, Class cls)
{
- ASSERT(_sel.load(memory_order::memory_order_relaxed) == 0 ||
- _sel.load(memory_order::memory_order_relaxed) == newSel);
+ ASSERT(_sel.load(memory_order_relaxed) == 0 ||
+ _sel.load(memory_order_relaxed) == newSel);
static_assert(offsetof(bucket_t,_imp) == 0 &&
offsetof(bucket_t,_sel) == sizeof(void *),
"bucket_t layout doesn't match arm64 bucket_t::set()");
uintptr_t encodedImp = (impEncoding == Encoded
- ? encodeImp(newImp, newSel, cls)
+ ? encodeImp(base, newImp, newSel, cls)
: (uintptr_t)newImp);
// LDP/STP guarantees that all observers get
#else
template<Atomicity atomicity, IMPEncoding impEncoding>
-void bucket_t::set(SEL newSel, IMP newImp, Class cls)
+void bucket_t::set(bucket_t *base, SEL newSel, IMP newImp, Class cls)
{
- ASSERT(_sel.load(memory_order::memory_order_relaxed) == 0 ||
- _sel.load(memory_order::memory_order_relaxed) == newSel);
+ ASSERT(_sel.load(memory_order_relaxed) == 0 ||
+ _sel.load(memory_order_relaxed) == newSel);
// objc_msgSend uses sel and imp with no locks.
// It is safe for objc_msgSend to see new imp but NULL sel
// Therefore we write new imp, wait a lot, then write new sel.
uintptr_t newIMP = (impEncoding == Encoded
- ? encodeImp(newImp, newSel, cls)
+ ? encodeImp(base, newImp, newSel, cls)
: (uintptr_t)newImp);
if (atomicity == Atomic) {
- _imp.store(newIMP, memory_order::memory_order_relaxed);
+ _imp.store(newIMP, memory_order_relaxed);
- if (_sel.load(memory_order::memory_order_relaxed) != newSel) {
+ if (_sel.load(memory_order_relaxed) != newSel) {
#ifdef __arm__
mega_barrier();
- _sel.store(newSel, memory_order::memory_order_relaxed);
+ _sel.store(newSel, memory_order_relaxed);
#elif __x86_64__ || __i386__
- _sel.store(newSel, memory_order::memory_order_release);
+ _sel.store(newSel, memory_order_release);
#else
#error Don't know how to do bucket_t::set on this architecture.
#endif
}
} else {
- _imp.store(newIMP, memory_order::memory_order_relaxed);
- _sel.store(newSel, memory_order::memory_order_relaxed);
+ _imp.store(newIMP, memory_order_relaxed);
+ _sel.store(newSel, memory_order_relaxed);
+ }
+}
+
+#endif
+
+void cache_t::initializeToEmpty()
+{
+ _bucketsAndMaybeMask.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
+ _originalPreoptCache.store(nullptr, std::memory_order_relaxed);
+}
+
+#if CONFIG_USE_PREOPT_CACHES
+/*
+ * The shared cache builder will sometimes have prebuilt an IMP cache
+ * for the class and left a `preopt_cache_t` pointer in _originalPreoptCache.
+ *
+ * However we have this tension:
+ * - when the class is realized, it must have a cache that can't resolve any
+ *   selector until the class is properly initialized, so that every caller
+ *   falls into the slow path and synchronizes with the class initialization;
+ * - we need to remember that cache pointer, and we have no space for it.
+ *
+ * The caches are designed so that preopt_cache::bit_one is set to 1.
+ * We "disguise" the pointer so that it looks like a cache of capacity 1,
+ * with that bit one aliasing the position where the top bit of a SEL in
+ * the bucket_t would live:
+ *
+ * +----------------+----------------+
+ * | IMP | SEL | << a bucket_t
+ * +----------------+----------------+--------------...
+ * preopt_cache_t >>| 1| ...
+ * +----------------+--------------...
+ *
+ * The shared cache guarantees that there's valid memory to read under "IMP"
+ *
+ * This lets us encode the original preoptimized cache pointer during
+ * initialization, and we can reconstruct its original address and install
+ * it back later.
+ */
+void cache_t::initializeToPreoptCacheInDisguise(const preopt_cache_t *cache)
+{
+ // preopt_cache_t::bit_one is 1, which sets the top bit of the aliased SEL;
+ // that bit is never set on any valid selector.
+
+ uintptr_t value = (uintptr_t)cache + sizeof(preopt_cache_t) -
+ (bucket_t::offsetOfSel() + sizeof(SEL));
+
+ _originalPreoptCache.store(nullptr, std::memory_order_relaxed);
+ setBucketsAndMask((bucket_t *)value, 0);
+ _occupied = cache->occupied;
+}
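// Round-trip check of the disguise above: buckets() later returns this same
// value, and disguised_preopt_cache() undoes the encoding:
//   value + (offsetOfSel() + sizeof(SEL)) - sizeof(preopt_cache_t)
//     == (cache + sizeof(preopt_cache_t) - (offsetOfSel() + sizeof(SEL)))
//        + (offsetOfSel() + sizeof(SEL)) - sizeof(preopt_cache_t)
//     == cache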
+
+void cache_t::maybeConvertToPreoptimized()
+{
+ const preopt_cache_t *cache = disguised_preopt_cache();
+
+ if (cache == nil) {
+ return;
}
+
+ if (!cls()->allowsPreoptCaches() ||
+ (cache->has_inlines && !cls()->allowsPreoptInlinedSels())) {
+ if (PrintCaches) {
+ _objc_inform("CACHES: %sclass %s: dropping cache (from %s)",
+ cls()->isMetaClass() ? "meta" : "",
+ cls()->nameForLogging(), "setInitialized");
+ }
+ return setBucketsAndMask(emptyBuckets(), 0);
+ }
+
+ uintptr_t value = (uintptr_t)&cache->entries;
+#if __has_feature(ptrauth_calls)
+ value = (uintptr_t)ptrauth_sign_unauthenticated((void *)value,
+ ptrauth_key_process_dependent_data, (uintptr_t)cls());
+#endif
+ value |= preoptBucketsHashParams(cache) | preoptBucketsMarker;
+ _bucketsAndMaybeMask.store(value, memory_order_relaxed);
+ _occupied = cache->occupied;
+}
+
+void cache_t::initializeToEmptyOrPreoptimizedInDisguise()
+{
+ if (os_fastpath(!DisablePreoptCaches)) {
+ if (!objc::dataSegmentsRanges.inSharedCache((uintptr_t)this)) {
+ if (dyld_shared_cache_some_image_overridden()) {
+ // If the system has roots, then we must disable preoptimized
+ // caches completely. If a class in another image has a
+ // superclass in the root, the offset to the superclass will
+ // be wrong. rdar://problem/61601961
+ cls()->setDisallowPreoptCachesRecursively("roots");
+ }
+ return initializeToEmpty();
+ }
+
+ auto cache = _originalPreoptCache.load(memory_order_relaxed);
+ if (cache) {
+ return initializeToPreoptCacheInDisguise(cache);
+ }
+ }
+
+ return initializeToEmpty();
}
+const preopt_cache_t *cache_t::preopt_cache() const
+{
+ auto addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
+ addr &= preoptBucketsMask;
+#if __has_feature(ptrauth_calls)
+#if __BUILDING_OBJCDT__
+ addr = (uintptr_t)ptrauth_strip((preopt_cache_entry_t *)addr,
+ ptrauth_key_process_dependent_data);
+#else
+ addr = (uintptr_t)ptrauth_auth_data((preopt_cache_entry_t *)addr,
+ ptrauth_key_process_dependent_data, (uintptr_t)cls());
#endif
+#endif
+ return (preopt_cache_t *)(addr - sizeof(preopt_cache_t));
+}
+
+const preopt_cache_t *cache_t::disguised_preopt_cache() const
+{
+ bucket_t *b = buckets();
+ if ((intptr_t)b->sel() >= 0) return nil;
+
+ uintptr_t value = (uintptr_t)b + bucket_t::offsetOfSel() + sizeof(SEL);
+ return (preopt_cache_t *)(value - sizeof(preopt_cache_t));
+}
+
+Class cache_t::preoptFallbackClass() const
+{
+ return (Class)((uintptr_t)cls() + preopt_cache()->fallback_class_offset);
+}
+
+bool cache_t::isConstantOptimizedCache(bool strict, uintptr_t empty_addr) const
+{
+ uintptr_t addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
+ if (addr & preoptBucketsMarker) {
+ return true;
+ }
+ if (strict) {
+ return false;
+ }
+ return mask() == 0 && addr != empty_addr;
+}
+
+bool cache_t::shouldFlush(SEL sel, IMP imp) const
+{
+ // This test isn't backwards: disguised caches aren't "strict"
+ // constant optimized caches
+ if (!isConstantOptimizedCache(/*strict*/true)) {
+ const preopt_cache_t *cache = disguised_preopt_cache();
+ if (cache) {
+ uintptr_t offs = (uintptr_t)sel - (uintptr_t)@selector(🤯);
+ uintptr_t slot = ((offs >> cache->shift) & cache->mask);
+ auto &entry = cache->entries[slot];
+
+ return entry.sel_offs == offs &&
+ (uintptr_t)cls() - entry.imp_offs ==
+ (uintptr_t)ptrauth_strip(imp, ptrauth_key_function_pointer);
+ }
+ }
+
+ return cache_getImp(cls(), sel) == imp;
+}
+
+bool cache_t::isConstantOptimizedCacheWithInlinedSels() const
+{
+ return isConstantOptimizedCache(/* strict */true) && preopt_cache()->has_inlines;
+}
+#endif // CONFIG_USE_PREOPT_CACHES
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
// ensure other threads see buckets contents before buckets pointer
mega_barrier();
- _buckets.store(newBuckets, memory_order::memory_order_relaxed);
-
+ _bucketsAndMaybeMask.store((uintptr_t)newBuckets, memory_order_relaxed);
+
// ensure other threads see new buckets before new mask
mega_barrier();
-
- _mask.store(newMask, memory_order::memory_order_relaxed);
+
+ _maybeMask.store(newMask, memory_order_relaxed);
_occupied = 0;
#elif __x86_64__ || i386
// ensure other threads see buckets contents before buckets pointer
- _buckets.store(newBuckets, memory_order::memory_order_release);
-
+ _bucketsAndMaybeMask.store((uintptr_t)newBuckets, memory_order_release);
+
// ensure other threads see new buckets before new mask
- _mask.store(newMask, memory_order::memory_order_release);
+ _maybeMask.store(newMask, memory_order_release);
_occupied = 0;
#else
#error Don't know how to do setBucketsAndMask on this architecture.
#endif
}
-struct bucket_t *cache_t::emptyBuckets()
-{
- return (bucket_t *)&_objc_empty_cache;
-}
-
-struct bucket_t *cache_t::buckets()
+mask_t cache_t::mask() const
{
- return _buckets.load(memory_order::memory_order_relaxed);
+ return _maybeMask.load(memory_order_relaxed);
}
-mask_t cache_t::mask()
-{
- return _mask.load(memory_order::memory_order_relaxed);
-}
-
-void cache_t::initializeToEmpty()
-{
- bzero(this, sizeof(*this));
- _buckets.store((bucket_t *)&_objc_empty_cache, memory_order::memory_order_relaxed);
-}
-
-#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 || CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
uintptr_t buckets = (uintptr_t)newBuckets;
uintptr_t mask = (uintptr_t)newMask;
-
+
ASSERT(buckets <= bucketsMask);
ASSERT(mask <= maxMask);
-
- _maskAndBuckets.store(((uintptr_t)newMask << maskShift) | (uintptr_t)newBuckets, std::memory_order_relaxed);
- _occupied = 0;
-}
-struct bucket_t *cache_t::emptyBuckets()
-{
- return (bucket_t *)&_objc_empty_cache;
-}
-
-struct bucket_t *cache_t::buckets()
-{
- uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
- return (bucket_t *)(maskAndBuckets & bucketsMask);
+ _bucketsAndMaybeMask.store(((uintptr_t)newMask << maskShift) | (uintptr_t)newBuckets, memory_order_relaxed);
+ _occupied = 0;
}
-mask_t cache_t::mask()
+mask_t cache_t::mask() const
{
- uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
+ uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed);
return maskAndBuckets >> maskShift;
}
-void cache_t::initializeToEmpty()
-{
- bzero(this, sizeof(*this));
- _maskAndBuckets.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
-}
-
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
uintptr_t buckets = (uintptr_t)newBuckets;
unsigned mask = (unsigned)newMask;
-
+
ASSERT(buckets == (buckets & bucketsMask));
ASSERT(mask <= 0xffff);
-
- // The shift amount is equal to the number of leading zeroes in
- // the last 16 bits of mask. Count all the leading zeroes, then
- // subtract to ignore the top half.
- uintptr_t maskShift = __builtin_clz(mask) - (sizeof(mask) * CHAR_BIT - 16);
- ASSERT(mask == (0xffff >> maskShift));
-
- _maskAndBuckets.store(buckets | maskShift, memory_order::memory_order_relaxed);
+
+ _bucketsAndMaybeMask.store(buckets | objc::mask16ShiftBits(mask), memory_order_relaxed);
_occupied = 0;
-
+
ASSERT(this->buckets() == newBuckets);
ASSERT(this->mask() == newMask);
}
-struct bucket_t *cache_t::emptyBuckets()
+mask_t cache_t::mask() const
{
- return (bucket_t *)((uintptr_t)&_objc_empty_cache & bucketsMask);
-}
-
-struct bucket_t *cache_t::buckets()
-{
- uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
- return (bucket_t *)(maskAndBuckets & bucketsMask);
-}
-
-mask_t cache_t::mask()
-{
- uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
+ uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed);
uintptr_t maskShift = (maskAndBuckets & maskMask);
return 0xffff >> maskShift;
}
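// Worked example of the low-4-bit encoding used above: a mask of 0x7 is stored
// as the shift amount 13 (because 0xffff >> 13 == 0x7), which fits in the four
// alignment bits of the buckets pointer; mask() recovers it by shifting 0xffff.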
-void cache_t::initializeToEmpty()
-{
- bzero(this, sizeof(*this));
- _maskAndBuckets.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
-}
-
#else
#error Unknown cache mask storage type.
#endif
-mask_t cache_t::occupied()
+struct bucket_t *cache_t::buckets() const
+{
+ uintptr_t addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
+ return (bucket_t *)(addr & bucketsMask);
+}
+
+mask_t cache_t::occupied() const
{
return _occupied;
}
_occupied++;
}
-unsigned cache_t::capacity()
+unsigned cache_t::capacity() const
{
return mask() ? mask()+1 : 0;
}
+Class cache_t::cls() const
+{
+ return (Class)((uintptr_t)this - offsetof(objc_class, cache));
+}
size_t cache_t::bytesForCapacity(uint32_t cap)
{
return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1;
}
-bucket_t *allocateBuckets(mask_t newCapacity)
+bucket_t *cache_t::allocateBuckets(mask_t newCapacity)
{
// Allocate one extra bucket to mark the end of the list.
// This can't overflow mask_t because newCapacity is a power of 2.
- bucket_t *newBuckets = (bucket_t *)
- calloc(cache_t::bytesForCapacity(newCapacity), 1);
+ bucket_t *newBuckets = (bucket_t *)calloc(bytesForCapacity(newCapacity), 1);
- bucket_t *end = cache_t::endMarker(newBuckets, newCapacity);
+ bucket_t *end = endMarker(newBuckets, newCapacity);
#if __arm__
// End marker's sel is 1 and imp points BEFORE the first bucket.
// This saves an instruction in objc_msgSend.
- end->set<NotAtomic, Raw>((SEL)(uintptr_t)1, (IMP)(newBuckets - 1), nil);
+ end->set<NotAtomic, Raw>(newBuckets, (SEL)(uintptr_t)1, (IMP)(newBuckets - 1), nil);
#else
// End marker's sel is 1 and imp points to the first bucket.
- end->set<NotAtomic, Raw>((SEL)(uintptr_t)1, (IMP)newBuckets, nil);
+ end->set<NotAtomic, Raw>(newBuckets, (SEL)(uintptr_t)1, (IMP)newBuckets, nil);
#endif
if (PrintCaches) recordNewCache(newCapacity);
#else
-bucket_t *allocateBuckets(mask_t newCapacity)
+bucket_t *cache_t::allocateBuckets(mask_t newCapacity)
{
if (PrintCaches) recordNewCache(newCapacity);
- return (bucket_t *)calloc(cache_t::bytesForCapacity(newCapacity), 1);
+ return (bucket_t *)calloc(bytesForCapacity(newCapacity), 1);
}
#endif
+struct bucket_t *cache_t::emptyBuckets()
+{
+ return (bucket_t *)((uintptr_t)&_objc_empty_cache & bucketsMask);
+}
-bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true)
+bucket_t *cache_t::emptyBucketsForCapacity(mask_t capacity, bool allocate)
{
#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
runtimeLock.assertLocked();
#endif
- size_t bytes = cache_t::bytesForCapacity(capacity);
+ size_t bytes = bytesForCapacity(capacity);
// Use _objc_empty_cache if the buckets is small enough.
if (bytes <= EMPTY_BYTES) {
- return cache_t::emptyBuckets();
+ return emptyBuckets();
}
// Use shared empty buckets allocated on the heap.
return emptyBucketsList[index];
}
-
-bool cache_t::isConstantEmptyCache()
+bool cache_t::isConstantEmptyCache() const
{
- return
- occupied() == 0 &&
+ return
+ occupied() == 0 &&
buckets() == emptyBucketsForCapacity(capacity(), false);
}
-bool cache_t::canBeFreed()
+bool cache_t::canBeFreed() const
{
- return !isConstantEmptyCache();
+ return !isConstantEmptyCache() && !isConstantOptimizedCache();
}
ALWAYS_INLINE
setBucketsAndMask(newBuckets, newCapacity - 1);
if (freeOld) {
- cache_collect_free(oldBuckets, oldCapacity);
+ collect_free(oldBuckets, oldCapacity);
}
}
-void cache_t::bad_cache(id receiver, SEL sel, Class isa)
+void cache_t::bad_cache(id receiver, SEL sel)
{
// Log in separate steps in case the logging itself causes a crash.
_objc_inform_now_and_on_crash
("Method cache corrupted. This may be a message to an "
"invalid object, or a memory error somewhere else.");
- cache_t *cache = &isa->cache;
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
- bucket_t *buckets = cache->_buckets.load(memory_order::memory_order_relaxed);
+ bucket_t *b = buckets();
_objc_inform_now_and_on_crash
("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
"mask 0x%x, occupied 0x%x",
receiver ? "receiver" : "unused", receiver,
- sel, isa, cache, buckets,
- cache->_mask.load(memory_order::memory_order_relaxed),
- cache->_occupied);
+ sel, cls(), this, b,
+ _maybeMask.load(memory_order_relaxed),
+ _occupied);
_objc_inform_now_and_on_crash
("%s %zu bytes, buckets %zu bytes",
receiver ? "receiver" : "unused", malloc_size(receiver),
- malloc_size(buckets));
+ malloc_size(b));
#elif (CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 || \
+ CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS || \
CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4)
- uintptr_t maskAndBuckets = cache->_maskAndBuckets.load(memory_order::memory_order_relaxed);
+ uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed);
_objc_inform_now_and_on_crash
("%s %p, SEL %p, isa %p, cache %p, buckets and mask 0x%lx, "
"occupied 0x%x",
receiver ? "receiver" : "unused", receiver,
- sel, isa, cache, maskAndBuckets,
- cache->_occupied);
+ sel, cls(), this, maskAndBuckets, _occupied);
_objc_inform_now_and_on_crash
("%s %zu bytes, buckets %zu bytes",
receiver ? "receiver" : "unused", malloc_size(receiver),
- malloc_size(cache->buckets()));
+ malloc_size(buckets()));
#else
#error Unknown cache mask storage type.
#endif
_objc_inform_now_and_on_crash
("selector '%s'", sel_getName(sel));
_objc_inform_now_and_on_crash
- ("isa '%s'", isa->nameForLogging());
+ ("isa '%s'", cls()->nameForLogging());
_objc_fatal
("Method cache corrupted. This may be a message to an "
"invalid object, or a memory error somewhere else.");
}
-ALWAYS_INLINE
-void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
+void cache_t::insert(SEL sel, IMP imp, id receiver)
{
-#if CONFIG_USE_CACHE_LOCK
- cacheUpdateLock.assertLocked();
-#else
runtimeLock.assertLocked();
+
+ // Never cache before +initialize is done
+ if (slowpath(!cls()->isInitialized())) {
+ return;
+ }
+
+ if (isConstantOptimizedCache()) {
+ _objc_fatal("cache_t::insert() called with a preoptimized cache for %s",
+ cls()->nameForLogging());
+ }
+
+#if DEBUG_TASK_THREADS
+ return _collecting_in_critical();
+#else
+#if CONFIG_USE_CACHE_LOCK
+ mutex_locker_t lock(cacheUpdateLock);
#endif
- ASSERT(sel != 0 && cls->isInitialized());
+ ASSERT(sel != 0 && cls()->isInitialized());
- // Use the cache as-is if it is less than 3/4 full
+ // Use the cache as-is until we exceed our expected fill ratio.
mask_t newOccupied = occupied() + 1;
unsigned oldCapacity = capacity(), capacity = oldCapacity;
if (slowpath(isConstantEmptyCache())) {
if (!capacity) capacity = INIT_CACHE_SIZE;
reallocate(oldCapacity, capacity, /* freeOld */false);
}
- else if (fastpath(newOccupied + CACHE_END_MARKER <= capacity / 4 * 3)) {
- // Cache is less than 3/4 full. Use it as-is.
+ else if (fastpath(newOccupied + CACHE_END_MARKER <= cache_fill_ratio(capacity))) {
+ // Cache is less than 3/4 or 7/8 full. Use it as-is.
+ }
+#if CACHE_ALLOW_FULL_UTILIZATION
+ else if (capacity <= FULL_UTILIZATION_CACHE_SIZE && newOccupied + CACHE_END_MARKER <= capacity) {
+ // Allow 100% cache utilization for small buckets. Use it as-is.
}
+#endif
else {
capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;
if (capacity > MAX_CACHE_SIZE) {
mask_t i = begin;
// Scan for the first unused slot and insert there.
- // There is guaranteed to be an empty slot because the
- // minimum size is 4 and we resized at 3/4 full.
+ // There is guaranteed to be an empty slot.
do {
if (fastpath(b[i].sel() == 0)) {
incrementOccupied();
- b[i].set<Atomic, Encoded>(sel, imp, cls);
+ b[i].set<Atomic, Encoded>(b, sel, imp, cls());
return;
}
if (b[i].sel() == sel) {
}
} while (fastpath((i = cache_next(i, m)) != begin));
- cache_t::bad_cache(receiver, (SEL)sel, cls);
+ bad_cache(receiver, (SEL)sel);
+#endif // !DEBUG_TASK_THREADS
}
-void cache_fill(Class cls, SEL sel, IMP imp, id receiver)
+void cache_t::copyCacheNolock(objc_imp_cache_entry *buffer, int len)
{
- runtimeLock.assertLocked();
-
-#if !DEBUG_TASK_THREADS
- // Never cache before +initialize is done
- if (cls->isInitialized()) {
- cache_t *cache = getCache(cls);
#if CONFIG_USE_CACHE_LOCK
- mutex_locker_t lock(cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
+#else
+ runtimeLock.assertLocked();
#endif
- cache->insert(cls, sel, imp, receiver);
+ int wpos = 0;
+
+#if CONFIG_USE_PREOPT_CACHES
+ if (isConstantOptimizedCache()) {
+ auto cache = preopt_cache();
+ auto mask = cache->mask;
+ uintptr_t sel_base = objc_opt_offsets[OBJC_OPT_METHODNAME_START];
+ uintptr_t imp_base = (uintptr_t)&cache->entries;
+
+ for (uintptr_t index = 0; index <= mask && wpos < len; index++) {
+ auto &ent = cache->entries[index];
+ if (~ent.sel_offs) {
+ buffer[wpos].sel = (SEL)(sel_base + ent.sel_offs);
+ buffer[wpos].imp = (IMP)(imp_base - ent.imp_offs);
+ wpos++;
+ }
+ }
+ return;
}
-#else
- _collecting_in_critical();
#endif
+ {
+ bucket_t *buckets = this->buckets();
+ uintptr_t count = capacity();
+
+ for (uintptr_t index = 0; index < count && wpos < len; index++) {
+ if (buckets[index].sel()) {
+ buffer[wpos].imp = buckets[index].imp(buckets, cls());
+ buffer[wpos].sel = buckets[index].sel();
+ wpos++;
+ }
+ }
+ }
}
-
// Reset this entire cache to the uncached lookup by reallocating it.
// This must not shrink the cache - that breaks the lock-free scheme.
-void cache_erase_nolock(Class cls)
+void cache_t::eraseNolock(const char *func)
{
#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
runtimeLock.assertLocked();
#endif
- cache_t *cache = getCache(cls);
-
- mask_t capacity = cache->capacity();
- if (capacity > 0 && cache->occupied() > 0) {
- auto oldBuckets = cache->buckets();
+ if (isConstantOptimizedCache()) {
+ auto c = cls();
+ if (PrintCaches) {
+ _objc_inform("CACHES: %sclass %s: dropping and disallowing preopt cache (from %s)",
+ c->isMetaClass() ? "meta" : "",
+ c->nameForLogging(), func);
+ }
+ setBucketsAndMask(emptyBuckets(), 0);
+ c->setDisallowPreoptCaches();
+ } else if (occupied() > 0) {
+ auto capacity = this->capacity();
+ auto oldBuckets = buckets();
auto buckets = emptyBucketsForCapacity(capacity);
- cache->setBucketsAndMask(buckets, capacity - 1); // also clears occupied
- cache_collect_free(oldBuckets, capacity);
+ setBucketsAndMask(buckets, capacity - 1); // also clears occupied
+ collect_free(oldBuckets, capacity);
}
}
-void cache_delete(Class cls)
+void cache_t::destroy()
{
#if CONFIG_USE_CACHE_LOCK
mutex_locker_t lock(cacheUpdateLock);
#else
runtimeLock.assertLocked();
#endif
- if (cls->cache.canBeFreed()) {
- if (PrintCaches) recordDeadCache(cls->cache.capacity());
- free(cls->cache.buckets());
+ if (canBeFreed()) {
+ if (PrintCaches) recordDeadCache(capacity());
+ free(buckets());
}
}
static bool shouldUseRestartableRanges = true;
#endif
-void cache_init()
+void cache_t::init()
{
#if HAVE_TASK_RESTARTABLE_RANGES
mach_msg_type_number_t count = 0;
continue;
// Find out where thread is executing
+#if TARGET_OS_OSX
+ if (oah_is_current_process_translated()) {
+ kern_return_t ret = objc_thread_get_rip(threads[count], (uint64_t*)&pc);
+ if (ret != KERN_SUCCESS) {
+ pc = PC_SENTINEL;
+ }
+ } else {
+ pc = _get_pc_for_thread (threads[count]);
+ }
+#else
pc = _get_pc_for_thread (threads[count]);
+#endif
// Check for bad status, and if so, assume the worst (can't collect)
if (pc == PC_SENTINEL)
/***********************************************************************
-* cache_collect_free. Add the specified malloc'd memory to the list
+* cache_t::collect_free. Add the specified malloc'd memory to the list
* of them to free at some later point.
* size is used for the collection threshold. It does not have to be
* precisely the block's size.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
-static void cache_collect_free(bucket_t *data, mask_t capacity)
+void cache_t::collect_free(bucket_t *data, mask_t capacity)
{
#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
_garbage_make_room ();
garbage_byte_size += cache_t::bytesForCapacity(capacity);
garbage_refs[garbage_count++] = data;
- cache_collect(false);
+ cache_t::collectNolock(false);
}
* collectALot tries harder to free memory.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
-void cache_collect(bool collectALot)
+void cache_t::collectNolock(bool collectALot)
{
#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
// DEBUG_TASK_THREADS
#endif
+OBJC_EXPORT bucket_t * objc_cache_buckets(const cache_t * cache) {
+ return cache->buckets();
+}
+
+#if CONFIG_USE_PREOPT_CACHES
+
+OBJC_EXPORT const preopt_cache_t * _Nonnull objc_cache_preoptCache(const cache_t * _Nonnull cache) {
+ return cache->preopt_cache();
+}
+
+OBJC_EXPORT bool objc_cache_isConstantOptimizedCache(const cache_t * _Nonnull cache, bool strict, uintptr_t empty_addr) {
+ return cache->isConstantOptimizedCache(strict, empty_addr);
+}
+
+OBJC_EXPORT unsigned objc_cache_preoptCapacity(const cache_t * _Nonnull cache) {
+ return cache->preopt_cache()->capacity();
+}
+
+OBJC_EXPORT Class _Nonnull objc_cache_preoptFallbackClass(const cache_t * _Nonnull cache) {
+ return cache->preoptFallbackClass();
+}
+
+#endif
+
+OBJC_EXPORT size_t objc_cache_bytesForCapacity(uint32_t cap) {
+ return cache_t::bytesForCapacity(cap);
+}
+
+OBJC_EXPORT uint32_t objc_cache_occupied(const cache_t * _Nonnull cache) {
+ return cache->occupied();
+}
+
+OBJC_EXPORT unsigned objc_cache_capacity(const struct cache_t * _Nonnull cache) {
+ return cache->capacity();
+}
// __OBJC2__
#endif
ASSERT(cls->isMetaClass());
SEL resolve_sel = @selector(resolveClassMethod:);
- if (!lookUpImpOrNil(inst, resolve_sel, cls)) {
+ if (!lookUpImpOrNilTryCache(inst, resolve_sel, cls)) {
// Resolver not implemented.
return;
}
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveClassMethod adds to self->ISA() a.k.a. cls
- IMP imp = lookUpImpOrNil(inst, sel, cls);
+ IMP imp = lookUpImpOrNilTryCache(inst, sel, cls);
if (resolved && PrintResolving) {
if (imp) {
_objc_inform("RESOLVE: method %c[%s %s] "
{
SEL resolve_sel = @selector(resolveInstanceMethod:);
- if (! lookUpImpOrNil(cls, resolve_sel, cls->ISA())) {
+ if (! lookUpImpOrNilTryCache(cls, resolve_sel, cls->ISA())) {
// Resolver not implemented.
return;
}
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveInstanceMethod adds to self a.k.a. cls
- IMP imp = lookUpImpOrNil(inst, sel, cls);
+ IMP imp = lookUpImpOrNilTryCache(inst, sel, cls);
if (resolved && PrintResolving) {
if (imp) {
// try [nonMetaClass resolveClassMethod:sel]
// and [cls resolveInstanceMethod:sel]
_class_resolveClassMethod(inst, sel, cls);
- if (!lookUpImpOrNil(inst, sel, cls)) {
+ if (!lookUpImpOrNilTryCache(inst, sel, cls)) {
_class_resolveInstanceMethod(inst, sel, cls);
}
}
void *object_getIndexedIvars(id obj)
{
// ivars are tacked onto the end of the object
- if (!obj) return nil;
- if (obj->isTaggedPointer()) return nil;
+ if (obj->isTaggedPointerOrNil()) return nil;
return ((char *) obj) + obj->ISA()->alignedInstanceSize();
}
#include "objc-private.h"
#include "objc-abi.h"
#include <objc/message.h>
+#if !TARGET_OS_WIN32
+#include <os/linker_set.h>
+#endif
/***********************************************************************
* Information about multi-thread support:
// weakly-referenced object has an un-+initialized isa.
// Unresolved future classes are not so protected.
if (!cls->isFuture() && !cls->isInitialized()) {
- // use lookUpImpOrNil to indirectly provoke +initialize
+ // use lookUpImpOrNilTryCache to indirectly provoke +initialize
// to avoid duplicating the code to actually send +initialize
- lookUpImpOrNil(nil, @selector(initialize), cls, LOOKUP_INITIALIZE);
+ lookUpImpOrNilTryCache(nil, @selector(initialize), cls, LOOKUP_INITIALIZE);
}
return obj->changeIsa(cls);
// Preflight the hasAutomaticIvars check
// because _class_getClassForIvar() may need to take locks.
bool hasAutomaticIvars = NO;
- for (Class c = cls; c; c = c->superclass) {
+ for (Class c = cls; c; c = c->getSuperclass()) {
if (c->hasAutomaticIvars()) {
hasAutomaticIvars = YES;
break;
static ALWAYS_INLINE
void _object_setIvar(id obj, Ivar ivar, id value, bool assumeStrong)
{
- if (!obj || !ivar || obj->isTaggedPointer()) return;
+ if (!ivar || obj->isTaggedPointerOrNil()) return;
ptrdiff_t offset;
objc_ivar_memory_management_t memoryManagement;
id object_getIvar(id obj, Ivar ivar)
{
- if (!obj || !ivar || obj->isTaggedPointer()) return nil;
+ if (!ivar || obj->isTaggedPointerOrNil()) return nil;
ptrdiff_t offset;
objc_ivar_memory_management_t memoryManagement;
{
Ivar ivar = nil;
- if (obj && name && !obj->isTaggedPointer()) {
+ if (name && !obj->isTaggedPointerOrNil()) {
if ((ivar = _class_getVariable(obj->ISA(), name))) {
_object_setIvar(obj, ivar, (id)value, assumeStrong);
}
Ivar object_getInstanceVariable(id obj, const char *name, void **value)
{
- if (obj && name && !obj->isTaggedPointer()) {
+ if (name && !obj->isTaggedPointerOrNil()) {
Ivar ivar;
if ((ivar = class_getInstanceVariable(obj->ISA(), name))) {
if (value) *value = (void *)object_getIvar(obj, ivar);
// Call cls's dtor first, then superclasses's dtors.
- for ( ; cls; cls = cls->superclass) {
+ for ( ; cls; cls = cls->getSuperclass()) {
if (!cls->hasCxxDtor()) return;
dtor = (void(*)(id))
lookupMethodInClassAndLoadCache(cls, SEL_cxx_destruct);
**********************************************************************/
void object_cxxDestruct(id obj)
{
- if (!obj) return;
- if (obj->isTaggedPointer()) return;
+ if (obj->isTaggedPointerOrNil()) return;
object_cxxDestructFromClass(obj, obj->ISA());
}
id (*ctor)(id);
Class supercls;
- supercls = cls->superclass;
+ supercls = cls->getSuperclass();
// Call superclasses' ctors first, if any.
if (supercls && supercls->hasCxxCtor()) {
}
if (fastpath((*ctor)(obj))) return obj; // ctor called and succeeded - ok
- supercls = cls->superclass; // this reload avoids a spill on the stack
+ supercls = cls->getSuperclass(); // this reload avoids a spill on the stack
// This class's ctor was called and failed.
// Call superclasses's dtors to clean up.
**********************************************************************/
void fixupCopiedIvars(id newObject, id oldObject)
{
- for (Class cls = oldObject->ISA(); cls; cls = cls->superclass) {
+ for (Class cls = oldObject->ISA(); cls; cls = cls->getSuperclass()) {
if (cls->hasAutomaticIvars()) {
// Use alignedInstanceStart() because unaligned bytes at the start
// of this class's ivars are not represented in the layout bitmap.
// inst is an instance of cls or a subclass thereof, or nil if none is known.
// Non-nil inst is faster in some cases. See lookUpImpOrForward() for details.
-NEVER_INLINE BOOL
+NEVER_INLINE __attribute__((flatten)) BOOL
class_respondsToSelector_inst(id inst, SEL sel, Class cls)
{
// Avoids +initialize because it historically did so.
// We're not returning a callable IMP anyway.
- return sel && cls && lookUpImpOrNil(inst, sel, cls, LOOKUP_RESOLVER);
+ return sel && cls && lookUpImpOrNilTryCache(inst, sel, cls, LOOKUP_RESOLVER);
}
return class_getMethodImplementation(cls, sel);
}
+__attribute__((flatten))
IMP class_getMethodImplementation(Class cls, SEL sel)
{
IMP imp;
if (!cls || !sel) return nil;
- imp = lookUpImpOrNil(nil, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER);
+ lockdebug_assert_no_locks_locked_except({ &loadMethodLock });
+
+ imp = lookUpImpOrNilTryCache(nil, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER);
// Translate forwarding function to C-callable external version
if (!imp) {
Class class_getSuperclass(Class cls)
{
if (!cls) return nil;
- return cls->superclass;
+ return cls->getSuperclass();
}
BOOL class_isMetaClass(Class cls)
const header_info *newHeader = _headerForClass(newCls);
const char *oldName = oldHeader ? oldHeader->fname() : "??";
const char *newName = newHeader ? newHeader->fname() : "??";
+ const objc_duplicate_class **_dupi = NULL;
+
+ LINKER_SET_FOREACH(_dupi, const objc_duplicate_class **, "__objc_dupclass") {
+ const objc_duplicate_class *dupi = *_dupi;
+
+ if (strcmp(dupi->name, name) == 0) {
+ return;
+ }
+ }
(DebugDuplicateClasses ? _objc_fatal : _objc_inform)
("Class %s is implemented in both %s (%p) and %s (%p). "
#include <TargetConditionals.h>
-// Define __OBJC2__ for the benefit of our asm files.
-#ifndef __OBJC2__
-# if TARGET_OS_OSX && !TARGET_OS_IOSMAC && __i386__
- // old ABI
-# else
-# define __OBJC2__ 1
-# endif
-#endif
-
// Avoid the !NDEBUG double negative.
#if !NDEBUG
# define DEBUG 1
#endif
// Define SUPPORT_ZONES=1 to enable malloc zone support in NXHashTable.
-#if !(TARGET_OS_OSX || TARGET_OS_IOSMAC)
+#if !(TARGET_OS_OSX || TARGET_OS_MACCATALYST)
# define SUPPORT_ZONES 0
#else
# define SUPPORT_ZONES 1
// Define SUPPORT_TAGGED_POINTERS=1 to enable tagged pointer objects
// Be sure to edit tagged pointer SPI in objc-internal.h as well.
-#if !(__OBJC2__ && __LP64__)
+#if !__LP64__
# define SUPPORT_TAGGED_POINTERS 0
#else
# define SUPPORT_TAGGED_POINTERS 1
// Define SUPPORT_MSB_TAGGED_POINTERS to use the MSB
// as the tagged pointer marker instead of the LSB.
// Be sure to edit tagged pointer SPI in objc-internal.h as well.
-#if !SUPPORT_TAGGED_POINTERS || (TARGET_OS_OSX || TARGET_OS_IOSMAC)
+#if !SUPPORT_TAGGED_POINTERS || ((TARGET_OS_OSX || TARGET_OS_MACCATALYST) && __x86_64__)
# define SUPPORT_MSB_TAGGED_POINTERS 0
#else
# define SUPPORT_MSB_TAGGED_POINTERS 1
// Define SUPPORT_PACKED_ISA=1 on platforms that store the class in the isa
// field as a maskable pointer with other data around it.
#if (!__LP64__ || TARGET_OS_WIN32 || \
- (TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC))
+ (TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST && !__arm64__))
# define SUPPORT_PACKED_ISA 0
#else
# define SUPPORT_PACKED_ISA 1
// Define SUPPORT_ZEROCOST_EXCEPTIONS to use "zero-cost" exceptions for OBJC2.
// Be sure to edit objc-exception.h as well (objc_add/removeExceptionHandler)
-#if !__OBJC2__ || (defined(__arm__) && __USING_SJLJ_EXCEPTIONS__)
+#if defined(__arm__) && __USING_SJLJ_EXCEPTIONS__
# define SUPPORT_ZEROCOST_EXCEPTIONS 0
#else
# define SUPPORT_ZEROCOST_EXCEPTIONS 1
# define SUPPORT_MESSAGE_LOGGING 1
#endif
+// Define SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS to combine consecutive pointers to the same object in autorelease pools
+#if !__LP64__
+# define SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS 0
+#else
+# define SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS 1
+#endif
+
// Define HAVE_TASK_RESTARTABLE_RANGES to enable usage of
// task_restartable_ranges_synchronize()
#if TARGET_OS_SIMULATOR || defined(__i386__) || defined(__arm__) || !TARGET_OS_MAC
// because objc-class.h is public and objc-config.h is not.
//#define OBJC_INSTRUMENTED
-// In __OBJC2__, the runtimeLock is a mutex always held
-// hence the cache lock is redundant and can be elided.
+// The runtimeLock is a mutex that is always held, hence the cache lock
+// is redundant and can be elided.
//
// If the runtime lock ever becomes a rwlock again,
// the cache lock would need to be used again
-#if __OBJC2__
#define CONFIG_USE_CACHE_LOCK 0
-#else
-#define CONFIG_USE_CACHE_LOCK 1
-#endif
// Determine how the method cache stores IMPs.
#define CACHE_IMP_ENCODING_NONE 1 // Method cache contains raw IMP.
#define CACHE_MASK_STORAGE_OUTLINED 1
#define CACHE_MASK_STORAGE_HIGH_16 2
#define CACHE_MASK_STORAGE_LOW_4 3
+#define CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS 4
#if defined(__arm64__) && __LP64__
+#if TARGET_OS_OSX || TARGET_OS_SIMULATOR
+#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
+#else
#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_HIGH_16
+#endif
#elif defined(__arm64__) && !__LP64__
#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_LOW_4
#else
#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_OUTLINED
#endif
+// Constants used for signing/authing isas. This doesn't quite belong
+// here, but the asm files can't import other headers.
+#define ISA_SIGNING_DISCRIMINATOR 0x6AE1
+#define ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS 0xB5AB
+
+#define ISA_SIGNING_KEY ptrauth_key_process_independent_data
+
+// ISA signing authentication modes. Set ISA_SIGNING_AUTH_MODE to one
+// of these to choose how ISAs are authenticated.
+#define ISA_SIGNING_STRIP 1 // Strip the signature whenever reading an ISA.
+#define ISA_SIGNING_AUTH 2 // Authenticate the signature on all ISAs.
+
+
+// ISA signing modes. Set ISA_SIGNING_SIGN_MODE to one of these to
+// choose how ISAs are signed.
+#define ISA_SIGNING_SIGN_NONE 1 // Sign no ISAs.
+#define ISA_SIGNING_SIGN_ONLY_SWIFT 2 // Only sign ISAs of Swift objects.
+#define ISA_SIGNING_SIGN_ALL 3 // Sign all ISAs.
+
+#if __has_feature(ptrauth_objc_isa_strips) || __has_feature(ptrauth_objc_isa_signs) || __has_feature(ptrauth_objc_isa_authenticates)
+# if __has_feature(ptrauth_objc_isa_authenticates)
+# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_AUTH
+# else
+# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_STRIP
+# endif
+# if __has_feature(ptrauth_objc_isa_signs)
+# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_ALL
+# else
+# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_NONE
+# endif
+#else
+# if __has_feature(ptrauth_objc_isa)
+# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_AUTH
+# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_ALL
+# else
+# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_STRIP
+# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_NONE
+# endif
+#endif
+
+// When set, an unsigned superclass pointer is treated as Nil, which
+// will treat the class as if its superclass was weakly linked and
+// not loaded, and cause uses of the class to resolve to Nil.
+#define SUPERCLASS_SIGNING_TREAT_UNSIGNED_AS_NIL 0
+
+#if defined(__arm64__) && TARGET_OS_IOS && !TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST
+#define CONFIG_USE_PREOPT_CACHES 1
+#else
+#define CONFIG_USE_PREOPT_CACHES 0
+#endif
+
+// When set to 1, small methods in the shared cache have a direct
+// offset to a selector. When set to 0, small methods in the shared
+// cache have the same format as other small methods, with an offset
+// to a selref.
+#define CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS 1
+
#endif
OPTION( DebugPoolAllocation, OBJC_DEBUG_POOL_ALLOCATION, "halt when autorelease pools are popped out of order, and allow heap debuggers to track autorelease pools")
OPTION( DebugDuplicateClasses, OBJC_DEBUG_DUPLICATE_CLASSES, "halt when multiple classes with the same name are present")
OPTION( DebugDontCrash, OBJC_DEBUG_DONT_CRASH, "halt the process by exiting instead of crashing")
+OPTION( DebugPoolDepth, OBJC_DEBUG_POOL_DEPTH, "log fault when at least a set number of autorelease pages has been allocated")
OPTION( DisableVtables, OBJC_DISABLE_VTABLES, "disable vtable dispatch")
OPTION( DisablePreopt, OBJC_DISABLE_PREOPTIMIZATION, "disable preoptimization courtesy of dyld shared cache")
OPTION( DisableTaggedPointerObfuscation, OBJC_DISABLE_TAG_OBFUSCATION, "disable obfuscation of tagged pointers")
OPTION( DisableNonpointerIsa, OBJC_DISABLE_NONPOINTER_ISA, "disable non-pointer isa fields")
OPTION( DisableInitializeForkSafety, OBJC_DISABLE_INITIALIZE_FORK_SAFETY, "disable safety checks for +initialize after fork")
+OPTION( DisableFaults, OBJC_DISABLE_FAULTS, "disable os faults")
+OPTION( DisablePreoptCaches, OBJC_DISABLE_PREOPTIMIZED_CACHES, "disable preoptimized caches")
+OPTION( DisableAutoreleaseCoalescing, OBJC_DISABLE_AUTORELEASE_COALESCING, "disable coalescing of autorelease pool pointers")
+OPTION( DisableAutoreleaseCoalescingLRU, OBJC_DISABLE_AUTORELEASE_COALESCING_LRU, "disable coalescing of autorelease pool pointers using look back N strategy")
Class cls;
for (cls = exception->getIsa();
cls != nil;
- cls = cls->superclass)
+ cls = cls->getSuperclass())
{
if (cls == catch_cls) return 1;
}
private:
uintptr_t storage;
public:
+ UnsignedInitializer(uint32_t offset) {
+ storage = (uintptr_t)&_mh_dylib_header + offset;
+ }
+
void operator () () const {
using Initializer = void(*)();
Initializer init =
extern category_t * const *_getObjc2CategoryList2(const headerType *mhdr, size_t *count);
extern category_t * const *_getObjc2NonlazyCategoryList(const headerType *mhdr, size_t *count);
extern UnsignedInitializer *getLibobjcInitializers(const headerType *mhdr, size_t *count);
+extern uint32_t *getLibobjcInitializerOffsets(const headerType *hi, size_t *count);
static inline void
foreach_data_segment(const headerType *mhdr,
seg = (const segmentType *)((char *)seg + seg->cmdsize);
}
- // enumerate __DATA* segments
+ // enumerate __DATA* and __AUTH* segments
seg = (const segmentType *) (mhdr + 1);
for (unsigned long i = 0; i < mhdr->ncmds; i++) {
if (seg->cmd == SEGMENT_CMD &&
- segnameStartsWith(seg->segname, "__DATA"))
+ (segnameStartsWith(seg->segname, "__DATA") ||
+ segnameStartsWith(seg->segname, "__AUTH")))
{
code(seg, slide);
}
GETSECT(_getObjc2ProtocolRefs, protocol_t *, "__objc_protorefs");
GETSECT(getLibobjcInitializers, UnsignedInitializer, "__objc_init_func");
+uint32_t *getLibobjcInitializerOffsets(const headerType *mhdr, size_t *outCount) {
+ unsigned long byteCount = 0;
+ uint32_t *offsets = (uint32_t *)getsectiondata(mhdr, "__TEXT", "__objc_init_offs", &byteCount);
+ if (outCount) *outCount = byteCount / sizeof(uint32_t);
+ return offsets;
+}
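An illustrative sketch (not part of the patch) of how these offsets might be consumed, pairing getLibobjcInitializerOffsets() with the UnsignedInitializer(uint32_t offset) constructor added earlier; the mhdr argument is assumed to be libobjc's own mach header, since that constructor resolves offsets against _mh_dylib_header:

    size_t count = 0;
    uint32_t *offsets = getLibobjcInitializerOffsets(mhdr, &count);
    for (size_t i = 0; i < count; i++) {
        UnsignedInitializer init(offsets[i]);  // storage = &_mh_dylib_header + offset
        init();                                // invoke the static initializer
    }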
objc_image_info *
_getObjcImageInfo(const headerType *mhdr, size_t *outBytes)
OBJC_EXPORT uintptr_t objc_debug_taggedpointer_obfuscator
OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0);
+#if OBJC_SPLIT_TAGGED_POINTERS
+OBJC_EXPORT uint8_t objc_debug_tag60_permutations[8];
+#endif
+
// tag_slot = (obj >> slot_shift) & slot_mask
OBJC_EXPORT unsigned int objc_debug_taggedpointer_slot_shift
OBJC_EXPORT unsigned int objc_debug_taggedpointer_ext_payload_rshift
OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0, 2.0);
+OBJC_EXPORT uintptr_t objc_debug_constant_cfstring_tag_bits
+ OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 6.0);
+
#endif
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+#if __OBJC2__
+OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 6.0);
+#endif
__END_DECLS
{
if (cls->isRootClass() || cls->isRootMetaclass()) return true;
- Class rootCls = cls->ISA()->ISA()->superclass;
+ Class rootCls = cls->ISA()->ISA()->getSuperclass();
- IMP rootImp = lookUpImpOrNil(rootCls, @selector(initialize), rootCls->ISA());
- IMP imp = lookUpImpOrNil(cls, @selector(initialize), cls->ISA());
+ IMP rootImp = lookUpImpOrNilTryCache(rootCls, @selector(initialize), rootCls->ISA());
+ IMP imp = lookUpImpOrNilTryCache(cls, @selector(initialize), cls->ISA());
return (imp == nil || imp == (IMP)&objc_noop_imp || imp == rootImp);
}
// Make sure super is done initializing BEFORE beginning to initialize cls.
// See note about deadlock above.
- supercls = cls->superclass;
+ supercls = cls->getSuperclass();
if (supercls && !supercls->isInitialized()) {
initializeNonMetaClass(supercls);
}
#include <mach-o/loader.h>
#include <dispatch/dispatch.h>
+// Include NSObject.h only if we're ObjC. Module imports get unhappy
+// otherwise.
+#if __OBJC__
+#include <objc/NSObject.h>
+#endif
// Termination reasons in the OS_REASON_OBJC namespace.
#define OBJC_EXIT_REASON_UNSPECIFIED 1
// The runtime's class structure will never grow beyond this.
#define OBJC_MAX_CLASS_SIZE (32*sizeof(void*))
+// Private objc_setAssociatedObject policy modifier. When an object is
+// destroyed, associated objects attached to that object that are marked with
+// this will be released after all associated objects not so marked.
+//
+// In addition, such associations are not removed when calling
+// objc_removeAssociatedObjects.
+//
+// NOTE: This should be used sparingly. Performance will be poor when a single
+// object has more than a few (deliberately vague) associated objects marked
+// with this flag. If you're not sure if you should use this, you should not use
+// this!
+#define _OBJC_ASSOCIATION_SYSTEM_OBJECT (1 << 16)
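A minimal usage sketch, assuming the modifier is meant to be OR'd into the ordinary objc_setAssociatedObject() policy argument; the key and the helper value below are hypothetical:

    static char kSystemAssocKey;  // hypothetical association key
    // Released after all non-system associations when obj is destroyed,
    // and not removed by objc_removeAssociatedObjects().
    objc_setAssociatedObject(obj, &kSystemAssocKey, helper,
                             OBJC_ASSOCIATION_RETAIN_NONATOMIC | _OBJC_ASSOCIATION_SYSTEM_OBJECT);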
__BEGIN_DECLS
objc_imp_cache_entry *_Nullable
class_copyImpCache(Class _Nonnull cls, int * _Nullable outCount)
OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
+
+OBJC_EXPORT
+unsigned long
+sel_hash(SEL _Nullable sel)
+ OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 6.0);
#endif
+
// Plainly-implemented GC barriers. Rosetta used to use these.
OBJC_EXPORT id _Nullable
objc_assign_strongCast_generic(id _Nullable value, id _Nullable * _Nonnull dest)
_objc_setClassLoader(BOOL (* _Nonnull newClassLoader)(const char * _Nonnull))
OBJC2_UNAVAILABLE;
-#if !(TARGET_OS_OSX && !TARGET_OS_IOSMAC && __i386__)
+#if !(TARGET_OS_OSX && !TARGET_OS_MACCATALYST && __i386__)
// Add a class copy fixup handler. The name is a misnomer, as
// multiple calls will install multiple handlers. Older versions
// of the Swift runtime call it by name, and it's only used by Swift
unsigned int * _Nullable outCount)
OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0);
+/**
+ * Returns all the classes within a library.
+ *
+ * @param image The path of the library or framework you are inquiring about.
+ * @param outCount The number of classes returned.
+ *
+ * @return An array of Class objects.
+ */
+
+OBJC_EXPORT Class _Nonnull * _Nullable
+objc_copyClassesForImage(const char * _Nonnull image,
+ unsigned int * _Nullable outCount)
+ OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 4.0);
+
+
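A minimal usage sketch, assuming image takes the same image path string as objc_copyClassNamesForImage and that the returned array is malloc'd and owned by the caller (as the "copy" naming convention suggests):

    unsigned int count = 0;
    Class *classes = objc_copyClassesForImage("/usr/lib/libobjc.A.dylib", &count);
    for (unsigned int i = 0; i < count; i++) {
        printf("%s\n", class_getName(classes[i]));
    }
    free(classes);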
// Tagged pointer objects.
#if __LP64__
OBJC_TAG_NSMethodSignature = 20,
OBJC_TAG_UTTypeRecord = 21,
+ // When using the split tagged pointer representation
+ // (OBJC_SPLIT_TAGGED_POINTERS), this is the first tag where
+ // the tag and payload are unobfuscated. All tags from here to
+ // OBJC_TAG_Last52BitPayload are unobfuscated. The shared cache
+ // builder is able to construct these as long as the low bit is
+ // not set (i.e. even-numbered tags).
+ OBJC_TAG_FirstUnobfuscatedSplitTag = 136, // 128 + 8, first ext tag with high bit set
+
+ OBJC_TAG_Constant_CFString = 136,
+
OBJC_TAG_First60BitPayload = 0,
OBJC_TAG_Last60BitPayload = 6,
OBJC_TAG_First52BitPayload = 8,
- OBJC_TAG_Last52BitPayload = 263,
+ OBJC_TAG_Last52BitPayload = 263,
OBJC_TAG_RESERVED_264 = 264
};
// Don't use the values below. Use the declarations above.
-#if (TARGET_OS_OSX || TARGET_OS_IOSMAC) && __x86_64__
+#if __arm64__
+// ARM64 uses a new tagged pointer scheme where normal tags are in
+// the low bits, extended tags are in the high bits, and half of the
+// extended tag space is reserved for unobfuscated payloads.
+# define OBJC_SPLIT_TAGGED_POINTERS 1
+#else
+# define OBJC_SPLIT_TAGGED_POINTERS 0
+#endif
+
+#if (TARGET_OS_OSX || TARGET_OS_MACCATALYST) && __x86_64__
// 64-bit Mac - tag bit is LSB
# define OBJC_MSB_TAGGED_POINTERS 0
#else
# define OBJC_MSB_TAGGED_POINTERS 1
#endif
-#define _OBJC_TAG_INDEX_MASK 0x7
+#define _OBJC_TAG_INDEX_MASK 0x7UL
+
+#if OBJC_SPLIT_TAGGED_POINTERS
+#define _OBJC_TAG_SLOT_COUNT 8
+#define _OBJC_TAG_SLOT_MASK 0x7UL
+#else
// array slot includes the tag bit itself
#define _OBJC_TAG_SLOT_COUNT 16
-#define _OBJC_TAG_SLOT_MASK 0xf
+#define _OBJC_TAG_SLOT_MASK 0xfUL
+#endif
#define _OBJC_TAG_EXT_INDEX_MASK 0xff
// array slot has no extra bits
#define _OBJC_TAG_EXT_SLOT_COUNT 256
#define _OBJC_TAG_EXT_SLOT_MASK 0xff
-#if OBJC_MSB_TAGGED_POINTERS
+#if OBJC_SPLIT_TAGGED_POINTERS
+# define _OBJC_TAG_MASK (1UL<<63)
+# define _OBJC_TAG_INDEX_SHIFT 0
+# define _OBJC_TAG_SLOT_SHIFT 0
+# define _OBJC_TAG_PAYLOAD_LSHIFT 1
+# define _OBJC_TAG_PAYLOAD_RSHIFT 4
+# define _OBJC_TAG_EXT_MASK (_OBJC_TAG_MASK | 0x7UL)
+# define _OBJC_TAG_NO_OBFUSCATION_MASK ((1UL<<62) | _OBJC_TAG_EXT_MASK)
+# define _OBJC_TAG_CONSTANT_POINTER_MASK \
+ ~(_OBJC_TAG_EXT_MASK | ((uintptr_t)_OBJC_TAG_EXT_SLOT_MASK << _OBJC_TAG_EXT_SLOT_SHIFT))
+# define _OBJC_TAG_EXT_INDEX_SHIFT 55
+# define _OBJC_TAG_EXT_SLOT_SHIFT 55
+# define _OBJC_TAG_EXT_PAYLOAD_LSHIFT 9
+# define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12
+#elif OBJC_MSB_TAGGED_POINTERS
# define _OBJC_TAG_MASK (1UL<<63)
# define _OBJC_TAG_INDEX_SHIFT 60
# define _OBJC_TAG_SLOT_SHIFT 60
# define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12
#endif
+// Map of tags to obfuscated tags.
extern uintptr_t objc_debug_taggedpointer_obfuscator;
+#if OBJC_SPLIT_TAGGED_POINTERS
+extern uint8_t objc_debug_tag60_permutations[8];
+
+static inline uintptr_t _objc_basicTagToObfuscatedTag(uintptr_t tag) {
+ return objc_debug_tag60_permutations[tag];
+}
+
+static inline uintptr_t _objc_obfuscatedTagToBasicTag(uintptr_t tag) {
+ for (unsigned i = 0; i < 7; i++)
+ if (objc_debug_tag60_permutations[i] == tag)
+ return i;
+ return 7;
+}
+#endif
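Assuming objc_debug_tag60_permutations holds a permutation of the basic tags 0..7 (which the default-to-7 lookup in _objc_obfuscatedTagToBasicTag relies on), the two helpers are inverses of each other; an illustrative sanity check:

    for (uintptr_t tag = 0; tag < 8; tag++)
        assert(_objc_obfuscatedTagToBasicTag(_objc_basicTagToObfuscatedTag(tag)) == tag);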
+
static inline void * _Nonnull
_objc_encodeTaggedPointer(uintptr_t ptr)
{
- return (void *)(objc_debug_taggedpointer_obfuscator ^ ptr);
+ uintptr_t value = (objc_debug_taggedpointer_obfuscator ^ ptr);
+#if OBJC_SPLIT_TAGGED_POINTERS
+ if ((value & _OBJC_TAG_NO_OBFUSCATION_MASK) == _OBJC_TAG_NO_OBFUSCATION_MASK)
+ return (void *)ptr;
+ uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
+ uintptr_t permutedTag = _objc_basicTagToObfuscatedTag(basicTag);
+ value &= ~(_OBJC_TAG_INDEX_MASK << _OBJC_TAG_INDEX_SHIFT);
+ value |= permutedTag << _OBJC_TAG_INDEX_SHIFT;
+#endif
+ return (void *)value;
+}
+
+static inline uintptr_t
+_objc_decodeTaggedPointer_noPermute(const void * _Nullable ptr)
+{
+ uintptr_t value = (uintptr_t)ptr;
+#if OBJC_SPLIT_TAGGED_POINTERS
+ if ((value & _OBJC_TAG_NO_OBFUSCATION_MASK) == _OBJC_TAG_NO_OBFUSCATION_MASK)
+ return value;
+#endif
+ return value ^ objc_debug_taggedpointer_obfuscator;
}
static inline uintptr_t
_objc_decodeTaggedPointer(const void * _Nullable ptr)
{
- return (uintptr_t)ptr ^ objc_debug_taggedpointer_obfuscator;
+ uintptr_t value = _objc_decodeTaggedPointer_noPermute(ptr);
+#if OBJC_SPLIT_TAGGED_POINTERS
+ uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
+
+ value &= ~(_OBJC_TAG_INDEX_MASK << _OBJC_TAG_INDEX_SHIFT);
+ value |= _objc_obfuscatedTagToBasicTag(basicTag) << _OBJC_TAG_INDEX_SHIFT;
+#endif
+ return value;
}
-static inline bool
+static inline bool
_objc_taggedPointersEnabled(void)
{
extern uintptr_t objc_debug_taggedpointer_mask;
return ((uintptr_t)ptr & _OBJC_TAG_MASK) == _OBJC_TAG_MASK;
}
+static inline bool
+_objc_isTaggedPointerOrNil(const void * _Nullable ptr)
+{
+ // This function exists so that clang can turn the check into a
+ // comparison with NULL where appropriate; it turns out clang
+ // often cannot do so without this helper.
+ return !ptr || ((uintptr_t)ptr & _OBJC_TAG_MASK) == _OBJC_TAG_MASK;
+}
+
static inline objc_tag_index_t
_objc_getTaggedPointerTag(const void * _Nullable ptr)
{
_objc_getTaggedPointerValue(const void * _Nullable ptr)
{
// ASSERT(_objc_isTaggedPointer(ptr));
- uintptr_t value = _objc_decodeTaggedPointer(ptr);
+ uintptr_t value = _objc_decodeTaggedPointer_noPermute(ptr);
uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
if (basicTag == _OBJC_TAG_INDEX_MASK) {
return (value << _OBJC_TAG_EXT_PAYLOAD_LSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_RSHIFT;
_objc_getTaggedPointerSignedValue(const void * _Nullable ptr)
{
// ASSERT(_objc_isTaggedPointer(ptr));
- uintptr_t value = _objc_decodeTaggedPointer(ptr);
+ uintptr_t value = _objc_decodeTaggedPointer_noPermute(ptr);
uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
if (basicTag == _OBJC_TAG_INDEX_MASK) {
return ((intptr_t)value << _OBJC_TAG_EXT_PAYLOAD_LSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_RSHIFT;
}
}
+# if OBJC_SPLIT_TAGGED_POINTERS
+static inline void * _Nullable
+_objc_getTaggedPointerRawPointerValue(const void * _Nullable ptr) {
+ return (void *)((uintptr_t)ptr & _OBJC_TAG_CONSTANT_POINTER_MASK);
+}
+# endif
+
// OBJC_HAVE_TAGGED_POINTERS
#endif
OBJC_EXPORT BOOL _class_isFutureClass(Class _Nullable cls)
OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0, 2.0);
+/// Returns true if the class is an ABI stable Swift class. (Despite
+/// the name, this does NOT return true for Swift classes built with
+/// Swift versions prior to 5.0.)
+OBJC_EXPORT BOOL _class_isSwift(Class _Nullable cls)
+ OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 5.0);
// API to only be called by root classes like NSObject or NSProxy
OBJC_EXPORT void _objc_addWillInitializeClassFunc(_objc_func_willInitializeClass _Nonnull func, void * _Nullable context)
OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0);
+// Replicate the conditionals in objc-config.h for packed isa, indexed isa, and preopt caches
+#if __ARM_ARCH_7K__ >= 2 || (__arm64__ && !__LP64__) || \
+ !(!__LP64__ || TARGET_OS_WIN32 || \
+ (TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST && !__arm64__))
+OBJC_EXPORT const uintptr_t _objc_has_weak_formation_callout;
+#define OBJC_WEAK_FORMATION_CALLOUT_DEFINED 1
+#else
+#define OBJC_WEAK_FORMATION_CALLOUT_DEFINED 0
+#endif
+
+#if defined(__arm64__) && TARGET_OS_IOS && !TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST
+#define CONFIG_USE_PREOPT_CACHES 1
+#else
+#define CONFIG_USE_PREOPT_CACHES 0
+#endif
+
+
+#if __OBJC2__
+// Helper function for objc4 tests only! Do not call this yourself
+// for any reason ever.
+OBJC_EXPORT void _method_setImplementationRawUnsafe(Method _Nonnull m, IMP _Nonnull imp)
+ OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 5.0);
+#endif
+
// API to only be called by classes that provide their own reference count storage
OBJC_EXPORT void
_objc_deallocOnMainThreadHelper(void * _Nullable context)
OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0, 2.0);
+#if __OBJC__
+// Declarations for internal methods used for custom weak reference
+// implementations. These declarations ensure that the compiler knows
+// to exclude these methods from NS_DIRECT_MEMBERS. Do NOT implement
+// these methods unless you really know what you're doing.
+@interface NSObject ()
+- (BOOL)_tryRetain;
+- (BOOL)_isDeallocating;
+@end
+#endif
+
// On async versus sync deallocation and the _dealloc2main flag
//
// Theory:
#define _OBJC_SUPPORTED_INLINE_REFCNT(_rc_ivar) _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, 0)
#define _OBJC_SUPPORTED_INLINE_REFCNT_WITH_DEALLOC2MAIN(_rc_ivar) _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, 1)
+
+// C cache_t wrappers for objcdt and the IMP caches test tool
+struct cache_t;
+struct bucket_t;
+struct preopt_cache_t;
+OBJC_EXPORT struct bucket_t * _Nonnull objc_cache_buckets(const struct cache_t * _Nonnull cache);
+OBJC_EXPORT size_t objc_cache_bytesForCapacity(uint32_t cap);
+OBJC_EXPORT uint32_t objc_cache_occupied(const struct cache_t * _Nonnull cache);
+OBJC_EXPORT unsigned objc_cache_capacity(const struct cache_t * _Nonnull cache);
+
+#if CONFIG_USE_PREOPT_CACHES
+
+OBJC_EXPORT bool objc_cache_isConstantOptimizedCache(const struct cache_t * _Nonnull cache, bool strict, uintptr_t empty_addr);
+OBJC_EXPORT unsigned objc_cache_preoptCapacity(const struct cache_t * _Nonnull cache);
+OBJC_EXPORT Class _Nonnull objc_cache_preoptFallbackClass(const struct cache_t * _Nonnull cache);
+OBJC_EXPORT const struct preopt_cache_t * _Nonnull objc_cache_preoptCache(const struct cache_t * _Nonnull cache);
+
+#endif
+
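A minimal sketch of how a tool such as objcdt might use the unconditional wrappers above, assuming it already holds a valid cache_t pointer for the class it is inspecting (reportCacheStats is a hypothetical helper):

    static void reportCacheStats(const struct cache_t *cache) {
        unsigned capacity = objc_cache_capacity(cache);
        printf("occupied %u of %u buckets (%zu bytes)\n",
               objc_cache_occupied(cache), capacity,
               objc_cache_bytesForCapacity(capacity));
    }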
__END_DECLS
#endif
#if LOCKDEBUG
extern void lockdebug_assert_all_locks_locked();
extern void lockdebug_assert_no_locks_locked();
+extern void lockdebug_assert_no_locks_locked_except(std::initializer_list<void *> canBeLocked);
extern void lockdebug_setInForkPrepare(bool);
extern void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock);
#else
static constexpr inline void lockdebug_assert_all_locks_locked() { }
static constexpr inline void lockdebug_assert_no_locks_locked() { }
+static constexpr inline void lockdebug_assert_no_locks_locked_except(std::initializer_list<void *> canBeLocked) { };
static constexpr inline void lockdebug_setInForkPrepare(bool) { }
static constexpr inline void lockdebug_lock_precedes_lock(const void *, const void *) { }
#endif
extern void lockdebug_mutex_assert_locked(mutex_tt<true> *lock);
extern void lockdebug_mutex_assert_unlocked(mutex_tt<true> *lock);
-static constexpr inline void lockdebug_remember_mutex(mutex_tt<false> *lock) { }
-static constexpr inline void lockdebug_mutex_lock(mutex_tt<false> *lock) { }
-static constexpr inline void lockdebug_mutex_try_lock(mutex_tt<false> *lock) { }
-static constexpr inline void lockdebug_mutex_unlock(mutex_tt<false> *lock) { }
-static constexpr inline void lockdebug_mutex_assert_locked(mutex_tt<false> *lock) { }
-static constexpr inline void lockdebug_mutex_assert_unlocked(mutex_tt<false> *lock) { }
+static constexpr inline void lockdebug_remember_mutex(__unused mutex_tt<false> *lock) { }
+static constexpr inline void lockdebug_mutex_lock(__unused mutex_tt<false> *lock) { }
+static constexpr inline void lockdebug_mutex_try_lock(__unused mutex_tt<false> *lock) { }
+static constexpr inline void lockdebug_mutex_unlock(__unused mutex_tt<false> *lock) { }
+static constexpr inline void lockdebug_mutex_assert_locked(__unused mutex_tt<false> *lock) { }
+static constexpr inline void lockdebug_mutex_assert_unlocked(__unused mutex_tt<false> *lock) { }
extern void lockdebug_remember_monitor(monitor_tt<true> *lock);
extern void lockdebug_monitor_assert_locked(monitor_tt<true> *lock);
extern void lockdebug_monitor_assert_unlocked(monitor_tt<true> *lock);
-static constexpr inline void lockdebug_remember_monitor(monitor_tt<false> *lock) { }
-static constexpr inline void lockdebug_monitor_enter(monitor_tt<false> *lock) { }
-static constexpr inline void lockdebug_monitor_leave(monitor_tt<false> *lock) { }
-static constexpr inline void lockdebug_monitor_wait(monitor_tt<false> *lock) { }
-static constexpr inline void lockdebug_monitor_assert_locked(monitor_tt<false> *lock) { }
-static constexpr inline void lockdebug_monitor_assert_unlocked(monitor_tt<false> *lock) {}
+static constexpr inline void lockdebug_remember_monitor(__unused monitor_tt<false> *lock) { }
+static constexpr inline void lockdebug_monitor_enter(__unused monitor_tt<false> *lock) { }
+static constexpr inline void lockdebug_monitor_leave(__unused monitor_tt<false> *lock) { }
+static constexpr inline void lockdebug_monitor_wait(__unused monitor_tt<false> *lock) { }
+static constexpr inline void lockdebug_monitor_assert_locked(__unused monitor_tt<false> *lock) { }
+static constexpr inline void lockdebug_monitor_assert_unlocked(__unused monitor_tt<false> *lock) {}
extern void
lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<true> *lock);
static constexpr inline void
-lockdebug_remember_recursive_mutex(recursive_mutex_tt<false> *lock) { }
+lockdebug_remember_recursive_mutex(__unused recursive_mutex_tt<false> *lock) { }
static constexpr inline void
-lockdebug_recursive_mutex_lock(recursive_mutex_tt<false> *lock) { }
+lockdebug_recursive_mutex_lock(__unused recursive_mutex_tt<false> *lock) { }
static constexpr inline void
-lockdebug_recursive_mutex_unlock(recursive_mutex_tt<false> *lock) { }
+lockdebug_recursive_mutex_unlock(__unused recursive_mutex_tt<false> *lock) { }
static constexpr inline void
-lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt<false> *lock) { }
+lockdebug_recursive_mutex_assert_locked(__unused recursive_mutex_tt<false> *lock) { }
static constexpr inline void
-lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<false> *lock) { }
+lockdebug_recursive_mutex_assert_unlocked(__unused recursive_mutex_tt<false> *lock) { }
void
lockdebug_assert_no_locks_locked()
+{
+ lockdebug_assert_no_locks_locked_except({});
+}
+
+void lockdebug_assert_no_locks_locked_except(std::initializer_list<void *> canBeLocked)
{
auto& owned = ownedLocks();
for (const auto& l : AllLocks()) {
+ if (std::find(canBeLocked.begin(), canBeLocked.end(), l.first) != canBeLocked.end())
+ continue;
+
if (hasLock(owned, l.first, l.second.k)) {
_objc_fatal("lock %p:%d is incorrectly owned", l.first, l.second.k);
}
#if SUPPORT_TAGGED_POINTERS
-inline Class
+inline Class
objc_object::getIsa()
{
if (fastpath(!isTaggedPointer())) return ISA();
return _objc_isTaggedPointer(this);
}
+inline bool
+objc_object::isTaggedPointerOrNil()
+{
+ return _objc_isTaggedPointerOrNil(this);
+}
+
inline bool
objc_object::isBasicTaggedPointer()
{
#else
// not SUPPORT_TAGGED_POINTERS
-
-inline Class
+inline Class
objc_object::getIsa()
{
return ISA();
return false;
}
+inline bool
+objc_object::isTaggedPointerOrNil()
+{
+ return !this;
+}
+
inline bool
objc_object::isBasicTaggedPointer()
{
#if SUPPORT_NONPOINTER_ISA
-inline Class
-objc_object::ISA()
-{
- ASSERT(!isTaggedPointer());
+// Set the class field in an isa. Takes both the class to set and
+// a pointer to the object where the isa will ultimately be used.
+// This is necessary to get the pointer signing right.
+//
+// Note: this method does not support setting an indexed isa. When
+// indexed isas are in use, it can only be used to set the class of a
+// raw isa.
+inline void
+isa_t::setClass(Class newCls, UNUSED_WITHOUT_PTRAUTH objc_object *obj)
+{
+ // Match the conditional in isa.h.
+#if __has_feature(ptrauth_calls) || TARGET_OS_SIMULATOR
+# if ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_NONE
+ // No signing, just use the raw pointer.
+ uintptr_t signedCls = (uintptr_t)newCls;
+
+# elif ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ONLY_SWIFT
+ // We're only signing Swift classes. Non-Swift classes just use
+ // the raw pointer.
+ uintptr_t signedCls = (uintptr_t)newCls;
+ if (newCls->isSwiftStable())
+ signedCls = (uintptr_t)ptrauth_sign_unauthenticated((void *)newCls, ISA_SIGNING_KEY, ptrauth_blend_discriminator(obj, ISA_SIGNING_DISCRIMINATOR));
+
+# elif ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ALL
+ // We're signing everything
+ uintptr_t signedCls = (uintptr_t)ptrauth_sign_unauthenticated((void *)newCls, ISA_SIGNING_KEY, ptrauth_blend_discriminator(obj, ISA_SIGNING_DISCRIMINATOR));
+
+# else
+# error Unknown isa signing mode.
+# endif
+
+ shiftcls_and_sig = signedCls >> 3;
+
+#elif SUPPORT_INDEXED_ISA
+ // Indexed isa only uses this method to set a raw pointer class.
+ // Setting an indexed class is handled separately.
+ cls = newCls;
+
+#else // Nonpointer isa, no ptrauth
+ shiftcls = (uintptr_t)newCls >> 3;
+#endif
+}
+
+// Get the class pointer out of an isa. When ptrauth is supported,
+// this operation is optionally authenticated. Many code paths don't
+// need the authentication, so it can be skipped in those cases for
+// better performance.
+//
+// Note: this method does not support retrieving indexed isas. When
+// indexed isas are in use, it can only be used to retrieve the class
+// of a raw isa.
+#if SUPPORT_INDEXED_ISA || (ISA_SIGNING_AUTH_MODE != ISA_SIGNING_AUTH)
+#define MAYBE_UNUSED_AUTHENTICATED_PARAM __attribute__((unused))
+#else
+#define MAYBE_UNUSED_AUTHENTICATED_PARAM UNUSED_WITHOUT_PTRAUTH
+#endif
+
+inline Class
+isa_t::getClass(MAYBE_UNUSED_AUTHENTICATED_PARAM bool authenticated) {
#if SUPPORT_INDEXED_ISA
- if (isa.nonpointer) {
- uintptr_t slot = isa.indexcls;
- return classForIndex((unsigned)slot);
+ return cls;
+#else
+
+ uintptr_t clsbits = bits;
+
+# if __has_feature(ptrauth_calls)
+# if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
+ // Most callers aren't security critical, so skip the
+ // authentication unless they ask for it. Message sending and
+ // cache filling are protected by the auth code in msgSend.
+ if (authenticated) {
+ // Mask off all bits besides the class pointer and signature.
+ clsbits &= ISA_MASK;
+ if (clsbits == 0)
+ return Nil;
+ clsbits = (uintptr_t)ptrauth_auth_data((void *)clsbits, ISA_SIGNING_KEY, ptrauth_blend_discriminator(this, ISA_SIGNING_DISCRIMINATOR));
+ } else {
+ // If not authenticating, strip using the precomputed class mask.
+ clsbits &= objc_debug_isa_class_mask;
}
- return (Class)isa.bits;
+# else
+ // If not authenticating, strip using the precomputed class mask.
+ clsbits &= objc_debug_isa_class_mask;
+# endif
+
+# else
+ clsbits &= ISA_MASK;
+# endif
+
+ return (Class)clsbits;
+#endif
+}
+
+inline Class
+isa_t::getDecodedClass(bool authenticated) {
+#if SUPPORT_INDEXED_ISA
+ if (nonpointer) {
+ return classForIndex(indexcls);
+ }
+ return (Class)cls;
#else
- return (Class)(isa.bits & ISA_MASK);
+ return getClass(authenticated);
#endif
}
+inline Class
+objc_object::ISA(bool authenticated)
+{
+ ASSERT(!isTaggedPointer());
+ return isa.getDecodedClass(authenticated);
+}
+
inline Class
objc_object::rawISA()
{
initIsa(cls, true, hasCxxDtor);
}
+#if !SUPPORT_INDEXED_ISA && !ISA_HAS_CXX_DTOR_BIT
+#define UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT __attribute__((unused))
+#else
+#define UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT
+#endif
+
inline void
-objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor)
+objc_object::initIsa(Class cls, bool nonpointer, UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT bool hasCxxDtor)
{
ASSERT(!isTaggedPointer());
+ isa_t newisa(0);
+
if (!nonpointer) {
- isa = isa_t((uintptr_t)cls);
+ newisa.setClass(cls, this);
} else {
ASSERT(!DisableNonpointerIsa);
ASSERT(!cls->instancesRequireRawIsa());
- isa_t newisa(0);
#if SUPPORT_INDEXED_ISA
ASSERT(cls->classArrayIndex() > 0);
newisa.bits = ISA_MAGIC_VALUE;
// isa.magic is part of ISA_MAGIC_VALUE
// isa.nonpointer is part of ISA_MAGIC_VALUE
+# if ISA_HAS_CXX_DTOR_BIT
newisa.has_cxx_dtor = hasCxxDtor;
- newisa.shiftcls = (uintptr_t)cls >> 3;
+# endif
+ newisa.setClass(cls, this);
#endif
-
- // This write must be performed in a single store in some cases
- // (for example when realizing a class because other threads
- // may simultaneously try to use the class).
- // fixme use atomics here to guarantee single-store and to
- // guarantee memory order w.r.t. the class index table
- // ...but not too atomic because we don't want to hurt instantiation
- isa = newisa;
+ newisa.extra_rc = 1;
}
+
+ // This write must be performed in a single store in some cases
+ // (for example when realizing a class because other threads
+ // may simultaneously try to use the class).
+ // fixme use atomics here to guarantee single-store and to
+ // guarantee memory order w.r.t. the class index table
+ // ...but not too atomic because we don't want to hurt instantiation
+ isa = newisa;
}
ASSERT(!isTaggedPointer());
isa_t oldisa;
- isa_t newisa;
+ isa_t newisa(0);
bool sideTableLocked = false;
bool transcribeToSideTable = false;
+ oldisa = LoadExclusive(&isa.bits);
+
do {
transcribeToSideTable = false;
- oldisa = LoadExclusive(&isa.bits);
if ((oldisa.bits == 0 || oldisa.nonpointer) &&
!newCls->isFuture() && newCls->canAllocNonpointer())
{
// 0 -> nonpointer
// nonpointer -> nonpointer
#if SUPPORT_INDEXED_ISA
- if (oldisa.bits == 0) newisa.bits = ISA_INDEX_MAGIC_VALUE;
- else newisa = oldisa;
+ if (oldisa.bits == 0) {
+ newisa.bits = ISA_INDEX_MAGIC_VALUE;
+ newisa.extra_rc = 1;
+ } else {
+ newisa = oldisa;
+ }
// isa.magic is part of ISA_MAGIC_VALUE
// isa.nonpointer is part of ISA_MAGIC_VALUE
newisa.has_cxx_dtor = newCls->hasCxxDtor();
ASSERT(newCls->classArrayIndex() > 0);
newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
#else
- if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
- else newisa = oldisa;
+ if (oldisa.bits == 0) {
+ newisa.bits = ISA_MAGIC_VALUE;
+ newisa.extra_rc = 1;
+ }
+ else {
+ newisa = oldisa;
+ }
// isa.magic is part of ISA_MAGIC_VALUE
// isa.nonpointer is part of ISA_MAGIC_VALUE
+# if ISA_HAS_CXX_DTOR_BIT
newisa.has_cxx_dtor = newCls->hasCxxDtor();
- newisa.shiftcls = (uintptr_t)newCls >> 3;
+# endif
+ newisa.setClass(newCls, this);
#endif
}
else if (oldisa.nonpointer) {
if (!sideTableLocked) sidetable_lock();
sideTableLocked = true;
transcribeToSideTable = true;
- newisa.cls = newCls;
+ newisa.setClass(newCls, this);
}
else {
// raw pointer -> raw pointer
- newisa.cls = newCls;
+ newisa.setClass(newCls, this);
}
- } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));
+ } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
if (transcribeToSideTable) {
// Copy oldisa's retain count et al to side table.
// oldisa.has_assoc: nothing to do
// oldisa.has_cxx_dtor: nothing to do
sidetable_moveExtraRC_nolock(oldisa.extra_rc,
- oldisa.deallocating,
+ oldisa.isDeallocating(),
oldisa.weakly_referenced);
}
if (sideTableLocked) sidetable_unlock();
- if (oldisa.nonpointer) {
-#if SUPPORT_INDEXED_ISA
- return classForIndex(oldisa.indexcls);
-#else
- return (Class)((uintptr_t)oldisa.shiftcls << 3);
-#endif
- }
- else {
- return oldisa.cls;
- }
+ return oldisa.getDecodedClass(false);
}
-
inline bool
objc_object::hasAssociatedObjects()
{
{
if (isTaggedPointer()) return;
- retry:
- isa_t oldisa = LoadExclusive(&isa.bits);
- isa_t newisa = oldisa;
- if (!newisa.nonpointer || newisa.has_assoc) {
- ClearExclusive(&isa.bits);
- return;
+ if (slowpath(!hasNonpointerIsa() && ISA()->hasCustomRR()) && !ISA()->isFuture() && !ISA()->isMetaClass()) {
+ void(*setAssoc)(id, SEL) = (void(*)(id, SEL)) object_getMethodImplementation((id)this, @selector(_noteAssociatedObjects));
+ if ((IMP)setAssoc != _objc_msgForward) {
+ (*setAssoc)((id)this, @selector(_noteAssociatedObjects));
+ }
}
- newisa.has_assoc = true;
- if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
+
+ isa_t newisa, oldisa = LoadExclusive(&isa.bits);
+ do {
+ newisa = oldisa;
+ if (!newisa.nonpointer || newisa.has_assoc) {
+ ClearExclusive(&isa.bits);
+ return;
+ }
+ newisa.has_assoc = true;
+ } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
}
inline void
objc_object::setWeaklyReferenced_nolock()
{
- retry:
- isa_t oldisa = LoadExclusive(&isa.bits);
- isa_t newisa = oldisa;
- if (slowpath(!newisa.nonpointer)) {
- ClearExclusive(&isa.bits);
- sidetable_setWeaklyReferenced_nolock();
- return;
- }
- if (newisa.weakly_referenced) {
- ClearExclusive(&isa.bits);
- return;
- }
- newisa.weakly_referenced = true;
- if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
+ isa_t newisa, oldisa = LoadExclusive(&isa.bits);
+ do {
+ newisa = oldisa;
+ if (slowpath(!newisa.nonpointer)) {
+ ClearExclusive(&isa.bits);
+ sidetable_setWeaklyReferenced_nolock();
+ return;
+ }
+ if (newisa.weakly_referenced) {
+ ClearExclusive(&isa.bits);
+ return;
+ }
+ newisa.weakly_referenced = true;
+ } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
}
objc_object::hasCxxDtor()
{
ASSERT(!isTaggedPointer());
- if (isa.nonpointer) return isa.has_cxx_dtor;
- else return isa.cls->hasCxxDtor();
+#if ISA_HAS_CXX_DTOR_BIT
+ if (isa.nonpointer)
+ return isa.has_cxx_dtor;
+ else
+#endif
+ return ISA()->hasCxxDtor();
}
objc_object::rootIsDeallocating()
{
if (isTaggedPointer()) return false;
- if (isa.nonpointer) return isa.deallocating;
+ if (isa.nonpointer) return isa.isDeallocating();
return sidetable_isDeallocating();
}
{
if (isTaggedPointer()) return; // fixme necessary?
- if (fastpath(isa.nonpointer &&
- !isa.weakly_referenced &&
- !isa.has_assoc &&
- !isa.has_cxx_dtor &&
+ if (fastpath(isa.nonpointer &&
+ !isa.weakly_referenced &&
+ !isa.has_assoc &&
+#if ISA_HAS_CXX_DTOR_BIT
+ !isa.has_cxx_dtor &&
+#else
+ !isa.getClass(false)->hasCxxDtor() &&
+#endif
!isa.has_sidetable_rc))
{
assert(!sidetable_present());
}
}
+extern explicit_atomic<id(*)(id)> swiftRetain;
+extern explicit_atomic<void(*)(id)> swiftRelease;
// Equivalent to calling [this retain], with shortcuts if there is no override
inline id
{
ASSERT(!isTaggedPointer());
- if (fastpath(!ISA()->hasCustomRR())) {
- return rootRetain();
- }
-
- return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
+ return rootRetain(false, RRVariant::FastOrMsgSend);
}
-
// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super retain].
ALWAYS_INLINE id
objc_object::rootRetain()
{
- return rootRetain(false, false);
+ return rootRetain(false, RRVariant::Fast);
}
ALWAYS_INLINE bool
objc_object::rootTryRetain()
{
- return rootRetain(true, false) ? true : false;
+ return rootRetain(true, RRVariant::Fast) ? true : false;
}
-ALWAYS_INLINE id
-objc_object::rootRetain(bool tryRetain, bool handleOverflow)
+ALWAYS_INLINE id
+objc_object::rootRetain(bool tryRetain, objc_object::RRVariant variant)
{
- if (isTaggedPointer()) return (id)this;
+ if (slowpath(isTaggedPointer())) return (id)this;
bool sideTableLocked = false;
bool transcribeToSideTable = false;
isa_t oldisa;
isa_t newisa;
+ oldisa = LoadExclusive(&isa.bits);
+
+ if (variant == RRVariant::FastOrMsgSend) {
+ // These checks are only meaningful for objc_retain()
+ // They are here so that we avoid a re-load of the isa.
+ if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) {
+ ClearExclusive(&isa.bits);
+ if (oldisa.getDecodedClass(false)->canCallSwiftRR()) {
+ return swiftRetain.load(memory_order_relaxed)((id)this);
+ }
+ return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
+ }
+ }
+
+ if (slowpath(!oldisa.nonpointer)) {
+ // a Class is a Class forever, so we can perform this check once
+ // outside of the CAS loop
+ if (oldisa.getDecodedClass(false)->isMetaClass()) {
+ ClearExclusive(&isa.bits);
+ return (id)this;
+ }
+ }
+
do {
transcribeToSideTable = false;
- oldisa = LoadExclusive(&isa.bits);
newisa = oldisa;
if (slowpath(!newisa.nonpointer)) {
ClearExclusive(&isa.bits);
- if (rawISA()->isMetaClass()) return (id)this;
- if (!tryRetain && sideTableLocked) sidetable_unlock();
if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
- else return sidetable_retain();
+ else return sidetable_retain(sideTableLocked);
}
// don't check newisa.fast_rr; we already called any RR overrides
- if (slowpath(tryRetain && newisa.deallocating)) {
+ if (slowpath(newisa.isDeallocating())) {
ClearExclusive(&isa.bits);
- if (!tryRetain && sideTableLocked) sidetable_unlock();
- return nil;
+ if (sideTableLocked) {
+ ASSERT(variant == RRVariant::Full);
+ sidetable_unlock();
+ }
+ if (slowpath(tryRetain)) {
+ return nil;
+ } else {
+ return (id)this;
+ }
}
uintptr_t carry;
newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry); // extra_rc++
if (slowpath(carry)) {
// newisa.extra_rc++ overflowed
- if (!handleOverflow) {
+ if (variant != RRVariant::Full) {
ClearExclusive(&isa.bits);
return rootRetain_overflow(tryRetain);
}
newisa.extra_rc = RC_HALF;
newisa.has_sidetable_rc = true;
}
- } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));
+ } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
- if (slowpath(transcribeToSideTable)) {
- // Copy the other half of the retain counts to the side table.
- sidetable_addExtraRC_nolock(RC_HALF);
+ if (variant == RRVariant::Full) {
+ if (slowpath(transcribeToSideTable)) {
+ // Copy the other half of the retain counts to the side table.
+ sidetable_addExtraRC_nolock(RC_HALF);
+ }
+
+ if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
+ } else {
+ ASSERT(!transcribeToSideTable);
+ ASSERT(!sideTableLocked);
}
- if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
return (id)this;
}
{
ASSERT(!isTaggedPointer());
- if (fastpath(!ISA()->hasCustomRR())) {
- rootRelease();
- return;
- }
-
- ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
+ rootRelease(true, RRVariant::FastOrMsgSend);
}
ALWAYS_INLINE bool
objc_object::rootRelease()
{
- return rootRelease(true, false);
+ return rootRelease(true, RRVariant::Fast);
}
ALWAYS_INLINE bool
objc_object::rootReleaseShouldDealloc()
{
- return rootRelease(false, false);
+ return rootRelease(false, RRVariant::Fast);
}
-ALWAYS_INLINE bool
-objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
+ALWAYS_INLINE bool
+objc_object::rootRelease(bool performDealloc, objc_object::RRVariant variant)
{
- if (isTaggedPointer()) return false;
+ if (slowpath(isTaggedPointer())) return false;
bool sideTableLocked = false;
- isa_t oldisa;
- isa_t newisa;
+ isa_t newisa, oldisa;
+
+ oldisa = LoadExclusive(&isa.bits);
+
+ if (variant == RRVariant::FastOrMsgSend) {
+ // These checks are only meaningful for objc_release()
+ // They are here so that we avoid a re-load of the isa.
+ if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) {
+ ClearExclusive(&isa.bits);
+ if (oldisa.getDecodedClass(false)->canCallSwiftRR()) {
+ swiftRelease.load(memory_order_relaxed)((id)this);
+ return true;
+ }
+ ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
+ return true;
+ }
+ }
- retry:
+ if (slowpath(!oldisa.nonpointer)) {
+ // a Class is a Class forever, so we can perform this check once
+ // outside of the CAS loop
+ if (oldisa.getDecodedClass(false)->isMetaClass()) {
+ ClearExclusive(&isa.bits);
+ return false;
+ }
+ }
+
+retry:
do {
- oldisa = LoadExclusive(&isa.bits);
newisa = oldisa;
if (slowpath(!newisa.nonpointer)) {
ClearExclusive(&isa.bits);
- if (rawISA()->isMetaClass()) return false;
- if (sideTableLocked) sidetable_unlock();
- return sidetable_release(performDealloc);
+ return sidetable_release(sideTableLocked, performDealloc);
+ }
+ if (slowpath(newisa.isDeallocating())) {
+ ClearExclusive(&isa.bits);
+ if (sideTableLocked) {
+ ASSERT(variant == RRVariant::Full);
+ sidetable_unlock();
+ }
+ return false;
}
+
// don't check newisa.fast_rr; we already called any RR overrides
uintptr_t carry;
newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc--
// don't ClearExclusive()
goto underflow;
}
- } while (slowpath(!StoreReleaseExclusive(&isa.bits,
- oldisa.bits, newisa.bits)));
+ } while (slowpath(!StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
- if (slowpath(sideTableLocked)) sidetable_unlock();
+ if (slowpath(newisa.isDeallocating()))
+ goto deallocate;
+
+ if (variant == RRVariant::Full) {
+ if (slowpath(sideTableLocked)) sidetable_unlock();
+ } else {
+ ASSERT(!sideTableLocked);
+ }
return false;
underflow:
newisa = oldisa;
if (slowpath(newisa.has_sidetable_rc)) {
- if (!handleUnderflow) {
+ if (variant != RRVariant::Full) {
ClearExclusive(&isa.bits);
return rootRelease_underflow(performDealloc);
}
sideTableLocked = true;
// Need to start over to avoid a race against
// the nonpointer -> raw pointer transition.
+ oldisa = LoadExclusive(&isa.bits);
goto retry;
}
// Try to remove some retain counts from the side table.
- size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);
+ auto borrow = sidetable_subExtraRC_nolock(RC_HALF);
- // To avoid races, has_sidetable_rc must remain set
- // even if the side table count is now zero.
+ bool emptySideTable = borrow.remaining == 0; // we'll clear the side table if no refcounts remain there
- if (borrowed > 0) {
+ if (borrow.borrowed > 0) {
// Side table retain count decreased.
// Try to add them to the inline count.
- newisa.extra_rc = borrowed - 1; // redo the original decrement too
- bool stored = StoreReleaseExclusive(&isa.bits,
- oldisa.bits, newisa.bits);
- if (!stored) {
+ bool didTransitionToDeallocating = false;
+ newisa.extra_rc = borrow.borrowed - 1; // redo the original decrement too
+ newisa.has_sidetable_rc = !emptySideTable;
+
+ bool stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits);
+
+ if (!stored && oldisa.nonpointer) {
// Inline update failed.
// Try it again right now. This prevents livelock on LL/SC
// architectures where the side table access itself may have
// dropped the reservation.
- isa_t oldisa2 = LoadExclusive(&isa.bits);
- isa_t newisa2 = oldisa2;
- if (newisa2.nonpointer) {
- uintptr_t overflow;
- newisa2.bits =
- addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
- if (!overflow) {
- stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits,
- newisa2.bits);
+ uintptr_t overflow;
+ newisa.bits =
+ addc(oldisa.bits, RC_ONE * (borrow.borrowed-1), 0, &overflow);
+ newisa.has_sidetable_rc = !emptySideTable;
+ if (!overflow) {
+ stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits);
+ if (stored) {
+ didTransitionToDeallocating = newisa.isDeallocating();
}
}
}
if (!stored) {
// Inline update failed.
// Put the retains back in the side table.
- sidetable_addExtraRC_nolock(borrowed);
+ ClearExclusive(&isa.bits);
+ sidetable_addExtraRC_nolock(borrow.borrowed);
+ oldisa = LoadExclusive(&isa.bits);
goto retry;
}
// Decrement successful after borrowing from side table.
- // This decrement cannot be the deallocating decrement - the side
- // table lock and has_sidetable_rc bit ensure that if everyone
- // else tried to -release while we worked, the last one would block.
- sidetable_unlock();
- return false;
+ if (emptySideTable)
+ sidetable_clearExtraRC_nolock();
+
+ if (!didTransitionToDeallocating) {
+ if (slowpath(sideTableLocked)) sidetable_unlock();
+ return false;
+ }
}
else {
// Side table is empty after all. Fall-through to the dealloc path.
}
}
+deallocate:
// Really deallocate.
- if (slowpath(newisa.deallocating)) {
- ClearExclusive(&isa.bits);
- if (sideTableLocked) sidetable_unlock();
- return overrelease_error();
- // does not actually return
- }
- newisa.deallocating = true;
- if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
+ ASSERT(newisa.isDeallocating());
+ ASSERT(isa.isDeallocating());
if (slowpath(sideTableLocked)) sidetable_unlock();
if (isTaggedPointer()) return (uintptr_t)this;
sidetable_lock();
- isa_t bits = LoadExclusive(&isa.bits);
- ClearExclusive(&isa.bits);
+ isa_t bits = __c11_atomic_load((_Atomic uintptr_t *)&isa.bits, __ATOMIC_RELAXED);
if (bits.nonpointer) {
- uintptr_t rc = 1 + bits.extra_rc;
+ uintptr_t rc = bits.extra_rc;
if (bits.has_sidetable_rc) {
rc += sidetable_getExtraRC_nolock();
}
#else
// not SUPPORT_NONPOINTER_ISA
+inline void
+isa_t::setClass(Class cls, objc_object *obj)
+{
+ this->cls = cls;
+}
+
+inline Class
+isa_t::getClass(bool authenticated __unused)
+{
+ return cls;
+}
+
+inline Class
+isa_t::getDecodedClass(bool authenticated)
+{
+ return getClass(authenticated);
+}
inline Class
-objc_object::ISA()
+objc_object::ISA(bool authenticated __unused)
{
ASSERT(!isTaggedPointer());
- return isa.cls;
+ return isa.getClass(/*authenticated*/false);
}
inline void
objc_object::initIsa(Class cls)
{
ASSERT(!isTaggedPointer());
- isa = (uintptr_t)cls;
+ isa.setClass(cls, this);
}
// cls->isInitializing() || cls->isInitialized());
ASSERT(!isTaggedPointer());
-
- isa_t oldisa, newisa;
- newisa.cls = cls;
- do {
- oldisa = LoadExclusive(&isa.bits);
- } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));
-
- if (oldisa.cls && oldisa.cls->instancesHaveAssociatedObjects()) {
+
+ isa_t newisa, oldisa;
+ newisa.setClass(cls, this);
+ oldisa.bits = __c11_atomic_exchange((_Atomic uintptr_t *)&isa.bits, newisa.bits, __ATOMIC_RELAXED);
+
+ Class oldcls = oldisa.getDecodedClass(/*authenticated*/false);
+ if (oldcls && oldcls->instancesHaveAssociatedObjects()) {
cls->setInstancesHaveAssociatedObjects();
}
-
- return oldisa.cls;
+
+ return oldcls;
}
objc_object::hasCxxDtor()
{
ASSERT(!isTaggedPointer());
- return isa.cls->hasCxxDtor();
+ return isa.getClass(/*authenticated*/false)->hasCxxDtor();
}
objc_object::rootRelease()
{
if (isTaggedPointer()) return false;
- return sidetable_release(true);
+ return sidetable_release();
}
inline bool
objc_object::rootReleaseShouldDealloc()
{
if (isTaggedPointer()) return false;
- return sidetable_release(false);
+ return sidetable_release(/*locked*/false, /*performDealloc*/false);
}
const uintptr_t start = (uintptr_t)_dyld_get_shared_cache_range(&length);
if (start) {
- objc::dataSegmentsRanges.add(start, start + length);
+ objc::dataSegmentsRanges.setSharedCacheRange(start, start + length);
}
// `opt` not set at compile time in order to detect too-early usage
}
};
+namespace objc {
+static inline uintptr_t mask16ShiftBits(uint16_t mask)
+{
+ // returns by how much 0xffff must be shifted "right" to return mask
+ uintptr_t maskShift = __builtin_clz(mask) - 16;
+ ASSERT((0xffff >> maskShift) == mask);
+ return maskShift;
+}
+}
+
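
Worked example for mask16ShiftBits (illustrative, not part of the objc4 sources): the helper recovers the shift n such that mask == 0xffff >> n, which the preoptimized-cache code further down uses to pack hash parameters.

    #include <cassert>
    #include <cstdint>

    // Same computation as objc::mask16ShiftBits above; __builtin_clz operates
    // on a 32-bit int, hence the "- 16".
    static inline uintptr_t mask16ShiftBits(uint16_t mask)
    {
        uintptr_t maskShift = __builtin_clz(mask) - 16;
        assert((0xffff >> maskShift) == mask);
        return maskShift;
    }

    int main()
    {
        assert(mask16ShiftBits(0xffff) == 0);   // full 16-bit mask
        assert(mask16ShiftBits(0x07ff) == 5);   // 11-bit mask, the preopt cache case
        assert(mask16ShiftBits(0x007f) == 9);   // 0xffff >> 9 == 0x7f
        return 0;
    }
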
#if TARGET_OS_MAC
# define OS_UNFAIR_LOCK_INLINE 1
static ALWAYS_INLINE
bool
-StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
+StoreExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
- return !__builtin_arm_strex(value, dst);
+ if (slowpath(__builtin_arm_strex(value, dst))) {
+ *oldvalue = LoadExclusive(dst);
+ return false;
+ }
+ return true;
}
static ALWAYS_INLINE
bool
-StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
+StoreReleaseExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
- return !__builtin_arm_stlex(value, dst);
+ if (slowpath(__builtin_arm_stlex(value, dst))) {
+ *oldvalue = LoadExclusive(dst);
+ return false;
+ }
+ return true;
}
static ALWAYS_INLINE
static ALWAYS_INLINE
bool
-StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
+StoreExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
- return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, &oldvalue, value, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, oldvalue, value, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
static ALWAYS_INLINE
bool
-StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
+StoreReleaseExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
- return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, &oldvalue, value, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+ return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, oldvalue, value, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}
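
The point of the new signature is that a failed store now hands the caller the refreshed old value, so the initial load can be hoisted out of the retry loop. A minimal sketch of the resulting idiom, modeled on the setAndClearBits caller further down (illustrative; it reuses the same Clang __c11 builtin as the non-ARM path above):

    #include <cstdint>
    #include <cstdio>

    static bool StoreReleaseExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
    {
        // On failure the builtin writes the current value back into *oldvalue.
        return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, oldvalue, value,
                                                  __ATOMIC_RELEASE, __ATOMIC_RELAXED);
    }

    static void setAndClearBits(uintptr_t *bits, uintptr_t set, uintptr_t clear)
    {
        uintptr_t newBits, oldBits = *bits;        // single initial load (LoadExclusive in the real code)
        do {
            newBits = (oldBits | set) & ~clear;    // recomputed from the refreshed oldBits on each retry
        } while (!StoreReleaseExclusive(bits, &oldBits, newBits));
    }

    int main()
    {
        uintptr_t bits = 0xf0;
        setAndClearBits(&bits, 0x0f, 0x80);
        printf("%#lx\n", (unsigned long)bits);     // 0x7f
        return 0;
    }
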
static ALWAYS_INLINE
lockdebug_remember_mutex(this);
}
- constexpr mutex_tt(const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { }
+ constexpr mutex_tt(__unused const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { }
void lock() {
lockdebug_mutex_lock(this);
// Address-ordered lock discipline for a pair of locks.
static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) {
- if (lock1 < lock2) {
+ if ((uintptr_t)lock1 < (uintptr_t)lock2) {
lock1->lock();
lock2->lock();
} else {
lockdebug_remember_recursive_mutex(this);
}
- constexpr recursive_mutex_tt(const fork_unsafe_lock_t unsafe)
+ constexpr recursive_mutex_tt(__unused const fork_unsafe_lock_t unsafe)
: mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT)
{ }
lockdebug_remember_monitor(this);
}
- monitor_tt(const fork_unsafe_lock_t unsafe)
+ monitor_tt(__unused const fork_unsafe_lock_t unsafe)
: mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
{ }
// OS version checking:
//
-// sdkVersion()
-// DYLD_OS_VERSION(mac, ios, tv, watch, bridge)
-// sdkIsOlderThan(mac, ios, tv, watch, bridge)
// sdkIsAtLeast(mac, ios, tv, watch, bridge)
-//
+//
// This version order matches OBJC_AVAILABLE.
+//
+// NOTE: prefer dyld_program_sdk_at_least when possible
+#define sdkIsAtLeast(x, i, t, w, b) \
+ (dyld_program_sdk_at_least(dyld_platform_version_macOS_ ## x) || \
+ dyld_program_sdk_at_least(dyld_platform_version_iOS_ ## i) || \
+ dyld_program_sdk_at_least(dyld_platform_version_tvOS_ ## t) || \
+ dyld_program_sdk_at_least(dyld_platform_version_watchOS_ ## w) || \
+ dyld_program_sdk_at_least(dyld_platform_version_bridgeOS_ ## b))
-#if TARGET_OS_OSX
-# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_MACOSX_VERSION_##x
-# define sdkVersion() dyld_get_program_sdk_version()
-
-#elif TARGET_OS_IOS
-# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##i
-# define sdkVersion() dyld_get_program_sdk_version()
-
-#elif TARGET_OS_TV
- // dyld does not currently have distinct constants for tvOS
-# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
-# define sdkVersion() dyld_get_program_sdk_version()
-
-#elif TARGET_OS_BRIDGE
-# if TARGET_OS_WATCH
-# error bridgeOS 1.0 not supported
-# endif
- // fixme don't need bridgeOS versioning yet
-# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
-# define sdkVersion() dyld_get_program_sdk_bridge_os_version()
-
-#elif TARGET_OS_WATCH
-# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_WATCHOS_VERSION_##w
- // watchOS has its own API for compatibility reasons
-# define sdkVersion() dyld_get_program_sdk_watch_os_version()
-
-#else
-# error unknown OS
-#endif
-
-
-#define sdkIsOlderThan(x, i, t, w, b) \
- (sdkVersion() < DYLD_OS_VERSION(x, i, t, w, b))
-#define sdkIsAtLeast(x, i, t, w, b) \
- (sdkVersion() >= DYLD_OS_VERSION(x, i, t, w, b))
-
-// Allow bare 0 to be used in DYLD_OS_VERSION() and sdkIsOlderThan()
-#define DYLD_MACOSX_VERSION_0 0
-#define DYLD_IOS_VERSION_0 0
-#define DYLD_TVOS_VERSION_0 0
-#define DYLD_WATCHOS_VERSION_0 0
-#define DYLD_BRIDGEOS_VERSION_0 0
-
-// Pretty-print a DYLD_*_VERSION_* constant.
-#define SDK_FORMAT "%hu.%hhu.%hhu"
-#define FORMAT_SDK(v) \
- (unsigned short)(((uint32_t)(v))>>16), \
- (unsigned char)(((uint32_t)(v))>>8), \
- (unsigned char)(((uint32_t)(v))>>0)
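
Call sites keep the old shape; only the comparison now routes through dyld_program_sdk_at_least. A hedged usage sketch (the macOS_10_13 constant is the one this diff itself uses; the other version tokens are assumed to follow the same dyld_priv.h naming scheme):

    // Hypothetical call site: keep a compatibility behavior for apps built
    // against SDKs older than macOS 10.13 / iOS 11 / tvOS 11 / watchOS 4 / bridgeOS 2.
    if (!sdkIsAtLeast(10_13, 11_0, 11_0, 4_0, 2_0)) {
        // legacy behavior
    }
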
#ifndef __BUILDING_OBJCDT__
// fork() safety requires careful tracking of all locks.
#include "objc-private.h"
#include "objc-loadmethod.h"
-#include "objc-cache.h"
+#include "objc-bp-assist.h"
#if TARGET_OS_WIN32
// Disable +initialize fork safety if the app has a
// __DATA,__objc_fork_ok section.
- if (dyld_get_program_sdk_version() < DYLD_MACOSX_VERSION_10_13) {
+ if (!dyld_program_sdk_at_least(dyld_platform_version_macOS_10_13)) {
DisableInitializeForkSafety = true;
if (PrintInitializing) {
_objc_inform("INITIALIZE: disabling +initialize fork "
"safety enforcement because the app is "
- "too old (SDK version " SDK_FORMAT ")",
- FORMAT_SDK(dyld_get_program_sdk_version()));
+ "too old.)");
}
}
for (size_t i = 0; i < count; i++) {
inits[i]();
}
+ auto offsets = getLibobjcInitializerOffsets(&_mh_dylib_header, &count);
+ for (size_t i = 0; i < count; i++) {
+ UnsignedInitializer init(offsets[i]);
+ init();
+ }
}
static_init();
runtime_init();
exception_init();
- cache_init();
+#if __OBJC2__
+ cache_t::init();
+#endif
_imp_implementationWithBlock_init();
_dyld_objc_notify_register(&map_images, load_images, unmap_image);
#define ASSERT(x) assert(x)
#endif
+// `this` is never NULL in C++ unless we encounter UB, but checking for what's impossible
+// is the point of these asserts, so disable the corresponding warning, and let's hope
+// we will reach the assert despite the UB
+#define ASSERT_THIS_NOT_NULL \
+_Pragma("clang diagnostic push") \
+_Pragma("clang diagnostic ignored \"-Wundefined-bool-conversion\"") \
+ASSERT(this) \
+_Pragma("clang diagnostic pop")
+
+
struct objc_class;
struct objc_object;
struct category_t;
isa_t() { }
isa_t(uintptr_t value) : bits(value) { }
- Class cls;
uintptr_t bits;
+
+private:
+ // Accessing the class requires custom ptrauth operations, so
+ // force clients to go through setClass/getClass by making this
+ // private.
+ Class cls;
+
+public:
#if defined(ISA_BITFIELD)
struct {
ISA_BITFIELD; // defined in isa.h
};
+
+ bool isDeallocating() {
+ return extra_rc == 0 && has_sidetable_rc == 0;
+ }
+ void setDeallocating() {
+ extra_rc = 0;
+ has_sidetable_rc = 0;
+ }
#endif
+
+ void setClass(Class cls, objc_object *obj);
+ Class getClass(bool authenticated);
+ Class getDecodedClass(bool authenticated);
};
public:
// ISA() assumes this is NOT a tagged pointer object
- Class ISA();
+ Class ISA(bool authenticated = false);
// rawISA() assumes this is NOT a tagged pointer object or a non pointer ISA
Class rawISA();
bool hasNonpointerIsa();
bool isTaggedPointer();
+ bool isTaggedPointerOrNil();
bool isBasicTaggedPointer();
bool isExtTaggedPointer();
bool isClass();
uintptr_t overrelease_error();
#if SUPPORT_NONPOINTER_ISA
+ // Controls what parts of root{Retain,Release} to emit/inline
+ // - Full means the full (slow) implementation
+ // - Fast means the fastpaths only
+ // - FastOrMsgSend means the fastpaths but checking whether we should call
+ // -retain/-release or Swift, for the usage of objc_{retain,release}
+ enum class RRVariant {
+ Full,
+ Fast,
+ FastOrMsgSend,
+ };
+
// Unified retain count manipulation for nonpointer isa
- id rootRetain(bool tryRetain, bool handleOverflow);
- bool rootRelease(bool performDealloc, bool handleUnderflow);
+ inline id rootRetain(bool tryRetain, RRVariant variant);
+ inline bool rootRelease(bool performDealloc, RRVariant variant);
id rootRetain_overflow(bool tryRetain);
uintptr_t rootRelease_underflow(bool performDealloc);
void clearDeallocating_slow();
// Side table retain count overflow for nonpointer isa
+ struct SidetableBorrow { size_t borrowed, remaining; };
+
void sidetable_lock();
void sidetable_unlock();
void sidetable_moveExtraRC_nolock(size_t extra_rc, bool isDeallocating, bool weaklyReferenced);
bool sidetable_addExtraRC_nolock(size_t delta_rc);
- size_t sidetable_subExtraRC_nolock(size_t delta_rc);
+ SidetableBorrow sidetable_subExtraRC_nolock(size_t delta_rc);
size_t sidetable_getExtraRC_nolock();
+ void sidetable_clearExtraRC_nolock();
#endif
// Side-table-only retain count
bool sidetable_isWeaklyReferenced();
void sidetable_setWeaklyReferenced_nolock();
- id sidetable_retain();
+ id sidetable_retain(bool locked = false);
id sidetable_retain_slow(SideTable& table);
- uintptr_t sidetable_release(bool performDealloc = true);
+ uintptr_t sidetable_release(bool locked = false, bool performDealloc = true);
uintptr_t sidetable_release_slow(SideTable& table, bool performDealloc = true);
bool sidetable_tryRetain();
}
};
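
The RRVariant enum above is a compile-time parameter: each retain/release entry point instantiates only the code it needs, and the fast variants bail out to the Full one on slow paths. A sketch of that general pattern (not the actual rootRetain/rootRelease bodies):

    #include <cstdio>

    enum class RRVariant { Full, Fast, FastOrMsgSend };

    // Illustrative worker: the Fast variants compile only the common case and
    // re-enter with RRVariant::Full when they hit anything complicated.
    template <RRVariant variant>
    static bool releaseWorker(int &count)
    {
        if (variant != RRVariant::Full) {
            if (count > 1) {                 // fast path: plain decrement
                --count;
                return false;
            }
            return releaseWorker<RRVariant::Full>(count);   // punt to the full path
        }
        // Full variant: where underflow, side tables, dealloc, etc. would live.
        --count;
        return count == 0;                   // "should dealloc"
    }

    int main()
    {
        int rc = 2;
        printf("%d\n", releaseWorker<RRVariant::FastOrMsgSend>(rc));  // 0: fast path taken
        printf("%d\n", releaseWorker<RRVariant::Fast>(rc));           // 1: fell back to Full, rc hit 0
        return 0;
    }
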
+ struct Range shared_cache;
struct Range *ranges;
uint32_t count;
uint32_t size : 31;
uint32_t sorted : 1;
public:
+ inline bool inSharedCache(uintptr_t ptr) const {
+ return shared_cache.contains(ptr);
+ }
inline bool contains(uint16_t witness, uintptr_t ptr) const {
return witness < count && ranges[witness].contains(ptr);
}
+ inline void setSharedCacheRange(uintptr_t start, uintptr_t end) {
+ shared_cache = Range{start, end};
+ add(start, end);
+ }
bool find(uintptr_t ptr, uint32_t &pos);
void add(uintptr_t start, uintptr_t end);
void remove(uintptr_t start, uintptr_t end);
extern struct SafeRanges dataSegmentsRanges;
+static inline bool inSharedCache(uintptr_t ptr) {
+ return dataSegmentsRanges.inSharedCache(ptr);
+}
+
} // objc
struct header_info;
enum {
LOOKUP_INITIALIZE = 1,
LOOKUP_RESOLVER = 2,
- LOOKUP_CACHE = 4,
- LOOKUP_NIL = 8,
+ LOOKUP_NIL = 4,
+ LOOKUP_NOCACHE = 8,
};
extern IMP lookUpImpOrForward(id obj, SEL, Class cls, int behavior);
-
-static inline IMP
-lookUpImpOrNil(id obj, SEL sel, Class cls, int behavior = 0)
-{
- return lookUpImpOrForward(obj, sel, cls, behavior | LOOKUP_CACHE | LOOKUP_NIL);
-}
+extern IMP lookUpImpOrForwardTryCache(id obj, SEL, Class cls, int behavior = 0);
+extern IMP lookUpImpOrNilTryCache(id obj, SEL, Class cls, int behavior = 0);
extern IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);
// Global operator new and delete. We must not use any app overrides.
// This ALSO REQUIRES each of these be in libobjc's unexported symbol list.
-#if __cplusplus
+#if __cplusplus && !defined(TEST_OVERRIDES_NEW)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winline-new-delete"
#include <new>
-inline void* operator new(std::size_t size) throw (std::bad_alloc) { return malloc(size); }
-inline void* operator new[](std::size_t size) throw (std::bad_alloc) { return malloc(size); }
-inline void* operator new(std::size_t size, const std::nothrow_t&) throw() { return malloc(size); }
-inline void* operator new[](std::size_t size, const std::nothrow_t&) throw() { return malloc(size); }
-inline void operator delete(void* p) throw() { free(p); }
-inline void operator delete[](void* p) throw() { free(p); }
-inline void operator delete(void* p, const std::nothrow_t&) throw() { free(p); }
-inline void operator delete[](void* p, const std::nothrow_t&) throw() { free(p); }
+inline void* operator new(std::size_t size) { return malloc(size); }
+inline void* operator new[](std::size_t size) { return malloc(size); }
+inline void* operator new(std::size_t size, const std::nothrow_t&) noexcept(true) { return malloc(size); }
+inline void* operator new[](std::size_t size, const std::nothrow_t&) noexcept(true) { return malloc(size); }
+inline void operator delete(void* p) noexcept(true) { free(p); }
+inline void operator delete[](void* p) noexcept(true) { free(p); }
+inline void operator delete(void* p, const std::nothrow_t&) noexcept(true) { free(p); }
+inline void operator delete[](void* p, const std::nothrow_t&) noexcept(true) { free(p); }
#pragma clang diagnostic pop
#endif
#define __ptrauth_swift_value_witness_function_pointer(__key)
#endif
+// Workaround <rdar://problem/64531063> Definitions of ptrauth_sign_unauthenticated and friends generate unused variables warnings
+#if __has_feature(ptrauth_calls)
+#define UNUSED_WITHOUT_PTRAUTH
+#else
+#define UNUSED_WITHOUT_PTRAUTH __unused
+#endif
#if __has_feature(ptrauth_calls)
// A "ptrauth" struct that just passes pointers through unchanged.
struct PtrauthRaw {
template <typename T>
- static T *sign(T *ptr, const void *address) {
+ static T *sign(T *ptr, __unused const void *address) {
return ptr;
}
template <typename T>
- static T *auth(T *ptr, const void *address) {
+ static T *auth(T *ptr, __unused const void *address) {
return ptr;
}
};
// when reading.
struct PtrauthStrip {
template <typename T>
- static T *sign(T *ptr, const void *address) {
+ static T *sign(T *ptr, __unused const void *address) {
return ptr;
}
template <typename T>
- static T *auth(T *ptr, const void *address) {
+ static T *auth(T *ptr, __unused const void *address) {
return ptrauth_strip(ptr, ptrauth_key_process_dependent_data);
}
};
template <unsigned discriminator>
struct Ptrauth {
template <typename T>
- static T *sign(T *ptr, const void *address) {
+ static T *sign(T *ptr, UNUSED_WITHOUT_PTRAUTH const void *address) {
if (!ptr)
return nullptr;
return ptrauth_sign_unauthenticated(ptr, ptrauth_key_process_dependent_data, ptrauth_blend_discriminator(address, discriminator));
}
template <typename T>
- static T *auth(T *ptr, const void *address) {
+ static T *auth(T *ptr, UNUSED_WITHOUT_PTRAUTH const void *address) {
if (!ptr)
return nullptr;
return ptrauth_auth_data(ptr, ptrauth_key_process_dependent_data, ptrauth_blend_discriminator(address, discriminator));
#if __has_feature(ptrauth_calls)
// Get a ptrauth type that uses a string discriminator.
+#if __BUILDING_OBJCDT__
+#define PTRAUTH_STR(name) PtrauthStrip
+#else
#define PTRAUTH_STR(name) Ptrauth<ptrauth_string_discriminator(#name)>
+#endif
// When ptrauth is available, declare a template that wraps a type
// in a WrappedPtr that uses an authenticated pointer using the
extern void _objc_associations_init();
extern void _object_set_associative_reference(id object, const void *key, id value, uintptr_t policy);
extern id _object_get_associative_reference(id object, const void *key);
-extern void _object_remove_assocations(id object);
+extern void _object_remove_assocations(id object, bool deallocating);
__END_DECLS
OBJC_ASSOCIATION_SETTER_COPY = 3, // NOTE: both bits are set, so we can simply test 1 bit in releaseValue below.
OBJC_ASSOCIATION_GETTER_READ = (0 << 8),
OBJC_ASSOCIATION_GETTER_RETAIN = (1 << 8),
- OBJC_ASSOCIATION_GETTER_AUTORELEASE = (2 << 8)
+ OBJC_ASSOCIATION_GETTER_AUTORELEASE = (2 << 8),
+ OBJC_ASSOCIATION_SYSTEM_OBJECT = _OBJC_ASSOCIATION_SYSTEM_OBJECT, // 1 << 16
};
spinlock_t AssociationsManagerLock;
// retain the new value (if any) outside the lock.
association.acquireValue();
+ bool isFirstAssociation = false;
{
AssociationsManager manager;
AssociationsHashMap &associations(manager.get());
auto refs_result = associations.try_emplace(disguised, ObjectAssociationMap{});
if (refs_result.second) {
/* it's the first association we make */
- object->setHasAssociatedObjects();
+ isFirstAssociation = true;
}
/* establish or replace the association */
}
}
+ // Call setHasAssociatedObjects outside the lock, since this
+ // will call the object's _noteAssociatedObjects method if it
+ // has one, and this may trigger +initialize which might do
+ // arbitrary stuff, including setting more associated objects.
+ if (isFirstAssociation)
+ object->setHasAssociatedObjects();
+
// release the old value (outside of the lock).
association.releaseHeldValue();
}
// raw isa objects (such as OS Objects) that can't track
// whether they have associated objects.
void
-_object_remove_assocations(id object)
+_object_remove_assocations(id object, bool deallocating)
{
ObjectAssociationMap refs{};
AssociationsHashMap::iterator i = associations.find((objc_object *)object);
if (i != associations.end()) {
refs.swap(i->second);
- associations.erase(i);
+
+ // If we are not deallocating, then SYSTEM_OBJECT associations are preserved.
+ bool didReInsert = false;
+ if (!deallocating) {
+ for (auto &ref: refs) {
+ if (ref.second.policy() & OBJC_ASSOCIATION_SYSTEM_OBJECT) {
+ i->second.insert(ref);
+ didReInsert = true;
+ }
+ }
+ }
+ if (!didReInsert)
+ associations.erase(i);
}
}
+ // Associations to be released after the normal ones.
+ SmallVector<ObjcAssociation *, 4> laterRefs;
+
// release everything (outside of the lock).
for (auto &i: refs) {
- i.second.releaseHeldValue();
+ if (i.second.policy() & OBJC_ASSOCIATION_SYSTEM_OBJECT) {
+ // SYSTEM_OBJECT associations are only released when deallocating, and then after the normal ones.
+ if (deallocating)
+ laterRefs.append(&i.second);
+ } else {
+ i.second.releaseHeldValue();
+ }
+ }
+ for (auto *later: laterRefs) {
+ later->releaseHeldValue();
}
}
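
Observable effect of the SYSTEM_OBJECT changes above, as a hedged sketch: an association carrying the internal bit now survives objc_removeAssociatedObjects (deallocating == false) but is still torn down when the object deallocates. The 1 << 16 value is taken from the enum above; it is SPI, shown only to illustrate the new code path:

    #import <Foundation/Foundation.h>
    #import <objc/runtime.h>
    #include <assert.h>

    int main()
    {
        @autoreleasepool {
            static char key;
            id obj = [NSObject new];

            // OR the internal SYSTEM_OBJECT bit (1 << 16, per the enum above) into a
            // public policy. Not a supported interface; illustrative only.
            objc_setAssociatedObject(obj, &key, @"marker",
                                     (objc_AssociationPolicy)(OBJC_ASSOCIATION_RETAIN_NONATOMIC | (1 << 16)));

            objc_removeAssociatedObjects(obj);   // deallocating == false: association preserved
            assert(objc_getAssociatedObject(obj, &key) != nil);
        }   // obj deallocates here (under ARC): deallocating == true, association released
        return 0;
    }
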
#define _OBJC_RUNTIME_NEW_H
#include "PointerUnion.h"
+#include <type_traits>
// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.
// class has started realizing but not yet completed it
#define RW_REALIZING (1<<19)
+#if CONFIG_USE_PREOPT_CACHES
+// this class and its descendants can't have preopt caches with inlined sels
+#define RW_NOPREOPT_SELS (1<<2)
+// this class and its descendants can't have preopt caches
+#define RW_NOPREOPT_CACHE (1<<1)
+#endif
+
// class is a metaclass (copied from ro)
#define RW_META RO_META // (1<<0)
// NOTE: MORE RW_ FLAGS DEFINED BELOW
-
// Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
// or class_t->bits (FAST_*).
//
#endif
// Compute the ptrauth signing modifier from &_imp, newSel, and cls.
- uintptr_t modifierForSEL(SEL newSel, Class cls) const {
- return (uintptr_t)&_imp ^ (uintptr_t)newSel ^ (uintptr_t)cls;
+ uintptr_t modifierForSEL(bucket_t *base, SEL newSel, Class cls) const {
+ return (uintptr_t)base ^ (uintptr_t)newSel ^ (uintptr_t)cls;
}
// Sign newImp, with &_imp, newSel, and cls as modifiers.
- uintptr_t encodeImp(IMP newImp, SEL newSel, Class cls) const {
+ uintptr_t encodeImp(UNUSED_WITHOUT_PTRAUTH bucket_t *base, IMP newImp, UNUSED_WITHOUT_PTRAUTH SEL newSel, Class cls) const {
if (!newImp) return 0;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
return (uintptr_t)
ptrauth_auth_and_resign(newImp,
ptrauth_key_function_pointer, 0,
ptrauth_key_process_dependent_code,
- modifierForSEL(newSel, cls));
+ modifierForSEL(base, newSel, cls));
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
return (uintptr_t)newImp ^ (uintptr_t)cls;
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
}
public:
- inline SEL sel() const { return _sel.load(memory_order::memory_order_relaxed); }
+ static inline size_t offsetOfSel() { return offsetof(bucket_t, _sel); }
+ inline SEL sel() const { return _sel.load(memory_order_relaxed); }
- inline IMP rawImp(objc_class *cls) const {
- uintptr_t imp = _imp.load(memory_order::memory_order_relaxed);
+#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
+#define MAYBE_UNUSED_ISA
+#else
+#define MAYBE_UNUSED_ISA __attribute__((unused))
+#endif
+ inline IMP rawImp(MAYBE_UNUSED_ISA objc_class *cls) const {
+ uintptr_t imp = _imp.load(memory_order_relaxed);
if (!imp) return nil;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
return (IMP)imp;
}
- inline IMP imp(Class cls) const {
- uintptr_t imp = _imp.load(memory_order::memory_order_relaxed);
+ inline IMP imp(UNUSED_WITHOUT_PTRAUTH bucket_t *base, Class cls) const {
+ uintptr_t imp = _imp.load(memory_order_relaxed);
if (!imp) return nil;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
- SEL sel = _sel.load(memory_order::memory_order_relaxed);
+ SEL sel = _sel.load(memory_order_relaxed);
return (IMP)
ptrauth_auth_and_resign((const void *)imp,
ptrauth_key_process_dependent_code,
- modifierForSEL(sel, cls),
+ modifierForSEL(base, sel, cls),
ptrauth_key_function_pointer, 0);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
return (IMP)(imp ^ (uintptr_t)cls);
}
template <Atomicity, IMPEncoding>
- void set(SEL newSel, IMP newImp, Class cls);
+ void set(bucket_t *base, SEL newSel, IMP newImp, Class cls);
};
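
For the CACHE_IMP_ENCODING_ISA_XOR branches above, encoding and decoding are the same XOR; a tiny round trip with illustrative values:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uintptr_t imp = 0x0000000100004a20;    // stand-in for an IMP
        uintptr_t cls = 0x00000001000081d8;    // stand-in for the owning Class

        uintptr_t encoded = imp ^ cls;         // what encodeImp() stores in the bucket
        uintptr_t decoded = encoded ^ cls;     // what imp()/rawImp() recover on lookup
        assert(decoded == imp);

        // Decoding against the wrong class scrambles the pointer rather than
        // yielding a callable IMP, which is the point of the scheme.
        assert((encoded ^ (cls + 16)) != imp);
        return 0;
    }
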
+/* dyld_shared_cache_builder and obj-C agree on these definitions */
+enum {
+ OBJC_OPT_METHODNAME_START = 0,
+ OBJC_OPT_METHODNAME_END = 1,
+ OBJC_OPT_INLINED_METHODS_START = 2,
+ OBJC_OPT_INLINED_METHODS_END = 3,
+
+ __OBJC_OPT_OFFSETS_COUNT,
+};
+
+#if CONFIG_USE_PREOPT_CACHES
+extern uintptr_t objc_opt_offsets[__OBJC_OPT_OFFSETS_COUNT];
+#endif
+
+/* dyld_shared_cache_builder and obj-C agree on these definitions */
+struct preopt_cache_entry_t {
+ uint32_t sel_offs;
+ uint32_t imp_offs;
+};
+
+/* dyld_shared_cache_builder and obj-C agree on these definitions */
+struct preopt_cache_t {
+ int32_t fallback_class_offset;
+ union {
+ struct {
+ uint16_t shift : 5;
+ uint16_t mask : 11;
+ };
+ uint16_t hash_params;
+ };
+ uint16_t occupied : 14;
+ uint16_t has_inlines : 1;
+ uint16_t bit_one : 1;
+ preopt_cache_entry_t entries[];
+
+ inline int capacity() const {
+ return mask + 1;
+ }
+};
+
+// returns:
+// - the cached IMP when one is found
+// - nil if there's no cached value and the cache is dynamic
+// - `value_on_constant_cache_miss` if there's no cached value and the cache is preoptimized
+extern "C" IMP cache_getImp(Class cls, SEL sel, IMP value_on_constant_cache_miss = nil);
struct cache_t {
+private:
+ explicit_atomic<uintptr_t> _bucketsAndMaybeMask;
+ union {
+ struct {
+ explicit_atomic<mask_t> _maybeMask;
+#if __LP64__
+ uint16_t _flags;
+#endif
+ uint16_t _occupied;
+ };
+ explicit_atomic<preopt_cache_t *> _originalPreoptCache;
+ };
+
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
- explicit_atomic<struct bucket_t *> _buckets;
- explicit_atomic<mask_t> _mask;
-#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
- explicit_atomic<uintptr_t> _maskAndBuckets;
- mask_t _mask_unused;
+ // _bucketsAndMaybeMask is a buckets_t pointer
+ // _maybeMask is the buckets mask
+
+ static constexpr uintptr_t bucketsMask = ~0ul;
+ static_assert(!CONFIG_USE_PREOPT_CACHES, "preoptimized caches not supported");
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
+ static constexpr uintptr_t maskShift = 48;
+ static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;
+ static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << maskShift) - 1;
+ static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
+#if CONFIG_USE_PREOPT_CACHES
+ static constexpr uintptr_t preoptBucketsMarker = 1ul;
+ static constexpr uintptr_t preoptBucketsMask = bucketsMask & ~preoptBucketsMarker;
+#endif
+#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
+ // _bucketsAndMaybeMask is a buckets_t pointer in the low 48 bits
+ // _maybeMask is unused, the mask is stored in the top 16 bits.
+
// How much the mask is shifted by.
static constexpr uintptr_t maskShift = 48;
-
+
// Additional bits after the mask which must be zero. msgSend
// takes advantage of these additional bits to construct the value
// `mask << 4` from `_maskAndBuckets` in a single instruction.
static constexpr uintptr_t maskZeroBits = 4;
-
+
// The largest mask value we can store.
static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;
static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;
// Ensure we have enough bits for the buckets pointer.
- static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
+ static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS,
+ "Bucket field doesn't have enough bits for arbitrary pointers.");
+
+#if CONFIG_USE_PREOPT_CACHES
+ static constexpr uintptr_t preoptBucketsMarker = 1ul;
+#if __has_feature(ptrauth_calls)
+ // 63..60: hash_mask_shift
+ // 59..55: hash_shift
+ // 54.. 1: buckets ptr + auth
+ // 0: always 1
+ static constexpr uintptr_t preoptBucketsMask = 0x007ffffffffffffe;
+ static inline uintptr_t preoptBucketsHashParams(const preopt_cache_t *cache) {
+ uintptr_t value = (uintptr_t)cache->shift << 55;
+ // masks have 11 bits but can be 0, so we compute
+ // the right shift for 0x7fff rather than 0xffff
+ return value | ((objc::mask16ShiftBits(cache->mask) - 1) << 60);
+ }
+#else
+ // 63..53: hash_mask
+ // 52..48: hash_shift
+ // 47.. 1: buckets ptr
+ // 0: always 1
+ static constexpr uintptr_t preoptBucketsMask = 0x0000fffffffffffe;
+ static inline uintptr_t preoptBucketsHashParams(const preopt_cache_t *cache) {
+ return (uintptr_t)cache->hash_params << 48;
+ }
+#endif
+#endif // CONFIG_USE_PREOPT_CACHES
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
- // _maskAndBuckets stores the mask shift in the low 4 bits, and
- // the buckets pointer in the remainder of the value. The mask
- // shift is the value where (0xffff >> shift) produces the correct
- // mask. This is equal to 16 - log2(cache_size).
- explicit_atomic<uintptr_t> _maskAndBuckets;
- mask_t _mask_unused;
+ // _bucketsAndMaybeMask is a buckets_t pointer in the top 28 bits
+ // _maybeMask is unused, the mask length is stored in the low 4 bits
static constexpr uintptr_t maskBits = 4;
static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
static constexpr uintptr_t bucketsMask = ~maskMask;
+ static_assert(!CONFIG_USE_PREOPT_CACHES, "preoptimized caches not supported");
#else
#error Unknown cache mask storage type.
#endif
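
To make the HIGH_16 preoptimized-cache packing above concrete, a small sketch of the non-ptrauth layout (hash params in the top 16 bits, buckets pointer in bits 47..1, marker in bit 0). It only restates the comments; maybeConvertToPreoptimized() is where the runtime would actually build such a value:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uintptr_t preoptBucketsMarker = 1ul;
        const uintptr_t preoptBucketsMask   = 0x0000fffffffffffe;   // bits 47..1

        // hash_params is the shift:5 / mask:11 bitfield from preopt_cache_t.
        uint16_t hash_params = (uint16_t)((0x7ff << 5) | 3);        // mask = 0x7ff, shift = 3
        uintptr_t buckets    = 0x00000001234567f0;                  // illustrative pointer value

        // Pack, as the runtime would when adopting a preoptimized cache.
        uintptr_t stored = ((uintptr_t)hash_params << 48) | buckets | preoptBucketsMarker;

        // Unpack, as a reader of _bucketsAndMaybeMask would.
        assert((stored & preoptBucketsMask) == buckets);            // recover the buckets pointer
        assert((uint16_t)(stored >> 48) == hash_params);            // recover shift and mask
        assert(stored & preoptBucketsMarker);                       // tagged as preoptimized
        return 0;
    }
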
-
-#if __LP64__
- uint16_t _flags;
+
+ bool isConstantEmptyCache() const;
+ bool canBeFreed() const;
+ mask_t mask() const;
+
+#if CONFIG_USE_PREOPT_CACHES
+ void initializeToPreoptCacheInDisguise(const preopt_cache_t *cache);
+ const preopt_cache_t *disguised_preopt_cache() const;
#endif
- uint16_t _occupied;
-public:
- static bucket_t *emptyBuckets();
-
- struct bucket_t *buckets();
- mask_t mask();
- mask_t occupied();
void incrementOccupied();
void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
+
+ void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
+ void collect_free(bucket_t *oldBuckets, mask_t oldCapacity);
+
+ static bucket_t *emptyBuckets();
+ static bucket_t *allocateBuckets(mask_t newCapacity);
+ static bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true);
+ static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);
+ void bad_cache(id receiver, SEL sel) __attribute__((noreturn, cold));
+
+public:
+ // The following four fields are public for objcdt's use only.
+ // objcdt reaches into fields while the process is suspended
+ // hence doesn't care for locks and pesky little details like this
+ // and can safely use these.
+ unsigned capacity() const;
+ struct bucket_t *buckets() const;
+ Class cls() const;
+
+#if CONFIG_USE_PREOPT_CACHES
+ const preopt_cache_t *preopt_cache() const;
+#endif
+
+ mask_t occupied() const;
void initializeToEmpty();
- unsigned capacity();
- bool isConstantEmptyCache();
- bool canBeFreed();
+#if CONFIG_USE_PREOPT_CACHES
+ bool isConstantOptimizedCache(bool strict = false, uintptr_t empty_addr = (uintptr_t)&_objc_empty_cache) const;
+ bool shouldFlush(SEL sel, IMP imp) const;
+ bool isConstantOptimizedCacheWithInlinedSels() const;
+ Class preoptFallbackClass() const;
+ void maybeConvertToPreoptimized();
+ void initializeToEmptyOrPreoptimizedInDisguise();
+#else
+ inline bool isConstantOptimizedCache(bool strict = false, uintptr_t empty_addr = 0) const { return false; }
+ inline bool shouldFlush(SEL sel, IMP imp) const {
+ return cache_getImp(cls(), sel) == imp;
+ }
+ inline bool isConstantOptimizedCacheWithInlinedSels() const { return false; }
+ inline void initializeToEmptyOrPreoptimizedInDisguise() { initializeToEmpty(); }
+#endif
+
+ void insert(SEL sel, IMP imp, id receiver);
+ void copyCacheNolock(objc_imp_cache_entry *buffer, int len);
+ void destroy();
+ void eraseNolock(const char *func);
+
+ static void init();
+ static void collectNolock(bool collectALot);
+ static size_t bytesForCapacity(uint32_t cap);
#if __LP64__
bool getBit(uint16_t flags) const {
// nothing
}
#endif
-
- static size_t bytesForCapacity(uint32_t cap);
- static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);
-
- void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
- void insert(Class cls, SEL sel, IMP imp, id receiver);
-
- static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn, cold));
};
int32_t offset;
T get() const {
+ if (offset == 0)
+ return nullptr;
uintptr_t base = (uintptr_t)&offset;
uintptr_t signExtendedOffset = (uintptr_t)(intptr_t)offset;
uintptr_t pointer = base + signExtendedOffset;
// A pointer modifier that does nothing to the pointer.
struct PointerModifierNop {
template <typename ListType, typename T>
- static T *modify(const ListType &list, T *ptr) { return ptr; }
+ static T *modify(__unused const ListType &list, T *ptr) { return ptr; }
};
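
The RelativePointer::get() logic a few hunks up stores a signed 32-bit displacement from the field's own address. A standalone restatement (illustrative; real offsets are emitted at build time, the runtime never writes them like this):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    template <typename T>
    struct RelativePointer32 {
        int32_t offset;                       // signed displacement from &offset itself
        T get() const {
            if (offset == 0) return nullptr;
            uintptr_t base = (uintptr_t)&offset;
            uintptr_t signExtendedOffset = (uintptr_t)(intptr_t)offset;
            return (T)(base + signExtendedOffset);
        }
    };

    // Keep pointer and target in one object so the 32-bit offset is guaranteed to fit.
    struct Blob {
        RelativePointer32<const char *> types;
        char storage[8];
    };

    int main()
    {
        Blob b;
        strcpy(b.storage, "v16@0:8");
        b.types.offset = (int32_t)((intptr_t)b.storage - (intptr_t)&b.types.offset);
        assert(strcmp(b.types.get(), "v16@0:8") == 0);

        b.types.offset = 0;
        assert(b.types.get() == nullptr);
        return 0;
    }
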
/***********************************************************************
};
+namespace objc {
+// Let method_t::small use this from objc-private.h.
+static inline bool inSharedCache(uintptr_t ptr);
+}
+
struct method_t {
static const uint32_t smallMethodListFlag = 0x80000000;
// The representation of a "small" method. This stores three
// relative offsets to the name, types, and implementation.
struct small {
- RelativePointer<SEL *> name;
+ // The name field either refers to a selector (in the shared
+ // cache) or a selref (everywhere else).
+ RelativePointer<const void *> name;
RelativePointer<const char *> types;
RelativePointer<IMP> imp;
+
+ bool inSharedCache() const {
+ return (CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS &&
+ objc::inSharedCache((uintptr_t)this));
+ }
};
small &small() const {
return *(struct big *)this;
}
- SEL &name() const {
- return isSmall() ? *small().name.get() : big().name;
+ SEL name() const {
+ if (isSmall()) {
+ return (small().inSharedCache()
+ ? (SEL)small().name.get()
+ : *(SEL *)small().name.get());
+ } else {
+ return big().name;
+ }
}
const char *types() const {
return isSmall() ? small().types.get() : big().types;
return big().imp;
}
+ SEL getSmallNameAsSEL() const {
+ ASSERT(small().inSharedCache());
+ return (SEL)small().name.get();
+ }
+
+ SEL getSmallNameAsSELRef() const {
+ ASSERT(!small().inSharedCache());
+ return *(SEL *)small().name.get();
+ }
+
+ void setName(SEL name) {
+ if (isSmall()) {
+ ASSERT(!small().inSharedCache());
+ *(SEL *)small().name.get() = name;
+ } else {
+ big().name = name;
+ }
+ }
+
void setImp(IMP imp) {
if (isSmall()) {
remapImp(imp);
} else {
big().imp = imp;
}
-
}
objc_method_description *getDescription() const {
bool isCanonical() const;
void clearIsCanonical();
-# define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))
+# define HAS_FIELD(f) ((uintptr_t)(&f) < ((uintptr_t)this + size))
bool hasExtendedMethodTypesField() const {
return HAS_FIELD(_extendedMethodTypes);
uint32_t reserved;
#endif
- const uint8_t * ivarLayout;
-
- const char * name;
- WrappedPtr<method_list_t, PtrauthStrip> baseMethodList;
+ union {
+ const uint8_t * ivarLayout;
+ Class nonMetaclass;
+ };
+
+ explicit_atomic<const char *> name;
+ // With ptrauth, this is signed if it points to a small list, but
+ // may be unsigned if it points to a big list.
+ void *baseMethodList;
protocol_list_t * baseProtocols;
const ivar_list_t * ivars;
}
}
+ const char *getName() const {
+ return name.load(std::memory_order_acquire);
+ }
+
+ static const uint16_t methodListPointerDiscriminator = 0xC310;
+#if 0 // FIXME: enable this when we get a non-empty definition of __ptrauth_objc_method_list_pointer from ptrauth.h.
+ static_assert(std::is_same<
+ void * __ptrauth_objc_method_list_pointer *,
+ void * __ptrauth(ptrauth_key_method_list_pointer, 1, methodListPointerDiscriminator) *>::value,
+ "Method list pointer signing discriminator must match ptrauth.h");
+#endif
+
method_list_t *baseMethods() const {
- return baseMethodList;
+#if __has_feature(ptrauth_calls)
+ method_list_t *ptr = ptrauth_strip((method_list_t *)baseMethodList, ptrauth_key_method_list_pointer);
+ if (ptr == nullptr)
+ return nullptr;
+
+ // Don't auth if the class_ro and the method list are both in the shared cache.
+ // This is secure since they'll be read-only, and this allows the shared cache
+ // to cut down on the number of signed pointers it has.
+ bool roInSharedCache = objc::inSharedCache((uintptr_t)this);
+ bool listInSharedCache = objc::inSharedCache((uintptr_t)ptr);
+ if (roInSharedCache && listInSharedCache)
+ return ptr;
+
+ // Auth all other small lists.
+ if (ptr->isSmallList())
+ ptr = ptrauth_auth_data((method_list_t *)baseMethodList,
+ ptrauth_key_method_list_pointer,
+ ptrauth_blend_discriminator(&baseMethodList,
+ methodListPointerDiscriminator));
+ return ptr;
+#else
+ return (method_list_t *)baseMethodList;
+#endif
+ }
+
+ uintptr_t baseMethodListPtrauthData() const {
+ return ptrauth_blend_discriminator(&baseMethodList,
+ methodListPointerDiscriminator);
}
class_ro_t *duplicate() const {
- if (flags & RO_HAS_SWIFT_INITIALIZER) {
- size_t size = sizeof(*this) + sizeof(_swiftMetadataInitializer_NEVER_USE[0]);
- class_ro_t *ro = (class_ro_t *)memdup(this, size);
+ bool hasSwiftInitializer = flags & RO_HAS_SWIFT_INITIALIZER;
+
+ size_t size = sizeof(*this);
+ if (hasSwiftInitializer)
+ size += sizeof(_swiftMetadataInitializer_NEVER_USE[0]);
+
+ class_ro_t *ro = (class_ro_t *)memdup(this, size);
+
+ if (hasSwiftInitializer)
ro->_swiftMetadataInitializer_NEVER_USE[0] = this->_swiftMetadataInitializer_NEVER_USE[0];
- return ro;
+
+#if __has_feature(ptrauth_calls)
+ // Re-sign the method list pointer if it was signed.
+ // NOTE: It is possible for a signed pointer to have a signature
+ // that is all zeroes. This is indistinguishable from a raw pointer.
+ // This code will treat such a pointer as signed and re-sign it. A
+ // false positive is safe: method list pointers are either authed or
+ // stripped, so if baseMethods() doesn't expect it to be signed, it
+ // will ignore the signature.
+ void *strippedBaseMethodList = ptrauth_strip(baseMethodList, ptrauth_key_method_list_pointer);
+ void *signedBaseMethodList = ptrauth_sign_unauthenticated(strippedBaseMethodList,
+ ptrauth_key_method_list_pointer,
+ baseMethodListPtrauthData());
+ if (baseMethodList == signedBaseMethodList) {
+ ro->baseMethodList = ptrauth_auth_and_resign(baseMethodList,
+ ptrauth_key_method_list_pointer,
+ baseMethodListPtrauthData(),
+ ptrauth_key_method_list_pointer,
+ ro->baseMethodListPtrauthData());
} else {
- size_t size = sizeof(*this);
- class_ro_t *ro = (class_ro_t *)memdup(this, size);
- return ro;
+ // Special case: a class_ro_t in the shared cache pointing to a
+ // method list in the shared cache will not have a signed pointer,
+ // but the duplicate will be expected to have a signed pointer since
+ // it's not in the shared cache. Detect that and sign it.
+ bool roInSharedCache = objc::inSharedCache((uintptr_t)this);
+ bool listInSharedCache = objc::inSharedCache((uintptr_t)strippedBaseMethodList);
+ if (roInSharedCache && listInSharedCache)
+ ro->baseMethodList = ptrauth_sign_unauthenticated(strippedBaseMethodList,
+ ptrauth_key_method_list_pointer,
+ ro->baseMethodListPtrauthData());
}
+#endif
+
+ return ro;
+ }
+
+ Class getNonMetaclass() const {
+ ASSERT(flags & RO_META);
+ return nonMetaclass;
+ }
+
+ const uint8_t *getIvarLayout() const {
+ if (flags & RO_META)
+ return nullptr;
+ return ivarLayout;
}
};
return iterator(e, e);
}
-
- uint32_t countLists() {
+ inline uint32_t countLists(const std::function<const array_t * (const array_t *)> & peek) const {
if (hasArray()) {
- return array()->count;
+ return peek(array())->count;
} else if (list) {
return 1;
} else {
}
}
+ uint32_t countLists() {
+ return countLists([](array_t *x) { return x; });
+ }
+
const Ptr<List>* beginLists() const {
if (hasArray()) {
return array()->lists;
void setAndClearBits(uintptr_t set, uintptr_t clear)
{
ASSERT((set & clear) == 0);
- uintptr_t oldBits;
- uintptr_t newBits;
+ uintptr_t newBits, oldBits = LoadExclusive(&bits);
do {
- oldBits = LoadExclusive(&bits);
newBits = (oldBits | set) & ~clear;
- } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
+ } while (slowpath(!StoreReleaseExclusive(&bits, &oldBits, newBits)));
}
void setBits(uintptr_t set) {
// Get the class's ro data, even in the presence of concurrent realization.
// fixme this isn't really safe without a compiler barrier at least
// and probably a memory barrier when realizeClass changes the data field
- const class_ro_t *safe_ro() {
+ const class_ro_t *safe_ro() const {
class_rw_t *maybe_rw = data();
if (maybe_rw->flags & RW_REALIZED) {
// maybe_rw is rw
}
}
- void setClassArrayIndex(unsigned Idx) {
#if SUPPORT_INDEXED_ISA
+ void setClassArrayIndex(unsigned Idx) {
// 0 is unused as then we can rely on zero-initialisation from calloc.
ASSERT(Idx > 0);
data()->index = Idx;
-#endif
}
+#else
+ void setClassArrayIndex(__unused unsigned Idx) {
+ }
+#endif
unsigned classArrayIndex() {
#if SUPPORT_INDEXED_ISA
struct objc_class : objc_object {
+ objc_class(const objc_class&) = delete;
+ objc_class(objc_class&&) = delete;
+ void operator=(const objc_class&) = delete;
+ void operator=(objc_class&&) = delete;
// Class ISA;
Class superclass;
cache_t cache; // formerly cache pointer and vtable
class_data_bits_t bits; // class_rw_t * plus custom rr/alloc flags
+ Class getSuperclass() const {
+#if __has_feature(ptrauth_calls)
+# if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
+ if (superclass == Nil)
+ return Nil;
+
+#if SUPERCLASS_SIGNING_TREAT_UNSIGNED_AS_NIL
+ void *stripped = ptrauth_strip((void *)superclass, ISA_SIGNING_KEY);
+ if ((void *)superclass == stripped) {
+ void *resigned = ptrauth_sign_unauthenticated(stripped, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS));
+ if ((void *)superclass != resigned)
+ return Nil;
+ }
+#endif
+
+ void *result = ptrauth_auth_data((void *)superclass, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS));
+ return (Class)result;
+
+# else
+ return (Class)ptrauth_strip((void *)superclass, ISA_SIGNING_KEY);
+# endif
+#else
+ return superclass;
+#endif
+ }
+
+ void setSuperclass(Class newSuperclass) {
+#if ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ALL
+ superclass = (Class)ptrauth_sign_unauthenticated((void *)newSuperclass, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS));
+#else
+ superclass = newSuperclass;
+#endif
+ }
+
class_rw_t *data() const {
return bits.data();
}
void setInstancesRequireRawIsaRecursively(bool inherited = false);
void printInstancesRequireRawIsa(bool inherited);
+#if CONFIG_USE_PREOPT_CACHES
+ bool allowsPreoptCaches() const {
+ return !(bits.data()->flags & RW_NOPREOPT_CACHE);
+ }
+ bool allowsPreoptInlinedSels() const {
+ return !(bits.data()->flags & RW_NOPREOPT_SELS);
+ }
+ void setDisallowPreoptCaches() {
+ bits.data()->setFlags(RW_NOPREOPT_CACHE | RW_NOPREOPT_SELS);
+ }
+ void setDisallowPreoptInlinedSels() {
+ bits.data()->setFlags(RW_NOPREOPT_SELS);
+ }
+ void setDisallowPreoptCachesRecursively(const char *why);
+ void setDisallowPreoptInlinedSelsRecursively(const char *why);
+#else
+ bool allowsPreoptCaches() const { return false; }
+ bool allowsPreoptInlinedSels() const { return false; }
+ void setDisallowPreoptCaches() { }
+ void setDisallowPreoptInlinedSels() { }
+ void setDisallowPreoptCachesRecursively(const char *why) { }
+ void setDisallowPreoptInlinedSelsRecursively(const char *why) { }
+#endif
+
bool canAllocNonpointer() {
ASSERT(!isFuture());
return !instancesRequireRawIsa();
return bits.isSwiftStable_ButAllowLegacyForNow();
}
+ uint32_t swiftClassFlags() {
+ return *(uint32_t *)(&bits + 1);
+ }
+
+ bool usesSwiftRefcounting() {
+ if (!isSwiftStable()) return false;
+ return bool(swiftClassFlags() & 2); //ClassFlags::UsesSwiftRefcounting
+ }
+
+ bool canCallSwiftRR() {
+ // !hasCustomCore() is being used as a proxy for isInitialized(). All
+ // classes with Swift refcounting are !hasCustomCore() (unless there are
+ // category or swizzling shenanigans), but that bit is not set until a
+ // class is initialized. Checking isInitialized requires an extra
+ // indirection that we want to avoid on RR fast paths.
+ //
+ // In the unlikely event that someone causes a class with Swift
+ // refcounting to be hasCustomCore(), we'll fall back to sending -retain
+ // or -release, which is still correct.
+ return !hasCustomCore() && usesSwiftRefcounting();
+ }
+
bool isStubClass() const {
uintptr_t isa = (uintptr_t)isaBits();
return 1 <= isa && isa < 16;
// Check the true legacy vs stable distinguisher.
// The low bit of Swift's ClassFlags is SET for true legacy
// and UNSET for stable pretending to be legacy.
- uint32_t swiftClassFlags = *(uint32_t *)(&bits + 1);
- bool isActuallySwiftLegacy = bool(swiftClassFlags & 1);
+ bool isActuallySwiftLegacy = bool(swiftClassFlags() & 1);
return !isActuallySwiftLegacy;
}
// Returns true if this is an unrealized future class.
// Locking: To prevent concurrent realization, hold runtimeLock.
bool isFuture() const {
+ if (isStubClass())
+ return false;
return data()->flags & RW_FUTURE;
}
- bool isMetaClass() {
- ASSERT(this);
+ bool isMetaClass() const {
+ ASSERT_THIS_NOT_NULL;
ASSERT(isRealized());
#if FAST_CACHE_META
return cache.getBit(FAST_CACHE_META);
bool isMetaClassMaybeUnrealized() {
static_assert(offsetof(class_rw_t, flags) == offsetof(class_ro_t, flags), "flags alias");
static_assert(RO_META == RW_META, "flags alias");
+ if (isStubClass())
+ return false;
return data()->flags & RW_META;
}
// NOT identical to this->ISA when this is a metaclass
Class getMeta() {
- if (isMetaClass()) return (Class)this;
+ if (isMetaClassMaybeUnrealized()) return (Class)this;
else return this->ISA();
}
bool isRootClass() {
- return superclass == nil;
+ return getSuperclass() == nil;
}
bool isRootMetaclass() {
return ISA() == (Class)this;
}
+
+ // If this class does not have a name already, we can ask Swift to construct one for us.
+ const char *installMangledNameForLazilyNamedClass();
+
+ // Get the class's mangled name, or NULL if the class has a lazy
+ // name that hasn't been created yet.
+ const char *nonlazyMangledName() const {
+ return bits.safe_ro()->getName();
+ }
const char *mangledName() {
// fixme can't assert locks here
- ASSERT(this);
+ ASSERT_THIS_NOT_NULL;
- if (isRealized() || isFuture()) {
- return data()->ro()->name;
- } else {
- return ((const class_ro_t *)data())->name;
+ const char *result = nonlazyMangledName();
+
+ if (!result) {
+ // This class lazily instantiates its name. Emplace and
+ // return it.
+ result = installMangledNameForLazilyNamedClass();
}
+
+ return result;
}
const char *demangledName(bool needsLock);
return word_align(unalignedInstanceSize());
}
- size_t instanceSize(size_t extraBytes) const {
+ inline size_t instanceSize(size_t extraBytes) const {
if (fastpath(cache.hasFastInstanceSize(extraBytes))) {
return cache.fastInstanceSize(extraBytes);
}
#include "objc-private.h"
#include "objc-runtime-new.h"
#include "objc-file.h"
-#include "objc-cache.h"
#include "objc-zalloc.h"
#include <Block.h>
#include <objc/message.h>
static method_t *search_method_list(const method_list_t *mlist, SEL sel);
template<typename T> static bool method_lists_contains_any(T *mlists, T *end,
SEL sels[], size_t selcount);
-static void flushCaches(Class cls);
+static void flushCaches(Class cls, const char *func, bool (^predicate)(Class c));
static void initializeTaggedPointerObfuscator(void);
#if SUPPORT_FIXUP
static void fixupMessageRef(message_ref_t *msg);
asm("\n .globl _objc_absolute_packed_isa_class_mask" \
"\n _objc_absolute_packed_isa_class_mask = " STRINGIFY2(ISA_MASK));
-const uintptr_t objc_debug_isa_class_mask = ISA_MASK;
+// a better definition is
+// (uintptr_t)ptrauth_strip((void *)ISA_MASK, ISA_SIGNING_KEY)
+// however we know that PAC uses bits outside of MACH_VM_MAX_ADDRESS
+// so approximate the definition here to be constant
+template <typename T>
+static constexpr T coveringMask(T n) {
+ for (T mask = 0; mask != ~T{0}; mask = (mask << 1) | 1) {
+ if ((n & mask) == n) return mask;
+ }
+ return ~T{0};
+}
+const uintptr_t objc_debug_isa_class_mask = ISA_MASK & coveringMask(MACH_VM_MAX_ADDRESS - 1);
+
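
Behavior of coveringMask, restated standalone with a couple of worked values (the MACH_VM_MAX_ADDRESS figure is hypothetical, chosen only to show how PAC bits above the address range fall out of the mask):

    #include <cstdint>

    template <typename T>
    static constexpr T coveringMask(T n) {          // restated from the diff above
        for (T mask = 0; mask != ~T{0}; mask = (mask << 1) | 1) {
            if ((n & mask) == n) return mask;
        }
        return ~T{0};
    }

    // The smallest mask of the form 2^k - 1 that covers the value:
    static_assert(coveringMask<uintptr_t>(0x6) == 0x7, "");
    static_assert(coveringMask<uintptr_t>(0x1000 - 1) == 0xfff, "");
    // With a hypothetical MACH_VM_MAX_ADDRESS of 0x1000000000, the computed
    // objc_debug_isa_class_mask keeps 36 address bits and drops anything above,
    // which is where the PAC signature bits live.
    static_assert(coveringMask<uintptr_t>(0x1000000000 - 1) == 0xfffffffff, "");

    int main() { return 0; }
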
const uintptr_t objc_debug_isa_magic_mask = ISA_MAGIC_MASK;
const uintptr_t objc_debug_isa_magic_value = ISA_MAGIC_VALUE;
/*
Low two bits of mlist->entsize is used as the fixed-up marker.
- PREOPTIMIZED VERSION:
Method lists from shared cache are 1 (uniqued) or 3 (uniqued and sorted).
(Protocol method lists are not sorted because of their extra parallel data)
Runtime fixed-up method lists get 3.
- UN-PREOPTIMIZED VERSION:
- Method lists from shared cache are 1 (uniqued) or 3 (uniqued and sorted)
- Shared cache's sorting and uniquing are not trusted, but do affect the
- location of the selector name string.
- Runtime fixed-up method lists get 2.
High two bits of protocol->flags is used as the fixed-up marker.
PREOPTIMIZED VERSION:
Runtime fixed-up protocols get 3<<30.
*/
-static uint32_t fixed_up_method_list = 3;
-static uint32_t uniqued_method_list = 1;
+static const uint32_t fixed_up_method_list = 3;
+static const uint32_t uniqued_method_list = 1;
static uint32_t fixed_up_protocol = PROTOCOL_FIXED_UP_1;
static uint32_t canonical_protocol = PROTOCOL_IS_CANONICAL;
void
disableSharedCacheOptimizations(void)
{
- fixed_up_method_list = 2;
- // It is safe to set uniqued method lists to 0 as we'll never call it unless
- // the method list was already in need of being fixed up
- uniqued_method_list = 0;
fixed_up_protocol = PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2;
// It's safe to just set canonical protocol to 0 as we'll never call
// clearIsCanonical() unless isCanonical() returned true, which can't happen
{
uint8_t *base = (uint8_t *)obj;
- if (!obj) return nil;
- if (obj->isTaggedPointer()) return nil;
+ if (obj->isTaggedPointerOrNil()) return nil;
if (!obj->isClass()) return base + obj->ISA()->alignedInstanceSize();
cls = cls->data()->firstSubclass;
} else {
while (!cls->data()->nextSiblingClass && cls != top) {
- cls = cls->superclass;
+ cls = cls->getSuperclass();
if (--count == 0) {
_objc_fatal("Memory corruption in class list.");
}
static void
scanAddedClassImpl(Class cls, bool isMeta)
{
- Class NSOClass = (isMeta ? metaclassNSObject() : classNSObject());
bool setCustom = NO, inherited = NO;
if (isNSObjectSwizzled(isMeta)) {
setCustom = YES;
- } else if (cls == NSOClass) {
- // NSObject is default but we need to check categories
+ } else if (Traits::knownClassHasDefaultImpl(cls, isMeta)) {
+ // This class is known to have the default implementations,
+ // but we need to check categories.
auto &methods = as_objc_class(cls)->data()->methods();
setCustom = Traits::scanMethodLists(methods.beginCategoryMethodLists(),
methods.endCategoryMethodLists(cls));
- } else if (!isMeta && !as_objc_class(cls)->superclass) {
+ } else if (!isMeta && !as_objc_class(cls)->getSuperclass()) {
// Custom Root class
setCustom = YES;
- } else if (Traits::isCustom(as_objc_class(cls)->superclass)) {
+ } else if (Traits::isCustom(as_objc_class(cls)->getSuperclass())) {
// Superclass is custom, therefore we are too.
setCustom = YES;
inherited = YES;
}
public:
+ static bool knownClassHasDefaultImpl(Class cls, bool isMeta) {
+ // Typically only NSObject has default implementations.
+ // Allow this to be extended by overriding (to allow
+ // SwiftObject, for example).
+ Class NSOClass = (isMeta ? metaclassNSObject() : classNSObject());
+ return cls == NSOClass;
+ }
+
// Scan a class that is about to be marked Initialized for particular
// bundles of selectors, and mark the class and its children
// accordingly.
//
// +new, ±class, ±self, ±isKindOfClass:, ±respondsToSelector
struct CoreScanner : scanner::Mixin<CoreScanner, Core, PrintCustomCore> {
+ static bool knownClassHasDefaultImpl(Class cls, bool isMeta) {
+ if (scanner::Mixin<CoreScanner, Core, PrintCustomCore>::knownClassHasDefaultImpl(cls, isMeta))
+ return true;
+ if ((cls->isRootClass() || cls->isRootMetaclass())
+ && strcmp(cls->mangledName(), "_TtCs12_SwiftObject") == 0)
+ return true;
+
+ return false;
+ }
+
static bool isCustom(Class cls) {
return cls->hasCustomCore();
}
if (slowpath(PrintConnecting)) {
_objc_inform("CLASS: found category %c%s(%s)",
- cls->isMetaClass() ? '+' : '-',
+ cls->isMetaClassMaybeUnrealized() ? '+' : '-',
cls->nameForLogging(), lc.cat->name);
}
// Unique selectors in list.
for (auto& meth : *mlist) {
const char *name = sel_cname(meth.name());
- meth.name() = sel_registerNameNoLock(name, bundleCopy);
+ meth.setName(sel_registerNameNoLock(name, bundleCopy));
}
}
static void
prepareMethodLists(Class cls, method_list_t **addedLists, int addedCount,
- bool baseMethods, bool methodsFromBundle)
+ bool baseMethods, bool methodsFromBundle, const char *why)
{
runtimeLock.assertLocked();
// Therefore we need not handle any special cases here.
if (baseMethods) {
ASSERT(cls->hasCustomAWZ() && cls->hasCustomRR() && cls->hasCustomCore());
+ } else if (cls->cache.isConstantOptimizedCache()) {
+ cls->setDisallowPreoptCachesRecursively(why);
+ } else if (cls->allowsPreoptInlinedSels()) {
+#if CONFIG_USE_PREOPT_CACHES
+ SEL *sels = (SEL *)objc_opt_offsets[OBJC_OPT_INLINED_METHODS_START];
+ SEL *sels_end = (SEL *)objc_opt_offsets[OBJC_OPT_INLINED_METHODS_END];
+ if (method_lists_contains_any(addedLists, addedLists + addedCount, sels, sels_end - sels)) {
+ cls->setDisallowPreoptInlinedSelsRecursively(why);
+ }
+#endif
}
// Add method lists to array.
method_list_t *mlist = entry.cat->methodsForMeta(isMeta);
if (mlist) {
if (mcount == ATTACH_BUFSIZ) {
- prepareMethodLists(cls, mlists, mcount, NO, fromBundle);
+ prepareMethodLists(cls, mlists, mcount, NO, fromBundle, __func__);
rwe->methods.attachLists(mlists, mcount);
mcount = 0;
}
}
if (mcount > 0) {
- prepareMethodLists(cls, mlists + ATTACH_BUFSIZ - mcount, mcount, NO, fromBundle);
+ prepareMethodLists(cls, mlists + ATTACH_BUFSIZ - mcount, mcount,
+ NO, fromBundle, __func__);
rwe->methods.attachLists(mlists + ATTACH_BUFSIZ - mcount, mcount);
- if (flags & ATTACH_EXISTING) flushCaches(cls);
+ if (flags & ATTACH_EXISTING) {
+ flushCaches(cls, __func__, [](Class c){
+ // constant caches have been dealt with in prepareMethodLists
+ // if the class still is constant here, it's fine to keep
+ return !c->cache.isConstantOptimizedCache();
+ });
+ }
}
rwe->properties.attachLists(proplists + ATTACH_BUFSIZ - propcount, propcount);
// Install methods and properties that the class implements itself.
method_list_t *list = ro->baseMethods();
if (list) {
- if (list->isSmallList() && !_dyld_is_memory_immutable(list, list->byteSize()))
- _objc_fatal("CLASS: class '%s' %p small method list %p is not in immutable memory",
- cls->nameForLogging(), cls, list);
- prepareMethodLists(cls, &list, 1, YES, isBundleClass(cls));
+ prepareMethodLists(cls, &list, 1, YES, isBundleClass(cls), nullptr);
if (rwe) rwe->methods.attachLists(&list, 1);
}
// This is a misnomer: gdb_objc_realized_classes is actually a list of
// named classes not in the dyld shared cache, whether realized or not.
+// This list excludes lazily named classes, which have to be looked up
+// using a getClass hook.
NXMapTable *gdb_objc_realized_classes; // exported for debuggers in objc-gdb.h
uintptr_t objc_debug_realized_class_generation_count;
class_rw_t *rw = objc::zalloc<class_rw_t>();
class_ro_t *ro = (class_ro_t *)calloc(sizeof(class_ro_t), 1);
- ro->name = strdupIfMutable(name);
+ ro->name.store(strdupIfMutable(name), std::memory_order_relaxed);
rw->set_ro(ro);
cls->setData(rw);
cls->data()->flags = RO_FUTURE;
// special case for root metaclass
// where inst == inst->ISA() == metacls is possible
if (metacls->ISA() == metacls) {
- Class cls = metacls->superclass;
+ Class cls = metacls->getSuperclass();
ASSERT(cls->isRealized());
ASSERT(!cls->isMetaClass());
ASSERT(cls->ISA() == metacls);
ASSERT(!cls->isMetaClassMaybeUnrealized());
return cls;
}
- cls = cls->superclass;
+ cls = cls->getSuperclass();
}
#if DEBUG
_objc_fatal("cls is not an instance of metacls");
#endif
}
+ // See if the metaclass has a pointer to its nonmetaclass.
+ if (Class cls = metacls->bits.safe_ro()->getNonMetaclass())
+ return cls;
+
// try name lookup
{
Class cls = getClassExceptSomeSwift(metacls->mangledName());
objc::RRScanner::scanAddedSubClass(subcls, supercls);
objc::CoreScanner::scanAddedSubClass(subcls, supercls);
+ if (!supercls->allowsPreoptCaches()) {
+ subcls->setDisallowPreoptCachesRecursively(__func__);
+ } else if (!supercls->allowsPreoptInlinedSels()) {
+ subcls->setDisallowPreoptInlinedSelsRecursively(__func__);
+ }
+
// Special case: instancesRequireRawIsa does not propagate
// from root class to root metaclass
- if (supercls->instancesRequireRawIsa() && supercls->superclass) {
+ if (supercls->instancesRequireRawIsa() && supercls->getSuperclass()) {
subcls->setInstancesRequireRawIsaRecursively(true);
}
}
runtimeLock.assertLocked();
ASSERT(supercls->isRealized());
ASSERT(subcls->isRealized());
- ASSERT(subcls->superclass == supercls);
+ ASSERT(subcls->getSuperclass() == supercls);
objc_debug_realized_class_generation_count++;
Protocol *result = (Protocol *)NXMapGet(protocols(), name);
if (result) return result;
+ // Try table from dyld3 closure and dyld shared cache
+ result = getPreoptimizedProtocol(name);
+ if (result) return result;
+
// Try Swift-mangled equivalent of the given name.
if (char *swName = copySwiftV1MangledName(name, true/*isProtocol*/)) {
result = (Protocol *)NXMapGet(protocols(), swName);
+
+ // Try table from dyld3 closure and dyld shared cache
+ if (!result)
+ result = getPreoptimizedProtocol(swName);
+
free(swName);
- if (result) return result;
+ return result;
}
- // Try table from dyld3 closure and dyld shared cache
- return getPreoptimizedProtocol(name);
+ return nullptr;
}
class_ro_t *ro_w = make_ro_writeable(rw);
ro = rw->ro();
moveIvars(ro_w, super_ro->instanceSize);
- gdb_objc_class_changed(cls, OBJC_CLASS_IVARS_CHANGED, ro->name);
+ gdb_objc_class_changed(cls, OBJC_CLASS_IVARS_CHANGED, ro->getName());
}
}
+static void validateAlreadyRealizedClass(Class cls) {
+ ASSERT(cls->isRealized());
+#if TARGET_OS_OSX
+ class_rw_t *rw = cls->data();
+ size_t rwSize = malloc_size(rw);
+
+ // Note: this check will need some adjustment if class_rw_t's
+ // size changes to not match the malloc bucket.
+ if (rwSize != sizeof(class_rw_t))
+ _objc_fatal("realized class %p has corrupt data pointer %p", cls, rw);
+#endif
+}
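
A note on what the macOS-only check above leans on (an illustrative sketch, not part of the patch): malloc_size() reports the size of the bucket backing an allocation, so the equality test only holds while sizeof(class_rw_t) is itself a malloc size class.

    // Sketch of the assumption, with a plain calloc standing in for the runtime allocator:
    //   void *rw = calloc(1, sizeof(class_rw_t));
    //   malloc_size(rw) == sizeof(class_rw_t)   // true only while the sizes line up;
    //                                           // otherwise the fatal check above would
    //                                           // need to become a range check.
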
/***********************************************************************
* realizeClassWithoutSwift
Class metacls;
if (!cls) return nil;
- if (cls->isRealized()) return cls;
+ if (cls->isRealized()) {
+ validateAlreadyRealizedClass(cls);
+ return cls;
+ }
ASSERT(cls == remapClass(cls));
// fixme verify class is not in an un-dlopened part of the shared cache?
cls->setData(rw);
}
+ cls->cache.initializeToEmptyOrPreoptimizedInDisguise();
+
#if FAST_CACHE_META
if (isMeta) cls->cache.setBit(FAST_CACHE_META);
#endif
// or that Swift's initializers have already been called.
// fixme that assumption will be wrong if we add support
// for ObjC subclasses of Swift classes.
- supercls = realizeClassWithoutSwift(remapClass(cls->superclass), nil);
+ supercls = realizeClassWithoutSwift(remapClass(cls->getSuperclass()), nil);
metacls = realizeClassWithoutSwift(remapClass(cls->ISA()), nil);
#if SUPPORT_NONPOINTER_ISA
// Non-pointer isa disabled by environment or app SDK version
instancesRequireRawIsa = true;
}
- else if (!hackedDispatch && 0 == strcmp(ro->name, "OS_object"))
+ else if (!hackedDispatch && 0 == strcmp(ro->getName(), "OS_object"))
{
// hack for libdispatch et al - isa also acts as vtable pointer
hackedDispatch = true;
instancesRequireRawIsa = true;
}
- else if (supercls && supercls->superclass &&
+ else if (supercls && supercls->getSuperclass() &&
supercls->instancesRequireRawIsa())
{
// This is also propagated by addSubclass()
#endif
// Update superclass and metaclass in case of remapping
- cls->superclass = supercls;
+ cls->setSuperclass(supercls);
cls->initClassIsa(metacls);
// Reconcile instance variable offsets / layout.
ASSERT(remapClass(cls) == cls);
ASSERT(cls->isSwiftStable_ButAllowLegacyForNow());
ASSERT(!cls->isMetaClassMaybeUnrealized());
- ASSERT(cls->superclass);
+ ASSERT(cls->getSuperclass());
runtimeLock.unlock();
#endif
{
ASSERT(!cls->isRealized());
- if (!cls->superclass) {
+ if (!cls->getSuperclass()) {
// superclass nil. This is normal for root classes only.
return (!(cls->data()->flags & RO_ROOT));
} else {
// superclass not nil. Check if a higher superclass is missing.
- Class supercls = remapClass(cls->superclass);
- ASSERT(cls != cls->superclass);
+ Class supercls = remapClass(cls->getSuperclass());
+ ASSERT(cls != cls->getSuperclass());
ASSERT(cls != supercls);
if (!supercls) return YES;
if (supercls->isRealized()) return NO;
return cls && cls->isFuture();
}
+BOOL _class_isSwift(Class _Nullable cls)
+{
+ return cls && cls->isSwiftStable();
+}
/***********************************************************************
* _objc_flush_caches
* and subclasses thereof. Nil flushes all classes.)
* Locking: acquires runtimeLock
**********************************************************************/
-static void flushCaches(Class cls)
+static void flushCaches(Class cls, const char *func, bool (^predicate)(Class))
{
runtimeLock.assertLocked();
#if CONFIG_USE_CACHE_LOCK
mutex_locker_t lock(cacheUpdateLock);
#endif
+ const auto handler = ^(Class c) {
+ if (predicate(c)) {
+ c->cache.eraseNolock(func);
+ }
+
+ return true;
+ };
+
if (cls) {
- foreach_realized_class_and_subclass(cls, [](Class c){
- cache_erase_nolock(c);
- return true;
- });
- }
- else {
- foreach_realized_class_and_metaclass([](Class c){
- cache_erase_nolock(c);
- return true;
- });
+ foreach_realized_class_and_subclass(cls, handler);
+ } else {
+ foreach_realized_class_and_metaclass(handler);
}
}
{
{
mutex_locker_t lock(runtimeLock);
- flushCaches(cls);
- if (cls && cls->superclass && cls != cls->getIsa()) {
- flushCaches(cls->getIsa());
+ flushCaches(cls, __func__, [](Class c){
+ return !c->cache.isConstantOptimizedCache();
+ });
+ if (cls && !cls->isMetaClass() && !cls->isRootClass()) {
+ flushCaches(cls->ISA(), __func__, [](Class c){
+ return !c->cache.isConstantOptimizedCache();
+ });
} else {
// cls is a root class or root metaclass. Its metaclass is itself
// or a subclass so the metaclass caches were already flushed.
#else
mutex_locker_t lock(runtimeLock);
#endif
- cache_collect(true);
+ cache_t::collectNolock(true);
}
}
**********************************************************************/
Class readClass(Class cls, bool headerIsBundle, bool headerIsPreoptimized)
{
- const char *mangledName = cls->mangledName();
+ const char *mangledName = cls->nonlazyMangledName();
if (missingWeakSuperclass(cls)) {
// No superclass (probably weak-linked).
cls->nameForLogging());
}
addRemappedClass(cls, nil);
- cls->superclass = nil;
+ cls->setSuperclass(nil);
return nil;
}
cls->fixupBackwardDeployingStableSwift();
Class replacing = nil;
- if (Class newCls = popFutureNamedClass(mangledName)) {
- // This name was previously allocated as a future class.
- // Copy objc_class to future class's struct.
- // Preserve future's rw data block.
-
- if (newCls->isAnySwift()) {
- _objc_fatal("Can't complete future class request for '%s' "
- "because the real class is too big.",
- cls->nameForLogging());
+ if (mangledName != nullptr) {
+ if (Class newCls = popFutureNamedClass(mangledName)) {
+ // This name was previously allocated as a future class.
+ // Copy objc_class to future class's struct.
+ // Preserve future's rw data block.
+
+ if (newCls->isAnySwift()) {
+ _objc_fatal("Can't complete future class request for '%s' "
+ "because the real class is too big.",
+ cls->nameForLogging());
+ }
+
+ class_rw_t *rw = newCls->data();
+ const class_ro_t *old_ro = rw->ro();
+ memcpy(newCls, cls, sizeof(objc_class));
+
+ // Manually set address-discriminated ptrauthed fields
+ // so that newCls gets the correct signatures.
+ newCls->setSuperclass(cls->getSuperclass());
+ newCls->initIsa(cls->getIsa());
+
+ rw->set_ro((class_ro_t *)newCls->data());
+ newCls->setData(rw);
+ freeIfMutable((char *)old_ro->getName());
+ free((void *)old_ro);
+
+ addRemappedClass(cls, newCls);
+
+ replacing = cls;
+ cls = newCls;
}
-
- class_rw_t *rw = newCls->data();
- const class_ro_t *old_ro = rw->ro();
- memcpy(newCls, cls, sizeof(objc_class));
- rw->set_ro((class_ro_t *)newCls->data());
- newCls->setData(rw);
- freeIfMutable((char *)old_ro->name);
- free((void *)old_ro);
-
- addRemappedClass(cls, newCls);
-
- replacing = cls;
- cls = newCls;
}
if (headerIsPreoptimized && !replacing) {
// class list built in shared cache
// fixme strict assert doesn't work because of duplicates
// ASSERT(cls == getClass(name));
- ASSERT(getClassExceptSomeSwift(mangledName));
+ ASSERT(mangledName == nullptr || getClassExceptSomeSwift(mangledName));
} else {
- addNamedClass(cls, mangledName, replacing);
+ if (mangledName) { // some Swift generic classes can lazily generate their names
+ addNamedClass(cls, mangledName, replacing);
+ } else {
+ Class meta = cls->ISA();
+ const class_ro_t *metaRO = meta->bits.safe_ro();
+ ASSERT(metaRO->getNonMetaclass() && "Metaclass with lazy name must have a pointer to the corresponding nonmetaclass.");
+ ASSERT(metaRO->getNonMetaclass() == cls && "Metaclass nonmetaclass pointer must equal the original class.");
+ }
addClassTableEntry(cls);
}
}
}
}
- else if (newproto->size >= sizeof(protocol_t)) {
- // New protocol from an un-preoptimized image
- // with sufficient storage. Fix it up in place.
+ else {
+ // New protocol from an un-preoptimized image. Fix it up in place.
// fixme duplicate protocols from unloadable bundle
newproto->initIsa(protocol_class); // fixme pinned
insertFn(protocol_map, newproto->mangledName, newproto);
newproto, newproto->nameForLogging());
}
}
- else {
- // New protocol from an un-preoptimized image
- // with insufficient storage. Reallocate it.
- // fixme duplicate protocols from unloadable bundle
- size_t size = max(sizeof(protocol_t), (size_t)newproto->size);
- protocol_t *installedproto = (protocol_t *)calloc(size, 1);
- memcpy(installedproto, newproto, newproto->size);
- installedproto->size = (typeof(installedproto->size))size;
-
- installedproto->initIsa(protocol_class); // fixme pinned
- insertFn(protocol_map, installedproto->mangledName, installedproto);
- if (PrintProtocols) {
- _objc_inform("PROTOCOLS: protocol at %p is %s ",
- installedproto, installedproto->nameForLogging());
- _objc_inform("PROTOCOLS: protocol at %p is %s "
- "(reallocated to %p)",
- newproto, installedproto->nameForLogging(),
- installedproto);
- }
- }
}
/***********************************************************************
# if TARGET_OS_OSX
// Disable non-pointer isa if the app is too old
// (linked before OS X 10.11)
- if (dyld_get_program_sdk_version() < DYLD_MACOSX_VERSION_10_11) {
+ if (!dyld_program_sdk_at_least(dyld_platform_version_macOS_10_11)) {
DisableNonpointerIsa = true;
if (PrintRawIsa) {
_objc_inform("RAW ISA: disabling non-pointer isa because "
- "the app is too old (SDK version " SDK_FORMAT ")",
- FORMAT_SDK(dyld_get_program_sdk_version()));
+ "the app is too old.");
}
}
}
const method_list_t *mlist;
- if ((mlist = ((class_ro_t *)cls->data())->baseMethods())) {
+ if ((mlist = cls->bits.safe_ro()->baseMethods())) {
PreoptTotalMethodLists++;
if (mlist->isFixedUp()) {
PreoptOptimizedMethodLists++;
}
}
- if ((mlist=((class_ro_t *)cls->ISA()->data())->baseMethods())) {
+ if ((mlist = cls->ISA()->bits.safe_ro()->baseMethods())) {
PreoptTotalMethodLists++;
if (mlist->isFixedUp()) {
PreoptOptimizedMethodLists++;
if (cls->data()->flags & RW_LOADED) return;
// Ensure superclass-first ordering
- schedule_class_load(cls->superclass);
+ schedule_class_load(cls->getSuperclass());
add_class_to_loadable_list(cls);
cls->setInfo(RW_LOADED);
if (!imp) return nil;
IMP old = m->imp(false);
+ SEL sel = m->name();
+
m->setImp(imp);
// Cache updates are slow if cls is nil (i.e. unknown)
// RR/AWZ updates are slow if cls is nil (i.e. unknown)
// fixme build list of classes whose Methods are known externally?
- flushCaches(cls);
+ flushCaches(cls, __func__, [sel, old](Class c){
+ return c->cache.shouldFlush(sel, old);
+ });
adjustCustomFlagsForMethodChange(cls, m);
return _method_setImplementation(Nil, m, imp);
}
+extern void _method_setImplementationRawUnsafe(Method m, IMP imp)
+{
+ mutex_locker_t lock(runtimeLock);
+ m->setImp(imp);
+}
+
void method_exchangeImplementations(Method m1, Method m2)
{
mutex_locker_t lock(runtimeLock);
- IMP m1_imp = m1->imp(false);
- m1->setImp(m2->imp(false));
- m2->setImp(m1_imp);
+ IMP imp1 = m1->imp(false);
+ IMP imp2 = m2->imp(false);
+ SEL sel1 = m1->name();
+ SEL sel2 = m2->name();
+
+ m1->setImp(imp2);
+ m2->setImp(imp1);
// RR/AWZ updates are slow because class is unknown
// Cache updates are slow because class is unknown
// fixme build list of classes whose Methods are known externally?
- flushCaches(nil);
+ flushCaches(nil, __func__, [sel1, sel2, imp1, imp2](Class c){
+ return c->cache.shouldFlush(sel1, imp1) || c->cache.shouldFlush(sel2, imp2);
+ });
adjustCustomFlagsForMethodChange(nil, m1);
adjustCustomFlagsForMethodChange(nil, m2);
const char *
protocol_t::demangledName()
{
- ASSERT(hasDemangledNameField());
+ if (!hasDemangledNameField())
+ return mangledName;
if (! _demangledName) {
char *de = copySwiftV1DemangledName(mangledName, true/*isProtocol*/);
return result;
}
-static void
-class_getImpCache_nolock(Class cls, cache_t &cache, objc_imp_cache_entry *buffer, int len)
-{
- bucket_t *buckets = cache.buckets();
-
- uintptr_t count = cache.capacity();
- uintptr_t index;
- int wpos = 0;
-
- for (index = 0; index < count && wpos < len; index += 1) {
- if (buckets[index].sel()) {
- buffer[wpos].imp = buckets[index].imp(cls);
- buffer[wpos].sel = buckets[index].sel();
- wpos++;
- }
- }
-}
-
/***********************************************************************
* objc_getClassList
* Returns pointers to all classes.
if (count) {
buffer = (objc_imp_cache_entry *)calloc(1+count, sizeof(objc_imp_cache_entry));
- class_getImpCache_nolock(cls, cache, buffer, count);
+ cache.copyCacheNolock(buffer, count);
}
if (outCount) *outCount = count;
return names;
}
+Class *
+copyClassesForImage_nolock(header_info *hi, unsigned int *outCount)
+{
+ runtimeLock.assertLocked();
+ ASSERT(hi);
+
+ size_t count;
+ classref_t const *classlist = _getObjc2ClassList(hi, &count);
+ Class *classes = (Class *)
+ malloc((count+1) * sizeof(Class));
+
+ size_t shift = 0;
+ for (size_t i = 0; i < count; i++) {
+ Class cls = remapClass(classlist[i]);
+ if (cls) {
+ classes[i-shift] = cls;
+ } else {
+ shift++; // ignored weak-linked class
+ }
+ }
+ count -= shift;
+ classes[count] = nil;
+
+ if (outCount) *outCount = (unsigned int)count;
+ return classes;
+}
/***********************************************************************
return copyClassNamesForImage_nolock(hi, outCount);
}
+Class *
+objc_copyClassesForImage(const char *image, unsigned int *outCount)
+{
+ if (!image) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ mutex_locker_t lock(runtimeLock);
+
+ // Find the image.
+ header_info *hi;
+ for (hi = FirstHeader; hi != nil; hi = hi->getNext()) {
+ if (0 == strcmp(image, hi->fname())) break;
+ }
+
+ if (!hi) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ return copyClassesForImage_nolock(hi, outCount);
+}
/***********************************************************************
* objc_copyClassNamesForImageHeader
// Handle the easy case directly.
if (isRealized() || isFuture()) {
if (!isAnySwift()) {
- return data()->ro()->name;
+ return data()->ro()->getName();
}
auto rwe = data()->ext();
if (rwe && rwe->demangledName) {
char *result;
- const char *name = mangledName();
- char *de = copySwiftV1DemangledName(name);
- if (de) result = de;
- else result = strdup(name);
-
+ if (isStubClass()) {
+ asprintf(&result, "<stub class %p>", this);
+ } else if (const char *name = nonlazyMangledName()) {
+ char *de = copySwiftV1DemangledName(name);
+ if (de) result = de;
+ else result = strdup(name);
+ } else {
+ asprintf(&result, "<lazily named class %p>", this);
+ }
saveTemporaryString(result);
return result;
}
if (isRealized() || isFuture()) {
// Swift metaclasses don't have the is-Swift bit.
// We can't take this shortcut for them.
- if (!isMetaClass() && !isAnySwift()) {
- return data()->ro()->name;
+ if (isFuture() || (!isMetaClass() && !isAnySwift())) {
+ return data()->ro()->getName();
}
auto rwe = data()->ext();
if (rwe && rwe->demangledName) {
/***********************************************************************
* search_method_list_inline
**********************************************************************/
+template<class getNameFunc>
ALWAYS_INLINE static method_t *
-findMethodInSortedMethodList(SEL key, const method_list_t *list)
+findMethodInSortedMethodList(SEL key, const method_list_t *list, const getNameFunc &getName)
{
ASSERT(list);
for (count = list->count; count != 0; count >>= 1) {
probe = base + (count >> 1);
- uintptr_t probeValue = (uintptr_t)probe->name();
+ uintptr_t probeValue = (uintptr_t)getName(probe);
if (keyValue == probeValue) {
// `probe` is a match.
// Rewind looking for the *first* occurrence of this value.
// This is required for correct category overrides.
- while (probe > first && keyValue == (uintptr_t)(probe - 1)->name()) {
+ while (probe > first && keyValue == (uintptr_t)getName((probe - 1))) {
probe--;
}
return &*probe;
return nil;
}
+ALWAYS_INLINE static method_t *
+findMethodInSortedMethodList(SEL key, const method_list_t *list)
+{
+ if (list->isSmallList()) {
+ if (CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS && objc::inSharedCache((uintptr_t)list)) {
+ return findMethodInSortedMethodList(key, list, [](method_t &m) { return m.getSmallNameAsSEL(); });
+ } else {
+ return findMethodInSortedMethodList(key, list, [](method_t &m) { return m.getSmallNameAsSELRef(); });
+ }
+ } else {
+ return findMethodInSortedMethodList(key, list, [](method_t &m) { return m.big().name; });
+ }
+}
+
+template<class getNameFunc>
+ALWAYS_INLINE static method_t *
+findMethodInUnsortedMethodList(SEL sel, const method_list_t *list, const getNameFunc &getName)
+{
+ for (auto& meth : *list) {
+ if (getName(meth) == sel) return &meth;
+ }
+ return nil;
+}
+
+ALWAYS_INLINE static method_t *
+findMethodInUnsortedMethodList(SEL key, const method_list_t *list)
+{
+ if (list->isSmallList()) {
+ if (CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS && objc::inSharedCache((uintptr_t)list)) {
+ return findMethodInUnsortedMethodList(key, list, [](method_t &m) { return m.getSmallNameAsSEL(); });
+ } else {
+ return findMethodInUnsortedMethodList(key, list, [](method_t &m) { return m.getSmallNameAsSELRef(); });
+ }
+ } else {
+ return findMethodInUnsortedMethodList(key, list, [](method_t &m) { return m.big().name; });
+ }
+}
+
ALWAYS_INLINE static method_t *
search_method_list_inline(const method_list_t *mlist, SEL sel)
{
return findMethodInSortedMethodList(sel, mlist);
} else {
// Linear search of unsorted method list
- for (auto& meth : *mlist) {
- if (meth.name() == sel) return &meth;
- }
+ if (auto *m = findMethodInUnsortedMethodList(sel, mlist))
+ return m;
}
#if DEBUG
}
}
} else {
- for (auto& meth : *mlist) {
- for (size_t i = 0; i < selcount; i++) {
- if (meth.name() == sels[i]) {
- return true;
- }
+ for (size_t i = 0; i < selcount; i++) {
+ if (findMethodInUnsortedMethodList(sels[i], mlist)) {
+ return true;
}
}
}
return false;
}
+
/***********************************************************************
* getMethodNoSuper_nolock
* fixme
ASSERT(cls->isRealized());
while (cls && ((m = getMethodNoSuper_nolock(cls, sel))) == nil) {
- cls = cls->superclass;
+ cls = cls->getSuperclass();
}
return m;
ASSERT(cls->isRealized());
ASSERT(cls->isMetaClass());
- if (!lookUpImpOrNil(inst, @selector(resolveClassMethod:), cls)) {
+ if (!lookUpImpOrNilTryCache(inst, @selector(resolveClassMethod:), cls)) {
// Resolver not implemented.
return;
}
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveClassMethod adds to self->ISA() a.k.a. cls
- IMP imp = lookUpImpOrNil(inst, sel, cls);
+ IMP imp = lookUpImpOrNilTryCache(inst, sel, cls);
if (resolved && PrintResolving) {
if (imp) {
ASSERT(cls->isRealized());
SEL resolve_sel = @selector(resolveInstanceMethod:);
- if (!lookUpImpOrNil(cls, resolve_sel, cls->ISA())) {
+ if (!lookUpImpOrNilTryCache(cls, resolve_sel, cls->ISA(/*authenticated*/true))) {
// Resolver not implemented.
return;
}
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveInstanceMethod adds to self a.k.a. cls
- IMP imp = lookUpImpOrNil(inst, sel, cls);
+ IMP imp = lookUpImpOrNilTryCache(inst, sel, cls);
if (resolved && PrintResolving) {
if (imp) {
// try [nonMetaClass resolveClassMethod:sel]
// and [cls resolveInstanceMethod:sel]
resolveClassMethod(inst, sel, cls);
- if (!lookUpImpOrNil(inst, sel, cls)) {
+ if (!lookUpImpOrNilTryCache(inst, sel, cls)) {
resolveInstanceMethod(inst, sel, cls);
}
}
// chances are that calling the resolver has populated the cache
// so attempt using it
- return lookUpImpOrForward(inst, sel, cls, behavior | LOOKUP_CACHE);
+ return lookUpImpOrForwardTryCache(inst, sel, cls, behavior);
}
if (!cacheIt) return;
}
#endif
- cache_fill(cls, sel, imp, receiver);
+ cls->cache.insert(sel, imp, receiver);
}
/***********************************************************************
-* lookUpImpOrForward.
-* The standard IMP lookup.
+* realizeAndInitializeIfNeeded_locked
+* Realize the given class if not already realized, and initialize it if
+* not already initialized.
+* inst is an instance of cls or a subclass, or nil if none is known.
+* cls is the class to initialize and realize.
+* initialize is true to initialize the class, false to skip initialization.
+**********************************************************************/
+static Class
+realizeAndInitializeIfNeeded_locked(id inst, Class cls, bool initialize)
+{
+ runtimeLock.assertLocked();
+ if (slowpath(!cls->isRealized())) {
+ cls = realizeClassMaybeSwiftAndLeaveLocked(cls, runtimeLock);
+ // runtimeLock may have been dropped but is now locked again
+ }
+
+ if (slowpath(initialize && !cls->isInitialized())) {
+ cls = initializeAndLeaveLocked(cls, inst, runtimeLock);
+ // runtimeLock may have been dropped but is now locked again
+
+ // If sel == initialize, class_initialize will send +initialize and
+ // then the messenger will send +initialize again after this
+ // procedure finishes. Of course, if this is not being called
+ // from the messenger then it won't happen. 2778172
+ }
+ return cls;
+}
+
+/***********************************************************************
+* lookUpImpOrForward / lookUpImpOrForwardTryCache / lookUpImpOrNilTryCache
+* The standard IMP lookup.
+*
+* The TryCache variant attempts a fast-path lookup in the IMP Cache.
+* Most callers should use lookUpImpOrForwardTryCache with LOOKUP_INITIALIZE
+*
* Without LOOKUP_INITIALIZE: tries to avoid +initialize (but sometimes fails)
-* Without LOOKUP_CACHE: skips optimistic unlocked lookup (but uses cache elsewhere)
-* Most callers should use LOOKUP_INITIALIZE and LOOKUP_CACHE
-* inst is an instance of cls or a subclass thereof, or nil if none is known.
+* With LOOKUP_NIL: returns nil on negative cache hits
+*
+* inst is an instance of cls or a subclass thereof, or nil if none is known.
* If cls is an un-initialized metaclass then a non-nil inst is faster.
* May return _objc_msgForward_impcache. IMPs destined for external use
* must be converted to _objc_msgForward or _objc_msgForward_stret.
* If you don't want forwarding at all, use LOOKUP_NIL.
**********************************************************************/
+ALWAYS_INLINE
+static IMP _lookUpImpTryCache(id inst, SEL sel, Class cls, int behavior)
+{
+ runtimeLock.assertUnlocked();
+
+ if (slowpath(!cls->isInitialized())) {
+ // see comment in lookUpImpOrForward
+ return lookUpImpOrForward(inst, sel, cls, behavior);
+ }
+
+ IMP imp = cache_getImp(cls, sel);
+ if (imp != NULL) goto done;
+#if CONFIG_USE_PREOPT_CACHES
+ if (fastpath(cls->cache.isConstantOptimizedCache(/* strict */true))) {
+ imp = cache_getImp(cls->cache.preoptFallbackClass(), sel);
+ }
+#endif
+ if (slowpath(imp == NULL)) {
+ return lookUpImpOrForward(inst, sel, cls, behavior);
+ }
+
+done:
+ if ((behavior & LOOKUP_NIL) && imp == (IMP)_objc_msgForward_impcache) {
+ return nil;
+ }
+ return imp;
+}
+
+IMP lookUpImpOrForwardTryCache(id inst, SEL sel, Class cls, int behavior)
+{
+ return _lookUpImpTryCache(inst, sel, cls, behavior);
+}
+
+IMP lookUpImpOrNilTryCache(id inst, SEL sel, Class cls, int behavior)
+{
+ return _lookUpImpTryCache(inst, sel, cls, behavior | LOOKUP_NIL);
+}
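
A short usage sketch (illustrative only; `obj` and `sel` are placeholders) contrasting the two TryCache entry points on a miss, in the same way the resolver code above calls them:

    Class cls = obj->getIsa();
    if (!lookUpImpOrNilTryCache(obj, sel, cls)) {
        // Nil here means a negative cache hit or no implementation anywhere.
        // The OrForward variant would instead hand back the forwarding IMP:
        IMP imp = lookUpImpOrForwardTryCache(obj, sel, cls, LOOKUP_INITIALIZE);
        (void)imp;
    }
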
+
+NEVER_INLINE
IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior)
{
const IMP forward_imp = (IMP)_objc_msgForward_impcache;
runtimeLock.assertUnlocked();
- // Optimistic cache lookup
- if (fastpath(behavior & LOOKUP_CACHE)) {
- imp = cache_getImp(cls, sel);
- if (imp) goto done_nolock;
+ if (slowpath(!cls->isInitialized())) {
+ // The first message sent to a class is often +new or +alloc, or +self
+ // which goes through objc_opt_* or various optimized entry points.
+ //
+ // However, the class isn't realized/initialized yet at this point,
+ // and the optimized entry points fall down through objc_msgSend,
+ // which ends up here.
+ //
+ // We really want to avoid caching these, as it can cause IMP caches
+ // to be made with a single entry forever.
+ //
+ // Note that this check is racy as several threads might try to
+ // message a given class for the first time at the same time,
+ // in which case we might cache anyway.
+ behavior |= LOOKUP_NOCACHE;
}
// runtimeLock is held during isRealized and isInitialized checking
// objc_duplicateClass, objc_initializeClassPair or objc_allocateClassPair.
checkIsKnownClass(cls);
- if (slowpath(!cls->isRealized())) {
- cls = realizeClassMaybeSwiftAndLeaveLocked(cls, runtimeLock);
- // runtimeLock may have been dropped but is now locked again
- }
-
- if (slowpath((behavior & LOOKUP_INITIALIZE) && !cls->isInitialized())) {
- cls = initializeAndLeaveLocked(cls, inst, runtimeLock);
- // runtimeLock may have been dropped but is now locked again
-
- // If sel == initialize, class_initialize will send +initialize and
- // then the messenger will send +initialize again after this
- // procedure finishes. Of course, if this is not being called
- // from the messenger then it won't happen. 2778172
- }
-
+ cls = realizeAndInitializeIfNeeded_locked(inst, cls, behavior & LOOKUP_INITIALIZE);
+ // runtimeLock may have been dropped but is now locked again
runtimeLock.assertLocked();
curClass = cls;
- // The code used to lookpu the class's cache again right after
+ // The code used to lookup the class's cache again right after
// we take the lock but for the vast majority of the cases
// evidence shows this is a miss most of the time, hence a time loss.
//
// kind of cache lookup is class_getInstanceMethod().
for (unsigned attempts = unreasonableClassCount();;) {
- // curClass method list.
- Method meth = getMethodNoSuper_nolock(curClass, sel);
- if (meth) {
- imp = meth->imp(false);
- goto done;
- }
+ if (curClass->cache.isConstantOptimizedCache(/* strict */true)) {
+#if CONFIG_USE_PREOPT_CACHES
+ imp = cache_getImp(curClass, sel);
+ if (imp) goto done_unlock;
+ curClass = curClass->cache.preoptFallbackClass();
+#endif
+ } else {
+ // curClass method list.
+ Method meth = getMethodNoSuper_nolock(curClass, sel);
+ if (meth) {
+ imp = meth->imp(false);
+ goto done;
+ }
- if (slowpath((curClass = curClass->superclass) == nil)) {
- // No implementation found, and method resolver didn't help.
- // Use forwarding.
- imp = forward_imp;
- break;
+ if (slowpath((curClass = curClass->getSuperclass()) == nil)) {
+ // No implementation found, and method resolver didn't help.
+ // Use forwarding.
+ imp = forward_imp;
+ break;
+ }
}
// Halt if there is a cycle in the superclass chain.
}
done:
- log_and_fill_cache(cls, imp, sel, inst, curClass);
+ if (fastpath((behavior & LOOKUP_NOCACHE) == 0)) {
+#if CONFIG_USE_PREOPT_CACHES
+ while (cls->cache.isConstantOptimizedCache(/* strict */true)) {
+ cls = cls->cache.preoptFallbackClass();
+ }
+#endif
+ log_and_fill_cache(cls, imp, sel, inst, curClass);
+ }
+ done_unlock:
runtimeLock.unlock();
- done_nolock:
if (slowpath((behavior & LOOKUP_NIL) && imp == forward_imp)) {
return nil;
}
**********************************************************************/
IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel)
{
- Method meth;
IMP imp;
// fixme this is incomplete - no resolver, +initialize -
ASSERT(sel == SEL_cxx_construct || sel == SEL_cxx_destruct);
// Search cache first.
- imp = cache_getImp(cls, sel);
- if (imp) return imp;
+ //
+ // If the cache used for the lookup is preoptimized,
+ // we ask for `_objc_msgForward_impcache` to be returned on cache misses,
+ // so that there's no TOCTOU race between using `isConstantOptimizedCache`
+ // and calling cache_getImp() when not under the runtime lock.
+ //
+ // For dynamic caches, a miss will return `nil`
+ imp = cache_getImp(cls, sel, _objc_msgForward_impcache);
- // Cache miss. Search method list.
+ if (slowpath(imp == nil)) {
+ // Cache miss. Search method list.
- mutex_locker_t lock(runtimeLock);
+ mutex_locker_t lock(runtimeLock);
- meth = getMethodNoSuper_nolock(cls, sel);
+ if (auto meth = getMethodNoSuper_nolock(cls, sel)) {
+ // Hit in method list. Cache it.
+ imp = meth->imp(false);
+ } else {
+ imp = _objc_msgForward_impcache;
+ }
- if (meth) {
- // Hit in method list. Cache it.
- cache_fill(cls, sel, meth->imp(false), nil);
- return meth->imp(false);
- } else {
- // Miss in method list. Cache objc_msgForward.
- cache_fill(cls, sel, _objc_msgForward_impcache, nil);
- return _objc_msgForward_impcache;
+ // Note: because we did not hold the runtime lock during the
+ // cache_getImp() above, isConstantOptimizedCache might have flipped
+ // since then, so we need to double-check before inserting.
+ if (!cls->cache.isConstantOptimizedCache(true /* strict */)) {
+ cls->cache.insert(sel, imp, nil);
+ }
}
+
+ return imp;
}
ASSERT(cls->isRealized());
- for ( ; cls; cls = cls->superclass) {
+ for ( ; cls; cls = cls->getSuperclass()) {
for (auto& prop : cls->data()->properties()) {
if (0 == strcmp(name, prop.name)) {
return (objc_property_t)∝
objc::RRScanner::scanInitializedClass(cls, metacls);
objc::CoreScanner::scanInitializedClass(cls, metacls);
+#if CONFIG_USE_PREOPT_CACHES
+ cls->cache.maybeConvertToPreoptimized();
+ metacls->cache.maybeConvertToPreoptimized();
+#endif
+
+ if (PrintInitializing) {
+ _objc_inform("INITIALIZE: thread %p: setInitialized(%s)",
+ objc_thread_self(), cls->nameForLogging());
+ }
// Update the +initialize flags.
// Do this last.
metacls->changeInfo(RW_INITIALIZED, RW_INITIALIZING);
});
}
+#if CONFIG_USE_PREOPT_CACHES
+void objc_class::setDisallowPreoptCachesRecursively(const char *why)
+{
+ Class cls = (Class)this;
+ runtimeLock.assertLocked();
+
+ if (!allowsPreoptCaches()) return;
+
+ foreach_realized_class_and_subclass(cls, [=](Class c){
+ if (!c->allowsPreoptCaches()) {
+ return false;
+ }
+
+ if (c->cache.isConstantOptimizedCache(/* strict */true)) {
+ c->cache.eraseNolock(why);
+ } else {
+ if (PrintCaches) {
+ _objc_inform("CACHES: %sclass %s: disallow preopt cache (from %s)",
+ isMetaClass() ? "meta" : "",
+ nameForLogging(), why);
+ }
+ c->setDisallowPreoptCaches();
+ }
+ return true;
+ });
+}
+
+void objc_class::setDisallowPreoptInlinedSelsRecursively(const char *why)
+{
+ Class cls = (Class)this;
+ runtimeLock.assertLocked();
+
+ if (!allowsPreoptInlinedSels()) return;
+
+ foreach_realized_class_and_subclass(cls, [=](Class c){
+ if (!c->allowsPreoptInlinedSels()) {
+ return false;
+ }
+
+ if (PrintCaches) {
+ _objc_inform("CACHES: %sclass %s: disallow sel-inlined preopt cache (from %s)",
+ isMetaClass() ? "meta" : "",
+ nameForLogging(), why);
+ }
+
+ c->setDisallowPreoptInlinedSels();
+ if (c->cache.isConstantOptimizedCacheWithInlinedSels()) {
+ c->cache.eraseNolock(why);
+ }
+ return true;
+ });
+}
+#endif
/***********************************************************************
* Choose a class index.
#endif
}
+static const char *empty_lazyClassNamer(Class cls __unused) {
+ return nullptr;
+}
+
+static ChainedHookFunction<objc_hook_lazyClassNamer> LazyClassNamerHook{empty_lazyClassNamer};
+
+void objc_setHook_lazyClassNamer(_Nonnull objc_hook_lazyClassNamer newValue,
+ _Nonnull objc_hook_lazyClassNamer * _Nonnull oldOutValue) {
+ LazyClassNamerHook.set(newValue, oldOutValue);
+}
+
+const char * objc_class::installMangledNameForLazilyNamedClass() {
+ auto lazyClassNamer = LazyClassNamerHook.get();
+ if (!*lazyClassNamer) {
+ _objc_fatal("Lazily named class %p with no lazy name handler registered", this);
+ }
+
+ // If this is called on a metaclass, extract the original class
+ // and make it do the installation instead. It will install
+ // the metaclass's name too.
+ if (isMetaClass()) {
+ Class nonMeta = bits.safe_ro()->getNonMetaclass();
+ return nonMeta->installMangledNameForLazilyNamedClass();
+ }
+
+ Class cls = (Class)this;
+ Class metaclass = ISA();
+
+ const char *name = lazyClassNamer((Class)this);
+ if (!name) {
+ _objc_fatal("Lazily named class %p wasn't named by lazy name handler", this);
+ }
+
+ // Emplace the name into the class_ro_t. If we lose the race,
+ // then we'll free our name and use whatever got placed there
+ // instead of our name.
+ const char *previously = NULL;
+ class_ro_t *ro = (class_ro_t *)cls->bits.safe_ro();
+ bool wonRace = ro->name.compare_exchange_strong(previously, name, std::memory_order_release, std::memory_order_acquire);
+ if (!wonRace) {
+ free((void *)name);
+ name = previously;
+ }
+
+ // Emplace whatever name won the race in the metaclass too.
+ class_ro_t *metaRO = (class_ro_t *)metaclass->bits.safe_ro();
+
+ // Write our pointer if the current value is NULL. There's no
+ // need to loop or check success, since the only way this can
+ // fail is if another thread succeeded in writing the exact
+ // same pointer.
+ const char *expected = NULL;
+ metaRO->name.compare_exchange_strong(expected, name, std::memory_order_release, std::memory_order_acquire);
+
+ return name;
+}
/***********************************************************************
* Update custom RR and AWZ when a method changes its IMP
const uint8_t *
class_getIvarLayout(Class cls)
{
- if (cls) return cls->data()->ro()->ivarLayout;
+ if (cls) return cls->data()->ro()->getIvarLayout();
else return nil;
}
{
if (!cls) return;
+ ASSERT(!cls->isMetaClass());
+
mutex_locker_t lock(runtimeLock);
checkIsKnownClass(cls);
class_ro_t *ro_w = make_ro_writeable(cls->data());
- try_free(ro_w->ivarLayout);
+ try_free(ro_w->getIvarLayout());
ro_w->ivarLayout = ustrdupMaybeNil(layout);
}
{
mutex_locker_t lock(runtimeLock);
- for ( ; cls; cls = cls->superclass) {
+ for ( ; cls; cls = cls->getSuperclass()) {
if (auto ivars = cls->data()->ro()->ivars) {
if (ivars->containsIvar(ivar)) {
return cls;
{
mutex_locker_t lock(runtimeLock);
- for ( ; cls; cls = cls->superclass) {
+ for ( ; cls; cls = cls->getSuperclass()) {
ivar_t *ivar = getIvar(cls, name);
if (ivar) {
return ivar;
return NO;
}
+static void
+addMethods_finish(Class cls, method_list_t *newlist)
+{
+ auto rwe = cls->data()->extAllocIfNeeded();
+
+ if (newlist->count > 1) {
+ method_t::SortBySELAddress sorter;
+ std::stable_sort(&newlist->begin()->big(), &newlist->end()->big(), sorter);
+ }
+
+ prepareMethodLists(cls, &newlist, 1, NO, NO, __func__);
+ rwe->methods.attachLists(&newlist, 1);
+
+ // If the class being modified has a constant cache,
+ // then all children classes are flattened constant caches
+ // and need to be flushed as well.
+ flushCaches(cls, __func__, [](Class c){
+ // constant caches have been dealt with in prepareMethodLists;
+ // if the class's cache is still constant here, it's fine to keep it
+ return !c->cache.isConstantOptimizedCache();
+ });
+}
+
/**********************************************************************
* addMethod
result = _method_setImplementation(cls, m, imp);
}
} else {
- auto rwe = cls->data()->extAllocIfNeeded();
-
// fixme optimize
method_list_t *newlist;
newlist = (method_list_t *)calloc(method_list_t::byteSize(method_t::bigSize, 1), 1);
first.types = strdupIfMutable(types);
first.imp = imp;
- prepareMethodLists(cls, &newlist, 1, NO, NO);
- rwe->methods.attachLists(&newlist, 1);
- flushCaches(cls);
-
+ addMethods_finish(cls, newlist);
result = nil;
}
}
if (newlist->count > 0) {
- auto rwe = cls->data()->extAllocIfNeeded();
-
// fixme resize newlist because it may have been over-allocated above.
// Note that realloc() alone doesn't work due to ptrauth.
-
- method_t::SortBySELAddress sorter;
- std::stable_sort(&newlist->begin()->big(), &newlist->end()->big(), sorter);
-
- prepareMethodLists(cls, &newlist, 1, NO, NO);
- rwe->methods.attachLists(&newlist, 1);
- flushCaches(cls);
+ addMethods_finish(cls, newlist);
} else {
// Attaching the method list to the class consumes it. If we don't
// do that, we have to free the memory ourselves.
duplicate = alloc_class_for_subclass(original, extraBytes);
duplicate->initClassIsa(original->ISA());
- duplicate->superclass = original->superclass;
+ duplicate->setSuperclass(original->getSuperclass());
duplicate->cache.initializeToEmpty();
duplicate->chooseClassArrayIndex();
- if (duplicate->superclass) {
- addSubclass(duplicate->superclass, duplicate);
+ if (duplicate->getSuperclass()) {
+ addSubclass(duplicate->getSuperclass(), duplicate);
// duplicate->isa == original->isa so don't addSubclass() for it
} else {
addRootClass(duplicate);
// Don't methodize class - construction above is correct
- addNamedClass(duplicate, ro->name);
+ addNamedClass(duplicate, ro->getName());
addClassTableEntry(duplicate, /*addMeta=*/false);
if (PrintConnecting) {
meta->setInstanceSize(meta_ro_w->instanceStart);
}
- cls_ro_w->name = strdupIfMutable(name);
- meta_ro_w->name = strdupIfMutable(name);
+ cls_ro_w->name.store(strdupIfMutable(name), std::memory_order_release);
+ meta_ro_w->name.store(strdupIfMutable(name), std::memory_order_release);
cls_ro_w->ivarLayout = &UnsetLayout;
cls_ro_w->weakIvarLayout = &UnsetLayout;
if (superclass) {
meta->initClassIsa(superclass->ISA()->ISA());
- cls->superclass = superclass;
- meta->superclass = superclass->ISA();
+ cls->setSuperclass(superclass);
+ meta->setSuperclass(superclass->ISA());
addSubclass(superclass, cls);
addSubclass(superclass->ISA(), meta);
} else {
meta->initClassIsa(meta);
- cls->superclass = Nil;
- meta->superclass = cls;
+ cls->setSuperclass(Nil);
+ meta->setSuperclass(cls);
addRootClass(cls);
addSubclass(cls, meta);
}
(cls->ISA()->data()->flags & RW_CONSTRUCTED))
{
_objc_inform("objc_registerClassPair: class '%s' was already "
- "registered!", cls->data()->ro()->name);
+ "registered!", cls->data()->ro()->getName());
return;
}
{
_objc_inform("objc_registerClassPair: class '%s' was not "
"allocated with objc_allocateClassPair!",
- cls->data()->ro()->name);
+ cls->data()->ro()->getName());
return;
}
cls->changeInfo(RW_CONSTRUCTED, RW_CONSTRUCTING | RW_REALIZING);
// Add to named class table.
- addNamedClass(cls, cls->data()->ro()->name);
+ addNamedClass(cls, cls->data()->ro()->getName());
}
// Fail if the superclass isn't kosher.
bool rootOK = bits->data()->flags & RO_ROOT;
- if (!verifySuperclass(bits->superclass, rootOK)){
+ if (!verifySuperclass(bits->getSuperclass(), rootOK)){
return nil;
}
// superclass's subclass list
if (cls->isRealized()) {
- Class supercls = cls->superclass;
+ Class supercls = cls->getSuperclass();
if (supercls) {
removeSubclass(supercls, cls);
} else {
auto rwe = rw->ext();
auto ro = rw->ro();
- cache_delete(cls);
+ cls->cache.destroy();
if (rwe) {
for (auto& meth : rwe->methods) {
rwe->protocols.tryFree();
}
- try_free(ro->ivarLayout);
+ try_free(ro->getIvarLayout());
try_free(ro->weakIvarLayout);
- try_free(ro->name);
+ try_free(ro->getName());
try_free(ro);
objc::zfree(rwe);
objc::zfree(rw);
// disposing still-unregistered class is OK!
_objc_inform("objc_disposeClassPair: class '%s' was not "
"allocated with objc_allocateClassPair!",
- cls->data()->ro()->name);
+ cls->data()->ro()->getName());
return;
}
if (cls->isMetaClass()) {
_objc_inform("objc_disposeClassPair: class '%s' is a metaclass, "
- "not a class!", cls->data()->ro()->name);
+ "not a class!", cls->data()->ro()->getName());
return;
}
// Shouldn't have any live subclasses.
if (cls->data()->firstSubclass) {
_objc_inform("objc_disposeClassPair: class '%s' still has subclasses, "
- "including '%s'!", cls->data()->ro()->name,
+ "including '%s'!", cls->data()->ro()->getName(),
cls->data()->firstSubclass->nameForLogging());
}
if (cls->ISA()->data()->firstSubclass) {
_objc_inform("objc_disposeClassPair: class '%s' still has subclasses, "
- "including '%s'!", cls->data()->ro()->name,
+ "including '%s'!", cls->data()->ro()->getName(),
cls->ISA()->data()->firstSubclass->nameForLogging());
}
static id
_object_copyFromZone(id oldObj, size_t extraBytes, void *zone)
{
- if (!oldObj) return nil;
- if (oldObj->isTaggedPointer()) return oldObj;
+ if (oldObj->isTaggedPointerOrNil()) return oldObj;
// fixme this doesn't handle C++ ivars correctly (#4619414)
- Class cls = oldObj->ISA();
+ Class cls = oldObj->ISA(/*authenticated*/true);
size_t size;
id obj = _class_createInstanceFromZone(cls, extraBytes, zone,
OBJECT_CONSTRUCT_NONE, false, &size);
// This order is important.
if (cxx) object_cxxDestruct(obj);
- if (assoc) _object_remove_assocations(obj);
+ if (assoc) _object_remove_assocations(obj, /*deallocating*/true);
obj->clearDeallocating();
}
unsigned objc_debug_taggedpointer_ext_payload_rshift = 0;
Class objc_debug_taggedpointer_ext_classes[1] = { nil };
+uintptr_t objc_debug_constant_cfstring_tag_bits = 0;
+
static void
disableTaggedPointers() { }
unsigned objc_debug_taggedpointer_ext_payload_rshift = _OBJC_TAG_EXT_PAYLOAD_RSHIFT;
// objc_debug_taggedpointer_ext_classes is defined in objc-msg-*.s
+#if OBJC_SPLIT_TAGGED_POINTERS
+uint8_t objc_debug_tag60_permutations[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+uintptr_t objc_debug_constant_cfstring_tag_bits = _OBJC_TAG_EXT_MASK | ((uintptr_t)(OBJC_TAG_Constant_CFString - OBJC_TAG_First52BitPayload) << _OBJC_TAG_EXT_SLOT_SHIFT);
+#else
+uintptr_t objc_debug_constant_cfstring_tag_bits = 0;
+#endif
+
static void
disableTaggedPointers()
{
static Class *
classSlotForBasicTagIndex(objc_tag_index_t tag)
{
+#if OBJC_SPLIT_TAGGED_POINTERS
+ uintptr_t obfuscatedTag = _objc_basicTagToObfuscatedTag(tag);
+ return &objc_tag_classes[obfuscatedTag];
+#else
uintptr_t tagObfuscator = ((objc_debug_taggedpointer_obfuscator
>> _OBJC_TAG_INDEX_SHIFT)
& _OBJC_TAG_INDEX_MASK);
uintptr_t obfuscatedTag = tag ^ tagObfuscator;
+
// Array index in objc_tag_classes includes the tagged bit itself
-#if SUPPORT_MSB_TAGGED_POINTERS
+# if SUPPORT_MSB_TAGGED_POINTERS
return &objc_tag_classes[0x8 | obfuscatedTag];
-#else
+# else
return &objc_tag_classes[(obfuscatedTag << 1) | 1];
+# endif
#endif
}
if (tag >= OBJC_TAG_First52BitPayload && tag <= OBJC_TAG_Last52BitPayload) {
int index = tag - OBJC_TAG_First52BitPayload;
+#if OBJC_SPLIT_TAGGED_POINTERS
+ if (tag >= OBJC_TAG_FirstUnobfuscatedSplitTag)
+ return &objc_tag_ext_classes[index];
+#endif
uintptr_t tagObfuscator = ((objc_debug_taggedpointer_obfuscator
>> _OBJC_TAG_EXT_INDEX_SHIFT)
& _OBJC_TAG_EXT_INDEX_MASK);
static void
initializeTaggedPointerObfuscator(void)
{
- if (sdkIsOlderThan(10_14, 12_0, 12_0, 5_0, 3_0) ||
- // Set the obfuscator to zero for apps linked against older SDKs,
- // in case they're relying on the tagged pointer representation.
- DisableTaggedPointerObfuscation) {
- objc_debug_taggedpointer_obfuscator = 0;
- } else {
+ if (!DisableTaggedPointerObfuscation && dyld_program_sdk_at_least(dyld_fall_2018_os_versions)) {
// Pull random data into the variable, then shift away all non-payload bits.
arc4random_buf(&objc_debug_taggedpointer_obfuscator,
sizeof(objc_debug_taggedpointer_obfuscator));
objc_debug_taggedpointer_obfuscator &= ~_OBJC_TAG_MASK;
+
+#if OBJC_SPLIT_TAGGED_POINTERS
+ // The obfuscator doesn't apply to any of the extended tag mask or the no-obfuscation bit.
+ objc_debug_taggedpointer_obfuscator &= ~(_OBJC_TAG_EXT_MASK | _OBJC_TAG_NO_OBFUSCATION_MASK);
+
+ // Shuffle the first seven entries of the tag permutator.
+ int max = 7;
+ for (int i = max - 1; i >= 0; i--) {
+ int target = arc4random_uniform(i + 1);
+ swap(objc_debug_tag60_permutations[i],
+ objc_debug_tag60_permutations[target]);
+ }
+#endif
+ } else {
+ // Set the obfuscator to zero for apps linked against older SDKs,
+ // in case they're relying on the tagged pointer representation.
+ objc_debug_taggedpointer_obfuscator = 0;
}
}
ASSERT(cls->isRealized());
ASSERT(newSuper->isRealized());
- oldSuper = cls->superclass;
+ oldSuper = cls->getSuperclass();
removeSubclass(oldSuper, cls);
removeSubclass(oldSuper->ISA(), cls->ISA());
- cls->superclass = newSuper;
- cls->ISA()->superclass = newSuper->ISA();
+ cls->setSuperclass(newSuper);
+ cls->ISA()->setSuperclass(newSuper->ISA(/*authenticated*/true));
addSubclass(newSuper, cls);
addSubclass(newSuper->ISA(), cls->ISA());
// Flush subclass's method caches.
- flushCaches(cls);
- flushCaches(cls->ISA());
-
+ flushCaches(cls, __func__, [](Class c){ return true; });
+ flushCaches(cls->ISA(), __func__, [](Class c){ return true; });
+
return oldSuper;
}
* Imports.
**********************************************************************/
+#include <os/feature_private.h> // os_feature_enabled_simple()
#include "objc-private.h"
#include "objc-loadmethod.h"
#include "objc-file.h"
#undef OPTION
};
+namespace objc {
+ int PageCountWarning = 50; // Default value if the environment variable is not set
+}
// objc's key for pthread_getspecific
#if SUPPORT_DIRECT_THREAD_KEYS
#endif
}
+/***********************************************************************
+* SetPageCountWarning
+* Convert environment variable value to integer value.
+* If the value is valid, set the global PageCountWarning value.
+**********************************************************************/
+void SetPageCountWarning(const char* envvar) {
+ if (envvar) {
+ long result = strtol(envvar, NULL, 10);
+ if (result <= INT_MAX && result >= -1) {
+ int32_t var = (int32_t)result;
+ if (var != 0) { // 0 is not a valid value for the env var
+ objc::PageCountWarning = var;
+ }
+ }
+ }
+}
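
A few concrete values and what the parsing above does with them (derived directly from the strtol and range checks; the downstream use of PageCountWarning is not shown here):

    //   OBJC_DEBUG_POOL_DEPTH=50    -> objc::PageCountWarning = 50
    //   OBJC_DEBUG_POOL_DEPTH=-1    -> objc::PageCountWarning = -1
    //   OBJC_DEBUG_POOL_DEPTH=0     -> rejected (0 is not a valid value); default of 50 kept
    //   OBJC_DEBUG_POOL_DEPTH=junk  -> strtol() returns 0, so also rejected
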
/***********************************************************************
* environ_init
return;
}
+ // Turn off autorelease LRU coalescing by default for apps linked against
+ // older SDKs. LRU coalescing can reorder releases and certain older apps
+ // are accidentally relying on the ordering.
+ // rdar://problem/63886091
+ if (!dyld_program_sdk_at_least(dyld_fall_2020_os_versions))
+ DisableAutoreleaseCoalescingLRU = true;
+
bool PrintHelp = false;
bool PrintOptions = false;
bool maybeMallocDebugging = false;
continue;
}
+ if (0 == strncmp(*p, "OBJC_DEBUG_POOL_DEPTH=", 22)) {
+ SetPageCountWarning(*p + 22);
+ continue;
+ }
+
const char *value = strchr(*p, '=');
if (!*value) continue;
value++;
*opt->var = (0 == strcmp(value, "YES"));
break;
}
- }
+ }
}
- // Special case: enable some autorelease pool debugging
+ // Special case: enable some autorelease pool debugging
// when some malloc debugging is enabled
// and OBJC_DEBUG_POOL_ALLOCATION is not set to something other than NO.
if (maybeMallocDebugging) {
}
}
+ if (!os_feature_enabled_simple(objc4, preoptimizedCaches, true)) {
+ DisablePreoptCaches = true;
+ }
+
// Print OBJC_HELP and OBJC_PRINT_OPTIONS output.
if (PrintHelp || PrintOptions) {
if (PrintHelp) {
return _object_get_associative_reference(object, key);
}
-static void
-_base_objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy)
-{
- _object_set_associative_reference(object, key, value, policy);
-}
-
-static ChainedHookFunction<objc_hook_setAssociatedObject> SetAssocHook{_base_objc_setAssociatedObject};
+typedef void (*objc_hook_setAssociatedObject)(id _Nonnull object, const void * _Nonnull key,
+ id _Nullable value, objc_AssociationPolicy policy);
void
objc_setHook_setAssociatedObject(objc_hook_setAssociatedObject _Nonnull newValue,
objc_hook_setAssociatedObject _Nullable * _Nonnull outOldValue) {
- SetAssocHook.set(newValue, outOldValue);
+ // See objc_object::setHasAssociatedObjects() for a replacement
}
void
objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy)
{
- SetAssocHook.get()(object, key, value, policy);
+ _object_set_associative_reference(object, key, value, policy);
}
-
void objc_removeAssociatedObjects(id object)
{
if (object && object->hasAssociatedObjects()) {
- _object_remove_assocations(object);
+ _object_remove_assocations(object, /*deallocating*/false);
}
}
sset->_count = 0;
// heuristic to convert executable's selrefs count to table size
-#if TARGET_OS_IPHONE && !TARGET_OS_IOSMAC
+#if TARGET_OS_IPHONE && !TARGET_OS_MACCATALYST
for (idx = 0; __objc_sel_set_capacities[idx] < selrefs; idx++);
if (idx > 0 && selrefs < 1536) idx--;
#else
#include <mach/vm_param.h>
#if __LP64__
+#if __arm64e__
+// 0x6AE1
+# define PTR(x) .quad x@AUTH(da, 27361, addr)
+#else
# define PTR(x) .quad x
+#endif
#else
# define PTR(x) .long x
#endif
#if __OBJC2__
#include "objc-private.h"
-#include "objc-cache.h"
#include "DenseMapExtras.h"
-
static objc::ExplicitInitDenseSet<const char *> namedSelectors;
static SEL search_builtins(const char *key);
}
+unsigned long sel_hash(SEL sel)
+{
+ unsigned long selAddr = (unsigned long)sel;
+#if CONFIG_USE_PREOPT_CACHES
+ selAddr ^= (selAddr >> 7);
+#endif
+ return selAddr;
+}
+
+
BOOL sel_isMapped(SEL sel)
{
if (!sel) return NO;
uintptr_t max_hash_displacement;
};
+enum WeakRegisterDeallocatingOptions {
+ ReturnNilIfDeallocating,
+ CrashIfDeallocating,
+ DontCheckDeallocating
+};
+
/// Adds an (object, weak pointer) pair to the weak table.
id weak_register_no_lock(weak_table_t *weak_table, id referent,
- id *referrer, bool crashIfDeallocating);
+ id *referrer, WeakRegisterDeallocatingOptions deallocatingOptions);
/// Removes an (object, weak pointer) pair from the weak table.
void weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer);
*/
id
weak_register_no_lock(weak_table_t *weak_table, id referent_id,
- id *referrer_id, bool crashIfDeallocating)
+ id *referrer_id, WeakRegisterDeallocatingOptions deallocatingOptions)
{
objc_object *referent = (objc_object *)referent_id;
objc_object **referrer = (objc_object **)referrer_id;
- if (!referent || referent->isTaggedPointer()) return referent_id;
+ if (referent->isTaggedPointerOrNil()) return referent_id;
// ensure that the referenced object is viable
- bool deallocating;
- if (!referent->ISA()->hasCustomRR()) {
- deallocating = referent->rootIsDeallocating();
- }
- else {
- BOOL (*allowsWeakReference)(objc_object *, SEL) =
- (BOOL(*)(objc_object *, SEL))
- object_getMethodImplementation((id)referent,
- @selector(allowsWeakReference));
- if ((IMP)allowsWeakReference == _objc_msgForward) {
- return nil;
+ if (deallocatingOptions == ReturnNilIfDeallocating ||
+ deallocatingOptions == CrashIfDeallocating) {
+ bool deallocating;
+ if (!referent->ISA()->hasCustomRR()) {
+ deallocating = referent->rootIsDeallocating();
}
- deallocating =
+ else {
+ // Use lookUpImpOrForward so we can avoid the assert in
+ // class_getInstanceMethod, since we intentionally make this
+ // callout with the lock held.
+ auto allowsWeakReference = (BOOL(*)(objc_object *, SEL))
+ lookUpImpOrForwardTryCache((id)referent, @selector(allowsWeakReference),
+ referent->getIsa());
+ if ((IMP)allowsWeakReference == _objc_msgForward) {
+ return nil;
+ }
+ deallocating =
! (*allowsWeakReference)(referent, @selector(allowsWeakReference));
- }
+ }
- if (deallocating) {
- if (crashIfDeallocating) {
- _objc_fatal("Cannot form weak reference to instance (%p) of "
- "class %s. It is possible that this object was "
- "over-released, or is in the process of deallocation.",
- (void*)referent, object_getClassName((id)referent));
- } else {
- return nil;
+ if (deallocating) {
+ if (deallocatingOptions == CrashIfDeallocating) {
+ _objc_fatal("Cannot form weak reference to instance (%p) of "
+ "class %s. It is possible that this object was "
+ "over-released, or is in the process of deallocation.",
+ (void*)referent, object_getClassName((id)referent));
+ } else {
+ return nil;
+ }
}
}
# endif
#else
// __OBJC_BOOL_IS_BOOL not set.
-# if TARGET_OS_OSX || TARGET_OS_IOSMAC || ((TARGET_OS_IOS || TARGET_OS_BRIDGE) && !__LP64__ && !__ARM_ARCH_7K)
+# if TARGET_OS_OSX || TARGET_OS_MACCATALYST || ((TARGET_OS_IOS || TARGET_OS_BRIDGE) && !__LP64__ && !__ARM_ARCH_7K)
# define OBJC_BOOL_IS_BOOL 0
# else
# define OBJC_BOOL_IS_BOOL 1
OBJC_AVAILABLE(10.14.4, 12.2, 12.2, 5.2, 3.2);
#endif
-/**
- * Function type for a hook that assists objc_setAssociatedObject().
- *
- * @param object The source object for the association.
- * @param key The key for the association.
- * @param value The value to associate with the key key for object. Pass nil to clear an existing association.
- * @param policy The policy for the association. For possible values, see “Associative Object Behaviors.”
- *
- * @see objc_setAssociatedObject
- * @see objc_setHook_setAssociatedObject
- */
-typedef void (*objc_hook_setAssociatedObject)(id _Nonnull object, const void * _Nonnull key,
- id _Nullable value, objc_AssociationPolicy policy);
-
-/**
- * Install a hook for objc_setAssociatedObject().
- *
- * @param newValue The hook function to install.
- * @param outOldValue The address of a function pointer variable. On return,
- * the old hook function is stored in the variable.
- *
- * @note The store to *outOldValue is thread-safe: the variable will be
- * updated before objc_setAssociatedObject() calls your new hook to read it,
- * even if your new hook is called from another thread before this
- * setter completes.
- * @note Your hook should always call the previous hook.
- *
- * @see objc_setAssociatedObject
- * @see objc_hook_setAssociatedObject
- */
-#if !(TARGET_OS_OSX && __i386__)
-#define OBJC_SETASSOCIATEDOBJECTHOOK_DEFINED 1
-OBJC_EXPORT void objc_setHook_setAssociatedObject(objc_hook_setAssociatedObject _Nonnull newValue,
- objc_hook_setAssociatedObject _Nullable * _Nonnull outOldValue)
- OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0);
-#endif
-
/**
* Function type for a function that is called when an image is loaded.
*
OBJC_EXPORT void objc_addLoadImageFunc(objc_func_loadImage _Nonnull func)
OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0);
-/**
+/**
+ * Function type for a hook that provides a name for lazily named classes.
+ *
+ * @param cls The class to generate a name for.
+ * @return The name of the class, or NULL if the name isn't known or can't be generated.
+ *
+ * @see objc_setHook_lazyClassNamer
+ */
+typedef const char * _Nullable (*objc_hook_lazyClassNamer)(_Nonnull Class cls);
+
+/**
+ * Install a hook to provide a name for lazily-named classes.
+ *
+ * @param newValue The hook function to install.
+ * @param oldOutValue The address of a function pointer variable. On return,
+ * the old hook function is stored in the variable.
+ *
+ * @note The store to *oldOutValue is thread-safe: the variable will be
+ * updated before objc_getClass() calls your new hook to read it,
+ * even if your new hook is called from another thread before this
+ * setter completes.
+ * @note Your hook must call the previous hook for classes that
+ * you do not recognize.
+ */
+#if !(TARGET_OS_OSX && __i386__)
+#define OBJC_SETHOOK_LAZYCLASSNAMER_DEFINED 1
+OBJC_EXPORT
+void objc_setHook_lazyClassNamer(_Nonnull objc_hook_lazyClassNamer newValue,
+ _Nonnull objc_hook_lazyClassNamer * _Nonnull oldOutValue)
+ OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 5.0);
+#endif
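
A minimal sketch of a conforming hook (illustrative; ClassBelongsToMyRuntime and CopyMangledNameFor are hypothetical helpers). The runtime frees the returned string if another thread wins the naming race in installMangledNameForLazilyNamedClass, so heap-allocated storage is the safe choice, and unknown classes must be deferred to the previous hook:

    static objc_hook_lazyClassNamer PreviousNamer;

    static const char *MyLazyClassNamer(Class cls) {
        if (ClassBelongsToMyRuntime(cls))        // hypothetical predicate
            return CopyMangledNameFor(cls);      // hypothetical; returns malloc'd storage
        return PreviousNamer(cls);               // chain, as the @note above requires
    }

    // Installed once, early in startup:
    //   objc_setHook_lazyClassNamer(MyLazyClassNamer, &PreviousNamer);
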
+
+/**
* Callback from Objective-C to Swift to perform Swift class initialization.
*/
#if !(TARGET_OS_OSX && __i386__)
#include "test.h"
#include <Foundation/NSObject.h>
#include <objc/runtime.h>
+#include <objc/objc-internal.h>
+#include <Block.h>
static int values;
static int supers;
}
@end
+@interface Sub59318867: NSObject @end
+@implementation Sub59318867
++ (void)initialize {
+ objc_setAssociatedObject(self, &key, self, OBJC_ASSOCIATION_ASSIGN);
+}
+@end
+
+@interface CallOnDealloc: NSObject @end
+@implementation CallOnDealloc {
+ void (^_block)(void);
+}
+- (id)initWithBlock: (void (^)(void))block {
+ _block = (__bridge id)Block_copy((__bridge void *)block);
+ return self;
+}
+- (void)dealloc {
+ _block();
+ _Block_release((__bridge void *)_block);
+ SUPER_DEALLOC();
+}
+@end
+
+void TestReleaseLater(void) {
+ int otherObjsCount = 100;
+ char keys1[otherObjsCount];
+ char keys2[otherObjsCount];
+ char laterKey;
+
+ __block int normalDeallocs = 0;
+ __block int laterDeallocs = 0;
+
+ {
+ id target = [NSObject new];
+ for (int i = 0; i < otherObjsCount; i++) {
+ id value = [[CallOnDealloc alloc] initWithBlock: ^{ normalDeallocs++; }];
+ objc_setAssociatedObject(target, keys1 + i, value, OBJC_ASSOCIATION_RETAIN);
+ RELEASE_VALUE(value);
+ }
+ {
+ id laterValue = [[CallOnDealloc alloc] initWithBlock: ^{
+ testassertequal(laterDeallocs, 0);
+ testassertequal(normalDeallocs, otherObjsCount * 2);
+ laterDeallocs++;
+ }];
+ objc_setAssociatedObject(target, &laterKey, laterValue, (objc_AssociationPolicy)(OBJC_ASSOCIATION_RETAIN | _OBJC_ASSOCIATION_SYSTEM_OBJECT));
+ RELEASE_VALUE(laterValue);
+ }
+ for (int i = 0; i < otherObjsCount; i++) {
+ id value = [[CallOnDealloc alloc] initWithBlock: ^{ normalDeallocs++; }];
+ objc_setAssociatedObject(target, keys2 + i, value, OBJC_ASSOCIATION_RETAIN);
+ RELEASE_VALUE(value);
+ }
+ RELEASE_VALUE(target);
+ }
+ testassertequal(laterDeallocs, 1);
+ testassertequal(normalDeallocs, otherObjsCount * 2);
+}
+
+void TestReleaseLaterRemoveAssociations(void) {
+
+ char normalKey;
+ char laterKey;
+
+ __block int normalDeallocs = 0;
+ __block int laterDeallocs = 0;
+
+ @autoreleasepool {
+ id target = [NSObject new];
+ {
+ id normalValue = [[CallOnDealloc alloc] initWithBlock: ^{ normalDeallocs++; }];
+ id laterValue = [[CallOnDealloc alloc] initWithBlock: ^{ laterDeallocs++; }];
+ objc_setAssociatedObject(target, &normalKey, normalValue, OBJC_ASSOCIATION_RETAIN);
+ objc_setAssociatedObject(target, &laterKey, laterValue, (objc_AssociationPolicy)(OBJC_ASSOCIATION_RETAIN | _OBJC_ASSOCIATION_SYSTEM_OBJECT));
+ RELEASE_VALUE(normalValue);
+ RELEASE_VALUE(laterValue);
+ }
+ testassertequal(normalDeallocs, 0);
+ testassertequal(laterDeallocs, 0);
+
+ objc_removeAssociatedObjects(target);
+ testassertequal(normalDeallocs, 1);
+ testassertequal(laterDeallocs, 0);
+
+ id normalValue = objc_getAssociatedObject(target, &normalKey);
+ id laterValue = objc_getAssociatedObject(target, &laterKey);
+ testassert(!normalValue);
+ testassert(laterValue);
+
+ RELEASE_VALUE(target);
+ }
+
+ testassertequal(normalDeallocs, 1);
+ testassertequal(laterDeallocs, 1);
+}
int main()
{
objc_setAssociatedObject(nil, &key, nil, OBJC_ASSOCIATION_ASSIGN);
#pragma clang diagnostic pop
+ // rdar://problem/59318867 Make sure we don't reenter the association lock
+ // when setting an associated object on an uninitialized class.
+ Class Sub59318867Local = objc_getClass("Sub59318867");
+ objc_setAssociatedObject(Sub59318867Local, &key, Sub59318867Local, OBJC_ASSOCIATION_ASSIGN);
+
+ TestReleaseLater();
+ TestReleaseLaterRemoveAssociations();
+
succeed(__FILE__);
}
+++ /dev/null
-// Run test badPool as if it were built with an old SDK.
-
-// TEST_CONFIG MEM=mrc OS=iphoneos,iphonesimulator,appletvos,appletvsimulator
-// TEST_CRASHES
-// TEST_CFLAGS -DOLD=1 -Xlinker -sdk_version -Xlinker 9.0
-
-/*
-TEST_RUN_OUTPUT
-objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .*
-OK: badPool.m
-END
-*/
-
-#include "badPool.m"
--- /dev/null
+// Run test badPool as if it were built with an old SDK.
+
+// TEST_CONFIG MEM=mrc OS=iphoneos,iphonesimulator ARCH=x86_64,arm64
+// TEST_CRASHES
+// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker ios -Xlinker 9.0 -Xlinker 9.0 -miphoneos-version-min=9.0
+
+/*
+TEST_BUILD_OUTPUT
+ld: warning: passed two min versions.*for platform.*
+END
+
+TEST_RUN_OUTPUT
+objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .*
+OK: badPool.m
+END
+*/
+
+#include "badPool.m"
// Run test badPool as if it were built with an old SDK.
-// TEST_CONFIG MEM=mrc OS=macosx
+// TEST_CONFIG MEM=mrc OS=macosx ARCH=x86_64
// TEST_CRASHES
-// TEST_CFLAGS -DOLD=1 -Xlinker -sdk_version -Xlinker 10.11
+// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker macos -Xlinker 10.11 -Xlinker 10.11 -mmacosx-version-min=10.11
/*
+TEST_BUILD_OUTPUT
+ld: warning: passed two min versions.*for platform.*
+END
+
TEST_RUN_OUTPUT
objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .*
OK: badPool.m
--- /dev/null
+// Run test badPool as if it were built with an old SDK.
+
+// TEST_CONFIG MEM=mrc OS=appletvos,appletvsimulator ARCH=x86_64,arm64
+// TEST_CRASHES
+// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker tvos -Xlinker 9.0 -Xlinker 9.0 -mtvos-version-min=9.0
+
+/*
+TEST_BUILD_OUTPUT
+ld: warning: passed two min versions.*for platform.*
+END
+
+TEST_RUN_OUTPUT
+objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .*
+OK: badPool.m
+END
+*/
+
+#include "badPool.m"
// TEST_CONFIG MEM=mrc OS=watchos,watchsimulator
// TEST_CRASHES
-// TEST_CFLAGS -DOLD=1 -Xlinker -sdk_version -Xlinker 2.0
+// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker watchos -Xlinker 2.0 -Xlinker 2.0 -mwatchos-version-min=2.0
/*
+TEST_BUILD_OUTPUT
+ld: warning: passed two min versions.*for platform.*
+END
+
TEST_RUN_OUTPUT
objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .*
OK: badPool.m
// Create a cycle in a superclass chain (Sub->supercls == Sub)
// then attempt to walk that chain. Runtime should halt eventually.
_objc_flush_caches(supercls);
- ((Class *)(__bridge void *)subcls)[1] = subcls;
+ ((Class __ptrauth_objc_super_pointer *)(__bridge void *)subcls)[1] = subcls;
#ifdef CACHE_FLUSH
_objc_flush_caches(supercls);
#else
// TEST_CONFIG MEM=mrc
-/*
-TEST_RUN_OUTPUT
-objc\[\d+\]: Deallocator object 0x[0-9a-fA-F]+ overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug
-OK: bigrc.m
-OR
-no overrelease enforcement
-OK: bigrc.m
-END
- */
#include "test.h"
#include "testroot.i"
-(void)dealloc
{
id o = self;
- size_t rc = 1;
- testprintf("Retain a lot during dealloc\n");
+ testprintf("Retain/release during dealloc\n");
- testassert(rc == 1);
- testassert([o retainCount] == rc);
- do {
- [o retain];
- if (rc % 0x100000 == 0) testprintf("%zx/%zx ++\n", rc, LOTS);
- } while (++rc < LOTS);
-
- testassert([o retainCount] == rc);
-
- do {
- [o release];
- if (rc % 0x100000 == 0) testprintf("%zx/%zx --\n", rc, LOTS);
- } while (--rc > 1);
-
- testassert(rc == 1);
- testassert([o retainCount] == rc);
-
-
- testprintf("Overrelease during dealloc\n");
-
- // Not all architectures enforce this.
-#if !SUPPORT_NONPOINTER_ISA
- testwarn("no overrelease enforcement");
- fprintf(stderr, "no overrelease enforcement\n");
-#endif
+ testassertequal([o retainCount], 0);
+ [o retain];
+ testassertequal([o retainCount], 0);
[o release];
+ testassertequal([o retainCount], 0);
[super dealloc];
}
#include <objc/objc.h>
#if TARGET_OS_OSX
-# define RealBool 0
+# if __x86_64__
+# define RealBool 0
+# else
+# define RealBool 1
+# endif
#elif TARGET_OS_IOS || TARGET_OS_BRIDGE
# if (__arm__ && !__armv7k__) || __i386__
# define RealBool 0
--- /dev/null
+// TEST_CFLAGS -framework Foundation
+/*
+TEST_RUN_OUTPUT
+foo
+bar
+bar
+foo
+END
+*/
+
+// NOTE: This test won't catch problems when running against a root, so it's of
+// limited utility, but it would at least catch things when testing against the
+// shared cache.
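+//
+// The expected foo/bar/bar/foo output checks that the exchange still takes
+// effect when a superclass in the chain (here NSBlock) may carry a
+// preoptimized cache from the shared cache.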
+
+#include <Foundation/Foundation.h>
+#include <objc/runtime.h>
+
+@interface NSBlock: NSObject @end
+
+// NSBlock is a conveniently accessible superclass that (currently) has a constant cache.
+@interface MyBlock: NSBlock
++(void)foo;
++(void)bar;
+@end
+@implementation MyBlock
++(void)foo {
+ printf("foo\n");
+}
++(void)bar {
+ printf("bar\n");
+}
+@end
+
+int main() {
+ [MyBlock foo];
+ [MyBlock bar];
+
+ Method m1 = class_getClassMethod([MyBlock class], @selector(foo));
+ Method m2 = class_getClassMethod([MyBlock class], @selector(bar));
+ method_exchangeImplementations(m1, m2);
+
+ [MyBlock foo];
+ [MyBlock bar];
+}
"l_OBJC_$_CATEGORY_INSTANCE_METHODS_Super_$_Category_catlist2: \n"
" .long 24 \n"
" .long 1 \n"
-" "PTR" L_catlist2MethodString \n"
-" "PTR" L_catlist2MethodTypes \n"
-" "PTR" _catlist2MethodImplementation"SIGNED_CATEGORY_IMP" \n"
+" " PTR " L_catlist2MethodString \n"
+" " PTR " L_catlist2MethodTypes \n"
+" " PTR " _catlist2MethodImplementation" SIGNED_CATEGORY_IMP" \n"
" .p2align 3 \n"
"l_OBJC_$_CATEGORY_Super_$_Category_catlist2: \n"
-" "PTR" L_catlist2CategoryName \n"
-" "PTR" _OBJC_CLASS_$_Super \n"
-" "PTR" l_OBJC_$_CATEGORY_INSTANCE_METHODS_Super_$_Category_catlist2 \n"
-" "PTR" 0 \n"
-" "PTR" 0 \n"
-" "PTR" 0 \n"
-" "PTR" 0 \n"
+" " PTR " L_catlist2CategoryName \n"
+" " PTR " _OBJC_CLASS_$_Super \n"
+" " PTR " l_OBJC_$_CATEGORY_INSTANCE_METHODS_Super_$_Category_catlist2 \n"
+" " PTR " 0 \n"
+" " PTR " 0 \n"
+" " PTR " 0 \n"
+" " PTR " 0 \n"
" .long 64 \n"
" .space 4 \n"
" .section __DATA,__objc_catlist2 \n"
" .p2align 3 \n"
-" "PTR" l_OBJC_$_CATEGORY_Super_$_Category_catlist2 \n"
+" " PTR " l_OBJC_$_CATEGORY_Super_$_Category_catlist2 \n"
" .text \n"
);
--- /dev/null
+//TEST_CONFIG MEM=mrc ARCH=x86_64,ARM64,ARM64e
+//TEST_ENV OBJC_DISABLE_AUTORELEASE_COALESCING=NO OBJC_DISABLE_AUTORELEASE_COALESCING_LRU=NO
+
+#include "test.h"
+#import <Foundation/NSObject.h>
+#include <os/feature_private.h>
+
+@interface Counter: NSObject {
+@public
+ int retains;
+ int releases;
+ int autoreleases;
+}
+@end
+@implementation Counter
+
+- (id)retain {
+ retains++;
+ return [super retain];
+}
+
+- (oneway void)release {
+ releases++;
+ [super release];
+}
+
+- (id)autorelease {
+ autoreleases++;
+ return [super autorelease];
+}
+
+- (void)dealloc {
+ testprintf("%p dealloc\n", self);
+ [super dealloc];
+}
+
+@end
+
+// Create a number of objects, autoreleasing each one a number of times in a
+// round robin fashion. Verify that each object gets sent retain, release, and
+// autorelease the correct number of times. Verify that the gap between
+// autoreleasepool pointers is the given number of objects. Note: this will not
+// work when the pool hits a page boundary, so be sure to stay under that limit.
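+//
+// As the expected gaps in main() below suggest: with coalescing enabled,
+// repeated autoreleases of an object that is still within the small look-back
+// window collapse into a single counted pool entry, so the gap is roughly one
+// slot per distinct object plus one (e.g. test(2, 100, 3)). More objects than
+// the window, or counts large enough to overflow an entry's counter, cost
+// extra slots (e.g. test(5, 2, 11) and test(1, 70000, 3)).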
+void test(int objCount, int autoreleaseCount, int expectedGap) {
+ testprintf("Testing %d objects, %d autoreleases, expecting gap of %d\n",
+ objCount, autoreleaseCount, expectedGap);
+
+ Counter *objs[objCount];
+ for (int i = 0; i < objCount; i++)
+ objs[i] = [Counter new];
+
+ for (int j = 0; j < autoreleaseCount; j++)
+ for (int i = 0; i < objCount; i++)
+ [objs[i] retain];
+
+ for (int i = 0; i < objCount; i++) {
+ testassertequal(objs[i]->retains, autoreleaseCount);
+ testassertequal(objs[i]->releases, 0);
+ testassertequal(objs[i]->autoreleases, 0);
+ }
+
+ void *outer = objc_autoreleasePoolPush();
+ uintptr_t outerAddr = (uintptr_t)outer;
+ for (int j = 0; j < autoreleaseCount; j++)
+ for (int i = 0; i < objCount; i++)
+ [objs[i] autorelease];
+ for (int i = 0; i < objCount; i++) {
+ testassertequal(objs[i]->retains, autoreleaseCount);
+ testassertequal(objs[i]->releases, 0);
+ testassertequal(objs[i]->autoreleases, autoreleaseCount);
+ }
+
+ void *inner = objc_autoreleasePoolPush();
+ uintptr_t innerAddr = (uintptr_t)inner;
+ testprintf("outer=%p inner=%p\n", outer, inner);
+ // Do one more autorelease in the inner pool to make sure we correctly
+ // handle pool boundaries.
+ for (int i = 0; i < objCount; i++)
+ [[objs[i] retain] autorelease];
+ for (int i = 0; i < objCount; i++) {
+ testassertequal(objs[i]->retains, autoreleaseCount + 1);
+ testassertequal(objs[i]->releases, 0);
+ testassertequal(objs[i]->autoreleases, autoreleaseCount + 1);
+ }
+
+ objc_autoreleasePoolPop(inner);
+ for (int i = 0; i < objCount; i++) {
+ testassertequal(objs[i]->retains, autoreleaseCount + 1);
+ testassertequal(objs[i]->releases, 1);
+ testassertequal(objs[i]->autoreleases, autoreleaseCount + 1);
+ }
+
+ objc_autoreleasePoolPop(outer);
+ for (int i = 0; i < objCount; i++) {
+ testassertequal(objs[i]->retains, autoreleaseCount + 1);
+ testassertequal(objs[i]->releases, autoreleaseCount + 1);
+ testassertequal(objs[i]->autoreleases, autoreleaseCount + 1);
+ }
+
+ intptr_t gap = innerAddr - outerAddr;
+ testprintf("gap=%ld\n", gap);
+ testassertequal(gap, expectedGap * sizeof(id));
+
+ // Destroy our test objects.
+ for (int i = 0; i < objCount; i++)
+ [objs[i] release];
+}
+
+int main()
+{
+ // Push a pool here so test() doesn't see a placeholder.
+ objc_autoreleasePoolPush();
+
+ test(1, 1, 2);
+ test(1, 2, 2);
+ test(1, 10, 2);
+ test(1, 100, 2);
+ test(1, 70000, 3);
+
+ test(2, 1, 3);
+ test(2, 2, 3);
+ test(2, 10, 3);
+ test(2, 100, 3);
+ test(2, 70000, 5);
+
+ test(3, 1, 4);
+ test(3, 2, 4);
+ test(3, 10, 4);
+ test(3, 100, 4);
+ test(3, 70000, 7);
+
+ test(4, 1, 5);
+ test(4, 2, 5);
+ test(4, 10, 5);
+ test(4, 100, 5);
+ test(4, 70000, 9);
+
+ test(5, 1, 6);
+ test(5, 2, 11);
+
+ succeed(__FILE__);
+}
typedef IMP MethodListIMP;
#endif
+EXTERN_C void _method_setImplementationRawUnsafe(Method m, IMP imp);
+
static int Retains;
static int Releases;
static int Autoreleases;
#if SWIZZLE_AWZ
method_setImplementation(meth, (IMP)HackAllocWithZone);
#else
- ((MethodListIMP *)meth)[2] = (IMP)HackAllocWithZone;
+ _method_setImplementationRawUnsafe(meth, (IMP)HackAllocWithZone);
#endif
meth = class_getClassMethod(cls, @selector(new));
#if SWIZZLE_CORE
method_setImplementation(meth, (IMP)HackPlusNew);
#else
- ((MethodListIMP *)meth)[2] = (IMP)HackPlusNew;
+ _method_setImplementationRawUnsafe(meth, (IMP)HackPlusNew);
#endif
meth = class_getClassMethod(cls, @selector(self));
#if SWIZZLE_CORE
method_setImplementation(meth, (IMP)HackPlusSelf);
#else
- ((MethodListIMP *)meth)[2] = (IMP)HackPlusSelf;
+ _method_setImplementationRawUnsafe(meth, (IMP)HackPlusSelf);
#endif
meth = class_getInstanceMethod(cls, @selector(self));
#if SWIZZLE_CORE
method_setImplementation(meth, (IMP)HackSelf);
#else
- ((MethodListIMP *)meth)[2] = (IMP)HackSelf;
+ _method_setImplementationRawUnsafe(meth, (IMP)HackSelf);
#endif
meth = class_getInstanceMethod(cls, @selector(release));
#if SWIZZLE_RELEASE
method_setImplementation(meth, (IMP)HackRelease);
#else
- ((MethodListIMP *)meth)[2] = (IMP)HackRelease;
+ _method_setImplementationRawUnsafe(meth, (IMP)HackRelease);
#endif
// These other methods get hacked for counting purposes only
meth = class_getInstanceMethod(cls, @selector(retain));
RealRetain = (typeof(RealRetain))method_getImplementation(meth);
- ((MethodListIMP *)meth)[2] = (IMP)HackRetain;
+ _method_setImplementationRawUnsafe(meth, (IMP)HackRetain);
meth = class_getInstanceMethod(cls, @selector(autorelease));
RealAutorelease = (typeof(RealAutorelease))method_getImplementation(meth);
- ((MethodListIMP *)meth)[2] = (IMP)HackAutorelease;
+ _method_setImplementationRawUnsafe(meth, (IMP)HackAutorelease);
meth = class_getClassMethod(cls, @selector(alloc));
RealAlloc = (typeof(RealAlloc))method_getImplementation(meth);
- ((MethodListIMP *)meth)[2] = (IMP)HackAlloc;
+ _method_setImplementationRawUnsafe(meth, (IMP)HackAlloc);
meth = class_getInstanceMethod(cls, @selector(init));
- ((MethodListIMP *)meth)[2] = (IMP)HackInit;
+ _method_setImplementationRawUnsafe(meth, (IMP)HackInit);
// Verify that the swizzles occurred before +initialize by provoking it now
testassert(PlusInitializes == 0);
// Don't use runtime functions to do this -
// we want the runtime to think that these are NSObject's real code
{
-#if __has_feature(ptrauth_calls)
- typedef IMP __ptrauth_objc_method_list_imp MethodListIMP;
-#else
- typedef IMP MethodListIMP;
-#endif
-
Class cls = [NSObject class];
IMP imp = class_getMethodImplementation(cls, @selector(retain));
- MethodListIMP *m = (MethodListIMP *)
- class_getInstanceMethod(cls, @selector(retain));
- testassert(m[2] == imp); // verify Method struct is as we expect
-
- m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(retain));
- m[2] = (IMP)HackRetain;
- m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(release));
- m[2] = (IMP)HackRelease;
- m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(autorelease));
- m[2] = (IMP)HackAutorelease;
- m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(retainCount));
- m[2] = (IMP)HackRetainCount;
- m = (MethodListIMP *)class_getClassMethod(cls, @selector(retain));
- m[2] = (IMP)HackPlusRetain;
- m = (MethodListIMP *)class_getClassMethod(cls, @selector(release));
- m[2] = (IMP)HackPlusRelease;
- m = (MethodListIMP *)class_getClassMethod(cls, @selector(autorelease));
- m[2] = (IMP)HackPlusAutorelease;
- m = (MethodListIMP *)class_getClassMethod(cls, @selector(retainCount));
- m[2] = (IMP)HackPlusRetainCount;
- m = (MethodListIMP *)class_getClassMethod(cls, @selector(alloc));
- m[2] = (IMP)HackAlloc;
- m = (MethodListIMP *)class_getClassMethod(cls, @selector(allocWithZone:));
- m[2] = (IMP)HackAllocWithZone;
+ Method m = class_getInstanceMethod(cls, @selector(retain));
+ testassert(method_getImplementation(m) == imp); // verify Method struct is as we expect
+
+ m = class_getInstanceMethod(cls, @selector(retain));
+ _method_setImplementationRawUnsafe(m, (IMP)HackRetain);
+ m = class_getInstanceMethod(cls, @selector(release));
+ _method_setImplementationRawUnsafe(m, (IMP)HackRelease);
+ m = class_getInstanceMethod(cls, @selector(autorelease));
+ _method_setImplementationRawUnsafe(m, (IMP)HackAutorelease);
+ m = class_getInstanceMethod(cls, @selector(retainCount));
+ _method_setImplementationRawUnsafe(m, (IMP)HackRetainCount);
+ m = class_getClassMethod(cls, @selector(retain));
+ _method_setImplementationRawUnsafe(m, (IMP)HackPlusRetain);
+ m = class_getClassMethod(cls, @selector(release));
+ _method_setImplementationRawUnsafe(m, (IMP)HackPlusRelease);
+ m = class_getClassMethod(cls, @selector(autorelease));
+ _method_setImplementationRawUnsafe(m, (IMP)HackPlusAutorelease);
+ m = class_getClassMethod(cls, @selector(retainCount));
+ _method_setImplementationRawUnsafe(m, (IMP)HackPlusRetainCount);
+ m = class_getClassMethod(cls, @selector(alloc));
+ _method_setImplementationRawUnsafe(m, (IMP)HackAlloc);
+ m = class_getClassMethod(cls, @selector(allocWithZone:));
+ _method_setImplementationRawUnsafe(m, (IMP)HackAllocWithZone);
_objc_flush_caches(cls);
#if __has_feature(ptrauth_calls)
# define SIGNED_METHOD_LIST_IMP "@AUTH(ia,0,addr) "
+# define SIGNED_METHOD_LIST "@AUTH(da,0xC310,addr) "
+# define SIGNED_ISA "@AUTH(da, 0x6AE1, addr) "
+# define SIGNED_SUPER "@AUTH(da, 0xB5AB, addr) "
#else
# define SIGNED_METHOD_LIST_IMP
+# define SIGNED_METHOD_LIST
+# define SIGNED_ISA
+# define SIGNED_SUPER
#endif
#define str(x) #x
__END_DECLS
asm(
- ".globl _OBJC_CLASS_$_Super \n"
- ".section __DATA,__objc_data \n"
- ".align 3 \n"
- "_OBJC_CLASS_$_Super: \n"
- PTR "_OBJC_METACLASS_$_Super \n"
- PTR "0 \n"
- PTR "__objc_empty_cache \n"
- PTR "0 \n"
- PTR "L_ro \n"
+ ".globl _OBJC_CLASS_$_Super \n"
+ ".section __DATA,__objc_data \n"
+ ".align 3 \n"
+ "_OBJC_CLASS_$_Super: \n"
+ PTR "_OBJC_METACLASS_$_Super" SIGNED_ISA "\n"
+ PTR "0 \n"
+ PTR "__objc_empty_cache \n"
+ PTR "0 \n"
+ PTR "L_ro \n"
// pad to OBJC_MAX_CLASS_SIZE
PTR "0 \n"
PTR "0 \n"
PTR "0 \n"
PTR "0 \n"
""
- "_OBJC_METACLASS_$_Super: \n"
- PTR "_OBJC_METACLASS_$_Super \n"
- PTR "_OBJC_CLASS_$_Super \n"
- PTR "__objc_empty_cache \n"
- PTR "0 \n"
- PTR "L_meta_ro \n"
+ "_OBJC_METACLASS_$_Super: \n"
+ PTR "_OBJC_METACLASS_$_Super" SIGNED_ISA "\n"
+ PTR "_OBJC_CLASS_$_Super" SIGNED_SUPER "\n"
+ PTR "__objc_empty_cache \n"
+ PTR "0 \n"
+ PTR "L_meta_ro \n"
// pad to OBJC_MAX_CLASS_SIZE
PTR "0 \n"
PTR "0 \n"
PTR "0 \n"
PTR "L_super_name \n"
#if EVIL_SUPER
- PTR "L_evil_methods \n"
+ PTR "L_evil_methods" SIGNED_METHOD_LIST "\n"
#else
- PTR "L_good_methods \n"
+ PTR "L_good_methods" SIGNED_METHOD_LIST "\n"
#endif
PTR "0 \n"
PTR "L_super_ivars \n"
PTR "0 \n"
PTR "L_super_name \n"
#if EVIL_SUPER_META
- PTR "L_evil_methods \n"
+ PTR "L_evil_methods" SIGNED_METHOD_LIST "\n"
#else
- PTR "L_good_methods \n"
+ PTR "L_good_methods" SIGNED_METHOD_LIST "\n"
#endif
PTR "0 \n"
PTR "0 \n"
PTR "0 \n"
PTR "0 \n"
- ".globl _OBJC_CLASS_$_Sub \n"
- ".section __DATA,__objc_data \n"
- ".align 3 \n"
- "_OBJC_CLASS_$_Sub: \n"
- PTR "_OBJC_METACLASS_$_Sub \n"
- PTR "_OBJC_CLASS_$_Super \n"
- PTR "__objc_empty_cache \n"
- PTR "0 \n"
- PTR "L_sub_ro \n"
+ ".globl _OBJC_CLASS_$_Sub \n"
+ ".section __DATA,__objc_data \n"
+ ".align 3 \n"
+ "_OBJC_CLASS_$_Sub: \n"
+ PTR "_OBJC_METACLASS_$_Sub" SIGNED_ISA "\n"
+ PTR "_OBJC_CLASS_$_Super" SIGNED_SUPER "\n"
+ PTR "__objc_empty_cache \n"
+ PTR "0 \n"
+ PTR "L_sub_ro \n"
// pad to OBJC_MAX_CLASS_SIZE
PTR "0 \n"
PTR "0 \n"
PTR "0 \n"
PTR "0 \n"
""
- "_OBJC_METACLASS_$_Sub: \n"
- PTR "_OBJC_METACLASS_$_Super \n"
- PTR "_OBJC_METACLASS_$_Super \n"
- PTR "__objc_empty_cache \n"
- PTR "0 \n"
- PTR "L_sub_meta_ro \n"
+ "_OBJC_METACLASS_$_Sub: \n"
+ PTR "_OBJC_METACLASS_$_Super" SIGNED_ISA "\n"
+ PTR "_OBJC_METACLASS_$_Super" SIGNED_SUPER "\n"
+ PTR "__objc_empty_cache \n"
+ PTR "0 \n"
+ PTR "L_sub_meta_ro \n"
// pad to OBJC_MAX_CLASS_SIZE
PTR "0 \n"
PTR "0 \n"
PTR "0 \n"
PTR "L_sub_name \n"
#if EVIL_SUB
- PTR "L_evil_methods \n"
+ PTR "L_evil_methods" SIGNED_METHOD_LIST "\n"
#else
- PTR "L_good_methods \n"
+ PTR "L_good_methods" SIGNED_METHOD_LIST "\n"
#endif
PTR "0 \n"
PTR "L_sub_ivars \n"
PTR "0 \n"
PTR "L_sub_name \n"
#if EVIL_SUB_META
- PTR "L_evil_methods \n"
+ PTR "L_evil_methods" SIGNED_METHOD_LIST "\n"
#else
- PTR "L_good_methods \n"
+ PTR "L_good_methods" SIGNED_METHOD_LIST "\n"
#endif
PTR "0 \n"
PTR "0 \n"
#include <objc/runtime.h>
static int state;
+static int swizzleOld;
+static int swizzleNew;
+static int swizzleB;
#define ONE 1
#define TWO 2
+(void) two { state = TWO; }
+(void) length { state = LENGTH; }
+(void) count { state = COUNT; }
+
+-(void) swizzleTarget {
+ swizzleOld++;
+}
+-(void) swizzleReplacement {
+ swizzleNew++;
+}
@end
#define checkExchange(s1, v1, s2, v2) \
testassert(state == v2); \
} while (0)
+@interface A : Super
+@end
+@implementation A
+@end
+
+@interface B : Super
+@end
+@implementation B
+- (void) swizzleTarget {
+ swizzleB++;
+}
+@end
+
+@interface C : Super
+@end
+@implementation C
+- (void) hello { }
+@end
+
+static IMP findInCache(Class cls, SEL sel)
+{
+ struct objc_imp_cache_entry *ents;
+ int count;
+ IMP ret = nil;
+
+ ents = class_copyImpCache(cls, &count);
+ for (int i = 0; i < count; i++) {
+ if (ents[i].sel == sel) {
+ ret = ents[i].imp;
+ break;
+ }
+ }
+ free(ents);
+ return ret;
+}
+
int main()
{
// Check ordinary selectors
checkExchange(count, COUNT, one, ONE);
checkExchange(two, TWO, length, LENGTH);
+ Super *s = [Super new];
+ A *a = [A new];
+ B *b = [B new];
+ C *c = [C new];
+
+ // cache swizzleTarget in Super, A and B
+ [s swizzleTarget];
+ testassert(swizzleOld == 1);
+ testassert(swizzleNew == 0);
+ testassert(swizzleB == 0);
+ testassert(findInCache([Super class], @selector(swizzleTarget)) != nil);
+
+ [a swizzleTarget];
+ testassert(swizzleOld == 2);
+ testassert(swizzleNew == 0);
+ testassert(swizzleB == 0);
+ testassert(findInCache([A class], @selector(swizzleTarget)) != nil);
+
+ [b swizzleTarget];
+ testassert(swizzleOld == 2);
+ testassert(swizzleNew == 0);
+ testassert(swizzleB == 1);
+ testassert(findInCache([B class], @selector(swizzleTarget)) != nil);
+
+ // prime C's cache too
+ [c hello];
+ testassert(findInCache([C class], @selector(hello)) != nil);
+
+ Method m1 = class_getInstanceMethod([Super class], @selector(swizzleTarget));
+ Method m2 = class_getInstanceMethod([Super class], @selector(swizzleReplacement));
+ method_exchangeImplementations(m1, m2);
+
+ // this should invalidate Super, A, but:
+ // - not B because it overrides -swizzleTarget and hence doesn't care
+ // - not C because it neither called swizzleTarget nor swizzleReplacement
+ testassert(findInCache([Super class], @selector(swizzleTarget)) == nil);
+ testassert(findInCache([A class], @selector(swizzleTarget)) == nil);
+ testassert(findInCache([B class], @selector(swizzleTarget)) != nil);
+ testassert(findInCache([C class], @selector(hello)) != nil);
+
+ // now check that all lookups do the right thing
+ [s swizzleTarget];
+ testassert(swizzleOld == 2);
+ testassert(swizzleNew == 1);
+ testassert(swizzleB == 1);
+
+ [a swizzleTarget];
+ testassert(swizzleOld == 2);
+ testassert(swizzleNew == 2);
+ testassert(swizzleB == 1);
+
+ [b swizzleTarget];
+ testassert(swizzleOld == 2);
+ testassert(swizzleNew == 2);
+ testassert(swizzleB == 2);
+
+ [c swizzleTarget];
+ testassert(swizzleOld == 2);
+ testassert(swizzleNew == 3);
+ testassert(swizzleB == 2);
+
succeed(__FILE__);
}
--- /dev/null
+/*
+Make sure we detect classes with the RW_REALIZED bit set in the binary. rdar://problem/67692760
+TEST_CONFIG OS=macosx
+TEST_CRASHES
+TEST_RUN_OUTPUT
+objc\[\d+\]: realized class 0x[0-9a-fA-F]+ has corrupt data pointer 0x[0-9a-fA-F]+
+objc\[\d+\]: HALTED
+END
+*/
+
+#include "test.h"
+
+#include <objc/NSObject.h>
+
+#define RW_REALIZED (1U<<31)
+
+struct ObjCClass {
+ struct ObjCClass * __ptrauth_objc_isa_pointer isa;
+ struct ObjCClass * __ptrauth_objc_super_pointer superclass;
+ void *cachePtr;
+ uintptr_t zero;
+ uintptr_t data;
+};
+
+struct ObjCClass_ro {
+ uint32_t flags;
+ uint32_t instanceStart;
+ uint32_t instanceSize;
+#ifdef __LP64__
+ uint32_t reserved;
+#endif
+
+ union {
+ const uint8_t * ivarLayout;
+ struct ObjCClass * nonMetaClass;
+ };
+
+ const char * name;
+ struct ObjCMethodList * __ptrauth_objc_method_list_pointer baseMethodList;
+ struct protocol_list_t * baseProtocols;
+ const struct ivar_list_t * ivars;
+
+ const uint8_t * weakIvarLayout;
+ struct property_list_t *baseProperties;
+};
+
+extern struct ObjCClass OBJC_METACLASS_$_NSObject;
+extern struct ObjCClass OBJC_CLASS_$_NSObject;
+
+struct ObjCClass_ro FakeSuperclassRO = {
+ .flags = RW_REALIZED
+};
+
+struct ObjCClass FakeSuperclass = {
+ &OBJC_METACLASS_$_NSObject,
+ &OBJC_METACLASS_$_NSObject,
+ NULL,
+ 0,
+ (uintptr_t)&FakeSuperclassRO
+};
+
+struct ObjCClass_ro FakeSubclassRO;
+
+struct ObjCClass FakeSubclass = {
+ &FakeSuperclass,
+ &FakeSuperclass,
+ NULL,
+ 0,
+ (uintptr_t)&FakeSubclassRO
+};
+
+static struct ObjCClass *class_ptr __attribute__((used)) __attribute((section("__DATA,__objc_nlclslist"))) = &FakeSubclass;
+
+int main() {}
--- /dev/null
+/*
+Variant on fakeRealizedClass which tests a fake class with no superclass. rdar://problem/67692760
+TEST_CONFIG OS=macosx
+TEST_CRASHES
+TEST_RUN_OUTPUT
+objc\[\d+\]: realized class 0x[0-9a-fA-F]+ has corrupt data pointer 0x[0-9a-fA-F]+
+objc\[\d+\]: HALTED
+END
+*/
+
+#include "test.h"
+
+#include <objc/NSObject.h>
+
+#define RW_REALIZED (1U<<31)
+
+struct ObjCClass {
+ struct ObjCClass * __ptrauth_objc_isa_pointer isa;
+ struct ObjCClass * __ptrauth_objc_super_pointer superclass;
+ void *cachePtr;
+ uintptr_t zero;
+ uintptr_t data;
+};
+
+struct ObjCClass_ro {
+ uint32_t flags;
+ uint32_t instanceStart;
+ uint32_t instanceSize;
+#ifdef __LP64__
+ uint32_t reserved;
+#endif
+
+ union {
+ const uint8_t * ivarLayout;
+ struct ObjCClass * nonMetaClass;
+ };
+
+ const char * name;
+ struct ObjCMethodList * __ptrauth_objc_method_list_pointer baseMethodList;
+ struct protocol_list_t * baseProtocols;
+ const struct ivar_list_t * ivars;
+
+ const uint8_t * weakIvarLayout;
+ struct property_list_t *baseProperties;
+};
+
+extern struct ObjCClass OBJC_METACLASS_$_NSObject;
+extern struct ObjCClass OBJC_CLASS_$_NSObject;
+
+struct ObjCClass_ro FakeSuperclassRO = {
+ .flags = RW_REALIZED
+};
+
+struct ObjCClass FakeSuperclass = {
+ &OBJC_METACLASS_$_NSObject,
+ NULL,
+ NULL,
+ 0,
+ (uintptr_t)&FakeSuperclassRO
+};
+
+struct ObjCClass_ro FakeSubclassRO;
+
+struct ObjCClass FakeSubclass = {
+ &FakeSuperclass,
+ &FakeSuperclass,
+ NULL,
+ 0,
+ (uintptr_t)&FakeSubclassRO
+};
+
+static struct ObjCClass *class_ptr __attribute__((used)) __attribute((section("__DATA,__objc_nlclslist"))) = &FakeSubclass;
+
+int main() {}
# define p "w" // arm64_32
# endif
void *struct_addr;
- __asm__ volatile("mov %"p"0, "p"8" : "=r" (struct_addr) : : p"8");
+ __asm__ volatile("mov %" p "0, " p "8" : "=r" (struct_addr) : : p "8");
#endif
testassert(self == receiver);
+++ /dev/null
-#include "test.h"
-
-OBJC_ROOT_CLASS
-@interface Main @end
-@implementation Main @end
-
-int main(int argc __attribute__((unused)), char **argv)
-{
- succeed(basename(argv[0]));
-}
+++ /dev/null
-int GC(void) { return 42; }
+++ /dev/null
-#import <objc/objc-api.h>
-
-OBJC_ROOT_CLASS
-@interface GC @end
-@implementation GC @end
-
-// silence "no debug symbols in executable" warning
-void foo(void) { }
+++ /dev/null
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/$C{ARCH}-aso gcenforcer-app-aso.exe
-END
-
-TEST_RUN_OUTPUT
-.*No Info\.plist file in application bundle or no NSPrincipalClass in the Info\.plist file, exiting
-END
-*/
+++ /dev/null
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/$C{ARCH}-gc gcenforcer-app-gc.exe
-END
-
-TEST_CRASHES
-TEST_RUN_OUTPUT
-objc\[\d+\]: Objective-C garbage collection is no longer supported\.
-objc\[\d+\]: HALTED
-END
-*/
+++ /dev/null
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/$C{ARCH}-gcaso gcenforcer-app-gcaso.exe
-END
-
-TEST_CRASHES
-TEST_RUN_OUTPUT
-objc\[\d+\]: Objective-C garbage collection is no longer supported\.
-objc\[\d+\]: HALTED
-END
-*/
+++ /dev/null
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/$C{ARCH}-gcaso2 gcenforcer-app-gcaso2.exe
-END
-
-TEST_CRASHES
-TEST_RUN_OUTPUT
-objc\[\d+\]: Objective-C garbage collection is no longer supported\.
-objc\[\d+\]: HALTED
-END
-*/
+++ /dev/null
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/$C{ARCH}-gconly gcenforcer-app-gconly.exe
-END
-
-TEST_CRASHES
-TEST_RUN_OUTPUT
-objc\[\d+\]: Objective-C garbage collection is no longer supported\.
-objc\[\d+\]: HALTED
-END
-*/
+++ /dev/null
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/$C{ARCH}-nogc gcenforcer-app-nogc.exe
-END
-
-TEST_RUN_OUTPUT
-running
-END
-*/
+++ /dev/null
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/$C{ARCH}-noobjc gcenforcer-app-noobjc.exe
-END
-
-TEST_RUN_OUTPUT
-
-END
-*/
+++ /dev/null
-// gc-off app loading gc-off dylib: should work
-
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/libnogc.dylib .
- $C{COMPILE} $DIR/gc-main.m -x none libnogc.dylib -o gcenforcer-dylib-nogc.exe
-END
-*/
+++ /dev/null
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/libnoobjc.dylib .
- $C{COMPILE} $DIR/gc-main.m -x none libnoobjc.dylib -o gcenforcer-dylib-noobjc.exe
-END
-*/
+++ /dev/null
-// gc-off app loading gc-required dylib: should crash
-// linker sees librequiresgc.fake.dylib, runtime uses librequiresgc.dylib
-
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-TEST_CRASHES
-
-TEST_RUN_OUTPUT
-dyld: Library not loaded: librequiresgc\.dylib
- Referenced from: .*gcenforcer-dylib-requiresgc.exe
- Reason: no suitable image found\. Did find:
- (.*librequiresgc\.dylib: cannot load '.*librequiresgc\.dylib' because Objective-C garbage collection is not supported(\n)?)+
- librequiresgc.dylib: cannot load 'librequiresgc\.dylib' because Objective-C garbage collection is not supported(
- .*librequiresgc\.dylib: cannot load '.*librequiresgc\.dylib' because Objective-C garbage collection is not supported(\n)?)*
-END
-
-TEST_BUILD
- cp $DIR/gcfiles/librequiresgc.dylib .
- $C{COMPILE} $DIR/gc-main.m -x none $DIR/gcfiles/librequiresgc.fake.dylib -o gcenforcer-dylib-requiresgc.exe
-END
-*/
+++ /dev/null
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/libsupportsgc.dylib .
- $C{COMPILE} $DIR/gc-main.m -x none libsupportsgc.dylib -o gcenforcer-dylib-supportsgc.exe
-END
-*/
+++ /dev/null
-#pragma clang diagnostic ignored "-Wcomment"
-/*
-fixme disabled in BATS because of gcfiles
-TEST_CONFIG OS=macosx BATS=0
-
-TEST_BUILD
- cp $DIR/gcfiles/* .
- $C{COMPILE} $DIR/gcenforcer-preflight.m -o gcenforcer-preflight.exe
-END
-*/
-
-#include "test.h"
-#include <dlfcn.h>
-
-void check(int expected, const char *name)
-{
- int fd = open(name, O_RDONLY);
- testassert(fd >= 0);
-
- int result = objc_appRequiresGC(fd);
-
- close(fd);
- testprintf("want %2d got %2d for %s\n", expected, result, name);
- if (result != expected) {
- fail("want %2d got %2d for %s\n", expected, result, name);
- }
- testassert(result == expected);
-}
-
-int main()
-{
- int i;
- for (i = 0; i < 1000; i++) {
- // dlopen_preflight
-
- testassert(dlopen_preflight("libsupportsgc.dylib"));
- testassert(dlopen_preflight("libnoobjc.dylib"));
- testassert(! dlopen_preflight("librequiresgc.dylib"));
- testassert(dlopen_preflight("libnogc.dylib"));
-
- // objc_appRequiresGC
-
- // noobjc: no ObjC content
- // nogc: ordinary not GC
- // aso: trivial AppleScriptObjC wrapper that can run without GC
- // gc: -fobjc-gc
- // gconly: -fobjc-gc-only
- // gcaso: non-trivial AppleScriptObjC with too many classrefs
- // gcaso2: non-trivial AppleScriptObjC with too many class impls
-
- check(0, "x86_64-noobjc");
- check(0, "x86_64-nogc");
- check(0, "x86_64-aso");
- check(1, "x86_64-gc");
- check(1, "x86_64-gconly");
- check(1, "x86_64-gcaso");
- check(1, "x86_64-gcaso2");
-
- check(0, "i386-noobjc");
- check(0, "i386-nogc");
- check(0, "i386-aso");
- check(1, "i386-gc");
- check(1, "i386-gconly");
- check(1, "i386-gcaso");
- check(1, "i386-gcaso2");
-
- // fat files
- check(0, "i386-aso--x86_64-aso");
- check(0, "i386-nogc--x86_64-nogc");
- check(1, "i386-gc--x86_64-gc");
- check(1, "i386-gc--x86_64-nogc");
- check(1, "i386-nogc--x86_64-gc");
-
- // broken files
- check(-1, "x86_64-broken");
- check(-1, "i386-broken");
- check(-1, "i386-broken--x86_64-gc");
- check(-1, "i386-broken--x86_64-nogc");
- check(-1, "i386-gc--x86_64-broken");
- check(-1, "i386-nogc--x86_64-broken");
-
- // evil files
- // evil1: claims to have 4 billion load commands of size 0
- check(-1, "evil1");
- }
-
- succeed(__FILE__);
-}
objc\[\d+\]: HALTED
Testing class_setIvarLayout
objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'TestRoot'
-objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'TestRoot'
-objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'NSObject'
objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'NSObject'
objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'AllocatedTestClass2'
-objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'AllocatedTestClass2'
-objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'TestRoot'
objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'DuplicateClass'
Completed test on good classes.
objc\[\d+\]: Attempt to use unknown class 0x[0-9a-f]+.
TESTCASE(class_addProtocol(cls, @protocol(P))),
TESTCASE(class_addProperty(cls, "x", NULL, 0)),
TESTCASE(class_replaceProperty(cls, "x", NULL, 0)),
- TESTCASE(class_setIvarLayout(cls, NULL)),
+ TESTCASE_NOMETA(class_setIvarLayout(cls, NULL)),
TESTCASE(class_setWeakIvarLayout(cls, NULL)),
TESTCASE_NOMETA(objc_registerClassPair(cls)),
TESTCASE_NOMETA(objc_duplicateClass(cls, dupeName(cls), 0)),
#define ustrcmp(a, b) strcmp((char *)a, (char *)b)
+// Aliasing-friendly way to read from a fixed offset in an object.
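+// (Reading through memcpy rather than casting to uintptr_t* sidesteps
+// strict-aliasing and alignment concerns when peeking at ivars by word index.)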
+uintptr_t readWord(id obj, int offset) {
+ uintptr_t value;
+ char *ptr = (char *)(__bridge void*)obj;
+ memcpy(&value, ptr + offset * sizeof(uintptr_t), sizeof(uintptr_t));
+ return value;
+}
+
#ifdef __cplusplus
class CXX {
public:
static Sub * volatile sub;
sub = [Sub new];
sub->subIvar = 10;
- uintptr_t *subwords = (uintptr_t *)(__bridge void*)sub;
- testassert(subwords[2] == 10);
+ testassertequal(readWord(sub, 2), 10);
#ifdef __cplusplus
- testassert(subwords[5] == 1);
- testassert(sub->cxx.magic == 1);
+ testassertequal(readWord(sub, 5), 1);
+ testassertequal(sub->cxx.magic, 1);
sub->cxx.magic++;
- testassert(subwords[5] == 2);
- testassert(sub->cxx.magic == 2);
+ testassertequal(readWord(sub, 5), 2);
+ testassertequal(sub->cxx.magic, 2);
# if __has_feature(objc_arc)
sub = nil;
# else
*/
Sub2 *sub2 = [Sub2 new];
- uintptr_t *sub2words = (uintptr_t *)(__bridge void*)sub2;
sub2->subIvar = (void *)10;
- testassert(sub2words[11] == 10);
+ testassertequal(readWord(sub2, 11), 10);
- testassert(class_getInstanceSize([Sub2 class]) == 13*sizeof(void*));
+ testassertequal(class_getInstanceSize([Sub2 class]), 13*sizeof(void*));
ivar = class_getInstanceVariable([Sub2 class], "subIvar");
testassert(ivar);
- testassert(11*sizeof(void*) == (size_t)ivar_getOffset(ivar));
+ testassertequal(11*sizeof(void*), (size_t)ivar_getOffset(ivar));
testassert(0 == strcmp(ivar_getName(ivar), "subIvar"));
ivar = class_getInstanceVariable([ShrinkingSuper class], "superIvar");
--- /dev/null
+/*
+TEST_RUN_OUTPUT
+LazyClassName
+LazyClassName2
+END
+*/
+
+#include "test.h"
+#include "testroot.i"
+
+typedef const char * _Nullable (*objc_hook_lazyClassNamer)(_Nonnull Class);
+
+void objc_setHook_lazyClassNamer(_Nonnull objc_hook_lazyClassNamer newValue,
+ _Nonnull objc_hook_lazyClassNamer * _Nonnull oldOutValue);
+
+#define RW_COPIED_RO (1<<27)
+
+struct ObjCClass {
+ struct ObjCClass * __ptrauth_objc_isa_pointer isa;
+ struct ObjCClass * __ptrauth_objc_super_pointer superclass;
+ void *cachePtr;
+ uintptr_t zero;
+ uintptr_t data;
+};
+
+struct ObjCClass_ro {
+ uint32_t flags;
+ uint32_t instanceStart;
+ uint32_t instanceSize;
+#ifdef __LP64__
+ uint32_t reserved;
+#endif
+
+ union {
+ const uint8_t * ivarLayout;
+ struct ObjCClass * nonMetaClass;
+ };
+
+ const char * name;
+ struct ObjCMethodList * __ptrauth_objc_method_list_pointer baseMethodList;
+ struct protocol_list_t * baseProtocols;
+ const struct ivar_list_t * ivars;
+
+ const uint8_t * weakIvarLayout;
+ struct property_list_t *baseProperties;
+};
+
+extern struct ObjCClass OBJC_METACLASS_$_NSObject;
+extern struct ObjCClass OBJC_CLASS_$_NSObject;
+
+extern struct ObjCClass LazyClassName;
+extern struct ObjCClass LazyClassName2;
+
+struct ObjCClass_ro LazyClassNameMetaclass_ro = {
+ .flags = 1,
+ .instanceStart = 40,
+ .instanceSize = 40,
+ .nonMetaClass = &LazyClassName,
+};
+
+struct ObjCClass LazyClassNameMetaclass = {
+ .isa = &OBJC_METACLASS_$_NSObject,
+ .superclass = &OBJC_METACLASS_$_NSObject,
+ .cachePtr = &_objc_empty_cache,
+ .data = (uintptr_t)&LazyClassNameMetaclass_ro,
+};
+
+struct ObjCClass_ro LazyClassName_ro = {
+ .instanceStart = 8,
+ .instanceSize = 8,
+};
+
+struct ObjCClass LazyClassName = {
+ .isa = &LazyClassNameMetaclass,
+ .superclass = &OBJC_CLASS_$_NSObject,
+ .cachePtr = &_objc_empty_cache,
+ .data = (uintptr_t)&LazyClassName_ro + 2,
+};
+
+struct ObjCClass_ro LazyClassName2Metaclass_ro = {
+ .flags = 1,
+ .instanceStart = 40,
+ .instanceSize = 40,
+ .nonMetaClass = &LazyClassName2,
+};
+
+struct ObjCClass LazyClassName2Metaclass = {
+ .isa = &OBJC_METACLASS_$_NSObject,
+ .superclass = &OBJC_METACLASS_$_NSObject,
+ .cachePtr = &_objc_empty_cache,
+ .data = (uintptr_t)&LazyClassName2Metaclass_ro,
+};
+
+struct ObjCClass_ro LazyClassName2_ro = {
+ .instanceStart = 8,
+ .instanceSize = 8,
+};
+
+struct ObjCClass LazyClassName2 = {
+ .isa = &LazyClassName2Metaclass,
+ .superclass = &OBJC_CLASS_$_NSObject,
+ .cachePtr = &_objc_empty_cache,
+ .data = (uintptr_t)&LazyClassName2_ro + 2,
+};
+
+static objc_hook_lazyClassNamer OrigNamer;
+
+static const char *ClassNamer(Class cls) {
+ if (cls == (__bridge Class)&LazyClassName)
+ return "LazyClassName";
+ return OrigNamer(cls);
+}
+
+static objc_hook_lazyClassNamer OrigNamer2;
+
+static const char *ClassNamer2(Class cls) {
+ if (cls == (__bridge Class)&LazyClassName2)
+ return "LazyClassName2";
+ return OrigNamer2(cls);
+}
+
+__attribute__((section("__DATA,__objc_classlist,regular,no_dead_strip")))
+struct ObjCClass *LazyClassNamePtr = &LazyClassName;
+__attribute__((section("__DATA,__objc_classlist,regular,no_dead_strip")))
+struct ObjCClass *LazyClassNamePtr2 = &LazyClassName2;
+
+int main() {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability"
+ objc_setHook_lazyClassNamer(ClassNamer, &OrigNamer);
+ objc_setHook_lazyClassNamer(ClassNamer2, &OrigNamer2);
+#pragma clang diagnostic pop
+
+ printf("%s\n", class_getName([(__bridge id)&LazyClassName class]));
+ printf("%s\n", class_getName([(__bridge id)&LazyClassName2 class]));
+}
// etc.) then the typical result is a silent failure and we end up testing
// /usr/lib/libobjc.A.dylib instead. This test detects when DYLD_LIBRARY_PATH is
// set but libobjc isn't loaded from it.
-int main() {
+int main(int argc __unused, char **argv) {
+ char *containingDirectory = realpath(dirname(argv[0]), NULL);
+ testprintf("containingDirectory is %s\n", containingDirectory);
+
char *dyldLibraryPath = getenv("DYLD_LIBRARY_PATH");
testprintf("DYLD_LIBRARY_PATH is %s\n", dyldLibraryPath);
+
if (dyldLibraryPath != NULL && strlen(dyldLibraryPath) > 0) {
int foundMatch = 0;
+ int foundNonMatch = 0;
dyldLibraryPath = strdup(dyldLibraryPath);
while ((path = strsep(&cursor, ":"))) {
char *resolved = realpath(path, NULL);
testprintf("Resolved %s to %s\n", path, resolved);
+ if (strcmp(resolved, containingDirectory) == 0) {
+ testprintf("This is equal to our containing directory, ignoring.\n");
+ continue;
+ }
testprintf("Comparing %s and %s\n", resolved, info.dli_fname);
int comparison = strncmp(resolved, info.dli_fname, strlen(resolved));
free(resolved);
testprintf("Found a match!\n");
foundMatch = 1;
break;
+ } else {
+ foundNonMatch = 1;
}
}
- testprintf("Finished searching, foundMatch=%d\n", foundMatch);
- testassert(foundMatch);
+ testprintf("Finished searching, foundMatch=%d foundNonMatch=%d\n", foundMatch, foundNonMatch);
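+ // Pass if libobjc came from one of the DYLD_LIBRARY_PATH entries, or if
+ // every entry was skipped (i.e. they all resolved to our own directory).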
+ testassert(foundMatch || !foundNonMatch);
}
succeed(__FILE__);
}
exit(1);
}
wait4(pid, NULL, 0, NULL);
- printf("objs=%p\n", objs);
+
+ // Clean up. Otherwise leaks can end up seeing this as a leak, oddly enough.
+ for (int i = 0; i < classCount; i++) {
+ [objs[i] release];
+ }
+ free(objs);
}
#include "test.h"
struct ObjCClass {
- struct ObjCClass *isa;
- struct ObjCClass *superclass;
+ struct ObjCClass * __ptrauth_objc_isa_pointer isa;
+ struct ObjCClass * __ptrauth_objc_super_pointer superclass;
void *cachePtr;
uintptr_t zero;
struct ObjCClass_ro *data;
const uint8_t * ivarLayout;
const char * name;
- struct ObjCMethodList * baseMethodList;
+ struct ObjCMethodList * __ptrauth_objc_method_list_pointer baseMethodList;
struct protocol_list_t * baseProtocols;
const struct ivar_list_t * ivars;
.asciz "v16@0:8"
_MyMethodStretName:
.asciz "myMethodStret"
+_MyMethodNullTypesName:
+ .asciz "myMethodNullTypes"
_StretType:
.asciz "{BigStruct=QQQQQQQ}16@0:8"
)ASM");
.quad _MyMethod3Name
_MyMethodStretNameRef:
.quad _MyMethodStretName
+_MyMethodNullTypesNameRef:
+ .quad _MyMethodNullTypesName
)ASM");
#else
asm(R"ASM(
.long _MyMethod3Name
_MyMethodStretNameRef:
.long _MyMethodStretName
+_MyMethodNullTypesNameRef:
+ .long _MyMethodNullTypesName
)ASM");
#endif
.p2align 2
_Foo_methodlistSmall:
.long 12 | 0x80000000
- .long 4
+ .long 5
.long _MyMethod1NameRef - .
.long _BoringMethodType - .
.long _MyMethodStretNameRef - .
.long _StretType - .
.long _myMethodStret - .
+
+ .long _MyMethodNullTypesNameRef - .
+ .long 0
+ .long _myMethod1 - .
)ASM");
struct ObjCClass_ro Foo_ro = {
auto *descstret = method_getDescription(mstret);
testassert(descstret->name == @selector(myMethodStret));
testassert(descstret->types == method_getTypeEncoding(mstret));
+
+ Method nullTypeMethod = class_getInstanceMethod(c, @selector(myMethodNullTypes));
+ testassert(nullTypeMethod);
+ testassert(method_getName(nullTypeMethod) == @selector(myMethodNullTypes));
+ testassertequal(method_getTypeEncoding(nullTypeMethod), NULL);
+ testassertequal(method_getImplementation(nullTypeMethod), (IMP)myMethod1);
}
int main() {
+++ /dev/null
-/*
-TEST_CFLAGS -std=c++11
-TEST_CRASHES
-TEST_RUN_OUTPUT
-objc\[\d+\]: CLASS: class 'Foo' 0x[0-9a-fA-F]+ small method list 0x[0-9a-fA-F]+ is not in immutable memory
-objc\[\d+\]: HALTED
-END
-*/
-
-#define MUTABLE_METHOD_LIST 1
-
-#include "methodListSmall.h"
-
-int main() {
- Class fooClass = (__bridge Class)&FooClass;
- [fooClass new];
- fail("Should have crashed");
-}
# if __x86_64__
# define RC_ONE (1ULL<<56)
# elif __arm64__ && __LP64__
-# define RC_ONE (1ULL<<45)
+// Quiet the warning about redefining the macro from isa.h.
+# undef RC_ONE
+# define RC_ONE (objc_debug_isa_magic_value == 1 ? 1ULL<<56 : 1ULL<<45)
# elif __ARM_ARCH_7K__ >= 2 || (__arm64__ && !__LP64__)
# define RC_ONE (1ULL<<25)
# else
testassert(!NONPOINTER(obj));
uintptr_t isa = ISA(obj);
- testassert((Class)isa == cls);
- testassert((Class)(isa & objc_debug_isa_class_mask) == cls);
- testassert((Class)(isa & ~objc_debug_isa_class_mask) == 0);
+ testassertequal(ptrauth_strip((void *)isa, ptrauth_key_process_independent_data), (void *)cls);
+ testassertequal((Class)(isa & objc_debug_isa_class_mask), cls);
+ testassertequal(ptrauth_strip((void *)(isa & ~objc_debug_isa_class_mask), ptrauth_key_process_independent_data), 0);
CFRetain(obj);
testassert(ISA(obj) == isa);
void check_nonpointer(id obj, Class cls)
{
- testassert(object_getClass(obj) == cls);
+ testassertequal(object_getClass(obj), cls);
testassert(NONPOINTER(obj));
uintptr_t isa = ISA(obj);
if (objc_debug_indexed_isa_magic_mask != 0) {
// Indexed isa.
- testassert((isa & objc_debug_indexed_isa_magic_mask) == objc_debug_indexed_isa_magic_value);
+ testassertequal((isa & objc_debug_indexed_isa_magic_mask), objc_debug_indexed_isa_magic_value);
testassert((isa & ~objc_debug_indexed_isa_index_mask) != 0);
uintptr_t index = (isa & objc_debug_indexed_isa_index_mask) >> objc_debug_indexed_isa_index_shift;
testassert(index < objc_indexed_classes_count);
- testassert(objc_indexed_classes[index] == cls);
+ testassertequal(objc_indexed_classes[index], cls);
} else {
// Packed isa.
- testassert((Class)(isa & objc_debug_isa_class_mask) == cls);
+ testassertequal((Class)(isa & objc_debug_isa_class_mask), cls);
testassert((Class)(isa & ~objc_debug_isa_class_mask) != 0);
- testassert((isa & objc_debug_isa_magic_mask) == objc_debug_isa_magic_value);
+ testassertequal((isa & objc_debug_isa_magic_mask), objc_debug_isa_magic_value);
}
CFRetain(obj);
- testassert(ISA(obj) == isa + RC_ONE);
- testassert([obj retainCount] == 2);
+ testassertequal(ISA(obj), isa + RC_ONE);
+ testassertequal([obj retainCount], 2);
[obj retain];
- testassert(ISA(obj) == isa + RC_ONE*2);
- testassert([obj retainCount] == 3);
+ testassertequal(ISA(obj), isa + RC_ONE*2);
+ testassertequal([obj retainCount], 3);
CFRelease(obj);
- testassert(ISA(obj) == isa + RC_ONE);
- testassert([obj retainCount] == 2);
+ testassertequal(ISA(obj), isa + RC_ONE);
+ testassertequal([obj retainCount], 2);
[obj release];
- testassert(ISA(obj) == isa);
- testassert([obj retainCount] == 1);
+ testassertequal(ISA(obj), isa);
+ testassertequal([obj retainCount], 1);
}
# if !OBJC_HAVE_NONPOINTER_ISA || !OBJC_HAVE_PACKED_NONPOINTER_ISA || OBJC_HAVE_INDEXED_NONPOINTER_ISA
# error wrong
# endif
- testassert(objc_debug_isa_class_mask == (uintptr_t)&objc_absolute_packed_isa_class_mask);
+ void *absoluteMask = (void *)&objc_absolute_packed_isa_class_mask;
+#if __has_feature(ptrauth_calls)
+ absoluteMask = ptrauth_strip(absoluteMask, ptrauth_key_process_independent_data);
+#endif
+ // absoluteMask should "cover" objc_debug_isa_class_mask
+ testassert((objc_debug_isa_class_mask & (uintptr_t)absoluteMask) == objc_debug_isa_class_mask);
+ // absoluteMask should only possibly differ in the high bits
+ testassert((objc_debug_isa_class_mask & 0xffff) == ((uintptr_t)absoluteMask & 0xffff));
// Indexed isa variables DO NOT exist on packed-isa platforms
testassert(!dlsym(RTLD_DEFAULT, "objc_absolute_indexed_isa_magic_mask"));
testassert(!dlsym(RTLD_DEFAULT, "objc_absolute_indexed_isa_magic_value"));
testassert(!dlsym(RTLD_DEFAULT, "objc_absolute_indexed_isa_index_mask"));
testassert(!dlsym(RTLD_DEFAULT, "objc_absolute_indexed_isa_index_shift"));
-
+
#elif SUPPORT_INDEXED_ISA
# if !OBJC_HAVE_NONPOINTER_ISA || OBJC_HAVE_PACKED_NONPOINTER_ISA || !OBJC_HAVE_INDEXED_NONPOINTER_ISA
# error wrong
#else
# error unknown nonpointer isa format
#endif
-
+
testprintf("Isa with index\n");
id index_o = [Fake_OS_object new];
check_nonpointer(index_o, [Fake_OS_object class]);
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "https://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>com.apple.springboard-ui.client</key>
+ <true/>
+ <key>com.apple.security.system-groups</key>
+ <array>
+ <string>systemgroup.com.apple.powerlog</string>
+ </array>
+</dict>
+</plist>
--- /dev/null
+/*
+TEST_ENTITLEMENTS preopt-caches.entitlements
+TEST_CONFIG OS=iphoneos MEM=mrc
+TEST_BUILD
+ mkdir -p $T{OBJDIR}
+ /usr/sbin/dtrace -h -s $DIR/../runtime/objc-probes.d -o $T{OBJDIR}/objc-probes.h
+ $C{COMPILE} $DIR/preopt-caches.mm -std=gnu++17 -isystem $C{SDK_PATH}/System/Library/Frameworks/System.framework/PrivateHeaders -I$T{OBJDIR} -ldsc -o preopt-caches.exe
+END
+*/
+//
+// check_preopt_caches.m
+// check-preopt-caches
+//
+// Created by Thomas Deniau on 11/06/2020.
+//
+
+#define TEST_CALLS_OPERATOR_NEW
+
+#include "test-defines.h"
+#include "../runtime/objc-private.h"
+#include <objc/objc-internal.h>
+
+#include <dlfcn.h>
+#include <dirent.h>
+#include <objc/runtime.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <mach-o/dyld.h>
+#include <mach-o/dyld_process_info.h>
+#include <mach-o/dyld_cache_format.h>
+#include <mach-o/dsc_iterator.h>
+#include <unordered_map>
+#include <string>
+#include <vector>
+#include <set>
+#include <spawn.h>
+#include <sys/poll.h>
+
+#include "test.h"
+
+int validate_dylib_in_forked_process(const char * const toolPath, const char * const dylib)
+{
+ int out_pipe[2] = {-1};
+ int err_pipe[2] = {-1};
+ int exit_code = -1;
+ pid_t pid = 0;
+ int rval = 0;
+
+ std::string child_stdout;
+ std::string child_stderr;
+
+ posix_spawn_file_actions_t actions = NULL;
+ const char * const args[] = {toolPath, dylib, NULL};
+ int ret = 0;
+
+ if (pipe(out_pipe)) {
+ exit(3);
+ }
+
+ if (pipe(err_pipe)) {
+ exit(3);
+ }
+
+ //Do-si-do the FDs
+ posix_spawn_file_actions_init(&actions);
+ posix_spawn_file_actions_addclose(&actions, out_pipe[0]);
+ posix_spawn_file_actions_addclose(&actions, err_pipe[0]);
+ posix_spawn_file_actions_adddup2(&actions, out_pipe[1], 1);
+ posix_spawn_file_actions_adddup2(&actions, err_pipe[1], 2);
+ posix_spawn_file_actions_addclose(&actions, out_pipe[1]);
+ posix_spawn_file_actions_addclose(&actions, err_pipe[1]);
+
+ // Fork so that we can dlopen the dylib in a clean context
+ ret = posix_spawnp(&pid, args[0], &actions, NULL, (char * const *)args, NULL);
+
+ if (ret != 0) {
+ fail("posix_spawn for %s failed: returned %d, %s\n", dylib, ret, strerror(ret));
+ exit(3);
+ }
+
+ posix_spawn_file_actions_destroy(&actions);
+ close(out_pipe[1]);
+ close(err_pipe[1]);
+
+ std::string buffer(4096,' ');
+ std::vector<pollfd> plist = { {out_pipe[0],POLLIN,0}, {err_pipe[0],POLLIN,0} };
+ while (( (rval = poll(&plist[0],(nfds_t)plist.size(), 100000)) > 0 ) || ((rval < 0) && (errno == EINTR))) {
+ if (rval < 0) {
+ // EINTR
+ continue;
+ }
+
+ ssize_t bytes_read = 0;
+
+ if (plist[0].revents&(POLLERR|POLLHUP) || plist[1].revents&(POLLERR|POLLHUP)) {
+ bytes_read = read(out_pipe[0], &buffer[0], buffer.length());
+ bytes_read = read(err_pipe[0], &buffer[0], buffer.length());
+ break;
+ }
+
+ if (plist[0].revents&POLLIN) {
+ bytes_read = read(out_pipe[0], &buffer[0], buffer.length());
+ child_stdout += buffer.substr(0, static_cast<size_t>(bytes_read));
+ }
+ else if ( plist[1].revents&POLLIN ) {
+ bytes_read = read(err_pipe[0], &buffer[0], buffer.length());
+ child_stderr += buffer.substr(0, static_cast<size_t>(bytes_read));
+ }
+ else break; // nothing left to read
+
+ plist[0].revents = 0;
+ plist[1].revents = 0;
+ }
+ if (rval == 0) {
+ // Early timeout so try to clean up.
+ fail("Failed to validate dylib %s: timeout!\n", dylib);
+ return 1;
+ }
+
+
+ if (err_pipe[0] != -1) {
+ close(err_pipe[0]);
+ }
+
+ if (out_pipe[0] != -1) {
+ close(out_pipe[0]);
+ }
+
+ if (pid != 0) {
+ if (waitpid(pid, &exit_code, 0) < 0) {
+ fail("Could not wait for PID %d (dylib %s): err %s\n", pid, dylib, strerror(errno));
+ }
+
+ if (!WIFEXITED(exit_code)) {
+ fail("PID %d (%s) did not exit: %d. stdout: %s\n stderr: %s\n", pid, dylib, exit_code, child_stdout.c_str(), child_stderr.c_str());
+ }
+ if (WEXITSTATUS(exit_code) != 0) {
+ fail("Failed to validate dylib %s\nstdout: %s\nstderr: %s\n", dylib, child_stdout.c_str(), child_stderr.c_str());
+ }
+ }
+
+ testprintf("%s", child_stdout.c_str());
+
+ return 0;
+}
+
+bool check_class(Class cls, unsigned & cacheCount) {
+ // printf("%s %s\n", class_getName(cls), class_isMetaClass(cls) ? "(metaclass)" : "");
+
+ // Call class_getMethodImplementation to initialize the cache, setting up the constant cache if there is one.
+ class_getMethodImplementation(cls, @selector(initialize));
+
+ if (objc_cache_isConstantOptimizedCache(&(cls->cache), true, (uintptr_t)&_objc_empty_cache)) {
+ cacheCount++;
+ // printf("%s has a preopt cache\n", class_getName(cls));
+
+ // Collect the union of all selectors from this class up to (but not including) the preopt fallback class.
+ const class_ro_t * fallback = ((const objc_class *) objc_cache_preoptFallbackClass(&(cls->cache)))->data()->ro();
+
+ std::unordered_map<SEL, IMP> methods;
+
+ Method *methodList;
+ unsigned count;
+ Class currentClass = cls;
+ unsigned dynamicCount = 0;
+ while (currentClass->data()->ro() != fallback) {
+ methodList = class_copyMethodList(currentClass, &count);
+ // printf("%d methods in method list for %s\n", count, class_getName(currentClass));
+ for (unsigned i = 0 ; i < count ; i++) {
+ SEL sel = method_getName(methodList[i]);
+ if (methods.find(sel) == methods.end()) {
+ const char *name = sel_getName(sel);
+ // printf("[dynamic] %s -> %p\n", name, method_getImplementation(methodList[i]));
+ methods[sel] = ptrauth_strip(method_getImplementation(methodList[i]), ptrauth_key_function_pointer);
+ if ( (currentClass == cls) ||
+ ( (strcmp(name, ".cxx_construct") != 0)
+ && (strcmp(name, ".cxx_destruct") != 0))) {
+ dynamicCount++;
+ }
+ }
+ }
+ if (count > 0) {
+ free(methodList);
+ }
+ currentClass = class_getSuperclass(currentClass);
+ }
+
+ // Check that the preopt cache and the dynamically collected methods agree.
+
+ // Count the methods in the preopt cache
+ unsigned preoptCacheCount = 0;
+ unsigned capacity = objc_cache_preoptCapacity(&(cls->cache));
+ const preopt_cache_entry_t *buckets = objc_cache_preoptCache(&(cls->cache))->entries;
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-of-sel-type"
+ const uint8_t *selOffsetsBase = (const uint8_t*)@selector(🤯);
+#pragma clang diagnostic pop
+ for (unsigned i = 0 ; i < capacity ; i++) {
+ uint32_t selOffset = buckets[i].sel_offs;
+ if (selOffset != 0xFFFFFFFF) {
+ SEL sel = (SEL)(selOffsetsBase + selOffset);
+ IMP imp = (IMP)((uint8_t*)cls - buckets[i].imp_offs);
+ if (methods.find(sel) == methods.end()) {
+ fail("ERROR: %s: %s not found in dynamic method list\n", class_getName(cls), sel_getName(sel));
+ return false;
+ }
+ IMP dynamicImp = methods.at(sel);
+ // printf("[static] %s -> %p\n", sel_getName(sel), imp);
+ if (imp != dynamicImp) {
+ fail("ERROR: %s: %s has different implementations %p vs %p in static and dynamic caches", class_getName(cls), sel_getName(sel), imp, dynamicImp);
+ return false;
+ }
+ preoptCacheCount++;
+ }
+ }
+
+ if (preoptCacheCount != dynamicCount) {
+ testwarn("Methods in preopt cache:\n");
+
+ for (unsigned i = 0 ; i < capacity ; i++) {
+ uint32_t selOffset = buckets[i].sel_offs;
+ if (selOffset != 0xFFFFFFFF) {
+ SEL sel = (SEL)(selOffsetsBase + selOffset);
+ testwarn("%s\n", sel_getName(sel));
+ }
+ }
+
+ testwarn("Methods in dynamic cache:\n");
+
+ for (const auto & [sel, imp] : methods) {
+ testwarn("%s\n", sel_getName(sel));
+ }
+
+ fail("ERROR: %s's preoptimized cache is missing some methods\n", class_getName(cls));
+
+ return false;
+ }
+
+ } else {
+ // printf("%s does NOT have a preopt cache\n", class_getName(cls));
+ }
+
+ return true;
+}
+
+bool check_library(const char *path) {
+ std::set<std::string> blacklistedClasses {
+ "PNPWizardScratchpadInkView", // Can only be +initialized on Pencil-capable devices
+ "CACDisplayManager", // rdar://64929282 (CACDisplayManager does layout in +initialize!)
+ };
+
+ testprintf("Checking %s… ", path);
+
+ __unused void *lib = dlopen(path, RTLD_NOW);
+ extern uint32_t _dyld_image_count(void) __OSX_AVAILABLE_STARTING(__MAC_10_1, __IPHONE_2_0);
+ unsigned outCount = 0;
+
+ // Realize all classes first.
+ Class *allClasses = objc_copyClassList(&outCount);
+ if (allClasses != NULL) {
+ free(allClasses);
+ }
+
+ allClasses = objc_copyClassesForImage(path, &outCount);
+ if (allClasses != NULL) {
+ unsigned classCount = 0;
+ unsigned cacheCount = 0;
+
+ for (const Class * clsPtr = allClasses ; *clsPtr != nil ; clsPtr++) {
+ classCount++;
+ Class cls = *clsPtr;
+
+ if (blacklistedClasses.find(class_getName(cls)) != blacklistedClasses.end()) {
+ continue;
+ }
+
+ if (!check_class(cls, cacheCount)) {
+ return false;
+ }
+
+ if (!class_isMetaClass(cls)) {
+ if (!check_class(object_getClass(cls), cacheCount)) {
+ return false;
+ }
+ }
+ }
+ testprintf("checked %d caches in %d classes\n", cacheCount, classCount);
+ free(allClasses);
+ } else {
+ testprintf("could not find %s or no class names inside\n", path);
+ }
+
+ return true;
+}
+
+size_t size_of_shared_cache_with_uuid(uuid_t uuid) {
+ DIR* dfd = opendir(IPHONE_DYLD_SHARED_CACHE_DIR);
+ if (!dfd) {
+ fail("Error: unable to open shared cache dir %s\n",
+ IPHONE_DYLD_SHARED_CACHE_DIR);
+ exit(1);
+ }
+
+ uint64_t shared_cache_size = 0;
+
+ struct dirent *dp;
+ while ((dp = readdir(dfd))) {
+ char full_filename[512];
+ snprintf(full_filename, sizeof(full_filename), "%s%s",
+ IPHONE_DYLD_SHARED_CACHE_DIR, dp->d_name);
+
+ struct stat stat_buf;
+ if (stat(full_filename, &stat_buf) != 0)
+ continue;
+
+ if ((stat_buf.st_mode & S_IFMT) == S_IFDIR)
+ continue;
+
+ int fd = open(full_filename, O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "Error: unable to open file %s\n", full_filename);
+ continue;
+ }
+
+ struct dyld_cache_header header;
+ if (read(fd, &header, sizeof(header)) != sizeof(header)) {
+ fprintf(stderr, "Error: unable to read dyld shared cache header from %s\n",
+ full_filename);
+ close(fd);
+ continue;
+ }
+
+ if (uuid_compare(header.uuid, uuid) == 0) {
+ shared_cache_size = stat_buf.st_size;
+ break;
+ }
+ }
+
+ closedir(dfd);
+
+ return shared_cache_size;
+}
+
+int main (int argc, const char * argv[])
+{
+ if (argc == 1) {
+ int err = 0;
+ dyld_process_info process_info = _dyld_process_info_create(mach_task_self(), 0, &err);
+ if (NULL == process_info) {
+ mach_error("_dyld_process_info_create", err);
+ fail("_dyld_process_info_create");
+ return 2;
+ }
+ dyld_process_cache_info cache_info;
+ _dyld_process_info_get_cache(process_info, &cache_info);
+
+ __block std::set<std::string> dylibsSet;
+ size_t size = size_of_shared_cache_with_uuid(cache_info.cacheUUID);
+ dyld_shared_cache_iterate((void*)cache_info.cacheBaseAddress, (uint32_t)size, ^(const dyld_shared_cache_dylib_info* dylibInfo, __unused const dyld_shared_cache_segment_info* segInfo) {
+ if (dylibInfo->isAlias) return;
+ std::string path(dylibInfo->path);
+ dylibsSet.insert(path);
+ });
+ std::vector<std::string> dylibs(dylibsSet.begin(), dylibsSet.end());
+
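+ // Presumably validate_dylib_in_forked_process re-execs this test binary
+ // (argv[0]) with the dylib path as its argument (the argc > 1 branch below),
+ // so each image is checked in its own process while dispatch_apply fans the
+ // work out across the shared cache's dylibs.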
+ dispatch_apply(dylibs.size(), DISPATCH_APPLY_AUTO, ^(size_t idx) {
+ validate_dylib_in_forked_process(argv[0], dylibs[idx].c_str());
+ });
+ } else {
+ const char *libraryName = argv[1];
+ if (!check_library(libraryName)) {
+ fail("checking library %s\n", libraryName);
+ return 1;
+ }
+ }
+
+ succeed(__FILE__);
+ return 0;
+}
--- /dev/null
+// TEST_CFLAGS -framework Foundation
+// need Foundation to get NSObject compatibility additions for class Protocol
+// because ARC calls [protocol retain]
+/*
+TEST_BUILD_OUTPUT
+.*protocolSmall.m:\d+:\d+: warning: cannot find protocol definition for 'SmallProto'
+.*protocolSmall.m:\d+:\d+: note: protocol 'SmallProto' has no definition
+END
+*/
+
+#include "test.h"
+#include "testroot.i"
+#include <objc/runtime.h>
+
+struct MethodListOneEntry {
+ uint32_t entSizeAndFlags;
+ uint32_t count;
+ SEL name;
+ const char *types;
+ void *imp;
+};
+
+struct SmallProtoStructure {
+ Class isa;
+ const char *mangledName;
+ struct protocol_list_t *protocols;
+ void *instanceMethods;
+ void *classMethods;
+ void *optionalInstanceMethods;
+ void *optionalClassMethods;
+ void *instanceProperties;
+ uint32_t size; // sizeof(protocol_t)
+ uint32_t flags;
+};
+
+struct MethodListOneEntry SmallProtoMethodList = {
+ .entSizeAndFlags = 3 * sizeof(void *),
+ .count = 1,
+ .name = NULL,
+ .types = "v@:",
+ .imp = NULL,
+};
+
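+ // A hand-built protocol_t, emitted under the mangled symbol name the compiler
+ // would use for a protocol named SmallProto, so C's <SmallProto> conformance
+ // (the protocol is only forward-declared below, hence the expected build
+ // warnings) resolves to this structure.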
+struct SmallProtoStructure SmallProtoData
+ __asm__("__OBJC_PROTOCOL_$_SmallProto")
+ = {
+ .mangledName = "SmallProto",
+ .instanceMethods = &SmallProtoMethodList,
+ .size = sizeof(struct SmallProtoStructure),
+};
+
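+ // Registering the protocol with the runtime: an entry in __objc_protolist is
+ // how the runtime discovers protocols in an image at load time.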
+void *SmallProtoListEntry
+ __attribute__((section("__DATA,__objc_protolist,coalesced,no_dead_strip")))
+ = &SmallProtoData;
+
+@protocol SmallProto;
+@protocol NormalProto
+- (void)protoMethod;
+@end
+
+@interface C: TestRoot <SmallProto, NormalProto> @end
+@implementation C
+- (void)protoMethod {}
+@end
+
+int main()
+{
+ // Fix up the method list selector by hand; getting the compiler to generate a
+ // proper selref as a compile-time constant is a pain.
+ SmallProtoMethodList.name = @selector(protoMethod);
+ unsigned protoCount;
+
+ Protocol * __unsafe_unretained *protos = class_copyProtocolList([C class], &protoCount);
+ for (unsigned i = 0; i < protoCount; i++) {
+ testprintf("Checking index %u protocol %p\n", i, protos[i]);
+ const char *name = protocol_getName(protos[i]);
+ testprintf("Name is %s\n", name);
+ testassert(strcmp(name, "SmallProto") == 0 || strcmp(name, "NormalProto") == 0);
+
+ objc_property_t *classProperties = protocol_copyPropertyList2(protos[i], NULL, YES, NO);
+ testassert(classProperties == NULL);
+
+ struct objc_method_description desc = protocol_getMethodDescription(protos[i], @selector(protoMethod), YES, YES);
+ testprintf("Protocol protoMethod name is %s types are %s\n", desc.name, desc.types);
+ testassert(desc.name == @selector(protoMethod));
+ testassert(desc.types[0] == 'v');
+ }
+ free(protos);
+
+ succeed(__FILE__);
+}
// Read a non-root class.
testassert(!objc_getClass("Sub"));
- extern intptr_t OBJC_CLASS_$_Sub[OBJC_MAX_CLASS_SIZE/sizeof(void*)];
+ // Clang assumes too much alignment on this by default (rdar://problem/60881608),
+ // so tell it that it's only as aligned as an intptr_t.
+ extern _Alignas(intptr_t) intptr_t OBJC_CLASS_$_Sub[OBJC_MAX_CLASS_SIZE/sizeof(void*)];
// Make a duplicate of class Sub for use later.
intptr_t Sub2_buf[OBJC_MAX_CLASS_SIZE/sizeof(void*)];
memcpy(Sub2_buf, &OBJC_CLASS_$_Sub, sizeof(Sub2_buf));
+ // Re-sign the isa and super pointers in the new location.
+ ((Class __ptrauth_objc_isa_pointer *)(void *)Sub2_buf)[0] = ((Class __ptrauth_objc_isa_pointer *)(void *)&OBJC_CLASS_$_Sub)[0];
+ ((Class __ptrauth_objc_super_pointer *)(void *)Sub2_buf)[1] = ((Class __ptrauth_objc_super_pointer *)(void *)&OBJC_CLASS_$_Sub)[1];
+
Class Sub = objc_readClassPair((__bridge Class)(void*)&OBJC_CLASS_$_Sub, &ii);
testassert(Sub);
#include "test.h"
#import <Foundation/Foundation.h>
-#define OBJECTS 1
+#define OBJECTS 10
#define LOOPS 256
#define THREADS 16
#if __x86_64__
testassert(strcmp(class_getName([SwiftV1Class3 class]), class_getName(object_getClass([SwiftV1Class3 class]))) == 0);
testassert(strcmp(class_getName([SwiftV1Class4 class]), class_getName(object_getClass([SwiftV1Class4 class]))) == 0);
+ testassert(!_class_isSwift([TestRoot class]));
+ testassert(!_class_isSwift([Sub class]));
+ testassert(_class_isSwift([SwiftV1Class class]));
+ testassert(_class_isSwift([SwiftV1Class2 class]));
+ testassert(_class_isSwift([SwiftV1Class3 class]));
+ testassert(_class_isSwift([SwiftV1Class4 class]));
+
succeed(__FILE__);
}
-// TEST_CONFIG
+/*
+ TEST_CONFIG MEM=mrc
+ TEST_ENV OBJC_DISABLE_NONPOINTER_ISA=YES
+*/
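+// The -_noteAssociatedObjects hook only fires for raw-isa classes with custom
+// retain/release, which is why nonpointer isa is disabled above and -release
+// is overridden below.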
#include "test.h"
#include "testroot.i"
-id sawObject;
-const void *sawKey;
-id sawValue;
-objc_AssociationPolicy sawPolicy;
+bool hasAssociations = false;
-objc_hook_setAssociatedObject originalSetAssociatedObject;
+@interface TestRoot (AssocHooks)
+@end
-void hook(id _Nonnull object, const void * _Nonnull key, id _Nullable value, objc_AssociationPolicy policy) {
- sawObject = object;
- sawKey = key;
- sawValue = value;
- sawPolicy = policy;
- originalSetAssociatedObject(object, key, value, policy);
+@implementation TestRoot (AssocHooks)
+
+- (void)_noteAssociatedObjects {
+ hasAssociations = true;
+}
+
+// -_noteAssociatedObjects is currently limited to raw-isa custom-rr to avoid overhead
+- (void) release {
}
+@end
+
int main() {
id obj = [TestRoot new];
id value = [TestRoot new];
const void *key = "key";
objc_setAssociatedObject(obj, key, value, OBJC_ASSOCIATION_RETAIN);
- testassert(sawObject == nil);
- testassert(sawKey == nil);
- testassert(sawValue == nil);
- testassert(sawPolicy == 0);
+ testassert(hasAssociations == true);
id out = objc_getAssociatedObject(obj, key);
testassert(out == value);
- objc_setHook_setAssociatedObject(hook, &originalSetAssociatedObject);
-
+ hasAssociations = false;
key = "key2";
objc_setAssociatedObject(obj, key, value, OBJC_ASSOCIATION_RETAIN);
- testassert(sawObject == obj);
- testassert(sawKey == key);
- testassert(sawValue == value);
- testassert(sawPolicy == OBJC_ASSOCIATION_RETAIN);
+ testassert(hasAssociations == false); // the hook only fires the first time an object gains associations
+
out = objc_getAssociatedObject(obj, key);
testassert(out == value);
succeed(__FILE__);
-}
\ No newline at end of file
+}
#if __has_feature(ptrauth_calls)
# define SIGNED_METHOD_LIST_IMP "@AUTH(ia,0,addr) "
# define SIGNED_STUB_INITIALIZER "@AUTH(ia,0xc671,addr) "
+# define SIGNED_METHOD_LIST "@AUTH(da,0xC310,addr) "
+# define SIGNED_ISA "@AUTH(da, 0x6AE1, addr) "
+# define SIGNED_SUPER "@AUTH(da, 0xB5AB, addr) "
#else
# define SIGNED_METHOD_LIST_IMP
# define SIGNED_STUB_INITIALIZER
+# define SIGNED_METHOD_LIST
+# define SIGNED_ISA
+# define SIGNED_SUPER
#endif
#define str(x) #x
".section __DATA,__objc_data \n" \
".align 3 \n" \
"_OBJC_CLASS_$_" #name ": \n" \
- PTR "_OBJC_METACLASS_$_" #name "\n" \
- PTR "_OBJC_CLASS_$_" #superclass "\n" \
+ PTR "_OBJC_METACLASS_$_" #name SIGNED_ISA "\n" \
+ PTR "_OBJC_CLASS_$_" #superclass SIGNED_SUPER "\n" \
PTR "__objc_empty_cache \n" \
PTR "0 \n" \
PTR "L_" #name "_ro + 2 \n" \
PTR "0 \n" \
\
"_OBJC_METACLASS_$_" #name ": \n" \
- PTR "_OBJC_METACLASS_$_" #superclass "\n" \
- PTR "_OBJC_METACLASS_$_" #superclass "\n" \
+ PTR "_OBJC_METACLASS_$_" #superclass SIGNED_ISA "\n" \
+ PTR "_OBJC_METACLASS_$_" #superclass SIGNED_SUPER "\n" \
PTR "__objc_empty_cache \n" \
PTR "0 \n" \
PTR "L_" #name "_meta_ro \n" \
ONLY_LP64(".long 0 \n") \
PTR "0 \n" \
PTR "L_" #name "_name \n" \
- PTR "L_" #name "_methods \n" \
+ PTR "L_" #name "_methods" SIGNED_METHOD_LIST "\n" \
PTR "0 \n" \
PTR "L_" #name "_ivars \n" \
PTR "0 \n" \
ONLY_LP64(".long 0 \n") \
PTR "0 \n" \
PTR "L_" #name "_name \n" \
- PTR "L_" #name "_meta_methods \n" \
+ PTR "L_" #name "_meta_methods" SIGNED_METHOD_LIST "\n" \
PTR "0 \n" \
PTR "0 \n" \
PTR "0 \n" \
// Example: rdar://problem/50707074
Class HeapSwiftSub = (Class)malloc(OBJC_MAX_CLASS_SIZE);
memcpy(HeapSwiftSub, RawRealSwiftSub, OBJC_MAX_CLASS_SIZE);
+ // Re-sign the isa and super pointers in the new location.
+ ((Class __ptrauth_objc_isa_pointer *)(void *)HeapSwiftSub)[0] = ((Class __ptrauth_objc_isa_pointer *)(void *)RawRealSwiftSub)[0];
+ ((Class __ptrauth_objc_super_pointer *)(void *)HeapSwiftSub)[1] = ((Class __ptrauth_objc_super_pointer *)(void *)RawRealSwiftSub)[1];
testprintf("initSub beginning _objc_realizeClassFromSwift\n");
_objc_realizeClassFromSwift(HeapSwiftSub, cls);
RELEASE_VAR(w);
}
+#if OBJC_SPLIT_TAGGED_POINTERS
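+// Under the split tagged pointer scheme, OR-ing objc_debug_constant_cfstring_tag_bits
+// onto an ordinary pointer marks it as a constant tagged pointer; stripping the tag
+// with _objc_getTaggedPointerRawPointerValue must give back the original pointer.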
+void testConstantTaggedPointerRoundTrip(void *ptr)
+{
+ uintptr_t tagged = (uintptr_t)ptr | objc_debug_constant_cfstring_tag_bits;
+ void *untagged = _objc_getTaggedPointerRawPointerValue((void *)tagged);
+ testassert(ptr == untagged);
+}
+
+void testConstantTaggedPointers(void)
+{
+ testConstantTaggedPointerRoundTrip(0);
+ testConstantTaggedPointerRoundTrip((void *)sizeof(void *));
+ testConstantTaggedPointerRoundTrip((void *)(MACH_VM_MAX_ADDRESS - sizeof(void *)));
+}
+#endif
+
int main()
{
testassert(objc_debug_taggedpointer_mask != 0);
objc_getClass("TaggedNSObjectSubclass"));
testGenericTaggedPointer(OBJC_TAG_NSManagedObjectID,
objc_getClass("TaggedNSObjectSubclass"));
+
+#if OBJC_SPLIT_TAGGED_POINTERS
+ testConstantTaggedPointers();
+#endif
} POP_POOL;
succeed(__FILE__);
int main()
{
- testassert(_objc_getTaggedPointerTag((void *)1) == 0);
+#if OBJC_SPLIT_TAGGED_POINTERS
+ void *obj = (void *)0;
+#else
+ void *obj = (void *)1;
+#endif
+
+ testassert(_objc_getTaggedPointerTag(obj) == 0);
succeed(__FILE__);
}
--- /dev/null
+#define TEST_OVERRIDES_NEW 1
#include <pthread.h>
#if __cplusplus
#include <atomic>
-using namespace std;
+using std::atomic_int;
+using std::memory_order_relaxed;
#else
#include <stdatomic.h>
#endif
#define __testassert(cond, file, line) \
(fail("failed assertion '%s' at %s:%u", cond, __FILE__, __LINE__))
+static inline char *hexstring(uint8_t *data, size_t size)
+{
+ char *str;
+ switch (size) {
+ case sizeof(unsigned long long):
+ asprintf(&str, "%016llx", *(unsigned long long *)data);
+ break;
+ case sizeof(unsigned int):
+ asprintf(&str, "%08x", *(unsigned int*)data);
+ break;
+ case sizeof(uint16_t):
+ asprintf(&str, "%04x", *(uint16_t *)data);
+ break;
+ default:
+ str = (char *)malloc(size * 2 + 1);
+ for (size_t i = 0; i < size; i++) {
+ sprintf(str + i * 2, "%02x", data[i]);
+ }
+ }
+ return str;
+}
+
+static inline void failnotequal(uint8_t *lhs, size_t lhsSize, uint8_t *rhs, size_t rhsSize, const char *lhsStr, const char *rhsStr, const char *file, unsigned line)
+{
+ fprintf(stderr, "BAD: failed assertion '%s != %s' (0x%s != 0x%s) at %s:%u\n", lhsStr, rhsStr, hexstring(lhs, lhsSize), hexstring(rhs, rhsSize), file, line);
+ exit(1);
+}
+
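+// Like testassert, but on failure prints both operands (rendered as hex by
+// hexstring above) alongside their source expressions.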
+#define testassertequal(lhs, rhs) do {\
+ __typeof__(lhs) __lhs = lhs; \
+ __typeof__(rhs) __rhs = rhs; \
+ if (__lhs != __rhs) failnotequal((uint8_t *)&__lhs, sizeof(__lhs), (uint8_t *)&__rhs, sizeof(__rhs), #lhs, #rhs, __FILE__, __LINE__); \
+} while(0)
+
/* time-sensitive assertion, disabled under valgrind */
#define timecheck(name, time, fast, slow) \
if (getenv("VALGRIND") && 0 != strcmp(getenv("VALGRIND"), "NO")) { \
`#define TEST_CALLS_OPERATOR_NEW` before including test.h.
*/
#if __cplusplus && !defined(TEST_CALLS_OPERATOR_NEW)
+#if !defined(TEST_OVERRIDES_NEW)
+#define TEST_OVERRIDES_NEW 1
+#endif
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winline-new-delete"
#import <new>
-inline void* operator new(std::size_t) throw (std::bad_alloc) { fail("called global operator new"); }
-inline void* operator new[](std::size_t) throw (std::bad_alloc) { fail("called global operator new[]"); }
-inline void* operator new(std::size_t, const std::nothrow_t&) throw() { fail("called global operator new(nothrow)"); }
-inline void* operator new[](std::size_t, const std::nothrow_t&) throw() { fail("called global operator new[](nothrow)"); }
-inline void operator delete(void*) throw() { fail("called global operator delete"); }
-inline void operator delete[](void*) throw() { fail("called global operator delete[]"); }
-inline void operator delete(void*, const std::nothrow_t&) throw() { fail("called global operator delete(nothrow)"); }
-inline void operator delete[](void*, const std::nothrow_t&) throw() { fail("called global operator delete[](nothrow)"); }
+inline void* operator new(std::size_t) { fail("called global operator new"); }
+inline void* operator new[](std::size_t) { fail("called global operator new[]"); }
+inline void* operator new(std::size_t, const std::nothrow_t&) noexcept(true) { fail("called global operator new(nothrow)"); }
+inline void* operator new[](std::size_t, const std::nothrow_t&) noexcept(true) { fail("called global operator new[](nothrow)"); }
+inline void operator delete(void*) noexcept(true) { fail("called global operator delete"); }
+inline void operator delete[](void*) noexcept(true) { fail("called global operator delete[]"); }
+inline void operator delete(void*, const std::nothrow_t&) noexcept(true) { fail("called global operator delete(nothrow)"); }
+inline void operator delete[](void*, const std::nothrow_t&) noexcept(true) { fail("called global operator delete[](nothrow)"); }
#pragma clang diagnostic pop
#endif
leak_dump_heap("HEAP AT leak_check"); \
} \
inuse = leak_inuse(); \
- if (inuse > _leak_start + n) { \
+ if (inuse > _leak_start + (n)) { \
fprintf(stderr, "BAD: %zu bytes leaked at %s:%u " \
"(try LEAK_HEAP and HANG_ON_LEAK to debug)\n", \
inuse - _leak_start, __FILE__, __LINE__); \
use strict;
use File::Basename;
+use Config;
+my $supportsParallelBuilds = $Config{useithreads};
+
+if ($supportsParallelBuilds) {
+ require threads;
+ import threads;
+ require Thread::Queue;
+ import Thread::Queue;
+}
+
# We use encode_json() to write BATS plist files.
# JSON::PP does not exist on iOS devices, but we need not write plists there.
# So we simply load JSON::PP if it exists.
JSON::PP->import();
}
+# iOS also doesn't have Text::Glob. We don't need it there.
+my $has_match_glob = 0;
+if (eval { require Text::Glob; 1; }) {
+ Text::Glob->import();
+ $has_match_glob = 1;
+}
+
chdir dirname $0;
chomp (my $DIR = `pwd`);
ARCH=<arch>
OS=<sdk name>[sdk version][-<deployment target>[-<run target>]]
ROOT=/path/to/project.roots/
+ HOST=<test device hostname>
+ DEVICE=<simulator test device name>
CC=<compiler name>
BATS=0|1 (build for and/or run in BATS?)
BUILD_SHARED_CACHE=0|1 (build a dyld shared cache with the root and test against that)
DYLD=2|3 (test in dyld 2 or dyld 3 mode)
+ PARALLELBUILDS=N (number of parallel builds to run simultaneously)
+ SHAREDCACHEDIR=/path/to/custom/shared/cache/directory
examples:
my $HOST;
my $PORT;
+my $DEVICE;
+
+my $PARALLELBUILDS;
+
+my $SHAREDCACHEDIR;
my @TESTLIBNAMES = ("libobjc.A.dylib", "libobjc-trampolines.dylib");
my $TESTLIBDIR = "/usr/lib";
# Run some newline-separated commands like `make` would, stopping if any fail
# run("cmd1 \n cmd2 \n cmd3")
sub make {
+ my ($cmdstr, $cwd) = @_;
my $output = "";
- my @cmds = split("\n", $_[0]);
+ my @cmds = split("\n", $cmdstr);
die if scalar(@cmds) == 0;
$? = 0;
foreach my $cmd (@cmds) {
chomp $cmd;
next if $cmd =~ /^\s*$/;
$cmd .= " 2>&1";
- print "$cmd\n" if $VERBOSE;
- eval {
- local $SIG{ALRM} = sub { die "alarm\n" };
- # Timeout after 600 seconds so a deadlocked test doesn't wedge the
- # entire test suite. Increase to an hour for B&I builds.
- if (exists $ENV{"RC_XBS"}) {
- alarm 3600;
- } else {
- alarm 600;
- }
- $output .= `$cmd`;
- alarm 0;
- };
- if ($@) {
- die unless $@ eq "alarm\n";
- $output .= "\nTIMED OUT";
+ if (defined $cwd) {
+ $cmd = "cd $cwd; $cmd";
}
+ print "$cmd\n" if $VERBOSE;
+ $output .= `$cmd`;
last if $?;
}
print "$output\n" if $VERBOSE;
sub rm_rf_verbose {
my $dir = shift || die;
- print "mkdir -p $dir\n" if $VERBOSE;
+ print "rm -rf $dir\n" if $VERBOSE;
`rm -rf '$dir'`;
die "couldn't rm -rf $dir" if $?;
}
# TEST_BUILD build instructions
# TEST_BUILD_OUTPUT expected build stdout/stderr
# TEST_RUN_OUTPUT expected run stdout/stderr
+ # TEST_ENTITLEMENTS path to entitlements file
open(my $in, "< $file") || die;
my $contents = join "", <$in>;
my ($conditionstring) = ($contents =~ /\bTEST_CONFIG\b(.*)$/m);
my ($envstring) = ($contents =~ /\bTEST_ENV\b(.*)$/m);
my ($cflags) = ($contents =~ /\bTEST_CFLAGS\b(.*)$/m);
+ my ($entitlements) = ($contents =~ /\bTEST_ENTITLEMENTS\b(.*)$/m);
+ $entitlements =~ s/^\s+|\s+$//g if defined($entitlements);
my ($buildcmd) = extract_multiline("TEST_BUILD", $contents, $name);
my ($builderror) = extract_multiple_multiline("TEST_BUILD_OUTPUT", $contents, $name);
my ($runerror) = extract_multiple_multiline("TEST_RUN_OUTPUT", $contents, $name);
- return 0 if !$test_h && !$disabled && !$crashes && !defined($conditionstring) && !defined($envstring) && !defined($cflags) && !defined($buildcmd) && !defined($builderror) && !defined($runerror);
+ return 0 if !$test_h && !$disabled && !$crashes && !defined($conditionstring)
+ && !defined($envstring) && !defined($cflags) && !defined($buildcmd)
+ && !defined($builderror) && !defined($runerror) && !defined($entitlements);
if ($disabled) {
colorprint $yellow, "SKIP: $name (disabled by $disabled)";
TEST_RUN => $run,
DSTDIR => "$C{DSTDIR}/$name.build",
OBJDIR => "$C{OBJDIR}/$name.build",
+ ENTITLEMENTS => $entitlements,
};
return 1;
my $name = shift;
my %T = %{$C{"TEST_$name"}};
- mkdir_verbose $T{DSTDIR};
- chdir_verbose $T{DSTDIR};
+ my $dstdir = $T{DSTDIR};
+ if (-e "$dstdir/build-succeeded") {
+ # We delete the whole test directory before building (if it existed),
+ # so if this file exists now, that means another configuration already
+ # did an equivalent build.
+ print "note: $name is already built at $dstdir, skipping the build\n" if $VERBOSE;
+ return 1;
+ }
+
+ mkdir_verbose $dstdir;
# we don't mkdir $T{OBJDIR} because most tests don't use it
my $ext = $ALL_TESTS{$name};
my $file = "$DIR/$name.$ext";
if ($T{TEST_CRASHES}) {
- `echo '$crashcatch' > crashcatch.c`;
- make("$C{COMPILE_C} -dynamiclib -o libcrashcatch.dylib -x c crashcatch.c");
- die "$?" if $?;
+ `echo '$crashcatch' > $dstdir/crashcatch.c`;
+ my $output = make("$C{COMPILE_C} -dynamiclib -o libcrashcatch.dylib -x c crashcatch.c", $dstdir);
+ if ($?) {
+ colorprint $red, "FAIL: building crashcatch.c";
+ colorprefix $red, $output;
+ return 0;
+ }
}
my $cmd = $T{TEST_BUILD} ? eval "return \"$T{TEST_BUILD}\"" : "$C{COMPILE} $T{TEST_CFLAGS} $file -o $name.exe";
- my $output = make($cmd);
+ my $output = make($cmd, $dstdir);
# ignore out-of-date text-based stubs (caused by ditto into SDK)
$output =~ s/ld: warning: text-based stub file.*\n//g;
$output =~ s/^warning: callee: [^\n]+\n//g;
# rdar://38710948
$output =~ s/ld: warning: ignoring file [^\n]*libclang_rt\.bridgeos\.a[^\n]*\n//g;
+ $output =~ s/ld: warning: building for iOS Simulator, but[^\n]*\n//g;
# ignore compiler logging of CCC_OVERRIDE_OPTIONS effects
if (defined $ENV{CCC_OVERRIDE_OPTIONS}) {
$output =~ s/### (CCC_OVERRIDE_OPTIONS:|Adding argument|Deleting argument|Replacing) [^\n]*\n//g;
}
if ($ok) {
- foreach my $file (glob("*.exe *.dylib *.bundle")) {
+ foreach my $file (glob("$dstdir/*.exe $dstdir/*.dylib $dstdir/*.bundle")) {
if (!$BATS) {
# not for BATS to save space and build time
# fixme use SYMROOT?
- make("xcrun dsymutil $file");
+ make("xcrun dsymutil $file", $dstdir);
}
if ($C{OS} eq "macosx" || $C{OS} =~ /simulator/) {
# setting any entitlements disables dyld environment variables
} else {
# get-task-allow entitlement is required
# to enable dyld environment variables
- make("xcrun codesign -s - --entitlements $DIR/get_task_allow_entitlement.plist $file");
- die "$?" if $?;
+ if (!$T{ENTITLEMENTS}) {
+ $T{ENTITLEMENTS} = "get_task_allow_entitlement.plist";
+ }
+ my $output = make("xcrun codesign -s - --entitlements $DIR/$T{ENTITLEMENTS} $file", $dstdir);
+ if ($?) {
+ colorprint $red, "FAIL: codesign $file";
+ colorprefix $red, $output;
+ return 0;
+ }
}
}
}
+ # Mark the build as successful so other configs with the same build
+ # requirements can skip building.
+ if ($ok) {
+ make("touch build-succeeded", $dstdir);
+ }
+
return $ok;
}
die "unknown DYLD setting $C{DYLD}";
}
+ if ($SHAREDCACHEDIR) {
+ $env .= " DYLD_SHARED_REGION=private DYLD_SHARED_CACHE_DIR=$SHAREDCACHEDIR";
+ }
+
my $output;
if ($C{ARCH} =~ /^arm/ && `uname -p` !~ /^arm/) {
$env .= " DYLD_INSERT_LIBRARIES=$remotedir/libcrashcatch.dylib";
}
- my $cmd = "ssh -p $PORT $HOST 'cd $remotedir && env $env ./$name.exe'";
+ my $cmd = "ssh $PORT $HOST 'cd $remotedir && env $env ./$name.exe'";
$output = make("$cmd");
}
elsif ($C{OS} =~ /simulator/) {
# run locally in a simulator
- # fixme selection of simulated OS version
- my $simdevice;
- if ($C{OS} =~ /iphonesimulator/) {
- $simdevice = 'iPhone X';
- } elsif ($C{OS} =~ /watchsimulator/) {
- $simdevice = 'Apple Watch Series 4 - 40mm';
- } elsif ($C{OS} =~ /tvsimulator/) {
- $simdevice = 'Apple TV 1080p';
- } else {
- die "unknown simulator $C{OS}\n";
- }
- my $sim = "xcrun -sdk iphonesimulator simctl spawn '$simdevice'";
+ my $sim = "xcrun -sdk iphonesimulator simctl spawn '$DEVICE'";
# Add test dir and libobjc's dir to DYLD_LIBRARY_PATH.
# Insert libcrashcatch.dylib if necessary.
$env .= " DYLD_LIBRARY_PATH=$testdir";
# set the config name now, after massaging the language and OS versions,
# but before adding other settings
- my $configname = config_name(%C);
- die if ($configname =~ /'/);
- die if ($configname =~ / /);
- ($C{NAME} = $configname) =~ s/~/ /g;
- (my $configdir = $configname) =~ s#/##g;
+ my $configdirname = config_dir_name(%C);
+ die if ($configdirname =~ /'/);
+ die if ($configdirname =~ / /);
+ ($C{NAME} = $configdirname) =~ s/~/ /g;
+ (my $configdir = $configdirname) =~ s#/##g;
$C{DSTDIR} = "$DSTROOT$BUILDDIR/$configdir";
$C{OBJDIR} = "$OBJROOT$BUILDDIR/$configdir";
$C{XCRUN} = "env LANG=C /usr/bin/xcrun -toolchain '$C{TOOLCHAIN}'";
$C{COMPILE_C} = "$C{XCRUN} '$C{CC}' $cflags -x c -std=gnu99";
- $C{COMPILE_CXX} = "$C{XCRUN} '$C{CXX}' $cflags -x c++";
+ $C{COMPILE_CXX} = "$C{XCRUN} '$C{CXX}' $cflags -x c++ -std=gnu++17";
$C{COMPILE_M} = "$C{XCRUN} '$C{CC}' $cflags $objcflags -x objective-c -std=gnu99";
- $C{COMPILE_MM} = "$C{XCRUN} '$C{CXX}' $cflags $objcflags -x objective-c++";
+ $C{COMPILE_MM} = "$C{XCRUN} '$C{CXX}' $cflags $objcflags -x objective-c++ -std=gnu++17";
$C{COMPILE_SWIFT} = "$C{XCRUN} '$C{SWIFT}' $swiftflags";
$C{COMPILE} = $C{COMPILE_C} if $C{LANGUAGE} eq "c";
return @newresults;
}
-sub config_name {
+sub config_dir_name {
my %config = @_;
my $name = "";
for my $key (sort keys %config) {
+ # Exclude settings that only influence the run, not the build.
+ next if $key eq "DYLD" || $key eq "GUARDMALLOC";
+
$name .= '~' if $name ne "";
$name .= "$key=$config{$key}";
}
sub rsync_ios {
my ($src, $timeout) = @_;
for (my $i = 0; $i < 10; $i++) {
- make("$DIR/timeout.pl $timeout rsync -e 'ssh -p $PORT' -av $src $HOST:/$REMOTEBASE/");
+ make("$DIR/timeout.pl $timeout rsync -e 'ssh $PORT' -av $src $HOST:/$REMOTEBASE/");
return if $? == 0;
colorprint $yellow, "WARN: RETRY\n" if $VERBOSE;
}
if ($ALL_TESTS{$test}) {
gather_simple(\%C, $test) || next; # not pass, not fail
push @gathertests, $test;
- } else {
- die "No test named '$test'\n";
+ } elsif ($has_match_glob) {
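+ # Treat the requested name as a shell-style glob and gather every
+ # matching test (requires Text::Glob).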
+ my @matched = Text::Glob::match_glob($test, (keys %ALL_TESTS));
+ if (not @matched) {
+ die "No test matched '$test'\n";
+ }
+ foreach my $match (@matched) {
+ gather_simple(\%C, $match) || next; # not pass, not fail
+ push @gathertests, $match;
+ }
}
}
if (!$BUILD) {
@builttests = @gathertests;
$testcount = scalar(@gathertests);
+ } elsif ($PARALLELBUILDS > 1 && $supportsParallelBuilds) {
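+ # Fan the builds out over a pool of worker threads: tests are fed through a
+ # work queue, each worker captures its own STDOUT/STDERR into a scalar, and
+ # results (including that captured output) come back through a results queue
+ # so logs stay grouped per test.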
+ my $workQueue = Thread::Queue->new();
+ my $resultsQueue = Thread::Queue->new();
+ my @threads = map {
+ threads->create(sub {
+ while (defined(my $test = $workQueue->dequeue())) {
+ local *STDOUT;
+ local *STDERR;
+ my $output;
+ open STDOUT, '>>', \$output;
+ open STDERR, '>>', \$output;
+
+ my $success = build_simple(\%C, $test);
+ $resultsQueue->enqueue({ test => $test, success => $success, output => $output });
+ }
+ });
+ } (1 .. $PARALLELBUILDS);
+
+ foreach my $test (@gathertests) {
+ if ($VERBOSE) {
+ print "\nBUILD $test\n";
+ }
+ if ($ALL_TESTS{$test}) {
+ $testcount++;
+ $workQueue->enqueue($test);
+ } else {
+ die "No test named '$test'\n";
+ }
+ }
+ $workQueue->end();
+ foreach (@gathertests) {
+ my $result = $resultsQueue->dequeue();
+ my $test = $result->{test};
+ my $success = $result->{success};
+ my $output = $result->{output};
+
+ print $output;
+ if ($success) {
+ push @builttests, $test;
+ } else {
+ $failcount++;
+ }
+ }
+ foreach my $thread (@threads) {
+ $thread->join();
+ }
} else {
+ if ($PARALLELBUILDS > 1) {
+ print "WARNING: requested parallel builds, but this perl interpreter does not support threads. Falling back to sequential builds.\n";
+ }
foreach my $test (@gathertests) {
if ($VERBOSE) {
print "\nBUILD $test\n";
# nothing to do
}
else {
- if ($C{ARCH} =~ /^arm/ && `uname -p` !~ /^arm/) {
+ if ($HOST && $C{ARCH} =~ /^arm/ && `uname -p` !~ /^arm/) {
# upload timeout - longer for slow watch devices
my $timeout = ($C{OS} =~ /watch/) ? 120 : 20;
$args{CC} = getargs("CC", "clang");
-$HOST = getarg("HOST", "iphone");
-$PORT = getarg("PORT", "10022");
+$HOST = getarg("HOST", 0);
+$PORT = getarg("PORT", "");
+if ($PORT) {
+ $PORT = "-p $PORT";
+}
+$DEVICE = getarg("DEVICE", "booted");
+
+$PARALLELBUILDS = getarg("PARALLELBUILDS", `sysctl -n hw.ncpu`);
+
+$SHAREDCACHEDIR = getarg("SHAREDCACHEDIR", "");
{
my $guardmalloc = getargs("GUARDMALLOC", 0);
}
}
+make("find $DSTROOT$BUILDDIR -name build-succeeded -delete", "/");
+
print "note: -----\n";
my $color = ($failconfigs ? $red : "");
colorprint $color, "note: $testconfigs configurations, " .
int main()
{
+ char *useClosures = getenv("DYLD_USE_CLOSURES");
+ int dyld3 = useClosures != NULL && useClosures[0] != '0';
+
objc_setForwardHandler((void*)&forward_handler, (void*)&forward_handler);
#if defined(__arm__) || defined(__arm64__)
#endif
leak_mark();
- while (count--) {
+ for (int i = 0; i < count; i++) {
cycle();
}
- leak_check(0);
+ // dyld3 currently leaks 8 bytes for each dlopen/dlclose pair, so accommodate it. rdar://problem/53769254
+ leak_check(dyld3 ? (count * sizeof(void *)) : 0);
// 5359412 Make sure dylibs with nothing other than image_info can close
void *dylib = dlopen("unload3.dylib", RTLD_LAZY);
int err = dlclose(dylib);
testassert(err == 0);
err = dlclose(dylib);
- testassert(err == -1); // already closed
+ // dyld3 doesn't error when dlclosing the dylib twice. This is probably expected. rdar://problem/53769374
+ if (!dyld3)
+ testassert(err == -1); // already closed
// Make sure dylibs with real objc content cannot close
dylib = dlopen("unload4.dylib", RTLD_LAZY);
err = dlclose(dylib);
testassert(err == 0);
err = dlclose(dylib);
- testassert(err == -1); // already closed
+ // dyld3 doesn't error when dlclosing the dylib twice. This is probably expected. rdar://problem/53769374
+ if (!dyld3)
+ testassert(err == -1); // already closed
succeed(__FILE__);
}
--- /dev/null
+/*
+ TEST_CONFIG MEM=mrc
+ TEST_ENV OBJC_DISABLE_NONPOINTER_ISA=YES
+*/
+
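+// Like -_noteAssociatedObjects, -_setWeaklyReferenced only fires for raw-isa
+// classes with custom retain/release, hence the nonpointer-isa override above
+// and the -release override below.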
+#include "test.h"
+#include "testroot.i"
+
+bool hasWeakRefs = false;
+
+@interface TestRoot (WeakHooks)
+@end
+
+@implementation TestRoot (WeakHooks)
+
+- (void)_setWeaklyReferenced {
+ hasWeakRefs = true;
+}
+
+// -_setWeaklyReferenced is currently limited to raw-isa custom-rr to avoid overhead
+- (void) release {
+}
+
+@end
+
+int main() {
+ id obj = [TestRoot new];
+ id wobj = nil;
+ objc_storeWeak(&wobj, obj);
+ testassert(hasWeakRefs == true);
+
+ id out = objc_loadWeak(&wobj);
+ testassert(out == obj);
+
+ objc_storeWeak(&wobj, nil);
+ out = objc_loadWeak(&wobj);
+ testassert(out == nil);
+
+ hasWeakRefs = false;
+ objc_storeWeak(&wobj, obj);
+ testassert(hasWeakRefs == true);
+
+
+ out = objc_loadWeak(&wobj);
+ testassert(out == obj);
+ objc_storeWeak(&wobj, nil);
+
+ succeed(__FILE__);
+}